1. Biplot
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to mean 0 and standard deviation 1; StandardScaler works too
# In general, it's a good idea to scale the data prior to PCA.
# scaler = StandardScaler()
# scaler.fit(data)
# data = scaler.transform(data)
pca = PCA()
x_new = pca.fit_transform(data)
def myplot(score, coeff, labels=None):
    # score: observations in PC space; coeff: loadings, one row per original variable
    xs = score[:, 0]
    ys = score[:, 1]
    n = coeff.shape[0]
    # rescale the scores so they fit into the [-1, 1] square together with the loading arrows
    scalex = 1.0 / (xs.max() - xs.min())
    scaley = 1.0 / (ys.max() - ys.min())
    plt.scatter(xs * scalex, ys * scaley, c=y)
    for i in range(n):
        plt.arrow(0, 0, coeff[i, 0], coeff[i, 1], color='r', alpha=1,
                  head_width=0.04, head_length=0.03, overhang=1)
        if labels is None:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, "Var" + str(i + 1), color='g', ha='center', va='center')
        else:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, labels[i], color='g', ha='center', va='center')
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.xlabel("PC{}".format(1))
    plt.ylabel("PC{}".format(2))
    plt.grid()
# Call the function. Use only the first 2 PCs.
myplot(x_new[:, 0:2], np.transpose(pca.components_[0:2, :]),
       ["a" + str(i + 1) for i in range(data.shape[1])])  # one label per original feature
plt.show()
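As a small variation (reusing myplot, pca, x_new and iris defined above), the actual iris feature names can be passed as the labels instead of the generic a1, a2, ...:
myplot(x_new[:, 0:2], np.transpose(pca.components_[0:2, :]), iris.feature_names)
plt.show()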
With a legend
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to mean 0 and standard deviation 1; StandardScaler works too
# In general, it's a good idea to scale the data prior to PCA.
# scaler = StandardScaler()
# scaler.fit(data)
# data = scaler.transform(data)
pca = PCA()
x_new = pca.fit_transform(data)
def myplot(score, coeff, labels=None):
    xs = score[:, 0]
    ys = score[:, 1]
    n = coeff.shape[0]
    scalex = 1.0 / (xs.max() - xs.min())
    scaley = 1.0 / (ys.max() - ys.min())
    # plot the three iris classes separately so each gets its own legend entry
    for i in range(3):
        plt.scatter(xs[y == i] * scalex,
                    ys[y == i] * scaley,
                    linewidth=0.01, label=i)
    for i in range(n):
        plt.arrow(0, 0, coeff[i, 0], coeff[i, 1], color='r', alpha=1,
                  head_width=0.04, head_length=0.03, overhang=1)
        if labels is None:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, "Var" + str(i + 1), color='g', ha='center', va='center')
        else:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, labels[i], color='g', ha='center', va='center')
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.xlabel("PC{}".format(1))
    plt.ylabel("PC{}".format(2))
    plt.grid()
# Call the function. Use only the first 2 PCs.
myplot(x_new[:, 0:2], np.transpose(pca.components_[0:2, :]),
       ["a" + str(i + 1) for i in range(data.shape[1])])  # one label per original feature
plt.legend()
plt.show()
Marking confidence ellipses for each class
from matplotlib.patches import Ellipse
from sklearn.decomposition import PCA
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
def plot_point_cov(points, nstd=3, ax=None, **kwargs):
    # use the mean of all points as the centre of the ellipse
    pos = points.mean(axis=0)
    # covariance of the points
    cov = np.cov(points, rowvar=False)
    return plot_cov_ellipse(cov, pos, nstd, ax, **kwargs)

def plot_cov_ellipse(cov, pos, nstd=3, ax=None, **kwargs):
    def eigsorted(cov):
        cov = np.array(cov)
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]
    if ax is None:
        ax = plt.gca()
    vals, vecs = eigsorted(cov)
    # the ellipse orientation follows the leading eigenvector of the covariance matrix
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    # axis lengths correspond to nstd standard deviations along each principal direction
    width, height = 2 * nstd * np.sqrt(vals)
    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta, **kwargs)
    ax.add_artist(ellip)
    return ellip
# draw the confidence ellipses
def show_ellipse(X_pca, y, pca, feature_label=None):
    # colours and legend labels for the three iris classes
    colors = ['tab:blue', 'tab:orange', 'seagreen']
    category_label = ['setosa', 'versicolor', 'virginica']
    # figure resolution and size
    plt.figure(dpi=100, figsize=(8, 6))
    xs = X_pca[:, 0]
    ys = X_pca[:, 1]
    scalex = 1.0 / (xs.max() - xs.min())
    scaley = 1.0 / (ys.max() - ys.min())
    xs = xs * scalex
    ys = ys * scaley
    data = np.concatenate((xs[:, None], ys[:, None]), 1)
    # one scatter group plus one ellipse per class (three classes here)
    for i in range(max(y) + 1):
        plt.plot(data[:, 0][y == i], data[:, 1][y == i], '.', color=colors[i], label=category_label[i], markersize=8)
        plot_point_cov(data[y == i], nstd=3, alpha=0.25, color=colors[i])
    plt.plot([0, 0], [-1, 1], '--', lw=1, color='#cccccc')
    plt.plot([-1, 1], [0, 0], '--', lw=1, color='#cccccc')
    coeff = np.transpose(pca.components_[0:2, :])
    for i in range(coeff.shape[0]):
        plt.arrow(0, 0, coeff[i, 0], coeff[i, 1], color='r', alpha=1,
                  head_width=0.04, head_length=0.03, overhang=1)
        if feature_label is None:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, "Var" + str(i + 1), color='g', ha='center', va='center')
        else:
            plt.text(coeff[i, 0] * 1.15, coeff[i, 1] * 1.15, feature_label[i], color='g', ha='center', va='center')
    # axes
    plt.xlim(-1, 1)
    plt.ylim(-1, 1)
    plt.xticks(size=10, family='Times New Roman')
    plt.yticks(size=10, family='Times New Roman')
    font = {'family': 'Times New Roman', 'size': 10}
    plt.xlabel('PC1 ({} %)'.format(round(pca.explained_variance_ratio_[0] * 100, 2)), font)
    plt.ylabel('PC2 ({} %)'.format(round(pca.explained_variance_ratio_[1] * 100, 2)), font)
    plt.legend(prop={"family": "Times New Roman", "size": 8}, loc='upper right')
    plt.show()
if __name__ == '__main__':
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target
    X = (X - np.mean(X, axis=0)) / np.std(X, axis=0)  # scale each variable to mean 0 and standard deviation 1; StandardScaler works too
    pca = PCA()
    x_new = pca.fit_transform(X)
    show_ellipse(x_new, y, pca)
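Note that plot_point_cov is called with nstd=3, so the ellipses above are 3-standard-deviation ellipses, which enclose considerably more than 95% of a bivariate normal point cloud. If a true 95% confidence ellipse is wanted, one option (a minimal sketch, assuming scipy is available) is to derive nstd from the chi-squared distribution with 2 degrees of freedom and pass it in instead:
from scipy.stats import chi2

nstd_95 = np.sqrt(chi2.ppf(0.95, df=2))   # Mahalanobis radius enclosing 95% of a 2-D Gaussian, about 2.45
# then, inside show_ellipse:
# plot_point_cov(data[y == i], nstd=nstd_95, alpha=0.25, color=colors[i])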
2. Scree plot
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to mean 0 and standard deviation 1; StandardScaler works too
# bootstrap resampling
np.random.seed(0)
sample = data[np.random.randint(0, 100, 100)]   # draw 100 observations with replacement
var = []
for i in range(500):
    # refit PCA on each bootstrap resample and record the explained-variance ratios
    sample_n = sample[np.random.randint(0, 100, 100)]
    pca = PCA()
    pca.fit(sample_n)
    var.append(pca.explained_variance_ratio_)
var = np.array(var)
# error bars show the spread of the explained-variance ratio across the 500 resamples
plt.errorbar(np.linspace(1, data.shape[1], data.shape[1]), np.mean(var, axis=0), yerr=np.std(var, axis=0),
             lw=2, elinewidth=1.5, ms=5, capsize=3, fmt='b-o')  # in 'b-o', the letter sets the line colour and 'o' the marker style
# print(pca.components_)
# print(pca.explained_variance_ratio_)
# print(np.mean(pca.components_,axis=1).sum())
# plt.plot(pca.explained_variance_ratio_,marker='o')
# plt.legend()
plt.show()
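A common companion plot (not in the original post; a minimal sketch reusing the standardized iris data from above) is the cumulative explained-variance curve, which shows how many components are needed to reach a given share of the total variance:
pca_full = PCA().fit(data)                       # PCA on the full standardized data, without bootstrapping
cum_var = np.cumsum(pca_full.explained_variance_ratio_)
plt.plot(range(1, data.shape[1] + 1), cum_var, marker='o')
plt.axhline(0.9, ls='--', color='grey')          # reference line at 90% of the variance
plt.xlabel("Number of components")
plt.ylabel("Cumulative explained variance")
plt.show()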
Scree plot with jittered points overlaid
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
data = np.random.random((1000,10))
y = np.random.randint(0,6,1000)
# iris = datasets.load_iris()
# data = iris.data
# y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to mean 0 and standard deviation 1; StandardScaler works too
# bootstrap resampling
np.random.seed(0)
sample = data[np.random.randint(0, 100, 100)]   # draw 100 observations with replacement
var = []
for i in range(500):
    sample_n = sample[np.random.randint(0, 100, 100)]
    pca = PCA()
    pca.fit(sample_n)
    var.append(pca.explained_variance_ratio_)
var = np.array(var)
plt.errorbar(np.linspace(1, data.shape[1], data.shape[1]), np.mean(var, axis=0), yerr=np.std(var, axis=0),
             lw=2, elinewidth=1.5, ms=5, capsize=5, fmt='b-o')  # in 'b-o', the letter sets the line colour and 'o' the marker style
# scatter the individual bootstrap results, with a small horizontal jitter so the points do not overlap
x_jittered = np.random.uniform(-0.1, 0.1, size=var.shape[0] * var.shape[1])
cc = np.repeat(np.linspace(1, data.shape[1], data.shape[1]), var.shape[0]) + x_jittered
plt.scatter(cc, var.T.reshape(-1), c="#cccccc", marker=".", alpha=0.5, linewidths=0)
"""
# alternatively:
for i, d in enumerate(var.T):
    x_ = (i + 1) + np.random.uniform(-0.1, 0.1, size=var.shape[0])
    plt.scatter(x_, d, c="#cccccc", marker=".", alpha=0.5, linewidths=0)
"""
# print(pca.components_)
# print(pca.explained_variance_ratio_)
# print(np.mean(pca.components_,axis=1).sum())
# plt.plot(pca.explained_variance_ratio_,marker='o')
# plt.legend()
plt.show()
3. Variable loadings plot
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to mean 0 and standard deviation 1; StandardScaler works too
# fix the random seed
np.random.seed(0)
pca = PCA()
x_new = pca.fit_transform(data)
# plot the loadings
fig, ax = plt.subplots()
b = ax.barh(range(1, data.shape[1] + 1), pca.components_[0], color='#6699CC')  # first principal component
# b = ax.barh(range(1, data.shape[1] + 1), pca.components_[1], color='#6699CC')  # second principal component
plt.show()
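A caveat: the rows of pca.components_ are unit-length eigenvectors. In some conventions, "loadings" are these eigenvectors scaled by the square root of the corresponding explained variance, so that the bars also reflect how much variance each component carries. If that convention is preferred, a minimal sketch (reusing the pca object fitted above) is:
loadings = pca.components_.T * np.sqrt(pca.explained_variance_)   # shape: (n_features, n_components)
fig, ax = plt.subplots()
ax.barh(range(1, data.shape[1] + 1), loadings[:, 0], color='#6699CC')   # scaled loadings on the first principal component
plt.show()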
4. Variable contribution plot
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to mean 0 and standard deviation 1; StandardScaler works too
# fix the random seed
np.random.seed(0)
pca = PCA()
x_new = pca.fit_transform(data)
# explained-variance ratio of each principal component
explained_variance_ratio = pca.explained_variance_ratio_
# contribution of each variable to each principal component
variable_contribution = np.multiply(explained_variance_ratio[:, np.newaxis], pca.components_ ** 2)
def contri(x):
    # normalise each row so the contributions of all variables to one PC sum to 1
    total_ = np.sum(x, axis=1, keepdims=True)
    return x / total_
# convert to percentages
variable_contribution = contri(variable_contribution) * 100
# plot the variable contributions
fig, ax = plt.subplots()
b = ax.barh(range(1, data.shape[1] + 1), variable_contribution[0, :], color='#6699CC')  # first principal component
# b = ax.barh(range(1, data.shape[1] + 1), variable_contribution[1, :], color='#6699CC')  # second principal component
plt.show()
Note:
The variable contribution plot is really just built from each variable's coordinates on the principal components shown in the biplot, i.e. the entries of the eigenvectors. When drawing it, squaring the eigenvector entries is enough (squaring also guarantees non-negative values). The result above is correct as well, because the two computations give identical numbers: each row of pca.components_ is a unit vector, so its squared entries already sum to 1, and weighting by the explained-variance ratio and then renormalising each row simply cancels out.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn import datasets
# data = np.random.random((1000,10))
# y = np.random.randint(0,6,1000)
iris = datasets.load_iris()
data = iris.data
y = iris.target
data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)  # scale each variable to mean 0 and standard deviation 1; StandardScaler works too
# fix the random seed
np.random.seed(0)
pca = PCA()
x_new = pca.fit_transform(data)
# explained-variance ratio of each principal component
explained_variance_ratio = pca.explained_variance_ratio_
# plot the variable contributions directly from the squared eigenvector entries
fig, ax = plt.subplots()
b = ax.barh(range(1, data.shape[1] + 1), (pca.components_ ** 2)[0, :], color='#6699CC')  # first principal component
# b = ax.barh(range(1, data.shape[1] + 1), (pca.components_ ** 2)[1, :], color='#6699CC')  # second principal component
plt.show()
# pca.components_ ** 2 matches the variable_contribution computed in the previous snippet
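A quick numerical check of that equivalence, reusing the pca object fitted above:
weighted = pca.explained_variance_ratio_[:, np.newaxis] * pca.components_ ** 2
weighted = weighted / weighted.sum(axis=1, keepdims=True)
print(np.allclose(weighted, pca.components_ ** 2))   # prints True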
5. Appendix
The plotting style follows a paper in Nature Communications.
Reference: Leaf-level coordination principles propagate to the ecosystem scale (https://doi.org/10.1038/s41467-023-39572-5); see its principal component analysis figure.
R code and data for this figure: PCA biplot and scree plot R code and data.
6. Note (important)
In principal component analysis, the variables are usually scaled (i.e. standardized). This is particularly recommended when the variables are measured on different scales (e.g. kilograms, kilometres, centimetres, ...); otherwise the PCA output will be severely affected.
The goal is to make the variables comparable. Typically, the variables are scaled to have a standard deviation of 1 and a mean of zero.
Standardizing the data in this way is widely used in gene expression analysis before PCA and clustering. We may also want to scale the data when the variables' means and/or standard deviations differ greatly.
When scaling a variable, the data are transformed as (x - mean(x)) / sd(x), where mean(x) is the mean of x and sd(x) is its standard deviation.
Note that when we run PCA in R or in Origin, the data are standardized automatically by default, while scikit-learn's PCA does not do this, so we need to standardize manually to keep the results consistent.
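One way to make sure the standardization step is never forgotten in Python is to chain StandardScaler and PCA in a scikit-learn pipeline; a minimal sketch:
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

pipe = make_pipeline(StandardScaler(), PCA())              # scaling and PCA bundled together
scores = pipe.fit_transform(datasets.load_iris().data)     # standardized scores in PC space
print(pipe.named_steps['pca'].explained_variance_ratio_)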