

#PCA with the vegan package
library(vegan)
dat <- read.delim('data.txt', row.names = 1, sep = '\t', stringsAsFactors = FALSE, check.names = FALSE)
pca <- rda(dat, scale = TRUE)
pca1 <- round(100*pca$CA$eig[1]/sum(pca$CA$eig), 2)
pca2 <- round(100*pca$CA$eig[2]/sum(pca$CA$eig), 2)
#PCA ordination plot with type I scaling
p <- ordiplot(pca, display = 'site', type = 'n', choices = c(1, 2), scaling = 1,
    xlab = paste('PCA1:', pca1, '%'), ylab = paste('PCA2:', pca2, '%'))
points(pca, display = 'site', choices = c(1, 2), scaling = 1, pch = 21, cex = 1.2,
    bg = c(rep('red', 5), rep('orange', 5), rep('green3', 5)), col = NA)
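#Optionally add a legend for the three colour-coded groups
#(the group labels 'A', 'B', 'C' below are placeholders; replace them with the real group names in data.txt)
legend('topright', legend = c('A', 'B', 'C'), pch = 21, pt.cex = 1.2,
    pt.bg = c('red', 'orange', 'green3'), bty = 'n')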
#Add a hierarchical clustering, e.g. UPGMA
tree <- hclust(dist(scale(dat), method = 'euclidean'), method = 'average')
plot(tree)  #view the dendrogram on its own
#Project the cluster tree onto the ordination (the ordination plot must be the active graphics device)
ordicluster(p, tree, col = 'gray')

The ordination is drawn as a two-dimensional scatter plot, with the cluster dendrogram projected onto that 2D plane.
If the hierarchical structure is hard to read in two dimensions, the FactoMineR package offers a three-dimensional visualization of the cluster tree, again demonstrated here with PCA.
#3D cluster tree with the FactoMineR package
#Reference: http://www.sthda.com/english/articles/31-principal-component-methods-in-r-practical-guide/117-hcpc-hierarchical-clustering-on-principal-components-essentials/
library(FactoMineR)
#PCA
res.pca <- PCA(dat, ncp = 3, scale.unit = TRUE, graph = FALSE)
#Add hierarchical clustering, e.g. UPGMA
res.hcpc <- HCPC(res.pca, metric = 'euclidean', method = 'average', graph = FALSE)
#3D plot of the cluster tree on the factor map
plot(res.hcpc, choice = '3D.map')
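The HCPC object also stores the final cluster assignment of every object, which is handy for reuse in other plots or tables. A minimal sketch of extracting it, relying on the documented data.clust element of the HCPC result:
#Cluster membership assigned by HCPC (column 'clust' of data.clust)
hcpc_cluster <- res.hcpc$data.clust$clust
table(hcpc_cluster)  #number of objects per cluster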

Compared with the two-dimensional plane above, the hierarchical structure is much clearer.
Of course, clustering results can also be displayed in other ways on an ordination.
The following are a few examples.
#Example: reduce dimensionality with PCoA and annotate fuzzy c-means clusters on the ordination
#Compute a distance matrix, here using Euclidean distance
dis_euc <- dist(scale(dat), method = 'euclidean')
#PCoA; in fact, PCoA on Euclidean distances is equivalent to PCA on the original (here scaled) data
pcoa <- cmdscale(dis_euc, k = nrow(dat) - 1, eig = TRUE)
eig_prop <- 100*pcoa$eig/sum(pcoa$eig)
pcoa_site <- pcoa$points[, 1:2]
plot(pcoa_site,
    xlab = paste('PCoA axis1:', round(eig_prop[1], 2), '%'),
    ylab = paste('PCoA axis2:', round(eig_prop[2], 2), '%'))
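#Optional sanity check of the equivalence noted above (a minimal sketch using base R's prcomp()):
#the principal coordinates should match the principal component scores of the scaled data,
#up to sign flips (reflections) of individual axes
pca_check <- prcomp(scale(dat))
max(abs(abs(pca_check$x[, 1:2]) - abs(pcoa_site)))  #expected to be ~0 (numerical tolerance)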
#Fuzzy c-means clustering into 3 clusters
library(cluster)
set.seed(123)
cm.fanny <- fanny(dis_euc, k = 3, diss = TRUE, maxit = 100)
#Outline each object's closest (hard) cluster with a convex hull
for (i in 1:3) {
    gg <- pcoa_site[cm.fanny$clustering == i, ]
    hpts <- chull(gg)
    hpts <- c(hpts, hpts[1])  #close the hull
    lines(gg[hpts, ], col = i + 1)  #one colour per cluster (2:4), matching the star segments below
}
#Star (segment) plots show each object's fuzzy membership values
stars(cm.fanny$membership, locations = pcoa_site, draw.segments = TRUE,
    add = TRUE, scale = FALSE, col.segments = 2:4, len = 0.5)
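Before plotting, the fanny object itself is worth a look: membership holds the fuzzy membership matrix, clustering the nearest hard assignment, and coeff Dunn's partition coefficient (values near 1/k indicate very fuzzy, values near 1 indicate crisp clusters). A minimal inspection sketch:
#Inspect the fuzzy clustering result
round(cm.fanny$membership, 3)  #membership of each object in each of the 3 clusters
cm.fanny$clustering            #nearest (hard) cluster of each object
cm.fanny$coeff                 #Dunn's partition coefficient and its normalized form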

For hard clustering methods, each object belongs to exactly one cluster, so drawing the group boundaries is straightforward: simply enclose each group with an ellipse or a polygon.
#Some approaches in ggplot2
library(ggplot2)
#Demonstrated with the PCA ordination obtained above
#First extract the object (site) scores, here the first two axes with type I scaling
pca_site <- data.frame(scores(pca, choices = 1:2, scaling = 1, display = 'site'))
#k-means clustering into 3 clusters
set.seed(123)
dat_kmeans <- kmeans(scale(dat), centers = 3, nstart = 25)
dat_kmeans$cluster
pca_site$cluster <- as.character(dat_kmeans$cluster)
#Draw the 2D scatter plot with ggplot2
p <- ggplot(data = pca_site, aes(x = PC1, y = PC2)) +
    geom_point(aes(color = cluster)) +
    scale_color_manual(values = c('red', 'blue', 'green3')) +
    theme(panel.grid = element_blank(), panel.background = element_rect(color = 'black', fill = 'transparent'), legend.key = element_rect(fill = 'transparent')) +
    labs(x = paste('PCA1:', pca1, '%'), y = paste('PCA2:', pca2, '%'))
#Add confidence ellipses, which can be used to indicate the object groups
p + stat_ellipse(aes(color = cluster), level = 0.95, linetype = 2, show.legend = FALSE)
p + stat_ellipse(aes(fill = cluster), geom = 'polygon', level = 0.95, alpha = 0.1, show.legend = FALSE) +
    scale_fill_manual(values = c('red', 'purple', 'green'))
#Connect objects of the same cluster together
source('geom_enterotype.r')  #available from https://pan.baidu.com/s/1KUA5owf4y13y1RJbff-l7g
p + geom_enterotype(aes(fill = cluster, color = cluster, label = cluster), show.legend = FALSE) +
    scale_fill_manual(values = c('#ffa6a9', '#e8b4fd', '#c7ffc4'))
#Enclose objects of the same cluster with a polygon (convex hull)
library(plyr)
cluster_border <- ddply(pca_site, 'cluster', function(df) df[chull(df[[1]], df[[2]]), ])
p + geom_polygon(data = cluster_border, aes(fill = cluster), alpha = 0.1, show.legend = FALSE) +
    scale_fill_manual(values = c('red', 'purple', 'green'))
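If the ggforce package is installed (an extra dependency not used elsewhere in this post), its mark geoms offer a ready-made alternative to the hand-built hull above; a hedged sketch:
#Alternative: ggforce's mark geoms draw padded group boundaries directly
library(ggforce)
p + geom_mark_ellipse(aes(fill = cluster, color = cluster), alpha = 0.1, show.legend = FALSE) +
    scale_fill_manual(values = c('red', 'purple', 'green'))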

The FactoMineR package mentioned above (together with factoextra) can also draw this kind of plot; we continue with the PCA plus hierarchical clustering result from earlier.
#Visualizing PCA + cluster tree with the FactoMineR result was shown above
#Replacing the cluster tree with group boundaries is another common visualization
library(factoextra)
library(FactoMineR)
fviz_dend(res.hcpc,
    cex = 0.7,                      # label size
    palette = 'jco',                # color palette, see ?ggpubr::ggpar
    rect = TRUE, rect_fill = TRUE,  # add rectangles around groups
    rect_border = 'jco',            # rectangle color
    labels_track_height = 0.8)      # augment the room for labels
fviz_cluster(res.hcpc,
    repel = TRUE,                   # avoid label overlapping
    show.clust.cent = TRUE,         # show cluster centers
    palette = 'jco',                # color palette, see ?ggpubr::ggpar
    ggtheme = theme_minimal(),
    main = 'Factor map')
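fviz_cluster() is not restricted to HCPC objects; it also accepts partitioning results such as k-means together with the data they were computed on. A minimal sketch reusing the k-means result obtained above on the scaled data:
#fviz_cluster() applied directly to the k-means partition plus its input data
fviz_cluster(dat_kmeans, data = scale(dat),
    palette = 'jco', ellipse.type = 'convex',
    repel = TRUE, ggtheme = theme_minimal())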

#Again demonstrated with the PCA ordination obtained above
#First extract the object (site) scores, here the first three axes with type I scaling
pca_site <- data.frame(scores(pca, choices = 1:3, scaling = 1, display = 'site'))
pca1 <- round(100*pca$CA$eig[1]/sum(pca$CA$eig), 2)
pca2 <- round(100*pca$CA$eig[2]/sum(pca$CA$eig), 2)
pca3 <- round(100*pca$CA$eig[3]/sum(pca$CA$eig), 2)
#k-means clustering into 3 clusters
set.seed(123)
dat_kmeans <- kmeans(scale(dat), centers = 3, nstart = 25)
dat_kmeans$cluster
pca_site$cluster <- as.character(dat_kmeans$cluster)
#Interactive 3D plot with the car and rgl packages
library(car)
library(rgl)
scatter3d(pca_site$PC1, pca_site$PC2, pca_site$PC3, groups = as.factor(pca_site$cluster), surface = FALSE, ellipsoid = TRUE,
    xlab = paste('PCA1:', pca1, '%'), ylab = paste('PCA2:', pca2, '%'), zlab = paste('PCA3:', pca3, '%'))
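The rgl window opened by scatter3d() is interactive but temporary; rgl's export helpers can keep a static snapshot or a self-contained HTML widget of the current view. A minimal sketch (the output file names are placeholders):
#Export the current rgl scene
snapshot3d('pca_kmeans_3d.png')  #static PNG; the file name is a placeholder
htmlwidgets::saveWidget(rglwidget(), 'pca_kmeans_3d.html')  #interactive HTML; requires the htmlwidgets package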




