K-means is a commonly used clustering algorithm. An advanced implementation is shown below; the full code follows:
import random
from sklearn import datasets
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Normalize the dataset X (scale each sample to unit Lp norm along the given axis)
def normalize(X, axis=-1, p=2):
    lp_norm = np.atleast_1d(np.linalg.norm(X, p, axis))
    lp_norm[lp_norm == 0] = 1
    return X / np.expand_dims(lp_norm, axis)
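# Quick check (illustrative, not part of the original script): each row is
# scaled to unit L2 norm by default.
# >>> normalize(np.array([[3.0, 4.0]]))
# array([[0.6, 0.8]])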
# Compute the squared Euclidean distance between one sample and every sample in X
def euclidean_distance(one_sample, X):
    one_sample = one_sample.reshape(1, -1)
    X = X.reshape(X.shape[0], -1)
    distances = np.power(np.tile(one_sample, (X.shape[0], 1)) - X, 2).sum(axis=1)
    return distances
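# Quick check (illustrative, not part of the original script):
# >>> euclidean_distance(np.array([0.0, 0.0]), np.array([[1.0, 0.0], [0.0, 2.0]]))
# array([1., 4.])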
class Kmeans():
    """K-means clustering algorithm.

    Parameters:
    -----------
    k: int
        The number of clusters.
    max_iterations: int
        The maximum number of iterations.
    varepsilon: float
        Convergence tolerance: the algorithm is considered converged when
        every one of the k centroids moves by less than varepsilon between
        the previous iteration and the current one.
    """
    def __init__(self, k=2, max_iterations=500, varepsilon=0.0001):
        self.k = k
        self.max_iterations = max_iterations
        self.varepsilon = varepsilon
    # Randomly pick self.k samples from the dataset as the initial centroids
    def init_random_centroids(self, X):
        n_samples, n_features = np.shape(X)
        centroids = np.zeros((self.k, n_features))
        for i in range(self.k):
            centroid = X[np.random.choice(range(n_samples))]
            centroids[i] = centroid
        return centroids
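    # Note (an observation not in the original code): np.random.choice is
    # called k times independently, so the same sample can be picked twice,
    # giving duplicate initial centroids and, later, an empty cluster.
    # Sampling without replacement, e.g.
    # np.random.choice(n_samples, self.k, replace=False), would avoid this.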
    # Return the index (in [0, self.k)) of the centroid closest to the sample
    def _closest_centroid(self, sample, centroids):
        distances = euclidean_distance(sample, centroids)
        closest_i = np.argmin(distances)
        return closest_i
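    # Illustration (not in the original): with centroids [[0, 0], [5, 5]] and
    # sample [1, 1], the squared distances are [2, 32], so this returns 0.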
    # Assign every sample to the cluster of its nearest centroid
    def create_clusters(self, centroids, X):
        clusters = [[] for _ in range(self.k)]
        for sample_i, sample in enumerate(X):
            centroid_i = self._closest_centroid(sample, centroids)
            clusters[centroid_i].append(sample_i)
        return clusters
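    # Note: clusters[i] holds the *indices* of the samples assigned to
    # centroid i, not the samples themselves; e.g. clusters == [[0, 2], [1]]
    # means samples 0 and 2 belong to cluster 0 and sample 1 to cluster 1.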
    # Update each centroid to the mean of the samples assigned to it
    def update_centroids(self, clusters, X):
        n_features = np.shape(X)[1]
        centroids = np.zeros((self.k, n_features))
        for i, cluster in enumerate(clusters):
            centroid = np.mean(X[cluster], axis=0)
            centroids[i] = centroid
        return centroids
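    # Note (an edge case not handled by the original code): if a cluster ends
    # up empty, np.mean over zero samples returns NaN and emits a
    # RuntimeWarning; a common guard is to re-seed such a centroid with a
    # randomly chosen sample instead.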
    # Turn the clusters into a label vector: each sample's label is the index
    # of the cluster it was assigned to
    def get_cluster_labels(self, clusters, X):
        y_pred = np.zeros(np.shape(X)[0])
        for cluster_i, cluster in enumerate(clusters):
            for sample_i in cluster:
                y_pred[sample_i] = cluster_i
        return y_pred
    # Run K-means on the whole dataset X and return the predicted cluster labels
    def predict(self, X):
        # Randomly pick self.k samples from the dataset as the initial centroids
        centroids = self.init_random_centroids(X)
        # Iterate until the algorithm converges (the centroids of two
        # consecutive iterations almost coincide) or until the maximum
        # number of iterations is reached
        for _ in range(self.max_iterations):
            # Assign every sample to the cluster of its nearest centroid
            clusters = self.create_clusters(centroids, X)
            former_centroids = centroids
            # Compute the new centroids
            centroids = self.update_centroids(clusters, X)
            # If every centroid moved by less than varepsilon, the algorithm
            # has converged, so stop iterating
            diff = centroids - former_centroids
            if np.abs(diff).max() < self.varepsilon:
                break
        return self.get_cluster_labels(clusters, X)
def main():
    # Generate a synthetic dataset: four Gaussian blobs in 3-D
    X, y = datasets.make_blobs(n_samples=10000,
                               n_features=3,
                               centers=[[3, 3, 3], [0, 0, 0], [1, 1, 1], [2, 2, 2]],
                               cluster_std=[0.2, 0.1, 0.2, 0.2],
                               random_state=9)
    # Cluster the data with K-means
    clf = Kmeans(k=4)
    y_pred = clf.predict(X)
    # Visualize the clustering result in 3-D, coloring points by predicted label
    fig = plt.figure(figsize=(12, 8))
    ax = fig.add_subplot(111, projection='3d')
    ax.view_init(elev=30, azim=20)
    for cluster_i in range(4):
        points = X[y_pred == cluster_i]
        ax.scatter(points[:, 0], points[:, 1], points[:, 2])
    plt.show()

if __name__ == "__main__":
    main()
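Since K-means assigns arbitrary indices to the clusters it finds, comparing y_pred to the true labels y element by element is meaningless; a permutation-invariant metric such as the adjusted Rand index is the usual way to score the result. A minimal sketch, assuming the Kmeans class defined above is in scope (adjusted_rand_score is sklearn's standard implementation):

from sklearn import datasets
from sklearn.metrics import adjusted_rand_score

# Regenerate a smaller version of the blobs used in main()
X, y = datasets.make_blobs(n_samples=1000, n_features=3,
                           centers=[[3, 3, 3], [0, 0, 0], [1, 1, 1], [2, 2, 2]],
                           cluster_std=[0.2, 0.1, 0.2, 0.2], random_state=9)
clf = Kmeans(k=4)  # the class defined above
y_pred = clf.predict(X)
# The ARI is 1.0 for a perfect clustering (up to relabeling) and close to 0
# for a random one, so the arbitrary cluster ordering does not matter
print("Adjusted Rand index:", adjusted_rand_score(y, y_pred))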
Result figure: a 3-D scatter plot of the four recovered clusters.
Note: the code in this post is not my own. I needed clustering for a project and tried nearly every version of this code posted on blogs; this is the one that ran without errors. Many thanks to its original author.