- 容易實現
- 可能收斂於局部最小值(局部最優解);在大數據集上收斂較慢
- 適用於數值類型數據
- 用戶給定分組數量
隨機選擇K個起點作為質心
計算每一個數據點到每一個質心的距離
將數據點分配到最近的簇
對每一個簇計算簇中數據點的均值作為新的質心,重複以上步驟,直到每一個數據點所在的簇不再發生變化
python代碼
def loadDataSet(fileName):
    """Parse a tab-delimited text file of floats into a list of rows.

    Args:
        fileName: path to a tab-delimited file; every field must parse as float.

    Returns:
        list of lists of floats, one inner list per line of the file.
    """
    dataMat = []
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            # list(...) so the row is a concrete list under Python 3,
            # where map() returns a lazy iterator, not a list.
            fltLine = list(map(float, curLine))
            dataMat.append(fltLine)
    return dataMat
def distEclud(vecA, vecB):
    """Return the Euclidean distance between two row vectors.

    Equivalent to la.norm(vecA - vecB) for 1-row numpy matrices.
    """
    delta = vecA - vecB
    return sqrt(power(delta, 2).sum())
def randCent(dataSet, k):
    """Build k random initial centroids inside the bounding box of dataSet.

    For each dimension, coordinates are drawn uniformly between that
    column's minimum and maximum, so every centroid lies within the data.

    Returns a (k, n) numpy matrix of centroids.
    """
    numFeatures = shape(dataSet)[1]
    centroids = mat(zeros((k, numFeatures)))
    # Fill one dimension at a time: uniform draw in [column min, column max).
    for dim in range(numFeatures):
        lo = min(dataSet[:, dim])
        span = float(max(dataSet[:, dim]) - lo)
        centroids[:, dim] = mat(lo + span * random.rand(k, 1))
    return centroids
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
    """Cluster dataSet into k groups using Lloyd's K-means algorithm.

    Args:
        dataSet: (m, n) numpy matrix, one data point per row.
        k: number of clusters to form.
        distMeas: distance function taking two row vectors.
        createCent: function (dataSet, k) -> initial (k, n) centroid matrix.

    Returns:
        (centroids, clusterAssment): the final (k, n) centroid matrix and
        an (m, 2) matrix whose columns are [cluster index, squared distance]
        for each data point.
    """
    m = shape(dataSet)[0]
    # Column 0: assigned cluster index; column 1: squared error (dist ** 2).
    clusterAssment = mat(zeros((m, 2)))
    centroids = createCent(dataSet, k)
    clusterChanged = True
    while clusterChanged:  # iterate until no point changes cluster
        clusterChanged = False
        for i in range(m):  # assign each point to its nearest centroid
            minDist = inf; minIndex = -1
            for j in range(k):
                distJI = distMeas(centroids[j, :], dataSet[i, :])
                if distJI < minDist:
                    minDist = distJI; minIndex = j
            if clusterAssment[i, 0] != minIndex:
                clusterChanged = True
            clusterAssment[i, :] = minIndex, minDist ** 2
        # Parenthesized print is valid in both Python 2 and Python 3
        # (the original bare print statement is a syntax error on Py3).
        print(centroids)
        for cent in range(k):  # recompute each centroid as its cluster mean
            ptsInClust = dataSet[nonzero(clusterAssment[:, 0].A == cent)[0]]
            # Guard: mean() of an empty cluster yields NaN; keep the previous
            # centroid instead so the algorithm stays well-defined.
            if len(ptsInClust) > 0:
                centroids[cent, :] = mean(ptsInClust, axis=0)
    return centroids, clusterAssment
# Demo: load the book's sample data set and cluster it into k=2 groups.
# NOTE(review): requires 'testSet.txt' in the working directory — runs at import time.
datas = mat(loadDataSet('testSet.txt'))
centroids,clusterAssment = kMeans(datas, 2)
透過SSE(誤差平方和,Sum of Squared Errors)來評判聚類結果
- 即 clusterAssment 第二列(各點到其質心距離的平方)之和;第一列存的是簇索引
- 對誤差取平方,代表更加重視遠離質心的點
降低SSE的方法:
- 增加k值(不符合初衷)
- 合併靠近的簇,然後重新計算 k 為 2 的質心
- 二分K-均值(見10.3章)