學習cluster以前,須要瞭解process相關的知識,若是不瞭解的話建議先閱讀process模塊、child_process模塊。
cluster藉助child_process模塊的fork()方法來建立子進程,經過fork方式建立的子進程與父進程之間創建了IPC通道,支持雙向通訊。
cluster模塊最先出如今node.js v0.8版本中windows
Node.js是單線程的,那麼若是但願利用服務器的多核的資源的話,就應該多建立幾個進程,由多個進程共同提供服務。若是直接採用下列方式啓動多個服務的話,會提示端口占用。
const http = require('http');

// Respond 200 "hello world" to every request.
const onRequest = (req, res) => {
  res.writeHead(200);
  res.end('hello world\n');
};

const server = http.createServer(onRequest);
server.listen(8000);
// 啓動第一個服務 node index.js &
// 啓動第二個服務 node index.js &
throw er; // Unhandled 'error' event
^
Error: listen EADDRINUSE :::8000
at Server.setupListenHandle [as _listen2] (net.js:1330:14)
at listenInCluster (net.js:1378:12)
at Server.listen (net.js:1465:7)
at Object.<anonymous> (/Users/xiji/workspace/learn/node-basic/cluster/simple.js:5:4)
at Module._compile (internal/modules/cjs/loader.js:702:30)
at Object.Module._extensions..js (internal/modules/cjs/loader.js:713:10)
at Module.load (internal/modules/cjs/loader.js:612:32)
at tryModuleLoad (internal/modules/cjs/loader.js:551:12)
at Function.Module._load (internal/modules/cjs/loader.js:543:3)
at Function.Module.runMain (internal/modules/cjs/loader.js:744:10)
複製代碼
若是改用cluster的話就沒有問題。
const cluster = require('cluster');
const http = require('http');
const numCPUs = require('os').cpus().length;

if (cluster.isMaster) {
  // Master process: fork one worker per CPU core and log worker exits.
  console.log(`Master ${process.pid} is running`);

  for (let i = 0; i < numCPUs; i += 1) {
    cluster.fork();
  }

  cluster.on('exit', (worker, code, signal) => {
    console.log(`worker ${worker.process.pid} died`);
  });
} else {
  // Worker process: every worker "listens" on the same port; the master
  // owns the real socket and distributes incoming connections.
  const server = http.createServer((req, res) => {
    res.writeHead(200);
    res.end('hello world\n');
  });
  server.listen(8000);
  console.log(`Worker ${process.pid} started`);
}
// node index.js 執行完啓動了一個主進程和8個子進程(子進程數與cpu核數相一致)
Master 11851 is running
Worker 11852 started
Worker 11854 started
Worker 11853 started
Worker 11855 started
Worker 11857 started
Worker 11858 started
Worker 11856 started
Worker 11859 started
複製代碼
cluster建立的進程分兩種,父進程和子進程,父進程只有一個,子進程有多個(通常根據cpu核數建立)。
有三個問題須要回答:① 爲何多個子進程監聽同一個端口不會報錯?② 用戶請求如何被分發到各個子進程(負載均衡)?③ 各個進程之間是否能夠通訊?
net.js源碼中的listen方法經過listenInCluster方法來區分是父進程仍是子進程,不一樣進程的差別在listenInCluster方法中體現。
// Simplified from lib/net.js: decides whether this process binds the
// port itself, or asks the cluster master for a handle instead.
// Fixes vs. the quoted snippet: the last parameter was misspelled
// `excluseive` while the body reads `exclusive`, and the elided object
// literal (`......`) was invalid syntax.
function listenInCluster(server, address, port, addressType, backlog, fd, exclusive) {
  if (cluster.isMaster || exclusive) {
    // Master (or exclusive listen): actually bind the port.
    server._listen2(address, port, addressType, backlog, fd);
    return;
  }
  // Worker: send a queryServer request to the master instead of binding.
  const serverQuery = { address: address /* , port, addressType, fd, flags … */ };
  cluster._getServer(server, serverQuery, listenOnMasterHandle);
  function listenOnMasterHandle(err, handle) {
    // Adopt the handle the master replied with (possibly a fake TCP
    // wrapper), then run the normal listen path — which now no longer
    // binds the port itself.
    server._handle = handle;
    server._listen2(address, port, addressType, backlog, fd);
  }
}
複製代碼
上面是精簡過的代碼,當子進程調用listen方法時,會先執行_getServer,而後經過callback的形式指定server._handle的值,以後再調用_listen2方法。
// Simplified from lib/internal/cluster/child.js.
// A worker calls this instead of binding the port itself: it sends an
// act:'queryServer' message to the master, and the master's reply
// decides how the worker will "listen".
cluster._getServer = function(obj, options, cb) {
...
// Build the queryServer request; `indexes` tracks repeated listen()
// calls on the same address within this worker.
const message = util._extend({
act: 'queryServer',
index: indexes[indexesKey],
data: null
}, options);
message.address = address;
// Ask the master. A reply WITH a handle means the listen socket is
// shared between workers (SCHED_NONE); no handle means the master keeps
// the socket and round-robins connections (SCHED_RR), so the worker
// builds a fake handle via rr().
send(message, (reply, handle) => {
if (handle)
shared(reply, handle, indexesKey, cb); // Shared listen socket.
else
rr(reply, indexesKey, cb); // Round-robin.
});
...
};
複製代碼
_getServer方法會向主進程發送queryServer的message,父進程執行完會調用回調函數,根據是否返回handle來區分是調用shared方法仍是rr方法,這裏實際上是會調用rr方法。而rr方法的主要做用就是僞造了TCPWrapper來調用net的listenOnMasterHandle回調函數。
// Simplified from lib/internal/cluster/child.js: builds a *fake* TCP
// handle for a round-robin worker. The master owns the real socket, so
// the worker's listen() is a no-op; close() tells the master to drop
// this listener.
function rr(message, indexesKey, cb) {
  let key = message.key;

  // The worker never binds the port itself, so "listening" succeeds
  // immediately as a no-op.
  const listen = (backlog) => 0;

  // Notify the master that this listener is gone, then forget the
  // local bookkeeping. Idempotent: a second call is a no-op.
  const close = () => {
    if (key === undefined) {
      return;
    }
    send({ act: 'close', key });
    delete handles[key];
    delete indexes[indexesKey];
    key = undefined;
  };

  const getsockname = (out) => {
    if (key) {
      util._extend(out, message.sockname);
    }
    return 0;
  };

  // The fake TCP wrapper handed back to net.js in place of a real
  // bound socket.
  const handle = { close, listen, ref: noop, unref: noop };
  handles[key] = handle;
  cb(0, handle);
}
複製代碼
因爲子進程的server拿到的是僞造的TCPWrapper,當調用listen方法時並不會執行任何操做,因此在子進程中調用listen方法並不會綁定端口,於是也並不會報錯。
在子進程發送給父進程的queryServer message時,父進程會檢測是否建立了TCP Server,若是沒有的話就會建立TCP Server並綁定端口,而後再把子進程記錄下來,方便後續的用戶請求worker分發。
父進程因爲綁定了端口號,因此能夠捕獲鏈接請求,父進程的onconnection方法會被觸發,onconnection方法觸發時會傳遞TCP對象參數,因爲以前父進程記錄了全部的worker,因此父進程能夠選擇要處理請求的worker,而後經過向worker發送act爲newconn的消息,並傳遞TCP對象,子進程監聽到消息後,對傳遞過來的TCP對象進行封裝,封裝成socket,而後觸發connection事件。這樣就實現了子進程雖然不監聽端口,可是依然能夠處理用戶請求的目的。
負載均衡直接依賴cluster的請求調度策略,在v6.0版本以前,cluster的調度策略採用的是cluster.SCHED_NONE(依賴於操做系統),SCHED_NONE理論上來講性能最好(Fernando Micalli寫過一篇Node.js 6.0版本的cluster和iptables以及nginx性能對比的文章,點此訪問)可是從實際角度發現,在請求調度方面會出現不太均勻的狀況(可能出現8個子進程中的其中2到3個處理了70%的鏈接請求)。所以在6.0版本中Node.js增長了cluster.SCHED_RR(round-robin),目前已成爲默認的調度策略(除了windows環境)。
能夠經過設置NODE_CLUSTER_SCHED_POLICY
環境變量來修改調度策略
NODE_CLUSTER_SCHED_POLICY='rr'
NODE_CLUSTER_SCHED_POLICY='none'
複製代碼
或者設置cluster的schedulingPolicy
屬性
cluster.schedulingPolicy = cluster.SCHED_NONE;
cluster.schedulingPolicy = cluster.SCHED_RR;
複製代碼
Node.js內部維護了兩個隊列:handles隊列(暫存待處理的用戶請求)和free隊列(存放空閒的worker)。
當新請求到達的時候父進程將請求暫存handles隊列,從free隊列中出隊一個worker,進入worker處理(handoff)階段,關鍵邏輯實現以下:
// Simplified from lib/internal/cluster/round_robin_handle.js.
// Master side: queue the incoming connection handle, then — if a worker
// is idle — start the handoff phase with it.
RoundRobinHandle.prototype.distribute = function(err, handle) {
  this.handles.push(handle);
  const idleWorker = this.free.shift();
  if (idleWorker !== undefined) {
    this.handoff(idleWorker);
  }
};
複製代碼
worker處理階段首先從handles隊列出隊一個請求,而後經過進程通訊的方式通知子worker進行請求處理,當worker接收到通訊消息後發送ack信息,繼續響應handles隊列中的請求任務,當worker沒法接受請求時,父進程負責從新調度worker進行處理。關鍵邏輯以下:
// Simplified from lib/internal/cluster/round_robin_handle.js.
// Pop one pending connection and offer it to `worker` over IPC. If the
// worker declines (it is shutting down), the connection is re-queued
// for another worker; either way this worker keeps draining the queue.
RoundRobinHandle.prototype.handoff = function(worker) {
  const conn = this.handles.shift();

  if (conn === undefined) {
    // Nothing pending: park the worker on the idle (free) queue.
    this.free.push(worker);
    return;
  }

  const message = { act: 'newconn', key: this.key };

  // Arrow callback keeps `this` bound to the RoundRobinHandle.
  sendHelper(worker.process, message, conn, (reply) => {
    if (reply.accepted) {
      conn.close(); // The worker took ownership of the connection.
    } else {
      this.distribute(0, conn); // Worker is shutting down. Send to another.
    }

    this.handoff(worker);
  });
};
複製代碼
注意:主進程與子進程之間創建了IPC,所以主進程與子進程之間能夠通訊,可是各個子進程之間是相互獨立的(沒法通訊)
https://medium.com/@fermads/node-js-process-load-balancing-comparing-cluster-iptables-and-nginx-6746aaf38272