Java provides a family of locks built on the AQS framework, but when a piece of code or a resource must be used mutually exclusively across multiple machines in a cluster, these single-JVM locks are useless; a distributed lock is needed to coordinate the machines. Distributed locks can be implemented in many ways, for example on top of Redis or a database. This post discusses an implementation based on ZooKeeper.
Disclaimer: the distributed locks below are implementations I worked out from ZooKeeper's features while learning it. They do not represent authoritative industry practice and are offered only as small sparks for bouncing ideas around; corrections are welcome if you spot mistakes.
The first idea that comes to mind is to agree on a path whose creation serves as the contention point, say /zk-lock/foo. Every machine in the cluster tries to create this ephemeral node; whoever succeeds holds the lock. Whoever fails puts itself to sleep, but before sleeping it adds a watcher on /zk-lock/foo; when the node is deleted, the watcher wakes the sleeper so it can contend for the lock again. The cycle repeats until every thread has held the lock.
As for the machine that won the lock: it runs its business logic and whatever else it needs to do, and when finished it must call unlock to release the lock. Releasing here simply means deleting /zk-lock/foo. Since, as noted above, every machine that failed to get the lock has a watcher on this node, deleting /zk-lock/foo effectively wakes all the sleeping machines so they can contend for the lock again.
A flowchart of the process described above:
Now the code. First, the interface definition of the distributed lock:
package cc11001100.zookeeper.lock;

/**
 * @author CC11001100
 */
public interface DistributeLock {

    void lock() throws InterruptedException;

    void unlock();

    /**
     * Release the resources this distributed lock holds, e.g. the ZooKeeper connection
     */
    void releaseResource();

}
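All the implementations below obtain their connection through a small helper, ZooKeeperUtil.getZooKeeper(), which this post does not show. Here is a minimal sketch of what such a helper might look like, assuming a single local server at 127.0.0.1:2181 and a 30-second session timeout (both values are placeholders, not the author's actual configuration):

package cc11001100.zookeeper.utils;

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

import java.io.IOException;
import java.util.concurrent.CountDownLatch;

public class ZooKeeperUtil {

    // Placeholder settings: point these at your own ensemble
    private static final String CONNECT_STRING = "127.0.0.1:2181";
    private static final int SESSION_TIMEOUT_MS = 30_000;

    public static ZooKeeper getZooKeeper() throws IOException {
        CountDownLatch connected = new CountDownLatch(1);
        // The constructor returns before the session is established,
        // so block until the SyncConnected event arrives
        ZooKeeper zooKeeper = new ZooKeeper(CONNECT_STRING, SESSION_TIMEOUT_MS, event -> {
            if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                connected.countDown();
            }
        });
        try {
            connected.await();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
        return zooKeeper;
    }

}

Each call deliberately opens a fresh connection, since every lock instance closes its own connection in releaseResource().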
An implementation of the distributed lock following the flowchart above:
package cc11001100.zookeeper.lock;

import cc11001100.zookeeper.utils.ZooKeeperUtil;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

/**
 * Non-reentrant distributed lock with the herd effect (unfair lock)
 *
 * @author CC11001100
 */
public class NotReentrantDistributeLockWithHerdEffect implements DistributeLock {

    private final Thread currentThread;

    private String lockBasePath;
    private String lockName;
    private String lockFullPath;

    private ZooKeeper zooKeeper;

    private String myId;
    private String myName;

    public NotReentrantDistributeLockWithHerdEffect(String lockBasePath, String lockName) throws IOException {
        this.lockBasePath = lockBasePath;
        this.lockName = lockName;
        this.lockFullPath = this.lockBasePath + "/" + lockName;
        this.zooKeeper = ZooKeeperUtil.getZooKeeper();
        this.currentThread = Thread.currentThread();
        this.myId = UUID.randomUUID().toString();
        this.myName = Thread.currentThread().getName();
        createLockBasePath();
    }

    private void createLockBasePath() {
        try {
            if (zooKeeper.exists(lockBasePath, null) == null) {
                zooKeeper.create(lockBasePath, "".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            }
        } catch (KeeperException | InterruptedException ignored) {
        }
    }

    @Override
    public void lock() throws InterruptedException {
        try {
            zooKeeper.create(lockFullPath, myId.getBytes(StandardCharsets.UTF_8),
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
            System.out.println(myName + ": get lock");
        } catch (KeeperException.NodeExistsException e) {
            // The node already exists: watch it, go to sleep, and contend again once it is deleted
            try {
                Stat stat = zooKeeper.exists(lockFullPath, event -> {
                    synchronized (currentThread) {
                        currentThread.notify();
                        System.out.println(myName + ": wake");
                    }
                });
                if (stat == null) {
                    // In the split second between our failed create and the exists() call,
                    // the holder released the lock, so contend again immediately
                    lock();
                    return;
                }
                synchronized (currentThread) {
                    // Re-check while holding the monitor: the watcher's notify also needs this
                    // monitor, so a delete happening from here on cannot wake us unseen
                    if (zooKeeper.exists(lockFullPath, false) != null) {
                        currentThread.wait();
                    }
                }
            } catch (KeeperException e1) {
                e1.printStackTrace();
            }
            lock();
        } catch (KeeperException | InterruptedException e) {
            e.printStackTrace();
        }
    }

    @Override
    public void unlock() {
        try {
            byte[] nodeBytes = zooKeeper.getData(lockFullPath, false, null);
            String currentHoldLockNodeId = new String(nodeBytes, StandardCharsets.UTF_8);
            // Only the current holder of the lock is allowed to delete the node
            if (myId.equalsIgnoreCase(currentHoldLockNodeId)) {
                zooKeeper.delete(lockFullPath, -1);
            }
        } catch (KeeperException | InterruptedException e) {
            e.printStackTrace();
        }
        System.out.println(myName + ": releaseResource lock");
    }

    @Override
    public void releaseResource() {
        try {
            // Close the ZooKeeper connection
            zooKeeper.close();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

}
The test code:
package cc11001100.zookeeper.lock;

import java.io.IOException;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.concurrent.TimeUnit;

/**
 * @author CC11001100
 */
public class NotReentrantDistributeLockWithHerdEffectTest {

    private static String now() {
        return LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
    }

    public static void main(String[] args) {
        int nodeNum = 10;
        for (int i = 0; i < nodeNum; i++) {
            new Thread(() -> {
                DistributeLock lock = null;
                try {
                    lock = new NotReentrantDistributeLockWithHerdEffect("/zk-lock", "foo");
                    lock.lock();
                    String myName = Thread.currentThread().getName();
                    System.out.println(myName + ": hold lock, now=" + now());
                    TimeUnit.SECONDS.sleep(3);
                    lock.unlock();
                } catch (IOException | InterruptedException e) {
                    e.printStackTrace();
                } finally {
                    if (lock != null) {
                        lock.releaseResource();
                    }
                }
            }, "thread-name-" + i).start();
        }

        // output:
        // thread-name-8: get lock
        // thread-name-8: hold lock, now=2019-01-14 13:31:48
        // thread-name-8: releaseResource lock
        // thread-name-2: wake
        // thread-name-5: wake
        // thread-name-3: wake
        // thread-name-1: wake
        // thread-name-4: wake
        // thread-name-9: wake
        // thread-name-0: wake
        // thread-name-7: wake
        // thread-name-6: wake
        // thread-name-2: get lock
        // thread-name-2: hold lock, now=2019-01-14 13:31:51
        // thread-name-7: wake
        // thread-name-2: releaseResource lock
        // thread-name-6: wake
        // thread-name-0: wake
        // thread-name-9: wake
        // thread-name-1: wake
        // thread-name-3: wake
        // thread-name-5: wake
        // thread-name-4: wake
        // thread-name-0: get lock
        // thread-name-0: hold lock, now=2019-01-14 13:31:54
        // thread-name-6: wake
        // thread-name-1: wake
        // thread-name-7: wake
        // thread-name-4: wake
        // thread-name-3: wake
        // thread-name-5: wake
        // thread-name-0: releaseResource lock
        // thread-name-9: wake
        // thread-name-1: get lock
        // thread-name-1: hold lock, now=2019-01-14 13:31:57
        // thread-name-9: wake
        // thread-name-6: wake
        // thread-name-7: wake
        // thread-name-3: wake
        // thread-name-5: wake
        // thread-name-4: wake
        // thread-name-1: releaseResource lock
        // thread-name-9: get lock
        // thread-name-9: hold lock, now=2019-01-14 13:32:00
        // thread-name-7: wake
        // thread-name-4: wake
        // thread-name-5: wake
        // thread-name-3: wake
        // thread-name-9: releaseResource lock
        // thread-name-6: wake
        // thread-name-3: get lock
        // thread-name-3: hold lock, now=2019-01-14 13:32:03
        // thread-name-6: wake
        // thread-name-5: wake
        // thread-name-4: wake
        // thread-name-3: releaseResource lock
        // thread-name-7: wake
        // thread-name-6: get lock
        // thread-name-6: hold lock, now=2019-01-14 13:32:06
        // thread-name-7: wake
        // thread-name-6: releaseResource lock
        // thread-name-4: wake
        // thread-name-5: wake
        // thread-name-7: get lock
        // thread-name-7: hold lock, now=2019-01-14 13:32:09
        // thread-name-4: wake
        // thread-name-7: releaseResource lock
        // thread-name-5: wake
        // thread-name-5: get lock
        // thread-name-5: hold lock, now=2019-01-14 13:32:12
        // thread-name-5: releaseResource lock
        // thread-name-4: wake
        // thread-name-4: get lock
        // thread-name-4: hold lock, now=2019-01-14 13:32:15
        // thread-name-4: releaseResource lock
    }

}
The code above provides basic locking, but it has a rather fatal flaw: releasing the lock wakes up every other machine, yet in the end only one of them wins it and the rest have to go back to sleep. This is especially wasteful when there are many machines: dozens get woken up and still only one acquires the lock (competition on par with grabbing a train ticket on 12306...). Then again, that is simply the nature of an unfair lock: everyone has to scramble together, otherwise where would the unfairness be? This phenomenon is known as the herd effect (thundering herd).
This problem can be worked around by borrowing from the CLH queue in Java's AQS framework: make the machines queue up. Every machine that wants the lock takes a number, its ticket. Tickets are ordered and increase by 1 each time, so when releasing the lock the holder only needs to wake the first machine behind it. Concretely, after taking its number each machine adds a watcher on the ticket of the machine directly in front of it; when that machine finishes its business logic and releases the lock, the watcher wakes this one up. If there is nobody in front, it is this machine's turn and it acquires the lock immediately. As for finding the predecessor's ticket: since tickets increase by exactly 1, it can be computed directly from one's own ticket (see the short illustration below), with no need to fetch all the children of the parent node. Finally, ticket issuing: ZooKeeper's ephemeral sequential nodes guarantee that numbers are handed out in order, and they also release the lock automatically if the holding machine crashes.
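To make the ticket arithmetic concrete: create() with CreateMode.EPHEMERAL_SEQUENTIAL returns the actual path of the new node, which is the given prefix plus a zero-padded 10-digit sequence number. The predecessor's path can therefore be computed directly (the path below is a made-up example):

// A hypothetical path as returned by create(..., CreateMode.EPHEMERAL_SEQUENTIAL)
String path = "/zk-lock/bar0000000171";
int myTicket = Integer.parseInt(path.substring(path.lastIndexOf("/") + 1).replace("bar", "")); // 171
String previousNode = String.format("/zk-lock/bar%010d", myTicket - 1); // "/zk-lock/bar0000000170"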
A flowchart of the process described above:
The implementation:
package cc11001100.zookeeper.lock;

import cc11001100.zookeeper.utils.ZooKeeperUtil;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

import java.io.IOException;

/**
 * Non-reentrant lock without the herd effect (fair lock)
 *
 * @author CC11001100
 */
public class NotReentrantDistributeLock implements DistributeLock {

    private final Thread currentThread;

    private String lockBasePath;
    private String lockPrefix;
    private String lockFullPath;

    private ZooKeeper zooKeeper;

    private String myName;
    private int myTicket;

    public NotReentrantDistributeLock(String lockBasePath, String lockPrefix) throws IOException {
        this.lockBasePath = lockBasePath;
        this.lockPrefix = lockPrefix;
        this.lockFullPath = lockBasePath + "/" + lockPrefix;
        this.zooKeeper = ZooKeeperUtil.getZooKeeper();
        this.currentThread = Thread.currentThread();
        this.myName = currentThread.getName();
        createLockBasePath();
    }

    private void createLockBasePath() {
        try {
            if (zooKeeper.exists(lockBasePath, null) == null) {
                zooKeeper.create(lockBasePath, "".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            }
        } catch (KeeperException | InterruptedException ignored) {
        }
    }

    @Override
    public void lock() throws InterruptedException {
        log("begin get lock");
        try {
            String path = zooKeeper.create(lockFullPath, "".getBytes(),
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
            myTicket = extractMyTicket(path);
            String previousNode = buildPath(myTicket - 1);
            Stat exists = zooKeeper.exists(previousNode, event -> {
                synchronized (currentThread) {
                    currentThread.notify();
                    log("wake");
                }
            });
            if (exists != null) {
                synchronized (currentThread) {
                    // Re-check while holding the monitor: the watcher's notify also needs this
                    // monitor, so a delete between exists() and wait() cannot be lost
                    if (zooKeeper.exists(previousNode, false) != null) {
                        currentThread.wait();
                    }
                }
            }
            log("get lock success");
        } catch (KeeperException e) {
            e.printStackTrace();
        }
    }

    private int extractMyTicket(String path) {
        int splitIndex = path.lastIndexOf("/");
        return Integer.parseInt(path.substring(splitIndex + 1).replace(lockPrefix, ""));
    }

    private String buildPath(int ticket) {
        return String.format("%s%010d", lockFullPath, ticket);
    }

    @Override
    public void unlock() {
        log("begin release lock");
        try {
            zooKeeper.delete(buildPath(myTicket), -1);
        } catch (KeeperException | InterruptedException e) {
            e.printStackTrace();
        }
        log("end release lock");
    }

    @Override
    public void releaseResource() {
        try {
            // Close the ZooKeeper connection
            zooKeeper.close();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

    private void log(String msg) {
        System.out.printf("[%d] %s : %s\n", myTicket, myName, msg);
    }

}
Test code:
package cc11001100.zookeeper.lock;

import java.io.IOException;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.concurrent.TimeUnit;

/**
 * @author CC11001100
 */
public class NotReentrantDistributeLockTest {

    private static String now() {
        return LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
    }

    public static void main(String[] args) {
        int nodeNum = 10;
        for (int i = 0; i < nodeNum; i++) {
            new Thread(() -> {
                DistributeLock lock = null;
                try {
                    lock = new NotReentrantDistributeLock("/zk-lock", "bar");
                    lock.lock();
                    String myName = Thread.currentThread().getName();
                    System.out.println(myName + ": get lock success, now=" + now());
                    TimeUnit.SECONDS.sleep(3);
                    lock.unlock();
                } catch (IOException | InterruptedException e) {
                    e.printStackTrace();
                } finally {
                    if (lock != null) {
                        lock.releaseResource();
                    }
                }
            }, "thread-name-" + i).start();
        }

        // output:
        // [0] thread-name-6 : begin get lock
        // [0] thread-name-3 : begin get lock
        // [0] thread-name-5 : begin get lock
        // [0] thread-name-4 : begin get lock
        // [0] thread-name-8 : begin get lock
        // [0] thread-name-9 : begin get lock
        // [0] thread-name-2 : begin get lock
        // [0] thread-name-7 : begin get lock
        // [0] thread-name-1 : begin get lock
        // [0] thread-name-0 : begin get lock
        // [170] thread-name-1 : get lock success
        // thread-name-1: get lock success, now=2019-01-14 17:59:56
        // [170] thread-name-1 : begin release lock
        // [170] thread-name-1 : end release lock
        // [171] thread-name-9 : wake
        // [171] thread-name-9 : get lock success
        // thread-name-9: get lock success, now=2019-01-14 17:59:59
        // [171] thread-name-9 : begin release lock
        // [171] thread-name-9 : end release lock
        // [172] thread-name-4 : wake
        // [172] thread-name-4 : get lock success
        // thread-name-4: get lock success, now=2019-01-14 18:00:02
        // [172] thread-name-4 : begin release lock
        // [173] thread-name-0 : wake
        // [172] thread-name-4 : end release lock
        // [173] thread-name-0 : get lock success
        // thread-name-0: get lock success, now=2019-01-14 18:00:05
        // [173] thread-name-0 : begin release lock
        // [174] thread-name-6 : wake
        // [173] thread-name-0 : end release lock
        // [174] thread-name-6 : get lock success
        // thread-name-6: get lock success, now=2019-01-14 18:00:08
        // [174] thread-name-6 : begin release lock
        // [175] thread-name-7 : wake
        // [174] thread-name-6 : end release lock
        // [175] thread-name-7 : get lock success
        // thread-name-7: get lock success, now=2019-01-14 18:00:11
        // [175] thread-name-7 : begin release lock
        // [176] thread-name-3 : wake
        // [175] thread-name-7 : end release lock
        // [176] thread-name-3 : get lock success
        // thread-name-3: get lock success, now=2019-01-14 18:00:14
        // [176] thread-name-3 : begin release lock
        // [177] thread-name-8 : wake
        // [176] thread-name-3 : end release lock
        // [177] thread-name-8 : get lock success
        // thread-name-8: get lock success, now=2019-01-14 18:00:17
        // [177] thread-name-8 : begin release lock
        // [178] thread-name-2 : wake
        // [177] thread-name-8 : end release lock
        // [178] thread-name-2 : get lock success
        // thread-name-2: get lock success, now=2019-01-14 18:00:20
        // [178] thread-name-2 : begin release lock
        // [179] thread-name-5 : wake
        // [178] thread-name-2 : end release lock
        // [179] thread-name-5 : get lock success
        // thread-name-5: get lock success, now=2019-01-14 18:00:23
        // [179] thread-name-5 : begin release lock
        // [179] thread-name-5 : end release lock
    }

}
Our lock is getting better and better, but it still has a rather fatal problem: it is not reentrant. Reentrancy means acquiring the lock recursively: when the current holder tries to acquire the lock again, a reentrant lock grants it immediately and increments a reentry count; each release decrements the count, and only when the count reaches 0 is the lock truly released. The lock above is non-reentrant, so a recursive acquisition deadlocks and waits forever, which is clearly unacceptable. This section discusses how to turn the non-reentrant lock from the previous section into a reentrant one.
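For comparison, this is exactly how java.util.concurrent.locks.ReentrantLock behaves within a single JVM; the distributed lock below mimics these semantics:

import java.util.concurrent.locks.ReentrantLock;

public class ReentrantSemanticsDemo {
    public static void main(String[] args) {
        ReentrantLock lock = new ReentrantLock();
        lock.lock();
        lock.lock();                             // re-acquiring succeeds immediately
        System.out.println(lock.getHoldCount()); // 2
        lock.unlock();                           // count drops to 1, lock is still held
        lock.unlock();                           // count reaches 0, lock is fully released
        System.out.println(lock.isLocked());     // false
    }
}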
My idea is actually quite simple. First, the reentry count has to be stored somewhere, but other machines do not need to know how many times the holder has reentered; they only need to know that the lock is currently in that machine's hands. So the count can simply be a member variable of the DistributeLock, say reentrantCount. On every lock attempt, first check whether this variable is non-zero; if it is, this machine already holds the lock, and per reentrant-lock semantics we just increment the count. Only when reentrantCount is 0 do we queue up and wait for the lock. Releasing works symmetrically: decrement reentrantCount; if it is still non-zero we are still inside nested acquisitions; once it reaches 0, all reentries have been released, so delete the current ticket node. Since the next machine in the queue has a watcher on this ticket, deleting it amounts to waking the next person in line.
A flowchart of the process described above:
The implementation:
package cc11001100.zookeeper.lock;

import cc11001100.zookeeper.utils.ZooKeeperUtil;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

import java.io.IOException;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;

/**
 * Reentrant distributed lock without the herd effect (fair lock)
 *
 * @author CC11001100
 */
public class ReentrantDistributeLock implements DistributeLock {

    private final Thread currentThread;

    private String lockBasePath;
    private String lockPrefix;
    private String lockFullPath;

    private ZooKeeper zooKeeper;

    private String myName;
    private int myTicket;

    // The reentry count lives in a local variable instead of on ZooKeeper:
    // updating a znode would cost a network round trip, and other nodes only need to
    // know that someone currently holds the lock, not how many times it has reentered
    private int reentrantCount = 0;

    public ReentrantDistributeLock(String lockBasePath, String lockName) throws IOException {
        this.lockBasePath = lockBasePath;
        this.lockPrefix = lockName;
        this.lockFullPath = lockBasePath + "/" + lockName;
        this.zooKeeper = ZooKeeperUtil.getZooKeeper();
        this.currentThread = Thread.currentThread();
        this.myName = currentThread.getName();
        createLockBasePath();
    }

    private void createLockBasePath() {
        try {
            if (zooKeeper.exists(lockBasePath, null) == null) {
                zooKeeper.create(lockBasePath, "".getBytes(), ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            }
        } catch (KeeperException | InterruptedException ignored) {
        }
    }

    @Override
    public void lock() throws InterruptedException {
        log("begin get lock");

        // A non-zero reentrantCount means this node already holds the lock:
        // no waiting, just bump the count
        if (reentrantCount != 0) {
            reentrantCount++;
            log("get lock success");
            return;
        }

        // We do not hold the lock yet: enqueue and watch the previous node for its release
        try {
            String path = zooKeeper.create(lockFullPath, "".getBytes(),
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL_SEQUENTIAL);
            myTicket = extractMyTicket(path);
            String previousNode = buildPathByTicket(myTicket - 1);
            Stat exists = zooKeeper.exists(previousNode, event -> {
                synchronized (currentThread) {
                    currentThread.notify();
                    log("wake");
                }
            });
            if (exists != null) {
                synchronized (currentThread) {
                    // Re-check while holding the monitor so a delete between exists() and wait() is not lost
                    if (zooKeeper.exists(previousNode, false) != null) {
                        currentThread.wait();
                    }
                }
            }
            reentrantCount++;
            log("get lock success");
        } catch (KeeperException e) {
            e.printStackTrace();
        }
    }

    private int extractMyTicket(String path) {
        int splitIndex = path.lastIndexOf("/");
        return Integer.parseInt(path.substring(splitIndex + 1).replace(lockPrefix, ""));
    }

    private String buildPathByTicket(int ticket) {
        return String.format("%s%010d", lockFullPath, ticket);
    }

    @Override
    public void unlock() {
        log("begin release lock");

        // Each unlock decrements the count; non-zero means we are still inside nested acquisitions
        reentrantCount--;
        if (reentrantCount != 0) {
            log("end release lock");
            return;
        }

        // Only when the reentry count reaches 0 is the node deleted and the lock truly released
        try {
            zooKeeper.delete(buildPathByTicket(myTicket), -1);
        } catch (KeeperException | InterruptedException e) {
            e.printStackTrace();
        }
        log("end release lock");
    }

    @Override
    public void releaseResource() {
        try {
            // Close the ZooKeeper connection
            zooKeeper.close();
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }

    private void log(String msg) {
        System.out.printf("[%s], ticket=%d, reentrantCount=%d, threadName=%s, msg=%s\n",
                now(), myTicket, reentrantCount, myName, msg);
    }

    private String now() {
        return LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
    }

}
Test code:
package cc11001100.zookeeper.lock;

import java.io.IOException;
import java.util.Random;
import java.util.concurrent.TimeUnit;

/**
 * Test the distributed reentrant lock: each thread reenters a random number of times
 * and checks that nothing goes wrong
 *
 * @author CC11001100
 */
public class ReentrantDistributeLockTest {

    public static void main(String[] args) {
        int nodeNum = 10;
        for (int i = 0; i < nodeNum; i++) {
            new Thread(() -> {
                DistributeLock lock = null;
                try {
                    lock = new ReentrantDistributeLock("/zk-lock", "bar");
                    lock.lock();
                    TimeUnit.SECONDS.sleep(1);

                    int reentrantTimes = new Random().nextInt(10);
                    int reentrantCount = 0;
                    for (int j = 0; j < reentrantTimes; j++) {
                        lock.lock();
                        reentrantCount++;
                        if (Math.random() < 0.5) {
                            lock.unlock();
                            reentrantCount--;
                        }
                    }
                    while (reentrantCount-- > 0) {
                        lock.unlock();
                    }

                    lock.unlock();
                } catch (IOException | InterruptedException e) {
                    e.printStackTrace();
                } finally {
                    if (lock != null) {
                        lock.releaseResource();
                    }
                }
            }, "thread-name-" + i).start();
        }

        // output:
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-4, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-5, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-8, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-0, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-6, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-1, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-9, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-3, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=0, reentrantCount=0, threadName=thread-name-2, msg=begin get lock
        // [2019-01-14 17:57:43], ticket=160, reentrantCount=1, threadName=thread-name-0, msg=get lock success
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=1, threadName=thread-name-0, msg=begin get lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=2, threadName=thread-name-0, msg=get lock success
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=2, threadName=thread-name-0, msg=begin release lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=1, threadName=thread-name-0, msg=end release lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=1, threadName=thread-name-0, msg=begin get lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=2, threadName=thread-name-0, msg=get lock success
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=2, threadName=thread-name-0, msg=begin get lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=3, threadName=thread-name-0, msg=get lock success
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=3, threadName=thread-name-0, msg=begin release lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=2, threadName=thread-name-0, msg=end release lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=2, threadName=thread-name-0, msg=begin get lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=3, threadName=thread-name-0, msg=get lock success
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=3, threadName=thread-name-0, msg=begin release lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=2, threadName=thread-name-0, msg=end release lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=2, threadName=thread-name-0, msg=begin release lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=1, threadName=thread-name-0, msg=end release lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=1, threadName=thread-name-0, msg=begin release lock
        // [2019-01-14 17:57:44], ticket=160, reentrantCount=0, threadName=thread-name-0, msg=end release lock
        // [2019-01-14 17:57:44], ticket=161, reentrantCount=0, threadName=thread-name-6, msg=wake
        // [2019-01-14 17:57:44], ticket=161, reentrantCount=1, threadName=thread-name-6, msg=get lock success
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=1, threadName=thread-name-6, msg=begin get lock
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=2, threadName=thread-name-6, msg=get lock success
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=2, threadName=thread-name-6, msg=begin get lock
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=3, threadName=thread-name-6, msg=get lock success
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=3, threadName=thread-name-6, msg=begin release lock
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=2, threadName=thread-name-6, msg=end release lock
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=2, threadName=thread-name-6, msg=begin release lock
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=1, threadName=thread-name-6, msg=end release lock
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=1, threadName=thread-name-6, msg=begin release lock
        // [2019-01-14 17:57:45], ticket=162, reentrantCount=0, threadName=thread-name-5, msg=wake
        // [2019-01-14 17:57:45], ticket=161, reentrantCount=0, threadName=thread-name-6, msg=end release lock
        // [2019-01-14 17:57:45], ticket=162, reentrantCount=1, threadName=thread-name-5, msg=get lock success
        // [2019-01-14 17:57:46], ticket=162, reentrantCount=1, threadName=thread-name-5, msg=begin release lock
        // [2019-01-14 17:57:46], ticket=162, reentrantCount=0, threadName=thread-name-5, msg=end release lock
        // [2019-01-14 17:57:46], ticket=163, reentrantCount=0, threadName=thread-name-8, msg=wake
        // [2019-01-14 17:57:46], ticket=163, reentrantCount=1, threadName=thread-name-8, msg=get lock success
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=1, threadName=thread-name-8, msg=begin get lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=2, threadName=thread-name-8, msg=get lock success
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=2, threadName=thread-name-8, msg=begin release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=1, threadName=thread-name-8, msg=end release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=1, threadName=thread-name-8, msg=begin get lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=2, threadName=thread-name-8, msg=get lock success
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=2, threadName=thread-name-8, msg=begin get lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=3, threadName=thread-name-8, msg=get lock success
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=3, threadName=thread-name-8, msg=begin get lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=4, threadName=thread-name-8, msg=get lock success
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=4, threadName=thread-name-8, msg=begin get lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=5, threadName=thread-name-8, msg=get lock success
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=5, threadName=thread-name-8, msg=begin release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=4, threadName=thread-name-8, msg=end release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=4, threadName=thread-name-8, msg=begin release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=3, threadName=thread-name-8, msg=end release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=3, threadName=thread-name-8, msg=begin release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=2, threadName=thread-name-8, msg=end release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=2, threadName=thread-name-8, msg=begin release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=1, threadName=thread-name-8, msg=end release lock
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=1, threadName=thread-name-8, msg=begin release lock
        // [2019-01-14 17:57:47], ticket=164, reentrantCount=0, threadName=thread-name-2, msg=wake
        // [2019-01-14 17:57:47], ticket=163, reentrantCount=0, threadName=thread-name-8, msg=end release lock
        // [2019-01-14 17:57:47], ticket=164, reentrantCount=1, threadName=thread-name-2, msg=get lock success
        // [2019-01-14 17:57:48], ticket=164, reentrantCount=1, threadName=thread-name-2, msg=begin release lock
        // [2019-01-14 17:57:48], ticket=165, reentrantCount=0, threadName=thread-name-9, msg=wake
        // [2019-01-14 17:57:48], ticket=164, reentrantCount=0, threadName=thread-name-2, msg=end release lock
        // [2019-01-14 17:57:48], ticket=165, reentrantCount=1, threadName=thread-name-9, msg=get lock success
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=1, threadName=thread-name-9, msg=begin get lock
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=2, threadName=thread-name-9, msg=get lock success
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=2, threadName=thread-name-9, msg=begin release lock
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=1, threadName=thread-name-9, msg=end release lock
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=1, threadName=thread-name-9, msg=begin get lock
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=2, threadName=thread-name-9, msg=get lock success
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=2, threadName=thread-name-9, msg=begin get lock
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=3, threadName=thread-name-9, msg=get lock success
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=3, threadName=thread-name-9, msg=begin release lock
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=2, threadName=thread-name-9, msg=end release lock
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=2, threadName=thread-name-9, msg=begin release lock
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=1, threadName=thread-name-9, msg=end release lock
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=1, threadName=thread-name-9, msg=begin release lock
        // [2019-01-14 17:57:49], ticket=166, reentrantCount=0, threadName=thread-name-1, msg=wake
        // [2019-01-14 17:57:49], ticket=165, reentrantCount=0, threadName=thread-name-9, msg=end release lock
        // [2019-01-14 17:57:49], ticket=166, reentrantCount=1, threadName=thread-name-1, msg=get lock success
        // [2019-01-14 17:57:50], ticket=166, reentrantCount=1, threadName=thread-name-1, msg=begin release lock
        // [2019-01-14 17:57:50], ticket=167, reentrantCount=0, threadName=thread-name-4, msg=wake
        // [2019-01-14 17:57:50], ticket=166, reentrantCount=0, threadName=thread-name-1, msg=end release lock
        // [2019-01-14 17:57:50], ticket=167, reentrantCount=1, threadName=thread-name-4, msg=get lock success
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=1, threadName=thread-name-4, msg=begin get lock
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=2, threadName=thread-name-4, msg=get lock success
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=2, threadName=thread-name-4, msg=begin release lock
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=1, threadName=thread-name-4, msg=end release lock
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=1, threadName=thread-name-4, msg=begin get lock
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=2, threadName=thread-name-4, msg=get lock success
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=2, threadName=thread-name-4, msg=begin get lock
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=3, threadName=thread-name-4, msg=get lock success
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=3, threadName=thread-name-4, msg=begin release lock
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=2, threadName=thread-name-4, msg=end release lock
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=2, threadName=thread-name-4, msg=begin release lock
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=1, threadName=thread-name-4, msg=end release lock
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=1, threadName=thread-name-4, msg=begin release lock
        // [2019-01-14 17:57:51], ticket=168, reentrantCount=0, threadName=thread-name-3, msg=wake
        // [2019-01-14 17:57:51], ticket=167, reentrantCount=0, threadName=thread-name-4, msg=end release lock
        // [2019-01-14 17:57:51], ticket=168, reentrantCount=1, threadName=thread-name-3, msg=get lock success
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=1, threadName=thread-name-3, msg=begin get lock
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=2, threadName=thread-name-3, msg=get lock success
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=2, threadName=thread-name-3, msg=begin get lock
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=3, threadName=thread-name-3, msg=get lock success
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=3, threadName=thread-name-3, msg=begin release lock
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=2, threadName=thread-name-3, msg=end release lock
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=2, threadName=thread-name-3, msg=begin release lock
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=1, threadName=thread-name-3, msg=end release lock
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=1, threadName=thread-name-3, msg=begin release lock
        // [2019-01-14 17:57:52], ticket=169, reentrantCount=0, threadName=thread-name-7, msg=wake
        // [2019-01-14 17:57:52], ticket=168, reentrantCount=0, threadName=thread-name-3, msg=end release lock
        // [2019-01-14 17:57:52], ticket=169, reentrantCount=1, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=1, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=2, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=2, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=2, threadName=thread-name-7, msg=end release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=2, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=end release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=end release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=end release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=5, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=5, threadName=thread-name-7, msg=begin get lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=6, threadName=thread-name-7, msg=get lock success
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=6, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=5, threadName=thread-name-7, msg=end release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=5, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=end release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=4, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=end release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=3, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=2, threadName=thread-name-7, msg=end release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=2, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=1, threadName=thread-name-7, msg=end release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=1, threadName=thread-name-7, msg=begin release lock
        // [2019-01-14 17:57:53], ticket=169, reentrantCount=0, threadName=thread-name-7, msg=end release lock
    }

}