咱們先來看一下抽象類
/**
 * Demonstrates which members an abstract class may declare: fields and
 * concrete methods of any visibility, abstract methods (public or
 * protected), and static methods.
 *
 * @author draymonder
 */
public abstract class AbstractClassTest {
    private int Test1;
    public int Test2;

    public void test1() {
    }

    protected void test2() {
    }

    private void test3() {
    }

    void test4() {
    }

    public abstract void test5();

    protected abstract void test6();

    public static void test7() {
    }
}
咱們再來看一下接口
/**
 * Demonstrates which members an interface may declare: constants
 * (implicitly public static final), abstract methods, default methods
 * (JDK 8+), and static methods (JDK 8+).
 *
 * @author draymonder
 */
public interface IntefaceTest {
    int Test1 = 0;

    void test1();

    default void test2() {
    }

    static void test3() {
    }
}
由此咱們能夠知道:JDK8 下 interface 可使用 default 實現方法。

ArrayList 數組的默認大小爲 10。
// ArrayList's default initial capacity (quoted from the JDK source).
private static final int DEFAULT_CAPACITY = 10;
添加元素時使用 ensureCapacityInternal()
方法來保證容量足夠,若是不夠時,須要使用 grow()
方法進行擴容,新容量的大小爲 oldCapacity + (oldCapacity >> 1)
,也就是舊容量的 1.5 倍。

數組的默認大小爲 10。

Vector 每次擴容請求其大小的 2 倍空間,而 ArrayList 是 1.5 倍。
Vector 是同步的,所以開銷就比 ArrayList 要大,訪問速度更慢。最好使用 ArrayList 而不是 Vector,由於同步操做徹底能夠由程序員本身來控制;
可使用 Collections 的同步 list 方法:
// Wrap a plain ArrayList to obtain a thread-safe (synchronized) view of it.
List<String> list = new ArrayList<>();
List<String> synList = Collections.synchronizedList(list);
/**
 * CopyOnWriteArrayList#add (quoted from the JDK source): every write
 * copies the entire backing array under an exclusive lock, then
 * publishes the new array, so readers never need to lock.
 */
public boolean add(E e) {
    final ReentrantLock lock = this.lock;
    lock.lock();
    try {
        Object[] elements = getArray();
        int len = elements.length;
        // Copy the old array into a new one that is one slot longer.
        Object[] newElements = Arrays.copyOf(elements, len + 1);
        newElements[len] = e;
        // Publish the new array so subsequent reads observe it.
        setArray(newElements);
        return true;
    } finally {
        lock.unlock();
    }
}

// Replaces the backing array reference with the freshly-built copy.
final void setArray(Object[] a) {
    array = a;
}
CopyOnWriteArrayList 在寫操做的同時容許讀操做,大大提升了讀操做的性能,所以很適合讀多寫少的應用場景。
可是 CopyOnWriteArrayList 有其缺陷:
hash & (cap - 1) 等同於 hash % cap(cap 爲 2 的冪時),但前者效率比後者高。
擴容的時候,table cap 變爲 2 * table cap,rehash 僅僅須要判斷 hash & old cap:
若是爲 0,節點仍在原來的 table[old] 位置,不然移到 table[old + old cap]。
mask |= mask >> 1 11011000 mask |= mask >> 2 11111110 mask |= mask >> 4 11111111
mask+1 是大於原始數字的最小的 2 的 n 次方。
num 10010000 mask+1 100000000
如下是 HashMap 中計算數組容量的代碼:
/**
 * Returns the smallest power of two greater than or equal to {@code cap},
 * clamped to MAXIMUM_CAPACITY (and never less than 1).
 */
static final int tableSizeFor(int cap) {
    // Integer.numberOfLeadingZeros collapses the five shift-or smearing
    // steps of the classic version into one intrinsic: -1 >>> nlz(cap - 1)
    // is a mask of ones covering every bit position of cap - 1.
    int n = -1 >>> Integer.numberOfLeadingZeros(cap - 1);
    return (n < 0) ? 1 : (n >= MAXIMUM_CAPACITY) ? MAXIMUM_CAPACITY : n + 1;
}
在擴容時候,因爲是頭插法,因此,原來是A->B,可是多線程狀況下會出現。
線程1剛剛拿出A, 並準備rehash到B的後面,可是存在B->A尚未解除的狀況,所以正好出現了A->B->A的狀況
若是load factor過小,那麼空間利用率過低;若是load factor太大,那麼hash衝撞就會比較多
咱們來看一下hashmap的註釋
Because TreeNodes are about twice the size of regular nodes, we use them only when bins contain enough nodes to warrant use (see TREEIFY_THRESHOLD). And when they become too small (due to removal or resizing) they are converted back to plain bins. In usages with well-distributed user hashCodes, tree bins are rarely used. Ideally, under random hashCodes, the frequency of nodes in bins follows a Poisson distribution (http://en.wikipedia.org/wiki/Poisson_distribution) with a parameter of about 0.5 on average for the default resizing threshold of 0.75, although with a large variance because of resizing granularity. Ignoring variance, the expected occurrences of list size k are (exp(-0.5) * pow(0.5, k) / factorial(k)). The first values are: 0: 0.60653066 1: 0.30326533 2: 0.07581633 3: 0.01263606 4: 0.00157952 5: 0.00015795 6: 0.00001316 7: 0.00000094 8: 0.00000006 more: less than 1 in ten million
咱們去有道翻譯translate一下
由於 TreeNode 的大小大約是普通節點的兩倍,因此只有當桶中包含足夠多的節點時才使用紅黑樹(見 TREEIFY_THRESHOLD)。當節點數變得過小時(因爲移除或擴容),又會被轉換回普通鏈表。在用戶 hashCode 分佈良好的狀況下,樹化的桶不多被用到。理想狀況下,在隨機 hashCode 下,桶中節點數遵循泊松分佈(http://en.wikipedia.org/wiki/Poisson_distribution),在默認擴容閾值 0.75 下,其參數平均約爲 0.5,儘管因爲擴容粒度的緣由方差較大。忽略方差,鏈表長度爲 k 的指望出現機率爲 (exp(-0.5) * pow(0.5, k) / factorial(k))。前幾個值是: 0:0.60653066 1:0.30326533 2:0.07581633 3:0.01263606 4:0.00157952 5:0.00015795 6:0.00001316 7:0.00000094 8:0.00000006 更多:少於千萬分之一
因此,節點插入遵循泊松分佈,所以出現一個桶內8個節點是極小機率事件,因此遇到這種狀況咱們能夠用紅黑樹加速get
操做
不支持 key 爲 null,也不支持 value 爲 null
//默認的數組大小16(HashMap裏的那個數組) static final int DEFAULT_INITIAL_CAPACITY = 16; //擴容因子0.75 static final float DEFAULT_LOAD_FACTOR = 0.75f; //ConcurrentHashMap中的數組 final Segment<K,V>[] segments //默認併發標準16 static final int DEFAULT_CONCURRENCY_LEVEL = 16; //Segment是ReentrantLock子類,所以擁有鎖的操做 static final class Segment<K,V> extends ReentrantLock implements Serializable { //HashMap的那一套,分別是數組、鍵值對數量、閾值、負載因子 transient volatile HashEntry<K,V>[] table; transient int count; transient int threshold; final float loadFactor; Segment(float lf, int threshold, HashEntry<K,V>[] tab) { this.loadFactor = lf; this.threshold = threshold; this.table = tab; } } //換了馬甲仍是認識你!!!HashEntry對象,存key、value、hash值以及下一個節點 static final class HashEntry<K,V> { final int hash; final K key; volatile V value; volatile HashEntry<K,V> next; } //segment中HashEntry[]數組最小長度 static final int MIN_SEGMENT_TABLE_CAPACITY = 2; //用於定位在segments數組中的位置,下面介紹 final int segmentMask; final int segmentShift;
/**
 * JDK7 ConcurrentHashMap#put (quoted from the JDK source): locate the
 * segment for the key's hash, lazily create it if absent, then delegate
 * the actual insertion to the segment.
 */
public V put(K key, V value) {
    Segment<K,V> s;
    // Step 1: value must not be null.
    if (value == null)
        throw new NullPointerException();
    // Compute the hash from the key; key must not be null either,
    // or hash(key) throws a NullPointerException.
    int hash = hash(key);
    // Step 2: segmentShift/segmentMask locate the slot in segments.
    int j = (hash >>> segmentShift) & segmentMask;
    // Step 3: check whether the Segment at that slot exists yet; if it
    // is null, create and initialize it first, then put the value.
    if ((s = (Segment<K,V>)UNSAFE.getObject          // nonvolatile; recheck
         (segments, (j << SSHIFT) + SBASE)) == null) //  in ensureSegment
        s = ensureSegment(j);
    return s.put(key, hash, value, false);
}
能夠看到 JDK7 版本下,ConcurrentHashMap 的 segment 是懶初始化的:以 segment[0] 爲原型建立新的 segment,而且使用 CAS 算法將其發佈(替換)到 segments 數組的指定位置
/**
 * Lazily creates the Segment at index k if it does not exist yet,
 * copying segment 0's sizing attributes and publishing the new
 * segment with a CAS so concurrent creators race safely.
 */
private Segment<K,V> ensureSegment(int k) {
    // Grab the segments array.
    final Segment<K,V>[] ss = this.segments;
    long u = (k << SSHIFT) + SBASE; // raw offset
    Segment<K,V> seg;
    if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u)) == null) {
        // Clone segment 0's configuration for the new segment.
        Segment<K,V> proto = ss[0]; // use segment 0 as prototype
        // Table length matches segment 0 (2 by default).
        int cap = proto.table.length;
        // Load factor matches segment 0 (0.75 by default).
        float lf = proto.loadFactor;
        // Threshold matches segment 0 (1 by default).
        int threshold = (int)(cap * lf);
        // Allocate the HashEntry table of that size.
        HashEntry<K,V>[] tab = (HashEntry<K,V>[])new HashEntry[cap];
        // Recheck, then build the Segment from the copied attributes.
        if ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
            == null) { // recheck
            Segment<K,V> s = new Segment<K,V>(lf, threshold, tab);
            // CAS-publish it; loop until this thread or another wins.
            while ((seg = (Segment<K,V>)UNSAFE.getObjectVolatile(ss, u))
                   == null) {
                if (UNSAFE.compareAndSwapObject(ss, u, null, seg = s))
                    break;
            }
        }
    }
    return seg;
}
首先lock獲取 tab[hash(key)]
而後進行操做
/**
 * Segment#put (quoted from the JDK7 source): insert or replace a
 * mapping inside this segment while holding the segment's lock.
 */
final V put(K key, int hash, V value, boolean onlyIfAbsent) {
    // Step 1: try the lock once; on failure spin in scanAndLockForPut,
    // which may pre-create the node while waiting for the lock.
    HashEntry<K,V> node = tryLock() ? null :
        scanAndLockForPut(key, hash, value);
    V oldValue;
    try {
        // Step 2: locate the bucket inside this segment's table.
        HashEntry<K,V>[] tab = table;
        int index = (tab.length - 1) & hash;
        // First node of the chain at that bucket.
        HashEntry<K,V> first = entryAt(tab, index);
        for (HashEntry<K,V> e = first;;) {
            // Non-empty bucket: walk the chain.
            if (e != null) {
                K k;
                // Case 1: key already present — replace the old value.
                if ((k = e.key) == key ||
                    (e.hash == hash && key.equals(k))) {
                    oldValue = e.value;
                    if (!onlyIfAbsent) {
                        e.value = value;
                        ++modCount;
                    }
                    break;
                }
                e = e.next;
            }
            else {
                // Case 2: node was pre-built while another thread held the lock.
                if (node != null)
                    // Head-insertion into the chain.
                    node.setNext(first);
                else
                    // Case 3: slot empty or key absent — build a new head node
                    // (note: also head-insertion).
                    node = new HashEntry<K,V>(hash, key, value, first);
                // Entry count + 1.
                int c = count + 1;
                // If the count crosses the threshold...
                if (c > threshold && tab.length < MAXIMUM_CAPACITY)
                    // ...resize.
                    rehash(node);
                else
                    // Otherwise place the node at the bucket head.
                    setEntryAt(tab, index, node);
                ++modCount;
                count = c;
                // Successful new insertion returns null.
                oldValue = null;
                break;
            }
        }
    } finally {
        // Step 3: release the segment lock.
        unlock();
    }
    // Replacement returns the previous value.
    return oldValue;
}
先retries
64次,不行的話,才用ReentrantLock
重入鎖
/**
 * Spins on tryLock while scanning the bucket chain for the key,
 * speculatively pre-creating the node if the key is absent; falls back
 * to a blocking lock() after MAX_SCAN_RETRIES failed attempts.
 */
private HashEntry<K,V> scanAndLockForPut(K key, int hash, V value) {
    // Find the first entry of the bucket matching this hash.
    HashEntry<K,V> first = entryForHash(this, hash);
    HashEntry<K,V> e = first;
    HashEntry<K,V> node = null;
    // Retry counter.
    int retries = -1; // negative while locating node
    // Keep trying to acquire the lock.
    while (!tryLock()) {
        HashEntry<K,V> f; // to recheck first below
        // Step 1: still locating the key in the chain.
        if (retries < 0) {
            // Case 1: reached the end — key not present in the table yet.
            if (e == null) {
                if (node == null) // speculatively create node
                    // Pre-build the HashEntry for later use; retries -> 0.
                    node = new HashEntry<K,V>(hash, key, value, null);
                retries = 0;
            }
            // Case 2: found it — the current node is the key; retries -> 0.
            else if (key.equals(e.key))
                retries = 0;
            // Case 3: not this node — move on, retries stays -1.
            else
                e = e.next;
        }
        // Step 2: still no lock after MAX_SCAN_RETRIES attempts.
        else if (++retries > MAX_SCAN_RETRIES) {
            // Give up spinning and block on the lock.
            lock();
            break;
        }
        // Step 3: every other retry, recheck whether the chain head
        // changed under us; if so, restart the scan.
        else if ((retries & 1) == 0 &&
                 (f = entryForHash(this, hash)) != first) {
            e = first = f; // re-traverse if entry changed
            retries = -1;
        }
    }
    return node;
}
rehash
的話 同jdk8版本下的rehash
retries 過程當中每隔 2 次檢查一下鏈表頭節點是否發生變化,若是變了就從頭從新掃描;超過 MAX_SCAN_RETRIES 次仍拿不到鎖,纔會調用 ReentrantLock 的 lock() 阻塞等待 unlock
計算每一個tab的size
/**
 * JDK8 ConcurrentHashMap#put (quoted from the JDK source).
 */
public V put(K key, V value) {
    return putVal(key, value, false);
}

/** Implementation for put and putIfAbsent */
final V putVal(K key, V value, boolean onlyIfAbsent) {
    // Neither key nor value may be null.
    if (key == null || value == null) throw new NullPointerException();
    // Spread the key's hashCode.
    int hash = spread(key.hashCode());
    int binCount = 0;
    for (Node<K,V>[] tab = table;;) {
        Node<K,V> f; int n, i, fh;
        // Note (1): lazily initialize the table on first use.
        if (tab == null || (n = tab.length) == 0)
            tab = initTable();
        // Empty bin: publish a brand-new node with a CAS — no locking.
        else if ((f = tabAt(tab, i = (n - 1) & hash)) == null) {
            if (casTabAt(tab, i, null,
                         new Node<K,V>(hash, key, value, null)))
                break; // no lock when adding to empty bin
        }
        // This bin is currently being moved by a resize: help transfer.
        else if ((fh = f.hash) == MOVED)
            tab = helpTransfer(tab, f);
        // Non-empty bin.
        else {
            V oldVal = null;
            // Note (2): lock only the head node of this bin.
            synchronized (f) {
                if (tabAt(tab, i) == f) {
                    // The bin is a linked list.
                    if (fh >= 0) {
                        binCount = 1;
                        // Walk the whole chain.
                        for (Node<K,V> e = f;; ++binCount) {
                            K ek;
                            // Key already present: replace the value.
                            if (e.hash == hash &&
                                ((ek = e.key) == key ||
                                 (ek != null && key.equals(ek)))) {
                                oldVal = e.val;
                                if (!onlyIfAbsent)
                                    e.val = value;
                                break;
                            }
                            Node<K,V> pred = e;
                            // New key: append at the tail of the chain.
                            if ((e = e.next) == null) {
                                pred.next = new Node<K,V>(hash, key,
                                                          value, null);
                                break;
                            }
                        }
                    }
                    // The bin is a red-black tree.
                    else if (f instanceof TreeBin) {
                        Node<K,V> p;
                        binCount = 2;
                        // Insert into (or find within) the tree.
                        if ((p = ((TreeBin<K,V>)f).putTreeVal(hash, key,
                                                       value)) != null) {
                            oldVal = p.val;
                            if (!onlyIfAbsent)
                                p.val = value;
                        }
                    }
                    else if (f instanceof ReservationNode)
                        throw new IllegalStateException("Recursive update");
                }
            }
            if (binCount != 0) {
                // Chains longer than the threshold (8) become trees.
                if (binCount >= TREEIFY_THRESHOLD)
                    treeifyBin(tab, i);
                if (oldVal != null)
                    return oldVal;
                break;
            }
        }
    }
    // Note (3): bump the element count (may trigger a resize).
    addCount(1L, binCount);
    return null;
}
其實hash衝撞的概率蠻低的,因此synchronized調用的次數並很少,更多的是在cas那裏...
而後就是cas比synchronized的優勢...
每次put
完畢,都會調用addCount
方法
/**
 * Adds x to the element count and, for put-like calls, checks whether
 * a resize should start or be joined. Counting first tries a CAS on
 * the shared baseCount and falls back to striped CounterCells on
 * contention (same idea as LongAdder).
 */
private final void addCount(long x, int check) {
    CounterCell[] as; long b, s;
    // Fast path: CAS baseCount; if cells exist or the CAS fails,
    // fall back to the per-thread counter cells.
    if ((as = counterCells) != null ||
        !U.compareAndSwapLong(this, BASECOUNT, b = baseCount, s = b + x)) {
        CounterCell a; long v; int m;
        boolean uncontended = true;
        // Pick a cell by the thread's probe; if it is missing or its
        // CAS also fails, take the slow path in fullAddCount.
        if (as == null || (m = as.length - 1) < 0 ||
            (a = as[ThreadLocalRandom.getProbe() & m]) == null ||
            !(uncontended =
              U.compareAndSwapLong(a, CELLVALUE, v = a.value, v + x))) {
            fullAddCount(x, uncontended);
            return;
        }
        if (check <= 1)
            return;
        // Sum base + cells for the resize check below.
        s = sumCount();
    }
    // check >= 0 marks a put-like operation: see whether a resize is due.
    if (check >= 0) {
        Node<K,V>[] tab, nt; int n, sc;
        // Loop while the count has reached sizeCtl and growth is possible.
        while (s >= (long)(sc = sizeCtl) && (tab = table) != null &&
               (n = tab.length) < MAXIMUM_CAPACITY) {
            int rs = resizeStamp(n);
            // Negative sizeCtl: a resize is already in progress.
            if (sc < 0) {
                // Bail out if that resize is finished or cannot be joined.
                if ((sc >>> RESIZE_STAMP_SHIFT) != rs || sc == rs + 1 ||
                    sc == rs + MAX_RESIZERS || (nt = nextTable) == null ||
                    transferIndex <= 0)
                    break;
                // Join the ongoing transfer as one more worker.
                if (U.compareAndSwapInt(this, SIZECTL, sc, sc + 1))
                    transfer(tab, nt);
            }
            // Otherwise try to start a new resize by stamping sizeCtl.
            else if (U.compareAndSwapInt(this, SIZECTL, sc,
                                         (rs << RESIZE_STAMP_SHIFT) + 2))
                transfer(tab, null);
            s = sumCount();
        }
    }
}