Hand-Writing Your Own LocalCache: LRU Based on LinkedHashMap

Functional Goals

     Implement a process-wide LocalCache in which each business component uses its own namespace to partition the cache logically. Reads and writes against the LocalCache therefore use a composite key of the form (namespace + separator + data key), for example the key/value pair NameToAge,Troy -> 23. The LocalCache must be thread-safe, keep the total number of key/value entries under control, and provide operations such as clearing, resizing, and dumping to a local file.
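     As a minimal sketch of the namespaced-key convention, assuming a comma separator (the CacheKeys helper below is hypothetical and not part of the original design):

public final class CacheKeys {

    // Assumed separator between namespace and data key, matching "NameToAge,Troy".
    private static final String SEPARATOR = ",";

    private CacheKeys() {
    }

    // e.g. buildKey("NameToAge", "Troy") -> "NameToAge,Troy"
    public static String buildKey(String namespace, String dataKey) {
        return namespace + SEPARATOR + dataKey;
    }
}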


Implementing an LRU Map with LinkedHashMap

     LinkedHashMap stores key/value pairs and, thanks to its access-order mode, can be used to model the LRU policy. In short, when access order is enabled, accessing an existing entry or inserting a new one moves that entry to the tail of the internal linked list, so the entry at the head of the list is the least recently used one, which is exactly what the LRU algorithm prescribes. Because this ordering is maintained by a doubly linked list threaded through the hash table entries, moving and inserting elements is cheap. We will build a thread-safe LRU Map on top of a single LinkedHashMap.
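     A quick, self-contained sketch of the access-order behaviour (independent of the LRUMap class below; the capacity of 3 and the keys are arbitrary):

import java.util.LinkedHashMap;
import java.util.Map;

// Demonstrates LinkedHashMap's access-order mode acting as an LRU cache of capacity 3.
public class LruDemo {
    public static void main(String[] args) {
        final int maxSize = 3;
        Map<String, Integer> lru = new LinkedHashMap<String, Integer>(maxSize + 1, 1f, true) {
            @Override
            protected boolean removeEldestEntry(Map.Entry<String, Integer> eldest) {
                // Evict the head (least recently used) entry once we exceed maxSize.
                return size() > maxSize;
            }
        };
        lru.put("a", 1);
        lru.put("b", 2);
        lru.put("c", 3);
        lru.get("a");      // touch "a" so it becomes the most recently used
        lru.put("d", 4);   // evicts "b", the least recently used entry
        System.out.println(lru.keySet()); // [c, a, d]
    }
}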

LRU Map Implementation

import java.io.Externalizable;
import java.io.IOException;
import java.io.ObjectInput;
import java.io.ObjectOutput;
import java.lang.ref.SoftReference;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class LRUMap<T> extends LinkedHashMap<String, SoftReference<T>> implements Externalizable {

    private static final long serialVersionUID = -7076355612133906912L;

    /** The maximum size of the cache. */
    private int maxCacheSize;

    /* lock for map */
    private final Lock lock = new ReentrantLock();

    /**
     * Default constructor; the maximum size is Integer.MAX_VALUE, so in practice
     * no entries are ever evicted (the superclass default insertion order is used).
     */
    public LRUMap() {
        super();
        maxCacheSize = Integer.MAX_VALUE;
    }

    /**
     * Constructs a new, empty cache with the specified maximum size.
     */
    public LRUMap(int size) {
        super(size + 1, 1f, true);
        maxCacheSize = size;
    }

    /**
     * Makes the LinkedHashMap behave as an LRU cache: returns true when the map
     * grows beyond the configured maximum, so the eldest (least recently used)
     * entry is evicted. The default LinkedHashMap implementation always returns
     * false, i.e. it never removes entries.
     */
    @Override
    protected boolean removeEldestEntry(Map.Entry<String, SoftReference<T>> eldest) {
        return size() > maxCacheSize;
    }

    public T addEntry(String key, T entry) {
        SoftReference<T> sr_entry = new SoftReference<T>(entry);
        // add the entry to the underlying map while holding the lock
        lock.lock();
        try {
            put(key, sr_entry);
        }
        finally {
            lock.unlock();
        }
        return entry;
    }

    public T getEntry(String key) {
        lock.lock();
        try {
            SoftReference<T> sr_entry = get(key);
            if (sr_entry == null)
                return null;
            T value = sr_entry.get();
            // if the soft reference has been cleared, the entry has been
            // garbage collected, so the stale key should be removed as well.
            if (value == null) {
                remove(key);
            }
            return value;
        }
        finally {
            lock.unlock();
        }
    }

    @Override
    public SoftReference<T> remove(Object key) {
        try {
            lock.lock();
            return super.remove(key);
        }
        finally {
            lock.unlock();
        }
    }

    @Override
    public void clear() {
        // use the same lock as the other operations; synchronizing on "this"
        // would not exclude callers that hold the ReentrantLock
        lock.lock();
        try {
            super.clear();
        }
        finally {
            lock.unlock();
        }
    }

    public void writeExternal(ObjectOutput out) throws IOException {
        // Snapshot the live entries first: cleared SoftReferences are skipped,
        // and the serialized count must match the number of pairs actually written.
        Map<String, T> live = new LinkedHashMap<String, T>();
        lock.lock();
        try {
            for (Map.Entry<String, SoftReference<T>> e : entrySet()) {
                T value = (e.getValue() == null) ? null : e.getValue().get();
                if (value != null) {
                    live.put(e.getKey(), value);
                }
            }
        }
        finally {
            lock.unlock();
        }
        // Write out size
        out.writeInt(live.size());
        // Write out keys and values
        for (Map.Entry<String, T> e : live.entrySet()) {
            out.writeObject(e.getKey());
            out.writeObject(e.getValue());
        }
    }

    public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException {
        // Read in size
        int size = in.readInt();
        // Read the keys and values, and put the mappings in the map
        for (int i = 0; i < size; i++) {
            String key = (String) in.readObject();
            @SuppressWarnings("unchecked")
            T value = (T) in.readObject();
            addEntry(key, value);
        }
    }
}
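     A short usage sketch of the LRUMap above (the capacity and keys are illustrative only):

// Usage sketch: a bounded LRUMap holding at most 2 entries.
public class LRUMapDemo {
    public static void main(String[] args) {
        LRUMap<Integer> ages = new LRUMap<Integer>(2);
        ages.addEntry("NameToAge,Troy", 23);
        ages.addEntry("NameToAge,Anna", 31);
        ages.getEntry("NameToAge,Troy");     // touch Troy so it becomes most recently used
        ages.addEntry("NameToAge,Bob", 45);  // evicts Anna, the least recently used entry
        System.out.println(ages.getEntry("NameToAge,Anna")); // null
        System.out.println(ages.getEntry("NameToAge,Troy")); // 23 (unless the SoftReference was cleared under memory pressure)
    }
}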

   

LocalCache Design

     If the LocalCache used only a single LRU Map, two performance problems would arise: 1. a single LinkedHashMap would hold far too many entries; 2. under high concurrency, contention on the single lock guarding it would limit throughput.

     Therefore the LocalCache uses several LRU Maps and hashes each key onto one of them, which both speeds up lookups within any single LinkedHashMap and raises the overall degree of concurrency.

LocalCache Implementation

     The hash function here is a variant of the Wang/Jenkins hash, and the segmentation scheme follows the ConcurrentHashMap implementation. The class also references a few helpers (CacheObject, StringUtils, ObjectUtils) that do not appear in this post; an assumed minimal shape for them is sketched right after this paragraph.
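     StringUtils.isBlank is assumed to come from Apache Commons Lang; CacheObject and ObjectUtils below are hypothetical stand-ins, sketched only so the LocalCache code compiles, not the original author's classes:

import java.io.*;

// Hypothetical stand-ins for helpers referenced by LocalCache but not shown in the post.
class CacheObject implements Serializable {
    private static final long serialVersionUID = 1L;
    private final Object value;

    CacheObject(Object value) {
        this.value = value;
    }

    Object getValue() {
        return value;
    }
}

class ObjectUtils {

    // Serialize an object to a file (used when dumping a segment to disk).
    static void objectToFile(Object obj, File file) throws IOException {
        ObjectOutputStream out = new ObjectOutputStream(new FileOutputStream(file));
        try {
            out.writeObject(obj);
        } finally {
            out.close();
        }
    }

    // Deserialize an object from a file, or return null if the file is missing.
    static Object fileToObject(File file) throws IOException, ClassNotFoundException {
        if (!file.exists()) {
            return null;
        }
        ObjectInputStream in = new ObjectInputStream(new FileInputStream(file));
        try {
            return in.readObject();
        } finally {
            in.close();
        }
    }
}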
import java.io.File;
import java.lang.ref.SoftReference;
import java.util.Iterator;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import org.apache.commons.lang.StringUtils; // assumed: Apache Commons Lang supplies isBlank

public class LocalCache {

    /**
     * Requested total capacity of the local cache
     */
    private final int size;
    /**
     * Maximum total capacity of the local cache
     */
    static final int MAXIMUM_CAPACITY = 1 << 30;

    /**
     * Maximum number of segments the local cache supports
     */
    static final int MAX_SEGMENTS = 1 << 16; // slightly conservative

    /**
     * Array of LRUMap segments backing the local cache
     */
    LRUMap<CacheObject>[] segments;

    /**
     * Mask value for indexing into segments. The upper bits of a key's hash
     * code are used to choose the segment.
     */
    int segmentMask;

    /**
     * Shift value for indexing within segments.
     */
    int segmentShift;

    /**
     * Threshold at which the lookup counters are reset
     */
    private static final int MAX_LOOKUP = 100000000;

    /**
     * Lock used when resetting the counters, preventing them from being reset
     * multiple times concurrently
     */
    private final Lock lock = new ReentrantLock();

    /**
     * Number of requests made to lookup a cache entry.
     */
    private AtomicLong lookup = new AtomicLong(0);

    /**
     * Number of successful requests for cache entries.
     */
    private AtomicLong found = new AtomicLong(0);
    
    public LocalCache(int size) {
        this.size = size;
    }


     public CacheObject get(String key) {
        if (StringUtils.isBlank(key)) {
            return null;
        }
        // increment the lookup counter
        lookup.incrementAndGet();

        // reset the counters if necessary
        if (lookup.get() > MAX_LOOKUP) {
            if (lock.tryLock()) {
                try {
                    lookup.set(0);
                    found.set(0);
                }
                finally {
                    lock.unlock();
                }
            }
        }

        int hash = hash(key.hashCode());
        CacheObject ret = segmentFor(hash).getEntry(key);
        if (ret != null)
            found.incrementAndGet();
        return ret;
    }


    public void remove(String key) {
        if (StringUtils.isBlank(key)) {
            return;
        }
        int hash = hash(key.hashCode());
        segmentFor(hash).remove(key);
        return;
    }

    public void put(String key, CacheObject val) {
        if (StringUtils.isBlank(key) || val == null) {
            return;
        }
        int hash = hash(key.hashCode());
        segmentFor(hash).addEntry(key, val);
        return;
    }

    public synchronized void clearCache() {
        for (int i = 0; i < segments.length; ++i)
            segments[i].clear();
    }

    public synchronized void reload() throws Exception {
       clearCache();
       init();
    }

    public synchronized void dumpLocalCache() throws Exception {
        for (int i = 0; i < segments.length; ++i) {
            String tmpDir = System.getProperty("java.io.tmpdir");
            String fileName = tmpDir + File.separator + "localCache-dump-file" + i + ".cache";
            File file = new File(fileName);
            ObjectUtils.objectToFile(segments[i], file);
        }
    }

    @SuppressWarnings("unchecked")
    public synchronized void restoreLocalCache() throws Exception {
        for (int i = 0; i < segments.length; ++i) {
            String tmpDir = System.getProperty("java.io.tmpdir");
            String fileName = tmpDir + File.separator + "localCache-dump-file" + i + ".cache";
            File file = new File(fileName);
            LRUMap<CacheObject> lruMap = (LRUMap<CacheObject>) ObjectUtils.fileToObject(file);
            if (lruMap != null) {
                Set<Entry<String, SoftReference<CacheObject>>> set = lruMap.entrySet();
                Iterator<Entry<String, SoftReference<CacheObject>>> it = set.iterator();
                while (it.hasNext()) {
                    Entry<String, SoftReference<CacheObject>> entry = it.next();
                    if (entry.getValue() != null && entry.getValue().get() != null)
                        segments[i].addEntry(entry.getKey(), entry.getValue().get());
                }
            }
        }
    }


    /**
     * Local cache hit rate in percent; may briefly read 0 right after the counters are reset
     */
    public int getHitRate() {
        long query = lookup.get();
        return query == 0 ? 0 : (int) ((found.get() * 100) / query);
    }

    /**
     * Number of local cache lookups; may briefly read 0 right after the counters are reset
     */
    public long getCount() {
        return lookup.get();
    }

    public int size() {
        final LRUMap<CacheObject>[] segments = this.segments;
        long sum = 0;
        for (int i = 0; i < segments.length; ++i) {
            sum += segments[i].size();
        }
        if (sum > Integer.MAX_VALUE)
            return Integer.MAX_VALUE;
        else
            return (int) sum;
    }


    /**
     * Returns the segment that should be used for key with given hash
     * 
     * @param hash
     *            the hash code for the key
     * @return the segment
     */
    final LRUMap<CacheObject> segmentFor(int hash) {
        return segments[(hash >>> segmentShift) & segmentMask];
    }


    /* ---------------- Small Utilities -------------- */

    /**
     * Applies a supplemental hash function to a given hashCode, which defends
     * against poor quality hash functions. This is critical because
     * ConcurrentHashMap uses power-of-two length hash tables, that otherwise
     * encounter collisions for hashCodes that do not differ in lower or upper
     * bits.
     */
    private static int hash(int h) {
        // Spread bits to regularize both segment and index locations,
        // using variant of single-word Wang/Jenkins hash.
        h += (h << 15) ^ 0xffffcd7d;
        h ^= (h >>> 10);
        h += (h << 3);
        h ^= (h >>> 6);
        h += (h << 2) + (h << 14);
        return h ^ (h >>> 16);
    }

    @SuppressWarnings("unchecked")
    public void init() throws Exception {
        int concurrencyLevel = 16;
        int capacity = size;
        if (capacity < 0 || concurrencyLevel <= 0)
            throw new IllegalArgumentException();
        if (concurrencyLevel > MAX_SEGMENTS)
            concurrencyLevel = MAX_SEGMENTS;
        // Find power-of-two sizes best matching arguments
        int sshift = 0;
        int ssize = 1;
        while (ssize < concurrencyLevel) {
            ++sshift;
            ssize <<= 1;
        }
        segmentShift = 32 - sshift;
        segmentMask = ssize - 1;
        this.segments = new LRUMap[ssize];
        if (capacity > MAXIMUM_CAPACITY)
            capacity = MAXIMUM_CAPACITY;
        int c = capacity / ssize;
        if (c * ssize < capacity)
            ++c;
        int cap = 1;
        while (cap < c)
            cap <<= 1;
        cap >>= 1;
        for (int i = 0; i < this.segments.length; ++i)
            this.segments[i] = new LRUMap<CacheObject>(cap);
    }
}
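     A short usage sketch of the LocalCache above (the capacity, keys, and values are illustrative; CacheObject is the assumed value wrapper from the earlier sketch):

public class LocalCacheDemo {
    public static void main(String[] args) throws Exception {
        LocalCache cache = new LocalCache(1024); // requested total capacity
        cache.init();                            // builds the LRUMap segments

        // keys follow the namespace + separator + data key convention
        cache.put("NameToAge,Troy", new CacheObject(23));
        CacheObject hit = cache.get("NameToAge,Troy");   // counted as a hit
        CacheObject miss = cache.get("NameToAge,Anna");  // counted as a miss

        System.out.println(hit != null);        // true (unless the SoftReference was cleared)
        System.out.println(miss);               // null
        System.out.println(cache.getHitRate()); // 50

        cache.dumpLocalCache();    // write each segment to java.io.tmpdir
        cache.clearCache();
        cache.restoreLocalCache(); // reload the segments from the dump files
    }
}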