[Kernel Source Study Notes] The slab allocator (5): freeing slab objects

6. Freeing slab objects

Freeing a slab object is done mainly through the kmem_cache_free function.
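For context, kmem_cache_free is the release half of the slab cache API: a cache is created once, objects are allocated from it, and later handed back. Below is a minimal usage sketch; the foo structure, the cache name and the helper functions are made up for illustration and are not part of the source being analysed.

#include <linux/slab.h>

struct foo {
	int id;
	char name[32];
};

static struct kmem_cache *foo_cachep;

static int foo_setup(void)
{
	/* one dedicated cache for struct foo objects */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static void foo_use(void)
{
	struct foo *p = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

	if (!p)
		return;
	/* ... use the object ... */
	kmem_cache_free(foo_cachep, p);	/* the path analysed below */
}

static void foo_teardown(void)
{
	kmem_cache_destroy(foo_cachep);
}

With that in mind, here is the implementation of kmem_cache_free.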

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;
	cachep = cache_from_obj(cachep, objp);
	if (!cachep)
		return;

	local_irq_save(flags);
	debug_check_no_locks_freed(objp, cachep->object_size);
	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(objp, cachep->object_size);
	__cache_free(cachep, objp, _RET_IP_);
	local_irq_restore(flags);

	trace_kmem_cache_free(_RET_IP_, objp);
}

Step one is to find the kmem_cache that this object really belongs to.

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but in that case we don't want to pay the price of the
	 * assignment. If it is not compiled in, the compiler should be smart
	 * enough to not even do the assignment; slab_equal_or_root would then
	 * be a constant as well.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline struct page *virt_to_head_page(const void *x)
{
	struct page *page = virt_to_page(x);

	return compound_head(page);
}

#define virt_to_page(addr)	pfn_to_page(virt_to_pfn(addr))

The main job here is to find the page containing the object's virtual address and then read the slab_cache pointer stored in that page.

Back in kmem_cache_free, local interrupts are disabled first, and then __cache_free is called to actually release the object.

static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
					 unsigned long caller)
{
	/* Put the object into the quarantine, don't touch it for now. */
	if (kasan_slab_free(cachep, objp, _RET_IP_))
		return;

	___cache_free(cachep, objp, caller);
}

void ___cache_free(struct kmem_cache *cachep, void *objp,
		unsigned long caller)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, caller);

	/*
	 * Skip calling cache_free_alien() when the platform is not NUMA.
	 * This avoids the cache misses that happen while accessing slabp
	 * (a per-page memory reference) to get the nodeid. Instead a global
	 * variable, which is very likely to already be in the cache, is used
	 * to skip the call.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	if (ac->avail < ac->limit) {
		STATS_INC_FREEHIT(cachep);
	} else {
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
	}

	if (sk_memalloc_socks()) {
		struct page *page = virt_to_head_page(objp);

		if (unlikely(PageSlabPfmemalloc(page))) {
			cache_free_pfmemalloc(cachep, page, objp);
			return;
		}
	}

	ac->entry[ac->avail++] = objp;
}

In ___cache_free, the first step is to get the per-CPU (local) object array cache and check that interrupts are off. If the local array cache already holds limit or more free objects (ac->avail >= ac->limit), cache_flusharray is called to flush a batch of them back and reclaim the space; otherwise this is the fast path and nothing needs flushing. In either case the object pointer is finally appended to ac->entry.
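Before looking at cache_flusharray itself, the fast-path logic can be sketched with a simplified user-space model. This is plain C for illustration only, not kernel code: array_cache_model, flush_model and free_model are invented names, and the fixed-size entry array stands in for the dynamically sized kernel structure. It mirrors the avail/limit decision above and the batchcount flush plus memmove compaction that cache_flusharray performs at its end.

#include <string.h>

/* simplified stand-in for struct array_cache */
struct array_cache_model {
	unsigned int avail;		/* cached free object pointers */
	unsigned int limit;		/* flush threshold */
	unsigned int batchcount;	/* how many pointers to flush at once */
	void *entry[64];		/* the cached pointers (LIFO order) */
};

/* stand-in for cache_flusharray(): hand batchcount pointers back to the
 * node lists, then compact the survivors to the front of entry[] */
static void flush_model(struct array_cache_model *ac)
{
	unsigned int batch = ac->batchcount;

	/* ... entry[0 .. batch-1] would go back to the slab lists here ... */
	ac->avail -= batch;
	memmove(ac->entry, &ac->entry[batch], sizeof(void *) * ac->avail);
}

/* stand-in for the tail of ___cache_free() */
static void free_model(struct array_cache_model *ac, void *objp)
{
	if (ac->avail >= ac->limit)	/* FREEMISS: cache full, flush first */
		flush_model(ac);
	ac->entry[ac->avail++] = objp;	/* FREEHIT fast path: just stash it */
}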

static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_cache_node *n;
	int node = numa_mem_id();
	LIST_HEAD(list);

	batchcount = ac->batchcount;

	check_irq_off();
	n = get_node(cachep, node);
	spin_lock(&n->list_lock);
	if (n->shared) {
		struct array_cache *shared_array = n->shared;
		int max = shared_array->limit - shared_array->avail;
		if (max) {
			if (batchcount > max)
				batchcount = max;
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	free_block(cachep, ac->entry, batchcount, node, &list);
free_done:
#if STATS
	{
		int i = 0;
		struct page *page;

		list_for_each_entry(page, &n->slabs_free, lru) {
			BUG_ON(page->active);

			i++;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&n->list_lock);
	slabs_destroy(cachep, &list);
	ac->avail -= batchcount;
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}

The kmem_cache_node for the current node is looked up from the node array first, and its list_lock is taken.

If that node has a shared object array cache, max is the room left in it (its limit minus its avail). If max is non-zero, batchcount object pointers (or only max of them, when max < batchcount) are copied from the local array cache's entry array into the shared cache's entry array, the shared cache's avail grows by the same amount, and execution jumps straight to the free_done label.
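The hand-off to the shared pool can be pictured with the same hypothetical array_cache_model used in the earlier sketch (illustration only, not kernel code): at most min(batchcount, limit - avail) pointers fit, and a completely full shared pool means falling through to free_block.

/* illustration only: copy up to batchcount pointers from the local
 * cache's entry[] into the per-node shared cache, as cache_flusharray does */
static unsigned int transfer_to_shared(struct array_cache_model *shared,
				       void **entry, unsigned int batchcount)
{
	unsigned int max = shared->limit - shared->avail;

	if (!max)
		return 0;		/* no room: caller uses free_block() */
	if (batchcount > max)
		batchcount = max;
	memcpy(&shared->entry[shared->avail], entry,
	       sizeof(void *) * batchcount);
	shared->avail += batchcount;
	return batchcount;
}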

If there is no shared array cache (or it has no room left), free_block is called instead.

static void free_block(struct kmem_cache *cachep, void **objpp,
			int nr_objects, int node, struct list_head *list)
{
	int i;
	struct kmem_cache_node *n = get_node(cachep, node);
	struct page *page;

	n->free_objects += nr_objects;

	for (i = 0; i < nr_objects; i++) {
		void *objp;
		struct page *page;

		objp = objpp[i];

		page = virt_to_head_page(objp);
		list_del(&page->lru);
		check_spinlock_acquired_node(cachep, node);
		slab_put_obj(cachep, page, objp);
		STATS_DEC_ACTIVE(cachep);

		/* fixup slab chains */
		if (page->active == 0) {
			list_add(&page->lru, &n->slabs_free);
			n->free_slabs++;
		} else {
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			list_add_tail(&page->lru, &n->slabs_partial);
		}
	}

	while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
		n->free_objects -= cachep->num;

		page = list_last_entry(&n->slabs_free, struct page, lru);
		list_move(&page->lru, list);
		n->free_slabs--;
		n->total_slabs--;
	}
}

Here free_objects in the kmem_cache_node is first increased by nr_objects. The function then walks the nr_objects pointers: it takes the i-th object from the passed-in entry array, finds the page that object lives in, removes the page from whatever slab list it is currently on with list_del(&page->lru), and calls slab_put_obj.

static void slab_put_obj(struct kmem_cache *cachep,
			struct page *page, void *objp)
{
	unsigned int objnr = obj_to_index(cachep, page, objp);
#if DEBUG
	unsigned int i;

	/* Verify double free bug */
	for (i = page->active; i < cachep->num; i++) {
		if (get_free_obj(page, i) == objnr) {
			pr_err("slab: double free detected in cache '%s', objp %px\n",
			       cachep->name, objp);
			BUG();
		}
	}
#endif
	page->active--;
	if (!page->freelist)
		page->freelist = objp + obj_offset(cachep);

	set_free_obj(page, page->active, objnr);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct page *page, void *obj)
{
	u32 offset = (obj - page->s_mem);
	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}

This function first computes the object's index within its slab. The page's active count is decremented, and if the page's freelist pointer is NULL it is made to point at this object (plus obj_offset). Finally set_free_obj writes the object's index into slot page->active of the freelist index array.
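The freelist bookkeeping is easier to see in a tiny standalone model (illustration only; in the kernel the index array lives inside the slab page and its entries are freelist_idx_t). Slots [active .. num-1] of the index array hold the indices of free objects: allocation pops the index stored at slot active, and freeing pushes the freed index back into the new active slot.

/* simplified model of the in-slab freelist index array */
struct slab_page_model {
	unsigned int active;		/* objects currently handed out */
	unsigned int num;		/* objects per slab */
	unsigned char freelist[16];	/* indices of free objects */
};

/* mirrors slab_get_obj(): hand out the index stored at slot 'active' */
static unsigned int get_obj_model(struct slab_page_model *pg)
{
	return pg->freelist[pg->active++];
}

/* mirrors slab_put_obj(): record the freed index at the new 'active' slot */
static void put_obj_model(struct slab_page_model *pg, unsigned int objnr)
{
	pg->active--;
	pg->freelist[pg->active] = (unsigned char)objnr;
}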

Back in free_block: if the page's active count has dropped to 0, the page is added to the node's slabs_free list and free_slabs is incremented; otherwise it is appended to the tail of the partially used list slabs_partial.

Then, while the node's free_objects count is above free_limit and the slabs_free list is not empty, free_objects is reduced by cachep->num and the page at the tail of slabs_free is moved onto the caller-supplied list, building up a list of slabs to destroy.

 

We then return to the free_done label in cache_flusharray, where, after the stats accounting and dropping list_lock, slabs_destroy is called on that list:

static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
{
	struct page *page, *n;

	list_for_each_entry_safe(page, n, list, lru) {
		list_del(&page->lru);
		slab_destroy(cachep, page);
	}
}

static void slab_destroy(struct kmem_cache *cachep, struct page *page)
{
	void *freelist;

	freelist = page->freelist;
	slab_destroy_debugcheck(cachep, page);
	if (unlikely(cachep->flags & SLAB_TYPESAFE_BY_RCU))
		call_rcu(&page->rcu_head, kmem_rcu_free);
	else
		kmem_freepages(cachep, page);

	/*
	 * From now on we don't use freelist, although the actual page
	 * can still be freed in RCU context.
	 */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->freelist_cache, freelist);
}

slabs_destroy walks every page on the list, removes it, and calls slab_destroy, which saves the page's freelist pointer and then frees the page, either immediately via kmem_freepages or deferred through call_rcu for SLAB_TYPESAFE_BY_RCU caches. If the freelist was allocated off-slab, it is freed back to freelist_cache as well.

static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
{
	int order = cachep->gfporder;

	BUG_ON(!PageSlab(page));
	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);
	page_mapcount_reset(page);
	page->mapping = NULL;

	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += 1 << order;
	uncharge_slab_page(page, order, cachep);
	__free_pages(page, order);
}

This returns all 1 << gfporder physically contiguous pages of the slab to the buddy allocator; for example, with gfporder = 2 and 4 KiB pages, __free_pages(page, 2) releases 4 pages, i.e. 16 KiB, in one go.

Finally, back in cache_flusharray, the local array cache's avail is reduced by batchcount, and the remaining object pointers are moved to the front of its entry array with memmove.
