In the previous article, Linux内核源代码情景分析-内存管理之slab-分配与释放 (Linux Kernel Source Code Scenario Analysis - Memory Management: slab Allocation and Release), we ended up with the structure shown in the figure below:

Figure 1

As the figure shows, the pages occupied by fully free slabs are not released on their own; they are reclaimed through kmem_cache_reap and kmem_cache_shrink. The two functions differ as follows:

1. Let's first look at kmem_cache_shrink. The code is as follows:
int kmem_cache_shrink(kmem_cache_t *cachep)
{
	if (!cachep || in_interrupt() || !is_chained_kmem_cache(cachep))
		BUG();

	return __kmem_cache_shrink(cachep);
}
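As a side note, here is a minimal sketch of how a caller would typically use this interface with the 2.4-era slab API. The cache name foo_cache and struct foo are hypothetical, made up purely for illustration; kmem_cache_create, kmem_cache_free and kmem_cache_shrink are the real kernel interfaces:

#include <linux/slab.h>
#include <linux/errno.h>

struct foo {				/* hypothetical object type cached below */
	int data[16];
};

static kmem_cache_t *foo_cache;		/* hypothetical cache for struct foo */

static int foo_init(void)
{
	/* create a cache for struct foo objects, with no constructor/destructor */
	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
				      0, 0, NULL, NULL);
	return foo_cache ? 0 : -ENOMEM;
}

static void foo_release_memory(void)
{
	/*
	 * After many kmem_cache_free() calls, fully free slabs accumulate at
	 * the tail of foo_cache's slab list.  kmem_cache_shrink() walks that
	 * tail and returns the pages to the page allocator; the return value
	 * is non-zero if the cache still holds any slabs afterwards.
	 */
	kmem_cache_shrink(foo_cache);
}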
The real work happens in __kmem_cache_shrink:

static int __kmem_cache_shrink(kmem_cache_t *cachep)
{
	slab_t *slabp;
	int ret;

	drain_cpu_caches(cachep);

	spin_lock_irq(&cachep->spinlock);

	/* If the cache is growing, stop shrinking. */
	while (!cachep->growing) {		/* make sure the cache is not growing */
		struct list_head *p;

		p = cachep->slabs.prev;		/* the tail of the list holds the free slabs; in our example this is the 4th slab, which is fully free */
		if (p == &cachep->slabs)	/* the list holds no slabs at all, so just break */
			break;

		slabp = list_entry(cachep->slabs.prev, slab_t, list);
		if (slabp->inuse)		/* not a fully free slab, stop */
			break;

		list_del(&slabp->list);		/* unlink the free slab, so next iteration cachep->slabs.prev points at a new slab */
		if (cachep->firstnotfull == &slabp->list)	/* if firstnotfull pointed at this free slab, no partial or free slab is left to allocate from */
			cachep->firstnotfull = &cachep->slabs;	/* so point it back at cachep->slabs */

		spin_unlock_irq(&cachep->spinlock);
		kmem_slab_destroy(cachep, slabp);	/* run the destructor on every object and free all the pages of this free slab */
		spin_lock_irq(&cachep->spinlock);
	}
	ret = !list_empty(&cachep->slabs);
	spin_unlock_irq(&cachep->spinlock);
	return ret;
}
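To make the loop above more concrete, here is a small user-space sketch (simplified stand-in types, not kernel code) of the invariant it relies on: the slab list is kept ordered with fully used and partially used slabs at the front and fully free slabs at the tail, so walking backwards from cachep->slabs.prev and stopping at the first slab whose inuse count is non-zero visits exactly the free slabs:

#include <stdio.h>

struct slab {
	int inuse;			/* objects currently allocated from this slab */
	struct slab *prev, *next;	/* doubly linked, like list_head in the kernel */
};

/* four slabs, mirroring figure 1: two full, one partial, the 4th fully free */
static struct slab slabs[4] = { { 4 }, { 4 }, { 2 }, { 0 } };

int main(void)
{
	struct slab head = { -1, &slabs[3], &slabs[0] };	/* plays the role of cachep->slabs */
	struct slab *p;
	int i;

	/* link the slabs into a circular doubly linked list behind the head */
	for (i = 0; i < 4; i++) {
		slabs[i].prev = (i == 0) ? &head : &slabs[i - 1];
		slabs[i].next = (i == 3) ? &head : &slabs[i + 1];
	}

	/* the same walk as __kmem_cache_shrink: start at the tail, stop at
	 * the first slab that still has objects in use */
	for (p = head.prev; p != &head && p->inuse == 0; p = p->prev)
		printf("would destroy slab %d\n", (int)(p - slabs) + 1);

	return 0;
}

Running it prints "would destroy slab 4" only, because the 3rd slab is still partially in use; that is exactly why __kmem_cache_shrink can stop at the first slab with inuse != 0.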
kmem_slab_destroy, called above, runs the destructor on every object in the slab and then releases the slab's pages:

static void kmem_slab_destroy (kmem_cache_t *cachep, slab_t *slabp)
{
	if (cachep->dtor
		......
	) {
		int i;
		for (i = 0; i < cachep->num; i++) {
			void *objp = slabp->s_mem + cachep->objsize*i;
			......
			if (cachep->dtor)
				(cachep->dtor)(objp, cachep, 0);	/* run the destructor on every object */
			......
		}
	}

	kmem_freepages(cachep, slabp->s_mem - slabp->colouroff);	/* free all the pages occupied by this free slab */
	if (OFF_SLAB(cachep))
		kmem_cache_free(cachep->slabp_cache, slabp);		/* the slab descriptor lives off-slab, so free it too */
}
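The two address computations above deserve a closer look: s_mem points at the first object, which sits colouroff bytes past the start of the slab's pages (the colouring offset plus, for on-slab management, the slab descriptor and bufctl array), so s_mem - colouroff recovers the page-aligned address that kmem_freepages needs. A tiny user-space sketch with made-up numbers (page_base, colouroff, objsize and num are purely illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long page_base = 0xc0120000UL;	/* hypothetical start of the slab's pages */
	unsigned long colouroff = 64;		/* hypothetical colouring + descriptor offset */
	unsigned long s_mem     = page_base + colouroff;
	unsigned long objsize   = 256;		/* hypothetical object size */
	unsigned int  num       = 4;		/* hypothetical objects per slab */
	unsigned int  i;

	/* same arithmetic as kmem_slab_destroy: the address of each object */
	for (i = 0; i < num; i++)
		printf("object %u at %#lx\n", i, s_mem + objsize * i);

	/* same arithmetic as the kmem_freepages() call: back to the page base */
	printf("pages start at %#lx\n", s_mem - colouroff);
	return 0;
}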
kmem_freepages finally hands the slab's pages back to the page allocator:

static inline void kmem_freepages (kmem_cache_t *cachep, void *addr)
{
	unsigned long i = (1 << cachep->gfporder);
	struct page *page = virt_to_page(addr);

	/* free_pages() does not clear the type bit - we do that.
	 * The pages have been unlinked from their cache-slab,
	 * but their 'struct page's might be accessed in
	 * vm_scan(). Shouldn't be a worry.
	 */
	while (i--) {
		PageClearSlab(page);			/* clear the PG_slab bit on every page of the slab */
		page++;
	}
	free_pages((unsigned long)addr, cachep->gfporder);	/* return the 2^gfporder pages */
}
2. Now let's look at kmem_cache_reap. The code is as follows:

void kmem_cache_reap (int gfp_mask)
{
	slab_t *slabp;
	kmem_cache_t *searchp;
	kmem_cache_t *best_cachep;
	unsigned int best_pages;
	unsigned int best_len;
	unsigned int scan;

	if (gfp_mask & __GFP_WAIT)
		down(&cache_chain_sem);
	else
		if (down_trylock(&cache_chain_sem))
			return;

	scan = REAP_SCANLEN;
	best_len = 0;
	best_pages = 0;
	best_cachep = NULL;
	searchp = clock_searchp;	/* the cache examined last time */
	do {
		unsigned int pages;
		struct list_head *p;
		unsigned int full_free;

		/* It's safe to test this without holding the cache-lock. */
		if (searchp->flags & SLAB_NO_REAP)
			goto next;
		spin_lock_irq(&searchp->spinlock);
		if (searchp->growing)			/* the cache must not be growing */
			goto next_unlock;
		if (searchp->dflags & DFLGS_GROWN) {
			searchp->dflags &= ~DFLGS_GROWN;
			goto next_unlock;
		}
		......
		full_free = 0;
		p = searchp->slabs.prev;		/* start from the free slabs at the tail */
		while (p != &searchp->slabs) {
			slabp = list_entry(p, slab_t, list);
			if (slabp->inuse)
				break;
			full_free++;			/* one more fully free slab */
			p = p->prev;			/* keep searching towards the front */
		}

		/*
		 * Try to avoid slabs with constructors and/or
		 * more than one page per slab (as it can be difficult
		 * to get high orders from gfp()).
		 */
		pages = full_free * (1<<searchp->gfporder);	/* pages occupied by the free slabs */
		if (searchp->ctor)
			pages = (pages*4+1)/5;
		if (searchp->gfporder)
			pages = (pages*4+1)/5;			/* weight down to roughly 80% of the page count */
		if (pages > best_pages) {			/* remember the cache whose free slabs cover the most (weighted) pages */
			best_cachep = searchp;
			best_len = full_free;			/* number of free slabs */
			best_pages = pages;			/* weighted (roughly 80%) page count of those free slabs */
			if (full_free >= REAP_PERFECT) {
				clock_searchp = list_entry(searchp->next.next,
							kmem_cache_t, next);
				goto perfect;
			}
		}
next_unlock:
		spin_unlock_irq(&searchp->spinlock);
next:
		searchp = list_entry(searchp->next.next, kmem_cache_t, next);
	} while (--scan && searchp != clock_searchp);	/* walk the cache_chain list looking for a suitable cache */

	clock_searchp = searchp;	/* the cache examined this time becomes the starting point next time */

	if (!best_cachep)		/* nothing suitable found */
		/* couldn't find anything to reap */
		goto out;

	spin_lock_irq(&best_cachep->spinlock);
perfect:
	/* free only 80% of the free slabs */
	best_len = (best_len*4 + 1)/5;			/* roughly 80% of the number of free slabs */
	for (scan = 0; scan < best_len; scan++) {	/* release the free slabs one by one */
		struct list_head *p;

		if (best_cachep->growing)		/* the cache must not be growing */
			break;
		p = best_cachep->slabs.prev;		/* the free slab destroyed last round was unlinked, so this now points at a new free slab */
		if (p == &best_cachep->slabs)		/* every slab in the list has been visited, stop */
			break;
		slabp = list_entry(p, slab_t, list);
		if (slabp->inuse)			/* not a free slab, stop */
			break;
		list_del(&slabp->list);			/* unlink the free slab */
		if (best_cachep->firstnotfull == &slabp->list)	/* if firstnotfull pointed at this free slab, no partial or free slab is left to allocate from */
			best_cachep->firstnotfull = &best_cachep->slabs;	/* so point it back at cachep->slabs */
		STATS_INC_REAPED(best_cachep);

		/* Safe to drop the lock. The slab is no longer linked to the
		 * cache.
		 */
		spin_unlock_irq(&best_cachep->spinlock);
		kmem_slab_destroy(best_cachep, slabp);	/* destroy the objects and free the pages, as above */
		spin_lock_irq(&best_cachep->spinlock);
	}
	spin_unlock_irq(&best_cachep->spinlock);
out:
	up(&cache_chain_sem);
	return;
}
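The repeated (x*4 + 1)/5 expressions are just an integer approximation of "take about 80%". A short user-space sketch with made-up numbers (full_free, gfporder and the presence of a constructor are purely illustrative) shows how the weighting plays out when picking and then reaping the victim cache:

#include <stdio.h>

int main(void)
{
	unsigned int full_free = 6;	/* hypothetical: 6 fully free slabs found in the walk */
	unsigned int gfporder  = 1;	/* hypothetical: each slab spans 2 pages */
	int has_ctor = 1;		/* hypothetical: the cache has a constructor */
	unsigned int pages, best_len;

	/* same weighting as the selection loop in kmem_cache_reap */
	pages = full_free * (1 << gfporder);	/* 6 * 2 = 12 pages of free slabs */
	if (has_ctor)
		pages = (pages*4 + 1)/5;	/* 12 -> 9, penalise caches with constructors */
	if (gfporder)
		pages = (pages*4 + 1)/5;	/* 9 -> 7, penalise multi-page slabs */
	printf("weighted pages used to pick best_cachep: %u\n", pages);

	/* once the victim is chosen, only about 80% of its free slabs are destroyed */
	best_len = (full_free*4 + 1)/5;		/* 6 -> 5 slabs actually reaped */
	printf("free slabs that will be destroyed: %u\n", best_len);
	return 0;
}

So a cache with a constructor or with multi-page slabs looks less attractive to the reaper, and even the chosen cache keeps roughly a fifth of its free slabs as a cushion against having to grow again immediately.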
Original article: http://blog.csdn.net/jltxgcy/article/details/44062731