From: Dave Hansen
Date: Tue, 8 Apr 2014 20:44:27 +0000 (-0700)
Subject: mm: slab/slub: use page->lru consistently instead of page->list
X-Git-Tag: firefly_0821_release~176^2~4057^2
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=34bf6ef94a835a8f1d8abd3e7d38c6c08d205867;p=firefly-linux-kernel-4.4.55.git

mm: slab/slub: use page->lru consistently instead of page->list

'struct page' has two list_head fields: 'lru' and 'list'.  Conveniently,
they are unioned together.  This means that code can use them
interchangeably, which gets horribly confusing like with this nugget from
slab.c:

>	list_del(&page->lru);
>	if (page->active == cachep->num)
>		list_add(&page->list, &n->slabs_full);

This patch makes the slab and slub code use page->lru universally instead
of mixing ->list and ->lru.

So, the new rule is: page->lru is what you use if you want to keep your
page on a list.  Don't like the fact that it's not called ->list?  Too bad.

Signed-off-by: Dave Hansen
Acked-by: Christoph Lameter
Acked-by: David Rientjes
Cc: Pekka Enberg
Signed-off-by: Andrew Morton
Signed-off-by: Pekka Enberg
---
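To illustrate the aliasing that makes the mixed usage quoted above "work" at
all, here is a minimal userspace sketch; it is not kernel code, and
'struct fake_page' with its simplified 'struct list_head' is only a
hypothetical stand-in for the relevant slice of 'struct page':

	#include <assert.h>
	#include <stdio.h>

	struct list_head {		/* simplified stand-in, not <linux/list.h> */
		struct list_head *next, *prev;
	};

	struct fake_page {		/* hypothetical slice of 'struct page' */
		union {
			struct list_head lru;	/* the name this patch standardizes on */
			struct list_head list;	/* the alias being removed */
		};
	};

	int main(void)
	{
		struct fake_page page;

		/*
		 * Both names resolve to the same storage, so list operations
		 * on either one manipulate the same links.
		 */
		assert((void *)&page.lru == (void *)&page.list);
		printf("lru at %p, list at %p\n",
		       (void *)&page.lru, (void *)&page.list);
		return 0;
	}

Because the two names share storage, the slab.c snippet quoted above does
link the page onto the right list, but the field name no longer tells the
reader which list_head is in play; using ->lru everywhere removes that
ambiguity.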
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 290901a8c1de..84b74080beb7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -124,6 +124,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
 		};
 
-		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
 		struct rcu_head rcu_head;	/* Used by SLAB
 						 * when destroying via RCU
diff --git a/mm/slab.c b/mm/slab.c
index 8dd8e0875e4c..f6718197cdd0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2922,9 +2922,9 @@ retry:
 		/* move slabp to correct slabp list: */
 		list_del(&page->lru);
 		if (page->active == cachep->num)
-			list_add(&page->list, &n->slabs_full);
+			list_add(&page->lru, &n->slabs_full);
 		else
-			list_add(&page->list, &n->slabs_partial);
+			list_add(&page->lru, &n->slabs_partial);
 	}
 
 must_grow:
diff --git a/mm/slob.c b/mm/slob.c
index 4bf8809dfcce..730cad45d4be 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -111,13 +111,13 @@ static inline int slob_page_free(struct page *sp)
 
 static void set_slob_page_free(struct page *sp, struct list_head *list)
 {
-	list_add(&sp->list, list);
+	list_add(&sp->lru, list);
 	__SetPageSlobFree(sp);
 }
 
 static inline void clear_slob_page_free(struct page *sp)
 {
-	list_del(&sp->list);
+	list_del(&sp->lru);
 	__ClearPageSlobFree(sp);
 }
 
@@ -282,7 +282,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 
 	spin_lock_irqsave(&slob_lock, flags);
 	/* Iterate through each partially free page, try to find room */
-	list_for_each_entry(sp, slob_list, list) {
+	list_for_each_entry(sp, slob_list, lru) {
 #ifdef CONFIG_NUMA
 		/*
 		 * If there's a node specification, search for a partial
@@ -296,7 +296,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 			continue;
 
 		/* Attempt to alloc */
-		prev = sp->list.prev;
+		prev = sp->lru.prev;
 		b = slob_page_alloc(sp, size, align);
 		if (!b)
 			continue;
@@ -322,7 +322,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
 		spin_lock_irqsave(&slob_lock, flags);
 		sp->units = SLOB_UNITS(PAGE_SIZE);
 		sp->freelist = b;
-		INIT_LIST_HEAD(&sp->list);
+		INIT_LIST_HEAD(&sp->lru);
 		set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
 		set_slob_page_free(sp, slob_list);
 		b = slob_page_alloc(sp, size, align);