Merge branch 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux

Pull slab changes from Pekka Enberg:
 "The biggest change is byte-sized freelist indices which reduces slab
  freelist memory usage:

    https://lkml.org/lkml/2013/12/2/64"

* 'slab/next' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/linux:
  mm: slab/slub: use page->lru consistently instead of page->list
  mm/slab.c: cleanup outdated comments and unify variables naming
  slab: fix wrongly used macro
  slub: fix high order page allocation problem with __GFP_NOFAIL
  slab: Make allocations with GFP_ZERO slightly more efficient
  slab: make more slab management structure off the slab
  slab: introduce byte sized index for the freelist of a slab
  slab: restrict the number of objects in a slab
  slab: introduce helper functions to get/set free object
  slab: factor out calculate nr objects in cache_estimate
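
The saving quoted above is straightforward to estimate: before this series each freelist entry was an unsigned int sized index, and a slab is now restricted to at most 256 objects so that a single byte per entry suffices. A minimal userspace sketch of that arithmetic (not kernel code; the 4 KiB page and 16-byte object size are assumed example values):

#include <stdio.h>

int main(void)
{
	unsigned int page_size = 4096;			/* assumed 2^12 byte page */
	unsigned int obj_size = 16;			/* assumed smallest object */
	unsigned int nr_objs = page_size / obj_size;	/* 256, the new upper bound */

	/* one freelist entry per object in the slab */
	printf("unsigned int indices: %zu bytes\n",
	       nr_objs * sizeof(unsigned int));		/* 1024 bytes */
	printf("byte sized indices:   %zu bytes\n",
	       nr_objs * sizeof(unsigned char));	/* 256 bytes */
	return 0;
}

Under these assumptions that is 768 bytes of freelist metadata saved per slab of minimum-size objects.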
torvalds committed Apr 13, 2014
2 parents 321d03c + 34bf6ef commit bf3a340
Showing 5 changed files with 128 additions and 84 deletions.
3 changes: 2 additions & 1 deletion include/linux/mm_types.h
@@ -124,6 +124,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -136,7 +138,6 @@ struct page {
 #endif
 		};
 
-		struct list_head list;	/* slobs list of pages */
 		struct slab *slab_page; /* slab fields */
 		struct rcu_head rcu_head;	/* Used by SLAB
 						 * when destroying via RCU
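
The deletion works because every member shown here sits in one union inside struct page: the dropped list and the surviving lru already overlapped in memory, so the slab allocators can link pages through lru with no growth of struct page. A simplified userspace sketch of that overlay (fake_page and its fields are illustrative stand-ins, not the real struct page):

#include <stdio.h>
#include <stddef.h>

struct list_head {
	struct list_head *next, *prev;
};

struct fake_page {
	union {
		struct list_head lru;		/* generic list for the page owner */
		struct {			/* slub per cpu partial pages */
			struct fake_page *next;	/* next partial slab */
			int pages;		/* nr of partial slabs left */
		};
	};
};

int main(void)
{
	/* both views start at the same offset; only one may be live at a time */
	printf("offsetof(lru)  = %zu\n", offsetof(struct fake_page, lru));
	printf("offsetof(next) = %zu\n", offsetof(struct fake_page, next));
	printf("sizeof(page)   = %zu\n", sizeof(struct fake_page));
	return 0;
}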
11 changes: 11 additions & 0 deletions include/linux/slab.h
@@ -242,6 +242,17 @@ struct kmem_cache {
 #define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
 #endif
 
+/*
+ * This restriction comes from byte sized index implementation.
+ * Page size is normally 2^12 bytes and, in this case, if we want to use
+ * byte sized index which can represent 2^8 entries, the size of the object
+ * should be equal or greater to 2^12 / 2^8 = 2^4 = 16.
+ * If minimum size of kmalloc is less than 16, we use it as minimum object
+ * size and give up to use byte sized index.
+ */
+#define SLAB_OBJ_MIN_SIZE      (KMALLOC_MIN_SIZE < 16 ? \
+                                (KMALLOC_MIN_SIZE) : 16)
+
 #ifndef CONFIG_SLOB
 extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
 #ifdef CONFIG_ZONE_DMA
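
Replaying the arithmetic in the new comment: a one-byte index can name at most 2^8 = 256 objects, so with a 2^12-byte page each object must occupy at least 2^12 / 2^8 = 16 bytes for every object to stay addressable. A compile-time userspace sketch (C11; the KMALLOC_MIN_SIZE value of 8 is an assumed example, not any particular arch):

#include <assert.h>

#define KMALLOC_MIN_SIZE 8	/* assumed example value */

/* same expression as the hunk above */
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
			   (KMALLOC_MIN_SIZE) : 16)

/* 4096-byte page / 256 byte-index values = 16-byte minimum object */
static_assert(4096 / 256 == 16, "byte sized index needs >= 16 byte objects");

/* with KMALLOC_MIN_SIZE < 16 the smaller bound wins and the byte
 * sized index is given up for those caches, as the comment says */
static_assert(SLAB_OBJ_MIN_SIZE == 8, "macro picks the smaller bound");

int main(void) { return 0; }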
