slub: Take node lock during object free checks
This only applies to scenarios where debugging is on:

Validation of slabs can currently occur while debugging
information is updated from the fast paths of the allocator.
This results in various races where we get false reports about
slab metadata not being in order.

This patch makes the fast paths take the node lock so that
they are serialized against slab validation. This causes
additional slowdown in debug scenarios.

Reported-by: Waiman Long <[email protected]>
Signed-off-by: Christoph Lameter <[email protected]>
Signed-off-by: Pekka Enberg <[email protected]>
Christoph Lameter authored and penberg committed Aug 16, 2012
1 parent 455ce9e commit 19c7ff9
Showing 1 changed file with 18 additions and 12 deletions.
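
Before the diff itself, a toy userspace model of the pattern the message describes may help. This is a minimal sketch, not kernel code: a pthread mutex stands in for the node's list_lock spinlock, and the names node_lock, objects_free, validate_slab_node and free_fast_path are invented for illustration. The point is only the shape of the fix: the validator has always walked slab debug metadata under the node lock, so the free fast path's debug checks must take that same lock instead of merely disabling interrupts.

/* Toy model of the serialization this commit introduces (userspace C). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static int objects_free;	/* stands in for per-slab debug metadata */

/* Validation walks the metadata; it has always held the node lock. */
static void *validate_slab_node(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&node_lock);
	/* With the fix, no concurrent free can change objects_free here. */
	printf("validator sees %d free objects\n", objects_free);
	pthread_mutex_unlock(&node_lock);
	return NULL;
}

/*
 * The free fast path used to update metadata with only IRQs disabled,
 * which does not exclude a validator running on another CPU. The fix
 * is to take the same node lock around the debug checks.
 */
static void *free_fast_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&node_lock);	/* was: local_irq_save() only */
	objects_free++;			/* debug bookkeeping */
	pthread_mutex_unlock(&node_lock);
	return NULL;
}

int main(void)
{
	pthread_t v, f;

	pthread_create(&v, NULL, validate_slab_node, NULL);
	pthread_create(&f, NULL, free_fast_path, NULL);
	pthread_join(v, NULL);
	pthread_join(f, NULL);
	return 0;
}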
diff --git a/mm/slub.c b/mm/slub.c
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1069,13 +1069,13 @@ static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *pa
 	return 0;
 }
 
-static noinline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, unsigned long addr)
+static noinline struct kmem_cache_node *free_debug_processing(
+	struct kmem_cache *s, struct page *page, void *object,
+	unsigned long addr, unsigned long *flags)
 {
-	unsigned long flags;
-	int rc = 0;
+	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&n->list_lock, *flags);
 	slab_lock(page);
 
 	if (!check_slab(s, page))
@@ -1113,15 +1113,19 @@ static noinline int free_debug_processing(struct kmem_cache *s,
 	set_track(s, object, TRACK_FREE, addr);
 	trace(s, page, object, 0);
 	init_object(s, object, SLUB_RED_INACTIVE);
-	rc = 1;
 out:
 	slab_unlock(page);
-	local_irq_restore(flags);
-	return rc;
+	/*
+	 * Keep node_lock to preserve integrity
+	 * until the object is actually freed
+	 */
+	return n;
 
 fail:
 	slab_unlock(page);
+	spin_unlock_irqrestore(&n->list_lock, *flags);
 	slab_fix(s, "Object at 0x%p not freed", object);
-	goto out;
+	return NULL;
 }
 
 static int __init setup_slub_debug(char *str)
@@ -1214,8 +1218,9 @@ static inline void setup_object_debug(struct kmem_cache *s,
 static inline int alloc_debug_processing(struct kmem_cache *s,
 	struct page *page, void *object, unsigned long addr) { return 0; }
 
-static inline int free_debug_processing(struct kmem_cache *s,
-	struct page *page, void *object, unsigned long addr) { return 0; }
+static inline struct kmem_cache_node *free_debug_processing(
+	struct kmem_cache *s, struct page *page, void *object,
+	unsigned long addr, unsigned long *flags) { return NULL; }
 
 static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
 	{ return 1; }
@@ -2452,7 +2457,8 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	stat(s, FREE_SLOWPATH);
 
-	if (kmem_cache_debug(s) && !free_debug_processing(s, page, x, addr))
+	if (kmem_cache_debug(s) &&
+		!(n = free_debug_processing(s, page, x, addr, &flags)))
 		return;
 
 	do {
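
One thing the diff only hints at: on success, free_debug_processing() now returns with n->list_lock still held (see the "Keep node_lock" comment), making the caller __slab_free() responsible for releasing it with spin_unlock_irqrestore() once the object is actually freed; on failure, the lock is dropped internally and NULL is returned. Below is a minimal sketch of that lock hand-off contract in illustrative userspace C, under the assumption stated; struct node, checks_locked and free_slowpath are invented names, with a pthread mutex in place of the spinlock.

#include <pthread.h>
#include <stddef.h>

struct node {
	pthread_mutex_t list_lock;
	int nr_objects;		/* stands in for slab metadata */
};

/*
 * Analogue of free_debug_processing(): returns NULL on failed checks
 * (lock already dropped), or the node with list_lock still held.
 */
static struct node *checks_locked(struct node *n, int checks_ok)
{
	pthread_mutex_lock(&n->list_lock);
	if (!checks_ok) {
		pthread_mutex_unlock(&n->list_lock);	/* fail path drops the lock */
		return NULL;
	}
	return n;	/* success: caller now owns list_lock */
}

/* Analogue of the __slab_free() call site in the last hunk above. */
static void free_slowpath(struct node *node, int checks_ok)
{
	struct node *n = checks_locked(node, checks_ok);

	if (!n)
		return;			/* checks failed; nothing left to unlock */

	node->nr_objects--;		/* "actually free" under the lock */
	pthread_mutex_unlock(&n->list_lock);
}

int main(void)
{
	struct node node = { PTHREAD_MUTEX_INITIALIZER, 1 };

	free_slowpath(&node, 1);	/* checks pass: object freed */
	free_slowpath(&node, 0);	/* checks fail: early return */
	return 0;
}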
