Skip to content

Commit

Permalink
slab: destroy a slab without holding any alien cache lock
Browse files Browse the repository at this point in the history
I haven't heard that this alien cache lock is contended, but reducing
the chance of contention is generally better.  This change also lets
us simplify the complex lockdep annotation in the slab code; that
simplification is implemented in the following patch.

Signed-off-by: Joonsoo Kim <[email protected]>
Acked-by: Christoph Lameter <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: David Rientjes <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
JoonsooKim authored and torvalds committed Aug 7, 2014
1 parent 49dfc30 commit 833b706
Showing 1 changed file with 13 additions and 7 deletions.
20 changes: 13 additions & 7 deletions mm/slab.c
Original file line number Diff line number Diff line change
Expand Up @@ -1050,10 +1050,10 @@ static void free_alien_cache(struct alien_cache **alc_ptr)
}

static void __drain_alien_cache(struct kmem_cache *cachep,
struct array_cache *ac, int node)
struct array_cache *ac, int node,
struct list_head *list)
{
struct kmem_cache_node *n = get_node(cachep, node);
LIST_HEAD(list);

if (ac->avail) {
spin_lock(&n->list_lock);
Expand All @@ -1065,10 +1065,9 @@ static void __drain_alien_cache(struct kmem_cache *cachep,
if (n->shared)
transfer_objects(n->shared, ac, ac->limit);

free_block(cachep, ac->entry, ac->avail, node, &list);
free_block(cachep, ac->entry, ac->avail, node, list);
ac->avail = 0;
spin_unlock(&n->list_lock);
slabs_destroy(cachep, &list);
}
}

Expand All @@ -1086,8 +1085,11 @@ static void reap_alien(struct kmem_cache *cachep, struct kmem_cache_node *n)
if (alc) {
ac = &alc->ac;
if (ac->avail && spin_trylock_irq(&alc->lock)) {
__drain_alien_cache(cachep, ac, node);
LIST_HEAD(list);

__drain_alien_cache(cachep, ac, node, &list);
spin_unlock_irq(&alc->lock);
slabs_destroy(cachep, &list);
}
}
}
Expand All @@ -1104,10 +1106,13 @@ static void drain_alien_cache(struct kmem_cache *cachep,
for_each_online_node(i) {
alc = alien[i];
if (alc) {
LIST_HEAD(list);

ac = &alc->ac;
spin_lock_irqsave(&alc->lock, flags);
__drain_alien_cache(cachep, ac, i);
__drain_alien_cache(cachep, ac, i, &list);
spin_unlock_irqrestore(&alc->lock, flags);
slabs_destroy(cachep, &list);
}
}
}
Expand Down Expand Up @@ -1138,10 +1143,11 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
spin_lock(&alien->lock);
if (unlikely(ac->avail == ac->limit)) {
STATS_INC_ACOVERFLOW(cachep);
__drain_alien_cache(cachep, ac, nodeid);
__drain_alien_cache(cachep, ac, nodeid, &list);
}
ac_put_obj(cachep, ac, objp);
spin_unlock(&alien->lock);
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, nodeid);
spin_lock(&n->list_lock);
Expand Down

0 comments on commit 833b706

Please sign in to comment.