sl[au]b: always get the cache from its page in kmem_cache_free()
struct page already has this information.  If we start chaining caches,
this information will always be more trustworthy than whatever is passed
into the function.
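
(Illustrative aside, not part of the commit.) A minimal userspace C model of the argument above, with all names invented for the sketch: once caches are chained per-memcg, the caller may pass the root cache while the object really came from a child cache, and the object's backing page records which one.

    #include <stdio.h>

    /* Toy stand-ins for struct kmem_cache and struct page. */
    struct cache { const char *name; const struct cache *root; };
    struct page  { const struct cache *slab_cache; };  /* what the page records */

    /* Rough analogue of slab_equal_or_root(): the passed cache is acceptable
     * if it is the object's cache itself or that cache's root. */
    static int equal_or_root(const struct cache *actual, const struct cache *passed)
    {
        return actual == passed || actual->root == passed;
    }

    int main(void)
    {
        struct cache root  = { "dentry", NULL };
        struct cache child = { "dentry(memcg)", &root };  /* chained cache */
        struct page pg = { &child };  /* page of the object being freed */

        /* Caller frees with the root cache; the page knows the real one. */
        printf("passed=%s actual=%s ok=%d\n",
               root.name, pg.slab_cache->name,
               equal_or_root(pg.slab_cache, &root));
        return 0;
    }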

Signed-off-by: Glauber Costa <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Greg Thelen <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: JoonSoo Kim <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Michal Hocko <[email protected]>
Cc: Pekka Enberg <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Suleiman Souhlal <[email protected]>
Cc: Tejun Heo <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Glauber Costa authored and torvalds committed Dec 18, 2012
1 parent 0e9d92f commit b9ce5ef
Showing 5 changed files with 53 additions and 14 deletions.
5 changes: 5 additions & 0 deletions include/linux/memcontrol.h
@@ -554,6 +554,11 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 	return __memcg_kmem_get_cache(cachep, gfp);
 }
 #else
+static inline bool memcg_kmem_enabled(void)
+{
+	return false;
+}
+
 static inline bool
 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
 {
6 changes: 5 additions & 1 deletion mm/slab.c
@@ -87,7 +87,6 @@
  */
 
 #include <linux/slab.h>
-#include "slab.h"
 #include <linux/mm.h>
 #include <linux/poison.h>
 #include <linux/swap.h>
@@ -128,6 +127,8 @@
 
 #include "internal.h"
 
+#include "slab.h"
+
 /*
  * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
  *		  0 for faster, smaller code (especially in the critical paths).
@@ -3883,6 +3884,9 @@ EXPORT_SYMBOL(__kmalloc);
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 {
 	unsigned long flags;
+	cachep = cache_from_obj(cachep, objp);
+	if (!cachep)
+		return;
 
 	local_irq_save(flags);
 	debug_check_no_locks_freed(objp, cachep->object_size);
39 changes: 39 additions & 0 deletions mm/slab.h
@@ -116,6 +116,13 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
 	return (is_root_cache(cachep) && !memcg) ||
 		(cachep->memcg_params->memcg == memcg);
 }
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+				      struct kmem_cache *p)
+{
+	return (p == s) ||
+		(s->memcg_params && (p == s->memcg_params->root_cache));
+}
 #else
 static inline bool is_root_cache(struct kmem_cache *s)
 {
@@ -127,5 +134,37 @@ static inline bool cache_match_memcg(struct kmem_cache *cachep,
 {
 	return true;
 }
+
+static inline bool slab_equal_or_root(struct kmem_cache *s,
+				      struct kmem_cache *p)
+{
+	return true;
+}
 #endif
+
+static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
+{
+	struct kmem_cache *cachep;
+	struct page *page;
+
+	/*
+	 * When kmemcg is not being used, both assignments should return the
+	 * same value. but we don't want to pay the assignment price in that
+	 * case. If it is not compiled in, the compiler should be smart enough
+	 * to not do even the assignment. In that case, slab_equal_or_root
+	 * will also be a constant.
+	 */
+	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
+		return s;
+
+	page = virt_to_head_page(x);
+	cachep = page->slab_cache;
+	if (slab_equal_or_root(cachep, s))
+		return cachep;
+
+	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
+	       __FUNCTION__, cachep->name, s->name);
+	WARN_ON_ONCE(1);
+	return s;
+}
 #endif
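
(Editorial note on the fast path, illustrative only.) The comment in cache_from_obj() leans on memcg_kmem_enabled() being the constant-false stub added to memcontrol.h above when kmemcg is compiled out: unless SLAB_DEBUG_FREE is set, the function then folds to "return s;" and the page lookup is never executed. A standalone sketch of that folding pattern, with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the !CONFIG_MEMCG_KMEM memcg_kmem_enabled() stub:
     * a constant the compiler can see through. */
    static inline bool feature_enabled(void) { return false; }

    static int lookup(int passed)
    {
        /* Dead when feature_enabled() is compile-time false; the
         * optimizer reduces lookup() to "return passed;". */
        if (feature_enabled())
            return passed * 2;  /* stand-in for the page lookup */
        return passed;
    }

    int main(void)
    {
        printf("%d\n", lookup(21));  /* prints 21 */
        return 0;
    }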
2 changes: 1 addition & 1 deletion mm/slob.c
@@ -58,7 +58,6 @@
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
-#include "slab.h"
 
 #include <linux/mm.h>
 #include <linux/swap.h> /* struct reclaim_state */
@@ -73,6 +72,7 @@
 
 #include <linux/atomic.h>
 
+#include "slab.h"
 /*
  * slob_block has a field 'units', which indicates size of block if +ve,
  * or offset of next block if -ve (in SLOB_UNITs).
15 changes: 3 additions & 12 deletions mm/slub.c
@@ -2611,19 +2611,10 @@ static __always_inline void slab_free(struct kmem_cache *s,
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
 {
-	struct page *page;
-
-	page = virt_to_head_page(x);
-
-	if (kmem_cache_debug(s) && page->slab_cache != s) {
-		pr_err("kmem_cache_free: Wrong slab cache. %s but object"
-			" is from %s\n", page->slab_cache->name, s->name);
-		WARN_ON_ONCE(1);
+	s = cache_from_obj(s, x);
+	if (!s)
 		return;
-	}
-
-	slab_free(s, page, x, _RET_IP_);
-
+	slab_free(s, virt_to_head_page(x), x, _RET_IP_);
 	trace_kmem_cache_free(_RET_IP_, x);
 }
 EXPORT_SYMBOL(kmem_cache_free);
