drivers: convert shrinkers to new count/scan API
Convert the driver shrinkers to the new API.  Most changes are compile
tested only because I either don't have the hardware or it's staging
stuff.
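
For reference, the shape of the new API the drivers are being moved to: the
single ->shrink callback is replaced by a ->count_objects callback that only
reports how many objects could be freed, and a ->scan_objects callback that
does the actual freeing and returns how many objects it freed (or SHRINK_STOP
when it cannot make progress).  The sketch below is illustrative only, not
code from any of the converted drivers; the demo_* names are made up.

#include <linux/shrinker.h>
#include <linux/spinlock.h>

/* Hypothetical driver-private cache; nothing here comes from a real driver. */
static DEFINE_SPINLOCK(demo_cache_lock);
static unsigned long demo_cache_objects;

static unsigned long demo_cache_count(struct shrinker *shrink,
                                      struct shrink_control *sc)
{
        /* Only report how many objects could be freed; free nothing here. */
        return demo_cache_objects;
}

static unsigned long demo_cache_scan(struct shrinker *shrink,
                                     struct shrink_control *sc)
{
        unsigned long freed = 0;

        /* Tell the shrinker core to stop if we cannot make progress now. */
        if (!spin_trylock(&demo_cache_lock))
                return SHRINK_STOP;

        while (freed < sc->nr_to_scan && demo_cache_objects > 0) {
                demo_cache_objects--;   /* stand-in for freeing one object */
                freed++;
        }
        spin_unlock(&demo_cache_lock);

        /* Return the number of objects actually freed, not a cache total. */
        return freed;
}

static struct shrinker demo_shrinker = {
        .count_objects  = demo_cache_count,
        .scan_objects   = demo_cache_scan,
        .seeks          = DEFAULT_SEEKS,
};

/* register_shrinker(&demo_shrinker) at init, unregister_shrinker() on teardown. */

Unlike the old API, scan_objects must never return a cache-wide total;
vmscan now gets that from count_objects and expects scan_objects to report
only what was reclaimed on this call.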

FWIW, the md and android code is pretty good, but the rest of it makes me
want to claw my eyes out.  The amount of broken code I just encountered is
mind boggling.  I've added comments explaining what is broken, but I fear
that some of the code would be best dealt with by being dragged behind the
bike shed, buried in mud up to its neck, and then run over repeatedly
with a blunt lawn mower.

Special mention goes to the zcache/zcache2 drivers.  They can't co-exist
in the build at the same time, they are under different menu options in
menuconfig, they only show up when you've got the right set of mm
subsystem options configured and so even compile testing is an exercise in
pulling teeth.  And that doesn't even take into account the horrible,
broken code...

[[email protected]: fixes for i915, android lowmem, zcache, bcache]
Signed-off-by: Dave Chinner <[email protected]>
Signed-off-by: Glauber Costa <[email protected]>
Acked-by: Mel Gorman <[email protected]>
Cc: Daniel Vetter <[email protected]>
Cc: Kent Overstreet <[email protected]>
Cc: John Stultz <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Jerome Glisse <[email protected]>
Cc: Thomas Hellstrom <[email protected]>
Cc: "Theodore Ts'o" <[email protected]>
Cc: Adrian Hunter <[email protected]>
Cc: Al Viro <[email protected]>
Cc: Artem Bityutskiy <[email protected]>
Cc: Arve Hjønnevåg <[email protected]>
Cc: Carlos Maiolino <[email protected]>
Cc: Christoph Hellwig <[email protected]>
Cc: Chuck Lever <[email protected]>
Cc: Daniel Vetter <[email protected]>
Cc: David Rientjes <[email protected]>
Cc: Gleb Natapov <[email protected]>
Cc: Greg Thelen <[email protected]>
Cc: J. Bruce Fields <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Jerome Glisse <[email protected]>
Cc: John Stultz <[email protected]>
Cc: KAMEZAWA Hiroyuki <[email protected]>
Cc: Kent Overstreet <[email protected]>
Cc: Kirill A. Shutemov <[email protected]>
Cc: Marcelo Tosatti <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Steven Whitehouse <[email protected]>
Cc: Thomas Hellstrom <[email protected]>
Cc: Trond Myklebust <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>

Signed-off-by: Al Viro <[email protected]>
Dave Chinner authored and Al Viro committed Sep 10, 2013
1 parent 1ab6c49 commit 7dc19d5
Showing 9 changed files with 236 additions and 136 deletions.
4 changes: 2 additions & 2 deletions drivers/gpu/drm/i915/i915_dma.c
@@ -1667,7 +1667,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;

out_gem_unload:
if (dev_priv->mm.inactive_shrinker.shrink)
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);

if (dev->pdev->msi_enabled)
@@ -1706,7 +1706,7 @@ int i915_driver_unload(struct drm_device *dev)

i915_teardown_sysfs(dev);

if (dev_priv->mm.inactive_shrinker.shrink)
if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);

mutex_lock(&dev->struct_mutex);
78 changes: 55 additions & 23 deletions drivers/gpu/drm/i915/i915_gem.c
@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);

static int i915_gem_inactive_shrink(struct shrinker *shrinker,
struct shrink_control *sc);
static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
struct shrink_control *sc);
static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1736,16 +1738,21 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return __i915_gem_shrink(dev_priv, target, true);
}

static void
static long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *next;
long freed = 0;

i915_gem_evict_everything(dev_priv->dev);

list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
global_list)
global_list) {
if (obj->pages_pin_count == 0)
freed += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put_pages(obj);
}
return freed;
}

static int
@@ -4526,7 +4533,8 @@ i915_gem_load(struct drm_device *dev)

dev_priv->mm.interruptible = true;

dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
@@ -4749,18 +4757,17 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}

static int
i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
static unsigned long
i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
int cnt;
unsigned long count;

if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
@@ -4772,31 +4779,22 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
unlock = false;
}

if (nr_to_scan) {
nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
if (nr_to_scan > 0)
nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
false);
if (nr_to_scan > 0)
i915_gem_shrink_all(dev_priv);
}

cnt = 0;
count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
count += obj->base.size >> PAGE_SHIFT;

list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->active)
continue;

if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
count += obj->base.size >> PAGE_SHIFT;
}

if (unlock)
mutex_unlock(&dev->struct_mutex);
return cnt;
return count;
}

/* All the new VM stuff */
@@ -4860,6 +4858,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
return 0;
}

static unsigned long
i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
struct drm_i915_private,
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
int nr_to_scan = sc->nr_to_scan;
unsigned long freed;
bool unlock = true;

if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
return 0;

if (dev_priv->mm.shrinker_no_lock_stealing)
return 0;

unlock = false;
}

freed = i915_gem_purge(dev_priv, nr_to_scan);
if (freed < nr_to_scan)
freed += __i915_gem_shrink(dev_priv, nr_to_scan,
false);
if (freed < nr_to_scan)
freed += i915_gem_shrink_all(dev_priv);

if (unlock)
mutex_unlock(&dev->struct_mutex);
return freed;
}

struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
44 changes: 28 additions & 16 deletions drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,28 +377,26 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
return nr_free;
}

/* Get good estimation how many pages are free in pools */
static int ttm_pool_get_num_unused_pages(void)
{
unsigned i;
int total = 0;
for (i = 0; i < NUM_POOLS; ++i)
total += _manager->pools[i].npages;

return total;
}

/**
* Callback for mm to request pool to reduce the number of pages held.
*
* XXX: (dchinner) Deadlock warning!
*
* ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
* this can deadlock when called with a sc->gfp_mask that is not equal to
* GFP_KERNEL.
*
* This code is crying out for a shrinker per pool....
*/
static int ttm_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
unsigned long freed = 0;

pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
freed += nr_free - shrink_pages;
}
/* return estimated number of unused pages in pool */
return ttm_pool_get_num_unused_pages();
return freed;
}


static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
unsigned i;
unsigned long count = 0;

for (i = 0; i < NUM_POOLS; ++i)
count += _manager->pools[i].npages;

return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
manager->mm_shrink.count_objects = ttm_pool_shrink_count;
manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
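
The deadlock warning above is worth spelling out: ttm_page_pool_free() can
allocate with GFP_KERNEL, so running it from a reclaim context with a more
restricted gfp_mask can deadlock.  One possible guard is sketched below as an
illustration only; ttm does not currently do this check, and
careful_pool_free() is a made-up stand-in for the real pool-freeing work.

#include <linux/gfp.h>
#include <linux/shrinker.h>

/* Made-up stand-in for work that may allocate with GFP_KERNEL. */
static unsigned long careful_pool_free(unsigned long nr_to_free)
{
        return nr_to_free;
}

static unsigned long
careful_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
        /* Back off if the caller's gfp_mask is weaker than GFP_KERNEL. */
        if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
                return SHRINK_STOP;

        return careful_pool_free(sc->nr_to_scan);
}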
51 changes: 32 additions & 19 deletions drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);

/* Get good estimation how many pages are free in pools */
static int ttm_dma_pool_get_num_unused_pages(void)
{
struct device_pools *p;
unsigned total = 0;

mutex_lock(&_manager->lock);
list_for_each_entry(p, &_manager->pools, pools)
total += p->pool->npages_free;
mutex_unlock(&_manager->lock);
return total;
}

/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
* Callback for mm to request pool to reduce the number of pages held.
*
* XXX: (dchinner) Deadlock warning!
*
* ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
* needs to be paid to sc->gfp_mask to determine if this can be done or not.
* GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be really
* bad.
*
* I'm getting sadder as I hear more pathetical whimpers about needing per-pool
* shrinkers
*/
static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
struct shrink_control *sc)
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
unsigned long freed = 0;

if (list_empty(&_manager->pools))
return 0;
return SHRINK_STOP;

mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
@@ -1029,18 +1027,33 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
freed += nr_free - shrink_pages;

pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
/* return estimated number of unused pages in pool */
return ttm_dma_pool_get_num_unused_pages();
return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
struct device_pools *p;
unsigned long count = 0;

mutex_lock(&_manager->lock);
list_for_each_entry(p, &_manager->pools, pools)
count += p->pool->npages_free;
mutex_unlock(&_manager->lock);
return count;
}

static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}