mm: page allocator: do not call direct reclaim for THP allocations while compaction is deferred

If compaction is deferred, direct reclaim is used to try to free enough
pages for the allocation to succeed.  For small high-order allocations,
this has a reasonable chance of success.  However, if the caller has
specified __GFP_NO_KSWAPD to limit the disruption to the system, it makes
more sense to fail the allocation rather than stall the caller in direct
reclaim.  This patch skips direct reclaim if compaction is deferred and
the caller specifies __GFP_NO_KSWAPD.

Async compaction considers only a subset of pages, so it is possible for
compaction to be deferred prematurely, causing the allocation to skip
direct reclaim even in cases where it should be used.  To compensate for
this, the patch also defers compaction only if sync compaction failed.

Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Minchan Kim <[email protected]>
Reviewed-by: Rik van Riel <[email protected]>
Cc: Andrea Arcangeli <[email protected]>
Cc: Dave Jones <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Andy Isaacson <[email protected]>
Cc: Nai Xia <[email protected]>
Cc: Johannes Weiner <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
Mel Gorman authored and torvalds committed Jan 13, 2012
1 parent c824493 commit 6619971
Showing 1 changed file with 35 additions and 10 deletions.
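
Before the diff itself, here is the decision flow the patch adds to __alloc_pages_slowpath(), condensed into a standalone sketch: try async compaction first, and if compaction reports that it is deferred while the caller passed __GFP_NO_KSWAPD, fail the allocation rather than fall through to direct reclaim. This is an illustration only; the helper functions below are stand-ins, not the kernel's actual interfaces.

/* Illustrative sketch, not kernel code: the new slowpath decision. */
#include <stdbool.h>
#include <stddef.h>

typedef unsigned int gfp_t;
struct page;

#define __GFP_NO_KSWAPD 0x400000u	/* stand-in value for illustration */

/* Stand-ins for try_to_compact_pages() and the direct-reclaim path. */
extern struct page *run_async_compaction(unsigned int order,
					 bool *deferred_compaction);
extern struct page *run_direct_reclaim(gfp_t gfp_mask, unsigned int order);

struct page *slowpath_sketch(gfp_t gfp_mask, unsigned int order)
{
	bool deferred_compaction = false;
	struct page *page;

	/* First attempt: async compaction, which may report deferral. */
	page = run_async_compaction(order, &deferred_compaction);
	if (page)
		return page;

	/*
	 * New behaviour: compaction being deferred means sync compaction
	 * recently failed.  If the caller also asked for low disruption
	 * (__GFP_NO_KSWAPD), fail now rather than stall in direct reclaim.
	 */
	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
		return NULL;

	return run_direct_reclaim(gfp_mask, order);
}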
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1981,14 +1981,20 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress,
-	bool sync_migration)
+	int migratetype, bool sync_migration,
+	bool *deferred_compaction,
+	unsigned long *did_some_progress)
 {
 	struct page *page;
 
-	if (!order || compaction_deferred(preferred_zone))
+	if (!order)
 		return NULL;
 
+	if (compaction_deferred(preferred_zone)) {
+		*deferred_compaction = true;
+		return NULL;
+	}
+
 	current->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
 						nodemask, sync_migration);
@@ -2016,7 +2022,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		 * but not enough to satisfy watermarks.
 		 */
 		count_vm_event(COMPACTFAIL);
-		defer_compaction(preferred_zone);
+
+		/*
+		 * As async compaction considers a subset of pageblocks, only
+		 * defer if the failure was a sync compaction failure.
+		 */
+		if (sync_migration)
+			defer_compaction(preferred_zone);
 
 		cond_resched();
 	}
@@ -2028,8 +2040,9 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress,
-	bool sync_migration)
+	int migratetype, bool sync_migration,
+	bool *deferred_compaction,
+	unsigned long *did_some_progress)
 {
 	return NULL;
 }
@@ -2179,6 +2192,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
 	bool sync_migration = false;
+	bool deferred_compaction = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2259,12 +2273,22 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress,
-					sync_migration);
+					migratetype, sync_migration,
+					&deferred_compaction,
+					&did_some_progress);
 	if (page)
 		goto got_pg;
 	sync_migration = true;
 
+	/*
+	 * If compaction is deferred for high-order allocations, it is because
+	 * sync compaction recently failed. If this is the case and the caller
+	 * has requested the system not be heavily disrupted, fail the
+	 * allocation now instead of entering direct reclaim.
+	 */
+	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+		goto nopage;
+
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
 					zonelist, high_zoneidx,
@@ -2328,8 +2352,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress,
-					sync_migration);
+					migratetype, sync_migration,
+					&deferred_compaction,
+					&did_some_progress);
 		if (page)
 			goto got_pg;
 	}
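
For context on why a deferral is treated as a strong signal: compaction deferral is a per-zone exponential backoff in which each recorded failure doubles the number of subsequent compaction attempts that are skipped, capped at 1 << COMPACT_MAX_DEFER_SHIFT. The sketch below paraphrases compaction_deferred()/defer_compaction() from include/linux/compaction.h of kernels around this release; the reduced struct is illustrative, while the field and constant names match the kernel's. Because async compaction scans only a subset of pageblocks, arming this backoff on async failures would trip the new early-exit path too eagerly, which is why the patch arms it on sync failures only.

/* Sketch of the per-zone compaction-deferral backoff (paraphrase). */
#include <stdbool.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* skip at most 1 << 6 = 64 attempts */

struct zone_sketch {
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
};

/* Called when compaction fails: reset the counter, double the window. */
static void defer_compaction(struct zone_sketch *zone)
{
	zone->compact_considered = 0;
	if (++zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Returns true if this compaction attempt should be skipped. */
static bool compaction_deferred(struct zone_sketch *zone)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;	/* avoid overflow */

	return zone->compact_considered < defer_limit;
}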
