mm: delete NR_PAGES_SCANNED and pgdat_reclaimable()
NR_PAGES_SCANNED counts the number of pages scanned since the last page free
event in the allocator.  This was used primarily to measure the
reclaimability of zones and nodes, and determine when reclaim should
give up on them.  In that role, it has been replaced in the preceding
patches by a different mechanism.

Because it is implemented as an efficient vmstat counter, it was automatically
exported to userspace as well.  It is unlikely, however, that anyone outside
the kernel is using this counter in any meaningful way.

Remove the counter and the unused pgdat_reclaimable().

Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Johannes Weiner <[email protected]>
Acked-by: Hillf Danton <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Jia He <[email protected]>
Cc: Mel Gorman <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
hnaz authored and torvalds committed May 3, 2017
Parent: 688035f. Commit: c822f62.
Showing 5 changed files with 3 additions and 41 deletions.
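
For context, the heuristic retired here, pgdat_reclaimable() (removed from mm/vmscan.c below), treated a node as still reclaimable while the pages scanned since its last page free event stayed below six times its reclaimable pages. The following is a minimal userspace sketch of that check, with plain integers as hypothetical stand-ins for the per-node vmstat counters:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Sketch of the threshold removed by this patch: a node counted as
     * reclaimable while pages scanned since the last page free event
     * stayed below six times its reclaimable pages.  Both parameters
     * are hypothetical stand-ins for the per-node vmstat counters.
     */
    static bool node_was_reclaimable(unsigned long pages_scanned,
                                     unsigned long reclaimable_pages)
    {
            return pages_scanned < reclaimable_pages * 6;
    }

    int main(void)
    {
            /* With 10000 reclaimable pages, reclaim gave up only past 60000 scans. */
            printf("%s\n", node_was_reclaimable(59999, 10000) ? "keep reclaiming" : "give up");
            printf("%s\n", node_was_reclaimable(60000, 10000) ? "keep reclaiming" : "give up");
            return 0;
    }

As the show_free_areas hunk below suggests, the "all_unreclaimable?" output is now driven by pgdat->kswapd_failures rather than by scan counts.
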
include/linux/mmzone.h: 1 change (0 additions, 1 deletion)
@@ -149,7 +149,6 @@ enum node_stat_item {
 	NR_UNEVICTABLE,		/* " " " " " */
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
-	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
 	WORKINGSET_REFAULT,
 	WORKINGSET_ACTIVATE,
 	WORKINGSET_NODERECLAIM,
mm/internal.h: 1 change (0 additions, 1 deletion)
@@ -91,7 +91,6 @@ extern unsigned long highest_memmap_pfn;
  */
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern bool pgdat_reclaimable(struct pglist_data *pgdat);
 
 /*
  * in mm/rmap.c:
mm/page_alloc.c: 11 changes (0 additions, 11 deletions)
@@ -1090,14 +1090,10 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 {
 	int migratetype = 0;
 	int batch_free = 0;
-	unsigned long nr_scanned;
 	bool isolated_pageblocks;
 
 	spin_lock(&zone->lock);
 	isolated_pageblocks = has_isolate_pageblock(zone);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
 
 	while (count) {
 		struct page *page;
@@ -1150,12 +1146,7 @@ static void free_one_page(struct zone *zone,
 				unsigned int order,
 				int migratetype)
 {
-	unsigned long nr_scanned;
 	spin_lock(&zone->lock);
-	nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
-	if (nr_scanned)
-		__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-
 	if (unlikely(has_isolate_pageblock(zone) ||
 	    is_migrate_isolate(migratetype))) {
 		migratetype = get_pfnblock_migratetype(page, pfn);
@@ -4504,7 +4495,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
 			" writeback_tmp:%lukB"
 			" unstable:%lukB"
-			" pages_scanned:%lu"
 			" all_unreclaimable? %s"
 			"\n",
 			pgdat->node_id,
@@ -4527,7 +4517,6 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 #endif
 			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
-			node_page_state(pgdat, NR_PAGES_SCANNED),
 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
 				"yes" : "no");
 	}
mm/vmscan.c: 9 changes (0 additions, 9 deletions)
@@ -230,12 +230,6 @@ unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat)
 	return nr;
 }
 
-bool pgdat_reclaimable(struct pglist_data *pgdat)
-{
-	return node_page_state_snapshot(pgdat, NR_PAGES_SCANNED) <
-		pgdat_reclaimable_pages(pgdat) * 6;
-}
-
 /**
  * lruvec_lru_size - Returns the number of pages on the given LRU list.
  * @lruvec: lru vector
@@ -1750,7 +1744,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
 	if (global_reclaim(sc)) {
-		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
 		if (current_is_kswapd())
 			__count_vm_events(PGSCAN_KSWAPD, nr_scanned);
 		else
@@ -1953,8 +1946,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	__mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, nr_taken);
 	reclaim_stat->recent_scanned[file] += nr_taken;
 
-	if (global_reclaim(sc))
-		__mod_node_page_state(pgdat, NR_PAGES_SCANNED, nr_scanned);
 	__count_vm_events(PGREFILL, nr_scanned);
 
 	spin_unlock_irq(&pgdat->lru_lock);
mm/vmstat.c: 22 changes (3 additions, 19 deletions)
@@ -954,7 +954,6 @@ const char * const vmstat_text[] = {
 	"nr_unevictable",
 	"nr_isolated_anon",
 	"nr_isolated_file",
-	"nr_pages_scanned",
 	"workingset_refault",
 	"workingset_activate",
 	"workingset_nodereclaim",
@@ -1378,15 +1377,13 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
 		   "\n min %lu"
 		   "\n low %lu"
 		   "\n high %lu"
-		   "\n node_scanned %lu"
 		   "\n spanned %lu"
 		   "\n present %lu"
 		   "\n managed %lu",
 		   zone_page_state(zone, NR_FREE_PAGES),
 		   min_wmark_pages(zone),
 		   low_wmark_pages(zone),
 		   high_wmark_pages(zone),
-		   node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED),
 		   zone->spanned_pages,
 		   zone->present_pages,
 		   zone->managed_pages);
@@ -1586,22 +1583,9 @@ int vmstat_refresh(struct ctl_table *table, int write,
 	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) {
 		val = atomic_long_read(&vm_zone_stat[i]);
 		if (val < 0) {
-			switch (i) {
-			case NR_PAGES_SCANNED:
-				/*
-				 * This is often seen to go negative in
-				 * recent kernels, but not to go permanently
-				 * negative.  Whilst it would be nicer not to
-				 * have exceptions, rooting them out would be
-				 * another task, of rather low priority.
-				 */
-				break;
-			default:
-				pr_warn("%s: %s %ld\n",
-					__func__, vmstat_text[i], val);
-				err = -EINVAL;
-				break;
-			}
+			pr_warn("%s: %s %ld\n",
+				__func__, vmstat_text[i], val);
+			err = -EINVAL;
 		}
 	}
 	if (err)
