Skip to content

Commit

Permalink
mm: remove lru parameter from __lru_cache_add and lru_cache_add_lru
Browse files Browse the repository at this point in the history
Similar to __pagevec_lru_add, this patch removes the LRU parameter from
__lru_cache_add and lru_cache_add_lru as the caller does not control the
exact LRU the page gets added to.  lru_cache_add_lru gets renamed to
lru_cache_add as the name is silly without the lru parameter.  With the
parameter removed, it is required that the caller indicate if they want
the page added to the active or inactive list by setting or clearing
PageActive respectively.

[[email protected]: Suggested the patch]
[[email protected]: fix used-uninitialized warning]
Signed-off-by: Mel Gorman <[email protected]>
Signed-off-by: Chen Gang <[email protected]>
Cc: Jan Kara <[email protected]>
Cc: Rik van Riel <[email protected]>
Acked-by: Johannes Weiner <[email protected]>
Cc: Alexey Lyahkov <[email protected]>
Cc: Andrew Perepechko <[email protected]>
Cc: Robin Dong <[email protected]>
Cc: Theodore Tso <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Bernd Schubert <[email protected]>
Cc: David Howells <[email protected]>
Cc: Trond Myklebust <[email protected]>
Cc: Mel Gorman <[email protected]>
Cc: Rik van Riel <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
  • Loading branch information
Mel Gorman authored and torvalds committed Jul 3, 2013
1 parent a0b8cab commit c53954a
Show file tree
Hide file tree
Showing 4 changed files with 20 additions and 20 deletions.
11 changes: 7 additions & 4 deletions include/linux/swap.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include <linux/node.h>
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
#include <asm/page.h>

struct notifier_block;
Expand Down Expand Up @@ -233,8 +234,8 @@ extern unsigned long nr_free_pagecache_pages(void);


/* linux/mm/swap.c */
extern void __lru_cache_add(struct page *, enum lru_list lru);
extern void lru_cache_add_lru(struct page *, enum lru_list lru);
extern void __lru_cache_add(struct page *);
extern void lru_cache_add(struct page *);
extern void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *head);
extern void activate_page(struct page *);
Expand All @@ -254,12 +255,14 @@ extern void add_page_to_unevictable_list(struct page *page);
*/
static inline void lru_cache_add_anon(struct page *page)
{
__lru_cache_add(page, LRU_INACTIVE_ANON);
ClearPageActive(page);
__lru_cache_add(page);
}

static inline void lru_cache_add_file(struct page *page)
{
__lru_cache_add(page, LRU_INACTIVE_FILE);
ClearPageActive(page);
__lru_cache_add(page);
}

/* linux/mm/vmscan.c */
Expand Down
7 changes: 4 additions & 3 deletions mm/rmap.c
Original file line number Diff line number Diff line change
Expand Up @@ -1093,9 +1093,10 @@ void page_add_new_anon_rmap(struct page *page,
else
__inc_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
__page_set_anon_rmap(page, vma, address, 1);
if (!mlocked_vma_newpage(vma, page))
lru_cache_add_lru(page, LRU_ACTIVE_ANON);
else
if (!mlocked_vma_newpage(vma, page)) {
SetPageActive(page);
lru_cache_add(page);
} else
add_page_to_unevictable_list(page);
}

Expand Down
17 changes: 7 additions & 10 deletions mm/swap.c
Original file line number Diff line number Diff line change
Expand Up @@ -494,15 +494,10 @@ EXPORT_SYMBOL(mark_page_accessed);
* pagevec is drained. This gives a chance for the caller of __lru_cache_add()
* have the page added to the active list using mark_page_accessed().
*/
void __lru_cache_add(struct page *page, enum lru_list lru)
void __lru_cache_add(struct page *page)
{
struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

if (is_active_lru(lru))
SetPageActive(page);
else
ClearPageActive(page);

page_cache_get(page);
if (!pagevec_space(pvec))
__pagevec_lru_add(pvec);
Expand All @@ -512,11 +507,10 @@ void __lru_cache_add(struct page *page, enum lru_list lru)
EXPORT_SYMBOL(__lru_cache_add);

/**
* lru_cache_add_lru - add a page to a page list
* lru_cache_add - add a page to a page list
* @page: the page to be added to the LRU.
* @lru: the LRU list to which the page is added.
*/
void lru_cache_add_lru(struct page *page, enum lru_list lru)
void lru_cache_add(struct page *page)
{
if (PageActive(page)) {
VM_BUG_ON(PageUnevictable(page));
Expand All @@ -525,7 +519,7 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
}

VM_BUG_ON(PageLRU(page));
__lru_cache_add(page, lru);
__lru_cache_add(page);
}

/**
Expand Down Expand Up @@ -745,6 +739,9 @@ void release_pages(struct page **pages, int nr, int cold)
del_page_from_lru_list(page, lruvec, page_off_lru(page));
}

/* Clear Active bit in case of parallel mark_page_accessed */
ClearPageActive(page);

list_add(&page->lru, &pages_to_free);
}
if (zone)
Expand Down
5 changes: 2 additions & 3 deletions mm/vmscan.c
Original file line number Diff line number Diff line change
Expand Up @@ -546,7 +546,6 @@ int remove_mapping(struct address_space *mapping, struct page *page)
void putback_lru_page(struct page *page)
{
int lru;
int active = !!TestClearPageActive(page);
int was_unevictable = PageUnevictable(page);

VM_BUG_ON(PageLRU(page));
Expand All @@ -561,8 +560,8 @@ void putback_lru_page(struct page *page)
* unevictable page on [in]active list.
* We know how to handle that.
*/
lru = active + page_lru_base_type(page);
lru_cache_add_lru(page, lru);
lru = page_lru_base_type(page);
lru_cache_add(page);
} else {
/*
* Put unevictable pages directly on zone's unevictable
Expand Down

0 comments on commit c53954a

Please sign in to comment.