mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced a *long* time
ago with the promise that one day it would be possible to implement the
page cache with bigger chunks than PAGE_SIZE.

This promise never materialized, and it is unlikely that it ever will.

We have many places where PAGE_CACHE_SIZE is assumed to be equal to
PAGE_SIZE, and it is a constant source of confusion whether the
PAGE_CACHE_* or the PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.

Globally switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause too much
breakage to be doable.

Let's stop pretending that pages in the page cache are special.  They
are not.

The changes are pretty straightforward (a hypothetical before/after
sketch follows the list):

 - <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;

 - PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};

 - page_cache_get() -> get_page();

 - page_cache_release() -> put_page();
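
As a hypothetical before/after sketch (not code taken from the patch):
since PAGE_CACHE_SHIFT is defined equal to PAGE_SHIFT, the
(PAGE_CACHE_SHIFT - PAGE_SHIFT) conversions are shifts by zero and simply
drop out, while the reference-counting helpers are one-to-one renames:

/* before */
static void touch_page_at(struct address_space *mapping, loff_t pos)
{
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	pgoff_t pgoff = index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);	/* == index */
	struct page *page = find_get_page(mapping, pgoff);	/* takes a reference */

	if (!page)
		return;
	mark_page_accessed(page);
	page_cache_release(page);	/* drops the reference */
}

/* after */
static void touch_page_at(struct address_space *mapping, loff_t pos)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page = find_get_page(mapping, index);	/* takes a reference */

	if (!page)
		return;
	mark_page_accessed(page);
	put_page(page);		/* drops the reference */
}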

This patch contains automated changes generated with coccinelle using
the script below.  For some reason, coccinelle doesn't patch header
files; I've run spatch on them manually.

The only adjustment after coccinelle is a revert of its changes to the
PAGE_CACHE_ALIGN definition: we are going to drop it later.
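
For reference, the legacy macros have historically been defined in
include/linux/pagemap.h as plain aliases of the base-page constants
(roughly as below; an approximation, not quoted verbatim), which is
what makes the rewrite purely mechanical:

#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr) + PAGE_CACHE_SIZE - 1) & PAGE_CACHE_MASK)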

There are a few places in the code that coccinelle didn't reach.  I'll
fix them manually in a separate patch.  Comments and documentation will
also be addressed in a separate patch.

virtual patch

@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E

@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT

@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE

@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK

@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)

@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)

@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)

Signed-off-by: Kirill A. Shutemov <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
kiryl authored and torvalds committed Apr 4, 2016
1 parent c05c2ec commit 09cbfea
Showing 381 changed files with 2,722 additions and 2,721 deletions.
2 changes: 1 addition & 1 deletion arch/arc/mm/cache.c
@@ -628,7 +628,7 @@ void flush_dcache_page(struct page *page)

/* kernel reading from page with U-mapping */
phys_addr_t paddr = (unsigned long)page_address(page);
- unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;
+ unsigned long vaddr = page->index << PAGE_SHIFT;

if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_page(paddr, vaddr);
4 changes: 2 additions & 2 deletions arch/arm/mm/flush.c
@@ -235,7 +235,7 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
*/
if (mapping && cache_is_vipt_aliasing())
flush_pfn_alias(page_to_pfn(page),
- page->index << PAGE_CACHE_SHIFT);
+ page->index << PAGE_SHIFT);
}

static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
@@ -250,7 +250,7 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
* data in the current VM view associated with this page.
* - aliasing VIPT: we only need to find one mapping of this page.
*/
- pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff = page->index;

flush_dcache_mmap_lock(mapping);
vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
2 changes: 1 addition & 1 deletion arch/parisc/kernel/cache.c
@@ -319,7 +319,7 @@ void flush_dcache_page(struct page *page)
if (!mapping)
return;

- pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ pgoff = page->index;

/* We have carefully arranged in arch_get_unmapped_area() that
* *any* mappings of a file are always congruently mapped (whether
4 changes: 2 additions & 2 deletions arch/powerpc/platforms/cell/spufs/inode.c
@@ -732,8 +732,8 @@ spufs_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;

sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = SPUFS_MAGIC;
sb->s_op = &s_ops;
sb->s_fs_info = info;
4 changes: 2 additions & 2 deletions arch/s390/hypfs/inode.c
@@ -278,8 +278,8 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
sbi->uid = current_uid();
sbi->gid = current_gid();
sb->s_fs_info = sbi;
- sb->s_blocksize = PAGE_CACHE_SIZE;
- sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
sb->s_magic = HYPFS_MAGIC;
sb->s_op = &hypfs_s_ops;
if (hypfs_parse_options(data, sb))
8 changes: 4 additions & 4 deletions block/bio.c
@@ -1339,7 +1339,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
* release the pages we didn't map into the bio, if any
*/
while (j < page_limit)
- page_cache_release(pages[j++]);
+ put_page(pages[j++]);
}

kfree(pages);
@@ -1365,7 +1365,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
for (j = 0; j < nr_pages; j++) {
if (!pages[j])
break;
- page_cache_release(pages[j]);
+ put_page(pages[j]);
}
out:
kfree(pages);
@@ -1385,7 +1385,7 @@ static void __bio_unmap_user(struct bio *bio)
if (bio_data_dir(bio) == READ)
set_page_dirty_lock(bvec->bv_page);

- page_cache_release(bvec->bv_page);
+ put_page(bvec->bv_page);
}

bio_put(bio);
@@ -1658,7 +1658,7 @@ void bio_check_pages_dirty(struct bio *bio)
struct page *page = bvec->bv_page;

if (PageDirty(page) || PageCompound(page)) {
- page_cache_release(page);
+ put_page(page);
bvec->bv_page = NULL;
} else {
nr_clean_pages++;
2 changes: 1 addition & 1 deletion block/blk-core.c
@@ -706,7 +706,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
goto fail_id;

q->backing_dev_info.ra_pages =
- (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+ (VM_MAX_READAHEAD * 1024) / PAGE_SIZE;
q->backing_dev_info.capabilities = BDI_CAP_CGROUP_WRITEBACK;
q->backing_dev_info.name = "block";
q->node = node_id;
12 changes: 6 additions & 6 deletions block/blk-settings.c
@@ -239,8 +239,8 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
struct queue_limits *limits = &q->limits;
unsigned int max_sectors;

- if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
- max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+ if ((max_hw_sectors << 9) < PAGE_SIZE) {
+ max_hw_sectors = 1 << (PAGE_SHIFT - 9);
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_hw_sectors);
}
@@ -329,8 +329,8 @@ EXPORT_SYMBOL(blk_queue_max_segments);
**/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
- if (max_size < PAGE_CACHE_SIZE) {
- max_size = PAGE_CACHE_SIZE;
+ if (max_size < PAGE_SIZE) {
+ max_size = PAGE_SIZE;
printk(KERN_INFO "%s: set to minimum %d\n",
__func__, max_size);
}
@@ -760,8 +760,8 @@ EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
**/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
- if (mask < PAGE_CACHE_SIZE - 1) {
- mask = PAGE_CACHE_SIZE - 1;
+ if (mask < PAGE_SIZE - 1) {
+ mask = PAGE_SIZE - 1;
printk(KERN_INFO "%s: set to minimum %lx\n",
__func__, mask);
}
8 changes: 4 additions & 4 deletions block/blk-sysfs.c
@@ -76,7 +76,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
unsigned long ra_kb = q->backing_dev_info.ra_pages <<
- (PAGE_CACHE_SHIFT - 10);
+ (PAGE_SHIFT - 10);

return queue_var_show(ra_kb, (page));
}
@@ -90,7 +90,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
if (ret < 0)
return ret;

- q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+ q->backing_dev_info.ra_pages = ra_kb >> (PAGE_SHIFT - 10);

return ret;
}
@@ -117,7 +117,7 @@ static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
if (blk_queue_cluster(q))
return queue_var_show(queue_max_segment_size(q), (page));

- return queue_var_show(PAGE_CACHE_SIZE, (page));
+ return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
@@ -198,7 +198,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
unsigned long max_sectors_kb,
max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
- page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+ page_kb = 1 << (PAGE_SHIFT - 10);
ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

if (ret < 0)
2 changes: 1 addition & 1 deletion block/cfq-iosched.c
@@ -4075,7 +4075,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* idle timer unplug to continue working.
*/
if (cfq_cfqq_wait_request(cfqq)) {
- if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
+ if (blk_rq_bytes(rq) > PAGE_SIZE ||
cfqd->busy_queues > 1) {
cfq_del_timer(cfqd, cfqq);
cfq_clear_cfqq_wait_request(cfqq);
4 changes: 2 additions & 2 deletions block/compat_ioctl.c
@@ -710,7 +710,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
return compat_put_long(arg,
- (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+ (bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET: /* compatible */
return compat_put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
@@ -729,7 +729,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
- bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKGETSIZE:
size = i_size_read(bdev->bd_inode);
4 changes: 2 additions & 2 deletions block/ioctl.c
@@ -550,7 +550,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (!arg)
return -EINVAL;
bdi = blk_get_backing_dev_info(bdev);
- return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+ return put_long(arg, (bdi->ra_pages * PAGE_SIZE) / 512);
case BLKROGET:
return put_int(arg, bdev_read_only(bdev) != 0);
case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
@@ -578,7 +578,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if(!capable(CAP_SYS_ADMIN))
return -EACCES;
bdi = blk_get_backing_dev_info(bdev);
- bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+ bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKBSZSET:
return blkdev_bszset(bdev, mode, argp);
8 changes: 4 additions & 4 deletions block/partition-generic.c
@@ -566,8 +566,8 @@ static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
{
struct address_space *mapping = bdev->bd_inode->i_mapping;

- return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
- NULL);
+ return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_SHIFT-9)),
+ NULL);
}

unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
@@ -584,9 +584,9 @@ unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
if (PageError(page))
goto fail;
p->v = page;
- return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_CACHE_SHIFT - 9)) - 1)) << 9);
+ return (unsigned char *)page_address(page) + ((n & ((1 << (PAGE_SHIFT - 9)) - 1)) << 9);
fail:
- page_cache_release(page);
+ put_page(page);
}
p->v = NULL;
return NULL;
2 changes: 1 addition & 1 deletion drivers/block/aoe/aoeblk.c
@@ -397,7 +397,7 @@ aoeblk_gdalloc(void *vp)
WARN_ON(d->flags & DEVFL_UP);
blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
q->backing_dev_info.name = "aoe";
- q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
+ q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_SIZE;
d->bufpool = mp;
d->blkq = gd->queue = q;
q->queuedata = d;
2 changes: 1 addition & 1 deletion drivers/block/brd.c
@@ -374,7 +374,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
struct page *page, int rw)
{
struct brd_device *brd = bdev->bd_disk->private_data;
- int err = brd_do_bvec(brd, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ int err = brd_do_bvec(brd, page, PAGE_SIZE, 0, rw, sector);
page_endio(page, rw & WRITE, err);
return err;
}
2 changes: 1 addition & 1 deletion drivers/block/drbd/drbd_nl.c
@@ -1178,7 +1178,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
blk_queue_max_hw_sectors(q, max_hw_sectors);
/* This is the workaround for "bio would need to, but cannot, be split" */
blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
- blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
+ blk_queue_segment_boundary(q, PAGE_SIZE-1);

if (b) {
struct drbd_connection *connection = first_peer_device(device)->connection;
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -616,7 +616,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);

mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}

sg_free_table(ttm->sg);
4 changes: 2 additions & 2 deletions drivers/gpu/drm/armada/armada_gem.c
@@ -481,7 +481,7 @@ armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,

release:
for_each_sg(sgt->sgl, sg, num, i)
- page_cache_release(sg_page(sg));
+ put_page(sg_page(sg));
free_table:
sg_free_table(sgt);
free_sgt:
@@ -502,7 +502,7 @@ static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
if (dobj->obj.filp) {
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->nents, i)
- page_cache_release(sg_page(sg));
+ put_page(sg_page(sg));
}

sg_free_table(sgt);
4 changes: 2 additions & 2 deletions drivers/gpu/drm/drm_gem.c
@@ -534,7 +534,7 @@ struct page **drm_gem_get_pages(struct drm_gem_object *obj)

fail:
while (i--)
- page_cache_release(pages[i]);
+ put_page(pages[i]);

drm_free_large(pages);
return ERR_CAST(p);
@@ -569,7 +569,7 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
mark_page_accessed(pages[i]);

/* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
+ put_page(pages[i]);
}

drm_free_large(pages);
8 changes: 4 additions & 4 deletions drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
drm_clflush_virt_range(vaddr, PAGE_SIZE);
kunmap_atomic(src);

- page_cache_release(page);
+ put_page(page);
vaddr += PAGE_SIZE;
}

@@ -243,7 +243,7 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
set_page_dirty(page);
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
vaddr += PAGE_SIZE;
}
obj->dirty = 0;
@@ -2206,7 +2206,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(page);

- page_cache_release(page);
+ put_page(page);
}
obj->dirty = 0;

@@ -2346,7 +2346,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
sg_mark_end(sg);
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- page_cache_release(sg_page_iter_page(&sg_iter));
+ put_page(sg_page_iter_page(&sg_iter));
sg_free_table(st);
kfree(st);

2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -683,7 +683,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
set_page_dirty(page);

mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}
obj->dirty = 0;

2 changes: 1 addition & 1 deletion drivers/gpu/drm/radeon/radeon_ttm.c
@@ -609,7 +609,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
set_page_dirty(page);

mark_page_accessed(page);
- page_cache_release(page);
+ put_page(page);
}

sg_free_table(ttm->sg);
4 changes: 2 additions & 2 deletions drivers/gpu/drm/ttm/ttm_tt.c
@@ -311,7 +311,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
goto out_err;

copy_highpage(to_page, from_page);
- page_cache_release(from_page);
+ put_page(from_page);
}

if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP))
@@ -361,7 +361,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
copy_highpage(to_page, from_page);
set_page_dirty(to_page);
mark_page_accessed(to_page);
- page_cache_release(to_page);
+ put_page(to_page);
}

ttm_tt_unpopulate(ttm);
2 changes: 1 addition & 1 deletion drivers/gpu/drm/via/via_dmablit.c
@@ -188,7 +188,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (NULL != (page = vsg->pages[i])) {
if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
- page_cache_release(page);
+ put_page(page);
}
}
case dr_via_pages_alloc:
[Diffs for the remaining changed files are not shown here.]
