cifs: Transition from ->readpages() to ->readahead()
Transition the cifs filesystem from using the old ->readpages() method to
using the new ->readahead() method.

For the moment, this removes any invocation of fscache to read data from
the local cache, leaving that to another patch.

Signed-off-by: David Howells <[email protected]>
cc: Steve French <[email protected]>
cc: Shyam Prasad N <[email protected]>
cc: Matthew Wilcox <[email protected]>
cc: Jeff Layton <[email protected]>
cc: [email protected]
cc: [email protected]
Reviewed-by: Rohith Surabattula <[email protected]>
Acked-by: Jeff Layton <[email protected]>
Signed-off-by: Steve French <[email protected]>
dhowells authored and Steve French committed Feb 1, 2022
1 parent 489f710 commit 052e04a
Showing 1 changed file with 35 additions and 137 deletions.
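For context before the diff: the old ->readpages() method handed the filesystem a bare list of pages that it had to lock and insert into the page cache itself, whereas the new ->readahead() method passes a readahead_control from which the filesystem pulls pages the core has already locked and added to the cache. A minimal sketch of that shape follows (illustration only, not part of this commit; example_readahead() and issue_async_read() are hypothetical names, while the readahead_*() helpers are the real linux/pagemap.h API that the conversion below relies on):

#include <linux/pagemap.h>

static void example_readahead(struct readahead_control *ractl)
{
	struct page *pages[16];
	unsigned int nr;

	/*
	 * __readahead_batch() hands out up to ARRAY_SIZE(pages) locked,
	 * cache-attached pages per call and returns 0 once the request
	 * is exhausted.
	 */
	while ((nr = __readahead_batch(ractl, pages, ARRAY_SIZE(pages)))) {
		loff_t offset = readahead_pos(ractl);	/* start of this batch */
		size_t bytes = readahead_batch_length(ractl);	/* batch length */

		/*
		 * Issue the read; on completion each page must be marked
		 * up to date, unlocked and released, as
		 * cifs_readv_complete() does below.
		 */
		issue_async_read(offset, bytes, pages, nr);
	}
}

One subtlety visible in cifs_readahead() below: __readahead_batch() only marks a batch as consumed on the *next* call, so readahead_count() still includes the pages handed out in the previous iteration; that is why the loop condition subtracts last_batch_size rather than testing readahead_count() alone.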
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -4269,8 +4269,6 @@ cifs_readv_complete(struct work_struct *work)
 	for (i = 0; i < rdata->nr_pages; i++) {
 		struct page *page = rdata->pages[i];
 
-		lru_cache_add(page);
-
 		if (rdata->result == 0 ||
 		    (rdata->result == -EAGAIN && got_bytes)) {
 			flush_dcache_page(page);
@@ -4340,7 +4338,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
 			 * fill them until the writes are flushed.
 			 */
 			zero_user(page, 0, PAGE_SIZE);
-			lru_cache_add(page);
 			flush_dcache_page(page);
 			SetPageUptodate(page);
 			unlock_page(page);
@@ -4350,7 +4347,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
 			continue;
 		} else {
 			/* no need to hold page hostage */
-			lru_cache_add(page);
 			unlock_page(page);
 			put_page(page);
 			rdata->pages[i] = NULL;
@@ -4393,92 +4389,16 @@ cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
 	return readpages_fill_pages(server, rdata, iter, iter->count);
 }
 
-static int
-readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
-		    unsigned int rsize, struct list_head *tmplist,
-		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
+static void cifs_readahead(struct readahead_control *ractl)
 {
-	struct page *page, *tpage;
-	unsigned int expected_index;
-	int rc;
-	gfp_t gfp = readahead_gfp_mask(mapping);
-
-	INIT_LIST_HEAD(tmplist);
-
-	page = lru_to_page(page_list);
-
-	/*
-	 * Lock the page and put it in the cache. Since no one else
-	 * should have access to this page, we're safe to simply set
-	 * PG_locked without checking it first.
-	 */
-	__SetPageLocked(page);
-	rc = add_to_page_cache_locked(page, mapping,
-				      page->index, gfp);
-
-	/* give up if we can't stick it in the cache */
-	if (rc) {
-		__ClearPageLocked(page);
-		return rc;
-	}
-
-	/* move first page to the tmplist */
-	*offset = (loff_t)page->index << PAGE_SHIFT;
-	*bytes = PAGE_SIZE;
-	*nr_pages = 1;
-	list_move_tail(&page->lru, tmplist);
-
-	/* now try and add more pages onto the request */
-	expected_index = page->index + 1;
-	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
-		/* discontinuity ? */
-		if (page->index != expected_index)
-			break;
-
-		/* would this page push the read over the rsize? */
-		if (*bytes + PAGE_SIZE > rsize)
-			break;
-
-		__SetPageLocked(page);
-		rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
-		if (rc) {
-			__ClearPageLocked(page);
-			break;
-		}
-		list_move_tail(&page->lru, tmplist);
-		(*bytes) += PAGE_SIZE;
-		expected_index++;
-		(*nr_pages)++;
-	}
-	return rc;
-}
-
-static int cifs_readpages(struct file *file, struct address_space *mapping,
-			  struct list_head *page_list, unsigned num_pages)
-{
 	int rc;
-	int err = 0;
-	struct list_head tmplist;
-	struct cifsFileInfo *open_file = file->private_data;
-	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+	struct cifsFileInfo *open_file = ractl->file->private_data;
+	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
 	struct TCP_Server_Info *server;
 	pid_t pid;
-	unsigned int xid;
+	unsigned int xid, last_batch_size = 0;
 
 	xid = get_xid();
-	/*
-	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
-	 * immediately if the cookie is negative
-	 *
-	 * After this point, every page in the list might have PG_fscache set,
-	 * so we will need to clean that up off of every page we don't use.
-	 */
-	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
-					 &num_pages);
-	if (rc == 0) {
-		free_xid(xid);
-		return rc;
-	}
-
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 		pid = open_file->pid;
Expand All @@ -4489,93 +4409,72 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);

cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
__func__, file, mapping, num_pages);
__func__, ractl->file, ractl->mapping, readahead_count(ractl));

/*
* Start with the page at end of list and move it to private
* list. Do the same with any following pages until we hit
* the rsize limit, hit an index discontinuity, or run out of
* pages. Issue the async read and then start the loop again
* until the list is empty.
*
* Note that list order is important. The page_list is in
* the order of declining indexes. When we put the pages in
* the rdata->pages, then we want them in increasing order.
* Chop the readahead request up into rsize-sized read requests.
*/
while (!list_empty(page_list) && !err) {
unsigned int i, nr_pages, bytes, rsize;
loff_t offset;
struct page *page, *tpage;
while (readahead_count(ractl) - last_batch_size) {
unsigned int i, nr_pages, got, rsize;
struct page *page;
struct cifs_readdata *rdata;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;

if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
if (rc == -EAGAIN)
continue;
else if (rc)
if (rc) {
if (rc == -EAGAIN)
continue;
break;
}
}

rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
&rsize, credits);
if (rc)
break;
nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));

/*
* Give up immediately if rsize is too small to read an entire
* page. The VFS will fall back to readpage. We should never
* reach this point however since we set ra_pages to 0 when the
* rsize is smaller than a cache page.
*/
if (unlikely(rsize < PAGE_SIZE)) {
add_credits_and_wake_if(server, credits, 0);
free_xid(xid);
return 0;
}

nr_pages = 0;
err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
&nr_pages, &offset, &bytes);
if (!nr_pages) {
if (unlikely(!nr_pages)) {
add_credits_and_wake_if(server, credits, 0);
break;
}

rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
if (!rdata) {
/* best to give up if we're out of mem */
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
list_del(&page->lru);
lru_cache_add(page);
unlock_page(page);
put_page(page);
}
rc = -ENOMEM;
add_credits_and_wake_if(server, credits, 0);
break;
}

rdata->cfile = cifsFileInfo_get(open_file);
rdata->server = server;
rdata->mapping = mapping;
rdata->offset = offset;
rdata->bytes = bytes;
rdata->pid = pid;
rdata->pagesz = PAGE_SIZE;
rdata->tailsz = PAGE_SIZE;
got = __readahead_batch(ractl, rdata->pages, nr_pages);
if (got != nr_pages) {
pr_warn("__readahead_batch() returned %u/%u\n",
got, nr_pages);
nr_pages = got;
}

rdata->nr_pages = nr_pages;
rdata->bytes = readahead_batch_length(ractl);
rdata->cfile = cifsFileInfo_get(open_file);
rdata->server = server;
rdata->mapping = ractl->mapping;
rdata->offset = readahead_pos(ractl);
rdata->pid = pid;
rdata->pagesz = PAGE_SIZE;
rdata->tailsz = PAGE_SIZE;
rdata->read_into_pages = cifs_readpages_read_into_pages;
rdata->copy_into_pages = cifs_readpages_copy_into_pages;
rdata->credits = credits_on_stack;

list_for_each_entry_safe(page, tpage, &tmplist, lru) {
list_del(&page->lru);
rdata->pages[rdata->nr_pages++] = page;
}
rdata->credits = credits_on_stack;

rc = adjust_credits(server, &rdata->credits, rdata->bytes);

if (!rc) {
if (rdata->cfile->invalidHandle)
rc = -EAGAIN;
@@ -4587,7 +4486,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 			add_credits_and_wake_if(server, &rdata->credits, 0);
 			for (i = 0; i < rdata->nr_pages; i++) {
 				page = rdata->pages[i];
-				lru_cache_add(page);
 				unlock_page(page);
 				put_page(page);
 			}
@@ -4597,10 +4495,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		}
 
 		kref_put(&rdata->refcount, cifs_readdata_release);
+		last_batch_size = nr_pages;
 	}
 
 	free_xid(xid);
-	return rc;
 }
 
 /*
@@ -4924,7 +4822,7 @@ void cifs_oplock_break(struct work_struct *work)
  * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
  * so this method should never be called.
  *
- * Direct IO is not yet supported in the cached mode. 
+ * Direct IO is not yet supported in the cached mode.
  */
 static ssize_t
 cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
@@ -5006,7 +4904,7 @@ static int cifs_set_page_dirty(struct page *page)
 
 const struct address_space_operations cifs_addr_ops = {
 	.readpage = cifs_readpage,
-	.readpages = cifs_readpages,
+	.readahead = cifs_readahead,
 	.writepage = cifs_writepage,
 	.writepages = cifs_writepages,
 	.write_begin = cifs_write_begin,
