forked from torvalds/linux
-
Notifications
You must be signed in to change notification settings - Fork 17
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
ext4: convert mpage_prepare_extent_to_map() to use filemap_get_folios_tag()

Convert the function to use folios throughout. This is in preparation for the removal of find_get_pages_range_tag(). Now supports large folios. This change removes 11 calls to compound_head(). Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Vishal Moola (Oracle) <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
- Loading branch information
Showing 1 changed file with 32 additions and 33 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -2595,8 +2595,8 @@ static bool ext4_page_nomap_can_writeout(struct page *page) | |
static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) | ||
{ | ||
struct address_space *mapping = mpd->inode->i_mapping; | ||
struct pagevec pvec; | ||
unsigned int nr_pages; | ||
struct folio_batch fbatch; | ||
unsigned int nr_folios; | ||
long left = mpd->wbc->nr_to_write; | ||
pgoff_t index = mpd->first_page; | ||
pgoff_t end = mpd->last_page; | ||
|
@@ -2610,18 +2610,17 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) | |
tag = PAGECACHE_TAG_TOWRITE; | ||
else | ||
tag = PAGECACHE_TAG_DIRTY; | ||
|
||
pagevec_init(&pvec); | ||
folio_batch_init(&fbatch); | ||
mpd->map.m_len = 0; | ||
mpd->next_page = index; | ||
while (index <= end) { | ||
nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end, | ||
tag); | ||
if (nr_pages == 0) | ||
nr_folios = filemap_get_folios_tag(mapping, &index, end, | ||
tag, &fbatch); | ||
if (nr_folios == 0) | ||
break; | ||
|
||
for (i = 0; i < nr_pages; i++) { | ||
struct page *page = pvec.pages[i]; | ||
for (i = 0; i < nr_folios; i++) { | ||
struct folio *folio = fbatch.folios[i]; | ||
|
||
/* | ||
* Accumulated enough dirty pages? This doesn't apply | ||
|
@@ -2635,27 +2634,27 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) | |
goto out; | ||
|
||
/* If we can't merge this page, we are done. */ | ||
if (mpd->map.m_len > 0 && mpd->next_page != page->index) | ||
if (mpd->map.m_len > 0 && mpd->next_page != folio->index) | ||
goto out; | ||
|
||
lock_page(page); | ||
folio_lock(folio); | ||
/* | ||
* If the page is no longer dirty, or its mapping no | ||
* longer corresponds to inode we are writing (which | ||
* means it has been truncated or invalidated), or the | ||
* page is already under writeback and we are not doing | ||
* a data integrity writeback, skip the page | ||
*/ | ||
if (!PageDirty(page) || | ||
(PageWriteback(page) && | ||
if (!folio_test_dirty(folio) || | ||
(folio_test_writeback(folio) && | ||
(mpd->wbc->sync_mode == WB_SYNC_NONE)) || | ||
unlikely(page->mapping != mapping)) { | ||
unlock_page(page); | ||
unlikely(folio->mapping != mapping)) { | ||
folio_unlock(folio); | ||
continue; | ||
} | ||
|
||
wait_on_page_writeback(page); | ||
BUG_ON(PageWriteback(page)); | ||
folio_wait_writeback(folio); | ||
BUG_ON(folio_test_writeback(folio)); | ||
|
||
/* | ||
* Should never happen but for buggy code in | ||
|
@@ -2666,49 +2665,49 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd) | |
* | ||
* [1] https://lore.kernel.org/linux-mm/[email protected] | ||
*/ | ||
if (!page_has_buffers(page)) { | ||
ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", page->index); | ||
ClearPageDirty(page); | ||
unlock_page(page); | ||
if (!folio_buffers(folio)) { | ||
ext4_warning_inode(mpd->inode, "page %lu does not have buffers attached", folio->index); | ||
folio_clear_dirty(folio); | ||
folio_unlock(folio); | ||
continue; | ||
} | ||
|
||
if (mpd->map.m_len == 0) | ||
mpd->first_page = page->index; | ||
mpd->next_page = page->index + 1; | ||
mpd->first_page = folio->index; | ||
mpd->next_page = folio->index + folio_nr_pages(folio); | ||
/* | ||
* Writeout for transaction commit where we cannot | ||
* modify metadata is simple. Just submit the page. | ||
*/ | ||
if (!mpd->can_map) { | ||
if (ext4_page_nomap_can_writeout(page)) { | ||
err = mpage_submit_page(mpd, page); | ||
if (ext4_page_nomap_can_writeout(&folio->page)) { | ||
err = mpage_submit_page(mpd, &folio->page); | ||
if (err < 0) | ||
goto out; | ||
} else { | ||
unlock_page(page); | ||
mpd->first_page++; | ||
folio_unlock(folio); | ||
mpd->first_page += folio_nr_pages(folio); | ||
} | ||
} else { | ||
/* Add all dirty buffers to mpd */ | ||
lblk = ((ext4_lblk_t)page->index) << | ||
lblk = ((ext4_lblk_t)folio->index) << | ||
(PAGE_SHIFT - blkbits); | ||
head = page_buffers(page); | ||
head = folio_buffers(folio); | ||
err = mpage_process_page_bufs(mpd, head, head, | ||
lblk); | ||
lblk); | ||
if (err <= 0) | ||
goto out; | ||
err = 0; | ||
} | ||
left--; | ||
left -= folio_nr_pages(folio); | ||
} | ||
pagevec_release(&pvec); | ||
folio_batch_release(&fbatch); | ||
cond_resched(); | ||
} | ||
mpd->scanned_until_end = 1; | ||
return 0; | ||
out: | ||
pagevec_release(&pvec); | ||
folio_batch_release(&fbatch); | ||
return err; | ||
} | ||
|
||
|