f2fs: convert f2fs_write_cache_pages() to use filemap_get_folios_tag()

Convert the function to use a folio_batch instead of a pagevec.  This is in
preparation for the removal of find_get_pages_range_tag().
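
For reference, the replacement lookup pattern looks roughly like this (a
minimal sketch, not the f2fs code: the helper name walk_tagged_folios and
the per-folio locking placeholder are assumptions; filemap_get_folios_tag()
and the folio_batch API are the real interfaces):

#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

/* Hypothetical helper: visit every folio tagged @tag in [index, end]. */
static void walk_tagged_folios(struct address_space *mapping,
			       pgoff_t index, pgoff_t end, xa_mark_t tag)
{
	struct folio_batch fbatch;
	unsigned int i, nr_folios;

	folio_batch_init(&fbatch);
	/* The lookup advances @index past the last folio it found. */
	while ((nr_folios = filemap_get_folios_tag(mapping, &index, end,
						   tag, &fbatch)) != 0) {
		for (i = 0; i < nr_folios; i++) {
			struct folio *folio = fbatch.folios[i];

			folio_lock(folio);
			/* per-folio writeback work would go here */
			folio_unlock(folio);
		}
		/* Drop the references the lookup took on each folio. */
		folio_batch_release(&fbatch);
		cond_resched();
	}
}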

Also convert f2fs_all_cluster_page_ready() to take in a folio_batch instead
of a pagevec.  This does NOT support large folios.  The function currently
only utilizes folios of size 1, so this shouldn't cause any issues right
now.

This version of the patch limits the number of pages fetched to
F2FS_ONSTACK_PAGES.  If that limit is hit before a batch is exhausted, the
start index is rewound to just after the last page actually used, since
filemap_get_folios_tag() advances the index past the last found folio, not
necessarily the last used page.
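
As an illustration of that rewind, a simplified sketch (the helper name
fill_pages is an assumption; the patch open-codes the equivalent logic
inside f2fs_write_cache_pages(), as the hunks below show):

#include <linux/pagemap.h>
#include <linux/pagevec.h>

/*
 * Hypothetical helper: fill @pages (at most @max entries, each holding a
 * folio reference the caller must drop) with the pages of dirty-tagged
 * folios, resuming the search from *@index.
 */
static unsigned int fill_pages(struct address_space *mapping, pgoff_t *index,
			       pgoff_t end, struct page **pages,
			       unsigned int max)
{
	struct folio_batch fbatch;
	unsigned int nr_pages = 0;
	unsigned int i;
	long j;

	folio_batch_init(&fbatch);
again:
	if (!filemap_get_folios_tag(mapping, index, end,
				    PAGECACHE_TAG_DIRTY, &fbatch))
		return nr_pages;

	for (i = 0; i < folio_batch_count(&fbatch); i++) {
		struct folio *folio = fbatch.folios[i];

		for (j = 0; j < folio_nr_pages(folio); j++) {
			folio_get(folio);
			pages[nr_pages] = folio_page(folio, j);
			if (++nr_pages == max) {
				/*
				 * The lookup already advanced *@index past
				 * the last *found* folio; rewind it to just
				 * after the last page actually consumed.
				 */
				*index = folio->index + j + 1;
				folio_batch_release(&fbatch);
				return nr_pages;
			}
		}
	}
	folio_batch_release(&fbatch);
	goto again;
}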

Link: https://lkml.kernel.org/r/20230104211448.4804-15-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Acked-by: Chao Yu <chao@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Vishal Moola (Oracle), 2023-01-04 13:14:39 -08:00; committed by Andrew Morton
parent 7525486aff
commit 1cd98ee747
1 changed file with 58 additions and 26 deletions

@@ -2957,6 +2957,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 	int ret = 0;
 	int done = 0, retry = 0;
 	struct page *pages[F2FS_ONSTACK_PAGES];
+	struct folio_batch fbatch;
 	struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
 	struct bio *bio = NULL;
 	sector_t last_block;
@@ -2977,6 +2978,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 		.private = NULL,
 	};
 #endif
+	int nr_folios, p, idx;
 	int nr_pages;
 	pgoff_t index;
 	pgoff_t end;		/* Inclusive */
@@ -2987,6 +2989,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 	int submitted = 0;
 	int i;
 
+	folio_batch_init(&fbatch);
+
 	if (get_dirty_pages(mapping->host) <=
 			SM_I(F2FS_M_SB(mapping))->min_hot_blocks)
 		set_inode_flag(mapping->host, FI_HOT_DATA);
@@ -3012,13 +3016,38 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 		tag_pages_for_writeback(mapping, index, end);
 	done_index = index;
 	while (!done && !retry && (index <= end)) {
-		nr_pages = find_get_pages_range_tag(mapping, &index, end,
-				tag, F2FS_ONSTACK_PAGES, pages);
-		if (nr_pages == 0)
+		nr_pages = 0;
+again:
+		nr_folios = filemap_get_folios_tag(mapping, &index, end,
+				tag, &fbatch);
+		if (nr_folios == 0) {
+			if (nr_pages)
+				goto write;
 			break;
+		}
 
+		for (i = 0; i < nr_folios; i++) {
+			struct folio *folio = fbatch.folios[i];
+
+			idx = 0;
+			p = folio_nr_pages(folio);
+add_more:
+			pages[nr_pages] = folio_page(folio, idx);
+			folio_get(folio);
+			if (++nr_pages == F2FS_ONSTACK_PAGES) {
+				index = folio->index + idx + 1;
+				folio_batch_release(&fbatch);
+				goto write;
+			}
+			if (++idx < p)
+				goto add_more;
+		}
+		folio_batch_release(&fbatch);
+		goto again;
+write:
 		for (i = 0; i < nr_pages; i++) {
 			struct page *page = pages[i];
+			struct folio *folio = page_folio(page);
 			bool need_readd;
 readd:
 			need_readd = false;
@@ -3035,7 +3064,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 				}
 
 				if (!f2fs_cluster_can_merge_page(&cc,
-							page->index)) {
+							folio->index)) {
 					ret = f2fs_write_multi_pages(&cc,
 						&submitted, wbc, io_type);
 					if (!ret)
@@ -3044,27 +3073,28 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 				}
 
 				if (unlikely(f2fs_cp_error(sbi)))
-					goto lock_page;
+					goto lock_folio;
 
 				if (!f2fs_cluster_is_empty(&cc))
-					goto lock_page;
+					goto lock_folio;
 
 				if (f2fs_all_cluster_page_ready(&cc,
 					pages, i, nr_pages, true))
-					goto lock_page;
+					goto lock_folio;
 
 				ret2 = f2fs_prepare_compress_overwrite(
 							inode, &pagep,
-							page->index, &fsdata);
+							folio->index, &fsdata);
 				if (ret2 < 0) {
 					ret = ret2;
 					done = 1;
 					break;
 				} else if (ret2 &&
 					(!f2fs_compress_write_end(inode,
-						fsdata, page->index, 1) ||
+						fsdata, folio->index, 1) ||
 					!f2fs_all_cluster_page_ready(&cc,
-						pages, i, nr_pages, false))) {
+						pages, i, nr_pages,
+						false))) {
 					retry = 1;
 					break;
 				}
@@ -3077,46 +3107,47 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 				break;
 			}
 #ifdef CONFIG_F2FS_FS_COMPRESSION
-lock_page:
+lock_folio:
 #endif
-			done_index = page->index;
+			done_index = folio->index;
 retry_write:
-			lock_page(page);
+			folio_lock(folio);
 
-			if (unlikely(page->mapping != mapping)) {
+			if (unlikely(folio->mapping != mapping)) {
 continue_unlock:
-				unlock_page(page);
+				folio_unlock(folio);
 				continue;
 			}
 
-			if (!PageDirty(page)) {
+			if (!folio_test_dirty(folio)) {
 				/* someone wrote it for us */
 				goto continue_unlock;
 			}
 
-			if (PageWriteback(page)) {
+			if (folio_test_writeback(folio)) {
 				if (wbc->sync_mode != WB_SYNC_NONE)
-					f2fs_wait_on_page_writeback(page,
+					f2fs_wait_on_page_writeback(
+							&folio->page,
 							DATA, true, true);
 				else
 					goto continue_unlock;
 			}
 
-			if (!clear_page_dirty_for_io(page))
+			if (!folio_clear_dirty_for_io(folio))
 				goto continue_unlock;
 
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 			if (f2fs_compressed_file(inode)) {
-				get_page(page);
-				f2fs_compress_ctx_add_page(&cc, page);
+				folio_get(folio);
+				f2fs_compress_ctx_add_page(&cc, &folio->page);
 				continue;
 			}
 #endif
-			ret = f2fs_write_single_data_page(page, &submitted,
-					&bio, &last_block, wbc, io_type,
-					0, true);
+			ret = f2fs_write_single_data_page(&folio->page,
+					&submitted, &bio, &last_block,
+					wbc, io_type, 0, true);
 			if (ret == AOP_WRITEPAGE_ACTIVATE)
-				unlock_page(page);
+				folio_unlock(folio);
 #ifdef CONFIG_F2FS_FS_COMPRESSION
 result:
 #endif
@@ -3140,7 +3171,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 			}
 			goto next;
 		}
-		done_index = page->index + 1;
+		done_index = folio->index +
+				folio_nr_pages(folio);
 		done = 1;
 		break;
 	}