hugetlb: Convert huge_add_to_page_cache() to use a folio

Remove the last caller of add_to_page_cache()

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Author: Matthew Wilcox (Oracle), 2022-06-01 15:11:01 -04:00
Parent: 211d04445b
Commit: d9ef44de5d
2 changed files with 11 additions and 5 deletions
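For context, add_to_page_cache() bundled "lock the page, then insert it into the page cache" into a single call; the folio conversion open-codes that sequence. The sketch below only illustrates the correspondence; legacy_insert() and folio_insert() are hypothetical names, not code from the patch or the kernel headers:

/*
 * Illustrative sketch of the conversion pattern; legacy_insert() and
 * folio_insert() are hypothetical helpers, not kernel functions.
 */
static int legacy_insert(struct page *page, struct address_space *mapping,
			 pgoff_t idx)
{
	/* old style: add_to_page_cache() locks the page and inserts it */
	return add_to_page_cache(page, mapping, idx, GFP_KERNEL);
}

static int folio_insert(struct page *page, struct address_space *mapping,
			pgoff_t idx)
{
	struct folio *folio = page_folio(page);
	int err;

	/* new style: take the folio lock, then add the folio to the cache */
	__folio_set_locked(folio);
	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
	if (unlikely(err))
		__folio_clear_locked(folio);	/* drop the lock on failure */
	return err;
}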

fs/hugetlbfs/inode.c

@@ -759,7 +759,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
 		SetHPageMigratable(page);
 
 		/*
-		 * unlock_page because locked by add_to_page_cache()
+		 * unlock_page because locked by huge_add_to_page_cache()
 		 * put_page() due to reference from alloc_huge_page()
 		 */
 		unlock_page(page);
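The updated comment reflects the caller-side contract in hugetlbfs_fallocate(): the page comes back locked from huge_add_to_page_cache() and still carries the reference taken by alloc_huge_page(), so both are released once the page is marked migratable. A condensed, hedged sketch of that pattern follows; example_fallocate_one() is a hypothetical name, and reservation bookkeeping and inode locking are omitted, so this is not the literal fallocate code:

/*
 * Condensed sketch of the hugetlbfs fallocate pattern (simplified;
 * example_fallocate_one() is hypothetical, reservation handling omitted).
 */
static long example_fallocate_one(struct vm_area_struct *pseudo_vma,
				  struct address_space *mapping,
				  unsigned long addr, pgoff_t index)
{
	struct page *page;
	int error;

	page = alloc_huge_page(pseudo_vma, addr, 0);	/* takes a reference */
	if (IS_ERR(page))
		return PTR_ERR(page);

	error = huge_add_to_page_cache(page, mapping, index);
	if (error) {
		put_page(page);		/* not locked on failure */
		return error;
	}

	SetHPageMigratable(page);
	/*
	 * unlock_page because locked by huge_add_to_page_cache()
	 * put_page() due to reference from alloc_huge_page()
	 */
	unlock_page(page);
	put_page(page);
	return 0;
}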

mm/hugetlb.c

@@ -5414,19 +5414,25 @@ static bool hugetlbfs_pagecache_present(struct hstate *h,
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			   pgoff_t idx)
 {
+	struct folio *folio = page_folio(page);
 	struct inode *inode = mapping->host;
 	struct hstate *h = hstate_inode(inode);
-	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
+	int err;
 
-	if (err)
+	__folio_set_locked(folio);
+	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
+
+	if (unlikely(err)) {
+		__folio_clear_locked(folio);
 		return err;
+	}
 	ClearHPageRestoreReserve(page);
 
 	/*
-	 * set page dirty so that it will not be removed from cache/file
+	 * mark folio dirty so that it will not be removed from cache/file
 	 * by non-hugetlbfs specific code paths.
 	 */
-	set_page_dirty(page);
+	folio_mark_dirty(folio);
 
 	spin_lock(&inode->i_lock);
 	inode->i_blocks += blocks_per_huge_page(h);
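With the conversion, huge_add_to_page_cache() keeps its external contract: on success the folio is locked and present in the page cache, on failure the lock is cleared again and only the error is returned. A hedged sketch of a hypothetical caller relying on that contract (example_insert() is not from the patch):

/* Hypothetical caller showing the lock contract (sketch only) */
static int example_insert(struct page *page, struct address_space *mapping,
			  pgoff_t idx)
{
	int err = huge_add_to_page_cache(page, mapping, idx);

	if (err)
		return err;	/* folio was unlocked again inside the helper */

	/* ... the page is now locked and visible in the page cache ... */

	unlock_page(page);	/* pairs with the lock taken during insertion */
	return 0;
}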