filemap: Remove add_to_page_cache() and add_to_page_cache_locked()
These functions have no more users, so delete them.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
commit 2bb876b58d
parent d9ef44de5d
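For readers following the folio conversion: a caller that previously used add_to_page_cache() now allocates a folio and calls filemap_add_folio() directly. A minimal sketch of the migrated pattern follows; it is illustrative, not code from this commit, and assumes "mapping" and "index" are in scope in a hypothetical caller.

	/* Illustrative sketch only -- not part of this commit. */
	struct folio *folio = filemap_alloc_folio(GFP_KERNEL, 0);
	int err;

	if (!folio)
		return -ENOMEM;

	/*
	 * Unlike the removed add_to_page_cache(), filemap_add_folio()
	 * also charges the memcg and puts the folio on the LRU; on
	 * success the folio is returned locked, so the caller unlocks
	 * it once the contents are ready.
	 */
	err = filemap_add_folio(mapping, folio, index, GFP_KERNEL);
	if (err) {
		folio_put(folio);
		return err;
	}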
Documentation/admin-guide/cgroup-v1/memcg_test.rst
@@ -97,7 +97,7 @@ Under below explanation, we assume CONFIG_MEM_RES_CTRL_SWAP=y.
 =============
 
 	Page Cache is charged at
-	- add_to_page_cache_locked().
+	- filemap_add_folio().
 
 	The logic is very clear. (About migration, see below)
 
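The documentation change above tracks where the page-cache charge now happens: inside __filemap_add_folio(), before the folio is inserted into the xarray. A condensed sketch of that step (simplified from mm/filemap.c of this era; error paths, shadow entries, and statistics are trimmed):

	/* Condensed sketch of the charging step in __filemap_add_folio(). */
	int huge = folio_test_hugetlb(folio);
	bool charged = false;

	if (!huge) {
		int error = mem_cgroup_charge(folio, NULL, gfp);
		if (error)
			return error;
		charged = true;
	}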
include/linux/pagemap.h
@@ -1098,8 +1098,6 @@ size_t fault_in_subpage_writeable(char __user *uaddr, size_t size);
 size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
 size_t fault_in_readable(const char __user *uaddr, size_t size);
 
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t index, gfp_t gfp);
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
 		pgoff_t index, gfp_t gfp);
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
@@ -1115,22 +1113,6 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp);
 loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
 		int whence);
 
-/*
- * Like add_to_page_cache_locked, but used to add newly allocated pages:
- * the page is new, so we can just run __SetPageLocked() against it.
- */
-static inline int add_to_page_cache(struct page *page,
-		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
-{
-	int error;
-
-	__SetPageLocked(page);
-	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
-	if (unlikely(error))
-		__ClearPageLocked(page);
-	return error;
-}
-
 /* Must be non-static for BPF error injection */
 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
 		pgoff_t index, gfp_t gfp, void **shadowp);
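The __SetPageLocked()/__ClearPageLocked() dance the removed inline helper performed has not disappeared: filemap_add_folio() does the folio equivalent internally, and additionally takes the LRU step the old helpers left to callers. A simplified sketch of that interior shape (workingset shadow handling trimmed):

	/* Simplified shape of filemap_add_folio()'s body. */
	void *shadow = NULL;
	int ret;

	__folio_set_locked(folio);	/* folio is new: no lock contention */
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
	if (unlikely(ret))
		__folio_clear_locked(folio);
	else
		folio_add_lru(folio);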
mm/filemap.c
@@ -929,26 +929,6 @@ noinline int __filemap_add_folio(struct address_space *mapping,
 }
 ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);
 
-/**
- * add_to_page_cache_locked - add a locked page to the pagecache
- * @page:	page to add
- * @mapping:	the page's address_space
- * @offset:	page index
- * @gfp_mask:	page allocation mode
- *
- * This function is used to add a page to the pagecache. It must be locked.
- * This function does not add the page to the LRU. The caller must do that.
- *
- * Return: %0 on success, negative error code otherwise.
- */
-int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
-		pgoff_t offset, gfp_t gfp_mask)
-{
-	return __filemap_add_folio(mapping, page_folio(page), offset,
-					  gfp_mask, NULL);
-}
-EXPORT_SYMBOL(add_to_page_cache_locked);
-
 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
 		pgoff_t index, gfp_t gfp)
 {
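add_to_page_cache_locked() existed for callers that wanted insertion without LRU placement. After this series, such callers open-code the pattern against __filemap_add_folio() directly, which appears to be what the parent commit did for hugetlbfs. A sketch of that pattern, with illustrative variable names:

	/* Illustrative: an LRU-skipping insertion after this commit. */
	__folio_set_locked(folio);
	err = __filemap_add_folio(mapping, folio, index, GFP_KERNEL, NULL);
	if (unlikely(err)) {
		__folio_clear_locked(folio);
		return err;
	}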
mm/shmem.c
@@ -693,7 +693,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
- * Like add_to_page_cache_locked, but error if expected item has gone.
+ * Like filemap_add_folio, but error if expected item has gone.
  */
 static int shmem_add_to_page_cache(struct folio *folio,
 				   struct address_space *mapping,
mm/swap_state.c
@@ -95,7 +95,7 @@ void *get_shadow_from_swap_cache(swp_entry_t entry)
 }
 
 /*
- * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
+ * add_to_swap_cache resembles filemap_add_folio on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
 int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)