mm/swap: convert __delete_from_swap_cache() to a folio
All callers now have a folio, so convert the entire function to operate on folios.

Link: https://lkml.kernel.org/r/20220617175020.717127-23-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit ceff9d3354
parent 75fa68a5d8
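The change is purely an interface conversion: the prototype and its external call sites stop taking a head page and take the folio the callers already hold. A minimal before/after sketch, condensed from the hunks below (kernel context assumed; not a standalone, buildable excerpt):

/* Old prototype: callers that already had a folio passed &folio->page. */
void __delete_from_swap_cache(struct page *page,
			      swp_entry_t entry, void *shadow);
__delete_from_swap_cache(&folio->page, entry, NULL);

/*
 * New prototype: the folio is passed directly, and the multi-page loop
 * sizes itself with folio_nr_pages() instead of thp_nr_pages().
 */
void __delete_from_swap_cache(struct folio *folio,
			      swp_entry_t entry, void *shadow);
__delete_from_swap_cache(folio, entry, NULL);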
@@ -36,7 +36,7 @@ bool add_to_swap(struct folio *folio);
 void *get_shadow_from_swap_cache(swp_entry_t entry);
 int add_to_swap_cache(struct page *page, swp_entry_t entry,
		      gfp_t gfp, void **shadowp);
-void __delete_from_swap_cache(struct page *page,
+void __delete_from_swap_cache(struct folio *folio,
			      swp_entry_t entry, void *shadow);
 void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
@@ -135,7 +135,7 @@ static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
	return -1;
 }

-static inline void __delete_from_swap_cache(struct page *page,
+static inline void __delete_from_swap_cache(struct folio *folio,
					swp_entry_t entry, void *shadow)
 {
 }
@@ -133,31 +133,32 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
 }

 /*
- * This must be called only on pages that have
+ * This must be called only on folios that have
  * been verified to be in the swap cache.
  */
-void __delete_from_swap_cache(struct page *page,
+void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
 {
	struct address_space *address_space = swap_address_space(entry);
-	int i, nr = thp_nr_pages(page);
+	int i;
+	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

-	VM_BUG_ON_PAGE(!PageLocked(page), page);
-	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
-	VM_BUG_ON_PAGE(PageWriteback(page), page);
+	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
+	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
+	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
-		VM_BUG_ON_PAGE(entry != page, entry);
-		set_page_private(page + i, 0);
+		VM_BUG_ON_FOLIO(entry != folio, folio);
+		set_page_private(folio_page(folio, i), 0);
		xas_next(&xas);
	}
-	ClearPageSwapCache(page);
+	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
-	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
-	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
+	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
+	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
 }

 /**
@@ -233,7 +234,7 @@ void delete_from_swap_cache(struct folio *folio)
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(&folio->page, entry, NULL);
+	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(&folio->page, entry);
@@ -1329,7 +1329,7 @@ static int __remove_mapping(struct address_space *mapping, struct folio *folio,
		mem_cgroup_swapout(folio, swap);
		if (reclaimed && !mapping_exiting(mapping))
			shadow = workingset_eviction(folio, target_memcg);
-		__delete_from_swap_cache(&folio->page, swap, shadow);
+		__delete_from_swap_cache(folio, swap, shadow);
		xa_unlock_irq(&mapping->i_pages);
		put_swap_page(&folio->page, swap);
	} else {