mm/swap: convert __page_cache_release() to use a folio

All the callers now have a folio.  Saves several calls to compound_head,
totalling 502 bytes of text.
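
As an illustrative aside (not part of the commit): legacy page-flag tests such
as PageLRU() must first resolve a possibly-tail page to its head page via
compound_head(), whereas a folio is by definition never a tail page, so
folio_test_lru() can test the flags directly. Below is a deliberately
simplified userspace toy model of that difference - the names mirror the
kernel's, but the structures and accessors here are assumptions for
illustration (the real kernel encodes the head pointer in a tagged field and
generates these accessors from macros):

	#include <stdbool.h>

	/* Toy model: a tail page records its head; a head page does not. */
	struct page {
		unsigned long flags;
		struct page *head;	/* NULL when this page is a head page */
	};

	struct folio {
		struct page page;	/* a folio is never a tail page */
	};

	#define PG_lru	(1UL << 0)

	/* Tail pages carry no flags; find the head page that does. */
	static struct page *compound_head(struct page *page)
	{
		return page->head ? page->head : page;
	}

	/* Legacy page API: every test pays for the head lookup. */
	static bool PageLRU(struct page *page)
	{
		return compound_head(page)->flags & PG_lru;
	}

	/* Folio API: the caller already holds a head page, so test directly. */
	static bool folio_test_lru(struct folio *folio)
	{
		return folio->page.flags & PG_lru;
	}

Each converted test drops one compound_head() call from the generated code,
which is where the quoted 502 bytes of text come from.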

Link: https://lkml.kernel.org/r/20220617175020.717127-19-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Author: Matthew Wilcox (Oracle), 2022-06-17 18:50:16 +01:00, committed by akpm
Parent: 5ef82fe7f6
Commit: 188e8caee9
1 changed file with 16 additions and 17 deletions

@@ -77,31 +77,30 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
  * This path almost never happens for VM activity - pages are normally freed
  * via pagevecs. But it gets used by networking - and for compound pages.
  */
-static void __page_cache_release(struct page *page)
+static void __page_cache_release(struct folio *folio)
 {
-	if (PageLRU(page)) {
-		struct folio *folio = page_folio(page);
+	if (folio_test_lru(folio)) {
 		struct lruvec *lruvec;
 		unsigned long flags;
 
 		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
-		del_page_from_lru_list(page, lruvec);
-		__clear_page_lru_flags(page);
+		lruvec_del_folio(lruvec, folio);
+		__folio_clear_lru_flags(folio);
 		unlock_page_lruvec_irqrestore(lruvec, flags);
 	}
-	/* See comment on PageMlocked in release_pages() */
-	if (unlikely(PageMlocked(page))) {
-		int nr_pages = thp_nr_pages(page);
+	/* See comment on folio_test_mlocked in release_pages() */
+	if (unlikely(folio_test_mlocked(folio))) {
+		long nr_pages = folio_nr_pages(folio);
 
-		__ClearPageMlocked(page);
-		mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+		__folio_clear_mlocked(folio);
+		zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
 		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
 	}
 }
 
 static void __folio_put_small(struct folio *folio)
 {
-	__page_cache_release(&folio->page);
+	__page_cache_release(folio);
 	mem_cgroup_uncharge(folio);
 	free_unref_page(&folio->page, 0);
 }
@@ -115,7 +114,7 @@ static void __folio_put_large(struct folio *folio)
 	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
 	 */
 	if (!folio_test_hugetlb(folio))
-		__page_cache_release(&folio->page);
+		__page_cache_release(folio);
 	destroy_compound_page(&folio->page);
 }
 
@@ -199,14 +198,14 @@ static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
 
 	/*
	 * Is an smp_mb__after_atomic() still required here, before
-	 * folio_evictable() tests PageMlocked, to rule out the possibility
+	 * folio_evictable() tests the mlocked flag, to rule out the possibility
 	 * of stranding an evictable folio on an unevictable LRU? I think
-	 * not, because __munlock_page() only clears PageMlocked while the LRU
-	 * lock is held.
+	 * not, because __munlock_page() only clears the mlocked flag
+	 * while the LRU lock is held.
 	 *
 	 * (That is not true of __page_cache_release(), and not necessarily
-	 * true of release_pages(): but those only clear PageMlocked after
-	 * put_page_testzero() has excluded any other users of the page.)
+	 * true of release_pages(): but those only clear the mlocked flag after
+	 * folio_put_testzero() has excluded any other users of the folio.)
	 */
 	if (folio_evictable(folio)) {
 		if (was_unevictable)