mm/hugetlb_cgroup: convert hugetlb_cgroup_commit_charge*() to folios

Convert hugetlb_cgroup_commit_charge*() to internally use folios to clean
up the code after __set_hugetlb_cgroup() was changed to take a folio.

Link: https://lkml.kernel.org/r/20221101223059.460937-9-sidhartha.kumar@oracle.com
Signed-off-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Muchun Song <songmuchun@bytedance.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Bui Quang Minh <minhquangbui99@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Mina Almasry <almasrymina@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Sidhartha Kumar 2022-11-01 15:30:58 -07:00 committed by Andrew Morton
parent d4ab0316cc
commit 541b7c7b3e
1 changed file with 10 additions and 6 deletions

View File

@@ -310,21 +310,21 @@ int hugetlb_cgroup_charge_cgroup_rsvd(int idx, unsigned long nr_pages,

 /* Should be called with hugetlb_lock held */
 static void __hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 					   struct hugetlb_cgroup *h_cg,
-					   struct page *page, bool rsvd)
+					   struct folio *folio, bool rsvd)
 {
 	if (hugetlb_cgroup_disabled() || !h_cg)
 		return;
-	__set_hugetlb_cgroup(page_folio(page), h_cg, rsvd);
+	__set_hugetlb_cgroup(folio, h_cg, rsvd);
 	if (!rsvd) {
 		unsigned long usage =
-			h_cg->nodeinfo[page_to_nid(page)]->usage[idx];
+			h_cg->nodeinfo[folio_nid(folio)]->usage[idx];
 		/*
 		 * This write is not atomic due to fetching usage and writing
 		 * to it, but that's fine because we call this with
 		 * hugetlb_lock held anyway.
 		 */
-		WRITE_ONCE(h_cg->nodeinfo[page_to_nid(page)]->usage[idx],
+		WRITE_ONCE(h_cg->nodeinfo[folio_nid(folio)]->usage[idx],
 			   usage + nr_pages);
 	}
 }
@@ -333,14 +333,18 @@ void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
 				  struct page *page)
 {
-	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, false);
+	struct folio *folio = page_folio(page);
+
+	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, false);
 }

 void hugetlb_cgroup_commit_charge_rsvd(int idx, unsigned long nr_pages,
 				       struct hugetlb_cgroup *h_cg,
 				       struct page *page)
 {
-	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, page, true);
+	struct folio *folio = page_folio(page);
+
+	__hugetlb_cgroup_commit_charge(idx, nr_pages, h_cg, folio, true);
 }

 /*