mm: add NR_SECONDARY_PAGETABLE to count secondary page table uses.
We keep track of several kernel memory stats (total kernel memory, page tables, stack, vmalloc, etc.) on multiple levels (global, per-node, per-memcg, etc.). These stats give users insight into how much memory is used by the kernel and for what purposes.

Currently, memory used by the KVM MMU is not accounted in any of those kernel memory stats. This patch series accounts the memory pages used by KVM for page tables in those stats, in a new NR_SECONDARY_PAGETABLE stat. This stat can later be extended to account for other types of secondary page tables (e.g. IOMMU page tables).

KVM has a decent number of large allocations that aren't for page tables, but for most of them the number/size of those allocations scales linearly with either the number of vCPUs or the amount of memory assigned to the VM. KVM's secondary page table allocations do not scale linearly, especially when nested virtualization is in use.

From a KVM perspective, NR_SECONDARY_PAGETABLE will scale with KVM's per-VM pages_{4k,2m,1g} stats unless the guest is doing something bizarre (e.g. accessing only 4KiB chunks of 2MiB pages so that KVM is forced to allocate a large number of page tables even though the guest isn't accessing that much memory). However, someone would need to either understand how KVM works to make that connection, or know (or be told) to go look at KVM's stats if they're running VMs, to better decipher the stats.

Furthermore, having NR_PAGETABLE side by side with NR_SECONDARY_PAGETABLE is informative. For example, when backing a VM with THP vs. HugeTLB, NR_SECONDARY_PAGETABLE is roughly the same, but NR_PAGETABLE is an order of magnitude higher with THP. So having this stat will at the very least prove useful for understanding tradeoffs between VM backing types, and will likely even steer folks towards potential optimizations.

The original discussion, with more details about the rationale:
https://lore.kernel.org/all/87ilqoi77b.wl-maz@kernel.org

This stat will be used by subsequent patches to count KVM MMU memory usage.

Signed-off-by: Yosry Ahmed <yosryahmed@google.com>
Acked-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20220823004639.2387269-2-yosryahmed@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
parent d7c9bfb9ca
commit ebc97a52b5
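As a rough illustration of how a secondary-MMU user such as KVM could charge its page-table pages against the new counter, here is a minimal sketch. It is not part of this patch: the helper name is an assumption for illustration, and the real KVM-side accounting is added by the follow-up patches in the series.

/*
 * Illustrative sketch only -- not code from this patch. The helper name is
 * hypothetical; the follow-up KVM patches add the actual accounting.
 */
#include <linux/mm.h>
#include <linux/memcontrol.h>

/* Charge (nr > 0) or uncharge (nr < 0) secondary page-table pages. */
static inline void account_secondary_pgtable_pages(void *virt, int nr)
{
	/*
	 * mod_lruvec_page_state() updates the global, per-node and (when the
	 * page is charged to a memcg) per-memcg NR_SECONDARY_PAGETABLE counts.
	 */
	mod_lruvec_page_state(virt_to_page(virt), NR_SECONDARY_PAGETABLE, nr);
}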
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1355,6 +1355,11 @@ PAGE_SIZE multiple when read back.
 	  pagetables
 		Amount of memory allocated for page tables.
 
+	  sec_pagetables
+		Amount of memory allocated for secondary page tables,
+		this currently includes KVM mmu allocations on x86
+		and arm64.
+
 	  percpu (npn)
 		Amount of memory used for storing per-cpu kernel
 		data structures.
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -982,6 +982,7 @@ Example output. You may not have all of these fields.
     SUnreclaim:       142336 kB
     KernelStack:       11168 kB
     PageTables:        20540 kB
+    SecPageTables:         0 kB
     NFS_Unstable:          0 kB
     Bounce:                0 kB
     WritebackTmp:          0 kB
@@ -1090,6 +1091,9 @@ KernelStack
               Memory consumed by the kernel stacks of all tasks
 PageTables
               Memory consumed by userspace page tables
+SecPageTables
+              Memory consumed by secondary page tables, this currently
+              includes KVM mmu allocations on x86 and arm64.
 NFS_Unstable
               Always zero. Previous counted pages which had been written to
               the server, but has not been committed to stable storage.
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -433,6 +433,7 @@ static ssize_t node_read_meminfo(struct device *dev,
 			     "Node %d ShadowCallStack:%8lu kB\n"
 #endif
 			     "Node %d PageTables:     %8lu kB\n"
+			     "Node %d SecPageTables:  %8lu kB\n"
 			     "Node %d NFS_Unstable:   %8lu kB\n"
 			     "Node %d Bounce:         %8lu kB\n"
 			     "Node %d WritebackTmp:   %8lu kB\n"
@@ -459,6 +460,7 @@ static ssize_t node_read_meminfo(struct device *dev,
 			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
 #endif
 			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
+			     nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
 			     nid, 0UL,
 			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
 			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -115,6 +115,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
 #endif
 	show_val_kb(m, "PageTables:     ",
 		    global_node_page_state(NR_PAGETABLE));
+	show_val_kb(m, "SecPageTables:  ",
+		    global_node_page_state(NR_SECONDARY_PAGETABLE));
 
 	show_val_kb(m, "NFS_Unstable:   ", 0);
 	show_val_kb(m, "Bounce:         ",
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -216,6 +216,7 @@ enum node_stat_item {
 	NR_KERNEL_SCS_KB,	/* measured in KiB */
 #endif
 	NR_PAGETABLE,		/* used for pagetables */
+	NR_SECONDARY_PAGETABLE,	/* secondary pagetables, e.g. KVM pagetables */
 #ifdef CONFIG_SWAP
 	NR_SWAPCACHE,
 #endif
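For completeness, a read-side sketch, again an assumption rather than code from this patch: once NR_SECONDARY_PAGETABLE is part of enum node_stat_item (hunk above), the counter can be read like any other node stat, either per node or globally, with the usual pages-to-kB conversion that meminfo uses.

#include <linux/mm.h>
#include <linux/vmstat.h>

/* Hypothetical helper: per-node secondary page-table usage in kB. */
static unsigned long node_sec_pgtable_kb(struct pglist_data *pgdat)
{
	/* node_page_state() returns a page count; shift converts pages to kB */
	return node_page_state(pgdat, NR_SECONDARY_PAGETABLE) << (PAGE_SHIFT - 10);
}

/* Hypothetical helper: system-wide secondary page-table usage in kB. */
static unsigned long global_sec_pgtable_kb(void)
{
	return global_node_page_state(NR_SECONDARY_PAGETABLE) << (PAGE_SHIFT - 10);
}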
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1401,6 +1401,7 @@ static const struct memory_stat memory_stats[] = {
 	{ "kernel",			MEMCG_KMEM			},
 	{ "kernel_stack",		NR_KERNEL_STACK_KB		},
 	{ "pagetables",			NR_PAGETABLE			},
+	{ "sec_pagetables",		NR_SECONDARY_PAGETABLE		},
 	{ "percpu",			MEMCG_PERCPU_B			},
 	{ "sock",			MEMCG_SOCK			},
 	{ "vmalloc",			MEMCG_VMALLOC			},
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6039,7 +6039,8 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
 		" unevictable:%lu dirty:%lu writeback:%lu\n"
 		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
-		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
+		" mapped:%lu shmem:%lu pagetables:%lu\n"
+		" sec_pagetables:%lu bounce:%lu\n"
 		" kernel_misc_reclaimable:%lu\n"
 		" free:%lu free_pcp:%lu free_cma:%lu\n",
 		global_node_page_state(NR_ACTIVE_ANON),
@@ -6056,6 +6057,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 		global_node_page_state(NR_FILE_MAPPED),
 		global_node_page_state(NR_SHMEM),
 		global_node_page_state(NR_PAGETABLE),
+		global_node_page_state(NR_SECONDARY_PAGETABLE),
 		global_zone_page_state(NR_BOUNCE),
 		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE),
 		global_zone_page_state(NR_FREE_PAGES),
@@ -6089,6 +6091,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			" shadow_call_stack:%lukB"
 #endif
 			" pagetables:%lukB"
+			" sec_pagetables:%lukB"
 			" all_unreclaimable? %s"
 			"\n",
 			pgdat->node_id,
@@ -6114,6 +6117,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 			node_page_state(pgdat, NR_KERNEL_SCS_KB),
 #endif
 			K(node_page_state(pgdat, NR_PAGETABLE)),
+			K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
 			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
 				"yes" : "no");
 	}
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1240,6 +1240,7 @@ const char * const vmstat_text[] = {
 	"nr_shadow_call_stack",
 #endif
 	"nr_page_table_pages",
+	"nr_sec_page_table_pages",
 #ifdef CONFIG_SWAP
 	"nr_swapcached",
 #endif