mm: fix some typos and code style problems

Fix some typos and code style problems in mm.

gfp.h: s/MAXNODES/MAX_NUMNODES
mmzone.h: s/then/than
rmap.c: s/__vma_split()/__vma_adjust()
swap.c: s/__mod_zone_page_stat/__mod_zone_page_state, s/is is/is
swap_state.c: s/whoes/whose
z3fold.c: code style problem fix in z3fold_unregister_migration
zsmalloc.c: s/of/or, s/give/given

Link: https://lkml.kernel.org/r/20210419083057.64820-1-luoshijie1@huawei.com
Signed-off-by: Shijie Luo <luoshijie1@huawei.com>
Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b1989a3db4
commit cb152a1a95
include/linux/gfp.h
@@ -490,7 +490,7 @@ static inline int gfp_zonelist(gfp_t flags)
 
 /*
  * We get the zone list from the current node and the gfp_mask.
- * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * This zone list contains a maximum of MAX_NUMNODES*MAX_NR_ZONES zones.
  * There are two zonelists per node, one for all zones with memory and
  * one containing just zones from the node the zonelist belongs to.
  *
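A side note on the gfp.h comment above: each node carries two zonelists, and gfp_zonelist() picks between them based on the gfp mask. Below is a minimal userspace sketch of that selection; the constants, the struct layout, and the GFP_THISNODE stand-in are illustrative assumptions, not the kernel's definitions (only the zonelist index names mirror the kernel).

#include <stdio.h>

#define MAX_NUMNODES  4
#define MAX_NR_ZONES  3
#define GFP_THISNODE  0x1u   /* stand-in for __GFP_THISNODE */

enum zonelist_idx { ZONELIST_FALLBACK, ZONELIST_NOFALLBACK, MAX_ZONELISTS };

struct zonelist {
	/* at most one entry per (node, zone) pair, hence the bound above */
	int zones[MAX_NUMNODES * MAX_NR_ZONES];
	int nr;
};

struct node {
	struct zonelist zonelists[MAX_ZONELISTS];
};

static struct node nodes[MAX_NUMNODES];

/* Mirrors the shape of gfp_zonelist()/node_zonelist(): the mask picks
 * the full fallback list or the node-local list. */
static struct zonelist *node_zonelist(int nid, unsigned int gfp_mask)
{
	int idx = (gfp_mask & GFP_THISNODE) ? ZONELIST_NOFALLBACK
					    : ZONELIST_FALLBACK;
	return &nodes[nid].zonelists[idx];
}

int main(void)
{
	printf("fallback list: %p\n", (void *)node_zonelist(0, 0));
	printf("thisnode list: %p\n", (void *)node_zonelist(0, GFP_THISNODE));
	return 0;
}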
include/linux/mmzone.h
@@ -55,7 +55,7 @@ enum migratetype {
	 * pageblocks to MIGRATE_CMA which can be done by
	 * __free_pageblock_cma() function. What is important though
	 * is that a range of pageblocks must be aligned to
-	 * MAX_ORDER_NR_PAGES should biggest page be bigger then
+	 * MAX_ORDER_NR_PAGES should biggest page be bigger than
	 * a single pageblock.
	 */
	MIGRATE_CMA,
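The mmzone.h comment encodes an alignment rule: a range handed over to MIGRATE_CMA must be aligned to MAX_ORDER_NR_PAGES so that a maximal buddy page can never straddle the range boundary. A toy userspace check of that rule, with a made-up MAX_ORDER_NR_PAGES value:

#include <assert.h>
#include <stdbool.h>

#define MAX_ORDER_NR_PAGES (1UL << 10)  /* e.g. order-10 = 4MB with 4K pages */

/* Both the start pfn and the length must be multiples of the largest
 * buddy size, so no max-order page crosses into or out of the range. */
static bool cma_range_aligned(unsigned long start_pfn, unsigned long nr_pages)
{
	return (start_pfn % MAX_ORDER_NR_PAGES) == 0 &&
	       (nr_pages  % MAX_ORDER_NR_PAGES) == 0;
}

int main(void)
{
	assert(cma_range_aligned(0x40000, 0x4000));   /* aligned: OK */
	assert(!cma_range_aligned(0x40200, 0x4000));  /* misaligned start */
	return 0;
}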
mm/rmap.c
@@ -257,7 +257,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
  * Attach the anon_vmas from src to dst.
  * Returns 0 on success, -ENOMEM on failure.
  *
- * anon_vma_clone() is called by __vma_split(), __split_vma(), copy_vma() and
+ * anon_vma_clone() is called by __vma_adjust(), __split_vma(), copy_vma() and
  * anon_vma_fork(). The first three want an exact copy of src, while the last
  * one, anon_vma_fork(), may try to reuse an existing anon_vma to prevent
  * endless growth of anon_vma. Since dst->anon_vma is set to NULL before call,
mm/swap.c
@@ -496,7 +496,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);
		/*
-		 * We use the irq-unsafe __mod_zone_page_stat because this
+		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held(spinlock), which implies preemption disabled.
		 */
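The swap.c comment justifies using the cheaper, irq-unsafe stat updater: the counter is never modified from interrupt context, and the pte spinlock already excludes concurrent writers. A userspace analog of that pattern (not the kernel API; the lock and counter names are stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t pte_lock;  /* stand-in for the pte lock  */
static long nr_mlocked;              /* stand-in for the zone stat */

/* Analog of __mod_zone_page_state(): a plain add, no atomics and no
 * irq disabling, valid only because callers hold the lock. */
static void mod_stat(long delta)
{
	nr_mlocked += delta;
}

static void mlock_pages(int nr_pages)
{
	pthread_spin_lock(&pte_lock);
	mod_stat(nr_pages);          /* serialized by the lock */
	pthread_spin_unlock(&pte_lock);
}

int main(void)
{
	pthread_spin_init(&pte_lock, PTHREAD_PROCESS_PRIVATE);
	mlock_pages(512);            /* e.g. one THP worth of pages */
	printf("nr_mlocked = %ld\n", nr_mlocked);
	pthread_spin_destroy(&pte_lock);
	return 0;
}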
mm/swap.c
@@ -808,7 +808,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
-	 * Assume CPU #z is is in the middle of the for_each_online_cpu loop
+	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
mm/swap_state.c
@@ -792,7 +792,7 @@ static void swap_ra_info(struct vm_fault *vmf,
  *
  * Returns the struct page for entry and addr, after queueing swapin.
  *
- * Primitive swap readahead code. We simply read in a few pages whoes
+ * Primitive swap readahead code. We simply read in a few pages whose
  * virtual addresses are around the fault address in the same vma.
  *
  * Caller must hold read mmap_lock if vmf->vma is not NULL.
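The swap_state.c comment describes the readahead policy: fetch a few pages whose virtual addresses surround the faulting address, bounded by the vma. A minimal sketch of such a window computation, with an assumed window size and no kernel types:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define RA_PAGES  4UL            /* illustrative window size */

/* Center a small page-aligned window on the fault address, clamped
 * so it never leaves [vma_start, vma_end). */
static void swap_readahead_window(unsigned long fault_addr,
				  unsigned long vma_start,
				  unsigned long vma_end)
{
	unsigned long start = (fault_addr & PAGE_MASK) - RA_PAGES / 2 * PAGE_SIZE;
	unsigned long addr;

	if (start < vma_start)
		start = vma_start;

	for (addr = start;
	     addr < start + RA_PAGES * PAGE_SIZE && addr < vma_end;
	     addr += PAGE_SIZE)
		printf("would read page at %#lx\n", addr);
}

int main(void)
{
	swap_readahead_window(0x7f0000003123UL, 0x7f0000000000UL,
			      0x7f0000010000UL);
	return 0;
}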
mm/z3fold.c
@@ -391,7 +391,7 @@ static void z3fold_unregister_migration(struct z3fold_pool *pool)
 {
	if (pool->inode)
		iput(pool->inode);
- }
+}
 
 /* Initializes the z3fold header of a newly allocated z3fold page */
 static struct z3fold_header *init_z3fold_page(struct page *page, bool headless,
mm/zsmalloc.c
@@ -61,7 +61,7 @@
 #define ZSPAGE_MAGIC	0x58
 
 /*
- * This must be power of 2 and greater than of equal to sizeof(link_free).
+ * This must be power of 2 and greater than or equal to sizeof(link_free).
  * These two conditions ensure that any 'struct link_free' itself doesn't
  * span more than 1 page which avoids complex case of mapping 2 pages simply
  * to restore link_free pointer values.
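The zsmalloc comment states two conditions on the allocation granule: it must be a power of 2 and at least sizeof(struct link_free). Together with the granule dividing PAGE_SIZE, these guarantee a link_free never spans two pages. A compile-time sketch of exactly those conditions, with simplified stand-ins for ZS_ALIGN and struct link_free:

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096
#define ZS_ALIGN  8   /* illustrative alignment/granule */

struct link_free {
	unsigned long next;  /* simplified: kernel packs handle/next here */
};

/* If the granule is a power of 2, covers link_free, and tiles the page
 * exactly, no granule (hence no link_free) straddles a page boundary. */
static_assert((ZS_ALIGN & (ZS_ALIGN - 1)) == 0, "must be a power of 2");
static_assert(ZS_ALIGN >= sizeof(struct link_free), "must cover link_free");
static_assert(PAGE_SIZE % ZS_ALIGN == 0, "granules tile the page exactly");

int main(void) { return 0; }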
mm/zsmalloc.c
@@ -530,7 +530,7 @@ static void set_zspage_mapping(struct zspage *zspage,
  * class maintains a list of zspages where each zspage is divided
  * into equal sized chunks. Each allocation falls into one of these
  * classes depending on its size. This function returns index of the
- * size class which has chunk size big enough to hold the give size.
+ * size class which has chunk size big enough to hold the given size.
  */
 static int get_size_class_index(int size)
 {
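get_size_class_index() maps a request size to the first class whose chunk size can hold it. A userspace sketch of that rounding with made-up constants (the kernel additionally clamps the index to the last class, omitted here):

#include <stdio.h>

#define ZS_MIN_ALLOC_SIZE   32
#define ZS_SIZE_CLASS_DELTA 16   /* illustrative step between classes */

/* Round the size up to the next class boundary: class 0 holds anything
 * up to the minimum size, each further class adds one delta of room. */
static int get_size_class_index(int size)
{
	int idx = 0;

	if (size > ZS_MIN_ALLOC_SIZE)
		idx = (size - ZS_MIN_ALLOC_SIZE + ZS_SIZE_CLASS_DELTA - 1) /
		      ZS_SIZE_CLASS_DELTA;
	return idx;
}

int main(void)
{
	printf("size 32  -> class %d\n", get_size_class_index(32));   /* 0 */
	printf("size 33  -> class %d\n", get_size_class_index(33));   /* 1 */
	printf("size 100 -> class %d\n", get_size_class_index(100));  /* 5 */
	return 0;
}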