highmem: fix kmap_to_page() for kmap_local_page() addresses

kmap_to_page() is used to get the page for a virtual address which may
be kmap'ed.  Unfortunately, kmap_local_page() stores its mappings in a
thread-local array, separate from kmap(), and those mappings were not
checked by kmap_to_page().

Check the kmap_local_page() mappings and return the page if found.
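
For illustration, the calling pattern this covers looks roughly like the
sketch below ("page" and the example function are assumed here, not taken
from a real caller); before this fix, the lookup fell through to
virt_to_page() for such an address and returned a bogus page:

	#include <linux/highmem.h>

	/* Illustrative only: "page" comes from the surrounding context. */
	static void example(struct page *page)
	{
		void *kaddr = kmap_local_page(page);	/* thread-local mapping */
		struct page *p = kmap_to_page(kaddr);	/* must now return "page" */

		/* ... use p ... */
		kunmap_local(kaddr);
	}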

Because kmap_to_page() is intended to be removed, add a WARN_ON_ONCE()
to the kmap checks to flag potential issues early.

NOTE: Because 32-bit x86 uses kmap_local in its atomic io-mapping
support, KMAP_LOCAL does not require HIGHMEM to be set.  The supporting
calls therefore needed a new CONFIG_KMAP_LOCAL section to fix 0day build
errors.

[akpm@linux-foundation.org: fix warning]
Link: https://lkml.kernel.org/r/20221006040555.1502679-1-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Reported-by: kernel test robot <lkp@intel.com>
Cc: "Fabio M. De Francesco" <fmdefrancesco@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 15cd90049d
commit ef6e06b2ef
1 changed file with 31 additions and 12 deletions

@@ -30,6 +30,17 @@
 #include <asm/tlbflush.h>
 #include <linux/vmalloc.h>
 
+#ifdef CONFIG_KMAP_LOCAL
+static inline int kmap_local_calc_idx(int idx)
+{
+	return idx + KM_MAX_IDX * smp_processor_id();
+}
+
+#ifndef arch_kmap_local_map_idx
+#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
+#endif
+#endif /* CONFIG_KMAP_LOCAL */
+
 /*
  * Virtual_count is not a pure "count".
  *  0 means that it is not mapped, and has not been mapped
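
For background on the hunk above: each CPU owns KM_MAX_IDX consecutive
kmap_local slots, so stack slot i on CPU c resolves to fixmap slot
i + KM_MAX_IDX * c.  A minimal user-space sketch of that arithmetic
follows; the value 16 for KM_MAX_IDX is a stand-in (the real value is
arch/config dependent), and the CPU number is passed explicitly where
the kernel uses smp_processor_id():

	#include <stdio.h>

	#define KM_MAX_IDX 16	/* stand-in; the real value is arch/config dependent */

	/* Same arithmetic as kmap_local_calc_idx(), with the CPU made explicit. */
	static int kmap_local_calc_idx(int idx, int cpu)
	{
		return idx + KM_MAX_IDX * cpu;
	}

	int main(void)
	{
		/* slot 2 on CPU 3 -> fixmap index 2 + 16 * 3 = 50 */
		printf("%d\n", kmap_local_calc_idx(2, 3));
		return 0;
	}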
@@ -142,12 +153,29 @@ pte_t *pkmap_page_table;
 struct page *__kmap_to_page(void *vaddr)
 {
+	unsigned long base = (unsigned long) vaddr & PAGE_MASK;
+	struct kmap_ctrl *kctrl = &current->kmap_ctrl;
 	unsigned long addr = (unsigned long)vaddr;
+	int i;
 
-	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
-		int i = PKMAP_NR(addr);
-
-		return pte_page(pkmap_page_table[i]);
+	/* kmap() mappings */
+	if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) &&
+			 addr < PKMAP_ADDR(LAST_PKMAP)))
+		return pte_page(pkmap_page_table[PKMAP_NR(addr)]);
+
+	/* kmap_local_page() mappings */
+	if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) &&
+			 base < __fix_to_virt(FIX_KMAP_BEGIN))) {
+		for (i = 0; i < kctrl->idx; i++) {
+			unsigned long base_addr;
+			int idx;
+
+			idx = arch_kmap_local_map_idx(i, pte_pfn(kctrl->pteval[i]));
+			base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
+			if (base_addr == base)
+				return pte_page(kctrl->pteval[i]);
+		}
 	}
 
 	return virt_to_page(vaddr);
 }
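
Two notes on the hunk above.  First, WARN_ON_ONCE(cond) evaluates to
cond, so each check both gates its branch and emits a one-time backtrace
when a kmap'ed address is seen, supporting the planned removal of
kmap_to_page().  Second, the lookup is a linear scan of the task's
kmap_local stack, which is cheap because the nesting depth (kctrl->idx)
is small.  A simplified user-space model of that reverse lookup is
sketched below; the struct and helpers are stand-ins for the kernel's
struct kmap_ctrl, __fix_to_virt() and pte_page(), not kernel code:

	#include <stddef.h>

	#define KM_MAX_IDX 16			/* stand-in value */

	struct kmap_ctrl_model {		/* models struct kmap_ctrl */
		int idx;			/* current stack depth */
		unsigned long base[KM_MAX_IDX];	/* page-aligned vaddr per slot */
		void *page[KM_MAX_IDX];		/* mapped page per slot */
	};

	/* Return the page whose slot maps "base", as the new loop does. */
	static void *lookup_local(struct kmap_ctrl_model *kctrl,
				  unsigned long base)
	{
		for (int i = 0; i < kctrl->idx; i++)
			if (kctrl->base[i] == base)
				return kctrl->page[i];
		return NULL;	/* caller falls back to virt_to_page() */
	}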
@@ -462,10 +490,6 @@ static inline void kmap_local_idx_pop(void)
 # define arch_kmap_local_post_unmap(vaddr)	do { } while (0)
 #endif
 
-#ifndef arch_kmap_local_map_idx
-#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
-#endif
-
 #ifndef arch_kmap_local_unmap_idx
 #define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
 #endif
@@ -494,11 +518,6 @@ static inline bool kmap_high_unmap_local(unsigned long vaddr)
 	return false;
 }
 
-static inline int kmap_local_calc_idx(int idx)
-{
-	return idx + KM_MAX_IDX * smp_processor_id();
-}
-
 static pte_t *__kmap_pte;
 
 static pte_t *kmap_get_pte(unsigned long vaddr, int idx)