asm-generic/tlb, arch: Provide generic VIPT cache flush
The one obvious thing SH and ARM want is a sensible default for
tlb_start_vma(). (See also: https://lkml.org/lkml/2004/1/15/6 )

Avoid all VIPT architectures providing their own tlb_start_vma()
implementation and rely on architectures to provide a no-op
flush_cache_range() when it is not relevant.

This patch makes tlb_start_vma() default to flush_cache_range(), which
should be right and sufficient. The only exceptions that I found were
(oddly):

 - m68k-mmu
 - sparc64
 - unicore

Those architectures appear to have flush_cache_range(), but their
current tlb_start_vma() does not call it.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will.deacon@arm.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Miller <davem@davemloft.net>
Cc: Guan Xuetao <gxt@pku.edu.cn>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@surriel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent ed6a79352c
commit e7fd28a706
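For illustration, the pattern the commit message describes can be sketched in plain C. This is a user-space mock-up, not code from the patch: the struct and macro names mirror the kernel's, and the no-op flush_cache_range() stands in for a non-VIPT architecture's <asm/cacheflush.h>.

/* User-space sketch of the opt-out pattern; not kernel code.
 * A non-aliasing (non-VIPT) architecture provides a no-op
 * flush_cache_range(), so the generic tlb_start_vma() compiles away.
 */
#include <stdio.h>

struct vm_area_struct { unsigned long vm_start, vm_end; };
struct mmu_gather { int fullmm; };

/* stand-in for a non-VIPT arch's cacheflush.h: nothing to flush */
#define flush_cache_range(vma, start, end) do { } while (0)

/* the new generic default modeled on include/asm-generic/tlb.h */
#define tlb_start_vma(tlb, vma)						\
	do {								\
		if (!(tlb)->fullmm)					\
			flush_cache_range(vma, (vma)->vm_start, (vma)->vm_end); \
	} while (0)

int main(void)
{
	struct vm_area_struct vma = { 0x1000, 0x2000 };
	struct mmu_gather tlb = { .fullmm = 0 };

	tlb_start_vma(&tlb, &vma);	/* expands to an empty statement here */
	printf("tearing down [%#lx, %#lx)\n", vma.vm_start, vma.vm_end);
	return 0;
}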
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -23,15 +23,6 @@ do {						\
	 *
	 * Note, read http://lkml.org/lkml/2004/1/15/6
	 */
-#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
-#define tlb_start_vma(tlb, vma)
-#else
-#define tlb_start_vma(tlb, vma)						\
-do {									\
-	if (!tlb->fullmm)						\
-		flush_cache_range(vma, vma->vm_start, vma->vm_end);	\
-} while(0)
-#endif
-
 #define tlb_end_vma(tlb, vma)						\
 do {									\
--- a/arch/mips/include/asm/tlb.h
+++ b/arch/mips/include/asm/tlb.h
@@ -5,15 +5,6 @@
 #include <asm/cpu-features.h>
 #include <asm/mipsregs.h>
 
-/*
- * MIPS doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for area to be unmapped.
- */
-#define tlb_start_vma(tlb, vma)					\
-	do {							\
-		if (!tlb->fullmm)				\
-			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-	} while (0)
 #define tlb_end_vma(tlb, vma) do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
--- a/arch/nds32/include/asm/tlb.h
+++ b/arch/nds32/include/asm/tlb.h
@@ -4,12 +4,6 @@
 #ifndef __ASMNDS32_TLB_H
 #define __ASMNDS32_TLB_H
 
-#define tlb_start_vma(tlb,vma)						\
-	do {								\
-		if (!tlb->fullmm)					\
-			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-	} while (0)
-
 #define tlb_end_vma(tlb,vma)				\
 	do {						\
 		if(!tlb->fullmm)			\
--- a/arch/nios2/include/asm/tlb.h
+++ b/arch/nios2/include/asm/tlb.h
@@ -15,16 +15,6 @@
 
 extern void set_mmu_pid(unsigned long pid);
 
-/*
- * NiosII doesn't need any special per-pte or per-vma handling, except
- * we need to flush cache for the area to be unmapped.
- */
-#define tlb_start_vma(tlb, vma)					\
-	do {							\
-		if (!tlb->fullmm)				\
-			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-	} while (0)
-
 #define tlb_end_vma(tlb, vma)	do { } while (0)
 #define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
 
--- a/arch/parisc/include/asm/tlb.h
+++ b/arch/parisc/include/asm/tlb.h
@@ -7,11 +7,6 @@ do {	if ((tlb)->fullmm)		\
 		flush_tlb_mm((tlb)->mm);\
 } while (0)
 
-#define tlb_start_vma(tlb, vma) \
-do {	if (!(tlb)->fullmm)	\
-		flush_cache_range(vma, vma->vm_start, vma->vm_end); \
-} while (0)
-
 #define tlb_end_vma(tlb, vma)	\
 do {	if (!(tlb)->fullmm)	\
 		flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
--- a/arch/sparc/include/asm/tlb_32.h
+++ b/arch/sparc/include/asm/tlb_32.h
@@ -2,11 +2,6 @@
 #ifndef _SPARC_TLB_H
 #define _SPARC_TLB_H
 
-#define tlb_start_vma(tlb, vma) \
-do {								\
-	flush_cache_range(vma, vma->vm_start, vma->vm_end);	\
-} while (0)
-
 #define tlb_end_vma(tlb, vma) \
 do {								\
 	flush_tlb_range(vma, vma->vm_start, vma->vm_end);	\
--- a/arch/xtensa/include/asm/tlb.h
+++ b/arch/xtensa/include/asm/tlb.h
@@ -16,19 +16,10 @@
 
 #if (DCACHE_WAY_SIZE <= PAGE_SIZE)
 
-/* Note, read http://lkml.org/lkml/2004/1/15/6 */
-
-# define tlb_start_vma(tlb,vma)			do { } while (0)
 # define tlb_end_vma(tlb,vma)			do { } while (0)
 
 #else
 
-# define tlb_start_vma(tlb, vma)					      \
-	do {								      \
-		if (!tlb->fullmm)					      \
-			flush_cache_range(vma, vma->vm_start, vma->vm_end);   \
-	} while(0)
-
 # define tlb_end_vma(tlb, vma)						      \
 	do {								      \
 		if (!tlb->fullmm)					      \
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -19,6 +19,7 @@
 #include <linux/swap.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #ifdef CONFIG_MMU
 
@@ -356,17 +357,19 @@ static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
  * the vmas are adjusted to only cover the region to be torn down.
  */
 #ifndef tlb_start_vma
-#define tlb_start_vma(tlb, vma) do { } while (0)
+#define tlb_start_vma(tlb, vma)						\
+	do {								\
+		if (!tlb->fullmm)					\
+			flush_cache_range(vma, vma->vm_start, vma->vm_end); \
+	} while (0)
 #endif
 
-#define __tlb_end_vma(tlb, vma)					\
-	do {							\
-		if (!tlb->fullmm)				\
-			tlb_flush_mmu_tlbonly(tlb);		\
-	} while (0)
-
 #ifndef tlb_end_vma
-#define tlb_end_vma	__tlb_end_vma
+#define tlb_end_vma(tlb, vma)						\
+	do {								\
+		if (!tlb->fullmm)					\
+			tlb_flush_mmu_tlbonly(tlb);			\
+	} while (0)
 #endif
 
 #ifndef __tlb_remove_tlb_entry
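A usage note (editorial addition, not part of the commit): because both generic definitions sit behind #ifndef guards, an architecture that still needs special behaviour overrides them by defining the macros before including the generic header. A hypothetical arch header, for illustration only:

/* hypothetical arch/foo/include/asm/tlb.h -- illustration only */
#ifndef __ASM_FOO_TLB_H
#define __ASM_FOO_TLB_H

/* opt out of the generic per-VMA hooks entirely */
#define tlb_start_vma(tlb, vma)		do { } while (0)
#define tlb_end_vma(tlb, vma)		do { } while (0)

#include <asm-generic/tlb.h>	/* its #ifndef guards now skip the defaults */

#endif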