arc/mm: support __HAVE_ARCH_PTE_SWP_EXCLUSIVE

Let's support __HAVE_ARCH_PTE_SWP_EXCLUSIVE by using bit 5, which is still
unused.  The only important part seems to be not using _PAGE_PRESENT
(bit 9).
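
To illustrate the encoding, here is a minimal stand-alone sketch (user-space
C, not kernel code) of the swap PTE layout this patch relies on: the type in
bits 4..0, the borrowed exclusive marker in bit 5 (_PAGE_DIRTY),
_PAGE_PRESENT (bit 9) kept zero, and the swap offset starting at bit 13.
The helper name swp_pte() and the test values are made up for the example:

/*
 * Stand-alone sketch (not kernel code) of the swap PTE layout used here:
 * type in bits 4..0, the exclusive marker in bit 5 (_PAGE_DIRTY),
 * _PAGE_PRESENT (bit 9) kept zero, and the swap offset from bit 13 up.
 */
#include <assert.h>
#include <stdio.h>

#define _PAGE_DIRTY		(1UL << 5)
#define _PAGE_PRESENT		(1UL << 9)
#define _PAGE_SWP_EXCLUSIVE	_PAGE_DIRTY

/* Mirrors the __swp_entry() encoding in the header: type | (off << 13). */
static unsigned long swp_pte(unsigned long type, unsigned long off)
{
	return (type & 0x1f) | (off << 13);
}

int main(void)
{
	unsigned long pte = swp_pte(3, 0x1234);

	/* A swap PTE must never look valid to the TLB handling code. */
	assert(!(pte & _PAGE_PRESENT));

	/* Mark, test and clear the exclusive bit, as the new helpers do. */
	pte |= _PAGE_SWP_EXCLUSIVE;
	assert(pte & _PAGE_SWP_EXCLUSIVE);
	pte &= ~_PAGE_SWP_EXCLUSIVE;
	assert(!(pte & _PAGE_SWP_EXCLUSIVE));

	/* Type and offset survive the round trip untouched. */
	assert((pte & 0x1f) == 3 && (pte >> 13) == 0x1234);
	printf("swap PTE layout checks out\n");
	return 0;
}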

Link: https://lkml.kernel.org/r/20230113171026.582290-4-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

@@ -26,6 +26,9 @@
 #define _PAGE_GLOBAL (1 << 8) /* ASID agnostic (H) */
 #define _PAGE_PRESENT (1 << 9) /* PTE/TLB Valid (H) */
 
+/* We borrow bit 5 to store the exclusive marker in swap PTEs. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_DIRTY
+
 #ifdef CONFIG_ARC_MMU_V4
 #define _PAGE_HW_SZ (1 << 10) /* Normal/super (H) */
 #else
@@ -106,9 +109,18 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *ptep);
 
-/* Encode swap {type,off} tuple into PTE
- * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
- * PAGE_PRESENT is zero in a PTE holding swap "identifier"
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTEs:
+ *
+ *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
+ *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ *   <-------------- offset -------------> <--- zero --> E < type ->
+ *
+ *   E is the exclusive marker that is not stored in swap entries.
+ *   The zero'ed bits include _PAGE_PRESENT.
  */
 #define __swp_entry(type, off) ((swp_entry_t) \
 				{ ((type) & 0x1f) | ((off) << 13) })
@@ -120,6 +132,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x) ((pte_t) { (x).val })
 
+#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
+static inline int pte_swp_exclusive(pte_t pte)
+{
+	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+PTE_BIT_FUNC(swp_mkexclusive, |= (_PAGE_SWP_EXCLUSIVE));
+PTE_BIT_FUNC(swp_clear_exclusive, &= ~(_PAGE_SWP_EXCLUSIVE));
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #include <asm/hugepage.h>
 #endif
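
For reference, the two PTE_BIT_FUNC() invocations added above rely on ARC's
existing helper-generating macro in the same header; under its usual
definition (wrapping "pte_val(pte) <op>; return pte;" in a static inline)
they expand to pte_swp_mkexclusive() and pte_swp_clear_exclusive(), the
helpers generic mm code expects once __HAVE_ARCH_PTE_SWP_EXCLUSIVE is
defined.  The stand-alone sketch below only approximates that expansion;
pte_t and pte_val() are stubbed so it compiles on its own:

/*
 * Stand-alone approximation of what the two PTE_BIT_FUNC() lines above
 * generate.  pte_t and pte_val() are stubbed here only so this compiles
 * by itself; in the kernel they come from the arch headers.
 */
typedef struct { unsigned long pte; } pte_t;
#define pte_val(x)		((x).pte)
#define _PAGE_SWP_EXCLUSIVE	(1UL << 5)	/* _PAGE_DIRTY, as in the patch */

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	pte_val(pte) |= (_PAGE_SWP_EXCLUSIVE);
	return pte;
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SWP_EXCLUSIVE);
	return pte;
}

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

int main(void)
{
	pte_t pte = { 0 };

	pte = pte_swp_mkexclusive(pte);
	return pte_swp_exclusive(pte) ? 0 : 1;	/* exit 0: marker round-trips */
}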