/*
 * Xtensa MMU support
 *
 * Private data declarations
 *
 * Copyright (c) 2022 Intel Corporation
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_
#define ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_

#include <stdint.h>
#include <xtensa/config/core-isa.h>
#include <zephyr/toolchain.h>
#include <zephyr/sys/util_macro.h>

/**
 * @defgroup xtensa_mmu_internal_apis Xtensa Memory Management Unit (MMU) Internal APIs
 * @ingroup xtensa_mmu_apis
 * @{
 */

/** Mask for VPN in PTE */
#define XTENSA_MMU_PTE_VPN_MASK 0xFFFFF000U

/** Mask for PPN in PTE */
#define XTENSA_MMU_PTE_PPN_MASK 0xFFFFF000U

/** Mask for attributes in PTE */
#define XTENSA_MMU_PTE_ATTR_MASK 0x0000000FU

/** Mask for cache mode in PTE */
#define XTENSA_MMU_PTE_ATTR_CACHED_MASK 0x0000000CU

/** Mask used to figure out which L1 page table to use */
#define XTENSA_MMU_L1_MASK 0x3FF00000U

/** Mask used to figure out which L2 page table to use */
#define XTENSA_MMU_L2_MASK 0x3FFFFFU

/** Mask to extract the page table base from the PTEVADDR register */
#define XTENSA_MMU_PTEBASE_MASK 0xFFC00000

/** Number of bits to shift for PPN in PTE */
#define XTENSA_MMU_PTE_PPN_SHIFT 12U

/** Mask for ring in PTE */
#define XTENSA_MMU_PTE_RING_MASK 0x00000030U

/** Number of bits to shift for ring in PTE */
#define XTENSA_MMU_PTE_RING_SHIFT 4U

/** Number of bits to shift for SW reserved area in PTE */
#define XTENSA_MMU_PTE_SW_SHIFT 6U

/** Mask for SW bits in PTE */
#define XTENSA_MMU_PTE_SW_MASK 0x00000FC0U

/**
 * Internal bit just used to indicate that the attr field must
 * be set in the SW bits too. It is used later when duplicating the
 * kernel page tables.
 */
#define XTENSA_MMU_PTE_ATTR_ORIGINAL BIT(31)

/** Construct a page table entry (PTE) */
#define XTENSA_MMU_PTE(paddr, ring, sw, attr) \
        (((paddr) & XTENSA_MMU_PTE_PPN_MASK) | \
         (((ring) << XTENSA_MMU_PTE_RING_SHIFT) & XTENSA_MMU_PTE_RING_MASK) | \
         (((sw) << XTENSA_MMU_PTE_SW_SHIFT) & XTENSA_MMU_PTE_SW_MASK) | \
         ((attr) & XTENSA_MMU_PTE_ATTR_MASK))
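
/*
 * Usage sketch (illustrative; the physical address and attribute value
 * are hypothetical): construct a PTE mapping the physical page at
 * 0x20000000 into the kernel ring, with no SW bits set and a
 * core-specific attribute value attr:
 *
 *     uint32_t pte = XTENSA_MMU_PTE(0x20000000, XTENSA_MMU_KERNEL_RING, 0, attr);
 */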

/** Get the attributes from a PTE */
#define XTENSA_MMU_PTE_ATTR_GET(pte) \
        ((pte) & XTENSA_MMU_PTE_ATTR_MASK)

/** Set the attributes in a PTE */
#define XTENSA_MMU_PTE_ATTR_SET(pte, attr) \
        (((pte) & ~XTENSA_MMU_PTE_ATTR_MASK) | ((attr) & XTENSA_MMU_PTE_ATTR_MASK))

/** Set the SW field in a PTE */
#define XTENSA_MMU_PTE_SW_SET(pte, sw) \
        (((pte) & ~XTENSA_MMU_PTE_SW_MASK) | ((sw) << XTENSA_MMU_PTE_SW_SHIFT))

/** Get the SW field from a PTE */
#define XTENSA_MMU_PTE_SW_GET(pte) \
        (((pte) & XTENSA_MMU_PTE_SW_MASK) >> XTENSA_MMU_PTE_SW_SHIFT)

/** Set the ring in a PTE */
#define XTENSA_MMU_PTE_RING_SET(pte, ring) \
        (((pte) & ~XTENSA_MMU_PTE_RING_MASK) | \
         ((ring) << XTENSA_MMU_PTE_RING_SHIFT))

/** Get the ring from a PTE */
#define XTENSA_MMU_PTE_RING_GET(pte) \
        (((pte) & XTENSA_MMU_PTE_RING_MASK) >> XTENSA_MMU_PTE_RING_SHIFT)

/** Get the ASID from the RASID register corresponding to the ring in a PTE */
#define XTENSA_MMU_PTE_ASID_GET(pte, rasid) \
        (((rasid) >> ((((pte) & XTENSA_MMU_PTE_RING_MASK) \
                       >> XTENSA_MMU_PTE_RING_SHIFT) * 8)) & 0xFF)

/** Calculate the L2 page table position from a virtual address */
#define XTENSA_MMU_L2_POS(vaddr) \
        (((vaddr) & XTENSA_MMU_L2_MASK) >> 12U)

/** Calculate the L1 page table position from a virtual address */
#define XTENSA_MMU_L1_POS(vaddr) \
        ((vaddr) >> 22U)
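
/*
 * Worked example (hypothetical address): for vaddr = 0x80402000,
 * XTENSA_MMU_L1_POS(vaddr) = 0x80402000 >> 22 = 0x201 and
 * XTENSA_MMU_L2_POS(vaddr) = (0x80402000 & 0x3FFFFF) >> 12 = 0x2,
 * i.e. L1 entry 0x201 points at the L2 table whose entry 2 maps
 * this page.
 */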

/**
 * @def XTENSA_MMU_PAGE_TABLE_ATTR
 *
 * PTE attributes for entries in the L1 page table. These entries should
 * never be writable, and may be cached only in non-SMP contexts.
 */
#if CONFIG_MP_MAX_NUM_CPUS == 1
#define XTENSA_MMU_PAGE_TABLE_ATTR XTENSA_MMU_CACHED_WB
#else
#define XTENSA_MMU_PAGE_TABLE_ATTR 0
#endif

/** This ASID is shared between all domains and kernel. */
#define XTENSA_MMU_SHARED_ASID 255

/** Fixed data TLB way to map the page table */
#define XTENSA_MMU_PTE_WAY 7

/** Fixed data TLB way to map the vecbase */
#define XTENSA_MMU_VECBASE_WAY 8

/** Ring used for kernel mappings (ring field in the PTE) */
#define XTENSA_MMU_KERNEL_RING 0

/** Ring used for userspace mappings (ring field in the PTE) */
#define XTENSA_MMU_USER_RING 2

/** Ring value for XTENSA_MMU_SHARED_ASID */
#define XTENSA_MMU_SHARED_RING 3

/** Number of data TLB ways [0-9] */
#define XTENSA_MMU_NUM_DTLB_WAYS 10

/** Number of instruction TLB ways [0-6] */
#define XTENSA_MMU_NUM_ITLB_WAYS 7

/** Number of auto-refill ways */
#define XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS 4

/** Indicate PTE is illegal. */
#define XTENSA_MMU_PTE_ILLEGAL (BIT(3) | BIT(2))

/**
 * PITLB HIT bit.
 *
 * For more information see
 * Xtensa Instruction Set Architecture (ISA) Reference Manual
 * 4.6.5.7 Formats for Probing MMU Option TLB Entries
 */
#define XTENSA_MMU_PITLB_HIT BIT(3)

/**
 * PDTLB HIT bit.
 *
 * For more information see
 * Xtensa Instruction Set Architecture (ISA) Reference Manual
 * 4.6.5.7 Formats for Probing MMU Option TLB Entries
 */
#define XTENSA_MMU_PDTLB_HIT BIT(4)

/**
 * Virtual address where the page table is mapped
 */
#define XTENSA_MMU_PTEVADDR CONFIG_XTENSA_MMU_PTEVADDR

/**
 * Find the PTE entry address of a given vaddr.
 *
 * For example, assuming PTEVADDR is 0xE0000000,
 * the page table spans 0xE0000000 - 0xE03FFFFF:
 *
 * the PTE for address 0x0 is at 0xE0000000
 * the PTE for address 0x1000 is at 0xE0000004
 * .....
 * the PTE for address 0xE0000000 (where the page table itself is) is at 0xE0380000
 *
 * Generalizing it, any PTE virtual address can be calculated this way:
 *
 * PTE_ENTRY_ADDRESS = PTEVADDR + ((VADDR / 4096) * 4)
 */
#define XTENSA_MMU_PTE_ENTRY_VADDR(base, vaddr) \
        ((base) + (((vaddr) / KB(4)) * 4))
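
/*
 * A quick sanity check against the worked example above (addresses are
 * hypothetical):
 *
 *     uint32_t *pte = (uint32_t *)XTENSA_MMU_PTE_ENTRY_VADDR(0xE0000000, 0x1000);
 *
 * Here pte evaluates to (uint32_t *)0xE0000004, the entry for the page at 0x1000.
 */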

/**
 * Get ASID for a given ring from RASID register.
 *
 * RASID contains four 8-bit ASIDs, one per ring.
 */
#define XTENSA_MMU_RASID_ASID_GET(rasid, ring) \
        (((rasid) >> ((ring) * 8)) & 0xff)
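
/*
 * Example (hypothetical register value): with rasid = 0xFF030201 the
 * per-ring ASIDs are ring 0 -> 0x01, ring 1 -> 0x02, ring 2 -> 0x03
 * and ring 3 -> 0xFF, so XTENSA_MMU_RASID_ASID_GET(0xFF030201, 2)
 * evaluates to 0x03.
 */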

/**
 * @brief Set RASID register.
 *
 * @param rasid Value to be set.
 */
static ALWAYS_INLINE void xtensa_rasid_set(uint32_t rasid)
{
        __asm__ volatile("wsr %0, rasid\n\t"
                         "isync\n" : : "a"(rasid));
}

/**
 * @brief Get RASID register.
 *
 * @return Register value.
 */
static ALWAYS_INLINE uint32_t xtensa_rasid_get(void)
{
        uint32_t rasid;

        __asm__ volatile("rsr %0, rasid" : "=a"(rasid));
        return rasid;
}

/**
 * @brief Set the ASID for a particular ring in the RASID register.
 *
 * @param asid ASID to be set.
 * @param ring Ring whose ASID is to be changed.
 */
static ALWAYS_INLINE void xtensa_rasid_asid_set(uint8_t asid, uint8_t ring)
{
        uint32_t rasid = xtensa_rasid_get();

        rasid = (rasid & ~(0xffU << (ring * 8))) | ((uint32_t)asid << (ring * 8));

        xtensa_rasid_set(rasid);
}
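
/*
 * Usage sketch: install the shared ASID into the shared ring, leaving
 * the ASIDs of the other three rings untouched:
 *
 *     xtensa_rasid_asid_set(XTENSA_MMU_SHARED_ASID, XTENSA_MMU_SHARED_RING);
 */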

/**
 * @brief Invalidate a particular instruction TLB entry.
 *
 * @param entry Entry to be invalidated.
 */
static ALWAYS_INLINE void xtensa_itlb_entry_invalidate(uint32_t entry)
{
        __asm__ volatile("iitlb %0\n\t"
                         : : "a" (entry));
}

/**
 * @brief Synchronously invalidate a particular instruction TLB entry.
 *
 * @param entry Entry to be invalidated.
 */
static ALWAYS_INLINE void xtensa_itlb_entry_invalidate_sync(uint32_t entry)
{
        __asm__ volatile("iitlb %0\n\t"
                         "isync\n\t"
                         : : "a" (entry));
}

/**
 * @brief Synchronously invalidate a particular data TLB entry.
 *
 * @param entry Entry to be invalidated.
 */
static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate_sync(uint32_t entry)
{
        __asm__ volatile("idtlb %0\n\t"
                         "dsync\n\t"
                         : : "a" (entry));
}

/**
 * @brief Invalidate a particular data TLB entry.
 *
 * @param entry Entry to be invalidated.
 */
static ALWAYS_INLINE void xtensa_dtlb_entry_invalidate(uint32_t entry)
{
        __asm__ volatile("idtlb %0\n\t"
                         : : "a" (entry));
}

/**
 * @brief Synchronously write to a particular data TLB entry.
 *
 * @param pte Value to be written.
 * @param entry Entry to be written.
 */
static ALWAYS_INLINE void xtensa_dtlb_entry_write_sync(uint32_t pte, uint32_t entry)
{
        __asm__ volatile("wdtlb %0, %1\n\t"
                         "dsync\n\t"
                         : : "a" (pte), "a"(entry));
}
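
/*
 * Usage sketch (vaddr and pte are hypothetical): pin a mapping into the
 * fixed page-table way. The entry operand encodes the VPN in its upper
 * bits and the way number in its low bits:
 *
 *     uint32_t entry = ((uint32_t)vaddr & XTENSA_MMU_PTE_VPN_MASK) | XTENSA_MMU_PTE_WAY;
 *
 *     xtensa_dtlb_entry_write_sync(pte, entry);
 */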

/**
 * @brief Write to a particular data TLB entry.
 *
 * @param pte Value to be written.
 * @param entry Entry to be written.
 */
static ALWAYS_INLINE void xtensa_dtlb_entry_write(uint32_t pte, uint32_t entry)
{
        __asm__ volatile("wdtlb %0, %1\n\t"
                         : : "a" (pte), "a"(entry));
}

/**
 * @brief Write to a particular instruction TLB entry.
 *
 * @param pte Value to be written.
 * @param entry Entry to be written.
 */
static ALWAYS_INLINE void xtensa_itlb_entry_write(uint32_t pte, uint32_t entry)
{
        __asm__ volatile("witlb %0, %1\n\t"
                         : : "a" (pte), "a"(entry));
}

/**
 * @brief Synchronously write to a particular instruction TLB entry.
 *
 * @param pte Value to be written.
 * @param entry Entry to be written.
 */
static ALWAYS_INLINE void xtensa_itlb_entry_write_sync(uint32_t pte, uint32_t entry)
{
        __asm__ volatile("witlb %0, %1\n\t"
                         "isync\n\t"
                         : : "a" (pte), "a"(entry));
}

/**
 * @brief Invalidate all autorefill DTLB and ITLB entries.
 *
 * This should be used carefully since it invalidates every autorefill
 * entry in both the data and instruction TLBs. At least two pages, the
 * current code page and the current stack, will be repopulated by this
 * code as it returns.
 *
 * This needs to be called in any circumstance where the mappings for
 * a previously-used page table change. It does not need to be called
 * on context switch, where ASID tagging isolates entries for us.
 */
static inline void xtensa_tlb_autorefill_invalidate(void)
{
        uint8_t way, i, entries;

        entries = BIT(MAX(XCHAL_ITLB_ARF_ENTRIES_LOG2,
                          XCHAL_DTLB_ARF_ENTRIES_LOG2));

        for (way = 0; way < XTENSA_MMU_NUM_TLB_AUTOREFILL_WAYS; way++) {
                for (i = 0; i < entries; i++) {
                        uint32_t entry = way + (i << XTENSA_MMU_PTE_PPN_SHIFT);

                        xtensa_dtlb_entry_invalidate(entry);
                        xtensa_itlb_entry_invalidate(entry);
                }
        }
        __asm__ volatile("isync");
}

/**
 * @brief Set the page tables.
 *
 * The page tables are set by writing their address into the PTEVADDR
 * special register.
 *
 * @param ptables The page tables address (virtual address)
 */
static ALWAYS_INLINE void xtensa_ptevaddr_set(void *ptables)
{
        __asm__ volatile("wsr.ptevaddr %0" : : "a"((uint32_t)ptables));
}

/**
 * @brief Get the current page tables.
 *
 * The page tables address is obtained by reading the PTEVADDR special
 * register.
 *
 * @return The page tables address (virtual address)
 */
static ALWAYS_INLINE void *xtensa_ptevaddr_get(void)
{
        uint32_t ptables;

        __asm__ volatile("rsr.ptevaddr %0" : "=a" (ptables));

        return (void *)(ptables & XTENSA_MMU_PTEBASE_MASK);
}

/**
 * @brief Get the virtual address associated with a particular data TLB entry.
 *
 * @param entry TLB entry to be queried.
 */
static ALWAYS_INLINE void *xtensa_dtlb_vaddr_read(uint32_t entry)
{
        uint32_t vaddr;

        __asm__ volatile("rdtlb0 %0, %1\n\t" : "=a" (vaddr) : "a" (entry));
        return (void *)(vaddr & XTENSA_MMU_PTE_VPN_MASK);
}

/**
 * @brief Get the physical address associated with a particular data TLB entry.
 *
 * @param entry TLB entry to be queried.
 */
static ALWAYS_INLINE uint32_t xtensa_dtlb_paddr_read(uint32_t entry)
{
        uint32_t paddr;

        __asm__ volatile("rdtlb1 %0, %1\n\t" : "=a" (paddr) : "a" (entry));
        return (paddr & XTENSA_MMU_PTE_PPN_MASK);
}

/**
 * @brief Get the virtual address associated with a particular instruction TLB entry.
 *
 * @param entry TLB entry to be queried.
 */
static ALWAYS_INLINE void *xtensa_itlb_vaddr_read(uint32_t entry)
{
        uint32_t vaddr;

        __asm__ volatile("ritlb0 %0, %1\n\t" : "=a" (vaddr), "+a" (entry));
        return (void *)(vaddr & XTENSA_MMU_PTE_VPN_MASK);
}

/**
 * @brief Get the physical address associated with a particular instruction TLB entry.
 *
 * @param entry TLB entry to be queried.
 */
static ALWAYS_INLINE uint32_t xtensa_itlb_paddr_read(uint32_t entry)
{
        uint32_t paddr;

        __asm__ volatile("ritlb1 %0, %1\n\t" : "=a" (paddr), "+a" (entry));
        return (paddr & XTENSA_MMU_PTE_PPN_MASK);
}

/**
 * @brief Probe for instruction TLB entry from a virtual address.
 *
 * @param vaddr Virtual address.
 *
 * @return Result of the PITLB instruction.
 */
static ALWAYS_INLINE uint32_t xtensa_itlb_probe(void *vaddr)
{
        uint32_t ret;

        __asm__ __volatile__("pitlb %0, %1\n\t" : "=a" (ret) : "a" ((uint32_t)vaddr));
        return ret;
}

/**
 * @brief Probe for data TLB entry from a virtual address.
 *
 * @param vaddr Virtual address.
 *
 * @return Result of the PDTLB instruction.
 */
static ALWAYS_INLINE uint32_t xtensa_dtlb_probe(void *vaddr)
{
        uint32_t ret;

        __asm__ __volatile__("pdtlb %0, %1\n\t" : "=a" (ret) : "a" ((uint32_t)vaddr));
        return ret;
}

/**
 * @brief Invalidate an instruction TLB entry associated with a virtual address.
 *
 * This invalidates the instruction TLB entry associated with a virtual
 * address if such an entry exists; otherwise, it does nothing.
 *
 * @param vaddr Virtual address.
 */
static inline void xtensa_itlb_vaddr_invalidate(void *vaddr)
{
        uint32_t entry = xtensa_itlb_probe(vaddr);

        if (entry & XTENSA_MMU_PITLB_HIT) {
                xtensa_itlb_entry_invalidate_sync(entry);
        }
}

/**
 * @brief Invalidate a data TLB entry associated with a virtual address.
 *
 * This invalidates the data TLB entry associated with a virtual address
 * if such an entry exists; otherwise, it does nothing.
 *
 * @param vaddr Virtual address.
 */
static inline void xtensa_dtlb_vaddr_invalidate(void *vaddr)
{
        uint32_t entry = xtensa_dtlb_probe(vaddr);

        if (entry & XTENSA_MMU_PDTLB_HIT) {
                xtensa_dtlb_entry_invalidate_sync(entry);
        }
}

/**
 * @brief Tell the hardware to use a page table for the very first time after boot.
 *
 * @param l1_page Pointer to the page table to be used.
 */
void xtensa_init_paging(uint32_t *l1_page);

/**
 * @brief Switch to a new page table.
 *
 * @param asid The ASID of the memory domain associated with the incoming page table.
 * @param l1_page Page table to be switched to.
 */
void xtensa_set_paging(uint32_t asid, uint32_t *l1_page);
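
/*
 * Typical flow (sketch; the table and ASID names are hypothetical):
 * install the kernel L1 table once during early boot, then switch
 * tables whenever a different memory domain is scheduled:
 *
 *     xtensa_init_paging(kernel_l1_page_table);
 *     ...
 *     xtensa_set_paging(domain_asid, domain_l1_page_table);
 */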

/**
 * @}
 */

#endif /* ZEPHYR_ARCH_XTENSA_XTENSA_MMU_PRIV_H_ */