x86: add support for on-demand mappings

This makes x86 compatible with K_MEM_MAP_UNPAGED.

Signed-off-by: Nicolas Pitre <npitre@baylibre.com>
Authored by Nicolas Pitre on 2024-09-10 23:00:06 -04:00, committed by Anas Nashif
parent 56dcafece8
commit 66853f4307
3 changed files with 21 additions and 0 deletions
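
For context, a minimal usage sketch of the flag named in the commit message (an illustration, not part of this commit; k_mem_map()/k_mem_unmap(), K_MEM_PERM_RW and CONFIG_MMU_PAGE_SIZE are the standard Zephyr memory-mapping API): K_MEM_MAP_UNPAGED asks k_mem_map() to reserve the virtual range without committing data pages, so each page is only populated when it is first touched.

#include <zephyr/kernel.h>

static void unpaged_map_demo(void)
{
	/* Reserve 16 pages of anonymous RW memory without paging anything in. */
	size_t size = 16 * CONFIG_MMU_PAGE_SIZE;
	uint8_t *buf = k_mem_map(size, K_MEM_PERM_RW | K_MEM_MAP_UNPAGED);

	if (buf == NULL) {
		return;
	}

	/* The first access to each page takes a page fault; the demand-paging
	 * machinery then allocates and fills the backing frame on the spot.
	 */
	buf[0] = 0xaa;

	k_mem_unmap(buf, size);
}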

@@ -92,6 +92,7 @@ config X86
	select ARCH_HAS_TIMING_FUNCTIONS
	select ARCH_HAS_THREAD_LOCAL_STORAGE
	select ARCH_HAS_DEMAND_PAGING if !X86_64
	select ARCH_HAS_DEMAND_MAPPING if ARCH_HAS_DEMAND_PAGING
	select IRQ_OFFLOAD_NESTED if IRQ_OFFLOAD
	select NEED_LIBC_MEM_PARTITION if USERSPACE && TIMING_FUNCTIONS \
		&& !BOARD_HAS_TIMING_FUNCTIONS \
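
The new select ties ARCH_HAS_DEMAND_MAPPING to demand paging support, which is what allows the kernel-level CONFIG_DEMAND_MAPPING option to be enabled on 32-bit x86. A hedged sketch of how portable code could gate on that option at compile time (IS_ENABLED() is the standard Zephyr idiom; the helper below is hypothetical, not from this patch):

#include <zephyr/kernel.h>

static void *map_anon_region(size_t size)
{
	uint32_t flags = K_MEM_PERM_RW;

	if (IS_ENABLED(CONFIG_DEMAND_MAPPING)) {
		/* Demand mapping available: only reserve the address range. */
		flags |= K_MEM_MAP_UNPAGED;
	}

	/* Without demand mapping, the same call commits all pages up front. */
	return k_mem_map(size, flags);
}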

@@ -1281,6 +1281,12 @@ static pentry_t flags_to_entry(uint32_t flags)
		entry_flags |= ENTRY_XD;
	}

	if (IS_ENABLED(CONFIG_DEMAND_MAPPING) && (flags & K_MEM_MAP_UNPAGED) != 0U) {
		/* same state as in arch_mem_page_out() */
		entry_flags &= ~MMU_P;
		entry_flags |= MMU_A;
	}

	return entry_flags;
}
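
An illustrative, self-contained restatement of the entry state produced by the hunk above (not code from the patch; the bit positions are the architectural x86 Present and Accessed bits that MMU_P and MMU_A stand for): the unpaged mapping is installed not-present, so the very first access faults, and A is set to mirror the state arch_mem_page_out() leaves behind.

#include <assert.h>
#include <stdint.h>

typedef uint64_t pentry_t;          /* PAE-sized entry, for illustration only */

#define MMU_P ((pentry_t)1 << 0)    /* Present */
#define MMU_A ((pentry_t)1 << 5)    /* Accessed */

/* Not present but Accessed: the same shape as a paged-out entry. */
static int entry_is_unpaged_or_evicted(pentry_t entry)
{
	return (entry & MMU_P) == 0 && (entry & MMU_A) != 0;
}

int main(void)
{
	pentry_t entry_flags = MMU_P;   /* pretend flags_to_entry() started here */

	/* what the K_MEM_MAP_UNPAGED branch above does */
	entry_flags &= ~MMU_P;
	entry_flags |= MMU_A;

	assert(entry_is_unpaged_or_evicted(entry_flags));
	return 0;
}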

@@ -32,6 +32,20 @@
/* Use a PAT bit for this one since it's never set in a mapped PTE */
#define ARCH_DATA_PAGE_NOT_MAPPED ((uintptr_t)BIT(7))
/*
 * Special unpaged "location" tags. These are defined as the highest possible
 * PTE address values, unlikely to conflict with backing store locations.
 * As noted in arch_page_info_get(), on PAE systems, where pentry_t is larger
 * than uintptr_t, these values get truncated.
 */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define ARCH_UNPAGED_ANON_ZERO ((uintptr_t)0x07FFFFFFFFFFF000ULL)
#define ARCH_UNPAGED_ANON_UNINIT ((uintptr_t)0x07FFFFFFFFFFE000ULL)
#else
#define ARCH_UNPAGED_ANON_ZERO ((uintptr_t)0xFFFFF000U)
#define ARCH_UNPAGED_ANON_UNINIT ((uintptr_t)0xFFFFE000U)
#endif
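
A sketch of how the demand-paging fault path could consume these tags (an assumption about usage, not code from this patch; populate_frame() and backing_store_read() are hypothetical stand-ins): when a page mapped with K_MEM_MAP_UNPAGED is first touched, the stored "location" is one of the tags above rather than a real backing-store token, and the frame is filled accordingly.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Non-PAE tag values from the #else branch above, repeated so this stands alone. */
#define ARCH_UNPAGED_ANON_ZERO   ((uintptr_t)0xFFFFF000U)
#define ARCH_UNPAGED_ANON_UNINIT ((uintptr_t)0xFFFFE000U)

/* Hypothetical stand-in for a real backing-store read. */
static void backing_store_read(void *frame, size_t size, uintptr_t location)
{
	(void)frame; (void)size; (void)location;
}

static void populate_frame(void *frame, size_t size, uintptr_t location)
{
	if (location == ARCH_UNPAGED_ANON_ZERO) {
		/* plain anonymous page: hand out zeroes */
		memset(frame, 0, size);
	} else if (location == ARCH_UNPAGED_ANON_UNINIT) {
		/* mapped with K_MEM_MAP_UNINIT as well: leave the frame as-is */
	} else {
		/* a genuine backing-store location: page the data back in */
		backing_store_read(frame, size, location);
	}
}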
/* Always true with 32-bit page tables, don't enable
* CONFIG_EXECUTE_XOR_WRITE and expect it to work for you
*/