This completes the implementation of shared memory support

Gregory Nutt 2014-09-24 09:27:17 -06:00
parent 7dcc39cf2b
commit 468097b3b9
4 changed files with 310 additions and 44 deletions

View File

@@ -88,18 +88,6 @@ extern "C"
* Public Function Prototypes
****************************************************************************/
/****************************************************************************
* Name: set_l2_entry
*
* Description:
* Set the L2 table entry as part of the initialization of the L2 Page
* table.
*
****************************************************************************/
void set_l2_entry(FAR uint32_t *l2table, uintptr_t paddr, uintptr_t vaddr,
uint32_t mmuflags);
/****************************************************************************
* Name: arm_addrenv_create_region
*

View File

@@ -40,9 +40,17 @@
#include <nuttx/config.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <nuttx/arch.h>
#include <nuttx/sched.h>
#include <nuttx/addrenv.h>
#include "mmu.h"
#include "cache.h"
#include "addrenv.h"
#include "pgalloc.h"
#if defined(CONFIG_BUILD_KERNEL) && defined(CONFIG_MM_SHM)
@@ -83,8 +91,134 @@
int up_shmat(FAR uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
{
#warning Missing logic
return -ENOSYS;
FAR struct tcb_s *tcb = sched_self();
FAR struct task_group_s *group;
FAR uintptr_t *l1entry;
FAR uint32_t *l2table;
irqstate_t flags;
uintptr_t paddr;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
uint32_t l1save;
#endif
unsigned int nmapped;
unsigned int shmndx;
shmvdbg("pages=%p npages=%d vaddr=%08lx\n",
pages, npages, (unsigned long)vaddr);
/* Sanity checks */
DEBUGASSERT(pages && npages > 0 && tcb && tcb->group);
DEBUGASSERT(vaddr >= CONFIG_ARCH_SHM_VBASE && vaddr < ARCH_SHM_VEND);
DEBUGASSERT(MM_ISALIGNED(vaddr));
group = tcb->group;
/* Loop until all pages have been mapped into the caller's address space. */
for (nmapped = 0; nmapped < npages; )
{
/* Get the shm[] index associated with the virtual address */
shmndx = (vaddr - CONFIG_ARCH_SHM_VBASE) >> SECTION_SHIFT;
/* Has a level 1 page table entry been created for this virtual address? */
l1entry = group->tg_addrenv.shm[shmndx];
if (l1entry == NULL)
{
/* No.. Allocate one physical page for the L2 page table */
paddr = mm_pgalloc(1);
if (!paddr)
{
return -ENOMEM;
}
DEBUGASSERT(MM_ISALIGNED(paddr));
/* We need to be more careful after we begin modifying
* global resources.
*/
flags = irqsave();
group->tg_addrenv.shm[shmndx] = (FAR uintptr_t *)paddr;
#ifdef CONFIG_ARCH_PGPOOL_MAPPING
/* Get the virtual address corresponding to the physical page
* address.
*/
l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
/* Temporarily map the page into the virtual address space */
l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
MMU_MEMFLAGS);
l2table = (FAR uint32_t *)
(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif
/* Initialize the page table */
memset(l2table, 0, ENTRIES_PER_L2TABLE * sizeof(uint32_t));
}
else
{
/* Get the physical address of the L2 page table from the L1 page
* table entry.
*/
paddr = (uintptr_t)l1entry & ~SECTION_MASK;
flags = irqsave();
#ifdef CONFIG_ARCH_PGPOOL_MAPPING
/* Get the virtual address corresponding to the physical page
* address.
*/
l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
/* Temporarily map the page into the virtual address space */
l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
MMU_MEMFLAGS);
l2table = (FAR uint32_t *)
(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif
}
/* Map the virtual address to this physical address */
DEBUGASSERT(get_l2_entry(l2table, vaddr) == 0);
paddr = *pages++;
set_l2_entry(l2table, paddr, vaddr, MMU_MEMFLAGS);
nmapped++;
vaddr += MM_PGSIZE;
/* Make sure that the initialized L2 table is flushed to physical
* memory.
*
* REVISIT: We could be smarter in doing this. Currently, we may
* flush the entire L2 page table numerous times.
*/
arch_flush_dcache((uintptr_t)l2table,
(uintptr_t)l2table +
ENTRIES_PER_L2TABLE * sizeof(uint32_t));
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
/* Restore the scratch section L1 page table entry */
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
irqrestore(flags);
}
return OK;
}
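The save/map/use/restore sequence around ARCH_SCRATCH_VBASE appears in both branches of up_shmat() (and again in up_shmdt() below). As a minimal sketch, that sequence could be factored into a helper like the one below; the name map_l2_scratch is hypothetical, but every primitive it calls is one already used in this commit:

static FAR uint32_t *map_l2_scratch(uintptr_t paddr, uint32_t *l1save)
{
  /* Remember whatever the scratch section currently maps */

  *l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);

  /* Map the 1MB section containing paddr into the scratch window */

  mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);

  /* The L2 table is now visible at the same offset within the window */

  return (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
}

The caller would then undo the mapping with mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save), exactly as both functions do after flushing the D-cache.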
/****************************************************************************
@@ -106,8 +240,103 @@ int up_shmat(FAR uintptr_t *pages, unsigned int npages, uintptr_t vaddr)
int up_shmdt(uintptr_t vaddr, unsigned int npages)
{
#warning Missing logic
return -ENOSYS;
FAR struct tcb_s *tcb = sched_self();
FAR struct task_group_s *group;
FAR uintptr_t *l1entry;
FAR uint32_t *l2table;
irqstate_t flags;
uintptr_t paddr;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
uint32_t l1save;
#endif
unsigned int nunmapped;
unsigned int shmndx;
shmvdbg("npages=%d vaddr=%08lx\n", npages, (unsigned long)vaddr);
/* Sanity checks */
DEBUGASSERT(npages > 0 && tcb && tcb->group);
DEBUGASSERT(vaddr >= CONFIG_ARCH_SHM_VBASE && vaddr < ARCH_SHM_VEND);
DEBUGASSERT(MM_ISALIGNED(vaddr));
group = tcb->group;
/* Loop until all pages have been unmapped from the caller's address
* space.
*/
for (nunmapped = 0; nunmapped < npages; )
{
/* Get the shm[] index associated with the virtual address */
shmndx = (vaddr - CONFIG_ARCH_SHM_VBASE) >> SECTION_SHIFT;
/* Get the level 1 page table entry for this virtual address */
l1entry = group->tg_addrenv.shm[shmndx];
DEBUGASSERT(l1entry != NULL);
/* Get the physical address of the L2 page table from the L1 page
* table entry.
*/
paddr = (uintptr_t)l1entry & ~SECTION_MASK;
flags = irqsave();
#ifdef CONFIG_ARCH_PGPOOL_MAPPING
/* Get the virtual address corresponding to the physical page
* address.
*/
l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
/* Temporarily map the page into the virtual address space */
l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE,
MMU_MEMFLAGS);
l2table = (FAR uint32_t *)
(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif
/* Unmap this virtual page address.
*
* REVISIT: Note that the page allocated for the level 2 page table
* is not freed nor is the level 1 page table entry ever cleared.
* This means that the 4KiB page is still allocated to the process
* even though it may not contain any mappings and that it will
* persist until the process terminates. That is not all bad because
* it means that we will be able to re-instantiate the shared memory
* mapping very quickly.
*/
DEBUGASSERT(get_l2_entry(l2table, vaddr) != 0);
clr_l2_entry(l2table, vaddr);
nunmapped++;
vaddr += MM_PGSIZE;
/* Make sure that the modified L2 table is flushed to physical
* memory.
*
* REVISIT: We could be smarter in doing this. Currently, we may
* flush the entire L2 page table numerous times.
*/
arch_flush_dcache((uintptr_t)l2table,
(uintptr_t)l2table +
ENTRIES_PER_L2TABLE * sizeof(uint32_t));
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
/* Restore the scratch section L1 page table entry */
mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
irqrestore(flags);
}
return OK;
}
#endif /* CONFIG_BUILD_KERNEL && CONFIG_MM_SHM */
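Taken together, up_shmat() and up_shmdt() are the architecture-specific halves of SHM attach and detach. A minimal, illustrative caller, assuming the common SHM logic supplies the physical pages and the region's virtual address (the two-page region below is purely an example, and error checking of the allocations is omitted):

  uintptr_t pages[2];
  uintptr_t vaddr = CONFIG_ARCH_SHM_VBASE;  /* Example region address */
  int ret;

  /* Obtain two physical pages for the region from the page allocator */

  pages[0] = mm_pgalloc(1);
  pages[1] = mm_pgalloc(1);

  /* Attach: create user mappings for the two pages */

  ret = up_shmat(pages, 2, vaddr);
  if (ret == OK)
    {
      /* ... the process can now access the shared pages at vaddr ... */

      /* Detach: remove the mappings.  Per the REVISIT note above, the
       * underlying L2 page table remains allocated so that the region
       * can be re-attached quickly.
       */

      ret = up_shmdt(vaddr, 2);
    }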

View File

@@ -69,32 +69,6 @@
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: set_l2_entry
*
* Description:
* Set the L2 table entry as part of the initialization of the L2 Page
* table.
*
****************************************************************************/
void set_l2_entry(FAR uint32_t *l2table, uintptr_t paddr, uintptr_t vaddr,
uint32_t mmuflags)
{
uint32_t index;
/* The table divides a 1MB address space into 256 entries, each
* corresponding to 4KB of address space. The page table index is
* determined by the offset from the beginning of the 1MB region.
*/
index = (vaddr & 0x000ff000) >> 12;
/* Save the table entry */
l2table[index] = (paddr | mmuflags);
}
/****************************************************************************
* Name: arm_addrenv_create_region
*
@@ -129,7 +103,7 @@ int arm_addrenv_create_region(FAR uintptr_t **list, unsigned int listlen,
/* Verify that we are configured with enough virtual address space to
* support this memory region.
*
* npages pages correspondes to (npages << MM_PGSHIFT) bytes
* npages pages corresponds to (npages << MM_PGSHIFT) bytes
* listlen sections corresponds to (listlen << 20) bytes
*/
@@ -193,7 +167,7 @@ int arm_addrenv_create_region(FAR uintptr_t **list, unsigned int listlen,
return -ENOMEM;
}
/* Map the .text region virtual address to this physical address */
/* Map the virtual address to this physical address */
set_l2_entry(l2table, paddr, vaddr, mmuflags);
nmapped += MM_PGSIZE;

View File

@@ -144,6 +144,81 @@ static inline bool arm_uservaddr(uintptr_t vaddr)
);
}
/****************************************************************************
* Name: set_l2_entry
*
* Description:
* Set the L2 table entry as part of the initialization of the L2 Page
* table.
*
****************************************************************************/
static inline void set_l2_entry(FAR uint32_t *l2table, uintptr_t paddr,
uintptr_t vaddr, uint32_t mmuflags)
{
uint32_t index;
/* The table divides a 1MB address space into 256 entries, each
* corresponding to 4KB of address space. The page table index is
* determined by the offset from the beginning of the 1MB region.
*/
index = (vaddr & 0x000ff000) >> 12;
/* Save the level 2 page table entry */
l2table[index] = (paddr | mmuflags);
}
/****************************************************************************
* Name: clr_l2_entry
*
* Description:
* Clear the L2 table entry for the specified virtual address.
*
****************************************************************************/
static inline void clr_l2_entry(FAR uint32_t *l2table, uintptr_t vaddr)
{
uint32_t index;
/* The table divides a 1MB address space into 256 entries, each
* corresponding to 4KB of address space. The page table index is
* determined by the offset from the beginning of the 1MB region.
*/
index = (vaddr & 0x000ff000) >> 12;
/* Clear the level 2 page table entry */
l2table[index] = 0;
}
/****************************************************************************
* Name: get_l2_entry
*
* Description:
* Return the L2 table entry for the specified virtual address.
*
****************************************************************************/
static inline uintptr_t get_l2_entry(FAR uint32_t *l2table, uintptr_t vaddr)
{
uint32_t index;
/* The table divides a 1MB address space into 256 entries, each
* corresponding to 4KB of address space. The page table index is
* determined by the offset from the beginning of the 1MB region.
*/
index = (vaddr & 0x000ff000) >> 12;
/* Return the level 2 page table entry */
return l2table[index];
}
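A quick worked example of the index arithmetic shared by these three helpers (the address is arbitrary, chosen only for illustration):

  /* For vaddr = 0x80123456:
   *
   *   vaddr & 0x000ff000 = 0x00023000   (page-aligned offset in section)
   *   0x00023000 >> 12   = 0x23 = 35    (L2 page table index)
   *
   * So l2table[35] maps the 4KB page at offset 35 * 4KB = 0x23000 within
   * the 1MB section containing vaddr.
   */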
/****************************************************************************
* Public Functions
****************************************************************************/