LLEXT: fix a needless allocation
When CONFIG_LLEXT_STORAGE_WRITABLE is selected and .pre_located is set, the BSS section is also allocated by the user, so there is no need to allocate it internally. Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
This commit is contained in:
parent
7e8ee25479
commit
ae8c373314
|
@ -132,7 +132,10 @@ struct llext_load_param {
|
|||
bool relocate_local;
|
||||
/**
|
||||
* Use the virtual symbol addresses from the ELF, not addresses within
|
||||
* the memory buffer, when calculating relocation targets.
|
||||
* the memory buffer, when calculating relocation targets. It also
|
||||
* means, that the application will take care to place the extension at
|
||||
* those pre-defined addresses, so the LLEXT core doesn't have to do any
|
||||
* allocation and copying internally.
|
||||
*/
|
||||
bool pre_located;
|
||||
/**
|
||||
|
|
|
@ -684,7 +684,7 @@ int do_llext_load(struct llext_loader *ldr, struct llext *ext,
|
|||
}
|
||||
|
||||
LOG_DBG("Allocate and copy regions...");
|
||||
ret = llext_copy_regions(ldr, ext);
|
||||
ret = llext_copy_regions(ldr, ext, ldr_parm);
|
||||
if (ret != 0) {
|
||||
LOG_ERR("Failed to copy regions, ret %d", ret);
|
||||
goto out;
|
||||
|
|
|
@ -59,7 +59,7 @@ static void llext_init_mem_part(struct llext *ext, enum llext_mem mem_idx,
|
|||
}
|
||||
|
||||
static int llext_copy_section(struct llext_loader *ldr, struct llext *ext,
|
||||
enum llext_mem mem_idx)
|
||||
enum llext_mem mem_idx, const struct llext_load_param *ldr_parm)
|
||||
{
|
||||
int ret;
|
||||
|
||||
|
@ -68,18 +68,31 @@ static int llext_copy_section(struct llext_loader *ldr, struct llext *ext,
|
|||
}
|
||||
ext->mem_size[mem_idx] = ldr->sects[mem_idx].sh_size;
|
||||
|
||||
if (ldr->sects[mem_idx].sh_type != SHT_NOBITS &&
|
||||
IS_ENABLED(CONFIG_LLEXT_STORAGE_WRITABLE)) {
|
||||
/* Directly use data from the ELF buffer if peek() is supported */
|
||||
ext->mem[mem_idx] = llext_peek(ldr, ldr->sects[mem_idx].sh_offset);
|
||||
if (ext->mem[mem_idx]) {
|
||||
llext_init_mem_part(ext, mem_idx, (uintptr_t)ext->mem[mem_idx],
|
||||
ldr->sects[mem_idx].sh_size);
|
||||
if (IS_ENABLED(CONFIG_LLEXT_STORAGE_WRITABLE)) {
|
||||
if (ldr->sects[mem_idx].sh_type != SHT_NOBITS) {
|
||||
/* Directly use data from the ELF buffer if peek() is supported */
|
||||
ext->mem[mem_idx] = llext_peek(ldr, ldr->sects[mem_idx].sh_offset);
|
||||
if (ext->mem[mem_idx]) {
|
||||
llext_init_mem_part(ext, mem_idx, (uintptr_t)ext->mem[mem_idx],
|
||||
ldr->sects[mem_idx].sh_size);
|
||||
ext->mem_on_heap[mem_idx] = false;
|
||||
return 0;
|
||||
}
|
||||
} else if (ldr_parm && ldr_parm->pre_located) {
|
||||
/*
|
||||
* ldr_parm cannot be NULL here with the current flow, but
|
||||
* we add a check to make it future-proof
|
||||
*/
|
||||
ext->mem[mem_idx] = NULL;
|
||||
ext->mem_on_heap[mem_idx] = false;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (ldr_parm && ldr_parm->pre_located) {
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
/* On ARM with an MPU a pow(2, N)*32 sized and aligned region is needed,
|
||||
* otherwise its typically an mmu page (sized and aligned memory region)
|
||||
* we are after that we can assign memory permission bits on.
|
||||
|
@ -132,16 +145,17 @@ err:
|
|||
|
||||
int llext_copy_strings(struct llext_loader *ldr, struct llext *ext)
|
||||
{
|
||||
int ret = llext_copy_section(ldr, ext, LLEXT_MEM_SHSTRTAB);
|
||||
int ret = llext_copy_section(ldr, ext, LLEXT_MEM_SHSTRTAB, NULL);
|
||||
|
||||
if (!ret) {
|
||||
ret = llext_copy_section(ldr, ext, LLEXT_MEM_STRTAB);
|
||||
ret = llext_copy_section(ldr, ext, LLEXT_MEM_STRTAB, NULL);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int llext_copy_regions(struct llext_loader *ldr, struct llext *ext)
|
||||
int llext_copy_regions(struct llext_loader *ldr, struct llext *ext,
|
||||
const struct llext_load_param *ldr_parm)
|
||||
{
|
||||
for (enum llext_mem mem_idx = 0; mem_idx < LLEXT_MEM_COUNT; mem_idx++) {
|
||||
/* strings have already been copied */
|
||||
|
@ -149,7 +163,7 @@ int llext_copy_regions(struct llext_loader *ldr, struct llext *ext)
|
|||
continue;
|
||||
}
|
||||
|
||||
int ret = llext_copy_section(ldr, ext, mem_idx);
|
||||
int ret = llext_copy_section(ldr, ext, mem_idx, ldr_parm);
|
||||
|
||||
if (ret < 0) {
|
||||
return ret;
|
||||
|
|
|
@ -20,7 +20,8 @@ struct llext_elf_sect_map {
|
|||
*/
|
||||
|
||||
int llext_copy_strings(struct llext_loader *ldr, struct llext *ext);
|
||||
int llext_copy_regions(struct llext_loader *ldr, struct llext *ext);
|
||||
int llext_copy_regions(struct llext_loader *ldr, struct llext *ext,
|
||||
const struct llext_load_param *ldr_parm);
|
||||
void llext_free_regions(struct llext *ext);
|
||||
void llext_adjust_mmu_permissions(struct llext *ext);
|
||||
|
||||
|
|
Loading…
Reference in New Issue