From 9a1fe73a6bfcaabf3d2504f9c2b9dda6be858134 Mon Sep 17 00:00:00 2001 From: Kishore S N Date: Thu, 13 Jul 2023 18:03:15 +0530 Subject: [PATCH 1/3] os/binfmt: Modify app section allocation for MMU Modify the allocation of app sections in accordance with the requirements for ARM MMU: - Each section size must be a multiple of 4KB (Page size) - Each section must be aligned to 4KB boundary Signed-off-by: Kishore S N --- .../configs/rtl8730e/loadable_apps/defconfig | 2 +- docs/MPURegionsUsageGuide.md | 2 +- os/arch/arm/src/armv7-a/mmu.h | 1 + os/arch/arm/src/common/up_mpuinit.c | 4 +- os/arch/arm/src/common/up_restoretask.c | 2 +- os/binfmt/Kconfig | 3 +- os/binfmt/binfmt.h | 2 +- os/binfmt/binfmt_unloadmodule.c | 2 +- os/binfmt/libelf/libelf_addrenv.c | 39 +++++++++++----- os/include/tinyara/binfmt/binfmt.h | 10 +++- os/include/tinyara/mmu.h | 46 +++++++++++++++++++ os/include/tinyara/mpu.h | 8 ---- os/include/tinyara/sched.h | 2 +- os/kernel/task/task_setup.c | 2 +- os/kernel/task/task_terminate.c | 2 +- os/kernel/task/task_terminate_unloaded.c | 2 +- 16 files changed, 96 insertions(+), 33 deletions(-) create mode 100644 os/include/tinyara/mmu.h diff --git a/build/configs/rtl8730e/loadable_apps/defconfig b/build/configs/rtl8730e/loadable_apps/defconfig index cbd8000778..89bbe74a48 100644 --- a/build/configs/rtl8730e/loadable_apps/defconfig +++ b/build/configs/rtl8730e/loadable_apps/defconfig @@ -1206,7 +1206,7 @@ CONFIG_ELF_CACHE_READ=y CONFIG_ELF_CACHE_BLOCK_SIZE=2048 CONFIG_ELF_CACHE_BLOCKS_COUNT=60 # CONFIG_SYMTAB_ORDEREDBYNAME is not set -# CONFIG_OPTIMIZE_APP_RELOAD_TIME is not set +CONFIG_OPTIMIZE_APP_RELOAD_TIME=y # CONFIG_SAVE_BIN_SECTION_ADDR is not set # diff --git a/docs/MPURegionsUsageGuide.md b/docs/MPURegionsUsageGuide.md index d4e4c63482..340f04721a 100644 --- a/docs/MPURegionsUsageGuide.md +++ b/docs/MPURegionsUsageGuide.md @@ -55,7 +55,7 @@ ```c void mpu_region_initialize(struct mpu_usages_s *mpu) - offset += MPU_NUM_REGIONS; + offset += 
NUM_APP_REGIONS; mpu->nregion_xxx_xx = offset; ``` 2. **Board specific MPU region reservation** diff --git a/os/arch/arm/src/armv7-a/mmu.h b/os/arch/arm/src/armv7-a/mmu.h index 8da3b73d18..a4effa5958 100644 --- a/os/arch/arm/src/armv7-a/mmu.h +++ b/os/arch/arm/src/armv7-a/mmu.h @@ -664,6 +664,7 @@ # define ALL_PGTABLE_SIZE PGTABLE_SIZE #endif + /* Virtual Page Table Location **********************************************/ #ifdef CONFIG_PAGING diff --git a/os/arch/arm/src/common/up_mpuinit.c b/os/arch/arm/src/common/up_mpuinit.c index a9c599f225..5c59c043c5 100644 --- a/os/arch/arm/src/common/up_mpuinit.c +++ b/os/arch/arm/src/common/up_mpuinit.c @@ -76,12 +76,12 @@ void mpu_region_initialize(struct mpu_usages_s *mpu) uint8_t offset = mpu->nregion_board_specific; #ifdef CONFIG_SUPPORT_COMMON_BINARY - offset += MPU_NUM_REGIONS; + offset += NUM_APP_REGIONS; mpu->nregion_common_bin = offset; #endif #ifdef CONFIG_APP_BINARY_SEPARATION - offset += MPU_NUM_REGIONS; + offset += NUM_APP_REGIONS; mpu->nregion_app_bin = offset; #endif diff --git a/os/arch/arm/src/common/up_restoretask.c b/os/arch/arm/src/common/up_restoretask.c index 73d813a4a8..ddf55a125e 100644 --- a/os/arch/arm/src/common/up_restoretask.c +++ b/os/arch/arm/src/common/up_restoretask.c @@ -76,7 +76,7 @@ void up_restoretask(struct tcb_s *tcb) /* Condition check : Update MPU registers only if this is not a kernel thread. 
*/ if ((tcb->flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_KERNEL) { - for (int i = 0; i < MPU_REG_NUMBER * MPU_NUM_REGIONS; i += MPU_REG_NUMBER) { + for (int i = 0; i < MPU_REG_NUMBER * NUM_APP_REGIONS; i += MPU_REG_NUMBER) { up_mpu_set_register(&tcb->mpu_regs[i]); } } diff --git a/os/binfmt/Kconfig b/os/binfmt/Kconfig index dd70982f99..8dc8540a0e 100644 --- a/os/binfmt/Kconfig +++ b/os/binfmt/Kconfig @@ -94,6 +94,7 @@ config OPTIMIZE_APP_RELOAD_TIME config BINFMT_SECTION_UNIFIED_MEMORY bool "Allocate section memory as one chunk" depends on OPTIMIZE_APP_RELOAD_TIME + depends on ARM_MPU default n ---help--- When this option is enabled, binary loader allocates one big memory chunk enough to contain each loading sections @@ -101,7 +102,7 @@ config BINFMT_SECTION_UNIFIED_MEMORY When this option is disabled, by default, loader allocates separated memory for each section. In ARMv7m MPU, there are size and address alignment restrictions, so it can enhance memory usage efficiency. In ARMv8m MPU, there is no restriction about address alignment, it is not recommended to use this option. - + This option does not provide any advantage when MMU is being used. 
config SAVE_BIN_SECTION_ADDR bool "Save binary section address" diff --git a/os/binfmt/binfmt.h b/os/binfmt/binfmt.h index b55282c74c..2913ccfc78 100644 --- a/os/binfmt/binfmt.h +++ b/os/binfmt/binfmt.h @@ -184,7 +184,7 @@ static inline void binfmt_set_mpu(struct binary_s *binp) #ifdef CONFIG_SUPPORT_COMMON_BINARY if (binp->islibrary) { /* Set MPU register values to real MPU h/w */ - for (int i = 0; i < MPU_REG_NUMBER * MPU_NUM_REGIONS; i += MPU_REG_NUMBER) { + for (int i = 0; i < MPU_REG_NUMBER * NUM_APP_REGIONS; i += MPU_REG_NUMBER) { up_mpu_set_register(®s[i]); } } diff --git a/os/binfmt/binfmt_unloadmodule.c b/os/binfmt/binfmt_unloadmodule.c index 459fc201f0..4bdb8dc88e 100644 --- a/os/binfmt/binfmt_unloadmodule.c +++ b/os/binfmt/binfmt_unloadmodule.c @@ -202,7 +202,7 @@ int unload_module(FAR struct binary_s *binp) if (binp->islibrary) { #if (defined(CONFIG_ARMV7M_MPU) || defined(CONFIG_ARMV8M_MPU)) #ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME - for (int i = 0; i < MPU_REG_NUMBER * MPU_NUM_REGIONS; i += MPU_REG_NUMBER) { + for (int i = 0; i < MPU_REG_NUMBER * NUM_APP_REGIONS; i += MPU_REG_NUMBER) { up_mpu_disable_region(&binp->cmn_mpu_regs[i]); } #else diff --git a/os/binfmt/libelf/libelf_addrenv.c b/os/binfmt/libelf/libelf_addrenv.c index 1bfcb9ff10..79c5d25830 100644 --- a/os/binfmt/libelf/libelf_addrenv.c +++ b/os/binfmt/libelf/libelf_addrenv.c @@ -65,6 +65,9 @@ #ifdef CONFIG_ARM_MPU #include #endif +#ifdef CONFIG_ARCH_USE_MMU +#include +#endif #include "libelf.h" /**************************************************************************** @@ -88,8 +91,8 @@ #ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME static int allocateregions(FAR struct elf_loadinfo_s *loadinfo) { - size_t sizes[MPU_NUM_REGIONS] = {loadinfo->binp->sizes[BIN_TEXT], loadinfo->binp->sizes[BIN_RO], loadinfo->binp->ramsize}; - uintptr_t *allocs[MPU_NUM_REGIONS] = {&loadinfo->binp->sections[BIN_TEXT], &loadinfo->binp->sections[BIN_RO], &loadinfo->binp->sections[BIN_DATA]}; + size_t sizes[NUM_APP_REGIONS] = 
{loadinfo->binp->sizes[BIN_TEXT], loadinfo->binp->sizes[BIN_RO], loadinfo->binp->ramsize}; + uintptr_t *allocs[NUM_APP_REGIONS] = {&loadinfo->binp->sections[BIN_TEXT], &loadinfo->binp->sections[BIN_RO], &loadinfo->binp->sections[BIN_DATA]}; int count = 0; int i; for (i = 0; i < CONFIG_KMM_REGIONS; i++) { @@ -102,7 +105,7 @@ static int allocateregions(FAR struct elf_loadinfo_s *loadinfo) ASSERT(0); } -#ifdef CONFIG_BINFMT_SECTION_UNIFIED_MEMORY +#if defined(CONFIG_BINFMT_SECTION_UNIFIED_MEMORY) && !defined(CONFIG_ARCH_USE_MMU) /* If there are size and address alignment restrictions like ARMV7M, * it is better to allocate one big memory chunk enough to contain each loading sections like text, ro, data. */ @@ -112,8 +115,8 @@ static int allocateregions(FAR struct elf_loadinfo_s *loadinfo) uintptr_t *tmpalloc; uint8_t j; - for (i = 0; i < MPU_NUM_REGIONS; i++) { - for (j = 0; j < MPU_NUM_REGIONS - (i + 1); j++) { + for (i = 0; i < NUM_APP_REGIONS; i++) { + for (j = 0; j < NUM_APP_REGIONS - (i + 1); j++) { if (sizes[j] < sizes[j + 1]) { tmpsz = sizes[j]; sizes[j] = sizes[j + 1]; @@ -138,7 +141,8 @@ static int allocateregions(FAR struct elf_loadinfo_s *loadinfo) /* ARMV8M requires addresses to be aligned to the power of two. */ *allocs[0] = (uintptr_t)kmm_memalign_at(CONFIG_HEAP_INDEX_LOADED_APP, MPU_ALIGNMENT_BYTES, totalsize); #else -#error "Unknown MPU version. Expected either ARMV7M or ARMV8M" +#warn "Unknown MPU version. Expected either ARMV7M or ARMV8M" + *allocs[0] = (uintptr_t)kmm_malloc_at(CONFIG_HEAP_INDEX_LOADED_APP, totalsize); #endif if (*allocs[0] == (uintptr_t)NULL) { return -ENOMEM; @@ -152,13 +156,16 @@ static int allocateregions(FAR struct elf_loadinfo_s *loadinfo) * Allocate each loading section respectively. 
*/ int region_idx; - for (region_idx = 0; region_idx < MPU_NUM_REGIONS; region_idx++) { -#ifdef CONFIG_ARMV7M_MPU + for (region_idx = 0; region_idx < NUM_APP_REGIONS; region_idx++) { +#if defined(CONFIG_ARMV7M_MPU) *allocs[region_idx] = (uintptr_t)kmm_memalign_at(CONFIG_HEAP_INDEX_LOADED_APP, sizes[region_idx], sizes[region_idx]); -#elif CONFIG_ARMV8M_MPU +#elif defined(CONFIG_ARMV8M_MPU) *allocs[region_idx] = (uintptr_t)kmm_memalign_at(CONFIG_HEAP_INDEX_LOADED_APP, MPU_ALIGNMENT_BYTES, sizes[region_idx]); +#elif defined(CONFIG_ARCH_USE_MMU) + *allocs[region_idx] = (uintptr_t)kmm_memalign_at(CONFIG_HEAP_INDEX_LOADED_APP, MMU_ALIGNMENT_BYTES, sizes[region_idx]); #else -#error "Unknown MPU version. Expected either ARMV7M or ARMV8M" +#warn "Unknown MPU version. Expected either ARMV7M or ARMV8M" + *allocs[region_idx] = (uintptr_t)kmm_malloc_at(CONFIG_HEAP_INDEX_LOADED_APP, sizes[region_idx]); #endif if (*allocs[region_idx] == (uintptr_t)NULL) { return -ENOMEM; @@ -204,10 +211,14 @@ int elf_addrenv_alloc(FAR struct elf_loadinfo_s *loadinfo) loadinfo->binp->sizes[BIN_TEXT] = MPU_ALIGN_UP(loadinfo->binp->sizes[BIN_TEXT]); loadinfo->binp->sizes[BIN_RO] = MPU_ALIGN_UP(loadinfo->binp->sizes[BIN_RO]); datamemsize = MPU_ALIGN_UP(datamemsize); - loadinfo->binp->ramsize = datamemsize; +#elif defined(CONFIG_ARCH_USE_MMU) + loadinfo->binp->sizes[BIN_TEXT] = MMU_ALIGN_UP(loadinfo->binp->sizes[BIN_TEXT]); + loadinfo->binp->sizes[BIN_RO] = MMU_ALIGN_UP(loadinfo->binp->sizes[BIN_RO]); + datamemsize = MMU_ALIGN_UP(datamemsize); #else -#error "Unknown MPU version. Expected either ARMV7M or ARMV8M" +#warn "Unknown MPU version. 
Expected either ARMV7M or ARMV8M" #endif + loadinfo->binp->ramsize = datamemsize; if (allocateregions(loadinfo) < 0) { berr("ERROR: failed to allocate memory\n"); @@ -229,7 +240,11 @@ int elf_addrenv_alloc(FAR struct elf_loadinfo_s *loadinfo) #elif defined(CONFIG_ARMV8M_MPU) loadinfo->binp->ramsize = MPU_ALIGN_UP(loadinfo->binp->ramsize); loadinfo->binp->ramstart = (uint32_t)kmm_memalign_at(CONFIG_HEAP_INDEX_LOADED_APP, MPU_ALIGNMENT_BYTES, loadinfo->binp->ramsize); +#elif defined(CONFIG_ARCH_USE_MMU) + loadinfo->binp->ramsize = MMU_ALIGN_UP(loadinfo->binp->ramsize); + loadinfo->binp->ramstart = (uint32_t)kmm_memalign_at(CONFIG_HEAP_INDEX_LOADED_APP, MMU_ALIGNMENT_BYTES, loadinfo->binp->ramsize); #else +#warn "Unknown MPU version. Expected either ARMV7M or ARMV8M" loadinfo->binp->ramstart = kmm_malloc_at(CONFIG_HEAP_INDEX_LOADED_APP, loadinfo->binp->ramsize); #endif diff --git a/os/include/tinyara/binfmt/binfmt.h b/os/include/tinyara/binfmt/binfmt.h index d91d57b5ec..5d8fee28f1 100644 --- a/os/include/tinyara/binfmt/binfmt.h +++ b/os/include/tinyara/binfmt/binfmt.h @@ -71,6 +71,14 @@ * Pre-processor Definitions ****************************************************************************/ +#ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME +/* Separate three MPU regions (text, ro and rw) to optimize reloading time */ +#define NUM_APP_REGIONS 3 +#else +/* Just a MPU region for all of section data */ +#define NUM_APP_REGIONS 1 +#endif + /**************************************************************************** * Public Types ****************************************************************************/ @@ -125,7 +133,7 @@ struct binary_s { uint8_t run_library_ctors; /* Flag to check if we need to run ctors for common binary */ #endif #ifdef CONFIG_ARM_MPU /* MPU register values for common binary only */ - uint32_t cmn_mpu_regs[MPU_REG_NUMBER * MPU_NUM_REGIONS]; /* Common binary MPU is configured during loading and disabled during unload_module */ + uint32_t 
cmn_mpu_regs[MPU_REG_NUMBER * NUM_APP_REGIONS]; /* Common binary MPU is configured during loading and disabled during unload_module */ #endif #endif FAR char *const *argv; /* Argument list */ diff --git a/os/include/tinyara/mmu.h b/os/include/tinyara/mmu.h new file mode 100644 index 0000000000..603a8be289 --- /dev/null +++ b/os/include/tinyara/mmu.h @@ -0,0 +1,46 @@ +/**************************************************************************** + * + * Copyright 2023 Samsung Electronics All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the License. 
+ * + ****************************************************************************/ + +///@file tinyara/mmu.h + +#ifndef __INCLUDE_TINYARA_MMU_H +#define __INCLUDE_TINYARA_MMU_H + +/******************************************************************************** + * Included Files + ********************************************************************************/ + +#include + +#include +#include + +/******************************************************************************** + * Pre-processor Definitions + ********************************************************************************/ + +#define SMALL_PAGE_SIZE (4096) /* 4KB small page */ +#define MMU_ALIGNMENT_BYTES SMALL_PAGE_SIZE +#define MMU_ALIGN_UP(a) (((a) + MMU_ALIGNMENT_BYTES - 1) & ~(MMU_ALIGNMENT_BYTES - 1)) + + +/******************************************************************************** + * Public Function Prototypes + ********************************************************************************/ + +#endif diff --git a/os/include/tinyara/mpu.h b/os/include/tinyara/mpu.h index c6e2fefc4a..63b3075bd5 100644 --- a/os/include/tinyara/mpu.h +++ b/os/include/tinyara/mpu.h @@ -60,14 +60,6 @@ enum mpu_region_usages_e { MPU_REGION_MAX }; -#ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME -/* Separate three MPU regions (text, ro and rw) to optimize reloading time */ -#define MPU_NUM_REGIONS 3 -#else -/* Just a MPU region for all of section data */ -#define MPU_NUM_REGIONS 1 -#endif - #ifdef CONFIG_ARMV7M_MPU #define MPU_ALIGN_UP(a) (1 << mpu_log2regionceil(0, a)) #elif CONFIG_ARMV8M_MPU diff --git a/os/include/tinyara/sched.h b/os/include/tinyara/sched.h index ec3e75cb19..d413edffa7 100644 --- a/os/include/tinyara/sched.h +++ b/os/include/tinyara/sched.h @@ -641,7 +641,7 @@ struct tcb_s { uint32_t uspace; /* User space object for app binary */ #ifdef CONFIG_ARM_MPU /* MPU register values for loadable apps only */ - uint32_t mpu_regs[MPU_REG_NUMBER * MPU_NUM_REGIONS]; /* MPU for apps is configured during 
loading and disabled in task_terminate */ + uint32_t mpu_regs[MPU_REG_NUMBER * NUM_APP_REGIONS]; /* MPU for apps is configured during loading and disabled in task_terminate */ #endif #endif diff --git a/os/kernel/task/task_setup.c b/os/kernel/task/task_setup.c index 3a24858f0c..ec992ab5b5 100644 --- a/os/kernel/task/task_setup.c +++ b/os/kernel/task/task_setup.c @@ -466,7 +466,7 @@ static int thread_schedsetup(FAR struct tcb_s *tcb, int priority, start_t start, #ifdef CONFIG_ARM_MPU int i = 0; #ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME - for (; i < MPU_REG_NUMBER * MPU_NUM_REGIONS; i += MPU_REG_NUMBER) + for (; i < MPU_REG_NUMBER * NUM_APP_REGIONS; i += MPU_REG_NUMBER) #endif { tcb->mpu_regs[i + MPU_REG_RNR] = rtcb->mpu_regs[i + MPU_REG_RNR]; diff --git a/os/kernel/task/task_terminate.c b/os/kernel/task/task_terminate.c index 13b2cb0b0a..290e01a97c 100644 --- a/os/kernel/task/task_terminate.c +++ b/os/kernel/task/task_terminate.c @@ -189,7 +189,7 @@ int task_terminate(pid_t pid, bool nonblocking) /* Disable mpu regions when the binary is unloaded if its own mpu registers are set in mpu h/w. */ if (IS_BINARY_MAINTASK(dtcb) && up_mpu_check_active(&dtcb->mpu_regs[0])) { #ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME - for (int i = 0; i < MPU_REG_NUMBER * MPU_NUM_REGIONS; i += MPU_REG_NUMBER) { + for (int i = 0; i < MPU_REG_NUMBER * NUM_APP_REGIONS; i += MPU_REG_NUMBER) { up_mpu_disable_region(&dtcb->mpu_regs[i]); } #else diff --git a/os/kernel/task/task_terminate_unloaded.c b/os/kernel/task/task_terminate_unloaded.c index 66866fb300..b1e80e9346 100644 --- a/os/kernel/task/task_terminate_unloaded.c +++ b/os/kernel/task/task_terminate_unloaded.c @@ -113,7 +113,7 @@ int task_terminate_unloaded(FAR struct tcb_s *tcb) /* Disable mpu regions when the binary is unloaded if its own mpu registers are set in mpu h/w. 
*/ if (IS_BINARY_MAINTASK(tcb) && up_mpu_check_active(&tcb->mpu_regs[0])) { #ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME - for (int i = 0; i < MPU_REG_NUMBER * MPU_NUM_REGIONS; i += MPU_REG_NUMBER) { + for (int i = 0; i < MPU_REG_NUMBER * NUM_APP_REGIONS; i += MPU_REG_NUMBER) { up_mpu_disable_region(&tcb->mpu_regs[i]); } #else From ee660cc929b9cac2314a4c21d719433227003ba3 Mon Sep 17 00:00:00 2001 From: Kishore S N Date: Tue, 18 Jul 2023 13:41:44 +0530 Subject: [PATCH 2/3] MMU for loadable apps Signed-off-by: Kishore S N --- os/arch/arm/src/amebasmart/Make.defs | 2 +- os/arch/arm/src/armv7-a/arm_blocktask.c | 1 + os/arch/arm/src/armv7-a/arm_head.S | 4 + os/arch/arm/src/armv7-a/arm_mmu.c | 296 ++++++++++++++++++ os/arch/arm/src/armv7-a/arm_prefetchabort.c | 5 + os/arch/arm/src/armv7-a/arm_releasepending.c | 1 + os/arch/arm/src/armv7-a/arm_reprioritizertr.c | 1 + os/arch/arm/src/armv7-a/arm_syscall.c | 9 +- os/arch/arm/src/armv7-a/arm_unblocktask.c | 1 + os/arch/arm/src/armv7-a/mmu.h | 124 +++++++- os/arch/arm/src/common/up_restoretask.c | 52 ++- os/arch/arm/src/imx6/Make.defs | 2 +- os/binfmt/Makefile | 3 + os/binfmt/binfmt.h | 3 + os/binfmt/binfmt_execmodule.c | 20 +- os/binfmt/binfmt_loadbinary.c | 12 +- os/binfmt/binfmt_pgtable.c | 123 ++++++++ os/include/tinyara/mmu.h | 5 + os/include/tinyara/sched.h | 8 +- os/kernel/init/os_start.c | 8 + os/kernel/task/task_setup.c | 4 +- 21 files changed, 645 insertions(+), 39 deletions(-) create mode 100644 os/binfmt/binfmt_pgtable.c diff --git a/os/arch/arm/src/amebasmart/Make.defs b/os/arch/arm/src/amebasmart/Make.defs index 534f0b6bb0..6502242919 100644 --- a/os/arch/arm/src/amebasmart/Make.defs +++ b/os/arch/arm/src/amebasmart/Make.defs @@ -147,7 +147,7 @@ CMN_CSRCS += arm_doirq.c arm_gicv2.c arm_gicv2_dump.c CMN_CSRCS += arm_initialstate.c arm_mmu.c arm_prefetchabort.c CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c CMN_CSRCS += arm_syscall.c arm_tcbinfo.c arm_undefinedinsn.c -CMN_CSRCS += arm_perf.c up_checkspace.c 
+CMN_CSRCS += arm_perf.c up_checkspace.c up_restoretask.c ifeq ($(CONFIG_ARMV7A_L2CC_PL310),y) CMN_CSRCS += arm_l2cc_pl310.c diff --git a/os/arch/arm/src/armv7-a/arm_blocktask.c b/os/arch/arm/src/armv7-a/arm_blocktask.c index 7f729ad485..78dd66fc27 100644 --- a/os/arch/arm/src/armv7-a/arm_blocktask.c +++ b/os/arch/arm/src/armv7-a/arm_blocktask.c @@ -134,6 +134,7 @@ void up_block_task(struct tcb_s *tcb, tstate_t task_state) /* Reset scheduler parameters */ // sched_resume_scheduler(rtcb); + up_restoretask(rtcb); /* Then switch contexts. Any necessary address environment * changes will be made when the interrupt returns. diff --git a/os/arch/arm/src/armv7-a/arm_head.S b/os/arch/arm/src/armv7-a/arm_head.S index 9e88e8da92..2d0fea28e8 100755 --- a/os/arch/arm/src/armv7-a/arm_head.S +++ b/os/arch/arm/src/armv7-a/arm_head.S @@ -543,7 +543,11 @@ __start: .LCptinfo: .long (PGTABLE_BASE_PADDR & 0xfff00000) /* Physical base address */ .long (PGTABLE_BASE_VADDR & 0xfff00000) /* Virtual base address */ +#ifdef CONFIG_APP_BINARY_SEPARATION + .long MMU_USR_PGTBL_MAPPING /* MMU flags for text section in RAM */ +#else .long MMU_MEMFLAGS /* MMU flags for text section in RAM */ +#endif .size .LCptinfo, . 
-.LCptinfo #endif diff --git a/os/arch/arm/src/armv7-a/arm_mmu.c b/os/arch/arm/src/armv7-a/arm_mmu.c index 1c2946896c..9b4a0b8540 100644 --- a/os/arch/arm/src/armv7-a/arm_mmu.c +++ b/os/arch/arm/src/armv7-a/arm_mmu.c @@ -46,6 +46,41 @@ #include "cp15_cacheops.h" #include "mmu.h" +#ifdef CONFIG_APP_BINARY_SEPARATION +#include +#endif +/**************************************************************************** + * Private Functions + ****************************************************************************/ +static void mmu_set_flags(uint32_t *val, bool ro, bool exec, uint8_t isL1, uint8_t isGlobal) +{ + if (isL1) { + if (ro && exec) { + *val |= MMU_APP_L1_ROX; + } else if (ro) { + *val |= MMU_APP_L1_RO; + } else { + *val |= MMU_APP_L1_RW; + } + + if (!isGlobal) { + *val |= PMD_SECT_NG; + } + } else { + if (ro && exec) { + *val |= MMU_APP_L2_ROX; + } else if (ro) { + *val |= MMU_APP_L2_RO; + } else { + *val |= MMU_APP_L2_RW; + } + + if (!isGlobal) { + *val |= PTE_NG; + } + } +} + /**************************************************************************** * Public Functions ****************************************************************************/ @@ -254,3 +289,264 @@ void mmu_invalidate_region(uint32_t vstart, size_t size) } } #endif + +#ifdef CONFIG_APP_BINARY_SEPARATION +/**************************************************************************** + * Name: mmu_get_os_l1_pgtbl + * + * Description: + * Returns the virtual address of the kernel L1 page table. + * + * Input Parameters: + * + * Returned Value: + * Page table address + ****************************************************************************/ +uint32_t *mmu_get_os_l1_pgtbl(void) +{ + return (uint32_t *)PGTABLE_BASE_VADDR; +} + +/**************************************************************************** + * Name: mmu_allocate_app_l1_pgtbl + * + * Description: + * Allocate space for L1 page table of application, in accordance with + * the requirements of the arch specific mmu. 
+ * + * Input Parameters: + * + * Returned Value: + * L1 Page table address + ****************************************************************************/ +uint32_t *mmu_allocate_app_l1_pgtbl(int app_id) +{ + uint32_t *addr = (uint32_t *)(PGTABLE_BASE_VADDR + (app_id * 16384)); + // uint32_t *addr = (uint32_t *)kmm_memalign(L1_PGTBL_ALIGNMENT, L1_PGTBL_SIZE); + // ASSERT(addr); + // memset(addr, 0, L1_PGTBL_SIZE); + return addr; +} + +/**************************************************************************** + * Name: mmu_allocate_app_l2_pgtbl + * + * Description: + * Allocate space for L2 page table of application, in accordance with + * the requirements of the arch specific mmu. + * + * Input Parameters: + * + * Returned Value: + * L2 Page table address + ****************************************************************************/ +uint32_t *mmu_allocate_app_l2_pgtbl(int app_id, int l2_idx) +{ + app_id--; + uint32_t *addr = (uint32_t *)(PGTABLE_BASE_VADDR + (2 * 16384) + (app_id * l2_idx * 1024)); + // uint32_t *addr = (uint32_t *)kmm_memalign(L2_PGTBL_ALIGNMENT, L2_PGTBL_SIZE); + // ASSERT(addr); + // memset(addr, 0, L2_PGTBL_SIZE); + return addr; +} + +/**************************************************************************** + * Name: mmu_update_app_l1_pgtbl_ospgtbl + * + * Description: + * Loop through the L1 page table. + * Copy kernel L1 page table to app page table. + * If the entry is pointing to a L2 page table + * Allocate L2 page table for app. + * Copy entries from kernel to app L2 table. + * Update the L2 page table address in L1 table. 
+ * + * Input Parameters: + * app_pgtbl: Pointer to L1 page table of app + * + ****************************************************************************/ +void mmu_update_app_l1_pgtbl_ospgtbl(uint32_t *app_l1_pgtbl) +{ + uint32_t *os_l1_pgtbl = (uint32_t *)PGTABLE_BASE_VADDR; + + memcpy((void *)app_l1_pgtbl, (void *)os_l1_pgtbl, L1_PGTBL_SIZE); + cp15_flush_dcache((uintptr_t)app_l1_pgtbl, (uintptr_t)app_l1_pgtbl + L1_PGTBL_SIZE); + +#ifdef CONFIG_SUPPORT_COMMON_BINARY + for (int i = 0; i < L1_PGTBL_NENTRIES; i++) { + if ((os_l1_pgtbl[i] & PMD_TYPE_MASK) == PMD_TYPE_PTE) { + //Found a L2 page table. + uint32_t *os_l2_pgtbl = (uint32_t *)(os_l1_pgtbl[i] & PMD_PTE_PADDR_MASK); + uint32_t *app_l2_pgtbl = mmu_allocate_app_l2_pgtbl(); + memcpy(app_l2_pgtbl, os_l2_pgtbl, L2_PGTBL_SIZE); + app_l1_pgtbl[i] &= ~PMD_PTE_PADDR_MASK; + app_l1_pgtbl[i] |= (uint32_t)app_l2_pgtbl & PMD_PTE_PADDR_MASK; + } + } +#endif +} + +/**************************************************************************** + * Name: mmu_map_app_region + * + * Description + * + * Input Parameters: + * + * Returned Value: + ****************************************************************************/ +void mmu_map_app_region(int app_id, uint32_t *l1_pgtbl, uint32_t start, uint32_t size, bool ro, bool exec, bool global) +{ + uint32_t idx; + uint32_t val; + uint32_t end = start + size; + irqstate_t flags; + + lldbg("start = 0x%08x end = 0x%08x size = %x\n", start, end, size); + + // Run a loop until the entire region is mapped. + while (start < end) { + // Check if this address can be mapped to a section. + if (!(start & SECTION_MASK) && !(size & SECTION_MASK)) { + // Yes. Update the section entry in the the L1 page table. 
+ idx = start >> 20; + val = start & PMD_PTE_PADDR_MASK; + mmu_set_flags(&val, ro, exec, true, global); + + lldbg("Add section for addr 0x%08x idx = %d\n", start, idx); + + if (global) { + // If this update is for the common binary, then it is done + // in the kernel page tables and so the cache and tlbs need + // to be flushed and invalidated. + flags = enter_critical_section(); + l1_pgtbl[idx] = val; + cp15_clean_dcache_bymva((uint32_t)&l1_pgtbl[idx]); + mmu_invalidate_region(start, SECTION_SIZE); + leave_critical_section(flags); + } else { + l1_pgtbl[idx] = val; + cp15_clean_dcache_bymva((uint32_t)&l1_pgtbl[idx]); + } + + // Advance the memory region address. + start += SECTION_SIZE; + } else { // Check if this address can be mapped to a small page. + + // Check if L2 page table is not created. + idx = (start & 0xfff00000) >> 20; + int l2_idx = 0; + uint32_t *l2_pgtbl = (uint32_t *)(l1_pgtbl[idx] & PMD_PTE_PADDR_MASK); + if ((l1_pgtbl[idx] & PMD_TYPE_MASK) != PMD_TYPE_PTE) { + // Yes. Allocate L2 page table for app. + l2_pgtbl = mmu_allocate_app_l2_pgtbl(app_id, l2_idx++); + + lldbg("Allocated L2 pgtbl at 0x%08x\n", l2_pgtbl); + if (global) { + // If this update is for the common binary, then it is done + // in the kernel page tables and so the cache and tlbs need + // to be flushed and invalidated. + + flags = enter_critical_section(); + + // Fill default entries into L2 page table. + uint32_t tmp = start; + for (idx = 0; idx < L2_PGTBL_NENTRIES; idx++) { + val = tmp & PTE_SMALL_PADDR_MASK; + val |= MMU_MEMFLAGS; + l2_pgtbl[idx] = val; + cp15_clean_dcache_bymva((uint32_t)&l2_pgtbl[idx]); + tmp += 4096; + } + + // Update L2 page table address in L1 page table. + val = (uint32_t)l2_pgtbl & PMD_PTE_PADDR_MASK; + val |= MMU_L1_DATAFLAGS; + l1_pgtbl[idx] = val; + + cp15_clean_dcache_bymva((uint32_t)&l1_pgtbl[idx]); + cp15_invalidate_tlb_bymva(start); + leave_critical_section(flags); + } else { + // Update L2 page table address in L1 page table. 
+ val = (uint32_t)l2_pgtbl & PMD_PTE_PADDR_MASK; + val |= MMU_L1_PGTABFLAGS; + l1_pgtbl[idx] = val; + // dbg("Set l1 pte at 0x%08x = 0x%08x\n", &l1_pgtbl[idx], val); + cp15_clean_dcache_bymva((uint32_t)&l1_pgtbl[idx]); + } + } + + // Update the L2 page table entry. + idx = (start & 0x000ff000) >> 12; + val = start & PTE_SMALL_PADDR_MASK; + mmu_set_flags(&val, ro, exec, false, global); + + if (global) { + flags = enter_critical_section(); + l2_pgtbl[idx] = val; + // If this update is for the common binary, then it is done + // in the kernel page tables and so the cache and tlbs need + // to be flushed and invalidated. + cp15_clean_dcache_bymva((uint32_t)&l2_pgtbl[idx]); + cp15_invalidate_tlb_bymva(start); + leave_critical_section(flags); + } else { + l2_pgtbl[idx] = val; + // dbg("Set l2 pte at 0x%08x = 0x%08x\n", &l2_pgtbl[idx], val); + cp15_clean_dcache_bymva((uint32_t)&l2_pgtbl[idx]); + } + + // Advance the memory region address. + start += SMALL_PAGE_SIZE; + } + } +} + +#endif // CONFIG_APP_BINARY_SEPARATION + + +void mmu_dump_pgtbl(void) +{ + struct tcb_s *rtcb = sched_self(); + if (rtcb->app_id < 1) { + return; + } + + uint32_t *l1tbl = mmu_l1_pgtable(); + + lldbg("L1 page table base addr = 0x%08x\n", l1tbl); + + lldbg("================================================\n"); + lldbg("ENTRY TYPE OUT NG AP XN\n"); + lldbg("ADDR ADDR \n"); + lldbg("================================================\n"); + for (int i = 0; i < L1_PGTBL_NENTRIES; i++) { + bool ng = (l1tbl[i] & PMD_SECT_NG) ? 1 : 0; + if (ng && (l1tbl[i] & PMD_TYPE_MASK) == PMD_TYPE_SECT) { + lldbg("0x%08x SECT 0x%08x %d %1x%1x %d\n", + &l1tbl[i], + l1tbl[i] & PMD_SECT_PADDR_MASK, + (l1tbl[i] & PMD_SECT_NG) ? 1 : 0, + (l1tbl[i] & PMD_SECT_AP2) ? 1 : 0, + (l1tbl[i] & PMD_SECT_AP_MASK) >> PMD_SECT_AP_SHIFT, + (l1tbl[i] & PMD_SECT_XN) ? 
1 : 0); + } else if((l1tbl[i] & PMD_TYPE_MASK) == PMD_TYPE_PTE) { + lldbg("0x%08x L1PTE 0x%08x\n", &l1tbl[i], l1tbl[i] & PMD_PTE_PADDR_MASK); + uint32_t *l2tbl = (uint32_t)l1tbl[i] & PMD_PTE_PADDR_MASK; + for (int j = 0; j < L2_PGTBL_NENTRIES; j++) { + bool ng = (l2tbl[j] & PTE_NG) ? 1 : 0; + if (ng && ((l2tbl[j] & PTE_TYPE_MASK) != PTE_TYPE_FAULT)) { + lldbg("0x%08x PAGE 0x%08x %d %1x%1x %d\n", + &l2tbl[j], + l2tbl[j] & PTE_SMALL_PADDR_MASK, + (l2tbl[j] & PTE_NG) ? 1 : 0, + (l2tbl[j] & PTE_AP2) ? 1 : 0, + (l2tbl[j] & PTE_AP_MASK) >> PTE_AP_SHIFT, + (l2tbl[j] & PTE_SMALL_XN) ? 1 : 0); + } + } + } + } + lldbg("=============================================\n"); +} diff --git a/os/arch/arm/src/armv7-a/arm_prefetchabort.c b/os/arch/arm/src/armv7-a/arm_prefetchabort.c index f9182eb7a9..4f8a23f408 100644 --- a/os/arch/arm/src/armv7-a/arm_prefetchabort.c +++ b/os/arch/arm/src/armv7-a/arm_prefetchabort.c @@ -52,6 +52,7 @@ #include "sched/sched.h" #include "arm_internal.h" +#include "mmu.h" /**************************************************************************** * Public Functions @@ -148,6 +149,10 @@ uint32_t *arm_prefetchabort(uint32_t *regs, uint32_t ifar, uint32_t ifsr) _alert("Prefetch abort. PC: %08x IFAR: %08x IFSR: %08x\n", regs[REG_PC], ifar, ifsr); + + _alert("MMU L1 Entry for 0x%08x = 0x%08x\n", ifar, mmu_l1_getentry(ifar)); + mmu_dump_pgtbl(); + PANIC(); return regs; /* To keep the compiler happy */ } diff --git a/os/arch/arm/src/armv7-a/arm_releasepending.c b/os/arch/arm/src/armv7-a/arm_releasepending.c index cebdafefd9..525886e47a 100644 --- a/os/arch/arm/src/armv7-a/arm_releasepending.c +++ b/os/arch/arm/src/armv7-a/arm_releasepending.c @@ -101,6 +101,7 @@ void up_release_pending(void) /* Update scheduler parameters */ // sched_resume_scheduler(rtcb); + up_restoretask(rtcb); /* Then switch contexts. Any necessary address environment * changes will be made when the interrupt returns. 
diff --git a/os/arch/arm/src/armv7-a/arm_reprioritizertr.c b/os/arch/arm/src/armv7-a/arm_reprioritizertr.c index 91cb3764cf..92e9952dc2 100644 --- a/os/arch/arm/src/armv7-a/arm_reprioritizertr.c +++ b/os/arch/arm/src/armv7-a/arm_reprioritizertr.c @@ -156,6 +156,7 @@ void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority) /* Update scheduler parameters */ //sched_resume_scheduler(rtcb); + up_restoretask(rtcb); /* Then switch contexts. Any necessary address environment * changes will be made when the interrupt returns. diff --git a/os/arch/arm/src/armv7-a/arm_syscall.c b/os/arch/arm/src/armv7-a/arm_syscall.c index 1f77982a35..81b55fc33a 100644 --- a/os/arch/arm/src/armv7-a/arm_syscall.c +++ b/os/arch/arm/src/armv7-a/arm_syscall.c @@ -181,6 +181,7 @@ uint32_t *arm_syscall(uint32_t *regs) #ifdef CONFIG_BUILD_PROTECTED uint32_t cpsr; #endif + struct tcb_s *rtcb = sched_self(); /* Nested interrupts are not supported */ @@ -221,7 +222,6 @@ uint32_t *arm_syscall(uint32_t *regs) #ifdef CONFIG_LIB_SYSCALL case SYS_syscall_return: { - struct tcb_s *rtcb = sched_self(); int index = (int)rtcb->xcp.nsyscalls - 1; /* Make sure that there is a saved SYSCALL return address. */ @@ -289,6 +289,7 @@ uint32_t *arm_syscall(uint32_t *regs) CURRENT_REGS = (uint32_t *)regs[REG_R1]; DEBUGASSERT(CURRENT_REGS); + up_restoretask(rtcb); } break; @@ -314,6 +315,7 @@ uint32_t *arm_syscall(uint32_t *regs) DEBUGASSERT(regs[REG_R1] != 0 && regs[REG_R2] != 0); *(uint32_t **)regs[REG_R1] = regs; CURRENT_REGS = (uint32_t *)regs[REG_R2]; + up_restoretask(rtcb); } break; @@ -333,7 +335,6 @@ uint32_t *arm_syscall(uint32_t *regs) #ifdef CONFIG_BUILD_PROTECTED case SYS_task_start: { - struct tcb_s *rtcb = sched_self(); DEBUGASSERT(rtcb->uspace); /* Set up to return to the user-space _start function in * unprivileged mode. 
We need: @@ -370,7 +371,6 @@ uint32_t *arm_syscall(uint32_t *regs) #if !defined(CONFIG_BUILD_FLAT) && !defined(CONFIG_DISABLE_PTHREAD) case SYS_pthread_start: { - struct tcb_s *rtcb = sched_self(); DEBUGASSERT(rtcb->uspace); /* Set up to enter the user-space pthread start-up function in * unprivileged mode. We need: @@ -409,7 +409,6 @@ uint32_t *arm_syscall(uint32_t *regs) case SYS_signal_handler: { - struct tcb_s *rtcb = sched_self(); /* Remember the caller's return address */ DEBUGASSERT(rtcb->uspace); @@ -474,7 +473,6 @@ uint32_t *arm_syscall(uint32_t *regs) case SYS_signal_handler_return: { - struct tcb_s *rtcb = sched_self(); /* Set up to return to the kernel-mode signal dispatching logic. */ DEBUGASSERT(rtcb->xcp.sigreturn != 0); @@ -511,7 +509,6 @@ uint32_t *arm_syscall(uint32_t *regs) default: { #ifdef CONFIG_LIB_SYSCALL - struct tcb_s *rtcb = sched_self(); int index = rtcb->xcp.nsyscalls; /* Verify that the SYS call number is within range */ diff --git a/os/arch/arm/src/armv7-a/arm_unblocktask.c b/os/arch/arm/src/armv7-a/arm_unblocktask.c index 78ed55db31..e4c12261fa 100644 --- a/os/arch/arm/src/armv7-a/arm_unblocktask.c +++ b/os/arch/arm/src/armv7-a/arm_unblocktask.c @@ -117,6 +117,7 @@ sched_removeblocked(tcb); /* Update scheduler parameters */ // sched_resume_scheduler(rtcb); + up_restoretask(rtcb); /* Then switch contexts. Any necessary address environment * changes will be made when the interrupt returns. diff --git a/os/arch/arm/src/armv7-a/mmu.h b/os/arch/arm/src/armv7-a/mmu.h index a4effa5958..87d8fc0e43 100644 --- a/os/arch/arm/src/armv7-a/mmu.h +++ b/os/arch/arm/src/armv7-a/mmu.h @@ -325,6 +325,7 @@ #define PMD_SECT_AP2 (1 << 15) /* Bit 15: AP[2]: Access permission bit 2 */ #define PMD_SECT_S (1 << 16) /* Bit 16: Shareable bit */ #define PMD_SECT_NG (1 << 17) /* Bit 17: Not global bit. */ +#define PMD_SECT_NS (1 << 19) /* Bit 19: Non-secure bit. 
*/ #define PMD_SECT_PADDR_MASK (0xfff00000) /* Bits 20-31: Section base address, PA[31:20] */ /* Super Section (differences only) */ @@ -457,6 +458,7 @@ #define PTE_SMALL_FLAG_MASK (0x0000003f) /* Bits 0-11: MMU flags (mostly) */ #define PTE_SMALL_PADDR_MASK (0xfffff000) /* Bits 12-31: Small page base address, PA[31:12] */ +#define PTE_SMALL_XN (1 << 0) /* Bit 0 indicates execute never */ /* Level 2 Translation Table Access Permissions: * @@ -598,11 +600,7 @@ #define MMU_MEMFLAGS (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_CACHEABLE | \ PMD_SECT_S | PMD_SECT_DOM(0)) #else -//PORTNOTE -//TODO: Temporarily changed PMD_SECT_AP_RW1 to PMD_SECT_AP_RW01 below -//to provide access permission to loadable apps. This needs to be reverted -//when we implement separate memory protection logic for loadable apps. -#define MMU_MEMFLAGS (PMD_TYPE_SECT | PMD_SECT_AP_RW01 | PMD_CACHEABLE | \ +#define MMU_MEMFLAGS (PMD_TYPE_SECT | PMD_SECT_AP_RW1 | PMD_CACHEABLE | \ PMD_SECT_DOM(0)) #endif @@ -623,7 +621,7 @@ # define MMU_L2_UTEXTFLAGS (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW12_R0) #endif -#define MMU_L1_DATAFLAGS (PMD_TYPE_PTE | PMD_PTE_PXN | PMD_PTE_DOM(0)) +#define MMU_L1_DATAFLAGS (PMD_TYPE_PTE | PMD_PTE_DOM(0) | PMD_PTE_NS) #define MMU_L2_UDATAFLAGS (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW01) #define MMU_L2_KDATAFLAGS (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW1) #define MMU_L2_UALLOCFLAGS (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW01) @@ -641,12 +639,44 @@ #define MMU_L2_VECTROFLAGS (PTE_TYPE_SMALL | PTE_WRITE_THROUGH | PTE_AP_R1) #define MMU_L2_VECTORFLAGS MMU_L2_VECTRWFLAGS +#define MMU_APP_L1_RO (PMD_TYPE_SECT | PMD_SECT_AP_R01 | PMD_CACHEABLE | \ + PMD_SECT_DOM(0) | PMD_SECT_XN | PMD_SECT_NS) +#define MMU_APP_L1_ROX (PMD_TYPE_SECT | PMD_SECT_AP_R01 | PMD_CACHEABLE | \ + PMD_SECT_DOM(0) | PMD_SECT_NS) +#ifdef CONFIG_SMP +#define MMU_APP_L1_RW (PMD_TYPE_SECT | PMD_SECT_AP_RW01 | PMD_CACHEABLE | \ + PMD_SECT_S | PMD_SECT_DOM(0) | PMD_SECT_XN | PMD_SECT_NS) +#else +#define 
MMU_APP_L1_RW (PMD_TYPE_SECT | PMD_SECT_AP_RW01 | PMD_CACHEABLE | \ + PMD_SECT_DOM(0) | PMD_SECT_XN | PMD_SECT_NS) +#endif +#define MMU_APP_L2_RO (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_R01 | PTE_SMALL_XN) +#define MMU_APP_L2_ROX (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_R01) +#ifdef CONFIG_SMP +#define MMU_APP_L2_RW (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW01 | PTE_SMALL_XN | PTE_S) +#else +#define MMU_APP_L2_RW (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW01 | PTE_SMALL_XN) +#endif + +#ifdef CONFIG_SMP +#define MMU_USR_PGTBL_MAPPING (PMD_TYPE_SECT | PMD_SECT_AP_RW01 | PMD_CACHEABLE | \ + PMD_SECT_S | PMD_SECT_DOM(0)) +#else +#define MMU_USR_PGTBL_MAPPING (PMD_TYPE_SECT | PMD_SECT_AP_RW01 | PMD_CACHEABLE | \ + PMD_SECT_DOM(0)) +#endif + + /* Mapped section size */ #define SECTION_SHIFT (20) #define SECTION_SIZE (1 << SECTION_SHIFT) /* 1Mb */ #define SECTION_MASK (SECTION_SIZE - 1) +#define SMALL_PAGE_SHIFT (12) +#define SMALL_PAGE_SIZE (1 << SMALL_PAGE_SHIFT) /* 4Kb */ +#define SMALL_PAGE_MASK (SMALL_PAGE_SIZE - 1) + /* The Cortex-A5 supports two translation table base address registers. In * this, implementation, only Translation Table Base Register 0 (TTBR0) is * used. The TTBR0 contains the upper bits of the address a a page table in @@ -657,13 +687,24 @@ * require up to 16Kb of memory. 
*/ +#ifdef CONFIG_APP_BINARY_SEPARATION +#define PGTABLE_SIZE 0x00010000 +#else #define PGTABLE_SIZE 0x00004000 +#endif + #ifdef CONFIG_ARCH_ADDRENV # define ALL_PGTABLE_SIZE (PGTABLE_SIZE * CONFIG_SMP_NCPUS) #else # define ALL_PGTABLE_SIZE PGTABLE_SIZE #endif +#define L1_PGTBL_SIZE (16384) +#define L1_PGTBL_ALIGNMENT (16384) +#define L1_PGTBL_NENTRIES (4096) +#define L2_PGTBL_SIZE (1024) +#define L2_PGTBL_ALIGNMENT (1024) +#define L2_PGTBL_NENTRIES (256) /* Virtual Page Table Location **********************************************/ @@ -1345,7 +1386,20 @@ static inline void cp15_wrttb(unsigned int ttb) { __asm__ __volatile__ ( - "\tmcr p15, 0, %0, c2, c0, 0\n" + "\tdsb\n" + "\tmrc p15, 0, r1, c2, c0, 0\n" + "\tand r1, r1, #63\n" + "\torr r1, r1, %0\n" + "\tmcr p15, 0, r1, c2, c0, 0\n" +#if 0 +#ifdef CONFIG_ARM_HAVE_MPCORE + "\tmcr p15, 0, r0, c8, c3, 0\n" /* TLBIALLIS */ + "\tmcr p15, 0, r0, c7, c1, 6\n" /* BPIALLIS */ +#else + "\tmcr p15, 0, r0, c8, c7, 0\n" /* TLBIALL */ + "\tmcr p15, 0, r0, c7, c5, 6\n" /* BPIALL */ +#endif +#endif "\tnop\n" "\tnop\n" "\tnop\n" @@ -1354,14 +1408,57 @@ static inline void cp15_wrttb(unsigned int ttb) "\tnop\n" "\tnop\n" "\tnop\n" +#if 0 "\tmov r1, #0\n" "\tmcr p15, 0, r1, c2, c0, 2\n" +#endif + "\tdsb\n" + "\tisb\n" : : "r" (ttb) : "r1", "memory" ); } +static inline uint32_t mmu_get_asid(void) +{ + uint32_t asid = 0; + __asm__ __volatile__ + ( + "\tmrc p15, 0, %[asid], c13, c0, 1\n" // Read the contextid register + : [asid] "=r" (asid) + : + : + ); + return asid; +} + +static inline void mmu_switch_ttbr_asid(uint32_t ttb, uint32_t asid) +{ + uint32_t globalttb = PGTABLE_BASE_VADDR; + __asm__ __volatile__ + ( + "\tmcr p15, 0, %[gttb], c2, c0, 0\n" // Write global ttb into ttbr0 + "\tisb\n" + "\tmcr p15, 0, %[asid], c13, c0, 1\n" // Write new asid to contextid register + "\tisb\n" + "\tmcr p15, 0, %[ttb], c2, c0, 0\n" // Write new ttb into ttbr0 + "\tnop\n" + "\tnop\n" + "\tnop\n" + "\tnop\n" + "\tnop\n" + "\tnop\n" + "\tnop\n" + 
"\tnop\n" + : + : [gttb] "r" (globalttb), [asid] "r" (asid), [ttb] "r" (ttb) + : "memory" + ); + +} + + /**************************************************************************** * Name: mmu_l1_pgtable * @@ -1391,6 +1488,18 @@ static inline uint32_t *mmu_l1_pgtable(void) pgtable = ttbr0 & TTBR0_BASE_MASK(0); return (uint32_t *)(pgtable - PGTABLE_BASE_PADDR + PGTABLE_BASE_VADDR); +#elif defined(CONFIG_APP_BINARY_SEPARATION) + uint32_t ttbr0; + __asm__ __volatile__ + ( + "\tmrc p15, 0, %0, c2, c0, 0\n" + : "=r" (ttbr0) + : + : + ); + + ttbr0 &= TTBR0_BASE_MASK(0); + return ttbr0; #else return (uint32_t *)PGTABLE_BASE_VADDR; #endif @@ -1597,6 +1706,7 @@ void mmu_l1_map_regions(const struct section_mapping_s *mappings, void mmu_invalidate_region(uint32_t vstart, size_t size); #endif +void mmu_dump_pgtbl(void); #undef EXTERN #ifdef __cplusplus } diff --git a/os/arch/arm/src/common/up_restoretask.c b/os/arch/arm/src/common/up_restoretask.c index ddf55a125e..e0783381f1 100644 --- a/os/arch/arm/src/common/up_restoretask.c +++ b/os/arch/arm/src/common/up_restoretask.c @@ -40,6 +40,9 @@ #include "up_internal.h" #include "sched/sched.h" +#if defined(CONFIG_APP_BINARY_SEPARATION) && defined(CONFIG_ARCH_USE_MMU) +#include "mmu.h" +#endif /**************************************************************************** * Private Data @@ -71,8 +74,50 @@ void up_restoretask(struct tcb_s *tcb) #endif /* Restore the MPU registers in case we are switching to an application task */ -#ifdef CONFIG_ARM_MPU #ifdef CONFIG_APP_BINARY_SEPARATION +#ifdef CONFIG_SUPPORT_COMMON_BINARY + if (g_umm_app_id) { + *g_umm_app_id = tcb->app_id; + } +#endif + +#ifdef CONFIG_ARCH_USE_MMU +#if 0 + uint32_t ctxidr = mmu_get_asid(); + uint8_t asid = ctxidr & 0xFF; + if (asid != tcb->app_id) { + mmu_switch_ttbr_asid((uint32_t)tcb->pgtbl, tcb->app_id); + } +#endif + + irqstate_t flags; + flags = enter_critical_section(); + lldbg("Write TTBR = 0x%08x\n", tcb->pgtbl); + cp15_wrttb((uint32_t)tcb->pgtbl); + 
cp15_invalidate_tlbs(); + if (tcb->app_id) + { + // If this tcb belongs to an app (and not kernel thread) + // binfmt_invalidate_app_regions(((struct task_tcb_s *)tcb)->bininfo); + //cp15_invalidate_tlbs(); + } + + uint32_t ttbr0; + + __asm__ __volatile__ + ( + "\tmrc p15, 0, %0, c2, c0, 0\n" + : "=r" (ttbr0) + : + : + ); + + lldbg("Updated TTBR = 0x%08x\n", ttbr0); + + leave_critical_section(flags); +#endif + +#ifdef CONFIG_ARM_MPU /* Condition check : Update MPU registers only if this is not a kernel thread. */ if ((tcb->flags & TCB_FLAG_TTYPE_MASK) != TCB_FLAG_TTYPE_KERNEL) { @@ -81,16 +126,11 @@ void up_restoretask(struct tcb_s *tcb) } } -#ifdef CONFIG_SUPPORT_COMMON_BINARY - if (g_umm_app_id) { - *g_umm_app_id = tcb->app_id; - } #endif #endif #ifdef CONFIG_MPU_STACK_OVERFLOW_PROTECTION up_mpu_set_register(tcb->stack_mpu_regs); #endif -#endif #ifdef CONFIG_TASK_MONITOR /* Update tcb active flag for monitoring. */ diff --git a/os/arch/arm/src/imx6/Make.defs b/os/arch/arm/src/imx6/Make.defs index 39bcbf7166..2b21d8a6b0 100644 --- a/os/arch/arm/src/imx6/Make.defs +++ b/os/arch/arm/src/imx6/Make.defs @@ -115,7 +115,7 @@ CMN_CSRCS += arm_doirq.c arm_gicv2.c arm_gicv2_dump.c CMN_CSRCS += arm_initialstate.c arm_mmu.c arm_prefetchabort.c CMN_CSRCS += arm_schedulesigaction.c arm_sigdeliver.c CMN_CSRCS += arm_syscall.c arm_tcbinfo.c arm_undefinedinsn.c -CMN_CSRCS += arm_perf.c +CMN_CSRCS += arm_perf.c up_restoretask.c ifeq ($(CONFIG_ARMV7A_L2CC_PL310),y) CMN_CSRCS += arm_l2cc_pl310.c diff --git a/os/binfmt/Makefile b/os/binfmt/Makefile index d5ca6e1b36..ab3a7d70b9 100644 --- a/os/binfmt/Makefile +++ b/os/binfmt/Makefile @@ -75,6 +75,9 @@ ifeq ($(CONFIG_LIBC_EXECFUNCS),y) BINFMT_CSRCS += binfmt_execsymtab.c endif +ifeq ($(CONFIG_ARCH_USE_MMU),y) +BINFMT_CSRCS += binfmt_pgtable.c +endif # Add configured binary modules VPATH = diff --git a/os/binfmt/binfmt.h b/os/binfmt/binfmt.h index 2913ccfc78..742e4da4ad 100644 --- a/os/binfmt/binfmt.h +++ b/os/binfmt/binfmt.h @@ -151,6 
+151,9 @@ void binfmt_freeargv(FAR struct binary_s *bin); #define binfmt_freeargv(bin) #endif +#ifdef CONFIG_ARCH_USE_MMU +void binfmt_setup_app_pgtable(struct binary_s *binp); +#endif #ifdef CONFIG_ARM_MPU static inline void binfmt_set_mpu(struct binary_s *binp) diff --git a/os/binfmt/binfmt_execmodule.c b/os/binfmt/binfmt_execmodule.c index be2f2e6401..48224aa302 100644 --- a/os/binfmt/binfmt_execmodule.c +++ b/os/binfmt/binfmt_execmodule.c @@ -71,6 +71,9 @@ #include #endif #include +#if defined(CONFIG_APP_BINARY_SEPARATION) && defined(CONFIG_ARCH_USE_MMU) +#include +#endif #ifdef CONFIG_BINARY_MANAGER #include @@ -127,7 +130,7 @@ static void exec_ctors(FAR void *arg) if (g_lib_binp->run_library_ctors) { ctor = g_lib_binp->ctors; for (i = 0; i < g_lib_binp->nctors; i++) { - binfo("Calling ctor %d at %p\n", i, (FAR void *)ctor); + dbg("Calling ctor %d at %p\n", i, (FAR void *)ctor); (*ctor)(); ctor++; @@ -141,7 +144,7 @@ static void exec_ctors(FAR void *arg) /* Execute each constructor */ for (i = 0; i < binp->nctors; i++) { - binfo("Calling ctor %d at %p\n", i, (FAR void *)ctor); + dbg("Calling ctor %d at %p\n", i, (FAR void *)ctor); (*ctor)(); ctor++; @@ -182,7 +185,7 @@ int exec_module(FAR struct binary_s *binp) } #endif - binfo("Executing %s\n", binp->filename); + dbg("Executing %s\n", binp->filename); binary_idx = binp->binary_idx; binp->uheap = (struct mm_heap_s *)binp->sections[BIN_HEAP]; @@ -193,8 +196,8 @@ int exec_module(FAR struct binary_s *binp) } mm_add_app_heap_list(binp->uheap, binp->bin_name); - binfo("------------------------%s Binary Heap Information------------------------\n", binp->bin_name); - binfo("Start addr = 0x%x, size = %u \n", (void *)binp->sections[BIN_HEAP] + sizeof(struct mm_heap_s), binp->sizes[BIN_HEAP]); + dbg("------------------------%s Binary Heap Information------------------------\n", binp->bin_name); + dbg("Start addr = 0x%x, size = %u \n", (void *)binp->sections[BIN_HEAP] + sizeof(struct mm_heap_s), binp->sizes[BIN_HEAP]); 
/* The first 4 bytes of the data section of the application must contain a pointer to the application's mm_heap object. Here we will store the mm_heap @@ -301,6 +304,9 @@ int exec_module(FAR struct binary_s *binp) #ifdef CONFIG_ARM_MPU memset(rtcb->mpu_regs, 0, sizeof(rtcb->mpu_regs)); #endif +#ifdef CONFIG_ARCH_USE_MMU + rtcb->pgtbl = mmu_get_os_l1_pgtbl(); +#endif /* Store the address of the applications userspace object in the newtcb */ /* The app's userspace object will be found at an offset of 4 bytes from the start of the binary */ @@ -308,9 +314,7 @@ int exec_module(FAR struct binary_s *binp) newtcb->cmn.uheap = (uint32_t)binp->uheap; #ifdef CONFIG_BINARY_MANAGER -#ifdef CONFIG_SUPPORT_COMMON_BINARY newtcb->cmn.app_id = binp->binary_idx; -#endif /* Set task name as binary name */ strncpy(newtcb->cmn.name, binp->bin_name, CONFIG_TASK_NAME_SIZE); @@ -346,7 +350,7 @@ int exec_module(FAR struct binary_s *binp) goto errout_with_tcbinit; } - binfo("%s loaded @ 0x%08x and running with pid = %d\n", binp->filename, binp->sections[BIN_TEXT], pid); + dbg("%s loaded @ 0x%08x and running with pid = %d\n", binp->filename, binp->sections[BIN_TEXT], pid); return (int)pid; diff --git a/os/binfmt/binfmt_loadbinary.c b/os/binfmt/binfmt_loadbinary.c index 41cd6781c8..312ce13fcc 100644 --- a/os/binfmt/binfmt_loadbinary.c +++ b/os/binfmt/binfmt_loadbinary.c @@ -180,8 +180,10 @@ int load_binary(int binary_idx, FAR const char *filename, load_attr_t *load_attr #endif elf_save_bin_section_addr(bin); -#ifdef CONFIG_ARM_MPU +#if defined(CONFIG_ARM_MPU) binfmt_set_mpu(bin); +#elif defined(CONFIG_ARCH_USE_MMU) + binfmt_setup_app_pgtable(bin); #endif #ifdef CONFIG_SUPPORT_COMMON_BINARY if (bin->islibrary) { @@ -213,12 +215,12 @@ int load_binary(int binary_idx, FAR const char *filename, load_attr_t *load_attr /* Print Binary section address & size details */ - binfo("[%s] text start addr = 0x%x size = %u\n", bin->bin_name, bin->sections[BIN_TEXT], bin->sizes[BIN_TEXT]); + dbg("[%s] text 
start addr = 0x%x size = %u\n", bin->bin_name, bin->sections[BIN_TEXT], bin->sizes[BIN_TEXT]); #ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME - binfo("[%s] rodata start addr = 0x%x size = %u\n", bin->bin_name, bin->sections[BIN_RO], bin->sizes[BIN_RO]); + dbg("[%s] rodata start addr = 0x%x size = %u\n", bin->bin_name, bin->sections[BIN_RO], bin->sizes[BIN_RO]); #endif - binfo("[%s] data start addr = 0x%x size = %u\n", bin->bin_name, bin->sections[BIN_DATA], bin->sizes[BIN_DATA]); - binfo("[%s] bss start addr = 0x%x size = %u\n", bin->bin_name, bin->sections[BIN_BSS], bin->sizes[BIN_BSS]); + dbg("[%s] data start addr = 0x%x size = %u\n", bin->bin_name, bin->sections[BIN_DATA], bin->sizes[BIN_DATA]); + dbg("[%s] bss start addr = 0x%x size = %u\n", bin->bin_name, bin->sections[BIN_BSS], bin->sizes[BIN_BSS]); return pid; diff --git a/os/binfmt/binfmt_pgtable.c b/os/binfmt/binfmt_pgtable.c new file mode 100644 index 0000000000..394ed2ab87 --- /dev/null +++ b/os/binfmt/binfmt_pgtable.c @@ -0,0 +1,123 @@ +/**************************************************************************** + * + * Copyright 2019 Samsung Electronics All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the License. 
+ * + ****************************************************************************/ +/**************************************************************************** + * Included Files + ****************************************************************************/ + +#include + +#include +#include + +#include +#include +#include +#include + +#include "binfmt.h" + +#if defined(CONFIG_BINFMT_ENABLE) && defined(CONFIG_ARCH_USE_MMU) + +/**************************************************************************** + * Private Functions + ****************************************************************************/ +/**************************************************************************** + * Public Functions + ****************************************************************************/ +/**************************************************************************** + * Name: binfmt_setup_app_pgtable + * + * Description: + * + * + * Returned Value: + * Zero (OK) is returned on success; Otherwise a negated errno value. + * + ****************************************************************************/ + +void binfmt_setup_app_pgtable(struct binary_s *binp) +{ + uint32_t *l1tbl = 0; + bool islibrary = false; + + dbg("Start mapping regions to mmu\n"); + // Check if this is common binary. +#ifdef CONFIG_SUPPORT_COMMON_BINARY + if (binp->islibrary) { + // Yes. We will update the kernel page tables for common binary. + l1tbl = mmu_get_os_l1_pgtbl(); + islibrary = true; + } else +#endif + { + // No. We will create separate page tables for the app. + + // Allocate L1 page table for app. + l1tbl = mmu_allocate_app_l1_pgtbl(binp->binary_idx); + + dbg("p1\n"); + // Loop through the L1 page table. + // Copy kernel L1 page table to app page table. + // If the entry is pointing to a L2 page table + // Allocate L2 page table for app. + // Copy entries from kernel to app L2 table. + // Update the L2 page table address in L2 table. 
+ mmu_update_app_l1_pgtbl_ospgtbl(l1tbl); + + dbg("p2\n"); + // Set the pointer of the L1 pgtbl into the tcb so that it is + // passed on to all the tasks in the app. + sched_self()->pgtbl = l1tbl; + } + + dbg("l1_pgtbl = 0x%08x\n", l1tbl); + + // We will create page table entries such that we minimize the total + // number of entries by allocating the largest possible memory region + // for each entry. + + // Get the start and end address of the memory region. + // Map the region to the page tables. +#ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME + /* Configure text section as RO and executable region */ + mmu_map_app_region(binp->binary_idx, l1tbl, binp->sections[BIN_TEXT], binp->sizes[BIN_TEXT], true, true, islibrary); + /* Configure ro section as RO and non-executable region */ + mmu_map_app_region(binp->binary_idx, l1tbl, binp->sections[BIN_RO], binp->sizes[BIN_RO], true, false, islibrary); + /* Complete RAM partition will be configured as RW region */ + mmu_map_app_region(binp->binary_idx, l1tbl, binp->sections[BIN_DATA], binp->ramsize, false, false, islibrary); +#else + /* Complete RAM partition will be configured as RW region */ + mmu_map_app_region(binp->binary_idx, l1tbl, binp->ramstart, binp->ramsize, false, true, islibrary); +#endif + + dbg("Finished mapping regions to mmu\n"); +} + + +void binfmt_invalidate_app_regions(struct binary_s *binp) +{ +#ifdef CONFIG_OPTIMIZE_APP_RELOAD_TIME +// mmu_invalidate_region(binp->sections[BIN_TEXT], binp->sizes[BIN_TEXT]); + mmu_invalidate_region(binp->sections[BIN_RO], binp->sizes[BIN_RO]); + mmu_invalidate_region(binp->sections[BIN_DATA], binp->ramsize); +#else + mmu_invalidate_region(binp->ramstart, binp->ramsize); +#endif +} + +#endif //defined(CONFIG_BINFMT_ENABLE) && defined(CONFIG_ARCH_USE_MMU) diff --git a/os/include/tinyara/mmu.h b/os/include/tinyara/mmu.h index 603a8be289..a910052e7f 100644 --- a/os/include/tinyara/mmu.h +++ b/os/include/tinyara/mmu.h @@ -42,5 +42,10 @@ 
/******************************************************************************** * Public Function Prototypes ********************************************************************************/ +uint32_t *mmu_get_os_l1_pgtbl(); +uint32_t *mmu_allocate_app_l1_pgtbl(int app_id); +uint32_t *mmu_allocate_app_l2_pgtbl(int app_id, int l2_idx); +void mmu_update_app_l1_pgtbl_ospgtbl(uint32_t *app_l1_pgtbl); +void mmu_map_app_region(int app_id, uint32_t *l1_pgtbl, uint32_t start, uint32_t size, bool ro, bool exec, bool global); #endif diff --git a/os/include/tinyara/sched.h b/os/include/tinyara/sched.h index d413edffa7..4cfcf5bfe8 100644 --- a/os/include/tinyara/sched.h +++ b/os/include/tinyara/sched.h @@ -639,6 +639,11 @@ struct tcb_s { uint32_t uheap; /* User heap object pointer */ #ifdef CONFIG_APP_BINARY_SEPARATION uint32_t uspace; /* User space object for app binary */ + uint32_t app_id; /* Indicates app id of the task */ + +#ifdef CONFIG_ARCH_USE_MMU + uint32_t *pgtbl; /* Pointer to L1 page table of app */ +#endif #ifdef CONFIG_ARM_MPU /* MPU register values for loadable apps only */ uint32_t mpu_regs[MPU_REG_NUMBER * NUM_APP_REGIONS]; /* MPU for apps is configured during loading and disabled in task_terminate */ @@ -649,9 +654,6 @@ struct tcb_s { uint32_t stack_mpu_regs[MPU_REG_NUMBER]; /* MPU for stack is configured during stack creation and disabled at stack release */ #endif -#ifdef CONFIG_SUPPORT_COMMON_BINARY - uint32_t app_id; /* Indicates app id of the task and used to index into umm_heap_table */ -#endif #ifdef CONFIG_ARMV8M_TRUSTZONE volatile TZ_ModuleId_t tz_context; diff --git a/os/kernel/init/os_start.c b/os/kernel/init/os_start.c index e56300d238..6ea4398c98 100644 --- a/os/kernel/init/os_start.c +++ b/os/kernel/init/os_start.c @@ -79,6 +79,9 @@ #ifdef CONFIG_DRIVERS_OS_API_TEST #include #endif +#if defined(CONFIG_APP_BINARY_SEPARATION) && defined(CONFIG_ARCH_USE_MMU) +#include +#endif #include "sched/sched.h" #include "signal/signal.h" @@ -344,6 +347,11 
@@ void os_start(void) g_idletcb.cmn.entry.main = (main_t)os_start; g_idletcb.cmn.flags = TCB_FLAG_TTYPE_KERNEL; +#if defined(CONFIG_APP_BINARY_SEPARATION) && defined(CONFIG_ARCH_USE_MMU) + g_idletcb.cmn.app_id = 0; + g_idletcb.cmn.pgtbl = mmu_get_os_l1_pgtbl(); + lldbg("Kernel L1 Pgtbl = 0x%08x\n", g_idletcb.cmn.pgtbl); +#endif /* Set the IDLE task name */ #if CONFIG_TASK_NAME_SIZE > 0 diff --git a/os/kernel/task/task_setup.c b/os/kernel/task/task_setup.c index ec992ab5b5..ce202d333f 100644 --- a/os/kernel/task/task_setup.c +++ b/os/kernel/task/task_setup.c @@ -458,10 +458,10 @@ static int thread_schedsetup(FAR struct tcb_s *tcb, int priority, start_t start, rtcb = this_task(); tcb->uspace = rtcb->uspace; tcb->uheap = rtcb->uheap; -#ifdef CONFIG_SUPPORT_COMMON_BINARY tcb->app_id = rtcb->app_id; +#ifdef CONFIG_ARCH_USE_MMU + tcb->pgtbl = rtcb->pgtbl; #endif - /* Copy the MPU register values from parent to child task */ #ifdef CONFIG_ARM_MPU int i = 0; From 54ac696cdbb63506871fef2ed6c061923cfeec8f Mon Sep 17 00:00:00 2001 From: Abhishek Akkabathula Date: Thu, 24 Aug 2023 14:45:03 +0530 Subject: [PATCH 3/3] arm/src/armv7a: fix hang issue with mmu enabled. Set the app pages as global (not using asid), correct the flags while setting ttbr0, populate the l2 page table completely. missing items : 1. indexing of l2 page tables might be wrong, currently l2_idx is 0 2. with current implementation, heap end is after page tables, need to adjust it properly. 
Signed-off-by: Abhishek Akkabathula --- os/arch/arm/src/armv7-a/arm_mmu.c | 27 +++++++++++++++++++------ os/arch/arm/src/armv7-a/mmu.h | 8 ++++---- os/arch/arm/src/common/up_restoretask.c | 2 +- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/os/arch/arm/src/armv7-a/arm_mmu.c b/os/arch/arm/src/armv7-a/arm_mmu.c index 9b4a0b8540..785e88cb2b 100644 --- a/os/arch/arm/src/armv7-a/arm_mmu.c +++ b/os/arch/arm/src/armv7-a/arm_mmu.c @@ -76,7 +76,7 @@ static void mmu_set_flags(uint32_t *val, bool ro, bool exec, uint8_t isL1, uint8 } if (!isGlobal) { - *val |= PTE_NG; + // *val |= PTE_NG; disable non global bit for now, will check later if required to use asid.. } } } @@ -441,7 +441,21 @@ void mmu_map_app_region(int app_id, uint32_t *l1_pgtbl, uint32_t start, uint32_t // Yes. Allocate L2 page table for app. l2_pgtbl = mmu_allocate_app_l2_pgtbl(app_id, l2_idx++); + /* fill the newly allocated l2 page table with default kernel flags */ + uint32_t l2_start = start & PMD_SECT_PADDR_MASK; + lldbg("start address %x l2 page\n", l2_start); + for (int i = 0; i < L2_PGTBL_NENTRIES; i++) { + l2_pgtbl[i] = l2_start | (1<<10) | PTE_AP_RW1 | PTE_TYPE_SMALL | PMD_CACHEABLE; + lldbg("l2[%d] = %x\n", i, l2_pgtbl[i]); + l2_start += 0x1000; + } + lldbg("end address %x l2 page\n", l2_start); + val = l2_pgtbl; + lldbg("setting l2 pg tbl in l1 , %x\n", val); + l1_pgtbl[idx] = (val & PMD_PTE_PADDR_MASK) | PMD_SECT_DOM(0) | PMD_TYPE_PTE | (1 << 3); + lldbg("Allocated L2 pgtbl at 0x%08x\n", l2_pgtbl); + if (global) { // If this update is for the common binary, then it is done // in the kernel page tables and so the cache and tlbs need @@ -468,12 +482,13 @@ void mmu_map_app_region(int app_id, uint32_t *l1_pgtbl, uint32_t start, uint32_t cp15_invalidate_tlb_bymva(start); leave_critical_section(flags); } else { + //already updated above // Update L2 page table address in L1 page table. 
- val = (uint32_t)l2_pgtbl & PMD_PTE_PADDR_MASK; - val |= MMU_L1_PGTABFLAGS; - l1_pgtbl[idx] = val; + //val = (uint32_t)l2_pgtbl & PMD_PTE_PADDR_MASK; + //val |= MMU_L1_PGTABFLAGS; + //l1_pgtbl[idx] = val; // dbg("Set l1 pte at 0x%08x = 0x%08x\n", &l1_pgtbl[idx], val); - cp15_clean_dcache_bymva((uint32_t)&l1_pgtbl[idx]); + //cp15_clean_dcache_bymva((uint32_t)&l1_pgtbl[idx]); } } @@ -494,7 +509,7 @@ void mmu_map_app_region(int app_id, uint32_t *l1_pgtbl, uint32_t start, uint32_t } else { l2_pgtbl[idx] = val; // dbg("Set l2 pte at 0x%08x = 0x%08x\n", &l2_pgtbl[idx], val); - cp15_clean_dcache_bymva((uint32_t)&l2_pgtbl[idx]); + //cp15_clean_dcache_bymva((uint32_t)&l2_pgtbl[idx]); } // Advance the memory region address. diff --git a/os/arch/arm/src/armv7-a/mmu.h b/os/arch/arm/src/armv7-a/mmu.h index 87d8fc0e43..8b4ca71b77 100644 --- a/os/arch/arm/src/armv7-a/mmu.h +++ b/os/arch/arm/src/armv7-a/mmu.h @@ -650,12 +650,12 @@ #define MMU_APP_L1_RW (PMD_TYPE_SECT | PMD_SECT_AP_RW01 | PMD_CACHEABLE | \ PMD_SECT_DOM(0) | PMD_SECT_XN | PMD_SECT_NS) #endif -#define MMU_APP_L2_RO (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_R01 | PTE_SMALL_XN) -#define MMU_APP_L2_ROX (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_R01) +#define MMU_APP_L2_RO (PTE_TYPE_SMALL | PMD_CACHEABLE | PTE_AP_R01 | PTE_SMALL_XN) +#define MMU_APP_L2_ROX (PTE_TYPE_SMALL | PMD_CACHEABLE | PTE_AP_R01) #ifdef CONFIG_SMP -#define MMU_APP_L2_RW (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW01 | PTE_SMALL_XN | PTE_S) +#define MMU_APP_L2_RW (PTE_TYPE_SMALL | PMD_CACHEABLE | PTE_AP_RW01 | PTE_SMALL_XN | PTE_S) #else -#define MMU_APP_L2_RW (PTE_TYPE_SMALL | PTE_WRITE_BACK | PTE_AP_RW01 | PTE_SMALL_XN) +#define MMU_APP_L2_RW (PTE_TYPE_SMALL | PMD_CACHEABLE | PTE_AP_RW01 | PTE_SMALL_XN) #endif #ifdef CONFIG_SMP diff --git a/os/arch/arm/src/common/up_restoretask.c b/os/arch/arm/src/common/up_restoretask.c index e0783381f1..fc9e46c272 100644 --- a/os/arch/arm/src/common/up_restoretask.c +++ b/os/arch/arm/src/common/up_restoretask.c 
@@ -93,7 +93,7 @@ void up_restoretask(struct tcb_s *tcb) irqstate_t flags; flags = enter_critical_section(); lldbg("Write TTBR = 0x%08x\n", tcb->pgtbl); - cp15_wrttb((uint32_t)tcb->pgtbl); + cp15_wrttb((uint32_t)tcb->pgtbl | TTBR0_RGN_WBWA | TTBR0_IRGN0); cp15_invalidate_tlbs(); if (tcb->app_id) {