From e7093d0f681ea3e160fe94eb26d34a6643cd247b Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 13:40:49 +0200 Subject: [PATCH 01/20] mk/clang.mk: -Wno-gnu-alignof-expression Add -Wno-gnu-alignof-expression to the warnings flag for Clang in order to avoid warnings like: '_Alignof' applied to an expression is a GNU extension [-Werror,-Wgnu-alignof-expression] when alignof() is applied on an expression like dereferencing a pointer to get the alignment of type. Signed-off-by: Jens Wiklander Reviewed-by: Jerome Forissier Reviewed-by: Etienne Carriere --- mk/clang.mk | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mk/clang.mk b/mk/clang.mk index a045beee848..d08b26e6f95 100644 --- a/mk/clang.mk +++ b/mk/clang.mk @@ -26,7 +26,8 @@ nostdinc$(sm) := -nostdinc -isystem $(shell $(CC$(sm)) \ -print-file-name=include 2> /dev/null) comp-cflags-warns-clang := -Wno-language-extension-token \ - -Wno-gnu-zero-variadic-macro-arguments + -Wno-gnu-zero-variadic-macro-arguments \ + -Wno-gnu-alignof-expression # Note, use the compiler runtime library (libclang_rt.builtins.*.a) instead of # libgcc for clang From 72ee3535b15439e55f7f35e92a5eb23b87a840d9 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 12:43:36 +0200 Subject: [PATCH 02/20] core: arm64: increase thread stack size for debug Increase STACK_THREAD_SIZE when CFG_CORE_DEBUG_CHECK_STACKS=y. Signed-off-by: Jens Wiklander Reviewed-by: Jerome Forissier Reviewed-by: Etienne Carriere --- core/arch/arm/include/kernel/thread_private_arch.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/arch/arm/include/kernel/thread_private_arch.h b/core/arch/arm/include/kernel/thread_private_arch.h index 2986a3f0392..11a32a17b82 100644 --- a/core/arch/arm/include/kernel/thread_private_arch.h +++ b/core/arch/arm/include/kernel/thread_private_arch.h @@ -43,7 +43,7 @@ #else #define STACK_TMP_SIZE (2048 + STACK_TMP_OFFS + CFG_STACK_TMP_EXTRA) #endif -#if defined(CFG_CORE_SANITIZE_KADDRESS) +#if defined(CFG_CORE_SANITIZE_KADDRESS) || defined(CFG_CORE_DEBUG_CHECK_STACKS) #define STACK_THREAD_SIZE (10240 + CFG_STACK_THREAD_EXTRA) #else #define STACK_THREAD_SIZE (8192 + CFG_STACK_THREAD_EXTRA) From e0882e7e7c17568316852b6c75e808ad081d4cf8 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:21 +0200 Subject: [PATCH 03/20] core: mm: add vaddr_to_phys() Add a wrapper function for virt_to_phys() using vaddr_t instead of a void pointer. Signed-off-by: Jens Wiklander Reviewed-by: Jerome Forissier Reviewed-by: Etienne Carriere --- core/include/mm/core_memprot.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/core/include/mm/core_memprot.h b/core/include/mm/core_memprot.h index 32e3431eae9..fd07e077660 100644 --- a/core/include/mm/core_memprot.h +++ b/core/include/mm/core_memprot.h @@ -90,6 +90,11 @@ void *phys_to_virt_io(paddr_t pa, size_t len); */ paddr_t virt_to_phys(void *va); +static inline paddr_t vaddr_to_phys(vaddr_t va) +{ + return virt_to_phys((void *)va); +} + /* * Return runtime usable address, irrespective of whether * the MMU is enabled or not. In case of MMU enabled also will be performed From e85425ba1d1d9a7ecb826caefcbb5460c066b8c9 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:22 +0200 Subject: [PATCH 04/20] core: remove CORE_MEM_TA_RAM The buffer attribute CORE_MEM_TA_RAM isn't used to query the status of a buffer anywhere. So remove the attribute to allow future simplifications. 
Signed-off-by: Jens Wiklander Reviewed-by: Jerome Forissier Reviewed-by: Etienne Carriere --- core/include/mm/core_memprot.h | 1 - core/mm/core_mmu.c | 5 ----- core/mm/mobj.c | 3 +-- 3 files changed, 1 insertion(+), 8 deletions(-) diff --git a/core/include/mm/core_memprot.h b/core/include/mm/core_memprot.h index fd07e077660..2f275fab80d 100644 --- a/core/include/mm/core_memprot.h +++ b/core/include/mm/core_memprot.h @@ -28,7 +28,6 @@ enum buf_is_attr { CORE_MEM_NON_SEC, CORE_MEM_SEC, CORE_MEM_TEE_RAM, - CORE_MEM_TA_RAM, CORE_MEM_SDP_MEM, CORE_MEM_REG_SHM, }; diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index 9dc1e9d6b76..a37d15af8fb 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -1597,8 +1597,6 @@ bool core_mmu_mattr_is_ok(uint32_t mattr) */ bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len) { - paddr_t ta_base = 0; - size_t ta_size = 0; struct tee_mmap_region *map; /* Empty buffers complies with anything */ @@ -1614,9 +1612,6 @@ bool core_pbuf_is(uint32_t attr, paddr_t pbuf, size_t len) case CORE_MEM_TEE_RAM: return core_is_buffer_inside(pbuf, len, TEE_RAM_START, TEE_RAM_PH_SIZE); - case CORE_MEM_TA_RAM: - core_mmu_get_ta_range(&ta_base, &ta_size); - return core_is_buffer_inside(pbuf, len, ta_base, ta_size); #ifdef CFG_CORE_RESERVED_SHM case CORE_MEM_NSEC_SHM: return core_is_buffer_inside(pbuf, len, TEE_SHMEM_START, diff --git a/core/mm/mobj.c b/core/mm/mobj.c index 4f48901330d..50862429755 100644 --- a/core/mm/mobj.c +++ b/core/mm/mobj.c @@ -93,11 +93,10 @@ static bool mobj_phys_matches(struct mobj *mobj, enum buf_is_attr attr) switch (attr) { case CORE_MEM_SEC: return a == CORE_MEM_SEC || a == CORE_MEM_TEE_RAM || - a == CORE_MEM_TA_RAM || a == CORE_MEM_SDP_MEM; + a == CORE_MEM_SDP_MEM; case CORE_MEM_NON_SEC: return a == CORE_MEM_NSEC_SHM; case CORE_MEM_TEE_RAM: - case CORE_MEM_TA_RAM: case CORE_MEM_NSEC_SHM: case CORE_MEM_SDP_MEM: return attr == a; From b8bc7fbc380ec4c8c53daf21d41215cdfdf528ba Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:23 +0200 Subject: [PATCH 05/20] core: add VCORE_FREE_{PA,SZ,END_PA} Add VCORE_FREE_{PA,SZ,END_PA} defines to identify the unused and free memory range at the end of TEE_RAM_START..(TEE_RAM_START + TEE_RAM_VA_SIZE). VCORE_FREE_SZ is 0 in a pager configuration since all the memory is used by the pager. The VCORE_FREE range is excluded from the TEE_RAM_RW area for CFG_NS_VIRTUALIZATION=y and instead put in a separate NEX_RAM_RW area. This makes each partition use a bit less memory and leaves the VCORE_FREE range available for the Nexus. The VCORE_FREE range is added to the TEE_RAM_RW area for the normal configuration with CFG_NS_VIRTUALIZATION=n and CFG_WITH_PAGER=n. It's in practice unchanged behaviour in this configuration. Signed-off-by: Jens Wiklander Acked-by: Jerome Forissier Reviewed-by: Etienne Carriere --- core/arch/arm/kernel/kern.ld.S | 15 ++++++++++++--- core/arch/arm/kernel/link_dummy.ld | 3 +++ core/arch/riscv/kernel/kern.ld.S | 10 +++++++++- core/include/kernel/linker.h | 13 +++++++++++++ core/mm/core_mmu.c | 4 ++++ 5 files changed, 41 insertions(+), 4 deletions(-) diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S index 62771009e87..a3a0e17e2fa 100644 --- a/core/arch/arm/kernel/kern.ld.S +++ b/core/arch/arm/kernel/kern.ld.S @@ -279,8 +279,11 @@ SECTIONS . = ALIGN(8); __nozi_stack_end = .; } - -#ifdef CFG_WITH_PAGER +#ifndef CFG_WITH_PAGER + . 
= ALIGN(SMALL_PAGE_SIZE); + __flatmap_free_start = .; + __flatmap_unpg_rw_size = __flatmap_free_start - __flatmap_unpg_rw_start; +#else .heap2 (NOLOAD) : { __heap2_start = .; /* @@ -409,8 +412,8 @@ SECTIONS _end_of_ram = .; #ifndef CFG_WITH_PAGER - __flatmap_unpg_rw_size = _end_of_ram - __flatmap_unpg_rw_start; __get_tee_init_end = .; + __flatmap_free_size = _end_of_ram - __flatmap_free_start; #endif /* @@ -461,6 +464,12 @@ __vcore_unpg_rw_start = __flatmap_unpg_rw_start; __vcore_unpg_rw_size = __flatmap_unpg_rw_size; __vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size; +#ifndef CFG_WITH_PAGER +__vcore_free_start = __flatmap_free_start; +__vcore_free_size = __flatmap_free_size; +__vcore_free_end = __flatmap_free_start + __flatmap_free_size; +#endif + #ifdef CFG_NS_VIRTUALIZATION /* Nexus read-write memory */ __vcore_nex_rw_start = __flatmap_nex_rw_start; diff --git a/core/arch/arm/kernel/link_dummy.ld b/core/arch/arm/kernel/link_dummy.ld index 1393088bd84..cd87c2cf003 100644 --- a/core/arch/arm/kernel/link_dummy.ld +++ b/core/arch/arm/kernel/link_dummy.ld @@ -100,6 +100,9 @@ __vcore_unpg_rw_start = .; __vcore_unpg_rx_end = .; __vcore_unpg_rx_size = .; __vcore_unpg_rx_start = .; +__vcore_free_start = .; +__vcore_free_size = .; +__vcore_free_end = .; PROVIDE(core_v_str = 0); PROVIDE(tee_entry_std = 0); PROVIDE(init_teecore = 0); diff --git a/core/arch/riscv/kernel/kern.ld.S b/core/arch/riscv/kernel/kern.ld.S index a2aade5603e..2c5d8f8f3f1 100644 --- a/core/arch/riscv/kernel/kern.ld.S +++ b/core/arch/riscv/kernel/kern.ld.S @@ -194,6 +194,10 @@ SECTIONS __nozi_stack_end = .; } + . = ALIGN(SMALL_PAGE_SIZE); + __flatmap_free_start = .; + __flatmap_rw_size = __flatmap_free_start - __flatmap_rw_start; + #ifdef CFG_CORE_SANITIZE_KADDRESS . = TEE_RAM_START + (TEE_RAM_VA_SIZE * 8) / 9 - 8; . 
= ALIGN(8); @@ -218,8 +222,8 @@ SECTIONS _end_of_ram = .; - __flatmap_rw_size = _end_of_ram - __flatmap_rw_start; __get_tee_init_end = .; + __flatmap_free_size = _end_of_ram - __flatmap_free_start; /* * These regions will not become a normal part of the dumped @@ -269,6 +273,10 @@ __vcore_unpg_rw_start = __flatmap_rw_start; __vcore_unpg_rw_size = __flatmap_rw_size; __vcore_unpg_rw_end = __vcore_unpg_rw_start + __vcore_unpg_rw_size; +__vcore_free_start = __flatmap_free_start; +__vcore_free_size = __flatmap_free_size; +__vcore_free_end = __flatmap_free_start + __flatmap_free_size; + #ifdef CFG_CORE_SANITIZE_KADDRESS __asan_map_start = (__asan_shadow_start / SMALL_PAGE_SIZE) * SMALL_PAGE_SIZE; diff --git a/core/include/kernel/linker.h b/core/include/kernel/linker.h index 949ffd8cba6..5a2c917ed0e 100644 --- a/core/include/kernel/linker.h +++ b/core/include/kernel/linker.h @@ -46,6 +46,18 @@ extern const uint8_t __extab_end[]; #define VCORE_START_VA ((vaddr_t)__text_start) +#ifndef CFG_WITH_PAGER +#define VCORE_FREE_PA ((unsigned long)__vcore_free_start) +#define VCORE_FREE_SZ ((size_t)(__vcore_free_end - \ + __vcore_free_start)) +#define VCORE_FREE_END_PA ((unsigned long)__vcore_free_end) +#else +/* No VCORE_FREE range in pager configuration since it uses all memory */ +#define VCORE_FREE_PA PADDR_MAX +#define VCORE_FREE_SZ 0 +#define VCORE_FREE_END_PA PADDR_MAX +#endif + #define EMIT_SECTION_INFO_SYMBOLS(section_name) \ extern const uint8_t __vcore_ ## section_name ## _start[]; \ extern const uint8_t __vcore_ ## section_name ## _end[]; \ @@ -57,6 +69,7 @@ EMIT_SECTION_INFO_SYMBOLS(unpg_rw); EMIT_SECTION_INFO_SYMBOLS(nex_rw); EMIT_SECTION_INFO_SYMBOLS(init_ro); EMIT_SECTION_INFO_SYMBOLS(init_rx); +EMIT_SECTION_INFO_SYMBOLS(free); #undef EMIT_SECTION_INFO_SYMBOLS diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index a37d15af8fb..b8f58b710ea 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -1091,9 +1091,13 @@ static void collect_mem_ranges(struct memory_map *mem_map) VCORE_UNPG_RW_SZ); ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA, VCORE_NEX_RW_SZ); + ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_FREE_PA, + VCORE_FREE_SZ); } else { ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA, VCORE_UNPG_RW_SZ); + ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_FREE_PA, + VCORE_FREE_SZ); } if (IS_ENABLED(CFG_WITH_PAGER)) { From 5618fbb3f5e679ca49d29ea02f3cf2202fa5db74 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:24 +0200 Subject: [PATCH 06/20] core: mm: allow unmapping VCORE_FREE Allow unmapping core memory in the VCORE_FREE range when the original boot mapping isn't needed any more. 
Signed-off-by: Jens Wiklander Reviewed-by: Jerome Forissier Reviewed-by: Etienne Carriere --- core/mm/core_mmu.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index b8f58b710ea..cc1623deaf3 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -2057,6 +2057,12 @@ TEE_Result core_mmu_map_contiguous_pages(vaddr_t vstart, paddr_t pstart, return TEE_SUCCESS; } +static bool mem_range_is_in_vcore_free(vaddr_t vstart, size_t num_pages) +{ + return core_is_buffer_inside(vstart, num_pages * SMALL_PAGE_SIZE, + VCORE_FREE_PA, VCORE_FREE_SZ); +} + void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages) { struct core_mmu_table_info tbl_info; @@ -2071,7 +2077,8 @@ void core_mmu_unmap_pages(vaddr_t vstart, size_t num_pages) if (!mm || !va_is_in_map(mm, vstart + num_pages * SMALL_PAGE_SIZE - 1)) panic("VA does not belong to any known mm region"); - if (!core_mmu_is_dynamic_vaspace(mm)) + if (!core_mmu_is_dynamic_vaspace(mm) && + !mem_range_is_in_vcore_free(vstart, num_pages)) panic("Trying to unmap static region"); for (i = 0; i < num_pages; i++, vstart += SMALL_PAGE_SIZE) { From f8c0a29c51da4ea49d1fbfcec7a840b3d5d7de9a Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:26 +0200 Subject: [PATCH 07/20] core: mm: replace MEM_AREA_TA_RAM Replace MEM_AREA_TA_RAM with MEM_AREA_SEC_RAM_OVERALL. All read/write secure memory is covered by MEM_AREA_SEC_RAM_OVERALL, sometimes using an aliased map. But secure read-only or execute core memory is not covered as that would defeat the purpose of CFG_CORE_RWDATA_NOEXEC. Since the partition TA memory isn't accessed via MEM_AREA_TA_RAM any longer, don't map it using the partition specific map. This is needed later where unification of OP-TEE core and physical TA memory is possible. Signed-off-by: Jens Wiklander Acked-by: Etienne Carriere --- core/arch/arm/kernel/boot.c | 10 +-- core/arch/arm/kernel/secure_partition.c | 2 +- core/arch/arm/kernel/virtualization.c | 20 ++---- core/include/mm/core_mmu.h | 3 - core/kernel/ree_fs_ta.c | 2 +- core/mm/core_mmu.c | 84 ++++++++++++++++--------- core/mm/fobj.c | 8 ++- core/mm/pgt_cache.c | 3 +- 8 files changed, 70 insertions(+), 62 deletions(-) diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c index 94380f9df95..f7cbe64e0e8 100644 --- a/core/arch/arm/kernel/boot.c +++ b/core/arch/arm/kernel/boot.c @@ -381,12 +381,8 @@ static TEE_Result mmap_clear_memtag(struct tee_mmap_region *map, void *ptr __unused) { switch (map->type) { - case MEM_AREA_TEE_RAM: - case MEM_AREA_TEE_RAM_RW: case MEM_AREA_NEX_RAM_RO: - case MEM_AREA_NEX_RAM_RW: - case MEM_AREA_TEE_ASAN: - case MEM_AREA_TA_RAM: + case MEM_AREA_SEC_RAM_OVERALL: DMSG("Clearing tags for VA %#"PRIxVA"..%#"PRIxVA, map->va, map->va + map->size - 1); memtag_set_tags((void *)map->va, map->size, 0); @@ -549,8 +545,8 @@ static void init_runtime(unsigned long pageable_part) mm = nex_phys_mem_ta_alloc(pageable_size); assert(mm); - paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM, - pageable_size); + paged_store = phys_to_virt(tee_mm_get_smem(mm), + MEM_AREA_SEC_RAM_OVERALL, pageable_size); /* * Load pageable part in the dedicated allocated area: * - Move pageable non-init part into pageable area. 
Note bootloader diff --git a/core/arch/arm/kernel/secure_partition.c b/core/arch/arm/kernel/secure_partition.c index a995bdec2e8..f1b5bb24910 100644 --- a/core/arch/arm/kernel/secure_partition.c +++ b/core/arch/arm/kernel/secure_partition.c @@ -1871,7 +1871,7 @@ static const struct ts_ops sp_ops = { static TEE_Result process_sp_pkg(uint64_t sp_pkg_pa, TEE_UUID *sp_uuid) { - enum teecore_memtypes mtype = MEM_AREA_TA_RAM; + enum teecore_memtypes mtype = MEM_AREA_SEC_RAM_OVERALL; struct sp_pkg_header *sp_pkg_hdr = NULL; struct fip_sp *sp = NULL; uint64_t sp_fdt_end = 0; diff --git a/core/arch/arm/kernel/virtualization.c b/core/arch/arm/kernel/virtualization.c index ec15c006df5..456e6c040dc 100644 --- a/core/arch/arm/kernel/virtualization.c +++ b/core/arch/arm/kernel/virtualization.c @@ -110,7 +110,7 @@ static size_t get_ta_ram_size(void) } static TEE_Result prepare_memory_map(struct memory_map *mem_map, - paddr_t tee_data, paddr_t ta_ram) + paddr_t tee_data) { struct tee_mmap_region *map = NULL; vaddr_t max_va = 0; @@ -146,18 +146,6 @@ static TEE_Result prepare_memory_map(struct memory_map *mem_map, max_va = map->va + map->size; } - /* Map TA_RAM */ - mem_map->count++; - map = ins_array_elem(mem_map->map, mem_map->count, - sizeof(*mem_map->map), n, NULL); - map->region_size = SMALL_PAGE_SIZE; - map->va = ROUNDUP(max_va, map->region_size); - map->va += (ta_ram - map->va) & CORE_MMU_PGDIR_MASK; - map->pa = ta_ram; - map->size = get_ta_ram_size(); - map->type = MEM_AREA_TA_RAM; - map->attr = core_mmu_type_to_attr(map->type); - DMSG("New map (%08lx):", (vaddr_t)(VCORE_UNPG_RW_PA)); for (n = 0; n < mem_map->count; n++) @@ -241,8 +229,8 @@ static TEE_Result configure_guest_prtn_mem(struct guest_partition *prtn) goto err; } - res = prepare_memory_map(&prtn->mem_map, tee_mm_get_smem(prtn->tee_ram), - tee_mm_get_smem(prtn->ta_ram)); + res = prepare_memory_map(&prtn->mem_map, + tee_mm_get_smem(prtn->tee_ram)); if (res) goto err; @@ -576,7 +564,7 @@ void virt_get_ta_ram(vaddr_t *start, vaddr_t *end) struct guest_partition *prtn = get_current_prtn(); *start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram), - MEM_AREA_TA_RAM, + MEM_AREA_SEC_RAM_OVERALL, tee_mm_get_bytes(prtn->ta_ram)); *end = *start + tee_mm_get_bytes(prtn->ta_ram); } diff --git a/core/include/mm/core_mmu.h b/core/include/mm/core_mmu.h index 00b47b24363..35f8d9f3a3b 100644 --- a/core/include/mm/core_mmu.h +++ b/core/include/mm/core_mmu.h @@ -68,7 +68,6 @@ * MEM_AREA_TEE_COHERENT: teecore coherent RAM (secure, reserved to TEE) * MEM_AREA_TEE_ASAN: core address sanitizer RAM (secure, reserved to TEE) * MEM_AREA_IDENTITY_MAP_RX: core identity mapped r/o executable memory (secure) - * MEM_AREA_TA_RAM: Secure RAM where teecore loads/exec TA instances. * MEM_AREA_NSEC_SHM: NonSecure shared RAM between NSec and TEE. * MEM_AREA_NEX_NSEC_SHM: nexus non-secure shared RAM between NSec and TEE. 
* MEM_AREA_RAM_NSEC: NonSecure RAM storing data @@ -98,7 +97,6 @@ enum teecore_memtypes { MEM_AREA_TEE_COHERENT, MEM_AREA_TEE_ASAN, MEM_AREA_IDENTITY_MAP_RX, - MEM_AREA_TA_RAM, MEM_AREA_NSEC_SHM, MEM_AREA_NEX_NSEC_SHM, MEM_AREA_RAM_NSEC, @@ -133,7 +131,6 @@ static inline const char *teecore_memtype_name(enum teecore_memtypes type) [MEM_AREA_TEE_ASAN] = "TEE_ASAN", [MEM_AREA_IDENTITY_MAP_RX] = "IDENTITY_MAP_RX", [MEM_AREA_TEE_COHERENT] = "TEE_COHERENT", - [MEM_AREA_TA_RAM] = "TA_RAM", [MEM_AREA_NSEC_SHM] = "NSEC_SHM", [MEM_AREA_NEX_NSEC_SHM] = "NEX_NSEC_SHM", [MEM_AREA_RAM_NSEC] = "RAM_NSEC", diff --git a/core/kernel/ree_fs_ta.c b/core/kernel/ree_fs_ta.c index e816ef86181..a8ecb7c988f 100644 --- a/core/kernel/ree_fs_ta.c +++ b/core/kernel/ree_fs_ta.c @@ -726,7 +726,7 @@ static TEE_Result buf_ta_open(const TEE_UUID *uuid, goto err; } handle->buf = phys_to_virt(tee_mm_get_smem(handle->mm), - MEM_AREA_TA_RAM, handle->ta_size); + MEM_AREA_SEC_RAM_OVERALL, handle->ta_size); if (!handle->buf) { res = TEE_ERROR_OUT_OF_MEMORY; goto err; diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index cc1623deaf3..ae4ee53502e 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -66,12 +66,15 @@ static struct tee_mmap_region static_mmap_regions[CFG_MMAP_REGIONS #if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE) + 1 #endif - + 1] __nex_bss; + + 4] __nex_bss; static struct memory_map static_memory_map __nex_data = { .map = static_mmap_regions, .alloc_count = ARRAY_SIZE(static_mmap_regions), }; +/* Offset of the first TEE RAM mapping from start of secure RAM */ +static size_t tee_ram_initial_offs __nex_bss; + /* Define the platform's memory layout. */ struct memaccess_area { paddr_t paddr; @@ -826,8 +829,6 @@ uint32_t core_mmu_type_to_attr(enum teecore_memtypes t) return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; case MEM_AREA_TEE_COHERENT: return attr | TEE_MATTR_SECURE | TEE_MATTR_PRWX | noncache; - case MEM_AREA_TA_RAM: - return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; case MEM_AREA_NSEC_SHM: case MEM_AREA_NEX_NSEC_SHM: return attr | TEE_MATTR_PRW | cached; @@ -852,8 +853,9 @@ uint32_t core_mmu_type_to_attr(enum teecore_memtypes t) case MEM_AREA_RAM_NSEC: return attr | TEE_MATTR_PRW | cached; case MEM_AREA_RAM_SEC: - case MEM_AREA_SEC_RAM_OVERALL: return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | cached; + case MEM_AREA_SEC_RAM_OVERALL: + return attr | TEE_MATTR_SECURE | TEE_MATTR_PRW | tagged; case MEM_AREA_ROM_SEC: return attr | TEE_MATTR_SECURE | TEE_MATTR_PR | cached; case MEM_AREA_RES_VASPACE: @@ -1074,13 +1076,24 @@ static void collect_mem_ranges(struct memory_map *mem_map) { const struct core_mmu_phys_mem *mem = NULL; vaddr_t ram_start = secure_only[0].paddr; + size_t n = 0; #define ADD_PHYS_MEM(_type, _addr, _size) \ add_phys_mem(mem_map, #_addr, (_type), (_addr), (_size)) if (IS_ENABLED(CFG_CORE_RWDATA_NOEXEC)) { - ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, ram_start, + paddr_t next_pa = 0; + + /* + * Read-only and read-execute physical memory areas must + * not be mapped by MEM_AREA_SEC_RAM_OVERALL, but all the + * read/write should. 
+ */ + ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, ram_start, VCORE_UNPG_RX_PA - ram_start); + assert(VCORE_UNPG_RX_PA >= ram_start); + tee_ram_initial_offs = VCORE_UNPG_RX_PA - ram_start; + DMSG("tee_ram_initial_offs %#zx", tee_ram_initial_offs); ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RX, VCORE_UNPG_RX_PA, VCORE_UNPG_RX_SZ); ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RO, VCORE_UNPG_RO_PA, @@ -1089,15 +1102,30 @@ static void collect_mem_ranges(struct memory_map *mem_map) if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RO, VCORE_UNPG_RW_PA, VCORE_UNPG_RW_SZ); + ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA, + VCORE_UNPG_RW_SZ); + ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_NEX_RW_PA, VCORE_NEX_RW_SZ); + ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_NEX_RW_PA, + VCORE_NEX_RW_SZ); + ADD_PHYS_MEM(MEM_AREA_NEX_RAM_RW, VCORE_FREE_PA, VCORE_FREE_SZ); + ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA, + VCORE_FREE_SZ); + next_pa = VCORE_FREE_PA + VCORE_FREE_SZ; } else { ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_UNPG_RW_PA, VCORE_UNPG_RW_SZ); + ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_UNPG_RW_PA, + VCORE_UNPG_RW_SZ); + ADD_PHYS_MEM(MEM_AREA_TEE_RAM_RW, VCORE_FREE_PA, VCORE_FREE_SZ); + ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, VCORE_FREE_PA, + VCORE_FREE_SZ); + next_pa = VCORE_FREE_PA + VCORE_FREE_SZ; } if (IS_ENABLED(CFG_WITH_PAGER)) { @@ -1105,25 +1133,20 @@ static void collect_mem_ranges(struct memory_map *mem_map) VCORE_INIT_RX_SZ); ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA, VCORE_INIT_RO_SZ); + } else { + ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, next_pa, + secure_only[0].paddr + + secure_only[0].size - next_pa); } } else { ADD_PHYS_MEM(MEM_AREA_TEE_RAM, TEE_RAM_START, TEE_RAM_PH_SIZE); + ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[n].paddr, + secure_only[0].size); } - if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { - ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, TRUSTED_DRAM_BASE, - TRUSTED_DRAM_SIZE); - } else { - /* - * Every guest will have own TA RAM if virtualization - * support is enabled. - */ - paddr_t ta_base = 0; - size_t ta_size = 0; - - core_mmu_get_ta_range(&ta_base, &ta_size); - ADD_PHYS_MEM(MEM_AREA_TA_RAM, ta_base, ta_size); - } + for (n = 1; n < ARRAY_SIZE(secure_only); n++) + ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, secure_only[n].paddr, + secure_only[n].size); if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS) && IS_ENABLED(CFG_WITH_PAGER)) { @@ -1219,7 +1242,7 @@ static bool assign_mem_va_dir(vaddr_t tee_ram_va, struct memory_map *mem_map, * since it handles virtual memory which covers the part of the ELF * that cannot fit directly into memory. 
*/ - va = tee_ram_va; + va = tee_ram_va + tee_ram_initial_offs; for (n = 0; n < mem_map->count; n++) { map = mem_map->map + n; if (map_is_tee_ram(map) || @@ -1485,15 +1508,14 @@ static void check_mem_map(struct memory_map *mem_map) if (!pbuf_is_inside(secure_only, m->pa, m->size)) panic("TEE_RAM can't fit in secure_only"); break; - case MEM_AREA_TA_RAM: + case MEM_AREA_SEC_RAM_OVERALL: if (!pbuf_is_inside(secure_only, m->pa, m->size)) - panic("TA_RAM can't fit in secure_only"); + panic("SEC_RAM_OVERALL can't fit in secure_only"); break; case MEM_AREA_NSEC_SHM: if (!pbuf_is_inside(nsec_shared, m->pa, m->size)) panic("NS_SHM can't fit in nsec_shared"); break; - case MEM_AREA_SEC_RAM_OVERALL: case MEM_AREA_TEE_COHERENT: case MEM_AREA_TEE_ASAN: case MEM_AREA_IO_SEC: @@ -2607,8 +2629,6 @@ early_init(teecore_init_pub_ram); void core_mmu_init_phys_mem(void) { - vaddr_t s = 0; - vaddr_t e = 0; paddr_t ps = 0; size_t size = 0; @@ -2616,13 +2636,17 @@ void core_mmu_init_phys_mem(void) * Get virtual addr/size of RAM where TA are loaded/executedNSec * shared mem allocated from teecore. */ - if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) + if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { + vaddr_t s = 0; + vaddr_t e = 0; + virt_get_ta_ram(&s, &e); - else - core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e); + ps = virt_to_phys((void *)s); + size = e - s; - ps = virt_to_phys((void *)s); - size = e - s; + } else { + core_mmu_get_ta_range(&ps, &size); + } phys_mem_init(0, 0, ps, size); } diff --git a/core/mm/fobj.c b/core/mm/fobj.c index 99c0a65965d..5ab61cb75e5 100644 --- a/core/mm/fobj.c +++ b/core/mm/fobj.c @@ -270,7 +270,8 @@ static struct fobj *rwp_unpaged_iv_alloc(unsigned int num_pages) mm = nex_phys_mem_ta_alloc(size); if (!mm) goto err_free_state; - rwp->store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM, size); + rwp->store = phys_to_virt(tee_mm_get_smem(mm), + MEM_AREA_SEC_RAM_OVERALL, size); assert(rwp->store); fobj_init(&rwp->fobj, &ops_rwp_unpaged_iv, num_pages); @@ -395,7 +396,7 @@ static TEE_Result rwp_init(void) assert(rwp_state_base); rwp_store_base = phys_to_virt(nex_phys_mem_get_ta_base(), - MEM_AREA_TA_RAM, ta_size); + MEM_AREA_SEC_RAM_OVERALL, ta_size); assert(rwp_store_base); return TEE_SUCCESS; @@ -766,7 +767,8 @@ struct fobj *fobj_sec_mem_alloc(unsigned int num_pages) if (!f->mm) goto err; - va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_TA_RAM, size); + va = phys_to_virt(tee_mm_get_smem(f->mm), MEM_AREA_SEC_RAM_OVERALL, + size); if (!va) goto err; diff --git a/core/mm/pgt_cache.c b/core/mm/pgt_cache.c index b6d6597b36d..cd8f11fbc4c 100644 --- a/core/mm/pgt_cache.c +++ b/core/mm/pgt_cache.c @@ -112,7 +112,8 @@ static struct pgt_parent *alloc_pgt_parent(void) free(parent); return NULL; } - tbl = phys_to_virt(tee_mm_get_smem(parent->mm), MEM_AREA_TA_RAM, + tbl = phys_to_virt(tee_mm_get_smem(parent->mm), + MEM_AREA_SEC_RAM_OVERALL, PGT_PARENT_SIZE); assert(tbl); /* "can't fail" */ From 59f1f1e3e876d116cbf96cbf5beafec2ebdc789b Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:27 +0200 Subject: [PATCH 08/20] core: mm: unify secure core and TA memory In configurations where secure core and TA memory is allocated from the same contiguous physical memory block, carve out the memory needed by OP-TEE core and make the rest available as TA memory. This is needed by later patches where more core memory is allocated as needed from the pool of TA memory. 
Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/mm/core_mmu.c | 55 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index ae4ee53502e..e9530866eff 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -2627,6 +2627,15 @@ static TEE_Result teecore_init_pub_ram(void) early_init(teecore_init_pub_ram); #endif /*CFG_CORE_RESERVED_SHM*/ +static void __maybe_unused carve_out_core_mem(paddr_t pa, paddr_t end_pa) +{ + tee_mm_entry_t *mm __maybe_unused = NULL; + + DMSG("%#"PRIxPA" .. %#"PRIxPA, pa, end_pa); + mm = phys_mem_alloc2(pa, end_pa - pa); + assert(mm); +} + void core_mmu_init_phys_mem(void) { paddr_t ps = 0; @@ -2643,10 +2652,48 @@ void core_mmu_init_phys_mem(void) virt_get_ta_ram(&s, &e); ps = virt_to_phys((void *)s); size = e - s; - + phys_mem_init(0, 0, ps, size); } else { - core_mmu_get_ta_range(&ps, &size); - } +#ifdef CFG_WITH_PAGER + /* + * The pager uses all core memory so there's no need to add + * it to the pool. + */ + static_assert(ARRAY_SIZE(secure_only) == 2); + phys_mem_init(0, 0, secure_only[1].paddr, secure_only[1].size); +#else /*!CFG_WITH_PAGER*/ + size_t align = BIT(CORE_MMU_USER_CODE_SHIFT); + paddr_t end_pa = 0; + paddr_t pa = 0; + + static_assert(ARRAY_SIZE(secure_only) <= 2); + if (ARRAY_SIZE(secure_only) == 2) { + ps = secure_only[1].paddr; + size = secure_only[1].size; + } + phys_mem_init(secure_only[0].paddr, secure_only[0].size, + ps, size); + + /* + * The VCORE macros are relocatable so we need to translate + * the addresses now that the MMU is enabled. + */ + end_pa = vaddr_to_phys(ROUNDUP(VCORE_FREE_END_PA, + align) - 1) + 1; + /* Carve out the part used by OP-TEE core */ + carve_out_core_mem(vaddr_to_phys(VCORE_UNPG_RX_PA), end_pa); + if (IS_ENABLED(CFG_CORE_SANITIZE_KADDRESS)) { + pa = vaddr_to_phys(ROUNDUP(ASAN_MAP_PA, align)); + carve_out_core_mem(pa, pa + ASAN_MAP_SZ); + } - phys_mem_init(0, 0, ps, size); + /* Carve out test SDP memory */ +#ifdef TEE_SDP_TEST_MEM_BASE + if (TEE_SDP_TEST_MEM_SIZE) { + pa = vaddr_to_phys(TEE_SDP_TEST_MEM_BASE); + carve_out_core_mem(pa, pa + TEE_SDP_TEST_MEM_SIZE); + } +#endif +#endif /*!CFG_WITH_PAGER*/ + } } From c0791c60f4c2ba2d63d18bb4910ee66a88966adc Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:28 +0200 Subject: [PATCH 09/20] core: virt: phys_mem_core_alloc() use both pools With CFG_NS_VIRTUALIZATION=y let phys_mem_core_alloc() allocate from both the core_pool and ta_pool since both pools keep equally secure memory. This is needed in later patches when some translation tables are dynamically allocated from spare physical core memory. Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/mm/phys_mem.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/mm/phys_mem.c b/core/mm/phys_mem.c index 4551c9723c9..25b522fd4c8 100644 --- a/core/mm/phys_mem.c +++ b/core/mm/phys_mem.c @@ -194,7 +194,12 @@ tee_mm_entry_t *phys_mem_mm_find(paddr_t addr) tee_mm_entry_t *phys_mem_core_alloc(size_t size) { - return mm_alloc(core_pool, NULL, size); + /* + * With CFG_NS_VIRTUALIZATION all memory is equally secure so we + * should normally be able to use one pool only, but if we have two + * make sure to use both even for core allocations. 
+ */ + return mm_alloc(core_pool, ta_pool, size); } tee_mm_entry_t *phys_mem_ta_alloc(size_t size) From 80ccdbfc28e02d0da3824d35a7ddda679f312229 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Sat, 14 Sep 2024 19:19:45 +0200 Subject: [PATCH 10/20] core: arm: core_mmu_v7.c: increase MAX_XLAT_TABLES by 2 Increase MAX_XLAT_TABLES by 2 to be able to map all TEE memory with 4k pages. Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/mm/core_mmu_v7.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/arch/arm/mm/core_mmu_v7.c b/core/arch/arm/mm/core_mmu_v7.c index 8d4accaa04f..5c688476d6c 100644 --- a/core/arch/arm/mm/core_mmu_v7.c +++ b/core/arch/arm/mm/core_mmu_v7.c @@ -188,7 +188,7 @@ #else # define XLAT_TABLE_ASLR_EXTRA 0 #endif -#define MAX_XLAT_TABLES (4 + XLAT_TABLE_ASLR_EXTRA) +#define MAX_XLAT_TABLES (6 + XLAT_TABLE_ASLR_EXTRA) #endif /*!MAX_XLAT_TABLES*/ enum desc_type { From 9130e5b7e0be217118a79c32e9bad262dc32b8c9 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 17:08:47 +0200 Subject: [PATCH 11/20] core: mm: map memory using requested block size TEE memory is always supposed to be mapped with 4k pages for maximum flexibility, but can_map_at_level() doesn't check the requested block size for a region, so fix that. However, assign_mem_granularity() assigns smaller than necessary block sizes on page aligned regions, so fix that by only requesting 4k granularity for TEE memory and PGDIR granularity for the rest. This is needed in later patches where some TEE memory is unmapped. Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/mm/core_mmu.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index e9530866eff..49ca2a15fe2 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -909,14 +909,12 @@ static void dump_mmap_table(struct memory_map *mem_map) size_t n = 0; for (n = 0; n < mem_map->count; n++) { - struct tee_mmap_region *map = mem_map->map + n; - vaddr_t __maybe_unused vstart; + struct tee_mmap_region *map __maybe_unused = mem_map->map + n; - vstart = map->va + ((vaddr_t)map->pa & (map->region_size - 1)); DMSG("type %-12s va 0x%08" PRIxVA "..0x%08" PRIxVA " pa 0x%08" PRIxPA "..0x%08" PRIxPA " size 0x%08zx (%s)", - teecore_memtype_name(map->type), vstart, - vstart + map->size - 1, map->pa, + teecore_memtype_name(map->type), map->va, + map->va + map->size - 1, map->pa, (paddr_t)(map->pa + map->size - 1), map->size, map->region_size == SMALL_PAGE_SIZE ? 
"smallpg" : "pgdir"); } @@ -1190,15 +1188,13 @@ static void assign_mem_granularity(struct memory_map *mem_map) for (n = 0; n < mem_map->count; n++) { paddr_t mask = mem_map->map[n].pa | mem_map->map[n].size; - if (!(mask & CORE_MMU_PGDIR_MASK)) - mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE; - else if (!(mask & SMALL_PAGE_MASK)) - mem_map->map[n].region_size = SMALL_PAGE_SIZE; - else + if (mask & SMALL_PAGE_MASK) panic("Impossible memory alignment"); if (map_is_tee_ram(mem_map->map + n)) mem_map->map[n].region_size = SMALL_PAGE_SIZE; + else + mem_map->map[n].region_size = CORE_MMU_PGDIR_SIZE; } } @@ -1852,7 +1848,7 @@ static void set_pg_region(struct core_mmu_table_info *dir_info, static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr, size_t size_left, paddr_t block_size, - struct tee_mmap_region *mm __maybe_unused) + struct tee_mmap_region *mm) { /* VA and PA are aligned to block size at current level */ if ((vaddr | paddr) & (block_size - 1)) @@ -1862,6 +1858,13 @@ static bool can_map_at_level(paddr_t paddr, vaddr_t vaddr, if (size_left < block_size) return false; + /* + * The required block size of the region is compatible with the + * block size of the current level. + */ + if (mm->region_size < block_size) + return false; + #ifdef CFG_WITH_PAGER /* * If pager is enabled, we need to map TEE RAM and the whole pager From bd3436b8bbae21539c62a904265ea01d566da716 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Sat, 14 Sep 2024 17:27:41 +0200 Subject: [PATCH 12/20] core: arm,pager: make __vcore_init_ro_start follow __vcore_init_rx_end This concerns configurations with CFG_WITH_PAGER=y. Until this patch, even if __vcore_init_ro_size (VCORE_INIT_RO_SZ) is 0 for CFG_CORE_RODATA_NOEXEC=n, __vcore_init_ro_start was using some value smaller than __vcore_init_rx_end. To simplify code trying to find the end of VCORE_INIT_RX and VCORE_INIT_RO parts of the binary, make sure that __vcore_init_ro_start follows right after __vcore_init_rx_end. 
Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/kernel/kern.ld.S | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S index a3a0e17e2fa..3c25d2d5b2f 100644 --- a/core/arch/arm/kernel/kern.ld.S +++ b/core/arch/arm/kernel/kern.ld.S @@ -489,17 +489,20 @@ __vcore_nex_rw_end = __vcore_nex_rw_start + __vcore_nex_rw_size; /* Paged/init read-only memories */ __vcore_init_rx_start = __flatmap_init_rx_start; -__vcore_init_ro_start = __flatmap_init_ro_start; #ifdef CFG_CORE_RODATA_NOEXEC __vcore_init_rx_size = __flatmap_init_rx_size; +__vcore_init_ro_start = __flatmap_init_ro_start; __vcore_init_ro_size = __flatmap_init_ro_size + __FLATMAP_PAGER_TRAILING_SPACE; #else __vcore_init_rx_size = __flatmap_init_rx_size + __flatmap_init_ro_size + __FLATMAP_PAGER_TRAILING_SPACE; +__vcore_init_ro_start = __vcore_init_rx_end; __vcore_init_ro_size = 0; #endif /* CFG_CORE_RODATA_NOEXEC */ __vcore_init_rx_end = __vcore_init_rx_start + __vcore_init_rx_size; __vcore_init_ro_end = __vcore_init_ro_start + __vcore_init_ro_size; +ASSERT(__vcore_init_ro_start == __vcore_init_rx_end, + "__vcore_init_ro_start should follow __vcore_init_rx_end") #endif /* CFG_WITH_PAGER */ #ifdef CFG_CORE_SANITIZE_KADDRESS From 93973e8abcbd7bfddc81e286c96caf15e6d8dbd4 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:29 +0200 Subject: [PATCH 13/20] core: mm,pager: map remaining physical memory For CFG_WITH_PAGER=y map the remaining memory following the VCORE_INIT_RO memory to make sure that all physical TEE memory is mapped even if VCORE_INIT_RO doesn't cover it entirely. This will be used in later patches to use the temporarily unused memory while booting. Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/mm/core_mmu.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index 49ca2a15fe2..9c9881637d2 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -1127,10 +1127,22 @@ static void collect_mem_ranges(struct memory_map *mem_map) } if (IS_ENABLED(CFG_WITH_PAGER)) { + paddr_t pa = 0; + size_t sz = 0; + ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RX, VCORE_INIT_RX_PA, VCORE_INIT_RX_SZ); ADD_PHYS_MEM(MEM_AREA_INIT_RAM_RO, VCORE_INIT_RO_PA, VCORE_INIT_RO_SZ); + /* + * Core init mapping shall cover up to end of the + * physical RAM. This is required since the hash + * table is appended to the binary data after the + * firmware build sequence. + */ + pa = VCORE_INIT_RO_PA + VCORE_INIT_RO_SZ; + sz = TEE_RAM_START + TEE_RAM_PH_SIZE - pa; + ADD_PHYS_MEM(MEM_AREA_TEE_RAM, pa, sz); } else { ADD_PHYS_MEM(MEM_AREA_SEC_RAM_OVERALL, next_pa, secure_only[0].paddr + From ba022470a6958115195a03c1fd8a0d56a9845b48 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:30 +0200 Subject: [PATCH 14/20] core: add CFG_BOOT_MEM and boot_mem_*() functions Adds CFG_BOOT_MEM to support stack-like memory allocations during boot before a heap has been configured. 
Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/include/kernel/boot.h | 26 ++++ core/mm/boot_mem.c | 246 +++++++++++++++++++++++++++++++++++++ core/mm/sub.mk | 1 + mk/config.mk | 3 + 4 files changed, 276 insertions(+) create mode 100644 core/mm/boot_mem.c diff --git a/core/include/kernel/boot.h b/core/include/kernel/boot.h index 393c231a454..012fdfdf94c 100644 --- a/core/include/kernel/boot.h +++ b/core/include/kernel/boot.h @@ -105,4 +105,30 @@ void discover_nsec_memory(void); /* Add reserved memory for static shared memory in the device-tree */ int mark_static_shm_as_reserved(struct dt_descriptor *dt); +#ifdef CFG_BOOT_MEM +/* + * Stack-like memory allocations during boot before a heap has been + * configured. boot_mem_relocate() performs relocation of the boot memory + * and address cells registered with boot_mem_add_reloc() during virtual + * memory initialization. Unused memory is unmapped and released to pool of + * free physical memory once MMU is initialized. + */ +void boot_mem_init(vaddr_t start, vaddr_t end, vaddr_t orig_end); +void boot_mem_add_reloc(void *ptr); +void boot_mem_relocate(size_t offs); +void *boot_mem_alloc(size_t len, size_t align); +void *boot_mem_alloc_tmp(size_t len, size_t align); +vaddr_t boot_mem_release_unused(void); +void boot_mem_release_tmp_alloc(void); +#else +static inline void boot_mem_add_reloc(void *ptr __unused) { } +static inline void *boot_mem_alloc(size_t len __unused, size_t align __unused) +{ return NULL; } +static inline void *boot_mem_alloc_tmp(size_t len __unused, + size_t align __unused) +{ return NULL; } +static inline vaddr_t boot_mem_release_unused(void) { return 0; } +static inline void boot_mem_release_tmp_alloc(void) { } +#endif + #endif /* __KERNEL_BOOT_H */ diff --git a/core/mm/boot_mem.c b/core/mm/boot_mem.c new file mode 100644 index 00000000000..5b1f45fe19a --- /dev/null +++ b/core/mm/boot_mem.c @@ -0,0 +1,246 @@ +// SPDX-License-Identifier: BSD-2-Clause +/* + * Copyright (c) 2024, Linaro Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * struct boot_mem_reloc - Pointers relocated in memory during boot + * @ptrs: Array of relocation + * @count: Number of cells used in @ptrs + * @next: Next relocation array when @ptrs is fully used + */ +struct boot_mem_reloc { + void **ptrs[64]; + size_t count; + struct boot_mem_reloc *next; +}; + +/* + * struct boot_mem_desc - Stack like boot memory allocation pool + * @orig_mem_start: Boot memory stack base address + * @orig_mem_end: Boot memory start end address + * @mem_start: Boot memory free space start address + * @mem_end: Boot memory free space end address + * @reloc: Boot memory pointers requiring relocation + */ +struct boot_mem_desc { + vaddr_t orig_mem_start; + vaddr_t orig_mem_end; + vaddr_t mem_start; + vaddr_t mem_end; + struct boot_mem_reloc *reloc; +}; + +static struct boot_mem_desc *boot_mem_desc; + +static void *mem_alloc_tmp(struct boot_mem_desc *desc, size_t len, size_t align) +{ + vaddr_t va = 0; + + assert(desc && desc->mem_start && desc->mem_end); + assert(IS_POWER_OF_TWO(align) && !(len % align)); + if (SUB_OVERFLOW(desc->mem_end, len, &va)) + panic(); + va = ROUNDDOWN(va, align); + if (va < desc->mem_start) + panic(); + desc->mem_end = va; + return (void *)va; +} + +static void *mem_alloc(struct boot_mem_desc *desc, size_t len, size_t align) +{ + vaddr_t va = 0; + vaddr_t ve = 0; + + runtime_assert(!IS_ENABLED(CFG_WITH_PAGER)); + assert(desc && desc->mem_start && desc->mem_end); + 
assert(IS_POWER_OF_TWO(align) && !(len % align)); + va = ROUNDUP(desc->mem_start, align); + if (ADD_OVERFLOW(va, len, &ve)) + panic(); + if (ve > desc->mem_end) + panic(); + desc->mem_start = ve; + return (void *)va; +} + +void boot_mem_init(vaddr_t start, vaddr_t end, vaddr_t orig_end) +{ + struct boot_mem_desc desc = { + .orig_mem_start = start, + .orig_mem_end = orig_end, + .mem_start = start, + .mem_end = end, + }; + + boot_mem_desc = mem_alloc_tmp(&desc, sizeof(desc), alignof(desc)); + *boot_mem_desc = desc; + boot_mem_desc->reloc = mem_alloc_tmp(boot_mem_desc, + sizeof(*boot_mem_desc->reloc), + alignof(*boot_mem_desc->reloc)); + memset(boot_mem_desc->reloc, 0, sizeof(*boot_mem_desc->reloc)); +} + +void boot_mem_add_reloc(void *ptr) +{ + struct boot_mem_reloc *reloc = NULL; + + assert(boot_mem_desc && boot_mem_desc->reloc); + reloc = boot_mem_desc->reloc; + + /* If the reloc struct is full, allocate a new and link it first */ + if (reloc->count == ARRAY_SIZE(reloc->ptrs)) { + reloc = boot_mem_alloc_tmp(sizeof(*reloc), alignof(*reloc)); + reloc->next = boot_mem_desc->reloc; + boot_mem_desc->reloc = reloc; + } + + reloc->ptrs[reloc->count] = ptr; + reloc->count++; +} + +static void *add_offs(void *p, size_t offs) +{ + assert(p); + return (uint8_t *)p + offs; +} + +void boot_mem_relocate(size_t offs) +{ + struct boot_mem_reloc *reloc = NULL; + size_t n = 0; + + boot_mem_desc = add_offs(boot_mem_desc, offs); + + boot_mem_desc->orig_mem_start += offs; + boot_mem_desc->orig_mem_end += offs; + boot_mem_desc->mem_start += offs; + boot_mem_desc->mem_end += offs; + boot_mem_desc->reloc = add_offs(boot_mem_desc->reloc, offs); + + for (reloc = boot_mem_desc->reloc;; reloc = reloc->next) { + for (n = 0; n < reloc->count; n++) { + reloc->ptrs[n] = add_offs(reloc->ptrs[n], offs); + *reloc->ptrs[n] = add_offs(*reloc->ptrs[n], offs); + } + if (!reloc->next) + break; + reloc->next = add_offs(reloc->next, offs); + } +} + +void *boot_mem_alloc(size_t len, size_t align) +{ + return mem_alloc(boot_mem_desc, len, align); +} + +void *boot_mem_alloc_tmp(size_t len, size_t align) +{ + return mem_alloc_tmp(boot_mem_desc, len, align); +} + +vaddr_t boot_mem_release_unused(void) +{ + tee_mm_entry_t *mm = NULL; + paddr_t pa = 0; + vaddr_t va = 0; + size_t n = 0; + vaddr_t tmp_va = 0; + paddr_t tmp_pa = 0; + size_t tmp_n = 0; + + assert(boot_mem_desc); + + n = boot_mem_desc->mem_start - boot_mem_desc->orig_mem_start; + DMSG("Allocated %zu bytes at va %#"PRIxVA" pa %#"PRIxPA, + n, boot_mem_desc->orig_mem_start, + vaddr_to_phys(boot_mem_desc->orig_mem_start)); + + DMSG("Tempalloc %zu bytes at va %#"PRIxVA, + (size_t)(boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end), + boot_mem_desc->mem_end); + + if (IS_ENABLED(CFG_WITH_PAGER)) + goto out; + + pa = vaddr_to_phys(ROUNDUP(boot_mem_desc->orig_mem_start, + SMALL_PAGE_SIZE)); + mm = nex_phys_mem_mm_find(pa); + if (!mm) + panic(); + + va = ROUNDUP(boot_mem_desc->mem_start, SMALL_PAGE_SIZE); + + tmp_va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE); + tmp_n = boot_mem_desc->orig_mem_end - tmp_va; + tmp_pa = vaddr_to_phys(tmp_va); + + pa = tee_mm_get_smem(mm); + n = vaddr_to_phys(boot_mem_desc->mem_start) - pa; + tee_mm_free(mm); + DMSG("Carving out %#"PRIxPA"..%#"PRIxPA, pa, pa + n - 1); + mm = nex_phys_mem_alloc2(pa, n); + if (!mm) + panic(); + mm = nex_phys_mem_alloc2(tmp_pa, tmp_n); + if (!mm) + panic(); + + n = tmp_va - boot_mem_desc->mem_start; + DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va); + + /* Unmap the now unused pages */ + 
core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE); + +out: + /* Stop further allocations. */ + boot_mem_desc->mem_start = boot_mem_desc->mem_end; + return va; +} + +void boot_mem_release_tmp_alloc(void) +{ + tee_mm_entry_t *mm = NULL; + vaddr_t va = 0; + paddr_t pa = 0; + size_t n = 0; + + assert(boot_mem_desc && + boot_mem_desc->mem_start == boot_mem_desc->mem_end); + + if (IS_ENABLED(CFG_WITH_PAGER)) { + n = boot_mem_desc->orig_mem_end - boot_mem_desc->mem_end; + va = boot_mem_desc->mem_end; + boot_mem_desc = NULL; + DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va); + return; + } + + va = ROUNDDOWN(boot_mem_desc->mem_end, SMALL_PAGE_SIZE); + pa = vaddr_to_phys(va); + + mm = nex_phys_mem_mm_find(pa); + if (!mm) + panic(); + assert(pa == tee_mm_get_smem(mm)); + n = tee_mm_get_bytes(mm); + + /* Boot memory allocation is now done */ + boot_mem_desc = NULL; + + DMSG("Releasing %zu bytes from va %#"PRIxVA, n, va); + + /* Unmap the now unused pages */ + core_mmu_unmap_pages(va, n / SMALL_PAGE_SIZE); +} diff --git a/core/mm/sub.mk b/core/mm/sub.mk index 9f10ca0f5e7..df6f22669a3 100644 --- a/core/mm/sub.mk +++ b/core/mm/sub.mk @@ -10,3 +10,4 @@ srcs-y += phys_mem.c ifneq ($(CFG_CORE_FFA),y) srcs-$(CFG_CORE_DYN_SHM) += mobj_dyn_shm.c endif +srcs-$(CFG_BOOT_MEM) += boot_mem.c diff --git a/mk/config.mk b/mk/config.mk index c3c61ad8e0d..adcbdc33b74 100644 --- a/mk/config.mk +++ b/mk/config.mk @@ -1263,3 +1263,6 @@ CFG_CORE_UNSAFE_MODEXP ?= n # when enabled, makes MBedTLS library for TAs use 'unsafe' modular # exponentiation algorithm. CFG_TA_MEBDTLS_UNSAFE_MODEXP ?= n + +# CFG_BOOT_MEM, when enabled, adds stack like memory allocation during boot. +CFG_BOOT_MEM ?= n From baa5de35733bd6e41f7581a811e5efa4e216a88e Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 20 Sep 2024 10:28:17 +0200 Subject: [PATCH 15/20] core: arm: add boot_cached_mem_end Add boot_cached_mem_end in C code, replacing the previous read-only mapped cached_mem_end. This allows updates to boot_cached_mem_end after MMU has been enabled. Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/kernel/boot.c | 6 ++++++ core/arch/arm/kernel/entry_a32.S | 37 +++++++++++++++++--------------- core/arch/arm/kernel/entry_a64.S | 18 +++++++--------- 3 files changed, 34 insertions(+), 27 deletions(-) diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c index f7cbe64e0e8..69ec2e66133 100644 --- a/core/arch/arm/kernel/boot.c +++ b/core/arch/arm/kernel/boot.c @@ -80,6 +80,12 @@ uint32_t sem_cpu_sync[CFG_TEE_CORE_NB_CORE]; DECLARE_KEEP_PAGER(sem_cpu_sync); #endif +/* + * Must not be in .bss since it's initialized and used from assembly before + * .bss is cleared. 
+ */ +vaddr_t boot_cached_mem_end __nex_data = 1; + static unsigned long boot_arg_fdt __nex_bss; static unsigned long boot_arg_nsec_entry __nex_bss; static unsigned long boot_arg_pageable_part __nex_bss; diff --git a/core/arch/arm/kernel/entry_a32.S b/core/arch/arm/kernel/entry_a32.S index 4e9030987f8..e3f983e3a2e 100644 --- a/core/arch/arm/kernel/entry_a32.S +++ b/core/arch/arm/kernel/entry_a32.S @@ -284,11 +284,13 @@ DECLARE_KEEP_INIT _start assert_flat_mapped_range (\vbase), (\line) bl pl310_base ldr r1, \vbase - ldr r2, \vend + ldr r2, =\vend + ldr r2, [r2] bl arm_cl2_invbypa #endif ldr r0, \vbase - ldr r1, \vend + ldr r1, =\vend + ldr r1, [r1] sub r1, r1, r0 bl dcache_inv_range .endm @@ -297,16 +299,19 @@ DECLARE_KEEP_INIT _start #if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL) assert_flat_mapped_range (\vbase), (\line) ldr r0, \vbase - ldr r1, \vend + ldr r1, =\vend + ldr r1, [r1] sub r1, r1, r0 bl dcache_clean_range bl pl310_base ldr r1, \vbase - ldr r2, \vend + ldr r2, =\vend + ldr r2, [r2] bl arm_cl2_cleaninvbypa #endif ldr r0, \vbase - ldr r1, \vend + ldr r1, =\vend + ldr r1, [r1] sub r1, r1, r0 bl dcache_cleaninv_range .endm @@ -349,7 +354,8 @@ UNWIND( .cantunwind) /* Copy backwards (as memmove) in case we're overlapping */ add r0, r0, r2 /* __init_start + len */ add r1, r1, r2 /* __data_end + len */ - str r0, cached_mem_end + ldr r3, =boot_cached_mem_end + str r0, [r3] ldr r2, =__init_start copy_init: ldmdb r1!, {r3, r9-r12} @@ -369,8 +375,8 @@ copy_init: /* Copy backwards (as memmove) in case we're overlapping */ add r0, r0, r2 add r1, r1, r2 - str r0, cached_mem_end - ldr r2, =__end + ldr r3, =boot_cached_mem_end + str r2, [r3] copy_init: ldmdb r1!, {r3, r9-r12} @@ -463,7 +469,7 @@ shadow_stack_access_ok: * invalidate memory not used by OP-TEE since we may invalidate * entries used by for instance ARM Trusted Firmware. */ - inval_cache_vrange(cached_mem_start, cached_mem_end) + inval_cache_vrange(cached_mem_start, boot_cached_mem_end) #if defined(CFG_PL310) && !defined(CFG_PL310_SIP_PROTOCOL) /* Enable PL310 if not yet enabled */ @@ -492,12 +498,13 @@ shadow_stack_access_ok: ldr r0, =boot_mmu_config ldr r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET] /* - * Update cached_mem_end address with load offset since it was + * Update boot_cached_mem_end address with load offset since it was * calculated before relocation. */ - ldr r2, cached_mem_end + ldr r3, =boot_cached_mem_end + ldr r2, [r3] add r2, r2, r0 - str r2, cached_mem_end + str r2, [r3] bl relocate #endif @@ -558,7 +565,7 @@ shadow_stack_access_ok: * they have turned on their D-cache, clean and invalidate the * D-cache before exiting to normal world. 
*/ - flush_cache_vrange(cached_mem_start, cached_mem_end) + flush_cache_vrange(cached_mem_start, boot_cached_mem_end) /* release secondary boot cores and sync with them */ cpu_is_ready @@ -632,10 +639,6 @@ LOCAL_DATA cached_mem_start , : .word __text_start END_DATA cached_mem_start -LOCAL_DATA cached_mem_end , : - .skip 4 -END_DATA cached_mem_end - LOCAL_FUNC unhandled_cpu , : wfi b unhandled_cpu diff --git a/core/arch/arm/kernel/entry_a64.S b/core/arch/arm/kernel/entry_a64.S index 86bd400c57d..06ac2d761cc 100644 --- a/core/arch/arm/kernel/entry_a64.S +++ b/core/arch/arm/kernel/entry_a64.S @@ -200,7 +200,7 @@ FUNC _start , : /* Copy backwards (as memmove) in case we're overlapping */ add x0, x0, x2 /* __init_start + len */ add x1, x1, x2 /* __data_end + len */ - adr x3, cached_mem_end + adr_l x3, boot_cached_mem_end str x0, [x3] adr x2, __init_start copy_init: @@ -221,7 +221,7 @@ copy_init: /* Copy backwards (as memmove) in case we're overlapping */ add x0, x0, x2 add x1, x1, x2 - adr x3, cached_mem_end + adr_l x3, boot_cached_mem_end str x0, [x3] adr_l x2, __end @@ -289,7 +289,8 @@ clear_nex_bss: * entries used by for instance ARM Trusted Firmware. */ adr_l x0, __text_start - ldr x1, cached_mem_end + adr_l x1, boot_cached_mem_end + ldr x1, [x1] sub x1, x1, x0 bl dcache_cleaninv_range @@ -333,10 +334,10 @@ clear_nex_bss: ldr x0, boot_mmu_config + CORE_MMU_CONFIG_MAP_OFFSET cbz x0, 1f /* - * Update cached_mem_end address with load offset since it was + * Update boot_cached_mem_end address with load offset since it was * calculated before relocation. */ - adr x5, cached_mem_end + adr_l x5, boot_cached_mem_end ldr x6, [x5] add x6, x6, x0 str x6, [x5] @@ -427,7 +428,8 @@ clear_nex_bss: * D-cache before exiting to normal world. */ adr_l x0, __text_start - ldr x1, cached_mem_end + adr_l x1, boot_cached_mem_end + ldr x1, [x1] sub x1, x1, x0 bl dcache_cleaninv_range @@ -470,10 +472,6 @@ DECLARE_KEEP_INIT _start .section .identity_map.data .balign 8 -LOCAL_DATA cached_mem_end , : - .skip 8 -END_DATA cached_mem_end - #if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE) LOCAL_FUNC relocate , : /* From e0baad48fc22942df7af47a4bda38f6198b13d7b Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:32 +0200 Subject: [PATCH 16/20] core: arm: enable CFG_BOOT_MEM unconditionally Enable CFG_BOOT_MEM unconditionally and call the boot_mem_*() functions as needed from entry_*.S and boot.c. The pager will reuse all boot_mem memory internally when configured. The non-pager configuration will unmap the memory and make it available for TAs if needed. __FLATMAP_PAGER_TRAILING_SPACE is removed from the link script, collect_mem_ranges() in core/mm/core_mmu.c maps the memory following VCORE_INIT_RO automatically. 
Signed-off-by: Jens Wiklander Acked-by: Etienne Carriere --- core/arch/arm/kernel/boot.c | 106 ++++++++++++++++++------------- core/arch/arm/kernel/entry_a32.S | 49 ++++++++++++-- core/arch/arm/kernel/entry_a64.S | 51 ++++++++++++--- core/arch/arm/kernel/kern.ld.S | 14 +--- mk/config.mk | 4 ++ 5 files changed, 151 insertions(+), 73 deletions(-) diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c index 69ec2e66133..8bd41cefa02 100644 --- a/core/arch/arm/kernel/boot.c +++ b/core/arch/arm/kernel/boot.c @@ -498,7 +498,7 @@ static struct fobj *ro_paged_alloc(tee_mm_entry_t *mm, void *hashes, #endif } -static void init_runtime(unsigned long pageable_part) +static void init_pager_runtime(unsigned long pageable_part) { size_t n; size_t init_size = (size_t)(__init_end - __init_start); @@ -523,12 +523,6 @@ static void init_runtime(unsigned long pageable_part) tmp_hashes = __init_end + embdata->hashes_offset; - init_asan(); - - /* Add heap2 first as heap1 may be too small as initial bget pool */ - malloc_add_pool(__heap2_start, __heap2_end - __heap2_start); - malloc_add_pool(__heap1_start, __heap1_end - __heap1_start); - /* * This needs to be initialized early to support address lookup * in MEM_AREA_TEE_RAM @@ -542,10 +536,10 @@ static void init_runtime(unsigned long pageable_part) asan_memcpy_unchecked(hashes, tmp_hashes, hash_size); /* - * Need physical memory pool initialized to be able to allocate - * secure physical memory below. + * The pager is about the be enabled below, eventual temporary boot + * memory allocation must be removed now. */ - core_mmu_init_phys_mem(); + boot_mem_release_tmp_alloc(); carve_out_asan_mem(); @@ -654,27 +648,9 @@ static void init_runtime(unsigned long pageable_part) print_pager_pool_size(); } -#else - -static void init_runtime(unsigned long pageable_part __unused) +#else /*!CFG_WITH_PAGER*/ +static void init_pager_runtime(unsigned long pageable_part __unused) { - init_asan(); - - /* - * By default whole OP-TEE uses malloc, so we need to initialize - * it early. But, when virtualization is enabled, malloc is used - * only by TEE runtime, so malloc should be initialized later, for - * every virtual partition separately. Core code uses nex_malloc - * instead. - */ -#ifdef CFG_NS_VIRTUALIZATION - nex_malloc_add_pool(__nex_heap_start, __nex_heap_end - - __nex_heap_start); -#else - malloc_add_pool(__heap1_start, __heap1_end - __heap1_start); -#endif - - IMSG_RAW("\n"); } #endif @@ -891,10 +867,9 @@ static void update_external_dt(void) void init_tee_runtime(void) { -#ifndef CFG_WITH_PAGER - /* Pager initializes TA RAM early */ - core_mmu_init_phys_mem(); -#endif + if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) + core_mmu_init_phys_mem(); + /* * With virtualization we call this function when creating the * OP-TEE partition instead. @@ -925,6 +900,8 @@ void init_tee_runtime(void) static void init_primary(unsigned long pageable_part, unsigned long nsec_entry) { + vaddr_t va = 0; + thread_init_core_local_stacks(); /* * Mask asynchronous exceptions before switch to the thread vector @@ -940,14 +917,54 @@ static void init_primary(unsigned long pageable_part, unsigned long nsec_entry) if (IS_ENABLED(CFG_CRYPTO_WITH_CE)) check_crypto_extensions(); + init_asan(); + /* - * Pager: init_runtime() calls thread_kernel_enable_vfp() so we must - * set a current thread right now to avoid a chicken-and-egg problem - * (thread_init_boot_thread() sets the current thread but needs - * things set by init_runtime()). 
+ * By default whole OP-TEE uses malloc, so we need to initialize + * it early. But, when virtualization is enabled, malloc is used + * only by TEE runtime, so malloc should be initialized later, for + * every virtual partition separately. Core code uses nex_malloc + * instead. */ - thread_get_core_local()->curr_thread = 0; - init_runtime(pageable_part); +#ifdef CFG_WITH_PAGER + /* Add heap2 first as heap1 may be too small as initial bget pool */ + malloc_add_pool(__heap2_start, __heap2_end - __heap2_start); +#endif +#ifdef CFG_NS_VIRTUALIZATION + nex_malloc_add_pool(__nex_heap_start, __nex_heap_end - + __nex_heap_start); +#else + malloc_add_pool(__heap1_start, __heap1_end - __heap1_start); +#endif + IMSG_RAW("\n"); + + if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { + IMSG("Initializing virtualization support"); + core_mmu_init_virtualization(); + } else { + core_mmu_init_phys_mem(); + } + va = boot_mem_release_unused(); + if (!IS_ENABLED(CFG_WITH_PAGER)) { + /* + * We must update boot_cached_mem_end to reflect the memory + * just unmapped by boot_mem_release_unused(). + */ + assert(va && va <= boot_cached_mem_end); + boot_cached_mem_end = va; + } + + if (IS_ENABLED(CFG_WITH_PAGER)) { + /* + * Pager: init_runtime() calls thread_kernel_enable_vfp() + * so we must set a current thread right now to avoid a + * chicken-and-egg problem (thread_init_boot_thread() sets + * the current thread but needs things set by + * init_runtime()). + */ + thread_get_core_local()->curr_thread = 0; + init_pager_runtime(pageable_part); + } if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { /* @@ -1028,12 +1045,8 @@ void __weak boot_init_primary_late(unsigned long fdt __unused, boot_primary_init_intc(); init_vfp_nsec(); - if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { - IMSG("Initializing virtualization support"); - core_mmu_init_virtualization(); - } else { + if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) init_tee_runtime(); - } } /* @@ -1042,6 +1055,9 @@ void __weak boot_init_primary_late(unsigned long fdt __unused, */ void __weak boot_init_primary_final(void) { + if (!IS_ENABLED(CFG_WITH_PAGER)) + boot_mem_release_tmp_alloc(); + if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) call_driver_initcalls(); call_finalcalls(); diff --git a/core/arch/arm/kernel/entry_a32.S b/core/arch/arm/kernel/entry_a32.S index e3f983e3a2e..80ee7ed4e4f 100644 --- a/core/arch/arm/kernel/entry_a32.S +++ b/core/arch/arm/kernel/entry_a32.S @@ -366,22 +366,30 @@ copy_init: /* * The binary is built as: * [Core, rodata and data] : In correct location - * [struct boot_embdata + data] : Should be moved to __end, first - * uint32_t tells the length of the struct + data + * [struct boot_embdata + data] : Should be moved to right before + * __vcore_free_end, the first uint32_t tells the length of the + * struct + data */ - ldr r0, =__end /* dst */ ldr r1, =__data_end /* src */ ldr r2, [r1] /* struct boot_embdata::total_len */ + /* dst */ + ldr r0, =__vcore_free_end + sub r0, r0, r2 + /* round down to beginning of page */ + mov r3, #(SMALL_PAGE_SIZE - 1) + bic r0, r0, r3 + ldr r3, =boot_embdata_ptr + str r0, [r3] /* Copy backwards (as memmove) in case we're overlapping */ - add r0, r0, r2 add r1, r1, r2 + add r2, r0, r2 ldr r3, =boot_cached_mem_end str r2, [r3] copy_init: ldmdb r1!, {r3, r9-r12} - stmdb r0!, {r3, r9-r12} - cmp r0, r2 + stmdb r2!, {r3, r9-r12} + cmp r2, r0 bgt copy_init #endif @@ -458,6 +466,23 @@ shadow_stack_access_ok: bl boot_save_args add sp, sp, #(2 * 4) +#ifdef CFG_WITH_PAGER + ldr r0, =__init_end /* pointer to boot_embdata */ + ldr r1, [r0] /* struct 
boot_embdata::total_len */ + add r0, r0, r1 + mov_imm r1, 0xfff + add r0, r0, r1 /* round up */ + bic r0, r0, r1 /* to next page */ + mov_imm r1, (TEE_RAM_PH_SIZE + TEE_RAM_START) + mov r2, r1 +#else + ldr r0, =__vcore_free_start + ldr r1, =boot_embdata_ptr + ldr r1, [r1] + ldr r2, =__vcore_free_end +#endif + bl boot_mem_init + #ifdef CFG_PL310 bl pl310_base bl arm_cl2_config @@ -512,6 +537,9 @@ shadow_stack_access_ok: bl __get_core_pos bl enable_mmu #ifdef CFG_CORE_ASLR + ldr r0, =boot_mmu_config + ldr r0, [r0, #CORE_MMU_CONFIG_MAP_OFFSET] + bl boot_mem_relocate /* * Reinitialize console, since register_serial_console() has * previously registered a PA and with ASLR the VA is different @@ -639,6 +667,12 @@ LOCAL_DATA cached_mem_start , : .word __text_start END_DATA cached_mem_start +#ifndef CFG_WITH_PAGER +LOCAL_DATA boot_embdata_ptr , : + .skip 4 +END_DATA boot_embdata_ptr +#endif + LOCAL_FUNC unhandled_cpu , : wfi b unhandled_cpu @@ -651,7 +685,8 @@ LOCAL_FUNC relocate , : #ifdef CFG_WITH_PAGER ldr r12, =__init_end #else - ldr r12, =__end + ldr r12, =boot_embdata_ptr + ldr r12, [r12] #endif ldr r2, [r12, #BOOT_EMBDATA_RELOC_OFFSET] ldr r3, [r12, #BOOT_EMBDATA_RELOC_LEN] diff --git a/core/arch/arm/kernel/entry_a64.S b/core/arch/arm/kernel/entry_a64.S index 06ac2d761cc..04dc781e144 100644 --- a/core/arch/arm/kernel/entry_a64.S +++ b/core/arch/arm/kernel/entry_a64.S @@ -212,23 +212,30 @@ copy_init: /* * The binary is built as: * [Core, rodata and data] : In correct location - * [struct boot_embdata + data] : Should be moved to __end, first - * uint32_t tells the length of the struct + data + * [struct boot_embdata + data] : Should be moved to right before + * __vcore_free_end, the first uint32_t tells the length of the + * struct + data */ - adr_l x0, __end /* dst */ adr_l x1, __data_end /* src */ ldr w2, [x1] /* struct boot_embdata::total_len */ + /* dst */ + adr_l x0, __vcore_free_end + sub x0, x0, x2 + /* round down to beginning of page */ + bic x0, x0, #(SMALL_PAGE_SIZE - 1) + adr_l x3, boot_embdata_ptr + str x0, [x3] + /* Copy backwards (as memmove) in case we're overlapping */ - add x0, x0, x2 add x1, x1, x2 + add x2, x0, x2 adr_l x3, boot_cached_mem_end - str x0, [x3] - adr_l x2, __end + str x2, [x3] copy_init: ldp x3, x4, [x1, #-16]! - stp x3, x4, [x0, #-16]! - cmp x0, x2 + stp x3, x4, [x2, #-16]! + cmp x2, x0 b.gt copy_init #endif @@ -304,6 +311,22 @@ clear_nex_bss: mov x4, xzr bl boot_save_args +#ifdef CFG_WITH_PAGER + adr_l x0, __init_end /* pointer to boot_embdata */ + ldr w1, [x0] /* struct boot_embdata::total_len */ + add x0, x0, x1 + add x0, x0, #0xfff /* round up */ + bic x0, x0, #0xfff /* to next page */ + mov_imm x1, (TEE_RAM_PH_SIZE + TEE_RAM_START) + mov x2, x1 +#else + adr_l x0, __vcore_free_start + adr_l x1, boot_embdata_ptr + ldr x1, [x1] + adr_l x2, __vcore_free_end; +#endif + bl boot_mem_init + #ifdef CFG_MEMTAG /* * If FEAT_MTE2 is available, initializes the memtag callbacks. 
@@ -349,6 +372,9 @@ clear_nex_bss: bl __get_core_pos bl enable_mmu #ifdef CFG_CORE_ASLR + adr_l x0, boot_mmu_config + ldr x0, [x0, #CORE_MMU_CONFIG_MAP_OFFSET] + bl boot_mem_relocate /* * Reinitialize console, since register_serial_console() has * previously registered a PA and with ASLR the VA is different @@ -470,8 +496,14 @@ clear_nex_bss: END_FUNC _start DECLARE_KEEP_INIT _start +#ifndef CFG_WITH_PAGER .section .identity_map.data .balign 8 +LOCAL_DATA boot_embdata_ptr , : + .skip 8 +END_DATA boot_embdata_ptr +#endif + #if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE) LOCAL_FUNC relocate , : /* @@ -481,7 +513,8 @@ LOCAL_FUNC relocate , : #ifdef CFG_WITH_PAGER adr_l x6, __init_end #else - adr_l x6, __end + adr_l x6, boot_embdata_ptr + ldr x6, [x6] #endif ldp w2, w3, [x6, #BOOT_EMBDATA_RELOC_OFFSET] diff --git a/core/arch/arm/kernel/kern.ld.S b/core/arch/arm/kernel/kern.ld.S index 3c25d2d5b2f..03ae13045d5 100644 --- a/core/arch/arm/kernel/kern.ld.S +++ b/core/arch/arm/kernel/kern.ld.S @@ -478,24 +478,14 @@ __vcore_nex_rw_end = __vcore_nex_rw_start + __vcore_nex_rw_size; #endif #ifdef CFG_WITH_PAGER -/* - * Core init mapping shall cover up to end of the physical RAM. - * This is required since the hash table is appended to the - * binary data after the firmware build sequence. - */ -#define __FLATMAP_PAGER_TRAILING_SPACE \ - (TEE_RAM_START + TEE_RAM_PH_SIZE - \ - (__flatmap_init_ro_start + __flatmap_init_ro_size)) - /* Paged/init read-only memories */ __vcore_init_rx_start = __flatmap_init_rx_start; #ifdef CFG_CORE_RODATA_NOEXEC __vcore_init_rx_size = __flatmap_init_rx_size; __vcore_init_ro_start = __flatmap_init_ro_start; -__vcore_init_ro_size = __flatmap_init_ro_size + __FLATMAP_PAGER_TRAILING_SPACE; +__vcore_init_ro_size = __flatmap_init_ro_size; #else -__vcore_init_rx_size = __flatmap_init_rx_size + __flatmap_init_ro_size + - __FLATMAP_PAGER_TRAILING_SPACE; +__vcore_init_rx_size = __flatmap_init_rx_size + __flatmap_init_ro_size; __vcore_init_ro_start = __vcore_init_rx_end; __vcore_init_ro_size = 0; #endif /* CFG_CORE_RODATA_NOEXEC */ diff --git a/mk/config.mk b/mk/config.mk index adcbdc33b74..03f92950825 100644 --- a/mk/config.mk +++ b/mk/config.mk @@ -1265,4 +1265,8 @@ CFG_CORE_UNSAFE_MODEXP ?= n CFG_TA_MBEDTLS_UNSAFE_MODEXP ?= n # CFG_BOOT_MEM, when enabled, adds stack like memory allocation during boot. +ifeq ($(ARCH),arm) +$(call force,CFG_BOOT_MEM,y) +else CFG_BOOT_MEM ?= n +endif From a9fbe20aef03768127a8f9a4f17aa4780c7b3e4c Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:33 +0200 Subject: [PATCH 17/20] core: mm: allocate temporary memory map array With CFG_BOOT_MEM enabled, allocate a temporary memory map array using boot_mem_alloc_tmp() instead of using the global static_mmap_regions[]. core_mmu_save_mem_map() is added and called from init_primary() before the temporary memory is reused.
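For illustration, the growth strategy is plain grow-on-demand doubling of the map array. The stand-alone sketch below only mirrors the pattern used by grow_mem_map() together with boot_mem_realloc_memory_map(); standard calloc()/realloc()/abort() stand in for boot_mem_alloc_tmp(), nex_realloc() and panic(), and the structs are trimmed down to the fields needed here:

    #include <assert.h>
    #include <stdlib.h>

    struct tee_mmap_region { unsigned long pa; unsigned long va; size_t size; };

    struct memory_map {
        struct tee_mmap_region *map;
        size_t count;
        size_t alloc_count;
    };

    /* Double the backing array when it is full, then take a new entry */
    static void grow_mem_map(struct memory_map *mm)
    {
        if (mm->count == mm->alloc_count) {
            size_t n = mm->alloc_count * 2;
            struct tee_mmap_region *m = realloc(mm->map, n * sizeof(*m));

            if (!m)
                abort(); /* the core panics instead */
            mm->map = m;
            mm->alloc_count = n;
        }
        mm->count++;
    }

    int main(void)
    {
        struct memory_map mm = { .map = calloc(4, sizeof(*mm.map)), .alloc_count = 4 };
        size_t i = 0;

        for (i = 0; i < 100; i++)
            grow_mem_map(&mm);
        assert(mm.count == 100 && mm.alloc_count >= mm.count);
        free(mm.map);
        return 0;
    }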
Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/kernel/boot.c | 1 + core/include/mm/core_mmu.h | 5 +++ core/mm/core_mmu.c | 71 +++++++++++++++++++++++++++++++++++-- 3 files changed, 74 insertions(+), 3 deletions(-) diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c index 8bd41cefa02..f6b69906c00 100644 --- a/core/arch/arm/kernel/boot.c +++ b/core/arch/arm/kernel/boot.c @@ -938,6 +938,7 @@ static void init_primary(unsigned long pageable_part, unsigned long nsec_entry) #endif IMSG_RAW("\n"); + core_mmu_save_mem_map(); if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { IMSG("Initializing virtualization support"); core_mmu_init_virtualization(); diff --git a/core/include/mm/core_mmu.h b/core/include/mm/core_mmu.h index 35f8d9f3a3b..7ec4ecaf0be 100644 --- a/core/include/mm/core_mmu.h +++ b/core/include/mm/core_mmu.h @@ -295,6 +295,11 @@ extern const unsigned long core_mmu_tee_load_pa; void core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg); void core_init_mmu_regs(struct core_mmu_config *cfg); +/* + * Copy static memory map from temporary boot_mem to heap when CFG_BOOT_MEM + * is enabled. + */ +void core_mmu_save_mem_map(void); /* Arch specific function to help optimizing 1 MMU xlat table */ bool core_mmu_prefer_tee_ram_at_top(paddr_t paddr); diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index 9c9881637d2..dbdb3d436a1 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -62,6 +62,9 @@ unsigned long default_nsec_shm_size __nex_bss; unsigned long default_nsec_shm_paddr __nex_bss; #endif +#ifdef CFG_BOOT_MEM +static struct memory_map static_memory_map __nex_bss; +#else static struct tee_mmap_region static_mmap_regions[CFG_MMAP_REGIONS #if defined(CFG_CORE_ASLR) || defined(CFG_CORE_PHYS_RELOCATABLE) + 1 @@ -71,6 +74,8 @@ static struct memory_map static_memory_map __nex_data = { .map = static_mmap_regions, .alloc_count = ARRAY_SIZE(static_mmap_regions), }; +#endif +void (*memory_map_realloc_func)(struct memory_map *mem_map) __nex_bss; /* Offset of the first TEE RAM mapping from start of secure RAM */ static size_t tee_ram_initial_offs __nex_bss; @@ -126,11 +131,43 @@ static void mmu_unlock(uint32_t exceptions) cpu_spin_unlock_xrestore(&mmu_spinlock, exceptions); } +static void heap_realloc_memory_map(struct memory_map *mem_map) +{ + struct tee_mmap_region *m = NULL; + struct tee_mmap_region *old = mem_map->map; + size_t old_sz = sizeof(*old) * mem_map->alloc_count; + size_t sz = old_sz + sizeof(*m); + + assert(nex_malloc_buffer_is_within_alloced(old, old_sz)); + m = nex_realloc(old, sz); + if (!m) + panic(); + mem_map->map = m; + mem_map->alloc_count++; +} + +static void boot_mem_realloc_memory_map(struct memory_map *mem_map) +{ + struct tee_mmap_region *m = NULL; + struct tee_mmap_region *old = mem_map->map; + size_t old_sz = sizeof(*old) * mem_map->alloc_count; + size_t sz = old_sz * 2; + + m = boot_mem_alloc_tmp(sz, alignof(*m)); + memcpy(m, old, old_sz); + mem_map->map = m; + mem_map->alloc_count *= 2; +} + static void grow_mem_map(struct memory_map *mem_map) { if (mem_map->count == mem_map->alloc_count) { - EMSG("Out of entries (%zu) in mem_map", mem_map->alloc_count); - panic(); + if (!memory_map_realloc_func) { + EMSG("Out of entries (%zu) in mem_map", + mem_map->alloc_count); + panic(); + } + memory_map_realloc_func(mem_map); } mem_map->count++; } @@ -1576,7 +1613,16 @@ void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg) check_sec_nsec_mem_config(); - mem_map = static_memory_map; + if 
(IS_ENABLED(CFG_BOOT_MEM)) { + mem_map.alloc_count = CFG_MMAP_REGIONS; + mem_map.map = boot_mem_alloc_tmp(mem_map.alloc_count * + sizeof(*mem_map.map), + alignof(*mem_map.map)); + memory_map_realloc_func = boot_mem_realloc_memory_map; + } else { + mem_map = static_memory_map; + } + static_memory_map = (struct memory_map){ .map = &tmp_mmap_region, .alloc_count = 1, @@ -1603,6 +1649,25 @@ void __weak core_init_mmu_map(unsigned long seed, struct core_mmu_config *cfg) core_init_mmu_regs(cfg); cfg->map_offset = offs; static_memory_map = mem_map; + boot_mem_add_reloc(&static_memory_map.map); +} + +void core_mmu_save_mem_map(void) +{ + if (IS_ENABLED(CFG_BOOT_MEM)) { + size_t alloc_count = static_memory_map.count + 5; + size_t elem_sz = sizeof(*static_memory_map.map); + void *p = NULL; + + p = nex_calloc(alloc_count, elem_sz); + if (!p) + panic(); + memcpy(p, static_memory_map.map, + static_memory_map.count * elem_sz); + static_memory_map.map = p; + static_memory_map.alloc_count = alloc_count; + memory_map_realloc_func = heap_realloc_memory_map; + } } bool core_mmu_mattr_is_ok(uint32_t mattr) From 598686488f397b3254ac71952b7249d70478812e Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:34 +0200 Subject: [PATCH 18/20] core: initialize guest physical memory early Initialize guest physical memory in virt_guest_created() before the first entry into the guest from normal world. This replaces the call to core_mmu_init_phys_mem() in init_tee_runtime(). Remove unused code in core_mmu_init_phys_mem() and the now unused functions core_mmu_get_ta_range() and virt_get_ta_ram(). Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/kernel/boot.c | 3 -- core/arch/arm/kernel/virtualization.c | 12 ++----- core/include/kernel/virtualization.h | 9 ----- core/include/mm/core_mmu.h | 7 ---- core/mm/core_mmu.c | 48 +-------------------------- 5 files changed, 3 insertions(+), 76 deletions(-) diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c index f6b69906c00..ddd33ff7928 100644 --- a/core/arch/arm/kernel/boot.c +++ b/core/arch/arm/kernel/boot.c @@ -867,9 +867,6 @@ static void update_external_dt(void) void init_tee_runtime(void) { - if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) - core_mmu_init_phys_mem(); - /* * With virtualization we call this function when creating the * OP-TEE partition instead. 
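For reference, with this change the per-guest setup in virt_guest_created() ends up ordered roughly as below. This is a condensed view based on the virtualization.c hunk that follows, not the literal function body (partition allocation and error handling are omitted):

    set_current_prtn(prtn);
    /* Per-guest heap */
    malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
    /*
     * Per-guest physical memory pool, previously initialized later
     * from init_tee_runtime() via core_mmu_init_phys_mem()
     */
    phys_mem_init(0, 0, tee_mm_get_smem(prtn->ta_ram),
                  tee_mm_get_bytes(prtn->ta_ram));
    /* Threads and preinitcalls only run once the pool exists */
    thread_init_threads();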
diff --git a/core/arch/arm/kernel/virtualization.c b/core/arch/arm/kernel/virtualization.c index 456e6c040dc..3d0cab0b71a 100644 --- a/core/arch/arm/kernel/virtualization.c +++ b/core/arch/arm/kernel/virtualization.c @@ -322,6 +322,8 @@ TEE_Result virt_guest_created(uint16_t guest_id) set_current_prtn(prtn); malloc_add_pool(__heap1_start, __heap1_end - __heap1_start); + phys_mem_init(0, 0, tee_mm_get_smem(prtn->ta_ram), + tee_mm_get_bytes(prtn->ta_ram)); /* Initialize threads */ thread_init_threads(); /* Do the preinitcalls */ @@ -559,16 +561,6 @@ struct memory_map *virt_get_memory_map(void) return &prtn->mem_map; } -void virt_get_ta_ram(vaddr_t *start, vaddr_t *end) -{ - struct guest_partition *prtn = get_current_prtn(); - - *start = (vaddr_t)phys_to_virt(tee_mm_get_smem(prtn->ta_ram), - MEM_AREA_SEC_RAM_OVERALL, - tee_mm_get_bytes(prtn->ta_ram)); - *end = *start + tee_mm_get_bytes(prtn->ta_ram); -} - #ifdef CFG_CORE_SEL1_SPMC static int find_cookie(struct guest_partition *prtn, uint64_t cookie) { diff --git a/core/include/kernel/virtualization.h b/core/include/kernel/virtualization.h index 62b2c452398..865899bb21d 100644 --- a/core/include/kernel/virtualization.h +++ b/core/include/kernel/virtualization.h @@ -90,13 +90,6 @@ void virt_init_memory(struct memory_map *mem_map, paddr_t secmem0_base, */ struct memory_map *virt_get_memory_map(void); -/** - * virt_get_ta_ram() - get TA RAM mapping for current VM - * @start: beginning of TA RAM returned here - * @end: end of TA RAM returned here - */ -void virt_get_ta_ram(vaddr_t *start, vaddr_t *end); - /** * virt_get_current_guest_id() - return current guest ID * @@ -201,8 +194,6 @@ static inline TEE_Result virt_set_guest(uint16_t guest_id __unused) static inline void virt_unset_guest(void) { } static inline void virt_on_stdcall(void) { } static inline struct memory_map *virt_get_memory_map(void) { return NULL; } -static inline void -virt_get_ta_ram(vaddr_t *start __unused, vaddr_t *end __unused) { } static inline void virt_init_memory(struct memory_map *mem_map __unused, paddr_t secmem0_base __unused, paddr_size_t secmem0_size __unused, diff --git a/core/include/mm/core_mmu.h b/core/include/mm/core_mmu.h index 7ec4ecaf0be..101db33cc45 100644 --- a/core/include/mm/core_mmu.h +++ b/core/include/mm/core_mmu.h @@ -697,13 +697,6 @@ void core_mmu_set_secure_memory(paddr_t base, size_t size); */ void core_mmu_get_secure_memory(paddr_t *base, paddr_size_t *size); -/* - * core_mmu_get_ta_range() - get physical memory range reserved for TAs - * @base: [out] range base address ref or NULL - * @size: [out] range size ref or NULL - */ -void core_mmu_get_ta_range(paddr_t *base, size_t *size); - #endif /*__ASSEMBLER__*/ #endif /* __MM_CORE_MMU_H */ diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index dbdb3d436a1..0be6b5d04b8 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -196,40 +196,6 @@ void core_mmu_set_secure_memory(paddr_t base, size_t size) secure_only[0].size = size; } -void core_mmu_get_ta_range(paddr_t *base, size_t *size) -{ - paddr_t b = 0; - size_t s = 0; - - static_assert(!(TEE_RAM_VA_SIZE % SMALL_PAGE_SIZE)); -#ifdef TA_RAM_START - b = TA_RAM_START; - s = TA_RAM_SIZE; -#else - static_assert(ARRAY_SIZE(secure_only) <= 2); - if (ARRAY_SIZE(secure_only) == 1) { - vaddr_t load_offs = 0; - - assert(core_mmu_tee_load_pa >= secure_only[0].paddr); - load_offs = core_mmu_tee_load_pa - secure_only[0].paddr; - - assert(secure_only[0].size > - load_offs + TEE_RAM_VA_SIZE + TEE_SDP_TEST_MEM_SIZE); - b = secure_only[0].paddr + load_offs + 
TEE_RAM_VA_SIZE; - s = secure_only[0].size - load_offs - TEE_RAM_VA_SIZE - - TEE_SDP_TEST_MEM_SIZE; - } else { - assert(secure_only[1].size > TEE_SDP_TEST_MEM_SIZE); - b = secure_only[1].paddr; - s = secure_only[1].size - TEE_SDP_TEST_MEM_SIZE; - } -#endif - if (base) - *base = b; - if (size) - *size = s; -} - static struct memory_map *get_memory_map(void) { if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { @@ -2721,19 +2687,7 @@ void core_mmu_init_phys_mem(void) paddr_t ps = 0; size_t size = 0; - /* - * Get virtual addr/size of RAM where TA are loaded/executedNSec - * shared mem allocated from teecore. - */ - if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { - vaddr_t s = 0; - vaddr_t e = 0; - - virt_get_ta_ram(&s, &e); - ps = virt_to_phys((void *)s); - size = e - s; - phys_mem_init(0, 0, ps, size); - } else { + if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) { #ifdef CFG_WITH_PAGER /* * The pager uses all core memory so there's no need to add From 6945236d651490ea408873cd99c84fbb6db33255 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 13 Sep 2024 11:48:38 +0200 Subject: [PATCH 19/20] core: merge core_mmu_init_phys_mem() and core_mmu_init_virtualization() Moves the implementation of core_mmu_init_virtualization() into core_mmu_init_phys_mem(). This simplifies init_primary() in core/arch/arm/kernel/boot.c. Signed-off-by: Jens Wiklander Reviewed-by: Etienne Carriere --- core/arch/arm/kernel/boot.c | 7 +------ core/include/mm/core_mmu.h | 2 -- core/mm/core_mmu.c | 31 ++++++++++++++----------------- 3 files changed, 15 insertions(+), 25 deletions(-) diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c index ddd33ff7928..7d5758094c9 100644 --- a/core/arch/arm/kernel/boot.c +++ b/core/arch/arm/kernel/boot.c @@ -936,12 +936,7 @@ static void init_primary(unsigned long pageable_part, unsigned long nsec_entry) IMSG_RAW("\n"); core_mmu_save_mem_map(); - if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { - IMSG("Initializing virtualization support"); - core_mmu_init_virtualization(); - } else { - core_mmu_init_phys_mem(); - } + core_mmu_init_phys_mem(); va = boot_mem_release_unused(); if (!IS_ENABLED(CFG_WITH_PAGER)) { /* diff --git a/core/include/mm/core_mmu.h b/core/include/mm/core_mmu.h index 101db33cc45..42fa12b64d5 100644 --- a/core/include/mm/core_mmu.h +++ b/core/include/mm/core_mmu.h @@ -650,8 +650,6 @@ void core_mmu_set_default_prtn(void); void core_mmu_set_default_prtn_tbl(void); #endif -void core_mmu_init_virtualization(void); - /* Initialize physical memory pool */ void core_mmu_init_phys_mem(void); diff --git a/core/mm/core_mmu.c b/core/mm/core_mmu.c index 0be6b5d04b8..2b528fd6817 100644 --- a/core/mm/core_mmu.c +++ b/core/mm/core_mmu.c @@ -2590,20 +2590,6 @@ bool is_nexus(const void *va) } #endif -void core_mmu_init_virtualization(void) -{ - paddr_t b1 = 0; - paddr_size_t s1 = 0; - - static_assert(ARRAY_SIZE(secure_only) <= 2); - if (ARRAY_SIZE(secure_only) == 2) { - b1 = secure_only[1].paddr; - s1 = secure_only[1].size; - } - virt_init_memory(&static_memory_map, secure_only[0].paddr, - secure_only[0].size, b1, s1); -} - vaddr_t io_pa_or_va(struct io_pa_va *p, size_t len) { assert(p->pa); @@ -2684,10 +2670,19 @@ static void __maybe_unused carve_out_core_mem(paddr_t pa, paddr_t end_pa) void core_mmu_init_phys_mem(void) { - paddr_t ps = 0; - size_t size = 0; + if (IS_ENABLED(CFG_NS_VIRTUALIZATION)) { + paddr_t b1 = 0; + paddr_size_t s1 = 0; + + static_assert(ARRAY_SIZE(secure_only) <= 2); - if (!IS_ENABLED(CFG_NS_VIRTUALIZATION)) { + if (ARRAY_SIZE(secure_only) == 2) { + b1 = secure_only[1].paddr; 
+ s1 = secure_only[1].size; + } + virt_init_memory(&static_memory_map, secure_only[0].paddr, + secure_only[0].size, b1, s1); + } else { #ifdef CFG_WITH_PAGER /* * The pager uses all core memory so there's no need to add @@ -2698,6 +2693,8 @@ void core_mmu_init_phys_mem(void) #else /*!CFG_WITH_PAGER*/ size_t align = BIT(CORE_MMU_USER_CODE_SHIFT); paddr_t end_pa = 0; + size_t size = 0; + paddr_t ps = 0; paddr_t pa = 0; static_assert(ARRAY_SIZE(secure_only) <= 2); From e10641b85eadd17cbad49878c54a321badc7d0bf Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Fri, 20 Sep 2024 15:56:27 +0200 Subject: [PATCH 20/20] core: arm: add CFG_NS_VIRTUALIZATION boot log Add a log entry when CFG_NS_VIRTUALIZATION is enabled, for example: D/TC:0 0 boot_init_primary_late:1028 NS-Virtualization enabled, supporting 2 guests Signed-off-by: Jens Wiklander Reviewed-by: Jerome Forissier Reviewed-by: Etienne Carriere --- core/arch/arm/kernel/boot.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/core/arch/arm/kernel/boot.c b/core/arch/arm/kernel/boot.c index 7d5758094c9..52899add58c 100644 --- a/core/arch/arm/kernel/boot.c +++ b/core/arch/arm/kernel/boot.c @@ -1022,6 +1022,10 @@ void __weak boot_init_primary_late(unsigned long fdt __unused, #ifdef CFG_CORE_ASLR DMSG("Executing at offset %#lx with virtual load address %#"PRIxVA, (unsigned long)boot_mmu_config.map_offset, VCORE_START_VA); +#endif +#ifdef CFG_NS_VIRTUALIZATION + DMSG("NS-virtualization enabled, supporting %u guests", + CFG_VIRT_GUEST_COUNT); #endif if (IS_ENABLED(CFG_MEMTAG)) DMSG("Memory tagging %s",