diff --git a/arch/arm/arm/arch.c b/arch/arm/arm/arch.c
index 3accb62fff..9ebac39c19 100644
--- a/arch/arm/arm/arch.c
+++ b/arch/arm/arm/arch.c
@@ -341,8 +341,22 @@ void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3
     LTRACEF("loader address %p, phys 0x%lx, surrounding large page 0x%lx\n",
             &arm_chain_load, loader_pa, loader_pa_section);
 
+    arch_aspace_t *aspace;
+    bool need_context_switch;
+    // if loader_pa is within the kernel aspace, we can simply use arch_mmu_map to identity map it
+    // if it's outside, we need to create a new aspace and context switch to it
+    if (arch_mmu_is_valid_vaddr(&vmm_get_kernel_aspace()->arch_aspace, loader_pa)) {
+        aspace = &vmm_get_kernel_aspace()->arch_aspace;
+        need_context_switch = false;
+    } else {
+        aspace = malloc(sizeof(*aspace));
+        arch_mmu_init_aspace(aspace, loader_pa_section, SECTION_SIZE, 0);
+        need_context_switch = true;
+    }
+
     /* using large pages, map around the target location */
-    arch_mmu_map(&vmm_get_kernel_aspace()->arch_aspace, loader_pa_section, loader_pa_section, (2 * SECTION_SIZE / PAGE_SIZE), 0);
+    arch_mmu_map(aspace, loader_pa_section, loader_pa_section, (2 * SECTION_SIZE / PAGE_SIZE), 0);
+    if (need_context_switch) arch_mmu_context_switch(aspace);
 #else
     /* for non vm case, just branch directly into it */
     entry_pa = (paddr_t)entry;
@@ -358,6 +372,9 @@ void arch_chain_load(void *entry, ulong arg0, ulong arg1, ulong arg2, ulong arg3
     /* put the booting cpu back into close to a default state */
     arch_quiesce();
 
+    // Linux won't re-enable the FPU during boot, so it must be enabled when chain loading
+    arm_fpu_set_enable(true);
+
     LTRACEF("branching to physical address of loader\n");
 
     /* branch to the physical address version of the chain loader routine */
diff --git a/arch/arm/arm/include/arch/aspace.h b/arch/arm/arm/include/arch/aspace.h
index c0e3f93df1..f0ff86d78e 100644
--- a/arch/arm/arm/include/arch/aspace.h
+++ b/arch/arm/arm/include/arch/aspace.h
@@ -25,4 +25,8 @@ struct arch_aspace {
     struct list_node pt_page_list;
 };
 
+static inline bool arch_mmu_is_valid_vaddr(struct arch_aspace *aspace, vaddr_t vaddr) {
+    return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);
+}
+
 __END_CDECLS
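
The arch_mmu_is_valid_vaddr helper added above (and hoisted out of mmu.c below) keeps the inclusive upper-bound form base + size - 1 rather than the more obvious vaddr < base + size: when an aspace runs to the top of the 32-bit address space, base + size wraps to zero and the exclusive comparison rejects every address. A minimal standalone sketch of the difference (the range struct, values, and helper names here are illustrative, not LK code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t vaddr_t; /* 32-bit virtual address, as on this ARM port */

/* illustrative stand-in for the base/size fields of struct arch_aspace */
struct range {
    vaddr_t base;
    uint32_t size;
};

/* mirrors arch_mmu_is_valid_vaddr: inclusive bound, no overflow */
static bool valid_inclusive(const struct range *r, vaddr_t va) {
    return va >= r->base && va <= r->base + r->size - 1;
}

/* naive exclusive bound: base + size wraps to 0 for a top-of-memory range */
static bool valid_exclusive(const struct range *r, vaddr_t va) {
    return va >= r->base && va < (vaddr_t)(r->base + r->size);
}

int main(void) {
    /* a kernel aspace covering the top 2GB: 0x80000000..0xffffffff */
    struct range kspace = { .base = 0x80000000u, .size = 0x80000000u };
    vaddr_t va = 0xc0000000u;

    printf("inclusive: %d\n", valid_inclusive(&kspace, va)); /* 1: accepted */
    printf("exclusive: %d\n", valid_exclusive(&kspace, va)); /* 0: sum wrapped */
    return 0;
}
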
diff --git a/arch/arm/arm/mmu.c b/arch/arm/arm/mmu.c
index b633911144..eed5a6c4b6 100644
--- a/arch/arm/arm/mmu.c
+++ b/arch/arm/arm/mmu.c
@@ -132,10 +132,6 @@ static uint32_t mmu_flags_to_l2_arch_flags_small_page(uint flags) {
     return arch_flags;
 }
 
-static inline bool is_valid_vaddr(arch_aspace_t *aspace, vaddr_t vaddr) {
-    return (vaddr >= aspace->base && vaddr <= aspace->base + aspace->size - 1);
-}
-
 static void arm_mmu_map_section(arch_aspace_t *aspace, addr_t paddr, addr_t vaddr, uint flags) {
     int index;
 
@@ -242,8 +238,8 @@ status_t arch_mmu_query(arch_aspace_t *aspace, vaddr_t vaddr, paddr_t *paddr, ui
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->tt_virt);
 
-    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
-    if (!is_valid_vaddr(aspace, vaddr))
+    DEBUG_ASSERT(arch_mmu_is_valid_vaddr(aspace, vaddr));
+    if (!arch_mmu_is_valid_vaddr(aspace, vaddr))
         return ERR_OUT_OF_RANGE;
 
     /* Get the index into the translation table */
@@ -487,8 +483,8 @@ int arch_mmu_map(arch_aspace_t *aspace, addr_t vaddr, paddr_t paddr, uint count,
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->tt_virt);
 
-    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
-    if (!is_valid_vaddr(aspace, vaddr))
+    DEBUG_ASSERT(arch_mmu_is_valid_vaddr(aspace, vaddr));
+    if (!arch_mmu_is_valid_vaddr(aspace, vaddr))
         return ERR_OUT_OF_RANGE;
 
 #if !WITH_ARCH_MMU_PICK_SPOT
@@ -583,9 +579,9 @@ int arch_mmu_unmap(arch_aspace_t *aspace, vaddr_t vaddr, uint count) {
     DEBUG_ASSERT(aspace);
     DEBUG_ASSERT(aspace->tt_virt);
 
-    DEBUG_ASSERT(is_valid_vaddr(aspace, vaddr));
+    DEBUG_ASSERT(arch_mmu_is_valid_vaddr(aspace, vaddr));
 
-    if (!is_valid_vaddr(aspace, vaddr))
+    if (!arch_mmu_is_valid_vaddr(aspace, vaddr))
         return ERR_OUT_OF_RANGE;
 
     DEBUG_ASSERT(IS_PAGE_ALIGNED(vaddr));
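
Taken together, the arch.c hunks make the chain-load path pick an aspace before identity-mapping the loader: reuse the kernel aspace when loader_pa already falls inside it, otherwise build a throwaway aspace around the surrounding section and switch to it. A condensed sketch of that decision, with stubbed-out MMU calls so it compiles standalone (the stubs, constants, example addresses, and the malloc failure check are additions of this sketch, not part of the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* hypothetical stand-ins for the LK types and MMU entry points used above */
typedef uint32_t paddr_t;
typedef uint32_t vaddr_t;
typedef struct arch_aspace { vaddr_t base; uint32_t size; } arch_aspace_t;

#define SECTION_SIZE (1u << 20) /* 1MB ARM section */
#define PAGE_SIZE    (1u << 12)

static arch_aspace_t kernel_aspace = { .base = 0x80000000u, .size = 0x40000000u };
static arch_aspace_t *kernel_arch_aspace(void) { return &kernel_aspace; }

static bool arch_mmu_is_valid_vaddr(arch_aspace_t *as, vaddr_t va) {
    return va >= as->base && va <= as->base + as->size - 1;
}
static void arch_mmu_init_aspace(arch_aspace_t *as, vaddr_t base, uint32_t size, uint32_t flags) {
    (void)flags;
    as->base = base;
    as->size = size;
}
static void arch_mmu_map(arch_aspace_t *as, vaddr_t va, paddr_t pa, unsigned count, uint32_t flags) {
    (void)as; (void)va; (void)pa; (void)count; (void)flags; /* no-op stub */
}
static void arch_mmu_context_switch(arch_aspace_t *as) { (void)as; /* no-op stub */ }

/* identity-map the sections around loader_pa, switching aspaces only when the
 * loader's physical address cannot be expressed inside the kernel aspace */
static int map_chain_loader(paddr_t loader_pa, paddr_t loader_pa_section) {
    arch_aspace_t *aspace;
    bool need_context_switch;

    if (arch_mmu_is_valid_vaddr(kernel_arch_aspace(), loader_pa)) {
        aspace = kernel_arch_aspace();
        need_context_switch = false;
    } else {
        aspace = malloc(sizeof(*aspace));
        if (!aspace) /* failure check added in this sketch */
            return -1;
        arch_mmu_init_aspace(aspace, loader_pa_section, SECTION_SIZE, 0);
        need_context_switch = true;
    }

    arch_mmu_map(aspace, loader_pa_section, loader_pa_section,
                 2 * SECTION_SIZE / PAGE_SIZE, 0);
    if (need_context_switch)
        arch_mmu_context_switch(aspace);
    return 0;
}

int main(void) {
    paddr_t loader_pa = 0x10008000u; /* example physical load address */
    return map_chain_loader(loader_pa, loader_pa & ~(paddr_t)(SECTION_SIZE - 1));
}
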