diff --git a/api/inc/unvic_exports.h b/api/inc/unvic_exports.h
index 50f917fb..69474793 100644
--- a/api/inc/unvic_exports.h
+++ b/api/inc/unvic_exports.h
@@ -21,7 +21,7 @@
 
 /* this value refers to the minimum allowable priority in the physical NVIC
  * module, but not in the virtualised one (vIRQ) */
-#define __UVISOR_NVIC_MIN_PRIORITY ((uint32_t) 1)
+#define __UVISOR_NVIC_MIN_PRIORITY ((uint32_t) 2)
 
 /* this is the maximum priority allowed for the vIRQ module */
 /* users of uVisor APIs can use this to determine the maximum level of
diff --git a/core/debug/inc/debug.h b/core/debug/inc/debug.h
index 0dcc0a35..d0b95292 100644
--- a/core/debug/inc/debug.h
+++ b/core/debug/inc/debug.h
@@ -42,6 +42,13 @@ uint32_t debug_get_version(void);
 void debug_halt_error(THaltError reason);
 void debug_reboot(TResetReason reason);
 
+/* Enter the debug box from a privileged mode exception handler. This function
+ * requires the caller to have already switched the PSP to the debug box stack.
+ * We currently only call this on MPU faults and Hard Faults in
+ * vmpu_sys_mux_handler. If called from outside a privileged mode exception
+ * handler, this function does nothing. */
+uint32_t debug_box_enter_from_priv(uint32_t lr);
+
 #ifdef NDEBUG
 
 #define DEBUG_INIT(...) {}
diff --git a/core/debug/src/debug_box.c b/core/debug/src/debug_box.c
index 666ea9fd..0a3c3f2c 100644
--- a/core/debug/src/debug_box.c
+++ b/core/debug/src/debug_box.c
@@ -47,14 +47,14 @@ static void debug_deprivilege_and_return(void * debug_handler, void * return_han
 {
     /* Source box: Get the current stack pointer. */
     /* Note: The source stack pointer is only used to assess the stack
-     * alignment. */
+     * alignment and to read the xpsr. */
     uint32_t src_sp = context_validate_exc_sf(__get_PSP());
 
     /* Destination box: The debug box. */
     uint8_t dst_id = g_debug_box.box_id;
 
     /* Copy the xPSR from the source exception stack frame. */
-    uint32_t xpsr = ((uint32_t *) src_sp)[7];
+    uint32_t xpsr = vmpu_unpriv_uint32_read((uint32_t) &((uint32_t *) src_sp)[7]);
 
     /* Destination box: Forge the destination stack frame. */
     /* Note: We manually have to set the 4 parameters on the destination stack,
@@ -142,3 +142,31 @@ void debug_register_driver(const TUvisorDebugDriver * const driver)
     g_debug_box.box_id = g_active_box;
     g_debug_box.initialized = 1;
 }
+
+/* FIXME This is a bit platform specific. Consider moving to a platform
+ * specific location. */
+uint32_t debug_box_enter_from_priv(uint32_t lr) {
+    uint32_t shcsr;
+    uint32_t from_priv = !(lr & 0x4);
+
+    /* If we are not handling an exception caused from privileged mode, return
+     * the original lr. */
+    if (!from_priv) {
+        return lr;
+    }
+
+    shcsr = SCB->SHCSR;
+
+    /* Make sure SVC is active. */
+    assert(shcsr & SCB_SHCSR_SVCALLACT_Msk);
+
+    /* We had a fault (from SVC), so clear the SVC fault before returning. SVC
+     * and all other exceptions must be no longer active after the EXC RETURN,
+     * or else we cause usage faults when doing SVCs later (for example, to
+     * reboot via the debug_reboot SVC). */
+    SCB->SHCSR = shcsr & ~SCB_SHCSR_SVCALLACT_Msk;
+
+    /* Return to Thread mode and use the Process Stack for return. The PSP will
+     * have been changed already. */
+    return 0xFFFFFFFD;
+}
diff --git a/core/system/src/system.c b/core/system/src/system.c
index c8ad6d36..b634a3e2 100644
--- a/core/system/src/system.c
+++ b/core/system/src/system.c
@@ -91,9 +91,8 @@ void UVISOR_NAKED UVISOR_NORETURN isr_default_sys_handler(void)
     asm volatile(
         "mov r0, lr\n"
         "mrs r1, MSP\n"
-        "push {lr}\n"
-        "blx vmpu_sys_mux_handler\n"
-        "pop {pc}\n"
+        "bl vmpu_sys_mux_handler\n"
+        "bx r0\n"
     );
 }
 
diff --git a/core/system/src/unvic.c b/core/system/src/unvic.c
index a39897a4..321525f0 100644
--- a/core/system/src/unvic.c
+++ b/core/system/src/unvic.c
@@ -579,15 +579,29 @@ void unvic_init(void)
     /* Verify that the priority bits read at runtime are realistic. */
     assert(g_nvic_prio_bits > 0 && g_nvic_prio_bits <= 8);
 
-    /* check that minimum priority is still in the range of possible priority
-     * levels */
+    /* Check that minimum priority is still in the range of possible priority
+     * levels. */
     assert(__UVISOR_NVIC_MIN_PRIORITY < UVISOR_VIRQ_MAX_PRIORITY);
 
-    /* by setting the priority group to 0 we make sure that all priority levels
+    /* Set the priority of each exception. SVC is lower priority than
+     * MemManage, BusFault, and UsageFault, so that we can recover from
+     * stacking MemManage faults more simply. */
+    static const uint32_t priority_0 = __UVISOR_NVIC_MIN_PRIORITY - 2;
+    static const uint32_t priority_1 = __UVISOR_NVIC_MIN_PRIORITY - 1;
+    assert(priority_0 < __UVISOR_NVIC_MIN_PRIORITY);
+    assert(priority_1 < __UVISOR_NVIC_MIN_PRIORITY);
+    NVIC_SetPriority(MemoryManagement_IRQn, priority_0);
+    NVIC_SetPriority(BusFault_IRQn, priority_0);
+    NVIC_SetPriority(UsageFault_IRQn, priority_0);
+    NVIC_SetPriority(SVCall_IRQn, priority_1);
+    NVIC_SetPriority(DebugMonitor_IRQn, __UVISOR_NVIC_MIN_PRIORITY);
+    NVIC_SetPriority(PendSV_IRQn, UVISOR_VIRQ_MAX_PRIORITY);
+    NVIC_SetPriority(SysTick_IRQn, UVISOR_VIRQ_MAX_PRIORITY);
+
+    /* By setting the priority group to 0 we make sure that all priority levels
      * are available for pre-emption and that interrupts with the same priority
      * level occurring at the same time are served in the default way, that is,
-     * by IRQ number
-     * for example, IRQ 0 has precedence over IRQ 1 if both have the same
-     * priority level */
+     * by IRQ number. For example, IRQ 0 has precedence over IRQ 1 if both have
+     * the same priority level. */
     NVIC_SetPriorityGrouping(0);
 }
diff --git a/core/vmpu/inc/vmpu.h b/core/vmpu/inc/vmpu.h
index f288a2f6..630ca3d0 100644
--- a/core/vmpu/inc/vmpu.h
+++ b/core/vmpu/inc/vmpu.h
@@ -155,7 +155,9 @@ extern void vmpu_arch_init_hw(void);
 extern int vmpu_init_pre(void);
 extern void vmpu_init_post(void);
 
-extern void vmpu_sys_mux_handler(uint32_t lr, uint32_t msp);
+/* Handle system exceptions and interrupts. Return the EXC_RETURN desired for
+ * returning from exception mode. */
+extern uint32_t vmpu_sys_mux_handler(uint32_t lr, uint32_t msp);
 
 /* contains the total number of boxes
  * boxes are enumerated from 0 to (g_vmpu_box_count - 1) and the following
diff --git a/core/vmpu/src/armv7m/vmpu_armv7m.c b/core/vmpu/src/armv7m/vmpu_armv7m.c
index 085e1c78..822de8a8 100644
--- a/core/vmpu/src/armv7m/vmpu_armv7m.c
+++ b/core/vmpu/src/armv7m/vmpu_armv7m.c
@@ -39,17 +39,17 @@ static const MpuRegion* vmpu_fault_find_region(uint32_t fault_addr)
 {
     const MpuRegion *region;
 
-    /* check current box if not base */
+    /* Check current box if not base. */
     if ((g_active_box) && ((region = vmpu_region_find_for_address(g_active_box, fault_addr)) != NULL)) {
         return region;
     }
 
-    /* check base-box */
+    /* Check base-box. */
     if ((region = vmpu_region_find_for_address(0, fault_addr)) != NULL) {
         return region;
     }
 
-    /* if no region was found */
+    /* If no region was found. */
     return NULL;
 }
 
@@ -57,27 +57,26 @@ uint32_t vmpu_fault_find_acl(uint32_t fault_addr, uint32_t size)
 {
     const MpuRegion *region;
 
-    /* return ACL if available */
+    /* Return ACL if available. */
     /* FIXME: Use SECURE_ACCESS for SCR! */
     if (fault_addr == (uint32_t) &SCB->SCR) {
         return UVISOR_TACL_UWRITE | UVISOR_TACL_UREAD;
     }
 
-    /* translate fault_addr into its physical address if it is in the bit-banding region */
+    /* Translate fault_addr into its physical address if it is in the bit-banding region. */
     if (fault_addr >= VMPU_PERIPH_BITBAND_START && fault_addr <= VMPU_PERIPH_BITBAND_END) {
         fault_addr = VMPU_PERIPH_BITBAND_ALIAS_TO_ADDR(fault_addr);
-    }
-    else if (fault_addr >= VMPU_SRAM_BITBAND_START && fault_addr <= VMPU_SRAM_BITBAND_END) {
+    } else if (fault_addr >= VMPU_SRAM_BITBAND_START && fault_addr <= VMPU_SRAM_BITBAND_END) {
         fault_addr = VMPU_SRAM_BITBAND_ALIAS_TO_ADDR(fault_addr);
     }
 
-    /* search base box and active box ACLs */
+    /* Search base box and active box ACLs. */
     if (!(region = vmpu_fault_find_region(fault_addr))) {
         return 0;
     }
 
-    /* ensure that data fits in selected region */
-    if((fault_addr + size) > region->end) {
+    /* Ensure that data fits in selected region. */
+    if ((fault_addr + size) > region->end) {
         return 0;
     }
 
@@ -91,20 +90,21 @@ static int vmpu_fault_recovery_mpu(uint32_t pc, uint32_t sp, uint32_t fault_addr
     const MpuRegion *region;
     uint8_t mask, index, page;
 
-    /* no recovery possible if the MPU syndrome register is not valid */
-    if (fault_status != 0x82) {
+    /* No recovery is possible if the MPU syndrome register is not valid or
+     * this is not a stacking fault (where the MPU syndrome register would not
+     * be valid, but we can still recover). */
+    if (!((fault_status == (SCB_CFSR_MMARVALID_Msk | SCB_CFSR_DACCVIOL_Msk)) ||
+          (fault_status & (SCB_CFSR_MSTKERR_Msk | SCB_CFSR_MUNSTKERR_Msk)))) {
         return 0;
     }
 
-    if (page_allocator_get_active_mask_for_address(fault_addr, &mask, &index, &page) == UVISOR_ERROR_PAGE_OK)
-    {
+    if (page_allocator_get_active_mask_for_address(fault_addr, &mask, &index, &page) == UVISOR_ERROR_PAGE_OK) {
         /* Remember this fault. */
         page_allocator_register_fault(page);
 
         vmpu_mem_push_page_acl_iterator(mask, UVISOR_PAGE_MAP_COUNT * 4 - 1 - index);
-    }
-    else {
-        /* find region for faulting address */
+    } else {
+        /* Find region for faulting address. */
         if ((region = vmpu_fault_find_region(fault_addr)) == NULL) {
             return 0;
         }
@@ -115,84 +115,90 @@ static int vmpu_fault_recovery_mpu(uint32_t pc, uint32_t sp, uint32_t fault_addr
     return 1;
 }
 
-void vmpu_sys_mux_handler(uint32_t lr, uint32_t msp)
+uint32_t vmpu_sys_mux_handler(uint32_t lr, uint32_t msp)
 {
     uint32_t psp, pc;
     uint32_t fault_addr, fault_status;
 
-    /* the IPSR enumerates interrupt numbers from 0 up, while *_IRQn numbers are
-     * both positive (hardware IRQn) and negative (system IRQn); here we convert
-     * the IPSR value to this latter encoding */
+    /* The IPSR enumerates interrupt numbers from 0 up, while *_IRQn numbers
+     * are both positive (hardware IRQn) and negative (system IRQn); here we
+     * convert the IPSR value to this latter encoding. */
     int ipsr = ((int) (__get_IPSR() & 0x1FF)) - NVIC_OFFSET;
 
     /* PSP at fault */
     psp = __get_PSP();
 
-    switch(ipsr)
-    {
+    switch (ipsr) {
         case MemoryManagement_IRQn:
-            /* currently we only support recovery from unprivileged mode */
-            if(lr & 0x4)
-            {
+            fault_status = VMPU_SCB_MMFSR;
+
+            /* If we are having an unstacking fault, we can't read the pc
+             * at fault. */
+            if (fault_status & (SCB_CFSR_MSTKERR_Msk | SCB_CFSR_MUNSTKERR_Msk)) {
+                /* Fake pc */
+                pc = 0x0;
+
+                /* The stack pointer is at fault. MMFAR doesn't contain a
+                 * valid fault address. */
+                fault_addr = lr & 0x4 ? psp : msp;
+            } else {
                 /* pc at fault */
-                pc = vmpu_unpriv_uint32_read(psp + (6 * 4));
+                if (lr & 0x4) {
+                    pc = vmpu_unpriv_uint32_read(psp + (6 * 4));
+                } else {
+                    /* We can be privileged here if we tried doing an ldrt or
+                     * strt to a region not currently loaded in the MPU. In
+                     * such cases, we are reading from the msp and shouldn't go
+                     * through vmpu_unpriv_uint32_read. A box wouldn't have
+                     * access to our stack. */
+                    pc = *(uint32_t *) (msp + (6 * 4));
+                }
 
-                /* backup fault address and status */
+                /* Backup fault address and status */
                 fault_addr = SCB->MMFAR;
-                fault_status = VMPU_SCB_MMFSR;
-
-                /* check if the fault is an MPU fault */
-                if (vmpu_fault_recovery_mpu(pc, psp, fault_addr, fault_status)) {
-                    VMPU_SCB_MMFSR = fault_status;
-                    return;
-                }
+            }
 
-                /* if recovery was not successful, throw an error and halt */
-                DEBUG_FAULT(FAULT_MEMMANAGE, lr, psp);
+            /* Check if the fault is an MPU fault. */
+            if (vmpu_fault_recovery_mpu(pc, psp, fault_addr, fault_status)) {
                 VMPU_SCB_MMFSR = fault_status;
-                HALT_ERROR(PERMISSION_DENIED, "Access to restricted resource denied");
-            }
-            else
-            {
-                DEBUG_FAULT(FAULT_MEMMANAGE, lr, msp);
-                HALT_ERROR(FAULT_MEMMANAGE, "Cannot recover from privileged MemManage fault");
+                return lr;
             }
+
+            /* If recovery was not successful, throw an error and halt. */
+            DEBUG_FAULT(FAULT_MEMMANAGE, lr, lr & 0x4 ? psp : msp);
+            VMPU_SCB_MMFSR = fault_status;
+            HALT_ERROR(PERMISSION_DENIED, "Access to restricted resource denied");
+            lr = debug_box_enter_from_priv(lr);
            break;
 
         case BusFault_IRQn:
-            /* bus faults can be used in a "managed" way, triggered to let uVisor
-             * handle some restricted registers
-             * note: this feature will not be needed anymore when the register-level
-             * will be implemented */
+            /* Bus faults can be used in a "managed" way, triggered to let
+             * uVisor handle some restricted registers.
+             * Note: This feature will not be needed anymore when the
+             * register-level will be implemented. */
 
-            /* note: all recovery functions update the stacked stack pointer so
-             * that exception return points to the correct instruction */
+            /* Note: All recovery functions update the stacked stack pointer so
+             * that exception return points to the correct instruction. */
 
-            /* currently we only support recovery from unprivileged mode */
-            if(lr & 0x4)
-            {
+            /* Currently we only support recovery from unprivileged mode. */
+            if (lr & 0x4) {
                 /* pc at fault */
                 pc = vmpu_unpriv_uint32_read(psp + (6 * 4));
 
-                /* backup fault address and status */
+                /* Backup fault address and status */
                 fault_addr = SCB->BFAR;
                 fault_status = VMPU_SCB_BFSR;
 
-                /* check if the fault is the special register corner case */
+                /* Check if the fault is the special register corner case. */
                 if (!vmpu_fault_recovery_bus(pc, psp, fault_addr, fault_status)) {
                     VMPU_SCB_BFSR = fault_status;
-                    return;
+                    return lr;
                 }
-
-                /* if recovery was not successful, throw an error and halt */
-                DEBUG_FAULT(FAULT_BUS, lr, psp);
-                HALT_ERROR(PERMISSION_DENIED, "Access to restricted resource denied");
-            }
-            else
-            {
-                DEBUG_FAULT(FAULT_BUS, lr, msp);
-                HALT_ERROR(FAULT_BUS, "Cannot recover from privileged bus fault");
             }
+
+            /* If recovery was not successful, throw an error and halt. */
+            DEBUG_FAULT(FAULT_BUS, lr, lr & 0x4 ? psp : msp);
+            HALT_ERROR(PERMISSION_DENIED, "Access to restricted resource denied");
             break;
 
         case UsageFault_IRQn:
@@ -203,6 +209,7 @@ void vmpu_sys_mux_handler(uint32_t lr, uint32_t msp)
         case HardFault_IRQn:
             DEBUG_FAULT(FAULT_HARD, lr, lr & 0x4 ? psp : msp);
             HALT_ERROR(FAULT_HARD, "Cannot recover from a hard fault.");
+            lr = debug_box_enter_from_priv(lr);
             break;
 
         case DebugMonitor_IRQn:
@@ -219,9 +226,11 @@ void vmpu_sys_mux_handler(uint32_t lr, uint32_t msp)
             break;
 
         default:
-            HALT_ERROR(NOT_ALLOWED, "Active IRQn is not a system interrupt");
+            HALT_ERROR(NOT_ALLOWED, "Active IRQn(%i) is not a system interrupt", ipsr);
             break;
     }
+
+    return lr;
 }
 
 static int vmpu_mem_push_page_acl_iterator(uint8_t mask, uint8_t index)
@@ -240,7 +249,7 @@ static int vmpu_mem_push_page_acl_iterator(uint8_t mask, uint8_t index)
     return 0;
 }
 
-/* FIXME: added very simple MPU region switching - optimize! */
+/* FIXME: We've added very simple MPU region switching. - Optimize! */
 void vmpu_switch(uint8_t src_box, uint8_t dst_box)
 {
     uint32_t dst_count;
@@ -258,8 +267,7 @@ void vmpu_switch(uint8_t src_box, uint8_t dst_box)
     vmpu_region_get_for_box(dst_box, &region, &dst_count);
 
     /* Only write stack and context ACL for secure boxes. */
-    if (dst_box)
-    {
+    if (dst_box) {
         assert(dst_count);
         /* Push the stack and context protection ACL into ARMv7M_MPU_REGIONS_STATIC. */
         vmpu_mpu_push(region, 255);
@@ -271,24 +279,23 @@ void vmpu_switch(uint8_t src_box, uint8_t dst_box)
     page_allocator_iterate_active_page_masks(vmpu_mem_push_page_acl_iterator, PAGE_ALLOCATOR_ITERATOR_DIRECTION_BACKWARD);
     /* g_mpu_slot may now have been incremented by one, if page heap is used by this box. */
 
-    while (dst_count-- && vmpu_mpu_push(region++, 2)) ;
+    while (dst_count-- && vmpu_mpu_push(region++, 2));
 
-    if (!dst_box)
-    {
+    if (!dst_box) {
         /* Handle main box ACLs last. */
         vmpu_region_get_for_box(0, &region, &dst_count);
 
-        while (dst_count-- && vmpu_mpu_push(region++, 1)) ;
+        while (dst_count-- && vmpu_mpu_push(region++, 1));
     }
-
 }
 
 void vmpu_load_box(uint8_t box_id)
 {
-    if(box_id == 0)
+    if (box_id == 0) {
         vmpu_switch(0, 0);
-    else
+    } else {
         HALT_ERROR(NOT_IMPLEMENTED, "currently only box 0 can be loaded");
+    }
 }
 
 extern int vmpu_region_bits(uint32_t size);
@@ -303,31 +310,30 @@ void vmpu_acl_stack(uint8_t box_id, uint32_t bss_size, uint32_t stack_size)
         box_mem_pos = (uint32_t) __uvisor_config.bss_boxes_start;
     }
 
-    /* handle main box */
+    /* Handle main box. */
     if (box_id == 0) {
         DPRINTF("ctx=%i stack=%i\n\r", bss_size, stack_size);
 
-        /* non-important sanity checks */
+        /* Non-important sanity checks */
         assert(stack_size == 0);
 
-        /* assign main box stack pointer to existing
-         * unprivileged stack pointer */
+        /* Assign main box stack pointer to existing unprivileged stack
+         * pointer. */
         g_context_current_states[0].sp = __get_PSP();
         /* Box 0 still uses the main heap to be backwards compatible. */
         g_context_current_states[0].bss = (uint32_t) __uvisor_config.heap_start;
         return;
     }
 
-    /* ensure that box stack is at least UVISOR_MIN_STACK_SIZE */
+    /* Ensure that box stack is at least UVISOR_MIN_STACK_SIZE. */
     stack_size = UVISOR_MIN_STACK(stack_size);
 
-    /* ensure that 2/8th are available for protecting stack from
-     * context - include rounding error margin */
+    /* Ensure that 2/8th are available for protecting stack from context -
+     * include rounding error margin. */
     bits = vmpu_region_bits(((stack_size + bss_size) * 8) / 6);
 
-    /* ensure MPU region size of at least 256 bytes for
-     * subregion support */
-    if(bits < 8) {
+    /* Ensure MPU region size of at least 256 bytes for subregion support. */
+    if (bits < 8) {
         bits = 8;
     }
     size = 1UL << bits;
@@ -335,30 +341,30 @@
     DPRINTF("\tbox[%i] stack=%i bss=%i rounded=%i\n\r", box_id, stack_size, bss_size, size);
 
-    /* check for correct context address alignment:
-     * alignment needs to be a muiltiple of the size */
-    if( (box_mem_pos & (size - 1)) != 0 ) {
+    /* Check for correct context address alignment. Alignment needs to be a
+     * multiple of the size. */
+    if ((box_mem_pos & (size - 1)) != 0) {
        box_mem_pos = (box_mem_pos & ~(size - 1)) + size;
     }
 
-    /* check if we have enough memory left */
-    if((box_mem_pos + size) > ((uint32_t) __uvisor_config.bss_boxes_end)) {
+    /* Check if we have enough memory left. */
+    if ((box_mem_pos + size) > ((uint32_t) __uvisor_config.bss_boxes_end)) {
         HALT_ERROR(SANITY_CHECK_FAILED, "memory overflow - increase uvisor memory allocation\n\r");
     }
 
-    /* round context sizes, leave one free slot */
+    /* Round context sizes. Leave one free slot. */
     slots_ctx = (bss_size + block_size - 1) / block_size;
     slots_stack = slots_ctx ? (8 - slots_ctx - 1) : 8;
 
-    /* final sanity checks */
-    if( (slots_ctx * block_size) < bss_size ) {
+    /* Final sanity checks */
+    if ((slots_ctx * block_size) < bss_size) {
        HALT_ERROR(SANITY_CHECK_FAILED, "slots_ctx underrun\n\r");
     }
-    if( (slots_stack * block_size) < stack_size ) {
+    if ((slots_stack * block_size) < stack_size) {
        HALT_ERROR(SANITY_CHECK_FAILED, "slots_stack underrun\n\r");
     }
 
-    /* allocate context pointer */
+    /* Allocate context pointer. */
     g_context_current_states[box_id].bss = slots_ctx ? box_mem_pos : (uint32_t) NULL;
     /* `(box_mem_pos + size)` is already outside the memory protected by the
      * MPU region, so a pointer 8B below stack top is chosen (8B due to stack
@@ -370,7 +376,7 @@ void vmpu_acl_stack(uint8_t box_id, uint32_t bss_size, uint32_t stack_size)
         memset((void *) box_mem_pos, 0, bss_size);
     }
 
-    /* create stack protection region */
+    /* Create stack protection region. */
     size = vmpu_region_add_static_acl(
         box_id,
         box_mem_pos,
@@ -379,7 +385,7 @@ void vmpu_acl_stack(uint8_t box_id, uint32_t bss_size, uint32_t stack_size)
         slots_ctx ? 1UL << slots_ctx : 0
     );
 
-    /* move on to the next memory block */
+    /* Move on to the next memory block. */
     box_mem_pos += size;
 }
 
@@ -417,13 +423,13 @@ void vmpu_arch_init_hw(void)
     /* Enable the public SRAM:
      *
      * We use one region for this, which start at SRAM origin (which is always
-     * aligned) and has a power-of-two size that is equal or _larger_ than SRAM.
-     * This means the region may end _behind_ the end of SRAM!
+     * aligned) and has a power-of-two size that is equal or _larger_ than
+     * SRAM. This means the region may end _behind_ the end of SRAM!
      *
-     * At the beginning of SRAM uVisor places its private BSS section and behind
-     * that the page heap. In order to use only one region, we require the end of
-     * the page heap to align with 1/8th of the region size, so that we can use
-     * the subregion mask.
+     * At the beginning of SRAM uVisor places its private BSS section and
+     * behind that the page heap. In order to use only one region, we require
+     * the end of the page heap to align with 1/8th of the region size, so that
+     * we can use the subregion mask.
      * The page heap reduces the memory wastage to less than one page size, by
      * "growing" the page heap downwards from the subregion alignment towards
      * the uVisor bss.
@@ -484,10 +490,10 @@ void vmpu_arch_init_hw(void)
 
     /* On page heap alignments:
      *
-     * Individual pages in the page heap are protected by subregions.
-     * A page of size 2^N must have its start address aligned to 2^N.
-     * However, for page sizes > 1/8th region size, the start address is
-     * not guaranteed to be aligned to 2^N.
+     * Individual pages in the page heap are protected by subregions. A page of
+     * size 2^N must have its start address aligned to 2^N. However, for page
+     * sizes > 1/8th region size, the start address is not guaranteed to be
+     * aligned to 2^N.
      *
      * Example: 2^N = page size, 2^(N-1) = 1/8th SRAM (32kB page size in a 128kB SRAM).
      *
@@ -497,8 +503,8 @@ void vmpu_arch_init_hw(void)
      * +-----------+ <-- page start address: 0x30000 - 32kB = 0x10000 is not aligned to 32kB!!
     * |           |
      *
-     * Due to these contradicting alignment requirements, it is not possible
-     * to have a page size larger than 1/8th region size.
+     * Due to these contradicting alignment requirements, it is not possible to
+     * have a page size larger than 1/8th region size.
      */
     if (subregions_size < *__uvisor_config.page_size) {
         HALT_ERROR(SANITY_CHECK_FAILED,
diff --git a/core/vmpu/src/kinetis/vmpu_kinetis.c b/core/vmpu/src/kinetis/vmpu_kinetis.c
index 58c2d2a9..8adb14ad 100644
--- a/core/vmpu/src/kinetis/vmpu_kinetis.c
+++ b/core/vmpu/src/kinetis/vmpu_kinetis.c
@@ -35,7 +35,7 @@ static int vmpu_fault_recovery_mpu(uint32_t pc, uint32_t sp, uint32_t fault_addr
     uint32_t start_addr, end_addr;
     uint8_t page;
 
-    /* Check if fault address is a page */
+    /* Check if the fault address is a page. */
     if (page_allocator_get_active_region_for_address(fault_addr, &start_addr, &end_addr, &page) == UVISOR_ERROR_PAGE_OK) {
         /* Remember this fault. */
         page_allocator_register_fault(page);
@@ -50,98 +50,115 @@ static int vmpu_fault_recovery_mpu(uint32_t pc, uint32_t sp, uint32_t fault_addr
     return -1;
 }
 
-void vmpu_sys_mux_handler(uint32_t lr, uint32_t msp)
+uint32_t vmpu_sys_mux_handler(uint32_t lr, uint32_t msp)
 {
     uint32_t psp, pc;
     uint32_t fault_addr, fault_status;
 
-    /* the IPSR enumerates interrupt numbers from 0 up, while *_IRQn numbers are
-     * both positive (hardware IRQn) and negative (system IRQn); here we convert
-     * the IPSR value to this latter encoding */
+    /* The IPSR enumerates interrupt numbers from 0 up, while *_IRQn numbers
+     * are both positive (hardware IRQn) and negative (system IRQn); here we
+     * convert the IPSR value to this latter encoding */
     int ipsr = ((int) (__get_IPSR() & 0x1FF)) - NVIC_OFFSET;
 
     /* PSP at fault */
     psp = __get_PSP();
 
-    switch(ipsr)
-    {
+    switch (ipsr) {
         case MemoryManagement_IRQn:
             DEBUG_FAULT(FAULT_MEMMANAGE, lr, lr & 0x4 ? psp : msp);
+            HALT_ERROR(FAULT_MEMMANAGE, "Cannot recover from a memmanage fault");
             break;
 
         case BusFault_IRQn:
-            /* where a Freescale MPU is used, bus faults can originate both as
+            /* Where a Freescale MPU is used, bus faults can originate both as
              * pure bus faults or as MPU faults; in addition, they can be both
-             * precise and imprecise
-             * there is also an additional corner case: some registers (MK64F)
+             * precise and imprecise.
+             * There is also an additional corner case: some registers (MK64F)
              * cannot be accessed in unprivileged mode even if an MPU region is
             * created for them and the corresponding bit in PACRx is set. In
-             * some cases we want to allow access for them (with ACLs), hence we
-             * use a function that looks for a specially crafted opcode */
+             * some cases we want to allow access for them (with ACLs), hence
+             * we use a function that looks for a specially crafted opcode. */
+
+            /* Note: All recovery functions update the stacked stack pointer so
+             * that exception return points to the correct instruction. */
 
-            /* note: all recovery functions update the stacked stack pointer so
-             * that exception return points to the correct instruction */
+            fault_status = VMPU_SCB_BFSR;
 
-            /* currently we only support recovery from unprivileged mode */
-            if(lr & 0x4)
-            {
+            /* If we are having an unstacking fault, we can't read the pc
+             * at fault. */
+            if (fault_status & (SCB_CFSR_MSTKERR_Msk | SCB_CFSR_MUNSTKERR_Msk)) {
+                /* fake pc */
+                pc = 0x0;
+
+                /* The stack pointer is at fault. BFAR doesn't contain a
+                 * valid fault address. */
+                fault_addr = psp;
+            } else {
                 /* pc at fault */
-                pc = vmpu_unpriv_uint32_read(psp + (6 * 4));
+                if (lr & 0x4) {
+                    pc = vmpu_unpriv_uint32_read(psp + (6 * 4));
+                } else {
+                    /* We can be privileged here if we tried doing an ldrt or
+                     * strt to a region not currently loaded in the MPU. In
+                     * such cases, we are reading from the msp and shouldn't go
+                     * through vmpu_unpriv_uint32_read. A box wouldn't have
+                     * access to our stack. */
+                    pc = *(uint32_t *) (msp + (6 * 4));
+                }
 
                 /* backup fault address and status */
                 fault_addr = SCB->BFAR;
-                fault_status = VMPU_SCB_BFSR;
-
-                /* check if the fault is an MPU fault */
-                int slave_port = vmpu_fault_get_slave_port();
-                if (slave_port >= 0) {
-                    /* If the fault comes from the MPU module, we don't use the
-                     * bus fault syndrome register, but the MPU one. */
-                    fault_addr = MPU->SP[slave_port].EAR;
-
-                    /* Check if we can recover from the MPU fault. */
-                    if (!vmpu_fault_recovery_mpu(pc, psp, fault_addr)) {
-                        /* We clear the bus fault status anyway. */
-                        VMPU_SCB_BFSR = fault_status;
-
-                        /* We also clear the MPU fault status bit. */
-                        vmpu_fault_clear_slave_port(slave_port);
-
-                        /* Recover from the exception. */
-                        return;
-                    }
-                } else if (slave_port == VMPU_FAULT_MULTIPLE) {
-                    DPRINTF("Multiple MPU violations found.\r\n");
-                }
+            }
 
-                /* check if the fault is the special register corner case */
-                if (!vmpu_fault_recovery_bus(pc, psp, fault_addr, fault_status)) {
+            /* Check if the fault is an MPU fault. */
+            int slave_port = vmpu_fault_get_slave_port();
+            if (slave_port >= 0) {
+                /* If the fault comes from the MPU module, we don't use the
+                 * bus fault syndrome register, but the MPU one. */
+                fault_addr = MPU->SP[slave_port].EAR;
+
+                /* Check if we can recover from the MPU fault. */
+                if (!vmpu_fault_recovery_mpu(pc, psp, fault_addr)) {
+                    /* We clear the bus fault status anyway. */
                     VMPU_SCB_BFSR = fault_status;
-                    return;
+
+                    /* We also clear the MPU fault status bit. */
+                    vmpu_fault_clear_slave_port(slave_port);
+
+                    /* Recover from the exception. */
+                    return lr;
                 }
+            } else if (slave_port == VMPU_FAULT_MULTIPLE) {
+                DPRINTF("Multiple MPU violations found.\r\n");
+            }
 
-                /* if recovery was not successful, throw an error and halt */
-                DEBUG_FAULT(FAULT_BUS, lr, psp);
+            /* Check if the fault is the special register corner case. */
+            if (!vmpu_fault_recovery_bus(pc, psp, fault_addr, fault_status)) {
                 VMPU_SCB_BFSR = fault_status;
-                HALT_ERROR(PERMISSION_DENIED, "Access to restricted resource denied");
-            }
-            else
-            {
-                DEBUG_FAULT(FAULT_BUS, lr, msp);
-                HALT_ERROR(FAULT_BUS, "Cannot recover from privileged bus fault");
+                return lr;
             }
+
+            /* If recovery was not successful, throw an error and halt. */
+            DEBUG_FAULT(FAULT_BUS, lr, lr & 0x4 ? psp : msp);
+            VMPU_SCB_BFSR = fault_status;
+            HALT_ERROR(PERMISSION_DENIED, "Access to restricted resource denied");
+            lr = debug_box_enter_from_priv(lr);
             break;
 
         case UsageFault_IRQn:
             DEBUG_FAULT(FAULT_USAGE, lr, lr & 0x4 ? psp : msp);
+            HALT_ERROR(FAULT_USAGE, "Cannot recover from a usage fault.");
             break;
 
         case HardFault_IRQn:
             DEBUG_FAULT(FAULT_HARD, lr, lr & 0x4 ? psp : msp);
+            HALT_ERROR(FAULT_HARD, "Cannot recover from a hard fault.");
+            lr = debug_box_enter_from_priv(lr);
             break;
 
         case DebugMonitor_IRQn:
             DEBUG_FAULT(FAULT_DEBUG, lr, lr & 0x4 ? psp : msp);
+            HALT_ERROR(FAULT_DEBUG, "Cannot recover from a debug fault.");
             break;
 
         case PendSV_IRQn:
@@ -156,21 +173,22 @@
             HALT_ERROR(NOT_ALLOWED, "Active IRQn(%i) is not a system interrupt", ipsr);
             break;
     }
+
+    return lr;
 }
 
 void vmpu_acl_stack(uint8_t box_id, uint32_t bss_size, uint32_t stack_size)
 {
     static uint32_t g_box_mem_pos = 0;
 
-    /* handle main box */
-    if (box_id == 0)
-    {
+    /* Handle main box. */
+    if (box_id == 0) {
         DPRINTF("ctx=%i stack=%i\n\r", bss_size, stack_size);
         /* non-important sanity checks */
         assert(stack_size == 0);
 
-        /* assign main box stack pointer to existing
-         * unprivileged stack pointer */
+        /* Assign main box stack pointer to existing unprivileged stack
+         * pointer. */
         g_context_current_states[0].sp = __get_PSP();
         /* Box 0 still uses the main heap to be backwards compatible. */
         g_context_current_states[0].bss = (uint32_t) __uvisor_config.heap_start;
@@ -178,16 +196,16 @@ void vmpu_acl_stack(uint8_t box_id, uint32_t bss_size, uint32_t stack_size)
     }
 
     if (!g_box_mem_pos) {
-        /* initialize box memories, leave stack-band sized gap */
+        /* Initialize box memories. Leave stack-band sized gap. */
         g_box_mem_pos = UVISOR_REGION_ROUND_UP(
             (uint32_t)__uvisor_config.bss_boxes_start) +
             UVISOR_STACK_BAND_SIZE;
     }
 
-    /* ensure stack & context alignment */
+    /* Ensure stack & context alignment. */
     stack_size = UVISOR_REGION_ROUND_UP(UVISOR_MIN_STACK(stack_size));
 
-    /* add stack ACL */
+    /* Add stack ACL. */
     vmpu_region_add_static_acl(
         box_id,
         g_box_mem_pos,
@@ -196,13 +214,13 @@ void vmpu_acl_stack(uint8_t box_id, uint32_t bss_size, uint32_t stack_size)
         0
     );
-    /* set stack pointer to box stack size minus guard band */
+    /* Set stack pointer to box stack size minus guard band. */
     g_box_mem_pos += stack_size;
     g_context_current_states[box_id].sp = g_box_mem_pos;
 
-    /* add stack protection band */
+    /* Add stack protection band. */
     g_box_mem_pos += UVISOR_STACK_BAND_SIZE;
 
-    /* add context ACL */
+    /* Add context ACL. */
     assert(bss_size != 0);
     bss_size = UVISOR_REGION_ROUND_UP(bss_size);
     g_context_current_states[box_id].bss = g_box_mem_pos;
@@ -212,14 +230,14 @@ void vmpu_acl_stack(uint8_t box_id, uint32_t bss_size, uint32_t stack_size)
         bss_size
     );
 
-    /* reset uninitialized secured box context */
+    /* Reset uninitialized secured box context. */
     memset(
         (void *) g_box_mem_pos,
         0,
         bss_size
     );
 
-    /* add context ACL */
+    /* Add context ACL. */
     vmpu_region_add_static_acl(
         box_id,
         g_box_mem_pos,
@@ -233,7 +251,7 @@ void vmpu_acl_stack(uint8_t box_id, uint32_t bss_size, uint32_t stack_size)
 
 void vmpu_switch(uint8_t src_box, uint8_t dst_box)
 {
-    /* check for errors */
+    /* Check for errors. */
     if (!vmpu_is_box_id_valid(src_box)) {
         HALT_ERROR(SANITY_CHECK_FAILED, "vMPU switch: The source box ID is out of range (%u).\r\n", src_box);
     }
@@ -241,31 +259,30 @@ void vmpu_switch(uint8_t src_box, uint8_t dst_box)
         HALT_ERROR(SANITY_CHECK_FAILED, "vMPU switch: The destination box ID is out of range (%u).\r\n", dst_box);
     }
 
-    /* switch ACLs for peripherals */
+    /* Switch ACLs for peripherals. */
     vmpu_aips_switch(src_box, dst_box);
 
-    /* switch ACLs for memory regions */
+    /* Switch ACLs for memory regions. */
     vmpu_mem_switch(src_box, dst_box);
 }
 
 uint32_t vmpu_fault_find_acl(uint32_t fault_addr, uint32_t size)
 {
-    /* only support peripheral access and corner cases for now */
+    /* Only support peripheral access and corner cases for now. */
     /* FIXME: Use SECURE_ACCESS for SCR! */
     if (fault_addr == (uint32_t) &SCB->SCR) {
         return UVISOR_TACL_UWRITE | UVISOR_TACL_UREAD;
     }
 
-    /* translate fault_addr into its physical address if it is in the bit-banding region */
+    /* Translate fault_addr into its physical address if it is in the bit-banding region. */
     if (VMPU_PERIPH_BITBAND_START <= fault_addr && fault_addr <= VMPU_PERIPH_BITBAND_END) {
         fault_addr = VMPU_PERIPH_BITBAND_ALIAS_TO_ADDR(fault_addr);
-    }
-    else if (VMPU_SRAM_BITBAND_START <= fault_addr && fault_addr <= VMPU_SRAM_BITBAND_END) {
+    } else if (VMPU_SRAM_BITBAND_START <= fault_addr && fault_addr <= VMPU_SRAM_BITBAND_END) {
         fault_addr = VMPU_SRAM_BITBAND_ALIAS_TO_ADDR(fault_addr);
     }
 
-    /* look for ACL */
-    if( (AIPS0_BASE <= fault_addr) && (fault_addr < (AIPS0_BASE + 0xFEUL * AIPSx_SLOT_SIZE)) ) {
+    /* Look for ACL. */
+    if ((AIPS0_BASE <= fault_addr) && (fault_addr < (AIPS0_BASE + 0xFEUL * AIPSx_SLOT_SIZE))) {
         return vmpu_fault_find_acl_aips(g_active_box, fault_addr, size);
     }
 
@@ -274,7 +291,7 @@ uint32_t vmpu_fault_find_acl(uint32_t fault_addr, uint32_t size)
 
 void vmpu_load_box(uint8_t box_id)
 {
-    if(box_id != 0) {
+    if (box_id != 0) {
         HALT_ERROR(NOT_IMPLEMENTED, "currently only box 0 can be loaded");
     }
     vmpu_aips_switch(box_id, box_id);
@@ -285,7 +302,7 @@ void vmpu_arch_init(void)
 {
     vmpu_mpu_init();
 
-    /* init memory protection */
+    /* Init memory protection. */
     vmpu_mem_init();
 
     vmpu_mpu_lock();
diff --git a/core/vmpu/src/vmpu.c b/core/vmpu/src/vmpu.c
index 50137097..7fd85a37 100644
--- a/core/vmpu/src/vmpu.c
+++ b/core/vmpu/src/vmpu.c
@@ -368,18 +368,25 @@ int vmpu_fault_recovery_bus(uint32_t pc, uint32_t sp, uint32_t fault_addr, uint3
         HALT_ERROR(NOT_ALLOWED, "This is not the PC (0x%08X) your were searching for", pc);
     }
 
-    /* Check fault register; the following two configurations are allowed:
-     * 0x04 - imprecise data bus fault, no stacking/unstacking errors.
-     * 0x82 - precise data bus fault, no stacking/unstacking errors. */
+    /* Check fault register; the following two configurations are allowed.
+     * - Precise data bus fault, no stacking/unstacking errors
+     * - Imprecise data bus fault, no stacking/unstacking errors */
     /* Note: Currently the faulting address argument is not used, since it
      * is saved in r0 for managed bus faults. */
     switch (fault_status) {
-        case 0x82:
+        case (SCB_CFSR_MMARVALID_Msk | SCB_CFSR_DACCVIOL_Msk):
+            /* Precise data bus fault, no stacking/unstacking errors */
             cnt_max = 0;
             break;
-        case 0x04:
+
+        /* Shift right by a byte because our BFSR (read into fault_status) is
+         * already shifted relative to CFSR. The CMSIS masks are CFSR relative,
+         * so we need to shift the mask to align with our BFSR. */
+        case (SCB_CFSR_IMPRECISERR_Msk >> 8):
+            /* Imprecise data bus fault, no stacking/unstacking errors */
            cnt_max = UVISOR_NOP_CNT;
            break;
+
         default:
             return -1;
     }
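
Background on the privileged fault paths touched above: uVisor reads box memory with unprivileged-access instructions (LDRT/STRT) while the core itself is privileged, and with __UVISOR_NVIC_MIN_PRIORITY raised to 2 the fault handlers (MemManage, BusFault and UsageFault at priority 0, SVC at priority 1) always preempt user vIRQs, which start at priority 2. If the target address is not covered by a currently loaded MPU region, the LDRT faults while privileged, which is why the handlers now read the stacked pc from the MSP instead of going through vmpu_unpriv_uint32_read in that case. The snippet below is an illustrative sketch only, not the actual vmpu_unpriv_uint32_read implementation; the helper name example_unpriv_read32 is hypothetical and it assumes GCC inline assembly on ARMv7-M.

#include <stdint.h>

/* Sketch: read a 32-bit word with unprivileged access rights from privileged
 * code. LDRT checks the access against unprivileged MPU permissions, but any
 * resulting MemManage/BusFault is still taken from privileged execution. */
static inline uint32_t example_unpriv_read32(uint32_t addr)
{
    uint32_t value;
    asm volatile ("ldrt %0, [%1]" : "=r" (value) : "r" (addr) : "memory");
    return value;
}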