227 changes: 212 additions & 15 deletions arch/arm/core/cortex_m/swap_helper.S
@@ -489,20 +489,219 @@ SECTION_FUNC(TEXT, z_arm_svc)
* r8 - saved link register
*/
.L_do_syscall:
/*
* Build a privilege stack frame from the user stack frame, then switch PSP
* to it. This ensures return from SVC does not rely on the user stack.
*
* Layout of privilege stack created from user stack:
*
* +------+-------------------------+------+-------------------------+--------------------------+
* | User stack | Privilege stack | Notes |
* +------+-------------------------+------+-------------------------+--------------------------+
* |Offset| contents |Offset| contents | |
* +------+-------------------------+------+-------------------------+--------------------------+
* | 0 | R0 -> | 0 | R0 | PSP switches from 0th |
* | | | | | offset of user stack to |
* | | | | | 0th offset of priv stack |
* | 4 | R1 -> | 4 | R1 | |
* | 8 | R2 -> | 8 | R2 | |
* | 12 | R3 -> |12 | R3 | |
* | 16 | R12 -> |16 | R12 | |
* | 20 | LR -> |20 | LR | |
* | 24 | Return Address -x> |24 | z_arm_do_syscall |return address from user |
* | | | | |sf is not copied. Instead,|
* | | | | |it is replaced so that |
* | | | | |z_arm_svc returns to |
* | | | | |z_arm_do_syscall. |
* | | | | | |
* | 28 | xPSR (w/ or w/o pad) -> |28 | xPSR (pad bit cleared) |This completes the basic |
* | | | | |exception sf w/ or w/o pad|
* | | | | | |
* | -- | FP regs + FPSCR -> |-- | FP regs + FPSCR |For arch supporting fp |
* | | (w/ or w/o pad) | | |context an additional |
* | | | | |extended sf is copied. |
* |________________________________|______|_________________________|__________________________|
* | | | | |On returning to |
* | | | | |z_arm_do_syscall, the |
* | | | | |above sf has already been |
* | | | | |unstacked and 8B from the |
* | | | | |then sf are used to pass |
* | | | | |original pre-svc sp & the |
* | | | | |return address. |
* | | | | |Note: at the moment |
* | | | | |z_arm_do_syscall also |
* | | | | |expects the return address|
* | | | | |to be set in r8. |
* | | | | | |
* | | | 0 | address that |z_arm_do_syscall expects |
* | | | | z_arm_do_syscall should |the original pre-svc sp at|
* | | | | set as PSP before |0th offset i.e. new sp[0] |
* | | | | returning from svc. |and, |
* | | | | | |
* | | | 4 | Address that |the return address at |
* | | | | z_arm_do_syscall should |sp[4]. Note that this is |
* | | | | return to after handling|the return address copied |
* | | | | svc |from user exception sf[24]|
* | | | | |which was not copied in |
* | | | | |the previous sf. |
* +------+-------------------------+------+-------------------------+--------------------------+
* "sf" in this function is used as abbreviation for "stack frame".
* Note that the "FP regs + FPSCR" are only present if CONFIG_FPU_SHARING=y, and the optional pad
* is only present if PSP was not 8-byte aligned when SVC was executed.
* Also note that FPU cannot be present in ARMv6-M or ARMv8-M Baseline implementations
* (i.e., it may only be present when CONFIG_ARMV7_M_ARMV8_M_MAINLINE is enabled).
*/
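/*
 * Worked example of the optional pad (addresses are hypothetical): if the
 * user PSP was 0x20001234 (4-byte but not 8-byte aligned) when the svc was
 * taken, the hardware stacks the 32B basic frame, inserts a 4B alignment pad
 * and sets bit 9 of the stacked xPSR, leaving PSP at 0x20001210. The original
 * pre-svc sp is then recovered below as 0x20001210 + 32 + 4 = 0x20001234.
 */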
/* Start by fetching the top of privileged stack */
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
movs r3, #24
ldr r1, [r0, r3] /* grab address of PC from stack frame */
mov r8, r1
ldr r1, =_kernel
ldr r1, [r1, #_kernel_offset_to_current]
adds r1, r1, #_thread_offset_to_priv_stack_start
ldr r1, [r1] /* bottom of priv stack */
ldr r3, =CONFIG_PRIVILEGED_STACK_SIZE
subs r3, #(_EXC_HW_SAVED_BASIC_SF_SIZE+8) /* 8 for original sp and pc */
add r1, r3
mov ip, r1
#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
ldr r8, [r0, #24] /* grab address of PC from stack frame */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* bottom of priv stack */
add ip, #CONFIG_PRIVILEGED_STACK_SIZE
#ifdef CONFIG_FPU_SHARING
/* Assess whether svc calling thread had been using the FP registers. */
tst lr, #_EXC_RETURN_FTYPE_Msk
ite eq
moveq r8, #_EXC_HW_SAVED_EXTENDED_SF_SIZE
movne r8, #_EXC_HW_SAVED_BASIC_SF_SIZE
#else
mov r8, #_EXC_HW_SAVED_BASIC_SF_SIZE
#endif
ldr r1, =z_arm_do_syscall
sub ip, #8 /* z_arm_do_syscall will use this to get original sp and pc */
sub ip, r8 /* r8 = 32 (basic sf) or 104 (basic sf + 72B FP extension) */
#endif

/*
* At this point:
* r0 has PSP i.e. top of user stack
* ip has top of privilege stack
* r8 has hardware-saved stack frame size (only in case of mainline)
*/
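/*
 * Illustration with hypothetical numbers for the Mainline path, assuming
 * CONFIG_PRIVILEGED_STACK_SIZE = 1024 and an active FP context (r8 = 104):
 * ip = priv_stack_start + 1024 - 8 - 104. The copied frame will occupy
 * ip[0..103], the pre-svc user sp goes to ip[104] and the svc return address
 * to ip[108], i.e. the last 8 bytes of the privilege stack.
 */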
push {r4-r7}
push {r2}
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
str r1, [r0, r3] /* overwrite the PC to point to z_arm_do_syscall */
mov r2, r0 /* safe to use r2 since it is saved on MSP */

/* Check for padding in the sf */
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET] /* grab xPSR from sf which has the pad bit */
movs r3, #1
/* Check if pad bit 9 is set */
lsls r3, r3, #9
tst r1, r3
beq .L_no_padding
/* special handling for padded sf */
bics r1, r3 /* clear the pad bit (priv stack is aligned and doesn't need it) */
adds r2, #4
.L_no_padding:
/* Calculate original pre-svc user sp which is psp + sf size (+4B if pad bit was set) */
adds r2, #_EXC_HW_SAVED_BASIC_SF_SIZE
mov r3, ip
str r2,[r3, #0]

/* Store the pre-SVC user SP at the offset expected by z_arm_do_syscall,
* as detailed in the table above.
*/
str r2,[r3, #_EXC_HW_SAVED_BASIC_SF_SIZE]
/* sf of priv stack has the same xPSR as user stack but with 9th bit reset */
str r1,[r3, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET]

/* r0-r3, r12, LR from user stack sf are copied to sf of priv stack */
mov r1, r0
mov r2, r3
ldmia r1!, {r4-r7}
stmia r2!, {r4-r7}
ldmia r1!, {r4-r5}
stmia r2!, {r4-r5}

/* Store the svc return address at the offset expected by z_arm_do_syscall,
* as detailed in the table above.
*/
str r5, [r3, #(_EXC_HW_SAVED_BASIC_SF_SIZE+4)]

ldr r1, =z_arm_do_syscall
str r1, [r3, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* Execution return to z_arm_do_syscall */
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* grab address of PC from stack frame */
/* Store the svc return address (i.e. the instruction following the svc) in r8,
 * as expected by z_arm_do_syscall.
 */
mov r8, r1

#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
str r1, [r0, #24] /* overwrite the PC to point to z_arm_do_syscall */
mov r2, r0 /* safe to use r2 since it is saved on MSP */

/* Calculate original pre-svc user sp without pad which is psp + sf size */
add r2, r8

/* Also, check for padding in the sf */
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET] /* grab xPSR from sf which has the pad bit */
tst r1, #(1<<9) /* Check if pad bit 9 is set */
beq .L_no_padding
bics r1, #(1<<9) /* clear the pad bit (priv stack is aligned and doesn't need it) */
/* Calculate original pre-svc user sp with pad */
add r2, #4
.L_no_padding:
str r2,[ip, #0]
/* Store the pre-SVC user SP at the offset expected by z_arm_do_syscall,
* as detailed in the table above.
*/
str r2,[ip, r8]
str r1,[ip, #_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET] /* priv sf gets the user sf xPSR with bit 9 cleared */

/* r0-r3, r12, LR from user stack sf are copied to sf of priv stack */
mov r1, r0
mov r2, ip
ldmia r1!, {r4-r7}
stmia r2!, {r4-r7}
ldmia r1!, {r4-r5}
stmia r2!, {r4-r5}

/* Store the svc return address at the offset expected by z_arm_do_syscall,
* as detailed in the table above.
*/
add r8, #4
str r5, [ip, r8]

ldr r1, =z_arm_do_syscall
str r1, [ip, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* Execution return to z_arm_do_syscall */
ldr r1, [r0, #_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET] /* grab address of PC from stack frame */
/* Store the svc return address (i.e. the instruction following the svc) in r8,
 * as expected by z_arm_do_syscall.
 */
mov r8, r1

/* The basic stack frame has been copied to the privilege stack at this point;
 * now copy the fp context.
 */
#ifdef CONFIG_FPU_SHARING
tst lr, #_EXC_RETURN_FTYPE_Msk
bne .L_skip_fp_copy
add r1, r0, #32
add r2, ip, #32

vldmia r1!, {s0-s15}
vstmia r2!, {s0-s15}

/* copy FPSCR + reserved (8 bytes) */
ldmia r1!, {r4, r5}
stmia r2!, {r4, r5}
.L_skip_fp_copy:
#endif

#endif
pop {r2} /* restore CONTROL value */
pop {r4-r7}

/* Point PSP to privilege stack,
* note that r0 still has the old PSP
*/
msr PSP, ip

#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
ldr r3, =K_SYSCALL_LIMIT
cmp r6, r3
@@ -556,14 +755,12 @@ SECTION_FUNC(TEXT, z_arm_svc)
isb

#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Thread is now in privileged mode; after returning from SVCall it
* will use the default (user) stack before switching to the privileged
* stack to execute the system call. We need to protect the user stack
* against stack overflows until this stack transition.
*/
ldr r1, [r0, #_thread_offset_to_stack_info_start] /* stack_info.start */
msr PSPLIM, r1
#endif /* CONFIG_BUILTIN_STACK_GUARD */
/* Set stack pointer limit (needed in privileged mode) */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
msr PSPLIM, ip
#endif

/* return from SVC to the modified LR - z_arm_do_syscall */
bx lr
55 changes: 4 additions & 51 deletions arch/arm/core/userspace.S
@@ -2,6 +2,7 @@
* Userspace and service handler hooks
*
* Copyright (c) 2017 Linaro Limited
* Copyright 2025 Arm Limited and/or its affiliates <open-source-office@arm.com>
*
* SPDX-License-Identifier: Apache-2.0
*
@@ -308,9 +309,8 @@ SECTION_FUNC(TEXT,z_arm_userspace_enter)
* This function is used to do system calls from unprivileged code. This
* function is responsible for the following:
* 1) Fixing up bad syscalls
* 2) Configuring privileged stack and loading up stack arguments
* 3) Dispatching the system call
* 4) Restoring stack and calling back to the caller of the SVC
* 2) Dispatching the system call
* 3) Restoring stack and calling back to the caller of the SVC
*
*/
SECTION_FUNC(TEXT, z_arm_do_syscall)
@@ -328,41 +328,7 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
* At this point PSPLIM is already configured to guard the default (user)
* stack, so pushing to the default thread's stack is safe.
*/
#if defined(CONFIG_ARMV6_M_ARMV8_M_BASELINE)
/* save current stack pointer (user stack) */
mov ip, sp
/* temporarily push to user stack */
push {r0,r1}
/* setup privileged stack */
ldr r0, =_kernel
ldr r0, [r0, #_kernel_offset_to_current]
adds r0, r0, #_thread_offset_to_priv_stack_start
ldr r0, [r0] /* priv stack ptr */
ldr r1, =CONFIG_PRIVILEGED_STACK_SIZE
add r0, r1

/* Store current SP and LR at the beginning of the priv stack */
subs r0, #8
mov r1, ip
str r1, [r0, #0]
mov r1, lr
str r1, [r0, #4]
mov ip, r0
/* Restore user stack and original r0, r1 */
pop {r0, r1}

#elif defined(CONFIG_ARMV7_M_ARMV8_M_MAINLINE)
/* setup privileged stack */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
add ip, #CONFIG_PRIVILEGED_STACK_SIZE

/* Store current SP and LR at the beginning of the priv stack */
subs ip, #8
str sp, [ip, #0]
str lr, [ip, #4]
#elif defined(CONFIG_CPU_AARCH32_CORTEX_R)
#if defined(CONFIG_CPU_AARCH32_CORTEX_R)
/*
* The SVC handler has already switched to the privileged stack.
* Store the user SP and LR at the beginning of the priv stack.
@@ -373,11 +339,6 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
push {ip, lr}
#endif

#if !defined(CONFIG_CPU_AARCH32_CORTEX_R)
/* switch to privileged stack */
msr PSP, ip
#endif

/* Note (applies when using stack limit checking):
* We do not need to lock IRQs after switching PSP to the privileged stack;
* PSPLIM is guarding the default (user) stack, which, by design, is
@@ -386,14 +347,6 @@ SECTION_FUNC(TEXT, z_arm_do_syscall)
* the maximum exception stack frame.
*/

#if defined(CONFIG_BUILTIN_STACK_GUARD)
/* Set stack pointer limit (needed in privileged mode) */
ldr ip, =_kernel
ldr ip, [ip, #_kernel_offset_to_current]
ldr ip, [ip, #_thread_offset_to_priv_stack_start] /* priv stack ptr */
msr PSPLIM, ip
#endif

/*
* r0-r5 contain arguments
* r6 contains call_id
1 change: 1 addition & 0 deletions boards/arm/mps2/CMakeLists.txt
@@ -20,4 +20,5 @@ if(CONFIG_BOARD_MPS2_AN521_CPU1 AND NOT CONFIG_OPENAMP)
BUILD_BYPRODUCTS "${CPU0_BINARY_DIR}/${KERNEL_BIN_NAME}"
BUILD_ALWAYS True
)
add_dependencies(app empty_cpu0)
endif()
35 changes: 35 additions & 0 deletions include/zephyr/arch/arm/cortex_m/cpu.h
@@ -26,6 +26,41 @@
#define _EXC_RETURN_SPSEL_Msk (1 << 2)
#define _EXC_RETURN_FTYPE_Msk (1 << 4)

/*
* Cortex-M Exception Stack Frame Layouts
*
* When an exception is taken, the processor automatically pushes
* registers to the current stack. The layout depends on whether
* the FPU is active.
*/

/* Basic hardware-saved exception stack frame (no FPU context):
* R0-R3 (4 x 4B = 16B)
* R12 (4B)
* LR (4B)
* Return address (4B)
* RETPSR (4B)
*--------------------------
* Total: 32 bytes
*/
#define _EXC_HW_SAVED_BASIC_SF_SIZE (32)
#define _EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET (24)
#define _EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET (28)

/* Extended hardware-saved exception stack frame (with FPU context) consists of:
* R0-R3 (16B)
* R12 (4B)
* LR (R14) (4B)
* Return address (4B)
* RETPSR (4B)
* S0-S15 (16 x 4B = 64B)
* FPSCR (4B)
* Reserved (4B)
*--------------------------
* Total: 104 bytes
*/
#define _EXC_HW_SAVED_EXTENDED_SF_SIZE (104)
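
/* Purely illustrative (hypothetical names, not part of this header): a C view
 * of the two hardware-saved frames described above, assuming natural 4-byte
 * packing and no compiler padding.
 *
 *   struct hw_basic_sf {
 *           uint32_t r0, r1, r2, r3;   // offsets 0, 4, 8, 12
 *           uint32_t r12;              // offset 16
 *           uint32_t lr;               // offset 20
 *           uint32_t return_address;   // offset 24 (_EXC_HW_SAVED_BASIC_SF_RETADDR_OFFSET)
 *           uint32_t retpsr;           // offset 28 (_EXC_HW_SAVED_BASIC_SF_XPSR_OFFSET)
 *   };                                 // sizeof == 32 (_EXC_HW_SAVED_BASIC_SF_SIZE)
 *
 *   struct hw_extended_sf {
 *           struct hw_basic_sf basic;  // offsets 0..31
 *           uint32_t s[16];            // S0-S15, offsets 32..95
 *           uint32_t fpscr;            // offset 96
 *           uint32_t reserved;         // offset 100
 *   };                                 // sizeof == 104 (_EXC_HW_SAVED_EXTENDED_SF_SIZE)
 */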

#else
#include <stdint.h>

8 changes: 8 additions & 0 deletions tests/arch/arm/arm_user_stack_test/CMakeLists.txt
@@ -0,0 +1,8 @@
# SPDX-License-Identifier: Apache-2.0

cmake_minimum_required(VERSION 3.20.0)

find_package(Zephyr REQUIRED HINTS $ENV{ZEPHYR_BASE})
project(arm_user_stack_test)

target_sources(app PRIVATE src/main.c)
1 change: 1 addition & 0 deletions tests/arch/arm/arm_user_stack_test/boards/mps2_an386.conf
@@ -0,0 +1 @@
CONFIG_ARM_MPU=y
1 change: 1 addition & 0 deletions tests/arch/arm/arm_user_stack_test/boards/mps2_an500.conf
@@ -0,0 +1 @@
CONFIG_ARM_MPU=y
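
As a usage sketch (not part of this change; board identifiers are assumed to correspond to the MPS2 AN386/AN500 conf files above), the new test could be built directly with west build -b <board> tests/arch/arm/arm_user_stack_test, or run through the test runner with scripts/twister -T tests/arch/arm/arm_user_stack_test.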