/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2013-07-05     Bernard      the first version
 */

#include <rtconfig.h>

#ifdef RT_USING_VMM
#include <vmm.h>
#endif

.section .text, "ax"
/*
 * rt_base_t rt_hw_interrupt_disable();
 */
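/*
 * Reads the current CPSR into r0 (the value returned to the caller) and then
 * masks IRQs with "cpsid i"; FIQs are left unchanged. The caller passes the
 * returned value back to rt_hw_interrupt_enable() to restore the old state.
 */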
.globl rt_hw_interrupt_disable
rt_hw_interrupt_disable:
    mrs r0, cpsr
    cpsid i
    bx  lr

/*
 * void rt_hw_interrupt_enable(rt_base_t level);
 */
.globl rt_hw_interrupt_enable
rt_hw_interrupt_enable:
    msr cpsr, r0
    bx  lr
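
/*
 * A minimal usage sketch from C (illustration only, not part of this file):
 *
 *     rt_base_t level;
 *
 *     level = rt_hw_interrupt_disable();   // enter critical section
 *     ...                                  // access shared data
 *     rt_hw_interrupt_enable(level);       // restore the saved CPSR
 */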

/*
 * void rt_hw_context_switch_to(rt_uint32 to);
 * r0 --> to
 */
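/*
 * "to" is the address of the new thread's saved stack pointer. The stack it
 * points at must hold, from low to high address: the saved cpsr, then r0-r12,
 * lr and the resume pc, exactly as the switch code below stores them.
 */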
.globl rt_hw_context_switch_to
rt_hw_context_switch_to:
    ldr sp, [r0]            @ get new task stack pointer

    ldmfd sp!, {r4}         @ pop new task cpsr into spsr
    msr spsr_cxsf, r4

    ldmfd sp!, {r0-r12, lr, pc}^   @ pop new task r0-r12, lr & pc; spsr is copied to cpsr

.section .bss.share.isr
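/* nesting counter for switch_to_guest below: only the outermost call into the
 * vmm thread runs rt_thread_idle_excute and vmm_virq_update */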
_guest_switch_lvl:
    .word 0

.globl vmm_virq_update

.section .text.isr, "ax"
/*
 * void rt_hw_context_switch(rt_uint32 from, rt_uint32 to);
 * r0 --> from
 * r1 --> to
 */
.globl rt_hw_context_switch
rt_hw_context_switch:
    stmfd   sp!, {lr}       @ push the return address as the saved pc
    stmfd   sp!, {r0-r12, lr}   @ push r0-r12 and lr

    mrs r4, cpsr
    tst lr, #0x01
    orrne r4, r4, #0x20     @ returning to Thumb code: set the T bit in the saved psr

    stmfd sp!, {r4}         @ push cpsr
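    /* the frame now on the stack, from low to high address:
     *     cpsr | r0-r12 | lr | pc
     * this is the layout rt_hw_context_switch_to pops above */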

    str sp, [r0]            @ store sp in the preempted task's TCB
    ldr sp, [r1]            @ get the new task's stack pointer

#ifdef RT_USING_VMM
#ifdef RT_VMM_USING_DOMAIN
    @ make sure we are in the vmm domain, since rt_current_thread is used below
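    @ (the mcr below writes the Domain Access Control Register, CP15 c3, c0, 0)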
    ldr     r2, =vmm_domain_val
    ldr     r7, [r2]
    mcr     p15, 0, r7, c3, c0
#endif

    /* check whether we are switching to the vmm thread; if so, the vIRQ
     * state must be updated first */
    ldr     r3, =rt_current_thread
    ldr     r4, [r3]
    ldr     r5, =vmm_thread
    cmp     r4, r5
    beq     switch_to_guest

    @ not switching into the guest: just a plain task switch ;-)
    ldmfd sp!, {r6}         @ pop new task cpsr to spsr
    msr spsr_cxsf, r6
    ldmfd sp!, {r0-r12, lr, pc}^   @ pop new task r0-r12, lr & pc; spsr is copied to cpsr

switch_to_guest:
#ifdef RT_VMM_USING_DOMAIN
    @ the stack was saved in the guest domain, so we need a domain setting
    @ that can reach the guest memory again to fetch the registers back.
    ldr     r1, =super_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0
#endif
    /* The user can do nearly anything in rt_thread_idle_excute because it
    calls thread->cleanup. A common case is sending events and waking up other
    threads, so the guest thread may be preempted here. This is the only point
    at which the guest thread calls rt_hw_context_switch and "yields".

    Moreover, rt_schedule will call this function again, so this function *is*
    re-entered. When that happens, we must not run rt_thread_idle_excute and
    vmm_virq_update a second time, and we must stay in the super domain. A
    "reference count" is used to achieve this behaviour. If you have a better
    idea, tell me. */
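
    /* the counter logic below, roughly in C (for illustration only):
     *
     *     _guest_switch_lvl++;
     *     if (_guest_switch_lvl == 1) {
     *         rt_thread_idle_excute();
     *         vmm_virq_update();
     *         _guest_switch_lvl--;
     *         // with RT_VMM_USING_DOMAIN: switch the DACR back to the guest domain
     *     }
     *     // _switch_through: restore cpsr and r0-r12/lr/pc of the target thread
     */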
    ldr     r4, =_guest_switch_lvl
    ldr     r5, [r4]
    add     r5, r5, #1
    str     r5, [r4]
    cmp     r5, #1
    bne     _switch_through

    bl      rt_thread_idle_excute
    bl      vmm_virq_update

    /* we need _guest_switch_lvl to protect us until _switch_through, but it is
     * OK to drop the reference count here because the code below is not
     * re-entered. */
    sub     r5, r5, #1
    str     r5, [r4]

#ifdef RT_VMM_USING_DOMAIN
    ldr     r1, =guest_domain_val
    ldr     r0, [r1]
    mcr     p15, 0, r0, c3, c0
#endif
_switch_through:
#endif /* RT_USING_VMM */
    ldmfd sp!, {r4}         @ pop new task cpsr to spsr
    msr spsr_cxsf, r4
    ldmfd sp!, {r0-r12, lr, pc}^  @ pop new task r0-r12, lr & pc, copy spsr to cpsr

/*
 * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to);
 */
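/*
 * Used when the switch is requested from interrupt context: it only records
 * the outgoing ("from") and incoming ("to") threads and raises
 * rt_thread_switch_interrupt_flag; the actual register switch is performed
 * later, by the interrupt exit path of this port.
 */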
.globl rt_thread_switch_interrupt_flag
.globl rt_interrupt_from_thread
.globl rt_interrupt_to_thread
.globl rt_hw_context_switch_interrupt
rt_hw_context_switch_interrupt:
    ldr r2, =rt_thread_switch_interrupt_flag
    ldr r3, [r2]
    cmp r3, #1
    beq _reswitch
    ldr ip, =rt_interrupt_from_thread   @ set rt_interrupt_from_thread
    mov r3, #1              @ set rt_thread_switch_interrupt_flag to 1
    str r0, [ip]
    str r3, [r2]
_reswitch:
    ldr r2, =rt_interrupt_to_thread     @ set rt_interrupt_to_thread
    str r1, [r2]
    bx  lr
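
/*
 * Roughly equivalent C (for illustration only):
 *
 *     if (rt_thread_switch_interrupt_flag != 1) {
 *         rt_interrupt_from_thread = from;     // r0: remember the outgoing thread once
 *         rt_thread_switch_interrupt_flag = 1;
 *     }
 *     rt_interrupt_to_thread = to;             // r1: always update the target thread
 */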