Lines Matching +full:host +full:- +full:side

2  * COPYRIGHT (C) 2013-2014, Real-Thread Information Technology Ltd
5 * SPDX-License-Identifier: Apache-2.0
9 * 2013-11-04 Grissiom add comment
29 /* When loading RT-Thread, IRQs on the guest should be disabled. */ in vmm_context_init()
30 _vmm_context->virq_status = 1; in vmm_context_init()
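
The matched lines from vmm_context_init() show the guest's virtual IRQs being marked as masked at load time. A minimal sketch of the shared context these matches operate on, with names and layout assumed rather than copied from the real header:

    #include <stdint.h>

    #define VIRQ_WORDS 2   /* assumed: enough 32-bit words to cover all virtual IRQs */

    /* assumed layout of the context shared between host and guest */
    struct vmm_context {
        volatile uint32_t virq_status;              /* 1 = virtual IRQs masked on the guest */
        volatile uint32_t virq_pended;              /* set when any virtual IRQ is pending */
        volatile uint32_t virq_pending[VIRQ_WORDS]; /* one bit per virtual IRQ */
    };

    static struct vmm_context *_vmm_context;

    static void vmm_context_init(void *context_addr)
    {
        _vmm_context = (struct vmm_context *)context_addr;
        /* when loading RT-Thread, IRQs on the guest should be disabled */
        _vmm_context->virq_status = 1;
    }
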
36 /* some RT-Thread code needs to be called in the guest
38 * "super" domain mode to have access to both sides. The code executed in super
47 domain->kernel, domain->user, domain->io, in vmm_context_init_domain()
48 domain->vmm, domain->vmm_share); in vmm_context_init_domain()
50 if (domain->kernel == domain->vmm || in vmm_context_init_domain()
51 domain->io == domain->vmm) in vmm_context_init_domain()
61 vmm_domain_val |= (1 << (domain->vmm * 2)) | (1 << (domain->vmm_share * 2)); in vmm_context_init_domain()
64 /* super domain has access to both sides */ in vmm_context_init_domain()
65 super_domain_val |= (1 << (domain->kernel * 2)) | (1 << (domain->user * 2)); in vmm_context_init_domain()
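
Lines 47-65 build two ARM Domain Access Control Register (DACR) values: one the guest normally runs with, and a "super" value that additionally opens the guest's kernel and user domains so VMM code can touch both sides. A sketch of that composition, assuming the standard 2-bit-per-domain DACR encoding with 0b01 meaning "client" access; DOMAIN_CLIENT and the starting values are assumptions, only the bits on lines 61 and 65 come from the matches:

    #include <stdint.h>

    #define DOMAIN_CLIENT 0x1u   /* accesses checked against page-table permissions */

    struct vmm_domain {
        int kernel, user, io, vmm, vmm_share;   /* ARM domain numbers, 0..15 */
    };

    static int build_domain_values(const struct vmm_domain *domain,
                                   uint32_t *vmm_domain_val,
                                   uint32_t *super_domain_val)
    {
        /* the guest must not share a domain with the VMM (cf. lines 50-51) */
        if (domain->kernel == domain->vmm || domain->io == domain->vmm)
            return -1;

        /* guest-visible value: only the VMM domains are opened (line 61) */
        *vmm_domain_val   = (DOMAIN_CLIENT << (domain->vmm * 2)) |
                            (DOMAIN_CLIENT << (domain->vmm_share * 2));

        /* "super" value: additionally opens the guest kernel and user
         * domains so super-mode code reaches both sides (line 65) */
        *super_domain_val = *vmm_domain_val |
                            (DOMAIN_CLIENT << (domain->kernel * 2)) |
                            (DOMAIN_CLIENT << (domain->user * 2));
        return 0;
    }
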
90 _vmm_context->virq_pending[irq / 32] |= (1 << (irq % 32)); in vmm_virq_pending()
91 _vmm_context->virq_pended = 1; in vmm_virq_pending()
92 /* mask this IRQ in host */ in vmm_virq_pending()
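
vmm_virq_pending() (lines 90-92) records a virtual IRQ for the guest: set the per-IRQ bit, set the summary flag, and mask the interrupt on the host so it is not re-raised before the guest has taken the virtual copy. A sketch reusing the context above; host_irq_mask() is a hypothetical stand-in for whatever the host uses to mask the line:

    extern void host_irq_mask(int irq);   /* hypothetical host-side mask hook */

    static void vmm_virq_pending(int irq)
    {
        /* one bit per IRQ, 32 IRQs per word */
        _vmm_context->virq_pending[irq / 32] |= (1u << (irq % 32));
        _vmm_context->virq_pended = 1;
        /* mask this IRQ in host until the guest handles the virtual one */
        host_irq_mask(irq);
    }
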
98 if ((!_vmm_context->virq_status) && in vmm_virq_update()
99 ( _vmm_context->virq_pended)) in vmm_virq_update()
107 * @return 0 if the guest should handle the IRQ, -1 if the guest context should be restored
112 if ((!_vmm_context->virq_status) && in vmm_virq_check()
113 ( _vmm_context->virq_pended)) in vmm_virq_check()
118 return -1; in vmm_virq_check()
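
vmm_virq_update() and vmm_virq_check() (lines 98-118) apply the same condition: a virtual IRQ is delivered only when the guest has them unmasked (virq_status == 0) and something is pending. A sketch of the check, reusing the context above, with the return convention from the comment on line 107:

    static int vmm_virq_check(void)
    {
        if (!_vmm_context->virq_status &&
             _vmm_context->virq_pended)
            return 0;    /* the guest should handle the IRQ */

        return -1;       /* nothing to inject; just restore the guest context */
    }
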
122 static char _vmbuf[10*ARRAY_SIZE(_vmm_context->virq_pending)];
127 vmm_info("---- virtual IRQ ----\n"); in vmm_dump_virq()
129 _vmm_context->virq_status, _vmm_context->virq_pended); in vmm_dump_virq()
130 for (s = 0, i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++) in vmm_dump_virq()
132 s += rt_snprintf(_vmbuf+s, sizeof(_vmbuf)-s, in vmm_dump_virq()
133 "%08x, ", _vmm_context->virq_pending[i]); in vmm_dump_virq()
136 vmm_info("---- virtual IRQ ----\n"); in vmm_dump_virq()
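
vmm_dump_virq() (lines 122-136) prints the masked/pended flags and then formats every pending word into one static buffer, advancing the offset and shrinking the remaining size on each rt_snprintf call so the buffer cannot overflow. A sketch reusing the context above; the exact vmm_info format strings are not in the matches, and vmm_info is assumed here to log like rt_kprintf:

    #include <rtthread.h>

    #ifndef vmm_info
    #define vmm_info rt_kprintf   /* assumed: vmm_info wraps rt_kprintf */
    #endif

    static void vmm_dump_virq(void)
    {
        int i, s;
        /* "%08x, " is 10 characters per word, matching the sizing on line 122 */
        static char _vmbuf[10 * VIRQ_WORDS];

        vmm_info("---- virtual IRQ ----\n");
        vmm_info("status: %08x, pended: %08x\n",
                 _vmm_context->virq_status, _vmm_context->virq_pended);
        for (s = 0, i = 0; i < VIRQ_WORDS; i++)
        {
            s += rt_snprintf(_vmbuf + s, sizeof(_vmbuf) - s,
                             "%08x, ", _vmm_context->virq_pending[i]);
        }
        vmm_info("%s\n", _vmbuf);
        vmm_info("---- virtual IRQ ----\n");
    }
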
144 for (i = 0; i < ARRAY_SIZE(_vmm_context->virq_pending); i++) in vmm_virq_coherence_ok()
146 should_pend |= _vmm_context->virq_pending[i]; in vmm_virq_coherence_ok()
149 res = (_vmm_context->virq_pended == !!should_pend); in vmm_virq_coherence_ok()
153 vmm_info("--- %x %x, %x\n", in vmm_virq_coherence_ok()
154 _vmm_context->virq_pended, should_pend, !!should_pend); in vmm_virq_coherence_ok()
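
vmm_virq_coherence_ok() (lines 144-154) is a consistency check: the single virq_pended flag must agree with the OR of all pending words, i.e. it is set exactly when at least one bit is set. A sketch reusing the context and the vmm_info fallback above:

    static int vmm_virq_coherence_ok(void)
    {
        uint32_t should_pend = 0;
        int i, res;

        for (i = 0; i < VIRQ_WORDS; i++)
            should_pend |= _vmm_context->virq_pending[i];

        /* pended flag must be 1 iff any pending bit is set */
        res = (_vmm_context->virq_pended == !!should_pend);
        if (!res)
            vmm_info("--- %x %x, %x\n",
                     _vmm_context->virq_pended, should_pend, !!should_pend);
        return res;
    }
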
172 sp->cpsr, sp->pc, sp->lr, sp+1); in vmm_show_guest_reg()
238 cpsr = sp->cpsr; in vmm_verify_guest_status()
253 if ((cpsr & I_Bit) && (sp->pc <= VMM_BEGIN)) in vmm_verify_guest_status()
267 if (_vmm_context->virq_status & 1) in vmm_verify_guest_status()
273 if ((sp->pc > 0xbf000000) && (sp->pc < 0xffff0000)) in vmm_verify_guest_status()
282 if (_vmm_context->virq_pended) in vmm_verify_guest_status()
290 else if ((cpsr & MODEMASK) == SVCMODE && sp->pc < 0xbf000000) in vmm_verify_guest_status()
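
vmm_verify_guest_status() (lines 238-290) sanity-checks a saved guest trap frame: the CPSR I bit against the program counter, whether the PC sits in the 0xbf000000-0xffff0000 kernel/vector window, the virtual IRQ flags, and SVC-mode execution below that window. Only the guard conditions appear in the matches; what the real function does when they fire is not shown, so the sketch below just logs, reusing the vmm_info fallback above. The frame layout, I_Bit, MODEMASK, SVCMODE and VMM_BEGIN values are assumptions:

    struct guest_frame {
        uint32_t cpsr, pc, lr;   /* assumed subset of the saved registers (cf. line 172) */
    };

    #define I_Bit     (1u << 7)      /* CPSR IRQ-disable bit */
    #define MODEMASK  0x1fu          /* CPSR mode field */
    #define SVCMODE   0x13u          /* ARM supervisor mode */
    #define VMM_BEGIN 0xe0000000u    /* assumed base of the VMM region */

    static void vmm_verify_guest_status(struct guest_frame *sp)
    {
        uint32_t cpsr = sp->cpsr;

        /* hardware IRQs disabled while the guest runs outside the VMM region */
        if ((cpsr & I_Bit) && (sp->pc <= VMM_BEGIN))
            vmm_info("guest at %08x with IRQ disabled\n", sp->pc);

        /* PC inside the kernel/module/vector window */
        if ((sp->pc > 0xbf000000) && (sp->pc < 0xffff0000))
            vmm_info("guest pc %08x in 0xbf000000-0xffff0000 window\n", sp->pc);

        /* SVC-mode guest code below that window */
        if ((cpsr & MODEMASK) == SVCMODE && sp->pc < 0xbf000000)
            vmm_info("guest SVC mode at pc %08x\n", sp->pc);
    }
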