Lines matching +full:0 +full:x8ff (search for 0x8ff) in arch/x86/kvm/vmx/nested.c

25 static bool __read_mostly nested_early_check = 0;
74 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); in init_vmcs_shadow_fields()
75 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); in init_vmcs_shadow_fields()
77 for (i = j = 0; i < max_shadow_read_only_fields; i++) { in init_vmcs_shadow_fields()
98 for (i = j = 0; i < max_shadow_read_write_fields; i++) { in init_vmcs_shadow_fields()
240 hv_vcpu->nested.vm_id = 0; in nested_release_evmcs()
241 hv_vcpu->nested.vp_id = 0; in nested_release_evmcs()
314 vcpu->arch.regs_dirty = 0; in vmx_switch_vmcs()
391 unsigned long roots = 0; in nested_ept_invalidate_addr()
397 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { in nested_ept_invalidate_addr()
439 exit_qualification = 0; in nested_ept_inject_page_fault()
459 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification); in nested_ept_inject_page_fault()
498 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0; in nested_vmx_is_page_fault_vmexit()
527 return 0; in nested_vmx_check_io_bitmap_controls()
533 return 0; in nested_vmx_check_io_bitmap_controls()
540 return 0; in nested_vmx_check_msr_bitmap_controls()
545 return 0; in nested_vmx_check_msr_bitmap_controls()
552 return 0; in nested_vmx_check_tpr_shadow_controls()
557 return 0; in nested_vmx_check_tpr_shadow_controls()
580 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { in enable_x2apic_msr_intercepts()
583 msr_bitmap[word] = ~0; in enable_x2apic_msr_intercepts()
584 msr_bitmap[word + (0x800 / sizeof(long))] = ~0; in enable_x2apic_msr_intercepts()
665 * L0 need not intercept reads for MSRs between 0x800 in nested_vmx_prepare_msr_bitmap()
666 * and 0x8ff, it just lets the processor take the value in nested_vmx_prepare_msr_bitmap()
670 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) { in nested_vmx_prepare_msr_bitmap()
779 return 0; in nested_vmx_check_apic_access_controls()
789 return 0; in nested_vmx_check_apicv_controls()
811 * bits 5:0 of posted_intr_desc_addr should be zero. in nested_vmx_check_apicv_controls()
816 CC((vmcs12->posted_intr_nv & 0xff00)) || in nested_vmx_check_apicv_controls()
824 return 0; in nested_vmx_check_apicv_controls()
830 if (count == 0) in nested_vmx_check_msr_switch()
831 return 0; in nested_vmx_check_msr_switch()
837 return 0; in nested_vmx_check_msr_switch()
851 return 0; in nested_vmx_check_exit_msr_switch_controls()
862 return 0; in nested_vmx_check_entry_msr_switch_controls()
869 return 0; in nested_vmx_check_pml_controls()
875 return 0; in nested_vmx_check_pml_controls()
884 return 0; in nested_vmx_check_unrestricted_guest_controls()
893 return 0; in nested_vmx_check_mode_based_ept_exec_controls()
900 return 0; in nested_vmx_check_shadow_vmcs_controls()
906 return 0; in nested_vmx_check_shadow_vmcs_controls()
913 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)) in nested_vmx_msr_check_common()
918 if (CC(e->reserved != 0)) in nested_vmx_msr_check_common()
920 return 0; in nested_vmx_msr_check_common()
931 return 0; in nested_vmx_load_msr_check()
940 return 0; in nested_vmx_store_msr_check()
954 * return 0 for success, entry index for failure.
967 for (i = 0; i < count; i++) { in nested_vmx_load_msr()
974 "%s cannot read MSR entry (%u, 0x%08llx)\n", in nested_vmx_load_msr()
980 "%s check failed (%u, 0x%x, 0x%x)\n", in nested_vmx_load_msr()
986 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", in nested_vmx_load_msr()
991 return 0; in nested_vmx_load_msr()
1012 if (i >= 0) { in nested_vmx_get_vmexit_msr_value()
1021 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__, in nested_vmx_get_vmexit_msr_value()
1035 "%s cannot read MSR entry (%u, 0x%08llx)\n", in read_and_check_msr_entry()
1041 "%s check failed (%u, 0x%x, 0x%x)\n", in read_and_check_msr_entry()
1055 for (i = 0; i < count; i++) { in nested_vmx_store_msr()
1070 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n", in nested_vmx_store_msr()
1075 return 0; in nested_vmx_store_msr()
1086 for (i = 0; i < count; i++) { in nested_msr_store_list_has_msr()
1107 in_autostore_list = msr_autostore_slot >= 0; in prepare_vmx_msr_autostore_list()
1166 return 0; in nested_vmx_load_cr3()
1200 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the in nested_vmx_transition_tlb_flush()
1202 * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit. KVM in nested_vmx_transition_tlb_flush()
1203 * emulates L2 sharing L1's VPID=0 by using vpid01 while running L2, in nested_vmx_transition_tlb_flush()
1204 * and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01. This in nested_vmx_transition_tlb_flush()
1293 return 0; in vmx_restore_vmx_basic()
1336 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0))) in vmx_restore_control_msr()
1339 /* Check must-be-0 bits are still 0. */ in vmx_restore_control_msr()
1346 return 0; in vmx_restore_control_msr()
1394 return 0; in vmx_restore_vmx_misc()
1408 return 0; in vmx_restore_vmx_ept_vpid_cap()
1435 return 0; in vmx_restore_fixed0_msr()
1441 * Returns 0 on success, non-0 otherwise.
1465 * If userspace wants to emulate VMX_BASIC[55]=0, userspace in vmx_set_vmx_msr()
1493 return 0; in vmx_set_vmx_msr()
1498 return 0; in vmx_set_vmx_msr()
1507 /* Returns 0 on success, non-0 otherwise. */
1582 return 0; in vmx_get_vmx_msr()
1608 for (i = 0; i < max_shadow_read_write_fields; i++) { in copy_shadow_to_vmcs12()
1641 for (q = 0; q < ARRAY_SIZE(fields); q++) { in copy_vmcs12_to_shadow()
1642 for (i = 0; i < max_fields[q]; i++) { in copy_vmcs12_to_shadow()
2119 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is in nested_vmx_handle_enlightened_vmptrld()
2128 * CPUID.0x4000000A.EAX[0:15]. in nested_vmx_handle_enlightened_vmptrld()
2151 memset(vmcs12, 0, sizeof(*vmcs12)); in nested_vmx_handle_enlightened_vmptrld()
2223 if (preemption_timeout == 0) { in vmx_start_preemption_timer()
2228 if (vcpu->arch.virtual_tsc_khz == 0) in vmx_start_preemption_timer()
2271 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); in prepare_vmcs02_constant_state()
2278 vmcs_write64(VM_FUNCTION_CONTROL, 0); in prepare_vmcs02_constant_state()
2292 vmcs_write64(PML_ADDRESS, 0); in prepare_vmcs02_constant_state()
2322 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the in prepare_vmcs02_early_rare()
2325 * and VM-Exit are architecturally required to flush VPID=0, but *only* in prepare_vmcs02_early_rare()
2326 * VPID=0. I.e. using vpid02 would be ok (so long as KVM emulates the in prepare_vmcs02_early_rare()
2503 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); in prepare_vmcs02_early()
2588 * setting MASK=MATCH=0 and (see below) EB.PF=1. in prepare_vmcs02_rare()
2591 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when in prepare_vmcs02_rare()
2599 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0); in prepare_vmcs02_rare()
2600 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0); in prepare_vmcs02_rare()
2634 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2774 return 0; in prepare_vmcs02()
2787 return 0; in nested_vmx_check_nmi_controls()
2823 if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) in nested_vmx_check_eptp()
2891 return 0; in nested_check_vm_execution_controls()
2908 return 0; in nested_check_vm_exit_controls()
2949 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0)) in nested_check_vm_entry_controls()
2974 CC(vmcs12->vm_entry_instruction_len == 0 && in nested_check_vm_entry_controls()
2983 return 0; in nested_check_vm_entry_controls()
2999 return 0; in nested_vmx_check_controls()
3010 return 0; in nested_vmx_check_address_space_size()
3064 CC(vmcs12->host_cs_selector == 0) || in nested_vmx_check_host_state()
3065 CC(vmcs12->host_tr_selector == 0) || in nested_vmx_check_host_state()
3066 CC(vmcs12->host_ss_selector == 0 && !ia32e)) in nested_vmx_check_host_state()
3079 * IA32_EFER MSR must be 0 in the field for that register. In addition, in nested_vmx_check_host_state()
3090 return 0; in nested_vmx_check_host_state()
3101 return 0; in nested_vmx_check_vmcs_link_ptr()
3120 return 0; in nested_vmx_check_vmcs_link_ptr()
3133 return 0; in nested_check_guest_non_reg_state()
3176 * - Bits reserved in the IA32_EFER MSR must be 0. in nested_vmx_check_guest_state()
3199 return 0; in nested_vmx_check_guest_state()
3209 return 0; in nested_vmx_check_vmentry_hw()
3212 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); in nested_vmx_check_vmentry_hw()
3214 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0); in nested_vmx_check_vmentry_hw()
3226 vmcs_writel(GUEST_RFLAGS, 0); in nested_vmx_check_vmentry_hw()
3277 return 0; in nested_vmx_check_vmentry_hw()
3339 vcpu->run->internal.ndata = 0; in nested_get_vmcs12_pages()
3413 vcpu->run->internal.ndata = 0; in vmx_get_nested_state_pages()
3432 return 0; in nested_vmx_write_pml_buffer()
3443 return 0; in nested_vmx_write_pml_buffer()
3450 gpa &= ~0xFFFull; in nested_vmx_write_pml_buffer()
3455 return 0; in nested_vmx_write_pml_buffer()
3459 return 0; in nested_vmx_write_pml_buffer()
3473 return 0; in nested_vmx_check_permission()
3477 kvm_inject_gp(vcpu, 0); in nested_vmx_check_permission()
3478 return 0; in nested_vmx_check_permission()
3768 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3773 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3783 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3785 return 0; in nested_vmx_run()
3856 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3891 vmcs12->idt_vectoring_info_field = 0; in vmcs12_save_pending_event()
3925 return 0; in vmx_complete_nested_posted_interrupt()
3933 return 0; in vmx_complete_nested_posted_interrupt()
3936 if (max_irr > 0) { in vmx_complete_nested_posted_interrupt()
3944 if ((u8)max_irr > ((u8)status & 0xff)) { in vmx_complete_nested_posted_interrupt()
3945 status &= ~0xff; in vmx_complete_nested_posted_interrupt()
3952 return 0; in vmx_complete_nested_posted_interrupt()
3975 exit_qual = 0; in nested_vmx_inject_exception_vmexit()
4028 return 0; in vmx_get_pending_dbg_trap()
4096 if ((max_irr & 0xf0) > (vppr & 0xf0)) in vmx_has_nested_events()
4102 if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0)) in vmx_has_nested_events()
4230 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0); in vmx_check_nested_events()
4234 return 0; in vmx_check_nested_events()
4244 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0, in vmx_check_nested_events()
4245 apic->sipi_vector & 0xFFUL); in vmx_check_nested_events()
4246 return 0; in vmx_check_nested_events()
4267 return 0; in vmx_check_nested_events()
4281 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0); in vmx_check_nested_events()
4282 return 0; in vmx_check_nested_events()
4290 return 0; in vmx_check_nested_events()
4302 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0); in vmx_check_nested_events()
4303 return 0; in vmx_check_nested_events()
4320 INTR_INFO_VALID_MASK, 0); in vmx_check_nested_events()
4325 vcpu->arch.nmi_pending = 0; in vmx_check_nested_events()
4327 return 0; in vmx_check_nested_events()
4344 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0); in vmx_check_nested_events()
4345 return 0; in vmx_check_nested_events()
4354 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0); in vmx_check_nested_events()
4355 return 0; in vmx_check_nested_events()
4359 if (WARN_ON_ONCE(irq < 0)) in vmx_check_nested_events()
4388 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR | irq, 0); in vmx_check_nested_events()
4396 return 0; in vmx_check_nested_events()
4409 if (ktime_to_ns(remaining) <= 0) in vmx_get_preemption_timer_value()
4410 return 0; in vmx_get_preemption_timer_value()
4692 vmx_set_interrupt_shadow(vcpu, 0); in load_vmcs12_host_state()
4724 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF); in load_vmcs12_host_state()
4725 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF); in load_vmcs12_host_state()
4729 vmcs_write64(GUEST_BNDCFGS, 0); in load_vmcs12_host_state()
4743 .base = 0, in load_vmcs12_host_state()
4744 .limit = 0xFFFFFFFF, in load_vmcs12_host_state()
4757 .base = 0, in load_vmcs12_host_state()
4758 .limit = 0xFFFFFFFF, in load_vmcs12_host_state()
4779 .limit = 0x67, in load_vmcs12_host_state()
4786 memset(&seg, 0, sizeof(seg)); in load_vmcs12_host_state()
4790 kvm_set_dr(vcpu, 7, 0x400); in load_vmcs12_host_state()
4791 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); in load_vmcs12_host_state()
4811 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { in nested_vmx_get_vmcs01_guest_efer()
4884 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) { in nested_vmx_restore_host_state()
4888 "%s read MSR index failed (%u, 0x%08llx)\n", in nested_vmx_restore_host_state()
4893 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) { in nested_vmx_restore_host_state()
4897 "%s read MSR failed (%u, 0x%08llx)\n", in nested_vmx_restore_host_state()
4908 "%s check failed (%u, 0x%x, 0x%x)\n", in nested_vmx_restore_host_state()
4915 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n", in nested_vmx_restore_host_state()
5118 vmx->fail = 0; in nested_vmx_vmexit()
5124 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0); in nested_vmx_triple_fault()
5130 * On success, returns 0. When the operand is invalid, returns 1 and throws
5152 int index_reg = (vmx_instruction_info >> 18) & 0xf; in get_vmx_mem_address()
5154 int base_reg = (vmx_instruction_info >> 23) & 0xf; in get_vmx_mem_address()
5167 else if (addr_size == 0) in get_vmx_mem_address()
5182 off &= 0xffffffff; in get_vmx_mem_address()
5183 else if (addr_size == 0) /* 16 bit */ in get_vmx_mem_address()
5184 off &= 0xffff; in get_vmx_mem_address()
5199 *ret = vmx_get_untagged_addr(vcpu, *ret, 0); in get_vmx_mem_address()
5200 /* Long mode: #GP(0)/#SS(0) if the memory address is in a in get_vmx_mem_address()
5204 exn = is_noncanonical_address(*ret, vcpu, 0); in get_vmx_mem_address()
5211 *ret = (s.base + off) & 0xffffffff; in get_vmx_mem_address()
5215 * - segment type check (#GP(0) may be thrown) in get_vmx_mem_address()
5216 * - usability check (#GP(0)/#SS(0)) in get_vmx_mem_address()
5217 * - limit check (#GP(0)/#SS(0)) in get_vmx_mem_address()
5220 /* #GP(0) if the destination operand is located in a in get_vmx_mem_address()
5223 exn = ((s.type & 0xa) == 0 || (s.type & 8)); in get_vmx_mem_address()
5225 /* #GP(0) if the source operand is located in an in get_vmx_mem_address()
5228 exn = ((s.type & 0xa) == 8); in get_vmx_mem_address()
5230 kvm_queue_exception_e(vcpu, GP_VECTOR, 0); in get_vmx_mem_address()
5233 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable. in get_vmx_mem_address()
5235 exn = (s.unusable != 0); in get_vmx_mem_address()
5238 * Protected mode: #GP(0)/#SS(0) if the memory operand is in get_vmx_mem_address()
5240 * limit checks for flat segments, i.e. segments with base==0, in get_vmx_mem_address()
5241 * limit==0xffffffff and of type expand-up data or code. in get_vmx_mem_address()
5243 if (!(s.base == 0 && s.limit == 0xffffffff && in get_vmx_mem_address()
5251 0); in get_vmx_mem_address()
5255 return 0; in get_vmx_mem_address()
5278 return 0; in nested_vmx_get_vmptr()
5315 if (r < 0) in enter_vmx_operation()
5340 vmx->pt_desc.guest.ctl = 0; in enter_vmx_operation()
5344 return 0; in enter_vmx_operation()
5389 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root, in handle_vmxon()
5396 * L1 to run L2 without CR4.VMXE=0, and because KVM never modifies L2's in handle_vmxon()
5401 kvm_inject_gp(vcpu, 0); in handle_vmxon()
5415 kvm_inject_gp(vcpu, 0); in handle_vmxon()
5421 kvm_inject_gp(vcpu, 0); in handle_vmxon()
5471 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
5496 u32 zero = 0; in handle_vmclear()
5557 gva_t gva = 0; in handle_vmread()
5565 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); in handle_vmread()
5578 if (offset < 0) in handle_vmread()
5601 if (offset < 0) in handle_vmread()
5614 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value); in handle_vmread()
5620 /* _system ok, nested_vmx_check_permission has verified cpl=0 */ in handle_vmread()
5673 u64 value = 0; in handle_vmwrite()
5688 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf)); in handle_vmwrite()
5699 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf)); in handle_vmwrite()
5702 if (offset < 0) in handle_vmwrite()
5729 value &= 0x1f0ff; in handle_vmwrite()
5862 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */ in handle_vmptrst()
5926 roots_to_free = 0; in handle_invept()
5931 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { in handle_invept()
6081 return 0; in nested_vmx_eptp_switching()
6114 case 0: in handle_vmfunc()
6149 while (size > 0) { in nested_vmx_check_io_bitmaps()
6150 if (port < 0x8000) in nested_vmx_check_io_bitmaps()
6152 else if (port < 0x10000) in nested_vmx_check_io_bitmaps()
6156 bitmap += (port & 0x7fff) / 8; in nested_vmx_check_io_bitmaps()
6214 if (msr_index >= 0xc0000000) { in nested_vmx_exit_handled_msr()
6215 msr_index -= 0xc0000000; in nested_vmx_exit_handled_msr()
6243 case 0: /* mov to cr */ in nested_vmx_exit_handled_cr()
6247 case 0: in nested_vmx_exit_handled_cr()
6288 * lmsw can change bits 1..3 of cr0, and only set bit 0 of in nested_vmx_exit_handled_cr()
6291 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; in nested_vmx_exit_handled_cr()
6292 if (vmcs12->cr0_guest_host_mask & 0xe & in nested_vmx_exit_handled_cr()
6295 if ((vmcs12->cr0_guest_host_mask & 0x1) && in nested_vmx_exit_handled_cr()
6296 !(vmcs12->cr0_read_shadow & 0x1) && in nested_vmx_exit_handled_cr()
6297 (val & 0x1)) in nested_vmx_exit_handled_cr()
6331 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); in nested_vmx_exit_handled_vmcs_access()
6352 * interruption-type to 7 (other event) and the vector field to 0. Such in nested_vmx_exit_handled_mtf()
6588 exit_intr_info = 0; in nested_vmx_reflect_vmexit()
6589 exit_qual = 0; in nested_vmx_reflect_vmexit()
6630 .flags = 0, in vmx_get_nested_state()
6633 .hdr.vmx.flags = 0, in vmx_get_nested_state()
6636 .hdr.vmx.preemption_timer_deadline = 0, in vmx_get_nested_state()
6639 &user_kvm_nested_state->data.vmx[0]; in vmx_get_nested_state()
6720 copy_enlightened_to_vmcs12(vmx, 0); in vmx_get_nested_state()
6749 to_vmx(vcpu)->nested.nested_run_pending = 0; in vmx_leave_nested()
6750 nested_vmx_vmexit(vcpu, -1, 0, 0); in vmx_leave_nested()
6763 &user_kvm_nested_state->data.vmx[0]; in vmx_set_nested_state()
6829 return 0; in vmx_set_nested_state()
6844 return 0; in vmx_set_nested_state()
6884 return 0; in vmx_set_nested_state()
6935 return 0; in vmx_set_nested_state()
6938 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
6970 max_idx = 0; in nested_vmx_calc_vmcs_enum_msr()
6971 for (i = 0; i < nr_vmcs12_fields; i++) { in nested_vmx_calc_vmcs_enum_msr()
6995 (enable_apicv ? PIN_BASED_POSTED_INTR : 0); in nested_vmx_setup_pinbased_ctls()
7084 msrs->secondary_ctls_low = 0; in nested_vmx_setup_secondary_ctls()
7172 msrs->misc_high = 0; in nested_vmx_setup_misc_data()
7232 * be set to 0, meaning that L1 may turn off any of these bits. The in nested_vmx_setup_ctls_msrs()
7263 for (i = 0; i < VMX_BITMAP_NR; i++) in nested_vmx_hardware_unsetup()
7273 enable_shadow_vmcs = 0; in nested_vmx_hardware_setup()
7275 for (i = 0; i < VMX_BITMAP_NR; i++) { in nested_vmx_hardware_setup()
7304 return 0; in nested_vmx_hardware_setup()
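
The search hit 0x8ff is the top of the x2APIC MSR range (0x800–0x8ff) that the MSR-bitmap code above walks in enable_x2apic_msr_intercepts() and nested_vmx_prepare_msr_bitmap(). As a standalone illustration (not kernel code), here is a minimal user-space sketch of the same word arithmetic, assuming the Intel SDM's 4 KiB MSR-bitmap layout in which the read bitmap for MSRs 0x0–0x1fff occupies the first 1 KiB of the page and the write bitmap for the same range begins at byte offset 0x800:

```c
#include <stdio.h>
#include <string.h>

/* A long is 64 bits on x86-64; the arithmetic also works for 32-bit longs. */
#define BITS_PER_LONG ((int)(8 * sizeof(unsigned long)))

int main(void)
{
	/* One 4 KiB MSR-bitmap page as an array of longs, all accesses allowed. */
	unsigned long msr_bitmap[4096 / sizeof(unsigned long)];
	unsigned int msr;

	memset(msr_bitmap, 0, sizeof(msr_bitmap));

	/*
	 * Re-enable interception of reads and writes for the x2APIC MSRs
	 * 0x800-0x8ff, mirroring the loop shown above: the read bitmap for
	 * MSRs 0x0-0x1fff starts at byte 0 of the page, the write bitmap
	 * for the same range starts at byte 0x800, hence the extra
	 * 0x800 / sizeof(long) word offset for the write half.
	 */
	for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
		unsigned int word = msr / BITS_PER_LONG;

		msr_bitmap[word] = ~0UL;                                  /* intercept reads  */
		msr_bitmap[word + 0x800 / sizeof(unsigned long)] = ~0UL; /* intercept writes */
	}

	printf("x2APIC read bits start at word %d, write bits at word %d\n",
	       0x800 / BITS_PER_LONG,
	       0x800 / BITS_PER_LONG + 0x800 / (int)sizeof(unsigned long));
	return 0;
}
```

A set bit makes the corresponding RDMSR or WRMSR of that MSR cause a VM-exit, while a clear bit lets the guest access it directly; clearing the read bits for 0x800–0x8ff is what the nested_vmx_prepare_msr_bitmap() comment above means by letting the processor "take the value" for the virtualized x2APIC registers instead of exiting to L0.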