|
1 | 1 | use alloc::collections::VecDeque;
|
2 | 2 | use bit_field::BitField;
|
3 | 3 | use core::fmt::{Debug, Formatter, Result};
|
4 |
| -use core::{arch::asm, mem::size_of}; |
| 4 | +use core::{arch::naked_asm, mem::size_of}; |
5 | 5 | use raw_cpuid::CpuId;
|
6 | 6 | use x86::bits64::vmx;
|
7 |
| -use x86::controlregs::{xcr0 as xcr0_read, xcr0_write, Xcr0}; |
| 7 | +use x86::controlregs::{Xcr0, xcr0 as xcr0_read, xcr0_write}; |
8 | 8 | use x86::dtables::{self, DescriptorTablePointer};
|
9 | 9 | use x86::segmentation::SegmentSelector;
|
10 | 10 | use x86_64::registers::control::{Cr0, Cr0Flags, Cr3, Cr4, Cr4Flags, EferFlags};
|
11 | 11 |
|
12 | 12 | use axaddrspace::{GuestPhysAddr, GuestVirtAddr, HostPhysAddr, NestedPageFaultInfo};
|
13 |
| -use axerrno::{ax_err, ax_err_type, AxResult}; |
| 13 | +use axerrno::{AxResult, ax_err, ax_err_type}; |
14 | 14 | use axvcpu::{AccessWidth, AxArchVCpu, AxVCpuExitReason, AxVCpuHal};
|
15 | 15 |
|
| 16 | +use super::VmxExitInfo; |
16 | 17 | use super::as_axerr;
|
17 | 18 | use super::definitions::VmxExitReason;
|
18 | 19 | use super::structs::{IOBitmap, MsrBitmap, VmxRegion};
|
19 | 20 | use super::vmcs::{
|
20 | 21 | self, VmcsControl32, VmcsControl64, VmcsControlNW, VmcsGuest16, VmcsGuest32, VmcsGuest64,
|
21 | 22 | VmcsGuestNW, VmcsHost16, VmcsHost32, VmcsHost64, VmcsHostNW,
|
22 | 23 | };
|
23 |
| -use super::VmxExitInfo; |
24 | 24 | use crate::{ept::GuestPageWalkInfo, msr::Msr, regs::GeneralRegisters};
|
25 | 25 |
|
// Initial value loaded into the VMX-preemption timer: 1,000,000 ticks.
// NOTE(review): the tick period is platform-defined (a shifted TSC rate),
// so the resulting wall-clock slice depends on the host CPU — confirm this
// matches the intended guest scheduling quantum.
const VMX_PREEMPTION_TIMER_SET_VALUE: u32 = 1_000_000;
27 | 27 |
|
28 | 28 | pub struct XState {
|
29 | 29 | host_xcr0: u64,
|
@@ -147,14 +147,14 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
        if (ia32_efer & MSR_IA32_EFER_LMA_BIT) != 0 {
            // EFER.LMA set: IA-32e (long) mode is active; CS.L picks the sub-mode.
            if (cs_access_right & 0x2000) != 0 {
                // CS.L = 1
                VmCpuMode::Mode64
            } else {
                // CS.L = 0: 32-bit compatibility sub-mode of long mode.
                VmCpuMode::Compatibility
            }
        } else if (cr0 & CR0_PE) != 0 {
            // CR0.PE set without EFER.LMA: legacy protected mode.
            VmCpuMode::Protected
        } else {
            // Neither long mode nor protection enabled: real-address mode.
            VmCpuMode::Real
        }
|
160 | 160 |
|
@@ -188,7 +188,12 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
188 | 188 | match self.builtin_vmexit_handler(&exit_info) {
|
189 | 189 | Some(result) => {
|
190 | 190 | if result.is_err() {
|
191 |
| - panic!("VmxVcpu failed to handle a VM-exit that should be handled by itself: {:?}, error {:?}, vcpu: {:#x?}", exit_info.exit_reason, result.unwrap_err(), self); |
| 191 | + panic!( |
| 192 | + "VmxVcpu failed to handle a VM-exit that should be handled by itself: {:?}, error {:?}, vcpu: {:#x?}", |
| 193 | + exit_info.exit_reason, |
| 194 | + result.unwrap_err(), |
| 195 | + self |
| 196 | + ); |
192 | 197 | }
|
193 | 198 |
|
194 | 199 | None
|
@@ -245,12 +250,11 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
245 | 250 | /// Translate guest virtual addr to linear addr
|
246 | 251 | pub fn gla2gva(&self, guest_rip: GuestVirtAddr) -> GuestVirtAddr {
|
247 | 252 | let cpu_mode = self.get_cpu_mode();
|
248 |
| - let seg_base; |
249 |
| - if cpu_mode == VmCpuMode::Mode64 { |
250 |
| - seg_base = 0; |
| 253 | + let seg_base = if cpu_mode == VmCpuMode::Mode64 { |
| 254 | + 0 |
251 | 255 | } else {
|
252 |
| - seg_base = VmcsGuestNW::CS_BASE.read().unwrap(); |
253 |
| - } |
| 256 | + VmcsGuestNW::CS_BASE.read().unwrap() |
| 257 | + }; |
254 | 258 | // debug!(
|
255 | 259 | // "seg_base: {:#x}, guest_rip: {:#x} cpu mode:{:?}",
|
256 | 260 | // seg_base, guest_rip, cpu_mode
|
@@ -313,7 +317,7 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
313 | 317 |
|
314 | 318 | /// Advance guest `RIP` by `instr_len` bytes.
|
315 | 319 | pub fn advance_rip(&mut self, instr_len: u8) -> AxResult {
|
316 |
| - Ok(VmcsGuestNW::RIP.write(VmcsGuestNW::RIP.read()? + instr_len as usize)?) |
| 320 | + VmcsGuestNW::RIP.write(VmcsGuestNW::RIP.read()? + instr_len as usize) |
317 | 321 | }
|
318 | 322 |
|
319 | 323 | /// Add a virtual interrupt or exception to the pending events list,
|
@@ -717,17 +721,19 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
717 | 721 | /// Get ready then vmlaunch or vmresume.
|
718 | 722 | macro_rules! vmx_entry_with {
|
719 | 723 | ($instr:literal) => {
|
720 |
| - asm!( |
721 |
| - save_regs_to_stack!(), // save host status |
722 |
| - "mov [rdi + {host_stack_size}], rsp", // save current RSP to Vcpu::host_stack_top |
723 |
| - "mov rsp, rdi", // set RSP to guest regs area |
724 |
| - restore_regs_from_stack!(), // restore guest status |
725 |
| - $instr, // let's go! |
726 |
| - "jmp {failed}", |
727 |
| - host_stack_size = const size_of::<GeneralRegisters>(), |
728 |
| - failed = sym Self::vmx_entry_failed, |
729 |
| - options(noreturn), |
730 |
| - ) |
| 724 | + unsafe { |
| 725 | + naked_asm!( |
| 726 | + save_regs_to_stack!(), // save host status |
| 727 | + "mov [rdi + {host_stack_size}], rsp", // save current RSP to Vcpu::host_stack_top |
| 728 | + "mov rsp, rdi", // set RSP to guest regs area |
| 729 | + restore_regs_from_stack!(), // restore guest status |
| 730 | + $instr, // let's go! |
| 731 | + "jmp {failed}", |
| 732 | + host_stack_size = const size_of::<GeneralRegisters>(), |
| 733 | + failed = sym Self::vmx_entry_failed, |
| 734 | + // options(noreturn), |
| 735 | + ) |
| 736 | + } |
731 | 737 | }
|
732 | 738 | }
|
733 | 739 |
|
@@ -757,14 +763,15 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
757 | 763 | ///
|
758 | 764 | /// The return value is a dummy value.
|
    // NOTE(review): `naked_asm!` is only legal inside a naked function, so this
    // fn is presumably marked `#[naked]`/`#[unsafe(naked)]` above this view —
    // confirm the attribute is present.
    unsafe extern "C" fn vmx_exit(&mut self) -> usize {
        // On VM-exit the CPU lands here with RSP pointing at the guest
        // `GeneralRegisters` area (arranged by `vmx_entry_with!`, which stored
        // the host RSP at offset `host_stack_top` past that area).
        unsafe {
            naked_asm!(
                save_regs_to_stack!(),               // save guest status
                "mov rsp, [rsp + {host_stack_top}]", // set RSP to Vcpu::host_stack_top
                restore_regs_from_stack!(),          // restore host status
                "ret",
                host_stack_top = const size_of::<GeneralRegisters>(),
            );
        }
        // The asm diverges from Rust's point of view; the documented `usize`
        // return value is the dummy produced by the assembly `ret`.
    }
|
769 | 776 |
|
770 | 777 | fn vmx_entry_failed() -> ! {
|
@@ -827,6 +834,7 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
827 | 834 | Ok(())
|
828 | 835 | }
|
829 | 836 |
|
| 837 | + #[allow(clippy::single_match)] |
830 | 838 | fn handle_cr(&mut self) -> AxResult {
|
831 | 839 | const VM_EXIT_INSTR_LEN_MV_TO_CR: u8 = 3;
|
832 | 840 |
|
@@ -864,7 +872,7 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
864 | 872 | }
|
865 | 873 |
|
866 | 874 | fn handle_cpuid(&mut self) -> AxResult {
|
867 |
| - use raw_cpuid::{cpuid, CpuIdResult}; |
| 875 | + use raw_cpuid::{CpuIdResult, cpuid}; |
868 | 876 |
|
869 | 877 | const VM_EXIT_INSTR_LEN_CPUID: u8 = 2;
|
870 | 878 | const LEAF_FEATURE_INFO: u32 = 0x1;
|
@@ -895,7 +903,7 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
895 | 903 | if regs_clone.rcx == 0 {
|
896 | 904 | // Bit 05: WAITPKG.
|
897 | 905 | res.ecx.set_bit(5, false); // clear waitpkg
|
898 |
| - // Bit 16: LA57. Supports 57-bit linear addresses and five-level paging if 1. |
| 906 | + // Bit 16: LA57. Supports 57-bit linear addresses and five-level paging if 1. |
899 | 907 | res.ecx.set_bit(16, false); // clear LA57
|
900 | 908 | }
|
901 | 909 |
|
@@ -939,9 +947,7 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
939 | 947 |
|
940 | 948 | trace!(
|
941 | 949 | "VM exit: CPUID({:#x}, {:#x}): {:?}",
|
942 |
| - regs_clone.rax, |
943 |
| - regs_clone.rcx, |
944 |
| - res |
| 950 | + regs_clone.rax, regs_clone.rcx, res |
945 | 951 | );
|
946 | 952 |
|
947 | 953 | let regs = self.regs_mut();
|
@@ -980,14 +986,12 @@ impl<H: AxVCpuHal> VmxVcpu<H> {
|
980 | 986 | if x.contains(Xcr0::XCR0_OPMASK_STATE)
|
981 | 987 | || x.contains(Xcr0::XCR0_ZMM_HI256_STATE)
|
982 | 988 | || x.contains(Xcr0::XCR0_HI16_ZMM_STATE)
|
| 989 | + || !x.contains(Xcr0::XCR0_AVX_STATE) |
| 990 | + || !x.contains(Xcr0::XCR0_OPMASK_STATE) |
| 991 | + || !x.contains(Xcr0::XCR0_ZMM_HI256_STATE) |
| 992 | + || !x.contains(Xcr0::XCR0_HI16_ZMM_STATE) |
983 | 993 | {
|
984 |
| - if !x.contains(Xcr0::XCR0_AVX_STATE) |
985 |
| - || !x.contains(Xcr0::XCR0_OPMASK_STATE) |
986 |
| - || !x.contains(Xcr0::XCR0_ZMM_HI256_STATE) |
987 |
| - || !x.contains(Xcr0::XCR0_HI16_ZMM_STATE) |
988 |
| - { |
989 |
| - return None; |
990 |
| - } |
| 994 | + return None; |
991 | 995 | }
|
992 | 996 |
|
993 | 997 | Some(x)
|
@@ -1139,18 +1143,16 @@ impl<H: AxVCpuHal> AxArchVCpu for VmxVcpu<H> {
|
1139 | 1143 |
|
                if io_info.is_in {
                    // IN/INS: the hypervisor must supply the data to the guest.
                    AxVCpuExitReason::IoRead { port, width }
                } else if port == QEMU_EXIT_PORT
                    && width == AccessWidth::Word
                    && self.regs().rax == QEMU_EXIT_MAGIC
                {
                    // A word-sized write of the magic value to QEMU's
                    // debug-exit port is treated as a guest shutdown request.
                    AxVCpuExitReason::SystemDown
                } else {
                    // OUT/OUTS: hand the written value (low bits of RAX,
                    // masked to the access width) up to the hypervisor.
                    AxVCpuExitReason::IoWrite {
                        port,
                        width,
                        data: self.regs().rax.get_bits(width.bits_range()),
                    }
                }
|
|
0 commit comments