diff --git a/docs/syzos.md b/docs/syzos.md
index df292ea72a36..60730ae281df 100644
--- a/docs/syzos.md
+++ b/docs/syzos.md
@@ -165,7 +165,7 @@ Modify the architecture-specific executor header (e.g., `executor/common_kvm_amd
 GUEST_CODE static void guest_handle_nested_amd_vmcb_write_mask(struct api_call_5* cmd, uint64 cpu_id);
 ```
 
-Note: make sure to choose the optimal api_call_N structure that exactly matches the number of arguments required by your new primitive (e.g., use struct api_call_2 for a command needing two arguments).
+Note: make sure to choose the optimal api_call_N structure that exactly matches the number of arguments required by your new primitive (e.g., use struct api_call_2 for a command needing two arguments). If no arguments are required, omit the `cmd` parameter altogether. If the guest code does not access VMCB/VMCS, omit the `cpu_id` parameter.
 
 ### Step 2: Implement Guest Logic and Dispatch
 In the same file (or corresponding source), implement the guest logic.
diff --git a/executor/common_kvm_amd64_syzos.h b/executor/common_kvm_amd64_syzos.h
index 58444fbc15e4..846569af5b0a 100644
--- a/executor/common_kvm_amd64_syzos.h
+++ b/executor/common_kvm_amd64_syzos.h
@@ -34,6 +34,9 @@ typedef enum {
 	SYZOS_API_NESTED_INTEL_VMWRITE_MASK = 340,
 	SYZOS_API_NESTED_AMD_VMCB_WRITE_MASK = 380,
 	SYZOS_API_NESTED_AMD_INVLPGA = 381,
+	SYZOS_API_NESTED_AMD_STGI = 382,
+	SYZOS_API_NESTED_AMD_CLGI = 383,
+	SYZOS_API_NESTED_AMD_INJECT_EVENT = 384,
 	SYZOS_API_STOP, // Must be the last one
 } syzos_api_id;
 
@@ -115,6 +118,9 @@ GUEST_CODE static void guest_handle_nested_vmresume(struct api_call_1* cmd, uint
 GUEST_CODE static void guest_handle_nested_intel_vmwrite_mask(struct api_call_5* cmd, uint64 cpu_id);
 GUEST_CODE static void guest_handle_nested_amd_vmcb_write_mask(struct api_call_5* cmd, uint64 cpu_id);
 GUEST_CODE static void guest_handle_nested_amd_invlpga(struct api_call_2* cmd, uint64 cpu_id);
+GUEST_CODE static void guest_handle_nested_amd_stgi();
+GUEST_CODE static void guest_handle_nested_amd_clgi();
+GUEST_CODE static void guest_handle_nested_amd_inject_event(struct api_call_5* cmd, uint64 cpu_id);
 
 typedef enum {
 	UEXIT_END = (uint64)-1,
@@ -233,6 +239,15 @@ guest_main(uint64 size, uint64 cpu)
 		} else if (call == SYZOS_API_NESTED_AMD_INVLPGA) {
 			// Invalidate TLB mappings for the specified address/ASID.
 			guest_handle_nested_amd_invlpga((struct api_call_2*)cmd, cpu);
+		} else if (call == SYZOS_API_NESTED_AMD_STGI) {
+			// Set Global Interrupt Flag (Enable Interrupts).
+			guest_handle_nested_amd_stgi();
+		} else if (call == SYZOS_API_NESTED_AMD_CLGI) {
+			// Clear Global Interrupt Flag (Disable Interrupts, including NMI).
+			guest_handle_nested_amd_clgi();
+		} else if (call == SYZOS_API_NESTED_AMD_INJECT_EVENT) {
+			// Inject an event (IRQ/Exception) into the L2 guest via VMCB.
+			guest_handle_nested_amd_inject_event((struct api_call_5*)cmd, cpu);
 		}
 		addr += cmd->size;
 		size -= cmd->size;
@@ -1300,4 +1315,48 @@ guest_handle_nested_amd_invlpga(struct api_call_2* cmd, uint64 cpu_id)
 	asm volatile("invlpga" : : "a"(linear_addr), "c"(asid) : "memory");
 }
 
+GUEST_CODE static noinline void
+guest_handle_nested_amd_stgi()
+{
+	if (get_cpu_vendor() != CPU_VENDOR_AMD)
+		return;
+	asm volatile("stgi" ::: "memory");
+}
+
+GUEST_CODE static noinline void
+guest_handle_nested_amd_clgi()
+{
+	if (get_cpu_vendor() != CPU_VENDOR_AMD)
+		return;
+	asm volatile("clgi" ::: "memory");
+}
+
+GUEST_CODE static noinline void
+guest_handle_nested_amd_inject_event(struct api_call_5* cmd, uint64 cpu_id)
+{
+	if (get_cpu_vendor() != CPU_VENDOR_AMD)
+		return;
+
+	uint64 vm_id = cmd->args[0];
+	uint64 vmcb_addr = X86_SYZOS_ADDR_VMCS_VMCB(cpu_id, vm_id);
+
+	uint64 vector = cmd->args[1] & 0xFF;
+	uint64 type = cmd->args[2] & 0x7;
+	uint64 error_code = cmd->args[3] & 0xFFFFFFFF;
+	uint64 flags = cmd->args[4];
+
+	// Flags bit 0: Valid (V)
+	// Flags bit 1: Error Code Valid (EV)
+	uint64 event_inj = vector;
+	event_inj |= (type << 8);
+	if (flags & 2)
+		event_inj |= (1ULL << 11); // EV bit
+	if (flags & 1)
+		event_inj |= (1ULL << 31); // V bit
+	event_inj |= (error_code << 32);
+
+	// Write to the VMCB EVENTINJ field (control area offset 0xA8).
+	vmcb_write64(vmcb_addr, 0xA8, event_inj);
+}
+
 #endif // EXECUTOR_COMMON_KVM_AMD64_SYZOS_H
diff --git a/sys/linux/dev_kvm_amd64.txt b/sys/linux/dev_kvm_amd64.txt
index abdde5644068..7ac8028d926f 100644
--- a/sys/linux/dev_kvm_amd64.txt
+++ b/sys/linux/dev_kvm_amd64.txt
@@ -152,6 +152,14 @@ syzos_api_nested_amd_invlpga {
 	asid	int64[0:65535]
 }
 
+syzos_api_nested_amd_inject_event {
+	vm_id	syzos_api_vm_id
+	vector	int64[0:255]
+	type	int64[0:7]
+	error_code	int64
+	flags	int64[0:3]
+}
+
 # IDs here must match those in executor/common_kvm_amd64_syzos.h.
 syzos_api_call$x86 [
 	uexit	syzos_api$x86[0, intptr]
@@ -172,6 +180,9 @@
 	nested_intel_vmwrite_mask	syzos_api$x86[340, syzos_api_nested_intel_vmwrite_mask]
 	nested_amd_vmcb_write_mask	syzos_api$x86[380, syzos_api_nested_amd_vmcb_write_mask]
 	nested_amd_invlpga	syzos_api$x86[381, syzos_api_nested_amd_invlpga]
+	nested_amd_stgi	syzos_api$x86[382, void]
+	nested_amd_clgi	syzos_api$x86[383, void]
+	nested_amd_inject_event	syzos_api$x86[384, syzos_api_nested_amd_inject_event]
 ] [varlen]
 
 kvm_text_x86 [
diff --git a/sys/linux/test/amd64-syz_kvm_nested_amd_inject b/sys/linux/test/amd64-syz_kvm_nested_amd_inject
new file mode 100644
index 000000000000..f41b89489f69
--- /dev/null
+++ b/sys/linux/test/amd64-syz_kvm_nested_amd_inject
@@ -0,0 +1,37 @@
+#
+# requires: arch=amd64 -threaded
+#
+r0 = openat$kvm(0, &AUTO='/dev/kvm\x00', 0x0, 0x0)
+r1 = ioctl$KVM_CREATE_VM(r0, AUTO, 0x0)
+r2 = syz_kvm_setup_syzos_vm$x86(r1, &(0x7f0000c00000/0x400000)=nil)
+
+# Test AMD Nested Event Injection.
+#
+# 1. Set up the nested environment (L1) and create the L2 VM.
+# 2. Inject an NMI (Vector 2, Type 2, Valid) into L2 via the VMCB EVENTINJ field.
+# 3. Launch L2.
+# - This forces KVM L0 to parse EVENTINJ and handle the injection.
+# - We expect L0 to succeed without crashing.
+#
+# Arguments for INJECT_EVENT:
+# vm_id=0
+# vector=2 (NMI)
+# type=2 (NMI)
+# error_code=0
+# flags=1 (Valid=1, EV=0)
+#
+r3 = syz_kvm_add_vcpu$x86(r2, &AUTO={0x0, &AUTO=[@enable_nested={AUTO, AUTO, 0x0}, @nested_create_vm={AUTO, AUTO, 0x0}, @nested_amd_inject_event={AUTO, AUTO, {0x0, 0x2, 0x2, 0x0, 0x1}}, @nested_vmlaunch={AUTO, AUTO, 0x0}, @uexit={AUTO, AUTO, 0xface}], AUTO})
+r4 = ioctl$KVM_GET_VCPU_MMAP_SIZE(r0, AUTO)
+r5 = mmap$KVM_VCPU(&(0x7f0000009000/0x1000)=nil, r4, 0x3, 0x1, r3, 0x0)
+
+# Run the VCPU.
+# The guest writes EVENTINJ and launches L2.
+# If KVM L0 processes the EVENTINJ correctly, the guest should eventually exit back to us.
+#
+ioctl$KVM_RUN(r3, AUTO, 0x0)
+syz_kvm_assert_syzos_uexit$x86(r5, 0xface)
+
+# Cleanup.
+#
+ioctl$KVM_RUN(r3, AUTO, 0x0)
+syz_kvm_assert_syzos_uexit$x86(r5, 0xffffffff)
diff --git a/sys/linux/test/amd64-syz_kvm_nested_amd_stgi b/sys/linux/test/amd64-syz_kvm_nested_amd_stgi
new file mode 100644
index 000000000000..21f57e87815f
--- /dev/null
+++ b/sys/linux/test/amd64-syz_kvm_nested_amd_stgi
@@ -0,0 +1,47 @@
+#
+# requires: arch=amd64 -threaded
+#
+r0 = openat$kvm(0, &AUTO='/dev/kvm\x00', 0x0, 0x0)
+r1 = ioctl$KVM_CREATE_VM(r0, AUTO, 0x0)
+r2 = syz_kvm_setup_syzos_vm$x86(r1, &(0x7f0000c00000/0x400000)=nil)
+
+# Test AMD SVM STGI/CLGI interaction with host NMI injection.
+#
+# 1. Guest executes CLGI (clearing GIF, which blocks interrupts including NMI).
+# 2. Guest executes UEXIT (0x1337) to yield to the host.
+# 3. Host injects an NMI via KVM_NMI.
+# - Because GIF=0, this NMI must remain PENDING and NOT be delivered yet.
+# 4. Guest resumes and executes STGI.
+# - The NMI should be delivered immediately after STGI sets GIF.
+#
+r3 = syz_kvm_add_vcpu$x86(r2, &AUTO={0x0, &AUTO=[@enable_nested={AUTO, AUTO, 0x0}, @nested_create_vm={AUTO, AUTO, 0x0}, @nested_amd_clgi={AUTO, AUTO, ""}, @uexit={AUTO, AUTO, 0x1337}, @nested_amd_stgi={AUTO, AUTO, ""}, @uexit={AUTO, AUTO, 0xface}], AUTO})
+r4 = ioctl$KVM_GET_VCPU_MMAP_SIZE(r0, AUTO)
+r5 = mmap$KVM_VCPU(&(0x7f0000009000/0x1000)=nil, r4, 0x3, 0x1, r3, 0x0)
+
+# Run 1: Execute CLGI -> UEXIT(0x1337)
+#
+ioctl$KVM_RUN(r3, AUTO, 0x0)
+syz_kvm_assert_syzos_uexit$x86(r5, 0x1337)
+
+# Inject NMI into the vCPU.
+# Since GIF=0 in the guest, this NMI should be queued by L0.
+#
+ioctl$KVM_NMI(r3, AUTO)
+
+# Run 2: Resume -> STGI -> NMI delivery -> UEXIT(0xface)
+# We verify that the guest survives the NMI delivery and reaches the final exit.
+# (If KVM fails to queue the NMI and delivers it early, or corrupts state, this may crash.)
+#
+ioctl$KVM_RUN(r3, AUTO, 0x0)
+
+# We check for successful completion.
+# Note: if the NMI is delivered, it might cause a standard KVM exit depending on interception settings.
+# If the guest handles it transparently, we see 0xface.
+# For this regression test, ensuring we don't crash L0 is the primary goal.
+#
+syz_kvm_assert_syzos_uexit$x86(r5, 0xface)
+
+# Cleanup.
+#
+ioctl$KVM_RUN(r3, AUTO, 0x0)
+syz_kvm_assert_syzos_uexit$x86(r5, 0xffffffff)
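The EVENTINJ packing in guest_handle_nested_amd_inject_event can be sanity-checked outside the executor. Below is a minimal standalone sketch (not part of the patch; the helper name encode_eventinj is made up) that mirrors the same bit layout, which follows the AMD APM (VECTOR bits 7:0, TYPE bits 10:8, EV bit 11, V bit 31, ERRORCODE bits 63:32), and prints the value for the NMI case exercised by amd64-syz_kvm_nested_amd_inject:

```c
// Standalone sanity check for the EVENTINJ encoding used by
// guest_handle_nested_amd_inject_event. Build: cc eventinj.c && ./a.out
#include <stdint.h>
#include <stdio.h>

static uint64_t encode_eventinj(uint64_t vector, uint64_t type,
				uint64_t error_code, uint64_t flags)
{
	uint64_t v = vector & 0xFF; // VECTOR, bits 7:0
	v |= (type & 0x7) << 8; // TYPE, bits 10:8
	if (flags & 2) // flags bit 1: Error Code Valid
		v |= 1ULL << 11; // EV bit
	if (flags & 1) // flags bit 0: Valid
		v |= 1ULL << 31; // V bit
	v |= (error_code & 0xFFFFFFFF) << 32; // ERRORCODE, bits 63:32
	return v;
}

int main(void)
{
	// NMI case from amd64-syz_kvm_nested_amd_inject:
	// vector=2, type=2, error_code=0, flags=1 -> expect 0x80000202.
	printf("EVENTINJ = 0x%llx\n",
	       (unsigned long long)encode_eventinj(2, 2, 0, 1));
	return 0;
}
```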
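Likewise, the pending-NMI expectation in amd64-syz_kvm_nested_amd_stgi (with GIF=0 the injected NMI should stay queued) can be spot-checked from a plain C harness via the standard KVM UAPI. A rough sketch, assuming vcpu_fd is a vCPU whose guest has already executed CLGI and yielded, and assuming KVM keeps the NMI pending rather than injected until GIF is set again; the helper name nmi_stays_pending is made up:

```c
// Inject an NMI while the (nested SVM) guest has GIF=0 and check that KVM
// reports it as pending rather than injected. Assumes vcpu_fd is a valid
// KVM vCPU fd obtained from KVM_CREATE_VCPU.
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static int nmi_stays_pending(int vcpu_fd)
{
	struct kvm_vcpu_events events;

	if (ioctl(vcpu_fd, KVM_NMI) < 0)
		return -1;
	if (ioctl(vcpu_fd, KVM_GET_VCPU_EVENTS, &events) < 0)
		return -1;
	printf("nmi.pending=%u nmi.injected=%u\n",
	       events.nmi.pending, events.nmi.injected);
	// Expected while GIF=0: pending set, not yet injected.
	return events.nmi.pending && !events.nmi.injected;
}
```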