@@ -567,8 +567,6 @@ uint32 load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
     struct per_cpu_data *cpu_data;
     paddr_t vmcs_phy;
     paddr_t curr_vmcs = VMCS_NONE;
-    vmx_error_t err = 0;
-    uint64 fc_msr;
 
     hax_disable_preemption(flags);
 
@@ -584,87 +582,9 @@ uint32 load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
         return 0;
     }
 
-    cpu_data->host_cr4_vmxe = (get_cr4() & CR4_VMXE);
-    if (cpu_data->host_cr4_vmxe) {
-        if (debug_vmcs_count % 100000 == 0) {
-            hax_debug("host VT has enabled!\n");
-            hax_debug("Cr4 value = 0x%lx\n", get_cr4());
-            log_host_cr4_vmxe = 1;
-            log_host_cr4 = get_cr4();
-        }
-        debug_vmcs_count++;
-    }
-    set_cr4(get_cr4() | CR4_VMXE);
-    /* HP systems & Mac systems workaround
-     * When resuming from S3, some HP/Mac set the IA32_FEATURE_CONTROL MSR to
-     * zero. Setting the lock bit to zero & then doing 'vmxon' would cause a GP.
-     * As a workaround, when we see this condition, we enable the bits so that
-     * we can launch vmxon & thereby hax.
-     * bit 0 - Lock bit
-     * bit 2 - Enable VMX outside SMX operation
-     *
-     * ********* To Do **************************************
-     * This is the workground to fix BSOD when resume from S3
-     * The best way is to add one power management handler, and set
-     * IA32_FEATURE_CONTROL MSR in that PM S3 handler
-     * *****************************************************
-     */
-    fc_msr = ia32_rdmsr(IA32_FEATURE_CONTROL);
-    if (!(fc_msr & FC_LOCKED))
-        ia32_wrmsr(IA32_FEATURE_CONTROL,
-                   fc_msr | FC_LOCKED | FC_VMXON_OUTSMX);
-
-    err = __vmxon(hax_page_pa(cpu_data->vmxon_page));
-
-    log_vmxon_err = err;
-    log_vmxon_addr = hax_page_pa(cpu_data->vmxon_page);
-
-    if (!(err & VMX_FAIL_MASK))
-        cpu_data->vmm_flag |= VMXON_HAX;
-    else {
-        bool fatal = true;
-
-#ifdef __MACH__
-        if ((err & VMX_FAIL_INVALID) && cpu_data->host_cr4_vmxe) {
-            // On macOS, if VMXON fails with VMX_FAIL_INVALID and host CR4.VMXE
-            // was already set, it is very likely that another VMM (VirtualBox
-            // or any VMM based on macOS Hypervisor Framework, e.g. Docker) is
-            // running and did not call VMXOFF. In that case, the current host
-            // logical processor is already in VMX operation, and we can use an
-            // innocuous VMX instruction (VMPTRST) to confirm that.
-            // However, if the above assumption is wrong and the host processor
-            // is not actually in VMX operation, VMPTRST will probably cause a
-            // host reboot. But we don't have a better choice, and it is worth
-            // taking the risk.
-            curr_vmcs = __vmptrst();
-            if (curr_vmcs == VMCS_NONE) {
-                hax_debug("Already in VMX operation, courtesy of another"
-                          " VMM (VirtualBox or macOS Hypervisor Framework)\n");
-                fatal = false;
-                // Indicate that it is not necessary to call VMXOFF later
-                cpu_data->vmm_flag &= ~VMXON_HAX;
-            } else {
-                // Should never happen
-                hax_error("VMXON failed with VMX_FAIL_INVALID, but there is a"
-                          " current VMCS at 0x%llx\n", curr_vmcs);
-            }
-        }
-#endif
-
-        if (fatal) {
-            hax_error("VMXON failed for region 0x%llx (err=0x%x)\n",
-                      hax_page_pa(cpu_data->vmxon_page), (uint32)err);
-            restore_host_cr4_vmxe(cpu_data);
-            if (err & VMX_FAIL_INVALID) {
-                log_vmxon_err_type1 = 1;
-            } else {
-                // TODO: Should VMX_FAIL_VALID be ignored? The current VMCS can
-                // be cleared (deactivated and saved to memory) using VMCLEAR
-                log_vmxon_err_type2 = 1;
-            }
-            hax_enable_preemption(flags);
-            return VMXON_FAIL;
-        }
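+    // The VMXON sequence (CR4.VMXE, IA32_FEATURE_CONTROL, __vmxon) is now
+    // factored out into cpu_vmxroot_enter(), defined at the end of this file.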
+    if (cpu_vmxroot_enter() != VMX_SUCCEED) {
+        hax_enable_preemption(flags);
+        return VMXON_FAIL;
     }
 
     if (vcpu)
@@ -679,9 +599,7 @@ uint32 load_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
 
     if (__vmptrld(vmcs_phy) != VMX_SUCCEED) {
         hax_error("HAX: vmptrld failed (%08llx)\n", vmcs_phy);
-        cpu_data->vmm_flag = 0;
-        __vmxoff();
-        restore_host_cr4_vmxe(cpu_data);
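+        // Undo VMXON (and restore the host's CR4.VMXE) before bailing out.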
+        cpu_vmxroot_leave();
         log_vmxon_err_type3 = 1;
         hax_enable_preemption(flags);
         return VMPTRLD_FAIL;
@@ -716,7 +634,6 @@ uint32 put_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
     int cpu_id = hax_cpuid();
    struct per_cpu_data *cpu_data = hax_cpu_data[cpu_id];
     paddr_t vmcs_phy;
-    vmx_error_t err = 0;
     vmx_error_t vmxoff_err = 0;
     if (vcpu && cpu_data->nested > 0) {
         cpu_data->nested--;
@@ -735,27 +652,8 @@ uint32 put_vmcs(struct vcpu_t *vcpu, preempt_flag *flags)
735
652
736
653
cpu_data -> current_vcpu = NULL ;
737
654
738
- if (cpu_data -> vmm_flag & VMXON_HAX ) {
739
- err = __vmxoff ();
740
- if (!(err & VMX_FAIL_MASK )) {
741
- restore_host_cr4_vmxe (cpu_data );
742
- } else {
743
- hax_error ("VMXOFF Failed..........\n" );
744
- vmxoff_err = err ;
745
- log_vmxoff_err = err ;
746
- }
747
- } else {
748
- log_vmxoff_no = 1 ;
749
- #ifdef __MACH__
750
- hax_debug ("Skipping VMXOFF because another VMM (VirtualBox or macOS"
751
- " Hypervisor Framework) is running\n" );
752
- #else
753
- vmxoff_err = 0x1 ;
754
- hax_error ("NO VMXOFF.......\n" );
755
- #endif
756
- }
655
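+    // The VMXOFF sequence is now factored out into cpu_vmxroot_leave(),
+    // which also records any failure in cpu_data->vmxoff_err.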
+    vmxoff_err = cpu_vmxroot_leave();
     cpu_data->other_vmcs = VMCS_NONE;
-    cpu_data->vmm_flag = 0;
     if (vcpu && vcpu->is_vmcs_loaded)
         vcpu->is_vmcs_loaded = 0;
 out:
@@ -817,3 +715,119 @@ static vmx_error_t cpu_vmentry_failed(struct vcpu_t *vcpu, vmx_error_t err)
     hax_log("end of cpu_vmentry_failed\n");
     return err;
 }
+
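+// Leaves VMX root operation: executes VMXOFF if this module issued VMXON,
+// clears VMXON_HAX and restores the host's original CR4.VMXE value.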
+vmx_error_t cpu_vmxroot_leave(void)
+{
+    struct per_cpu_data *cpu_data = current_cpu_data();
+    vmx_error_t err = VMX_SUCCEED;
+
+    if (cpu_data->vmm_flag & VMXON_HAX) {
+        err = __vmxoff();
+        if (!(err & VMX_FAIL_MASK)) {
+            cpu_data->vmm_flag &= ~VMXON_HAX;
+            restore_host_cr4_vmxe(cpu_data);
+        } else {
+            hax_error("VMXOFF Failed..........\n");
+        }
+    } else {
+        log_vmxoff_no = 1;
+#ifdef __MACH__
+        hax_debug("Skipping VMXOFF because another VMM (VirtualBox or macOS"
+                  " Hypervisor Framework) is running\n");
+#else
+        // This branch should be unreachable on Win32/Win64 hosts.
+        err = VMX_FAIL_VALID;
+        hax_error("NO VMXOFF.......\n");
+#endif
+    }
+    cpu_data->vmxoff_err = err;
+
+    return err;
+}
+
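+// Enters VMX root operation: sets CR4.VMXE, applies the IA32_FEATURE_CONTROL
+// S3-resume workaround, then executes VMXON on this logical processor.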
+vmx_error_t cpu_vmxroot_enter(void)
+{
+    struct per_cpu_data *cpu_data = current_cpu_data();
+    uint64 fc_msr;
+    vmx_error_t err = VMX_SUCCEED;
+
+    cpu_data->host_cr4_vmxe = (get_cr4() & CR4_VMXE);
+    if (cpu_data->host_cr4_vmxe) {
+        if (debug_vmcs_count % 100000 == 0) {
+            hax_debug("host VT has enabled!\n");
+            hax_debug("Cr4 value = 0x%lx\n", get_cr4());
+            log_host_cr4_vmxe = 1;
+            log_host_cr4 = get_cr4();
+        }
+        debug_vmcs_count++;
+    }
+
+    set_cr4(get_cr4() | CR4_VMXE);
+    /* HP systems & Mac systems workaround
+     * When resuming from S3, some HP/Mac machines set the IA32_FEATURE_CONTROL
+     * MSR to zero. Executing 'vmxon' with the lock bit clear causes a #GP.
+     * As a workaround, when we see this condition, we set the bits below so
+     * that we can launch vmxon & thereby hax.
+     * bit 0 - Lock bit
+     * bit 2 - Enable VMX outside SMX operation
+     *
+     * ********* To Do **************************************
+     * This is a workaround for the BSOD seen when resuming from S3.
+     * The proper fix is to register a power management handler and set
+     * the IA32_FEATURE_CONTROL MSR in that PM S3 handler.
+     * *****************************************************
+     */
+    fc_msr = ia32_rdmsr(IA32_FEATURE_CONTROL);
+    if (!(fc_msr & FC_LOCKED))
+        ia32_wrmsr(IA32_FEATURE_CONTROL,
+                   fc_msr | FC_LOCKED | FC_VMXON_OUTSMX);
+
+    err = __vmxon(hax_page_pa(cpu_data->vmxon_page));
+
+    log_vmxon_err = err;
+    log_vmxon_addr = hax_page_pa(cpu_data->vmxon_page);
+
+    if (!(err & VMX_FAIL_MASK)) {
+        cpu_data->vmm_flag |= VMXON_HAX;
+    } else {
+        bool fatal = true;
+
+#ifdef __MACH__
+        if ((err & VMX_FAIL_INVALID) && cpu_data->host_cr4_vmxe) {
+            // On macOS, if VMXON fails with VMX_FAIL_INVALID and host CR4.VMXE
+            // was already set, it is very likely that another VMM (VirtualBox
+            // or any VMM based on macOS Hypervisor Framework, e.g. Docker) is
+            // running and did not call VMXOFF. In that case, the current host
+            // logical processor is already in VMX operation, and we can use an
+            // innocuous VMX instruction (VMPTRST) to confirm that.
+            // However, if the above assumption is wrong and the host processor
+            // is not actually in VMX operation, VMPTRST will probably cause a
+            // host reboot. But we don't have a better choice, and it is worth
+            // taking the risk.
+            __vmptrst();
+
+            // We are still alive, so the assumption above was right.
+            fatal = false;
+            err = VMX_SUCCEED;
+            // Indicate that it is not necessary to call VMXOFF later
+            cpu_data->vmm_flag &= ~VMXON_HAX;
+        }
+#endif
+
+        if (fatal) {
+            hax_error("VMXON failed for region 0x%llx (err=0x%x, vmxe=%x)\n",
+                      hax_page_pa(cpu_data->vmxon_page), (uint32)err,
+                      (uint32)cpu_data->host_cr4_vmxe);
+            restore_host_cr4_vmxe(cpu_data);
+            if (err & VMX_FAIL_INVALID) {
+                log_vmxon_err_type1 = 1;
+            } else {
+                // TODO: Should VMX_FAIL_VALID be ignored? The current VMCS can
+                // be cleared (deactivated and saved to memory) using VMCLEAR
+                log_vmxon_err_type2 = 1;
+            }
+        }
+    }
+    cpu_data->vmxon_err = err;
+    return err;
+}