Diffstat (limited to '0041-x86-vmx-Perform-VERW-flushing-later-in-the-VMExit-pa.patch')
-rw-r--r--  0041-x86-vmx-Perform-VERW-flushing-later-in-the-VMExit-pa.patch | 146
1 file changed, 146 insertions(+), 0 deletions(-)
diff --git a/0041-x86-vmx-Perform-VERW-flushing-later-in-the-VMExit-pa.patch b/0041-x86-vmx-Perform-VERW-flushing-later-in-the-VMExit-pa.patch
new file mode 100644
index 0000000..871f10f
--- /dev/null
+++ b/0041-x86-vmx-Perform-VERW-flushing-later-in-the-VMExit-pa.patch
@@ -0,0 +1,146 @@
+From 77f2bec134049aba29b9b459f955022722d10847 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 23 Jun 2023 11:32:00 +0100
+Subject: [PATCH 41/67] x86/vmx: Perform VERW flushing later in the VMExit path
+
+Broken out of the following patch because this change is subtle enough on its
+own. See it for the rationale behind moving VERW.
+
+As for how, extend the trick already used to hold one condition in
+flags (RESUME vs LAUNCH) through the POPing of GPRs.
+
+Move the MOV to %cr2 earlier. Intel specify the flags to be undefined
+across it.
+
+Encode the two conditions we want using SF and PF. See the code comment for
+exactly how.
+
+Leave a comment to explain the lack of any content around
+SPEC_CTRL_EXIT_TO_VMX, but leave the block in place. Sod's law says if we
+delete it, we'll need to reintroduce it.
+
+This is part of XSA-452 / CVE-2023-28746.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+(cherry picked from commit 475fa20b7384464210f42bad7195f87bd6f1c63f)
+---
+ xen/arch/x86/hvm/vmx/entry.S             | 36 +++++++++++++++++++++---
+ xen/arch/x86/include/asm/asm_defns.h     |  8 ++++++
+ xen/arch/x86/include/asm/spec_ctrl_asm.h |  7 +++++
+ xen/arch/x86/x86_64/asm-offsets.c        |  1 +
+ 4 files changed, 48 insertions(+), 4 deletions(-)
+
+diff --git a/xen/arch/x86/hvm/vmx/entry.S b/xen/arch/x86/hvm/vmx/entry.S
+index 5f5de45a13..cdde76e138 100644
+--- a/xen/arch/x86/hvm/vmx/entry.S
++++ b/xen/arch/x86/hvm/vmx/entry.S
+@@ -87,17 +87,39 @@ UNLIKELY_END(realmode)
+
+         /* WARNING! `ret`, `call *`, `jmp *` not safe beyond this point. */
+         /* SPEC_CTRL_EXIT_TO_VMX   Req: %rsp=regs/cpuinfo              Clob:    */
+-        DO_SPEC_CTRL_COND_VERW
++        /*
++         * All speculation safety work happens to be elsewhere.  VERW is after
++         * popping the GPRs, while restoring the guest MSR_SPEC_CTRL is left
++         * to the MSR load list.
++         */
+
+         mov  VCPU_hvm_guest_cr2(%rbx),%rax
++        mov  %rax, %cr2
++
++        /*
++         * We need to perform two conditional actions (VERW, and Resume vs
++         * Launch) after popping GPRs.  With some cunning, we can encode both
++         * of these in eflags together.
++         *
++         * Parity is only calculated over the bottom byte of the answer, while
++         * Sign is simply the top bit.
++         *
++         * Therefore, the final OR instruction ends up producing:
++         *   SF = VCPU_vmx_launched
++         *   PF = !SCF_verw
++         */
++        BUILD_BUG_ON(SCF_verw & ~0xff)
++        movzbl VCPU_vmx_launched(%rbx), %ecx
++        shl  $31, %ecx
++        movzbl CPUINFO_spec_ctrl_flags(%rsp), %eax
++        and  $SCF_verw, %eax
++        or   %eax, %ecx
+
+         pop  %r15
+         pop  %r14
+         pop  %r13
+         pop  %r12
+         pop  %rbp
+-        mov  %rax,%cr2
+-        cmpb $0,VCPU_vmx_launched(%rbx)
+         pop  %rbx
+         pop  %r11
+         pop  %r10
+@@ -108,7 +130,13 @@ UNLIKELY_END(realmode)
+         pop  %rdx
+         pop  %rsi
+         pop  %rdi
+-        je   .Lvmx_launch
++
++        jpe  .L_skip_verw
++        /* VERW clobbers ZF, but preserves all others, including SF. */
++        verw STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)(%rsp)
++.L_skip_verw:
++
++        jns  .Lvmx_launch
+
+         /*.Lvmx_resume:*/
+         VMRESUME
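
As a minimal C sketch of the flag arithmetic above: the SCF_verw value below
is hypothetical (the real constant lives in Xen's spec_ctrl headers); the
only requirement, enforced by the BUILD_BUG_ON, is that it is a single bit in
the bottom byte.

    #include <assert.h>
    #include <stdint.h>

    #define SCF_verw (1u << 3)  /* hypothetical single-bit value in the low byte */

    /* x86 PF is set when the low byte of a result has an even number of set bits. */
    static int pf(uint32_t v) { return __builtin_parity(v & 0xff) == 0; }

    /* x86 SF is simply the top bit of the result. */
    static int sf(uint32_t v) { return (v >> 31) & 1; }

    int main(void)
    {
        for (unsigned launched = 0; launched < 2; launched++)
            for (unsigned verw = 0; verw < 2; verw++) {
                uint32_t ecx = (uint32_t)launched << 31;  /* movzbl + shl $31 */
                uint32_t eax = verw ? SCF_verw : 0;       /* movzbl + and $SCF_verw */
                uint32_t res = eax | ecx;                 /* or %eax, %ecx */

                assert(sf(res) == (int)launched);  /* SF = VCPU_vmx_launched */
                assert(pf(res) == !verw);          /* PF = !SCF_verw */
            }
        return 0;
    }

Hence `jpe` (jump if PF set) skips the VERW exactly when SCF_verw was clear,
and `jns` (jump if SF clear) takes the launch path exactly when
VCPU_vmx_launched was zero, matching the removed `cmpb`/`je` pair.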
+diff --git a/xen/arch/x86/include/asm/asm_defns.h b/xen/arch/x86/include/asm/asm_defns.h
+index d9431180cf..abc6822b08 100644
+--- a/xen/arch/x86/include/asm/asm_defns.h
++++ b/xen/arch/x86/include/asm/asm_defns.h
+@@ -81,6 +81,14 @@ register unsigned long current_stack_pointer asm("rsp");
+
+ #ifdef __ASSEMBLY__
+
++.macro BUILD_BUG_ON condstr, cond:vararg
++    .if \cond
++        .error "Condition \"\condstr\" not satisfied"
++    .endif
++.endm
++/* preprocessor macro to make error message more user friendly */
++#define BUILD_BUG_ON(cond) BUILD_BUG_ON #cond, cond
++
+ #ifdef HAVE_AS_QUOTED_SYM
+ #define SUBSECTION_LBL(tag)                        \
+     .ifndef .L.tag;                                \
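
A standalone sketch of how this assembler-level BUILD_BUG_ON behaves,
assuming a preprocessed assembly file (e.g. a check.S assembled via
`gcc -c check.S`; the SCF_verw value here is again hypothetical):

    /* check.S: the .S suffix matters, the file must pass through cpp. */
    .macro BUILD_BUG_ON condstr, cond:vararg
        .if \cond
        .error "Condition \"\condstr\" not satisfied"
        .endif
    .endm
    #define BUILD_BUG_ON(cond) BUILD_BUG_ON #cond, cond

    #define SCF_verw (1 << 3)       /* hypothetical value for the demo */

    BUILD_BUG_ON(SCF_verw & ~0xff)  /* passes: the flag fits in a byte */

    /* By contrast, BUILD_BUG_ON(SCF_verw & ~0x1) would abort the build,
     * with the .error directive quoting the condition text. */

Because `#cond` stringifies the argument before macro expansion, a failure
reports the symbolic condition rather than its numeric value.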
+diff --git a/xen/arch/x86/include/asm/spec_ctrl_asm.h b/xen/arch/x86/include/asm/spec_ctrl_asm.h
+index f4b8b9d956..ca9cb0f5dd 100644
+--- a/xen/arch/x86/include/asm/spec_ctrl_asm.h
++++ b/xen/arch/x86/include/asm/spec_ctrl_asm.h
+@@ -164,6 +164,13 @@
+ #endif
+ .endm
+
++/*
++ * Helper to improve the readability of stack displacements with %rsp in
++ * unusual positions.  Both @field and @top_of_stk should be constants from
++ * the same object.  @top_of_stk should be where %rsp is currently pointing.
++ */
++#define STK_REL(field, top_of_stk) ((field) - (top_of_stk))
++
+ .macro DO_SPEC_CTRL_COND_VERW
+ /*
+  * Requires %rsp=cpuinfo
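
The STK_REL arithmetic can be sanity-checked in plain C. The struct below is
a hypothetical, heavily simplified stand-in for struct cpu_info (the real
offsets come from asm-offsets.c); only the relative layout matters:

    #include <stddef.h>
    #include <stdio.h>

    struct cpu_info {
        struct {
            unsigned long error_code;  /* %rsp sits here once the GPRs are popped */
            unsigned long rip;
        } guest_cpu_user_regs;
        unsigned int verw_sel;
    };

    /* Same definition as the one added above. */
    #define STK_REL(field, top_of_stk) ((field) - (top_of_stk))

    int main(void)
    {
        size_t CPUINFO_error_code =
            offsetof(struct cpu_info, guest_cpu_user_regs.error_code);
        size_t CPUINFO_verw_sel = offsetof(struct cpu_info, verw_sel);

        /* verw STK_REL(CPUINFO_verw_sel, CPUINFO_error_code)(%rsp) becomes: */
        printf("verw %zu(%%rsp)\n",
               STK_REL(CPUINFO_verw_sel, CPUINFO_error_code));
        return 0;
    }

At the VERW site the GPRs have been popped, so %rsp points at the error_code
slot, and STK_REL turns the two absolute cpu_info offsets into the small
positive displacement the instruction needs.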
+diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
+index 31fa63b77f..a4e94d6930 100644
+--- a/xen/arch/x86/x86_64/asm-offsets.c
++++ b/xen/arch/x86/x86_64/asm-offsets.c
+@@ -135,6 +135,7 @@ void __dummy__(void)
+ #endif
+
+     OFFSET(CPUINFO_guest_cpu_user_regs, struct cpu_info, guest_cpu_user_regs);
++    OFFSET(CPUINFO_error_code, struct cpu_info, guest_cpu_user_regs.error_code);
+     OFFSET(CPUINFO_verw_sel, struct cpu_info, verw_sel);
+     OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
+     OFFSET(CPUINFO_per_cpu_offset, struct cpu_info, per_cpu_offset);
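
For reference, the asm-offsets.c mechanism that makes the new
CPUINFO_error_code constant visible to entry.S can be sketched as follows (a
simplified version of the technique; Xen's actual DEFINE/OFFSET plumbing
differs in detail):

    #include <stddef.h>

    struct cpu_info {
        struct { unsigned long error_code; } guest_cpu_user_regs;
        unsigned int verw_sel;
    };

    /* Embed each value as an immediate inside a recognisable marker string;
     * the build compiles this file with -S and greps the generated assembly
     * into a header of #defines that the assembly code can include. */
    #define DEFINE(sym, val) \
        asm volatile ( ".ascii \"==>" #sym " %0 <==\"" :: "i" (val) )
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

    void __dummy__(void)
    {
        OFFSET(CPUINFO_error_code, cpu_info, guest_cpu_user_regs.error_code);
        OFFSET(CPUINFO_verw_sel, cpu_info, verw_sel);
    }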
+--
+2.44.0
+