Diffstat (limited to '0039-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch')
-rw-r--r--  0039-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch | 56
1 file changed, 56 insertions, 0 deletions
diff --git a/0039-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch b/0039-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch
new file mode 100644
index 0000000..74bcf67
--- /dev/null
+++ b/0039-x86-spec-ctrl-Defer-CR4_PV32_RESTORE-on-the-cstar_en.patch
@@ -0,0 +1,56 @@
+From 3c924fe46b455834b5c04268db6b528b549668d1 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Fri, 10 Feb 2023 21:11:14 +0000
+Subject: [PATCH 39/61] x86/spec-ctrl: Defer CR4_PV32_RESTORE on the
+ cstar_enter path
+
+As stated (correctly) by the comment next to SPEC_CTRL_ENTRY_FROM_PV (between
+the two hunks visible in the patch), RETs are not safe prior to that point.
+
+CR4_PV32_RESTORE hides a CALL/RET pair in certain configurations (PV32
+compiled in, SMEP or SMAP active), and the RET can be attacked with one of
+several known speculative issues.
+
+Furthermore, CR4_PV32_RESTORE hides a reference to the cr4_pv32_mask global
+variable which, when XPTI is active, is not safe to access before Xen's full
+pagetables have been restored.
+
+This crash has gone unnoticed because only AMD CPUs permit the SYSCALL
+instruction in compatibility mode, and those CPUs are not vulnerable to
+Meltdown, so they do not activate XPTI by default.
+
+This is XSA-429 / CVE-2022-42331
+
+Fixes: 5e7962901131 ("x86/entry: Organise the use of MSR_SPEC_CTRL at each entry/exit point")
+Fixes: 5784de3e2067 ("x86: Meltdown band-aid against malicious 64-bit PV guests")
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+(cherry picked from commit df5b055b12116d9e63ced59ae5389e69a2a3de48)
+---
+ xen/arch/x86/x86_64/entry.S | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
+index fba8ae498f..db2ea7871e 100644
+--- a/xen/arch/x86/x86_64/entry.S
++++ b/xen/arch/x86/x86_64/entry.S
+@@ -288,7 +288,6 @@ ENTRY(cstar_enter)
+ ALTERNATIVE "", "setssbsy", X86_FEATURE_XEN_SHSTK
+ #endif
+ push %rax /* Guest %rsp */
+- CR4_PV32_RESTORE
+ movq 8(%rsp), %rax /* Restore guest %rax. */
+ movq $FLAT_USER_SS32, 8(%rsp) /* Assume a 64bit domain. Compat handled lower. */
+ pushq %r11
+@@ -312,6 +311,8 @@ ENTRY(cstar_enter)
+ .Lcstar_cr3_okay:
+ sti
+
++ CR4_PV32_RESTORE
++
+ movq STACK_CPUINFO_FIELD(current_vcpu)(%rbx), %rbx
+
+ #ifdef CONFIG_PV32
+--
+2.40.0
+
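For context on why the old placement was dangerous: CR4_PV32_RESTORE is a macro
whose expansion is patched in via Xen's alternatives framework. The sketch below
is an approximation reconstructed from the commit message above; the helper name
cr4_pv32_restore and the X86_FEATURE_XEN_SMEP / X86_FEATURE_XEN_SMAP feature
bits follow upstream naming, but the exact expansion may differ between Xen
versions, so treat the layout as illustrative only.

    /* Approximate shape of the macro (only emitted when PV32 is compiled in). */
    #define CR4_PV32_RESTORE                                        \
        ALTERNATIVE_2 "",                                           \
            "call cr4_pv32_restore", X86_FEATURE_XEN_SMEP,          \
            "call cr4_pv32_restore", X86_FEATURE_XEN_SMAP

    /*
     * The cr4_pv32_restore helper (sketch of its relevant properties):
     *  - reads the cr4_pv32_mask global, which under XPTI is only reachable
     *    once Xen's full pagetables are in place, i.e. after the %cr3 switch
     *    ending at .Lcstar_cr3_okay;
     *  - conditionally rewrites %cr4 to re-enable SMEP/SMAP;
     *  - returns with a RET, which is only speculation-safe once
     *    SPEC_CTRL_ENTRY_FROM_PV has run.
     */

Deferring the invocation until after .Lcstar_cr3_okay (the second hunk) therefore
places both the RET and the cr4_pv32_mask access after the CR3 restore and the
entry-path speculation protections, which is exactly what the patch does.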