From 3c22a9bf8703a297431ac5ad110e6d523758eae1 Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Tue, 26 Sep 2023 20:06:57 +0100
Subject: [PATCH 26/30] x86/svm: Fix asymmetry with AMD DR MASK context
 switching

The handling of MSR_DR{0..3}_MASK is asymmetric between PV and HVM guests.

HVM guests context switch the masks in based on the guest's view of DBEXT,
whereas PV guests switch them in based on the host capability.  Both guest
types leave the
context dirty for the next vCPU.
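
In sketch form, with load_dr_masks() standing in (hypothetically) for the four
MSR_AMD64_DR{0..3}_ADDRESS_MASK wrmsrl() calls seen in the diff below:

    /* HVM switch-in: keyed on the guest's view of DBEXT. */
    if ( v->domain->arch.cpuid->extd.dbext )
        load_dr_masks(v);

    /* PV switch-in: keyed on the host capability. */
    if ( boot_cpu_has(X86_FEATURE_DBEXT) )
        load_dr_masks(v);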

This leads to the following issue:

 * PV or HVM vCPU has debugging active (%dr7 + mask)
 * Switch out deactivates %dr7 but leaves other state stale in hardware
 * HVM vCPU with debugging active, but which can't see DBEXT, is switched in
 * Switch in loads %dr7 but leaves the mask MSRs alone

Now, the HVM vCPU is operating in the context of the prior vCPU's mask MSR,
and furthermore in a case where it genuinely expects there to be no masking
MSRs.

As a stopgap, adjust the HVM path to switch in/out the masks based on host
capabilities rather than guest visibility (i.e. like the PV path).  Adjustment
of the intercepts still needs to be dependent on the guest visibility
of DBEXT.
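
In sketch form, the HVM switch-in path (condensed from the svm.c hunk below)
becomes:

    if ( boot_cpu_has(X86_FEATURE_DBEXT) )
    {
        /* Always restore the mask MSRs alongside %dr7 ... */
        wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
        /* ... and likewise for DR1-3 ... */

        /* ... but only drop the intercepts if the guest can see DBEXT. */
        if ( v->domain->arch.cpuid->extd.dbext )
            svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK,
                              MSR_INTERCEPT_NONE);
    }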

This is part of XSA-444 / CVE-2023-34327

Fixes: c097f54912d3 ("x86/SVM: support data breakpoint extension registers")
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
(cherry picked from commit 5d54282f984bb9a7a65b3d12208584f9fdf1c8e1)
---
 xen/arch/x86/hvm/svm/svm.c | 24 ++++++++++++++++++------
 xen/arch/x86/traps.c       |  5 +++++
 2 files changed, 23 insertions(+), 6 deletions(-)

diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index a019d196e0..ba4069f910 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -185,6 +185,10 @@ static void svm_save_dr(struct vcpu *v)
     v->arch.hvm.flag_dr_dirty = 0;
     vmcb_set_dr_intercepts(vmcb, ~0u);
 
+    /*
+     * The guest can only have changed the mask MSRs if we previously dropped
+     * intercepts.  Re-read them from hardware.
+     */
     if ( v->domain->arch.cpuid->extd.dbext )
     {
         svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_RW);
@@ -216,17 +220,25 @@ static void __restore_debug_registers(struct vmcb_struct *vmcb, struct vcpu *v)
 
     ASSERT(v == current);
 
-    if ( v->domain->arch.cpuid->extd.dbext )
+    /*
+     * Both the PV and HVM paths leave stale DR_MASK values in hardware on
+     * context-switch-out.  If we're activating %dr7 for the guest, we must
+     * sync the DR_MASKs too, whether or not the guest can see them.
+     */
+    if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
-        svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_NONE);
-        svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_NONE);
-        svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
-        svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
-
         wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, v->arch.msrs->dr_mask[0]);
         wrmsrl(MSR_AMD64_DR1_ADDRESS_MASK, v->arch.msrs->dr_mask[1]);
         wrmsrl(MSR_AMD64_DR2_ADDRESS_MASK, v->arch.msrs->dr_mask[2]);
         wrmsrl(MSR_AMD64_DR3_ADDRESS_MASK, v->arch.msrs->dr_mask[3]);
+
+        if ( v->domain->arch.cpuid->extd.dbext )
+        {
+            svm_intercept_msr(v, MSR_AMD64_DR0_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+            svm_intercept_msr(v, MSR_AMD64_DR1_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+            svm_intercept_msr(v, MSR_AMD64_DR2_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+            svm_intercept_msr(v, MSR_AMD64_DR3_ADDRESS_MASK, MSR_INTERCEPT_NONE);
+        }
     }
 
     write_debugreg(0, v->arch.dr[0]);
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index f7992ff230..a142a63dd8 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2314,6 +2314,11 @@ void activate_debugregs(const struct vcpu *curr)
     if ( curr->arch.dr7 & DR7_ACTIVE_MASK )
         write_debugreg(7, curr->arch.dr7);
 
+    /*
+     * Both the PV and HVM paths leave stale DR_MASK values in hardware on
+     * context-switch-out.  If we're activating %dr7 for the guest, we must
+     * sync the DR_MASKs too, whether or not the guest can see them.
+     */
     if ( boot_cpu_has(X86_FEATURE_DBEXT) )
     {
         wrmsrl(MSR_AMD64_DR0_ADDRESS_MASK, curr->arch.msrs->dr_mask[0]);
-- 
2.43.0