From 564de020d29fbc4efd20ef8052051e86b2465a1a Mon Sep 17 00:00:00 2001
From: Jan Beulich <jbeulich@suse.com>
Date: Tue, 21 Mar 2023 12:01:01 +0000
Subject: [PATCH 38/61] x86/HVM: serialize pinned cache attribute list
 manipulation

While the RCU variants of list insertion and removal allow lockless list
traversal (with RCU just read-locked), insertions and removals still
need serializing amongst themselves. To keep things simple, use the
domain lock for this purpose.

This is CVE-2022-42334 / part of XSA-428.

Fixes: 642123c5123f ("x86/hvm: provide XEN_DMOP_pin_memory_cacheattr")
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Julien Grall <jgrall@amazon.com>
(cherry picked from commit 829ec245cf66560e3b50d140ccb3168e7fb7c945)
---
 xen/arch/x86/hvm/mtrr.c | 51 +++++++++++++++++++++++++----------------
 1 file changed, 31 insertions(+), 20 deletions(-)

diff --git a/xen/arch/x86/hvm/mtrr.c b/xen/arch/x86/hvm/mtrr.c
index 98e55bbdbd..9b3b33012b 100644
--- a/xen/arch/x86/hvm/mtrr.c
+++ b/xen/arch/x86/hvm/mtrr.c
@@ -594,7 +594,7 @@ static void free_pinned_cacheattr_entry(struct rcu_head *rcu)
 int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
                                  uint64_t gfn_end, uint32_t type)
 {
-    struct hvm_mem_pinned_cacheattr_range *range;
+    struct hvm_mem_pinned_cacheattr_range *range, *newr;
     unsigned int nr = 0;
     int rc = 1;
 
@@ -608,14 +608,15 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
     {
     case XEN_DOMCTL_DELETE_MEM_CACHEATTR:
         /* Remove the requested range. */
-        rcu_read_lock(&pinned_cacheattr_rcu_lock);
-        list_for_each_entry_rcu ( range,
-                                  &d->arch.hvm.pinned_cacheattr_ranges,
-                                  list )
+        domain_lock(d);
+        list_for_each_entry ( range,
+                              &d->arch.hvm.pinned_cacheattr_ranges,
+                              list )
             if ( range->start == gfn_start && range->end == gfn_end )
             {
-                rcu_read_unlock(&pinned_cacheattr_rcu_lock);
                 list_del_rcu(&range->list);
+                domain_unlock(d);
+
                 type = range->type;
                 call_rcu(&range->rcu, free_pinned_cacheattr_entry);
                 p2m_memory_type_changed(d);
@@ -636,7 +637,7 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
                 }
                 return 0;
             }
-        rcu_read_unlock(&pinned_cacheattr_rcu_lock);
+        domain_unlock(d);
         return -ENOENT;
 
     case PAT_TYPE_UC_MINUS:
@@ -651,7 +652,10 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
         return -EINVAL;
     }
 
-    rcu_read_lock(&pinned_cacheattr_rcu_lock);
+    newr = xzalloc(struct hvm_mem_pinned_cacheattr_range);
+
+    domain_lock(d);
+
     list_for_each_entry_rcu ( range,
                               &d->arch.hvm.pinned_cacheattr_ranges,
                               list )
@@ -669,27 +673,34 @@ int hvm_set_mem_pinned_cacheattr(struct domain *d, uint64_t gfn_start,
         }
         ++nr;
     }
-    rcu_read_unlock(&pinned_cacheattr_rcu_lock);
+
     if ( rc <= 0 )
-        return rc;
+        /* nothing */;
+    else if ( nr >= 64 /* The limit is arbitrary. */ )
+        rc = -ENOSPC;
+    else if ( !newr )
+        rc = -ENOMEM;
+    else
+    {
+        newr->start = gfn_start;
+        newr->end = gfn_end;
+        newr->type = type;
 
-    if ( nr >= 64 /* The limit is arbitrary. */ )
-        return -ENOSPC;
+        list_add_rcu(&newr->list, &d->arch.hvm.pinned_cacheattr_ranges);
 
-    range = xzalloc(struct hvm_mem_pinned_cacheattr_range);
-    if ( range == NULL )
-        return -ENOMEM;
+        newr = NULL;
+        rc = 0;
+    }
+
+    domain_unlock(d);
 
-    range->start = gfn_start;
-    range->end = gfn_end;
-    range->type = type;
+    xfree(newr);
 
-    list_add_rcu(&range->list, &d->arch.hvm.pinned_cacheattr_ranges);
     p2m_memory_type_changed(d);
     if ( type != PAT_TYPE_WRBACK )
         flush_all(FLUSH_CACHE);
-    return 0;
+    return rc;
 }
 
 static int hvm_save_mtrr_msr(struct vcpu *v, hvm_domain_context_t *h)
--
2.40.0
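
The locking scheme the commit message describes, shown as a stand-alone sketch rather than Xen code: RCU keeps list traversal lockless, but every insertion and removal has to go through one common lock, which is the role domain_lock(d) plays in the patch. In the sketch below a plain pthread mutex stands in for that lock, the names (struct range, insert_range, ranges_lock) are made up for illustration only, and the RCU read side is left out entirely.

/*
 * Illustrative sketch only, not part of the patch: two writer threads
 * insert into a shared list, and a single mutex serializes them the way
 * domain_lock() serializes pinned-cacheattr list updates in the patch.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct range {
    unsigned long start, end;
    struct range *next;
};

static struct range *ranges;            /* head of a singly linked list */
static pthread_mutex_t ranges_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writers: every insertion happens under the same lock. */
static void insert_range(unsigned long start, unsigned long end)
{
    struct range *r = calloc(1, sizeof(*r));

    if ( !r )
        return;
    r->start = start;
    r->end = end;

    pthread_mutex_lock(&ranges_lock);
    r->next = ranges;
    ranges = r;                         /* cf. list_add_rcu() in the patch */
    pthread_mutex_unlock(&ranges_lock);
}

static void *writer(void *arg)
{
    unsigned long base = (unsigned long)arg;

    for ( unsigned long i = 0; i < 1000; ++i )
        insert_range(base + i, base + i);
    return NULL;
}

int main(void)
{
    pthread_t t1, t2;
    unsigned int nr = 0;

    /* Two concurrent writers, serialized only by ranges_lock. */
    pthread_create(&t1, NULL, writer, (void *)0x1000UL);
    pthread_create(&t2, NULL, writer, (void *)0x2000UL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);

    /* Single-threaded here; real readers would traverse under RCU. */
    for ( struct range *r = ranges; r; r = r->next )
        ++nr;
    printf("%u ranges inserted\n", nr); /* expect 2000 */

    return 0;
}

Without ranges_lock the two writers would race on the list head and lose or corrupt insertions, which is the same class of problem the patch closes for concurrent pinned-cacheattr updates.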