From 28d3f677ec97c98154311f64871ac48762cf980a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Tue, 11 Oct 2022 14:53:27 +0200
Subject: [PATCH 07/87] x86/shadow: tolerate failure in shadow_prealloc()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Prevent _shadow_prealloc() from calling BUG() when unable to fulfill
the pre-allocation, and instead have it return true/false.  Modify
shadow_prealloc() to crash the domain on allocation failure (if the
domain is not already dying), as shadow mode cannot operate normally
after that.  Also modify callers to gracefully handle
{_,}shadow_prealloc() failing to fulfill the request.
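
To illustrate the reworked contract, here is a minimal, self-contained
sketch of the new shape and calling pattern.  The struct and helpers
below are simplified stand-ins for illustration only, not the real Xen
definitions:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for struct domain. */
    struct domain {
        bool is_dying;
        unsigned int free_pages;
    };

    /* Stand-in for _shadow_prealloc(): reports failure instead of
     * calling BUG(). */
    static bool _shadow_prealloc(struct domain *d, unsigned int pages)
    {
        return d->free_pages >= pages;
    }

    /* Stand-in for domain_crash(). */
    static void domain_crash(struct domain *d)
    {
        d->is_dying = true;
        printf("domain crash: shadow pool exhausted\n");
    }

    /* Mirrors the new shadow_prealloc() shape: on failure, crash the
     * domain here (unless it is already dying) rather than relying on
     * every caller to do it. */
    static bool shadow_prealloc(struct domain *d, unsigned int pages)
    {
        bool ret = _shadow_prealloc(d, pages);

        if ( !ret && !d->is_dying )
            domain_crash(d);

        return ret;
    }

    int main(void)
    {
        struct domain d = { .is_dying = false, .free_pages = 0 };

        /* Callers now unwind gracefully instead of hitting BUG(). */
        if ( !shadow_prealloc(&d, 1) )
            return 1;

        return 0;
    }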

Note this in turn requires adjusting the callers of
sh_make_monitor_table() to also handle it returning INVALID_MFN.
sh_update_paging_modes() is likewise modified to add additional error
paths in case of allocation failure; some of those will return with
null monitor page tables (and the domain likely crashed).  This is no
different than the current error paths, but the newly introduced ones
are more likely to trigger.
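
The caller-side handling can be sketched as below (again with
stand-ins; mfn_t and INVALID_MFN are modeled here, not the real Xen
definitions):

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-ins for Xen's mfn_t and INVALID_MFN. */
    typedef struct { uint64_t m; } mfn_t;
    #define INVALID_MFN ((mfn_t){ UINT64_MAX })

    static bool mfn_eq(mfn_t a, mfn_t b)
    {
        return a.m == b.m;
    }

    /* Stand-in: returns INVALID_MFN when the underlying
     * preallocation fails. */
    static mfn_t sh_make_monitor_table(bool prealloc_ok)
    {
        return prealloc_ok ? (mfn_t){ 42 } : INVALID_MFN;
    }

    /* Models the new error path in sh_update_paging_modes(): bail out
     * early, leaving the monitor table unset; the domain will
     * typically have been crashed by shadow_prealloc() already. */
    static void update_paging_modes(bool prealloc_ok)
    {
        mfn_t mmfn = sh_make_monitor_table(prealloc_ok);

        if ( mfn_eq(mmfn, INVALID_MFN) )
            return;

        /* ... install mmfn as the monitor table ... */
    }

    int main(void)
    {
        update_paging_modes(false);
        return 0;
    }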

The newly added failure points in sh_update_paging_modes() also
require that, on some error return paths, the previous structures are
cleared, and thus the monitor table is null.
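
For the path that replaces an existing monitor table, the unwinding
looks roughly like the sketch below (sh_destroy_monitor_table() and
the types are again stand-ins modeling the real cleanup):

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { uint64_t m; } mfn_t;          /* stand-in */
    #define INVALID_MFN ((mfn_t){ UINT64_MAX })

    static bool mfn_eq(mfn_t a, mfn_t b)
    {
        return a.m == b.m;
    }

    /* Stand-ins: building the new table fails here; destroying the
     * old one is a no-op. */
    static mfn_t sh_make_monitor_table(void)
    {
        return INVALID_MFN;
    }

    static void sh_destroy_monitor_table(mfn_t old)
    {
        (void)old;
    }

    /* Models the mode-change path: if the replacement monitor table
     * cannot be built, tear down the old one and leave the table
     * null, as the patched sh_update_paging_modes() does. */
    static void replace_monitor_table(mfn_t old_mfn)
    {
        mfn_t new_mfn = sh_make_monitor_table();

        if ( mfn_eq(new_mfn, INVALID_MFN) )
        {
            sh_destroy_monitor_table(old_mfn);
            return;
        }

        /* ... install new_mfn as the monitor table ... */
    }

    int main(void)
    {
        replace_monitor_table((mfn_t){ 7 });
        return 0;
    }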

While there, change the type of shadow_prealloc()'s 'type' parameter
from u32 to unsigned int.

This is part of CVE-2022-33746 / XSA-410.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
master commit: b7f93c6afb12b6061e2d19de2f39ea09b569ac68
master date: 2022-10-11 14:22:53 +0200
---
 xen/arch/x86/mm/shadow/common.c  | 69 ++++++++++++++++++++++++--------
 xen/arch/x86/mm/shadow/hvm.c     |  4 +-
 xen/arch/x86/mm/shadow/multi.c   | 11 +++--
 xen/arch/x86/mm/shadow/private.h |  3 +-
 4 files changed, 66 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 00e520cbd05b..2067c7d16bb4 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -36,6 +36,7 @@
 #include <asm/flushtlb.h>
 #include <asm/shadow.h>
 #include <xen/numa.h>
+#include <public/sched.h>
 #include "private.h"
 
 DEFINE_PER_CPU(uint32_t,trace_shadow_path_flags);
@@ -928,14 +929,15 @@ static inline void trace_shadow_prealloc_unpin(struct domain *d, mfn_t smfn)
 
 /* Make sure there are at least count order-sized pages
  * available in the shadow page pool. */
-static void _shadow_prealloc(struct domain *d, unsigned int pages)
+static bool __must_check _shadow_prealloc(struct domain *d, unsigned int pages)
 {
     struct vcpu *v;
     struct page_info *sp, *t;
     mfn_t smfn;
     int i;
 
-    if ( d->arch.paging.shadow.free_pages >= pages ) return;
+    if ( d->arch.paging.shadow.free_pages >= pages )
+        return true;
 
     /* Shouldn't have enabled shadows if we've no vcpus. */
     ASSERT(d->vcpu && d->vcpu[0]);
@@ -951,7 +953,8 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages)
         sh_unpin(d, smfn);
 
         /* See if that freed up enough space */
-        if ( d->arch.paging.shadow.free_pages >= pages ) return;
+        if ( d->arch.paging.shadow.free_pages >= pages )
+            return true;
     }
 
     /* Stage two: all shadow pages are in use in hierarchies that are
@@ -974,7 +977,7 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages)
                 if ( d->arch.paging.shadow.free_pages >= pages )
                 {
                     guest_flush_tlb_mask(d, d->dirty_cpumask);
-                    return;
+                    return true;
                 }
             }
         }
@@ -987,7 +990,12 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages)
            d->arch.paging.shadow.total_pages,
            d->arch.paging.shadow.free_pages,
            d->arch.paging.shadow.p2m_pages);
-    BUG();
+
+    ASSERT(d->is_dying);
+
+    guest_flush_tlb_mask(d, d->dirty_cpumask);
+
+    return false;
 }
 
 /* Make sure there are at least count pages of the order according to
@@ -995,9 +1003,19 @@ static void _shadow_prealloc(struct domain *d, unsigned int pages)
  * This must be called before any calls to shadow_alloc().  Since this
  * will free existing shadows to make room, it must be called early enough
  * to avoid freeing shadows that the caller is currently working on. */
-void shadow_prealloc(struct domain *d, u32 type, unsigned int count)
+bool shadow_prealloc(struct domain *d, unsigned int type, unsigned int count)
 {
-    return _shadow_prealloc(d, shadow_size(type) * count);
+    bool ret = _shadow_prealloc(d, shadow_size(type) * count);
+
+    if ( !ret && !d->is_dying &&
+         (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
+        /*
+         * Failing to allocate memory required for shadow usage can only result in
+         * a domain crash; do it here rather than relying on every caller to do it.
+         */
+        domain_crash(d);
+
+    return ret;
 }
 
 /* Deliberately free all the memory we can: this will tear down all of
@@ -1218,7 +1236,7 @@ void shadow_free(struct domain *d, mfn_t smfn)
 static struct page_info *
 shadow_alloc_p2m_page(struct domain *d)
 {
-    struct page_info *pg;
+    struct page_info *pg = NULL;
 
     /* This is called both from the p2m code (which never holds the
      * paging lock) and the log-dirty code (which always does). */
@@ -1236,16 +1254,18 @@ shadow_alloc_p2m_page(struct domain *d)
                     d->arch.paging.shadow.p2m_pages,
                     shadow_min_acceptable_pages(d));
         }
-        paging_unlock(d);
-        return NULL;
+        goto out;
     }
 
-    shadow_prealloc(d, SH_type_p2m_table, 1);
+    if ( !shadow_prealloc(d, SH_type_p2m_table, 1) )
+        goto out;
+
     pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
     d->arch.paging.shadow.p2m_pages++;
     d->arch.paging.shadow.total_pages--;
     ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
 
+ out:
     paging_unlock(d);
 
     return pg;
@@ -1336,7 +1356,9 @@ int shadow_set_allocation(struct domain *d, unsigned int pages, bool *preempted)
         else if ( d->arch.paging.shadow.total_pages > pages )
         {
             /* Need to return memory to domheap */
-            _shadow_prealloc(d, 1);
+            if ( !_shadow_prealloc(d, 1) )
+                return -ENOMEM;
+
             sp = page_list_remove_head(&d->arch.paging.shadow.freelist);
             ASSERT(sp);
             /*
@@ -2334,12 +2356,13 @@ static void sh_update_paging_modes(struct vcpu *v)
     if ( mfn_eq(v->arch.paging.shadow.oos_snapshot[0], INVALID_MFN) )
     {
         int i;
+
+        if ( !shadow_prealloc(d, SH_type_oos_snapshot, SHADOW_OOS_PAGES) )
+            return;
+
         for(i = 0; i < SHADOW_OOS_PAGES; i++)
-        {
-            shadow_prealloc(d, SH_type_oos_snapshot, 1);
             v->arch.paging.shadow.oos_snapshot[i] =
                 shadow_alloc(d, SH_type_oos_snapshot, 0);
-        }
     }
 #endif /* OOS */
 
@@ -2403,6 +2426,9 @@ static void sh_update_paging_modes(struct vcpu *v)
             mfn_t mmfn = sh_make_monitor_table(
                              v, v->arch.paging.mode->shadow.shadow_levels);
 
+            if ( mfn_eq(mmfn, INVALID_MFN) )
+                return;
+
             v->arch.hvm.monitor_table = pagetable_from_mfn(mmfn);
             make_cr3(v, mmfn);
             hvm_update_host_cr3(v);
@@ -2441,6 +2467,12 @@ static void sh_update_paging_modes(struct vcpu *v)
                 v->arch.hvm.monitor_table = pagetable_null();
                 new_mfn = sh_make_monitor_table(
                               v, v->arch.paging.mode->shadow.shadow_levels);
+                if ( mfn_eq(new_mfn, INVALID_MFN) )
+                {
+                    sh_destroy_monitor_table(v, old_mfn,
+                                             old_mode->shadow.shadow_levels);
+                    return;
+                }
                 v->arch.hvm.monitor_table = pagetable_from_mfn(new_mfn);
                 SHADOW_PRINTK("new monitor table %"PRI_mfn "\n",
                                mfn_x(new_mfn));
@@ -2526,7 +2558,12 @@ void sh_set_toplevel_shadow(struct vcpu *v,
     if ( !mfn_valid(smfn) )
     {
         /* Make sure there's enough free shadow memory. */
-        shadow_prealloc(d, root_type, 1);
+        if ( !shadow_prealloc(d, root_type, 1) )
+        {
+            new_entry = pagetable_null();
+            goto install_new_entry;
+        }
+
         /* Shadow the page. */
         smfn = make_shadow(v, gmfn, root_type);
     }
diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c
index d5f42102a0bd..a0878d9ad71a 100644
--- a/xen/arch/x86/mm/shadow/hvm.c
+++ b/xen/arch/x86/mm/shadow/hvm.c
@@ -700,7 +700,9 @@ mfn_t sh_make_monitor_table(const struct vcpu *v, unsigned int shadow_levels)
     ASSERT(!pagetable_get_pfn(v->arch.hvm.monitor_table));
 
     /* Guarantee we can get the memory we need */
-    shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
+    if ( !shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS) )
+        return INVALID_MFN;
+
     m4mfn = shadow_alloc(d, SH_type_monitor_table, 0);
     mfn_to_page(m4mfn)->shadow_flags = 4;
 
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 2ff78fe3362c..c07af0bd99da 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -2440,9 +2440,14 @@ static int sh_page_fault(struct vcpu *v,
      * Preallocate shadow pages *before* removing writable accesses
      * otherwise an OOS L1 might be demoted and promoted again with
      * writable mappings. */
-    shadow_prealloc(d,
-                    SH_type_l1_shadow,
-                    GUEST_PAGING_LEVELS < 4 ? 1 : GUEST_PAGING_LEVELS - 1);
+    if ( !shadow_prealloc(d, SH_type_l1_shadow,
+                          GUEST_PAGING_LEVELS < 4
+                          ? 1 : GUEST_PAGING_LEVELS - 1) )
+    {
+        paging_unlock(d);
+        put_gfn(d, gfn_x(gfn));
+        return 0;
+    }
 
     rc = gw_remove_write_accesses(v, va, &gw);
 
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index 35efb1b984fb..738214f75e8d 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -383,7 +383,8 @@ void shadow_promote(struct domain *d, mfn_t gmfn, u32 type);
 void shadow_demote(struct domain *d, mfn_t gmfn, u32 type);
 
 /* Shadow page allocation functions */
-void  shadow_prealloc(struct domain *d, u32 shadow_type, unsigned int count);
+bool __must_check shadow_prealloc(struct domain *d, unsigned int shadow_type,
+                                  unsigned int count);
 mfn_t shadow_alloc(struct domain *d,
                     u32 shadow_type,
                     unsigned long backpointer);
-- 
2.37.4