From 745e0b300dc3f5000e6d48c273b405d4bcc29ba7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roger=20Pau=20Monn=C3=A9?= <roger.pau@citrix.com>
Date: Tue, 11 Oct 2022 14:53:41 +0200
Subject: [PATCH 08/87] x86/p2m: refuse new allocations for dying domains
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This will in particular prevent any attempts to add entries to the p2m,
once - in a subsequent change - non-root entries have been removed.

This is part of CVE-2022-33746 / XSA-410.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tim Deegan <tim@xen.org>
master commit: ff600a8cf8e36f8ecbffecf96a035952e022ab87
master date: 2022-10-11 14:23:22 +0200
---
 xen/arch/x86/mm/hap/hap.c       |  5 ++++-
 xen/arch/x86/mm/shadow/common.c | 18 ++++++++++++++----
 2 files changed, 18 insertions(+), 5 deletions(-)

diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index d75dc2b9ed3d..787991233e53 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -245,6 +245,9 @@ static struct page_info *hap_alloc(struct domain *d)
 
     ASSERT(paging_locked_by_me(d));
 
+    if ( unlikely(d->is_dying) )
+        return NULL;
+
     pg = page_list_remove_head(&d->arch.paging.hap.freelist);
     if ( unlikely(!pg) )
         return NULL;
@@ -281,7 +284,7 @@ static struct page_info *hap_alloc_p2m_page(struct domain *d)
         d->arch.paging.hap.p2m_pages++;
         ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
     }
-    else if ( !d->arch.paging.p2m_alloc_failed )
+    else if ( !d->arch.paging.p2m_alloc_failed && !d->is_dying )
     {
         d->arch.paging.p2m_alloc_failed = 1;
         dprintk(XENLOG_ERR, "d%i failed to allocate from HAP pool\n",
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 2067c7d16bb4..9807f6ec6c00 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -939,6 +939,10 @@ static bool __must_check _shadow_prealloc(struct domain *d, unsigned int pages)
     if ( d->arch.paging.shadow.free_pages >= pages )
         return true;
 
+    if ( unlikely(d->is_dying) )
+        /* No reclaim when the domain is dying, teardown will take care of it. */
+        return false;
+
     /* Shouldn't have enabled shadows if we've no vcpus. */
     ASSERT(d->vcpu && d->vcpu[0]);
 
@@ -991,7 +995,7 @@ static bool __must_check _shadow_prealloc(struct domain *d, unsigned int pages)
            d->arch.paging.shadow.free_pages,
            d->arch.paging.shadow.p2m_pages);
 
-    ASSERT(d->is_dying);
+    ASSERT_UNREACHABLE();
 
     guest_flush_tlb_mask(d, d->dirty_cpumask);
 
@@ -1005,10 +1009,13 @@ static bool __must_check _shadow_prealloc(struct domain *d, unsigned int pages)
  * to avoid freeing shadows that the caller is currently working on. */
 bool shadow_prealloc(struct domain *d, unsigned int type, unsigned int count)
 {
-    bool ret = _shadow_prealloc(d, shadow_size(type) * count);
+    bool ret;
 
-    if ( !ret && !d->is_dying &&
-         (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
+    if ( unlikely(d->is_dying) )
+        return false;
+
+    ret = _shadow_prealloc(d, shadow_size(type) * count);
+    if ( !ret && (!d->is_shutting_down || d->shutdown_code != SHUTDOWN_crash) )
         /*
          * Failing to allocate memory required for shadow usage can only result in
          * a domain crash, do it here rather that relying on every caller to do it.
@@ -1238,6 +1245,9 @@ shadow_alloc_p2m_page(struct domain *d)
 {
     struct page_info *pg = NULL;
 
+    if ( unlikely(d->is_dying) )
+        return NULL;
+
     /* This is called both from the p2m code (which never holds the
      * paging lock) and the log-dirty code (which always does). */
     paging_lock_recursive(d);
--
2.37.4
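
For context, the change boils down to one pattern repeated in each allocation path: test the domain's dying flag before touching the paging pool and fail the allocation outright, so that the teardown logic added later in the XSA-410 series never races with fresh p2m/shadow pages being handed out. The standalone sketch below models that guard in isolation; the demo_domain, demo_page and demo_alloc_page names are invented for illustration and are not Xen interfaces.

/*
 * Minimal model of "refuse new allocations for dying domains".
 * All identifiers here are hypothetical stand-ins, not Xen code.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_page {
    struct demo_page *next;
};

struct demo_domain {
    bool is_dying;               /* set once domain teardown has started */
    struct demo_page *freelist;  /* pool the paging code allocates from */
    unsigned int free_pages;
};

/* Allocation helper mirroring the guard the patch adds: once the domain is
 * dying, hand out nothing, leaving the pool entirely to the teardown path. */
static struct demo_page *demo_alloc_page(struct demo_domain *d)
{
    struct demo_page *pg;

    if ( d->is_dying )
        /* No allocation (and no reclaim) for a dying domain. */
        return NULL;

    pg = d->freelist;
    if ( !pg )
        return NULL;

    d->freelist = pg->next;
    d->free_pages--;

    return pg;
}

int main(void)
{
    struct demo_page pool[2] = { { &pool[1] }, { NULL } };
    struct demo_domain d = { .is_dying = false,
                             .freelist = &pool[0],
                             .free_pages = 2 };

    printf("live domain:  %p\n", (void *)demo_alloc_page(&d)); /* a page */

    d.is_dying = true;
    printf("dying domain: %p\n", (void *)demo_alloc_page(&d)); /* always NULL */

    return 0;
}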