|
|
b2a8d1d |
From: George Dunlap <george.dunlap@citrix.com>
|
|
|
b2a8d1d |
Subject: xen/mm: make sure node is less than MAX_NUMNODES
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
The output of MEMF_get_node(memflags) can be as large as nodeid_t can
|
|
|
b2a8d1d |
hold (currently 255). This is then used as an index to arrays of size
|
|
|
b2a8d1d |
MAX_NUMNODES, which is 64 on x86 and 1 on ARM. It can be passed in by an
|
|
|
b2a8d1d |
untrusted guest (via memory_exchange and increase_reservation) and is
|
|
|
b2a8d1d |
not currently bounds-checked.
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
Check the value in page_alloc.c before using it, and also check the
|
|
|
b2a8d1d |
value in the hypercall call sites and return -EINVAL if appropriate.
|
|
|
b2a8d1d |
Don't permit domains other than the hardware or control domain to
|
|
|
b2a8d1d |
allocate node-constrained memory.
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
This is XSA-231.
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
Reported-by: Matthew Daley <mattd@bugfuzz.com>
|
|
|
b2a8d1d |
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
|
|
|
b2a8d1d |
Signed-off-by: Jan Beulich <jbeulich@suse.com>
|
|
|
b2a8d1d |
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
--- a/xen/common/memory.c
|
|
|
b2a8d1d |
+++ b/xen/common/memory.c
|
|
|
b2a8d1d |
@@ -390,6 +390,31 @@ static void decrease_reservation(struct
|
|
|
b2a8d1d |
a->nr_done = i;
|
|
|
b2a8d1d |
}
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
+static bool_t propagate_node(unsigned int xmf, unsigned int *memflags)
|
|
|
b2a8d1d |
+{
|
|
|
b2a8d1d |
+ const struct domain *currd = current->domain;
|
|
|
b2a8d1d |
+
|
|
|
b2a8d1d |
+ BUILD_BUG_ON(XENMEMF_get_node(0) != NUMA_NO_NODE);
|
|
|
b2a8d1d |
+ BUILD_BUG_ON(MEMF_get_node(0) != NUMA_NO_NODE);
|
|
|
b2a8d1d |
+
|
|
|
b2a8d1d |
+ if ( XENMEMF_get_node(xmf) == NUMA_NO_NODE )
|
|
|
b2a8d1d |
+ return 1;
|
|
|
b2a8d1d |
+
|
|
|
b2a8d1d |
+ if ( is_hardware_domain(currd) || is_control_domain(currd) )
|
|
|
b2a8d1d |
+ {
|
|
|
b2a8d1d |
+ if ( XENMEMF_get_node(xmf) >= MAX_NUMNODES )
|
|
|
b2a8d1d |
+ return 0;
|
|
|
b2a8d1d |
+
|
|
|
b2a8d1d |
+ *memflags |= MEMF_node(XENMEMF_get_node(xmf));
|
|
|
b2a8d1d |
+ if ( xmf & XENMEMF_exact_node_request )
|
|
|
b2a8d1d |
+ *memflags |= MEMF_exact_node;
|
|
|
b2a8d1d |
+ }
|
|
|
b2a8d1d |
+ else if ( xmf & XENMEMF_exact_node_request )
|
|
|
b2a8d1d |
+ return 0;
|
|
|
b2a8d1d |
+
|
|
|
b2a8d1d |
+ return 1;
|
|
|
b2a8d1d |
+}
|
|
|
b2a8d1d |
+
|
|
|
b2a8d1d |
static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
|
|
|
b2a8d1d |
{
|
|
|
b2a8d1d |
struct xen_memory_exchange exch;
|
|
|
b2a8d1d |
@@ -462,6 +487,12 @@ static long memory_exchange(XEN_GUEST_HA
|
|
|
b2a8d1d |
}
|
|
|
b2a8d1d |
}
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
+ if ( unlikely(!propagate_node(exch.out.mem_flags, &memflags)) )
|
|
|
b2a8d1d |
+ {
|
|
|
b2a8d1d |
+ rc = -EINVAL;
|
|
|
b2a8d1d |
+ goto fail_early;
|
|
|
b2a8d1d |
+ }
|
|
|
b2a8d1d |
+
|
|
|
b2a8d1d |
d = rcu_lock_domain_by_any_id(exch.in.domid);
|
|
|
b2a8d1d |
if ( d == NULL )
|
|
|
b2a8d1d |
{
|
|
|
b2a8d1d |
@@ -480,7 +511,6 @@ static long memory_exchange(XEN_GUEST_HA
|
|
|
b2a8d1d |
d,
|
|
|
b2a8d1d |
XENMEMF_get_address_bits(exch.out.mem_flags) ? :
|
|
|
b2a8d1d |
(BITS_PER_LONG+PAGE_SHIFT)));
|
|
|
b2a8d1d |
- memflags |= MEMF_node(XENMEMF_get_node(exch.out.mem_flags));
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
for ( i = (exch.nr_exchanged >> in_chunk_order);
|
|
|
b2a8d1d |
i < (exch.in.nr_extents >> in_chunk_order);
|
|
|
b2a8d1d |
@@ -834,12 +864,8 @@ static int construct_memop_from_reservat
|
|
|
b2a8d1d |
}
|
|
|
b2a8d1d |
read_unlock(&d->vnuma_rwlock);
|
|
|
b2a8d1d |
}
|
|
|
b2a8d1d |
- else
|
|
|
b2a8d1d |
- {
|
|
|
b2a8d1d |
- a->memflags |= MEMF_node(XENMEMF_get_node(r->mem_flags));
|
|
|
b2a8d1d |
- if ( r->mem_flags & XENMEMF_exact_node_request )
|
|
|
b2a8d1d |
- a->memflags |= MEMF_exact_node;
|
|
|
b2a8d1d |
- }
|
|
|
b2a8d1d |
+ else if ( unlikely(!propagate_node(r->mem_flags, &a->memflags)) )
|
|
|
b2a8d1d |
+ return -EINVAL;
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
return 0;
|
|
|
b2a8d1d |
}
|
|
|
b2a8d1d |
--- a/xen/common/page_alloc.c
|
|
|
b2a8d1d |
+++ b/xen/common/page_alloc.c
|
|
|
b2a8d1d |
@@ -711,9 +711,13 @@ static struct page_info *alloc_heap_page
|
|
|
b2a8d1d |
if ( node >= MAX_NUMNODES )
|
|
|
b2a8d1d |
node = cpu_to_node(smp_processor_id());
|
|
|
b2a8d1d |
}
|
|
|
b2a8d1d |
+ else if ( unlikely(node >= MAX_NUMNODES) )
|
|
|
b2a8d1d |
+ {
|
|
|
b2a8d1d |
+ ASSERT_UNREACHABLE();
|
|
|
b2a8d1d |
+ return NULL;
|
|
|
b2a8d1d |
+ }
|
|
|
b2a8d1d |
first_node = node;
|
|
|
b2a8d1d |
|
|
|
b2a8d1d |
- ASSERT(node < MAX_NUMNODES);
|
|
|
b2a8d1d |
ASSERT(zone_lo <= zone_hi);
|
|
|
b2a8d1d |
ASSERT(zone_hi < NR_ZONES);
|
|
|
b2a8d1d |
|