diff --git a/xen.spec b/xen.spec
index a37353e..e451783 100644
--- a/xen.spec
+++ b/xen.spec
@@ -58,7 +58,7 @@
 Summary: Xen is a virtual machine monitor
 Name: xen
 Version: 4.13.0
-Release: 6%{?dist}
+Release: 7%{?dist}
 License: GPLv2+ and LGPLv2+ and BSD
 URL: http://xen.org/
 Source0: https://downloads.xenproject.org/release/xen/%{version}/xen-%{version}.tar.gz
@@ -116,6 +116,11 @@
 Patch43: xsa312.patch
 Patch44: xen.ocaml.4.10.patch
 Patch45: xen.gcc10.fixes.patch
 Patch46: xen.pygrubfix.patch
+Patch47: xsa313-1.patch
+Patch48: xsa313-2.patch
+Patch49: xsa314-4.13.patch
+Patch50: xsa316-xen.patch
+Patch51: xsa318.patch
 
 %if %build_qemutrad
@@ -324,6 +329,11 @@ manage Xen virtual machines.
 %patch44 -p1
 %patch45 -p1
 %patch46 -p1
+%patch47 -p1
+%patch48 -p1
+%patch49 -p1
+%patch50 -p1
+%patch51 -p1
 
 # qemu-xen-traditional patches
 pushd tools/qemu-xen-traditional
@@ -911,6 +921,15 @@
 fi
 %endif
 
 %changelog
+* Tue Apr 14 2020 Michael Young - 4.13.0-7
+- multiple xenoprof issues [XSA-313, CVE-2020-11740, CVE-2020-11741]
+  (#1823912, #1823914)
+- Missing memory barriers in read-write unlock paths [XSA-314,
+  CVE-2020-11739] (#1823784)
+- Bad error path in GNTTABOP_map_grant [XSA-316, CVE-2020-11743] (#1823926)
+- Bad continuation handling in GNTTABOP_copy [XSA-318, CVE-2020-11742]
+  (#1823943)
+
 * Tue Mar 17 2020 Michael Young - 4.13.0-6
 - fix issues in pygrub dependency found by python 3.8
diff --git a/xsa313-1.patch b/xsa313-1.patch
new file mode 100644
index 0000000..95fde7e
--- /dev/null
+++ b/xsa313-1.patch
@@ -0,0 +1,26 @@
+From: Jan Beulich
+Subject: xenoprof: clear buffer intended to be shared with guests
+
+alloc_xenheap_pages() making use of MEMF_no_scrub is fine for Xen
+internally used allocations, but buffers allocated to be shared with
+(unpriviliged) guests need to be zapped of their prior content.
+
+This is part of XSA-313.
+
+Reported-by: Ilja Van Sprundel
+Signed-off-by: Jan Beulich
+Reviewed-by: Andrew Cooper
+Reviewed-by: Wei Liu
+
+--- a/xen/common/xenoprof.c
++++ b/xen/common/xenoprof.c
+@@ -253,6 +253,9 @@ static int alloc_xenoprof_struct(
+         return -ENOMEM;
+     }
+ 
++    for ( i = 0; i < npages; ++i )
++        clear_page(d->xenoprof->rawbuf + i * PAGE_SIZE);
++
+     d->xenoprof->npages = npages;
+     d->xenoprof->nbuf = nvcpu;
+     d->xenoprof->bufsize = bufsize;
diff --git a/xsa313-2.patch b/xsa313-2.patch
new file mode 100644
index 0000000..d81b823
--- /dev/null
+++ b/xsa313-2.patch
@@ -0,0 +1,132 @@
+From: Jan Beulich
+Subject: xenoprof: limit consumption of shared buffer data
+
+Since a shared buffer can be written to by the guest, we may only read
+the head and tail pointers from there (all other fields should only ever
+be written to). Furthermore, for any particular operation the two values
+must be read exactly once, with both checks and consumption happening
+with the thus read values. (The backtrace related xenoprof_buf_space()
+use in xenoprof_log_event() is an exception: The values used there get
+re-checked by every subsequent xenoprof_add_sample().)
+
+Since that code needed touching, also fix the double increment of the
+lost samples count in case the backtrace related xenoprof_add_sample()
+invocation in xenoprof_log_event() fails.
+
+Where code is being touched anyway, add const as appropriate, but take
+the opportunity to entirely drop the now unused domain parameter of
+xenoprof_buf_space().
+
+This is part of XSA-313.
+
+Reported-by: Ilja Van Sprundel
+Signed-off-by: Jan Beulich
+Reviewed-by: George Dunlap
+Reviewed-by: Wei Liu
+
+--- a/xen/common/xenoprof.c
++++ b/xen/common/xenoprof.c
+@@ -479,25 +479,22 @@ static int add_passive_list(XEN_GUEST_HA
+ 
+ 
+ /* Get space in the buffer */
+-static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size)
++static int xenoprof_buf_space(int head, int tail, int size)
+ {
+-    int head, tail;
+-
+-    head = xenoprof_buf(d, buf, event_head);
+-    tail = xenoprof_buf(d, buf, event_tail);
+-
+     return ((tail > head) ? 0 : size) + tail - head - 1;
+ }
+ 
+ /* Check for space and add a sample. Return 1 if successful, 0 otherwise. */
+-static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
++static int xenoprof_add_sample(const struct domain *d,
++                               const struct xenoprof_vcpu *v,
+                                uint64_t eip, int mode, int event)
+ {
++    xenoprof_buf_t *buf = v->buffer;
+     int head, tail, size;
+ 
+     head = xenoprof_buf(d, buf, event_head);
+     tail = xenoprof_buf(d, buf, event_tail);
+-    size = xenoprof_buf(d, buf, event_size);
++    size = v->event_size;
+ 
+     /* make sure indexes in shared buffer are sane */
+     if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
+@@ -506,7 +503,7 @@ static int xenoprof_add_sample(struct do
+         return 0;
+     }
+ 
+-    if ( xenoprof_buf_space(d, buf, size) > 0 )
++    if ( xenoprof_buf_space(head, tail, size) > 0 )
+     {
+         xenoprof_buf(d, buf, event_log[head].eip) = eip;
+         xenoprof_buf(d, buf, event_log[head].mode) = mode;
+@@ -530,7 +527,6 @@ static int xenoprof_add_sample(struct do
+ int xenoprof_add_trace(struct vcpu *vcpu, uint64_t pc, int mode)
+ {
+     struct domain *d = vcpu->domain;
+-    xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;
+ 
+     /* Do not accidentally write an escape code due to a broken frame. */
+     if ( pc == XENOPROF_ESCAPE_CODE )
+@@ -539,7 +535,8 @@ int xenoprof_add_trace(struct vcpu
+         return 0;
+     }
+ 
+-    return xenoprof_add_sample(d, buf, pc, mode, 0);
++    return xenoprof_add_sample(d, &d->xenoprof->vcpu[vcpu->vcpu_id],
++                               pc, mode, 0);
+ }
+ 
+ void xenoprof_log_event(struct vcpu *vcpu, const struct cpu_user_regs *regs,
+@@ -570,17 +567,22 @@ void xenoprof_log_event(struct vcpu *vcp
+     /* Provide backtrace if requested. */
+     if ( backtrace_depth > 0 )
+     {
+-        if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
+-             !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
+-                                  XENOPROF_TRACE_BEGIN) )
++        if ( xenoprof_buf_space(xenoprof_buf(d, buf, event_head),
++                                xenoprof_buf(d, buf, event_tail),
++                                v->event_size) < 2 )
+         {
+             xenoprof_buf(d, buf, lost_samples)++;
+             lost_samples++;
+             return;
+         }
++
++        /* xenoprof_add_sample() will increment lost_samples on failure */
++        if ( !xenoprof_add_sample(d, v, XENOPROF_ESCAPE_CODE, mode,
++                                  XENOPROF_TRACE_BEGIN) )
++            return;
+     }
+ 
+-    if ( xenoprof_add_sample(d, buf, pc, mode, event) )
++    if ( xenoprof_add_sample(d, v, pc, mode, event) )
+     {
+         if ( is_active(vcpu->domain) )
+             active_samples++;
+--- a/xen/include/xen/xenoprof.h
++++ b/xen/include/xen/xenoprof.h
+@@ -61,12 +61,12 @@ struct xenoprof {
+ 
+ #ifndef CONFIG_COMPAT
+ #define XENOPROF_COMPAT(x) 0
+-#define xenoprof_buf(d, b, field) ((b)->field)
++#define xenoprof_buf(d, b, field) ACCESS_ONCE((b)->field)
+ #else
+ #define XENOPROF_COMPAT(x) ((x)->is_compat)
+-#define xenoprof_buf(d, b, field) (*(!(d)->xenoprof->is_compat ? \
+-                                     &(b)->native.field : \
+-                                     &(b)->compat.field))
++#define xenoprof_buf(d, b, field) ACCESS_ONCE(*(!(d)->xenoprof->is_compat \
++                                              ? &(b)->native.field \
++                                              : &(b)->compat.field))
+ #endif
+ 
+ struct domain;
diff --git a/xsa314-4.13.patch b/xsa314-4.13.patch
new file mode 100644
index 0000000..67e0066
--- /dev/null
+++ b/xsa314-4.13.patch
@@ -0,0 +1,121 @@
+From ab49f005f7d01d4004d76f2e295d31aca7d4f93a Mon Sep 17 00:00:00 2001
+From: Julien Grall
+Date: Thu, 20 Feb 2020 20:54:40 +0000
+Subject: [PATCH] xen/rwlock: Add missing memory barrier in the unlock path of
+ rwlock
+
+The rwlock unlock paths are using atomic_sub() to release the lock.
+However the implementation of atomic_sub() rightfully doesn't contain a
+memory barrier. On Arm, this means a processor is allowed to re-order
+the memory access with the preceeding access.
+
+In other words, the unlock may be seen by another processor before all
+the memory accesses within the "critical" section.
+
+The rwlock paths already contains barrier indirectly, but they are not
+very useful without the counterpart in the unlock paths.
+
+The memory barriers are not necessary on x86 because loads/stores are
+not re-ordered with lock instructions.
+
+So add arch_lock_release_barrier() in the unlock paths that will only
+add memory barrier on Arm.
+
+Take the opportunity to document each lock paths explaining why a
+barrier is not necessary.
+
+This is XSA-314.
+
+Signed-off-by: Julien Grall
+Reviewed-by: Jan Beulich
+Reviewed-by: Stefano Stabellini
+
+---
+ xen/include/xen/rwlock.h | 29 ++++++++++++++++++++++++++++-
+ 1 file changed, 28 insertions(+), 1 deletion(-)
+
+diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
+index 3dfea1ac2a..516486306f 100644
+--- a/xen/include/xen/rwlock.h
++++ b/xen/include/xen/rwlock.h
+@@ -48,6 +48,10 @@ static inline int _read_trylock(rwlock_t *lock)
+     if ( likely(!(cnts & _QW_WMASK)) )
+     {
+         cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
++        /*
++         * atomic_add_return() is a full barrier so no need for an
++         * arch_lock_acquire_barrier().
++         */
+         if ( likely(!(cnts & _QW_WMASK)) )
+             return 1;
+         atomic_sub(_QR_BIAS, &lock->cnts);
+@@ -64,11 +68,19 @@ static inline void _read_lock(rwlock_t *lock)
+     u32 cnts;
+ 
+     cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
++    /*
++     * atomic_add_return() is a full barrier so no need for an
++     * arch_lock_acquire_barrier().
++     */
+     if ( likely(!(cnts & _QW_WMASK)) )
+         return;
+ 
+     /* The slowpath will decrement the reader count, if necessary. */
+     queue_read_lock_slowpath(lock);
++    /*
++     * queue_read_lock_slowpath() is using spinlock and therefore is a
++     * full barrier. So no need for an arch_lock_acquire_barrier().
++     */
+ }
+ 
+ static inline void _read_lock_irq(rwlock_t *lock)
+@@ -92,6 +104,7 @@ static inline unsigned long _read_lock_irqsave(rwlock_t *lock)
+  */
+ static inline void _read_unlock(rwlock_t *lock)
+ {
++    arch_lock_release_barrier();
+     /*
+      * Atomically decrement the reader count
+      */
+@@ -121,11 +134,20 @@ static inline int _rw_is_locked(rwlock_t *lock)
+  */
+ static inline void _write_lock(rwlock_t *lock)
+ {
+-    /* Optimize for the unfair lock case where the fair flag is 0. */
++    /*
++     * Optimize for the unfair lock case where the fair flag is 0.
++     *
++     * atomic_cmpxchg() is a full barrier so no need for an
++     * arch_lock_acquire_barrier().
++     */
+     if ( atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0 )
+         return;
+ 
+     queue_write_lock_slowpath(lock);
++    /*
++     * queue_write_lock_slowpath() is using spinlock and therefore is a
++     * full barrier. So no need for an arch_lock_acquire_barrier().
++     */
+ }
+ 
+ static inline void _write_lock_irq(rwlock_t *lock)
+@@ -157,11 +179,16 @@ static inline int _write_trylock(rwlock_t *lock)
+     if ( unlikely(cnts) )
+         return 0;
+ 
++    /*
++     * atomic_cmpxchg() is a full barrier so no need for an
++     * arch_lock_acquire_barrier().
++     */
+     return likely(atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0);
+ }
+ 
+ static inline void _write_unlock(rwlock_t *lock)
+ {
++    arch_lock_release_barrier();
+     /*
+      * If the writer field is atomic, it can be cleared directly.
+      * Otherwise, an atomic subtraction will be used to clear it.
+-- 
+2.17.1
+
diff --git a/xsa316-xen.patch b/xsa316-xen.patch
new file mode 100644
index 0000000..4962b4e
--- /dev/null
+++ b/xsa316-xen.patch
@@ -0,0 +1,30 @@
+From: Ross Lagerwall
+Subject: xen/gnttab: Fix error path in map_grant_ref()
+
+Part of XSA-295 (c/s 863e74eb2cffb) inadvertently re-positioned the brackets,
+changing the logic. If the _set_status() call fails, the grant_map hypercall
+would fail with a status of 1 (rc != GNTST_okay) instead of the expected
+negative GNTST_* error.
+
+This error path can be taken due to bad guest state, and causes net/blk-back
+in Linux to crash.
+
+This is XSA-316.
+
+Signed-off-by: Ross Lagerwall
+Reviewed-by: Andrew Cooper
+Reviewed-by: Julien Grall
+
+diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
+index 9fd6e60416..4b5344dc21 100644
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -1031,7 +1031,7 @@ map_grant_ref(
+     {
+         if ( (rc = _set_status(shah, status, rd, rgt->gt_version, act,
+                                op->flags & GNTMAP_readonly, 1,
+-                               ld->domain_id) != GNTST_okay) )
++                               ld->domain_id)) != GNTST_okay )
+             goto act_release_out;
+ 
+         if ( !act->pin )
diff --git a/xsa318.patch b/xsa318.patch
new file mode 100644
index 0000000..f4becdf
--- /dev/null
+++ b/xsa318.patch
@@ -0,0 +1,39 @@
+From: Jan Beulich
+Subject: gnttab: fix GNTTABOP_copy continuation handling
+
+The XSA-226 fix was flawed - the backwards transformation on rc was done
+too early, causing a continuation to not get invoked when the need for
+preemption was determined at the very first iteration of the request.
+This in particular means that all of the status fields of the individual
+operations would be left untouched, i.e. set to whatever the caller may
+or may not have initialized them to.
+
+This is part of XSA-318.
+
+Reported-by: Pawel Wieczorkiewicz
+Tested-by: Pawel Wieczorkiewicz
+Signed-off-by: Jan Beulich
+Reviewed-by: Juergen Gross
+
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -3576,8 +3576,7 @@ do_grant_table_op(
+         rc = gnttab_copy(copy, count);
+         if ( rc > 0 )
+         {
+-            rc = count - rc;
+-            guest_handle_add_offset(copy, rc);
++            guest_handle_add_offset(copy, count - rc);
+             uop = guest_handle_cast(copy, void);
+         }
+         break;
+@@ -3644,6 +3643,9 @@ do_grant_table_op(
+  out:
+     if ( rc > 0 || opaque_out != 0 )
+     {
++        /* Adjust rc, see gnttab_copy() for why this is needed. */
++        if ( cmd == GNTTABOP_copy )
++            rc = count - rc;
+         ASSERT(rc < count);
+         ASSERT((opaque_out & GNTTABOP_CMD_MASK) == 0);
+         rc = hypercall_create_continuation(__HYPERVISOR_grant_table_op, "ihi",