From 2d106595f4cd4bc5525d014c2e2cc2cd7cd5bfeb Mon Sep 17 00:00:00 2001
From: Michael Young
Date: Sep 08 2016 20:54:49 +0000
Subject: x86: Disallow L3 recursive pagetable for 32-bit PV guests [XSA-185,
 CVE-2016-7092] (#1374470)
 x86: Mishandling of instruction pointer truncation during emulation
 [XSA-186, CVE-2016-7093] (#1374471)
 x86 HVM: Overflow of sh_ctxt->seg_reg[] [XSA-187, CVE-2016-7094] (#1374473)

---

diff --git a/xen.spec b/xen.spec
index b338a31..57b6733 100644
--- a/xen.spec
+++ b/xen.spec
@@ -51,7 +51,7 @@
 Summary: Xen is a virtual machine monitor
 Name: xen
 Version: 4.6.3
-Release: 4%{?dist}
+Release: 5%{?dist}
 Group: Development/Libraries
 License: GPLv2+ and LGPLv2+ and BSD
 URL: http://xen.org/
@@ -149,6 +149,10 @@
 Patch170: xsa184-qemuu-master.patch
 Patch171: qemu.git-926cde5f3e4d2504ed161ed0.patch
 Patch172: qemu.git-cc96677469388bad3d664793.patch
 Patch173: qemu.trad.CVE-2016-6351.patch
+Patch174: xsa185.patch
+Patch175: xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
+Patch176: xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
+Patch177: xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
 
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
@@ -394,6 +398,10 @@ manage Xen virtual machines.
 %patch166 -p1
 %patch167 -p1
 %patch168 -p1
+%patch174 -p1
+%patch175 -p1
+%patch176 -p1
+%patch177 -p1
 
 # qemu-xen-traditional patches
 pushd tools/qemu-xen-traditional
@@ -936,6 +944,13 @@
 %endif
 
 %changelog
+* Thu Sep 08 2016 Michael Young - 4.6.3-5
+- x86: Disallow L3 recursive pagetable for 32-bit PV guests [XSA-185,
+  CVE-2016-7092] (#1374470)
+- x86: Mishandling of instruction pointer truncation during emulation
+  [XSA-186, CVE-2016-7093] (#1374471)
+- x86 HVM: Overflow of sh_ctxt->seg_reg[] [XSA-187, CVE-2016-7094] (#1374473)
+
 * Wed Jul 27 2016 Michael Young - 4.6.3-4
 - x86: Privilege escalation in PV guests [XSA-182, CVE-2016-6258] (#1360358)
 - x86: Missing SMAP whitelisting in 32-bit exception / event delivery
diff --git a/xsa185.patch b/xsa185.patch
new file mode 100644
index 0000000..a4c133e
--- /dev/null
+++ b/xsa185.patch
@@ -0,0 +1,38 @@
+From 30aba4992b18245c436f16df7326a16c01a51570 Mon Sep 17 00:00:00 2001
+From: Jan Beulich
+Date: Mon, 8 Aug 2016 10:58:12 +0100
+Subject: x86/32on64: don't allow recursive page tables from L3
+
+L3 entries are special in PAE mode, and hence can't reasonably be used
+for setting up recursive (and hence linear) page table mappings. Since
+abuse is possible when the guest in fact gets run on 4-level page
+tables, this needs to be excluded explicitly.
+
+This is XSA-185.
+
+Reported-by: Jérémie Boutoille
+Reported-by: 栾尚聪(好风)
+Signed-off-by: Jan Beulich
+Reviewed-by: Andrew Cooper
+---
+ xen/arch/x86/mm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
+index 109b8be..69b8b8d 100644
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -1122,7 +1122,9 @@ get_page_from_l3e(
+ 
+     rc = get_page_and_type_from_pagenr(
+         l3e_get_pfn(l3e), PGT_l2_page_table, d, partial, 1);
+-    if ( unlikely(rc == -EINVAL) && get_l3_linear_pagetable(l3e, pfn, d) )
++    if ( unlikely(rc == -EINVAL) &&
++         !is_pv_32bit_domain(d) &&
++         get_l3_linear_pagetable(l3e, pfn, d) )
+         rc = 0;
+ 
+     return rc;
+-- 
+2.1.4
+
diff --git a/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch b/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
new file mode 100644
index 0000000..b257497
--- /dev/null
+++ b/xsa186-0001-x86-emulate-Correct-boundary-interactions-of-emulate.patch
@@ -0,0 +1,73 @@
+From e938be013ba73ff08fa4f1d8670501aacefde7fb Mon Sep 17 00:00:00 2001
+From: Andrew Cooper
+Date: Fri, 22 Jul 2016 16:02:54 +0000
+Subject: [PATCH 1/2] x86/emulate: Correct boundary interactions of emulated
+ instructions
+
+This reverts most of c/s 0640ffb6 "x86emul: fix rIP handling".
+
+Experimentally, in long mode processors will execute an instruction stream
+which crosses the 64bit -1 -> 0 virtual boundary, whether the instruction
+boundary is aligned on the virtual boundary, or is misaligned.
+
+In compatibility mode, Intel processors will execute an instruction stream
+which crosses the 32bit -1 -> 0 virtual boundary, while AMD processors raise a
+segmentation fault.  Xen's segmentation behaviour matches AMD.
+
+For 16bit code, hardware does not ever truncate %ip.  %eip is always used and
+behaves normally as a 32bit register, including in 16bit protected mode
+segments, as well as in Real and Unreal mode.
+
+This is XSA-186
+
+Reported-by: Brian Marcotte
+Signed-off-by: Andrew Cooper
+Reviewed-by: Jan Beulich
+---
+ xen/arch/x86/x86_emulate/x86_emulate.c | 22 ++++------------------
+ 1 file changed, 4 insertions(+), 18 deletions(-)
+
+diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
+index d5a56cf..bf3529a 100644
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1570,10 +1570,6 @@ x86_emulate(
+ #endif
+     }
+ 
+-    /* Truncate rIP to def_ad_bytes (2 or 4) if necessary. */
+-    if ( def_ad_bytes < sizeof(_regs.eip) )
+-        _regs.eip &= (1UL << (def_ad_bytes * 8)) - 1;
+-
+     /* Prefix bytes. */
+     for ( ; ; )
+     {
+@@ -3906,21 +3902,11 @@ x86_emulate(
+ 
+     /* Commit shadow register state. */
+     _regs.eflags &= ~EFLG_RF;
+-    switch ( __builtin_expect(def_ad_bytes, sizeof(_regs.eip)) )
+-    {
+-        uint16_t ip;
+-
+-    case 2:
+-        ip = _regs.eip;
+-        _regs.eip = ctxt->regs->eip;
+-        *(uint16_t *)&_regs.eip = ip;
+-        break;
+-#ifdef __x86_64__
+-    case 4:
+-        _regs.rip = _regs._eip;
+-        break;
+-#endif
+-    }
++
++    /* Zero the upper 32 bits of %rip if not in long mode. */
++    if ( def_ad_bytes < sizeof(_regs.eip) )
++        _regs.eip = (uint32_t)_regs.eip;
++
+     *ctxt->regs = _regs;
+ 
+ done:
+-- 
+2.1.4
+
diff --git a/xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch b/xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
new file mode 100644
index 0000000..e8cd1e7
--- /dev/null
+++ b/xsa187-4.6-0002-x86-segment-Bounds-check-accesses-to-emulation-ctx.patch
@@ -0,0 +1,142 @@
+From: Andrew Cooper
+Subject: x86/segment: Bounds check accesses to emulation ctxt->seg_reg[]
+
+HVM HAP codepaths have space for all segment registers in the seg_reg[]
+cache (with x86_seg_none still risking an array overrun), while the shadow
+codepaths only have space for the user segments.
+
+Range check the input segment of *_get_seg_reg() against the size of the array
+used to cache the results, to avoid overruns in the case that the callers
+don't filter their input suitably.
+
+Subsume the is_x86_user_segment(seg) checks from the shadow code, which were
+an incomplete attempt at range checking, and are now superseded.  Make
+hvm_get_seg_reg() static, as it is not used outside of shadow/common.c.
+
+No functional change, but far easier to reason that no overflow is possible.
+
+Reported-by: Andrew Cooper
+Signed-off-by: Andrew Cooper
+Acked-by: Tim Deegan
+Acked-by: Jan Beulich
+
+--- a/xen/arch/x86/hvm/emulate.c
++++ b/xen/arch/x86/hvm/emulate.c
+@@ -526,6 +526,8 @@ static int hvmemul_virtual_to_linear(
+             ? 1 : 4096);
+ 
+     reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
++    if ( IS_ERR(reg) )
++        return -PTR_ERR(reg);
+ 
+     if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
+     {
+@@ -1360,6 +1362,10 @@ static int hvmemul_read_segment(
+     struct hvm_emulate_ctxt *hvmemul_ctxt =
+         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+     struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
++
++    if ( IS_ERR(sreg) )
++        return -PTR_ERR(sreg);
++
+     memcpy(reg, sreg, sizeof(struct segment_register));
+     return X86EMUL_OKAY;
+ }
+@@ -1373,6 +1379,9 @@ static int hvmemul_write_segment(
+         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
+     struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
+ 
++    if ( IS_ERR(sreg) )
++        return -PTR_ERR(sreg);
++
+     memcpy(sreg, reg, sizeof(struct segment_register));
+     __set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
+ 
+@@ -1911,10 +1920,17 @@ void hvm_emulate_writeback(
+     }
+ }
+ 
++/*
++ * Callers which pass a known in-range x86_segment can rely on the return
++ * pointer being valid.  Other callers must explicitly check for errors.
++ */
+ struct segment_register *hvmemul_get_seg_reg(
+     enum x86_segment seg,
+     struct hvm_emulate_ctxt *hvmemul_ctxt)
+ {
++    if ( seg < 0 || seg >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) )
++        return ERR_PTR(-X86EMUL_UNHANDLEABLE);
++
+     if ( !__test_and_set_bit(seg, &hvmemul_ctxt->seg_reg_accessed) )
+         hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
+     return &hvmemul_ctxt->seg_reg[seg];
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -125,10 +125,19 @@ __initcall(shadow_audit_key_init);
+ /* x86 emulator support for the shadow code
+  */
+ 
++/*
++ * Callers which pass a known in-range x86_segment can rely on the return
++ * pointer being valid.  Other callers must explicitly check for errors.
++ */
+ struct segment_register *hvm_get_seg_reg(
+     enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
+ {
+-    struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
++    struct segment_register *seg_reg;
++
++    if ( seg < 0 || seg >= ARRAY_SIZE(sh_ctxt->seg_reg) )
++        return ERR_PTR(-X86EMUL_UNHANDLEABLE);
++
++    seg_reg = &sh_ctxt->seg_reg[seg];
+     if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) )
+         hvm_get_segment_register(current, seg, seg_reg);
+     return seg_reg;
+@@ -145,14 +154,9 @@ static int hvm_translate_linear_addr(
+     struct segment_register *reg;
+     int okay;
+ 
+-    /*
+-     * Can arrive here with non-user segments.  However, no such circumstance
+-     * is part of a legitimate pagetable update, so fail the emulation.
+-     */
+-    if ( !is_x86_user_segment(seg) )
+-        return X86EMUL_UNHANDLEABLE;
+-
+     reg = hvm_get_seg_reg(seg, sh_ctxt);
++    if ( IS_ERR(reg) )
++        return -PTR_ERR(reg);
+ 
+     okay = hvm_virtual_to_linear_addr(
+         seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
+@@ -254,9 +258,6 @@ hvm_emulate_write(enum x86_segment seg,
+     unsigned long addr;
+     int rc;
+ 
+-    if ( !is_x86_user_segment(seg) )
+-        return X86EMUL_UNHANDLEABLE;
+-
+     /* How many emulations could we save if we unshadowed on stack writes? */
+     if ( seg == x86_seg_ss )
+         perfc_incr(shadow_fault_emulate_stack);
+@@ -284,9 +285,6 @@ hvm_emulate_cmpxchg(enum x86_segment seg
+     unsigned long addr, old[2], new[2];
+     int rc;
+ 
+-    if ( !is_x86_user_segment(seg) )
+-        return X86EMUL_UNHANDLEABLE;
+-
+     rc = hvm_translate_linear_addr(
+         seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
+     if ( rc )
+--- a/xen/include/asm-x86/hvm/emulate.h
++++ b/xen/include/asm-x86/hvm/emulate.h
+@@ -13,6 +13,7 @@
+ #define __ASM_X86_HVM_EMULATE_H__
+ 
+ #include <xen/config.h>
++#include <xen/err.h>
+ #include <asm/hvm/hvm.h>
+ #include <asm/x86_emulate.h>
+ 
diff --git a/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch b/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
new file mode 100644
index 0000000..bc99596
--- /dev/null
+++ b/xsa187-4.7-0001-x86-shadow-Avoid-overflowing-sh_ctxt-seg.patch
@@ -0,0 +1,42 @@
+From: Andrew Cooper
+Subject: x86/shadow: Avoid overflowing sh_ctxt->seg_reg[]
+
+hvm_get_seg_reg() does not perform a range check on its input segment, calls
+hvm_get_segment_register() and writes straight into sh_ctxt->seg_reg[].
+
+x86_seg_none is outside the bounds of sh_ctxt->seg_reg[], and will hit a BUG()
+in {vmx,svm}_get_segment_register().
+
+HVM guests running with shadow paging can end up performing a virtual to
+linear translation with x86_seg_none.  This is used for addresses which are
+already linear.  However, none of this is a legitimate pagetable update, so
+fail the emulation in such a case.
+
+This is XSA-187
+
+Reported-by: Andrew Cooper
+Signed-off-by: Andrew Cooper
+Reviewed-by: Tim Deegan
+
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -140,9 +140,18 @@ static int hvm_translate_linear_addr(
+     struct sh_emulate_ctxt *sh_ctxt,
+     unsigned long *paddr)
+ {
+-    struct segment_register *reg = hvm_get_seg_reg(seg, sh_ctxt);
++    struct segment_register *reg;
+     int okay;
+ 
++    /*
++     * Can arrive here with non-user segments.  However, no such circumstance
++     * is part of a legitimate pagetable update, so fail the emulation.
++     */
++    if ( !is_x86_user_segment(seg) )
++        return X86EMUL_UNHANDLEABLE;
++
++    reg = hvm_get_seg_reg(seg, sh_ctxt);
++
+     okay = hvm_virtual_to_linear_addr(
+         seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
+ 
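
For readers unfamiliar with the error-pointer convention the XSA-187 fixes lean on: rather than adding an out-parameter, *_get_seg_reg() encodes a failure code in the returned pointer itself (ERR_PTR), and callers test it with IS_ERR() before dereferencing. The standalone C sketch below illustrates the pattern; err_ptr/is_err/ptr_err, seg_cache and NR_SEGS are illustrative stand-ins, not Xen's API (Xen's real helpers come from xen/err.h).

/* sketch.c - illustrative only; mirrors the XSA-187 bounds-check pattern. */
#include <stdio.h>
#include <stdint.h>

#define NR_SEGS       6   /* user segments only, like the shadow seg_reg[] */
#define UNHANDLEABLE  1   /* stand-in for X86EMUL_UNHANDLEABLE */

struct segment_register { uint64_t base; uint32_t limit; };

static struct segment_register seg_cache[NR_SEGS];

/* Encode a small negative error code in a pointer, xen/err.h style. */
static void *err_ptr(long err)      { return (void *)err; }
static int   is_err(const void *p)  { return (uintptr_t)p >= (uintptr_t)-4095; }
static long  ptr_err(const void *p) { return (long)(intptr_t)p; }

/* Range-check the segment before touching the cache, as the fixes do. */
static struct segment_register *get_seg_reg(int seg)
{
    if ( seg < 0 || seg >= NR_SEGS )
        return err_ptr(-UNHANDLEABLE);
    return &seg_cache[seg];
}

int main(void)
{
    struct segment_register *reg = get_seg_reg(7); /* e.g. x86_seg_none */

    if ( is_err(reg) )            /* caller must check before dereferencing */
        printf("rejected out-of-range segment: error %ld\n", -ptr_err(reg));
    return 0;
}

Keeping the range check inside the accessor protects every caller, even a future call path that forgets to filter x86_seg_none, which is exactly the class of bug XSA-187 addressed.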