|
|
4d161cd |
From: Andrew Cooper <andrew.cooper3@citrix.com>
|
|
|
4d161cd |
Subject: x86/segment: Bounds check accesses to emulation ctxt->seg_reg[]
|
|
|
4d161cd |
|
|
|
4d161cd |
HVM HAP codepaths have space for all segment registers in the seg_reg[]
|
|
|
4d161cd |
cache (with x86_seg_none still risking an array overrun), while the shadow
|
|
|
4d161cd |
codepaths only have space for the user segments.
|
|
|
4d161cd |
|
|
|
4d161cd |
Range check the input segment of *_get_seg_reg() against the size of the array
|
|
|
4d161cd |
used to cache the results, to avoid overruns in the case that the callers
|
|
|
4d161cd |
don't filter their input suitably.
|
|
|
4d161cd |
|
|
|
4d161cd |
Subsume the is_x86_user_segment(seg) checks from the shadow code, which were
|
|
|
4d161cd |
an incomplete attempt at range checking, and are now superseded. Make
|
|
|
4d161cd |
hvm_get_seg_reg() static, as it is not used outside of shadow/common.c
|
|
|
4d161cd |
|
|
|
4d161cd |
No functional change, but far easier to reason that no overflow is possible.
|
|
|
4d161cd |
|
|
|
4d161cd |
Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
|
|
4d161cd |
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
|
|
|
4d161cd |
Acked-by: Tim Deegan <tim@xen.org>
|
|
|
4d161cd |
Acked-by: Jan Beulich <jbeulich@suse.com>
|
|
|
4d161cd |
|
|
|
4d161cd |
--- a/xen/arch/x86/hvm/emulate.c
|
|
|
4d161cd |
+++ b/xen/arch/x86/hvm/emulate.c
|
|
|
4d161cd |
@@ -526,6 +526,8 @@ static int hvmemul_virtual_to_linear(
|
|
|
4d161cd |
? 1 : 4096);
|
|
|
4d161cd |
|
|
|
4d161cd |
reg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
|
|
|
4d161cd |
+ if ( IS_ERR(reg) )
|
|
|
4d161cd |
+ return -PTR_ERR(reg);
|
|
|
4d161cd |
|
|
|
4d161cd |
if ( (hvmemul_ctxt->ctxt.regs->eflags & X86_EFLAGS_DF) && (*reps > 1) )
|
|
|
4d161cd |
{
|
|
|
4d161cd |
@@ -1360,6 +1362,10 @@ static int hvmemul_read_segment(
|
|
|
4d161cd |
struct hvm_emulate_ctxt *hvmemul_ctxt =
|
|
|
4d161cd |
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
|
|
|
4d161cd |
struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
|
|
|
4d161cd |
+
|
|
|
4d161cd |
+ if ( IS_ERR(sreg) )
|
|
|
4d161cd |
+ return -PTR_ERR(sreg);
|
|
|
4d161cd |
+
|
|
|
4d161cd |
memcpy(reg, sreg, sizeof(struct segment_register));
|
|
|
4d161cd |
return X86EMUL_OKAY;
|
|
|
4d161cd |
}
|
|
|
4d161cd |
@@ -1373,6 +1379,9 @@ static int hvmemul_write_segment(
|
|
|
4d161cd |
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
|
|
|
4d161cd |
struct segment_register *sreg = hvmemul_get_seg_reg(seg, hvmemul_ctxt);
|
|
|
4d161cd |
|
|
|
4d161cd |
+ if ( IS_ERR(sreg) )
|
|
|
4d161cd |
+ return -PTR_ERR(sreg);
|
|
|
4d161cd |
+
|
|
|
4d161cd |
memcpy(sreg, reg, sizeof(struct segment_register));
|
|
|
4d161cd |
__set_bit(seg, &hvmemul_ctxt->seg_reg_dirty);
|
|
|
4d161cd |
|
|
|
4d161cd |
@@ -1911,10 +1920,17 @@ void hvm_emulate_writeback(
|
|
|
4d161cd |
}
|
|
|
4d161cd |
}
|
|
|
4d161cd |
|
|
|
4d161cd |
+/*
|
|
|
4d161cd |
+ * Callers which pass a known in-range x86_segment can rely on the return
|
|
|
4d161cd |
+ * pointer being valid. Other callers must explicitly check for errors.
|
|
|
4d161cd |
+ */
|
|
|
4d161cd |
struct segment_register *hvmemul_get_seg_reg(
|
|
|
4d161cd |
enum x86_segment seg,
|
|
|
4d161cd |
struct hvm_emulate_ctxt *hvmemul_ctxt)
|
|
|
4d161cd |
{
|
|
|
4d161cd |
+ if ( seg < 0 || seg >= ARRAY_SIZE(hvmemul_ctxt->seg_reg) )
|
|
|
4d161cd |
+ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
|
|
|
4d161cd |
+
|
|
|
4d161cd |
if ( !__test_and_set_bit(seg, &hvmemul_ctxt->seg_reg_accessed) )
|
|
|
4d161cd |
hvm_get_segment_register(current, seg, &hvmemul_ctxt->seg_reg[seg]);
|
|
|
4d161cd |
return &hvmemul_ctxt->seg_reg[seg];
|
|
|
4d161cd |
--- a/xen/arch/x86/mm/shadow/common.c
|
|
|
4d161cd |
+++ b/xen/arch/x86/mm/shadow/common.c
|
|
|
4d161cd |
@@ -125,10 +125,19 @@ __initcall(shadow_audit_key_init);
|
|
|
4d161cd |
/* x86 emulator support for the shadow code
|
|
|
4d161cd |
*/
|
|
|
4d161cd |
|
|
|
4d161cd |
+/*
|
|
|
4d161cd |
+ * Callers which pass a known in-range x86_segment can rely on the return
|
|
|
4d161cd |
+ * pointer being valid. Other callers must explicitly check for errors.
|
|
|
4d161cd |
+ */
|
|
|
4d161cd |
struct segment_register *hvm_get_seg_reg(
|
|
|
4d161cd |
enum x86_segment seg, struct sh_emulate_ctxt *sh_ctxt)
|
|
|
4d161cd |
{
|
|
|
4d161cd |
- struct segment_register *seg_reg = &sh_ctxt->seg_reg[seg];
|
|
|
4d161cd |
+ struct segment_register *seg_reg;
|
|
|
4d161cd |
+
|
|
|
4d161cd |
+ if ( seg < 0 || seg >= ARRAY_SIZE(sh_ctxt->seg_reg) )
|
|
|
4d161cd |
+ return ERR_PTR(-X86EMUL_UNHANDLEABLE);
|
|
|
4d161cd |
+
|
|
|
4d161cd |
+ seg_reg = &sh_ctxt->seg_reg[seg];
|
|
|
4d161cd |
if ( !__test_and_set_bit(seg, &sh_ctxt->valid_seg_regs) )
|
|
|
4d161cd |
hvm_get_segment_register(current, seg, seg_reg);
|
|
|
4d161cd |
return seg_reg;
|
|
|
4d161cd |
@@ -145,14 +154,9 @@ static int hvm_translate_linear_addr(
|
|
|
4d161cd |
struct segment_register *reg;
|
|
|
4d161cd |
int okay;
|
|
|
4d161cd |
|
|
|
4d161cd |
- /*
|
|
|
4d161cd |
- * Can arrive here with non-user segments. However, no such cirucmstance
|
|
|
4d161cd |
- * is part of a legitimate pagetable update, so fail the emulation.
|
|
|
4d161cd |
- */
|
|
|
4d161cd |
- if ( !is_x86_user_segment(seg) )
|
|
|
4d161cd |
- return X86EMUL_UNHANDLEABLE;
|
|
|
4d161cd |
-
|
|
|
4d161cd |
reg = hvm_get_seg_reg(seg, sh_ctxt);
|
|
|
4d161cd |
+ if ( IS_ERR(reg) )
|
|
|
4d161cd |
+ return -PTR_ERR(reg);
|
|
|
4d161cd |
|
|
|
4d161cd |
okay = hvm_virtual_to_linear_addr(
|
|
|
4d161cd |
seg, reg, offset, bytes, access_type, sh_ctxt->ctxt.addr_size, paddr);
|
|
|
4d161cd |
@@ -254,9 +258,6 @@ hvm_emulate_write(enum x86_segment seg,
|
|
|
4d161cd |
unsigned long addr;
|
|
|
4d161cd |
int rc;
|
|
|
4d161cd |
|
|
|
4d161cd |
- if ( !is_x86_user_segment(seg) )
|
|
|
4d161cd |
- return X86EMUL_UNHANDLEABLE;
|
|
|
4d161cd |
-
|
|
|
4d161cd |
/* How many emulations could we save if we unshadowed on stack writes? */
|
|
|
4d161cd |
if ( seg == x86_seg_ss )
|
|
|
4d161cd |
perfc_incr(shadow_fault_emulate_stack);
|
|
|
4d161cd |
@@ -284,9 +285,6 @@ hvm_emulate_cmpxchg(enum x86_segment seg
|
|
|
4d161cd |
unsigned long addr, old[2], new[2];
|
|
|
4d161cd |
int rc;
|
|
|
4d161cd |
|
|
|
4d161cd |
- if ( !is_x86_user_segment(seg) )
|
|
|
4d161cd |
- return X86EMUL_UNHANDLEABLE;
|
|
|
4d161cd |
-
|
|
|
4d161cd |
rc = hvm_translate_linear_addr(
|
|
|
4d161cd |
seg, offset, bytes, hvm_access_write, sh_ctxt, &addr);
|
|
|
4d161cd |
if ( rc )
|
|
|
4d161cd |
--- a/xen/include/asm-x86/hvm/emulate.h
|
|
|
4d161cd |
+++ b/xen/include/asm-x86/hvm/emulate.h
|
|
|
4d161cd |
@@ -13,6 +13,7 @@
|
|
|
4d161cd |
#define __ASM_X86_HVM_EMULATE_H__
|
|
|
4d161cd |
|
|
|
4d161cd |
#include <xen/config.h>
|
|
|
4d161cd |
+#include <xen/err.h>
|
|
|
4d161cd |
#include <asm/hvm/hvm.h>
|
|
|
4d161cd |
#include <asm/x86_emulate.h>
|
|
|
4d161cd |
|