diff --git a/xen.spec b/xen.spec
index 1845570..2902f14 100644
--- a/xen.spec
+++ b/xen.spec
@@ -55,7 +55,7 @@
 Summary: Xen is a virtual machine monitor
 Name: xen
 Version: 4.17.2
-Release: 5%{?dist}
+Release: 6%{?dist}
 License: GPLv2+ and LGPLv2+ and BSD
 URL: http://xen.org/
 Source0: https://downloads.xenproject.org/release/xen/%{version}/xen-%{version}.tar.gz
@@ -141,6 +141,9 @@
 Patch76: xsa444-4.17-1.patch
 Patch77: xsa444-4.17-2.patch
 Patch78: xsa445-4.17.patch
 Patch79: xsa446.patch
+Patch80: xsa447.patch
+Patch81: xsa449.patch
+Patch82: xsa450.patch
 
 %if %build_qemutrad
@@ -386,6 +389,9 @@ manage Xen virtual machines.
 %patch 77 -p1
 %patch 78 -p1
 %patch 79 -p1
+%patch 80 -p1
+%patch 81 -p1
+%patch 82 -p1
 
 # qemu-xen-traditional patches
 pushd tools/qemu-xen-traditional
@@ -993,6 +999,14 @@ fi
 %endif
 
 %changelog
+* Tue Jan 30 2024 Michael Young - 4.17.2-6
+- arm32: The cache may not be properly cleaned/invalidated (take two)
+  [XSA-447, CVE-2023-46837]
+- pci: phantom functions assigned to incorrect contexts [XSA-449,
+  CVE-2023-46839]
+- VT-d: Failure to quarantine devices in !HVM build [XSA-450,
+  CVE-2023-46840]
+
 * Tue Nov 14 2023 Michael Young - 4.17.2-5
 - x86/AMD: mismatch in IOMMU quarantine page table levels [XSA-445,
   CVE-2023-46835]
diff --git a/xsa447.patch b/xsa447.patch
new file mode 100644
index 0000000..2e26396
--- /dev/null
+++ b/xsa447.patch
@@ -0,0 +1,117 @@
+From 084c7312fa6c1d4a7fa343efa1d7d73693dafff4 Mon Sep 17 00:00:00 2001
+From: Michal Orzel
+Date: Thu, 23 Nov 2023 15:53:02 +0100
+Subject: [PATCH] xen/arm: page: Avoid pointer overflow on cache clean &
+ invalidate
+
+On Arm32, after cleaning and invalidating the last dcache line of the top
+domheap page, i.e. VA = 0xfffff000 (as a result of flushing the page to
+RAM), we end up adding the value of a dcache line size to the pointer
+once again, which results in a pointer arithmetic overflow (with a 64B line
+size, the operation 0xffffffc0 + 0x40 overflows to 0x0). Such behavior is
+undefined and, given the wide range of compiler versions we support, it is
+difficult to determine what could happen in such a scenario.
+
+Modify clean_and_invalidate_dcache_va_range() as well as
+clean_dcache_va_range() and invalidate_dcache_va_range() (due to the
+similarity of their handling) to prevent pointer arithmetic overflow.
+Modify the loops to use an additional variable to store the index of the
+next cacheline. Add an assert to prevent passing a region that wraps
+around, which is illegal and would end up in a page fault anyway (the
+region 0-2MB is unmapped). Lastly, return early if the size passed is 0.
+
+Note that on Arm64 we don't have this problem, given that the max VA
+space we support is 48 bits.
+
+This is XSA-447 / CVE-2023-46837.
+
+Signed-off-by: Michal Orzel
+Reviewed-by: Julien Grall
+---
+ xen/arch/arm/include/asm/page.h | 35 ++++++++++++++++++++++++++-------
+ 1 file changed, 28 insertions(+), 7 deletions(-)
+
+diff --git a/xen/arch/arm/include/asm/page.h b/xen/arch/arm/include/asm/page.h
+index ebaf5964f114..69f817d1e68a 100644
+--- a/xen/arch/arm/include/asm/page.h
++++ b/xen/arch/arm/include/asm/page.h
+@@ -162,6 +162,13 @@ static inline size_t read_dcache_line_bytes(void)
+ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+ {
+     size_t cacheline_mask = dcache_line_bytes - 1;
++    unsigned long idx = 0;
++
++    if ( !size )
++        return 0;
++
++    /* Passing a region that wraps around is illegal */
++    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
+
+     dsb(sy);           /* So the CPU issues all writes to the range */
+
+@@ -174,11 +181,11 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+     }
+
+     for ( ; size >= dcache_line_bytes;
+-            p += dcache_line_bytes, size -= dcache_line_bytes )
+-        asm volatile (__invalidate_dcache_one(0) : : "r" (p));
++            idx += dcache_line_bytes, size -= dcache_line_bytes )
++        asm volatile (__invalidate_dcache_one(0) : : "r" (p + idx));
+
+     if ( size > 0 )
+-        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
++        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
+
+     dsb(sy);           /* So we know the flushes happen before continuing */
+
+@@ -188,14 +195,21 @@ static inline int invalidate_dcache_va_range(const void *p, unsigned long size)
+ static inline int clean_dcache_va_range(const void *p, unsigned long size)
+ {
+     size_t cacheline_mask = dcache_line_bytes - 1;
++    unsigned long idx = 0;
++
++    if ( !size )
++        return 0;
++
++    /* Passing a region that wraps around is illegal */
++    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
+
+     dsb(sy);           /* So the CPU issues all writes to the range */
+     size += (uintptr_t)p & cacheline_mask;
+     size = (size + cacheline_mask) & ~cacheline_mask;
+     p = (void *)((uintptr_t)p & ~cacheline_mask);
+     for ( ; size >= dcache_line_bytes;
+-            p += dcache_line_bytes, size -= dcache_line_bytes )
+-        asm volatile (__clean_dcache_one(0) : : "r" (p));
++            idx += dcache_line_bytes, size -= dcache_line_bytes )
++        asm volatile (__clean_dcache_one(0) : : "r" (p + idx));
+     dsb(sy);           /* So we know the flushes happen before continuing */
+     /* ARM callers assume that dcache_* functions cannot fail. */
+     return 0;
+@@ -205,14 +219,21 @@ static inline int clean_and_invalidate_dcache_va_range
+     (const void *p, unsigned long size)
+ {
+     size_t cacheline_mask = dcache_line_bytes - 1;
++    unsigned long idx = 0;
++
++    if ( !size )
++        return 0;
++
++    /* Passing a region that wraps around is illegal */
++    ASSERT(((uintptr_t)p + size - 1) >= (uintptr_t)p);
+
+     dsb(sy);           /* So the CPU issues all writes to the range */
+     size += (uintptr_t)p & cacheline_mask;
+     size = (size + cacheline_mask) & ~cacheline_mask;
+     p = (void *)((uintptr_t)p & ~cacheline_mask);
+     for ( ; size >= dcache_line_bytes;
+-            p += dcache_line_bytes, size -= dcache_line_bytes )
+-        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p));
++            idx += dcache_line_bytes, size -= dcache_line_bytes )
++        asm volatile (__clean_and_invalidate_dcache_one(0) : : "r" (p + idx));
+     dsb(sy);           /* So we know the flushes happen before continuing */
+     /* ARM callers assume that dcache_* functions cannot fail. */
+     return 0;
+-- 
+2.40.1
+
diff --git a/xsa449.patch b/xsa449.patch
new file mode 100644
index 0000000..80aeac2
--- /dev/null
+++ b/xsa449.patch
@@ -0,0 +1,89 @@
+From d8b92b21b224126860978e4c604302f3c1e3bf75 Mon Sep 17 00:00:00 2001
+From: Roger Pau Monne
+Date: Wed, 13 Dec 2023 15:51:59 +0100
+Subject: [PATCH] pci: fail device assignment if phantom functions cannot be
+ assigned
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The current behavior is that no error is reported if (some) phantom functions
+fail to be assigned during device add or assignment, so the operation succeeds
+even if some phantom functions are not correctly set up.
+
+This can lead to devices being successfully assigned to a domU while some of
+the device's phantom functions are still assigned to dom0. Even when the
+device is assigned to domIO before being assigned to a domU, phantom functions
+might fail to be assigned to domIO, and also fail to be assigned to the domU,
+leaving them assigned to dom0. Since the device can generate requests using
+the IDs of those phantom functions, given the scenario above a device in such
+a state would be in control of a domU, but still capable of generating
+transactions that use a context ID targeting dom0-owned memory.
+
+Modify device assignment to attempt to deassign the device if phantom
+functions fail to be assigned.
+
+Note that device addition is not modified in the same way, as in that case the
+device is assigned to a trusted domain, and hence a partial assignment can
+lead to device malfunction but not to a security issue.
+
+This is XSA-449 / CVE-2023-46839.
+
+Fixes: 4e9950dc1bd2 ('IOMMU: add phantom function support')
+Signed-off-by: Roger Pau Monné
+Reviewed-by: Jan Beulich
+---
+ xen/drivers/passthrough/pci.c | 27 +++++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 6 deletions(-)
+
+diff --git a/xen/drivers/passthrough/pci.c b/xen/drivers/passthrough/pci.c
+index 1439d1ef2b26..47c0eee7bdcc 100644
+--- a/xen/drivers/passthrough/pci.c
++++ b/xen/drivers/passthrough/pci.c
+@@ -1488,11 +1488,10 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
+
+     pdev->fault.count = 0;
+
+-    if ( (rc = iommu_call(hd->platform_ops, assign_device, d, devfn,
+-                          pci_to_dev(pdev), flag)) )
+-        goto done;
++    rc = iommu_call(hd->platform_ops, assign_device, d, devfn, pci_to_dev(pdev),
++                    flag);
+
+-    for ( ; pdev->phantom_stride; rc = 0 )
++    while ( pdev->phantom_stride && !rc )
+     {
+         devfn += pdev->phantom_stride;
+         if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
+@@ -1503,8 +1502,24 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
+
+  done:
+     if ( rc )
+-        printk(XENLOG_G_WARNING "%pd: assign (%pp) failed (%d)\n",
+-               d, &PCI_SBDF(seg, bus, devfn), rc);
++    {
++        printk(XENLOG_G_WARNING "%pd: assign %s(%pp) failed (%d)\n",
++               d, devfn != pdev->devfn ? "phantom function " : "",
++               &PCI_SBDF(seg, bus, devfn), rc);
++
++        if ( devfn != pdev->devfn && deassign_device(d, seg, bus, pdev->devfn) )
++        {
++            /*
++             * Device with phantom functions that failed to both assign and
++             * rollback. Mark the device as broken and crash the target domain,
++             * as the state of the functions at this point is unknown and Xen
++             * has no way to assert consistent context assignment among them.
++             */
++            pdev->broken = true;
++            if ( !is_hardware_domain(d) && d != dom_io )
++                domain_crash(d);
++        }
++    }
+     /* The device is assigned to dom_io so mark it as quarantined */
+     else if ( d == dom_io )
+         pdev->quarantine = true;
+-- 
+2.43.0
+
diff --git a/xsa450.patch b/xsa450.patch
new file mode 100644
index 0000000..e94933b
--- /dev/null
+++ b/xsa450.patch
@@ -0,0 +1,59 @@
+From: Andrew Cooper
+Subject: VT-d: Fix "else" vs "#endif" misplacement
+
+In domain_pgd_maddr() the "#endif" is misplaced with respect to "else". This
+generates incorrect logic when CONFIG_HVM is compiled out, as the "else" body
+is executed unconditionally.
+
+Rework the logic to use IS_ENABLED() instead of explicit #ifdef-ary, as it's
+clearer to follow. This in turn involves adjusting p2m_get_pagetable() to
+compile when CONFIG_HVM is disabled.
+
+This is XSA-450 / CVE-2023-46840.
+
+Reported-by: Teddy Astie
+Fixes: 033ff90aa9c1 ("x86/P2M: p2m_{alloc,free}_ptp() and p2m_alloc_table() are HVM-only")
+Signed-off-by: Andrew Cooper
+Reviewed-by: Jan Beulich
+
+diff --git a/xen/arch/x86/include/asm/p2m.h b/xen/arch/x86/include/asm/p2m.h
+index 32f3f394b05a..6ada585eaac2 100644
+--- a/xen/arch/x86/include/asm/p2m.h
++++ b/xen/arch/x86/include/asm/p2m.h
+@@ -435,7 +435,14 @@ static inline bool p2m_is_altp2m(const struct p2m_domain *p2m)
+     return p2m->p2m_class == p2m_alternate;
+ }
+
+-#define p2m_get_pagetable(p2m) ((p2m)->phys_table)
++#ifdef CONFIG_HVM
++static inline pagetable_t p2m_get_pagetable(const struct p2m_domain *p2m)
++{
++    return p2m->phys_table;
++}
++#else
++pagetable_t p2m_get_pagetable(const struct p2m_domain *p2m);
++#endif
+
+ /*
+  * Ensure any deferred p2m TLB flush has been completed on all VCPUs.
+diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
+index 99b642f12ef9..4244855032ee 100644
+--- a/xen/drivers/passthrough/vtd/iommu.c
++++ b/xen/drivers/passthrough/vtd/iommu.c
+@@ -438,15 +438,13 @@ static paddr_t domain_pgd_maddr(struct domain *d, paddr_t pgd_maddr,
+
+     if ( pgd_maddr )
+         /* nothing */;
+-#ifdef CONFIG_HVM
+-    else if ( iommu_use_hap_pt(d) )
++    else if ( IS_ENABLED(CONFIG_HVM) && iommu_use_hap_pt(d) )
+     {
+         pagetable_t pgt = p2m_get_pagetable(p2m_get_hostp2m(d));
+
+         pgd_maddr = pagetable_get_paddr(pgt);
+     }
+     else
+-#endif
+     {
+         if ( !hd->arch.vtd.pgd_maddr )
+         {
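As an illustration of the arithmetic behind the XSA-447 change, here is a minimal standalone sketch (not Xen code; the 64-byte line size and the names are assumptions). It walks the top page of a 32-bit address space with the patch's index-based loop shape, which never forms a pointer value past the region, whereas the old "p += line" shape would compute 0xffffffc0 + 0x40 on the final step:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define LINE 64u /* assumed dcache line size in bytes */

int main(void)
{
    /* Top domheap page on a 32-bit VA space, as in the commit message. */
    uint32_t va = 0xfffff000u; /* stand-in for the pointer p */
    uint32_t size = 0x1000u;
    uint32_t idx = 0;

    /*
     * New loop shape: va stays fixed and only the index advances, so the
     * largest address ever formed is va + 0xfc0 == 0xffffffc0.  With the
     * old shape, the post-body "p += LINE" on that last iteration would
     * wrap to 0 -- undefined behaviour on a real pointer.
     */
    for ( ; size >= LINE; idx += LINE, size -= LINE )
        printf("flush line at 0x%08" PRIx32 "\n", va + idx);

    assert(idx == 0x1000u && size == 0);
    return 0;
}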
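The XSA-449 change follows a general "assign every function or roll the whole device back" pattern. Below is a compact sketch of that pattern with hypothetical names (try_assign, deassign_all, nr_fns are illustrative, not Xen's API); it mirrors the patch's behavior of stopping at the first failure, attempting rollback, and flagging the device when rollback also fails:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins: function 2 fails to assign; rollback succeeds. */
static int try_assign(int fn)  { return fn == 2 ? -1 : 0; }
static int deassign_all(void)  { return 0; }

static bool broken; /* mirrors pdev->broken in the real patch */

static int assign_with_rollback(int nr_fns)
{
    int rc = 0;

    /* Stop at the first failure instead of ignoring it (the old bug). */
    for ( int fn = 0; fn < nr_fns && !rc; fn++ )
        rc = try_assign(fn);

    if ( rc && deassign_all() )
        /*
         * Assignment failed and rollback failed too: ownership of the
         * functions is now inconsistent, so flag the device as unusable
         * (the real patch additionally crashes the target domain).
         */
        broken = true;

    return rc;
}

int main(void)
{
    int rc = assign_with_rollback(4);

    printf("rc=%d broken=%d\n", rc, broken);
    return 0;
}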
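The XSA-450 fix replaces #ifdef-ed arms of an if/else chain with a single chain gated by IS_ENABLED(). A toy illustration of the hazard and the fix follows; the IS_ENABLED macro here is a simplified stand-in (Xen derives it from the config symbol via token pasting), and the function is a shape-only sketch, not the real domain_pgd_maddr():

/* Toy stand-in; Xen's IS_ENABLED() is implemented via token pasting. */
#define IS_ENABLED(cfg) cfg
#define CONFIG_HVM 0           /* pretend the HVM code is compiled out */

int pgd_maddr_shape(int pgd_maddr, int use_hap)
{
    if ( pgd_maddr )
        /* nothing */;
    else if ( IS_ENABLED(CONFIG_HVM) && use_hap )
        pgd_maddr = 1;         /* HVM-only path; dead code when disabled */
    else
        pgd_maddr = 2;         /* fallback path */

    /*
     * With the buggy "#ifdef CONFIG_HVM ... else ... #endif" placement,
     * compiling out CONFIG_HVM detached the fallback block from the chain,
     * so it ran unconditionally even when pgd_maddr was already set.
     * Keeping one if/else chain and testing IS_ENABLED() preserves the
     * intended exclusivity while still letting the compiler discard the
     * disabled arm.
     */
    return pgd_maddr;
}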