diff --git a/xen.git-1a52e3946d9b04eb8a38d561524e42556cdeb4fb.patch b/xen.git-1a52e3946d9b04eb8a38d561524e42556cdeb4fb.patch new file mode 100644 index 0000000..ad59a14 --- /dev/null +++ b/xen.git-1a52e3946d9b04eb8a38d561524e42556cdeb4fb.patch @@ -0,0 +1,94 @@ +From: Andrew Cooper +Date: Tue, 25 Jan 2022 17:14:48 +0000 (+0000) +Subject: x86/spec-ctrl: Introduce new has_spec_ctrl boolean +X-Git-Url: http://xenbits.xenproject.org/gitweb/?p=xen.git;a=commitdiff_plain;h=1a52e3946d9b04eb8a38d561524e42556cdeb4fb + +x86/spec-ctrl: Introduce new has_spec_ctrl boolean + +Most MSR_SPEC_CTRL setup will be common between Intel and AMD. Instead of +opencoding an OR of two features everywhere, introduce has_spec_ctrl instead. + +Reword the comment above the Intel specific alternatives block to highlight +that it is Intel specific, and pull the setting of default_xen_spec_ctrl.IBRS +out because it will want to be common. + +No functional change. + +Signed-off-by: Andrew Cooper +Reviewed-by: Jan Beulich +(cherry picked from commit 5d9eff3a312763d889cfbf3c8468b6dfb3ab490c) +--- + +diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c +index e85b0c0c7d..84d5de8856 100644 +--- a/xen/arch/x86/spec_ctrl.c ++++ b/xen/arch/x86/spec_ctrl.c +@@ -898,7 +898,7 @@ static __init void mds_calculations(uint64_t caps) + void __init init_speculation_mitigations(void) + { + enum ind_thunk thunk = THUNK_DEFAULT; +- bool ibrs = false, hw_smt_enabled; ++ bool has_spec_ctrl, ibrs = false, hw_smt_enabled; + bool cpu_has_bug_taa; + uint64_t caps = 0; + +@@ -907,6 +907,8 @@ void __init init_speculation_mitigations(void) + + hw_smt_enabled = check_smt_enabled(); + ++ has_spec_ctrl = boot_cpu_has(X86_FEATURE_IBRSB); ++ + /* + * First, disable the use of retpolines if Xen is using shadow stacks, as + * they are incompatible. +@@ -944,11 +946,11 @@ void __init init_speculation_mitigations(void) + */ + else if ( retpoline_safe(caps) ) + thunk = THUNK_RETPOLINE; +- else if ( boot_cpu_has(X86_FEATURE_IBRSB) ) ++ else if ( has_spec_ctrl ) + ibrs = true; + } + /* Without compiler thunk support, use IBRS if available. */ +- else if ( boot_cpu_has(X86_FEATURE_IBRSB) ) ++ else if ( has_spec_ctrl ) + ibrs = true; + } + +@@ -979,10 +981,7 @@ void __init init_speculation_mitigations(void) + else if ( thunk == THUNK_JMP ) + setup_force_cpu_cap(X86_FEATURE_IND_THUNK_JMP); + +- /* +- * If we are on hardware supporting MSR_SPEC_CTRL, see about setting up +- * the alternatives blocks so we can virtualise support for guests. +- */ ++ /* Intel hardware: MSR_SPEC_CTRL alternatives setup. */ + if ( boot_cpu_has(X86_FEATURE_IBRSB) ) + { + if ( opt_msr_sc_pv ) +@@ -1001,11 +1000,12 @@ void __init init_speculation_mitigations(void) + default_spec_ctrl_flags |= SCF_ist_wrmsr; + setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM); + } +- +- if ( ibrs ) +- default_xen_spec_ctrl |= SPEC_CTRL_IBRS; + } + ++ /* If we have IBRS available, see whether we should use it. */ ++ if ( has_spec_ctrl && ibrs ) ++ default_xen_spec_ctrl |= SPEC_CTRL_IBRS; ++ + /* If we have SSBD available, see whether we should use it. */ + if ( boot_cpu_has(X86_FEATURE_SSBD) && opt_ssbd ) + default_xen_spec_ctrl |= SPEC_CTRL_SSBD; +@@ -1220,7 +1220,7 @@ void __init init_speculation_mitigations(void) + * boot won't have any other code running in a position to mount an + * attack. 
+      */
+-    if ( boot_cpu_has(X86_FEATURE_IBRSB) )
++    if ( has_spec_ctrl )
+     {
+         bsp_delay_spec_ctrl = !cpu_has_hypervisor && default_xen_spec_ctrl;
+
diff --git a/xen.git-35d0ea6726f8f013cbf3699a90309136896ae55e.patch b/xen.git-35d0ea6726f8f013cbf3699a90309136896ae55e.patch
new file mode 100644
index 0000000..b462ae4
--- /dev/null
+++ b/xen.git-35d0ea6726f8f013cbf3699a90309136896ae55e.patch
@@ -0,0 +1,62 @@
+From: Andrew Cooper
+Date: Tue, 25 Jan 2022 16:09:59 +0000 (+0000)
+Subject: x86/spec-ctrl: Drop use_spec_ctrl boolean
+X-Git-Url: http://xenbits.xenproject.org/gitweb/?p=xen.git;a=commitdiff_plain;h=35d0ea6726f8f013cbf3699a90309136896ae55e
+
+x86/spec-ctrl: Drop use_spec_ctrl boolean
+
+Several bugfixes have reduced the utility of this variable from its original
+purpose, and now all it does is aid in the setup of SCF_ist_wrmsr.
+
+Simplify the logic by dropping the variable, and doubling up the setting of
+SCF_ist_wrmsr for the PV and HVM blocks, which will make the AMD SPEC_CTRL
+support easier to follow. Leave a comment explaining why SCF_ist_wrmsr is
+still necessary for the VMExit case.
+
+No functional change.
+
+Signed-off-by: Andrew Cooper
+Reviewed-by: Jan Beulich
+(cherry picked from commit ec083bf552c35e10347449e21809f4780f8155d2)
+---
+
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index f70535b6e7..e85b0c0c7d 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -898,7 +898,7 @@ static __init void mds_calculations(uint64_t caps)
+ void __init init_speculation_mitigations(void)
+ {
+     enum ind_thunk thunk = THUNK_DEFAULT;
+-    bool use_spec_ctrl = false, ibrs = false, hw_smt_enabled;
++    bool ibrs = false, hw_smt_enabled;
+     bool cpu_has_bug_taa;
+     uint64_t caps = 0;
+
+@@ -987,19 +987,21 @@ void __init init_speculation_mitigations(void)
+     {
+         if ( opt_msr_sc_pv )
+         {
+-            use_spec_ctrl = true;
++            default_spec_ctrl_flags |= SCF_ist_wrmsr;
+             setup_force_cpu_cap(X86_FEATURE_SC_MSR_PV);
+         }
+
+         if ( opt_msr_sc_hvm )
+         {
+-            use_spec_ctrl = true;
++            /*
++             * While the guest MSR_SPEC_CTRL value is loaded/saved atomically,
++             * Xen's value is not restored atomically. An early NMI hitting
++             * the VMExit path needs to restore Xen's value for safety.
++ */ ++ default_spec_ctrl_flags |= SCF_ist_wrmsr; + setup_force_cpu_cap(X86_FEATURE_SC_MSR_HVM); + } + +- if ( use_spec_ctrl ) +- default_spec_ctrl_flags |= SCF_ist_wrmsr; +- + if ( ibrs ) + default_xen_spec_ctrl |= SPEC_CTRL_IBRS; + } diff --git a/xen.spec b/xen.spec index 4e223ec..a7fd717 100644 --- a/xen.spec +++ b/xen.spec @@ -58,7 +58,7 @@ Summary: Xen is a virtual machine monitor Name: xen Version: 4.14.4 -Release: 1%{?dist} +Release: 2%{?dist} License: GPLv2+ and LGPLv2+ and BSD URL: http://xen.org/ Source0: https://downloads.xenproject.org/release/xen/%{version}/xen-%{version}.tar.gz @@ -118,6 +118,14 @@ Patch47: xen.git-d6627cf1b63ce57a6a7e2c1800dbc50eed742c32.patch Patch48: xen.git-d8099d94dfaa3573bd86ebfc457cbc8f70a3ecda.patch Patch49: xen.git-8169f82049efb5b2044b33aa482ba3a136b7804d.patch Patch56: xsa376.patch +Patch57: xsa398-4.14-1-xen-arm-Introduce-new-Arm-processors.patch +Patch58: xsa398-4.14-2-xen-arm-move-errata-CSV2-check-earlier.patch +Patch59: xsa398-4.14-3-xen-arm-Add-ECBHB-and-CLEARBHB-ID-fields.patch +Patch60: xsa398-4.14-4-xen-arm-Add-Spectre-BHB-handling.patch +Patch61: xsa398-4.14-5-xen-arm-Allow-to-discover-and-use-SMCCC_ARCH_WORKARO.patch +Patch62: xen.git-35d0ea6726f8f013cbf3699a90309136896ae55e.patch +Patch63: xen.git-1a52e3946d9b04eb8a38d561524e42556cdeb4fb.patch +Patch64: xsa398-4.14-6-x86-spec-ctrl-Cease-using-thunk-lfence-on-AMD.patch %if %build_qemutrad @@ -332,6 +340,14 @@ manage Xen virtual machines. %patch48 -p1 %patch49 -p1 %patch56 -p1 +%patch57 -p1 +%patch58 -p1 +%patch59 -p1 +%patch60 -p1 +%patch61 -p1 +%patch62 -p1 +%patch63 -p1 +%patch64 -p1 # qemu-xen-traditional patches pushd tools/qemu-xen-traditional @@ -938,6 +954,10 @@ fi %endif %changelog +* Sat Mar 12 2022 Michael Young - 4.14.4-2 +- Multiple speculative security issues [XSA-398] +- additional patches so above applies cleanly + * Thu Feb 03 2022 Michael Young - 4.14.4-1 - update to xen-4.14.4 remove or adjust patches now included or superceded upstream diff --git a/xsa398-4.14-1-xen-arm-Introduce-new-Arm-processors.patch b/xsa398-4.14-1-xen-arm-Introduce-new-Arm-processors.patch new file mode 100644 index 0000000..e6b2569 --- /dev/null +++ b/xsa398-4.14-1-xen-arm-Introduce-new-Arm-processors.patch @@ -0,0 +1,63 @@ +From 021466aa73caaa0c5983f02203678e649dd4d22c Mon Sep 17 00:00:00 2001 +From: Bertrand Marquis +Date: Tue, 15 Feb 2022 10:37:51 +0000 +Subject: xen/arm: Introduce new Arm processors + +Add some new processor identifiers in processor.h and sync Xen +definitions with status of Linux 5.17 (declared in +arch/arm64/include/asm/cputype.h). + +This is part of XSA-398 / CVE-2022-23960. 
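
For context on how these identifiers are consumed: a part number only matters once it is combined with the implementer into a MIDR match value, which is what the errata table entries later in this series key on. The macro below is an illustrative sketch of that composition, with field positions taken from the Arm ARM; it is not Xen's exact MIDR_CPU_MODEL definition.

    /* MIDR_EL1 layout: implementer [31:24], variant [23:20],
     * architecture [19:16], part number [15:4], revision [3:0]. */
    #define EXAMPLE_MIDR_MODEL(imp, part) (((imp) << 24) | ((part) << 4))
    /* e.g. EXAMPLE_MIDR_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77),
     * which the cpuerrata table later matches via
     * MIDR_ALL_VERSIONS(MIDR_CORTEX_A77). */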
+ +Signed-off-by: Bertrand Marquis +Acked-by: Julien Grall +(cherry picked from commit 35d1b85a6b43483f6bd007d48757434e54743e98) + +diff --git a/xen/include/asm-arm/processor.h b/xen/include/asm-arm/processor.h +index 87c8136022df..17cc5cf486f9 100644 +--- a/xen/include/asm-arm/processor.h ++++ b/xen/include/asm-arm/processor.h +@@ -53,6 +53,7 @@ + #define ARM_CPU_PART_CORTEX_A17 0xC0E + #define ARM_CPU_PART_CORTEX_A15 0xC0F + #define ARM_CPU_PART_CORTEX_A53 0xD03 ++#define ARM_CPU_PART_CORTEX_A35 0xD04 + #define ARM_CPU_PART_CORTEX_A55 0xD05 + #define ARM_CPU_PART_CORTEX_A57 0xD07 + #define ARM_CPU_PART_CORTEX_A72 0xD08 +@@ -60,11 +61,20 @@ + #define ARM_CPU_PART_CORTEX_A75 0xD0A + #define ARM_CPU_PART_CORTEX_A76 0xD0B + #define ARM_CPU_PART_NEOVERSE_N1 0xD0C ++#define ARM_CPU_PART_CORTEX_A77 0xD0D ++#define ARM_CPU_PART_NEOVERSE_V1 0xD40 ++#define ARM_CPU_PART_CORTEX_A78 0xD41 ++#define ARM_CPU_PART_CORTEX_X1 0xD44 ++#define ARM_CPU_PART_CORTEX_A710 0xD47 ++#define ARM_CPU_PART_CORTEX_X2 0xD48 ++#define ARM_CPU_PART_NEOVERSE_N2 0xD49 ++#define ARM_CPU_PART_CORTEX_A78C 0xD4B + + #define MIDR_CORTEX_A12 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A12) + #define MIDR_CORTEX_A17 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A17) + #define MIDR_CORTEX_A15 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A15) + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) ++#define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) + #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) + #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) + #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) +@@ -72,6 +82,14 @@ + #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) + #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) + #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) ++#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) ++#define MIDR_NEOVERSE_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_V1) ++#define MIDR_CORTEX_A78 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78) ++#define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1) ++#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) ++#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2) ++#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) ++#define MIDR_CORTEX_A78C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C) + + /* MPIDR Multiprocessor Affinity Register */ + #define _MPIDR_UP (30) diff --git a/xsa398-4.14-2-xen-arm-move-errata-CSV2-check-earlier.patch b/xsa398-4.14-2-xen-arm-move-errata-CSV2-check-earlier.patch new file mode 100644 index 0000000..556b9c5 --- /dev/null +++ b/xsa398-4.14-2-xen-arm-move-errata-CSV2-check-earlier.patch @@ -0,0 +1,53 @@ +From 6da7a845fb476ef7395185ec08a58c76ebd8c442 Mon Sep 17 00:00:00 2001 +From: Bertrand Marquis +Date: Tue, 15 Feb 2022 10:39:47 +0000 +Subject: xen/arm: move errata CSV2 check earlier + +CSV2 availability check is done after printing to the user that +workaround 1 will be used. Move the check before to prevent saying to the +user that workaround 1 is used when it is not because it is not needed. +This will also allow to reuse install_bp_hardening_vec function for +other use cases. 
+ +Code previously returning "true", now returns "0" to conform to +enable_smccc_arch_workaround_1 returning an int and surrounding code +doing a "return 0" if workaround is not needed. + +This is part of XSA-398 / CVE-2022-23960. + +Signed-off-by: Bertrand Marquis +Reviewed-by: Julien Grall +(cherry picked from commit 599616d70eb886b9ad0ef9d6b51693ce790504ba) + +diff --git a/xen/arch/arm/cpuerrata.c b/xen/arch/arm/cpuerrata.c +index 66d9a1e45cf8..9d79e3bad7e8 100644 +--- a/xen/arch/arm/cpuerrata.c ++++ b/xen/arch/arm/cpuerrata.c +@@ -103,13 +103,6 @@ install_bp_hardening_vec(const struct arm_cpu_capabilities *entry, + printk(XENLOG_INFO "CPU%u will %s on exception entry\n", + smp_processor_id(), desc); + +- /* +- * No need to install hardened vector when the processor has +- * ID_AA64PRF0_EL1.CSV2 set. +- */ +- if ( cpu_data[smp_processor_id()].pfr64.csv2 ) +- return true; +- + spin_lock(&bp_lock); + + /* +@@ -168,6 +161,13 @@ static int enable_smccc_arch_workaround_1(void *data) + if ( !entry->matches(entry) ) + return 0; + ++ /* ++ * No need to install hardened vector when the processor has ++ * ID_AA64PRF0_EL1.CSV2 set. ++ */ ++ if ( cpu_data[smp_processor_id()].pfr64.csv2 ) ++ return 0; ++ + if ( smccc_ver < SMCCC_VERSION(1, 1) ) + goto warn; + diff --git a/xsa398-4.14-3-xen-arm-Add-ECBHB-and-CLEARBHB-ID-fields.patch b/xsa398-4.14-3-xen-arm-Add-ECBHB-and-CLEARBHB-ID-fields.patch new file mode 100644 index 0000000..8b9bc8a --- /dev/null +++ b/xsa398-4.14-3-xen-arm-Add-ECBHB-and-CLEARBHB-ID-fields.patch @@ -0,0 +1,76 @@ +From ee4b53ae1b95966fd9a491668f0eca73028925e1 Mon Sep 17 00:00:00 2001 +From: Bertrand Marquis +Date: Wed, 23 Feb 2022 09:42:18 +0000 +Subject: xen/arm: Add ECBHB and CLEARBHB ID fields + +Introduce ID coprocessor register ID_AA64ISAR2_EL1. +Add definitions in cpufeature and sysregs of ECBHB field in mmfr1 and +CLEARBHB in isar2 ID coprocessor registers. + +This is part of XSA-398 / CVE-2022-23960. 
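
To make the new fields easier to locate: CLEARBHB is the top nibble of ID_AA64ISAR2_EL1 and ECBHB is the top nibble of ID_AA64MMFR1_EL1, matching the bitfields added to struct cpuinfo_arm below; Xen itself reads the registers with READ_SYSREG64 in identify_cpu() and then checks the cached copy. A minimal, self-contained sketch of those bit positions (the helper names are invented for the example):

    #include <stdbool.h>
    #include <stdint.h>

    /* Positions per the Arm ARM: ID_AA64ISAR2_EL1.CLEARBHB is bits [31:28],
     * ID_AA64MMFR1_EL1.ECBHB is bits [63:60]. */
    static bool example_has_clearbhb(uint64_t isar2) { return (isar2 >> 28) & 0xf; }
    static bool example_has_ecbhb(uint64_t mmfr1)    { return (mmfr1 >> 60) & 0xf; }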
+ +Signed-off-by: Bertrand Marquis +Acked-by: Julien Grall +(cherry picked from commit 4b68d12d98b8790d8002fcc2c25a9d713374a4d7) + +diff --git a/xen/arch/arm/cpufeature.c b/xen/arch/arm/cpufeature.c +index 44126dbf0723..13dac7ccaf94 100644 +--- a/xen/arch/arm/cpufeature.c ++++ b/xen/arch/arm/cpufeature.c +@@ -117,6 +117,7 @@ void identify_cpu(struct cpuinfo_arm *c) + + c->isa64.bits[0] = READ_SYSREG64(ID_AA64ISAR0_EL1); + c->isa64.bits[1] = READ_SYSREG64(ID_AA64ISAR1_EL1); ++ c->isa64.bits[2] = READ_SYSREG64(ID_AA64ISAR2_EL1); + #endif + + c->pfr32.bits[0] = READ_SYSREG32(ID_PFR0_EL1); +diff --git a/xen/include/asm-arm/arm64/sysregs.h b/xen/include/asm-arm/arm64/sysregs.h +index c60029d38f5b..cfd2e1d48699 100644 +--- a/xen/include/asm-arm/arm64/sysregs.h ++++ b/xen/include/asm-arm/arm64/sysregs.h +@@ -57,6 +57,10 @@ + #define ICH_AP1R2_EL2 __AP1Rx_EL2(2) + #define ICH_AP1R3_EL2 __AP1Rx_EL2(3) + ++#ifndef ID_AA64ISAR2_EL1 ++#define ID_AA64ISAR2_EL1 S3_0_C0_C6_2 ++#endif ++ + /* Access to system registers */ + + #define READ_SYSREG32(name) ((uint32_t)READ_SYSREG64(name)) +diff --git a/xen/include/asm-arm/cpufeature.h b/xen/include/asm-arm/cpufeature.h +index 016a9fe2039a..7be4ee8cf821 100644 +--- a/xen/include/asm-arm/cpufeature.h ++++ b/xen/include/asm-arm/cpufeature.h +@@ -188,12 +188,26 @@ struct cpuinfo_arm { + unsigned long lo:4; + unsigned long pan:4; + unsigned long __res1:8; +- unsigned long __res2:32; ++ unsigned long __res2:28; ++ unsigned long ecbhb:4; + }; + } mm64; + +- struct { +- uint64_t bits[2]; ++ union { ++ uint64_t bits[3]; ++ struct { ++ /* ISAR0 */ ++ unsigned long __res0:64; ++ ++ /* ISAR1 */ ++ unsigned long __res1:64; ++ ++ /* ISAR2 */ ++ unsigned long __res3:28; ++ unsigned long clearbhb:4; ++ ++ unsigned long __res4:32; ++ }; + } isa64; + + #endif diff --git a/xsa398-4.14-4-xen-arm-Add-Spectre-BHB-handling.patch b/xsa398-4.14-4-xen-arm-Add-Spectre-BHB-handling.patch new file mode 100644 index 0000000..18e01c6 --- /dev/null +++ b/xsa398-4.14-4-xen-arm-Add-Spectre-BHB-handling.patch @@ -0,0 +1,351 @@ +From fc56dd212e4574c5fd77f830d077036b330dc1b5 Mon Sep 17 00:00:00 2001 +From: Rahul Singh +Date: Mon, 14 Feb 2022 18:47:32 +0000 +Subject: xen/arm: Add Spectre BHB handling + +This commit is adding Spectre BHB handling to Xen on Arm. +The commit is introducing new alternative code to be executed during +exception entry: +- SMCC workaround 3 call +- loop workaround (with 8, 24 or 32 iterations) +- use of new clearbhb instruction + +Cpuerrata is modified by this patch to apply the required workaround for +CPU affected by Spectre BHB when CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR is +enabled. + +To do this the system previously used to apply smcc workaround 1 is +reused and new alternative code to be copied in the exception handler is +introduced. + +To define the type of workaround required by a processor, 4 new cpu +capabilities are introduced (for each number of loop and for smcc +workaround 3). + +When a processor is affected, enable_spectre_bhb_workaround is called +and if the processor does not have CSV2 set to 3 or ECBHB feature (which +would mean that the processor is doing what is required in hardware), +the proper code is enabled at exception entry. + +In the case where workaround 3 is not supported by the firmware, we +enable workaround 1 when possible as it will also mitigate Spectre BHB +on systems without CSV2. + +This is part of XSA-398 / CVE-2022-23960. 
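
The selection order described above can be modelled as a small decision function. This is a self-contained, illustrative sketch only; the names are invented for the example, and the real logic lives in enable_spectre_bhb_workaround() in the cpuerrata.c hunk below:

    #include <stdbool.h>

    enum bhb_mitigation { BHB_NONE, BHB_CLEAR_INSN, BHB_LOOP,
                          BHB_SMCCC_WA3, BHB_SMCCC_WA1 };

    /* csv2/ecbhb/clearbhb come from the CPU ID registers added in the previous
     * patch; loop_count (8/24/32) and the firmware flags come from the errata
     * table and SMCCC feature probing. */
    static enum bhb_mitigation choose_bhb_mitigation(unsigned int csv2, bool ecbhb,
                                                     bool clearbhb,
                                                     unsigned int loop_count,
                                                     bool fw_has_wa3, bool fw_has_wa1)
    {
        if ( csv2 == 3 || ecbhb )
            return BHB_NONE;        /* hardware already limits/clears the BHB */
        if ( clearbhb )
            return BHB_CLEAR_INSN;  /* new CLEARBHB instruction on entry */
        if ( loop_count )
            return BHB_LOOP;        /* 8/24/32-iteration branch loop on entry */
        if ( fw_has_wa3 )
            return BHB_SMCCC_WA3;   /* SMCCC_ARCH_WORKAROUND_3 firmware call */
        return fw_has_wa1 ? BHB_SMCCC_WA1 : BHB_NONE; /* WA1 also covers CSV2=0 */
    }

For example, a Cortex-A76 (loop count 24, no CLEARBHB) ends up with the loop workaround regardless of firmware support, whereas a CPU listed only with ARM_WORKAROUND_BHB_SMCC_3 depends on the firmware advertising workaround 3, or failing that workaround 1.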
+ +Signed-off-by: Bertrand Marquis +Signed-off-by: Rahul Singh +Acked-by: Julien Grall +(cherry picked from commit 62c91eb66a2904eefb1d1d9642e3697a1e3c3a3c) + +diff --git a/xen/arch/arm/arm64/bpi.S b/xen/arch/arm/arm64/bpi.S +index d8743d955c4a..4e6382522048 100644 +--- a/xen/arch/arm/arm64/bpi.S ++++ b/xen/arch/arm/arm64/bpi.S +@@ -58,16 +58,42 @@ ENTRY(__bp_harden_hyp_vecs_start) + .endr + ENTRY(__bp_harden_hyp_vecs_end) + +-ENTRY(__smccc_workaround_1_smc_start) ++.macro mitigate_spectre_bhb_loop count ++ENTRY(__mitigate_spectre_bhb_loop_start_\count) ++ stp x0, x1, [sp, #-16]! ++ mov x0, \count ++.Lspectre_bhb_loop\@: ++ b . + 4 ++ subs x0, x0, #1 ++ b.ne .Lspectre_bhb_loop\@ ++ sb ++ ldp x0, x1, [sp], #16 ++ENTRY(__mitigate_spectre_bhb_loop_end_\count) ++.endm ++ ++.macro smccc_workaround num smcc_id ++ENTRY(__smccc_workaround_smc_start_\num) + sub sp, sp, #(8 * 4) + stp x0, x1, [sp, #(8 * 2)] + stp x2, x3, [sp, #(8 * 0)] +- mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID ++ mov w0, \smcc_id + smc #0 + ldp x2, x3, [sp, #(8 * 0)] + ldp x0, x1, [sp, #(8 * 2)] + add sp, sp, #(8 * 4) +-ENTRY(__smccc_workaround_1_smc_end) ++ENTRY(__smccc_workaround_smc_end_\num) ++.endm ++ ++ENTRY(__mitigate_spectre_bhb_clear_insn_start) ++ clearbhb ++ isb ++ENTRY(__mitigate_spectre_bhb_clear_insn_end) ++ ++mitigate_spectre_bhb_loop 8 ++mitigate_spectre_bhb_loop 24 ++mitigate_spectre_bhb_loop 32 ++smccc_workaround 1, #ARM_SMCCC_ARCH_WORKAROUND_1_FID ++smccc_workaround 3, #ARM_SMCCC_ARCH_WORKAROUND_3_FID + + /* + * Local variables: +diff --git a/xen/arch/arm/cpuerrata.c b/xen/arch/arm/cpuerrata.c +index 9d79e3bad7e8..1c1149b2c795 100644 +--- a/xen/arch/arm/cpuerrata.c ++++ b/xen/arch/arm/cpuerrata.c +@@ -145,7 +145,16 @@ install_bp_hardening_vec(const struct arm_cpu_capabilities *entry, + return ret; + } + +-extern char __smccc_workaround_1_smc_start[], __smccc_workaround_1_smc_end[]; ++extern char __smccc_workaround_smc_start_1[], __smccc_workaround_smc_end_1[]; ++extern char __smccc_workaround_smc_start_3[], __smccc_workaround_smc_end_3[]; ++extern char __mitigate_spectre_bhb_clear_insn_start[], ++ __mitigate_spectre_bhb_clear_insn_end[]; ++extern char __mitigate_spectre_bhb_loop_start_8[], ++ __mitigate_spectre_bhb_loop_end_8[]; ++extern char __mitigate_spectre_bhb_loop_start_24[], ++ __mitigate_spectre_bhb_loop_end_24[]; ++extern char __mitigate_spectre_bhb_loop_start_32[], ++ __mitigate_spectre_bhb_loop_end_32[]; + + static int enable_smccc_arch_workaround_1(void *data) + { +@@ -177,8 +186,8 @@ static int enable_smccc_arch_workaround_1(void *data) + if ( (int)res.a0 < 0 ) + goto warn; + +- return !install_bp_hardening_vec(entry,__smccc_workaround_1_smc_start, +- __smccc_workaround_1_smc_end, ++ return !install_bp_hardening_vec(entry,__smccc_workaround_smc_start_1, ++ __smccc_workaround_smc_end_1, + "call ARM_SMCCC_ARCH_WORKAROUND_1"); + + warn: +@@ -193,6 +202,93 @@ static int enable_smccc_arch_workaround_1(void *data) + return 0; + } + ++/* ++ * Spectre BHB Mitigation ++ * ++ * CPU is either: ++ * - Having CVS2.3 so it is not affected. ++ * - Having ECBHB and is clearing the branch history buffer when an exception ++ * to a different exception level is happening so no mitigation is needed. ++ * - Mitigating using a loop on exception entry (number of loop depending on ++ * the CPU). ++ * - Mitigating using the firmware. 
++ */ ++static int enable_spectre_bhb_workaround(void *data) ++{ ++ const struct arm_cpu_capabilities *entry = data; ++ ++ /* ++ * Enable callbacks are called on every CPU based on the capabilities, so ++ * double-check whether the CPU matches the entry. ++ */ ++ if ( !entry->matches(entry) ) ++ return 0; ++ ++ if ( cpu_data[smp_processor_id()].pfr64.csv2 == 3 ) ++ return 0; ++ ++ if ( cpu_data[smp_processor_id()].mm64.ecbhb ) ++ return 0; ++ ++ if ( cpu_data[smp_processor_id()].isa64.clearbhb ) ++ return !install_bp_hardening_vec(entry, ++ __mitigate_spectre_bhb_clear_insn_start, ++ __mitigate_spectre_bhb_clear_insn_end, ++ "use clearBHB instruction"); ++ ++ /* Apply solution depending on hwcaps set on arm_errata */ ++ if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_8) ) ++ return !install_bp_hardening_vec(entry, ++ __mitigate_spectre_bhb_loop_start_8, ++ __mitigate_spectre_bhb_loop_end_8, ++ "use 8 loops workaround"); ++ ++ if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_24) ) ++ return !install_bp_hardening_vec(entry, ++ __mitigate_spectre_bhb_loop_start_24, ++ __mitigate_spectre_bhb_loop_end_24, ++ "use 24 loops workaround"); ++ ++ if ( cpus_have_cap(ARM_WORKAROUND_BHB_LOOP_32) ) ++ return !install_bp_hardening_vec(entry, ++ __mitigate_spectre_bhb_loop_start_32, ++ __mitigate_spectre_bhb_loop_end_32, ++ "use 32 loops workaround"); ++ ++ if ( cpus_have_cap(ARM_WORKAROUND_BHB_SMCC_3) ) ++ { ++ struct arm_smccc_res res; ++ ++ if ( smccc_ver < SMCCC_VERSION(1, 1) ) ++ goto warn; ++ ++ arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FID, ++ ARM_SMCCC_ARCH_WORKAROUND_3_FID, &res); ++ /* The return value is in the lower 32-bits. */ ++ if ( (int)res.a0 < 0 ) ++ { ++ /* ++ * On processor affected with CSV2=0, workaround 1 will mitigate ++ * both Spectre v2 and BHB so use it when available ++ */ ++ if ( enable_smccc_arch_workaround_1(data) ) ++ return 1; ++ ++ goto warn; ++ } ++ ++ return !install_bp_hardening_vec(entry,__smccc_workaround_smc_start_3, ++ __smccc_workaround_smc_end_3, ++ "call ARM_SMCCC_ARCH_WORKAROUND_3"); ++ } ++ ++warn: ++ printk_once("**** No support for any spectre BHB workaround. ****\n" ++ "**** Please update your firmware. 
****\n"); ++ ++ return 0; ++} ++ + #endif /* CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR */ + + /* Hardening Branch predictor code for Arm32 */ +@@ -438,19 +534,77 @@ static const struct arm_cpu_capabilities arm_errata[] = { + }, + { + .capability = ARM_HARDEN_BRANCH_PREDICTOR, +- MIDR_ALL_VERSIONS(MIDR_CORTEX_A72), ++ MIDR_RANGE(MIDR_CORTEX_A72, 0, 1 << MIDR_VARIANT_SHIFT), + .enable = enable_smccc_arch_workaround_1, + }, + { +- .capability = ARM_HARDEN_BRANCH_PREDICTOR, ++ .capability = ARM_WORKAROUND_BHB_SMCC_3, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), +- .enable = enable_smccc_arch_workaround_1, ++ .enable = enable_spectre_bhb_workaround, + }, + { +- .capability = ARM_HARDEN_BRANCH_PREDICTOR, ++ .capability = ARM_WORKAROUND_BHB_SMCC_3, + MIDR_ALL_VERSIONS(MIDR_CORTEX_A75), +- .enable = enable_smccc_arch_workaround_1, ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ /* spectre BHB */ ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_8, ++ MIDR_RANGE(MIDR_CORTEX_A72, 1 << MIDR_VARIANT_SHIFT, ++ (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_24, ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A76), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_24, ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A77), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_32, ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_32, ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_32, ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X1), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_32, ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_X2), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_32, ++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), ++ .enable = enable_spectre_bhb_workaround, + }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_24, ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_32, ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ { ++ .capability = ARM_WORKAROUND_BHB_LOOP_32, ++ MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1), ++ .enable = enable_spectre_bhb_workaround, ++ }, ++ + #endif + #ifdef CONFIG_ARM32_HARDEN_BRANCH_PREDICTOR + { +diff --git a/xen/include/asm-arm/arm64/macros.h b/xen/include/asm-arm/arm64/macros.h +index f981b4f43e84..5100aed6e3ec 100644 +--- a/xen/include/asm-arm/arm64/macros.h ++++ b/xen/include/asm-arm/arm64/macros.h +@@ -21,6 +21,11 @@ + ldr \dst, [\dst, \tmp] + .endm + ++ /* clearbhb instruction clearing the branch history */ ++ .macro clearbhb ++ hint #22 ++ .endm ++ + /* + * Register aliases. 
+ */ +diff --git a/xen/include/asm-arm/cpufeature.h b/xen/include/asm-arm/cpufeature.h +index 7be4ee8cf821..14c7f7d218e2 100644 +--- a/xen/include/asm-arm/cpufeature.h ++++ b/xen/include/asm-arm/cpufeature.h +@@ -46,8 +46,12 @@ + #define ARM_SMCCC_1_1 8 + #define ARM64_WORKAROUND_AT_SPECULATE 9 + #define ARM_WORKAROUND_858921 10 ++#define ARM_WORKAROUND_BHB_LOOP_8 11 ++#define ARM_WORKAROUND_BHB_LOOP_24 12 ++#define ARM_WORKAROUND_BHB_LOOP_32 13 ++#define ARM_WORKAROUND_BHB_SMCC_3 14 + +-#define ARM_NCAPS 11 ++#define ARM_NCAPS 15 + + #ifndef __ASSEMBLY__ + +diff --git a/xen/include/asm-arm/smccc.h b/xen/include/asm-arm/smccc.h +index 9d94beb3df2d..b3dbeecc90ad 100644 +--- a/xen/include/asm-arm/smccc.h ++++ b/xen/include/asm-arm/smccc.h +@@ -334,6 +334,12 @@ void __arm_smccc_1_0_smc(register_t a0, register_t a1, register_t a2, + ARM_SMCCC_OWNER_ARCH, \ + 0x7FFF) + ++#define ARM_SMCCC_ARCH_WORKAROUND_3_FID \ ++ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ ++ ARM_SMCCC_CONV_32, \ ++ ARM_SMCCC_OWNER_ARCH, \ ++ 0x3FFF) ++ + /* SMCCC error codes */ + #define ARM_SMCCC_NOT_REQUIRED (-2) + #define ARM_SMCCC_ERR_UNKNOWN_FUNCTION (-1) diff --git a/xsa398-4.14-5-xen-arm-Allow-to-discover-and-use-SMCCC_ARCH_WORKARO.patch b/xsa398-4.14-5-xen-arm-Allow-to-discover-and-use-SMCCC_ARCH_WORKARO.patch new file mode 100644 index 0000000..dc4db67 --- /dev/null +++ b/xsa398-4.14-5-xen-arm-Allow-to-discover-and-use-SMCCC_ARCH_WORKARO.patch @@ -0,0 +1,91 @@ +From 7cebd77c80ce87f84c63a6043a5ad7115ccab9d5 Mon Sep 17 00:00:00 2001 +From: Bertrand Marquis +Date: Thu, 17 Feb 2022 14:52:54 +0000 +Subject: xen/arm: Allow to discover and use SMCCC_ARCH_WORKAROUND_3 + +Allow guest to discover whether or not SMCCC_ARCH_WORKAROUND_3 is +supported and create a fastpath in the code to handle guests request to +do the workaround. + +The function SMCCC_ARCH_WORKAROUND_3 will be called by the guest for +flushing the branch history. So we want the handling to be as fast as +possible. + +As the mitigation is applied on every guest exit, we can check for the +call before saving all context and return very early. + +This is part of XSA-398 / CVE-2022-23960. + +Signed-off-by: Bertrand Marquis +Reviewed-by: Julien Grall +(cherry picked from commit c0a56ea0fd92ecb471936b7355ddbecbaea3707c) + +diff --git a/xen/arch/arm/arm64/entry.S b/xen/arch/arm/arm64/entry.S +index 175ea2981e72..a8c214506786 100644 +--- a/xen/arch/arm/arm64/entry.S ++++ b/xen/arch/arm/arm64/entry.S +@@ -338,16 +338,26 @@ guest_sync: + cbnz x1, guest_sync_slowpath /* should be 0 for HVC #0 */ + + /* +- * Fastest path possible for ARM_SMCCC_ARCH_WORKAROUND_1. +- * The workaround has already been applied on the exception ++ * Fastest path possible for ARM_SMCCC_ARCH_WORKAROUND_1 and ++ * ARM_SMCCC_ARCH_WORKAROUND_3. ++ * The workaround needed has already been applied on the exception + * entry from the guest, so let's quickly get back to the guest. + * + * Note that eor is used because the function identifier cannot + * be encoded as an immediate for cmp. + */ + eor w0, w0, #ARM_SMCCC_ARCH_WORKAROUND_1_FID +- cbnz w0, check_wa2 ++ cbz w0, fastpath_out_workaround + ++ /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ ++ eor w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_1_FID ^ ARM_SMCCC_ARCH_WORKAROUND_2_FID) ++ cbz w0, wa2_ssbd ++ ++ /* Fastpath out for ARM_SMCCC_ARCH_WORKAROUND_3 */ ++ eor w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_2_FID ^ ARM_SMCCC_ARCH_WORKAROUND_3_FID) ++ cbnz w0, guest_sync_slowpath ++ ++fastpath_out_workaround: + /* + * Clobber both x0 and x1 to prevent leakage. 
Note that thanks + * the eor, x0 = 0. +@@ -356,10 +366,7 @@ guest_sync: + eret + sb + +-check_wa2: +- /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */ +- eor w0, w0, #(ARM_SMCCC_ARCH_WORKAROUND_1_FID ^ ARM_SMCCC_ARCH_WORKAROUND_2_FID) +- cbnz w0, guest_sync_slowpath ++wa2_ssbd: + #ifdef CONFIG_ARM_SSBD + alternative_cb arm_enable_wa2_handling + b wa2_end +diff --git a/xen/arch/arm/vsmc.c b/xen/arch/arm/vsmc.c +index a36db15fffc0..b633ff2fe897 100644 +--- a/xen/arch/arm/vsmc.c ++++ b/xen/arch/arm/vsmc.c +@@ -124,6 +124,10 @@ static bool handle_arch(struct cpu_user_regs *regs) + break; + } + break; ++ case ARM_SMCCC_ARCH_WORKAROUND_3_FID: ++ if ( cpus_have_cap(ARM_WORKAROUND_BHB_SMCC_3) ) ++ ret = 0; ++ break; + } + + set_user_reg(regs, 0, ret); +@@ -132,6 +136,7 @@ static bool handle_arch(struct cpu_user_regs *regs) + } + + case ARM_SMCCC_ARCH_WORKAROUND_1_FID: ++ case ARM_SMCCC_ARCH_WORKAROUND_3_FID: + /* No return value */ + return true; + diff --git a/xsa398-4.14-6-x86-spec-ctrl-Cease-using-thunk-lfence-on-AMD.patch b/xsa398-4.14-6-x86-spec-ctrl-Cease-using-thunk-lfence-on-AMD.patch new file mode 100644 index 0000000..e9efec5 --- /dev/null +++ b/xsa398-4.14-6-x86-spec-ctrl-Cease-using-thunk-lfence-on-AMD.patch @@ -0,0 +1,118 @@ +From ca304edd3ba8c19211107fd2e898249987557ce5 Mon Sep 17 00:00:00 2001 +From: Andrew Cooper +Date: Mon, 7 Mar 2022 16:35:52 +0000 +Subject: x86/spec-ctrl: Cease using thunk=lfence on AMD + +AMD have updated their Spectre v2 guidance, and lfence/jmp is no longer +considered safe. AMD are recommending using retpoline everywhere. + +Retpoline is incompatible with CET. All CET-capable hardware has efficient +IBRS (specifically, not something retrofitted in microcode), so use IBRS (and +STIBP for consistency sake). + +This is a logical change on AMD, but not on Intel as the default calculations +would end up with these settings anyway. Leave behind a message if IBRS is +found to be missing. + +Also update the default heuristics to never select THUNK_LFENCE. This causes +AMD CPUs to change their default to retpoline. + +Also update the printed message to include the AMD MSR_SPEC_CTRL settings, and +STIBP now that we set it for consistency sake. + +This is part of XSA-398 / CVE-2021-26401. + +Signed-off-by: Andrew Cooper +Reviewed-by: Jan Beulich +(cherry picked from commit 8d03080d2a339840d3a59e0932a94f804e45110d) + +diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc +index fd8f82549152..c0bfbb7a5c27 100644 +--- a/docs/misc/xen-command-line.pandoc ++++ b/docs/misc/xen-command-line.pandoc +@@ -2140,9 +2140,9 @@ to use. + + If Xen was compiled with INDIRECT_THUNK support, `bti-thunk=` can be used to + select which of the thunks gets patched into the `__x86_indirect_thunk_%reg` +-locations. The default thunk is `retpoline` (generally preferred for Intel +-hardware), with the alternatives being `jmp` (a `jmp *%reg` gadget, minimal +-overhead), and `lfence` (an `lfence; jmp *%reg` gadget, preferred for AMD). ++locations. The default thunk is `retpoline` (generally preferred), with the ++alternatives being `jmp` (a `jmp *%reg` gadget, minimal overhead), and ++`lfence` (an `lfence; jmp *%reg` gadget). + + On hardware supporting IBRS (Indirect Branch Restricted Speculation), the + `ibrs=` option can be used to force or prevent Xen using the feature itself. 
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c +index 9301d95bd705..7ded6ecba197 100644 +--- a/xen/arch/x86/spec_ctrl.c ++++ b/xen/arch/x86/spec_ctrl.c +@@ -367,14 +367,19 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps) + "\n"); + + /* Settings for Xen's protection, irrespective of guests. */ +- printk(" Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s%s, Other:%s%s%s%s%s\n", ++ printk(" Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s%s%s, Other:%s%s%s%s%s\n", + thunk == THUNK_NONE ? "N/A" : + thunk == THUNK_RETPOLINE ? "RETPOLINE" : + thunk == THUNK_LFENCE ? "LFENCE" : + thunk == THUNK_JMP ? "JMP" : "?", +- !boot_cpu_has(X86_FEATURE_IBRSB) ? "No" : ++ (!boot_cpu_has(X86_FEATURE_IBRSB) && ++ !boot_cpu_has(X86_FEATURE_IBRS)) ? "No" : + (default_xen_spec_ctrl & SPEC_CTRL_IBRS) ? "IBRS+" : "IBRS-", +- !boot_cpu_has(X86_FEATURE_SSBD) ? "" : ++ (!boot_cpu_has(X86_FEATURE_STIBP) && ++ !boot_cpu_has(X86_FEATURE_AMD_STIBP)) ? "" : ++ (default_xen_spec_ctrl & SPEC_CTRL_STIBP) ? " STIBP+" : " STIBP-", ++ (!boot_cpu_has(X86_FEATURE_SSBD) && ++ !boot_cpu_has(X86_FEATURE_AMD_SSBD)) ? "" : + (default_xen_spec_ctrl & SPEC_CTRL_SSBD) ? " SSBD+" : " SSBD-", + !(caps & ARCH_CAPS_TSX_CTRL) ? "" : + (opt_tsx & 1) ? " TSX+" : " TSX-", +@@ -916,10 +921,23 @@ void __init init_speculation_mitigations(void) + /* + * First, disable the use of retpolines if Xen is using shadow stacks, as + * they are incompatible. ++ * ++ * In the absence of retpolines, IBRS needs to be used for speculative ++ * safety. All CET-capable hardware has efficient IBRS. + */ +- if ( cpu_has_xen_shstk && +- (opt_thunk == THUNK_DEFAULT || opt_thunk == THUNK_RETPOLINE) ) +- thunk = THUNK_JMP; ++ if ( cpu_has_xen_shstk ) ++ { ++ if ( !has_spec_ctrl ) ++ printk(XENLOG_WARNING "?!? CET active, but no MSR_SPEC_CTRL?\n"); ++ else if ( opt_ibrs == -1 ) ++ { ++ opt_ibrs = ibrs = true; ++ default_xen_spec_ctrl |= SPEC_CTRL_IBRS | SPEC_CTRL_STIBP; ++ } ++ ++ if ( opt_thunk == THUNK_DEFAULT || opt_thunk == THUNK_RETPOLINE ) ++ thunk = THUNK_JMP; ++ } + + /* + * Has the user specified any custom BTI mitigations? If so, follow their +@@ -939,16 +957,10 @@ void __init init_speculation_mitigations(void) + if ( IS_ENABLED(CONFIG_INDIRECT_THUNK) ) + { + /* +- * AMD's recommended mitigation is to set lfence as being dispatch +- * serialising, and to use IND_THUNK_LFENCE. +- */ +- if ( cpu_has_lfence_dispatch ) +- thunk = THUNK_LFENCE; +- /* +- * On Intel hardware, we'd like to use retpoline in preference to ++ * On all hardware, we'd like to use retpoline in preference to + * IBRS, but only if it is safe on this hardware. + */ +- else if ( retpoline_safe(caps) ) ++ if ( retpoline_safe(caps) ) + thunk = THUNK_RETPOLINE; + else if ( has_spec_ctrl ) + ibrs = true;