Diffstat (limited to '0045-x86-spec-ctrl-Mitigate-Cross-Thread-Return-Address-P.patch')
-rw-r--r--   0045-x86-spec-ctrl-Mitigate-Cross-Thread-Return-Address-P.patch   | 120
1 file changed, 120 insertions, 0 deletions
diff --git a/0045-x86-spec-ctrl-Mitigate-Cross-Thread-Return-Address-P.patch b/0045-x86-spec-ctrl-Mitigate-Cross-Thread-Return-Address-P.patch
new file mode 100644
index 0000000..1720bdd
--- /dev/null
+++ b/0045-x86-spec-ctrl-Mitigate-Cross-Thread-Return-Address-P.patch
@@ -0,0 +1,120 @@
+From 3685e754e6017c616769b28133286d06bf07b613 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Thu, 8 Sep 2022 21:27:58 +0100
+Subject: [PATCH 45/89] x86/spec-ctrl: Mitigate Cross-Thread Return Address
+ Predictions
+
+This is XSA-426 / CVE-2022-27672
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+(cherry picked from commit 63305e5392ec2d17b85e7996a97462744425db80)
+---
+ docs/misc/xen-command-line.pandoc | 2 +-
+ xen/arch/x86/include/asm/cpufeatures.h | 3 ++-
+ xen/arch/x86/include/asm/spec_ctrl.h | 15 +++++++++++++
+ xen/arch/x86/spec_ctrl.c | 31 +++++++++++++++++++++++---
+ 4 files changed, 46 insertions(+), 5 deletions(-)
+
+diff --git a/docs/misc/xen-command-line.pandoc b/docs/misc/xen-command-line.pandoc
+index 424b12cfb2..e7fe8b0cc9 100644
+--- a/docs/misc/xen-command-line.pandoc
++++ b/docs/misc/xen-command-line.pandoc
+@@ -2343,7 +2343,7 @@ guests to use.
+ on entry and exit. These blocks are necessary to virtualise support for
+ guests and if disabled, guests will be unable to use IBRS/STIBP/SSBD/etc.
+ * `rsb=` offers control over whether to overwrite the Return Stack Buffer /
+- Return Address Stack on entry to Xen.
++ Return Address Stack on entry to Xen and on idle.
+ * `md-clear=` offers control over whether to use VERW to flush
+ microarchitectural buffers on idle and exit from Xen. *Note: For
+ compatibility with development versions of this fix, `mds=` is also accepted
+diff --git a/xen/arch/x86/include/asm/cpufeatures.h b/xen/arch/x86/include/asm/cpufeatures.h
+index 865f110986..da0593de85 100644
+--- a/xen/arch/x86/include/asm/cpufeatures.h
++++ b/xen/arch/x86/include/asm/cpufeatures.h
+@@ -35,7 +35,8 @@ XEN_CPUFEATURE(SC_RSB_HVM, X86_SYNTH(19)) /* RSB overwrite needed for HVM
+ XEN_CPUFEATURE(XEN_SELFSNOOP, X86_SYNTH(20)) /* SELFSNOOP gets used by Xen itself */
+ XEN_CPUFEATURE(SC_MSR_IDLE, X86_SYNTH(21)) /* Clear MSR_SPEC_CTRL on idle */
+ XEN_CPUFEATURE(XEN_LBR, X86_SYNTH(22)) /* Xen uses MSR_DEBUGCTL.LBR */
+-/* Bits 23,24 unused. */
++/* Bit 23 unused. */
++XEN_CPUFEATURE(SC_RSB_IDLE, X86_SYNTH(24)) /* RSB overwrite needed for idle. */
+ XEN_CPUFEATURE(SC_VERW_IDLE, X86_SYNTH(25)) /* VERW used by Xen for idle */
+ XEN_CPUFEATURE(XEN_SHSTK, X86_SYNTH(26)) /* Xen uses CET Shadow Stacks */
+ XEN_CPUFEATURE(XEN_IBT, X86_SYNTH(27)) /* Xen uses CET Indirect Branch Tracking */
+diff --git a/xen/arch/x86/include/asm/spec_ctrl.h b/xen/arch/x86/include/asm/spec_ctrl.h
+index 6a77c39378..391973ef6a 100644
+--- a/xen/arch/x86/include/asm/spec_ctrl.h
++++ b/xen/arch/x86/include/asm/spec_ctrl.h
+@@ -159,6 +159,21 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
+ */
+ alternative_input("", "verw %[sel]", X86_FEATURE_SC_VERW_IDLE,
+ [sel] "m" (info->verw_sel));
++
++ /*
++ * Cross-Thread Return Address Predictions:
++ *
++ * On vulnerable systems, the return predictions (RSB/RAS) are statically
++ * partitioned between active threads. When entering idle, our entries
++ * are re-partitioned to allow the other threads to use them.
++ *
++ * In some cases, we might still have guest entries in the RAS, so flush
++ * them before they can be injected sideways into our sibling thread.
++ *
++ * (ab)use alternative_input() to specify clobbers.
++ */
++ alternative_input("", "DO_OVERWRITE_RSB", X86_FEATURE_SC_RSB_IDLE,
++ : "rax", "rcx");
+ }
+
+ /* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
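For readers who want to see the shape of the underlying technique, below is a
minimal sketch of the kind of RSB-stuffing loop that a macro like
DO_OVERWRITE_RSB expands to. This is an illustration under assumptions, not
Xen's actual macro: the entry count, loop structure and register use differ in
Xen, and overwrite_rsb_sketch() is a hypothetical name.

    /*
     * Illustrative RSB stuffing, assuming a 16-entry return predictor.
     * Each CALL pushes a benign return address (and predictor entry)
     * whose only speculative target is a harmless PAUSE/LFENCE trap.
     */
    static inline void overwrite_rsb_sketch(void)
    {
        unsigned long cnt;

        asm volatile ( "    mov  $16, %[cnt]\n\t"
                       "1:  call 2f\n\t"              /* fill one RSB/RAS entry */
                       "3:  pause\n\t"
                       "    lfence\n\t"
                       "    jmp  3b\n\t"              /* speculation trap */
                       "2:  dec  %[cnt]\n\t"
                       "    jnz  1b\n\t"
                       "    add  $(16 * 8), %%rsp\n\t" /* drop the pushed addresses */
                       : [cnt] "=&r" (cnt) : : "memory", "cc" );
    }

Once the predictor is full of these benign entries, re-partitioning it to the
sibling thread on idle no longer hands over guest-controlled targets.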
+diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
+index a320b81947..e80e2a5ed1 100644
+--- a/xen/arch/x86/spec_ctrl.c
++++ b/xen/arch/x86/spec_ctrl.c
+@@ -1327,13 +1327,38 @@ void __init init_speculation_mitigations(void)
+ * 3) Some CPUs have RSBs which are not full width, which allow the
+ * attacker's entries to alias Xen addresses.
+ *
++ * 4) Some CPUs have RSBs which are re-partitioned based on thread
++ * idleness, which allows an attacker to inject entries into the other
++ *    thread. We still activate the optimisation in this case, and
++ *    mitigate in the idle path, which has lower overhead.
++ *
+ * It is safe to turn off RSB stuffing when Xen is using SMEP itself, and
+ * 32bit PV guests are disabled, and when the RSB is full width.
+ */
+ BUILD_BUG_ON(RO_MPT_VIRT_START != PML4_ADDR(256));
+- if ( opt_rsb_pv == -1 && boot_cpu_has(X86_FEATURE_XEN_SMEP) &&
+- !opt_pv32 && rsb_is_full_width() )
+- opt_rsb_pv = 0;
++ if ( opt_rsb_pv == -1 )
++ {
++ opt_rsb_pv = (opt_pv32 || !boot_cpu_has(X86_FEATURE_XEN_SMEP) ||
++ !rsb_is_full_width());
++
++ /*
++ * Cross-Thread Return Address Predictions.
++ *
++ * Vulnerable systems are Zen1/Zen2 uarch, which is AMD Fam17h / Hygon
++ * Fam18h, when SMT is active.
++ *
++ * To mitigate, we must flush the RSB/RAS/RAP once between entering
++ * Xen and going idle.
++ *
++ * Most cases flush on entry to Xen anyway. The one case where we
++ * don't is when using the SMEP optimisation for PV guests. Flushing
++ * before going idle is less overhead than flushing on PV entry.
++ */
++ if ( !opt_rsb_pv && hw_smt_enabled &&
++ (boot_cpu_data.x86_vendor & (X86_VENDOR_AMD|X86_VENDOR_HYGON)) &&
++ (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) )
++ setup_force_cpu_cap(X86_FEATURE_SC_RSB_IDLE);
++ }
+
+ if ( opt_rsb_pv )
+ {
+--
+2.40.0
+
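To make the decision logic in the spec_ctrl.c hunk easier to see in isolation,
here is a self-contained restatement in plain C. The struct and helper names
are hypothetical; Xen's real code tests boot_cpu_data.x86_vendor,
boot_cpu_data.x86 and hw_smt_enabled directly, and forces
X86_FEATURE_SC_RSB_IDLE with setup_force_cpu_cap().

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical stand-in for the fields Xen reads out of boot_cpu_data. */
    struct cpu_id {
        bool    amd_or_hygon;   /* vendor is AMD or Hygon */
        uint8_t family;         /* 0x17 = AMD Fam17h (Zen1/2), 0x18 = Hygon Fam18h */
    };

    /*
     * The idle-time RSB flush is needed only when all three hold:
     *  (a) RSB stuffing on PV entry is off (the SMEP optimisation), so
     *      guest entries can survive in the predictor,
     *  (b) SMT is active, so an idling thread's partition is handed to
     *      its sibling, and
     *  (c) the part re-partitions its return predictor on idle
     *      (Zen1/Zen2, i.e. AMD Fam17h / Hygon Fam18h).
     */
    static bool needs_rsb_idle_flush(bool rsb_on_pv_entry, bool smt_enabled,
                                     const struct cpu_id *c)
    {
        return !rsb_on_pv_entry && smt_enabled && c->amd_or_hygon &&
               (c->family == 0x17 || c->family == 0x18);
    }

As a usage example: on a Zen2 host with SMT on and the SMEP optimisation in
use (no stuffing on PV entry), needs_rsb_idle_flush(false, true, &c) is true
and the idle path gains the overwrite; disabling SMT, or stuffing on every PV
entry, makes it false.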