x86/speculation: Fill RSB on vmexit for IBRS

commit 9756bba28470722dacb79ffce554336dd1f6a6cd upstream.

Prevent RSB underflow/poisoning attacks with RSB.  While at it, add a
bunch of comments to attempt to document the current state of tribal
knowledge about RSB attacks and what exactly is being mitigated.

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
[ bp: Adjust for the fact that vmexit is in inline assembly ]
Signed-off-by: Suraj Jitindar Singh <surajjs@amazon.com>
Signed-off-by: Suleiman Souhlal <suleiman@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Commit: f744b88dfc (parent: 6451e3ce91)
Author: Josh Poimboeuf, 2022-11-17 18:19:45 +09:00
Committed-by: Greg Kroah-Hartman
4 changed files with 62 additions and 9 deletions

arch/x86/include/asm/cpufeatures.h

@@ -203,7 +203,7 @@
 #define X86_FEATURE_SME			( 7*32+10) /* AMD Secure Memory Encryption */
 #define X86_FEATURE_PTI			( 7*32+11) /* Kernel Page Table Isolation enabled */
 #define X86_FEATURE_KERNEL_IBRS		( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
-/* FREE!				( 7*32+13) */
+#define X86_FEATURE_RSB_VMEXIT		( 7*32+13) /* "" Fill RSB on VM-Exit */
 #define X86_FEATURE_INTEL_PPIN		( 7*32+14) /* Intel Processor Inventory Number */
 #define X86_FEATURE_CDP_L2		( 7*32+15) /* Code and Data Prioritization L2 */
 #define X86_FEATURE_MSR_SPEC_CTRL	( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
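[Aside: the ( 7*32+13) in the new define packs a capability word and a bit index into one number. A standalone sketch of how it decomposes (plain C for illustration, not kernel code):

	#include <stdio.h>

	#define X86_FEATURE_RSB_VMEXIT (7 * 32 + 13)

	int main(void)
	{
		unsigned int feature = X86_FEATURE_RSB_VMEXIT;
		unsigned int word = feature / 32;	/* index into the u32 capability array */
		unsigned int bit  = feature % 32;	/* bit within that word */

		printf("word %u, bit %u\n", word, bit);	/* prints: word 7, bit 13 */
		return 0;
	}
]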

arch/x86/include/asm/nospec-branch.h

@@ -279,7 +279,7 @@ static __always_inline void vmexit_fill_RSB(void)
 	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
 		      ALTERNATIVE("jmp 910f",
 				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
-				  X86_FEATURE_RETPOLINE)
+				  X86_FEATURE_RSB_VMEXIT)
 		      "910:"
 		      : "=r" (loops), ASM_CALL_CONSTRAINT
 		      : : "memory" );

arch/x86/kernel/cpu/bugs.c

@@ -1278,16 +1278,69 @@ static void __init spectre_v2_select_mitigation(void)
 	pr_info("%s\n", spectre_v2_strings[mode]);
 
 	/*
-	 * If spectre v2 protection has been enabled, unconditionally fill
-	 * RSB during a context switch; this protects against two independent
-	 * issues:
+	 * If Spectre v2 protection has been enabled, fill the RSB during a
+	 * context switch.  In general there are two types of RSB attacks
+	 * across context switches, for which the CALLs/RETs may be unbalanced.
 	 *
-	 * - RSB underflow (and switch to BTB) on Skylake+
-	 * - SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
+	 * 1) RSB underflow
+	 *
+	 *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
+	 *    speculated return targets may come from the branch predictor,
+	 *    which could have a user-poisoned BTB or BHB entry.
+	 *
+	 *    AMD has it even worse: *all* returns are speculated from the BTB,
+	 *    regardless of the state of the RSB.
+	 *
+	 *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
+	 *    scenario is mitigated by the IBRS branch prediction isolation
+	 *    properties, so the RSB buffer filling wouldn't be necessary to
+	 *    protect against this type of attack.
+	 *
+	 *    The "user -> user" attack scenario is mitigated by RSB filling.
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 *    If the 'next' in-kernel return stack is shorter than 'prev',
+	 *    'next' could be tricked into speculating with a user-poisoned RSB
+	 *    entry.
+	 *
+	 *    The "user -> kernel" attack scenario is mitigated by SMEP and
+	 *    eIBRS.
+	 *
+	 *    The "user -> user" scenario, also known as SpectreBHB, requires
+	 *    RSB clearing.
+	 *
+	 * So to mitigate all cases, unconditionally fill RSB on context
+	 * switches.
+	 *
+	 * FIXME: Is this pointless for retbleed-affected AMD?
 	 */
 	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
 	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
 
+	/*
+	 * Similar to context switches, there are two types of RSB attacks
+	 * after vmexit:
+	 *
+	 * 1) RSB underflow
+	 *
+	 * 2) Poisoned RSB entry
+	 *
+	 * When retpoline is enabled, both are mitigated by filling/clearing
+	 * the RSB.
+	 *
+	 * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
+	 * prediction isolation protections, RSB still needs to be cleared
+	 * because of #2.  Note that SMEP provides no protection here, unlike
+	 * user-space-poisoned RSB entries.
+	 *
+	 * eIBRS, on the other hand, has RSB-poisoning protections, so it
+	 * doesn't need RSB clearing after vmexit.
+	 */
+	if (boot_cpu_has(X86_FEATURE_RETPOLINE) ||
+	    boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
+		setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+
 	/*
 	 * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
 	 * and Enhanced IBRS protect firmware too, so enable IBRS around
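[Aside: the vmexit comment above boils down to a small decision table. A hedged C sketch of that logic; the enum and function names here are illustrative, not the kernel's (the kernel keys off the RETPOLINE and KERNEL_IBRS feature bits instead of a mode enum):

	#include <stdbool.h>

	enum v2_mode { V2_RETPOLINE, V2_IBRS, V2_EIBRS };

	static bool needs_rsb_vmexit(enum v2_mode mode)
	{
		switch (mode) {
		case V2_RETPOLINE:	/* fill handles both underflow and poisoned entries */
		case V2_IBRS:		/* isolation covers underflow (#1), not poisoned entries (#2) */
			return true;
		case V2_EIBRS:		/* has its own RSB-poisoning protection */
			return false;
		}
		return true;		/* unknown mode: fill to be safe */
	}
]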

arch/x86/kvm/vmx.c

@@ -11018,8 +11018,8 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
 	 * the first unbalanced RET after vmexit!
 	 *
-	 * For retpoline, RSB filling is needed to prevent poisoned RSB entries
-	 * and (in some cases) RSB underflow.
+	 * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
+	 * entries and (in some cases) RSB underflow.
 	 *
 	 * eIBRS has its own protection against poisoned RSB, so it doesn't
 	 * need the RSB filling sequence.  But it does need to be enabled
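[Aside: to make the ordering requirement concrete, a hedged C-level sketch of the host exit path; all names here are hypothetical stand-ins, since in this tree the real sequence lives in vmx_vcpu_run()'s inline assembly:

	/* Hypothetical stubs standing in for the real (inline-asm) operations. */
	static void vmexit_fill_rsb_stub(void)        { /* stuff the RSB, as in nospec-branch.h */ }
	static void restore_host_spec_ctrl_stub(void) { /* wrmsr SPEC_CTRL back to the host value */ }

	static void host_exit_path_sketch(void)
	{
		vmexit_fill_rsb_stub();		/* 1) overwrite guest-poisoned RSB entries */
		restore_host_spec_ctrl_stub();	/* 2) restore host IBRS before indirect branches */
		/* 3) only now is the first unbalanced RET safe to reach */
	}
]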