/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/hypervisor.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/intel-family.h>
#include <asm/e820.h>

static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 x86_amd_ls_cfg_base;
u64 x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

void __init check_bugs(void)
{
	identify_boot_cpu();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper spectre mitigation before patching alternatives */
	spectre_v2_select_mitigation();

	/*
	 * Select proper mitigation for any exposure to the Speculative Store
	 * Bypass vulnerability.
	 */
	ssb_select_mitigation();

	l1tf_select_mitigation();

	mds_select_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for a i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages.
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
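
/*
 * Worked example of the sanitizing above (illustrative values, not taken
 * from a real system): with x86_spec_ctrl_base = SPEC_CTRL_IBRS and
 * x86_spec_ctrl_mask = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP, a guest asking
 * for only SPEC_CTRL_STIBP gets:
 *
 *	guestval  = hostval & ~x86_spec_ctrl_mask;	  -> 0
 *	guestval |= guest_spec_ctrl & x86_spec_ctrl_mask; -> SPEC_CTRL_STIBP
 *
 * i.e. reserved host bits outside the mask always survive, and the guest
 * only ever controls the vendor-approved modifiable bits.
 */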

static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation = MDS_MITIGATION_FULL;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;
		static_branch_enable(&mds_user_clear);
	}
	pr_info("%s\n", mds_strings[mds_mitigation]);
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;

	return 0;
}
early_param("mds", mds_cmdline);
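
/*
 * Example usage on the kernel command line (matching the parser above):
 *
 *	mds=off		- disable the MDS mitigation
 *	mds=full	- clear CPU buffers on return to user space (default)
 *
 * Unrecognized values leave the compiled-in default in place.
 */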

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled = SPECTRE_V2_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user = SPECTRE_V2_USER_NONE;

#ifdef RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}
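
/*
 * Worked example (illustrative only): match_option("retpoline,amd", 13,
 * "retpoline") is false because the lengths differ (13 vs 9). Prefix
 * matches are deliberately rejected; only exact option strings count.
 */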

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_AMD,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]		= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]	= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_PRCTL]		= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]	= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};
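
/*
 * Example usage on the kernel command line, matching the table above:
 *
 *	spectre_v2_user=prctl		- per task STIBP via prctl(), conditional IBPB
 *	spectre_v2_user=seccomp,ibpb	- prctl/seccomp STIBP plus always-on IBPB
 *
 * Unknown values fall back to auto selection (see
 * spectre_v2_parse_user_cmdline() below).
 */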

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (!IS_ENABLED(CONFIG_SMP))
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/* If enhanced IBRS is enabled no STIBP required */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	/*
	 * If SMT is not possible or STIBP is not available clear the STIBP
	 * mode.
	 */
	if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP))
		mode = SPECTRE_V2_USER_NONE;
set_mode:
	spectre_v2_user = mode;
	/* Only print the STIBP mode when SMT possible */
	if (smt_possible)
		pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE_MINIMAL]		= "Vulnerable: Minimal generic ASM retpoline",
	[SPECTRE_V2_RETPOLINE_MINIMAL_AMD]	= "Vulnerable: Minimal AMD ASM retpoline",
	[SPECTRE_V2_RETPOLINE_GENERIC]		= "Mitigation: Full generic retpoline",
	[SPECTRE_V2_RETPOLINE_AMD]		= "Mitigation: Full AMD retpoline",
	[SPECTRE_V2_IBRS_ENHANCED]		= "Mitigation: Enhanced IBRS",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_AMD,	  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};
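
/*
 * Example usage on the kernel command line:
 *
 *	spectre_v2=retpoline,generic	- force the compiler based retpoline
 *	spectre_v2=off			- no Spectre v2 mitigation
 *
 * "retpoline,amd" additionally requires an AMD CPU; both constraints are
 * enforced in spectre_v2_parse_cmdline() below.
 */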

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static inline bool retp_compiler(void)
{
	return __is_defined(RETPOLINE);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n", mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
		pr_err("retpoline,amd selected but CPU is not AMD. Switching to AUTO select\n");
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_IBRS_ENHANCED;
			/* Force it so VMEXIT will restore correctly */
			x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
			goto specv2_set_mode;
		}
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_AMD:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_amd;
		break;
	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_generic;
		break;
	case SPECTRE_V2_CMD_RETPOLINE:
		if (IS_ENABLED(CONFIG_RETPOLINE))
			goto retpoline_auto;
		break;
	}
	pr_err("Spectre mitigation: kernel not compiled with retpoline; no mitigation available!");
	return;

retpoline_auto:
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
	retpoline_amd:
		if (!boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
			pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n");
			goto retpoline_generic;
		}
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD :
					 SPECTRE_V2_RETPOLINE_MINIMAL_AMD;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD);
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	} else {
	retpoline_generic:
		mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC :
					 SPECTRE_V2_RETPOLINE_MINIMAL;
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
	}

specv2_set_mode:
	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might un-intentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && mode != SPECTRE_V2_IBRS_ENHANCED) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}

static void update_stibp_msr(void *__unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active())
		static_branch_enable(&mds_idle_clear);
	else
		static_branch_disable(&mds_idle_clear);
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"

void arch_smt_update(void)
{
	/* Enhanced IBRS implies STIBP. No update required. */
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return;

	mutex_lock(&spec_ctrl_mutex);

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};
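
/*
 * Example usage on the kernel command line:
 *
 *	spec_store_bypass_disable=on	- disable SSB for every task
 *	spec_store_bypass_disable=prctl	- keep SSB on, let tasks opt out via prctl()
 */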

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
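
/*
 * Userspace sketch (compiled against <sys/prctl.h>, not kernel code) of
 * driving the interface implemented above:
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * This fails with ENXIO when the kernel runs in a mode other than
 * "prctl" or "seccomp", and with EPERM after a force disable.
 */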
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return 0;
		/*
		 * Indirect branch speculation is always disabled in strict
		 * mode.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
			return -EPERM;
		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
			return -EPERM;
		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
			return 0;
		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}
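
/*
 * Userspace sketch for the indirect branch control above, mirroring
 * Documentation/userspace-api/spec_ctrl.rst:
 *
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0);
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *	      PR_SPEC_DISABLE, 0, 0);
 */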

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}
static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	case SPECTRE_V2_USER_STRICT:
		return PR_SPEC_DISABLE;
	default:
		return PR_SPEC_NOT_AFFECTED;
	}
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	default:
		return -ENODEV;
	}
}

void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	override_cache_bits(&boot_cpu_data);

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (e820_any_mapped(half_pa, ULLONG_MAX - half_pa, E820_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}
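
/*
 * Worked example for the range check above (assumed hardware, and
 * assuming l1tf_pfn_limit() reduces to 1 << (x86_cache_bits - 1 -
 * PAGE_SHIFT) as in this kernel series): a CPU reporting 36 physical
 * address bits has half_pa = 1ULL << 35 (32GB). With RAM mapped at or
 * above that boundary the PTE inversion cannot cover all of memory, and
 * booting with the printed mem= value (here mem=34359738368) trades the
 * excess RAM for a fully effective mitigation.
 */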

#undef pr_fmt
#define pr_fmt(fmt)	fmt

#ifdef CONFIG_SYSFS

static ssize_t mds_show_state(char *buf)
{
#ifdef CONFIG_HYPERVISOR_GUEST
	if (x86_hyper) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}
#endif

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
			        sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static char *stibp_state(void)
{
	if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED)
		return "";

	switch (spectre_v2_user) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_KAISER))
			return sprintf(buf, "Mitigation: PTI\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "Mitigation: __user pointer sanitization\n");

	case X86_BUG_SPECTRE_V2:
		return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
			       ibpb_state(),
			       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
			       stibp_state(),
			       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
			       spectre_v2_module_string());

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return sprintf(buf, "Mitigation: PTE Inversion\n");
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}
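
/*
 * The functions above back the generic sysfs vulnerability files, e.g.:
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *	Mitigation: Full generic retpoline, IBPB: conditional, ...
 *
 * (Sample output; the exact string depends on the mitigations selected
 * at boot.)
 */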
#endif