powerpc/64s: flush L1D after user accesses

commit 9a32a7e78bd0cd9a9b6332cbdc345ee5ffd0c5de upstream.

IBM Power9 processors can speculatively operate on data in the L1 cache
before it has been completely validated, via a way-prediction mechanism. It
is not possible for an attacker to determine the contents of impermissible
memory using this method, since these systems implement a combination of
hardware and software security measures to prevent scenarios where
protected data could be leaked.

However, these measures don't address the scenario where an attacker induces
the operating system to speculatively execute instructions using data that
the attacker controls. This can be used, for example, to speculatively bypass
"kernel user access prevention" techniques, as discovered by Anthony
Steinhauser of Google's Safeside Project. This is not an attack by itself,
but there is a possibility it could be used in conjunction with
side-channels or other weaknesses in the privileged code to construct an
attack.

This issue can be mitigated by flushing the L1 cache between privilege
boundaries of concern. This patch flushes the L1 cache after user accesses.

This is part of the fix for CVE-2020-4788.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Nicholas Piggin, 2020-11-20 10:35:14 +11:00; committed by Greg Kroah-Hartman
commit 09495b5f7a (parent b65458b6be)
13 changed files with 199 additions and 70 deletions
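
In brief, the change has three moving parts. prevent_user_access(), the exit
path of the kernel's user-access window, now calls do_uaccess_flush() behind a
new static key (uaccess_flush_key). do_uaccess_flush() itself is a patchable
stub in exceptions-64s.S whose body is rewritten at boot by
do_uaccess_flush_fixups() into either the ori/mttrig flush sequence or the
L1D_DISPLACEMENT_FLUSH fallback. Finally, setup_uaccess_flush() ties it all to
the SEC_FTR_L1D_FLUSH_UACCESS firmware feature, the no_uaccess_flush boot
parameter, and a debugfs toggle.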

View File

@@ -2668,6 +2668,7 @@
 			tsx_async_abort=off [X86]
 			kvm.nx_huge_pages=off [X86]
 			no_entry_flush [PPC]
+			no_uaccess_flush [PPC]
 			Exceptions:
 			       This does not have any effect on
@@ -3041,6 +3042,9 @@
 	nospec_store_bypass_disable
 			[HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+	no_uaccess_flush
+			[PPC] Don't flush the L1-D cache after accessing user data.
+
 	noxsave		[BUGS=X86] Disables x86 extended register state save
 			and restore using xsave. The kernel will fallback to
 			enabling legacy floating-point and sse state.

View File

@@ -54,6 +54,8 @@
 #else /* !__ASSEMBLY__ */
+DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
+
 #ifdef CONFIG_PPC_KUAP
 #include <asm/reg.h>
@@ -77,6 +79,17 @@ static inline void set_kuap(unsigned long value)
 	isync();
 }
+static inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
+		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
+		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
+}
+
+#else /* CONFIG_PPC_KUAP */
+static inline void set_kuap(unsigned long value) { }
+#endif /* !CONFIG_PPC_KUAP */
 static __always_inline void allow_user_access(void __user *to, const void __user *from,
 					      unsigned long size, unsigned long dir)
 {
@@ -94,17 +107,10 @@ static inline void prevent_user_access(void __user *to, const void __user *from,
 					unsigned long size, unsigned long dir)
 {
 	set_kuap(AMR_KUAP_BLOCKED);
+	if (static_branch_unlikely(&uaccess_flush_key))
+		do_uaccess_flush();
 }
-
-static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
-{
-	return WARN(mmu_has_feature(MMU_FTR_RADIX_KUAP) &&
-		    (regs->kuap & (is_write ? AMR_KUAP_BLOCK_WRITE : AMR_KUAP_BLOCK_READ)),
-		    "Bug: %s fault blocked by AMR!", is_write ? "Write" : "Read");
-}
-#endif /* CONFIG_PPC_KUAP */
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */

View File

@@ -134,6 +134,9 @@
 	hrfid;	\
 	b	hrfi_flush_fallback
+#else /* __ASSEMBLY__ */
+/* Prototype for function defined in exceptions-64s.S */
+void do_uaccess_flush(void);
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_EXCEPTION_H */

View File

@@ -205,6 +205,14 @@ label##3: \
 	FTR_ENTRY_OFFSET 955b-956b;	\
 	.popsection;
+#define UACCESS_FLUSH_FIXUP_SECTION	\
+959:	\
+	.pushsection __uaccess_flush_fixup,"a";	\
+	.align 2;	\
+960:	\
+	FTR_ENTRY_OFFSET 959b-960b;	\
+	.popsection;
+
 #define ENTRY_FLUSH_FIXUP_SECTION	\
 957:	\
 	.pushsection __entry_flush_fixup,"a";	\
@@ -248,6 +256,7 @@ extern long stf_barrier_fallback;
 extern long entry_flush_fallback;
 extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
 extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
+extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
 extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
 extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
 extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;

View File

@@ -45,15 +45,24 @@ static inline void setup_kuep(bool disabled) { }
 void setup_kuap(bool disabled);
 #else
 static inline void setup_kuap(bool disabled) { }
-static inline void allow_user_access(void __user *to, const void __user *from,
-				     unsigned long size, unsigned long dir) { }
-static inline void prevent_user_access(void __user *to, const void __user *from,
-				       unsigned long size, unsigned long dir) { }
 static inline bool
 bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
 {
 	return false;
 }
+
+/*
+ * book3s/64/kup-radix.h defines these functions for the !KUAP case to flush
+ * the L1D cache after user accesses. Only include the empty stubs for other
+ * platforms.
+ */
+#ifndef CONFIG_PPC64
+static inline void allow_user_access(void __user *to, const void __user *from,
+				     unsigned long size, unsigned long dir) { }
+static inline void prevent_user_access(void __user *to, const void __user *from,
+				       unsigned long size, unsigned long dir) { }
+#endif /* CONFIG_PPC64 */
 #endif /* CONFIG_PPC_KUAP */
 
 static inline void allow_read_from_user(const void __user *from, unsigned long size)

View File

@@ -87,6 +87,8 @@ static inline bool security_ftr_enabled(u64 feature)
 // The L1-D cache should be flushed when entering the kernel
 #define SEC_FTR_L1D_FLUSH_ENTRY		0x0000000000004000ull
+// The L1-D cache should be flushed after user accesses from the kernel
+#define SEC_FTR_L1D_FLUSH_UACCESS	0x0000000000008000ull
+
 // Features enabled by default
 #define SEC_FTR_DEFAULT \
@@ -94,6 +96,7 @@ static inline bool security_ftr_enabled(u64 feature)
 	 SEC_FTR_L1D_FLUSH_PR | \
 	 SEC_FTR_BNDS_CHK_SPEC_BAR | \
 	 SEC_FTR_L1D_FLUSH_ENTRY | \
+	 SEC_FTR_L1D_FLUSH_UACCESS | \
 	 SEC_FTR_FAVOUR_SECURITY)
 
 #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */

View File

@@ -60,6 +60,7 @@ void setup_barrier_nospec(void);
 #else
 static inline void setup_barrier_nospec(void) { };
 #endif
+void do_uaccess_flush_fixups(enum l1d_flush_type types);
 void do_entry_flush_fixups(enum l1d_flush_type types);
 void do_barrier_nospec_fixups(bool enable);
 extern bool barrier_nospec_enabled;

View File

@@ -2046,11 +2046,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
 	.endr
 	blr
 
-TRAMP_REAL_BEGIN(entry_flush_fallback)
-	std	r9,PACA_EXRFI+EX_R9(r13)
-	std	r10,PACA_EXRFI+EX_R10(r13)
-	std	r11,PACA_EXRFI+EX_R11(r13)
-	mfctr	r9
+/* Clobbers r10, r11, ctr */
+.macro L1D_DISPLACEMENT_FLUSH
 	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
 	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
 	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
@@ -2076,7 +2073,14 @@ TRAMP_REAL_BEGIN(entry_flush_fallback)
 	ld	r11,(0x80 + 8)*7(r10)
 	addi	r10,r10,0x80*8
 	bdnz	1b
+.endm
+
+TRAMP_REAL_BEGIN(entry_flush_fallback)
+	std	r9,PACA_EXRFI+EX_R9(r13)
+	std	r10,PACA_EXRFI+EX_R10(r13)
+	std	r11,PACA_EXRFI+EX_R11(r13)
+	mfctr	r9
+	L1D_DISPLACEMENT_FLUSH
 	mtctr	r9
 	ld	r9,PACA_EXRFI+EX_R9(r13)
 	ld	r10,PACA_EXRFI+EX_R10(r13)
@@ -2092,32 +2096,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
 	std	r10,PACA_EXRFI+EX_R10(r13)
 	std	r11,PACA_EXRFI+EX_R11(r13)
 	mfctr	r9
-	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
-	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
-	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
-	mtctr	r11
-	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
-
-	/* order ld/st prior to dcbt stop all streams with flushing */
-	sync
-
-	/*
-	 * The load adresses are at staggered offsets within cachelines,
-	 * which suits some pipelines better (on others it should not
-	 * hurt).
-	 */
-1:
-	ld	r11,(0x80 + 8)*0(r10)
-	ld	r11,(0x80 + 8)*1(r10)
-	ld	r11,(0x80 + 8)*2(r10)
-	ld	r11,(0x80 + 8)*3(r10)
-	ld	r11,(0x80 + 8)*4(r10)
-	ld	r11,(0x80 + 8)*5(r10)
-	ld	r11,(0x80 + 8)*6(r10)
-	ld	r11,(0x80 + 8)*7(r10)
-	addi	r10,r10,0x80*8
-	bdnz	1b
+	L1D_DISPLACEMENT_FLUSH
 	mtctr	r9
 	ld	r9,PACA_EXRFI+EX_R9(r13)
 	ld	r10,PACA_EXRFI+EX_R10(r13)
@@ -2135,32 +2114,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
 	std	r10,PACA_EXRFI+EX_R10(r13)
 	std	r11,PACA_EXRFI+EX_R11(r13)
 	mfctr	r9
-	ld	r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
-	ld	r11,PACA_L1D_FLUSH_SIZE(r13)
-	srdi	r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
-	mtctr	r11
-	DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
-
-	/* order ld/st prior to dcbt stop all streams with flushing */
-	sync
-
-	/*
-	 * The load adresses are at staggered offsets within cachelines,
-	 * which suits some pipelines better (on others it should not
-	 * hurt).
-	 */
-1:
-	ld	r11,(0x80 + 8)*0(r10)
-	ld	r11,(0x80 + 8)*1(r10)
-	ld	r11,(0x80 + 8)*2(r10)
-	ld	r11,(0x80 + 8)*3(r10)
-	ld	r11,(0x80 + 8)*4(r10)
-	ld	r11,(0x80 + 8)*5(r10)
-	ld	r11,(0x80 + 8)*6(r10)
-	ld	r11,(0x80 + 8)*7(r10)
-	addi	r10,r10,0x80*8
-	bdnz	1b
+	L1D_DISPLACEMENT_FLUSH
 	mtctr	r9
 	ld	r9,PACA_EXRFI+EX_R9(r13)
 	ld	r10,PACA_EXRFI+EX_R10(r13)
@@ -2169,6 +2123,19 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
 	GET_SCRATCH0(r13);
 	hrfid
 
+USE_TEXT_SECTION()
+
+_GLOBAL(do_uaccess_flush)
+UACCESS_FLUSH_FIXUP_SECTION
+	nop
+	nop
+	nop
+	blr
+	L1D_DISPLACEMENT_FLUSH
+	blr
+_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
+EXPORT_SYMBOL(do_uaccess_flush)
+
 /*
  * Real mode exceptions actually use this too, but alternate
  * instruction code patches (which end up in the common .text area)
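
The L1D_DISPLACEMENT_FLUSH fallback above works by reading an L1D-sized region
of the per-CPU fallback area in cache-line strides, displacing whatever the
cache held. A rough userspace C sketch of the access pattern follows; the
32 KiB cache size, 128-byte line size and the displacement_flush()/L1D_SIZE/
LINE_SIZE names are illustrative assumptions (typical Power9 values), and a
userspace loop like this carries no security guarantee of its own.

/*
 * Userspace sketch of a displacement flush (illustrative only).
 * Assumed parameters: 32 KiB L1D, 128-byte cache lines.
 */
#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define L1D_SIZE  (32 * 1024)
#define LINE_SIZE 128

/*
 * Touch one address in each cache line of an L1D-sized buffer so that
 * whatever was cached before is displaced. The (LINE_SIZE + 8) stagger
 * mirrors the (0x80 + 8)*N offsets in L1D_DISPLACEMENT_FLUSH; the real
 * macro additionally stops prefetch streams and orders prior accesses.
 */
static void displacement_flush(const volatile unsigned char *buf)
{
        unsigned long sum = 0;
        size_t base;
        int line;

        for (base = 0; base < L1D_SIZE; base += 8 * LINE_SIZE)
                for (line = 0; line < 8; line++)
                        sum += buf[base + (LINE_SIZE + 8) * line];

        (void)sum; /* the volatile reads are the point, not the value */
}

int main(void)
{
        unsigned char *area;

        if (posix_memalign((void **)&area, LINE_SIZE, L1D_SIZE))
                return 1;
        memset(area, 0, L1D_SIZE);

        displacement_flush(area);
        printf("touched %d cache lines\n", L1D_SIZE / LINE_SIZE);

        free(area);
        return 0;
}

The staggered offsets keep each of the eight unrolled loads in a different
cache line; per the comment in the original fallback code, they also suit some
pipelines better than loads at line-aligned addresses.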

View File

@@ -860,8 +860,12 @@ static enum l1d_flush_type enabled_flush_types;
 static void *l1d_flush_fallback_area;
 static bool no_rfi_flush;
 static bool no_entry_flush;
+static bool no_uaccess_flush;
 bool rfi_flush;
 bool entry_flush;
+bool uaccess_flush;
+DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
+EXPORT_SYMBOL(uaccess_flush_key);
 
 static int __init handle_no_rfi_flush(char *p)
 {
@@ -879,6 +883,14 @@ static int __init handle_no_entry_flush(char *p)
 }
 early_param("no_entry_flush", handle_no_entry_flush);
 
+static int __init handle_no_uaccess_flush(char *p)
+{
+	pr_info("uaccess-flush: disabled on command line.");
+	no_uaccess_flush = true;
+	return 0;
+}
+early_param("no_uaccess_flush", handle_no_uaccess_flush);
+
 /*
  * The RFI flush is not KPTI, but because users will see doco that says to use
  * nopti we hijack that option here to also disable the RFI flush.
@@ -922,6 +934,20 @@ void entry_flush_enable(bool enable)
 	entry_flush = enable;
 }
 
+void uaccess_flush_enable(bool enable)
+{
+	if (enable) {
+		do_uaccess_flush_fixups(enabled_flush_types);
+		static_branch_enable(&uaccess_flush_key);
+		on_each_cpu(do_nothing, NULL, 1);
+	} else {
+		static_branch_disable(&uaccess_flush_key);
+		do_uaccess_flush_fixups(L1D_FLUSH_NONE);
+	}
+
+	uaccess_flush = enable;
+}
+
 static void __ref init_fallback_flush(void)
 {
 	u64 l1d_size, limit;
@@ -993,6 +1019,15 @@ void setup_entry_flush(bool enable)
 	entry_flush_enable(enable);
 }
 
+void setup_uaccess_flush(bool enable)
+{
+	if (cpu_mitigations_off())
+		return;
+
+	if (!no_uaccess_flush)
+		uaccess_flush_enable(true);
+}
+
 #ifdef CONFIG_DEBUG_FS
 static int rfi_flush_set(void *data, u64 val)
 {
@@ -1046,10 +1081,37 @@ static int entry_flush_get(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
 
+static int uaccess_flush_set(void *data, u64 val)
+{
+	bool enable;
+
+	if (val == 1)
+		enable = true;
+	else if (val == 0)
+		enable = false;
+	else
+		return -EINVAL;
+
+	/* Only do anything if we're changing state */
+	if (enable != uaccess_flush)
+		uaccess_flush_enable(enable);
+
+	return 0;
+}
+
+static int uaccess_flush_get(void *data, u64 *val)
+{
+	*val = uaccess_flush ? 1 : 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
+
 static __init int rfi_flush_debugfs_init(void)
 {
 	debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
 	debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
+	debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
 	return 0;
 }
 device_initcall(rfi_flush_debugfs_init);
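
With the setup_64.c changes above there are three controls: the
firmware-reported SEC_FTR_L1D_FLUSH_UACCESS bit selects the default, the
no_uaccess_flush command-line option disables the flush at boot, and with
CONFIG_DEBUG_FS the new uaccess_flush file under the powerpc debugfs directory
(normally /sys/kernel/debug/powerpc/uaccess_flush, assuming debugfs is mounted
in its usual place) accepts 0 or 1 to toggle it at runtime via
uaccess_flush_enable().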

View File

@@ -143,6 +143,13 @@ SECTIONS
 		__stop___stf_entry_barrier_fixup = .;
 	}
 
+	. = ALIGN(8);
+	__uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
+		__start___uaccess_flush_fixup = .;
+		*(__uaccess_flush_fixup)
+		__stop___uaccess_flush_fixup = .;
+	}
+
 	. = ALIGN(8);
 	__entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
 		__start___entry_flush_fixup = .;

View File

@@ -228,6 +228,56 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
 	do_stf_exit_barrier_fixups(types);
 }
 
+void do_uaccess_flush_fixups(enum l1d_flush_type types)
+{
+	unsigned int instrs[4], *dest;
+	long *start, *end;
+	int i;
+
+	start = PTRRELOC(&__start___uaccess_flush_fixup);
+	end = PTRRELOC(&__stop___uaccess_flush_fixup);
+
+	instrs[0] = 0x60000000; /* nop */
+	instrs[1] = 0x60000000; /* nop */
+	instrs[2] = 0x60000000; /* nop */
+	instrs[3] = 0x4e800020; /* blr */
+
+	i = 0;
+	if (types == L1D_FLUSH_FALLBACK) {
+		instrs[3] = 0x60000000; /* nop */
+		/* fallthrough to fallback flush */
+	}
+
+	if (types & L1D_FLUSH_ORI) {
+		instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
+		instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush*/
+	}
+
+	if (types & L1D_FLUSH_MTTRIG)
+		instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
+
+	for (i = 0; start < end; start++, i++) {
+		dest = (void *)start + *start;
+
+		pr_devel("patching dest %lx\n", (unsigned long)dest);
+
+		patch_instruction(dest, instrs[0]);
+		patch_instruction((dest + 1), instrs[1]);
+		patch_instruction((dest + 2), instrs[2]);
+		patch_instruction((dest + 3), instrs[3]);
+	}
+
+	printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
+		(types == L1D_FLUSH_NONE)     ? "no" :
+		(types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
+		(types & L1D_FLUSH_ORI)       ? (types & L1D_FLUSH_MTTRIG)
+							? "ori+mttrig type"
+							: "ori type" :
+		(types & L1D_FLUSH_MTTRIG)    ? "mttrig type"
+						: "unknown");
+}
+
 void do_entry_flush_fixups(enum l1d_flush_type types)
 {
 	unsigned int instrs[3], *dest;
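
A note on the do_uaccess_flush_fixups() logic above: each patched site
defaults to three nops followed by a blr, so with no flush type selected
do_uaccess_flush() returns immediately. For L1D_FLUSH_ORI and/or
L1D_FLUSH_MTTRIG the nops become the corresponding special-form instructions
and the trailing blr still returns after the flush; for L1D_FLUSH_FALLBACK the
blr itself is replaced with a nop so execution falls through into the
L1D_DISPLACEMENT_FLUSH sequence emitted after the fixup section in
exceptions-64s.S.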

View File

@@ -139,6 +139,10 @@ static void pnv_setup_rfi_flush(void)
 	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
 		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
 	setup_entry_flush(enable);
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+	setup_uaccess_flush(enable);
 }
 
 static void __init pnv_setup_arch(void)

View File

@@ -565,6 +565,10 @@ void pseries_setup_rfi_flush(void)
 	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
 		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
 	setup_entry_flush(enable);
+
+	enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
+		 security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
+	setup_uaccess_flush(enable);
 }
 
 #ifdef CONFIG_PCI_IOV