author    Mike Pagano <mpagano@gentoo.org>  2020-11-22 14:26:06 -0500
committer Mike Pagano <mpagano@gentoo.org>  2020-11-22 14:26:06 -0500
commit    c869a8b6a8317d09e7c35c8fba10cc10a0200cc5 (patch)
tree      0c34be7993668429c3a3f4ca82e1f541a40e0771
parent    Linux patch 4.19.158 (diff)
download  linux-patches-c869a8b6a8317d09e7c35c8fba10cc10a0200cc5.tar.gz
          linux-patches-c869a8b6a8317d09e7c35c8fba10cc10a0200cc5.tar.bz2
          linux-patches-c869a8b6a8317d09e7c35c8fba10cc10a0200cc5.zip

Linux patch 4.19.159
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README                |    4
-rw-r--r--  1158_linux-4.19.159.patch  | 1417
2 files changed, 1421 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 9824d3a3..9a903758 100644
--- a/0000_README
+++ b/0000_README
@@ -671,6 +671,10 @@ Patch: 1157_linux-4.19.158.patch
From: https://www.kernel.org
Desc: Linux 4.19.158
+Patch: 1158_linux-4.19.159.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.159
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1158_linux-4.19.159.patch b/1158_linux-4.19.159.patch
new file mode 100644
index 00000000..29927766
--- /dev/null
+++ b/1158_linux-4.19.159.patch
@@ -0,0 +1,1417 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 8dbc8d4ec8f0c..7371643dd8d4f 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -2560,6 +2560,8 @@
+ mds=off [X86]
+ tsx_async_abort=off [X86]
+ kvm.nx_huge_pages=off [X86]
++ no_entry_flush [PPC]
++ no_uaccess_flush [PPC]
+
+ Exceptions:
+ This does not have any effect on
+@@ -2870,6 +2872,8 @@
+
+ noefi Disable EFI runtime services support.
+
++ no_entry_flush [PPC] Don't flush the L1-D cache when entering the kernel.
++
+ noexec [IA-64]
+
+ noexec [X86]
+@@ -2919,6 +2923,9 @@
+ nospec_store_bypass_disable
+ [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
+
++ no_uaccess_flush
++ [PPC] Don't flush the L1-D cache after accessing user data.
++
+ noxsave [BUGS=X86] Disables x86 extended register state save
+ and restore using xsave. The kernel will fallback to
+ enabling legacy floating-point and sse state.
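The new no_entry_flush and no_uaccess_flush tokens documented above are consumed very early in boot. The handlers added later in this patch (see handle_no_entry_flush() in setup_64.c) follow the standard early_param pattern; a minimal sketch mirroring that hunk:

	/* Sketch of the early_param pattern: the kernel scans the command
	 * line before most init code runs and invokes the registered
	 * handler when the token matches. */
	#include <linux/init.h>
	#include <linux/printk.h>

	static bool no_entry_flush;

	static int __init handle_no_entry_flush(char *p)
	{
		pr_info("entry-flush: disabled on command line.");
		no_entry_flush = true;
		return 0;
	}
	early_param("no_entry_flush", handle_no_entry_flush);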
+diff --git a/Makefile b/Makefile
+index 698a9cc2864bd..593fdbce712d7 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 158
++SUBLEVEL = 159
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+new file mode 100644
+index 0000000000000..aa54ac2e5659e
+--- /dev/null
++++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
+@@ -0,0 +1,22 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
++#define _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H
++
++DECLARE_STATIC_KEY_FALSE(uaccess_flush_key);
++
++/* Prototype for function defined in exceptions-64s.S */
++void do_uaccess_flush(void);
++
++static __always_inline void allow_user_access(void __user *to, const void __user *from,
++ unsigned long size)
++{
++}
++
++static inline void prevent_user_access(void __user *to, const void __user *from,
++ unsigned long size)
++{
++ if (static_branch_unlikely(&uaccess_flush_key))
++ do_uaccess_flush();
++}
++
++#endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
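The static key above is what keeps the mitigation free when it is disabled: static_branch_unlikely() compiles to a single nop until the key is flipped at runtime, at which point the branch is live-patched in. A minimal sketch of the same pattern, with assumed names (demo_flush_key and do_flush are illustrative only):

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(demo_flush_key);

	static void do_flush(void)
	{
		/* stand-in for the real L1-D displacement flush */
	}

	static void demo_prevent_access(void)
	{
		/* Costs one nop when the key is off; the call is patched
		 * in by static_branch_enable(&demo_flush_key). */
		if (static_branch_unlikely(&demo_flush_key))
			do_flush();
	}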
+diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
+index a86feddddad0c..35fb5b11955a0 100644
+--- a/arch/powerpc/include/asm/exception-64s.h
++++ b/arch/powerpc/include/asm/exception-64s.h
+@@ -90,11 +90,18 @@
+ nop; \
+ nop
+
++#define ENTRY_FLUSH_SLOT \
++ ENTRY_FLUSH_FIXUP_SECTION; \
++ nop; \
++ nop; \
++ nop;
++
+ /*
+ * r10 must be free to use, r13 must be paca
+ */
+ #define INTERRUPT_TO_KERNEL \
+- STF_ENTRY_BARRIER_SLOT
++ STF_ENTRY_BARRIER_SLOT; \
++ ENTRY_FLUSH_SLOT
+
+ /*
+ * Macros for annotating the expected destination of (h)rfid
+diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h
+index 40a6c9261a6bf..5bf3f0779b936 100644
+--- a/arch/powerpc/include/asm/feature-fixups.h
++++ b/arch/powerpc/include/asm/feature-fixups.h
+@@ -205,6 +205,22 @@ label##3: \
+ FTR_ENTRY_OFFSET 955b-956b; \
+ .popsection;
+
++#define UACCESS_FLUSH_FIXUP_SECTION \
++959: \
++ .pushsection __uaccess_flush_fixup,"a"; \
++ .align 2; \
++960: \
++ FTR_ENTRY_OFFSET 959b-960b; \
++ .popsection;
++
++#define ENTRY_FLUSH_FIXUP_SECTION \
++957: \
++ .pushsection __entry_flush_fixup,"a"; \
++ .align 2; \
++958: \
++ FTR_ENTRY_OFFSET 957b-958b; \
++ .popsection;
++
+ #define RFI_FLUSH_FIXUP_SECTION \
+ 951: \
+ .pushsection __rfi_flush_fixup,"a"; \
+@@ -237,8 +253,11 @@ label##3: \
+ #include <linux/types.h>
+
+ extern long stf_barrier_fallback;
++extern long entry_flush_fallback;
+ extern long __start___stf_entry_barrier_fixup, __stop___stf_entry_barrier_fixup;
+ extern long __start___stf_exit_barrier_fixup, __stop___stf_exit_barrier_fixup;
++extern long __start___uaccess_flush_fixup, __stop___uaccess_flush_fixup;
++extern long __start___entry_flush_fixup, __stop___entry_flush_fixup;
+ extern long __start___rfi_flush_fixup, __stop___rfi_flush_fixup;
+ extern long __start___barrier_nospec_fixup, __stop___barrier_nospec_fixup;
+ extern long __start__btb_flush_fixup, __stop__btb_flush_fixup;
+diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
+index 2a7b01f97a56b..1eabc20dddd38 100644
+--- a/arch/powerpc/include/asm/futex.h
++++ b/arch/powerpc/include/asm/futex.h
+@@ -35,6 +35,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ {
+ int oldval = 0, ret;
+
++ allow_write_to_user(uaddr, sizeof(*uaddr));
+ pagefault_disable();
+
+ switch (op) {
+@@ -61,6 +62,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+
+ *oval = oldval;
+
++ prevent_write_to_user(uaddr, sizeof(*uaddr));
+ return ret;
+ }
+
+@@ -74,6 +76,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
++ allow_write_to_user(uaddr, sizeof(*uaddr));
+ __asm__ __volatile__ (
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\
+@@ -94,6 +97,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ : "cc", "memory");
+
+ *uval = prev;
++ prevent_write_to_user(uaddr, sizeof(*uaddr));
+ return ret;
+ }
+
+diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
+new file mode 100644
+index 0000000000000..f0f8e36ad71f5
+--- /dev/null
++++ b/arch/powerpc/include/asm/kup.h
+@@ -0,0 +1,40 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_POWERPC_KUP_H_
++#define _ASM_POWERPC_KUP_H_
++
++#ifndef __ASSEMBLY__
++
++#include <asm/pgtable.h>
++
++#ifdef CONFIG_PPC_BOOK3S_64
++#include <asm/book3s/64/kup-radix.h>
++#else
++static inline void allow_user_access(void __user *to, const void __user *from,
++ unsigned long size) { }
++static inline void prevent_user_access(void __user *to, const void __user *from,
++ unsigned long size) { }
++#endif /* CONFIG_PPC_BOOK3S_64 */
++
++static inline void allow_read_from_user(const void __user *from, unsigned long size)
++{
++ allow_user_access(NULL, from, size);
++}
++
++static inline void allow_write_to_user(void __user *to, unsigned long size)
++{
++ allow_user_access(to, NULL, size);
++}
++
++static inline void prevent_read_from_user(const void __user *from, unsigned long size)
++{
++ prevent_user_access(NULL, from, size);
++}
++
++static inline void prevent_write_to_user(void __user *to, unsigned long size)
++{
++ prevent_user_access(to, NULL, size);
++}
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* _ASM_POWERPC_KUP_H_ */
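These helpers are meant to bracket every window in which the kernel touches user memory; raw_copy_from_user() later in this patch takes exactly this shape. A condensed sketch (demo_copy_from_user is illustrative, kernel context assumed):

	static inline unsigned long
	demo_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long ret;

		barrier_nospec();			/* as in the real hunk */
		allow_read_from_user(from, n);		/* open the window */
		ret = __copy_tofrom_user((__force void __user *)to, from, n);
		prevent_read_from_user(from, n);	/* flush L1-D if enabled */
		return ret;
	}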
+diff --git a/arch/powerpc/include/asm/security_features.h b/arch/powerpc/include/asm/security_features.h
+index ccf44c135389a..3b45a64e491e5 100644
+--- a/arch/powerpc/include/asm/security_features.h
++++ b/arch/powerpc/include/asm/security_features.h
+@@ -84,12 +84,19 @@ static inline bool security_ftr_enabled(unsigned long feature)
+ // Software required to flush link stack on context switch
+ #define SEC_FTR_FLUSH_LINK_STACK 0x0000000000001000ull
+
++// The L1-D cache should be flushed when entering the kernel
++#define SEC_FTR_L1D_FLUSH_ENTRY 0x0000000000004000ull
++
++// The L1-D cache should be flushed after user accesses from the kernel
++#define SEC_FTR_L1D_FLUSH_UACCESS 0x0000000000008000ull
+
+ // Features enabled by default
+ #define SEC_FTR_DEFAULT \
+ (SEC_FTR_L1D_FLUSH_HV | \
+ SEC_FTR_L1D_FLUSH_PR | \
+ SEC_FTR_BNDS_CHK_SPEC_BAR | \
++ SEC_FTR_L1D_FLUSH_ENTRY | \
++ SEC_FTR_L1D_FLUSH_UACCESS | \
+ SEC_FTR_FAVOUR_SECURITY)
+
+ #endif /* _ASM_POWERPC_SECURITY_FEATURES_H */
+diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
+index 65676e2325b85..6f2f4497e13b3 100644
+--- a/arch/powerpc/include/asm/setup.h
++++ b/arch/powerpc/include/asm/setup.h
+@@ -52,12 +52,16 @@ enum l1d_flush_type {
+ };
+
+ void setup_rfi_flush(enum l1d_flush_type, bool enable);
++void setup_entry_flush(bool enable);
++void setup_uaccess_flush(bool enable);
+ void do_rfi_flush_fixups(enum l1d_flush_type types);
+ #ifdef CONFIG_PPC_BARRIER_NOSPEC
+ void setup_barrier_nospec(void);
+ #else
+ static inline void setup_barrier_nospec(void) { };
+ #endif
++void do_uaccess_flush_fixups(enum l1d_flush_type types);
++void do_entry_flush_fixups(enum l1d_flush_type types);
+ void do_barrier_nospec_fixups(bool enable);
+ extern bool barrier_nospec_enabled;
+
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 38a25ff8afb76..ab6612e35ace3 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -6,6 +6,7 @@
+ #include <asm/processor.h>
+ #include <asm/page.h>
+ #include <asm/extable.h>
++#include <asm/kup.h>
+
+ /*
+ * The fs value determines whether argument validity checking should be
+@@ -91,9 +92,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
+ __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+ #define __get_user(x, ptr) \
+- __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
++ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
+ #define __put_user(x, ptr) \
+- __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
++ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), true)
++
++#define __get_user_allowed(x, ptr) \
++ __get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)
++#define __put_user_allowed(x, ptr) \
++ __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), false)
+
+ #define __get_user_inatomic(x, ptr) \
+ __get_user_nosleep((x), (ptr), sizeof(*(ptr)))
+@@ -138,7 +144,7 @@ extern long __put_user_bad(void);
+ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+ #endif /* __powerpc64__ */
+
+-#define __put_user_size(x, ptr, size, retval) \
++#define __put_user_size_allowed(x, ptr, size, retval) \
+ do { \
+ retval = 0; \
+ switch (size) { \
+@@ -150,14 +156,28 @@ do { \
+ } \
+ } while (0)
+
+-#define __put_user_nocheck(x, ptr, size) \
++#define __put_user_size(x, ptr, size, retval) \
++do { \
++ allow_write_to_user(ptr, size); \
++ __put_user_size_allowed(x, ptr, size, retval); \
++ prevent_write_to_user(ptr, size); \
++} while (0)
++
++#define __put_user_nocheck(x, ptr, size, do_allow) \
+ ({ \
+ long __pu_err; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
++ __typeof__(*(ptr)) __pu_val = (x); \
++ __typeof__(size) __pu_size = (size); \
++ \
+ if (!is_kernel_addr((unsigned long)__pu_addr)) \
+ might_fault(); \
+- __chk_user_ptr(ptr); \
+- __put_user_size((x), __pu_addr, (size), __pu_err); \
++ __chk_user_ptr(__pu_addr); \
++ if (do_allow) \
++ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
++ else \
++ __put_user_size_allowed(__pu_val, __pu_addr, __pu_size, __pu_err); \
++ \
+ __pu_err; \
+ })
+
+@@ -165,9 +185,13 @@ do { \
+ ({ \
+ long __pu_err = -EFAULT; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
++ __typeof__(*(ptr)) __pu_val = (x); \
++ __typeof__(size) __pu_size = (size); \
++ \
+ might_fault(); \
+- if (access_ok(VERIFY_WRITE, __pu_addr, size)) \
+- __put_user_size((x), __pu_addr, (size), __pu_err); \
++ if (access_ok(VERIFY_WRITE, __pu_addr, __pu_size)) \
++ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
++ \
+ __pu_err; \
+ })
+
+@@ -175,8 +199,12 @@ do { \
+ ({ \
+ long __pu_err; \
+ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
+- __chk_user_ptr(ptr); \
+- __put_user_size((x), __pu_addr, (size), __pu_err); \
++ __typeof__(*(ptr)) __pu_val = (x); \
++ __typeof__(size) __pu_size = (size); \
++ \
++ __chk_user_ptr(__pu_addr); \
++ __put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
++ \
+ __pu_err; \
+ })
+
+@@ -234,7 +262,7 @@ extern long __get_user_bad(void);
+ : "b" (addr), "i" (-EFAULT), "0" (err))
+ #endif /* __powerpc64__ */
+
+-#define __get_user_size(x, ptr, size, retval) \
++#define __get_user_size_allowed(x, ptr, size, retval) \
+ do { \
+ retval = 0; \
+ __chk_user_ptr(ptr); \
+@@ -249,6 +277,13 @@ do { \
+ } \
+ } while (0)
+
++#define __get_user_size(x, ptr, size, retval) \
++do { \
++ allow_read_from_user(ptr, size); \
++ __get_user_size_allowed(x, ptr, size, retval); \
++ prevent_read_from_user(ptr, size); \
++} while (0)
++
+ /*
+ * This is a type: either unsigned long, if the argument fits into
+ * that type, or otherwise unsigned long long.
+@@ -256,17 +291,23 @@ do { \
+ #define __long_type(x) \
+ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
+
+-#define __get_user_nocheck(x, ptr, size) \
++#define __get_user_nocheck(x, ptr, size, do_allow) \
+ ({ \
+ long __gu_err; \
+ __long_type(*(ptr)) __gu_val; \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+- __chk_user_ptr(ptr); \
++ __typeof__(size) __gu_size = (size); \
++ \
++ __chk_user_ptr(__gu_addr); \
+ if (!is_kernel_addr((unsigned long)__gu_addr)) \
+ might_fault(); \
+ barrier_nospec(); \
+- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
++ if (do_allow) \
++ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
++ else \
++ __get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ (x) = (__typeof__(*(ptr)))__gu_val; \
++ \
+ __gu_err; \
+ })
+
+@@ -275,12 +316,15 @@ do { \
+ long __gu_err = -EFAULT; \
+ __long_type(*(ptr)) __gu_val = 0; \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
++ __typeof__(size) __gu_size = (size); \
++ \
+ might_fault(); \
+- if (access_ok(VERIFY_READ, __gu_addr, (size))) { \
++ if (access_ok(VERIFY_READ, __gu_addr, __gu_size)) { \
+ barrier_nospec(); \
+- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
++ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ } \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ \
+ __gu_err; \
+ })
+
+@@ -289,10 +333,13 @@ do { \
+ long __gu_err; \
+ __long_type(*(ptr)) __gu_val; \
+ __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
+- __chk_user_ptr(ptr); \
++ __typeof__(size) __gu_size = (size); \
++ \
++ __chk_user_ptr(__gu_addr); \
+ barrier_nospec(); \
+- __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
++ __get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
+ (x) = (__force __typeof__(*(ptr)))__gu_val; \
++ \
+ __gu_err; \
+ })
+
+@@ -306,16 +353,22 @@ extern unsigned long __copy_tofrom_user(void __user *to,
+ static inline unsigned long
+ raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+ {
++ unsigned long ret;
++
+ barrier_nospec();
+- return __copy_tofrom_user(to, from, n);
++ allow_user_access(to, from, n);
++ ret = __copy_tofrom_user(to, from, n);
++ prevent_user_access(to, from, n);
++ return ret;
+ }
+ #endif /* __powerpc64__ */
+
+ static inline unsigned long raw_copy_from_user(void *to,
+ const void __user *from, unsigned long n)
+ {
++ unsigned long ret;
+ if (__builtin_constant_p(n) && (n <= 8)) {
+- unsigned long ret = 1;
++ ret = 1;
+
+ switch (n) {
+ case 1:
+@@ -340,27 +393,30 @@ static inline unsigned long raw_copy_from_user(void *to,
+ }
+
+ barrier_nospec();
+- return __copy_tofrom_user((__force void __user *)to, from, n);
++ allow_read_from_user(from, n);
++ ret = __copy_tofrom_user((__force void __user *)to, from, n);
++ prevent_read_from_user(from, n);
++ return ret;
+ }
+
+-static inline unsigned long raw_copy_to_user(void __user *to,
+- const void *from, unsigned long n)
++static inline unsigned long
++raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
+ {
+ if (__builtin_constant_p(n) && (n <= 8)) {
+ unsigned long ret = 1;
+
+ switch (n) {
+ case 1:
+- __put_user_size(*(u8 *)from, (u8 __user *)to, 1, ret);
++ __put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
+ break;
+ case 2:
+- __put_user_size(*(u16 *)from, (u16 __user *)to, 2, ret);
++ __put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
+ break;
+ case 4:
+- __put_user_size(*(u32 *)from, (u32 __user *)to, 4, ret);
++ __put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
+ break;
+ case 8:
+- __put_user_size(*(u64 *)from, (u64 __user *)to, 8, ret);
++ __put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
+ break;
+ }
+ if (ret == 0)
+@@ -370,14 +426,34 @@ static inline unsigned long raw_copy_to_user(void __user *to,
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
+ }
+
+-extern unsigned long __clear_user(void __user *addr, unsigned long size);
++static inline unsigned long
++raw_copy_to_user(void __user *to, const void *from, unsigned long n)
++{
++ unsigned long ret;
++
++ allow_write_to_user(to, n);
++ ret = raw_copy_to_user_allowed(to, from, n);
++ prevent_write_to_user(to, n);
++ return ret;
++}
++
++unsigned long __arch_clear_user(void __user *addr, unsigned long size);
+
+ static inline unsigned long clear_user(void __user *addr, unsigned long size)
+ {
++ unsigned long ret = size;
+ might_fault();
+- if (likely(access_ok(VERIFY_WRITE, addr, size)))
+- return __clear_user(addr, size);
+- return size;
++ if (likely(access_ok(VERIFY_WRITE, addr, size))) {
++ allow_write_to_user(addr, size);
++ ret = __arch_clear_user(addr, size);
++ prevent_write_to_user(addr, size);
++ }
++ return ret;
++}
++
++static inline unsigned long __clear_user(void __user *addr, unsigned long size)
++{
++ return clear_user(addr, size);
+ }
+
+ extern long strncpy_from_user(char *dst, const char __user *src, long count);
+@@ -388,4 +464,13 @@ extern long __copy_from_user_flushcache(void *dst, const void __user *src,
+ extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
+ size_t len);
+
++#define user_access_begin(type, ptr, len) access_ok(type, ptr, len)
++#define user_access_end() prevent_user_access(NULL, NULL, ~0ul)
++
++#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
++#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
++#define unsafe_put_user(x, p, e) unsafe_op_wrap(__put_user_allowed(x, p), e)
++#define unsafe_copy_to_user(d, s, l, e) \
++ unsafe_op_wrap(raw_copy_to_user_allowed(d, s, l), e)
++
+ #endif /* _ARCH_POWERPC_UACCESS_H */
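The *_allowed variants and the unsafe_*_user() wrappers added above exist so a caller can pay the allow/prevent cost, and therefore the uaccess flush, once around a batch of accesses instead of once per access. A sketch of a typical caller under this 4.19 API, where user_access_begin() is just access_ok() (demo_fill_user_array is illustrative):

	static int demo_fill_user_array(u32 __user *uptr, const u32 *vals, int n)
	{
		int i;

		if (!user_access_begin(VERIFY_WRITE, uptr, n * sizeof(u32)))
			return -EFAULT;

		for (i = 0; i < n; i++)
			unsafe_put_user(vals[i], &uptr[i], efault);

		user_access_end();		/* single flush happens here */
		return 0;

	efault:
		user_access_end();
		return -EFAULT;
	}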
+diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
+index e1dab9b1e4478..344e2758b22df 100644
+--- a/arch/powerpc/kernel/exceptions-64s.S
++++ b/arch/powerpc/kernel/exceptions-64s.S
+@@ -540,7 +540,7 @@ EXC_COMMON_BEGIN(unrecover_mce)
+ b 1b
+
+
+-EXC_REAL(data_access, 0x300, 0x80)
++EXC_REAL_OOL(data_access, 0x300, 0x80)
+ EXC_VIRT(data_access, 0x4300, 0x80, 0x300)
+ TRAMP_KVM_SKIP(PACA_EXGEN, 0x300)
+
+@@ -572,13 +572,16 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+ EXC_REAL_BEGIN(data_access_slb, 0x380, 0x80)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
++ b tramp_data_access_slb
++EXC_REAL_END(data_access_slb, 0x380, 0x80)
++
++TRAMP_REAL_BEGIN(tramp_data_access_slb)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
+ mr r12,r3 /* save r3 */
+ mfspr r3,SPRN_DAR
+ mfspr r11,SPRN_SRR1
+ crset 4*cr6+eq
+ BRANCH_TO_COMMON(r10, slb_miss_common)
+-EXC_REAL_END(data_access_slb, 0x380, 0x80)
+
+ EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
+ SET_SCRATCH0(r13)
+@@ -593,7 +596,7 @@ EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
+ TRAMP_KVM_SKIP(PACA_EXSLB, 0x380)
+
+
+-EXC_REAL(instruction_access, 0x400, 0x80)
++EXC_REAL_OOL(instruction_access, 0x400, 0x80)
+ EXC_VIRT(instruction_access, 0x4400, 0x80, 0x400)
+ TRAMP_KVM(PACA_EXGEN, 0x400)
+
+@@ -616,13 +619,16 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+ EXC_REAL_BEGIN(instruction_access_slb, 0x480, 0x80)
+ SET_SCRATCH0(r13)
+ EXCEPTION_PROLOG_0(PACA_EXSLB)
++ b tramp_instruction_access_slb
++EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
++
++TRAMP_REAL_BEGIN(tramp_instruction_access_slb)
+ EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
+ mr r12,r3 /* save r3 */
+ mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
+ mfspr r11,SPRN_SRR1
+ crclr 4*cr6+eq
+ BRANCH_TO_COMMON(r10, slb_miss_common)
+-EXC_REAL_END(instruction_access_slb, 0x480, 0x80)
+
+ EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
+ SET_SCRATCH0(r13)
+@@ -883,13 +889,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
+
+
+ EXC_REAL_OOL_MASKABLE(decrementer, 0x900, 0x80, IRQS_DISABLED)
+-EXC_VIRT_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
++EXC_VIRT_OOL_MASKABLE(decrementer, 0x4900, 0x80, 0x900, IRQS_DISABLED)
+ TRAMP_KVM(PACA_EXGEN, 0x900)
+ EXC_COMMON_ASYNC(decrementer_common, 0x900, timer_interrupt)
+
+
+-EXC_REAL_HV(hdecrementer, 0x980, 0x80)
+-EXC_VIRT_HV(hdecrementer, 0x4980, 0x80, 0x980)
++EXC_REAL_OOL_HV(hdecrementer, 0x980, 0x80)
++EXC_VIRT_OOL_HV(hdecrementer, 0x4980, 0x80, 0x980)
+ TRAMP_KVM_HV(PACA_EXGEN, 0x980)
+ EXC_COMMON(hdecrementer_common, 0x980, hdec_interrupt)
+
+@@ -1523,15 +1529,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
+ .endr
+ blr
+
+-TRAMP_REAL_BEGIN(rfi_flush_fallback)
+- SET_SCRATCH0(r13);
+- GET_PACA(r13);
+- std r1,PACA_EXRFI+EX_R12(r13)
+- ld r1,PACAKSAVE(r13)
+- std r9,PACA_EXRFI+EX_R9(r13)
+- std r10,PACA_EXRFI+EX_R10(r13)
+- std r11,PACA_EXRFI+EX_R11(r13)
+- mfctr r9
++/* Clobbers r10, r11, ctr */
++.macro L1D_DISPLACEMENT_FLUSH
+ ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+ ld r11,PACA_L1D_FLUSH_SIZE(r13)
+ srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+@@ -1542,7 +1541,7 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ sync
+
+ /*
+- * The load adresses are at staggered offsets within cachelines,
++ * The load addresses are at staggered offsets within cachelines,
+ * which suits some pipelines better (on others it should not
+ * hurt).
+ */
+@@ -1557,7 +1556,30 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
+ ld r11,(0x80 + 8)*7(r10)
+ addi r10,r10,0x80*8
+ bdnz 1b
++.endm
++
++TRAMP_REAL_BEGIN(entry_flush_fallback)
++ std r9,PACA_EXRFI+EX_R9(r13)
++ std r10,PACA_EXRFI+EX_R10(r13)
++ std r11,PACA_EXRFI+EX_R11(r13)
++ mfctr r9
++ L1D_DISPLACEMENT_FLUSH
++ mtctr r9
++ ld r9,PACA_EXRFI+EX_R9(r13)
++ ld r10,PACA_EXRFI+EX_R10(r13)
++ ld r11,PACA_EXRFI+EX_R11(r13)
++ blr
+
++TRAMP_REAL_BEGIN(rfi_flush_fallback)
++ SET_SCRATCH0(r13);
++ GET_PACA(r13);
++ std r1,PACA_EXRFI+EX_R12(r13)
++ ld r1,PACAKSAVE(r13)
++ std r9,PACA_EXRFI+EX_R9(r13)
++ std r10,PACA_EXRFI+EX_R10(r13)
++ std r11,PACA_EXRFI+EX_R11(r13)
++ mfctr r9
++ L1D_DISPLACEMENT_FLUSH
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+@@ -1575,32 +1597,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ std r10,PACA_EXRFI+EX_R10(r13)
+ std r11,PACA_EXRFI+EX_R11(r13)
+ mfctr r9
+- ld r10,PACA_RFI_FLUSH_FALLBACK_AREA(r13)
+- ld r11,PACA_L1D_FLUSH_SIZE(r13)
+- srdi r11,r11,(7 + 3) /* 128 byte lines, unrolled 8x */
+- mtctr r11
+- DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r11) /* Stop prefetch streams */
+-
+- /* order ld/st prior to dcbt stop all streams with flushing */
+- sync
+-
+- /*
+- * The load adresses are at staggered offsets within cachelines,
+- * which suits some pipelines better (on others it should not
+- * hurt).
+- */
+-1:
+- ld r11,(0x80 + 8)*0(r10)
+- ld r11,(0x80 + 8)*1(r10)
+- ld r11,(0x80 + 8)*2(r10)
+- ld r11,(0x80 + 8)*3(r10)
+- ld r11,(0x80 + 8)*4(r10)
+- ld r11,(0x80 + 8)*5(r10)
+- ld r11,(0x80 + 8)*6(r10)
+- ld r11,(0x80 + 8)*7(r10)
+- addi r10,r10,0x80*8
+- bdnz 1b
+-
++ L1D_DISPLACEMENT_FLUSH
+ mtctr r9
+ ld r9,PACA_EXRFI+EX_R9(r13)
+ ld r10,PACA_EXRFI+EX_R10(r13)
+@@ -1609,6 +1606,19 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
+ GET_SCRATCH0(r13);
+ hrfid
+
++USE_TEXT_SECTION()
++
++_GLOBAL(do_uaccess_flush)
++ UACCESS_FLUSH_FIXUP_SECTION
++ nop
++ nop
++ nop
++ blr
++ L1D_DISPLACEMENT_FLUSH
++ blr
++_ASM_NOKPROBE_SYMBOL(do_uaccess_flush)
++EXPORT_SYMBOL(do_uaccess_flush)
++
+ /*
+ * Real mode exceptions actually use this too, but alternate
+ * instruction code patches (which end up in the common .text area)
+diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
+index 9fd2ff28b8ff2..dc99258f2e8c6 100644
+--- a/arch/powerpc/kernel/head_8xx.S
++++ b/arch/powerpc/kernel/head_8xx.S
+@@ -356,11 +356,9 @@ _ENTRY(ITLBMiss_cmp)
+ /* Load the MI_TWC with the attributes for this "segment." */
+ mtspr SPRN_MI_TWC, r11 /* Set segment attributes */
+
+-#ifdef CONFIG_SWAP
+- rlwinm r11, r10, 32-5, _PAGE_PRESENT
++ rlwinm r11, r10, 32-7, _PAGE_PRESENT
+ and r11, r11, r10
+ rlwimi r10, r11, 0, _PAGE_PRESENT
+-#endif
+ li r11, RPN_PATTERN | 0x200
+ /* The Linux PTE won't go exactly into the MMU TLB.
+ * Software indicator bits 20 and 23 must be clear.
+@@ -482,11 +480,9 @@ _ENTRY(DTLBMiss_jmp)
+ * r11 = ((r10 & PRESENT) & ((r10 & ACCESSED) >> 5));
+ * r10 = (r10 & ~PRESENT) | r11;
+ */
+-#ifdef CONFIG_SWAP
+- rlwinm r11, r10, 32-5, _PAGE_PRESENT
++ rlwinm r11, r10, 32-7, _PAGE_PRESENT
+ and r11, r11, r10
+ rlwimi r10, r11, 0, _PAGE_PRESENT
+-#endif
+ /* The Linux PTE won't go exactly into the MMU TLB.
+ * Software indicator bits 24, 25, 26, and 27 must be
+ * set. All other Linux PTE bits control the behavior
+diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
+index bd4996958b13d..122365624d3da 100644
+--- a/arch/powerpc/kernel/setup_64.c
++++ b/arch/powerpc/kernel/setup_64.c
+@@ -863,7 +863,13 @@ early_initcall(disable_hardlockup_detector);
+ static enum l1d_flush_type enabled_flush_types;
+ static void *l1d_flush_fallback_area;
+ static bool no_rfi_flush;
++static bool no_entry_flush;
++static bool no_uaccess_flush;
+ bool rfi_flush;
++bool entry_flush;
++bool uaccess_flush;
++DEFINE_STATIC_KEY_FALSE(uaccess_flush_key);
++EXPORT_SYMBOL(uaccess_flush_key);
+
+ static int __init handle_no_rfi_flush(char *p)
+ {
+@@ -873,6 +879,22 @@ static int __init handle_no_rfi_flush(char *p)
+ }
+ early_param("no_rfi_flush", handle_no_rfi_flush);
+
++static int __init handle_no_entry_flush(char *p)
++{
++ pr_info("entry-flush: disabled on command line.");
++ no_entry_flush = true;
++ return 0;
++}
++early_param("no_entry_flush", handle_no_entry_flush);
++
++static int __init handle_no_uaccess_flush(char *p)
++{
++ pr_info("uaccess-flush: disabled on command line.");
++ no_uaccess_flush = true;
++ return 0;
++}
++early_param("no_uaccess_flush", handle_no_uaccess_flush);
++
+ /*
+ * The RFI flush is not KPTI, but because users will see doco that says to use
+ * nopti we hijack that option here to also disable the RFI flush.
+@@ -904,6 +926,32 @@ void rfi_flush_enable(bool enable)
+ rfi_flush = enable;
+ }
+
++void entry_flush_enable(bool enable)
++{
++ if (enable) {
++ do_entry_flush_fixups(enabled_flush_types);
++ on_each_cpu(do_nothing, NULL, 1);
++ } else {
++ do_entry_flush_fixups(L1D_FLUSH_NONE);
++ }
++
++ entry_flush = enable;
++}
++
++void uaccess_flush_enable(bool enable)
++{
++ if (enable) {
++ do_uaccess_flush_fixups(enabled_flush_types);
++ static_branch_enable(&uaccess_flush_key);
++ on_each_cpu(do_nothing, NULL, 1);
++ } else {
++ static_branch_disable(&uaccess_flush_key);
++ do_uaccess_flush_fixups(L1D_FLUSH_NONE);
++ }
++
++ uaccess_flush = enable;
++}
++
+ static void __ref init_fallback_flush(void)
+ {
+ u64 l1d_size, limit;
+@@ -957,10 +1005,28 @@ void setup_rfi_flush(enum l1d_flush_type types, bool enable)
+
+ enabled_flush_types = types;
+
+- if (!no_rfi_flush && !cpu_mitigations_off())
++ if (!cpu_mitigations_off() && !no_rfi_flush)
+ rfi_flush_enable(enable);
+ }
+
++void setup_entry_flush(bool enable)
++{
++ if (cpu_mitigations_off())
++ return;
++
++ if (!no_entry_flush)
++ entry_flush_enable(enable);
++}
++
++void setup_uaccess_flush(bool enable)
++{
++ if (cpu_mitigations_off())
++ return;
++
++ if (!no_uaccess_flush)
++ uaccess_flush_enable(enable);
++}
++
+ #ifdef CONFIG_DEBUG_FS
+ static int rfi_flush_set(void *data, u64 val)
+ {
+@@ -988,9 +1054,63 @@ static int rfi_flush_get(void *data, u64 *val)
+
+ DEFINE_SIMPLE_ATTRIBUTE(fops_rfi_flush, rfi_flush_get, rfi_flush_set, "%llu\n");
+
++static int entry_flush_set(void *data, u64 val)
++{
++ bool enable;
++
++ if (val == 1)
++ enable = true;
++ else if (val == 0)
++ enable = false;
++ else
++ return -EINVAL;
++
++ /* Only do anything if we're changing state */
++ if (enable != entry_flush)
++ entry_flush_enable(enable);
++
++ return 0;
++}
++
++static int entry_flush_get(void *data, u64 *val)
++{
++ *val = entry_flush ? 1 : 0;
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_entry_flush, entry_flush_get, entry_flush_set, "%llu\n");
++
++static int uaccess_flush_set(void *data, u64 val)
++{
++ bool enable;
++
++ if (val == 1)
++ enable = true;
++ else if (val == 0)
++ enable = false;
++ else
++ return -EINVAL;
++
++ /* Only do anything if we're changing state */
++ if (enable != uaccess_flush)
++ uaccess_flush_enable(enable);
++
++ return 0;
++}
++
++static int uaccess_flush_get(void *data, u64 *val)
++{
++ *val = uaccess_flush ? 1 : 0;
++ return 0;
++}
++
++DEFINE_SIMPLE_ATTRIBUTE(fops_uaccess_flush, uaccess_flush_get, uaccess_flush_set, "%llu\n");
++
+ static __init int rfi_flush_debugfs_init(void)
+ {
+ debugfs_create_file("rfi_flush", 0600, powerpc_debugfs_root, NULL, &fops_rfi_flush);
++ debugfs_create_file("entry_flush", 0600, powerpc_debugfs_root, NULL, &fops_entry_flush);
++ debugfs_create_file("uaccess_flush", 0600, powerpc_debugfs_root, NULL, &fops_uaccess_flush);
+ return 0;
+ }
+ device_initcall(rfi_flush_debugfs_init);
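With the debugfs files registered, the entry and uaccess flushes can be flipped at runtime alongside rfi_flush. A userspace sketch, assuming debugfs is mounted at /sys/kernel/debug (powerpc_debugfs_root sits under "powerpc" there):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int write_flag(const char *path, char val)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, &val, 1) != 1) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		const char *path = "/sys/kernel/debug/powerpc/entry_flush";

		/* Writing 0 disables the entry flush, 1 re-enables it. */
		if (write_flag(path, '0') || write_flag(path, '1')) {
			perror(path);
			return 1;
		}
		return 0;
	}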
+diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
+index d081d726ca8ea..695432965f206 100644
+--- a/arch/powerpc/kernel/vmlinux.lds.S
++++ b/arch/powerpc/kernel/vmlinux.lds.S
+@@ -140,6 +140,20 @@ SECTIONS
+ __stop___stf_entry_barrier_fixup = .;
+ }
+
++ . = ALIGN(8);
++ __uaccess_flush_fixup : AT(ADDR(__uaccess_flush_fixup) - LOAD_OFFSET) {
++ __start___uaccess_flush_fixup = .;
++ *(__uaccess_flush_fixup)
++ __stop___uaccess_flush_fixup = .;
++ }
++
++ . = ALIGN(8);
++ __entry_flush_fixup : AT(ADDR(__entry_flush_fixup) - LOAD_OFFSET) {
++ __start___entry_flush_fixup = .;
++ *(__entry_flush_fixup)
++ __stop___entry_flush_fixup = .;
++ }
++
+ . = ALIGN(8);
+ __stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
+ __start___stf_exit_barrier_fixup = .;
+diff --git a/arch/powerpc/lib/checksum_wrappers.c b/arch/powerpc/lib/checksum_wrappers.c
+index a0cb63fb76a1a..8d83c39be7e49 100644
+--- a/arch/powerpc/lib/checksum_wrappers.c
++++ b/arch/powerpc/lib/checksum_wrappers.c
+@@ -29,6 +29,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
+ unsigned int csum;
+
+ might_sleep();
++ allow_read_from_user(src, len);
+
+ *err_ptr = 0;
+
+@@ -60,6 +61,7 @@ __wsum csum_and_copy_from_user(const void __user *src, void *dst,
+ }
+
+ out:
++ prevent_read_from_user(src, len);
+ return (__force __wsum)csum;
+ }
+ EXPORT_SYMBOL(csum_and_copy_from_user);
+@@ -70,6 +72,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
+ unsigned int csum;
+
+ might_sleep();
++ allow_write_to_user(dst, len);
+
+ *err_ptr = 0;
+
+@@ -97,6 +100,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
+ }
+
+ out:
++ prevent_write_to_user(dst, len);
+ return (__force __wsum)csum;
+ }
+ EXPORT_SYMBOL(csum_and_copy_to_user);
+diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
+index dbe478e7b8e09..065a3426f0ebc 100644
+--- a/arch/powerpc/lib/feature-fixups.c
++++ b/arch/powerpc/lib/feature-fixups.c
+@@ -232,6 +232,110 @@ void do_stf_barrier_fixups(enum stf_barrier_type types)
+ do_stf_exit_barrier_fixups(types);
+ }
+
++void do_uaccess_flush_fixups(enum l1d_flush_type types)
++{
++ unsigned int instrs[4], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___uaccess_flush_fixup);
++ end = PTRRELOC(&__stop___uaccess_flush_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++ instrs[3] = 0x4e800020; /* blr */
++
++ i = 0;
++ if (types == L1D_FLUSH_FALLBACK) {
++ instrs[3] = 0x60000000; /* nop */
++ /* fallthrough to fallback flush */
++ }
++
++ if (types & L1D_FLUSH_ORI) {
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
++ }
++
++ if (types & L1D_FLUSH_MTTRIG)
++ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++
++ patch_instruction((dest + 1), instrs[1]);
++ patch_instruction((dest + 2), instrs[2]);
++ patch_instruction((dest + 3), instrs[3]);
++ }
++
++ printk(KERN_DEBUG "uaccess-flush: patched %d locations (%s flush)\n", i,
++ (types == L1D_FLUSH_NONE) ? "no" :
++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
++ ? "ori+mttrig type"
++ : "ori type" :
++ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
++ : "unknown");
++}
++
++void do_entry_flush_fixups(enum l1d_flush_type types)
++{
++ unsigned int instrs[3], *dest;
++ long *start, *end;
++ int i;
++
++ start = PTRRELOC(&__start___entry_flush_fixup);
++ end = PTRRELOC(&__stop___entry_flush_fixup);
++
++ instrs[0] = 0x60000000; /* nop */
++ instrs[1] = 0x60000000; /* nop */
++ instrs[2] = 0x60000000; /* nop */
++
++ i = 0;
++ if (types == L1D_FLUSH_FALLBACK) {
++ instrs[i++] = 0x7d4802a6; /* mflr r10 */
++ instrs[i++] = 0x60000000; /* branch patched below */
++ instrs[i++] = 0x7d4803a6; /* mtlr r10 */
++ }
++
++ if (types & L1D_FLUSH_ORI) {
++ instrs[i++] = 0x63ff0000; /* ori 31,31,0 speculation barrier */
++ instrs[i++] = 0x63de0000; /* ori 30,30,0 L1d flush */
++ }
++
++ if (types & L1D_FLUSH_MTTRIG)
++ instrs[i++] = 0x7c12dba6; /* mtspr TRIG2,r0 (SPR #882) */
++
++ for (i = 0; start < end; start++, i++) {
++ dest = (void *)start + *start;
++
++ pr_devel("patching dest %lx\n", (unsigned long)dest);
++
++ patch_instruction(dest, instrs[0]);
++
++ if (types == L1D_FLUSH_FALLBACK)
++ patch_branch((dest + 1), (unsigned long)&entry_flush_fallback,
++ BRANCH_SET_LINK);
++ else
++ patch_instruction((dest + 1), instrs[1]);
++
++ patch_instruction((dest + 2), instrs[2]);
++ }
++
++ printk(KERN_DEBUG "entry-flush: patched %d locations (%s flush)\n", i,
++ (types == L1D_FLUSH_NONE) ? "no" :
++ (types == L1D_FLUSH_FALLBACK) ? "fallback displacement" :
++ (types & L1D_FLUSH_ORI) ? (types & L1D_FLUSH_MTTRIG)
++ ? "ori+mttrig type"
++ : "ori type" :
++ (types & L1D_FLUSH_MTTRIG) ? "mttrig type"
++ : "unknown");
++}
++
+ void do_rfi_flush_fixups(enum l1d_flush_type types)
+ {
+ unsigned int instrs[3], *dest;
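Both fixup loops above recover each patch site from a self-relative offset: FTR_ENTRY_OFFSET records "site - table_entry" at link time, so dest = (void *)start + *start yields the absolute address wherever the kernel ends up loaded. A standalone sketch of that arithmetic (userspace, names illustrative):

	#include <stdio.h>
	#include <stdint.h>

	static char site_a[4], site_b[4];	/* stand-ins for patch sites */
	static intptr_t table[2];		/* stand-in for __entry_flush_fixup */

	int main(void)
	{
		intptr_t *start;

		/* Emulate what the linker records in the fixup section. */
		table[0] = (intptr_t)site_a - (intptr_t)&table[0];
		table[1] = (intptr_t)site_b - (intptr_t)&table[1];

		for (start = table; start < table + 2; start++) {
			void *dest = (char *)start + *start; /* same arithmetic */
			printf("patch site at %p\n", dest);
		}
		return 0;
	}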
+diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S
+index f69a6aab7bfbb..1ddb26394e8ac 100644
+--- a/arch/powerpc/lib/string_32.S
++++ b/arch/powerpc/lib/string_32.S
+@@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
+ LG_CACHELINE_BYTES = L1_CACHE_SHIFT
+ CACHELINE_MASK = (L1_CACHE_BYTES-1)
+
+-_GLOBAL(__clear_user)
++_GLOBAL(__arch_clear_user)
+ /*
+ * Use dcbz on the complete cache lines in the destination
+ * to set them to zero. This requires that the destination
+@@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
+ EX_TABLE(8b, 91b)
+ EX_TABLE(9b, 91b)
+
+-EXPORT_SYMBOL(__clear_user)
++EXPORT_SYMBOL(__arch_clear_user)
+diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
+index 56aac4c220257..ea3798f4f25f2 100644
+--- a/arch/powerpc/lib/string_64.S
++++ b/arch/powerpc/lib/string_64.S
+@@ -29,7 +29,7 @@ PPC64_CACHES:
+ .section ".text"
+
+ /**
+- * __clear_user: - Zero a block of memory in user space, with less checking.
++ * __arch_clear_user: - Zero a block of memory in user space, with less checking.
+ * @to: Destination address, in user space.
+ * @n: Number of bytes to zero.
+ *
+@@ -70,7 +70,7 @@ err3; stb r0,0(r3)
+ mr r3,r4
+ blr
+
+-_GLOBAL_TOC(__clear_user)
++_GLOBAL_TOC(__arch_clear_user)
+ cmpdi r4,32
+ neg r6,r3
+ li r0,0
+@@ -193,4 +193,4 @@ err1; dcbz 0,r3
+ cmpdi r4,32
+ blt .Lshort_clear
+ b .Lmedium_clear
+-EXPORT_SYMBOL(__clear_user)
++EXPORT_SYMBOL(__arch_clear_user)
+diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
+index adddde0236227..5068dd7f6e74b 100644
+--- a/arch/powerpc/platforms/powernv/setup.c
++++ b/arch/powerpc/platforms/powernv/setup.c
+@@ -125,12 +125,29 @@ static void pnv_setup_rfi_flush(void)
+ type = L1D_FLUSH_ORI;
+ }
+
++ /*
++ * If we are non-Power9 bare metal, we don't need to flush on kernel
++ * entry or after user access: they fix a P9 specific vulnerability.
++ */
++ if (!pvr_version_is(PVR_POWER9)) {
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
++ security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
++ }
++
+ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && \
+ (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || \
+ security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV));
+
+ setup_rfi_flush(type, enable);
+ setup_count_cache_flush();
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
++ setup_entry_flush(enable);
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
++ setup_uaccess_flush(enable);
+ }
+
+ static void __init pnv_setup_arch(void)
+diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
+index c2d318d1df021..2e0d38cafdd44 100644
+--- a/arch/powerpc/platforms/pseries/setup.c
++++ b/arch/powerpc/platforms/pseries/setup.c
+@@ -565,6 +565,14 @@ void pseries_setup_rfi_flush(void)
+
+ setup_rfi_flush(types, enable);
+ setup_count_cache_flush();
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_ENTRY);
++ setup_entry_flush(enable);
++
++ enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) &&
++ security_ftr_enabled(SEC_FTR_L1D_FLUSH_UACCESS);
++ setup_uaccess_flush(enable);
+ }
+
+ #ifdef CONFIG_PCI_IOV
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 670c2aedcefab..3e182c7ae7714 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -3994,6 +3994,12 @@ static int em_clflush(struct x86_emulate_ctxt *ctxt)
+ return X86EMUL_CONTINUE;
+ }
+
++static int em_clflushopt(struct x86_emulate_ctxt *ctxt)
++{
++ /* emulating clflushopt regardless of cpuid */
++ return X86EMUL_CONTINUE;
++}
++
+ static int em_movsxd(struct x86_emulate_ctxt *ctxt)
+ {
+ ctxt->dst.val = (s32) ctxt->src.val;
+@@ -4507,7 +4513,7 @@ static const struct opcode group11[] = {
+ };
+
+ static const struct gprefix pfx_0f_ae_7 = {
+- I(SrcMem | ByteOp, em_clflush), N, N, N,
++ I(SrcMem | ByteOp, em_clflush), I(SrcMem | ByteOp, em_clflushopt), N, N,
+ };
+
+ static const struct group_dual group15 = { {
+diff --git a/drivers/acpi/evged.c b/drivers/acpi/evged.c
+index 73f6093a5c16d..9f4b405a5c20b 100644
+--- a/drivers/acpi/evged.c
++++ b/drivers/acpi/evged.c
+@@ -110,7 +110,7 @@ static acpi_status acpi_ged_request_interrupt(struct acpi_resource *ares,
+
+ switch (gsi) {
+ case 0 ... 255:
+- sprintf(ev_name, "_%c%02hhX",
++ sprintf(ev_name, "_%c%02X",
+ trigger == ACPI_EDGE_SENSITIVE ? 'E' : 'L', gsi);
+
+ if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle)))
+diff --git a/drivers/input/keyboard/sunkbd.c b/drivers/input/keyboard/sunkbd.c
+index ad5d7f94f95a6..1c7aa86c92ab1 100644
+--- a/drivers/input/keyboard/sunkbd.c
++++ b/drivers/input/keyboard/sunkbd.c
+@@ -111,7 +111,8 @@ static irqreturn_t sunkbd_interrupt(struct serio *serio,
+ switch (data) {
+
+ case SUNKBD_RET_RESET:
+- schedule_work(&sunkbd->tq);
++ if (sunkbd->enabled)
++ schedule_work(&sunkbd->tq);
+ sunkbd->reset = -1;
+ break;
+
+@@ -212,16 +213,12 @@ static int sunkbd_initialize(struct sunkbd *sunkbd)
+ }
+
+ /*
+- * sunkbd_reinit() sets leds and beeps to a state the computer remembers they
+- * were in.
++ * sunkbd_set_leds_beeps() sets leds and beeps to a state the computer remembers
++ * they were in.
+ */
+
+-static void sunkbd_reinit(struct work_struct *work)
++static void sunkbd_set_leds_beeps(struct sunkbd *sunkbd)
+ {
+- struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
+-
+- wait_event_interruptible_timeout(sunkbd->wait, sunkbd->reset >= 0, HZ);
+-
+ serio_write(sunkbd->serio, SUNKBD_CMD_SETLED);
+ serio_write(sunkbd->serio,
+ (!!test_bit(LED_CAPSL, sunkbd->dev->led) << 3) |
+@@ -234,11 +231,39 @@ static void sunkbd_reinit(struct work_struct *work)
+ SUNKBD_CMD_BELLOFF - !!test_bit(SND_BELL, sunkbd->dev->snd));
+ }
+
++
++/*
++ * sunkbd_reinit() waits for the keyboard reset to complete and restores
++ * the state of leds and beeps.
++ */
++
++static void sunkbd_reinit(struct work_struct *work)
++{
++ struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq);
++
++ /*
++ * It is OK that we check sunkbd->enabled without pausing serio,
++ * as we only want to catch true->false transition that will
++ * happen once and we will be woken up for it.
++ */
++ wait_event_interruptible_timeout(sunkbd->wait,
++ sunkbd->reset >= 0 || !sunkbd->enabled,
++ HZ);
++
++ if (sunkbd->reset >= 0 && sunkbd->enabled)
++ sunkbd_set_leds_beeps(sunkbd);
++}
++
+ static void sunkbd_enable(struct sunkbd *sunkbd, bool enable)
+ {
+ serio_pause_rx(sunkbd->serio);
+ sunkbd->enabled = enable;
+ serio_continue_rx(sunkbd->serio);
++
++ if (!enable) {
++ wake_up_interruptible(&sunkbd->wait);
++ cancel_work_sync(&sunkbd->tq);
++ }
+ }
+
+ /*
+diff --git a/net/can/proc.c b/net/can/proc.c
+index 70fea17bb04c5..a3071f43acd76 100644
+--- a/net/can/proc.c
++++ b/net/can/proc.c
+@@ -467,6 +467,9 @@ void can_init_proc(struct net *net)
+ */
+ void can_remove_proc(struct net *net)
+ {
++ if (!net->can.proc_dir)
++ return;
++
+ if (net->can.pde_version)
+ remove_proc_entry(CAN_PROC_VERSION, net->can.proc_dir);
+
+@@ -494,6 +497,5 @@ void can_remove_proc(struct net *net)
+ if (net->can.pde_rcvlist_sff)
+ remove_proc_entry(CAN_PROC_RCVLIST_SFF, net->can.proc_dir);
+
+- if (net->can.proc_dir)
+- remove_proc_entry("can", net->proc_net);
++ remove_proc_entry("can", net->proc_net);
+ }
+diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
+index 9968b8a976f19..d11eb5139c92a 100644
+--- a/net/mac80211/sta_info.c
++++ b/net/mac80211/sta_info.c
+@@ -244,6 +244,24 @@ struct sta_info *sta_info_get_by_idx(struct ieee80211_sub_if_data *sdata,
+ */
+ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
+ {
++ /*
++ * If we had used sta_info_pre_move_state() then we might not
++ * have gone through the state transitions down again, so do
++ * it here now (and warn if it's inserted).
++ *
++ * This will clear state such as fast TX/RX that may have been
++ * allocated during state transitions.
++ */
++ while (sta->sta_state > IEEE80211_STA_NONE) {
++ int ret;
++
++ WARN_ON_ONCE(test_sta_flag(sta, WLAN_STA_INSERTED));
++
++ ret = sta_info_move_state(sta, sta->sta_state - 1);
++ if (WARN_ONCE(ret, "sta_info_move_state() returned %d\n", ret))
++ break;
++ }
++
+ if (sta->rate_ctrl)
+ rate_control_free_sta(sta);
+
+diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
+index ad33b99f5d21e..7b5e15cc6b717 100644
+--- a/tools/perf/util/cs-etm.c
++++ b/tools/perf/util/cs-etm.c
+@@ -87,9 +87,6 @@ struct cs_etm_queue {
+ struct cs_etm_packet *packet;
+ };
+
+-/* RB tree for quick conversion between traceID and metadata pointers */
+-static struct intlist *traceid_list;
+-
+ static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
+ static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
+ pid_t tid, u64 time_);
+diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
+index c7ef97b198c77..37f8d48179cae 100644
+--- a/tools/perf/util/cs-etm.h
++++ b/tools/perf/util/cs-etm.h
+@@ -53,6 +53,9 @@ enum {
+ CS_ETMV4_PRIV_MAX,
+ };
+
++/* RB tree for quick conversion between traceID and CPUs */
++struct intlist *traceid_list;
++
+ #define KiB(x) ((x) * 1024)
+ #define MiB(x) ((x) * 1024 * 1024)
+