summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author	Mike Pagano <mpagano@gentoo.org>	2020-01-17 14:57:03 -0500
committer	Mike Pagano <mpagano@gentoo.org>	2020-01-17 14:57:03 -0500
commita7336dd10118cf162ded486495fe0992035aa8eb (patch)
treef48211d9426ddd00c6861a1b970dd8ac0d511f1d
parentLinux patch 5.4.12 (diff)
downloadlinux-patches-a7336dd10118cf162ded486495fe0992035aa8eb.tar.gz
linux-patches-a7336dd10118cf162ded486495fe0992035aa8eb.tar.bz2
linux-patches-a7336dd10118cf162ded486495fe0992035aa8eb.zip
Linux patch 5.4.13 (tag: 5.4-13)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--	0000_README	| 4
-rw-r--r--	1012_linux-5.4.13.patch	| 8072
2 files changed, 8076 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index dcf5f795..6df7de3c 100644
--- a/0000_README
+++ b/0000_README
@@ -91,6 +91,10 @@ Patch: 1011_linux-5.4.12.patch
From: http://www.kernel.org
Desc: Linux 5.4.12
+Patch: 1012_linux-5.4.13.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.13
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1012_linux-5.4.13.patch b/1012_linux-5.4.13.patch
new file mode 100644
index 00000000..ef6f8ae1
--- /dev/null
+++ b/1012_linux-5.4.13.patch
@@ -0,0 +1,8072 @@
+diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+index 8ca498447aeb..05601a90a9b6 100644
+--- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io
++++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io
+@@ -29,13 +29,13 @@ Description: This file shows the system fans direction:
+
+ The files are read only.
+
+-What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/jtag_enable
++What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld3_version
+
+ Date: November 2018
+ KernelVersion: 5.0
+ Contact: Vadim Pasternak <vadimpmellanox.com>
+ Description: These files show with which CPLD versions have been burned
+- on LED board.
++ on LED or Gearbox board.
+
+ The files are read only.
+
+@@ -121,6 +121,15 @@ Description: These files show the system reset cause, as following: ComEx
+
+ The files are read only.
+
++What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/cpld4_version
++Date: November 2018
++KernelVersion: 5.0
++Contact: Vadim Pasternak <vadimpmellanox.com>
++Description: These files show with which CPLD versions have been burned
++ on LED board.
++
++ The files are read only.
++
+ Date: June 2019
+ KernelVersion: 5.3
+ Contact: Vadim Pasternak <vadimpmellanox.com>
+diff --git a/Documentation/ABI/testing/sysfs-bus-mei b/Documentation/ABI/testing/sysfs-bus-mei
+index 6bd45346ac7e..3f8701e8fa24 100644
+--- a/Documentation/ABI/testing/sysfs-bus-mei
++++ b/Documentation/ABI/testing/sysfs-bus-mei
+@@ -4,7 +4,7 @@ KernelVersion: 3.10
+ Contact: Samuel Ortiz <sameo@linux.intel.com>
+ linux-mei@linux.intel.com
+ Description: Stores the same MODALIAS value emitted by uevent
+- Format: mei:<mei device name>:<device uuid>:
++ Format: mei:<mei device name>:<device uuid>:<protocol version>
+
+ What: /sys/bus/mei/devices/.../name
+ Date: May 2015
+diff --git a/Documentation/admin-guide/device-mapper/index.rst b/Documentation/admin-guide/device-mapper/index.rst
+index c77c58b8f67b..d8dec8911eb3 100644
+--- a/Documentation/admin-guide/device-mapper/index.rst
++++ b/Documentation/admin-guide/device-mapper/index.rst
+@@ -8,6 +8,7 @@ Device Mapper
+ cache-policies
+ cache
+ delay
++ dm-clone
+ dm-crypt
+ dm-flakey
+ dm-init
+diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt
+index 6e5341b4f891..ee59409640f2 100644
+--- a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt
++++ b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt
+@@ -22,6 +22,6 @@ Example:
+ };
+
+ &ethernet_switch {
+- resets = <&reset>;
++ resets = <&reset 26>;
+ reset-names = "switch";
+ };
+diff --git a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
+index d6d5207fa996..17ff3892f439 100644
+--- a/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
++++ b/Documentation/devicetree/bindings/sound/mt8183-mt6358-ts3a227-max98357.txt
+@@ -2,9 +2,11 @@ MT8183 with MT6358, TS3A227 and MAX98357 CODECS
+
+ Required properties:
+ - compatible : "mediatek,mt8183_mt6358_ts3a227_max98357"
+-- mediatek,headset-codec: the phandles of ts3a227 codecs
+ - mediatek,platform: the phandle of MT8183 ASoC platform
+
++Optional properties:
++- mediatek,headset-codec: the phandles of ts3a227 codecs
++
+ Example:
+
+ sound {
+diff --git a/Documentation/networking/j1939.rst b/Documentation/networking/j1939.rst
+index dc60b13fcd09..f5be243d250a 100644
+--- a/Documentation/networking/j1939.rst
++++ b/Documentation/networking/j1939.rst
+@@ -339,7 +339,7 @@ To claim an address following code example can be used:
+ .pgn = J1939_PGN_ADDRESS_CLAIMED,
+ .pgn_mask = J1939_PGN_PDU1_MAX,
+ }, {
+- .pgn = J1939_PGN_ADDRESS_REQUEST,
++ .pgn = J1939_PGN_REQUEST,
+ .pgn_mask = J1939_PGN_PDU1_MAX,
+ }, {
+ .pgn = J1939_PGN_ADDRESS_COMMANDED,
+diff --git a/Documentation/scsi/smartpqi.txt b/Documentation/scsi/smartpqi.txt
+index 201f80c7c050..df129f55ace5 100644
+--- a/Documentation/scsi/smartpqi.txt
++++ b/Documentation/scsi/smartpqi.txt
+@@ -29,7 +29,7 @@ smartpqi specific entries in /sys
+ smartpqi host attributes:
+ -------------------------
+ /sys/class/scsi_host/host*/rescan
+- /sys/class/scsi_host/host*/version
++ /sys/class/scsi_host/host*/driver_version
+
+ The host rescan attribute is a write only attribute. Writing to this
+ attribute will trigger the driver to scan for new, changed, or removed
+diff --git a/MAINTAINERS b/MAINTAINERS
+index 9d3a5c54a41d..4f7ac27d8651 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -6973,6 +6973,7 @@ L: linux-acpi@vger.kernel.org
+ S: Maintained
+ F: Documentation/firmware-guide/acpi/gpio-properties.rst
+ F: drivers/gpio/gpiolib-acpi.c
++F: drivers/gpio/gpiolib-acpi.h
+
+ GPIO IR Transmitter
+ M: Sean Young <sean@mess.org>
+diff --git a/Makefile b/Makefile
+index 45c6264f1108..d4cf4700ae3f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 12
++SUBLEVEL = 13
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
+index 4b0bab2607e4..46e1be9e57a8 100644
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -240,6 +240,10 @@ int __cpu_disable(void)
+ if (ret)
+ return ret;
+
++#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
++ remove_cpu_topology(cpu);
++#endif
++
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
+index 5b9faba03afb..8d2e61d9e7a6 100644
+--- a/arch/arm/kernel/topology.c
++++ b/arch/arm/kernel/topology.c
+@@ -196,9 +196,8 @@ void store_cpu_topology(unsigned int cpuid)
+ struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+ unsigned int mpidr;
+
+- /* If the cpu topology has been already set, just return */
+- if (cpuid_topo->core_id != -1)
+- return;
++ if (cpuid_topo->package_id != -1)
++ goto topology_populated;
+
+ mpidr = read_cpuid_mpidr();
+
+@@ -231,14 +230,15 @@ void store_cpu_topology(unsigned int cpuid)
+ cpuid_topo->package_id = -1;
+ }
+
+- update_siblings_masks(cpuid);
+-
+ update_cpu_capacity(cpuid);
+
+ pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
+ cpuid, cpu_topology[cpuid].thread_id,
+ cpu_topology[cpuid].core_id,
+ cpu_topology[cpuid].package_id, mpidr);
++
++topology_populated:
++ update_siblings_masks(cpuid);
+ }
+
+ static inline int cpu_corepower_flags(void)
+diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+index 04ad2fb22b9a..dba3488492f1 100644
+--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
++++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+@@ -623,6 +623,8 @@
+ l21 {
+ regulator-min-microvolt = <2950000>;
+ regulator-max-microvolt = <2950000>;
++ regulator-allow-set-load;
++ regulator-system-load = <200000>;
+ };
+ l22 {
+ regulator-min-microvolt = <3300000>;
+diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
+index ea873b8904c4..e3e27349a9fe 100644
+--- a/arch/arm64/crypto/aes-neonbs-glue.c
++++ b/arch/arm64/crypto/aes-neonbs-glue.c
+@@ -384,7 +384,7 @@ static int __xts_crypt(struct skcipher_request *req, bool encrypt,
+ goto xts_tail;
+
+ kernel_neon_end();
+- skcipher_walk_done(&walk, nbytes);
++ err = skcipher_walk_done(&walk, nbytes);
+ }
+
+ if (err || likely(!tail))
+diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
+index 12cd9231c4b8..0231d69c8bf2 100644
+--- a/arch/hexagon/include/asm/atomic.h
++++ b/arch/hexagon/include/asm/atomic.h
+@@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+- " if !P3 jump 1b;\n" \
++ " if (!P3) jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
+ "1: %0 = memw_locked(%1);\n" \
+ " %0 = "#op "(%0,%2);\n" \
+ " memw_locked(%1,P3)=%0;\n" \
+- " if !P3 jump 1b;\n" \
++ " if (!P3) jump 1b;\n" \
+ : "=&r" (output) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+@@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
+ "1: %0 = memw_locked(%2);\n" \
+ " %1 = "#op "(%0,%3);\n" \
+ " memw_locked(%2,P3)=%1;\n" \
+- " if !P3 jump 1b;\n" \
++ " if (!P3) jump 1b;\n" \
+ : "=&r" (output), "=&r" (val) \
+ : "r" (&v->counter), "r" (i) \
+ : "memory", "p3" \
+@@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+ " }"
+ " memw_locked(%2, p3) = %1;"
+ " {"
+- " if !p3 jump 1b;"
++ " if (!p3) jump 1b;"
+ " }"
+ "2:"
+ : "=&r" (__oldval), "=&r" (tmp)
+diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
+index 47384b094b94..71429f756af0 100644
+--- a/arch/hexagon/include/asm/bitops.h
++++ b/arch/hexagon/include/asm/bitops.h
+@@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
+ "1: R12 = memw_locked(R10);\n"
+ " { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
+ " memw_locked(R10,P1) = R12;\n"
+- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
++ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
+ : "=&r" (oldval)
+ : "r" (addr), "r" (nr)
+ : "r10", "r11", "r12", "p0", "p1", "memory"
+@@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
+ "1: R12 = memw_locked(R10);\n"
+ " { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
+ " memw_locked(R10,P1) = R12;\n"
+- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
++ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
+ : "=&r" (oldval)
+ : "r" (addr), "r" (nr)
+ : "r10", "r11", "r12", "p0", "p1", "memory"
+@@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
+ "1: R12 = memw_locked(R10);\n"
+ " { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
+ " memw_locked(R10,P1) = R12;\n"
+- " {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
++ " {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
+ : "=&r" (oldval)
+ : "r" (addr), "r" (nr)
+ : "r10", "r11", "r12", "p0", "p1", "memory"
+@@ -223,7 +223,7 @@ static inline int ffs(int x)
+ int r;
+
+ asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
+- "{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
++ "{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
+ : "=&r" (r)
+ : "r" (x)
+ : "p0");
+diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
+index 6091322c3af9..92b8a02e588a 100644
+--- a/arch/hexagon/include/asm/cmpxchg.h
++++ b/arch/hexagon/include/asm/cmpxchg.h
+@@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+ __asm__ __volatile__ (
+ "1: %0 = memw_locked(%1);\n" /* load into retval */
+ " memw_locked(%1,P0) = %2;\n" /* store into memory */
+- " if !P0 jump 1b;\n"
++ " if (!P0) jump 1b;\n"
+ : "=&r" (retval)
+ : "r" (ptr), "r" (x)
+ : "memory", "p0"
+diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
+index cb635216a732..0191f7c7193e 100644
+--- a/arch/hexagon/include/asm/futex.h
++++ b/arch/hexagon/include/asm/futex.h
+@@ -16,7 +16,7 @@
+ /* For example: %1 = %4 */ \
+ insn \
+ "2: memw_locked(%3,p2) = %1;\n" \
+- " if !p2 jump 1b;\n" \
++ " if (!p2) jump 1b;\n" \
+ " %1 = #0;\n" \
+ "3:\n" \
+ ".section .fixup,\"ax\"\n" \
+@@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
+ "1: %1 = memw_locked(%3)\n"
+ " {\n"
+ " p2 = cmp.eq(%1,%4)\n"
+- " if !p2.new jump:NT 3f\n"
++ " if (!p2.new) jump:NT 3f\n"
+ " }\n"
+ "2: memw_locked(%3,p2) = %5\n"
+- " if !p2 jump 1b\n"
++ " if (!p2) jump 1b\n"
+ "3:\n"
+ ".section .fixup,\"ax\"\n"
+ "4: %0 = #%6\n"
+diff --git a/arch/hexagon/include/asm/spinlock.h b/arch/hexagon/include/asm/spinlock.h
+index bfe07d842ff3..ef103b73bec8 100644
+--- a/arch/hexagon/include/asm/spinlock.h
++++ b/arch/hexagon/include/asm/spinlock.h
+@@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0);\n"
+ " { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ " memw_locked(%0,P3) = R6;\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+@@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
+ "1: R6 = memw_locked(%0);\n"
+ " R6 = add(R6,#-1);\n"
+ " memw_locked(%0,P3) = R6\n"
+- " if !P3 jump 1b;\n"
++ " if (!P3) jump 1b;\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+@@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
+ __asm__ __volatile__(
+ " R6 = memw_locked(%1);\n"
+ " { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
+- " { if !P3 jump 1f; }\n"
++ " { if (!P3) jump 1f; }\n"
+ " memw_locked(%1,P3) = R6;\n"
+ " { %0 = P3 }\n"
+ "1:\n"
+@@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0)\n"
+ " { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ " memw_locked(%0,P3) = R6;\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+@@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
+ __asm__ __volatile__(
+ " R6 = memw_locked(%1)\n"
+ " { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
+- " { if !P3 jump 1f; }\n"
++ " { if (!P3) jump 1f; }\n"
+ " memw_locked(%1,P3) = R6;\n"
+ " %0 = P3;\n"
+ "1:\n"
+@@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
+ __asm__ __volatile__(
+ "1: R6 = memw_locked(%0);\n"
+ " P3 = cmp.eq(R6,#0);\n"
+- " { if !P3 jump 1b; R6 = #1; }\n"
++ " { if (!P3) jump 1b; R6 = #1; }\n"
+ " memw_locked(%0,P3) = R6;\n"
+- " { if !P3 jump 1b; }\n"
++ " { if (!P3) jump 1b; }\n"
+ :
+ : "r" (&lock->lock)
+ : "memory", "r6", "p3"
+@@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
+ __asm__ __volatile__(
+ " R6 = memw_locked(%1);\n"
+ " P3 = cmp.eq(R6,#0);\n"
+- " { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
++ " { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
+ " memw_locked(%1,P3) = R6;\n"
+ " %0 = P3;\n"
+ "1:\n"
+diff --git a/arch/hexagon/kernel/stacktrace.c b/arch/hexagon/kernel/stacktrace.c
+index 35f29423fda8..5ed02f699479 100644
+--- a/arch/hexagon/kernel/stacktrace.c
++++ b/arch/hexagon/kernel/stacktrace.c
+@@ -11,8 +11,6 @@
+ #include <linux/thread_info.h>
+ #include <linux/module.h>
+
+-register unsigned long current_frame_pointer asm("r30");
+-
+ struct stackframe {
+ unsigned long fp;
+ unsigned long rets;
+@@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace)
+
+ low = (unsigned long)task_stack_page(current);
+ high = low + THREAD_SIZE;
+- fp = current_frame_pointer;
++ fp = (unsigned long)__builtin_frame_address(0);
+
+ while (fp >= low && fp <= (high - sizeof(*frame))) {
+ frame = (struct stackframe *)fp;
+diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S
+index 12242c27e2df..4023fdbea490 100644
+--- a/arch/hexagon/kernel/vm_entry.S
++++ b/arch/hexagon/kernel/vm_entry.S
+@@ -369,7 +369,7 @@ ret_from_fork:
+ R26.L = #LO(do_work_pending);
+ R0 = #VM_INT_DISABLE;
+ }
+- if P0 jump check_work_pending
++ if (P0) jump check_work_pending
+ {
+ R0 = R25;
+ callr R24
+diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
+index 172801ed35b8..d859f079b771 100644
+--- a/arch/mips/boot/compressed/Makefile
++++ b/arch/mips/boot/compressed/Makefile
+@@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
+ -DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
+ -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
+
++# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
++KCOV_INSTRUMENT := n
++
+ # decompressor objects (linked with vmlinuz)
+ vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
+
+diff --git a/arch/mips/include/asm/vdso/gettimeofday.h b/arch/mips/include/asm/vdso/gettimeofday.h
+index b08825531e9f..0ae9b4cbc153 100644
+--- a/arch/mips/include/asm/vdso/gettimeofday.h
++++ b/arch/mips/include/asm/vdso/gettimeofday.h
+@@ -26,8 +26,6 @@
+
+ #define __VDSO_USE_SYSCALL ULLONG_MAX
+
+-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
+-
+ static __always_inline long gettimeofday_fallback(
+ struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+@@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback(
+ return error ? -ret : ret;
+ }
+
+-#else
+-
+-static __always_inline long gettimeofday_fallback(
+- struct __kernel_old_timeval *_tv,
+- struct timezone *_tz)
+-{
+- return -1;
+-}
+-
+-#endif
+-
+ static __always_inline long clock_gettime_fallback(
+ clockid_t _clkid,
+ struct __kernel_timespec *_ts)
+diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
+index f777e44653d5..47312c529410 100644
+--- a/arch/mips/kernel/cacheinfo.c
++++ b/arch/mips/kernel/cacheinfo.c
+@@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
+ return 0;
+ }
+
++static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
++{
++ int cpu1;
++
++ for_each_possible_cpu(cpu1)
++ if (cpus_are_siblings(cpu, cpu1))
++ cpumask_set_cpu(cpu1, cpu_map);
++}
++
++static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
++{
++ int cpu1;
++ int cluster = cpu_cluster(&cpu_data[cpu]);
++
++ for_each_possible_cpu(cpu1)
++ if (cpu_cluster(&cpu_data[cpu1]) == cluster)
++ cpumask_set_cpu(cpu1, cpu_map);
++}
++
+ static int __populate_cache_leaves(unsigned int cpu)
+ {
+ struct cpuinfo_mips *c = &current_cpu_data;
+@@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+ if (c->icache.waysize) {
++ /* L1 caches are per core */
++ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
++ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
+ } else {
+ populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
+ }
+
+- if (c->scache.waysize)
++ if (c->scache.waysize) {
++ /* L2 cache is per cluster */
++ fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
++ }
+
+ if (c->tcache.waysize)
+ populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+diff --git a/arch/mips/pci/pci-xtalk-bridge.c b/arch/mips/pci/pci-xtalk-bridge.c
+index 7b4d40354ee7..30017d5945bc 100644
+--- a/arch/mips/pci/pci-xtalk-bridge.c
++++ b/arch/mips/pci/pci-xtalk-bridge.c
+@@ -279,16 +279,15 @@ static int bridge_set_affinity(struct irq_data *d, const struct cpumask *mask,
+ struct bridge_irq_chip_data *data = d->chip_data;
+ int bit = d->parent_data->hwirq;
+ int pin = d->hwirq;
+- nasid_t nasid;
+ int ret, cpu;
+
+ ret = irq_chip_set_affinity_parent(d, mask, force);
+ if (ret >= 0) {
+ cpu = cpumask_first_and(mask, cpu_online_mask);
+- nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
++ data->nnasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ bridge_write(data->bc, b_int_addr[pin].addr,
+ (((data->bc->intr_addr >> 30) & 0x30000) |
+- bit | (nasid << 8)));
++ bit | (data->nasid << 8)));
+ bridge_read(data->bc, b_wid_tflush);
+ }
+ return ret;
+diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
+index 37be04975831..79a2f6bd2b5a 100644
+--- a/arch/mips/sgi-ip27/ip27-irq.c
++++ b/arch/mips/sgi-ip27/ip27-irq.c
+@@ -73,6 +73,9 @@ static void setup_hub_mask(struct hub_irq_data *hd, const struct cpumask *mask)
+ int cpu;
+
+ cpu = cpumask_first_and(mask, cpu_online_mask);
++ if (cpu >= nr_cpu_ids)
++ cpu = cpumask_any(cpu_online_mask);
++
+ nasid = COMPACT_TO_NASID_NODEID(cpu_to_node(cpu));
+ hd->cpu = cpu;
+ if (!cputoslice(cpu)) {
+@@ -139,6 +142,7 @@ static int hub_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ /* use CPU connected to nearest hub */
+ hub = hub_data(NASID_TO_COMPACT_NODEID(info->nasid));
+ setup_hub_mask(hd, &hub->h_cpus);
++ info->nasid = cpu_to_node(hd->cpu);
+
+ /* Make sure it's not already pending when we connect it. */
+ REMOTE_HUB_CLR_INTR(info->nasid, swlevel);
+diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c
+index 6ebdc37c89fc..6b83b6376a4b 100644
+--- a/arch/mips/vdso/vgettimeofday.c
++++ b/arch/mips/vdso/vgettimeofday.c
+@@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock,
+ return __cvdso_clock_gettime32(clock, ts);
+ }
+
++#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
++
++/*
++ * This is behind the ifdef so that we don't provide the symbol when there's no
++ * possibility of there being a usable clocksource, because there's nothing we
++ * can do without it. When libc fails the symbol lookup it should fall back on
++ * the standard syscall path.
++ */
+ int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+ {
+ return __cvdso_gettimeofday(tv, tz);
+ }
+
++#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
++
+ int __vdso_clock_getres(clockid_t clock_id,
+ struct old_timespec32 *res)
+ {
+@@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock,
+ return __cvdso_clock_gettime(clock, ts);
+ }
+
++#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
++
++/*
++ * This is behind the ifdef so that we don't provide the symbol when there's no
++ * possibility of there being a usable clocksource, because there's nothing we
++ * can do without it. When libc fails the symbol lookup it should fall back on
++ * the standard syscall path.
++ */
+ int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
+ struct timezone *tz)
+ {
+ return __cvdso_gettimeofday(tv, tz);
+ }
+
++#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
++
+ int __vdso_clock_getres(clockid_t clock_id,
+ struct __kernel_timespec *res)
+ {
+diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
+index d9ac7e6408ef..caddded56e77 100644
+--- a/arch/nds32/include/asm/cacheflush.h
++++ b/arch/nds32/include/asm/cacheflush.h
+@@ -9,7 +9,11 @@
+ #define PG_dcache_dirty PG_arch_1
+
+ void flush_icache_range(unsigned long start, unsigned long end);
++#define flush_icache_range flush_icache_range
++
+ void flush_icache_page(struct vm_area_struct *vma, struct page *page);
++#define flush_icache_page flush_icache_page
++
+ #ifdef CONFIG_CPU_CACHE_ALIASING
+ void flush_cache_mm(struct mm_struct *mm);
+ void flush_cache_dup_mm(struct mm_struct *mm);
+@@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size);
+ #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
+
+ #else
+-#include <asm-generic/cacheflush.h>
+-#undef flush_icache_range
+-#undef flush_icache_page
+-#undef flush_icache_user_range
+ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len);
++#define flush_icache_user_range flush_icache_user_range
++
++#include <asm-generic/cacheflush.h>
+ #endif
+
+ #endif /* __NDS32_CACHEFLUSH_H__ */
+diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
+index 2825d004dece..c0bea75ac27b 100644
+--- a/arch/powerpc/platforms/powernv/pci.c
++++ b/arch/powerpc/platforms/powernv/pci.c
+@@ -945,6 +945,23 @@ void __init pnv_pci_init(void)
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return;
+
++#ifdef CONFIG_PCIEPORTBUS
++ /*
++ * On PowerNV PCIe devices are (currently) managed in cooperation
++ * with firmware. This isn't *strictly* required, but there's enough
++ * assumptions baked into both firmware and the platform code that
++ * it's unwise to allow the portbus services to be used.
++ *
++ * We need to fix this eventually, but for now set this flag to disable
++ * the portbus driver. The AER service isn't required since that AER
++ * events are handled via EEH. The pciehp hotplug driver can't work
++ * without kernel changes (and portbus binding breaks pnv_php). The
++ * other services also require some thinking about how we're going
++ * to integrate them.
++ */
++ pcie_ports_disabled = true;
++#endif
++
+ /* Look for IODA IO-Hubs. */
+ for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
+ pnv_pci_init_ioda_hub(np);
+diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
+index 3f15938dec89..c54bd3c79955 100644
+--- a/arch/riscv/mm/cacheflush.c
++++ b/arch/riscv/mm/cacheflush.c
+@@ -14,6 +14,7 @@ void flush_icache_all(void)
+ {
+ sbi_remote_fence_i(NULL);
+ }
++EXPORT_SYMBOL(flush_icache_all);
+
+ /*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
+index aa3336a7cb15..7d17b3addbbb 100644
+--- a/arch/x86/entry/syscall_32.c
++++ b/arch/x86/entry/syscall_32.c
+@@ -10,13 +10,11 @@
+ #ifdef CONFIG_IA32_EMULATION
+ /* On X86_64, we use struct pt_regs * to pass parameters to syscalls */
+ #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
+-
+-/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
+-extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
+-
++#define __sys_ni_syscall __ia32_sys_ni_syscall
+ #else /* CONFIG_IA32_EMULATION */
+ #define __SYSCALL_I386(nr, sym, qual) extern asmlinkage long sym(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
+ extern asmlinkage long sys_ni_syscall(unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
++#define __sys_ni_syscall sys_ni_syscall
+ #endif /* CONFIG_IA32_EMULATION */
+
+ #include <asm/syscalls_32.h>
+@@ -29,6 +27,6 @@ __visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] =
+ * Smells like a compiler bug -- it doesn't work
+ * when the & below is removed.
+ */
+- [0 ... __NR_syscall_compat_max] = &sys_ni_syscall,
++ [0 ... __NR_syscall_compat_max] = &__sys_ni_syscall,
+ #include <asm/syscalls_32.h>
+ };
+diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
+index b1bf31713374..adf619a856e8 100644
+--- a/arch/x86/entry/syscall_64.c
++++ b/arch/x86/entry/syscall_64.c
+@@ -4,11 +4,17 @@
+ #include <linux/linkage.h>
+ #include <linux/sys.h>
+ #include <linux/cache.h>
++#include <linux/syscalls.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/syscall.h>
+
+-/* this is a lie, but it does not hurt as sys_ni_syscall just returns -EINVAL */
+-extern asmlinkage long sys_ni_syscall(const struct pt_regs *);
++extern asmlinkage long sys_ni_syscall(void);
++
++SYSCALL_DEFINE0(ni_syscall)
++{
++ return sys_ni_syscall();
++}
++
+ #define __SYSCALL_64(nr, sym, qual) extern asmlinkage long sym(const struct pt_regs *);
+ #define __SYSCALL_X32(nr, sym, qual) __SYSCALL_64(nr, sym, qual)
+ #include <asm/syscalls_64.h>
+@@ -23,7 +29,7 @@ asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
+ * Smells like a compiler bug -- it doesn't work
+ * when the & below is removed.
+ */
+- [0 ... __NR_syscall_max] = &sys_ni_syscall,
++ [0 ... __NR_syscall_max] = &__x64_sys_ni_syscall,
+ #include <asm/syscalls_64.h>
+ };
+
+@@ -40,7 +46,7 @@ asmlinkage const sys_call_ptr_t x32_sys_call_table[__NR_syscall_x32_max+1] = {
+ * Smells like a compiler bug -- it doesn't work
+ * when the & below is removed.
+ */
+- [0 ... __NR_syscall_x32_max] = &sys_ni_syscall,
++ [0 ... __NR_syscall_x32_max] = &__x64_sys_ni_syscall,
+ #include <asm/syscalls_64.h>
+ };
+
+diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
+index 3fe02546aed3..15908eb9b17e 100644
+--- a/arch/x86/entry/syscalls/syscall_32.tbl
++++ b/arch/x86/entry/syscalls/syscall_32.tbl
+@@ -124,13 +124,13 @@
+ 110 i386 iopl sys_iopl __ia32_sys_iopl
+ 111 i386 vhangup sys_vhangup __ia32_sys_vhangup
+ 112 i386 idle
+-113 i386 vm86old sys_vm86old sys_ni_syscall
++113 i386 vm86old sys_vm86old __ia32_sys_ni_syscall
+ 114 i386 wait4 sys_wait4 __ia32_compat_sys_wait4
+ 115 i386 swapoff sys_swapoff __ia32_sys_swapoff
+ 116 i386 sysinfo sys_sysinfo __ia32_compat_sys_sysinfo
+ 117 i386 ipc sys_ipc __ia32_compat_sys_ipc
+ 118 i386 fsync sys_fsync __ia32_sys_fsync
+-119 i386 sigreturn sys_sigreturn sys32_sigreturn
++119 i386 sigreturn sys_sigreturn __ia32_compat_sys_sigreturn
+ 120 i386 clone sys_clone __ia32_compat_sys_x86_clone
+ 121 i386 setdomainname sys_setdomainname __ia32_sys_setdomainname
+ 122 i386 uname sys_newuname __ia32_sys_newuname
+@@ -177,14 +177,14 @@
+ 163 i386 mremap sys_mremap __ia32_sys_mremap
+ 164 i386 setresuid sys_setresuid16 __ia32_sys_setresuid16
+ 165 i386 getresuid sys_getresuid16 __ia32_sys_getresuid16
+-166 i386 vm86 sys_vm86 sys_ni_syscall
++166 i386 vm86 sys_vm86 __ia32_sys_ni_syscall
+ 167 i386 query_module
+ 168 i386 poll sys_poll __ia32_sys_poll
+ 169 i386 nfsservctl
+ 170 i386 setresgid sys_setresgid16 __ia32_sys_setresgid16
+ 171 i386 getresgid sys_getresgid16 __ia32_sys_getresgid16
+ 172 i386 prctl sys_prctl __ia32_sys_prctl
+-173 i386 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
++173 i386 rt_sigreturn sys_rt_sigreturn __ia32_compat_sys_rt_sigreturn
+ 174 i386 rt_sigaction sys_rt_sigaction __ia32_compat_sys_rt_sigaction
+ 175 i386 rt_sigprocmask sys_rt_sigprocmask __ia32_compat_sys_rt_sigprocmask
+ 176 i386 rt_sigpending sys_rt_sigpending __ia32_compat_sys_rt_sigpending
+diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
+index 1cee10091b9f..30416d7f19d4 100644
+--- a/arch/x86/ia32/ia32_signal.c
++++ b/arch/x86/ia32/ia32_signal.c
+@@ -21,6 +21,7 @@
+ #include <linux/personality.h>
+ #include <linux/compat.h>
+ #include <linux/binfmts.h>
++#include <linux/syscalls.h>
+ #include <asm/ucontext.h>
+ #include <linux/uaccess.h>
+ #include <asm/fpu/internal.h>
+@@ -118,7 +119,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
+ return err;
+ }
+
+-asmlinkage long sys32_sigreturn(void)
++COMPAT_SYSCALL_DEFINE0(sigreturn)
+ {
+ struct pt_regs *regs = current_pt_regs();
+ struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(regs->sp-8);
+@@ -144,7 +145,7 @@ badframe:
+ return 0;
+ }
+
+-asmlinkage long sys32_rt_sigreturn(void)
++COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
+ {
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe_ia32 __user *frame;
+diff --git a/arch/x86/include/asm/syscall_wrapper.h b/arch/x86/include/asm/syscall_wrapper.h
+index 90eb70df0b18..e2389ce9bf58 100644
+--- a/arch/x86/include/asm/syscall_wrapper.h
++++ b/arch/x86/include/asm/syscall_wrapper.h
+@@ -6,6 +6,8 @@
+ #ifndef _ASM_X86_SYSCALL_WRAPPER_H
+ #define _ASM_X86_SYSCALL_WRAPPER_H
+
++struct pt_regs;
++
+ /* Mapping of registers to parameters for syscalls on x86-64 and x32 */
+ #define SC_X86_64_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+@@ -28,13 +30,21 @@
+ * kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
+ * case as well.
+ */
++#define __IA32_COMPAT_SYS_STUB0(x, name) \
++ asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs);\
++ ALLOW_ERROR_INJECTION(__ia32_compat_sys_##name, ERRNO); \
++ asmlinkage long __ia32_compat_sys_##name(const struct pt_regs *regs)\
++ { \
++ return __se_compat_sys_##name(); \
++ }
++
+ #define __IA32_COMPAT_SYS_STUBx(x, name, ...) \
+ asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs);\
+ ALLOW_ERROR_INJECTION(__ia32_compat_sys##name, ERRNO); \
+ asmlinkage long __ia32_compat_sys##name(const struct pt_regs *regs)\
+ { \
+ return __se_compat_sys##name(SC_IA32_REGS_TO_ARGS(x,__VA_ARGS__));\
+- } \
++ }
+
+ #define __IA32_SYS_STUBx(x, name, ...) \
+ asmlinkage long __ia32_sys##name(const struct pt_regs *regs); \
+@@ -56,9 +66,15 @@
+ SYSCALL_ALIAS(__ia32_sys_##sname, __x64_sys_##sname); \
+ asmlinkage long __x64_sys_##sname(const struct pt_regs *__unused)
+
+-#define COND_SYSCALL(name) \
+- cond_syscall(__x64_sys_##name); \
+- cond_syscall(__ia32_sys_##name)
++#define COND_SYSCALL(name) \
++ asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \
++ { \
++ return sys_ni_syscall(); \
++ } \
++ asmlinkage __weak long __ia32_sys_##name(const struct pt_regs *__unused)\
++ { \
++ return sys_ni_syscall(); \
++ }
+
+ #define SYS_NI(name) \
+ SYSCALL_ALIAS(__x64_sys_##name, sys_ni_posix_timers); \
+@@ -76,15 +92,24 @@
+ * of the x86-64-style parameter ordering of x32 syscalls. The syscalls common
+ * with x86_64 obviously do not need such care.
+ */
++#define __X32_COMPAT_SYS_STUB0(x, name, ...) \
++ asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs);\
++ ALLOW_ERROR_INJECTION(__x32_compat_sys_##name, ERRNO); \
++ asmlinkage long __x32_compat_sys_##name(const struct pt_regs *regs)\
++ { \
++ return __se_compat_sys_##name();\
++ }
++
+ #define __X32_COMPAT_SYS_STUBx(x, name, ...) \
+ asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs);\
+ ALLOW_ERROR_INJECTION(__x32_compat_sys##name, ERRNO); \
+ asmlinkage long __x32_compat_sys##name(const struct pt_regs *regs)\
+ { \
+ return __se_compat_sys##name(SC_X86_64_REGS_TO_ARGS(x,__VA_ARGS__));\
+- } \
++ }
+
+ #else /* CONFIG_X86_X32 */
++#define __X32_COMPAT_SYS_STUB0(x, name)
+ #define __X32_COMPAT_SYS_STUBx(x, name, ...)
+ #endif /* CONFIG_X86_X32 */
+
+@@ -95,6 +120,17 @@
+ * mapping of registers to parameters, we need to generate stubs for each
+ * of them.
+ */
++#define COMPAT_SYSCALL_DEFINE0(name) \
++ static long __se_compat_sys_##name(void); \
++ static inline long __do_compat_sys_##name(void); \
++ __IA32_COMPAT_SYS_STUB0(x, name) \
++ __X32_COMPAT_SYS_STUB0(x, name) \
++ static long __se_compat_sys_##name(void) \
++ { \
++ return __do_compat_sys_##name(); \
++ } \
++ static inline long __do_compat_sys_##name(void)
++
+ #define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));\
+@@ -190,7 +226,11 @@
+ #endif
+
+ #ifndef COND_SYSCALL
+-#define COND_SYSCALL(name) cond_syscall(__x64_sys_##name)
++#define COND_SYSCALL(name) \
++ asmlinkage __weak long __x64_sys_##name(const struct pt_regs *__unused) \
++ { \
++ return sys_ni_syscall(); \
++ }
+ #endif
+
+ #ifndef SYS_NI
+@@ -202,7 +242,6 @@
+ * For VSYSCALLS, we need to declare these three syscalls with the new
+ * pt_regs-based calling convention for in-kernel use.
+ */
+-struct pt_regs;
+ asmlinkage long __x64_sys_getcpu(const struct pt_regs *regs);
+ asmlinkage long __x64_sys_gettimeofday(const struct pt_regs *regs);
+ asmlinkage long __x64_sys_time(const struct pt_regs *regs);
+diff --git a/block/bio.c b/block/bio.c
+index c822ceb7c4de..906da3581a3e 100644
+--- a/block/bio.c
++++ b/block/bio.c
+@@ -535,6 +535,16 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
+ }
+ EXPORT_SYMBOL(zero_fill_bio_iter);
+
++/**
++ * bio_truncate - truncate the bio to small size of @new_size
++ * @bio: the bio to be truncated
++ * @new_size: new size for truncating the bio
++ *
++ * Description:
++ * Truncate the bio to new size of @new_size. If bio_op(bio) is
++ * REQ_OP_READ, zero the truncated part. This function should only
++ * be used for handling corner cases, such as bio eod.
++ */
+ void bio_truncate(struct bio *bio, unsigned new_size)
+ {
+ struct bio_vec bv;
+@@ -545,7 +555,7 @@ void bio_truncate(struct bio *bio, unsigned new_size)
+ if (new_size >= bio->bi_iter.bi_size)
+ return;
+
+- if (bio_data_dir(bio) != READ)
++ if (bio_op(bio) != REQ_OP_READ)
+ goto exit;
+
+ bio_for_each_segment(bv, bio, iter) {
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index c1601edd70e3..e2c8ab408bed 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -56,7 +56,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
+ struct alg_sock *pask = alg_sk(psk);
+ struct af_alg_ctx *ctx = ask->private;
+ struct crypto_skcipher *tfm = pask->private;
+- unsigned int bs = crypto_skcipher_blocksize(tfm);
++ unsigned int bs = crypto_skcipher_chunksize(tfm);
+ struct af_alg_async_req *areq;
+ int err = 0;
+ size_t len = 0;
+diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
+index 27a95c86a80b..4fc294c2f9e8 100644
+--- a/drivers/clk/clk.c
++++ b/drivers/clk/clk.c
+@@ -3886,6 +3886,7 @@ void clk_unregister(struct clk *clk)
+ __func__, clk->core->name);
+
+ kref_put(&clk->core->ref, __clk_release);
++ free_clk(clk);
+ unlock:
+ clk_prepare_unlock();
+ }
+diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
+index d43b4a3c0de8..047f1d8fe323 100644
+--- a/drivers/clk/imx/clk-pll14xx.c
++++ b/drivers/clk/imx/clk-pll14xx.c
+@@ -112,43 +112,17 @@ static unsigned long clk_pll1443x_recalc_rate(struct clk_hw *hw,
+ return fvco;
+ }
+
+-static inline bool clk_pll1416x_mp_change(const struct imx_pll14xx_rate_table *rate,
++static inline bool clk_pll14xx_mp_change(const struct imx_pll14xx_rate_table *rate,
+ u32 pll_div)
+ {
+ u32 old_mdiv, old_pdiv;
+
+- old_mdiv = (pll_div >> MDIV_SHIFT) & MDIV_MASK;
+- old_pdiv = (pll_div >> PDIV_SHIFT) & PDIV_MASK;
++ old_mdiv = (pll_div & MDIV_MASK) >> MDIV_SHIFT;
++ old_pdiv = (pll_div & PDIV_MASK) >> PDIV_SHIFT;
+
+ return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv;
+ }
+
+-static inline bool clk_pll1443x_mpk_change(const struct imx_pll14xx_rate_table *rate,
+- u32 pll_div_ctl0, u32 pll_div_ctl1)
+-{
+- u32 old_mdiv, old_pdiv, old_kdiv;
+-
+- old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK;
+- old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK;
+- old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK;
+-
+- return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
+- rate->kdiv != old_kdiv;
+-}
+-
+-static inline bool clk_pll1443x_mp_change(const struct imx_pll14xx_rate_table *rate,
+- u32 pll_div_ctl0, u32 pll_div_ctl1)
+-{
+- u32 old_mdiv, old_pdiv, old_kdiv;
+-
+- old_mdiv = (pll_div_ctl0 >> MDIV_SHIFT) & MDIV_MASK;
+- old_pdiv = (pll_div_ctl0 >> PDIV_SHIFT) & PDIV_MASK;
+- old_kdiv = (pll_div_ctl1 >> KDIV_SHIFT) & KDIV_MASK;
+-
+- return rate->mdiv != old_mdiv || rate->pdiv != old_pdiv ||
+- rate->kdiv != old_kdiv;
+-}
+-
+ static int clk_pll14xx_wait_lock(struct clk_pll14xx *pll)
+ {
+ u32 val;
+@@ -174,7 +148,7 @@ static int clk_pll1416x_set_rate(struct clk_hw *hw, unsigned long drate,
+
+ tmp = readl_relaxed(pll->base + 4);
+
+- if (!clk_pll1416x_mp_change(rate, tmp)) {
++ if (!clk_pll14xx_mp_change(rate, tmp)) {
+ tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
+ tmp |= rate->sdiv << SDIV_SHIFT;
+ writel_relaxed(tmp, pll->base + 4);
+@@ -239,13 +213,15 @@ static int clk_pll1443x_set_rate(struct clk_hw *hw, unsigned long drate,
+ }
+
+ tmp = readl_relaxed(pll->base + 4);
+- div_val = readl_relaxed(pll->base + 8);
+
+- if (!clk_pll1443x_mpk_change(rate, tmp, div_val)) {
++ if (!clk_pll14xx_mp_change(rate, tmp)) {
+ tmp &= ~(SDIV_MASK) << SDIV_SHIFT;
+ tmp |= rate->sdiv << SDIV_SHIFT;
+ writel_relaxed(tmp, pll->base + 4);
+
++ tmp = rate->kdiv << KDIV_SHIFT;
++ writel_relaxed(tmp, pll->base + 8);
++
+ return 0;
+ }
+
+diff --git a/drivers/clk/meson/axg-audio.c b/drivers/clk/meson/axg-audio.c
+index 18b23cdf679c..aa2522624fd3 100644
+--- a/drivers/clk/meson/axg-audio.c
++++ b/drivers/clk/meson/axg-audio.c
+@@ -1001,7 +1001,7 @@ static const struct regmap_config axg_audio_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+- .max_register = AUDIO_CLK_PDMIN_CTRL1,
++ .max_register = AUDIO_CLK_SPDIFOUT_B_CTRL,
+ };
+
+ struct audioclk_data {
+diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
+index 31466cd1842f..3b7601647d7b 100644
+--- a/drivers/clk/samsung/clk-exynos5420.c
++++ b/drivers/clk/samsung/clk-exynos5420.c
+@@ -165,6 +165,8 @@ static const unsigned long exynos5x_clk_regs[] __initconst = {
+ GATE_BUS_CPU,
+ GATE_SCLK_CPU,
+ CLKOUT_CMU_CPU,
++ APLL_CON0,
++ KPLL_CON0,
+ CPLL_CON0,
+ DPLL_CON0,
+ EPLL_CON0,
+diff --git a/drivers/crypto/cavium/nitrox/nitrox_main.c b/drivers/crypto/cavium/nitrox/nitrox_main.c
+index bc924980e10c..c4632d84c9a1 100644
+--- a/drivers/crypto/cavium/nitrox/nitrox_main.c
++++ b/drivers/crypto/cavium/nitrox/nitrox_main.c
+@@ -103,8 +103,7 @@ static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
+ offset = UCD_UCODE_LOAD_BLOCK_NUM;
+ nitrox_write_csr(ndev, offset, block_num);
+
+- code_size = ucode_size;
+- code_size = roundup(code_size, 8);
++ code_size = roundup(ucode_size, 16);
+ while (code_size) {
+ data = ucode_data[i];
+ /* write 8 bytes at a time */
+@@ -220,11 +219,11 @@ static int nitrox_load_fw(struct nitrox_device *ndev)
+
+ /* write block number and firmware length
+ * bit:<2:0> block number
+- * bit:3 is set SE uses 32KB microcode
+- * bit:3 is clear SE uses 64KB microcode
++ * bit:3 is set AE uses 32KB microcode
++ * bit:3 is clear AE uses 64KB microcode
+ */
+ core_2_eid_val.value = 0ULL;
+- core_2_eid_val.ucode_blk = 0;
++ core_2_eid_val.ucode_blk = 2;
+ if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
+ core_2_eid_val.ucode_len = 1;
+ else
+diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
+index 940485112d15..73a899e6f837 100644
+--- a/drivers/crypto/geode-aes.c
++++ b/drivers/crypto/geode-aes.c
+@@ -10,7 +10,7 @@
+ #include <linux/spinlock.h>
+ #include <crypto/algapi.h>
+ #include <crypto/aes.h>
+-#include <crypto/skcipher.h>
++#include <crypto/internal/skcipher.h>
+
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -24,12 +24,12 @@ static spinlock_t lock;
+
+ /* Write a 128 bit field (either a writable key or IV) */
+ static inline void
+-_writefield(u32 offset, void *value)
++_writefield(u32 offset, const void *value)
+ {
+ int i;
+
+ for (i = 0; i < 4; i++)
+- iowrite32(((u32 *) value)[i], _iobase + offset + (i * 4));
++ iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
+ }
+
+ /* Read a 128 bit field (either a writable key or IV) */
+@@ -43,12 +43,12 @@ _readfield(u32 offset, void *value)
+ }
+
+ static int
+-do_crypt(void *src, void *dst, int len, u32 flags)
++do_crypt(const void *src, void *dst, u32 len, u32 flags)
+ {
+ u32 status;
+ u32 counter = AES_OP_TIMEOUT;
+
+- iowrite32(virt_to_phys(src), _iobase + AES_SOURCEA_REG);
++ iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
+ iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
+ iowrite32(len, _iobase + AES_LENA_REG);
+
+@@ -65,16 +65,14 @@ do_crypt(void *src, void *dst, int len, u32 flags)
+ return counter ? 0 : 1;
+ }
+
+-static unsigned int
+-geode_aes_crypt(struct geode_aes_op *op)
++static void
++geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
++ void *dst, u32 len, u8 *iv, int mode, int dir)
+ {
+ u32 flags = 0;
+ unsigned long iflags;
+ int ret;
+
+- if (op->len == 0)
+- return 0;
+-
+ /* If the source and destination is the same, then
+ * we need to turn on the coherent flags, otherwise
+ * we don't need to worry
+@@ -82,32 +80,28 @@ geode_aes_crypt(struct geode_aes_op *op)
+
+ flags |= (AES_CTRL_DCA | AES_CTRL_SCA);
+
+- if (op->dir == AES_DIR_ENCRYPT)
++ if (dir == AES_DIR_ENCRYPT)
+ flags |= AES_CTRL_ENCRYPT;
+
+ /* Start the critical section */
+
+ spin_lock_irqsave(&lock, iflags);
+
+- if (op->mode == AES_MODE_CBC) {
++ if (mode == AES_MODE_CBC) {
+ flags |= AES_CTRL_CBC;
+- _writefield(AES_WRITEIV0_REG, op->iv);
++ _writefield(AES_WRITEIV0_REG, iv);
+ }
+
+- if (!(op->flags & AES_FLAGS_HIDDENKEY)) {
+- flags |= AES_CTRL_WRKEY;
+- _writefield(AES_WRITEKEY0_REG, op->key);
+- }
++ flags |= AES_CTRL_WRKEY;
++ _writefield(AES_WRITEKEY0_REG, tctx->key);
+
+- ret = do_crypt(op->src, op->dst, op->len, flags);
++ ret = do_crypt(src, dst, len, flags);
+ BUG_ON(ret);
+
+- if (op->mode == AES_MODE_CBC)
+- _readfield(AES_WRITEIV0_REG, op->iv);
++ if (mode == AES_MODE_CBC)
++ _readfield(AES_WRITEIV0_REG, iv);
+
+ spin_unlock_irqrestore(&lock, iflags);
+-
+- return op->len;
+ }
+
+ /* CRYPTO-API Functions */
+@@ -115,13 +109,13 @@ geode_aes_crypt(struct geode_aes_op *op)
+ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int len)
+ {
+- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+ unsigned int ret;
+
+- op->keylen = len;
++ tctx->keylen = len;
+
+ if (len == AES_KEYSIZE_128) {
+- memcpy(op->key, key, len);
++ memcpy(tctx->key, key, len);
+ return 0;
+ }
+
+@@ -134,132 +128,93 @@ static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
+ /*
+ * The requested key size is not supported by HW, do a fallback
+ */
+- op->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+- op->fallback.cip->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
++ tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
++ tctx->fallback.cip->base.crt_flags |=
++ (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+- ret = crypto_cipher_setkey(op->fallback.cip, key, len);
++ ret = crypto_cipher_setkey(tctx->fallback.cip, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+- tfm->crt_flags |= (op->fallback.cip->base.crt_flags & CRYPTO_TFM_RES_MASK);
++ tfm->crt_flags |= (tctx->fallback.cip->base.crt_flags &
++ CRYPTO_TFM_RES_MASK);
+ }
+ return ret;
+ }
+
+-static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+- unsigned int len)
++static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
++ unsigned int len)
+ {
+- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+ unsigned int ret;
+
+- op->keylen = len;
++ tctx->keylen = len;
+
+ if (len == AES_KEYSIZE_128) {
+- memcpy(op->key, key, len);
++ memcpy(tctx->key, key, len);
+ return 0;
+ }
+
+ if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256) {
+ /* not supported at all */
+- tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
++ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ /*
+ * The requested key size is not supported by HW, do a fallback
+ */
+- crypto_sync_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK);
+- crypto_sync_skcipher_set_flags(op->fallback.blk,
+- tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+-
+- ret = crypto_sync_skcipher_setkey(op->fallback.blk, key, len);
+- if (ret) {
+- tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+- tfm->crt_flags |= crypto_sync_skcipher_get_flags(op->fallback.blk) &
+- CRYPTO_TFM_RES_MASK;
+- }
++ crypto_skcipher_clear_flags(tctx->fallback.skcipher,
++ CRYPTO_TFM_REQ_MASK);
++ crypto_skcipher_set_flags(tctx->fallback.skcipher,
++ crypto_skcipher_get_flags(tfm) &
++ CRYPTO_TFM_REQ_MASK);
++ ret = crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
++ crypto_skcipher_set_flags(tfm,
++ crypto_skcipher_get_flags(tctx->fallback.skcipher) &
++ CRYPTO_TFM_RES_MASK);
+ return ret;
+ }
+
+-static int fallback_blk_dec(struct blkcipher_desc *desc,
+- struct scatterlist *dst, struct scatterlist *src,
+- unsigned int nbytes)
+-{
+- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+- SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+-
+- skcipher_request_set_sync_tfm(req, op->fallback.blk);
+- skcipher_request_set_callback(req, 0, NULL, NULL);
+- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+-
+- return crypto_skcipher_decrypt(req);
+-}
+-
+-static int fallback_blk_enc(struct blkcipher_desc *desc,
+- struct scatterlist *dst, struct scatterlist *src,
+- unsigned int nbytes)
+-{
+- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+- SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+-
+- skcipher_request_set_sync_tfm(req, op->fallback.blk);
+- skcipher_request_set_callback(req, 0, NULL, NULL);
+- skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+-
+- return crypto_skcipher_encrypt(req);
+-}
+-
+ static void
+ geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++ const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+- if (unlikely(op->keylen != AES_KEYSIZE_128)) {
+- crypto_cipher_encrypt_one(op->fallback.cip, out, in);
++ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
++ crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
+ return;
+ }
+
+- op->src = (void *) in;
+- op->dst = (void *) out;
+- op->mode = AES_MODE_ECB;
+- op->flags = 0;
+- op->len = AES_BLOCK_SIZE;
+- op->dir = AES_DIR_ENCRYPT;
+-
+- geode_aes_crypt(op);
++ geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
++ AES_MODE_ECB, AES_DIR_ENCRYPT);
+ }
+
+
+ static void
+ geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
+ {
+- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++ const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+- if (unlikely(op->keylen != AES_KEYSIZE_128)) {
+- crypto_cipher_decrypt_one(op->fallback.cip, out, in);
++ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
++ crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
+ return;
+ }
+
+- op->src = (void *) in;
+- op->dst = (void *) out;
+- op->mode = AES_MODE_ECB;
+- op->flags = 0;
+- op->len = AES_BLOCK_SIZE;
+- op->dir = AES_DIR_DECRYPT;
+-
+- geode_aes_crypt(op);
++ geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
++ AES_MODE_ECB, AES_DIR_DECRYPT);
+ }
+
+ static int fallback_init_cip(struct crypto_tfm *tfm)
+ {
+ const char *name = crypto_tfm_alg_name(tfm);
+- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+- op->fallback.cip = crypto_alloc_cipher(name, 0,
+- CRYPTO_ALG_NEED_FALLBACK);
++ tctx->fallback.cip = crypto_alloc_cipher(name, 0,
++ CRYPTO_ALG_NEED_FALLBACK);
+
+- if (IS_ERR(op->fallback.cip)) {
++ if (IS_ERR(tctx->fallback.cip)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+- return PTR_ERR(op->fallback.cip);
++ return PTR_ERR(tctx->fallback.cip);
+ }
+
+ return 0;
+@@ -267,10 +222,9 @@ static int fallback_init_cip(struct crypto_tfm *tfm)
+
+ static void fallback_exit_cip(struct crypto_tfm *tfm)
+ {
+- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
++ struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
+
+- crypto_free_cipher(op->fallback.cip);
+- op->fallback.cip = NULL;
++ crypto_free_cipher(tctx->fallback.cip);
+ }
+
+ static struct crypto_alg geode_alg = {
+@@ -283,7 +237,7 @@ static struct crypto_alg geode_alg = {
+ .cra_init = fallback_init_cip,
+ .cra_exit = fallback_exit_cip,
+ .cra_blocksize = AES_BLOCK_SIZE,
+- .cra_ctxsize = sizeof(struct geode_aes_op),
++ .cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
+ .cra_module = THIS_MODULE,
+ .cra_u = {
+ .cipher = {
+@@ -296,220 +250,126 @@ static struct crypto_alg geode_alg = {
+ }
+ };
+
+-static int
+-geode_cbc_decrypt(struct blkcipher_desc *desc,
+- struct scatterlist *dst, struct scatterlist *src,
+- unsigned int nbytes)
++static int geode_init_skcipher(struct crypto_skcipher *tfm)
+ {
+- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+- struct blkcipher_walk walk;
+- int err, ret;
+-
+- if (nbytes % AES_BLOCK_SIZE)
+- return -EINVAL;
+-
+- if (unlikely(op->keylen != AES_KEYSIZE_128))
+- return fallback_blk_dec(desc, dst, src, nbytes);
++ const char *name = crypto_tfm_alg_name(&tfm->base);
++ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+
+- blkcipher_walk_init(&walk, dst, src, nbytes);
+- err = blkcipher_walk_virt(desc, &walk);
+- op->iv = walk.iv;
+-
+- while ((nbytes = walk.nbytes)) {
+- op->src = walk.src.virt.addr,
+- op->dst = walk.dst.virt.addr;
+- op->mode = AES_MODE_CBC;
+- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+- op->dir = AES_DIR_DECRYPT;
+-
+- ret = geode_aes_crypt(op);
+-
+- nbytes -= ret;
+- err = blkcipher_walk_done(desc, &walk, nbytes);
++ tctx->fallback.skcipher =
++ crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
++ CRYPTO_ALG_ASYNC);
++ if (IS_ERR(tctx->fallback.skcipher)) {
++ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
++ return PTR_ERR(tctx->fallback.skcipher);
+ }
+
+- return err;
++ crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
++ crypto_skcipher_reqsize(tctx->fallback.skcipher));
++ return 0;
+ }
+
+-static int
+-geode_cbc_encrypt(struct blkcipher_desc *desc,
+- struct scatterlist *dst, struct scatterlist *src,
+- unsigned int nbytes)
++static void geode_exit_skcipher(struct crypto_skcipher *tfm)
+ {
+- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+- struct blkcipher_walk walk;
+- int err, ret;
++ struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
+
+- if (nbytes % AES_BLOCK_SIZE)
+- return -EINVAL;
+-
+- if (unlikely(op->keylen != AES_KEYSIZE_128))
+- return fallback_blk_enc(desc, dst, src, nbytes);
++ crypto_free_skcipher(tctx->fallback.skcipher);
++}
+
+- blkcipher_walk_init(&walk, dst, src, nbytes);
+- err = blkcipher_walk_virt(desc, &walk);
+- op->iv = walk.iv;
++static int geode_skcipher_crypt(struct skcipher_request *req, int mode, int dir)
++{
++ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
++ const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
++ struct skcipher_walk walk;
++ unsigned int nbytes;
++ int err;
++
++ if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
++ struct skcipher_request *subreq = skcipher_request_ctx(req);
++
++ *subreq = *req;
++ skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
++ if (dir == AES_DIR_DECRYPT)
++ return crypto_skcipher_decrypt(subreq);
++ else
++ return crypto_skcipher_encrypt(subreq);
++ }
+
+- while ((nbytes = walk.nbytes)) {
+- op->src = walk.src.virt.addr,
+- op->dst = walk.dst.virt.addr;
+- op->mode = AES_MODE_CBC;
+- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+- op->dir = AES_DIR_ENCRYPT;
++ err = skcipher_walk_virt(&walk, req, false);
+
+- ret = geode_aes_crypt(op);
+- nbytes -= ret;
+- err = blkcipher_walk_done(desc, &walk, nbytes);
++ while ((nbytes = walk.nbytes) != 0) {
++ geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
++ round_down(nbytes, AES_BLOCK_SIZE),
++ walk.iv, mode, dir);
++ err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
+ }
+
+ return err;
+ }
+
+-static int fallback_init_blk(struct crypto_tfm *tfm)
++static int geode_cbc_encrypt(struct skcipher_request *req)
+ {
+- const char *name = crypto_tfm_alg_name(tfm);
+- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+-
+- op->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
+- CRYPTO_ALG_NEED_FALLBACK);
+- if (IS_ERR(op->fallback.blk)) {
+- printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+- return PTR_ERR(op->fallback.blk);
+- }
+-
+- return 0;
++ return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
+ }
+
+-static void fallback_exit_blk(struct crypto_tfm *tfm)
++static int geode_cbc_decrypt(struct skcipher_request *req)
+ {
+- struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+-
+- crypto_free_sync_skcipher(op->fallback.blk);
+- op->fallback.blk = NULL;
++ return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
+ }
+
+-static struct crypto_alg geode_cbc_alg = {
+- .cra_name = "cbc(aes)",
+- .cra_driver_name = "cbc-aes-geode",
+- .cra_priority = 400,
+- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+- CRYPTO_ALG_KERN_DRIVER_ONLY |
+- CRYPTO_ALG_NEED_FALLBACK,
+- .cra_init = fallback_init_blk,
+- .cra_exit = fallback_exit_blk,
+- .cra_blocksize = AES_BLOCK_SIZE,
+- .cra_ctxsize = sizeof(struct geode_aes_op),
+- .cra_alignmask = 15,
+- .cra_type = &crypto_blkcipher_type,
+- .cra_module = THIS_MODULE,
+- .cra_u = {
+- .blkcipher = {
+- .min_keysize = AES_MIN_KEY_SIZE,
+- .max_keysize = AES_MAX_KEY_SIZE,
+- .setkey = geode_setkey_blk,
+- .encrypt = geode_cbc_encrypt,
+- .decrypt = geode_cbc_decrypt,
+- .ivsize = AES_BLOCK_SIZE,
+- }
+- }
+-};
+-
+-static int
+-geode_ecb_decrypt(struct blkcipher_desc *desc,
+- struct scatterlist *dst, struct scatterlist *src,
+- unsigned int nbytes)
++static int geode_ecb_encrypt(struct skcipher_request *req)
+ {
+- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+- struct blkcipher_walk walk;
+- int err, ret;
+-
+- if (nbytes % AES_BLOCK_SIZE)
+- return -EINVAL;
+-
+- if (unlikely(op->keylen != AES_KEYSIZE_128))
+- return fallback_blk_dec(desc, dst, src, nbytes);
+-
+- blkcipher_walk_init(&walk, dst, src, nbytes);
+- err = blkcipher_walk_virt(desc, &walk);
+-
+- while ((nbytes = walk.nbytes)) {
+- op->src = walk.src.virt.addr,
+- op->dst = walk.dst.virt.addr;
+- op->mode = AES_MODE_ECB;
+- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+- op->dir = AES_DIR_DECRYPT;
+-
+- ret = geode_aes_crypt(op);
+- nbytes -= ret;
+- err = blkcipher_walk_done(desc, &walk, nbytes);
+- }
+-
+- return err;
++ return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
+ }
+
+-static int
+-geode_ecb_encrypt(struct blkcipher_desc *desc,
+- struct scatterlist *dst, struct scatterlist *src,
+- unsigned int nbytes)
++static int geode_ecb_decrypt(struct skcipher_request *req)
+ {
+- struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
+- struct blkcipher_walk walk;
+- int err, ret;
+-
+- if (nbytes % AES_BLOCK_SIZE)
+- return -EINVAL;
+-
+- if (unlikely(op->keylen != AES_KEYSIZE_128))
+- return fallback_blk_enc(desc, dst, src, nbytes);
+-
+- blkcipher_walk_init(&walk, dst, src, nbytes);
+- err = blkcipher_walk_virt(desc, &walk);
+-
+- while ((nbytes = walk.nbytes)) {
+- op->src = walk.src.virt.addr,
+- op->dst = walk.dst.virt.addr;
+- op->mode = AES_MODE_ECB;
+- op->len = nbytes - (nbytes % AES_BLOCK_SIZE);
+- op->dir = AES_DIR_ENCRYPT;
+-
+- ret = geode_aes_crypt(op);
+- nbytes -= ret;
+- ret = blkcipher_walk_done(desc, &walk, nbytes);
+- }
+-
+- return err;
++ return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
+ }
+
+-static struct crypto_alg geode_ecb_alg = {
+- .cra_name = "ecb(aes)",
+- .cra_driver_name = "ecb-aes-geode",
+- .cra_priority = 400,
+- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER |
+- CRYPTO_ALG_KERN_DRIVER_ONLY |
+- CRYPTO_ALG_NEED_FALLBACK,
+- .cra_init = fallback_init_blk,
+- .cra_exit = fallback_exit_blk,
+- .cra_blocksize = AES_BLOCK_SIZE,
+- .cra_ctxsize = sizeof(struct geode_aes_op),
+- .cra_alignmask = 15,
+- .cra_type = &crypto_blkcipher_type,
+- .cra_module = THIS_MODULE,
+- .cra_u = {
+- .blkcipher = {
+- .min_keysize = AES_MIN_KEY_SIZE,
+- .max_keysize = AES_MAX_KEY_SIZE,
+- .setkey = geode_setkey_blk,
+- .encrypt = geode_ecb_encrypt,
+- .decrypt = geode_ecb_decrypt,
+- }
+- }
++static struct skcipher_alg geode_skcipher_algs[] = {
++ {
++ .base.cra_name = "cbc(aes)",
++ .base.cra_driver_name = "cbc-aes-geode",
++ .base.cra_priority = 400,
++ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .base.cra_blocksize = AES_BLOCK_SIZE,
++ .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
++ .base.cra_alignmask = 15,
++ .base.cra_module = THIS_MODULE,
++ .init = geode_init_skcipher,
++ .exit = geode_exit_skcipher,
++ .setkey = geode_setkey_skcipher,
++ .encrypt = geode_cbc_encrypt,
++ .decrypt = geode_cbc_decrypt,
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ .ivsize = AES_BLOCK_SIZE,
++ }, {
++ .base.cra_name = "ecb(aes)",
++ .base.cra_driver_name = "ecb-aes-geode",
++ .base.cra_priority = 400,
++ .base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
++ CRYPTO_ALG_NEED_FALLBACK,
++ .base.cra_blocksize = AES_BLOCK_SIZE,
++ .base.cra_ctxsize = sizeof(struct geode_aes_tfm_ctx),
++ .base.cra_alignmask = 15,
++ .base.cra_module = THIS_MODULE,
++ .init = geode_init_skcipher,
++ .exit = geode_exit_skcipher,
++ .setkey = geode_setkey_skcipher,
++ .encrypt = geode_ecb_encrypt,
++ .decrypt = geode_ecb_decrypt,
++ .min_keysize = AES_MIN_KEY_SIZE,
++ .max_keysize = AES_MAX_KEY_SIZE,
++ },
+ };
+
+ static void geode_aes_remove(struct pci_dev *dev)
+ {
+ crypto_unregister_alg(&geode_alg);
+- crypto_unregister_alg(&geode_ecb_alg);
+- crypto_unregister_alg(&geode_cbc_alg);
++ crypto_unregister_skciphers(geode_skcipher_algs,
++ ARRAY_SIZE(geode_skcipher_algs));
+
+ pci_iounmap(dev, _iobase);
+ _iobase = NULL;
+@@ -547,20 +407,14 @@ static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ if (ret)
+ goto eiomap;
+
+- ret = crypto_register_alg(&geode_ecb_alg);
++ ret = crypto_register_skciphers(geode_skcipher_algs,
++ ARRAY_SIZE(geode_skcipher_algs));
+ if (ret)
+ goto ealg;
+
+- ret = crypto_register_alg(&geode_cbc_alg);
+- if (ret)
+- goto eecb;
+-
+ dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
+ return 0;
+
+- eecb:
+- crypto_unregister_alg(&geode_ecb_alg);
+-
+ ealg:
+ crypto_unregister_alg(&geode_alg);
+
+diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
+index f8a86898ac22..6d0a0cdc7647 100644
+--- a/drivers/crypto/geode-aes.h
++++ b/drivers/crypto/geode-aes.h
+@@ -46,21 +46,10 @@
+
+ #define AES_OP_TIMEOUT 0x50000
+
+-struct geode_aes_op {
+-
+- void *src;
+- void *dst;
+-
+- u32 mode;
+- u32 dir;
+- u32 flags;
+- int len;
+-
++struct geode_aes_tfm_ctx {
+ u8 key[AES_KEYSIZE_128];
+- u8 *iv;
+-
+ union {
+- struct crypto_sync_skcipher *blk;
++ struct crypto_skcipher *skcipher;
+ struct crypto_cipher *cip;
+ } fallback;
+ u32 keylen;
+diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
+index ebaf91e0146d..504daff7687d 100644
+--- a/drivers/crypto/hisilicon/Kconfig
++++ b/drivers/crypto/hisilicon/Kconfig
+@@ -17,6 +17,7 @@ config CRYPTO_DEV_HISI_SEC
+ config CRYPTO_DEV_HISI_QM
+ tristate
+ depends on ARM64 && PCI && PCI_MSI
++ select NEED_SG_DMA_LENGTH
+ help
+ HiSilicon accelerator engines use a common queue management
+ interface. Specific engine driver may use this module.
+diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
+index 673fb29fda53..82b316b2f537 100644
+--- a/drivers/crypto/virtio/virtio_crypto_algs.c
++++ b/drivers/crypto/virtio/virtio_crypto_algs.c
+@@ -435,6 +435,11 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
+ goto free;
+ }
+ memcpy(iv, req->info, ivsize);
++ if (!vc_sym_req->encrypt)
++ scatterwalk_map_and_copy(req->info, req->src,
++ req->nbytes - AES_BLOCK_SIZE,
++ AES_BLOCK_SIZE, 0);
++
+ sg_init_one(&iv_sg, iv, ivsize);
+ sgs[num_out++] = &iv_sg;
+ vc_sym_req->iv = iv;
+@@ -571,6 +576,10 @@ static void virtio_crypto_ablkcipher_finalize_req(
+ struct ablkcipher_request *req,
+ int err)
+ {
++ if (vc_sym_req->encrypt)
++ scatterwalk_map_and_copy(req->info, req->dst,
++ req->nbytes - AES_BLOCK_SIZE,
++ AES_BLOCK_SIZE, 0);
+ crypto_finalize_ablkcipher_request(vc_sym_req->base.dataq->engine,
+ req, err);
+ kzfree(vc_sym_req->iv);
+diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
+index defe1d438710..af4a3ccb96b3 100644
+--- a/drivers/devfreq/Kconfig
++++ b/drivers/devfreq/Kconfig
+@@ -99,6 +99,7 @@ config ARM_TEGRA_DEVFREQ
+ ARCH_TEGRA_210_SOC || \
+ COMPILE_TEST
+ select PM_OPP
++ depends on COMMON_CLK
+ help
+ This adds the DEVFREQ driver for the Tegra family of SoCs.
+ It reads ACTMON counters of memory controllers and adjusts the
+diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
+index c90c798e5ec3..0585d749d935 100644
+--- a/drivers/dma/dw/platform.c
++++ b/drivers/dma/dw/platform.c
+@@ -66,7 +66,7 @@ static int dw_probe(struct platform_device *pdev)
+
+ data->chip = chip;
+
+- chip->clk = devm_clk_get(chip->dev, "hclk");
++ chip->clk = devm_clk_get_optional(chip->dev, "hclk");
+ if (IS_ERR(chip->clk))
+ return PTR_ERR(chip->clk);
+ err = clk_prepare_enable(chip->clk);
+diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
+index 1a422a8b43cf..18c011e57592 100644
+--- a/drivers/dma/ioat/dma.c
++++ b/drivers/dma/ioat/dma.c
+@@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
+
+ descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
+ SZ_2M, &descs->hw, flags);
+- if (!descs->virt && (i > 0)) {
++ if (!descs->virt) {
+ int idx;
+
+ for (idx = 0; idx < i; idx++) {
++ descs = &ioat_chan->descs[idx];
+ dma_free_coherent(to_dev(ioat_chan), SZ_2M,
+ descs->virt, descs->hw);
+ descs->virt = NULL;
+diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
+index 4b36c8810517..d05471653224 100644
+--- a/drivers/dma/k3dma.c
++++ b/drivers/dma/k3dma.c
+@@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
+ c = p->vchan;
+ if (c && (tc1 & BIT(i))) {
+ spin_lock_irqsave(&c->vc.lock, flags);
+- vchan_cookie_complete(&p->ds_run->vd);
+- p->ds_done = p->ds_run;
+- p->ds_run = NULL;
++ if (p->ds_run != NULL) {
++ vchan_cookie_complete(&p->ds_run->vd);
++ p->ds_done = p->ds_run;
++ p->ds_run = NULL;
++ }
+ spin_unlock_irqrestore(&c->vc.lock, flags);
+ }
+ if (c && (tc2 & BIT(i))) {
+@@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
+ if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
+ return -EAGAIN;
+
++ /* Avoid losing track of ds_run if a transaction is in flight */
++ if (c->phy->ds_run)
++ return -EAGAIN;
++
+ if (vd) {
+ struct k3_dma_desc_sw *ds =
+ container_of(vd, struct k3_dma_desc_sw, vd);
+diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
+index a031cbcdf6ef..d72a3a5507b0 100644
+--- a/drivers/gpio/gpio-mpc8xxx.c
++++ b/drivers/gpio/gpio-mpc8xxx.c
+@@ -346,6 +346,7 @@ static int mpc8xxx_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ gc = &mpc8xxx_gc->gc;
++ gc->parent = &pdev->dev;
+
+ if (of_property_read_bool(np, "little-endian")) {
+ ret = bgpio_init(gc, &pdev->dev, 4,
+diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
+index cd475ff4bcad..7835aad6d162 100644
+--- a/drivers/gpio/gpio-zynq.c
++++ b/drivers/gpio/gpio-zynq.c
+@@ -681,6 +681,8 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
+ unsigned int bank_num;
+
+ for (bank_num = 0; bank_num < gpio->p_data->max_bank; bank_num++) {
++ writel_relaxed(ZYNQ_GPIO_IXR_DISABLE_ALL, gpio->base_addr +
++ ZYNQ_GPIO_INTDIS_OFFSET(bank_num));
+ writel_relaxed(gpio->context.datalsw[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_DATA_LSW_OFFSET(bank_num));
+@@ -690,9 +692,6 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
+ writel_relaxed(gpio->context.dirm[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_DIRM_OFFSET(bank_num));
+- writel_relaxed(gpio->context.int_en[bank_num],
+- gpio->base_addr +
+- ZYNQ_GPIO_INTEN_OFFSET(bank_num));
+ writel_relaxed(gpio->context.int_type[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTTYPE_OFFSET(bank_num));
+@@ -702,6 +701,9 @@ static void zynq_gpio_restore_context(struct zynq_gpio *gpio)
+ writel_relaxed(gpio->context.int_any[bank_num],
+ gpio->base_addr +
+ ZYNQ_GPIO_INTANY_OFFSET(bank_num));
++ writel_relaxed(~(gpio->context.int_en[bank_num]),
++ gpio->base_addr +
++ ZYNQ_GPIO_INTEN_OFFSET(bank_num));
+ }
+ }
+
+diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
+index 99d19f80440e..3d9524a2abc4 100644
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -4328,8 +4328,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id,
+
+ if (chip->ngpio <= p->chip_hwnum) {
+ dev_err(dev,
+- "requested GPIO %d is out of range [0..%d] for chip %s\n",
+- idx, chip->ngpio, chip->label);
++ "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n",
++ idx, p->chip_hwnum, chip->ngpio - 1,
++ chip->label);
+ return ERR_PTR(-EINVAL);
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+index bd37df5dd6d0..d1e278e999ee 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+@@ -813,6 +813,7 @@ struct amdgpu_device {
+ uint8_t *bios;
+ uint32_t bios_size;
+ struct amdgpu_bo *stolen_vga_memory;
++ struct amdgpu_bo *discovery_memory;
+ uint32_t bios_scratch_reg_offset;
+ uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 1481899f86c1..71198c5318e1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -136,7 +136,7 @@ static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *bin
+ {
+ uint32_t *p = (uint32_t *)binary;
+ uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
+- uint64_t pos = vram_size - BINARY_MAX_SIZE;
++ uint64_t pos = vram_size - DISCOVERY_TMR_SIZE;
+ unsigned long flags;
+
+ while (pos < vram_size) {
+@@ -179,7 +179,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev)
+ uint16_t checksum;
+ int r;
+
+- adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL);
++ adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
+ if (!adev->discovery)
+ return -ENOMEM;
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+index 85b8c4d4d576..5a6693d7d269 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+@@ -24,6 +24,8 @@
+ #ifndef __AMDGPU_DISCOVERY__
+ #define __AMDGPU_DISCOVERY__
+
++#define DISCOVERY_TMR_SIZE (64 << 10)
++
+ int amdgpu_discovery_init(struct amdgpu_device *adev);
+ void amdgpu_discovery_fini(struct amdgpu_device *adev);
+ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+index 7289e1b4fb60..28361a9c5add 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+@@ -342,6 +342,67 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ return 0;
+ }
+
++/**
++ * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location
++ *
++ * @adev: amdgpu device object
++ * @offset: offset of the BO
++ * @size: size of the BO
++ * @domain: where to place it
++ * @bo_ptr: used to initialize BOs in structures
++ * @cpu_addr: optional CPU address mapping
++ *
++ * Creates a kernel BO at a specific offset in the address space of the domain.
++ *
++ * Returns:
++ * 0 on success, negative error code otherwise.
++ */
++int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
++ uint64_t offset, uint64_t size, uint32_t domain,
++ struct amdgpu_bo **bo_ptr, void **cpu_addr)
++{
++ struct ttm_operation_ctx ctx = { false, false };
++ unsigned int i;
++ int r;
++
++ offset &= PAGE_MASK;
++ size = ALIGN(size, PAGE_SIZE);
++
++ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr,
++ NULL, NULL);
++ if (r)
++ return r;
++
++ /*
++ * Remove the original mem node and create a new one at the request
++ * position.
++ */
++ for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
++ (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
++ (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
++ }
++
++ ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
++ r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
++ &(*bo_ptr)->tbo.mem, &ctx);
++ if (r)
++ goto error;
++
++ if (cpu_addr) {
++ r = amdgpu_bo_kmap(*bo_ptr, cpu_addr);
++ if (r)
++ goto error;
++ }
++
++ amdgpu_bo_unreserve(*bo_ptr);
++ return 0;
++
++error:
++ amdgpu_bo_unreserve(*bo_ptr);
++ amdgpu_bo_unref(bo_ptr);
++ return r;
++}
++
+ /**
+ * amdgpu_bo_free_kernel - free BO for kernel use
+ *
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+index 658f4c9779b7..4fcea23ee516 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+@@ -237,6 +237,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
+ unsigned long size, int align,
+ u32 domain, struct amdgpu_bo **bo_ptr,
+ u64 *gpu_addr, void **cpu_addr);
++int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
++ uint64_t offset, uint64_t size, uint32_t domain,
++ struct amdgpu_bo **bo_ptr, void **cpu_addr);
+ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
+ void **cpu_addr);
+ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+index 016ea274b955..9c5cbc47edf1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+@@ -65,12 +65,6 @@ const char *ras_block_string[] = {
+ /* inject address is 52 bits */
+ #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52)
+
+-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
+- uint64_t offset, uint64_t size,
+- struct amdgpu_bo **bo_ptr);
+-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
+- struct amdgpu_bo **bo_ptr);
+-
+ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
+ size_t size, loff_t *pos)
+ {
+@@ -1214,75 +1208,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
+ atomic_set(&ras->in_recovery, 0);
+ }
+
+-static int amdgpu_ras_release_vram(struct amdgpu_device *adev,
+- struct amdgpu_bo **bo_ptr)
+-{
+- /* no need to free it actually. */
+- amdgpu_bo_free_kernel(bo_ptr, NULL, NULL);
+- return 0;
+-}
+-
+-/* reserve vram with size@offset */
+-static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev,
+- uint64_t offset, uint64_t size,
+- struct amdgpu_bo **bo_ptr)
+-{
+- struct ttm_operation_ctx ctx = { false, false };
+- struct amdgpu_bo_param bp;
+- int r = 0;
+- int i;
+- struct amdgpu_bo *bo;
+-
+- if (bo_ptr)
+- *bo_ptr = NULL;
+- memset(&bp, 0, sizeof(bp));
+- bp.size = size;
+- bp.byte_align = PAGE_SIZE;
+- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+- bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+- AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
+- bp.type = ttm_bo_type_kernel;
+- bp.resv = NULL;
+-
+- r = amdgpu_bo_create(adev, &bp, &bo);
+- if (r)
+- return -EINVAL;
+-
+- r = amdgpu_bo_reserve(bo, false);
+- if (r)
+- goto error_reserve;
+-
+- offset = ALIGN(offset, PAGE_SIZE);
+- for (i = 0; i < bo->placement.num_placement; ++i) {
+- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+- }
+-
+- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+- r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx);
+- if (r)
+- goto error_pin;
+-
+- r = amdgpu_bo_pin_restricted(bo,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- offset,
+- offset + size);
+- if (r)
+- goto error_pin;
+-
+- if (bo_ptr)
+- *bo_ptr = bo;
+-
+- amdgpu_bo_unreserve(bo);
+- return r;
+-
+-error_pin:
+- amdgpu_bo_unreserve(bo);
+-error_reserve:
+- amdgpu_bo_unref(&bo);
+- return r;
+-}
+-
+ /* alloc/realloc bps array */
+ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev,
+ struct ras_err_handler_data *data, int pages)
+@@ -1345,7 +1270,7 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_err_handler_data *data;
+ uint64_t bp;
+- struct amdgpu_bo *bo;
++ struct amdgpu_bo *bo = NULL;
+ int i;
+
+ if (!con || !con->eh_data)
+@@ -1359,12 +1284,14 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev)
+ for (i = data->last_reserved; i < data->count; i++) {
+ bp = data->bps[i].bp;
+
+- if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT,
+- PAGE_SIZE, &bo))
++ if (amdgpu_bo_create_kernel_at(adev, bp << PAGE_SHIFT, PAGE_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &bo, NULL))
+ DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp);
+
+ data->bps[i].bo = bo;
+ data->last_reserved = i + 1;
++ bo = NULL;
+ }
+ out:
+ mutex_unlock(&con->recovery_lock);
+@@ -1390,7 +1317,7 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev)
+ for (i = data->last_reserved - 1; i >= 0; i--) {
+ bo = data->bps[i].bo;
+
+- amdgpu_ras_release_vram(adev, &bo);
++ amdgpu_bo_free_kernel(&bo, NULL, NULL);
+
+ data->bps[i].bo = bo;
+ data->last_reserved = i;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+index c0e41f1f0c23..f15ded1ce905 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+@@ -1639,81 +1639,25 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev)
+ */
+ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev)
+ {
+- struct ttm_operation_ctx ctx = { false, false };
+- struct amdgpu_bo_param bp;
+- int r = 0;
+- int i;
+- u64 vram_size = adev->gmc.visible_vram_size;
+- u64 offset = adev->fw_vram_usage.start_offset;
+- u64 size = adev->fw_vram_usage.size;
+- struct amdgpu_bo *bo;
+-
+- memset(&bp, 0, sizeof(bp));
+- bp.size = adev->fw_vram_usage.size;
+- bp.byte_align = PAGE_SIZE;
+- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
+- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+- bp.type = ttm_bo_type_kernel;
+- bp.resv = NULL;
++ uint64_t vram_size = adev->gmc.visible_vram_size;
++ int r;
++
+ adev->fw_vram_usage.va = NULL;
+ adev->fw_vram_usage.reserved_bo = NULL;
+
+- if (adev->fw_vram_usage.size > 0 &&
+- adev->fw_vram_usage.size <= vram_size) {
+-
+- r = amdgpu_bo_create(adev, &bp,
+- &adev->fw_vram_usage.reserved_bo);
+- if (r)
+- goto error_create;
+-
+- r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false);
+- if (r)
+- goto error_reserve;
+-
+- /* remove the original mem node and create a new one at the
+- * request position
+- */
+- bo = adev->fw_vram_usage.reserved_bo;
+- offset = ALIGN(offset, PAGE_SIZE);
+- for (i = 0; i < bo->placement.num_placement; ++i) {
+- bo->placements[i].fpfn = offset >> PAGE_SHIFT;
+- bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
+- }
+-
+- ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem);
+- r = ttm_bo_mem_space(&bo->tbo, &bo->placement,
+- &bo->tbo.mem, &ctx);
+- if (r)
+- goto error_pin;
+-
+- r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo,
+- AMDGPU_GEM_DOMAIN_VRAM,
+- adev->fw_vram_usage.start_offset,
+- (adev->fw_vram_usage.start_offset +
+- adev->fw_vram_usage.size));
+- if (r)
+- goto error_pin;
+- r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo,
+- &adev->fw_vram_usage.va);
+- if (r)
+- goto error_kmap;
+-
+- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+- }
+- return r;
++ if (adev->fw_vram_usage.size == 0 ||
++ adev->fw_vram_usage.size > vram_size)
++ return 0;
+
+-error_kmap:
+- amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo);
+-error_pin:
+- amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo);
+-error_reserve:
+- amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo);
+-error_create:
+- adev->fw_vram_usage.va = NULL;
+- adev->fw_vram_usage.reserved_bo = NULL;
++ return amdgpu_bo_create_kernel_at(adev,
++ adev->fw_vram_usage.start_offset,
++ adev->fw_vram_usage.size,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->fw_vram_usage.reserved_bo,
++ &adev->fw_vram_usage.va);
+ return r;
+ }
++
+ /**
+ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
+ * gtt/vram related fields.
+@@ -1786,6 +1730,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
+ NULL, &stolen_vga_buf);
+ if (r)
+ return r;
++
++ /*
++ * reserve one TMR (64K) memory at the top of VRAM which holds
++ * IP Discovery data and is protected by PSP.
++ */
++ r = amdgpu_bo_create_kernel_at(adev,
++ adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE,
++ DISCOVERY_TMR_SIZE,
++ AMDGPU_GEM_DOMAIN_VRAM,
++ &adev->discovery_memory,
++ NULL);
++ if (r)
++ return r;
++
+ DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
+ (unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
+
+@@ -1850,6 +1808,9 @@ void amdgpu_ttm_late_init(struct amdgpu_device *adev)
+ void *stolen_vga_buf;
+ /* return the VGA stolen memory (if any) back to VRAM */
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf);
++
++ /* return the IP Discovery TMR memory back to VRAM */
++ amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+index c9ba2ec6d038..ab4a0d8545dc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+@@ -1038,17 +1038,10 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
+ case CHIP_VEGA20:
+ break;
+ case CHIP_RAVEN:
+- /* Disable GFXOFF on original raven. There are combinations
+- * of sbios and platforms that are not stable.
+- */
+- if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8))
+- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+- else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)
+- &&((adev->gfx.rlc_fw_version != 106 &&
+- adev->gfx.rlc_fw_version < 531) ||
+- (adev->gfx.rlc_fw_version == 53815) ||
+- (adev->gfx.rlc_feature_version < 1) ||
+- !adev->gfx.rlc.is_rlc_v2_1))
++ if (!(adev->rev_id >= 0x8 ||
++ adev->pdev->device == 0x15d8) &&
++ (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */
++ !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
+index 5dcb776548d8..7ec4331e67f2 100644
+--- a/drivers/gpu/drm/amd/include/discovery.h
++++ b/drivers/gpu/drm/amd/include/discovery.h
+@@ -25,7 +25,6 @@
+ #define _DISCOVERY_H_
+
+ #define PSP_HEADER_SIZE 256
+-#define BINARY_MAX_SIZE (64 << 10)
+ #define BINARY_SIGNATURE 0x28211407
+ #define DISCOVERY_TABLE_SIGNATURE 0x53445049
+
+diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
+index 875a3a9eabfa..7d0e7b031e44 100644
+--- a/drivers/gpu/drm/arm/malidp_mw.c
++++ b/drivers/gpu/drm/arm/malidp_mw.c
+@@ -56,7 +56,7 @@ malidp_mw_connector_mode_valid(struct drm_connector *connector,
+ return MODE_OK;
+ }
+
+-const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
++static const struct drm_connector_helper_funcs malidp_mw_connector_helper_funcs = {
+ .get_modes = malidp_mw_connector_get_modes,
+ .mode_valid = malidp_mw_connector_mode_valid,
+ };
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index 6fb7d74ff553..bc7cc32140f8 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -201,19 +201,19 @@ hub:
+ if (tegra->hub)
+ tegra_display_hub_cleanup(tegra->hub);
+ device:
+- host1x_device_exit(device);
+-fbdev:
+- drm_kms_helper_poll_fini(drm);
+- tegra_drm_fb_free(drm);
+-config:
+- drm_mode_config_cleanup(drm);
+-
+ if (tegra->domain) {
+ mutex_destroy(&tegra->mm_lock);
+ drm_mm_takedown(&tegra->mm);
+ put_iova_domain(&tegra->carveout.domain);
+ iova_cache_put();
+ }
++
++ host1x_device_exit(device);
++fbdev:
++ drm_kms_helper_poll_fini(drm);
++ tegra_drm_fb_free(drm);
++config:
++ drm_mode_config_cleanup(drm);
+ domain:
+ if (tegra->domain)
+ iommu_domain_free(tegra->domain);
+diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+index a662394f6892..0a88ef11b9d3 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
++++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+@@ -463,29 +463,25 @@ out:
+ }
+
+ static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data,
+- struct drm_file *file)
++ struct drm_file *file)
+ {
+ struct drm_virtgpu_3d_wait *args = data;
+- struct drm_gem_object *obj;
+- long timeout = 15 * HZ;
++ struct drm_gem_object *gobj = NULL;
++ struct virtio_gpu_object *qobj = NULL;
+ int ret;
++ bool nowait = false;
+
+- obj = drm_gem_object_lookup(file, args->handle);
+- if (obj == NULL)
++ gobj = drm_gem_object_lookup(file, args->handle);
++ if (gobj == NULL)
+ return -ENOENT;
+
+- if (args->flags & VIRTGPU_WAIT_NOWAIT) {
+- ret = dma_resv_test_signaled_rcu(obj->resv, true);
+- } else {
+- ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
+- timeout);
+- }
+- if (ret == 0)
+- ret = -EBUSY;
+- else if (ret > 0)
+- ret = 0;
++ qobj = gem_to_virtio_gpu_obj(gobj);
+
+- drm_gem_object_put_unlocked(obj);
++ if (args->flags & VIRTGPU_WAIT_NOWAIT)
++ nowait = true;
++ ret = virtio_gpu_object_wait(qobj, nowait);
++
++ drm_gem_object_put_unlocked(gobj);
+ return ret;
+ }
+
+diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
+index c25e95c19cad..b382c6bf2c5c 100644
+--- a/drivers/hid/hidraw.c
++++ b/drivers/hid/hidraw.c
+@@ -249,13 +249,14 @@ out:
+ static __poll_t hidraw_poll(struct file *file, poll_table *wait)
+ {
+ struct hidraw_list *list = file->private_data;
++ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* hidraw is always writable */
+
+ poll_wait(file, &list->hidraw->wait, wait);
+ if (list->head != list->tail)
+- return EPOLLIN | EPOLLRDNORM;
++ mask |= EPOLLIN | EPOLLRDNORM;
+ if (!list->hidraw->exist)
+- return EPOLLERR | EPOLLHUP;
+- return EPOLLOUT | EPOLLWRNORM;
++ mask |= EPOLLERR | EPOLLHUP;
++ return mask;
+ }
+
+ static int hidraw_open(struct inode *inode, struct file *file)
+diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
+index 935c3d0a3b63..8fe3efcb8327 100644
+--- a/drivers/hid/uhid.c
++++ b/drivers/hid/uhid.c
+@@ -766,13 +766,14 @@ unlock:
+ static __poll_t uhid_char_poll(struct file *file, poll_table *wait)
+ {
+ struct uhid_device *uhid = file->private_data;
++ __poll_t mask = EPOLLOUT | EPOLLWRNORM; /* uhid is always writable */
+
+ poll_wait(file, &uhid->waitq, wait);
+
+ if (uhid->head != uhid->tail)
+- return EPOLLIN | EPOLLRDNORM;
++ mask |= EPOLLIN | EPOLLRDNORM;
+
+- return EPOLLOUT | EPOLLWRNORM;
++ return mask;
+ }
+
+ static const struct file_operations uhid_fops = {
+diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
+index e01b2b57e724..5ab901ad615d 100644
+--- a/drivers/i2c/busses/i2c-bcm2835.c
++++ b/drivers/i2c/busses/i2c-bcm2835.c
+@@ -58,6 +58,7 @@ struct bcm2835_i2c_dev {
+ struct i2c_adapter adapter;
+ struct completion completion;
+ struct i2c_msg *curr_msg;
++ struct clk *bus_clk;
+ int num_msgs;
+ u32 msg_err;
+ u8 *msg_buf;
+@@ -404,7 +405,6 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
+ struct resource *mem, *irq;
+ int ret;
+ struct i2c_adapter *adap;
+- struct clk *bus_clk;
+ struct clk *mclk;
+ u32 bus_clk_rate;
+
+@@ -427,11 +427,11 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
+ return PTR_ERR(mclk);
+ }
+
+- bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
++ i2c_dev->bus_clk = bcm2835_i2c_register_div(&pdev->dev, mclk, i2c_dev);
+
+- if (IS_ERR(bus_clk)) {
++ if (IS_ERR(i2c_dev->bus_clk)) {
+ dev_err(&pdev->dev, "Could not register clock\n");
+- return PTR_ERR(bus_clk);
++ return PTR_ERR(i2c_dev->bus_clk);
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+@@ -442,13 +442,13 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
+ bus_clk_rate = 100000;
+ }
+
+- ret = clk_set_rate_exclusive(bus_clk, bus_clk_rate);
++ ret = clk_set_rate_exclusive(i2c_dev->bus_clk, bus_clk_rate);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not set clock frequency\n");
+ return ret;
+ }
+
+- ret = clk_prepare_enable(bus_clk);
++ ret = clk_prepare_enable(i2c_dev->bus_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't prepare clock");
+ return ret;
+@@ -491,10 +491,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
+ static int bcm2835_i2c_remove(struct platform_device *pdev)
+ {
+ struct bcm2835_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
+- struct clk *bus_clk = devm_clk_get(i2c_dev->dev, "div");
+
+- clk_rate_exclusive_put(bus_clk);
+- clk_disable_unprepare(bus_clk);
++ clk_rate_exclusive_put(i2c_dev->bus_clk);
++ clk_disable_unprepare(i2c_dev->bus_clk);
+
+ free_irq(i2c_dev->irq, i2c_dev);
+ i2c_del_adapter(&i2c_dev->adapter);
+diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
+index 7b966a41d623..cf7b59d97802 100644
+--- a/drivers/iio/imu/adis16480.c
++++ b/drivers/iio/imu/adis16480.c
+@@ -454,12 +454,14 @@ static int adis16480_get_calibbias(struct iio_dev *indio_dev,
+ case IIO_MAGN:
+ case IIO_PRESSURE:
+ ret = adis_read_reg_16(&st->adis, reg, &val16);
+- *bias = sign_extend32(val16, 15);
++ if (ret == 0)
++ *bias = sign_extend32(val16, 15);
+ break;
+ case IIO_ANGL_VEL:
+ case IIO_ACCEL:
+ ret = adis_read_reg_32(&st->adis, reg, &val32);
+- *bias = sign_extend32(val32, 31);
++ if (ret == 0)
++ *bias = sign_extend32(val32, 31);
+ break;
+ default:
+ ret = -EINVAL;
+diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+index 28e011b35f21..3e0528793d95 100644
+--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
++++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+@@ -152,9 +152,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
+ .addr = 0x10,
+ .mask = GENMASK(4, 3),
+ },
+- .fs_avl[0] = { IIO_DEGREE_TO_RAD(245), 0x0 },
+- .fs_avl[1] = { IIO_DEGREE_TO_RAD(500), 0x1 },
+- .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 },
++
++ .fs_avl[0] = { IIO_DEGREE_TO_RAD(8750), 0x0 },
++ .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
++ .fs_avl[2] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+ .fs_len = 3,
+ },
+ },
+diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
+index 023478107f0e..46dd50ff7c85 100644
+--- a/drivers/infiniband/core/counters.c
++++ b/drivers/infiniband/core/counters.c
+@@ -466,10 +466,15 @@ static struct rdma_counter *rdma_get_counter_by_id(struct ib_device *dev,
+ int rdma_counter_bind_qpn(struct ib_device *dev, u8 port,
+ u32 qp_num, u32 counter_id)
+ {
++ struct rdma_port_counter *port_counter;
+ struct rdma_counter *counter;
+ struct ib_qp *qp;
+ int ret;
+
++ port_counter = &dev->port_data[port].port_counter;
++ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
++ return -EINVAL;
++
+ qp = rdma_counter_get_qp(dev, qp_num);
+ if (!qp)
+ return -ENOENT;
+@@ -506,6 +511,7 @@ err:
+ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
+ u32 qp_num, u32 *counter_id)
+ {
++ struct rdma_port_counter *port_counter;
+ struct rdma_counter *counter;
+ struct ib_qp *qp;
+ int ret;
+@@ -513,9 +519,13 @@ int rdma_counter_bind_qpn_alloc(struct ib_device *dev, u8 port,
+ if (!rdma_is_port_valid(dev, port))
+ return -EINVAL;
+
+- if (!dev->port_data[port].port_counter.hstats)
++ port_counter = &dev->port_data[port].port_counter;
++ if (!port_counter->hstats)
+ return -EOPNOTSUPP;
+
++ if (port_counter->mode.mode == RDMA_COUNTER_MODE_AUTO)
++ return -EINVAL;
++
+ qp = rdma_counter_get_qp(dev, qp_num);
+ if (!qp)
+ return -ENOENT;
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index b4149dc9e824..ebc3e3d4a6e2 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -3323,8 +3323,10 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
+ int rc;
+
+ rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
+- if (rc)
++ if (rc) {
+ dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
++ return rc;
++ }
+
+ if (mr->pages) {
+ rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 958c1ff9c515..4d07d22bfa7b 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -2283,13 +2283,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
+ /* Add qp to flush list of the CQ */
+ bnxt_qplib_add_flush_qp(qp);
+ } else {
++ /* Before we complete, do WA 9060 */
++ if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
++ cqe_sq_cons)) {
++ *lib_qp = qp;
++ goto out;
++ }
+ if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+- /* Before we complete, do WA 9060 */
+- if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
+- cqe_sq_cons)) {
+- *lib_qp = qp;
+- goto out;
+- }
+ cqe->status = CQ_REQ_STATUS_OK;
+ cqe++;
+ (*budget)--;
+diff --git a/drivers/infiniband/hw/hfi1/iowait.c b/drivers/infiniband/hw/hfi1/iowait.c
+index adb4a1ba921b..5836fe7b2817 100644
+--- a/drivers/infiniband/hw/hfi1/iowait.c
++++ b/drivers/infiniband/hw/hfi1/iowait.c
+@@ -81,7 +81,9 @@ void iowait_init(struct iowait *wait, u32 tx_limit,
+ void iowait_cancel_work(struct iowait *w)
+ {
+ cancel_work_sync(&iowait_get_ib_work(w)->iowork);
+- cancel_work_sync(&iowait_get_tid_work(w)->iowork);
++ /* Make sure that the iowork for TID RDMA is used */
++ if (iowait_get_tid_work(w)->iowork.func)
++ cancel_work_sync(&iowait_get_tid_work(w)->iowork);
+ }
+
+ /**
+diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
+index d602b698b57e..4921c1e40ccd 100644
+--- a/drivers/infiniband/hw/hns/Kconfig
++++ b/drivers/infiniband/hw/hns/Kconfig
+@@ -1,23 +1,34 @@
+ # SPDX-License-Identifier: GPL-2.0-only
+ config INFINIBAND_HNS
+- bool "HNS RoCE Driver"
++ tristate "HNS RoCE Driver"
+ depends on NET_VENDOR_HISILICON
+ depends on ARM64 || (COMPILE_TEST && 64BIT)
++ depends on (HNS_DSAF && HNS_ENET) || HNS3
+ ---help---
+ This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
+ is used in Hisilicon Hip06 and more further ICT SoC based on
+ platform device.
+
++ To compile HIP06 or HIP08 driver as module, choose M here.
++
+ config INFINIBAND_HNS_HIP06
+- tristate "Hisilicon Hip06 Family RoCE support"
++ bool "Hisilicon Hip06 Family RoCE support"
+ depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
++ depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
+ ---help---
+ RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
+ Hip07 SoC. These RoCE engines are platform devices.
+
++ To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
++ module will be called hns-roce-hw-v1
++
+ config INFINIBAND_HNS_HIP08
+- tristate "Hisilicon Hip08 Family RoCE support"
++ bool "Hisilicon Hip08 Family RoCE support"
+ depends on INFINIBAND_HNS && PCI && HNS3
++ depends on INFINIBAND_HNS=m || HNS3=y
+ ---help---
+ RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
+ The RoCE engine is a PCI device.
++
++ To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
++ module will be called hns-roce-hw-v2.
+diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
+index 449a2d81319d..e105945b94a1 100644
+--- a/drivers/infiniband/hw/hns/Makefile
++++ b/drivers/infiniband/hw/hns/Makefile
+@@ -9,8 +9,12 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
+ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
+ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
+
++ifdef CONFIG_INFINIBAND_HNS_HIP06
+ hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
+-obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
++obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
++endif
+
++ifdef CONFIG_INFINIBAND_HNS_HIP08
+ hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
+-obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
++obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
++endif
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index e82567fcdeb7..79294f278b26 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -389,7 +389,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_M,
+ V2_UD_SEND_WQE_BYTE_36_VLAN_S,
+- le16_to_cpu(ah->av.vlan));
++ ah->av.vlan);
+ roce_set_field(ud_sq_wqe->byte_36,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
+ V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
+@@ -4650,16 +4650,14 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ {
+ struct hns_roce_cq *send_cq, *recv_cq;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+- int ret;
++ int ret = 0;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_RC && hr_qp->state != IB_QPS_RESET) {
+ /* Modify qp to reset before destroying qp */
+ ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
+ hr_qp->state, IB_QPS_RESET);
+- if (ret) {
++ if (ret)
+ ibdev_err(ibdev, "modify QP to Reset failed.\n");
+- return ret;
+- }
+ }
+
+ send_cq = to_hr_cq(hr_qp->ibqp.send_cq);
+@@ -4715,7 +4713,7 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
+ kfree(hr_qp->rq_inl_buf.wqe_list);
+ }
+
+- return 0;
++ return ret;
+ }
+
+ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+@@ -4725,11 +4723,9 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
+ int ret;
+
+ ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
+- if (ret) {
++ if (ret)
+ ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n",
+ hr_qp->qpn, ret);
+- return ret;
+- }
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ kfree(hr_to_hr_sqp(hr_qp));
+@@ -6092,11 +6088,11 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
+ SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_S,
+- hr_dev->caps.idx_ba_pg_sz);
++ hr_dev->caps.idx_ba_pg_sz + PG_SHIFT_OFFSET);
+ roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
+ SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_M,
+ SRQC_BYTE_44_SRQ_IDX_BUF_PG_SZ_S,
+- hr_dev->caps.idx_buf_pg_sz);
++ hr_dev->caps.idx_buf_pg_sz + PG_SHIFT_OFFSET);
+
+ srq_context->idx_nxt_blk_addr =
+ cpu_to_le32(mtts_idx[1] >> PAGE_ADDR_SHIFT);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+index 43219d2f7de0..76a14db7028d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+@@ -87,8 +87,8 @@
+ #define HNS_ROCE_V2_MTT_ENTRY_SZ 64
+ #define HNS_ROCE_V2_CQE_ENTRY_SIZE 32
+ #define HNS_ROCE_V2_SCCC_ENTRY_SZ 32
+-#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ 4096
+-#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ 4096
++#define HNS_ROCE_V2_QPC_TIMER_ENTRY_SZ PAGE_SIZE
++#define HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ PAGE_SIZE
+ #define HNS_ROCE_V2_PAGE_SIZE_SUPPORTED 0xFFFFF000
+ #define HNS_ROCE_V2_MAX_INNER_MTPT_NUM 2
+ #define HNS_ROCE_INVALID_LKEY 0x100
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index bd78ff90d998..8dd2d666f687 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -332,9 +332,8 @@ static int check_sq_size_with_integrity(struct hns_roce_dev *hr_dev,
+ u8 max_sq_stride = ilog2(roundup_sq_stride);
+
+ /* Sanity check SQ size before proceeding */
+- if ((u32)(1 << ucmd->log_sq_bb_count) > hr_dev->caps.max_wqes ||
+- ucmd->log_sq_stride > max_sq_stride ||
+- ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
++ if (ucmd->log_sq_stride > max_sq_stride ||
++ ucmd->log_sq_stride < HNS_ROCE_IB_MIN_SQ_STRIDE) {
+ ibdev_err(&hr_dev->ib_dev, "check SQ size error!\n");
+ return -EINVAL;
+ }
+@@ -358,13 +357,16 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
+ u32 max_cnt;
+ int ret;
+
++ if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
++ hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
++ return -EINVAL;
++
+ ret = check_sq_size_with_integrity(hr_dev, cap, ucmd);
+ if (ret) {
+ ibdev_err(&hr_dev->ib_dev, "Sanity check sq size failed\n");
+ return ret;
+ }
+
+- hr_qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
+ hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
+
+ max_cnt = max(1U, cap->max_send_sge);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+index 06871731ac43..39c08217e861 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
++++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
+@@ -95,7 +95,7 @@ static int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
+
+ ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);
+ if (ret)
+- goto err;
++ return -EINVAL;
+
+ table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
+ if (!table_attr) {
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 7019c12005f4..99d563dba91b 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -428,7 +428,7 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
+
+ if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
+ mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
+- return NULL;
++ return ERR_PTR(-EINVAL);
+ }
+
+ ent = &cache->ent[entry];
+diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
+index 8c1931a57f4a..0454561718d9 100644
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -1867,14 +1867,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
+ list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ cep->state = SIW_EPSTATE_LISTENING;
+
+- if (addr_family == AF_INET)
+- siw_dbg(id->device, "Listen at laddr %pI4 %u\n",
+- &(((struct sockaddr_in *)laddr)->sin_addr),
+- ((struct sockaddr_in *)laddr)->sin_port);
+- else
+- siw_dbg(id->device, "Listen at laddr %pI6 %u\n",
+- &(((struct sockaddr_in6 *)laddr)->sin6_addr),
+- ((struct sockaddr_in6 *)laddr)->sin6_port);
++ siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
+
+ return 0;
+
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index e25c70a56be6..02b92e3cd9a8 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1364,9 +1364,11 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx, u64 tag,
+ int status)
+ {
++ struct se_cmd *cmd = &ioctx->cmd;
+ struct srp_rsp *srp_rsp;
+ const u8 *sense_data;
+ int sense_data_len, max_sense_len;
++ u32 resid = cmd->residual_count;
+
+ /*
+ * The lowest bit of all SAM-3 status codes is zero (see also
+@@ -1388,6 +1390,28 @@ static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
+ srp_rsp->tag = tag;
+ srp_rsp->status = status;
+
++ if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
++ if (cmd->data_direction == DMA_TO_DEVICE) {
++ /* residual data from an underflow write */
++ srp_rsp->flags = SRP_RSP_FLAG_DOUNDER;
++ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
++ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
++ /* residual data from an underflow read */
++ srp_rsp->flags = SRP_RSP_FLAG_DIUNDER;
++ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
++ }
++ } else if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
++ if (cmd->data_direction == DMA_TO_DEVICE) {
++ /* residual data from an overflow write */
++ srp_rsp->flags = SRP_RSP_FLAG_DOOVER;
++ srp_rsp->data_out_res_cnt = cpu_to_be32(resid);
++ } else if (cmd->data_direction == DMA_FROM_DEVICE) {
++ /* residual data from an overflow read */
++ srp_rsp->flags = SRP_RSP_FLAG_DIOVER;
++ srp_rsp->data_in_res_cnt = cpu_to_be32(resid);
++ }
++ }
++
+ if (sense_data_len) {
+ BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
+ max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 67aa317de6db..e84c5dfe146f 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -5593,8 +5593,10 @@ static int intel_iommu_add_device(struct device *dev)
+
+ group = iommu_group_get_for_dev(dev);
+
+- if (IS_ERR(group))
+- return PTR_ERR(group);
++ if (IS_ERR(group)) {
++ ret = PTR_ERR(group);
++ goto unlink;
++ }
+
+ iommu_group_put(group);
+
+@@ -5620,7 +5622,8 @@ static int intel_iommu_add_device(struct device *dev)
+ if (!get_private_domain_for_dev(dev)) {
+ dev_warn(dev,
+ "Failed to get a private domain.\n");
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto unlink;
+ }
+
+ dev_info(dev,
+@@ -5635,6 +5638,10 @@ static int intel_iommu_add_device(struct device *dev)
+ }
+
+ return 0;
++
++unlink:
++ iommu_device_unlink(&iommu->iommu, dev);
++ return ret;
+ }
+
+ static void intel_iommu_remove_device(struct device *dev)
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 24248aa8a7e5..cd3c0ea56657 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -751,6 +751,7 @@ err_put_group:
+ mutex_unlock(&group->mutex);
+ dev->iommu_group = NULL;
+ kobject_put(group->devices_kobj);
++ sysfs_remove_link(group->devices_kobj, device->name);
+ err_free_name:
+ kfree(device->name);
+ err_remove_link:
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 67a483c1a935..c2f6c78fee44 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -219,22 +219,37 @@ static void mtk_iommu_tlb_sync(void *cookie)
+ static void mtk_iommu_tlb_flush_walk(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+ {
++ struct mtk_iommu_data *data = cookie;
++ unsigned long flags;
++
++ spin_lock_irqsave(&data->tlb_lock, flags);
+ mtk_iommu_tlb_add_flush_nosync(iova, size, granule, false, cookie);
+ mtk_iommu_tlb_sync(cookie);
++ spin_unlock_irqrestore(&data->tlb_lock, flags);
+ }
+
+ static void mtk_iommu_tlb_flush_leaf(unsigned long iova, size_t size,
+ size_t granule, void *cookie)
+ {
++ struct mtk_iommu_data *data = cookie;
++ unsigned long flags;
++
++ spin_lock_irqsave(&data->tlb_lock, flags);
+ mtk_iommu_tlb_add_flush_nosync(iova, size, granule, true, cookie);
+ mtk_iommu_tlb_sync(cookie);
++ spin_unlock_irqrestore(&data->tlb_lock, flags);
+ }
+
+ static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t granule,
+ void *cookie)
+ {
++ struct mtk_iommu_data *data = cookie;
++ unsigned long flags;
++
++ spin_lock_irqsave(&data->tlb_lock, flags);
+ mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
++ spin_unlock_irqrestore(&data->tlb_lock, flags);
+ }
+
+ static const struct iommu_flush_ops mtk_iommu_flush_ops = {
+@@ -447,13 +462,18 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
+
+ static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
+ {
+- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
++ mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
+ }
+
+ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
+ struct iommu_iotlb_gather *gather)
+ {
+- mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
++ struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
++ unsigned long flags;
++
++ spin_lock_irqsave(&data->tlb_lock, flags);
++ mtk_iommu_tlb_sync(data);
++ spin_unlock_irqrestore(&data->tlb_lock, flags);
+ }
+
+ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
+@@ -733,6 +753,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
++ spin_lock_init(&data->tlb_lock);
+ list_add_tail(&data->list, &m4ulist);
+
+ if (!iommu_present(&platform_bus_type))
+diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
+index fc0f16eabacd..8cae22de7663 100644
+--- a/drivers/iommu/mtk_iommu.h
++++ b/drivers/iommu/mtk_iommu.h
+@@ -58,6 +58,7 @@ struct mtk_iommu_data {
+ struct iommu_group *m4u_group;
+ bool enable_4GB;
+ bool tlb_flush_active;
++ spinlock_t tlb_lock; /* lock for tlb range flush */
+
+ struct iommu_device iommu;
+ const struct mtk_iommu_plat_data *plat_data;
+diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
+index a5b2448c0abc..af482620f94a 100644
+--- a/drivers/media/i2c/ov6650.c
++++ b/drivers/media/i2c/ov6650.c
+@@ -201,7 +201,6 @@ struct ov6650 {
+ unsigned long pclk_max; /* from resolution and format */
+ struct v4l2_fract tpf; /* as requested with s_frame_interval */
+ u32 code;
+- enum v4l2_colorspace colorspace;
+ };
+
+
+@@ -214,6 +213,17 @@ static u32 ov6650_codes[] = {
+ MEDIA_BUS_FMT_Y8_1X8,
+ };
+
++static const struct v4l2_mbus_framefmt ov6650_def_fmt = {
++ .width = W_CIF,
++ .height = H_CIF,
++ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
++ .colorspace = V4L2_COLORSPACE_SRGB,
++ .field = V4L2_FIELD_NONE,
++ .ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
++ .quantization = V4L2_QUANTIZATION_DEFAULT,
++ .xfer_func = V4L2_XFER_FUNC_DEFAULT,
++};
++
+ /* read a register */
+ static int ov6650_reg_read(struct i2c_client *client, u8 reg, u8 *val)
+ {
+@@ -514,12 +524,20 @@ static int ov6650_get_fmt(struct v4l2_subdev *sd,
+ if (format->pad)
+ return -EINVAL;
+
+- mf->width = priv->rect.width >> priv->half_scale;
+- mf->height = priv->rect.height >> priv->half_scale;
+- mf->code = priv->code;
+- mf->colorspace = priv->colorspace;
+- mf->field = V4L2_FIELD_NONE;
++ /* initialize response with default media bus frame format */
++ *mf = ov6650_def_fmt;
++
++ /* update media bus format code and frame size */
++ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
++ mf->width = cfg->try_fmt.width;
++ mf->height = cfg->try_fmt.height;
++ mf->code = cfg->try_fmt.code;
+
++ } else {
++ mf->width = priv->rect.width >> priv->half_scale;
++ mf->height = priv->rect.height >> priv->half_scale;
++ mf->code = priv->code;
++ }
+ return 0;
+ }
+
+@@ -624,11 +642,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+ priv->pclk_max = 8000000;
+ }
+
+- if (code == MEDIA_BUS_FMT_SBGGR8_1X8)
+- priv->colorspace = V4L2_COLORSPACE_SRGB;
+- else if (code != 0)
+- priv->colorspace = V4L2_COLORSPACE_JPEG;
+-
+ if (half_scale) {
+ dev_dbg(&client->dev, "max resolution: QCIF\n");
+ coma_set |= COMA_QCIF;
+@@ -662,11 +675,6 @@ static int ov6650_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
+ if (!ret)
+ priv->code = code;
+
+- if (!ret) {
+- mf->colorspace = priv->colorspace;
+- mf->width = priv->rect.width >> half_scale;
+- mf->height = priv->rect.height >> half_scale;
+- }
+ return ret;
+ }
+
+@@ -685,8 +693,6 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
+ v4l_bound_align_image(&mf->width, 2, W_CIF, 1,
+ &mf->height, 2, H_CIF, 1, 0);
+
+- mf->field = V4L2_FIELD_NONE;
+-
+ switch (mf->code) {
+ case MEDIA_BUS_FMT_Y10_1X10:
+ mf->code = MEDIA_BUS_FMT_Y8_1X8;
+@@ -696,20 +702,39 @@ static int ov6650_set_fmt(struct v4l2_subdev *sd,
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+- mf->colorspace = V4L2_COLORSPACE_JPEG;
+ break;
+ default:
+ mf->code = MEDIA_BUS_FMT_SBGGR8_1X8;
+ /* fall through */
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+- mf->colorspace = V4L2_COLORSPACE_SRGB;
+ break;
+ }
+
+- if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+- return ov6650_s_fmt(sd, mf);
+- cfg->try_fmt = *mf;
++ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
++ /* store media bus format code and frame size in pad config */
++ cfg->try_fmt.width = mf->width;
++ cfg->try_fmt.height = mf->height;
++ cfg->try_fmt.code = mf->code;
+
++ /* return default mbus frame format updated with pad config */
++ *mf = ov6650_def_fmt;
++ mf->width = cfg->try_fmt.width;
++ mf->height = cfg->try_fmt.height;
++ mf->code = cfg->try_fmt.code;
++
++ } else {
++ /* apply new media bus format code and frame size */
++ int ret = ov6650_s_fmt(sd, mf);
++
++ if (ret)
++ return ret;
++
++ /* return default format updated with active size and code */
++ *mf = ov6650_def_fmt;
++ mf->width = priv->rect.width >> priv->half_scale;
++ mf->height = priv->rect.height >> priv->half_scale;
++ mf->code = priv->code;
++ }
+ return 0;
+ }
+
+@@ -852,6 +877,11 @@ static int ov6650_video_probe(struct v4l2_subdev *sd)
+ ret = ov6650_reset(client);
+ if (!ret)
+ ret = ov6650_prog_dflt(client);
++ if (!ret) {
++ struct v4l2_mbus_framefmt mf = ov6650_def_fmt;
++
++ ret = ov6650_s_fmt(sd, &mf);
++ }
+ if (!ret)
+ ret = v4l2_ctrl_handler_setup(&priv->hdl);
+
+@@ -1006,9 +1036,6 @@ static int ov6650_probe(struct i2c_client *client,
+ priv->rect.top = DEF_VSTRT << 1;
+ priv->rect.width = W_CIF;
+ priv->rect.height = H_CIF;
+- priv->half_scale = false;
+- priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
+- priv->colorspace = V4L2_COLORSPACE_JPEG;
+
+ /* Hardware default frame interval */
+ priv->tpf.numerator = GET_CLKRC_DIV(DEF_CLKRC);
+diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
+index 096a7c9a8963..4eaaf39b9223 100644
+--- a/drivers/media/platform/aspeed-video.c
++++ b/drivers/media/platform/aspeed-video.c
+@@ -1658,7 +1658,8 @@ static int aspeed_video_probe(struct platform_device *pdev)
+ {
+ int rc;
+ struct resource *res;
+- struct aspeed_video *video = kzalloc(sizeof(*video), GFP_KERNEL);
++ struct aspeed_video *video =
++ devm_kzalloc(&pdev->dev, sizeof(*video), GFP_KERNEL);
+
+ if (!video)
+ return -ENOMEM;
+diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
+index 31ace114eda1..be9ec59774d6 100644
+--- a/drivers/media/platform/cadence/cdns-csi2rx.c
++++ b/drivers/media/platform/cadence/cdns-csi2rx.c
+@@ -129,7 +129,7 @@ static int csi2rx_start(struct csi2rx_priv *csi2rx)
+ */
+ for (i = csi2rx->num_lanes; i < csi2rx->max_lanes; i++) {
+ unsigned int idx = find_first_zero_bit(&lanes_used,
+- sizeof(lanes_used));
++ csi2rx->max_lanes);
+ set_bit(idx, &lanes_used);
+ reg |= CSI2RX_STATIC_CFG_DLANE_MAP(i, i + 1);
+ }
+diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
+index 73222c0615c0..834f11fe9dc2 100644
+--- a/drivers/media/platform/coda/coda-common.c
++++ b/drivers/media/platform/coda/coda-common.c
+@@ -1084,16 +1084,16 @@ static int coda_decoder_cmd(struct file *file, void *fh,
+
+ switch (dc->cmd) {
+ case V4L2_DEC_CMD_START:
+- mutex_lock(&ctx->bitstream_mutex);
+ mutex_lock(&dev->coda_mutex);
++ mutex_lock(&ctx->bitstream_mutex);
+ coda_bitstream_flush(ctx);
+- mutex_unlock(&dev->coda_mutex);
+ dst_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ ctx->bit_stream_param &= ~CODA_BIT_STREAM_END_FLAG;
+ coda_fill_bitstream(ctx, NULL);
+ mutex_unlock(&ctx->bitstream_mutex);
++ mutex_unlock(&dev->coda_mutex);
+ break;
+ case V4L2_DEC_CMD_STOP:
+ stream_end = false;
+diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
+index 378cc302e1f8..d2cbcdca0463 100644
+--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
++++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
+@@ -313,7 +313,7 @@ static int isp_video_release(struct file *file)
+ ivc->streaming = 0;
+ }
+
+- vb2_fop_release(file);
++ _vb2_fop_release(file, NULL);
+
+ if (v4l2_fh_is_singular_file(file)) {
+ fimc_pipeline_call(&ivc->ve, close);
+diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
+index cbc1c07f0a96..ec2796413e26 100644
+--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
++++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
+@@ -208,6 +208,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
+ ret = v4l2_subdev_call(sd, pad, set_fmt, pad_cfg, &format);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ goto done;
++ ret = 0;
+
+ v4l2_fill_pix_format(pix, &format.format);
+
+@@ -242,7 +243,7 @@ static int rvin_try_format(struct rvin_dev *vin, u32 which,
+ done:
+ v4l2_subdev_free_pad_config(pad_cfg);
+
+- return 0;
++ return ret;
+ }
+
+ static int rvin_querycap(struct file *file, void *priv,
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index 439d7d886873..a113e811faab 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -366,6 +366,8 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
+
+ static const struct dev_pm_ops smi_larb_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_smi_larb_suspend, mtk_smi_larb_resume, NULL)
++ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
++ pm_runtime_force_resume)
+ };
+
+ static struct platform_driver mtk_smi_larb_driver = {
+@@ -507,6 +509,8 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
+
+ static const struct dev_pm_ops smi_common_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_smi_common_suspend, mtk_smi_common_resume, NULL)
++ SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
++ pm_runtime_force_resume)
+ };
+
+ static struct platform_driver mtk_smi_common_driver = {
+diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
+index 6d27ccfe0680..3c2d405bc79b 100644
+--- a/drivers/misc/enclosure.c
++++ b/drivers/misc/enclosure.c
+@@ -406,10 +406,9 @@ int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
+ cdev = &edev->component[i];
+ if (cdev->dev == dev) {
+ enclosure_remove_links(cdev);
+- device_del(&cdev->cdev);
+ put_device(dev);
+ cdev->dev = NULL;
+- return device_add(&cdev->cdev);
++ return 0;
+ }
+ }
+ return -ENODEV;
+diff --git a/drivers/mtd/nand/onenand/omap2.c b/drivers/mtd/nand/onenand/omap2.c
+index edf94ee54ec7..71a632b815aa 100644
+--- a/drivers/mtd/nand/onenand/omap2.c
++++ b/drivers/mtd/nand/onenand/omap2.c
+@@ -328,7 +328,8 @@ static inline int omap2_onenand_dma_transfer(struct omap2_onenand *c,
+ struct dma_async_tx_descriptor *tx;
+ dma_cookie_t cookie;
+
+- tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count, 0);
++ tx = dmaengine_prep_dma_memcpy(c->dma_chan, dst, src, count,
++ DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ if (!tx) {
+ dev_err(&c->pdev->dev, "Failed to prepare DMA memcpy\n");
+ return -EIO;
+diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+index 8cc852dc7d54..5c06e0b4d4ef 100644
+--- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c
++++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c
+@@ -37,6 +37,7 @@
+ /* Max ECC buffer length */
+ #define FMC2_MAX_ECC_BUF_LEN (FMC2_BCHDSRS_LEN * FMC2_MAX_SG)
+
++#define FMC2_TIMEOUT_US 1000
+ #define FMC2_TIMEOUT_MS 1000
+
+ /* Timings */
+@@ -53,6 +54,8 @@
+ #define FMC2_PMEM 0x88
+ #define FMC2_PATT 0x8c
+ #define FMC2_HECCR 0x94
++#define FMC2_ISR 0x184
++#define FMC2_ICR 0x188
+ #define FMC2_CSQCR 0x200
+ #define FMC2_CSQCFGR1 0x204
+ #define FMC2_CSQCFGR2 0x208
+@@ -118,6 +121,12 @@
+ #define FMC2_PATT_ATTHIZ(x) (((x) & 0xff) << 24)
+ #define FMC2_PATT_DEFAULT 0x0a0a0a0a
+
++/* Register: FMC2_ISR */
++#define FMC2_ISR_IHLF BIT(1)
++
++/* Register: FMC2_ICR */
++#define FMC2_ICR_CIHLF BIT(1)
++
+ /* Register: FMC2_CSQCR */
+ #define FMC2_CSQCR_CSQSTART BIT(0)
+
+@@ -1322,6 +1331,31 @@ static void stm32_fmc2_write_data(struct nand_chip *chip, const void *buf,
+ stm32_fmc2_set_buswidth_16(fmc2, true);
+ }
+
++static int stm32_fmc2_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
++{
++ struct stm32_fmc2_nfc *fmc2 = to_stm32_nfc(chip->controller);
++ const struct nand_sdr_timings *timings;
++ u32 isr, sr;
++
++ /* Check if there is no pending requests to the NAND flash */
++ if (readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_SR, sr,
++ sr & FMC2_SR_NWRF, 1,
++ FMC2_TIMEOUT_US))
++ dev_warn(fmc2->dev, "Waitrdy timeout\n");
++
++ /* Wait tWB before R/B# signal is low */
++ timings = nand_get_sdr_timings(&chip->data_interface);
++ ndelay(PSEC_TO_NSEC(timings->tWB_max));
++
++ /* R/B# signal is low, clear high level flag */
++ writel_relaxed(FMC2_ICR_CIHLF, fmc2->io_base + FMC2_ICR);
++
++ /* Wait R/B# signal is high */
++ return readl_relaxed_poll_timeout_atomic(fmc2->io_base + FMC2_ISR,
++ isr, isr & FMC2_ISR_IHLF,
++ 5, 1000 * timeout_ms);
++}
++
+ static int stm32_fmc2_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only)
+@@ -1366,8 +1400,8 @@ static int stm32_fmc2_exec_op(struct nand_chip *chip,
+ break;
+
+ case NAND_OP_WAITRDY_INSTR:
+- ret = nand_soft_waitrdy(chip,
+- instr->ctx.waitrdy.timeout_ms);
++ ret = stm32_fmc2_waitrdy(chip,
++ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+ }
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index 7acf4a93b592..1548e0f7f5f4 100644
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -2544,7 +2544,7 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
+ {
+ struct spi_nor *nor = mtd_to_spi_nor(mtd);
+- int ret;
++ ssize_t ret;
+
+ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+@@ -2865,7 +2865,7 @@ static int spi_nor_hwcaps_pp2cmd(u32 hwcaps)
+ */
+ static int spi_nor_read_raw(struct spi_nor *nor, u32 addr, size_t len, u8 *buf)
+ {
+- int ret;
++ ssize_t ret;
+
+ while (len) {
+ ret = spi_nor_read_data(nor, addr, len, buf);
+diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+index 159490f5a111..60731e07f681 100644
+--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
++++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+@@ -84,7 +84,7 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
+ val = swahb32(val);
+ }
+
+- __raw_writel(val, mem + reg);
++ iowrite32(val, mem + reg);
+ usleep_range(100, 120);
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index d9eb2b286438..c59cbb8cbdd7 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -514,6 +514,18 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
+ struct iwl_phy_cfg_cmd phy_cfg_cmd;
+ enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
+
++ if (iwl_mvm_has_unified_ucode(mvm) &&
++ !mvm->trans->cfg->tx_with_siso_diversity) {
++ return 0;
++ } else if (mvm->trans->cfg->tx_with_siso_diversity) {
++ /*
++ * TODO: currently we don't set the antenna but letting the NIC
++ * to decide which antenna to use. This should come from BIOS.
++ */
++ phy_cfg_cmd.phy_cfg =
++ cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
++ }
++
+ /* Set parameters */
+ phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
+
+@@ -1344,12 +1356,12 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
+ ret = iwl_send_phy_db_data(mvm->phy_db);
+ if (ret)
+ goto error;
+-
+- ret = iwl_send_phy_cfg_cmd(mvm);
+- if (ret)
+- goto error;
+ }
+
++ ret = iwl_send_phy_cfg_cmd(mvm);
++ if (ret)
++ goto error;
++
+ ret = iwl_mvm_send_bt_init_conf(mvm);
+ if (ret)
+ goto error;
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+index 8f50e2b121bd..098d48153a38 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+@@ -350,7 +350,13 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
+ u16 size = le32_to_cpu(notif->amsdu_size);
+ int i;
+
+- if (WARN_ON(sta->max_amsdu_len < size))
++ /*
++ * In debug sta->max_amsdu_len < size
++ * so also check with orig_amsdu_len which holds the original
++ * data before debugfs changed the value
++ */
++ if (WARN_ON(sta->max_amsdu_len < size &&
++ mvmsta->orig_amsdu_len < size))
+ goto out;
+
+ mvmsta->amsdu_enabled = le32_to_cpu(notif->amsdu_enabled);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 8a059da7a1fa..e3b2a2bf3863 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -935,7 +935,12 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+ !(mvmsta->amsdu_enabled & BIT(tid)))
+ return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
+
+- max_amsdu_len = iwl_mvm_max_amsdu_size(mvm, sta, tid);
++ /*
++ * Take the min of ieee80211 station and mvm station
++ */
++ max_amsdu_len =
++ min_t(unsigned int, sta->max_amsdu_len,
++ iwl_mvm_max_amsdu_size(mvm, sta, tid));
+
+ /*
+ * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
+diff --git a/drivers/net/wireless/realtek/rtlwifi/regd.c b/drivers/net/wireless/realtek/rtlwifi/regd.c
+index c10432cd703e..8be31e0ad878 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/regd.c
++++ b/drivers/net/wireless/realtek/rtlwifi/regd.c
+@@ -386,7 +386,7 @@ int rtl_regd_init(struct ieee80211_hw *hw,
+ struct wiphy *wiphy = hw->wiphy;
+ struct country_code_to_enum_rd *country = NULL;
+
+- if (wiphy == NULL || &rtlpriv->regd == NULL)
++ if (!wiphy)
+ return -EINVAL;
+
+ /* init country_code from efuse channel plan */
+diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
+index 760eaffeebd6..23a1d00b5f38 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
++++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
+@@ -793,7 +793,7 @@ static int rsi_probe(struct usb_interface *pfunction,
+ adapter->device_model = RSI_DEV_9116;
+ } else {
+ rsi_dbg(ERR_ZONE, "%s: Unsupported RSI device id 0x%x\n",
+- __func__, id->idProduct);
++ __func__, id ? id->idProduct : 0x0);
+ goto err1;
+ }
+
+diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c
+index e35e9eaa50ee..b927a92e3463 100644
+--- a/drivers/pci/controller/dwc/pci-meson.c
++++ b/drivers/pci/controller/dwc/pci-meson.c
+@@ -250,15 +250,15 @@ static int meson_pcie_probe_clocks(struct meson_pcie *mp)
+ if (IS_ERR(res->port_clk))
+ return PTR_ERR(res->port_clk);
+
+- res->mipi_gate = meson_pcie_probe_clock(dev, "pcie_mipi_en", 0);
++ res->mipi_gate = meson_pcie_probe_clock(dev, "mipi", 0);
+ if (IS_ERR(res->mipi_gate))
+ return PTR_ERR(res->mipi_gate);
+
+- res->general_clk = meson_pcie_probe_clock(dev, "pcie_general", 0);
++ res->general_clk = meson_pcie_probe_clock(dev, "general", 0);
+ if (IS_ERR(res->general_clk))
+ return PTR_ERR(res->general_clk);
+
+- res->clk = meson_pcie_probe_clock(dev, "pcie", 0);
++ res->clk = meson_pcie_probe_clock(dev, "pclk", 0);
+ if (IS_ERR(res->clk))
+ return PTR_ERR(res->clk);
+
+diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
+index 0f36a926059a..8615f1548882 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-host.c
++++ b/drivers/pci/controller/dwc/pcie-designware-host.c
+@@ -78,7 +78,8 @@ static struct msi_domain_info dw_pcie_msi_domain_info = {
+ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+ {
+ int i, pos, irq;
+- u32 val, num_ctrls;
++ unsigned long val;
++ u32 status, num_ctrls;
+ irqreturn_t ret = IRQ_NONE;
+
+ num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
+@@ -86,14 +87,14 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
+ for (i = 0; i < num_ctrls; i++) {
+ dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
+ (i * MSI_REG_CTRL_BLOCK_SIZE),
+- 4, &val);
+- if (!val)
++ 4, &status);
++ if (!status)
+ continue;
+
+ ret = IRQ_HANDLED;
++ val = status;
+ pos = 0;
+- while ((pos = find_next_bit((unsigned long *) &val,
+- MAX_MSI_IRQS_PER_CTRL,
++ while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
+ pos)) != MAX_MSI_IRQS_PER_CTRL) {
+ irq = irq_find_mapping(pp->irq_domain,
+ (i * MAX_MSI_IRQS_PER_CTRL) +
+diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
+index fc0fe4d4de49..97245e076548 100644
+--- a/drivers/pci/controller/pci-aardvark.c
++++ b/drivers/pci/controller/pci-aardvark.c
+@@ -180,6 +180,8 @@
+ #define LINK_WAIT_MAX_RETRIES 10
+ #define LINK_WAIT_USLEEP_MIN 90000
+ #define LINK_WAIT_USLEEP_MAX 100000
++#define RETRAIN_WAIT_MAX_RETRIES 10
++#define RETRAIN_WAIT_USLEEP_US 2000
+
+ #define MSI_IRQ_NUM 32
+
+@@ -239,6 +241,17 @@ static int advk_pcie_wait_for_link(struct advk_pcie *pcie)
+ return -ETIMEDOUT;
+ }
+
++static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
++{
++ size_t retries;
++
++ for (retries = 0; retries < RETRAIN_WAIT_MAX_RETRIES; ++retries) {
++ if (!advk_pcie_link_up(pcie))
++ break;
++ udelay(RETRAIN_WAIT_USLEEP_US);
++ }
++}
++
+ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
+ {
+ u32 reg;
+@@ -415,7 +428,7 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+
+ case PCI_EXP_RTCTL: {
+ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG);
+- *value = (val & PCIE_MSG_PM_PME_MASK) ? PCI_EXP_RTCTL_PMEIE : 0;
++ *value = (val & PCIE_MSG_PM_PME_MASK) ? 0 : PCI_EXP_RTCTL_PMEIE;
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
+@@ -426,11 +439,20 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
+ return PCI_BRIDGE_EMUL_HANDLED;
+ }
+
++ case PCI_EXP_LNKCTL: {
++ /* u32 contains both PCI_EXP_LNKCTL and PCI_EXP_LNKSTA */
++ u32 val = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg) &
++ ~(PCI_EXP_LNKSTA_LT << 16);
++ if (!advk_pcie_link_up(pcie))
++ val |= (PCI_EXP_LNKSTA_LT << 16);
++ *value = val;
++ return PCI_BRIDGE_EMUL_HANDLED;
++ }
++
+ case PCI_CAP_LIST_ID:
+ case PCI_EXP_DEVCAP:
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_LNKCAP:
+- case PCI_EXP_LNKCTL:
+ *value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
+ return PCI_BRIDGE_EMUL_HANDLED;
+ default:
+@@ -447,14 +469,24 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
+
+ switch (reg) {
+ case PCI_EXP_DEVCTL:
++ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
++ break;
++
+ case PCI_EXP_LNKCTL:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
++ if (new & PCI_EXP_LNKCTL_RL)
++ advk_pcie_wait_for_retrain(pcie);
+ break;
+
+- case PCI_EXP_RTCTL:
+- new = (new & PCI_EXP_RTCTL_PMEIE) << 3;
+- advk_writel(pcie, new, PCIE_ISR0_MASK_REG);
++ case PCI_EXP_RTCTL: {
++ /* Only mask/unmask PME interrupt */
++ u32 val = advk_readl(pcie, PCIE_ISR0_MASK_REG) &
++ ~PCIE_MSG_PM_PME_MASK;
++ if ((new & PCI_EXP_RTCTL_PMEIE) == 0)
++ val |= PCIE_MSG_PM_PME_MASK;
++ advk_writel(pcie, val, PCIE_ISR0_MASK_REG);
+ break;
++ }
+
+ case PCI_EXP_RTSTA:
+ new = (new & PCI_EXP_RTSTA_PME) >> 9;
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index b3122c151b80..56daad828c9e 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -253,7 +253,7 @@ static bool pme_is_native(struct pcie_device *dev)
+ return pcie_ports_native || host->native_pme;
+ }
+
+-static int pciehp_suspend(struct pcie_device *dev)
++static void pciehp_disable_interrupt(struct pcie_device *dev)
+ {
+ /*
+ * Disable hotplug interrupt so that it does not trigger
+@@ -261,7 +261,19 @@ static int pciehp_suspend(struct pcie_device *dev)
+ */
+ if (pme_is_native(dev))
+ pcie_disable_interrupt(get_service_data(dev));
++}
+
++#ifdef CONFIG_PM_SLEEP
++static int pciehp_suspend(struct pcie_device *dev)
++{
++ /*
++ * If the port is already runtime suspended we can keep it that
++ * way.
++ */
++ if (dev_pm_smart_suspend_and_suspended(&dev->port->dev))
++ return 0;
++
++ pciehp_disable_interrupt(dev);
+ return 0;
+ }
+
+@@ -279,6 +291,7 @@ static int pciehp_resume_noirq(struct pcie_device *dev)
+
+ return 0;
+ }
++#endif
+
+ static int pciehp_resume(struct pcie_device *dev)
+ {
+@@ -292,6 +305,12 @@ static int pciehp_resume(struct pcie_device *dev)
+ return 0;
+ }
+
++static int pciehp_runtime_suspend(struct pcie_device *dev)
++{
++ pciehp_disable_interrupt(dev);
++ return 0;
++}
++
+ static int pciehp_runtime_resume(struct pcie_device *dev)
+ {
+ struct controller *ctrl = get_service_data(dev);
+@@ -318,10 +337,12 @@ static struct pcie_port_service_driver hpdriver_portdrv = {
+ .remove = pciehp_remove,
+
+ #ifdef CONFIG_PM
++#ifdef CONFIG_PM_SLEEP
+ .suspend = pciehp_suspend,
+ .resume_noirq = pciehp_resume_noirq,
+ .resume = pciehp_resume,
+- .runtime_suspend = pciehp_suspend,
++#endif
++ .runtime_suspend = pciehp_runtime_suspend,
+ .runtime_resume = pciehp_runtime_resume,
+ #endif /* PM */
+ };
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index d4ac8ce8c1f9..0c3086793e4e 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -941,12 +941,11 @@ static int pci_pm_resume_noirq(struct device *dev)
+ pci_pm_default_resume_early(pci_dev);
+
+ pci_fixup_device(pci_fixup_resume_early, pci_dev);
++ pcie_pme_root_status_cleanup(pci_dev);
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume_early(dev);
+
+- pcie_pme_root_status_cleanup(pci_dev);
+-
+ if (drv && drv->pm && drv->pm->resume_noirq)
+ error = drv->pm->resume_noirq(dev);
+
+diff --git a/drivers/pci/pcie/ptm.c b/drivers/pci/pcie/ptm.c
+index 98cfa30f3fae..9361f3aa26ab 100644
+--- a/drivers/pci/pcie/ptm.c
++++ b/drivers/pci/pcie/ptm.c
+@@ -21,7 +21,7 @@ static void pci_ptm_info(struct pci_dev *dev)
+ snprintf(clock_desc, sizeof(clock_desc), ">254ns");
+ break;
+ default:
+- snprintf(clock_desc, sizeof(clock_desc), "%udns",
++ snprintf(clock_desc, sizeof(clock_desc), "%uns",
+ dev->ptm_granularity);
+ break;
+ }
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 64ebe3e5e611..d3033873395d 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -572,6 +572,7 @@ static void devm_pci_release_host_bridge_dev(struct device *dev)
+ bridge->release_fn(bridge);
+
+ pci_free_resource_list(&bridge->windows);
++ pci_free_resource_list(&bridge->dma_ranges);
+ }
+
+ static void pci_release_host_bridge_dev(struct device *dev)
+diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+index ee184d5607bd..f20524f0c21d 100644
+--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
++++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
+@@ -200,7 +200,7 @@ static void phy_mdm6600_status(struct work_struct *work)
+ struct phy_mdm6600 *ddata;
+ struct device *dev;
+ DECLARE_BITMAP(values, PHY_MDM6600_NR_STATUS_LINES);
+- int error, i, val = 0;
++ int error;
+
+ ddata = container_of(work, struct phy_mdm6600, status_work.work);
+ dev = ddata->dev;
+@@ -212,16 +212,11 @@ static void phy_mdm6600_status(struct work_struct *work)
+ if (error)
+ return;
+
+- for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
+- val |= test_bit(i, values) << i;
+- dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
+- __func__, i, test_bit(i, values), val);
+- }
+- ddata->status = values[0];
++ ddata->status = values[0] & ((1 << PHY_MDM6600_NR_STATUS_LINES) - 1);
+
+ dev_info(dev, "modem status: %i %s\n",
+ ddata->status,
+- phy_mdm6600_status_name[ddata->status & 7]);
++ phy_mdm6600_status_name[ddata->status]);
+ complete(&ddata->ack);
+ }
+
+diff --git a/drivers/pinctrl/cirrus/Kconfig b/drivers/pinctrl/cirrus/Kconfig
+index f1806fd781a0..530426a74f75 100644
+--- a/drivers/pinctrl/cirrus/Kconfig
++++ b/drivers/pinctrl/cirrus/Kconfig
+@@ -2,6 +2,7 @@
+ config PINCTRL_LOCHNAGAR
+ tristate "Cirrus Logic Lochnagar pinctrl driver"
+ depends on MFD_LOCHNAGAR
++ select GPIOLIB
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+diff --git a/drivers/pinctrl/intel/pinctrl-lewisburg.c b/drivers/pinctrl/intel/pinctrl-lewisburg.c
+index 2e06fb1464ab..7fdf4257df1e 100644
+--- a/drivers/pinctrl/intel/pinctrl-lewisburg.c
++++ b/drivers/pinctrl/intel/pinctrl-lewisburg.c
+@@ -33,6 +33,7 @@
+ .npins = ((e) - (s) + 1), \
+ }
+
++/* Lewisburg */
+ static const struct pinctrl_pin_desc lbg_pins[] = {
+ /* GPP_A */
+ PINCTRL_PIN(0, "RCINB"),
+@@ -72,7 +73,7 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
+ PINCTRL_PIN(33, "SRCCLKREQB_4"),
+ PINCTRL_PIN(34, "SRCCLKREQB_5"),
+ PINCTRL_PIN(35, "GPP_B_11"),
+- PINCTRL_PIN(36, "GLB_RST_WARN_N"),
++ PINCTRL_PIN(36, "SLP_S0B"),
+ PINCTRL_PIN(37, "PLTRSTB"),
+ PINCTRL_PIN(38, "SPKR"),
+ PINCTRL_PIN(39, "GPP_B_15"),
+@@ -185,96 +186,96 @@ static const struct pinctrl_pin_desc lbg_pins[] = {
+ PINCTRL_PIN(141, "GBE_PCI_DIS"),
+ PINCTRL_PIN(142, "GBE_LAN_DIS"),
+ PINCTRL_PIN(143, "GPP_I_10"),
+- PINCTRL_PIN(144, "GPIO_RCOMP_3P3"),
+ /* GPP_J */
+- PINCTRL_PIN(145, "GBE_LED_0_0"),
+- PINCTRL_PIN(146, "GBE_LED_0_1"),
+- PINCTRL_PIN(147, "GBE_LED_1_0"),
+- PINCTRL_PIN(148, "GBE_LED_1_1"),
+- PINCTRL_PIN(149, "GBE_LED_2_0"),
+- PINCTRL_PIN(150, "GBE_LED_2_1"),
+- PINCTRL_PIN(151, "GBE_LED_3_0"),
+- PINCTRL_PIN(152, "GBE_LED_3_1"),
+- PINCTRL_PIN(153, "GBE_SCL_0"),
+- PINCTRL_PIN(154, "GBE_SDA_0"),
+- PINCTRL_PIN(155, "GBE_SCL_1"),
+- PINCTRL_PIN(156, "GBE_SDA_1"),
+- PINCTRL_PIN(157, "GBE_SCL_2"),
+- PINCTRL_PIN(158, "GBE_SDA_2"),
+- PINCTRL_PIN(159, "GBE_SCL_3"),
+- PINCTRL_PIN(160, "GBE_SDA_3"),
+- PINCTRL_PIN(161, "GBE_SDP_0_0"),
+- PINCTRL_PIN(162, "GBE_SDP_0_1"),
+- PINCTRL_PIN(163, "GBE_SDP_1_0"),
+- PINCTRL_PIN(164, "GBE_SDP_1_1"),
+- PINCTRL_PIN(165, "GBE_SDP_2_0"),
+- PINCTRL_PIN(166, "GBE_SDP_2_1"),
+- PINCTRL_PIN(167, "GBE_SDP_3_0"),
+- PINCTRL_PIN(168, "GBE_SDP_3_1"),
++ PINCTRL_PIN(144, "GBE_LED_0_0"),
++ PINCTRL_PIN(145, "GBE_LED_0_1"),
++ PINCTRL_PIN(146, "GBE_LED_1_0"),
++ PINCTRL_PIN(147, "GBE_LED_1_1"),
++ PINCTRL_PIN(148, "GBE_LED_2_0"),
++ PINCTRL_PIN(149, "GBE_LED_2_1"),
++ PINCTRL_PIN(150, "GBE_LED_3_0"),
++ PINCTRL_PIN(151, "GBE_LED_3_1"),
++ PINCTRL_PIN(152, "GBE_SCL_0"),
++ PINCTRL_PIN(153, "GBE_SDA_0"),
++ PINCTRL_PIN(154, "GBE_SCL_1"),
++ PINCTRL_PIN(155, "GBE_SDA_1"),
++ PINCTRL_PIN(156, "GBE_SCL_2"),
++ PINCTRL_PIN(157, "GBE_SDA_2"),
++ PINCTRL_PIN(158, "GBE_SCL_3"),
++ PINCTRL_PIN(159, "GBE_SDA_3"),
++ PINCTRL_PIN(160, "GBE_SDP_0_0"),
++ PINCTRL_PIN(161, "GBE_SDP_0_1"),
++ PINCTRL_PIN(162, "GBE_SDP_1_0"),
++ PINCTRL_PIN(163, "GBE_SDP_1_1"),
++ PINCTRL_PIN(164, "GBE_SDP_2_0"),
++ PINCTRL_PIN(165, "GBE_SDP_2_1"),
++ PINCTRL_PIN(166, "GBE_SDP_3_0"),
++ PINCTRL_PIN(167, "GBE_SDP_3_1"),
+ /* GPP_K */
+- PINCTRL_PIN(169, "GBE_RMIICLK"),
+- PINCTRL_PIN(170, "GBE_RMII_TXD_0"),
+- PINCTRL_PIN(171, "GBE_RMII_TXD_1"),
++ PINCTRL_PIN(168, "GBE_RMIICLK"),
++ PINCTRL_PIN(169, "GBE_RMII_RXD_0"),
++ PINCTRL_PIN(170, "GBE_RMII_RXD_1"),
++ PINCTRL_PIN(171, "GBE_RMII_CRS_DV"),
+ PINCTRL_PIN(172, "GBE_RMII_TX_EN"),
+- PINCTRL_PIN(173, "GBE_RMII_CRS_DV"),
+- PINCTRL_PIN(174, "GBE_RMII_RXD_0"),
+- PINCTRL_PIN(175, "GBE_RMII_RXD_1"),
+- PINCTRL_PIN(176, "GBE_RMII_RX_ER"),
+- PINCTRL_PIN(177, "GBE_RMII_ARBIN"),
+- PINCTRL_PIN(178, "GBE_RMII_ARB_OUT"),
+- PINCTRL_PIN(179, "PE_RST_N"),
+- PINCTRL_PIN(180, "GPIO_RCOMP_1P8_3P3"),
++ PINCTRL_PIN(173, "GBE_RMII_TXD_0"),
++ PINCTRL_PIN(174, "GBE_RMII_TXD_1"),
++ PINCTRL_PIN(175, "GBE_RMII_RX_ER"),
++ PINCTRL_PIN(176, "GBE_RMII_ARBIN"),
++ PINCTRL_PIN(177, "GBE_RMII_ARB_OUT"),
++ PINCTRL_PIN(178, "PE_RST_N"),
+ /* GPP_G */
+- PINCTRL_PIN(181, "FAN_TACH_0"),
+- PINCTRL_PIN(182, "FAN_TACH_1"),
+- PINCTRL_PIN(183, "FAN_TACH_2"),
+- PINCTRL_PIN(184, "FAN_TACH_3"),
+- PINCTRL_PIN(185, "FAN_TACH_4"),
+- PINCTRL_PIN(186, "FAN_TACH_5"),
+- PINCTRL_PIN(187, "FAN_TACH_6"),
+- PINCTRL_PIN(188, "FAN_TACH_7"),
+- PINCTRL_PIN(189, "FAN_PWM_0"),
+- PINCTRL_PIN(190, "FAN_PWM_1"),
+- PINCTRL_PIN(191, "FAN_PWM_2"),
+- PINCTRL_PIN(192, "FAN_PWM_3"),
+- PINCTRL_PIN(193, "GSXDOUT"),
+- PINCTRL_PIN(194, "GSXSLOAD"),
+- PINCTRL_PIN(195, "GSXDIN"),
+- PINCTRL_PIN(196, "GSXSRESETB"),
+- PINCTRL_PIN(197, "GSXCLK"),
+- PINCTRL_PIN(198, "ADR_COMPLETE"),
+- PINCTRL_PIN(199, "NMIB"),
+- PINCTRL_PIN(200, "SMIB"),
+- PINCTRL_PIN(201, "SSATA_DEVSLP_0"),
+- PINCTRL_PIN(202, "SSATA_DEVSLP_1"),
+- PINCTRL_PIN(203, "SSATA_DEVSLP_2"),
+- PINCTRL_PIN(204, "SSATAXPCIE0_SSATAGP0"),
++ PINCTRL_PIN(179, "FAN_TACH_0"),
++ PINCTRL_PIN(180, "FAN_TACH_1"),
++ PINCTRL_PIN(181, "FAN_TACH_2"),
++ PINCTRL_PIN(182, "FAN_TACH_3"),
++ PINCTRL_PIN(183, "FAN_TACH_4"),
++ PINCTRL_PIN(184, "FAN_TACH_5"),
++ PINCTRL_PIN(185, "FAN_TACH_6"),
++ PINCTRL_PIN(186, "FAN_TACH_7"),
++ PINCTRL_PIN(187, "FAN_PWM_0"),
++ PINCTRL_PIN(188, "FAN_PWM_1"),
++ PINCTRL_PIN(189, "FAN_PWM_2"),
++ PINCTRL_PIN(190, "FAN_PWM_3"),
++ PINCTRL_PIN(191, "GSXDOUT"),
++ PINCTRL_PIN(192, "GSXSLOAD"),
++ PINCTRL_PIN(193, "GSXDIN"),
++ PINCTRL_PIN(194, "GSXSRESETB"),
++ PINCTRL_PIN(195, "GSXCLK"),
++ PINCTRL_PIN(196, "ADR_COMPLETE"),
++ PINCTRL_PIN(197, "NMIB"),
++ PINCTRL_PIN(198, "SMIB"),
++ PINCTRL_PIN(199, "SSATA_DEVSLP_0"),
++ PINCTRL_PIN(200, "SSATA_DEVSLP_1"),
++ PINCTRL_PIN(201, "SSATA_DEVSLP_2"),
++ PINCTRL_PIN(202, "SSATAXPCIE0_SSATAGP0"),
+ /* GPP_H */
+- PINCTRL_PIN(205, "SRCCLKREQB_6"),
+- PINCTRL_PIN(206, "SRCCLKREQB_7"),
+- PINCTRL_PIN(207, "SRCCLKREQB_8"),
+- PINCTRL_PIN(208, "SRCCLKREQB_9"),
+- PINCTRL_PIN(209, "SRCCLKREQB_10"),
+- PINCTRL_PIN(210, "SRCCLKREQB_11"),
+- PINCTRL_PIN(211, "SRCCLKREQB_12"),
+- PINCTRL_PIN(212, "SRCCLKREQB_13"),
+- PINCTRL_PIN(213, "SRCCLKREQB_14"),
+- PINCTRL_PIN(214, "SRCCLKREQB_15"),
+- PINCTRL_PIN(215, "SML2CLK"),
+- PINCTRL_PIN(216, "SML2DATA"),
+- PINCTRL_PIN(217, "SML2ALERTB"),
+- PINCTRL_PIN(218, "SML3CLK"),
+- PINCTRL_PIN(219, "SML3DATA"),
+- PINCTRL_PIN(220, "SML3ALERTB"),
+- PINCTRL_PIN(221, "SML4CLK"),
+- PINCTRL_PIN(222, "SML4DATA"),
+- PINCTRL_PIN(223, "SML4ALERTB"),
+- PINCTRL_PIN(224, "SSATAXPCIE1_SSATAGP1"),
+- PINCTRL_PIN(225, "SSATAXPCIE2_SSATAGP2"),
+- PINCTRL_PIN(226, "SSATAXPCIE3_SSATAGP3"),
+- PINCTRL_PIN(227, "SSATAXPCIE4_SSATAGP4"),
+- PINCTRL_PIN(228, "SSATAXPCIE5_SSATAGP5"),
++ PINCTRL_PIN(203, "SRCCLKREQB_6"),
++ PINCTRL_PIN(204, "SRCCLKREQB_7"),
++ PINCTRL_PIN(205, "SRCCLKREQB_8"),
++ PINCTRL_PIN(206, "SRCCLKREQB_9"),
++ PINCTRL_PIN(207, "SRCCLKREQB_10"),
++ PINCTRL_PIN(208, "SRCCLKREQB_11"),
++ PINCTRL_PIN(209, "SRCCLKREQB_12"),
++ PINCTRL_PIN(210, "SRCCLKREQB_13"),
++ PINCTRL_PIN(211, "SRCCLKREQB_14"),
++ PINCTRL_PIN(212, "SRCCLKREQB_15"),
++ PINCTRL_PIN(213, "SML2CLK"),
++ PINCTRL_PIN(214, "SML2DATA"),
++ PINCTRL_PIN(215, "SML2ALERTB"),
++ PINCTRL_PIN(216, "SML3CLK"),
++ PINCTRL_PIN(217, "SML3DATA"),
++ PINCTRL_PIN(218, "SML3ALERTB"),
++ PINCTRL_PIN(219, "SML4CLK"),
++ PINCTRL_PIN(220, "SML4DATA"),
++ PINCTRL_PIN(221, "SML4ALERTB"),
++ PINCTRL_PIN(222, "SSATAXPCIE1_SSATAGP1"),
++ PINCTRL_PIN(223, "SSATAXPCIE2_SSATAGP2"),
++ PINCTRL_PIN(224, "SSATAXPCIE3_SSATAGP3"),
++ PINCTRL_PIN(225, "SSATAXPCIE4_SSATAGP4"),
++ PINCTRL_PIN(226, "SSATAXPCIE5_SSATAGP5"),
+ /* GPP_L */
++ PINCTRL_PIN(227, "GPP_L_0"),
++ PINCTRL_PIN(228, "EC_CSME_INTR_OUT"),
+ PINCTRL_PIN(229, "VISA2CH0_D0"),
+ PINCTRL_PIN(230, "VISA2CH0_D1"),
+ PINCTRL_PIN(231, "VISA2CH0_D2"),
+diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
+index 8bba9d053d9f..aba479a1150c 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson.c
++++ b/drivers/pinctrl/meson/pinctrl-meson.c
+@@ -441,6 +441,7 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc,
+ return ret;
+
+ meson_calc_reg_and_bit(bank, pin, REG_DS, &reg, &bit);
++ bit = bit << 1;
+
+ ret = regmap_read(pc->reg_ds, reg, &val);
+ if (ret)
+diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
+index b8640ad41bef..ce983247c9e2 100644
+--- a/drivers/pinctrl/sh-pfc/core.c
++++ b/drivers/pinctrl/sh-pfc/core.c
+@@ -29,12 +29,12 @@
+ static int sh_pfc_map_resources(struct sh_pfc *pfc,
+ struct platform_device *pdev)
+ {
+- unsigned int num_windows, num_irqs;
+ struct sh_pfc_window *windows;
+ unsigned int *irqs = NULL;
++ unsigned int num_windows;
+ struct resource *res;
+ unsigned int i;
+- int irq;
++ int num_irqs;
+
+ /* Count the MEM and IRQ resources. */
+ for (num_windows = 0;; num_windows++) {
+@@ -42,17 +42,13 @@ static int sh_pfc_map_resources(struct sh_pfc *pfc,
+ if (!res)
+ break;
+ }
+- for (num_irqs = 0;; num_irqs++) {
+- irq = platform_get_irq(pdev, num_irqs);
+- if (irq == -EPROBE_DEFER)
+- return irq;
+- if (irq < 0)
+- break;
+- }
+-
+ if (num_windows == 0)
+ return -EINVAL;
+
++ num_irqs = platform_irq_count(pdev);
++ if (num_irqs < 0)
++ return num_irqs;
++
+ /* Allocate memory windows and IRQs arrays. */
+ windows = devm_kcalloc(pfc->dev, num_windows, sizeof(*windows),
+ GFP_KERNEL);
+diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
+index 835148fc0f28..cab7da130925 100644
+--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
++++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
+@@ -422,12 +422,12 @@ extern const struct sh_pfc_soc_info shx3_pinmux_info;
+ /*
+ * Describe a pinmux configuration in which a pin is physically multiplexed
+ * with other pins.
+- * - ipsr: IPSR field (unused, for documentation purposes only)
++ * - ipsr: IPSR field
+ * - fn: Function name
+ * - psel: Physical multiplexing selector
+ */
+ #define PINMUX_IPSR_PHYS(ipsr, fn, psel) \
+- PINMUX_DATA(fn##_MARK, FN_##psel)
++ PINMUX_DATA(fn##_MARK, FN_##psel, FN_##ipsr)
+
+ /*
+ * Describe a pinmux configuration for a single-function pin with GPIO
+diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+index e5e7f1f22813..b522ca010332 100644
+--- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
++++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c
+@@ -496,7 +496,7 @@ static int ti_iodelay_dt_node_to_map(struct pinctrl_dev *pctldev,
+ return -EINVAL;
+
+ rows = pinctrl_count_index_with_args(np, name);
+- if (rows == -EINVAL)
++ if (rows < 0)
+ return rows;
+
+ *map = devm_kzalloc(iod->dev, sizeof(**map), GFP_KERNEL);
+diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c
+index 9a5c9fd2dbc6..5739a9669b29 100644
+--- a/drivers/platform/mellanox/mlxbf-tmfifo.c
++++ b/drivers/platform/mellanox/mlxbf-tmfifo.c
+@@ -149,7 +149,7 @@ struct mlxbf_tmfifo_irq_info {
+ * @work: work struct for deferred process
+ * @timer: background timer
+ * @vring: Tx/Rx ring
+- * @spin_lock: spin lock
++ * @spin_lock: Tx/Rx spin lock
+ * @is_ready: ready flag
+ */
+ struct mlxbf_tmfifo {
+@@ -164,7 +164,7 @@ struct mlxbf_tmfifo {
+ struct work_struct work;
+ struct timer_list timer;
+ struct mlxbf_tmfifo_vring *vring[2];
+- spinlock_t spin_lock; /* spin lock */
++ spinlock_t spin_lock[2]; /* spin lock */
+ bool is_ready;
+ };
+
+@@ -525,7 +525,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
+ writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
+
+ /* Use spin-lock to protect the 'cons->tx_buf'. */
+- spin_lock_irqsave(&fifo->spin_lock, flags);
++ spin_lock_irqsave(&fifo->spin_lock[0], flags);
+
+ while (size > 0) {
+ addr = cons->tx_buf.buf + cons->tx_buf.tail;
+@@ -552,7 +552,7 @@ static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
+ }
+ }
+
+- spin_unlock_irqrestore(&fifo->spin_lock, flags);
++ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
+ }
+
+ /* Rx/Tx one word in the descriptor buffer. */
+@@ -731,9 +731,9 @@ static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
+ fifo->vring[is_rx] = NULL;
+
+ /* Notify upper layer that packet is done. */
+- spin_lock_irqsave(&fifo->spin_lock, flags);
++ spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
+ vring_interrupt(0, vring->vq);
+- spin_unlock_irqrestore(&fifo->spin_lock, flags);
++ spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
+ }
+
+ mlxbf_tmfifo_desc_done:
+@@ -852,10 +852,10 @@ static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
+ * worker handler.
+ */
+ if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
+- spin_lock_irqsave(&fifo->spin_lock, flags);
++ spin_lock_irqsave(&fifo->spin_lock[0], flags);
+ tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
+ mlxbf_tmfifo_console_output(tm_vdev, vring);
+- spin_unlock_irqrestore(&fifo->spin_lock, flags);
++ spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
+ } else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
+ &fifo->pend_events)) {
+ return true;
+@@ -1189,7 +1189,8 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev)
+ if (!fifo)
+ return -ENOMEM;
+
+- spin_lock_init(&fifo->spin_lock);
++ spin_lock_init(&fifo->spin_lock[0]);
++ spin_lock_init(&fifo->spin_lock[1]);
+ INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
+ mutex_init(&fifo->lock);
+
+diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
+index a7f184bb47e0..3d29a11c1d6b 100644
+--- a/drivers/platform/mips/cpu_hwmon.c
++++ b/drivers/platform/mips/cpu_hwmon.c
+@@ -161,7 +161,7 @@ static int __init loongson_hwmon_init(void)
+
+ cpu_hwmon_dev = hwmon_device_register(NULL);
+ if (IS_ERR(cpu_hwmon_dev)) {
+- ret = -ENOMEM;
++ ret = PTR_ERR(cpu_hwmon_dev);
+ pr_err("hwmon_device_register fail!\n");
+ goto fail_hwmon_device_register;
+ }
+diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
+index 821b08e01635..982f0cc8270c 100644
+--- a/drivers/platform/x86/asus-wmi.c
++++ b/drivers/platform/x86/asus-wmi.c
+@@ -512,13 +512,7 @@ static void kbd_led_update(struct asus_wmi *asus)
+ {
+ int ctrl_param = 0;
+
+- /*
+- * bits 0-2: level
+- * bit 7: light on/off
+- */
+- if (asus->kbd_led_wk > 0)
+- ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
+-
++ ctrl_param = 0x80 | (asus->kbd_led_wk & 0x7F);
+ asus_wmi_set_devstate(ASUS_WMI_DEVID_KBD_BACKLIGHT, ctrl_param, NULL);
+ }
+
+diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c
+index be85ed966bf3..73eb1572b966 100644
+--- a/drivers/platform/x86/gpd-pocket-fan.c
++++ b/drivers/platform/x86/gpd-pocket-fan.c
+@@ -16,17 +16,27 @@
+
+ #define MAX_SPEED 3
+
+-static int temp_limits[3] = { 55000, 60000, 65000 };
++#define TEMP_LIMIT0_DEFAULT 55000
++#define TEMP_LIMIT1_DEFAULT 60000
++#define TEMP_LIMIT2_DEFAULT 65000
++
++#define HYSTERESIS_DEFAULT 3000
++
++#define SPEED_ON_AC_DEFAULT 2
++
++static int temp_limits[3] = {
++ TEMP_LIMIT0_DEFAULT, TEMP_LIMIT1_DEFAULT, TEMP_LIMIT2_DEFAULT,
++};
+ module_param_array(temp_limits, int, NULL, 0444);
+ MODULE_PARM_DESC(temp_limits,
+ "Millicelsius values above which the fan speed increases");
+
+-static int hysteresis = 3000;
++static int hysteresis = HYSTERESIS_DEFAULT;
+ module_param(hysteresis, int, 0444);
+ MODULE_PARM_DESC(hysteresis,
+ "Hysteresis in millicelsius before lowering the fan speed");
+
+-static int speed_on_ac = 2;
++static int speed_on_ac = SPEED_ON_AC_DEFAULT;
+ module_param(speed_on_ac, int, 0444);
+ MODULE_PARM_DESC(speed_on_ac,
+ "minimum fan speed to allow when system is powered by AC");
+@@ -120,18 +130,21 @@ static int gpd_pocket_fan_probe(struct platform_device *pdev)
+ if (temp_limits[i] < 40000 || temp_limits[i] > 70000) {
+ dev_err(&pdev->dev, "Invalid temp-limit %d (must be between 40000 and 70000)\n",
+ temp_limits[i]);
+- return -EINVAL;
++ temp_limits[0] = TEMP_LIMIT0_DEFAULT;
++ temp_limits[1] = TEMP_LIMIT1_DEFAULT;
++ temp_limits[2] = TEMP_LIMIT2_DEFAULT;
++ break;
+ }
+ }
+ if (hysteresis < 1000 || hysteresis > 10000) {
+ dev_err(&pdev->dev, "Invalid hysteresis %d (must be between 1000 and 10000)\n",
+ hysteresis);
+- return -EINVAL;
++ hysteresis = HYSTERESIS_DEFAULT;
+ }
+ if (speed_on_ac < 0 || speed_on_ac > MAX_SPEED) {
+ dev_err(&pdev->dev, "Invalid speed_on_ac %d (must be between 0 and 3)\n",
+ speed_on_ac);
+- return -EINVAL;
++ speed_on_ac = SPEED_ON_AC_DEFAULT;
+ }
+
+ fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
+diff --git a/drivers/reset/reset-brcmstb.c b/drivers/reset/reset-brcmstb.c
+index a608f445dad6..f213264c8567 100644
+--- a/drivers/reset/reset-brcmstb.c
++++ b/drivers/reset/reset-brcmstb.c
+@@ -91,12 +91,6 @@ static int brcmstb_reset_probe(struct platform_device *pdev)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- if (!IS_ALIGNED(res->start, SW_INIT_BANK_SIZE) ||
+- !IS_ALIGNED(resource_size(res), SW_INIT_BANK_SIZE)) {
+- dev_err(kdev, "incorrect register range\n");
+- return -EINVAL;
+- }
+-
+ priv->base = devm_ioremap_resource(kdev, res);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
+index 7744333b0f40..ddfef4d43bab 100644
+--- a/drivers/rtc/rtc-bd70528.c
++++ b/drivers/rtc/rtc-bd70528.c
+@@ -491,3 +491,4 @@ module_platform_driver(bd70528_rtc);
+ MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>");
+ MODULE_DESCRIPTION("BD70528 RTC driver");
+ MODULE_LICENSE("GPL");
++MODULE_ALIAS("platofrm:bd70528-rtc");
+diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
+index 3e9800f9878a..82d2ab0b3e9c 100644
+--- a/drivers/rtc/rtc-brcmstb-waketimer.c
++++ b/drivers/rtc/rtc-brcmstb-waketimer.c
+@@ -277,6 +277,7 @@ static int brcmstb_waketmr_remove(struct platform_device *pdev)
+ struct brcmstb_waketmr *timer = dev_get_drvdata(&pdev->dev);
+
+ unregister_reboot_notifier(&timer->reboot_notifier);
++ clk_disable_unprepare(timer->clk);
+
+ return 0;
+ }
+diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
+index 1c2d3c4a4963..b1f2bedee77e 100644
+--- a/drivers/rtc/rtc-msm6242.c
++++ b/drivers/rtc/rtc-msm6242.c
+@@ -133,7 +133,8 @@ static int msm6242_read_time(struct device *dev, struct rtc_time *tm)
+ msm6242_read(priv, MSM6242_SECOND1);
+ tm->tm_min = msm6242_read(priv, MSM6242_MINUTE10) * 10 +
+ msm6242_read(priv, MSM6242_MINUTE1);
+- tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10 & 3)) * 10 +
++ tm->tm_hour = (msm6242_read(priv, MSM6242_HOUR10) &
++ MSM6242_HOUR10_HR_MASK) * 10 +
+ msm6242_read(priv, MSM6242_HOUR1);
+ tm->tm_mday = msm6242_read(priv, MSM6242_DAY10) * 10 +
+ msm6242_read(priv, MSM6242_DAY1);
+diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
+index 704229eb0cac..b216bdcba0da 100644
+--- a/drivers/rtc/rtc-mt6397.c
++++ b/drivers/rtc/rtc-mt6397.c
+@@ -47,6 +47,14 @@
+
+ #define RTC_AL_SEC 0x0018
+
++#define RTC_AL_SEC_MASK 0x003f
++#define RTC_AL_MIN_MASK 0x003f
++#define RTC_AL_HOU_MASK 0x001f
++#define RTC_AL_DOM_MASK 0x001f
++#define RTC_AL_DOW_MASK 0x0007
++#define RTC_AL_MTH_MASK 0x000f
++#define RTC_AL_YEA_MASK 0x007f
++
+ #define RTC_PDN2 0x002e
+ #define RTC_PDN2_PWRON_ALARM BIT(4)
+
+@@ -103,7 +111,7 @@ static irqreturn_t mtk_rtc_irq_handler_thread(int irq, void *data)
+ irqen = irqsta & ~RTC_IRQ_EN_AL;
+ mutex_lock(&rtc->lock);
+ if (regmap_write(rtc->regmap, rtc->addr_base + RTC_IRQ_EN,
+- irqen) < 0)
++ irqen) == 0)
+ mtk_rtc_write_trigger(rtc);
+ mutex_unlock(&rtc->lock);
+
+@@ -225,12 +233,12 @@ static int mtk_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
+ alm->pending = !!(pdn2 & RTC_PDN2_PWRON_ALARM);
+ mutex_unlock(&rtc->lock);
+
+- tm->tm_sec = data[RTC_OFFSET_SEC];
+- tm->tm_min = data[RTC_OFFSET_MIN];
+- tm->tm_hour = data[RTC_OFFSET_HOUR];
+- tm->tm_mday = data[RTC_OFFSET_DOM];
+- tm->tm_mon = data[RTC_OFFSET_MTH];
+- tm->tm_year = data[RTC_OFFSET_YEAR];
++ tm->tm_sec = data[RTC_OFFSET_SEC] & RTC_AL_SEC_MASK;
++ tm->tm_min = data[RTC_OFFSET_MIN] & RTC_AL_MIN_MASK;
++ tm->tm_hour = data[RTC_OFFSET_HOUR] & RTC_AL_HOU_MASK;
++ tm->tm_mday = data[RTC_OFFSET_DOM] & RTC_AL_DOM_MASK;
++ tm->tm_mon = data[RTC_OFFSET_MTH] & RTC_AL_MTH_MASK;
++ tm->tm_year = data[RTC_OFFSET_YEAR] & RTC_AL_YEA_MASK;
+
+ tm->tm_year += RTC_MIN_YEAR_OFFSET;
+ tm->tm_mon--;
+@@ -251,14 +259,25 @@ static int mtk_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
+ tm->tm_year -= RTC_MIN_YEAR_OFFSET;
+ tm->tm_mon++;
+
+- data[RTC_OFFSET_SEC] = tm->tm_sec;
+- data[RTC_OFFSET_MIN] = tm->tm_min;
+- data[RTC_OFFSET_HOUR] = tm->tm_hour;
+- data[RTC_OFFSET_DOM] = tm->tm_mday;
+- data[RTC_OFFSET_MTH] = tm->tm_mon;
+- data[RTC_OFFSET_YEAR] = tm->tm_year;
+-
+ mutex_lock(&rtc->lock);
++ ret = regmap_bulk_read(rtc->regmap, rtc->addr_base + RTC_AL_SEC,
++ data, RTC_OFFSET_COUNT);
++ if (ret < 0)
++ goto exit;
++
++ data[RTC_OFFSET_SEC] = ((data[RTC_OFFSET_SEC] & ~(RTC_AL_SEC_MASK)) |
++ (tm->tm_sec & RTC_AL_SEC_MASK));
++ data[RTC_OFFSET_MIN] = ((data[RTC_OFFSET_MIN] & ~(RTC_AL_MIN_MASK)) |
++ (tm->tm_min & RTC_AL_MIN_MASK));
++ data[RTC_OFFSET_HOUR] = ((data[RTC_OFFSET_HOUR] & ~(RTC_AL_HOU_MASK)) |
++ (tm->tm_hour & RTC_AL_HOU_MASK));
++ data[RTC_OFFSET_DOM] = ((data[RTC_OFFSET_DOM] & ~(RTC_AL_DOM_MASK)) |
++ (tm->tm_mday & RTC_AL_DOM_MASK));
++ data[RTC_OFFSET_MTH] = ((data[RTC_OFFSET_MTH] & ~(RTC_AL_MTH_MASK)) |
++ (tm->tm_mon & RTC_AL_MTH_MASK));
++ data[RTC_OFFSET_YEAR] = ((data[RTC_OFFSET_YEAR] & ~(RTC_AL_YEA_MASK)) |
++ (tm->tm_year & RTC_AL_YEA_MASK));
++
+ if (alm->enabled) {
+ ret = regmap_bulk_write(rtc->regmap,
+ rtc->addr_base + RTC_AL_SEC,
+diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
+index 9df47421d69c..5be4d800e4ba 100644
+--- a/drivers/s390/net/qeth_core_main.c
++++ b/drivers/s390/net/qeth_core_main.c
+@@ -2451,50 +2451,46 @@ static int qeth_mpc_initialize(struct qeth_card *card)
+ rc = qeth_cm_enable(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "2err%d", rc);
+- goto out_qdio;
++ return rc;
+ }
+ rc = qeth_cm_setup(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "3err%d", rc);
+- goto out_qdio;
++ return rc;
+ }
+ rc = qeth_ulp_enable(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "4err%d", rc);
+- goto out_qdio;
++ return rc;
+ }
+ rc = qeth_ulp_setup(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
+- goto out_qdio;
++ return rc;
+ }
+ rc = qeth_alloc_qdio_queues(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "5err%d", rc);
+- goto out_qdio;
++ return rc;
+ }
+ rc = qeth_qdio_establish(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "6err%d", rc);
+ qeth_free_qdio_queues(card);
+- goto out_qdio;
++ return rc;
+ }
+ rc = qeth_qdio_activate(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "7err%d", rc);
+- goto out_qdio;
++ return rc;
+ }
+ rc = qeth_dm_act(card);
+ if (rc) {
+ QETH_CARD_TEXT_(card, 2, "8err%d", rc);
+- goto out_qdio;
++ return rc;
+ }
+
+ return 0;
+-out_qdio:
+- qeth_qdio_clear_card(card, !IS_IQD(card));
+- qdio_free(CARD_DDEV(card));
+- return rc;
+ }
+
+ void qeth_print_status_message(struct qeth_card *card)
+@@ -3382,11 +3378,6 @@ int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
+ goto out;
+ }
+
+- if (card->state != CARD_STATE_DOWN) {
+- rc = -1;
+- goto out;
+- }
+-
+ qeth_free_qdio_queues(card);
+ card->options.cq = cq;
+ rc = 0;
+@@ -4972,10 +4963,8 @@ retriable:
+ }
+ if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+ rc = qeth_query_setdiagass(card);
+- if (rc < 0) {
++ if (rc)
+ QETH_CARD_TEXT_(card, 2, "8err%d", rc);
+- goto out;
+- }
+ }
+ return 0;
+ out:
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index 8b7d911dccd8..11e3292c0adf 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -287,12 +287,12 @@ static void qeth_l2_stop_card(struct qeth_card *card)
+ card->state = CARD_STATE_HARDSETUP;
+ }
+ if (card->state == CARD_STATE_HARDSETUP) {
+- qeth_qdio_clear_card(card, 0);
+ qeth_drain_output_queues(card);
+ qeth_clear_working_pool_list(card);
+ card->state = CARD_STATE_DOWN;
+ }
+
++ qeth_qdio_clear_card(card, 0);
+ flush_workqueue(card->event_wq);
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ card->info.promisc_mode = 0;
+@@ -1983,8 +1983,7 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
+ /* check if VNICC is currently enabled */
+ bool qeth_l2_vnicc_is_in_use(struct qeth_card *card)
+ {
+- /* if everything is turned off, VNICC is not active */
+- if (!card->options.vnicc.cur_chars)
++ if (!card->options.vnicc.sup_chars)
+ return false;
+ /* default values are only OK if rx_bcast was not enabled by user
+ * or the card is offline.
+@@ -2071,8 +2070,9 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
+ /* enforce assumed default values and recover settings, if changed */
+ error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
+ timeout);
+- chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
+- chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
++ /* Change chars, if necessary */
++ chars_tmp = card->options.vnicc.wanted_chars ^
++ card->options.vnicc.cur_chars;
+ chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
+ for_each_set_bit(i, &chars_tmp, chars_len) {
+ vnicc = BIT(i);
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index 32385327539b..5152970a9aa4 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -1426,12 +1426,12 @@ static void qeth_l3_stop_card(struct qeth_card *card)
+ card->state = CARD_STATE_HARDSETUP;
+ }
+ if (card->state == CARD_STATE_HARDSETUP) {
+- qeth_qdio_clear_card(card, 0);
+ qeth_drain_output_queues(card);
+ qeth_clear_working_pool_list(card);
+ card->state = CARD_STATE_DOWN;
+ }
+
++ qeth_qdio_clear_card(card, 0);
+ flush_workqueue(card->event_wq);
+ card->info.promisc_mode = 0;
+ }
+diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
+index 2f73b33c9347..333fd4619dc6 100644
+--- a/drivers/s390/net/qeth_l3_sys.c
++++ b/drivers/s390/net/qeth_l3_sys.c
+@@ -270,24 +270,36 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+ {
+ struct qeth_card *card = dev_get_drvdata(dev);
++ int rc = 0;
+ char *tmp;
+- int rc;
+
+ if (!card)
+ return -EINVAL;
+
+ if (!IS_IQD(card))
+ return -EPERM;
+- if (card->state != CARD_STATE_DOWN)
+- return -EPERM;
+- if (card->options.sniffer)
+- return -EPERM;
+- if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+- return -EPERM;
++
++ mutex_lock(&card->conf_mutex);
++ if (card->state != CARD_STATE_DOWN) {
++ rc = -EPERM;
++ goto out;
++ }
++
++ if (card->options.sniffer) {
++ rc = -EPERM;
++ goto out;
++ }
++
++ if (card->options.cq == QETH_CQ_NOTAVAILABLE) {
++ rc = -EPERM;
++ goto out;
++ }
+
+ tmp = strsep((char **)&buf, "\n");
+- if (strlen(tmp) > 8)
+- return -EINVAL;
++ if (strlen(tmp) > 8) {
++ rc = -EINVAL;
++ goto out;
++ }
+
+ if (card->options.hsuid[0])
+ /* delete old ip address */
+@@ -298,11 +310,13 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+ card->options.hsuid[0] = '\0';
+ memcpy(card->dev->perm_addr, card->options.hsuid, 9);
+ qeth_configure_cq(card, QETH_CQ_DISABLED);
+- return count;
++ goto out;
+ }
+
+- if (qeth_configure_cq(card, QETH_CQ_ENABLED))
+- return -EPERM;
++ if (qeth_configure_cq(card, QETH_CQ_ENABLED)) {
++ rc = -EPERM;
++ goto out;
++ }
+
+ snprintf(card->options.hsuid, sizeof(card->options.hsuid),
+ "%-8s", tmp);
+@@ -311,6 +325,8 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
+
+ rc = qeth_l3_modify_hsuid(card, true);
+
++out:
++ mutex_unlock(&card->conf_mutex);
+ return rc ? rc : count;
+ }
+
+diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
+index 3e17af8aedeb..2cd2761bd249 100644
+--- a/drivers/scsi/cxgbi/libcxgbi.c
++++ b/drivers/scsi/cxgbi/libcxgbi.c
+@@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
+ "cdev 0x%p, p# %u.\n", cdev, cdev->nports);
+ cxgbi_hbas_remove(cdev);
+ cxgbi_device_portmap_cleanup(cdev);
+- cxgbi_ppm_release(cdev->cdev2ppm(cdev));
++ if (cdev->cdev2ppm)
++ cxgbi_ppm_release(cdev->cdev2ppm(cdev));
+ if (cdev->pmap.max_connect)
+ cxgbi_free_big_mem(cdev->pmap.port_csk);
+ kfree(cdev);
+diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
+index fea3cb6a090b..752b71cfbe12 100644
+--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
++++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
+@@ -5234,7 +5234,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+ &ct->chain_buffer_dma);
+ if (!ct->chain_buffer) {
+ ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
+- _base_release_memory_pools(ioc);
+ goto out;
+ }
+ }
+diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
+index ebb40160539f..ac2e88ec1190 100644
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1694,20 +1694,30 @@ static void sd_rescan(struct device *dev)
+ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+ {
+- struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
++ struct gendisk *disk = bdev->bd_disk;
++ struct scsi_disk *sdkp = scsi_disk(disk);
++ struct scsi_device *sdev = sdkp->device;
++ void __user *p = compat_ptr(arg);
+ int error;
+
++ error = scsi_verify_blk_ioctl(bdev, cmd);
++ if (error < 0)
++ return error;
++
+ error = scsi_ioctl_block_when_processing_errors(sdev, cmd,
+ (mode & FMODE_NDELAY) != 0);
+ if (error)
+ return error;
++
++ if (is_sed_ioctl(cmd))
++ return sed_ioctl(sdkp->opal_dev, cmd, p);
+
+ /*
+ * Let the static ioctl translation table take care of it.
+ */
+ if (!sdev->host->hostt->compat_ioctl)
+ return -ENOIOCTLCMD;
+- return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
++ return sdev->host->hostt->compat_ioctl(sdev, cmd, p);
+ }
+ #endif
+
+@@ -2192,8 +2202,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer
+ u8 type;
+ int ret = 0;
+
+- if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0)
++ if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
++ sdkp->protection_type = 0;
+ return ret;
++ }
+
+ type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */
+
+diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
+index dc2f6d2b46ed..d2197a31abe5 100644
+--- a/drivers/scsi/ufs/ufs_bsg.c
++++ b/drivers/scsi/ufs/ufs_bsg.c
+@@ -202,7 +202,7 @@ int ufs_bsg_probe(struct ufs_hba *hba)
+ bsg_dev->parent = get_device(parent);
+ bsg_dev->release = ufs_bsg_node_release;
+
+- dev_set_name(bsg_dev, "ufs-bsg");
++ dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no);
+
+ ret = device_add(bsg_dev);
+ if (ret)
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index ba8eff41b746..abbc1582f457 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -302,7 +302,6 @@ struct atmel_spi {
+ bool use_cs_gpios;
+
+ bool keep_cs;
+- bool cs_active;
+
+ u32 fifo_size;
+ };
+@@ -1374,11 +1373,9 @@ static int atmel_spi_one_transfer(struct spi_master *master,
+ &msg->transfers)) {
+ as->keep_cs = true;
+ } else {
+- as->cs_active = !as->cs_active;
+- if (as->cs_active)
+- cs_activate(as, msg->spi);
+- else
+- cs_deactivate(as, msg->spi);
++ cs_deactivate(as, msg->spi);
++ udelay(10);
++ cs_activate(as, msg->spi);
+ }
+ }
+
+@@ -1401,7 +1398,6 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
+ atmel_spi_lock(as);
+ cs_activate(as, spi);
+
+- as->cs_active = true;
+ as->keep_cs = false;
+
+ msg->status = 0;
+diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
+index d08e9324140e..3528ed5eea9b 100644
+--- a/drivers/spi/spi-fsl-lpspi.c
++++ b/drivers/spi/spi-fsl-lpspi.c
+@@ -938,7 +938,7 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
+ ret = pm_runtime_get_sync(fsl_lpspi->dev);
+ if (ret < 0) {
+ dev_err(fsl_lpspi->dev, "failed to enable clock\n");
+- return ret;
++ goto out_controller_put;
+ }
+
+ temp = readl(fsl_lpspi->base + IMX7ULP_PARAM);
+diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
+index ae95ec0bc964..9f92165fe09f 100644
+--- a/drivers/spi/spi-pxa2xx.c
++++ b/drivers/spi/spi-pxa2xx.c
+@@ -1612,6 +1612,11 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
+ return cs;
+ }
+
++static size_t pxa2xx_spi_max_dma_transfer_size(struct spi_device *spi)
++{
++ return MAX_DMA_LEN;
++}
++
+ static int pxa2xx_spi_probe(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+@@ -1717,6 +1722,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
+ } else {
+ controller->can_dma = pxa2xx_spi_can_dma;
+ controller->max_dma_len = MAX_DMA_LEN;
++ controller->max_transfer_size =
++ pxa2xx_spi_max_dma_transfer_size;
+ }
+ }
+
+diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
+index 15f5723d9f95..7222c7689c3c 100644
+--- a/drivers/spi/spi-rspi.c
++++ b/drivers/spi/spi-rspi.c
+@@ -1257,9 +1257,9 @@ static int rspi_probe(struct platform_device *pdev)
+ ctlr->flags = ops->flags;
+ ctlr->dev.of_node = pdev->dev.of_node;
+
+- ret = platform_get_irq_byname(pdev, "rx");
++ ret = platform_get_irq_byname_optional(pdev, "rx");
+ if (ret < 0) {
+- ret = platform_get_irq_byname(pdev, "mux");
++ ret = platform_get_irq_byname_optional(pdev, "mux");
+ if (ret < 0)
+ ret = platform_get_irq(pdev, 0);
+ if (ret >= 0)
+@@ -1270,10 +1270,6 @@ static int rspi_probe(struct platform_device *pdev)
+ if (ret >= 0)
+ rspi->tx_irq = ret;
+ }
+- if (ret < 0) {
+- dev_err(&pdev->dev, "platform_get_irq error\n");
+- goto error2;
+- }
+
+ if (rspi->rx_irq == rspi->tx_irq) {
+ /* Single multiplexed interrupt */
+diff --git a/drivers/spi/spi-sprd.c b/drivers/spi/spi-sprd.c
+index 8c9021b7f7a9..fa597e27be17 100644
+--- a/drivers/spi/spi-sprd.c
++++ b/drivers/spi/spi-sprd.c
+@@ -674,7 +674,7 @@ static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
+ u16 word_delay, interval;
+ u32 val;
+
+- val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
++ val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
+ val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
+ /* Set default chip selection, clock phase and clock polarity */
+ val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX;
+diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+index 636bf972adcf..5f29b7a836db 100644
+--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
++++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+@@ -63,7 +63,7 @@ static void set_params(struct hantro_ctx *ctx)
+ /* always use the matrix sent from userspace */
+ reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
+
+- if (slices[0].flags & V4L2_H264_SLICE_FLAG_FIELD_PIC)
++ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
+ reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
+
+diff --git a/drivers/staging/media/hantro/hantro_h264.c b/drivers/staging/media/hantro/hantro_h264.c
+index 0d758e0c0f99..a9c134204351 100644
+--- a/drivers/staging/media/hantro/hantro_h264.c
++++ b/drivers/staging/media/hantro/hantro_h264.c
+@@ -20,7 +20,7 @@
+ /* Size with u32 units. */
+ #define CABAC_INIT_BUFFER_SIZE (460 * 2)
+ #define POC_BUFFER_SIZE 34
+-#define SCALING_LIST_SIZE (6 * 16 + 6 * 64)
++#define SCALING_LIST_SIZE (6 * 16 + 2 * 64)
+
+ #define POC_CMP(p0, p1) ((p0) < (p1) ? -1 : 1)
+
+@@ -194,23 +194,6 @@ static const u32 h264_cabac_table[] = {
+ 0x1f0c2517, 0x1f261440
+ };
+
+-/*
+- * NOTE: The scaling lists are in zig-zag order, apply inverse scanning process
+- * to get the values in matrix order. In addition, the hardware requires bytes
+- * swapped within each subsequent 4 bytes. Both arrays below include both
+- * transformations.
+- */
+-static const u32 zig_zag_4x4[] = {
+- 3, 2, 7, 11, 6, 1, 0, 5, 10, 15, 14, 9, 4, 8, 13, 12
+-};
+-
+-static const u32 zig_zag_8x8[] = {
+- 3, 2, 11, 19, 10, 1, 0, 9, 18, 27, 35, 26, 17, 8, 7, 6,
+- 15, 16, 25, 34, 43, 51, 42, 33, 24, 23, 14, 5, 4, 13, 22, 31,
+- 32, 41, 50, 59, 58, 49, 40, 39, 30, 21, 12, 20, 29, 38, 47, 48,
+- 57, 56, 55, 46, 37, 28, 36, 45, 54, 63, 62, 53, 44, 52, 61, 60
+-};
+-
+ static void
+ reorder_scaling_list(struct hantro_ctx *ctx)
+ {
+@@ -218,33 +201,23 @@ reorder_scaling_list(struct hantro_ctx *ctx)
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
+ const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
+ const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
+- const size_t num_list_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8);
+ const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
+ struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
+- u8 *dst = tbl->scaling_list;
+- const u8 *src;
++ u32 *dst = (u32 *)tbl->scaling_list;
++ const u32 *src;
+ int i, j;
+
+- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_4x4) != list_len_4x4);
+- BUILD_BUG_ON(ARRAY_SIZE(zig_zag_8x8) != list_len_8x8);
+- BUILD_BUG_ON(ARRAY_SIZE(tbl->scaling_list) !=
+- num_list_4x4 * list_len_4x4 +
+- num_list_8x8 * list_len_8x8);
+-
+- src = &scaling->scaling_list_4x4[0][0];
+- for (i = 0; i < num_list_4x4; ++i) {
+- for (j = 0; j < list_len_4x4; ++j)
+- dst[zig_zag_4x4[j]] = src[j];
+- src += list_len_4x4;
+- dst += list_len_4x4;
++ for (i = 0; i < num_list_4x4; i++) {
++ src = (u32 *)&scaling->scaling_list_4x4[i];
++ for (j = 0; j < list_len_4x4 / 4; j++)
++ *dst++ = swab32(src[j]);
+ }
+
+- src = &scaling->scaling_list_8x8[0][0];
+- for (i = 0; i < num_list_8x8; ++i) {
+- for (j = 0; j < list_len_8x8; ++j)
+- dst[zig_zag_8x8[j]] = src[j];
+- src += list_len_8x8;
+- dst += list_len_8x8;
++ /* Only Intra/Inter Y lists */
++ for (i = 0; i < 2; i++) {
++ src = (u32 *)&scaling->scaling_list_8x8[i];
++ for (j = 0; j < list_len_8x8 / 4; j++)
++ *dst++ = swab32(src[j]);
+ }
+ }
+
+@@ -271,6 +244,7 @@ struct hantro_h264_reflist_builder {
+ const struct v4l2_h264_dpb_entry *dpb;
+ s32 pocs[HANTRO_H264_DPB_SIZE];
+ u8 unordered_reflist[HANTRO_H264_DPB_SIZE];
++ int frame_nums[HANTRO_H264_DPB_SIZE];
+ s32 curpoc;
+ u8 num_valid;
+ };
+@@ -294,13 +268,20 @@ static void
+ init_reflist_builder(struct hantro_ctx *ctx,
+ struct hantro_h264_reflist_builder *b)
+ {
++ const struct v4l2_ctrl_h264_slice_params *slice_params;
+ const struct v4l2_ctrl_h264_decode_params *dec_param;
++ const struct v4l2_ctrl_h264_sps *sps;
+ struct vb2_v4l2_buffer *buf = hantro_get_dst_buf(ctx);
+ const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
+ struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q;
++ int cur_frame_num, max_frame_num;
+ unsigned int i;
+
+ dec_param = ctx->h264_dec.ctrls.decode;
++ slice_params = &ctx->h264_dec.ctrls.slices[0];
++ sps = ctx->h264_dec.ctrls.sps;
++ max_frame_num = 1 << (sps->log2_max_frame_num_minus4 + 4);
++ cur_frame_num = slice_params->frame_num;
+
+ memset(b, 0, sizeof(*b));
+ b->dpb = dpb;
+@@ -318,6 +299,18 @@ init_reflist_builder(struct hantro_ctx *ctx,
+ continue;
+
+ buf = to_vb2_v4l2_buffer(vb2_get_buffer(cap_q, buf_idx));
++
++ /*
++ * Handle frame_num wraparound as described in section
++ * '8.2.4.1 Decoding process for picture numbers' of the spec.
++ * TODO: This logic will have to be adjusted when we start
++ * supporting interlaced content.
++ */
++ if (dpb[i].frame_num > cur_frame_num)
++ b->frame_nums[i] = (int)dpb[i].frame_num - max_frame_num;
++ else
++ b->frame_nums[i] = dpb[i].frame_num;
++
+ b->pocs[i] = get_poc(buf->field, dpb[i].top_field_order_cnt,
+ dpb[i].bottom_field_order_cnt);
+ b->unordered_reflist[b->num_valid] = i;
+@@ -353,7 +346,7 @@ static int p_ref_list_cmp(const void *ptra, const void *ptrb, const void *data)
+ * ascending order.
+ */
+ if (!(a->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
+- return b->frame_num - a->frame_num;
++ return builder->frame_nums[idxb] - builder->frame_nums[idxa];
+
+ return a->pic_num - b->pic_num;
+ }
+diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
+index c7cd27efac8a..0b1cb9f9cbd1 100644
+--- a/drivers/staging/media/ipu3/include/intel-ipu3.h
++++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
+@@ -449,7 +449,7 @@ struct ipu3_uapi_awb_fr_config_s {
+ __u16 reserved1;
+ __u32 bayer_sign;
+ __u8 bayer_nf;
+- __u8 reserved2[3];
++ __u8 reserved2[7];
+ } __attribute__((aligned(32))) __packed;
+
+ /**
+diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+index 08c6c9c410cc..c07526c12629 100644
+--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
++++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+@@ -244,8 +244,8 @@ static void cedrus_write_scaling_lists(struct cedrus_ctx *ctx,
+ sizeof(scaling->scaling_list_8x8[0]));
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_8x8_1,
+- scaling->scaling_list_8x8[3],
+- sizeof(scaling->scaling_list_8x8[3]));
++ scaling->scaling_list_8x8[1],
++ sizeof(scaling->scaling_list_8x8[1]));
+
+ cedrus_h264_write_sram(dev, CEDRUS_SRAM_H264_SCALING_LIST_4x4,
+ scaling->scaling_list_4x4,
+diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
+index 6949ea8bc387..51ffd5c002de 100644
+--- a/drivers/target/target_core_iblock.c
++++ b/drivers/target/target_core_iblock.c
+@@ -646,7 +646,9 @@ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio,
+ }
+
+ bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
+- bip_set_seed(bip, bio->bi_iter.bi_sector);
++ /* virtual start sector must be in integrity interval units */
++ bip_set_seed(bip, bio->bi_iter.bi_sector >>
++ (bi->interval_exp - SECTOR_SHIFT));
+
+ pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
+ (unsigned long long)bip->bip_iter.bi_sector);
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index 5e08f2657b90..34f602c3a882 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -619,7 +619,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
+ dev_err(dev, "DMA mapping error for TX.\n");
+ return;
+ }
+- desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
++ desc = dmaengine_prep_slave_sg(chan, sgl, ret,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dma_unmap_sg(dev, sgl, sport->dma_tx_nents,
+diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
+index 6157213a8359..c16234bca78f 100644
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -233,6 +233,7 @@ struct eg20t_port {
+ struct dma_chan *chan_rx;
+ struct scatterlist *sg_tx_p;
+ int nent;
++ int orig_nent;
+ struct scatterlist sg_rx;
+ int tx_dma_use;
+ void *rx_buf_virt;
+@@ -787,9 +788,10 @@ static void pch_dma_tx_complete(void *arg)
+ }
+ xmit->tail &= UART_XMIT_SIZE - 1;
+ async_tx_ack(priv->desc_tx);
+- dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE);
++ dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
+ priv->tx_dma_use = 0;
+ priv->nent = 0;
++ priv->orig_nent = 0;
+ kfree(priv->sg_tx_p);
+ pch_uart_hal_enable_interrupt(priv, PCH_UART_HAL_TX_INT);
+ }
+@@ -1010,6 +1012,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
+ dev_err(priv->port.dev, "%s:dma_map_sg Failed\n", __func__);
+ return 0;
+ }
++ priv->orig_nent = num;
+ priv->nent = nent;
+
+ for (i = 0; i < nent; i++, sg++) {
+diff --git a/fs/affs/super.c b/fs/affs/super.c
+index cc463ae47c12..3812f7bc3a7f 100644
+--- a/fs/affs/super.c
++++ b/fs/affs/super.c
+@@ -561,14 +561,9 @@ affs_remount(struct super_block *sb, int *flags, char *data)
+ int root_block;
+ unsigned long mount_flags;
+ int res = 0;
+- char *new_opts;
+ char volume[32];
+ char *prefix = NULL;
+
+- new_opts = kstrdup(data, GFP_KERNEL);
+- if (data && !new_opts)
+- return -ENOMEM;
+-
+ pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
+
+ sync_filesystem(sb);
+@@ -579,7 +574,6 @@ affs_remount(struct super_block *sb, int *flags, char *data)
+ &blocksize, &prefix, volume,
+ &mount_flags)) {
+ kfree(prefix);
+- kfree(new_opts);
+ return -EINVAL;
+ }
+
+diff --git a/fs/afs/dir.c b/fs/afs/dir.c
+index 497f979018c2..5c794f4b051a 100644
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -908,6 +908,7 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+ {
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
++ struct afs_fid fid = {};
+ struct inode *inode;
+ struct dentry *d;
+ struct key *key;
+@@ -951,21 +952,18 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
+ afs_stat_v(dvnode, n_lookup);
+ inode = afs_do_lookup(dir, dentry, key);
+ key_put(key);
+- if (inode == ERR_PTR(-ENOENT)) {
++ if (inode == ERR_PTR(-ENOENT))
+ inode = afs_try_auto_mntpt(dentry, dir);
+- } else {
+- dentry->d_fsdata =
+- (void *)(unsigned long)dvnode->status.data_version;
+- }
++
++ if (!IS_ERR_OR_NULL(inode))
++ fid = AFS_FS_I(inode)->fid;
++
+ d = d_splice_alias(inode, dentry);
+ if (!IS_ERR_OR_NULL(d)) {
+ d->d_fsdata = dentry->d_fsdata;
+- trace_afs_lookup(dvnode, &d->d_name,
+- inode ? AFS_FS_I(inode) : NULL);
++ trace_afs_lookup(dvnode, &d->d_name, &fid);
+ } else {
+- trace_afs_lookup(dvnode, &dentry->d_name,
+- IS_ERR_OR_NULL(inode) ? NULL
+- : AFS_FS_I(inode));
++ trace_afs_lookup(dvnode, &dentry->d_name, &fid);
+ }
+ return d;
+ }
+diff --git a/fs/afs/super.c b/fs/afs/super.c
+index d9a6036b70b9..7f8a9b3137bf 100644
+--- a/fs/afs/super.c
++++ b/fs/afs/super.c
+@@ -404,6 +404,7 @@ static int afs_test_super(struct super_block *sb, struct fs_context *fc)
+ return (as->net_ns == fc->net_ns &&
+ as->volume &&
+ as->volume->vid == ctx->volume->vid &&
++ as->cell == ctx->cell &&
+ !as->dyn_root);
+ }
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index eaafd00f93d4..5739b8fc7fff 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1903,9 +1903,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+ (iocb->ki_flags & IOCB_NOWAIT))
+ return -EOPNOTSUPP;
+
+- if (!inode_trylock(inode)) {
+- if (iocb->ki_flags & IOCB_NOWAIT)
++ if (iocb->ki_flags & IOCB_NOWAIT) {
++ if (!inode_trylock(inode))
+ return -EAGAIN;
++ } else {
+ inode_lock(inode);
+ }
+
+diff --git a/fs/buffer.c b/fs/buffer.c
+index 7744488f7bde..91ceca52d14f 100644
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -2991,7 +2991,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
+ * errors, this only handles the "we need to be able to
+ * do IO at the final sector" case.
+ */
+-void guard_bio_eod(int op, struct bio *bio)
++void guard_bio_eod(struct bio *bio)
+ {
+ sector_t maxsector;
+ struct hd_struct *part;
+@@ -3055,15 +3055,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
+ bio->bi_end_io = end_bio_bh_io_sync;
+ bio->bi_private = bh;
+
+- /* Take care of bh's that straddle the end of the device */
+- guard_bio_eod(op, bio);
+-
+ if (buffer_meta(bh))
+ op_flags |= REQ_META;
+ if (buffer_prio(bh))
+ op_flags |= REQ_PRIO;
+ bio_set_op_attrs(bio, op, op_flags);
+
++ /* Take care of bh's that straddle the end of the device */
++ guard_bio_eod(bio);
++
+ if (wbc) {
+ wbc_init_bio(wbc, bio);
+ wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
+diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
+index 8b0b512c5792..afe1f03aabe3 100644
+--- a/fs/cifs/smb2file.c
++++ b/fs/cifs/smb2file.c
+@@ -67,7 +67,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
+ goto out;
+
+
+- if (oparms->tcon->use_resilient) {
++ if (oparms->tcon->use_resilient) {
+ /* default timeout is 0, servers pick default (120 seconds) */
+ nr_ioctl_req.Timeout =
+ cpu_to_le32(oparms->tcon->handle_timeout);
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 5755e897a5f0..2e9c73165800 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -2098,7 +2098,7 @@ static int __write_data_page(struct page *page, bool *submitted,
+ loff_t i_size = i_size_read(inode);
+ const pgoff_t end_index = ((unsigned long long) i_size)
+ >> PAGE_SHIFT;
+- loff_t psize = (page->index + 1) << PAGE_SHIFT;
++ loff_t psize = (loff_t)(page->index + 1) << PAGE_SHIFT;
+ unsigned offset = 0;
+ bool need_balance_fs = false;
+ int err = 0;
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 8ed8e4328bd1..fae665691481 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1139,7 +1139,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
+ }
+ dn.ofs_in_node++;
+ i++;
+- new_size = (dst + i) << PAGE_SHIFT;
++ new_size = (loff_t)(dst + i) << PAGE_SHIFT;
+ if (dst_inode->i_size < new_size)
+ f2fs_i_size_write(dst_inode, new_size);
+ } while (--ilen && (do_replace[i] || blkaddr[i] == NULL_ADDR));
+diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
+index c53e3b892210..01ff37b76652 100644
+--- a/fs/gfs2/file.c
++++ b/fs/gfs2/file.c
+@@ -6,6 +6,7 @@
+
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/compat.h>
+ #include <linux/completion.h>
+ #include <linux/buffer_head.h>
+ #include <linux/pagemap.h>
+@@ -354,6 +355,31 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+ return -ENOTTY;
+ }
+
++#ifdef CONFIG_COMPAT
++static long gfs2_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
++{
++ switch(cmd) {
++ /* These are just misnamed, they actually get/put from/to user an int */
++ case FS_IOC32_GETFLAGS:
++ cmd = FS_IOC_GETFLAGS;
++ break;
++ case FS_IOC32_SETFLAGS:
++ cmd = FS_IOC_SETFLAGS;
++ break;
++ /* Keep this list in sync with gfs2_ioctl */
++ case FITRIM:
++ case FS_IOC_GETFSLABEL:
++ break;
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++ return gfs2_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
++}
++#else
++#define gfs2_compat_ioctl NULL
++#endif
++
+ /**
+ * gfs2_size_hint - Give a hint to the size of a write request
+ * @filep: The struct file
+@@ -1294,6 +1320,7 @@ const struct file_operations gfs2_file_fops = {
+ .write_iter = gfs2_file_write_iter,
+ .iopoll = iomap_dio_iopoll,
+ .unlocked_ioctl = gfs2_ioctl,
++ .compat_ioctl = gfs2_compat_ioctl,
+ .mmap = gfs2_mmap,
+ .open = gfs2_open,
+ .release = gfs2_release,
+@@ -1309,6 +1336,7 @@ const struct file_operations gfs2_file_fops = {
+ const struct file_operations gfs2_dir_fops = {
+ .iterate_shared = gfs2_readdir,
+ .unlocked_ioctl = gfs2_ioctl,
++ .compat_ioctl = gfs2_compat_ioctl,
+ .open = gfs2_open,
+ .release = gfs2_release,
+ .fsync = gfs2_fsync,
+@@ -1325,6 +1353,7 @@ const struct file_operations gfs2_file_fops_nolock = {
+ .write_iter = gfs2_file_write_iter,
+ .iopoll = iomap_dio_iopoll,
+ .unlocked_ioctl = gfs2_ioctl,
++ .compat_ioctl = gfs2_compat_ioctl,
+ .mmap = gfs2_mmap,
+ .open = gfs2_open,
+ .release = gfs2_release,
+@@ -1338,6 +1367,7 @@ const struct file_operations gfs2_file_fops_nolock = {
+ const struct file_operations gfs2_dir_fops_nolock = {
+ .iterate_shared = gfs2_readdir,
+ .unlocked_ioctl = gfs2_ioctl,
++ .compat_ioctl = gfs2_compat_ioctl,
+ .open = gfs2_open,
+ .release = gfs2_release,
+ .fsync = gfs2_fsync,
+diff --git a/fs/internal.h b/fs/internal.h
+index 315fcd8d237c..7651e8b8ef13 100644
+--- a/fs/internal.h
++++ b/fs/internal.h
+@@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
+ /*
+ * buffer.c
+ */
+-extern void guard_bio_eod(int rw, struct bio *bio);
++extern void guard_bio_eod(struct bio *bio);
+ extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+ get_block_t *get_block, struct iomap *iomap);
+
+diff --git a/fs/mpage.c b/fs/mpage.c
+index a63620cdb73a..ccba3c4c4479 100644
+--- a/fs/mpage.c
++++ b/fs/mpage.c
+@@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
+ {
+ bio->bi_end_io = mpage_end_io;
+ bio_set_op_attrs(bio, op, op_flags);
+- guard_bio_eod(op, bio);
++ guard_bio_eod(bio);
+ submit_bio(bio);
+ return NULL;
+ }
+diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
+index cbc17a203248..887f9136a9db 100644
+--- a/fs/nfs/nfs2xdr.c
++++ b/fs/nfs/nfs2xdr.c
+@@ -370,7 +370,7 @@ static void encode_sattr(struct xdr_stream *xdr, const struct iattr *attr,
+ } else
+ p = xdr_time_not_set(p);
+ if (attr->ia_valid & ATTR_MTIME_SET) {
+- ts = timespec64_to_timespec(attr->ia_atime);
++ ts = timespec64_to_timespec(attr->ia_mtime);
+ xdr_encode_time(p, &ts);
+ } else if (attr->ia_valid & ATTR_MTIME) {
+ ts = timespec64_to_timespec(attr->ia_mtime);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index caacf5e7f5e1..f26d714f9f28 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -521,9 +521,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server,
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_SEQ_FALSE_RETRY:
+ case -NFS4ERR_SEQ_MISORDERED:
+- dprintk("%s ERROR: %d Reset session\n", __func__,
+- errorcode);
+- nfs4_schedule_session_recovery(clp->cl_session, errorcode);
++ /* Handled in nfs41_sequence_process() */
+ goto wait_on_recovery;
+ #endif /* defined(CONFIG_NFS_V4_1) */
+ case -NFS4ERR_FILE_OPEN:
+@@ -782,6 +780,7 @@ static int nfs41_sequence_process(struct rpc_task *task,
+ struct nfs4_session *session;
+ struct nfs4_slot *slot = res->sr_slot;
+ struct nfs_client *clp;
++ int status;
+ int ret = 1;
+
+ if (slot == NULL)
+@@ -793,8 +792,13 @@ static int nfs41_sequence_process(struct rpc_task *task,
+ session = slot->table->session;
+
+ trace_nfs4_sequence_done(session, res);
++
++ status = res->sr_status;
++ if (task->tk_status == -NFS4ERR_DEADSESSION)
++ status = -NFS4ERR_DEADSESSION;
++
+ /* Check the SEQUENCE operation status */
+- switch (res->sr_status) {
++ switch (status) {
+ case 0:
+ /* Mark this sequence number as having been acked */
+ nfs4_slot_sequence_acked(slot, slot->seq_nr);
+@@ -866,6 +870,10 @@ static int nfs41_sequence_process(struct rpc_task *task,
+ */
+ slot->seq_nr = slot->seq_nr_highest_sent;
+ goto out_retry;
++ case -NFS4ERR_BADSESSION:
++ case -NFS4ERR_DEADSESSION:
++ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
++ goto session_recover;
+ default:
+ /* Just update the slot sequence no. */
+ slot->seq_done = 1;
+@@ -876,8 +884,10 @@ out:
+ out_noaction:
+ return ret;
+ session_recover:
+- nfs4_schedule_session_recovery(session, res->sr_status);
+- goto retry_nowait;
++ nfs4_schedule_session_recovery(session, status);
++ dprintk("%s ERROR: %d Reset session\n", __func__, status);
++ nfs41_sequence_free_slot(res);
++ goto out;
+ retry_new_seq:
+ ++slot->seq_nr;
+ retry_nowait:
+@@ -2188,7 +2198,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+- nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
+ return -EAGAIN;
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+@@ -6243,8 +6252,10 @@ static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
+
+ d_data = (struct nfs4_delegreturndata *)data;
+
+- if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
++ if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task)) {
++ nfs4_sequence_done(task, &d_data->res.seq_res);
+ return;
++ }
+
+ lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
+ if (lo && !pnfs_layout_is_valid(lo)) {
+@@ -7820,6 +7831,15 @@ nfs41_same_server_scope(struct nfs41_server_scope *a,
+ static void
+ nfs4_bind_one_conn_to_session_done(struct rpc_task *task, void *calldata)
+ {
++ struct nfs41_bind_conn_to_session_args *args = task->tk_msg.rpc_argp;
++ struct nfs_client *clp = args->client;
++
++ switch (task->tk_status) {
++ case -NFS4ERR_BADSESSION:
++ case -NFS4ERR_DEADSESSION:
++ nfs4_schedule_session_recovery(clp->cl_session,
++ task->tk_status);
++ }
+ }
+
+ static const struct rpc_call_ops nfs4_bind_one_conn_to_session_ops = {
+@@ -8867,8 +8887,6 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+- nfs4_schedule_session_recovery(clp->cl_session,
+- task->tk_status);
+ break;
+ default:
+ nfs4_schedule_lease_recovery(clp);
+diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
+index 10cefb0c07c7..c4b1a89b8845 100644
+--- a/fs/nfsd/Kconfig
++++ b/fs/nfsd/Kconfig
+@@ -73,7 +73,7 @@ config NFSD_V4
+ select NFSD_V3
+ select FS_POSIX_ACL
+ select SUNRPC_GSS
+- select CRYPTO
++ select CRYPTO_SHA256
+ select GRACE_PERIOD
+ help
+ This option enables support in your system's NFS server for
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 38c0aeda500e..4798667af647 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1298,7 +1298,8 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ out:
+ return status;
+ out_err:
+- cleanup_async_copy(async_copy);
++ if (async_copy)
++ cleanup_async_copy(async_copy);
+ goto out;
+ }
+
+diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
+index cdc75ad4438b..c35c0ebaf722 100644
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -1578,6 +1578,7 @@ nfsd4_cld_tracking_init(struct net *net)
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ bool running;
+ int retries = 10;
++ struct crypto_shash *tfm;
+
+ status = nfs4_cld_state_init(net);
+ if (status)
+@@ -1586,11 +1587,6 @@ nfsd4_cld_tracking_init(struct net *net)
+ status = __nfsd4_init_cld_pipe(net);
+ if (status)
+ goto err_shutdown;
+- nn->cld_net->cn_tfm = crypto_alloc_shash("sha256", 0, 0);
+- if (IS_ERR(nn->cld_net->cn_tfm)) {
+- status = PTR_ERR(nn->cld_net->cn_tfm);
+- goto err_remove;
+- }
+
+ /*
+ * rpc pipe upcalls take 30 seconds to time out, so we don't want to
+@@ -1607,6 +1603,12 @@ nfsd4_cld_tracking_init(struct net *net)
+ status = -ETIMEDOUT;
+ goto err_remove;
+ }
++ tfm = crypto_alloc_shash("sha256", 0, 0);
++ if (IS_ERR(tfm)) {
++ status = PTR_ERR(tfm);
++ goto err_remove;
++ }
++ nn->cld_net->cn_tfm = tfm;
+
+ status = nfsd4_cld_get_version(nn);
+ if (status == -EOPNOTSUPP)
+diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
+index 699a560efbb0..900e4ef686bf 100644
+--- a/fs/ocfs2/journal.c
++++ b/fs/ocfs2/journal.c
+@@ -1066,6 +1066,14 @@ int ocfs2_journal_load(struct ocfs2_journal *journal, int local, int replayed)
+
+ ocfs2_clear_journal_error(osb->sb, journal->j_journal, osb->slot_num);
+
++ if (replayed) {
++ jbd2_journal_lock_updates(journal->j_journal);
++ status = jbd2_journal_flush(journal->j_journal);
++ jbd2_journal_unlock_updates(journal->j_journal);
++ if (status < 0)
++ mlog_errno(status);
++ }
++
+ status = ocfs2_journal_toggle_dirty(osb, 1, replayed);
+ if (status < 0) {
+ mlog_errno(status);
+diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
+index 4fd9683b8245..826dad0243dc 100644
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -899,7 +899,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
+ fname_name(&nm) = xent->name;
+ fname_len(&nm) = le16_to_cpu(xent->nlen);
+
+- xino = ubifs_iget(c->vfs_sb, xent->inum);
++ xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum));
+ if (IS_ERR(xino)) {
+ err = PTR_ERR(xino);
+ ubifs_err(c, "dead directory entry '%s', error %d",
+diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
+index 3b4b4114f208..54d6db61106f 100644
+--- a/fs/ubifs/orphan.c
++++ b/fs/ubifs/orphan.c
+@@ -631,12 +631,17 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+ ino_t inum;
+ int i, n, err, first = 1;
+
++ ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
++ if (!ino)
++ return -ENOMEM;
++
+ list_for_each_entry(snod, &sleb->nodes, list) {
+ if (snod->type != UBIFS_ORPH_NODE) {
+ ubifs_err(c, "invalid node type %d in orphan area at %d:%d",
+ snod->type, sleb->lnum, snod->offs);
+ ubifs_dump_node(c, snod->node);
+- return -EINVAL;
++ err = -EINVAL;
++ goto out_free;
+ }
+
+ orph = snod->node;
+@@ -663,20 +668,18 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
+ ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d",
+ cmt_no, sleb->lnum, snod->offs);
+ ubifs_dump_node(c, snod->node);
+- return -EINVAL;
++ err = -EINVAL;
++ goto out_free;
+ }
+ dbg_rcvry("out of date LEB %d", sleb->lnum);
+ *outofdate = 1;
+- return 0;
++ err = 0;
++ goto out_free;
+ }
+
+ if (first)
+ first = 0;
+
+- ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
+- if (!ino)
+- return -ENOMEM;
+-
+ n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
+ for (i = 0; i < n; i++) {
+ union ubifs_key key1, key2;
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index 7d4547e5202d..5e1e8ec0589e 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -2267,10 +2267,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
+ }
+ } else {
+ err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
+- if (err) {
+- kfree(c);
++ if (err)
+ goto out_deact;
+- }
+ /* We do not support atime */
+ sb->s_flags |= SB_ACTIVE;
+ if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
+diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
+index a950a22c4890..cac7404b2bdd 100644
+--- a/include/asm-generic/cacheflush.h
++++ b/include/asm-generic/cacheflush.h
+@@ -11,71 +11,102 @@
+ * The cache doesn't need to be flushed when TLB entries change when
+ * the cache is mapped to physical memory, not virtual memory
+ */
++#ifndef flush_cache_all
+ static inline void flush_cache_all(void)
+ {
+ }
++#endif
+
++#ifndef flush_cache_mm
+ static inline void flush_cache_mm(struct mm_struct *mm)
+ {
+ }
++#endif
+
++#ifndef flush_cache_dup_mm
+ static inline void flush_cache_dup_mm(struct mm_struct *mm)
+ {
+ }
++#endif
+
++#ifndef flush_cache_range
+ static inline void flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end)
+ {
+ }
++#endif
+
++#ifndef flush_cache_page
+ static inline void flush_cache_page(struct vm_area_struct *vma,
+ unsigned long vmaddr,
+ unsigned long pfn)
+ {
+ }
++#endif
+
++#ifndef flush_dcache_page
+ static inline void flush_dcache_page(struct page *page)
+ {
+ }
++#endif
+
++#ifndef flush_dcache_mmap_lock
+ static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+ {
+ }
++#endif
+
++#ifndef flush_dcache_mmap_unlock
+ static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+ {
+ }
++#endif
+
++#ifndef flush_icache_range
+ static inline void flush_icache_range(unsigned long start, unsigned long end)
+ {
+ }
++#endif
+
++#ifndef flush_icache_page
+ static inline void flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+ {
+ }
++#endif
+
++#ifndef flush_icache_user_range
+ static inline void flush_icache_user_range(struct vm_area_struct *vma,
+ struct page *page,
+ unsigned long addr, int len)
+ {
+ }
++#endif
+
++#ifndef flush_cache_vmap
+ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+ {
+ }
++#endif
+
++#ifndef flush_cache_vunmap
+ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+ {
+ }
++#endif
+
+-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
++#ifndef copy_to_user_page
++#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_user_range(vma, page, vaddr, len); \
+ } while (0)
++#endif
++
++#ifndef copy_from_user_page
+ #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
++#endif
+
+ #endif /* __ASM_CACHEFLUSH_H */
+diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
+index 734b6f7081b8..3175dfeaed2c 100644
+--- a/include/crypto/internal/skcipher.h
++++ b/include/crypto/internal/skcipher.h
+@@ -205,19 +205,6 @@ static inline unsigned int crypto_skcipher_alg_max_keysize(
+ return alg->max_keysize;
+ }
+
+-static inline unsigned int crypto_skcipher_alg_chunksize(
+- struct skcipher_alg *alg)
+-{
+- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+- CRYPTO_ALG_TYPE_BLKCIPHER)
+- return alg->base.cra_blocksize;
+-
+- if (alg->base.cra_ablkcipher.encrypt)
+- return alg->base.cra_blocksize;
+-
+- return alg->chunksize;
+-}
+-
+ static inline unsigned int crypto_skcipher_alg_walksize(
+ struct skcipher_alg *alg)
+ {
+@@ -231,23 +218,6 @@ static inline unsigned int crypto_skcipher_alg_walksize(
+ return alg->walksize;
+ }
+
+-/**
+- * crypto_skcipher_chunksize() - obtain chunk size
+- * @tfm: cipher handle
+- *
+- * The block size is set to one for ciphers such as CTR. However,
+- * you still need to provide incremental updates in multiples of
+- * the underlying block size as the IV does not have sub-block
+- * granularity. This is known in this API as the chunk size.
+- *
+- * Return: chunk size in bytes
+- */
+-static inline unsigned int crypto_skcipher_chunksize(
+- struct crypto_skcipher *tfm)
+-{
+- return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+-}
+-
+ /**
+ * crypto_skcipher_walksize() - obtain walk size
+ * @tfm: cipher handle
+diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
+index 37c164234d97..aada87916918 100644
+--- a/include/crypto/skcipher.h
++++ b/include/crypto/skcipher.h
+@@ -304,6 +304,36 @@ static inline unsigned int crypto_skcipher_blocksize(
+ return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
+ }
+
++static inline unsigned int crypto_skcipher_alg_chunksize(
++ struct skcipher_alg *alg)
++{
++ if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
++ CRYPTO_ALG_TYPE_BLKCIPHER)
++ return alg->base.cra_blocksize;
++
++ if (alg->base.cra_ablkcipher.encrypt)
++ return alg->base.cra_blocksize;
++
++ return alg->chunksize;
++}
++
++/**
++ * crypto_skcipher_chunksize() - obtain chunk size
++ * @tfm: cipher handle
++ *
++ * The block size is set to one for ciphers such as CTR. However,
++ * you still need to provide incremental updates in multiples of
++ * the underlying block size as the IV does not have sub-block
++ * granularity. This is known in this API as the chunk size.
++ *
++ * Return: chunk size in bytes
++ */
++static inline unsigned int crypto_skcipher_chunksize(
++ struct crypto_skcipher *tfm)
++{
++ return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
++}
++
+ static inline unsigned int crypto_sync_skcipher_blocksize(
+ struct crypto_sync_skcipher *tfm)
+ {
+diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
+index d4ee6e942562..38555435a64a 100644
+--- a/include/linux/uaccess.h
++++ b/include/linux/uaccess.h
+@@ -337,6 +337,18 @@ extern long __probe_user_read(void *dst, const void __user *src, size_t size);
+ extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
+ extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+
++/*
++ * probe_user_write(): safely attempt to write to a location in user space
++ * @dst: address to write to
++ * @src: pointer to the data that shall be written
++ * @size: size of the data chunk
++ *
++ * Safely write to address @dst from the buffer at @src. If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
++extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
++
+ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+ extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
+ long count);
+diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
+index 31f76b6abf71..bbdd1542d6f1 100644
+--- a/include/sound/simple_card_utils.h
++++ b/include/sound/simple_card_utils.h
+@@ -8,6 +8,7 @@
+ #ifndef __SIMPLE_CARD_UTILS_H
+ #define __SIMPLE_CARD_UTILS_H
+
++#include <linux/clk.h>
+ #include <sound/soc.h>
+
+ #define asoc_simple_init_hp(card, sjack, prefix) \
+diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
+index d5ec4fac82ae..564ba1b5cf57 100644
+--- a/include/trace/events/afs.h
++++ b/include/trace/events/afs.h
+@@ -915,9 +915,9 @@ TRACE_EVENT(afs_call_state,
+
+ TRACE_EVENT(afs_lookup,
+ TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name,
+- struct afs_vnode *vnode),
++ struct afs_fid *fid),
+
+- TP_ARGS(dvnode, name, vnode),
++ TP_ARGS(dvnode, name, fid),
+
+ TP_STRUCT__entry(
+ __field_struct(struct afs_fid, dfid )
+@@ -928,13 +928,7 @@ TRACE_EVENT(afs_lookup,
+ TP_fast_assign(
+ int __len = min_t(int, name->len, 23);
+ __entry->dfid = dvnode->fid;
+- if (vnode) {
+- __entry->fid = vnode->fid;
+- } else {
+- __entry->fid.vid = 0;
+- __entry->fid.vnode = 0;
+- __entry->fid.unique = 0;
+- }
++ __entry->fid = *fid;
+ memcpy(__entry->name, name->name, __len);
+ __entry->name[__len] = 0;
+ ),
+diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
+index a13830616107..7fd11ec1c9a4 100644
+--- a/include/trace/events/rpcrdma.h
++++ b/include/trace/events/rpcrdma.h
+@@ -735,6 +735,31 @@ TRACE_EVENT(xprtrdma_post_recvs,
+ )
+ );
+
++TRACE_EVENT(xprtrdma_post_linv,
++ TP_PROTO(
++ const struct rpcrdma_req *req,
++ int status
++ ),
++
++ TP_ARGS(req, status),
++
++ TP_STRUCT__entry(
++ __field(const void *, req)
++ __field(int, status)
++ __field(u32, xid)
++ ),
++
++ TP_fast_assign(
++ __entry->req = req;
++ __entry->status = status;
++ __entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
++ ),
++
++ TP_printk("req=%p xid=0x%08x status=%d",
++ __entry->req, __entry->xid, __entry->status
++ )
++);
++
+ /**
+ ** Completion events
+ **/
+diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h
+deleted file mode 100644
+index f80495baa969..000000000000
+--- a/include/uapi/rdma/nes-abi.h
++++ /dev/null
+@@ -1,115 +0,0 @@
+-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
+-/*
+- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
+- * Copyright (c) 2005 Topspin Communications. All rights reserved.
+- * Copyright (c) 2005 Cisco Systems. All rights reserved.
+- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
+- *
+- * This software is available to you under a choice of one of two
+- * licenses. You may choose to be licensed under the terms of the GNU
+- * General Public License (GPL) Version 2, available from the file
+- * COPYING in the main directory of this source tree, or the
+- * OpenIB.org BSD license below:
+- *
+- * Redistribution and use in source and binary forms, with or
+- * without modification, are permitted provided that the following
+- * conditions are met:
+- *
+- * - Redistributions of source code must retain the above
+- * copyright notice, this list of conditions and the following
+- * disclaimer.
+- *
+- * - Redistributions in binary form must reproduce the above
+- * copyright notice, this list of conditions and the following
+- * disclaimer in the documentation and/or other materials
+- * provided with the distribution.
+- *
+- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+- * SOFTWARE.
+- *
+- */
+-
+-#ifndef NES_ABI_USER_H
+-#define NES_ABI_USER_H
+-
+-#include <linux/types.h>
+-
+-#define NES_ABI_USERSPACE_VER 2
+-#define NES_ABI_KERNEL_VER 2
+-
+-/*
+- * Make sure that all structs defined in this file remain laid out so
+- * that they pack the same way on 32-bit and 64-bit architectures (to
+- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+- * In particular do not use pointer types -- pass pointers in __u64
+- * instead.
+- */
+-
+-struct nes_alloc_ucontext_req {
+- __u32 reserved32;
+- __u8 userspace_ver;
+- __u8 reserved8[3];
+-};
+-
+-struct nes_alloc_ucontext_resp {
+- __u32 max_pds; /* maximum pds allowed for this user process */
+- __u32 max_qps; /* maximum qps allowed for this user process */
+- __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
+- __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
+- __u8 kernel_ver;
+- __u8 reserved[2];
+-};
+-
+-struct nes_alloc_pd_resp {
+- __u32 pd_id;
+- __u32 mmap_db_index;
+-};
+-
+-struct nes_create_cq_req {
+- __aligned_u64 user_cq_buffer;
+- __u32 mcrqf;
+- __u8 reserved[4];
+-};
+-
+-struct nes_create_qp_req {
+- __aligned_u64 user_wqe_buffers;
+- __aligned_u64 user_qp_buffer;
+-};
+-
+-enum iwnes_memreg_type {
+- IWNES_MEMREG_TYPE_MEM = 0x0000,
+- IWNES_MEMREG_TYPE_QP = 0x0001,
+- IWNES_MEMREG_TYPE_CQ = 0x0002,
+- IWNES_MEMREG_TYPE_MW = 0x0003,
+- IWNES_MEMREG_TYPE_FMR = 0x0004,
+- IWNES_MEMREG_TYPE_FMEM = 0x0005,
+-};
+-
+-struct nes_mem_reg_req {
+- __u32 reg_type; /* indicates if id is memory, QP or CQ */
+- __u32 reserved;
+-};
+-
+-struct nes_create_cq_resp {
+- __u32 cq_id;
+- __u32 cq_size;
+- __u32 mmap_db_index;
+- __u32 reserved;
+-};
+-
+-struct nes_create_qp_resp {
+- __u32 qp_id;
+- __u32 actual_sq_size;
+- __u32 actual_rq_size;
+- __u32 mmap_sq_db_index;
+- __u32 mmap_rq_db_index;
+- __u32 nes_drv_opt;
+-};
+-
+-#endif /* NES_ABI_USER_H */
+diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
+index a3eaf08e7dd3..8bd69062fbe5 100644
+--- a/kernel/bpf/cgroup.c
++++ b/kernel/bpf/cgroup.c
+@@ -35,8 +35,8 @@ void cgroup_bpf_offline(struct cgroup *cgrp)
+ */
+ static void cgroup_bpf_release(struct work_struct *work)
+ {
+- struct cgroup *cgrp = container_of(work, struct cgroup,
+- bpf.release_work);
++ struct cgroup *p, *cgrp = container_of(work, struct cgroup,
++ bpf.release_work);
+ enum bpf_cgroup_storage_type stype;
+ struct bpf_prog_array *old_array;
+ unsigned int type;
+@@ -65,6 +65,9 @@ static void cgroup_bpf_release(struct work_struct *work)
+
+ mutex_unlock(&cgroup_mutex);
+
++ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
++ cgroup_bpf_put(p);
++
+ percpu_ref_exit(&cgrp->bpf.refcnt);
+ cgroup_put(cgrp);
+ }
+@@ -199,6 +202,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
+ */
+ #define NR ARRAY_SIZE(cgrp->bpf.effective)
+ struct bpf_prog_array *arrays[NR] = {};
++ struct cgroup *p;
+ int ret, i;
+
+ ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
+@@ -206,6 +210,9 @@ int cgroup_bpf_inherit(struct cgroup *cgrp)
+ if (ret)
+ return ret;
+
++ for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
++ cgroup_bpf_get(p);
++
+ for (i = 0; i < NR; i++)
+ INIT_LIST_HEAD(&cgrp->bpf.progs[i]);
+
+diff --git a/kernel/cred.c b/kernel/cred.c
+index 9ed51b70ed80..809a985b1793 100644
+--- a/kernel/cred.c
++++ b/kernel/cred.c
+@@ -175,8 +175,8 @@ void exit_creds(struct task_struct *tsk)
+ put_cred(cred);
+
+ #ifdef CONFIG_KEYS_REQUEST_CACHE
+- key_put(current->cached_requested_key);
+- current->cached_requested_key = NULL;
++ key_put(tsk->cached_requested_key);
++ tsk->cached_requested_key = NULL;
+ #endif
+ }
+
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index 44bd08f2443b..89bdac61233d 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -163,7 +163,7 @@ static const struct bpf_func_proto bpf_probe_read_proto = {
+ .arg3_type = ARG_ANYTHING,
+ };
+
+-BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
++BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src,
+ u32, size)
+ {
+ /*
+@@ -186,10 +186,8 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
+ return -EPERM;
+ if (unlikely(!nmi_uaccess_okay()))
+ return -EPERM;
+- if (!access_ok(unsafe_ptr, size))
+- return -EPERM;
+
+- return probe_kernel_write(unsafe_ptr, src, size);
++ return probe_user_write(unsafe_ptr, src, size);
+ }
+
+ static const struct bpf_func_proto bpf_probe_write_user_proto = {
+diff --git a/mm/maccess.c b/mm/maccess.c
+index d065736f6b87..2d3c3d01064c 100644
+--- a/mm/maccess.c
++++ b/mm/maccess.c
+@@ -18,6 +18,18 @@ probe_read_common(void *dst, const void __user *src, size_t size)
+ return ret ? -EFAULT : 0;
+ }
+
++static __always_inline long
++probe_write_common(void __user *dst, const void *src, size_t size)
++{
++ long ret;
++
++ pagefault_disable();
++ ret = __copy_to_user_inatomic(dst, src, size);
++ pagefault_enable();
++
++ return ret ? -EFAULT : 0;
++}
++
+ /**
+ * probe_kernel_read(): safely attempt to read from a kernel-space location
+ * @dst: pointer to the buffer that shall take the data
+@@ -85,6 +97,7 @@ EXPORT_SYMBOL_GPL(probe_user_read);
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
++
+ long __weak probe_kernel_write(void *dst, const void *src, size_t size)
+ __attribute__((alias("__probe_kernel_write")));
+
+@@ -94,15 +107,39 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+- pagefault_disable();
+- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
+- pagefault_enable();
++ ret = probe_write_common((__force void __user *)dst, src, size);
+ set_fs(old_fs);
+
+- return ret ? -EFAULT : 0;
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(probe_kernel_write);
+
++/**
++ * probe_user_write(): safely attempt to write to a user-space location
++ * @dst: address to write to
++ * @src: pointer to the data that shall be written
++ * @size: size of the data chunk
++ *
++ * Safely write to address @dst from the buffer at @src. If a kernel fault
++ * happens, handle that and return -EFAULT.
++ */
++
++long __weak probe_user_write(void __user *dst, const void *src, size_t size)
++ __attribute__((alias("__probe_user_write")));
++
++long __probe_user_write(void __user *dst, const void *src, size_t size)
++{
++ long ret = -EFAULT;
++ mm_segment_t old_fs = get_fs();
++
++ set_fs(USER_DS);
++ if (access_ok(dst, size))
++ ret = probe_write_common(dst, src, size);
++ set_fs(old_fs);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(probe_user_write);
+
+ /**
+ * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address.
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index 0675d022584e..ded2d5227678 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -793,15 +793,18 @@ static void sk_psock_strp_data_ready(struct sock *sk)
+ static void sk_psock_write_space(struct sock *sk)
+ {
+ struct sk_psock *psock;
+- void (*write_space)(struct sock *sk);
++ void (*write_space)(struct sock *sk) = NULL;
+
+ rcu_read_lock();
+ psock = sk_psock(sk);
+- if (likely(psock && sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)))
+- schedule_work(&psock->work);
+- write_space = psock->saved_write_space;
++ if (likely(psock)) {
++ if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
++ schedule_work(&psock->work);
++ write_space = psock->saved_write_space;
++ }
+ rcu_read_unlock();
+- write_space(sk);
++ if (write_space)
++ write_space(sk);
+ }
+
+ int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock)
+diff --git a/net/hsr/hsr_debugfs.c b/net/hsr/hsr_debugfs.c
+index 6618a9d8e58e..d5f709b940ff 100644
+--- a/net/hsr/hsr_debugfs.c
++++ b/net/hsr/hsr_debugfs.c
+@@ -20,6 +20,8 @@
+ #include "hsr_main.h"
+ #include "hsr_framereg.h"
+
++static struct dentry *hsr_debugfs_root_dir;
++
+ static void print_mac_address(struct seq_file *sfp, unsigned char *mac)
+ {
+ seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:",
+@@ -63,6 +65,19 @@ hsr_node_table_open(struct inode *inode, struct file *filp)
+ return single_open(filp, hsr_node_table_show, inode->i_private);
+ }
+
++void hsr_debugfs_rename(struct net_device *dev)
++{
++ struct hsr_priv *priv = netdev_priv(dev);
++ struct dentry *d;
++
++ d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root,
++ hsr_debugfs_root_dir, dev->name);
++ if (IS_ERR(d))
++ netdev_warn(dev, "failed to rename\n");
++ else
++ priv->node_tbl_root = d;
++}
++
+ static const struct file_operations hsr_fops = {
+ .open = hsr_node_table_open,
+ .read = seq_read,
+@@ -81,9 +96,9 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+ {
+ struct dentry *de = NULL;
+
+- de = debugfs_create_dir(hsr_dev->name, NULL);
++ de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir);
+ if (IS_ERR(de)) {
+- pr_err("Cannot create hsr debugfs root\n");
++ pr_err("Cannot create hsr debugfs directory\n");
+ return;
+ }
+
+@@ -93,7 +108,7 @@ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev)
+ priv->node_tbl_root, priv,
+ &hsr_fops);
+ if (IS_ERR(de)) {
+- pr_err("Cannot create hsr node_table directory\n");
++ pr_err("Cannot create hsr node_table file\n");
+ debugfs_remove(priv->node_tbl_root);
+ priv->node_tbl_root = NULL;
+ return;
+@@ -115,3 +130,18 @@ hsr_debugfs_term(struct hsr_priv *priv)
+ debugfs_remove(priv->node_tbl_root);
+ priv->node_tbl_root = NULL;
+ }
++
++void hsr_debugfs_create_root(void)
++{
++ hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL);
++ if (IS_ERR(hsr_debugfs_root_dir)) {
++ pr_err("Cannot create hsr debugfs root directory\n");
++ hsr_debugfs_root_dir = NULL;
++ }
++}
++
++void hsr_debugfs_remove_root(void)
++{
++ /* debugfs_remove() internally checks NULL and ERROR */
++ debugfs_remove(hsr_debugfs_root_dir);
++}
+diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
+index 62c03f0d0079..c7bd6c49fadf 100644
+--- a/net/hsr/hsr_device.c
++++ b/net/hsr/hsr_device.c
+@@ -272,6 +272,8 @@ static void send_hsr_supervision_frame(struct hsr_port *master,
+ skb->dev->dev_addr, skb->len) <= 0)
+ goto out;
+ skb_reset_mac_header(skb);
++ skb_reset_network_header(skb);
++ skb_reset_transport_header(skb);
+
+ if (hsr_ver > 0) {
+ hsr_tag = skb_put(skb, sizeof(struct hsr_tag));
+diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
+index 6deb8fa8d5c8..9e389accbfc7 100644
+--- a/net/hsr/hsr_main.c
++++ b/net/hsr/hsr_main.c
+@@ -45,6 +45,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
+ case NETDEV_CHANGE: /* Link (carrier) state changes */
+ hsr_check_carrier_and_operstate(hsr);
+ break;
++ case NETDEV_CHANGENAME:
++ if (is_hsr_master(dev))
++ hsr_debugfs_rename(dev);
++ break;
+ case NETDEV_CHANGEADDR:
+ if (port->type == HSR_PT_MASTER) {
+ /* This should not happen since there's no
+@@ -123,6 +127,7 @@ static void __exit hsr_exit(void)
+ {
+ unregister_netdevice_notifier(&hsr_nb);
+ hsr_netlink_exit();
++ hsr_debugfs_remove_root();
+ }
+
+ module_init(hsr_init);
+diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h
+index 9ec38e33b8b1..d40de84a637f 100644
+--- a/net/hsr/hsr_main.h
++++ b/net/hsr/hsr_main.h
+@@ -185,14 +185,24 @@ static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
+ }
+
+ #if IS_ENABLED(CONFIG_DEBUG_FS)
++void hsr_debugfs_rename(struct net_device *dev);
+ void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
+ void hsr_debugfs_term(struct hsr_priv *priv);
++void hsr_debugfs_create_root(void);
++void hsr_debugfs_remove_root(void);
+ #else
++static inline void void hsr_debugfs_rename(struct net_device *dev)
++{
++}
+ static inline void hsr_debugfs_init(struct hsr_priv *priv,
+ struct net_device *hsr_dev)
+ {}
+ static inline void hsr_debugfs_term(struct hsr_priv *priv)
+ {}
++static inline void hsr_debugfs_create_root(void)
++{}
++static inline void hsr_debugfs_remove_root(void)
++{}
+ #endif
+
+ #endif /* __HSR_PRIVATE_H */
+diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
+index 8f8337f893ba..8dc0547f01d0 100644
+--- a/net/hsr/hsr_netlink.c
++++ b/net/hsr/hsr_netlink.c
+@@ -476,6 +476,7 @@ int __init hsr_netlink_init(void)
+ if (rc)
+ goto fail_genl_register_family;
+
++ hsr_debugfs_create_root();
+ return 0;
+
+ fail_genl_register_family:
+diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
+index e743f811245f..96a64e7594a5 100644
+--- a/net/netfilter/nf_tables_offload.c
++++ b/net/netfilter/nf_tables_offload.c
+@@ -358,14 +358,14 @@ int nft_flow_rule_offload_commit(struct net *net)
+ continue;
+
+ if (trans->ctx.flags & NLM_F_REPLACE ||
+- !(trans->ctx.flags & NLM_F_APPEND))
+- return -EOPNOTSUPP;
+-
++ !(trans->ctx.flags & NLM_F_APPEND)) {
++ err = -EOPNOTSUPP;
++ break;
++ }
+ err = nft_flow_offload_rule(trans->ctx.chain,
+ nft_trans_rule(trans),
+ nft_trans_flow_rule(trans),
+ FLOW_CLS_REPLACE);
+- nft_flow_rule_destroy(nft_trans_flow_rule(trans));
+ break;
+ case NFT_MSG_DELRULE:
+ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
+@@ -379,7 +379,23 @@ int nft_flow_rule_offload_commit(struct net *net)
+ }
+
+ if (err)
+- return err;
++ break;
++ }
++
++ list_for_each_entry(trans, &net->nft.commit_list, list) {
++ if (trans->ctx.family != NFPROTO_NETDEV)
++ continue;
++
++ switch (trans->msg_type) {
++ case NFT_MSG_NEWRULE:
++ if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
++ continue;
++
++ nft_flow_rule_destroy(nft_trans_flow_rule(trans));
++ break;
++ default:
++ break;
++ }
+ }
+
+ return err;
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index f29bbc74c4bf..ff5ac173e897 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -197,9 +197,6 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx,
+ static void nft_flow_offload_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+ {
+- struct nft_flow_offload *priv = nft_expr_priv(expr);
+-
+- priv->flowtable->use--;
+ nf_ct_netns_put(ctx->net, ctx->family);
+ }
+
+diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
+index 317e3a9e8c5b..dda1e55d5801 100644
+--- a/net/netfilter/nft_meta.c
++++ b/net/netfilter/nft_meta.c
+@@ -33,19 +33,19 @@
+
+ static DEFINE_PER_CPU(struct rnd_state, nft_prandom_state);
+
+-static u8 nft_meta_weekday(unsigned long secs)
++static u8 nft_meta_weekday(time64_t secs)
+ {
+ unsigned int dse;
+ u8 wday;
+
+ secs -= NFT_META_SECS_PER_MINUTE * sys_tz.tz_minuteswest;
+- dse = secs / NFT_META_SECS_PER_DAY;
++ dse = div_u64(secs, NFT_META_SECS_PER_DAY);
+ wday = (4 + dse) % NFT_META_DAYS_PER_WEEK;
+
+ return wday;
+ }
+
+-static u32 nft_meta_hour(unsigned long secs)
++static u32 nft_meta_hour(time64_t secs)
+ {
+ struct tm tm;
+
+@@ -250,10 +250,10 @@ void nft_meta_get_eval(const struct nft_expr *expr,
+ nft_reg_store64(dest, ktime_get_real_ns());
+ break;
+ case NFT_META_TIME_DAY:
+- nft_reg_store8(dest, nft_meta_weekday(get_seconds()));
++ nft_reg_store8(dest, nft_meta_weekday(ktime_get_real_seconds()));
+ break;
+ case NFT_META_TIME_HOUR:
+- *dest = nft_meta_hour(get_seconds());
++ *dest = nft_meta_hour(ktime_get_real_seconds());
+ break;
+ default:
+ WARN_ON(1);
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index 7c7d10f2e0c1..5e99df80e80a 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -209,6 +209,7 @@ struct rxrpc_skb_priv {
+ struct rxrpc_security {
+ const char *name; /* name of this service */
+ u8 security_index; /* security type provided */
++ u32 no_key_abort; /* Abort code indicating no key */
+
+ /* Initialise a security service */
+ int (*init)(void);
+@@ -977,8 +978,9 @@ static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
+ struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *,
+ struct sk_buff *);
+ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t);
+-void rxrpc_new_incoming_connection(struct rxrpc_sock *,
+- struct rxrpc_connection *, struct sk_buff *);
++void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *,
++ const struct rxrpc_security *, struct key *,
++ struct sk_buff *);
+ void rxrpc_unpublish_service_conn(struct rxrpc_connection *);
+
+ /*
+@@ -1103,7 +1105,9 @@ extern const struct rxrpc_security rxkad;
+ int __init rxrpc_init_security(void);
+ void rxrpc_exit_security(void);
+ int rxrpc_init_client_conn_security(struct rxrpc_connection *);
+-int rxrpc_init_server_conn_security(struct rxrpc_connection *);
++bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *,
++ const struct rxrpc_security **, struct key **,
++ struct sk_buff *);
+
+ /*
+ * sendmsg.c
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 135bf5cd8dd5..70e44abf106c 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -239,6 +239,22 @@ void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
+ kfree(b);
+ }
+
++/*
++ * Ping the other end to fill our RTT cache and to retrieve the rwind
++ * and MTU parameters.
++ */
++static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
++{
++ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++ ktime_t now = skb->tstamp;
++
++ if (call->peer->rtt_usage < 3 ||
++ ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
++ rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
++ true, true,
++ rxrpc_propose_ack_ping_for_params);
++}
++
+ /*
+ * Allocate a new incoming call from the prealloc pool, along with a connection
+ * and a peer as necessary.
+@@ -247,6 +263,8 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
+ struct rxrpc_local *local,
+ struct rxrpc_peer *peer,
+ struct rxrpc_connection *conn,
++ const struct rxrpc_security *sec,
++ struct key *key,
+ struct sk_buff *skb)
+ {
+ struct rxrpc_backlog *b = rx->backlog;
+@@ -294,7 +312,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
+ conn->params.local = rxrpc_get_local(local);
+ conn->params.peer = peer;
+ rxrpc_see_connection(conn);
+- rxrpc_new_incoming_connection(rx, conn, skb);
++ rxrpc_new_incoming_connection(rx, conn, sec, key, skb);
+ } else {
+ rxrpc_get_connection(conn);
+ }
+@@ -333,9 +351,11 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ struct sk_buff *skb)
+ {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++ const struct rxrpc_security *sec = NULL;
+ struct rxrpc_connection *conn;
+ struct rxrpc_peer *peer = NULL;
+- struct rxrpc_call *call;
++ struct rxrpc_call *call = NULL;
++ struct key *key = NULL;
+
+ _enter("");
+
+@@ -346,9 +366,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
+ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+ skb->priority = RX_INVALID_OPERATION;
+- _leave(" = NULL [close]");
+- call = NULL;
+- goto out;
++ goto no_call;
+ }
+
+ /* The peer, connection and call may all have sprung into existence due
+@@ -358,29 +376,19 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ */
+ conn = rxrpc_find_connection_rcu(local, skb, &peer);
+
+- call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb);
++ if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb))
++ goto no_call;
++
++ call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
++ key_put(key);
+ if (!call) {
+ skb->mark = RXRPC_SKB_MARK_REJECT_BUSY;
+- _leave(" = NULL [busy]");
+- call = NULL;
+- goto out;
++ goto no_call;
+ }
+
+ trace_rxrpc_receive(call, rxrpc_receive_incoming,
+ sp->hdr.serial, sp->hdr.seq);
+
+- /* Lock the call to prevent rxrpc_kernel_send/recv_data() and
+- * sendmsg()/recvmsg() inconveniently stealing the mutex once the
+- * notification is generated.
+- *
+- * The BUG should never happen because the kernel should be well
+- * behaved enough not to access the call before the first notification
+- * event and userspace is prevented from doing so until the state is
+- * appropriate.
+- */
+- if (!mutex_trylock(&call->user_mutex))
+- BUG();
+-
+ /* Make the call live. */
+ rxrpc_incoming_call(rx, call, skb);
+ conn = call->conn;
+@@ -421,6 +429,9 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ BUG();
+ }
+ spin_unlock(&conn->state_lock);
++ spin_unlock(&rx->incoming_lock);
++
++ rxrpc_send_ping(call, skb);
+
+ if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
+ rxrpc_notify_socket(call);
+@@ -433,9 +444,12 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
+ rxrpc_put_call(call, rxrpc_call_put);
+
+ _leave(" = %p{%d}", call, call->debug_id);
+-out:
+- spin_unlock(&rx->incoming_lock);
+ return call;
++
++no_call:
++ spin_unlock(&rx->incoming_lock);
++ _leave(" = NULL [%u]", skb->mark);
++ return NULL;
+ }
+
+ /*
+diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
+index a1ceef4f5cd0..808a4723f868 100644
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -376,21 +376,7 @@ static void rxrpc_secure_connection(struct rxrpc_connection *conn)
+ _enter("{%d}", conn->debug_id);
+
+ ASSERT(conn->security_ix != 0);
+-
+- if (!conn->params.key) {
+- _debug("set up security");
+- ret = rxrpc_init_server_conn_security(conn);
+- switch (ret) {
+- case 0:
+- break;
+- case -ENOENT:
+- abort_code = RX_CALL_DEAD;
+- goto abort;
+- default:
+- abort_code = RXKADNOAUTH;
+- goto abort;
+- }
+- }
++ ASSERT(conn->server_key);
+
+ if (conn->security->issue_challenge(conn) < 0) {
+ abort_code = RX_CALL_DEAD;
+diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
+index 123d6ceab15c..21da48e3d2e5 100644
+--- a/net/rxrpc/conn_service.c
++++ b/net/rxrpc/conn_service.c
+@@ -148,6 +148,8 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
+ */
+ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
+ struct rxrpc_connection *conn,
++ const struct rxrpc_security *sec,
++ struct key *key,
+ struct sk_buff *skb)
+ {
+ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+@@ -160,6 +162,8 @@ void rxrpc_new_incoming_connection(struct rxrpc_sock *rx,
+ conn->service_id = sp->hdr.serviceId;
+ conn->security_ix = sp->hdr.securityIndex;
+ conn->out_clientflag = 0;
++ conn->security = sec;
++ conn->server_key = key_get(key);
+ if (conn->security_ix)
+ conn->state = RXRPC_CONN_SERVICE_UNSECURED;
+ else
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 157be1ff8697..86bd133b4fa0 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -192,22 +192,6 @@ send_extra_data:
+ goto out_no_clear_ca;
+ }
+
+-/*
+- * Ping the other end to fill our RTT cache and to retrieve the rwind
+- * and MTU parameters.
+- */
+-static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb)
+-{
+- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+- ktime_t now = skb->tstamp;
+-
+- if (call->peer->rtt_usage < 3 ||
+- ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
+- rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial,
+- true, true,
+- rxrpc_propose_ack_ping_for_params);
+-}
+-
+ /*
+ * Apply a hard ACK by advancing the Tx window.
+ */
+@@ -1396,8 +1380,6 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
+ call = rxrpc_new_incoming_call(local, rx, skb);
+ if (!call)
+ goto reject_packet;
+- rxrpc_send_ping(call, skb);
+- mutex_unlock(&call->user_mutex);
+ }
+
+ /* Process a call packet; this either discards or passes on the ref
+diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
+index 8d8aa3c230b5..098f1f9ec53b 100644
+--- a/net/rxrpc/rxkad.c
++++ b/net/rxrpc/rxkad.c
+@@ -648,9 +648,9 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
+ u32 serial;
+ int ret;
+
+- _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key));
++ _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
+
+- ret = key_validate(conn->params.key);
++ ret = key_validate(conn->server_key);
+ if (ret < 0)
+ return ret;
+
+@@ -1293,6 +1293,7 @@ static void rxkad_exit(void)
+ const struct rxrpc_security rxkad = {
+ .name = "rxkad",
+ .security_index = RXRPC_SECURITY_RXKAD,
++ .no_key_abort = RXKADUNKNOWNKEY,
+ .init = rxkad_init,
+ .exit = rxkad_exit,
+ .init_connection_security = rxkad_init_connection_security,
+diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
+index a4c47d2b7054..9b1fb9ed0717 100644
+--- a/net/rxrpc/security.c
++++ b/net/rxrpc/security.c
+@@ -101,62 +101,58 @@ int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
+ }
+
+ /*
+- * initialise the security on a server connection
++ * Find the security key for a server connection.
+ */
+-int rxrpc_init_server_conn_security(struct rxrpc_connection *conn)
++bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx,
++ const struct rxrpc_security **_sec,
++ struct key **_key,
++ struct sk_buff *skb)
+ {
+ const struct rxrpc_security *sec;
+- struct rxrpc_local *local = conn->params.local;
+- struct rxrpc_sock *rx;
+- struct key *key;
+- key_ref_t kref;
++ struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
++ key_ref_t kref = NULL;
+ char kdesc[5 + 1 + 3 + 1];
+
+ _enter("");
+
+- sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix);
++ sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex);
+
+- sec = rxrpc_security_lookup(conn->security_ix);
++ sec = rxrpc_security_lookup(sp->hdr.securityIndex);
+ if (!sec) {
+- _leave(" = -ENOKEY [lookup]");
+- return -ENOKEY;
++ trace_rxrpc_abort(0, "SVS",
++ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
++ RX_INVALID_OPERATION, EKEYREJECTED);
++ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
++ skb->priority = RX_INVALID_OPERATION;
++ return false;
+ }
+
+- /* find the service */
+- read_lock(&local->services_lock);
+- rx = rcu_dereference_protected(local->service,
+- lockdep_is_held(&local->services_lock));
+- if (rx && (rx->srx.srx_service == conn->service_id ||
+- rx->second_service == conn->service_id))
+- goto found_service;
++ if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE)
++ goto out;
+
+- /* the service appears to have died */
+- read_unlock(&local->services_lock);
+- _leave(" = -ENOENT");
+- return -ENOENT;
+-
+-found_service:
+ if (!rx->securities) {
+- read_unlock(&local->services_lock);
+- _leave(" = -ENOKEY");
+- return -ENOKEY;
++ trace_rxrpc_abort(0, "SVR",
++ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
++ RX_INVALID_OPERATION, EKEYREJECTED);
++ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
++ skb->priority = RX_INVALID_OPERATION;
++ return false;
+ }
+
+ /* look through the service's keyring */
+ kref = keyring_search(make_key_ref(rx->securities, 1UL),
+ &key_type_rxrpc_s, kdesc, true);
+ if (IS_ERR(kref)) {
+- read_unlock(&local->services_lock);
+- _leave(" = %ld [search]", PTR_ERR(kref));
+- return PTR_ERR(kref);
++ trace_rxrpc_abort(0, "SVK",
++ sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
++ sec->no_key_abort, EKEYREJECTED);
++ skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
++ skb->priority = sec->no_key_abort;
++ return false;
+ }
+
+- key = key_ref_to_ptr(kref);
+- read_unlock(&local->services_lock);
+-
+- conn->server_key = key;
+- conn->security = sec;
+-
+- _leave(" = 0");
+- return 0;
++out:
++ *_sec = sec;
++ *_key = key_ref_to_ptr(kref);
++ return true;
+ }
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index dd0e8680b030..2277369feae5 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -2184,6 +2184,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
+ [TCA_CAKE_MPU] = { .type = NLA_U32 },
+ [TCA_CAKE_INGRESS] = { .type = NLA_U32 },
+ [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
++ [TCA_CAKE_SPLIT_GSO] = { .type = NLA_U32 },
+ [TCA_CAKE_FWMARK] = { .type = NLA_U32 },
+ };
+
+diff --git a/net/socket.c b/net/socket.c
+index ca8de9e1582d..432800b39ddb 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -3532,6 +3532,7 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
+ case SIOCSARP:
+ case SIOCGARP:
+ case SIOCDARP:
++ case SIOCOUTQNSD:
+ case SIOCATMARK:
+ return sock_do_ioctl(net, sock, cmd, arg);
+ }
+diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
+index 30065a28628c..9901a811f598 100644
+--- a/net/sunrpc/xprtrdma/frwr_ops.c
++++ b/net/sunrpc/xprtrdma/frwr_ops.c
+@@ -570,7 +570,6 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ */
+ bad_wr = NULL;
+ rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
+- trace_xprtrdma_post_send(req, rc);
+
+ /* The final LOCAL_INV WR in the chain is supposed to
+ * do the wake. If it was never posted, the wake will
+@@ -583,6 +582,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+
+ /* Recycle MRs in the LOCAL_INV chain that did not get posted.
+ */
++ trace_xprtrdma_post_linv(req, rc);
+ while (bad_wr) {
+ frwr = container_of(bad_wr, struct rpcrdma_frwr,
+ fr_invwr);
+@@ -673,12 +673,12 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
+ */
+ bad_wr = NULL;
+ rc = ib_post_send(r_xprt->rx_ia.ri_id->qp, first, &bad_wr);
+- trace_xprtrdma_post_send(req, rc);
+ if (!rc)
+ return;
+
+ /* Recycle MRs in the LOCAL_INV chain that did not get posted.
+ */
++ trace_xprtrdma_post_linv(req, rc);
+ while (bad_wr) {
+ frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
+ mr = container_of(frwr, struct rpcrdma_mr, frwr);
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index b86b5fd62d9f..ef5102b60589 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -1362,6 +1362,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
+ xprt->cwnd = credits << RPC_CWNDSHIFT;
+ spin_unlock(&xprt->transport_lock);
+ }
++ rpcrdma_post_recvs(r_xprt, false);
+
+ req = rpcr_to_rdmar(rqst);
+ if (req->rl_reply) {
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index 160558b4135e..c67d465dc062 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -428,8 +428,11 @@ void xprt_rdma_close(struct rpc_xprt *xprt)
+ /* Prepare @xprt for the next connection by reinitializing
+ * its credit grant to one (see RFC 8166, Section 3.3.3).
+ */
++ spin_lock(&xprt->transport_lock);
+ r_xprt->rx_buf.rb_credits = 1;
++ xprt->cong = 0;
+ xprt->cwnd = RPC_CWNDSHIFT;
++ spin_unlock(&xprt->transport_lock);
+
+ out:
+ xprt->reestablish_timeout = 0;
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 3a907537e2cf..0f4d39fdb48f 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -75,16 +75,15 @@
+ * internal functions
+ */
+ static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
+-static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf);
++static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt);
++static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt);
+ static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
+ static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
+-static void rpcrdma_mr_free(struct rpcrdma_mr *mr);
+ static struct rpcrdma_regbuf *
+ rpcrdma_regbuf_alloc(size_t size, enum dma_data_direction direction,
+ gfp_t flags);
+ static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
+ static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
+-static void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
+
+ /* Wait for outstanding transport work to finish. ib_drain_qp
+ * handles the drains in the wrong order for us, so open code
+@@ -170,7 +169,6 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
+ rdmab_addr(rep->rr_rdmabuf),
+ wc->byte_len, DMA_FROM_DEVICE);
+
+- rpcrdma_post_recvs(r_xprt, false);
+ rpcrdma_reply_handler(rep);
+ return;
+
+@@ -247,6 +245,7 @@ rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
+ ia->ri_id->device->name,
+ rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
+ #endif
++ init_completion(&ia->ri_remove_done);
+ set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
+ ep->rep_connected = -ENODEV;
+ xprt_force_disconnect(xprt);
+@@ -301,7 +300,6 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
+ trace_xprtrdma_conn_start(xprt);
+
+ init_completion(&ia->ri_done);
+- init_completion(&ia->ri_remove_done);
+
+ id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
+ xprt, RDMA_PS_TCP, IB_QPT_RC);
+@@ -431,7 +429,7 @@ rpcrdma_ia_remove(struct rpcrdma_ia *ia)
+ /* The ULP is responsible for ensuring all DMA
+ * mappings and MRs are gone.
+ */
+- rpcrdma_reps_destroy(buf);
++ rpcrdma_reps_unmap(r_xprt);
+ list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
+ rpcrdma_regbuf_dma_unmap(req->rl_rdmabuf);
+ rpcrdma_regbuf_dma_unmap(req->rl_sendbuf);
+@@ -609,6 +607,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
+ struct ib_qp_init_attr *qp_init_attr)
+ {
+ struct rpcrdma_ia *ia = &r_xprt->rx_ia;
++ struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+ int rc, err;
+
+ trace_xprtrdma_reinsert(r_xprt);
+@@ -623,6 +622,7 @@ static int rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
+ pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
+ goto out2;
+ }
++ memcpy(qp_init_attr, &ep->rep_attr, sizeof(*qp_init_attr));
+
+ rc = -ENETUNREACH;
+ err = rdma_create_qp(ia->ri_id, ia->ri_pd, qp_init_attr);
+@@ -780,6 +780,7 @@ rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
+ trace_xprtrdma_disconnect(r_xprt, rc);
+
+ rpcrdma_xprt_drain(r_xprt);
++ rpcrdma_reqs_reset(r_xprt);
+ }
+
+ /* Fixed-size circular FIFO queue. This implementation is wait-free and
+@@ -965,7 +966,7 @@ rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
+ mr->mr_xprt = r_xprt;
+
+ spin_lock(&buf->rb_lock);
+- list_add(&mr->mr_list, &buf->rb_mrs);
++ rpcrdma_mr_push(mr, &buf->rb_mrs);
+ list_add(&mr->mr_all, &buf->rb_all_mrs);
+ spin_unlock(&buf->rb_lock);
+ }
+@@ -1042,6 +1043,26 @@ out1:
+ return NULL;
+ }
+
++/**
++ * rpcrdma_reqs_reset - Reset all reqs owned by a transport
++ * @r_xprt: controlling transport instance
++ *
++ * ASSUMPTION: the rb_allreqs list is stable for the duration,
++ * and thus can be walked without holding rb_lock. Eg. the
++ * caller is holding the transport send lock to exclude
++ * device removal or disconnection.
++ */
++static void rpcrdma_reqs_reset(struct rpcrdma_xprt *r_xprt)
++{
++ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
++ struct rpcrdma_req *req;
++
++ list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
++ /* Credits are valid only for one connection */
++ req->rl_slot.rq_cong = 0;
++ }
++}
++
+ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+ bool temp)
+ {
+@@ -1065,6 +1086,7 @@ static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+ rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
+ rep->rr_recv_wr.num_sge = 1;
+ rep->rr_temp = temp;
++ list_add(&rep->rr_all, &r_xprt->rx_buf.rb_all_reps);
+ return rep;
+
+ out_free:
+@@ -1075,6 +1097,7 @@ out:
+
+ static void rpcrdma_rep_destroy(struct rpcrdma_rep *rep)
+ {
++ list_del(&rep->rr_all);
+ rpcrdma_regbuf_free(rep->rr_rdmabuf);
+ kfree(rep);
+ }
+@@ -1093,10 +1116,16 @@ static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf)
+ static void rpcrdma_rep_put(struct rpcrdma_buffer *buf,
+ struct rpcrdma_rep *rep)
+ {
+- if (!rep->rr_temp)
+- llist_add(&rep->rr_node, &buf->rb_free_reps);
+- else
+- rpcrdma_rep_destroy(rep);
++ llist_add(&rep->rr_node, &buf->rb_free_reps);
++}
++
++static void rpcrdma_reps_unmap(struct rpcrdma_xprt *r_xprt)
++{
++ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
++ struct rpcrdma_rep *rep;
++
++ list_for_each_entry(rep, &buf->rb_all_reps, rr_all)
++ rpcrdma_regbuf_dma_unmap(rep->rr_rdmabuf);
+ }
+
+ static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf)
+@@ -1129,6 +1158,7 @@ int rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
+
+ INIT_LIST_HEAD(&buf->rb_send_bufs);
+ INIT_LIST_HEAD(&buf->rb_allreqs);
++ INIT_LIST_HEAD(&buf->rb_all_reps);
+
+ rc = -ENOMEM;
+ for (i = 0; i < buf->rb_max_requests; i++) {
+@@ -1163,10 +1193,19 @@ out:
+ */
+ void rpcrdma_req_destroy(struct rpcrdma_req *req)
+ {
++ struct rpcrdma_mr *mr;
++
+ list_del(&req->rl_all);
+
+- while (!list_empty(&req->rl_free_mrs))
+- rpcrdma_mr_free(rpcrdma_mr_pop(&req->rl_free_mrs));
++ while ((mr = rpcrdma_mr_pop(&req->rl_free_mrs))) {
++ struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf;
++
++ spin_lock(&buf->rb_lock);
++ list_del(&mr->mr_all);
++ spin_unlock(&buf->rb_lock);
++
++ frwr_release_mr(mr);
++ }
+
+ rpcrdma_regbuf_free(req->rl_recvbuf);
+ rpcrdma_regbuf_free(req->rl_sendbuf);
+@@ -1174,24 +1213,28 @@ void rpcrdma_req_destroy(struct rpcrdma_req *req)
+ kfree(req);
+ }
+
+-static void
+-rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
++/**
++ * rpcrdma_mrs_destroy - Release all of a transport's MRs
++ * @buf: controlling buffer instance
++ *
++ * Relies on caller holding the transport send lock to protect
++ * removing mr->mr_list from req->rl_free_mrs safely.
++ */
++static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
+ {
+ struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
+ rx_buf);
+ struct rpcrdma_mr *mr;
+- unsigned int count;
+
+- count = 0;
+ spin_lock(&buf->rb_lock);
+ while ((mr = list_first_entry_or_null(&buf->rb_all_mrs,
+ struct rpcrdma_mr,
+ mr_all)) != NULL) {
++ list_del(&mr->mr_list);
+ list_del(&mr->mr_all);
+ spin_unlock(&buf->rb_lock);
+
+ frwr_release_mr(mr);
+- count++;
+ spin_lock(&buf->rb_lock);
+ }
+ spin_unlock(&buf->rb_lock);
+@@ -1264,17 +1307,6 @@ void rpcrdma_mr_put(struct rpcrdma_mr *mr)
+ rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
+ }
+
+-static void rpcrdma_mr_free(struct rpcrdma_mr *mr)
+-{
+- struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
+- struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+-
+- mr->mr_req = NULL;
+- spin_lock(&buf->rb_lock);
+- rpcrdma_mr_push(mr, &buf->rb_mrs);
+- spin_unlock(&buf->rb_lock);
+-}
+-
+ /**
+ * rpcrdma_buffer_get - Get a request buffer
+ * @buffers: Buffer pool from which to obtain a buffer
+@@ -1455,8 +1487,13 @@ rpcrdma_ep_post(struct rpcrdma_ia *ia,
+ return 0;
+ }
+
+-static void
+-rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
++/**
++ * rpcrdma_post_recvs - Refill the Receive Queue
++ * @r_xprt: controlling transport instance
++ * @temp: mark Receive buffers to be deleted after use
++ *
++ */
++void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
+ {
+ struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
+ struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+@@ -1478,6 +1515,10 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
+ wr = NULL;
+ while (needed) {
+ rep = rpcrdma_rep_get_locked(buf);
++ if (rep && rep->rr_temp) {
++ rpcrdma_rep_destroy(rep);
++ continue;
++ }
+ if (!rep)
+ rep = rpcrdma_rep_create(r_xprt, temp);
+ if (!rep)
+diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
+index 65e6b0eb862e..fc761679487c 100644
+--- a/net/sunrpc/xprtrdma/xprt_rdma.h
++++ b/net/sunrpc/xprtrdma/xprt_rdma.h
+@@ -203,6 +203,7 @@ struct rpcrdma_rep {
+ struct xdr_stream rr_stream;
+ struct llist_node rr_node;
+ struct ib_recv_wr rr_recv_wr;
++ struct list_head rr_all;
+ };
+
+ /* To reduce the rate at which a transport invokes ib_post_recv
+@@ -372,6 +373,7 @@ struct rpcrdma_buffer {
+
+ struct list_head rb_allreqs;
+ struct list_head rb_all_mrs;
++ struct list_head rb_all_reps;
+
+ struct llist_head rb_free_reps;
+
+@@ -474,6 +476,7 @@ void rpcrdma_ep_disconnect(struct rpcrdma_ep *, struct rpcrdma_ia *);
+
+ int rpcrdma_ep_post(struct rpcrdma_ia *, struct rpcrdma_ep *,
+ struct rpcrdma_req *);
++void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp);
+
+ /*
+ * Buffer calls - xprtrdma/verbs.c
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 0d8da809bea2..b3369d678f1a 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -646,6 +646,9 @@ static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
+ static __poll_t unix_dgram_poll(struct file *, struct socket *,
+ poll_table *);
+ static int unix_ioctl(struct socket *, unsigned int, unsigned long);
++#ifdef CONFIG_COMPAT
++static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
++#endif
+ static int unix_shutdown(struct socket *, int);
+ static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
+ static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
+@@ -687,6 +690,9 @@ static const struct proto_ops unix_stream_ops = {
+ .getname = unix_getname,
+ .poll = unix_poll,
+ .ioctl = unix_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = unix_compat_ioctl,
++#endif
+ .listen = unix_listen,
+ .shutdown = unix_shutdown,
+ .setsockopt = sock_no_setsockopt,
+@@ -710,6 +716,9 @@ static const struct proto_ops unix_dgram_ops = {
+ .getname = unix_getname,
+ .poll = unix_dgram_poll,
+ .ioctl = unix_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = unix_compat_ioctl,
++#endif
+ .listen = sock_no_listen,
+ .shutdown = unix_shutdown,
+ .setsockopt = sock_no_setsockopt,
+@@ -732,6 +741,9 @@ static const struct proto_ops unix_seqpacket_ops = {
+ .getname = unix_getname,
+ .poll = unix_dgram_poll,
+ .ioctl = unix_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = unix_compat_ioctl,
++#endif
+ .listen = unix_listen,
+ .shutdown = unix_shutdown,
+ .setsockopt = sock_no_setsockopt,
+@@ -2582,6 +2594,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+ return err;
+ }
+
++#ifdef CONFIG_COMPAT
++static int unix_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
++{
++ return unix_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
++}
++#endif
++
+ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
+ {
+ struct sock *sk = sock->sk;
+diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
+index 06495379fcd8..2998ddb323e3 100755
+--- a/scripts/link-vmlinux.sh
++++ b/scripts/link-vmlinux.sh
+@@ -127,7 +127,8 @@ gen_btf()
+ cut -d, -f1 | cut -d' ' -f2)
+ bin_format=$(LANG=C ${OBJDUMP} -f ${1} | grep 'file format' | \
+ awk '{print $4}')
+- ${OBJCOPY} --dump-section .BTF=.btf.vmlinux.bin ${1} 2>/dev/null
++ ${OBJCOPY} --set-section-flags .BTF=alloc -O binary \
++ --only-section=.BTF ${1} .btf.vmlinux.bin 2>/dev/null
+ ${OBJCOPY} -I binary -O ${bin_format} -B ${bin_arch} \
+ --rename-section .data=.BTF .btf.vmlinux.bin ${2}
+ }
+@@ -253,6 +254,10 @@ btf_vmlinux_bin_o=""
+ if [ -n "${CONFIG_DEBUG_INFO_BTF}" ]; then
+ if gen_btf .tmp_vmlinux.btf .btf.vmlinux.bin.o ; then
+ btf_vmlinux_bin_o=.btf.vmlinux.bin.o
++ else
++ echo >&2 "Failed to generate BTF for vmlinux"
++ echo >&2 "Try to disable CONFIG_DEBUG_INFO_BTF"
++ exit 1
+ fi
+ fi
+
+diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
+index 7c230016b08d..357dc56bcf30 100755
+--- a/scripts/package/mkdebian
++++ b/scripts/package/mkdebian
+@@ -136,7 +136,7 @@ mkdir -p debian/source/
+ echo "1.0" > debian/source/format
+
+ echo $debarch > debian/arch
+-extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev)"
++extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)"
+ extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)"
+
+ # Generate a simple changelog template
+diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
+index dd3d5942e669..c36bafbcd77e 100644
+--- a/security/tomoyo/common.c
++++ b/security/tomoyo/common.c
+@@ -951,7 +951,8 @@ static bool tomoyo_manager(void)
+ exe = tomoyo_get_exe();
+ if (!exe)
+ return false;
+- list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list) {
++ list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.policy_list[TOMOYO_ID_MANAGER], head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (!ptr->head.is_deleted &&
+ (!tomoyo_pathcmp(domainname, ptr->manager) ||
+ !strcmp(exe, ptr->manager->name))) {
+@@ -1095,7 +1096,8 @@ static int tomoyo_delete_domain(char *domainname)
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return -EINTR;
+ /* Is there an active domain? */
+- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
++ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ /* Never delete tomoyo_kernel_domain */
+ if (domain == &tomoyo_kernel_domain)
+ continue;
+@@ -2778,7 +2780,8 @@ void tomoyo_check_profile(void)
+
+ tomoyo_policy_loaded = true;
+ pr_info("TOMOYO: 2.6.0\n");
+- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
++ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ const u8 profile = domain->profile;
+ struct tomoyo_policy_namespace *ns = domain->ns;
+
+diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
+index 8526a0a74023..7869d6a9980b 100644
+--- a/security/tomoyo/domain.c
++++ b/security/tomoyo/domain.c
+@@ -41,7 +41,8 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
+
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return -ENOMEM;
+- list_for_each_entry_rcu(entry, list, list) {
++ list_for_each_entry_rcu(entry, list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ if (!check_duplicate(entry, new_entry))
+@@ -119,7 +120,8 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
+ }
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+- list_for_each_entry_rcu(entry, list, list) {
++ list_for_each_entry_rcu(entry, list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ if (!tomoyo_same_acl_head(entry, new_entry) ||
+@@ -166,7 +168,8 @@ void tomoyo_check_acl(struct tomoyo_request_info *r,
+ u16 i = 0;
+
+ retry:
+- list_for_each_entry_rcu(ptr, list, list) {
++ list_for_each_entry_rcu(ptr, list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (ptr->is_deleted || ptr->type != r->param_type)
+ continue;
+ if (!check_entry(r, ptr))
+@@ -298,7 +301,8 @@ static inline bool tomoyo_scan_transition
+ {
+ const struct tomoyo_transition_control *ptr;
+
+- list_for_each_entry_rcu(ptr, list, head.list) {
++ list_for_each_entry_rcu(ptr, list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (ptr->head.is_deleted || ptr->type != type)
+ continue;
+ if (ptr->domainname) {
+@@ -735,7 +739,8 @@ retry:
+
+ /* Check 'aggregator' directive. */
+ candidate = &exename;
+- list_for_each_entry_rcu(ptr, list, head.list) {
++ list_for_each_entry_rcu(ptr, list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (ptr->head.is_deleted ||
+ !tomoyo_path_matches_pattern(&exename,
+ ptr->original_name))
+diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
+index a37c7dc66e44..1cecdd797597 100644
+--- a/security/tomoyo/group.c
++++ b/security/tomoyo/group.c
+@@ -133,7 +133,8 @@ tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
+ {
+ struct tomoyo_path_group *member;
+
+- list_for_each_entry_rcu(member, &group->member_list, head.list) {
++ list_for_each_entry_rcu(member, &group->member_list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (member->head.is_deleted)
+ continue;
+ if (!tomoyo_path_matches_pattern(pathname, member->member_name))
+@@ -161,7 +162,8 @@ bool tomoyo_number_matches_group(const unsigned long min,
+ struct tomoyo_number_group *member;
+ bool matched = false;
+
+- list_for_each_entry_rcu(member, &group->member_list, head.list) {
++ list_for_each_entry_rcu(member, &group->member_list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (member->head.is_deleted)
+ continue;
+ if (min > member->number.values[1] ||
+@@ -191,7 +193,8 @@ bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ bool matched = false;
+ const u8 size = is_ipv6 ? 16 : 4;
+
+- list_for_each_entry_rcu(member, &group->member_list, head.list) {
++ list_for_each_entry_rcu(member, &group->member_list, head.list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (member->head.is_deleted)
+ continue;
+ if (member->address.is_ipv6 != is_ipv6)
+diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
+index 52752e1a84ed..eba0b3395851 100644
+--- a/security/tomoyo/util.c
++++ b/security/tomoyo/util.c
+@@ -594,7 +594,8 @@ struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
+
+ name.name = domainname;
+ tomoyo_fill_path_info(&name);
+- list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
++ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ if (!domain->is_deleted &&
+ !tomoyo_pathcmp(&name, domain->domainname))
+ return domain;
+@@ -1028,7 +1029,8 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
+ return false;
+ if (!domain)
+ return true;
+- list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
++ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list,
++ srcu_read_lock_held(&tomoyo_ss)) {
+ u16 perm;
+ u8 i;
+
+diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
+index a78e4ab478df..c7a49d03463a 100644
+--- a/sound/soc/fsl/fsl_esai.c
++++ b/sound/soc/fsl/fsl_esai.c
+@@ -33,6 +33,7 @@
+ * @fsysclk: system clock source to derive HCK, SCK and FS
+ * @spbaclk: SPBA clock (optional, depending on SoC design)
+ * @task: tasklet to handle the reset operation
++ * @lock: spin lock between hw_reset() and trigger()
+ * @fifo_depth: depth of tx/rx FIFO
+ * @slot_width: width of each DAI slot
+ * @slots: number of slots
+@@ -56,6 +57,7 @@ struct fsl_esai {
+ struct clk *fsysclk;
+ struct clk *spbaclk;
+ struct tasklet_struct task;
++ spinlock_t lock; /* Protect hw_reset and trigger */
+ u32 fifo_depth;
+ u32 slot_width;
+ u32 slots;
+@@ -676,8 +678,10 @@ static void fsl_esai_hw_reset(unsigned long arg)
+ {
+ struct fsl_esai *esai_priv = (struct fsl_esai *)arg;
+ bool tx = true, rx = false, enabled[2];
++ unsigned long lock_flags;
+ u32 tfcr, rfcr;
+
++ spin_lock_irqsave(&esai_priv->lock, lock_flags);
+ /* Save the registers */
+ regmap_read(esai_priv->regmap, REG_ESAI_TFCR, &tfcr);
+ regmap_read(esai_priv->regmap, REG_ESAI_RFCR, &rfcr);
+@@ -715,6 +719,8 @@ static void fsl_esai_hw_reset(unsigned long arg)
+ fsl_esai_trigger_start(esai_priv, tx);
+ if (enabled[rx])
+ fsl_esai_trigger_start(esai_priv, rx);
++
++ spin_unlock_irqrestore(&esai_priv->lock, lock_flags);
+ }
+
+ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
+@@ -722,6 +728,7 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
+ {
+ struct fsl_esai *esai_priv = snd_soc_dai_get_drvdata(dai);
+ bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
++ unsigned long lock_flags;
+
+ esai_priv->channels[tx] = substream->runtime->channels;
+
+@@ -729,12 +736,16 @@ static int fsl_esai_trigger(struct snd_pcm_substream *substream, int cmd,
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
++ spin_lock_irqsave(&esai_priv->lock, lock_flags);
+ fsl_esai_trigger_start(esai_priv, tx);
++ spin_unlock_irqrestore(&esai_priv->lock, lock_flags);
+ break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
++ spin_lock_irqsave(&esai_priv->lock, lock_flags);
+ fsl_esai_trigger_stop(esai_priv, tx);
++ spin_unlock_irqrestore(&esai_priv->lock, lock_flags);
+ break;
+ default:
+ return -EINVAL;
+@@ -1002,6 +1013,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
+
+ dev_set_drvdata(&pdev->dev, esai_priv);
+
++ spin_lock_init(&esai_priv->lock);
+ ret = fsl_esai_hw_init(esai_priv);
+ if (ret)
+ return ret;
+diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
+index 01c99750212a..ef493cae78ff 100644
+--- a/sound/soc/intel/Kconfig
++++ b/sound/soc/intel/Kconfig
+@@ -59,6 +59,9 @@ config SND_SOC_INTEL_HASWELL
+ If you have a Intel Haswell or Broadwell platform connected to
+ an I2S codec, then enable this option by saying Y or m. This is
+ typically used for Chromebooks. This is a recommended option.
++ This option is mutually exclusive with the SOF support on
++ Broadwell. If you want to enable SOF on Broadwell, you need to
++ deselect this option first.
+
+ config SND_SOC_INTEL_BAYTRAIL
+ tristate "Baytrail (legacy) Platforms"
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index e9596c2096cd..a6c1cf987e6e 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -376,6 +376,17 @@ u32 rsnd_get_adinr_bit(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+ */
+ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+ {
++ static const u32 dalign_values[8][2] = {
++ {0x76543210, 0x67452301},
++ {0x00000032, 0x00000023},
++ {0x00007654, 0x00006745},
++ {0x00000076, 0x00000067},
++ {0xfedcba98, 0xefcdab89},
++ {0x000000ba, 0x000000ab},
++ {0x0000fedc, 0x0000efcd},
++ {0x000000fe, 0x000000ef},
++ };
++ int id = 0, inv;
+ struct rsnd_mod *ssiu = rsnd_io_to_mod_ssiu(io);
+ struct rsnd_mod *target;
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+@@ -411,13 +422,18 @@ u32 rsnd_get_dalign(struct rsnd_mod *mod, struct rsnd_dai_stream *io)
+ target = cmd ? cmd : ssiu;
+ }
+
++ if (mod == ssiu)
++ id = rsnd_mod_id_sub(mod);
++
+ /* Non target mod or non 16bit needs normal DALIGN */
+ if ((snd_pcm_format_width(runtime->format) != 16) ||
+ (mod != target))
+- return 0x76543210;
++ inv = 0;
+ /* Target mod needs inverted DALIGN when 16bit */
+ else
+- return 0x67452301;
++ inv = 1;
++
++ return dalign_values[id][inv];
+ }
+
+ u32 rsnd_get_busif_shift(struct rsnd_dai_stream *io, struct rsnd_mod *mod)
+diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
+index 88978a3036c4..9d3b546bae7b 100644
+--- a/sound/soc/soc-core.c
++++ b/sound/soc/soc-core.c
+@@ -1886,6 +1886,8 @@ match:
+
+ /* convert non BE into BE */
+ dai_link->no_pcm = 1;
++ dai_link->dpcm_playback = 1;
++ dai_link->dpcm_capture = 1;
+
+ /* override any BE fixups */
+ dai_link->be_hw_params_fixup =
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index a6e96cf1d8ff..d07026a846b9 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -1148,7 +1148,9 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
+ {
+ struct snd_soc_dpcm *dpcm;
+ unsigned long flags;
++#ifdef CONFIG_DEBUG_FS
+ char *name;
++#endif
+
+ /* only add new dpcms */
+ for_each_dpcm_be(fe, stream, dpcm) {
+diff --git a/sound/soc/sof/imx/imx8.c b/sound/soc/sof/imx/imx8.c
+index 2a22b18e5ec0..69785f688ddf 100644
+--- a/sound/soc/sof/imx/imx8.c
++++ b/sound/soc/sof/imx/imx8.c
+@@ -209,7 +209,7 @@ static int imx8_probe(struct snd_sof_dev *sdev)
+
+ priv->pd_dev = devm_kmalloc_array(&pdev->dev, priv->num_domains,
+ sizeof(*priv->pd_dev), GFP_KERNEL);
+- if (!priv)
++ if (!priv->pd_dev)
+ return -ENOMEM;
+
+ priv->link = devm_kmalloc_array(&pdev->dev, priv->num_domains,
+@@ -304,6 +304,9 @@ static int imx8_probe(struct snd_sof_dev *sdev)
+ }
+ sdev->mailbox_bar = SOF_FW_BLK_TYPE_SRAM;
+
++ /* set default mailbox offset for FW ready message */
++ sdev->dsp_box.offset = MBOX_OFFSET;
++
+ return 0;
+
+ exit_pdev_unregister:
+diff --git a/sound/soc/sof/intel/Kconfig b/sound/soc/sof/intel/Kconfig
+index d62f51d33be1..8421b97d949e 100644
+--- a/sound/soc/sof/intel/Kconfig
++++ b/sound/soc/sof/intel/Kconfig
+@@ -76,10 +76,18 @@ config SND_SOC_SOF_BAYTRAIL
+
+ config SND_SOC_SOF_BROADWELL_SUPPORT
+ bool "SOF support for Broadwell"
++ depends on SND_SOC_INTEL_HASWELL=n
+ help
+ This adds support for Sound Open Firmware for Intel(R) platforms
+ using the Broadwell processors.
+- Say Y if you have such a device.
++ This option is mutually exclusive with the Haswell/Broadwell legacy
++ driver. If you want to enable SOF on Broadwell you need to deselect
++ the legacy driver first.
++ SOF does fully support Broadwell yet, so this option is not
++ recommended for distros. At some point all legacy drivers will be
++ deprecated but not before all userspace firmware/topology/UCM files
++ are made available to downstream distros.
++ Say Y if you want to enable SOF on Broadwell
+ If unsure select "N".
+
+ config SND_SOC_SOF_BROADWELL
+diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
+index cd4b235fce57..e53fb4bd66b3 100644
+--- a/sound/soc/stm/stm32_spdifrx.c
++++ b/sound/soc/stm/stm32_spdifrx.c
+@@ -12,7 +12,6 @@
+ #include <linux/delay.h>
+ #include <linux/module.h>
+ #include <linux/of_platform.h>
+-#include <linux/pinctrl/consumer.h>
+ #include <linux/regmap.h>
+ #include <linux/reset.h>
+
+@@ -220,6 +219,7 @@
+ * @slave_config: dma slave channel runtime config pointer
+ * @phys_addr: SPDIFRX registers physical base address
+ * @lock: synchronization enabling lock
++ * @irq_lock: prevent race condition with IRQ on stream state
+ * @cs: channel status buffer
+ * @ub: user data buffer
+ * @irq: SPDIFRX interrupt line
+@@ -240,6 +240,7 @@ struct stm32_spdifrx_data {
+ struct dma_slave_config slave_config;
+ dma_addr_t phys_addr;
+ spinlock_t lock; /* Sync enabling lock */
++ spinlock_t irq_lock; /* Prevent race condition on stream state */
+ unsigned char cs[SPDIFRX_CS_BYTES_NB];
+ unsigned char ub[SPDIFRX_UB_BYTES_NB];
+ int irq;
+@@ -320,6 +321,7 @@ static void stm32_spdifrx_dma_ctrl_stop(struct stm32_spdifrx_data *spdifrx)
+ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
+ {
+ int cr, cr_mask, imr, ret;
++ unsigned long flags;
+
+ /* Enable IRQs */
+ imr = SPDIFRX_IMR_IFEIE | SPDIFRX_IMR_SYNCDIE | SPDIFRX_IMR_PERRIE;
+@@ -327,7 +329,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
+ if (ret)
+ return ret;
+
+- spin_lock(&spdifrx->lock);
++ spin_lock_irqsave(&spdifrx->lock, flags);
+
+ spdifrx->refcount++;
+
+@@ -360,7 +362,7 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
+ "Failed to start synchronization\n");
+ }
+
+- spin_unlock(&spdifrx->lock);
++ spin_unlock_irqrestore(&spdifrx->lock, flags);
+
+ return ret;
+ }
+@@ -368,11 +370,12 @@ static int stm32_spdifrx_start_sync(struct stm32_spdifrx_data *spdifrx)
+ static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
+ {
+ int cr, cr_mask, reg;
++ unsigned long flags;
+
+- spin_lock(&spdifrx->lock);
++ spin_lock_irqsave(&spdifrx->lock, flags);
+
+ if (--spdifrx->refcount) {
+- spin_unlock(&spdifrx->lock);
++ spin_unlock_irqrestore(&spdifrx->lock, flags);
+ return;
+ }
+
+@@ -391,7 +394,7 @@ static void stm32_spdifrx_stop(struct stm32_spdifrx_data *spdifrx)
+ regmap_read(spdifrx->regmap, STM32_SPDIFRX_DR, &reg);
+ regmap_read(spdifrx->regmap, STM32_SPDIFRX_CSR, &reg);
+
+- spin_unlock(&spdifrx->lock);
++ spin_unlock_irqrestore(&spdifrx->lock, flags);
+ }
+
+ static int stm32_spdifrx_dma_ctrl_register(struct device *dev,
+@@ -478,8 +481,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx)
+ memset(spdifrx->cs, 0, SPDIFRX_CS_BYTES_NB);
+ memset(spdifrx->ub, 0, SPDIFRX_UB_BYTES_NB);
+
+- pinctrl_pm_select_default_state(&spdifrx->pdev->dev);
+-
+ ret = stm32_spdifrx_dma_ctrl_start(spdifrx);
+ if (ret < 0)
+ return ret;
+@@ -511,7 +512,6 @@ static int stm32_spdifrx_get_ctrl_data(struct stm32_spdifrx_data *spdifrx)
+
+ end:
+ clk_disable_unprepare(spdifrx->kclk);
+- pinctrl_pm_select_sleep_state(&spdifrx->pdev->dev);
+
+ return ret;
+ }
+@@ -663,7 +663,6 @@ static const struct regmap_config stm32_h7_spdifrx_regmap_conf = {
+ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
+ {
+ struct stm32_spdifrx_data *spdifrx = (struct stm32_spdifrx_data *)devid;
+- struct snd_pcm_substream *substream = spdifrx->substream;
+ struct platform_device *pdev = spdifrx->pdev;
+ unsigned int cr, mask, sr, imr;
+ unsigned int flags;
+@@ -731,14 +730,19 @@ static irqreturn_t stm32_spdifrx_isr(int irq, void *devid)
+ regmap_update_bits(spdifrx->regmap, STM32_SPDIFRX_CR,
+ SPDIFRX_CR_SPDIFEN_MASK, cr);
+
+- if (substream)
+- snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
++ spin_lock(&spdifrx->irq_lock);
++ if (spdifrx->substream)
++ snd_pcm_stop(spdifrx->substream,
++ SNDRV_PCM_STATE_DISCONNECTED);
++ spin_unlock(&spdifrx->irq_lock);
+
+ return IRQ_HANDLED;
+ }
+
+- if (err_xrun && substream)
+- snd_pcm_stop_xrun(substream);
++ spin_lock(&spdifrx->irq_lock);
++ if (err_xrun && spdifrx->substream)
++ snd_pcm_stop_xrun(spdifrx->substream);
++ spin_unlock(&spdifrx->irq_lock);
+
+ return IRQ_HANDLED;
+ }
+@@ -747,9 +751,12 @@ static int stm32_spdifrx_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+ {
+ struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
++ unsigned long flags;
+ int ret;
+
++ spin_lock_irqsave(&spdifrx->irq_lock, flags);
+ spdifrx->substream = substream;
++ spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
+
+ ret = clk_prepare_enable(spdifrx->kclk);
+ if (ret)
+@@ -825,8 +832,12 @@ static void stm32_spdifrx_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+ {
+ struct stm32_spdifrx_data *spdifrx = snd_soc_dai_get_drvdata(cpu_dai);
++ unsigned long flags;
+
++ spin_lock_irqsave(&spdifrx->irq_lock, flags);
+ spdifrx->substream = NULL;
++ spin_unlock_irqrestore(&spdifrx->irq_lock, flags);
++
+ clk_disable_unprepare(spdifrx->kclk);
+ }
+
+@@ -930,6 +941,7 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
+ spdifrx->pdev = pdev;
+ init_completion(&spdifrx->cs_completion);
+ spin_lock_init(&spdifrx->lock);
++ spin_lock_init(&spdifrx->irq_lock);
+
+ platform_set_drvdata(pdev, spdifrx);
+
+diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
+index 56ce6292071b..33e2638ef7f0 100644
+--- a/tools/lib/bpf/Makefile
++++ b/tools/lib/bpf/Makefile
+@@ -215,7 +215,7 @@ check_abi: $(OUTPUT)libbpf.so
+ "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \
+ "Please make sure all LIBBPF_API symbols are" \
+ "versioned in $(VERSION_SCRIPT)." >&2; \
+- readelf -s --wide $(OUTPUT)libbpf-in.o | \
++ readelf -s --wide $(BPF_IN_SHARED) | \
+ cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
+ awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \
+ sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
+diff --git a/tools/pci/pcitest.c b/tools/pci/pcitest.c
+index cb1e51fcc84e..32b7c6f9043d 100644
+--- a/tools/pci/pcitest.c
++++ b/tools/pci/pcitest.c
+@@ -129,6 +129,7 @@ static int run_test(struct pci_test *test)
+ }
+
+ fflush(stdout);
++ close(fd);
+ return (ret < 0) ? ret : 1 - ret; /* return 0 if test succeeded */
+ }
+
+diff --git a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+index 68618152ea2c..89e070727e1b 100644
+--- a/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
++++ b/tools/perf/pmu-events/arch/s390/cf_z14/extended.json
+@@ -4,7 +4,7 @@
+ "EventCode": "128",
+ "EventName": "L1D_RO_EXCL_WRITES",
+ "BriefDescription": "L1D Read-only Exclusive Writes",
+- "PublicDescription": "L1D_RO_EXCL_WRITES A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
++ "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+ },
+ {
+ "Unit": "CPU-M-CF",
+diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
+index b879305a766d..5b8c0fedee76 100755
+--- a/tools/testing/selftests/firmware/fw_lib.sh
++++ b/tools/testing/selftests/firmware/fw_lib.sh
+@@ -34,6 +34,12 @@ test_modprobe()
+
+ check_mods()
+ {
++ local uid=$(id -u)
++ if [ $uid -ne 0 ]; then
++ echo "skip all tests: must be run as root" >&2
++ exit $ksft_skip
++ fi
++
+ trap "test_modprobe" EXIT
+ if [ ! -d $DIR ]; then
+ modprobe test_firmware
+diff --git a/tools/testing/selftests/net/forwarding/loopback.sh b/tools/testing/selftests/net/forwarding/loopback.sh
+index 6e4626ae71b0..8f4057310b5b 100755
+--- a/tools/testing/selftests/net/forwarding/loopback.sh
++++ b/tools/testing/selftests/net/forwarding/loopback.sh
+@@ -1,6 +1,9 @@
+ #!/bin/bash
+ # SPDX-License-Identifier: GPL-2.0
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
+ ALL_TESTS="loopback_test"
+ NUM_NETIFS=2
+ source tc_common.sh
+@@ -72,6 +75,11 @@ setup_prepare()
+
+ h1_create
+ h2_create
++
++ if ethtool -k $h1 | grep loopback | grep -q fixed; then
++ log_test "SKIP: dev $h1 does not support loopback feature"
++ exit $ksft_skip
++ fi
+ }
+
+ cleanup()
+diff --git a/tools/testing/selftests/rseq/settings b/tools/testing/selftests/rseq/settings
+new file mode 100644
+index 000000000000..e7b9417537fb
+--- /dev/null
++++ b/tools/testing/selftests/rseq/settings
+@@ -0,0 +1 @@
++timeout=0