author	Mike Pagano <mpagano@gentoo.org>	2019-08-16 08:15:48 -0400
committer	Mike Pagano <mpagano@gentoo.org>	2019-08-16 08:15:48 -0400
commit	cb6b2cec3829ccc37b3cad4e77fe06905bd73489 (patch)
tree	4dba1ed8734e8a84343a59f16a9d353652e68d3b
parent	Linux patch 5.2.8 (diff)
download	linux-patches-cb6b2cec3829ccc37b3cad4e77fe06905bd73489.tar.gz
	linux-patches-cb6b2cec3829ccc37b3cad4e77fe06905bd73489.tar.bz2
	linux-patches-cb6b2cec3829ccc37b3cad4e77fe06905bd73489.zip
Linux patch 5.2.9
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--	0000_README	4
-rw-r--r--	1008_linux-5.2.9.patch	4835
2 files changed, 4839 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 6e8d29d4..4179af77 100644
--- a/0000_README
+++ b/0000_README
@@ -75,6 +75,10 @@ Patch: 1007_linux-5.2.8.patch
From: https://www.kernel.org
Desc: Linux 5.2.8
+Patch: 1008_linux-5.2.9.patch
+From: https://www.kernel.org
+Desc: Linux 5.2.9
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1008_linux-5.2.9.patch b/1008_linux-5.2.9.patch
new file mode 100644
index 00000000..682b8db2
--- /dev/null
+++ b/1008_linux-5.2.9.patch
@@ -0,0 +1,4835 @@
+diff --git a/Makefile b/Makefile
+index bad87c4c8117..cfc667fe9959 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 2
+-SUBLEVEL = 8
++SUBLEVEL = 9
+ EXTRAVERSION =
+ NAME = Bobtail Squid
+
+diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+index 5fd47eec4407..1679959a3654 100644
+--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
++++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+@@ -126,6 +126,9 @@
+ };
+
+ mdio-bus-mux {
++ #address-cells = <1>;
++ #size-cells = <0>;
++
+ /* BIT(9) = 1 => external mdio */
+ mdio_ext: mdio@200 {
+ reg = <0x200>;
+diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+index 9207d5d071f1..d556f7c541ce 100644
+--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
++++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dtsi
+@@ -112,7 +112,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6ul-geam.dts b/arch/arm/boot/dts/imx6ul-geam.dts
+index bc77f26a2f1d..6157a058feec 100644
+--- a/arch/arm/boot/dts/imx6ul-geam.dts
++++ b/arch/arm/boot/dts/imx6ul-geam.dts
+@@ -156,7 +156,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6ul-isiot.dtsi b/arch/arm/boot/dts/imx6ul-isiot.dtsi
+index 213e802bf35c..23e6e2e7ace9 100644
+--- a/arch/arm/boot/dts/imx6ul-isiot.dtsi
++++ b/arch/arm/boot/dts/imx6ul-isiot.dtsi
+@@ -148,7 +148,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
+index 39eeeddac39e..09f7ffa9ad8c 100644
+--- a/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
++++ b/arch/arm/boot/dts/imx6ul-pico-hobbit.dts
+@@ -43,7 +43,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/imx6ul-pico-pi.dts b/arch/arm/boot/dts/imx6ul-pico-pi.dts
+index de07357b27fc..6cd7d5877d20 100644
+--- a/arch/arm/boot/dts/imx6ul-pico-pi.dts
++++ b/arch/arm/boot/dts/imx6ul-pico-pi.dts
+@@ -43,7 +43,7 @@
+ };
+
+ &i2c2 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c2>;
+ status = "okay";
+@@ -58,7 +58,7 @@
+ };
+
+ &i2c3 {
+- clock_frequency = <100000>;
++ clock-frequency = <100000>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c3>;
+ status = "okay";
+diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
+index 05d03f09ff54..71262dcdbca3 100644
+--- a/arch/arm/mach-davinci/sleep.S
++++ b/arch/arm/mach-davinci/sleep.S
+@@ -24,6 +24,7 @@
+ #define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
+
+ .text
++ .arch armv5te
+ /*
+ * Move DaVinci into deep sleep state
+ *
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+index e25f7fcd7997..cffa8991880d 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
++++ b/arch/arm64/boot/dts/freescale/imx8mm-pinfunc.h
+@@ -462,7 +462,7 @@
+ #define MX8MM_IOMUXC_SAI3_RXFS_GPIO4_IO28 0x1CC 0x434 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SAI3_RXFS_TPSMP_HTRANS0 0x1CC 0x434 0x000 0x7 0x0
+ #define MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x1D0 0x438 0x000 0x0 0x0
+-#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CAPTURE2 0x1D0 0x438 0x000 0x1 0x0
++#define MX8MM_IOMUXC_SAI3_RXC_GPT1_CLK 0x1D0 0x438 0x000 0x1 0x0
+ #define MX8MM_IOMUXC_SAI3_RXC_SAI5_RX_BCLK 0x1D0 0x438 0x4D0 0x2 0x2
+ #define MX8MM_IOMUXC_SAI3_RXC_GPIO4_IO29 0x1D0 0x438 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SAI3_RXC_TPSMP_HTRANS1 0x1D0 0x438 0x000 0x7 0x0
+@@ -472,7 +472,7 @@
+ #define MX8MM_IOMUXC_SAI3_RXD_GPIO4_IO30 0x1D4 0x43C 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SAI3_RXD_TPSMP_HDATA0 0x1D4 0x43C 0x000 0x7 0x0
+ #define MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x1D8 0x440 0x000 0x0 0x0
+-#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CLK 0x1D8 0x440 0x000 0x1 0x0
++#define MX8MM_IOMUXC_SAI3_TXFS_GPT1_CAPTURE2 0x1D8 0x440 0x000 0x1 0x0
+ #define MX8MM_IOMUXC_SAI3_TXFS_SAI5_RX_DATA1 0x1D8 0x440 0x4D8 0x2 0x2
+ #define MX8MM_IOMUXC_SAI3_TXFS_GPIO4_IO31 0x1D8 0x440 0x000 0x5 0x0
+ #define MX8MM_IOMUXC_SAI3_TXFS_TPSMP_HDATA1 0x1D8 0x440 0x000 0x7 0x0
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+index 6d635ba0904c..6632cbd88bed 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+@@ -675,8 +675,7 @@
+
+ sai2: sai@308b0000 {
+ #sound-dai-cells = <0>;
+- compatible = "fsl,imx8mq-sai",
+- "fsl,imx6sx-sai";
++ compatible = "fsl,imx8mq-sai";
+ reg = <0x308b0000 0x10000>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clk IMX8MQ_CLK_SAI2_IPG>,
+diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
+index fd5b1a4efc70..844e2964b0f5 100644
+--- a/arch/arm64/include/asm/processor.h
++++ b/arch/arm64/include/asm/processor.h
+@@ -193,6 +193,16 @@ static inline void start_thread_common(struct pt_regs *regs, unsigned long pc)
+ regs->pmr_save = GIC_PRIO_IRQON;
+ }
+
++static inline void set_ssbs_bit(struct pt_regs *regs)
++{
++ regs->pstate |= PSR_SSBS_BIT;
++}
++
++static inline void set_compat_ssbs_bit(struct pt_regs *regs)
++{
++ regs->pstate |= PSR_AA32_SSBS_BIT;
++}
++
+ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
+ unsigned long sp)
+ {
+@@ -200,7 +210,7 @@ static inline void start_thread(struct pt_regs *regs, unsigned long pc,
+ regs->pstate = PSR_MODE_EL0t;
+
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+- regs->pstate |= PSR_SSBS_BIT;
++ set_ssbs_bit(regs);
+
+ regs->sp = sp;
+ }
+@@ -219,7 +229,7 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
+ #endif
+
+ if (arm64_get_ssbd_state() != ARM64_SSBD_FORCE_ENABLE)
+- regs->pstate |= PSR_AA32_SSBS_BIT;
++ set_compat_ssbs_bit(regs);
+
+ regs->compat_sp = sp;
+ }
+diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
+index 9cdc4592da3e..320a30dbe35e 100644
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -586,10 +586,8 @@ el1_sync:
+ b.eq el1_ia
+ cmp x24, #ESR_ELx_EC_SYS64 // configurable trap
+ b.eq el1_undef
+- cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
+- b.eq el1_sp_pc
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
+- b.eq el1_sp_pc
++ b.eq el1_pc
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL1
+ b.eq el1_undef
+ cmp x24, #ESR_ELx_EC_BREAKPT_CUR // debug exception in EL1
+@@ -611,9 +609,11 @@ el1_da:
+ bl do_mem_abort
+
+ kernel_exit 1
+-el1_sp_pc:
++el1_pc:
+ /*
+- * Stack or PC alignment exception handling
++ * PC alignment exception handling. We don't handle SP alignment faults,
++ * since we will have hit a recursive exception when trying to push the
++ * initial pt_regs.
+ */
+ mrs x0, far_el1
+ inherit_daif pstate=x23, tmp=x2
+@@ -732,9 +732,9 @@ el0_sync:
+ ccmp x24, #ESR_ELx_EC_WFx, #4, ne
+ b.eq el0_sys
+ cmp x24, #ESR_ELx_EC_SP_ALIGN // stack alignment exception
+- b.eq el0_sp_pc
++ b.eq el0_sp
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
+- b.eq el0_sp_pc
++ b.eq el0_pc
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
+ b.eq el0_undef
+ cmp x24, #ESR_ELx_EC_BREAKPT_LOW // debug exception in EL0
+@@ -758,7 +758,7 @@ el0_sync_compat:
+ cmp x24, #ESR_ELx_EC_FP_EXC32 // FP/ASIMD exception
+ b.eq el0_fpsimd_exc
+ cmp x24, #ESR_ELx_EC_PC_ALIGN // pc alignment exception
+- b.eq el0_sp_pc
++ b.eq el0_pc
+ cmp x24, #ESR_ELx_EC_UNKNOWN // unknown exception in EL0
+ b.eq el0_undef
+ cmp x24, #ESR_ELx_EC_CP15_32 // CP15 MRC/MCR trap
+@@ -858,11 +858,15 @@ el0_fpsimd_exc:
+ mov x1, sp
+ bl do_fpsimd_exc
+ b ret_to_user
++el0_sp:
++ ldr x26, [sp, #S_SP]
++ b el0_sp_pc
++el0_pc:
++ mrs x26, far_el1
+ el0_sp_pc:
+ /*
+ * Stack or PC alignment exception handling
+ */
+- mrs x26, far_el1
+ gic_prio_kentry_setup tmp=x0
+ enable_da_f
+ #ifdef CONFIG_TRACE_IRQFLAGS
+diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
+index 6a869d9f304f..b0c859ca6320 100644
+--- a/arch/arm64/kernel/process.c
++++ b/arch/arm64/kernel/process.c
+@@ -398,7 +398,7 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
+ childregs->pstate |= PSR_UAO_BIT;
+
+ if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE)
+- childregs->pstate |= PSR_SSBS_BIT;
++ set_ssbs_bit(childregs);
+
+ if (system_uses_irq_prio_masking())
+ childregs->pmr_save = GIC_PRIO_IRQON;
+@@ -442,6 +442,32 @@ void uao_thread_switch(struct task_struct *next)
+ }
+ }
+
++/*
++ * Force SSBS state on context-switch, since it may be lost after migrating
++ * from a CPU which treats the bit as RES0 in a heterogeneous system.
++ */
++static void ssbs_thread_switch(struct task_struct *next)
++{
++ struct pt_regs *regs = task_pt_regs(next);
++
++ /*
++ * Nothing to do for kernel threads, but 'regs' may be junk
++ * (e.g. idle task) so check the flags and bail early.
++ */
++ if (unlikely(next->flags & PF_KTHREAD))
++ return;
++
++ /* If the mitigation is enabled, then we leave SSBS clear. */
++ if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
++ test_tsk_thread_flag(next, TIF_SSBD))
++ return;
++
++ if (compat_user_mode(regs))
++ set_compat_ssbs_bit(regs);
++ else if (user_mode(regs))
++ set_ssbs_bit(regs);
++}
++
+ /*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+@@ -471,6 +497,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
+ entry_task_switch(next);
+ uao_thread_switch(next);
+ ptrauth_thread_switch(next);
++ ssbs_thread_switch(next);
+
+ /*
+ * Complete any pending TLB or cache maintenance on this CPU in case
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 6d704ad2472b..993017dd83ca 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -50,6 +50,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+ return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
+ }
+
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ return kvm_arch_vcpu_runnable(vcpu);
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+ return false;
+diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
+index 2540d3b2588c..2eda1ec36f55 100644
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -249,7 +249,7 @@ void __init paging_init(void)
+
+ #ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
+- ((1UL << ARCH_ZONE_DMA_BITS) - 1) >> PAGE_SHIFT);
++ 1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
+ #endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+ #ifdef CONFIG_HIGHMEM
+diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c
+index 96c53b23e58f..dad9825e4087 100644
+--- a/arch/powerpc/platforms/pseries/papr_scm.c
++++ b/arch/powerpc/platforms/pseries/papr_scm.c
+@@ -42,8 +42,9 @@ struct papr_scm_priv {
+ static int drc_pmem_bind(struct papr_scm_priv *p)
+ {
+ unsigned long ret[PLPAR_HCALL_BUFSIZE];
+- uint64_t rc, token;
+ uint64_t saved = 0;
++ uint64_t token;
++ int64_t rc;
+
+ /*
+ * When the hypervisor cannot map all the requested memory in a single
+@@ -63,6 +64,10 @@ static int drc_pmem_bind(struct papr_scm_priv *p)
+ } while (rc == H_BUSY);
+
+ if (rc) {
++ /* H_OVERLAP needs a separate error path */
++ if (rc == H_OVERLAP)
++ return -EBUSY;
++
+ dev_err(&p->pdev->dev, "bind err: %lld\n", rc);
+ return -ENXIO;
+ }
+@@ -316,6 +321,14 @@ static int papr_scm_probe(struct platform_device *pdev)
+
+ /* request the hypervisor to bind this region to somewhere in memory */
+ rc = drc_pmem_bind(p);
++
++ /* If phyp says drc memory still bound then force unbound and retry */
++ if (rc == -EBUSY) {
++ dev_warn(&pdev->dev, "Retrying bind after unbinding\n");
++ drc_pmem_unbind(p);
++ rc = drc_pmem_bind(p);
++ }
++
+ if (rc)
+ goto err;
+
+diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
+index a4d38092530a..823578c6b9e2 100644
+--- a/arch/s390/include/asm/page.h
++++ b/arch/s390/include/asm/page.h
+@@ -177,6 +177,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#define ARCH_ZONE_DMA_BITS 31
++
+ #include <asm-generic/memory_model.h>
+ #include <asm-generic/getorder.h>
+
+diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
+index 401e30ca0a75..8272a4492844 100644
+--- a/arch/x86/boot/string.c
++++ b/arch/x86/boot/string.c
+@@ -37,6 +37,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
+ return diff;
+ }
+
++/*
++ * Clang may lower `memcmp == 0` to `bcmp == 0`.
++ */
++int bcmp(const void *s1, const void *s2, size_t len)
++{
++ return memcmp(s1, s2, len);
++}
++
+ int strcmp(const char *str1, const char *str2)
+ {
+ const unsigned char *s1 = (const unsigned char *)str1;
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 2889dd023566..6179be624f35 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -20,7 +20,6 @@
+ #include <asm/intel-family.h>
+ #include <asm/apic.h>
+ #include <asm/cpu_device_id.h>
+-#include <asm/hypervisor.h>
+
+ #include "../perf_event.h"
+
+@@ -263,8 +262,8 @@ static struct event_constraint intel_icl_event_constraints[] = {
+ };
+
+ static struct extra_reg intel_icl_extra_regs[] __read_mostly = {
+- INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff9fffull, RSP_0),
+- INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff9fffull, RSP_1),
++ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffbfffull, RSP_0),
++ INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffffbfffull, RSP_1),
+ INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
+ INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fff17, FE),
+ EVENT_EXTRA_END
+@@ -4057,7 +4056,7 @@ static bool check_msr(unsigned long msr, u64 mask)
+ * Disable the check for real HW, so we don't
+ * mess with potentionaly enabled registers:
+ */
+- if (hypervisor_is_type(X86_HYPER_NATIVE))
++ if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
+ return true;
+
+ /*
+diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
+index 505c73dc6a73..6601b8759c92 100644
+--- a/arch/x86/events/intel/ds.c
++++ b/arch/x86/events/intel/ds.c
+@@ -851,7 +851,7 @@ struct event_constraint intel_skl_pebs_event_constraints[] = {
+
+ struct event_constraint intel_icl_pebs_event_constraints[] = {
+ INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x100000000ULL), /* INST_RETIRED.PREC_DIST */
+- INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x400000000ULL), /* SLOTS */
++ INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL), /* SLOTS */
+
+ INTEL_PLD_CONSTRAINT(0x1cd, 0xff), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
+ INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x1d0, 0xf), /* MEM_INST_RETIRED.LOAD */
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 8253925c5e8c..921c609c2af7 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -1169,6 +1169,7 @@ struct kvm_x86_ops {
+ int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
+ uint32_t guest_irq, bool set);
+ void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
++ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
+
+ int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
+ bool *expired);
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 48c865a4e5dd..14384a1ec53f 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -3290,7 +3290,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
+ vmcb->control.exit_int_info_err,
+ KVM_ISA_SVM);
+
+- rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(svm->nested.vmcb), &map);
++ rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map);
+ if (rc) {
+ if (rc == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
+@@ -3580,7 +3580,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
+
+ vmcb_gpa = svm->vmcb->save.rax;
+
+- rc = kvm_vcpu_map(&svm->vcpu, gfn_to_gpa(vmcb_gpa), &map);
++ rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
+ if (rc) {
+ if (rc == -EINVAL)
+ kvm_inject_gp(&svm->vcpu, 0);
+@@ -5167,6 +5167,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+ kvm_vcpu_wake_up(vcpu);
+ }
+
++static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++ return false;
++}
++
+ static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
+ {
+ unsigned long flags;
+@@ -7264,6 +7269,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
+
+ .pmu_ops = &amd_pmu_ops,
+ .deliver_posted_interrupt = svm_deliver_avic_intr,
++ .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
+ .update_pi_irte = svm_update_pi_irte,
+ .setup_mce = svm_setup_mce,
+
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 924c2a79e4a9..4b830c0adcf8 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6096,6 +6096,11 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
+ return max_irr;
+ }
+
++static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
++{
++ return pi_test_on(vcpu_to_pi_desc(vcpu));
++}
++
+ static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+ {
+ if (!kvm_vcpu_apicv_active(vcpu))
+@@ -7662,6 +7667,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
+ .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
+ .sync_pir_to_irr = vmx_sync_pir_to_irr,
+ .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
++ .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
+
+ .set_tss_addr = vmx_set_tss_addr,
+ .set_identity_map_addr = vmx_set_identity_map_addr,
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a8ad3a4d86b1..cbced8ff29d4 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -9641,6 +9641,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+ return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+ }
+
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
++ return true;
++
++ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
++ kvm_test_request(KVM_REQ_SMI, vcpu) ||
++ kvm_test_request(KVM_REQ_EVENT, vcpu))
++ return true;
++
++ if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
++ return true;
++
++ return false;
++}
++
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+ return vcpu->arch.preempted_in_kernel;
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 46df4c6aae46..26a8b4b1b9ed 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -194,13 +194,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
+
+ pmd = pmd_offset(pud, address);
+ pmd_k = pmd_offset(pud_k, address);
+- if (!pmd_present(*pmd_k))
+- return NULL;
+
+- if (!pmd_present(*pmd))
++ if (pmd_present(*pmd) != pmd_present(*pmd_k))
+ set_pmd(pmd, *pmd_k);
++
++ if (!pmd_present(*pmd_k))
++ return NULL;
+ else
+- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
+
+ return pmd_k;
+ }
+@@ -220,17 +221,13 @@ void vmalloc_sync_all(void)
+ spin_lock(&pgd_lock);
+ list_for_each_entry(page, &pgd_list, lru) {
+ spinlock_t *pgt_lock;
+- pmd_t *ret;
+
+ /* the pgt_lock only for Xen */
+ pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
+
+ spin_lock(pgt_lock);
+- ret = vmalloc_sync_one(page_address(page), address);
++ vmalloc_sync_one(page_address(page), address);
+ spin_unlock(pgt_lock);
+-
+- if (!ret)
+- break;
+ }
+ spin_unlock(&pgd_lock);
+ }
+diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
+index 3cf302b26332..8901a1f89cf5 100644
+--- a/arch/x86/purgatory/Makefile
++++ b/arch/x86/purgatory/Makefile
+@@ -6,6 +6,9 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
+ targets += $(purgatory-y)
+ PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
+
++$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
++ $(call if_changed_rule,cc_o_c)
++
+ $(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
+@@ -17,11 +20,34 @@ KCOV_INSTRUMENT := n
+
+ # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
+ # in turn leaves some undefined symbols like __fentry__ in purgatory and not
+-# sure how to relocate those. Like kexec-tools, use custom flags.
+-
+-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large
+-KBUILD_CFLAGS += -m$(BITS)
+-KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
++# sure how to relocate those.
++ifdef CONFIG_FUNCTION_TRACER
++CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
++endif
++
++ifdef CONFIG_STACKPROTECTOR
++CFLAGS_REMOVE_sha256.o += -fstack-protector
++CFLAGS_REMOVE_purgatory.o += -fstack-protector
++CFLAGS_REMOVE_string.o += -fstack-protector
++CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
++endif
++
++ifdef CONFIG_STACKPROTECTOR_STRONG
++CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
++CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
++CFLAGS_REMOVE_string.o += -fstack-protector-strong
++CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
++endif
++
++ifdef CONFIG_RETPOLINE
++CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
++CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
++CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
++CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
++endif
+
+ $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
+ $(call if_changed,ld)
+diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
+index 6d8d5a34c377..b607bda786f6 100644
+--- a/arch/x86/purgatory/purgatory.c
++++ b/arch/x86/purgatory/purgatory.c
+@@ -68,3 +68,9 @@ void purgatory(void)
+ }
+ copy_backup_region();
+ }
++
++/*
++ * Defined in order to reuse memcpy() and memset() from
++ * arch/x86/boot/compressed/string.c
++ */
++void warn(const char *msg) {}
+diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
+deleted file mode 100644
+index 01ad43873ad9..000000000000
+--- a/arch/x86/purgatory/string.c
++++ /dev/null
+@@ -1,23 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * Simple string functions.
+- *
+- * Copyright (C) 2014 Red Hat Inc.
+- *
+- * Author:
+- * Vivek Goyal <vgoyal@redhat.com>
+- */
+-
+-#include <linux/types.h>
+-
+-#include "../boot/string.c"
+-
+-void *memcpy(void *dst, const void *src, size_t len)
+-{
+- return __builtin_memcpy(dst, src, len);
+-}
+-
+-void *memset(void *dst, int c, size_t len)
+-{
+- return __builtin_memset(dst, c, len);
+-}
+diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
+index 659ccb8b693f..06d024204f50 100644
+--- a/block/blk-rq-qos.c
++++ b/block/blk-rq-qos.c
+@@ -202,6 +202,7 @@ static int rq_qos_wake_function(struct wait_queue_entry *curr,
+ return -1;
+
+ data->got_token = true;
++ smp_wmb();
+ list_del_init(&curr->entry);
+ wake_up_process(data->task);
+ return 1;
+@@ -245,6 +246,7 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+
+ prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
+ do {
++ /* The memory barrier in set_task_state saves us here. */
+ if (data.got_token)
+ break;
+ if (!has_sleeper && acquire_inflight_cb(rqw, private_data)) {
+@@ -255,12 +257,14 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
+ * which means we now have two. Put our local token
+ * and wake anyone else potentially waiting for one.
+ */
++ smp_rmb();
+ if (data.got_token)
+ cleanup_cb(rqw, private_data);
+ break;
+ }
+ io_schedule();
+- has_sleeper = false;
++ has_sleeper = true;
++ set_current_state(TASK_UNINTERRUPTIBLE);
+ } while (1);
+ finish_wait(&rqw->wait, &data.wq);
+ }
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index d4551e33fa71..8569b79e8b58 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -611,8 +611,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
+
+ /* Move to ITS specific data */
+ its = (struct acpi_iort_its_group *)node->node_data;
+- if (idx > its->its_count) {
+- dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
++ if (idx >= its->its_count) {
++ dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
+ idx, its->its_count);
+ return -ENXIO;
+ }
+diff --git a/drivers/base/platform.c b/drivers/base/platform.c
+index 4d1729853d1a..8b25c7b12179 100644
+--- a/drivers/base/platform.c
++++ b/drivers/base/platform.c
+@@ -157,8 +157,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
+ * the device will only expose one IRQ, and this fallback
+ * allows a common code path across either kind of resource.
+ */
+- if (num == 0 && has_acpi_companion(&dev->dev))
+- return acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
++ if (num == 0 && has_acpi_companion(&dev->dev)) {
++ int ret = acpi_dev_gpio_irq_get(ACPI_COMPANION(&dev->dev), num);
++
++ /* Our callers expect -ENXIO for missing IRQs. */
++ if (ret >= 0 || ret == -EPROBE_DEFER)
++ return ret;
++ }
+
+ return -ENXIO;
+ #endif
+diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
+index 90ebfcae0ce6..2b3103c30857 100644
+--- a/drivers/block/drbd/drbd_receiver.c
++++ b/drivers/block/drbd/drbd_receiver.c
+@@ -5417,7 +5417,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
+ unsigned int key_len;
+ char secret[SHARED_SECRET_MAX]; /* 64 byte */
+ unsigned int resp_size;
+- SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
++ struct shash_desc *desc;
+ struct packet_info pi;
+ struct net_conf *nc;
+ int err, rv;
+@@ -5430,6 +5430,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
+ memcpy(secret, nc->shared_secret, key_len);
+ rcu_read_unlock();
+
++ desc = kmalloc(sizeof(struct shash_desc) +
++ crypto_shash_descsize(connection->cram_hmac_tfm),
++ GFP_KERNEL);
++ if (!desc) {
++ rv = -1;
++ goto fail;
++ }
+ desc->tfm = connection->cram_hmac_tfm;
+
+ rv = crypto_shash_setkey(connection->cram_hmac_tfm, (u8 *)secret, key_len);
+@@ -5571,7 +5578,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
+ kfree(peers_ch);
+ kfree(response);
+ kfree(right_response);
+- shash_desc_zero(desc);
++ if (desc) {
++ shash_desc_zero(desc);
++ kfree(desc);
++ }
+
+ return rv;
+ }
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 430d31499ce9..e1739efca37e 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -893,7 +893,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
+
+ static int loop_kthread_worker_fn(void *worker_ptr)
+ {
+- current->flags |= PF_LESS_THROTTLE;
++ current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
+ return kthread_worker_fn(worker_ptr);
+ }
+
+diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
+index 6b1e4abe3248..d2f061015323 100644
+--- a/drivers/cpufreq/pasemi-cpufreq.c
++++ b/drivers/cpufreq/pasemi-cpufreq.c
+@@ -131,10 +131,18 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ int err = -ENODEV;
+
+ cpu = of_get_cpu_node(policy->cpu, NULL);
++ if (!cpu)
++ goto out;
+
++ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+ of_node_put(cpu);
+- if (!cpu)
++ if (!max_freqp) {
++ err = -EINVAL;
+ goto out;
++ }
++
++ /* we need the freq in kHz */
++ max_freq = *max_freqp / 1000;
+
+ dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
+ if (!dn)
+@@ -171,16 +179,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ }
+
+ pr_debug("init cpufreq on CPU %d\n", policy->cpu);
+-
+- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+- if (!max_freqp) {
+- err = -EINVAL;
+- goto out_unmap_sdcpwr;
+- }
+-
+- /* we need the freq in kHz */
+- max_freq = *max_freqp / 1000;
+-
+ pr_debug("max clock-frequency is at %u kHz\n", max_freq);
+ pr_debug("initializing frequency table\n");
+
+@@ -198,9 +196,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
+
+ return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
+
+-out_unmap_sdcpwr:
+- iounmap(sdcpwr_mapbase);
+-
+ out_unmap_sdcasr:
+ iounmap(sdcasr_mapbase);
+ out:
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+index f9fec2ddf56a..94c1ad7eeddf 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+@@ -58,6 +58,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
+ static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
+ unsigned int authsize)
+ {
++ switch (authsize) {
++ case 16:
++ case 15:
++ case 14:
++ case 13:
++ case 12:
++ case 8:
++ case 4:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ return 0;
+ }
+
+@@ -104,6 +117,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_AES;
++ rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
+ rctx->cmd.u.aes.type = ctx->u.aes.type;
+ rctx->cmd.u.aes.mode = ctx->u.aes.mode;
+ rctx->cmd.u.aes.action = encrypt;
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 1cbdfc08ca00..a13e8a362316 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -622,6 +622,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+
+ unsigned long long *final;
+ unsigned int dm_offset;
++ unsigned int authsize;
+ unsigned int jobid;
+ unsigned int ilen;
+ bool in_place = true; /* Default value */
+@@ -643,6 +644,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ if (!aes->key) /* Gotta have a key SGL */
+ return -EINVAL;
+
++ /* Zero defaults to 16 bytes, the maximum size */
++ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
++ switch (authsize) {
++ case 16:
++ case 15:
++ case 14:
++ case 13:
++ case 12:
++ case 8:
++ case 4:
++ break;
++ default:
++ return -EINVAL;
++ }
++
+ /* First, decompose the source buffer into AAD & PT,
+ * and the destination buffer into AAD, CT & tag, or
+ * the input into CT & tag.
+@@ -657,7 +673,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
+ } else {
+ /* Input length for decryption includes tag */
+- ilen = aes->src_len - AES_BLOCK_SIZE;
++ ilen = aes->src_len - authsize;
+ p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
+ }
+
+@@ -766,8 +782,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+ while (src.sg_wa.bytes_left) {
+ ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
+ if (!src.sg_wa.bytes_left) {
+- unsigned int nbytes = aes->src_len
+- % AES_BLOCK_SIZE;
++ unsigned int nbytes = ilen % AES_BLOCK_SIZE;
+
+ if (nbytes) {
+ op.eom = 1;
+@@ -839,19 +854,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+
+ if (aes->action == CCP_AES_ACTION_ENCRYPT) {
+ /* Put the ciphered tag after the ciphertext. */
+- ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+ } else {
+ /* Does this ciphered tag match the input? */
+- ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
++ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+ DMA_BIDIRECTIONAL);
+ if (ret)
+ goto e_tag;
+- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
++ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+ if (ret)
+ goto e_tag;
+
+ ret = crypto_memneq(tag.address, final_wa.address,
+- AES_BLOCK_SIZE) ? -EBADMSG : 0;
++ authsize) ? -EBADMSG : 0;
+ ccp_dm_free(&tag);
+ }
+
+@@ -859,11 +874,11 @@ e_tag:
+ ccp_dm_free(&final_wa);
+
+ e_dst:
+- if (aes->src_len && !in_place)
++ if (ilen > 0 && !in_place)
+ ccp_free_data(&dst, cmd_q);
+
+ e_src:
+- if (aes->src_len)
++ if (ilen > 0)
+ ccp_free_data(&src, cmd_q);
+
+ e_aad:
+diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
+index d40ccc3af9e2..fa7ed01415b7 100644
+--- a/drivers/firmware/Kconfig
++++ b/drivers/firmware/Kconfig
+@@ -157,7 +157,7 @@ config DMI_SCAN_MACHINE_NON_EFI_FALLBACK
+
+ config ISCSI_IBFT_FIND
+ bool "iSCSI Boot Firmware Table Attributes"
+- depends on X86 && ACPI
++ depends on X86 && ISCSI_IBFT
+ default n
+ help
+ This option enables the kernel to find the region of memory
+@@ -168,7 +168,8 @@ config ISCSI_IBFT_FIND
+ config ISCSI_IBFT
+ tristate "iSCSI Boot Firmware Table Attributes module"
+ select ISCSI_BOOT_SYSFS
+- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
++ select ISCSI_IBFT_FIND if X86
++ depends on ACPI && SCSI && SCSI_LOWLEVEL
+ default n
+ help
+ This option enables support for detection and exposing of iSCSI
+diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
+index ab3aa3983833..7e12cbdf957c 100644
+--- a/drivers/firmware/iscsi_ibft.c
++++ b/drivers/firmware/iscsi_ibft.c
+@@ -84,6 +84,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(IBFT_ISCSI_VERSION);
+
++#ifndef CONFIG_ISCSI_IBFT_FIND
++struct acpi_table_ibft *ibft_addr;
++#endif
++
+ struct ibft_hdr {
+ u8 id;
+ u8 version;
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
+index ee6b646180b6..0a7adc2925e3 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
+@@ -608,8 +608,10 @@ const struct dc_link_settings *dc_link_get_link_cap(
+
+ static void destruct(struct dc *dc)
+ {
+- dc_release_state(dc->current_state);
+- dc->current_state = NULL;
++ if (dc->current_state) {
++ dc_release_state(dc->current_state);
++ dc->current_state = NULL;
++ }
+
+ destroy_links(dc);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index a3ff33ff6da1..adf39e3b8d29 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -2284,7 +2284,7 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
+ if (core_dc->current_state->res_ctx.pipe_ctx[i].stream) {
+ if (core_dc->current_state->res_ctx.
+ pipe_ctx[i].stream->link
+- == link)
++ == link) {
+ /* DMCU -1 for all controller id values,
+ * therefore +1 here
+ */
+@@ -2292,6 +2292,13 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
+ core_dc->current_state->
+ res_ctx.pipe_ctx[i].stream_res.tg->inst +
+ 1;
++
++ /* Disable brightness ramping when the display is blanked
++ * as it can hang the DMCU
++ */
++ if (core_dc->current_state->res_ctx.pipe_ctx[i].plane_state == NULL)
++ frame_ramp = 0;
++ }
+ }
+ }
+ abm->funcs->set_backlight_level_pwm(
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index 253311864cdd..966aa3b754c5 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2218,11 +2218,18 @@ static void get_active_converter_info(
+ link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE;
+ ddc_service_set_dongle_type(link->ddc,
+ link->dpcd_caps.dongle_type);
++ link->dpcd_caps.is_branch_dev = false;
+ return;
+ }
+
+ /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
+- link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
++ if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
++ link->dpcd_caps.is_branch_dev = false;
++ }
++
++ else {
++ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
++ }
+
+ switch (ds_port.fields.PORT_TYPE) {
+ case DOWNSTREAM_VGA:
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index 12142d13f22f..b459ce056b60 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -254,7 +254,7 @@ bool resource_construct(
+ * PORT_CONNECTIVITY == 1 (as instructed by HW team).
+ */
+ update_num_audio(&straps, &num_audio, &pool->audio_support);
+- for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
++ for (i = 0; i < caps->num_audio; i++) {
+ struct audio *aud = create_funcs->create_audio(ctx, i);
+
+ if (aud == NULL) {
+@@ -1702,6 +1702,12 @@ static struct audio *find_first_free_audio(
+ return pool->audios[i];
+ }
+ }
++
++ /* use engine id to find free audio */
++ if ((id < pool->audio_count) && (res_ctx->is_audio_acquired[id] == false)) {
++ return pool->audios[id];
++ }
++
+ /*not found the matching one, first come first serve*/
+ for (i = 0; i < pool->audio_count; i++) {
+ if (res_ctx->is_audio_acquired[i] == false) {
+@@ -1866,6 +1872,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
+ pix_clk /= 2;
+ if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
+ switch (timing->display_color_depth) {
++ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ normalized_pix_clk = pix_clk;
+ break;
+@@ -2012,7 +2019,7 @@ enum dc_status resource_map_pool_resources(
+ /* TODO: Add check if ASIC support and EDID audio */
+ if (!stream->converter_disable_audio &&
+ dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
+- stream->audio_info.mode_count) {
++ stream->audio_info.mode_count && stream->audio_info.flags.all) {
+ pipe_ctx->stream_res.audio = find_first_free_audio(
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+index 2959c3c9390b..da30ae04e82b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+@@ -234,6 +234,10 @@ static void dmcu_set_backlight_level(
+ s2 |= (backlight_8_bit << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
+
+ REG_WRITE(BIOS_SCRATCH_2, s2);
++
++ /* waitDMCUReadyForCmd */
++ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
++ 0, 1, 80000);
+ }
+
+ static void dce_abm_init(struct abm *abm)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+index 9e4d70a0055e..5cc5dabf4d65 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+@@ -1120,16 +1120,7 @@ static void dcn10_init_hw(struct dc *dc)
+ * everything down.
+ */
+ if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
+- for (i = 0; i < dc->res_pool->pipe_count; i++) {
+- struct hubp *hubp = dc->res_pool->hubps[i];
+- struct dpp *dpp = dc->res_pool->dpps[i];
+-
+- hubp->funcs->hubp_init(hubp);
+- dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
+- plane_atomic_power_down(dc, dpp, hubp);
+- }
+-
+- apply_DEGVIDCN10_253_wa(dc);
++ dc->hwss.init_pipes(dc, dc->current_state);
+ }
+
+ for (i = 0; i < dc->res_pool->audio_count; i++) {
+@@ -1298,10 +1289,6 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
+ return result;
+ }
+
+-
+-
+-
+-
+ static bool
+ dcn10_set_output_transfer_func(struct pipe_ctx *pipe_ctx,
+ const struct dc_stream_state *stream)
+@@ -2416,6 +2403,12 @@ static void dcn10_apply_ctx_for_surface(
+ if (removed_pipe[i])
+ dcn10_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+
++ for (i = 0; i < dc->res_pool->pipe_count; i++)
++ if (removed_pipe[i]) {
++ dc->hwss.optimize_bandwidth(dc, context);
++ break;
++ }
++
+ if (dc->hwseq->wa.DEGVIDCN10_254)
+ hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
+ }
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+index 7eccb54c421d..aac52eed6b2a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+@@ -512,7 +512,7 @@ static const struct resource_caps rv2_res_cap = {
+ .num_audio = 3,
+ .num_stream_encoder = 3,
+ .num_pll = 3,
+- .num_ddc = 3,
++ .num_ddc = 4,
+ };
+ #endif
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+index 6f5ab05d6467..6f0cc718fbd7 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+@@ -169,7 +169,7 @@ struct resource_pool {
+ struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
+ unsigned int clk_src_count;
+
+- struct audio *audios[MAX_PIPES];
++ struct audio *audios[MAX_AUDIOS];
+ unsigned int audio_count;
+ struct audio_support audio_support;
+
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+index 4c8e2c6fb6db..72266efd826c 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+@@ -34,6 +34,7 @@
+ * Data types shared between different Virtual HW blocks
+ ******************************************************************************/
+
++#define MAX_AUDIOS 7
+ #define MAX_PIPES 6
+
+ struct gamma_curve {
+diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
+index d8d75e25f6fb..45f6f11a88a7 100644
+--- a/drivers/gpu/drm/drm_framebuffer.c
++++ b/drivers/gpu/drm/drm_framebuffer.c
+@@ -830,7 +830,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
+ struct drm_device *dev = fb->dev;
+ struct drm_atomic_state *state;
+ struct drm_plane *plane;
+- struct drm_connector *conn;
++ struct drm_connector *conn __maybe_unused;
+ struct drm_connector_state *conn_state;
+ int i, ret;
+ unsigned plane_mask;
+diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
+index 5e7b1fb2db5d..8ea1c927dbad 100644
+--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
++++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
+@@ -394,8 +394,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
+ else
+ txesc2_div = 10;
+
+- I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
+- I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
++ I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
++ I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
+ }
+
+ /* Program BXT Mipi clocks and dividers */
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 0ea150196659..c62f7abcf509 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -2226,8 +2226,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ if (ret)
+ goto fail;
+
+- spin_lock_init(&dpu_enc->enc_spinlock);
+-
+ atomic_set(&dpu_enc->frame_done_timeout_ms, 0);
+ timer_setup(&dpu_enc->frame_done_timer,
+ dpu_encoder_frame_done_timeout, 0);
+@@ -2281,6 +2279,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+
+ drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
++ spin_lock_init(&dpu_enc->enc_spinlock);
+ dpu_enc->enabled = false;
+
+ return &dpu_enc->base;
+diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
+index 93942063b51b..49dd2d905c7f 100644
+--- a/drivers/hid/hid-sony.c
++++ b/drivers/hid/hid-sony.c
+@@ -585,10 +585,14 @@ static void sony_set_leds(struct sony_sc *sc);
+ static inline void sony_schedule_work(struct sony_sc *sc,
+ enum sony_worker which)
+ {
++ unsigned long flags;
++
+ switch (which) {
+ case SONY_WORKER_STATE:
+- if (!sc->defer_initialization)
++ spin_lock_irqsave(&sc->lock, flags);
++ if (!sc->defer_initialization && sc->state_worker_initialized)
+ schedule_work(&sc->state_worker);
++ spin_unlock_irqrestore(&sc->lock, flags);
+ break;
+ case SONY_WORKER_HOTPLUG:
+ if (sc->hotplug_worker_initialized)
+@@ -2558,13 +2562,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
+
+ static inline void sony_cancel_work_sync(struct sony_sc *sc)
+ {
++ unsigned long flags;
++
+ if (sc->hotplug_worker_initialized)
+ cancel_work_sync(&sc->hotplug_worker);
+- if (sc->state_worker_initialized)
++ if (sc->state_worker_initialized) {
++ spin_lock_irqsave(&sc->lock, flags);
++ sc->state_worker_initialized = 0;
++ spin_unlock_irqrestore(&sc->lock, flags);
+ cancel_work_sync(&sc->state_worker);
++ }
+ }
+
+-
+ static int sony_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+ {
+diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
+index 3fb9c0a2d6d0..ce5ec403ec73 100644
+--- a/drivers/hwmon/lm75.c
++++ b/drivers/hwmon/lm75.c
+@@ -343,7 +343,7 @@ lm75_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ data->sample_time = MSEC_PER_SEC / 2;
+ break;
+ case tmp75b: /* not one-shot mode, Conversion rate 37Hz */
+- clr_mask |= 1 << 15 | 0x3 << 13;
++ clr_mask |= 1 << 7 | 0x3 << 5;
+ data->resolution = 12;
+ data->sample_time = MSEC_PER_SEC / 37;
+ break;
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index e7dff5febe16..d42bc0883a32 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -852,7 +852,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
+-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
++static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
+ static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
+ static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
+
+@@ -3764,6 +3764,7 @@ static int nct6775_probe(struct platform_device *pdev)
+ data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
+ data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
++ data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6106_REG_PWM;
+ data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
+ data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
+diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
+index ec7bcf8d7cd6..f3dd2a17bd42 100644
+--- a/drivers/hwmon/nct7802.c
++++ b/drivers/hwmon/nct7802.c
+@@ -704,7 +704,7 @@ static struct attribute *nct7802_in_attrs[] = {
+ &sensor_dev_attr_in3_alarm.dev_attr.attr,
+ &sensor_dev_attr_in3_beep.dev_attr.attr,
+
+- &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */
++ &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */
+ &sensor_dev_attr_in4_min.dev_attr.attr,
+ &sensor_dev_attr_in4_max.dev_attr.attr,
+ &sensor_dev_attr_in4_alarm.dev_attr.attr,
+@@ -730,9 +730,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
+
+ if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
+ return 0;
+- if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */
++ if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */
+ return 0;
+- if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */
++ if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */
+ return 0;
+
+ return attr->mode;
+diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
+index 13a6290c8d25..f02aa403332c 100644
+--- a/drivers/hwmon/occ/common.c
++++ b/drivers/hwmon/occ/common.c
+@@ -402,8 +402,10 @@ static ssize_t occ_show_power_1(struct device *dev,
+
+ static u64 occ_get_powr_avg(u64 *accum, u32 *samples)
+ {
+- return div64_u64(get_unaligned_be64(accum) * 1000000ULL,
+- get_unaligned_be32(samples));
++ u64 divisor = get_unaligned_be32(samples);
++
++ return (divisor == 0) ? 0 :
++ div64_u64(get_unaligned_be64(accum) * 1000000ULL, divisor);
+ }
+
+ static ssize_t occ_show_power_2(struct device *dev,
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index 3c6294432748..1ef098ff27c3 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -544,6 +544,7 @@ int etm_perf_add_symlink_sink(struct coresight_device *csdev)
+ /* See function coresight_get_sink_by_id() to know where this is used */
+ hash = hashlen_hash(hashlen_string(NULL, name));
+
++ sysfs_attr_init(&ea->attr.attr);
+ ea->attr.attr.name = devm_kstrdup(pdev, name, GFP_KERNEL);
+ if (!ea->attr.attr.name)
+ return -ENOMEM;
+diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
+index 46bb2e421bb9..ad19d9c716f4 100644
+--- a/drivers/iio/accel/cros_ec_accel_legacy.c
++++ b/drivers/iio/accel/cros_ec_accel_legacy.c
+@@ -319,7 +319,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
+ .modified = 1, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+- BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = cros_ec_accel_legacy_ext_info, \
+diff --git a/drivers/iio/adc/ingenic-adc.c b/drivers/iio/adc/ingenic-adc.c
+index 92b1d5037ac9..e234970b7150 100644
+--- a/drivers/iio/adc/ingenic-adc.c
++++ b/drivers/iio/adc/ingenic-adc.c
+@@ -11,6 +11,7 @@
+ #include <linux/iio/iio.h>
+ #include <linux/io.h>
+ #include <linux/iopoll.h>
++#include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+ #include <linux/platform_device.h>
+@@ -22,8 +23,11 @@
+ #define JZ_ADC_REG_ADTCH 0x18
+ #define JZ_ADC_REG_ADBDAT 0x1c
+ #define JZ_ADC_REG_ADSDAT 0x20
++#define JZ_ADC_REG_ADCLK 0x28
+
+ #define JZ_ADC_REG_CFG_BAT_MD BIT(4)
++#define JZ_ADC_REG_ADCLK_CLKDIV_LSB 0
++#define JZ_ADC_REG_ADCLK_CLKDIV10US_LSB 16
+
+ #define JZ_ADC_AUX_VREF 3300
+ #define JZ_ADC_AUX_VREF_BITS 12
+@@ -34,6 +38,8 @@
+ #define JZ4740_ADC_BATTERY_HIGH_VREF (7500 * 0.986)
+ #define JZ4740_ADC_BATTERY_HIGH_VREF_BITS 12
+
++struct ingenic_adc;
++
+ struct ingenic_adc_soc_data {
+ unsigned int battery_high_vref;
+ unsigned int battery_high_vref_bits;
+@@ -41,6 +47,7 @@ struct ingenic_adc_soc_data {
+ size_t battery_raw_avail_size;
+ const int *battery_scale_avail;
+ size_t battery_scale_avail_size;
++ int (*init_clk_div)(struct device *dev, struct ingenic_adc *adc);
+ };
+
+ struct ingenic_adc {
+@@ -151,6 +158,42 @@ static const int jz4740_adc_battery_scale_avail[] = {
+ JZ_ADC_BATTERY_LOW_VREF, JZ_ADC_BATTERY_LOW_VREF_BITS,
+ };
+
++static int jz4725b_adc_init_clk_div(struct device *dev, struct ingenic_adc *adc)
++{
++ struct clk *parent_clk;
++ unsigned long parent_rate, rate;
++ unsigned int div_main, div_10us;
++
++ parent_clk = clk_get_parent(adc->clk);
++ if (!parent_clk) {
++ dev_err(dev, "ADC clock has no parent\n");
++ return -ENODEV;
++ }
++ parent_rate = clk_get_rate(parent_clk);
++
++ /*
++ * The JZ4725B ADC works at 500 kHz to 8 MHz.
++ * We pick the highest rate possible.
++ * In practice we typically get 6 MHz, half of the 12 MHz EXT clock.
++ */
++ div_main = DIV_ROUND_UP(parent_rate, 8000000);
++ div_main = clamp(div_main, 1u, 64u);
++ rate = parent_rate / div_main;
++ if (rate < 500000 || rate > 8000000) {
++ dev_err(dev, "No valid divider for ADC main clock\n");
++ return -EINVAL;
++ }
++
++ /* We also need a divider that produces a 10us clock. */
++ div_10us = DIV_ROUND_UP(rate, 100000);
++
++ writel(((div_10us - 1) << JZ_ADC_REG_ADCLK_CLKDIV10US_LSB) |
++ (div_main - 1) << JZ_ADC_REG_ADCLK_CLKDIV_LSB,
++ adc->base + JZ_ADC_REG_ADCLK);
++
++ return 0;
++}
++
+ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
+ .battery_high_vref = JZ4725B_ADC_BATTERY_HIGH_VREF,
+ .battery_high_vref_bits = JZ4725B_ADC_BATTERY_HIGH_VREF_BITS,
+@@ -158,6 +201,7 @@ static const struct ingenic_adc_soc_data jz4725b_adc_soc_data = {
+ .battery_raw_avail_size = ARRAY_SIZE(jz4725b_adc_battery_raw_avail),
+ .battery_scale_avail = jz4725b_adc_battery_scale_avail,
+ .battery_scale_avail_size = ARRAY_SIZE(jz4725b_adc_battery_scale_avail),
++ .init_clk_div = jz4725b_adc_init_clk_div,
+ };
+
+ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
+@@ -167,6 +211,7 @@ static const struct ingenic_adc_soc_data jz4740_adc_soc_data = {
+ .battery_raw_avail_size = ARRAY_SIZE(jz4740_adc_battery_raw_avail),
+ .battery_scale_avail = jz4740_adc_battery_scale_avail,
+ .battery_scale_avail_size = ARRAY_SIZE(jz4740_adc_battery_scale_avail),
++ .init_clk_div = NULL, /* no ADCLK register on JZ4740 */
+ };
+
+ static int ingenic_adc_read_avail(struct iio_dev *iio_dev,
+@@ -317,6 +362,15 @@ static int ingenic_adc_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ /* Set clock dividers. */
++ if (soc_data->init_clk_div) {
++ ret = soc_data->init_clk_div(dev, adc);
++ if (ret) {
++ clk_disable_unprepare(adc->clk);
++ return ret;
++ }
++ }
++
+ /* Put hardware in a known passive state. */
+ writeb(0x00, adc->base + JZ_ADC_REG_ENABLE);
+ writeb(0xff, adc->base + JZ_ADC_REG_CTRL);
+diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
+index 917223d5ff5b..0e3c6529fc4c 100644
+--- a/drivers/iio/adc/max9611.c
++++ b/drivers/iio/adc/max9611.c
+@@ -83,7 +83,7 @@
+ #define MAX9611_TEMP_MAX_POS 0x7f80
+ #define MAX9611_TEMP_MAX_NEG 0xff80
+ #define MAX9611_TEMP_MIN_NEG 0xd980
+-#define MAX9611_TEMP_MASK GENMASK(7, 15)
++#define MAX9611_TEMP_MASK GENMASK(15, 7)
+ #define MAX9611_TEMP_SHIFT 0x07
+ #define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT)
+ #define MAX9611_TEMP_SCALE_NUM 1000000
+diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
+index 2c0d0316d149..b373acce5927 100644
+--- a/drivers/iio/adc/rcar-gyroadc.c
++++ b/drivers/iio/adc/rcar-gyroadc.c
+@@ -382,7 +382,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ dev_err(dev,
+ "Only %i channels supported with %pOFn, but reg = <%i>.\n",
+ num_channels, child, reg);
+- return ret;
++ return -EINVAL;
+ }
+ }
+
+@@ -391,7 +391,7 @@ static int rcar_gyroadc_parse_subdevs(struct iio_dev *indio_dev)
+ dev_err(dev,
+ "Channel %i uses different ADC mode than the rest.\n",
+ reg);
+- return ret;
++ return -EINVAL;
+ }
+
+ /* Channel is valid, grab the regulator. */
+diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+index 53a59957cc54..8a704cd5bddb 100644
+--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
++++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+@@ -845,6 +845,25 @@ static const struct iio_chan_spec inv_mpu_channels[] = {
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_MPU6050_SCAN_ACCL_Z),
+ };
+
++static const unsigned long inv_mpu_scan_masks[] = {
++ /* 3-axis accel */
++ BIT(INV_MPU6050_SCAN_ACCL_X)
++ | BIT(INV_MPU6050_SCAN_ACCL_Y)
++ | BIT(INV_MPU6050_SCAN_ACCL_Z),
++ /* 3-axis gyro */
++ BIT(INV_MPU6050_SCAN_GYRO_X)
++ | BIT(INV_MPU6050_SCAN_GYRO_Y)
++ | BIT(INV_MPU6050_SCAN_GYRO_Z),
++ /* 6-axis accel + gyro */
++ BIT(INV_MPU6050_SCAN_ACCL_X)
++ | BIT(INV_MPU6050_SCAN_ACCL_Y)
++ | BIT(INV_MPU6050_SCAN_ACCL_Z)
++ | BIT(INV_MPU6050_SCAN_GYRO_X)
++ | BIT(INV_MPU6050_SCAN_GYRO_Y)
++ | BIT(INV_MPU6050_SCAN_GYRO_Z),
++ 0,
++};
++
+ static const struct iio_chan_spec inv_icm20602_channels[] = {
+ IIO_CHAN_SOFT_TIMESTAMP(INV_ICM20602_SCAN_TIMESTAMP),
+ {
+@@ -871,6 +890,28 @@ static const struct iio_chan_spec inv_icm20602_channels[] = {
+ INV_MPU6050_CHAN(IIO_ACCEL, IIO_MOD_Z, INV_ICM20602_SCAN_ACCL_Z),
+ };
+
++static const unsigned long inv_icm20602_scan_masks[] = {
++ /* 3-axis accel + temp (mandatory) */
++ BIT(INV_ICM20602_SCAN_ACCL_X)
++ | BIT(INV_ICM20602_SCAN_ACCL_Y)
++ | BIT(INV_ICM20602_SCAN_ACCL_Z)
++ | BIT(INV_ICM20602_SCAN_TEMP),
++ /* 3-axis gyro + temp (mandatory) */
++ BIT(INV_ICM20602_SCAN_GYRO_X)
++ | BIT(INV_ICM20602_SCAN_GYRO_Y)
++ | BIT(INV_ICM20602_SCAN_GYRO_Z)
++ | BIT(INV_ICM20602_SCAN_TEMP),
++ /* 6-axis accel + gyro + temp (mandatory) */
++ BIT(INV_ICM20602_SCAN_ACCL_X)
++ | BIT(INV_ICM20602_SCAN_ACCL_Y)
++ | BIT(INV_ICM20602_SCAN_ACCL_Z)
++ | BIT(INV_ICM20602_SCAN_GYRO_X)
++ | BIT(INV_ICM20602_SCAN_GYRO_Y)
++ | BIT(INV_ICM20602_SCAN_GYRO_Z)
++ | BIT(INV_ICM20602_SCAN_TEMP),
++ 0,
++};
++
+ /*
+ * The user can choose any frequency between INV_MPU6050_MIN_FIFO_RATE and
+ * INV_MPU6050_MAX_FIFO_RATE, but only these frequencies are matched by the
+@@ -1130,9 +1171,11 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
+ if (chip_type == INV_ICM20602) {
+ indio_dev->channels = inv_icm20602_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_icm20602_channels);
++ indio_dev->available_scan_masks = inv_icm20602_scan_masks;
+ } else {
+ indio_dev->channels = inv_mpu_channels;
+ indio_dev->num_channels = ARRAY_SIZE(inv_mpu_channels);
++ indio_dev->available_scan_masks = inv_mpu_scan_masks;
+ }
+
+ indio_dev->info = &mpu_info;
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index a4345052abd2..a47c7add4e0e 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -1807,6 +1807,30 @@ static int elantech_create_smbus(struct psmouse *psmouse,
+ leave_breadcrumbs);
+ }
+
++static bool elantech_use_host_notify(struct psmouse *psmouse,
++ struct elantech_device_info *info)
++{
++ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
++ return true;
++
++ switch (info->bus) {
++ case ETP_BUS_PS2_ONLY:
++ /* expected case */
++ break;
++ case ETP_BUS_SMB_HST_NTFY_ONLY:
++ case ETP_BUS_PS2_SMB_HST_NTFY:
++ /* SMbus implementation is stable since 2018 */
++ if (dmi_get_bios_year() >= 2018)
++ return true;
++ default:
++ psmouse_dbg(psmouse,
++ "Ignoring SMBus bus provider %d\n", info->bus);
++ break;
++ }
++
++ return false;
++}
++
+ /**
+ * elantech_setup_smbus - called once the PS/2 devices are enumerated
+ * and decides to instantiate a SMBus InterTouch device.
+@@ -1826,7 +1850,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
+ * i2c_blacklist_pnp_ids.
+ * Old ICs are up to the user to decide.
+ */
+- if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
++ if (!elantech_use_host_notify(psmouse, info) ||
+ psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
+ return -ENXIO;
+ }
+@@ -1846,34 +1870,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
+ return 0;
+ }
+
+-static bool elantech_use_host_notify(struct psmouse *psmouse,
+- struct elantech_device_info *info)
+-{
+- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+- return true;
+-
+- switch (info->bus) {
+- case ETP_BUS_PS2_ONLY:
+- /* expected case */
+- break;
+- case ETP_BUS_SMB_ALERT_ONLY:
+- /* fall-through */
+- case ETP_BUS_PS2_SMB_ALERT:
+- psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
+- break;
+- case ETP_BUS_SMB_HST_NTFY_ONLY:
+- /* fall-through */
+- case ETP_BUS_PS2_SMB_HST_NTFY:
+- return true;
+- default:
+- psmouse_dbg(psmouse,
+- "Ignoring SMBus bus provider %d.\n",
+- info->bus);
+- }
+-
+- return false;
+-}
+-
+ int elantech_init_smbus(struct psmouse *psmouse)
+ {
+ struct elantech_device_info info;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 7f8f4780b511..c0e188cd3811 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -182,6 +182,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN2055", /* E580 */
+ "SYN3052", /* HP EliteBook 840 G4 */
+ "SYN3221", /* HP 15-ay000 */
++ "SYN323d", /* HP Spectre X360 13-w013dx */
+ NULL
+ };
+
+diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
+index a2cec6cacf57..16d70201de4a 100644
+--- a/drivers/input/touchscreen/usbtouchscreen.c
++++ b/drivers/input/touchscreen/usbtouchscreen.c
+@@ -1659,6 +1659,8 @@ static int usbtouch_probe(struct usb_interface *intf,
+ if (!usbtouch || !input_dev)
+ goto out_free;
+
++ mutex_init(&usbtouch->pm_mutex);
++
+ type = &usbtouch_dev_info[id->driver_info];
+ usbtouch->type = type;
+ if (!type->process_pkt)
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index 2101601adf57..1ad24367373f 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -1900,7 +1900,6 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
+
+ static void domain_exit(struct dmar_domain *domain)
+ {
+- struct page *freelist;
+
+ /* Remove associated devices and clear attached or cached domains */
+ rcu_read_lock();
+@@ -1910,9 +1909,12 @@ static void domain_exit(struct dmar_domain *domain)
+ /* destroy iovas */
+ put_iova_domain(&domain->iovad);
+
+- freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
++ if (domain->pgd) {
++ struct page *freelist;
+
+- dma_free_pagelist(freelist);
++ freelist = domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw));
++ dma_free_pagelist(freelist);
++ }
+
+ free_domain_mem(domain);
+ }
+diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
+index ed5cefb83768..89deb451e0ac 100644
+--- a/drivers/mmc/host/cavium.c
++++ b/drivers/mmc/host/cavium.c
+@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
+ {
+ data->bytes_xfered = data->blocks * data->blksz;
+ data->error = 0;
++ dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
+ return 1;
+ }
+
+@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
+ mmc->max_segs = 1;
+
+ /* DMA size field can address up to 8 MB */
+- mmc->max_seg_size = 8 * 1024 * 1024;
++ mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
++ dma_get_max_seg_size(host->dev));
+ mmc->max_req_size = mmc->max_seg_size;
+ /* External DMA is in 512 byte blocks */
+ mmc->max_blk_size = 512;
+diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
+index f2fe344593d5..fcec8bcb53d6 100644
+--- a/drivers/net/can/flexcan.c
++++ b/drivers/net/can/flexcan.c
+@@ -400,9 +400,10 @@ static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
+ priv->write(reg_mcr, &regs->mcr);
+ }
+
+-static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
++static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->regs;
++ unsigned int ackval;
+ u32 reg_mcr;
+
+ reg_mcr = priv->read(&regs->mcr);
+@@ -412,20 +413,37 @@ static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
+ /* enable stop request */
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
++
++ /* get stop acknowledgment */
++ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
++ ackval, ackval & (1 << priv->stm.ack_bit),
++ 0, FLEXCAN_TIMEOUT_US))
++ return -ETIMEDOUT;
++
++ return 0;
+ }
+
+-static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv)
++static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv)
+ {
+ struct flexcan_regs __iomem *regs = priv->regs;
++ unsigned int ackval;
+ u32 reg_mcr;
+
+ /* remove stop request */
+ regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+ 1 << priv->stm.req_bit, 0);
+
++ /* get stop acknowledgment */
++ if (regmap_read_poll_timeout(priv->stm.gpr, priv->stm.ack_gpr,
++ ackval, !(ackval & (1 << priv->stm.ack_bit)),
++ 0, FLEXCAN_TIMEOUT_US))
++ return -ETIMEDOUT;
++
+ reg_mcr = priv->read(&regs->mcr);
+ reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
+ priv->write(reg_mcr, &regs->mcr);
++
++ return 0;
+ }
+
+ static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+@@ -1437,10 +1455,10 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
+
+ priv = netdev_priv(dev);
+ priv->stm.gpr = syscon_node_to_regmap(gpr_np);
+- of_node_put(gpr_np);
+ if (IS_ERR(priv->stm.gpr)) {
+ dev_dbg(&pdev->dev, "could not find gpr regmap\n");
+- return PTR_ERR(priv->stm.gpr);
++ ret = PTR_ERR(priv->stm.gpr);
++ goto out_put_node;
+ }
+
+ priv->stm.req_gpr = out_val[1];
+@@ -1455,7 +1473,9 @@ static int flexcan_setup_stop_mode(struct platform_device *pdev)
+
+ device_set_wakeup_capable(&pdev->dev, true);
+
+- return 0;
++out_put_node:
++ of_node_put(gpr_np);
++ return ret;
+ }
+
+ static const struct of_device_id flexcan_of_match[] = {
+@@ -1612,7 +1632,9 @@ static int __maybe_unused flexcan_suspend(struct device *device)
+ */
+ if (device_may_wakeup(device)) {
+ enable_irq_wake(dev->irq);
+- flexcan_enter_stop_mode(priv);
++ err = flexcan_enter_stop_mode(priv);
++ if (err)
++ return err;
+ } else {
+ err = flexcan_chip_disable(priv);
+ if (err)
+@@ -1662,10 +1684,13 @@ static int __maybe_unused flexcan_noirq_resume(struct device *device)
+ {
+ struct net_device *dev = dev_get_drvdata(device);
+ struct flexcan_priv *priv = netdev_priv(dev);
++ int err;
+
+ if (netif_running(dev) && device_may_wakeup(device)) {
+ flexcan_enable_wakeup_irq(priv, false);
+- flexcan_exit_stop_mode(priv);
++ err = flexcan_exit_stop_mode(priv);
++ if (err)
++ return err;
+ }
+
+ return 0;
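
The flexcan hunks above stop assuming that stop mode is entered or left instantly and instead poll a GPR acknowledgment bit with regmap_read_poll_timeout(), returning -ETIMEDOUT on failure. Below is a minimal standalone sketch of that poll-until-condition-or-timeout pattern; it is not part of the patch, read_ack_bit() is a hypothetical stand-in for the GPR register read, and the timeout value is assumed rather than taken from the driver.

/*
 * Standalone sketch of the poll-for-ack pattern used above.
 * read_ack_bit() and ACK_TIMEOUT_US are illustrative assumptions.
 */
#include <errno.h>
#include <stdbool.h>
#include <time.h>

#define ACK_TIMEOUT_US 50                 /* assumed, not the driver's value */

static bool read_ack_bit(void)            /* hypothetical register accessor */
{
	return true;                      /* pretend the hardware acked at once */
}

/* Poll until the ack bit matches want_set, or give up after the timeout. */
static int wait_for_ack(bool want_set)
{
	struct timespec step = { .tv_nsec = 1000 };   /* ~1 us between polls */
	int remaining = ACK_TIMEOUT_US;

	while (remaining-- > 0) {
		if (read_ack_bit() == want_set)
			return 0;                     /* acknowledged */
		nanosleep(&step, NULL);
	}
	return -ETIMEDOUT;                            /* same error the patch returns */
}

int main(void)
{
	return wait_for_ack(true) ? 1 : 0;
}
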
+diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
+index 05410008aa6b..de34a4b82d4a 100644
+--- a/drivers/net/can/rcar/rcar_canfd.c
++++ b/drivers/net/can/rcar/rcar_canfd.c
+@@ -1508,10 +1508,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
+
+ /* All packets processed */
+ if (num_pkts < quota) {
+- napi_complete_done(napi, num_pkts);
+- /* Enable Rx FIFO interrupts */
+- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+- RCANFD_RFCC_RFIE);
++ if (napi_complete_done(napi, num_pkts)) {
++ /* Enable Rx FIFO interrupts */
++ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
++ RCANFD_RFCC_RFIE);
++ }
+ }
+ return num_pkts;
+ }
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+index 458154c9b482..22b9c8e6d040 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+@@ -568,16 +568,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
+ dev->state &= ~PCAN_USB_STATE_STARTED;
+ netif_stop_queue(netdev);
+
++ close_candev(netdev);
++
++ dev->can.state = CAN_STATE_STOPPED;
++
+ /* unlink all pending urbs and free used memory */
+ peak_usb_unlink_all_urbs(dev);
+
+ if (dev->adapter->dev_stop)
+ dev->adapter->dev_stop(dev);
+
+- close_candev(netdev);
+-
+- dev->can.state = CAN_STATE_STOPPED;
+-
+ /* can set bus off now */
+ if (dev->adapter->dev_set_bus) {
+ int err = dev->adapter->dev_set_bus(dev, 0);
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+index 34761c3a6286..47cc1ff5b88e 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+@@ -841,7 +841,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
+ goto err_out;
+
+ /* allocate command buffer once for all for the interface */
+- pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
++ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+ GFP_KERNEL);
+ if (!pdev->cmd_buffer_addr)
+ goto err_out_1;
+diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+index 178bb7cff0c1..53cb2f72bdd0 100644
+--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
++++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+@@ -494,7 +494,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
+ u8 *buffer;
+ int err;
+
+- buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
++ buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index cfaf8f618d1f..56742fa0c1af 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
+ static struct ch_tc_flower_entry *allocate_flower_entry(void)
+ {
+ struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
+- spin_lock_init(&new->lock);
++ if (new)
++ spin_lock_init(&new->lock);
+ return new;
+ }
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 559f6df1a74d..5af9959d05e5 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -753,7 +753,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
+
+ for (i = 0; i < n_profiles; i++) {
+ /* the tables start at element 3 */
+- static int pos = 3;
++ int pos = 3;
+
+ /* The EWRD profiles officially go from 2 to 4, but we
+ * save them in sar_profiles[1-3] (because we don't
+@@ -874,6 +874,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
+ }
+
++static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
++{
++ /*
++ * The GEO_TX_POWER_LIMIT command is not supported on earlier
++ * firmware versions. Unfortunately, we don't have a TLV API
++ * flag to rely on, so rely on the major version which is in
++ * the first byte of ucode_ver. This was implemented
++ * initially on version 38 and then backported to 36, 29 and
++ * 17.
++ */
++ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
++ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
++}
++
+ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+ {
+ struct iwl_geo_tx_power_profiles_resp *resp;
+@@ -889,6 +905,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
+ .data = { &geo_cmd },
+ };
+
++ if (!iwl_mvm_sar_geo_support(mvm))
++ return -EOPNOTSUPP;
++
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
+@@ -914,13 +933,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
+ int ret, i, j;
+ u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
+
+- /*
+- * This command is not supported on earlier firmware versions.
+- * Unfortunately, we don't have a TLV API flag to rely on, so
+- * rely on the major version which is in the first byte of
+- * ucode_ver.
+- */
+- if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
++ if (!iwl_mvm_sar_geo_support(mvm))
+ return 0;
+
+ ret = iwl_mvm_sar_get_wgds_table(mvm);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+index 96f8d38ea321..a12ee20fb9ab 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+@@ -831,6 +831,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ unsigned int tcp_payload_len;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ bool ipv4 = (skb->protocol == htons(ETH_P_IP));
++ bool qos = ieee80211_is_data_qos(hdr->frame_control);
+ u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
+
+ skb_shinfo(skb)->gso_size = num_subframes * mss;
+@@ -864,7 +865,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
+ if (tcp_payload_len > mss) {
+ skb_shinfo(tmp)->gso_size = mss;
+ } else {
+- if (ieee80211_is_data_qos(hdr->frame_control)) {
++ if (qos) {
+ u8 *qc;
+
+ if (ipv4)
+diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+index fa4245d0d4a8..2f0ba7ef53b8 100644
+--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
++++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+@@ -435,6 +435,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
+ DMA_TO_DEVICE);
+ }
+
++ meta->tbs = 0;
++
+ if (trans->cfg->use_tfh) {
+ struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
+
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index b025ba164412..e39bb5c42c9a 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -124,6 +124,7 @@ enum {
+
+ #define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+
++#define WPA_GTK_OUI_OFFSET 2
+ #define RSN_GTK_OUI_OFFSET 2
+
+ #define MWIFIEX_OUI_NOT_PRESENT 0
+diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
+index e2786ab612ca..dd02bbd9544e 100644
+--- a/drivers/net/wireless/marvell/mwifiex/scan.c
++++ b/drivers/net/wireless/marvell/mwifiex/scan.c
+@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
+ u8 ret = MWIFIEX_OUI_NOT_PRESENT;
+
+ if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
+- iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
++ iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
++ WPA_GTK_OUI_OFFSET);
+ oui = &mwifiex_wpa_oui[cipher][0];
+ ret = mwifiex_search_oui_in_ie(iebody, oui);
+ if (ret)
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 4a1d2ab4d161..5deb4deb3820 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2264,17 +2264,15 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
+ memset(subsys->subnqn + off, 0, sizeof(subsys->subnqn) - off);
+ }
+
+-static void __nvme_release_subsystem(struct nvme_subsystem *subsys)
++static void nvme_release_subsystem(struct device *dev)
+ {
++ struct nvme_subsystem *subsys =
++ container_of(dev, struct nvme_subsystem, dev);
++
+ ida_simple_remove(&nvme_subsystems_ida, subsys->instance);
+ kfree(subsys);
+ }
+
+-static void nvme_release_subsystem(struct device *dev)
+-{
+- __nvme_release_subsystem(container_of(dev, struct nvme_subsystem, dev));
+-}
+-
+ static void nvme_destroy_subsystem(struct kref *ref)
+ {
+ struct nvme_subsystem *subsys =
+@@ -2429,7 +2427,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
+ mutex_lock(&nvme_subsystems_lock);
+ found = __nvme_find_get_subsystem(subsys->subnqn);
+ if (found) {
+- __nvme_release_subsystem(subsys);
++ put_device(&subsys->dev);
+ subsys = found;
+
+ if (!nvme_validate_cntlid(subsys, ctrl, id)) {
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 7fbcd72c438f..f9959eaaa185 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2959,6 +2959,8 @@ static const struct pci_device_id nvme_id_table[] = {
+ .driver_data = NVME_QUIRK_LIGHTNVM, },
+ { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */
+ .driver_data = NVME_QUIRK_LIGHTNVM, },
++ { PCI_DEVICE(0x10ec, 0x5762), /* ADATA SX6000LNP */
++ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 720da09d4d73..088fcdc8d2b4 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -1004,10 +1004,15 @@ static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
+ if (state == PCI_D0) {
+ pci_platform_power_transition(dev, PCI_D0);
+ /*
+- * Mandatory power management transition delays are
+- * handled in the PCIe portdrv resume hooks.
++ * Mandatory power management transition delays, see
++ * PCI Express Base Specification Revision 2.0 Section
++ * 6.6.1: Conventional Reset. Do not delay for
++	 * devices powered on/off by the corresponding bridge,
++	 * because we have already delayed for the bridge.
+ */
+ if (dev->runtime_d3cold) {
++ if (dev->d3cold_delay && !dev->imm_ready)
++ msleep(dev->d3cold_delay);
+ /*
+ * When powering on a bridge from D3cold, the
+ * whole hierarchy may be powered on into
+@@ -4570,16 +4575,14 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
+
+ return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
+ }
+-
+ /**
+- * pcie_wait_for_link_delay - Wait until link is active or inactive
++ * pcie_wait_for_link - Wait until link is active or inactive
+ * @pdev: Bridge device
+ * @active: waiting for active or inactive?
+- * @delay: Delay to wait after link has become active (in ms)
+ *
+ * Use this to wait till link becomes active or inactive.
+ */
+-bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
++bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+ {
+ int timeout = 1000;
+ bool ret;
+@@ -4616,25 +4619,13 @@ bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay)
+ timeout -= 10;
+ }
+ if (active && ret)
+- msleep(delay);
++ msleep(100);
+ else if (ret != active)
+ pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
+ active ? "set" : "cleared");
+ return ret == active;
+ }
+
+-/**
+- * pcie_wait_for_link - Wait until link is active or inactive
+- * @pdev: Bridge device
+- * @active: waiting for active or inactive?
+- *
+- * Use this to wait till link becomes active or inactive.
+- */
+-bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
+-{
+- return pcie_wait_for_link_delay(pdev, active, 100);
+-}
+-
+ void pci_reset_secondary_bus(struct pci_dev *dev)
+ {
+ u16 ctrl;
+diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
+index 59802b3def4b..9cb99380c61e 100644
+--- a/drivers/pci/pci.h
++++ b/drivers/pci/pci.h
+@@ -493,7 +493,6 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev)
+ void pcie_do_recovery(struct pci_dev *dev, enum pci_channel_state state,
+ u32 service);
+
+-bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active, int delay);
+ bool pcie_wait_for_link(struct pci_dev *pdev, bool active);
+ #ifdef CONFIG_PCIEASPM
+ void pcie_aspm_init_link_state(struct pci_dev *pdev);
+diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
+index 308c3e0c4a34..1b330129089f 100644
+--- a/drivers/pci/pcie/portdrv_core.c
++++ b/drivers/pci/pcie/portdrv_core.c
+@@ -9,7 +9,6 @@
+ #include <linux/module.h>
+ #include <linux/pci.h>
+ #include <linux/kernel.h>
+-#include <linux/delay.h>
+ #include <linux/errno.h>
+ #include <linux/pm.h>
+ #include <linux/pm_runtime.h>
+@@ -379,67 +378,6 @@ static int pm_iter(struct device *dev, void *data)
+ return 0;
+ }
+
+-static int get_downstream_delay(struct pci_bus *bus)
+-{
+- struct pci_dev *pdev;
+- int min_delay = 100;
+- int max_delay = 0;
+-
+- list_for_each_entry(pdev, &bus->devices, bus_list) {
+- if (!pdev->imm_ready)
+- min_delay = 0;
+- else if (pdev->d3cold_delay < min_delay)
+- min_delay = pdev->d3cold_delay;
+- if (pdev->d3cold_delay > max_delay)
+- max_delay = pdev->d3cold_delay;
+- }
+-
+- return max(min_delay, max_delay);
+-}
+-
+-/*
+- * wait_for_downstream_link - Wait for downstream link to establish
+- * @pdev: PCIe port whose downstream link is waited
+- *
+- * Handle delays according to PCIe 4.0 section 6.6.1 before configuration
+- * access to the downstream component is permitted.
+- *
+- * This blocks PCI core resume of the hierarchy below this port until the
+- * link is trained. Should be called before resuming port services to
+- * prevent pciehp from starting to tear-down the hierarchy too soon.
+- */
+-static void wait_for_downstream_link(struct pci_dev *pdev)
+-{
+- int delay;
+-
+- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+- pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM)
+- return;
+-
+- if (pci_dev_is_disconnected(pdev))
+- return;
+-
+- if (!pdev->subordinate || list_empty(&pdev->subordinate->devices) ||
+- !pdev->bridge_d3)
+- return;
+-
+- delay = get_downstream_delay(pdev->subordinate);
+- if (!delay)
+- return;
+-
+- dev_dbg(&pdev->dev, "waiting downstream link for %d ms\n", delay);
+-
+- /*
+- * If downstream port does not support speeds greater than 5 GT/s
+- * need to wait 100ms. For higher speeds (gen3) we need to wait
+- * first for the data link layer to become active.
+- */
+- if (pcie_get_speed_cap(pdev) <= PCIE_SPEED_5_0GT)
+- msleep(delay);
+- else
+- pcie_wait_for_link_delay(pdev, true, delay);
+-}
+-
+ /**
+ * pcie_port_device_suspend - suspend port services associated with a PCIe port
+ * @dev: PCI Express port to handle
+@@ -453,8 +391,6 @@ int pcie_port_device_suspend(struct device *dev)
+ int pcie_port_device_resume_noirq(struct device *dev)
+ {
+ size_t off = offsetof(struct pcie_port_service_driver, resume_noirq);
+-
+- wait_for_downstream_link(to_pci_dev(dev));
+ return device_for_each_child(dev, &off, pm_iter);
+ }
+
+@@ -485,8 +421,6 @@ int pcie_port_device_runtime_suspend(struct device *dev)
+ int pcie_port_device_runtime_resume(struct device *dev)
+ {
+ size_t off = offsetof(struct pcie_port_service_driver, runtime_resume);
+-
+- wait_for_downstream_link(to_pci_dev(dev));
+ return device_for_each_child(dev, &off, pm_iter);
+ }
+ #endif /* PM */
+diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
+index 730c4e68094b..7f5adf02f095 100644
+--- a/drivers/s390/cio/qdio_main.c
++++ b/drivers/s390/cio/qdio_main.c
+@@ -1558,13 +1558,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
+ rc = qdio_kick_outbound_q(q, phys_aob);
+ } else if (need_siga_sync(q)) {
+ rc = qdio_siga_sync_q(q);
++ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
++ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
++ state == SLSB_CU_OUTPUT_PRIMED) {
++ /* The previous buffer is not processed yet, tack on. */
++ qperf_inc(q, fast_requeue);
+ } else {
+- /* try to fast requeue buffers */
+- get_buf_state(q, prev_buf(bufnr), &state, 0);
+- if (state != SLSB_CU_OUTPUT_PRIMED)
+- rc = qdio_kick_outbound_q(q, 0);
+- else
+- qperf_inc(q, fast_requeue);
++ rc = qdio_kick_outbound_q(q, 0);
+ }
+
+ /* in case of SIGA errors we must process the error immediately */
+diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
+index 0e79799e9a71..79eb40bdaf9f 100644
+--- a/drivers/s390/cio/vfio_ccw_cp.c
++++ b/drivers/s390/cio/vfio_ccw_cp.c
+@@ -89,8 +89,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
+ sizeof(*pa->pa_iova_pfn) +
+ sizeof(*pa->pa_pfn),
+ GFP_KERNEL);
+- if (unlikely(!pa->pa_iova_pfn))
++ if (unlikely(!pa->pa_iova_pfn)) {
++ pa->pa_nr = 0;
+ return -ENOMEM;
++ }
+ pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
+
+ pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
+index 9125f7f4e64c..8a8fbde7e186 100644
+--- a/drivers/s390/cio/vfio_ccw_drv.c
++++ b/drivers/s390/cio/vfio_ccw_drv.c
+@@ -88,7 +88,7 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
+ (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
+ if (scsw_is_solicited(&irb->scsw)) {
+ cp_update_scsw(&private->cp, &irb->scsw);
+- if (is_final)
++ if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
+ cp_free(&private->cp);
+ }
+ mutex_lock(&private->io_mutex);
+diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
+index f0066f8a1786..4971104b1817 100644
+--- a/drivers/scsi/device_handler/scsi_dh_alua.c
++++ b/drivers/scsi/device_handler/scsi_dh_alua.c
+@@ -40,6 +40,7 @@
+ #define ALUA_FAILOVER_TIMEOUT 60
+ #define ALUA_FAILOVER_RETRIES 5
+ #define ALUA_RTPG_DELAY_MSECS 5
++#define ALUA_RTPG_RETRY_DELAY 2
+
+ /* device handler flags */
+ #define ALUA_OPTIMIZE_STPG 0x01
+@@ -682,7 +683,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
+ case SCSI_ACCESS_STATE_TRANSITIONING:
+ if (time_before(jiffies, pg->expiry)) {
+ /* State transition, retry */
+- pg->interval = 2;
++ pg->interval = ALUA_RTPG_RETRY_DELAY;
+ err = SCSI_DH_RETRY;
+ } else {
+ struct alua_dh_data *h;
+@@ -807,6 +808,8 @@ static void alua_rtpg_work(struct work_struct *work)
+ spin_lock_irqsave(&pg->lock, flags);
+ pg->flags &= ~ALUA_PG_RUNNING;
+ pg->flags |= ALUA_PG_RUN_RTPG;
++ if (!pg->interval)
++ pg->interval = ALUA_RTPG_RETRY_DELAY;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+ pg->interval * HZ);
+@@ -818,6 +821,8 @@ static void alua_rtpg_work(struct work_struct *work)
+ spin_lock_irqsave(&pg->lock, flags);
+ if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+ pg->flags &= ~ALUA_PG_RUNNING;
++ if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
++ pg->interval = ALUA_RTPG_RETRY_DELAY;
+ pg->flags |= ALUA_PG_RUN_RTPG;
+ spin_unlock_irqrestore(&pg->lock, flags);
+ queue_delayed_work(kaluad_wq, &pg->rtpg_work,
+diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
+index acd16e0d52cf..8cdbac076a1b 100644
+--- a/drivers/scsi/ibmvscsi/ibmvfc.c
++++ b/drivers/scsi/ibmvscsi/ibmvfc.c
+@@ -4864,8 +4864,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
+
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ ibmvfc_purge_requests(vhost, DID_ERROR);
+- ibmvfc_free_event_pool(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
++ ibmvfc_free_event_pool(vhost);
+
+ ibmvfc_free_mem(vhost);
+ spin_lock(&ibmvfc_driver_lock);
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 7237114a1d53..5f30016e9b64 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3045,6 +3045,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
+ u32 size;
+ unsigned long buff_addr;
+ unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
++ unsigned long chunk_left_bytes;
+ unsigned long src_addr;
+ unsigned long flags;
+ u32 buff_offset;
+@@ -3070,6 +3071,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
+ }
+
+ size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
++ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
++ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+
+ src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
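
The megaraid_sas hunk above clamps the copy size so a read starting partway into one DMA chunk cannot spill into the next chunk. A small standalone check of that arithmetic follows; it is not from the driver, and the chunk size and offsets are made up for the example.

/* Standalone illustration of the chunk_left_bytes clamp added above. */
#include <assert.h>
#include <stddef.h>

int main(void)
{
	const size_t dmachunk = 512 * 1024;        /* example DMA chunk size */
	size_t buff_offset = 3 * dmachunk + 100;   /* read starts 100 bytes into chunk 3 */
	size_t size = 4096;                        /* requested copy size */

	size_t chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
	if (size > chunk_left_bytes)
		size = chunk_left_bytes;

	/* The copy now stays inside the chunk that contains buff_offset. */
	assert((buff_offset % dmachunk) + size <= dmachunk);
	return 0;
}
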
+diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
+index fd4995fb676e..f85ec5b16b65 100644
+--- a/drivers/staging/android/ion/ion_page_pool.c
++++ b/drivers/staging/android/ion/ion_page_pool.c
+@@ -8,11 +8,14 @@
+ #include <linux/list.h>
+ #include <linux/slab.h>
+ #include <linux/swap.h>
++#include <linux/sched/signal.h>
+
+ #include "ion.h"
+
+ static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
+ {
++ if (fatal_signal_pending(current))
++ return NULL;
+ return alloc_pages(pool->gfp_mask, pool->order);
+ }
+
+diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
+index 9b07badf4c6c..bc750250ccd6 100644
+--- a/drivers/staging/fbtft/fbtft-core.c
++++ b/drivers/staging/fbtft/fbtft-core.c
+@@ -76,21 +76,18 @@ static int fbtft_request_one_gpio(struct fbtft_par *par,
+ struct gpio_desc **gpiop)
+ {
+ struct device *dev = par->info->device;
+- struct device_node *node = dev->of_node;
+ int ret = 0;
+
+- if (of_find_property(node, name, NULL)) {
+- *gpiop = devm_gpiod_get_index(dev, dev->driver->name, index,
+- GPIOD_OUT_HIGH);
+- if (IS_ERR(*gpiop)) {
+- ret = PTR_ERR(*gpiop);
+- dev_err(dev,
+- "Failed to request %s GPIO:%d\n", name, ret);
+- return ret;
+- }
+- fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
+- __func__, name);
++ *gpiop = devm_gpiod_get_index_optional(dev, name, index,
++ GPIOD_OUT_HIGH);
++ if (IS_ERR(*gpiop)) {
++ ret = PTR_ERR(*gpiop);
++ dev_err(dev,
++ "Failed to request %s GPIO: %d\n", name, ret);
++ return ret;
+ }
++ fbtft_par_dbg(DEBUG_REQUEST_GPIOS, par, "%s: '%s' GPIO\n",
++ __func__, name);
+
+ return ret;
+ }
+@@ -103,34 +100,34 @@ static int fbtft_request_gpios_dt(struct fbtft_par *par)
+ if (!par->info->device->of_node)
+ return -EINVAL;
+
+- ret = fbtft_request_one_gpio(par, "reset-gpios", 0, &par->gpio.reset);
++ ret = fbtft_request_one_gpio(par, "reset", 0, &par->gpio.reset);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "dc-gpios", 0, &par->gpio.dc);
++ ret = fbtft_request_one_gpio(par, "dc", 0, &par->gpio.dc);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "rd-gpios", 0, &par->gpio.rd);
++ ret = fbtft_request_one_gpio(par, "rd", 0, &par->gpio.rd);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "wr-gpios", 0, &par->gpio.wr);
++ ret = fbtft_request_one_gpio(par, "wr", 0, &par->gpio.wr);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "cs-gpios", 0, &par->gpio.cs);
++ ret = fbtft_request_one_gpio(par, "cs", 0, &par->gpio.cs);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "latch-gpios", 0, &par->gpio.latch);
++ ret = fbtft_request_one_gpio(par, "latch", 0, &par->gpio.latch);
+ if (ret)
+ return ret;
+ for (i = 0; i < 16; i++) {
+- ret = fbtft_request_one_gpio(par, "db-gpios", i,
++ ret = fbtft_request_one_gpio(par, "db", i,
+ &par->gpio.db[i]);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "led-gpios", i,
++ ret = fbtft_request_one_gpio(par, "led", i,
+ &par->gpio.led[i]);
+ if (ret)
+ return ret;
+- ret = fbtft_request_one_gpio(par, "aux-gpios", i,
++ ret = fbtft_request_one_gpio(par, "aux", i,
+ &par->gpio.aux[i]);
+ if (ret)
+ return ret;
+@@ -234,9 +231,9 @@ static void fbtft_reset(struct fbtft_par *par)
+ if (!par->gpio.reset)
+ return;
+ fbtft_par_dbg(DEBUG_RESET, par, "%s()\n", __func__);
+- gpiod_set_value_cansleep(par->gpio.reset, 0);
+- usleep_range(20, 40);
+ gpiod_set_value_cansleep(par->gpio.reset, 1);
++ usleep_range(20, 40);
++ gpiod_set_value_cansleep(par->gpio.reset, 0);
+ msleep(120);
+ }
+
+diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
+index 2be45ee9d061..464648ee2036 100644
+--- a/drivers/staging/gasket/apex_driver.c
++++ b/drivers/staging/gasket/apex_driver.c
+@@ -532,7 +532,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
+ break;
+ case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
+ ret = scnprintf(buf, PAGE_SIZE, "%u\n",
+- gasket_page_table_num_entries(
++ gasket_page_table_num_simple_entries(
+ gasket_dev->page_table[0]));
+ break;
+ case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
+diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+index f6825727bf77..3592b545246c 100644
+--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
++++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+@@ -1789,6 +1789,7 @@ void wilc_deinit_host_int(struct net_device *net)
+
+ priv->p2p_listen_state = false;
+
++ flush_workqueue(vif->wilc->hif_workqueue);
+ mutex_destroy(&priv->scan_req_lock);
+ ret = wilc_deinit(vif);
+
+diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
+index 717292c1c0df..60ff236a3d63 100644
+--- a/drivers/tty/tty_ldsem.c
++++ b/drivers/tty/tty_ldsem.c
+@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
+
+ list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
+ tsk = waiter->task;
+- smp_mb();
+- waiter->task = NULL;
++ smp_store_release(&waiter->task, NULL);
+ wake_up_process(tsk);
+ put_task_struct(tsk);
+ }
+@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
+ for (;;) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+- if (!waiter.task)
++ if (!smp_load_acquire(&waiter.task))
+ break;
+ if (!timeout)
+ break;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index a02448105527..86130e8d35f9 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1788,8 +1788,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
+ return 0;
+
+ error:
+- if (as && as->usbm)
+- dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
+ kfree(isopkt);
+ kfree(dr);
+ if (as)
+diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
+index 671bce18782c..8616c52849c6 100644
+--- a/drivers/usb/host/xhci-rcar.c
++++ b/drivers/usb/host/xhci-rcar.c
+@@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
+ * pointers. So, this driver clears the AC64 bit of xhci->hcc_params
+ * to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
+ * xhci_gen_setup().
++ *
++ * And, since the firmware/internal CPU controls USBSTS.STS_HALT
++ * and the processing speed drops when the roothub port enters U3,
++ * a long delay for the STS_HALT handshake is needed in xhci_suspend().
+ */
+ if (xhci_rcar_is_gen2(hcd->self.controller) ||
+- xhci_rcar_is_gen3(hcd->self.controller))
+- xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
++ xhci_rcar_is_gen3(hcd->self.controller)) {
++ xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
++ }
+
+ if (!xhci_rcar_wait_for_pll_active(hcd))
+ return -ETIMEDOUT;
+diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
+index ba05dd80a020..f5bed9f29e56 100644
+--- a/drivers/usb/misc/iowarrior.c
++++ b/drivers/usb/misc/iowarrior.c
+@@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
+ dev = usb_get_intfdata(interface);
+ mutex_lock(&iowarrior_open_disc_lock);
+ usb_set_intfdata(interface, NULL);
++ /* prevent device read, write and ioctl */
++ dev->present = 0;
+
+ minor = dev->minor;
++ mutex_unlock(&iowarrior_open_disc_lock);
++ /* give back our minor - this will call close(); locks need to be dropped at this point */
+
+- /* give back our minor */
+ usb_deregister_dev(interface, &iowarrior_class);
+
+ mutex_lock(&dev->mutex);
+
+ /* prevent device read, write and ioctl */
+- dev->present = 0;
+
+ mutex_unlock(&dev->mutex);
+- mutex_unlock(&iowarrior_open_disc_lock);
+
+ if (dev->opened) {
+ /* There is a process that holds a filedescriptor to the device ,
+diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
+index 27e9c78a791e..a32d61a79ab8 100644
+--- a/drivers/usb/misc/rio500.c
++++ b/drivers/usb/misc/rio500.c
+@@ -51,6 +51,7 @@ struct rio_usb_data {
+ char *obuf, *ibuf; /* transfer buffers */
+ char bulk_in_ep, bulk_out_ep; /* Endpoint assignments */
+ wait_queue_head_t wait_q; /* for timeouts */
++ struct mutex lock; /* general race avoidance */
+ };
+
+ static DEFINE_MUTEX(rio500_mutex);
+@@ -62,8 +63,10 @@ static int open_rio(struct inode *inode, struct file *file)
+
+ /* against disconnect() */
+ mutex_lock(&rio500_mutex);
++ mutex_lock(&(rio->lock));
+
+ if (rio->isopen || !rio->present) {
++ mutex_unlock(&(rio->lock));
+ mutex_unlock(&rio500_mutex);
+ return -EBUSY;
+ }
+@@ -71,6 +74,7 @@ static int open_rio(struct inode *inode, struct file *file)
+
+ init_waitqueue_head(&rio->wait_q);
+
++ mutex_unlock(&(rio->lock));
+
+ dev_info(&rio->rio_dev->dev, "Rio opened.\n");
+ mutex_unlock(&rio500_mutex);
+@@ -84,6 +88,7 @@ static int close_rio(struct inode *inode, struct file *file)
+
+ /* against disconnect() */
+ mutex_lock(&rio500_mutex);
++ mutex_lock(&(rio->lock));
+
+ rio->isopen = 0;
+ if (!rio->present) {
+@@ -95,6 +100,7 @@ static int close_rio(struct inode *inode, struct file *file)
+ } else {
+ dev_info(&rio->rio_dev->dev, "Rio closed.\n");
+ }
++ mutex_unlock(&(rio->lock));
+ mutex_unlock(&rio500_mutex);
+ return 0;
+ }
+@@ -109,7 +115,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
+ int retries;
+ int retval=0;
+
+- mutex_lock(&rio500_mutex);
++ mutex_lock(&(rio->lock));
+ /* Sanity check to make sure rio is connected, powered, etc */
+ if (rio->present == 0 || rio->rio_dev == NULL) {
+ retval = -ENODEV;
+@@ -253,7 +259,7 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
+
+
+ err_out:
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return retval;
+ }
+
+@@ -273,12 +279,12 @@ write_rio(struct file *file, const char __user *buffer,
+ int errn = 0;
+ int intr;
+
+- intr = mutex_lock_interruptible(&rio500_mutex);
++ intr = mutex_lock_interruptible(&(rio->lock));
+ if (intr)
+ return -EINTR;
+ /* Sanity check to make sure rio is connected, powered, etc */
+ if (rio->present == 0 || rio->rio_dev == NULL) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return -ENODEV;
+ }
+
+@@ -301,7 +307,7 @@ write_rio(struct file *file, const char __user *buffer,
+ goto error;
+ }
+ if (signal_pending(current)) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return bytes_written ? bytes_written : -EINTR;
+ }
+
+@@ -339,12 +345,12 @@ write_rio(struct file *file, const char __user *buffer,
+ buffer += copy_size;
+ } while (count > 0);
+
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+
+ return bytes_written ? bytes_written : -EIO;
+
+ error:
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return errn;
+ }
+
+@@ -361,12 +367,12 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+ char *ibuf;
+ int intr;
+
+- intr = mutex_lock_interruptible(&rio500_mutex);
++ intr = mutex_lock_interruptible(&(rio->lock));
+ if (intr)
+ return -EINTR;
+ /* Sanity check to make sure rio is connected, powered, etc */
+ if (rio->present == 0 || rio->rio_dev == NULL) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return -ENODEV;
+ }
+
+@@ -377,11 +383,11 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+
+ while (count > 0) {
+ if (signal_pending(current)) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return read_count ? read_count : -EINTR;
+ }
+ if (!rio->rio_dev) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return -ENODEV;
+ }
+ this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
+@@ -399,7 +405,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+ count = this_read = partial;
+ } else if (result == -ETIMEDOUT || result == 15) { /* FIXME: 15 ??? */
+ if (!maxretry--) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ dev_err(&rio->rio_dev->dev,
+ "read_rio: maxretry timeout\n");
+ return -ETIME;
+@@ -409,19 +415,19 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+ finish_wait(&rio->wait_q, &wait);
+ continue;
+ } else if (result != -EREMOTEIO) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ dev_err(&rio->rio_dev->dev,
+ "Read Whoops - result:%d partial:%u this_read:%u\n",
+ result, partial, this_read);
+ return -EIO;
+ } else {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return (0);
+ }
+
+ if (this_read) {
+ if (copy_to_user(buffer, ibuf, this_read)) {
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return -EFAULT;
+ }
+ count -= this_read;
+@@ -429,7 +435,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
+ buffer += this_read;
+ }
+ }
+- mutex_unlock(&rio500_mutex);
++ mutex_unlock(&(rio->lock));
+ return read_count;
+ }
+
+@@ -494,6 +500,8 @@ static int probe_rio(struct usb_interface *intf,
+ }
+ dev_dbg(&intf->dev, "ibuf address:%p\n", rio->ibuf);
+
++ mutex_init(&(rio->lock));
++
+ usb_set_intfdata (intf, rio);
+ rio->present = 1;
+ bail_out:
+@@ -511,10 +519,12 @@ static void disconnect_rio(struct usb_interface *intf)
+ if (rio) {
+ usb_deregister_dev(intf, &usb_rio_class);
+
++ mutex_lock(&(rio->lock));
+ if (rio->isopen) {
+ rio->isopen = 0;
+ /* better let it finish - the release will do whats needed */
+ rio->rio_dev = NULL;
++ mutex_unlock(&(rio->lock));
+ mutex_unlock(&rio500_mutex);
+ return;
+ }
+@@ -524,6 +534,7 @@ static void disconnect_rio(struct usb_interface *intf)
+ dev_info(&intf->dev, "USB Rio disconnected.\n");
+
+ rio->present = 0;
++ mutex_unlock(&(rio->lock));
+ }
+ mutex_unlock(&rio500_mutex);
+ }
+diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
+index 7b306aa22d25..6715a128e6c8 100644
+--- a/drivers/usb/misc/yurex.c
++++ b/drivers/usb/misc/yurex.c
+@@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref)
+
+ dev_dbg(&dev->interface->dev, "%s\n", __func__);
+
+- usb_put_dev(dev->udev);
+ if (dev->cntl_urb) {
+ usb_kill_urb(dev->cntl_urb);
+ kfree(dev->cntl_req);
+@@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref)
+ dev->int_buffer, dev->urb->transfer_dma);
+ usb_free_urb(dev->urb);
+ }
++ usb_put_dev(dev->udev);
+ kfree(dev);
+ }
+
+diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
+index fba32d84e578..15abe1d9958f 100644
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -379,7 +379,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
+ return SNK_UNATTACHED;
+ else if (port->try_role == TYPEC_SOURCE)
+ return SRC_UNATTACHED;
+- else if (port->tcpc->config->default_role == TYPEC_SINK)
++ else if (port->tcpc->config &&
++ port->tcpc->config->default_role == TYPEC_SINK)
+ return SNK_UNATTACHED;
+ /* Fall through to return SRC_UNATTACHED */
+ } else if (port->port_type == TYPEC_PORT_SNK) {
+@@ -586,7 +587,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port)
+
+ static void tcpm_debugfs_exit(struct tcpm_port *port)
+ {
++ int i;
++
++ mutex_lock(&port->logbuffer_lock);
++ for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
++ kfree(port->logbuffer[i]);
++ port->logbuffer[i] = NULL;
++ }
++ mutex_unlock(&port->logbuffer_lock);
++
+ debugfs_remove(port->dentry);
++ if (list_empty(&rootdir->d_subdirs)) {
++ debugfs_remove(rootdir);
++ rootdir = NULL;
++ }
+ }
+
+ #else
+@@ -1095,7 +1109,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
+ break;
+ case CMD_ATTENTION:
+ /* Attention command does not have response */
+- typec_altmode_attention(adev, p[1]);
++ if (adev)
++ typec_altmode_attention(adev, p[1]);
+ return 0;
+ default:
+ break;
+@@ -1147,20 +1162,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
+ }
+ break;
+ case CMD_ENTER_MODE:
+- typec_altmode_update_active(pdev, true);
+-
+- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+- response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
+- response[0] |= VDO_OPOS(adev->mode);
+- return 1;
++ if (adev && pdev) {
++ typec_altmode_update_active(pdev, true);
++
++ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
++ response[0] = VDO(adev->svid, 1,
++ CMD_EXIT_MODE);
++ response[0] |= VDO_OPOS(adev->mode);
++ return 1;
++ }
+ }
+ return 0;
+ case CMD_EXIT_MODE:
+- typec_altmode_update_active(pdev, false);
++ if (adev && pdev) {
++ typec_altmode_update_active(pdev, false);
+
+- /* Back to USB Operation */
+- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
+- NULL));
++ /* Back to USB Operation */
++ WARN_ON(typec_altmode_notify(adev,
++ TYPEC_STATE_USB,
++ NULL));
++ }
+ break;
+ default:
+ break;
+@@ -1170,8 +1191,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ /* Back to USB Operation */
+- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
+- NULL));
++ if (adev)
++ WARN_ON(typec_altmode_notify(adev,
++ TYPEC_STATE_USB,
++ NULL));
+ break;
+ default:
+ break;
+@@ -1182,7 +1205,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
+ }
+
+ /* Informing the alternate mode drivers about everything */
+- typec_altmode_vdm(adev, p[0], &p[1], cnt);
++ if (adev)
++ typec_altmode_vdm(adev, p[0], &p[1], cnt);
+
+ return rlen;
+ }
+@@ -4114,7 +4138,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
+ mutex_lock(&port->lock);
+ if (tcpc->try_role)
+ ret = tcpc->try_role(tcpc, role);
+- if (!ret && !tcpc->config->try_role_hw)
++ if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
+ port->try_role = role;
+ port->try_src_count = 0;
+ port->try_snk_count = 0;
+@@ -4701,7 +4725,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
+ port->typec_caps.prefer_role = tcfg->default_role;
+ port->typec_caps.type = tcfg->type;
+ port->typec_caps.data = tcfg->data;
+- port->self_powered = port->tcpc->config->self_powered;
++ port->self_powered = tcfg->self_powered;
+
+ return 0;
+ }
+diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
+index bf63074675fc..c274a36c2fe4 100644
+--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
++++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
+@@ -963,7 +963,7 @@ release_fw:
+ ******************************************************************************/
+ static int ccg_fw_update(struct ucsi_ccg *uc, enum enum_flash_mode flash_mode)
+ {
+- int err;
++ int err = 0;
+
+ while (flash_mode != FLASH_NOT_NEEDED) {
+ err = do_flash(uc, flash_mode);
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 09c9d6726f07..a9e7c1b69437 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1723,7 +1723,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
+
+ /* finish claiming */
+ mutex_lock(&bdev->bd_mutex);
+- bd_finish_claiming(bdev, whole, holder);
++ if (!res)
++ bd_finish_claiming(bdev, whole, holder);
++ else
++ bd_abort_claiming(bdev, whole, holder);
+ /*
+ * Block event polling for write claims if requested. Any
+ * write holder makes the write_holder state stick until
+diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
+index 75311a8a68bf..c3c8de5513db 100644
+--- a/fs/cifs/smb2pdu.c
++++ b/fs/cifs/smb2pdu.c
+@@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
+ if (tcon == NULL)
+ return 0;
+
+- if (smb2_command == SMB2_TREE_CONNECT)
++ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
+ return 0;
+
+ if (tcon->tidStatus == CifsExiting) {
+@@ -1173,7 +1173,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
+ else
+ req->SecurityMode = 0;
+
++#ifdef CONFIG_CIFS_DFS_UPCALL
++ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
++#else
+ req->Capabilities = 0;
++#endif /* DFS_UPCALL */
++
+ req->Channel = 0; /* MBZ */
+
+ sess_data->iov[0].iov_base = (char *)req;
+diff --git a/fs/dax.c b/fs/dax.c
+index 7d0e99982d48..4f42b852375b 100644
+--- a/fs/dax.c
++++ b/fs/dax.c
+@@ -601,7 +601,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
+ * guaranteed to either see new references or prevent new
+ * references from being established.
+ */
+- unmap_mapping_range(mapping, 0, 0, 1);
++ unmap_mapping_range(mapping, 0, 0, 0);
+
+ xas_lock_irq(&xas);
+ xas_for_each(&xas, entry, ULONG_MAX) {
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 93ea1d529aa3..253e2f939d5f 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -390,6 +390,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
+ return mp->mp_aheight - x - 1;
+ }
+
++static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
++{
++ sector_t factor = 1, block = 0;
++ int hgt;
++
++ for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
++ if (hgt < mp->mp_aheight)
++ block += mp->mp_list[hgt] * factor;
++ factor *= sdp->sd_inptrs;
++ }
++ return block;
++}
++
+ static void release_metapath(struct metapath *mp)
+ {
+ int i;
+@@ -430,60 +443,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
+ return ptr - first;
+ }
+
+-typedef const __be64 *(*gfs2_metadata_walker)(
+- struct metapath *mp,
+- const __be64 *start, const __be64 *end,
+- u64 factor, void *data);
++enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
+
+-#define WALK_STOP ((__be64 *)0)
+-#define WALK_NEXT ((__be64 *)1)
++/*
++ * gfs2_metadata_walker - walk an indirect block
++ * @mp: Metapath to indirect block
++ * @ptrs: Number of pointers to look at
++ *
++ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
++ * indirect block to follow.
++ */
++typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
++ unsigned int ptrs);
+
+-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
+- u64 len, struct metapath *mp, gfs2_metadata_walker walker,
+- void *data)
++/*
++ * gfs2_walk_metadata - walk a tree of indirect blocks
++ * @inode: The inode
++ * @mp: Starting point of walk
++ * @max_len: Maximum number of blocks to walk
++ * @walker: Called during the walk
++ *
++ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
++ * past the end of metadata, and a negative error code otherwise.
++ */
++
++static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
++ u64 max_len, gfs2_metadata_walker walker)
+ {
+- struct metapath clone;
+ struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_sbd *sdp = GFS2_SB(inode);
+- const __be64 *start, *end, *ptr;
+ u64 factor = 1;
+ unsigned int hgt;
+- int ret = 0;
++ int ret;
+
+- for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
++ /*
++ * The walk starts in the lowest allocated indirect block, which may be
++ * before the position indicated by @mp. Adjust @max_len accordingly
++ * to avoid a short walk.
++ */
++ for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
++ max_len += mp->mp_list[hgt] * factor;
++ mp->mp_list[hgt] = 0;
+ factor *= sdp->sd_inptrs;
++ }
+
+ for (;;) {
+- u64 step;
++ u16 start = mp->mp_list[hgt];
++ enum walker_status status;
++ unsigned int ptrs;
++ u64 len;
+
+ /* Walk indirect block. */
+- start = metapointer(hgt, mp);
+- end = metaend(hgt, mp);
+-
+- step = (end - start) * factor;
+- if (step > len)
+- end = start + DIV_ROUND_UP_ULL(len, factor);
+-
+- ptr = walker(mp, start, end, factor, data);
+- if (ptr == WALK_STOP)
++ ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
++ len = ptrs * factor;
++ if (len > max_len)
++ ptrs = DIV_ROUND_UP_ULL(max_len, factor);
++ status = walker(mp, ptrs);
++ switch (status) {
++ case WALK_STOP:
++ return 1;
++ case WALK_FOLLOW:
++ BUG_ON(mp->mp_aheight == mp->mp_fheight);
++ ptrs = mp->mp_list[hgt] - start;
++ len = ptrs * factor;
+ break;
+- if (step >= len)
++ case WALK_CONTINUE:
+ break;
+- len -= step;
+- if (ptr != WALK_NEXT) {
+- BUG_ON(!*ptr);
+- mp->mp_list[hgt] += ptr - start;
+- goto fill_up_metapath;
+ }
++ if (len >= max_len)
++ break;
++ max_len -= len;
++ if (status == WALK_FOLLOW)
++ goto fill_up_metapath;
+
+ lower_metapath:
+ /* Decrease height of metapath. */
+- if (mp != &clone) {
+- clone_metapath(&clone, mp);
+- mp = &clone;
+- }
+ brelse(mp->mp_bh[hgt]);
+ mp->mp_bh[hgt] = NULL;
++ mp->mp_list[hgt] = 0;
+ if (!hgt)
+ break;
+ hgt--;
+@@ -491,10 +528,7 @@ lower_metapath:
+
+ /* Advance in metadata tree. */
+ (mp->mp_list[hgt])++;
+- start = metapointer(hgt, mp);
+- end = metaend(hgt, mp);
+- if (start >= end) {
+- mp->mp_list[hgt] = 0;
++ if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
+ if (!hgt)
+ break;
+ goto lower_metapath;
+@@ -502,44 +536,36 @@ lower_metapath:
+
+ fill_up_metapath:
+ /* Increase height of metapath. */
+- if (mp != &clone) {
+- clone_metapath(&clone, mp);
+- mp = &clone;
+- }
+ ret = fillup_metapath(ip, mp, ip->i_height - 1);
+ if (ret < 0)
+- break;
++ return ret;
+ hgt += ret;
+ for (; ret; ret--)
+ do_div(factor, sdp->sd_inptrs);
+ mp->mp_aheight = hgt + 1;
+ }
+- if (mp == &clone)
+- release_metapath(mp);
+- return ret;
++ return 0;
+ }
+
+-struct gfs2_hole_walker_args {
+- u64 blocks;
+-};
+-
+-static const __be64 *gfs2_hole_walker(struct metapath *mp,
+- const __be64 *start, const __be64 *end,
+- u64 factor, void *data)
++static enum walker_status gfs2_hole_walker(struct metapath *mp,
++ unsigned int ptrs)
+ {
+- struct gfs2_hole_walker_args *args = data;
+- const __be64 *ptr;
++ const __be64 *start, *ptr, *end;
++ unsigned int hgt;
++
++ hgt = mp->mp_aheight - 1;
++ start = metapointer(hgt, mp);
++ end = start + ptrs;
+
+ for (ptr = start; ptr < end; ptr++) {
+ if (*ptr) {
+- args->blocks += (ptr - start) * factor;
++ mp->mp_list[hgt] += ptr - start;
+ if (mp->mp_aheight == mp->mp_fheight)
+ return WALK_STOP;
+- return ptr; /* increase height */
++ return WALK_FOLLOW;
+ }
+ }
+- args->blocks += (end - start) * factor;
+- return WALK_NEXT;
++ return WALK_CONTINUE;
+ }
+
+ /**
+@@ -557,12 +583,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
+ static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
+ struct metapath *mp, struct iomap *iomap)
+ {
+- struct gfs2_hole_walker_args args = { };
+- int ret = 0;
++ struct metapath clone;
++ u64 hole_size;
++ int ret;
+
+- ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
+- if (!ret)
+- iomap->length = args.blocks << inode->i_blkbits;
++ clone_metapath(&clone, mp);
++ ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
++ if (ret < 0)
++ goto out;
++
++ if (ret == 1)
++ hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
++ else
++ hole_size = len;
++ iomap->length = hole_size << inode->i_blkbits;
++ ret = 0;
++
++out:
++ release_metapath(&clone);
+ return ret;
+ }
+
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 0ff3facf81da..0af854cce8ff 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -153,7 +153,7 @@ again:
+ /* Block nfs4_proc_unlck */
+ mutex_lock(&sp->so_delegreturn_mutex);
+ seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+- err = nfs4_open_delegation_recall(ctx, state, stateid, type);
++ err = nfs4_open_delegation_recall(ctx, state, stateid);
+ if (!err)
+ err = nfs_delegation_claim_locks(state, stateid);
+ if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
+diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
+index 5799777df5ec..9eb87ae4c982 100644
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -63,7 +63,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
+
+ /* NFSv4 delegation-related procedures */
+ int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync);
+-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
++int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
+ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
+ bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
+ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 6418cb6c079b..63edda145d1b 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -1878,8 +1878,9 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
+ if (data->o_res.delegation_type != 0)
+ nfs4_opendata_check_deleg(data, state);
+ update:
+- update_open_stateid(state, &data->o_res.stateid, NULL,
+- data->o_arg.fmode);
++ if (!update_open_stateid(state, &data->o_res.stateid,
++ NULL, data->o_arg.fmode))
++ return ERR_PTR(-EAGAIN);
+ refcount_inc(&state->count);
+
+ return state;
+@@ -1944,8 +1945,11 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
+
+ if (data->o_res.delegation_type != 0)
+ nfs4_opendata_check_deleg(data, state);
+- update_open_stateid(state, &data->o_res.stateid, NULL,
+- data->o_arg.fmode);
++ if (!update_open_stateid(state, &data->o_res.stateid,
++ NULL, data->o_arg.fmode)) {
++ nfs4_put_open_state(state);
++ state = ERR_PTR(-EAGAIN);
++ }
+ out:
+ nfs_release_seqid(data->o_arg.seqid);
+ return state;
+@@ -2148,12 +2152,10 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
+ case -NFS4ERR_BAD_HIGH_SLOT:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -NFS4ERR_DEADSESSION:
+- set_bit(NFS_DELEGATED_STATE, &state->flags);
+ nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
+ return -EAGAIN;
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_STALE_STATEID:
+- set_bit(NFS_DELEGATED_STATE, &state->flags);
+ /* Don't recall a delegation if it was lost */
+ nfs4_schedule_lease_recovery(server->nfs_client);
+ return -EAGAIN;
+@@ -2174,7 +2176,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
+ return -EAGAIN;
+ case -NFS4ERR_DELAY:
+ case -NFS4ERR_GRACE:
+- set_bit(NFS_DELEGATED_STATE, &state->flags);
+ ssleep(1);
+ return -EAGAIN;
+ case -ENOMEM:
+@@ -2190,8 +2191,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
+ }
+
+ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
+- struct nfs4_state *state, const nfs4_stateid *stateid,
+- fmode_t type)
++ struct nfs4_state *state, const nfs4_stateid *stateid)
+ {
+ struct nfs_server *server = NFS_SERVER(state->inode);
+ struct nfs4_opendata *opendata;
+@@ -2202,20 +2202,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
+ if (IS_ERR(opendata))
+ return PTR_ERR(opendata);
+ nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
+- nfs_state_clear_delegation(state);
+- switch (type & (FMODE_READ|FMODE_WRITE)) {
+- case FMODE_READ|FMODE_WRITE:
+- case FMODE_WRITE:
++ if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
+ err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
+ if (err)
+- break;
++ goto out;
++ }
++ if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
+ err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
+ if (err)
+- break;
+- /* Fall through */
+- case FMODE_READ:
++ goto out;
++ }
++ if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
+ err = nfs4_open_recover_helper(opendata, FMODE_READ);
++ if (err)
++ goto out;
+ }
++ nfs_state_clear_delegation(state);
++out:
+ nfs4_opendata_put(opendata);
+ return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
+ }
+@@ -3172,7 +3175,7 @@ static int _nfs4_do_setattr(struct inode *inode,
+
+ if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
+ /* Use that stateid */
+- } else if (ctx != NULL) {
++ } else if (ctx != NULL && ctx->state) {
+ struct nfs_lock_context *l_ctx;
+ if (!nfs4_valid_open_stateid(ctx->state))
+ return -EBADF;
+diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
+index 46bbc949c20a..7a30524a80ee 100644
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -350,6 +350,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+
+ void kvm_vgic_load(struct kvm_vcpu *vcpu);
+ void kvm_vgic_put(struct kvm_vcpu *vcpu);
++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
+
+ #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
+ #define vgic_initialized(k) ((k)->arch.vgic.initialized)
+diff --git a/include/linux/ccp.h b/include/linux/ccp.h
+index 55cb455cfcb0..a5dfbaf2470d 100644
+--- a/include/linux/ccp.h
++++ b/include/linux/ccp.h
+@@ -170,6 +170,8 @@ struct ccp_aes_engine {
+ enum ccp_aes_mode mode;
+ enum ccp_aes_action action;
+
++ u32 authsize;
++
+ struct scatterlist *key;
+ u32 key_len; /* In bytes */
+
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index d1ad38a3f048..7546cbf3dfb0 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -871,6 +871,7 @@ void kvm_arch_check_processor_compat(void *rtn);
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
+ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
++bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+
+ #ifndef __KVM_HAVE_ARCH_VM_ALLOC
+ /*
+diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
+index c5188ff724d1..bc88d6f964da 100644
+--- a/include/sound/compress_driver.h
++++ b/include/sound/compress_driver.h
+@@ -173,10 +173,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
+ if (snd_BUG_ON(!stream))
+ return;
+
+- if (stream->direction == SND_COMPRESS_PLAYBACK)
+- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+- else
+- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+
+ wake_up(&stream->runtime->sleep);
+ }
+diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
+index 6f09d1500960..70da1c6cdd07 100644
+--- a/include/uapi/linux/nl80211.h
++++ b/include/uapi/linux/nl80211.h
+@@ -2844,7 +2844,7 @@ enum nl80211_attrs {
+ #define NL80211_HT_CAPABILITY_LEN 26
+ #define NL80211_VHT_CAPABILITY_LEN 12
+ #define NL80211_HE_MIN_CAPABILITY_LEN 16
+-#define NL80211_HE_MAX_CAPABILITY_LEN 51
++#define NL80211_HE_MAX_CAPABILITY_LEN 54
+ #define NL80211_MAX_NR_CIPHER_SUITES 5
+ #define NL80211_MAX_NR_AKM_SUITES 2
+
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index f851934d55d4..4bc15cff1026 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -11266,7 +11266,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ goto err_unlock;
+ }
+
+- perf_install_in_context(ctx, event, cpu);
++ perf_install_in_context(ctx, event, event->cpu);
+ perf_unpin_context(ctx);
+ mutex_unlock(&ctx->mutex);
+
+diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
+index f18cd5aa33e8..656c14333613 100644
+--- a/kernel/irq/affinity.c
++++ b/kernel/irq/affinity.c
+@@ -253,11 +253,9 @@ irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
+ * Determine the number of vectors which need interrupt affinities
+ * assigned. If the pre/post request exhausts the available vectors
+ * then nothing to do here except for invoking the calc_sets()
+- * callback so the device driver can adjust to the situation. If there
+- * is only a single vector, then managing the queue is pointless as
+- * well.
++ * callback so the device driver can adjust to the situation.
+ */
+- if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
++ if (nvecs > affd->pre_vectors + affd->post_vectors)
+ affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
+ else
+ affvecs = 0;
+diff --git a/lib/test_firmware.c b/lib/test_firmware.c
+index 83ea6c4e623c..6ca97a63b3d6 100644
+--- a/lib/test_firmware.c
++++ b/lib/test_firmware.c
+@@ -886,8 +886,11 @@ static int __init test_firmware_init(void)
+ return -ENOMEM;
+
+ rc = __test_firmware_config_init();
+- if (rc)
++ if (rc) {
++ kfree(test_fw_config);
++ pr_err("could not init firmware test config: %d\n", rc);
+ return rc;
++ }
+
+ rc = misc_register(&test_fw_misc_device);
+ if (rc) {
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 0f76cca32a1c..080d30408ce3 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1213,6 +1213,12 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
+ if (unlikely(valist == NULL))
+ return false;
+
++ /*
++ * First make sure the mappings are removed from all page-tables
++ * before they are freed.
++ */
++ vmalloc_sync_all();
++
+ /*
+ * TODO: to calculate a flush range without looping.
+ * The list can be up to lazy_max_pages() elements.
+@@ -3001,6 +3007,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
+ /*
+ * Implement a stub for vmalloc_sync_all() if the architecture chose not to
+ * have one.
++ *
++ * The purpose of this function is to make sure the vmalloc area
++ * mappings are identical in all page-tables in the system.
+ */
+ void __weak vmalloc_sync_all(void)
+ {
+diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
+index 59031670b16a..cc23f1ce239c 100644
+--- a/net/ipv4/netfilter/ipt_rpfilter.c
++++ b/net/ipv4/netfilter/ipt_rpfilter.c
+@@ -78,6 +78,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
+ flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
+ flow.flowi4_tos = RT_TOS(iph->tos);
+ flow.flowi4_scope = RT_SCOPE_UNIVERSE;
++ flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
+
+ return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
+ }
+diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
+index 6bcaf7357183..d800801a5dd2 100644
+--- a/net/ipv6/netfilter/ip6t_rpfilter.c
++++ b/net/ipv6/netfilter/ip6t_rpfilter.c
+@@ -55,7 +55,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ if (rpfilter_addr_linklocal(&iph->saddr)) {
+ lookup_flags |= RT6_LOOKUP_F_IFACE;
+ fl6.flowi6_oif = dev->ifindex;
+- } else if ((flags & XT_RPFILTER_LOOSE) == 0)
++ /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
++ } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
++ (flags & XT_RPFILTER_LOOSE) == 0)
+ fl6.flowi6_oif = dev->ifindex;
+
+ rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
+@@ -70,7 +72,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
+ goto out;
+ }
+
+- if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
++ if (rt->rt6i_idev->dev == dev ||
++ l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
++ (flags & XT_RPFILTER_LOOSE))
+ ret = true;
+ out:
+ ip6_rt_put(rt);
+diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
+index a1973a26c7fc..b8288125e05d 100644
+--- a/net/mac80211/cfg.c
++++ b/net/mac80211/cfg.c
+@@ -935,8 +935,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+
+ err = ieee80211_set_probe_resp(sdata, params->probe_resp,
+ params->probe_resp_len, csa);
+- if (err < 0)
++ if (err < 0) {
++ kfree(new);
+ return err;
++ }
+ if (err == 0)
+ changed |= BSS_CHANGED_AP_PROBE_RESP;
+
+@@ -948,8 +950,10 @@ static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+ params->civicloc,
+ params->civicloc_len);
+
+- if (err < 0)
++ if (err < 0) {
++ kfree(new);
+ return err;
++ }
+
+ changed |= BSS_CHANGED_FTM_RESPONDER;
+ }
+diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
+index acd4afb4944b..c9a8a2433e8a 100644
+--- a/net/mac80211/driver-ops.c
++++ b/net/mac80211/driver-ops.c
+@@ -187,11 +187,16 @@ int drv_conf_tx(struct ieee80211_local *local,
+ if (!check_sdata_in_driver(sdata))
+ return -EIO;
+
+- if (WARN_ONCE(params->cw_min == 0 ||
+- params->cw_min > params->cw_max,
+- "%s: invalid CW_min/CW_max: %d/%d\n",
+- sdata->name, params->cw_min, params->cw_max))
++ if (params->cw_min == 0 || params->cw_min > params->cw_max) {
++ /*
++ * If we can't configure hardware anyway, don't warn. We may
++ * never have initialized the CW parameters.
++ */
++ WARN_ONCE(local->ops->conf_tx,
++ "%s: invalid CW_min/CW_max: %d/%d\n",
++ sdata->name, params->cw_min, params->cw_max);
+ return -EINVAL;
++ }
+
+ trace_drv_conf_tx(local, sdata, ac, params);
+ if (local->ops->conf_tx)
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 379d2ab6d327..36b60f39930b 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -2041,6 +2041,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
+ ieee80211_regulatory_limit_wmm_params(sdata, &params[ac], ac);
+ }
+
++ /* WMM specification requires all 4 ACIs. */
++ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
++ if (params[ac].cw_min == 0) {
++ sdata_info(sdata,
++ "AP has invalid WMM params (missing AC %d), using defaults\n",
++ ac);
++ return false;
++ }
++ }
++
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ mlme_dbg(sdata,
+ "WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
+diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
+index 1e2cc83ff5da..ae1f8c6b3a97 100644
+--- a/net/netfilter/nf_conntrack_proto_tcp.c
++++ b/net/netfilter/nf_conntrack_proto_tcp.c
+@@ -472,6 +472,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ struct ip_ct_tcp_state *receiver = &state->seen[!dir];
+ const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
+ __u32 seq, ack, sack, end, win, swin;
++ u16 win_raw;
+ s32 receiver_offset;
+ bool res, in_recv_win;
+
+@@ -480,7 +481,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ */
+ seq = ntohl(tcph->seq);
+ ack = sack = ntohl(tcph->ack_seq);
+- win = ntohs(tcph->window);
++ win_raw = ntohs(tcph->window);
++ win = win_raw;
+ end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
+
+ if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
+@@ -655,14 +657,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
+ && state->last_seq == seq
+ && state->last_ack == ack
+ && state->last_end == end
+- && state->last_win == win)
++ && state->last_win == win_raw)
+ state->retrans++;
+ else {
+ state->last_dir = dir;
+ state->last_seq = seq;
+ state->last_ack = ack;
+ state->last_end = end;
+- state->last_win = win;
++ state->last_win = win_raw;
+ state->retrans = 0;
+ }
+ }
+diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
+index 92077d459109..4abbb452cf6c 100644
+--- a/net/netfilter/nfnetlink.c
++++ b/net/netfilter/nfnetlink.c
+@@ -578,7 +578,7 @@ static int nfnetlink_bind(struct net *net, int group)
+ ss = nfnetlink_get_subsys(type << 8);
+ rcu_read_unlock();
+ if (!ss)
+- request_module("nfnetlink-subsys-%d", type);
++ request_module_nowait("nfnetlink-subsys-%d", type);
+ return 0;
+ }
+ #endif
+diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
+index 2f89bde3c61c..ff9ac8ae0031 100644
+--- a/net/netfilter/nft_chain_nat.c
++++ b/net/netfilter/nft_chain_nat.c
+@@ -142,3 +142,6 @@ MODULE_ALIAS_NFT_CHAIN(AF_INET, "nat");
+ #ifdef CONFIG_NF_TABLES_IPV6
+ MODULE_ALIAS_NFT_CHAIN(AF_INET6, "nat");
+ #endif
++#ifdef CONFIG_NF_TABLES_INET
++MODULE_ALIAS_NFT_CHAIN(1, "nat"); /* NFPROTO_INET */
++#endif
+diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
+index fe93e731dc7f..b836d550b919 100644
+--- a/net/netfilter/nft_hash.c
++++ b/net/netfilter/nft_hash.c
+@@ -129,7 +129,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
+ priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
+
+ priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
+- if (priv->modulus <= 1)
++ if (priv->modulus < 1)
+ return -ERANGE;
+
+ if (priv->offset + priv->modulus - 1 < priv->offset)
+diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c
+index 8487eeff5c0e..43eeb1f609f1 100644
+--- a/net/netfilter/nft_redir.c
++++ b/net/netfilter/nft_redir.c
+@@ -291,4 +291,4 @@ module_exit(nft_redir_module_exit);
+
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
+-MODULE_ALIAS_NFT_EXPR("nat");
++MODULE_ALIAS_NFT_EXPR("redir");
+diff --git a/scripts/gen_compile_commands.py b/scripts/gen_compile_commands.py
+index 7915823b92a5..c458696ef3a7 100755
+--- a/scripts/gen_compile_commands.py
++++ b/scripts/gen_compile_commands.py
+@@ -21,9 +21,9 @@ _LINE_PATTERN = r'^cmd_[^ ]*\.o := (.* )([^ ]*\.c)$'
+ _VALID_LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
+
+ # A kernel build generally has over 2000 entries in its compile_commands.json
+-# database. If this code finds 500 or fewer, then warn the user that they might
++# database. If this code finds 300 or fewer, then warn the user that they might
+ # not have all the .cmd files, and they might need to compile the kernel.
+-_LOW_COUNT_THRESHOLD = 500
++_LOW_COUNT_THRESHOLD = 300
+
+
+ def parse_arguments():
+diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
+index 9be208db88d3..1f9f0a334c24 100755
+--- a/scripts/sphinx-pre-install
++++ b/scripts/sphinx-pre-install
+@@ -77,6 +77,17 @@ sub check_missing(%)
+ foreach my $prog (sort keys %missing) {
+ my $is_optional = $missing{$prog};
+
++ # At least on some LTS distros like CentOS 7, texlive doesn't
++ # provide all packages we need. When such distros are
++ # detected, we have to disable PDF output.
++ #
++ # So, we need to ignore the packages that distros would
++ # need for LaTeX to work
++ if ($is_optional == 2 && !$pdf) {
++ $optional--;
++ next;
++ }
++
+ if ($is_optional) {
+ print "Warning: better to also install \"$prog\".\n";
+ } else {
+@@ -326,10 +337,10 @@ sub give_debian_hints()
+
+ if ($pdf) {
+ check_missing_file("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf",
+- "fonts-dejavu", 1);
++ "fonts-dejavu", 2);
+ }
+
+- check_program("dvipng", 1) if ($pdf);
++ check_program("dvipng", 2) if ($pdf);
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+@@ -364,22 +375,40 @@ sub give_redhat_hints()
+ #
+ # Checks valid for RHEL/CentOS version 7.x.
+ #
+- if (! $system_release =~ /Fedora/) {
++ my $old = 0;
++ my $rel;
++ $rel = $1 if ($system_release =~ /release\s+(\d+)/);
++
++ if (!($system_release =~ /Fedora/)) {
+ $map{"virtualenv"} = "python-virtualenv";
+- }
+
+- my $release;
++ if ($rel && $rel < 8) {
++ $old = 1;
++ $pdf = 0;
+
+- $release = $1 if ($system_release =~ /Fedora\s+release\s+(\d+)/);
++ printf("Note: texlive packages on RHEL/CENTOS <= 7 are incomplete. Can't support PDF output\n");
++ printf("If you want to build PDF, please read:\n");
++ printf("\thttps://www.systutorials.com/241660/how-to-install-tex-live-on-centos-7-linux/\n");
++ }
++ } else {
++ if ($rel && $rel < 26) {
++ $old = 1;
++ }
++ }
++ if (!$rel) {
++ printf("Couldn't identify release number\n");
++ $old = 1;
++ $pdf = 0;
++ }
+
+- check_rpm_missing(\@fedora26_opt_pkgs, 1) if ($pdf && $release >= 26);
+- check_rpm_missing(\@fedora_tex_pkgs, 1) if ($pdf);
+- check_missing_tex(1) if ($pdf);
++ check_rpm_missing(\@fedora26_opt_pkgs, 2) if ($pdf && !$old);
++ check_rpm_missing(\@fedora_tex_pkgs, 2) if ($pdf);
++ check_missing_tex(2) if ($pdf);
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+
+- if ($release >= 18) {
++ if (!$old) {
+ # dnf, for Fedora 18+
+ printf("You should run:\n\n\tsudo dnf install -y $install\n");
+ } else {
+@@ -418,8 +447,10 @@ sub give_opensuse_hints()
+ "texlive-zapfding",
+ );
+
+- check_rpm_missing(\@suse_tex_pkgs, 1) if ($pdf);
+- check_missing_tex(1) if ($pdf);
++ $map{"latexmk"} = "texlive-latexmk-bin";
++
++ check_rpm_missing(\@suse_tex_pkgs, 2) if ($pdf);
++ check_missing_tex(2) if ($pdf);
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+@@ -443,7 +474,9 @@ sub give_mageia_hints()
+ "texlive-fontsextra",
+ );
+
+- check_rpm_missing(\@tex_pkgs, 1) if ($pdf);
++ $map{"latexmk"} = "texlive-collection-basic";
++
++ check_rpm_missing(\@tex_pkgs, 2) if ($pdf);
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+@@ -466,7 +499,8 @@ sub give_arch_linux_hints()
+ "texlive-latexextra",
+ "ttf-dejavu",
+ );
+- check_pacman_missing(\@archlinux_tex_pkgs, 1) if ($pdf);
++ check_pacman_missing(\@archlinux_tex_pkgs, 2) if ($pdf);
++
+ check_missing(\%map);
+
+ return if (!$need && !$optional);
+@@ -485,7 +519,7 @@ sub give_gentoo_hints()
+ );
+
+ check_missing_file("/usr/share/fonts/dejavu/DejaVuSans.ttf",
+- "media-fonts/dejavu", 1) if ($pdf);
++ "media-fonts/dejavu", 2) if ($pdf);
+
+ check_missing(\%map);
+
+@@ -553,7 +587,7 @@ sub check_distros()
+ my %map = (
+ "sphinx-build" => "sphinx"
+ );
+- check_missing_tex(1) if ($pdf);
++ check_missing_tex(2) if ($pdf);
+ check_missing(\%map);
+ print "I don't know distro $system_release.\n";
+ print "So, I can't provide you a hint with the install procedure.\n";
+@@ -591,11 +625,13 @@ sub check_needs()
+ check_program("make", 0);
+ check_program("gcc", 0);
+ check_python_module("sphinx_rtd_theme", 1) if (!$virtualenv);
+- check_program("xelatex", 1) if ($pdf);
+ check_program("dot", 1);
+ check_program("convert", 1);
+- check_program("rsvg-convert", 1) if ($pdf);
+- check_program("latexmk", 1) if ($pdf);
++
++ # Extra PDF files - should use 2 for is_optional
++ check_program("xelatex", 2) if ($pdf);
++ check_program("rsvg-convert", 2) if ($pdf);
++ check_program("latexmk", 2) if ($pdf);
+
+ check_distros();
+
+diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
+index 99b882158705..41905afada63 100644
+--- a/sound/core/compress_offload.c
++++ b/sound/core/compress_offload.c
+@@ -574,10 +574,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
+ stream->metadata_set = false;
+ stream->next_track = false;
+
+- if (stream->direction == SND_COMPRESS_PLAYBACK)
+- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+- else
+- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
++ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ } else {
+ return -EPERM;
+ }
+@@ -693,8 +690,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
+ {
+ int retval;
+
+- if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
++ switch (stream->runtime->state) {
++ case SNDRV_PCM_STATE_SETUP:
++ if (stream->direction != SND_COMPRESS_CAPTURE)
++ return -EPERM;
++ break;
++ case SNDRV_PCM_STATE_PREPARED:
++ break;
++ default:
+ return -EPERM;
++ }
++
+ retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
+ if (!retval)
+ stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
+@@ -705,9 +711,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
+ {
+ int retval;
+
+- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
++ switch (stream->runtime->state) {
++ case SNDRV_PCM_STATE_OPEN:
++ case SNDRV_PCM_STATE_SETUP:
++ case SNDRV_PCM_STATE_PREPARED:
+ return -EPERM;
++ default:
++ break;
++ }
++
+ retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
+ if (!retval) {
+ snd_compr_drain_notify(stream);
+@@ -795,9 +807,17 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
+ {
+ int retval;
+
+- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
++ switch (stream->runtime->state) {
++ case SNDRV_PCM_STATE_OPEN:
++ case SNDRV_PCM_STATE_SETUP:
++ case SNDRV_PCM_STATE_PREPARED:
++ case SNDRV_PCM_STATE_PAUSED:
+ return -EPERM;
++ case SNDRV_PCM_STATE_XRUN:
++ return -EPIPE;
++ default:
++ break;
++ }
+
+ retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
+ if (retval) {
+@@ -817,6 +837,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
+ if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
+ return -EPERM;
+
++ /* next track doesn't have any meaning for capture streams */
++ if (stream->direction == SND_COMPRESS_CAPTURE)
++ return -EPERM;
++
+ /* you can signal next track if this is intended to be a gapless stream
+ * and current track metadata is set
+ */
+@@ -834,9 +858,23 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
+ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
+ {
+ int retval;
+- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
+- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
++
++ switch (stream->runtime->state) {
++ case SNDRV_PCM_STATE_OPEN:
++ case SNDRV_PCM_STATE_SETUP:
++ case SNDRV_PCM_STATE_PREPARED:
++ case SNDRV_PCM_STATE_PAUSED:
++ return -EPERM;
++ case SNDRV_PCM_STATE_XRUN:
++ return -EPIPE;
++ default:
++ break;
++ }
++
++ /* partial drain doesn't have any meaning for capture streams */
++ if (stream->direction == SND_COMPRESS_CAPTURE)
+ return -EPERM;
++
+ /* stream can be drained only when next track has been signalled */
+ if (stream->next_track == false)
+ return -EPERM;
+diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
+index 0d35359d25cd..0ecafd0c6722 100644
+--- a/sound/firewire/packets-buffer.c
++++ b/sound/firewire/packets-buffer.c
+@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
+ packets_per_page = PAGE_SIZE / packet_size;
+ if (WARN_ON(!packets_per_page)) {
+ err = -EINVAL;
+- goto error;
++ goto err_packets;
+ }
+ pages = DIV_ROUND_UP(count, packets_per_page);
+
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index 232a1926758a..dd96def48a3a 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -598,11 +598,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
+ }
+ runtime->private_data = azx_dev;
+
+- if (chip->gts_present)
+- azx_pcm_hw.info = azx_pcm_hw.info |
+- SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
+-
+ runtime->hw = azx_pcm_hw;
++ if (chip->gts_present)
++ runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
+ runtime->hw.channels_min = hinfo->channels_min;
+ runtime->hw.channels_max = hinfo->channels_max;
+ runtime->hw.formats = hinfo->formats;
+@@ -615,6 +613,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
+ 20,
+ 178000000);
+
++ /* by some reason, the playback stream stalls on PulseAudio with
++ * tsched=1 when a capture stream triggers. Until we figure out the
++ * real cause, disable tsched mode by telling the PCM info flag.
++ */
++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
++ runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
++
+ if (chip->align_buffer_size)
+ /* constrain buffer sizes to be multiple of 128
+ bytes. This is more efficient in terms of memory
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index 3aa5c957ffbf..863695d025af 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -31,7 +31,7 @@
+ /* 14 unused */
+ #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
+ #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
+-/* 17 unused */
++#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
+ #define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
+ #define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
+ #define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index d438c450f04d..fb8f452a1c78 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -64,6 +64,7 @@ enum {
+ POS_FIX_VIACOMBO,
+ POS_FIX_COMBO,
+ POS_FIX_SKL,
++ POS_FIX_FIFO,
+ };
+
+ /* Defines for ATI HD Audio support in SB450 south bridge */
+@@ -135,7 +136,7 @@ module_param_array(model, charp, NULL, 0444);
+ MODULE_PARM_DESC(model, "Use the given board model.");
+ module_param_array(position_fix, int, NULL, 0444);
+ MODULE_PARM_DESC(position_fix, "DMA pointer read method."
+- "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
++ "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
+ module_param_array(bdl_pos_adj, int, NULL, 0644);
+ MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
+ module_param_array(probe_mask, int, NULL, 0444);
+@@ -332,6 +333,11 @@ enum {
+ #define AZX_DCAPS_PRESET_ATI_HDMI_NS \
+ (AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
+
++/* quirks for AMD SB */
++#define AZX_DCAPS_PRESET_AMD_SB \
++ (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
++ AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
++
+ /* quirks for Nvidia */
+ #define AZX_DCAPS_PRESET_NVIDIA \
+ (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+@@ -841,6 +847,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
+ return bound_pos + mod_dma_pos;
+ }
+
++#define AMD_FIFO_SIZE 32
++
++/* get the current DMA position with FIFO size correction */
++static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
++{
++ struct snd_pcm_substream *substream = azx_dev->core.substream;
++ struct snd_pcm_runtime *runtime = substream->runtime;
++ unsigned int pos, delay;
++
++ pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
++ if (!runtime)
++ return pos;
++
++ runtime->delay = AMD_FIFO_SIZE;
++ delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
++ if (azx_dev->insufficient) {
++ if (pos < delay) {
++ delay = pos;
++ runtime->delay = bytes_to_frames(runtime, pos);
++ } else {
++ azx_dev->insufficient = 0;
++ }
++ }
++
++ /* correct the DMA position for capture stream */
++ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
++ if (pos < delay)
++ pos += azx_dev->core.bufsize;
++ pos -= delay;
++ }
++
++ return pos;
++}
++
++static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
++ unsigned int pos)
++{
++ struct snd_pcm_substream *substream = azx_dev->core.substream;
++
++ /* just read back the calculated value in the above */
++ return substream->runtime->delay;
++}
++
+ static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
+ struct azx_dev *azx_dev)
+ {
+@@ -1417,6 +1466,7 @@ static int check_position_fix(struct azx *chip, int fix)
+ case POS_FIX_VIACOMBO:
+ case POS_FIX_COMBO:
+ case POS_FIX_SKL:
++ case POS_FIX_FIFO:
+ return fix;
+ }
+
+@@ -1433,6 +1483,10 @@ static int check_position_fix(struct azx *chip, int fix)
+ dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
+ return POS_FIX_VIACOMBO;
+ }
++ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
++ dev_dbg(chip->card->dev, "Using FIFO position fix\n");
++ return POS_FIX_FIFO;
++ }
+ if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
+ dev_dbg(chip->card->dev, "Using LPIB position fix\n");
+ return POS_FIX_LPIB;
+@@ -1453,6 +1507,7 @@ static void assign_position_fix(struct azx *chip, int fix)
+ [POS_FIX_VIACOMBO] = azx_via_get_position,
+ [POS_FIX_COMBO] = azx_get_pos_lpib,
+ [POS_FIX_SKL] = azx_get_pos_skl,
++ [POS_FIX_FIFO] = azx_get_pos_fifo,
+ };
+
+ chip->get_position[0] = chip->get_position[1] = callbacks[fix];
+@@ -1467,6 +1522,9 @@ static void assign_position_fix(struct azx *chip, int fix)
+ azx_get_delay_from_lpib;
+ }
+
++ if (fix == POS_FIX_FIFO)
++ chip->get_delay[0] = chip->get_delay[1] =
++ azx_get_delay_from_fifo;
+ }
+
+ /*
+@@ -2444,6 +2502,9 @@ static const struct pci_device_id azx_ids[] = {
+ /* AMD Hudson */
+ { PCI_DEVICE(0x1022, 0x780d),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
++ /* AMD, X370 & co */
++ { PCI_DEVICE(0x1022, 0x1457),
++ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+ /* AMD Stoney */
+ { PCI_DEVICE(0x1022, 0x157a),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+diff --git a/sound/sound_core.c b/sound/sound_core.c
+index b730d97c4de6..90d118cd9164 100644
+--- a/sound/sound_core.c
++++ b/sound/sound_core.c
+@@ -275,7 +275,8 @@ retry:
+ goto retry;
+ }
+ spin_unlock(&sound_loader_lock);
+- return -EBUSY;
++ r = -EBUSY;
++ goto fail;
+ }
+ }
+
+diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
+index 14fc1e1d5d13..c406497c5919 100644
+--- a/sound/usb/hiface/pcm.c
++++ b/sound/usb/hiface/pcm.c
+@@ -600,14 +600,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
+ ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
+ hiface_pcm_out_urb_handler);
+ if (ret < 0)
+- return ret;
++ goto error;
+ }
+
+ ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
+ if (ret < 0) {
+- kfree(rt);
+ dev_err(&chip->dev->dev, "Cannot create pcm instance\n");
+- return ret;
++ goto error;
+ }
+
+ pcm->private_data = rt;
+@@ -620,4 +619,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
+
+ chip->pcm = rt;
+ return 0;
++
++error:
++ for (i = 0; i < PCM_N_URBS; i++)
++ kfree(rt->out_urbs[i].buffer);
++ kfree(rt);
++ return ret;
+ }
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index 7ee9d17d0143..e852c7fd6109 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -1043,6 +1043,7 @@ found_clock:
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd) {
++ kfree(fp->chmap);
+ kfree(fp->rate_table);
+ kfree(fp);
+ return NULL;
+diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
+index a19690a17291..c8c86a0c9b79 100644
+--- a/tools/perf/arch/s390/util/machine.c
++++ b/tools/perf/arch/s390/util/machine.c
+@@ -6,8 +6,9 @@
+ #include "machine.h"
+ #include "api/fs/fs.h"
+ #include "debug.h"
++#include "symbol.h"
+
+-int arch__fix_module_text_start(u64 *start, const char *name)
++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
+ {
+ u64 m_start = *start;
+ char path[PATH_MAX];
+@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
+ if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
+ pr_debug2("Using module %s start:%#lx\n", path, m_start);
+ *start = m_start;
++ } else {
++ /* Successful read of the modules segment text start address.
++ * Calculate difference between module start address
++ * in memory and module text segment start address.
++ * For example module load address is 0x3ff8011b000
++ * (from /proc/modules) and module text segment start
++ * address is 0x3ff8011b870 (from file above).
++ *
++ * Adjust the module size and subtract the GOT table
++ * size located at the beginning of the module.
++ */
++ *size -= (*start - m_start);
+ }
+
+ return 0;
+ }
++
++/* On s390 kernel text segment start is located at very low memory addresses,
++ * for example 0x10000. Modules are located at very high memory addresses,
++ * for example 0x3ff xxxx xxxx. The gap between end of kernel text segment
++ * and beginning of first module's text segment is very big.
++ * Therefore do not fill this gap and do not assign it to the kernel dso map.
++ */
++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
++{
++ if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
++ /* Last kernel symbol mapped to end of page */
++ p->end = roundup(p->end, page_size);
++ else
++ p->end = c->start;
++ pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
++}
+diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
+index 8bb124e55c6d..2c376f5b2120 100644
+--- a/tools/perf/builtin-probe.c
++++ b/tools/perf/builtin-probe.c
+@@ -698,6 +698,16 @@ __cmd_probe(int argc, const char **argv)
+
+ ret = perf_add_probe_events(params.events, params.nevents);
+ if (ret < 0) {
++
++ /*
++ * When perf_add_probe_events() fails it calls
++ * cleanup_perf_probe_events(pevs, npevs), i.e.
++ * cleanup_perf_probe_events(params.events, params.nevents), which
++ * will call clear_perf_probe_event(), so set nevents to zero
++ * to avoid cleanup_params() to call clear_perf_probe_event() again
++ * on the same pevs.
++ */
++ params.nevents = 0;
+ pr_err_with_code(" Error: Failed to add events.", ret);
+ return ret;
+ }
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index d089eb706d18..4380474c8c35 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -1057,7 +1057,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
+
+ printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
+ if (ip == end) {
+- printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, insn, fp,
++ printed += ip__fprintf_jump(ip, &br->entries[i], &x, buffer + off, len - off, ++insn, fp,
+ &total_cycles);
+ if (PRINT_FIELD(SRCCODE))
+ printed += print_srccode(thread, x.cpumode, ip);
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index e28002d90573..c6c550dbb947 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -607,7 +607,13 @@ try_again:
+ * group leaders.
+ */
+ read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
+- perf_evlist__close(evsel_list);
++
++ /*
++ * We need to keep evsel_list alive, because it's processed
++ * later the evsel_list will be closed after.
++ */
++ if (!STAT_RECORD)
++ perf_evlist__close(evsel_list);
+
+ return WEXITSTATUS(status);
+ }
+@@ -1922,6 +1928,7 @@ int cmd_stat(int argc, const char **argv)
+ perf_session__write_header(perf_stat.session, evsel_list, fd, true);
+ }
+
++ perf_evlist__close(evsel_list);
+ perf_session__delete(perf_stat.session);
+ }
+
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 2c46f9aa416c..b854541604df 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -1282,6 +1282,7 @@ static void perf_evsel__free_id(struct perf_evsel *evsel)
+ xyarray__delete(evsel->sample_id);
+ evsel->sample_id = NULL;
+ zfree(&evsel->id);
++ evsel->ids = 0;
+ }
+
+ static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
+@@ -2074,6 +2075,7 @@ void perf_evsel__close(struct perf_evsel *evsel)
+
+ perf_evsel__close_fd(evsel);
+ perf_evsel__free_fd(evsel);
++ perf_evsel__free_id(evsel);
+ }
+
+ int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index b82d4577d969..e84b70be3fc1 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -3666,7 +3666,7 @@ int perf_event__process_feature(struct perf_session *session,
+ return 0;
+
+ ff.buf = (void *)fe->data;
+- ff.size = event->header.size - sizeof(event->header);
++ ff.size = event->header.size - sizeof(*fe);
+ ff.ph = &session->header;
+
+ if (feat_ops[feat].process(&ff, NULL))
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index dc7aafe45a2b..081fe4bdebaa 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -1365,6 +1365,7 @@ static int machine__set_modules_path(struct machine *machine)
+ return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
+ }
+ int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
++ u64 *size __maybe_unused,
+ const char *name __maybe_unused)
+ {
+ return 0;
+@@ -1376,7 +1377,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
+ struct machine *machine = arg;
+ struct map *map;
+
+- if (arch__fix_module_text_start(&start, name) < 0)
++ if (arch__fix_module_text_start(&start, &size, name) < 0)
+ return -1;
+
+ map = machine__findnew_module_map(machine, start, name);
+diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
+index f70ab98a7bde..7aa38da26427 100644
+--- a/tools/perf/util/machine.h
++++ b/tools/perf/util/machine.h
+@@ -222,7 +222,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
+
+ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
+ const char *filename);
+-int arch__fix_module_text_start(u64 *start, const char *name);
++int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
+
+ int machine__load_kallsyms(struct machine *machine, const char *filename);
+
+diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
+index 2e61dd6a3574..d78984096044 100644
+--- a/tools/perf/util/session.c
++++ b/tools/perf/util/session.c
+@@ -36,10 +36,16 @@ static int perf_session__process_compressed_event(struct perf_session *session,
+ void *src;
+ size_t decomp_size, src_size;
+ u64 decomp_last_rem = 0;
+- size_t decomp_len = session->header.env.comp_mmap_len;
++ size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
+ struct decomp *decomp, *decomp_last = session->decomp_last;
+
+- decomp = mmap(NULL, sizeof(struct decomp) + decomp_len, PROT_READ|PROT_WRITE,
++ if (decomp_last) {
++ decomp_last_rem = decomp_last->size - decomp_last->head;
++ decomp_len += decomp_last_rem;
++ }
++
++ mmap_len = sizeof(struct decomp) + decomp_len;
++ decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
+ if (decomp == MAP_FAILED) {
+ pr_err("Couldn't allocate memory for decompression\n");
+@@ -47,10 +53,10 @@ static int perf_session__process_compressed_event(struct perf_session *session,
+ }
+
+ decomp->file_pos = file_offset;
++ decomp->mmap_len = mmap_len;
+ decomp->head = 0;
+
+- if (decomp_last) {
+- decomp_last_rem = decomp_last->size - decomp_last->head;
++ if (decomp_last_rem) {
+ memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
+ decomp->size = decomp_last_rem;
+ }
+@@ -61,7 +67,7 @@ static int perf_session__process_compressed_event(struct perf_session *session,
+ decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
+ &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
+ if (!decomp_size) {
+- munmap(decomp, sizeof(struct decomp) + decomp_len);
++ munmap(decomp, mmap_len);
+ pr_err("Couldn't decompress data\n");
+ return -1;
+ }
+@@ -255,15 +261,15 @@ static void perf_session__delete_threads(struct perf_session *session)
+ static void perf_session__release_decomp_events(struct perf_session *session)
+ {
+ struct decomp *next, *decomp;
+- size_t decomp_len;
++ size_t mmap_len;
+ next = session->decomp;
+- decomp_len = session->header.env.comp_mmap_len;
+ do {
+ decomp = next;
+ if (decomp == NULL)
+ break;
+ next = decomp->next;
+- munmap(decomp, decomp_len + sizeof(struct decomp));
++ mmap_len = decomp->mmap_len;
++ munmap(decomp, mmap_len);
+ } while (1);
+ }
+
+diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
+index dd8920b745bc..863dbad87849 100644
+--- a/tools/perf/util/session.h
++++ b/tools/perf/util/session.h
+@@ -46,6 +46,7 @@ struct perf_session {
+ struct decomp {
+ struct decomp *next;
+ u64 file_pos;
++ size_t mmap_len;
+ u64 head;
+ size_t size;
+ char data[];
+diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
+index 5cbad55cd99d..3b49eb4e3ed9 100644
+--- a/tools/perf/util/symbol.c
++++ b/tools/perf/util/symbol.c
+@@ -91,6 +91,11 @@ static int prefix_underscores_count(const char *str)
+ return tail - str;
+ }
+
++void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
++{
++ p->end = c->start;
++}
++
+ const char * __weak arch__normalize_symbol_name(const char *name)
+ {
+ return name;
+@@ -217,7 +222,7 @@ void symbols__fixup_end(struct rb_root_cached *symbols)
+ curr = rb_entry(nd, struct symbol, rb_node);
+
+ if (prev->end == prev->start && prev->end != curr->start)
+- prev->end = curr->start;
++ arch__symbols__fixup_end(prev, curr);
+ }
+
+ /* Last entry */
+diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
+index 9a8fe012910a..f30ab608ea54 100644
+--- a/tools/perf/util/symbol.h
++++ b/tools/perf/util/symbol.h
+@@ -277,6 +277,7 @@ const char *arch__normalize_symbol_name(const char *name);
+ #define SYMBOL_A 0
+ #define SYMBOL_B 1
+
++void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
+ int arch__compare_symbol_names(const char *namea, const char *nameb);
+ int arch__compare_symbol_names_n(const char *namea, const char *nameb,
+ unsigned int n);
+diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
+index b413ba5b9835..4a9f88d9b7ab 100644
+--- a/tools/perf/util/thread.c
++++ b/tools/perf/util/thread.c
+@@ -197,14 +197,24 @@ struct comm *thread__comm(const struct thread *thread)
+
+ struct comm *thread__exec_comm(const struct thread *thread)
+ {
+- struct comm *comm, *last = NULL;
++ struct comm *comm, *last = NULL, *second_last = NULL;
+
+ list_for_each_entry(comm, &thread->comm_list, list) {
+ if (comm->exec)
+ return comm;
++ second_last = last;
+ last = comm;
+ }
+
++ /*
++ * 'last' with no start time might be the parent's comm of a synthesized
++ * thread (created by processing a synthesized fork event). For a main
++ * thread, that is very probably wrong. Prefer a later comm to avoid
++ * that case.
++ */
++ if (second_last && !last->start && thread->pid_ == thread->tid)
++ return second_last;
++
+ return last;
+ }
+
+diff --git a/tools/perf/util/zstd.c b/tools/perf/util/zstd.c
+index 23bdb9884576..d2202392ffdb 100644
+--- a/tools/perf/util/zstd.c
++++ b/tools/perf/util/zstd.c
+@@ -99,8 +99,8 @@ size_t zstd_decompress_stream(struct zstd_data *data, void *src, size_t src_size
+ while (input.pos < input.size) {
+ ret = ZSTD_decompressStream(data->dstream, &output, &input);
+ if (ZSTD_isError(ret)) {
+- pr_err("failed to decompress (B): %ld -> %ld : %s\n",
+- src_size, output.size, ZSTD_getErrorName(ret));
++ pr_err("failed to decompress (B): %ld -> %ld, dst_size %ld : %s\n",
++ src_size, output.size, dst_size, ZSTD_getErrorName(ret));
+ break;
+ }
+ output.dst = dst + output.pos;
+diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
+index bd5c55916d0d..9b778c51af1b 100644
+--- a/virt/kvm/arm/arm.c
++++ b/virt/kvm/arm/arm.c
+@@ -323,6 +323,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+
+ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+ {
++ /*
++ * If we're about to block (most likely because we've just hit a
++ * WFI), we need to sync back the state of the GIC CPU interface
++ * so that we have the lastest PMR and group enables. This ensures
++ * that kvm_arch_vcpu_runnable has up-to-date data to decide
++ * whether we have pending interrupts.
++ */
++ preempt_disable();
++ kvm_vgic_vmcr_sync(vcpu);
++ preempt_enable();
++
+ kvm_vgic_v4_enable_doorbell(vcpu);
+ }
+
+diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
+index 6dd5ad706c92..96aab77d0471 100644
+--- a/virt/kvm/arm/vgic/vgic-v2.c
++++ b/virt/kvm/arm/vgic/vgic-v2.c
+@@ -484,10 +484,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
+ kvm_vgic_global_state.vctrl_base + GICH_APR);
+ }
+
+-void vgic_v2_put(struct kvm_vcpu *vcpu)
++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+ cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
++}
++
++void vgic_v2_put(struct kvm_vcpu *vcpu)
++{
++ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
++
++ vgic_v2_vmcr_sync(vcpu);
+ cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
+ }
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index c2c9ce009f63..0c653a1e5215 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -662,12 +662,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
+ __vgic_v3_activate_traps(vcpu);
+ }
+
+-void vgic_v3_put(struct kvm_vcpu *vcpu)
++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
+ if (likely(cpu_if->vgic_sre))
+ cpu_if->vgic_vmcr = kvm_call_hyp_ret(__vgic_v3_read_vmcr);
++}
++
++void vgic_v3_put(struct kvm_vcpu *vcpu)
++{
++ vgic_v3_vmcr_sync(vcpu);
+
+ kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
+
+diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
+index 04786c8ec77e..13d4b38a94ec 100644
+--- a/virt/kvm/arm/vgic/vgic.c
++++ b/virt/kvm/arm/vgic/vgic.c
+@@ -919,6 +919,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
+ vgic_v3_put(vcpu);
+ }
+
++void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
++{
++ if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
++ return;
++
++ if (kvm_vgic_global_state.type == VGIC_V2)
++ vgic_v2_vmcr_sync(vcpu);
++ else
++ vgic_v3_vmcr_sync(vcpu);
++}
++
+ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
+index 57205beaa981..11adbdac1d56 100644
+--- a/virt/kvm/arm/vgic/vgic.h
++++ b/virt/kvm/arm/vgic/vgic.h
+@@ -193,6 +193,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+ void vgic_v2_init_lrs(void);
+ void vgic_v2_load(struct kvm_vcpu *vcpu);
+ void vgic_v2_put(struct kvm_vcpu *vcpu);
++void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
+
+ void vgic_v2_save_state(struct kvm_vcpu *vcpu);
+ void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+@@ -223,6 +224,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
+
+ void vgic_v3_load(struct kvm_vcpu *vcpu);
+ void vgic_v3_put(struct kvm_vcpu *vcpu);
++void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
+
+ bool vgic_has_its(struct kvm *kvm);
+ int kvm_vgic_register_its_device(void);
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index e629766f0ec8..7e0d18ac62bf 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -2475,6 +2475,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+ #endif
+ }
+
++/*
++ * Unlike kvm_arch_vcpu_runnable, this function is called outside
++ * a vcpu_load/vcpu_put pair. However, for most architectures
++ * kvm_arch_vcpu_runnable does not require vcpu_load.
++ */
++bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ return kvm_arch_vcpu_runnable(vcpu);
++}
++
++static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
++{
++ if (kvm_arch_dy_runnable(vcpu))
++ return true;
++
++#ifdef CONFIG_KVM_ASYNC_PF
++ if (!list_empty_careful(&vcpu->async_pf.done))
++ return true;
++#endif
++
++ return false;
++}
++
+ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ {
+ struct kvm *kvm = me->kvm;
+@@ -2504,7 +2527,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
+ continue;
+ if (vcpu == me)
+ continue;
+- if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
++ if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
+ continue;
+ if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
+ continue;