-rw-r--r--  0000_README                 4
-rw-r--r--  1011_linux-6.4.12.patch  8395
2 files changed, 8399 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 9ce881e3..5da232d8 100644
--- a/0000_README
+++ b/0000_README
@@ -87,6 +87,10 @@ Patch: 1010_linux-6.4.11.patch
From: https://www.kernel.org
Desc: Linux 6.4.11
+Patch: 1011_linux-6.4.12.patch
+From: https://www.kernel.org
+Desc: Linux 6.4.12
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1011_linux-6.4.12.patch b/1011_linux-6.4.12.patch
new file mode 100644
index 00000000..e7ae9487
--- /dev/null
+++ b/1011_linux-6.4.12.patch
@@ -0,0 +1,8395 @@
+diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst
+index 2f923c805802f..f79cb11b080f6 100644
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -124,8 +124,8 @@ sequence.
+ To ensure the safety of this mitigation, the kernel must ensure that the
+ safe return sequence is itself free from attacker interference. In Zen3
+ and Zen4, this is accomplished by creating a BTB alias between the
+-untraining function srso_untrain_ret_alias() and the safe return
+-function srso_safe_ret_alias() which results in evicting a potentially
++untraining function srso_alias_untrain_ret() and the safe return
++function srso_alias_safe_ret() which results in evicting a potentially
+ poisoned BTB entry and using that safe one for all function returns.
+
+ In older Zen1 and Zen2, this is accomplished using a reinterpretation
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index a8fc0eb6fb1d6..7323911931828 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -323,6 +323,7 @@
+ option with care.
+ pgtbl_v1 - Use v1 page table for DMA-API (Default).
+ pgtbl_v2 - Use v2 page table for DMA-API.
++ irtcachedis - Disable Interrupt Remapping Table (IRT) caching.
+
+ amd_iommu_dump= [HW,X86-64]
+ Enable AMD IOMMU driver option to dump the ACPI table
+diff --git a/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
+index ce18d7dadae23..1edad1da1196d 100644
+--- a/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
++++ b/Documentation/devicetree/bindings/input/goodix,gt7375p.yaml
+@@ -43,6 +43,15 @@ properties:
+ itself as long as it allows the main board to make signals compatible
+ with what the touchscreen is expecting for its IO rails.
+
++ goodix,no-reset-during-suspend:
++ description:
++ Set this to true to enforce the driver to not assert the reset GPIO
++ during suspend.
++ Due to potential touchscreen hardware flaw, back-powering could happen in
++ suspend if the power supply is on and with active-low reset GPIO asserted.
++ This property is used to avoid the back-powering issue.
++ type: boolean
++
+ required:
+ - compatible
+ - reg
+diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml
+index e608a4f1bcaec..e119a226a4b18 100644
+--- a/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml
++++ b/Documentation/devicetree/bindings/pinctrl/qcom,sa8775p-tlmm.yaml
+@@ -87,7 +87,7 @@ $defs:
+ emac0_mdc, emac0_mdio, emac0_ptp_aux, emac0_ptp_pps, emac1_mcg0,
+ emac1_mcg1, emac1_mcg2, emac1_mcg3, emac1_mdc, emac1_mdio,
+ emac1_ptp_aux, emac1_ptp_pps, gcc_gp1, gcc_gp2, gcc_gp3,
+- gcc_gp4, gcc_gp5, hs0_mi2s, hs1_mi2s, hs2_mi2s, ibi_i3c,
++ gcc_gp4, gcc_gp5, gpio, hs0_mi2s, hs1_mi2s, hs2_mi2s, ibi_i3c,
+ jitter_bist, mdp0_vsync0, mdp0_vsync1, mdp0_vsync2, mdp0_vsync3,
+ mdp0_vsync4, mdp0_vsync5, mdp0_vsync6, mdp0_vsync7, mdp0_vsync8,
+ mdp1_vsync0, mdp1_vsync1, mdp1_vsync2, mdp1_vsync3, mdp1_vsync4,
+diff --git a/Documentation/networking/nf_conntrack-sysctl.rst b/Documentation/networking/nf_conntrack-sysctl.rst
+index 8b1045c3b59e0..c383a394c6656 100644
+--- a/Documentation/networking/nf_conntrack-sysctl.rst
++++ b/Documentation/networking/nf_conntrack-sysctl.rst
+@@ -178,10 +178,10 @@ nf_conntrack_sctp_timeout_established - INTEGER (seconds)
+ Default is set to (hb_interval * path_max_retrans + rto_max)
+
+ nf_conntrack_sctp_timeout_shutdown_sent - INTEGER (seconds)
+- default 0.3
++ default 3
+
+ nf_conntrack_sctp_timeout_shutdown_recd - INTEGER (seconds)
+- default 0.3
++ default 3
+
+ nf_conntrack_sctp_timeout_shutdown_ack_sent - INTEGER (seconds)
+ default 3
+diff --git a/Makefile b/Makefile
+index d0efd84bb7d0f..0ff13b943f994 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 4
+-SUBLEVEL = 11
++SUBLEVEL = 12
+ EXTRAVERSION =
+ NAME = Hurr durr I'ma ninja sloth
+
+diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
+index d19508c8f9ed6..a3668a0827fc8 100644
+--- a/arch/arm/boot/dts/imx23.dtsi
++++ b/arch/arm/boot/dts/imx23.dtsi
+@@ -59,7 +59,7 @@
+ reg = <0x80000000 0x2000>;
+ };
+
+- dma_apbh: dma-apbh@80004000 {
++ dma_apbh: dma-controller@80004000 {
+ compatible = "fsl,imx23-dma-apbh";
+ reg = <0x80004000 0x2000>;
+ interrupts = <0 14 20 0
+diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
+index a8d3c3113e0f6..29e37b1fae66f 100644
+--- a/arch/arm/boot/dts/imx28.dtsi
++++ b/arch/arm/boot/dts/imx28.dtsi
+@@ -78,7 +78,7 @@
+ status = "disabled";
+ };
+
+- dma_apbh: dma-apbh@80004000 {
++ dma_apbh: dma-controller@80004000 {
+ compatible = "fsl,imx28-dma-apbh";
+ reg = <0x80004000 0x2000>;
+ interrupts = <82 83 84 85
+diff --git a/arch/arm/boot/dts/imx6dl-prtrvt.dts b/arch/arm/boot/dts/imx6dl-prtrvt.dts
+index 56bb1ca56a2df..36b031236e475 100644
+--- a/arch/arm/boot/dts/imx6dl-prtrvt.dts
++++ b/arch/arm/boot/dts/imx6dl-prtrvt.dts
+@@ -124,6 +124,10 @@
+ status = "disabled";
+ };
+
++&usbotg {
++ disable-over-current;
++};
++
+ &vpu {
+ status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi
+index 1a599c294ab86..1ca4d219609f6 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi
+@@ -182,7 +182,7 @@
+ pinctrl-0 = <&pinctrl_rtc_int>;
+ reg = <0x68>;
+ interrupt-parent = <&gpio7>;
+- interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ status = "disabled";
+ };
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
+index f0db0d4471f40..36f84f4da6b0d 100644
+--- a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
+@@ -69,6 +69,7 @@
+ vbus-supply = <&reg_usb_h1_vbus>;
+ phy_type = "utmi";
+ dr_mode = "host";
++ disable-over-current;
+ status = "okay";
+ };
+
+@@ -78,10 +79,18 @@
+ pinctrl-0 = <&pinctrl_usbotg>;
+ phy_type = "utmi";
+ dr_mode = "host";
+- disable-over-current;
++ over-current-active-low;
+ status = "okay";
+ };
+
++&usbphynop1 {
++ status = "disabled";
++};
++
++&usbphynop2 {
++ status = "disabled";
++};
++
+ &usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
+index b72ec745f6d12..bda182edc5891 100644
+--- a/arch/arm/boot/dts/imx6qdl.dtsi
++++ b/arch/arm/boot/dts/imx6qdl.dtsi
+@@ -150,7 +150,7 @@
+ interrupt-parent = <&gpc>;
+ ranges;
+
+- dma_apbh: dma-apbh@110000 {
++ dma_apbh: dma-controller@110000 {
+ compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x00110000 0x2000>;
+ interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
+index 93ac2380ca1ec..fc0654e3fe950 100644
+--- a/arch/arm/boot/dts/imx6sx.dtsi
++++ b/arch/arm/boot/dts/imx6sx.dtsi
+@@ -209,7 +209,7 @@
+ power-domains = <&pd_pu>;
+ };
+
+- dma_apbh: dma-apbh@1804000 {
++ dma_apbh: dma-controller@1804000 {
+ compatible = "fsl,imx6sx-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x01804000 0x2000>;
+ interrupts = <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
+@@ -980,6 +980,8 @@
+ <&clks IMX6SX_CLK_USDHC1>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-start-tap = <20>;
++ fsl,tuning-step= <2>;
+ status = "disabled";
+ };
+
+@@ -992,6 +994,8 @@
+ <&clks IMX6SX_CLK_USDHC2>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-start-tap = <20>;
++ fsl,tuning-step= <2>;
+ status = "disabled";
+ };
+
+@@ -1004,6 +1008,8 @@
+ <&clks IMX6SX_CLK_USDHC3>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-start-tap = <20>;
++ fsl,tuning-step= <2>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/imx6ul.dtsi b/arch/arm/boot/dts/imx6ul.dtsi
+index 3d9d0f8235685..118764c50d921 100644
+--- a/arch/arm/boot/dts/imx6ul.dtsi
++++ b/arch/arm/boot/dts/imx6ul.dtsi
+@@ -164,7 +164,7 @@
+ <0x00a06000 0x2000>;
+ };
+
+- dma_apbh: dma-apbh@1804000 {
++ dma_apbh: dma-controller@1804000 {
+ compatible = "fsl,imx6q-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x01804000 0x2000>;
+ interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm/boot/dts/imx7s.dtsi b/arch/arm/boot/dts/imx7s.dtsi
+index efe2525b62fa1..6ffb428dc939c 100644
+--- a/arch/arm/boot/dts/imx7s.dtsi
++++ b/arch/arm/boot/dts/imx7s.dtsi
+@@ -1184,6 +1184,8 @@
+ <&clks IMX7D_USDHC1_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-step = <2>;
++ fsl,tuning-start-tap = <20>;
+ status = "disabled";
+ };
+
+@@ -1196,6 +1198,8 @@
+ <&clks IMX7D_USDHC2_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-step = <2>;
++ fsl,tuning-start-tap = <20>;
+ status = "disabled";
+ };
+
+@@ -1208,6 +1212,8 @@
+ <&clks IMX7D_USDHC3_ROOT_CLK>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-step = <2>;
++ fsl,tuning-start-tap = <20>;
+ status = "disabled";
+ };
+
+@@ -1257,7 +1263,7 @@
+ };
+ };
+
+- dma_apbh: dma-apbh@33000000 {
++ dma_apbh: dma-controller@33000000 {
+ compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh";
+ reg = <0x33000000 0x2000>;
+ interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index d6b36f04f3dc1..1a647d4072ba0 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -1221,10 +1221,9 @@
+ compatible = "fsl,imx8mm-mipi-csi2";
+ reg = <0x32e30000 0x1000>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+- assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>,
+- <&clk IMX8MM_CLK_CSI1_PHY_REF>;
+- assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>,
+- <&clk IMX8MM_SYS_PLL2_1000M>;
++ assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>;
++ assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>;
++
+ clock-frequency = <333000000>;
+ clocks = <&clk IMX8MM_CLK_DISP_APB_ROOT>,
+ <&clk IMX8MM_CLK_CSI1_ROOT>,
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index e8d49660ac85b..c0f49fedaf9ea 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -306,7 +306,7 @@
+
+ anatop: anatop@44480000 {
+ compatible = "fsl,imx93-anatop", "syscon";
+- reg = <0x44480000 0x10000>;
++ reg = <0x44480000 0x2000>;
+ };
+
+ adc1: adc@44530000 {
+diff --git a/arch/arm64/boot/dts/qcom/ipq5332.dtsi b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+index af4d97143bcf5..c2d6cc65a323a 100644
+--- a/arch/arm64/boot/dts/qcom/ipq5332.dtsi
++++ b/arch/arm64/boot/dts/qcom/ipq5332.dtsi
+@@ -135,6 +135,13 @@
+ #size-cells = <1>;
+ ranges = <0 0 0 0xffffffff>;
+
++ qfprom: efuse@a4000 {
++ compatible = "qcom,ipq5332-qfprom", "qcom,qfprom";
++ reg = <0x000a4000 0x721>;
++ #address-cells = <1>;
++ #size-cells = <1>;
++ };
++
+ rng: rng@e3000 {
+ compatible = "qcom,prng-ee";
+ reg = <0x000e3000 0x1000>;
+diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+index dd924331b0eea..ec066a89436a8 100644
+--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
++++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+@@ -121,7 +121,7 @@
+ };
+ };
+
+- pm8150l-thermal {
++ pm8150l-pcb-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 1>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
+index 028eb508ae302..8bfd5f88d1ef6 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
+@@ -548,9 +548,8 @@
+ &sdhci {
+ max-frequency = <150000000>;
+ bus-width = <8>;
+- mmc-hs400-1_8v;
++ mmc-hs200-1_8v;
+ non-removable;
+- mmc-hs400-enhanced-strobe;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+index 907071d4fe804..980c4534313a2 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+@@ -45,7 +45,7 @@
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk808 1>;
+- clock-names = "ext_clock";
++ clock-names = "lpo";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+@@ -645,9 +645,9 @@
+ };
+
+ &sdhci {
++ max-frequency = <150000000>;
+ bus-width = <8>;
+- mmc-hs400-1_8v;
+- mmc-hs400-enhanced-strobe;
++ mmc-hs200-1_8v;
+ non-removable;
+ status = "okay";
+ };
+diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
+index 67f2fb781f59e..8df46f186c64b 100644
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -356,7 +356,7 @@ static inline int sme_max_virtualisable_vl(void)
+ return vec_max_virtualisable_vl(ARM64_VEC_SME);
+ }
+
+-extern void sme_alloc(struct task_struct *task);
++extern void sme_alloc(struct task_struct *task, bool flush);
+ extern unsigned int sme_get_vl(void);
+ extern int sme_set_current_vl(unsigned long arg);
+ extern int sme_get_current_vl(void);
+@@ -388,7 +388,7 @@ static inline void sme_smstart_sm(void) { }
+ static inline void sme_smstop_sm(void) { }
+ static inline void sme_smstop(void) { }
+
+-static inline void sme_alloc(struct task_struct *task) { }
++static inline void sme_alloc(struct task_struct *task, bool flush) { }
+ static inline void sme_setup(void) { }
+ static inline unsigned int sme_get_vl(void) { return 0; }
+ static inline int sme_max_vl(void) { return 0; }
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 75c37b1c55aaf..087c05aa960ea 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1285,9 +1285,9 @@ void fpsimd_release_task(struct task_struct *dead_task)
+ * the interest of testability and predictability, the architecture
+ * guarantees that when ZA is enabled it will be zeroed.
+ */
+-void sme_alloc(struct task_struct *task)
++void sme_alloc(struct task_struct *task, bool flush)
+ {
+- if (task->thread.sme_state) {
++ if (task->thread.sme_state && flush) {
+ memset(task->thread.sme_state, 0, sme_state_size(task));
+ return;
+ }
+@@ -1515,7 +1515,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
+ }
+
+ sve_alloc(current, false);
+- sme_alloc(current);
++ sme_alloc(current, true);
+ if (!current->thread.sve_state || !current->thread.sme_state) {
+ force_sig(SIGKILL);
+ return;
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index 5b9b4305248b8..187aa2b175b4f 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -881,6 +881,13 @@ static int sve_set_common(struct task_struct *target,
+ break;
+ case ARM64_VEC_SME:
+ target->thread.svcr |= SVCR_SM_MASK;
++
++ /*
++ * Disable traps and ensure there is SME storage but
++ * preserve any currently set values in ZA/ZT.
++ */
++ sme_alloc(target, false);
++ set_tsk_thread_flag(target, TIF_SME);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+@@ -1100,7 +1107,7 @@ static int za_set(struct task_struct *target,
+ }
+
+ /* Allocate/reinit ZA storage */
+- sme_alloc(target);
++ sme_alloc(target, true);
+ if (!target->thread.sme_state) {
+ ret = -ENOMEM;
+ goto out;
+@@ -1170,8 +1177,13 @@ static int zt_set(struct task_struct *target,
+ if (!system_supports_sme2())
+ return -EINVAL;
+
++ /* Ensure SVE storage in case this is first use of SME */
++ sve_alloc(target, false);
++ if (!target->thread.sve_state)
++ return -ENOMEM;
++
+ if (!thread_za_enabled(&target->thread)) {
+- sme_alloc(target);
++ sme_alloc(target, true);
+ if (!target->thread.sme_state)
+ return -ENOMEM;
+ }
+@@ -1179,8 +1191,10 @@ static int zt_set(struct task_struct *target,
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ thread_zt_state(&target->thread),
+ 0, ZT_SIG_REG_BYTES);
+- if (ret == 0)
++ if (ret == 0) {
+ target->thread.svcr |= SVCR_ZA_MASK;
++ set_tsk_thread_flag(target, TIF_SME);
++ }
+
+ fpsimd_flush_task_state(target);
+
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index 10b407672c427..bcd1ebb21da66 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -474,7 +474,7 @@ static int restore_za_context(struct user_ctxs *user)
+ fpsimd_flush_task_state(current);
+ /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
+
+- sme_alloc(current);
++ sme_alloc(current, true);
+ if (!current->thread.sme_state) {
+ current->thread.svcr &= ~SVCR_ZA_MASK;
+ clear_thread_flag(TIF_SME);
+diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
+index 0e5ebfe8d9d29..ae03b8679696e 100644
+--- a/arch/parisc/kernel/entry.S
++++ b/arch/parisc/kernel/entry.S
+@@ -25,6 +25,7 @@
+ #include <asm/traps.h>
+ #include <asm/thread_info.h>
+ #include <asm/alternative.h>
++#include <asm/spinlock_types.h>
+
+ #include <linux/linkage.h>
+ #include <linux/pgtable.h>
+@@ -406,7 +407,7 @@
+ LDREG 0(\ptp),\pte
+ bb,<,n \pte,_PAGE_PRESENT_BIT,3f
+ b \fault
+- stw \spc,0(\tmp)
++ stw \tmp1,0(\tmp)
+ 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+ #endif
+ 2: LDREG 0(\ptp),\pte
+@@ -415,24 +416,22 @@
+ .endm
+
+ /* Release page_table_lock without reloading lock address.
+- Note that the values in the register spc are limited to
+- NR_SPACE_IDS (262144). Thus, the stw instruction always
+- stores a nonzero value even when register spc is 64 bits.
+ We use an ordered store to ensure all prior accesses are
+ performed prior to releasing the lock. */
+- .macro ptl_unlock0 spc,tmp
++ .macro ptl_unlock0 spc,tmp,tmp2
+ #ifdef CONFIG_TLB_PTLOCK
+-98: or,COND(=) %r0,\spc,%r0
+- stw,ma \spc,0(\tmp)
++98: ldi __ARCH_SPIN_LOCK_UNLOCKED_VAL, \tmp2
++ or,COND(=) %r0,\spc,%r0
++ stw,ma \tmp2,0(\tmp)
+ 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+ #endif
+ .endm
+
+ /* Release page_table_lock. */
+- .macro ptl_unlock1 spc,tmp
++ .macro ptl_unlock1 spc,tmp,tmp2
+ #ifdef CONFIG_TLB_PTLOCK
+ 98: get_ptl \tmp
+- ptl_unlock0 \spc,\tmp
++ ptl_unlock0 \spc,\tmp,\tmp2
+ 99: ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
+ #endif
+ .endm
+@@ -1125,7 +1124,7 @@ dtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1151,7 +1150,7 @@ nadtlb_miss_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1185,7 +1184,7 @@ dtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1218,7 +1217,7 @@ nadtlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1247,7 +1246,7 @@ dtlb_miss_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1275,7 +1274,7 @@ nadtlb_miss_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1320,7 +1319,7 @@ itlb_miss_20w:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1344,7 +1343,7 @@ naitlb_miss_20w:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1378,7 +1377,7 @@ itlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1402,7 +1401,7 @@ naitlb_miss_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1432,7 +1431,7 @@ itlb_miss_20:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1452,7 +1451,7 @@ naitlb_miss_20:
+
+ iitlbt pte,prot
+
+- ptl_unlock1 spc,t0
++ ptl_unlock1 spc,t0,t1
+ rfir
+ nop
+
+@@ -1482,7 +1481,7 @@ dbit_trap_20w:
+
+ idtlbt pte,prot
+
+- ptl_unlock0 spc,t0
++ ptl_unlock0 spc,t0,t1
+ rfir
+ nop
+ #else
+@@ -1508,7 +1507,7 @@ dbit_trap_11:
+
+ mtsp t1, %sr1 /* Restore sr1 */
+
+- ptl_unlock0 spc,t0
++ ptl_unlock0 spc,t0,t1
+ rfir
+ nop
+
+@@ -1528,7 +1527,7 @@ dbit_trap_20:
+
+ idtlbt pte,prot
+
+- ptl_unlock0 spc,t0
++ ptl_unlock0 spc,t0,t1
+ rfir
+ nop
+ #endif
+diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
+index 4caf5e3079eb4..359577ec16801 100644
+--- a/arch/powerpc/kernel/rtas_flash.c
++++ b/arch/powerpc/kernel/rtas_flash.c
+@@ -709,9 +709,9 @@ static int __init rtas_flash_init(void)
+ if (!rtas_validate_flash_data.buf)
+ return -ENOMEM;
+
+- flash_block_cache = kmem_cache_create("rtas_flash_cache",
+- RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
+- NULL);
++ flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache",
++ RTAS_BLK_SIZE, RTAS_BLK_SIZE,
++ 0, 0, RTAS_BLK_SIZE, NULL);
+ if (!flash_block_cache) {
+ printk(KERN_ERR "%s: failed to create block cache\n",
+ __func__);
+diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
+index 699eeffd9f551..f9522fd70b2f3 100644
+--- a/arch/powerpc/mm/kasan/Makefile
++++ b/arch/powerpc/mm/kasan/Makefile
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+
+ KASAN_SANITIZE := n
++KCOV_INSTRUMENT := n
+
+ obj-$(CONFIG_PPC32) += init_32.o
+ obj-$(CONFIG_PPC_8xx) += 8xx.o
+diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
+index 8d5c84f2d5ef7..603095c913e37 100644
+--- a/arch/riscv/include/asm/insn.h
++++ b/arch/riscv/include/asm/insn.h
+@@ -110,6 +110,7 @@
+ #define RVC_INSN_FUNCT4_OPOFF 12
+ #define RVC_INSN_FUNCT3_MASK GENMASK(15, 13)
+ #define RVC_INSN_FUNCT3_OPOFF 13
++#define RVC_INSN_J_RS1_MASK GENMASK(11, 7)
+ #define RVC_INSN_J_RS2_MASK GENMASK(6, 2)
+ #define RVC_INSN_OPCODE_MASK GENMASK(1, 0)
+ #define RVC_ENCODE_FUNCT3(f_) (RVC_FUNCT3_##f_ << RVC_INSN_FUNCT3_OPOFF)
+@@ -225,8 +226,6 @@ __RISCV_INSN_FUNCS(c_jal, RVC_MASK_C_JAL, RVC_MATCH_C_JAL)
+ __RISCV_INSN_FUNCS(auipc, RVG_MASK_AUIPC, RVG_MATCH_AUIPC)
+ __RISCV_INSN_FUNCS(jalr, RVG_MASK_JALR, RVG_MATCH_JALR)
+ __RISCV_INSN_FUNCS(jal, RVG_MASK_JAL, RVG_MATCH_JAL)
+-__RISCV_INSN_FUNCS(c_jr, RVC_MASK_C_JR, RVC_MATCH_C_JR)
+-__RISCV_INSN_FUNCS(c_jalr, RVC_MASK_C_JALR, RVC_MATCH_C_JALR)
+ __RISCV_INSN_FUNCS(c_j, RVC_MASK_C_J, RVC_MATCH_C_J)
+ __RISCV_INSN_FUNCS(beq, RVG_MASK_BEQ, RVG_MATCH_BEQ)
+ __RISCV_INSN_FUNCS(bne, RVG_MASK_BNE, RVG_MATCH_BNE)
+@@ -253,6 +252,18 @@ static __always_inline bool riscv_insn_is_branch(u32 code)
+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_BRANCH;
+ }
+
++static __always_inline bool riscv_insn_is_c_jr(u32 code)
++{
++ return (code & RVC_MASK_C_JR) == RVC_MATCH_C_JR &&
++ (code & RVC_INSN_J_RS1_MASK) != 0;
++}
++
++static __always_inline bool riscv_insn_is_c_jalr(u32 code)
++{
++ return (code & RVC_MASK_C_JALR) == RVC_MATCH_C_JALR &&
++ (code & RVC_INSN_J_RS1_MASK) != 0;
++}
++
+ #define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+ #define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1))
+ #define RV_X(X, s, mask) (((X) >> (s)) & (mask))
+diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
+index 8c258b78c925c..bd19e885dcec1 100644
+--- a/arch/riscv/kernel/traps.c
++++ b/arch/riscv/kernel/traps.c
+@@ -268,16 +268,16 @@ asmlinkage __visible __trap_section void do_trap_break(struct pt_regs *regs)
+ asmlinkage __visible __trap_section void do_trap_ecall_u(struct pt_regs *regs)
+ {
+ if (user_mode(regs)) {
+- ulong syscall = regs->a7;
++ long syscall = regs->a7;
+
+ regs->epc += 4;
+ regs->orig_a0 = regs->a0;
+
+ syscall = syscall_enter_from_user_mode(regs, syscall);
+
+- if (syscall < NR_syscalls)
++ if (syscall >= 0 && syscall < NR_syscalls)
+ syscall_handler(regs, syscall);
+- else
++ else if (syscall != -1)
+ regs->a0 = -ENOSYS;
+
+ syscall_exit_to_user_mode(regs);
+diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
+index ec486e5369d9b..09b47ebacf2e8 100644
+--- a/arch/riscv/lib/uaccess.S
++++ b/arch/riscv/lib/uaccess.S
+@@ -17,8 +17,11 @@ ENTRY(__asm_copy_from_user)
+ li t6, SR_SUM
+ csrs CSR_STATUS, t6
+
+- /* Save for return value */
+- mv t5, a2
++ /*
++ * Save the terminal address which will be used to compute the number
++ * of bytes copied in case of a fixup exception.
++ */
++ add t5, a0, a2
+
+ /*
+ * Register allocation for code below:
+@@ -176,7 +179,7 @@ ENTRY(__asm_copy_from_user)
+ 10:
+ /* Disable access to user memory */
+ csrc CSR_STATUS, t6
+- mv a0, t5
++ sub a0, t5, a0
+ ret
+ ENDPROC(__asm_copy_to_user)
+ ENDPROC(__asm_copy_from_user)
+@@ -228,7 +231,7 @@ ENTRY(__clear_user)
+ 11:
+ /* Disable access to user memory */
+ csrc CSR_STATUS, t6
+- mv a0, a1
++ sub a0, a3, a0
+ ret
+ ENDPROC(__clear_user)
+ EXPORT_SYMBOL(__clear_user)
+diff --git a/arch/um/os-Linux/user_syms.c b/arch/um/os-Linux/user_syms.c
+index 9b62a9d352b3a..a310ae27b479a 100644
+--- a/arch/um/os-Linux/user_syms.c
++++ b/arch/um/os-Linux/user_syms.c
+@@ -37,13 +37,6 @@ EXPORT_SYMBOL(vsyscall_ehdr);
+ EXPORT_SYMBOL(vsyscall_end);
+ #endif
+
+-/* Export symbols used by GCC for the stack protector. */
+-extern void __stack_smash_handler(void *) __attribute__((weak));
+-EXPORT_SYMBOL(__stack_smash_handler);
+-
+-extern long __guard __attribute__((weak));
+-EXPORT_SYMBOL(__guard);
+-
+ #ifdef _FORTIFY_SOURCE
+ extern int __sprintf_chk(char *str, int flag, size_t len, const char *format);
+ EXPORT_SYMBOL(__sprintf_chk);
+diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h
+index 117903881fe43..ce8f50192ae3e 100644
+--- a/arch/x86/include/asm/entry-common.h
++++ b/arch/x86/include/asm/entry-common.h
+@@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
+ static __always_inline void arch_exit_to_user_mode(void)
+ {
+ mds_user_clear_cpu_buffers();
++ amd_clear_divider();
+ }
+ #define arch_exit_to_user_mode arch_exit_to_user_mode
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index e1e7b319fe78d..8da84e1e56581 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -268,9 +268,9 @@
+ .endm
+
+ #ifdef CONFIG_CPU_UNRET_ENTRY
+-#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret"
++#define CALL_UNTRAIN_RET "call entry_untrain_ret"
+ #else
+-#define CALL_ZEN_UNTRAIN_RET ""
++#define CALL_UNTRAIN_RET ""
+ #endif
+
+ /*
+@@ -278,7 +278,7 @@
+ * return thunk isn't mapped into the userspace tables (then again, AMD
+ * typically has NO_MELTDOWN).
+ *
+- * While zen_untrain_ret() doesn't clobber anything but requires stack,
++ * While retbleed_untrain_ret() doesn't clobber anything but requires stack,
+ * entry_ibpb() will clobber AX, CX, DX.
+ *
+ * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
+@@ -289,14 +289,20 @@
+ defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+ VALIDATE_UNRET_END
+ ALTERNATIVE_3 "", \
+- CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
++ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
+ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
+ #endif
++.endm
+
+-#ifdef CONFIG_CPU_SRSO
+- ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+- "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
++.macro UNTRAIN_RET_VM
++#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
++ defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
++ VALIDATE_UNRET_END
++ ALTERNATIVE_3 "", \
++ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
++ "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \
++ __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
+ #endif
+ .endm
+
+@@ -305,15 +311,10 @@
+ defined(CONFIG_CALL_DEPTH_TRACKING)
+ VALIDATE_UNRET_END
+ ALTERNATIVE_3 "", \
+- CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \
++ CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
+ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
+ __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
+ #endif
+-
+-#ifdef CONFIG_CPU_SRSO
+- ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \
+- "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS
+-#endif
+ .endm
+
+
+@@ -337,17 +338,24 @@ extern retpoline_thunk_t __x86_indirect_thunk_array[];
+ extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
+ extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
+
++#ifdef CONFIG_RETHUNK
+ extern void __x86_return_thunk(void);
+-extern void zen_untrain_ret(void);
++#else
++static inline void __x86_return_thunk(void) {}
++#endif
++
++extern void retbleed_return_thunk(void);
++extern void srso_return_thunk(void);
++extern void srso_alias_return_thunk(void);
++
++extern void retbleed_untrain_ret(void);
+ extern void srso_untrain_ret(void);
+-extern void srso_untrain_ret_alias(void);
++extern void srso_alias_untrain_ret(void);
++
++extern void entry_untrain_ret(void);
+ extern void entry_ibpb(void);
+
+-#ifdef CONFIG_CALL_THUNKS
+ extern void (*x86_return_thunk)(void);
+-#else
+-#define x86_return_thunk (&__x86_return_thunk)
+-#endif
+
+ #ifdef CONFIG_CALL_DEPTH_TRACKING
+ extern void __x86_return_skl(void);
+@@ -474,9 +482,6 @@ enum ssb_mitigation {
+ SPEC_STORE_BYPASS_SECCOMP,
+ };
+
+-extern char __indirect_thunk_start[];
+-extern char __indirect_thunk_end[];
+-
+ static __always_inline
+ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
+ {
+diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
+index f615e0cb6d932..94b42fbb6ffa6 100644
+--- a/arch/x86/kernel/alternative.c
++++ b/arch/x86/kernel/alternative.c
+@@ -571,10 +571,6 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
+
+ #ifdef CONFIG_RETHUNK
+
+-#ifdef CONFIG_CALL_THUNKS
+-void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+-#endif
+-
+ /*
+ * Rewrite the compiler generated return thunk tail-calls.
+ *
+diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
+index 0b5f33cb32b59..13b0da82cb5fb 100644
+--- a/arch/x86/kernel/cpu/amd.c
++++ b/arch/x86/kernel/cpu/amd.c
+@@ -1329,3 +1329,4 @@ void noinstr amd_clear_divider(void)
+ asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
+ :: "a" (0), "d" (0), "r" (1));
+ }
++EXPORT_SYMBOL_GPL(amd_clear_divider);
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index f3d627901d890..d5319779da585 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -63,6 +63,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
+
+ static DEFINE_MUTEX(spec_ctrl_mutex);
+
++void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
++
+ /* Update SPEC_CTRL MSR and its cached copy unconditionally */
+ static void update_spec_ctrl(u64 val)
+ {
+@@ -165,8 +167,13 @@ void __init cpu_select_mitigations(void)
+ md_clear_select_mitigation();
+ srbds_select_mitigation();
+ l1d_flush_select_mitigation();
+- gds_select_mitigation();
++
++ /*
++ * srso_select_mitigation() depends and must run after
++ * retbleed_select_mitigation().
++ */
+ srso_select_mitigation();
++ gds_select_mitigation();
+ }
+
+ /*
+@@ -1035,6 +1042,9 @@ do_cmd_auto:
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
++ if (IS_ENABLED(CONFIG_RETHUNK))
++ x86_return_thunk = retbleed_return_thunk;
++
+ if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
+ boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
+ pr_err(RETBLEED_UNTRAIN_MSG);
+@@ -1044,6 +1054,7 @@ do_cmd_auto:
+
+ case RETBLEED_MITIGATION_IBPB:
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ mitigate_smt = true;
+ break;
+
+@@ -2417,9 +2428,10 @@ static void __init srso_select_mitigation(void)
+ * Zen1/2 with SMT off aren't vulnerable after the right
+ * IBPB microcode has been applied.
+ */
+- if ((boot_cpu_data.x86 < 0x19) &&
+- (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED)))
++ if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
++ return;
++ }
+ }
+
+ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+@@ -2448,11 +2460,15 @@ static void __init srso_select_mitigation(void)
+ * like ftrace, static_call, etc.
+ */
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
++ setup_force_cpu_cap(X86_FEATURE_UNRET);
+
+- if (boot_cpu_data.x86 == 0x19)
++ if (boot_cpu_data.x86 == 0x19) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+- else
++ x86_return_thunk = srso_alias_return_thunk;
++ } else {
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
++ x86_return_thunk = srso_return_thunk;
++ }
+ srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+ } else {
+ pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
+@@ -2701,6 +2717,9 @@ static ssize_t gds_show_state(char *buf)
+
+ static ssize_t srso_show_state(char *buf)
+ {
++ if (boot_cpu_has(X86_FEATURE_SRSO_NO))
++ return sysfs_emit(buf, "Mitigation: SMT disabled\n");
++
+ return sysfs_emit(buf, "%s%s\n",
+ srso_strings[srso_mitigation],
+ (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
+diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
+index 57b0037d0a996..517821b48391a 100644
+--- a/arch/x86/kernel/kprobes/opt.c
++++ b/arch/x86/kernel/kprobes/opt.c
+@@ -226,7 +226,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src, u8 *real)
+ }
+
+ /* Check whether insn is indirect jump */
+-static int __insn_is_indirect_jump(struct insn *insn)
++static int insn_is_indirect_jump(struct insn *insn)
+ {
+ return ((insn->opcode.bytes[0] == 0xff &&
+ (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
+@@ -260,26 +260,6 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
+ return (start <= target && target <= start + len);
+ }
+
+-static int insn_is_indirect_jump(struct insn *insn)
+-{
+- int ret = __insn_is_indirect_jump(insn);
+-
+-#ifdef CONFIG_RETPOLINE
+- /*
+- * Jump to x86_indirect_thunk_* is treated as an indirect jump.
+- * Note that even with CONFIG_RETPOLINE=y, the kernel compiled with
+- * older gcc may use indirect jump. So we add this check instead of
+- * replace indirect-jump check.
+- */
+- if (!ret)
+- ret = insn_jump_into_range(insn,
+- (unsigned long)__indirect_thunk_start,
+- (unsigned long)__indirect_thunk_end -
+- (unsigned long)__indirect_thunk_start);
+-#endif
+- return ret;
+-}
+-
+ /* Decode whole function to ensure any instructions don't jump into target */
+ static int can_optimize(unsigned long paddr)
+ {
+@@ -334,9 +314,21 @@ static int can_optimize(unsigned long paddr)
+ /* Recover address */
+ insn.kaddr = (void *)addr;
+ insn.next_byte = (void *)(addr + insn.length);
+- /* Check any instructions don't jump into target */
+- if (insn_is_indirect_jump(&insn) ||
+- insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
++ /*
++ * Check any instructions don't jump into target, indirectly or
++ * directly.
++ *
++ * The indirect case is present to handle a code with jump
++ * tables. When the kernel uses retpolines, the check should in
++ * theory additionally look for jumps to indirect thunks.
++ * However, the kernel built with retpolines or IBT has jump
++ * tables disabled so the check can be skipped altogether.
++ */
++ if (!IS_ENABLED(CONFIG_RETPOLINE) &&
++ !IS_ENABLED(CONFIG_X86_KERNEL_IBT) &&
++ insn_is_indirect_jump(&insn))
++ return 0;
++ if (insn_jump_into_range(&insn, paddr + INT3_INSN_SIZE,
+ DISP32_SIZE))
+ return 0;
+ addr += insn.length;
+diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
+index b70670a985978..77a9316da4357 100644
+--- a/arch/x86/kernel/static_call.c
++++ b/arch/x86/kernel/static_call.c
+@@ -186,6 +186,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform);
+ */
+ bool __static_call_fixup(void *tramp, u8 op, void *dest)
+ {
++ unsigned long addr = (unsigned long)tramp;
++ /*
++ * Not all .return_sites are a static_call trampoline (most are not).
++ * Check if the 3 bytes after the return are still kernel text, if not,
++ * then this definitely is not a trampoline and we need not worry
++ * further.
++ *
++ * This avoids the memcmp() below tripping over pagefaults etc..
++ */
++ if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) &&
++ !kernel_text_address(addr + 7))
++ return false;
++
+ if (memcmp(tramp+5, tramp_ud, 3)) {
+ /* Not a trampoline site, not our problem. */
+ return false;
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 1885326a8f659..4a817d20ce3bb 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -206,8 +206,6 @@ DEFINE_IDTENTRY(exc_divide_error)
+ {
+ do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE,
+ FPE_INTDIV, error_get_trap_addr(regs));
+-
+- amd_clear_divider();
+ }
+
+ DEFINE_IDTENTRY(exc_overflow)
+diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
+index bac2e2949f01d..83d41c2601d7b 100644
+--- a/arch/x86/kernel/vmlinux.lds.S
++++ b/arch/x86/kernel/vmlinux.lds.S
+@@ -133,27 +133,25 @@ SECTIONS
+ KPROBES_TEXT
+ SOFTIRQENTRY_TEXT
+ #ifdef CONFIG_RETPOLINE
+- __indirect_thunk_start = .;
+- *(.text.__x86.indirect_thunk)
+- *(.text.__x86.return_thunk)
+- __indirect_thunk_end = .;
++ *(.text..__x86.indirect_thunk)
++ *(.text..__x86.return_thunk)
+ #endif
+ STATIC_CALL_TEXT
+
+ ALIGN_ENTRY_TEXT_BEGIN
+ #ifdef CONFIG_CPU_SRSO
+- *(.text.__x86.rethunk_untrain)
++ *(.text..__x86.rethunk_untrain)
+ #endif
+
+ ENTRY_TEXT
+
+ #ifdef CONFIG_CPU_SRSO
+ /*
+- * See the comment above srso_untrain_ret_alias()'s
++ * See the comment above srso_alias_untrain_ret()'s
+ * definition.
+ */
+- . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
+- *(.text.__x86.rethunk_safe)
++ . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20);
++ *(.text..__x86.rethunk_safe)
+ #endif
+ ALIGN_ENTRY_TEXT_END
+ *(.gnu.warning)
+@@ -522,8 +520,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+ "fixed_percpu_data is not at start of per-cpu area");
+ #endif
+
+- #ifdef CONFIG_RETHUNK
+-. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned");
++#ifdef CONFIG_RETHUNK
++. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
+ . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
+ #endif
+
+@@ -538,8 +536,8 @@ INIT_PER_CPU(irq_stack_backing_store);
+ * Instead do: (A | B) - (A & B) in order to compute the XOR
+ * of the two function addresses:
+ */
+-. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) -
+- (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
++. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) -
++ (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)),
+ "SRSO function pair won't alias");
+ #endif
+
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index af7b968f55703..c3b557aca2494 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -4034,6 +4034,8 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu, bool spec_ctrl_in
+
+ guest_state_enter_irqoff();
+
++ amd_clear_divider();
++
+ if (sev_es_guest(vcpu->kvm))
+ __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted);
+ else
+diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
+index 265452fc9ebe9..ef2ebabb059c8 100644
+--- a/arch/x86/kvm/svm/vmenter.S
++++ b/arch/x86/kvm/svm/vmenter.S
+@@ -222,10 +222,7 @@ SYM_FUNC_START(__svm_vcpu_run)
+ * because interrupt handlers won't sanitize 'ret' if the return is
+ * from the kernel.
+ */
+- UNTRAIN_RET
+-
+- /* SRSO */
+- ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT
++ UNTRAIN_RET_VM
+
+ /*
+ * Clear all general purpose registers except RSP and RAX to prevent
+@@ -362,7 +359,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
+ * because interrupt handlers won't sanitize RET if the return is
+ * from the kernel.
+ */
+- UNTRAIN_RET
++ UNTRAIN_RET_VM
+
+ /* "Pop" @spec_ctrl_intercepted. */
+ pop %_ASM_BX
+diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
+index 2cff585f22f29..cd86aeb5fdd3e 100644
+--- a/arch/x86/lib/retpoline.S
++++ b/arch/x86/lib/retpoline.S
+@@ -13,7 +13,7 @@
+ #include <asm/frame.h>
+ #include <asm/nops.h>
+
+- .section .text.__x86.indirect_thunk
++ .section .text..__x86.indirect_thunk
+
+
+ .macro POLINE reg
+@@ -133,75 +133,106 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
+ #ifdef CONFIG_RETHUNK
+
+ /*
+- * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at
++ * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
+ * special addresses:
+ *
+- * - srso_untrain_ret_alias() is 2M aligned
+- * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14
++ * - srso_alias_untrain_ret() is 2M aligned
++ * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14
+ * and 20 in its virtual address are set (while those bits in the
+- * srso_untrain_ret_alias() function are cleared).
++ * srso_alias_untrain_ret() function are cleared).
+ *
+ * This guarantees that those two addresses will alias in the branch
+ * target buffer of Zen3/4 generations, leading to any potential
+ * poisoned entries at that BTB slot to get evicted.
+ *
+- * As a result, srso_safe_ret_alias() becomes a safe return.
++ * As a result, srso_alias_safe_ret() becomes a safe return.
+ */
+ #ifdef CONFIG_CPU_SRSO
+- .section .text.__x86.rethunk_untrain
++ .section .text..__x86.rethunk_untrain
+
+-SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++ UNWIND_HINT_FUNC
+ ANNOTATE_NOENDBR
+ ASM_NOP2
+ lfence
+- jmp __x86_return_thunk
+-SYM_FUNC_END(srso_untrain_ret_alias)
+-__EXPORT_THUNK(srso_untrain_ret_alias)
+-
+- .section .text.__x86.rethunk_safe
++ jmp srso_alias_return_thunk
++SYM_FUNC_END(srso_alias_untrain_ret)
++__EXPORT_THUNK(srso_alias_untrain_ret)
++
++ .section .text..__x86.rethunk_safe
++#else
++/* dummy definition for alternatives */
++SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++ ANNOTATE_UNRET_SAFE
++ ret
++ int3
++SYM_FUNC_END(srso_alias_untrain_ret)
+ #endif
+
+-/* Needs a definition for the __x86_return_thunk alternative below. */
+-SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE)
+-#ifdef CONFIG_CPU_SRSO
+- add $8, %_ASM_SP
++SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
++ lea 8(%_ASM_SP), %_ASM_SP
+ UNWIND_HINT_FUNC
+-#endif
+ ANNOTATE_UNRET_SAFE
+ ret
+ int3
+-SYM_FUNC_END(srso_safe_ret_alias)
++SYM_FUNC_END(srso_alias_safe_ret)
+
+- .section .text.__x86.return_thunk
++ .section .text..__x86.return_thunk
++
++SYM_CODE_START(srso_alias_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
++ call srso_alias_safe_ret
++ ud2
++SYM_CODE_END(srso_alias_return_thunk)
++
++/*
++ * Some generic notes on the untraining sequences:
++ *
++ * They are interchangeable when it comes to flushing potentially wrong
++ * RET predictions from the BTB.
++ *
++ * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the
++ * Retbleed sequence because the return sequence done there
++ * (srso_safe_ret()) is longer and the return sequence must fully nest
++ * (end before) the untraining sequence. Therefore, the untraining
++ * sequence must fully overlap the return sequence.
++ *
++ * Regarding alignment - the instructions which need to be untrained,
++ * must all start at a cacheline boundary for Zen1/2 generations. That
++ * is, instruction sequences starting at srso_safe_ret() and
++ * the respective instruction sequences at retbleed_return_thunk()
++ * must start at a cacheline boundary.
++ */
+
+ /*
+ * Safety details here pertain to the AMD Zen{1,2} microarchitecture:
+- * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for
++ * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for
+ * alignment within the BTB.
+- * 2) The instruction at zen_untrain_ret must contain, and not
++ * 2) The instruction at retbleed_untrain_ret must contain, and not
+ * end with, the 0xc3 byte of the RET.
+ * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread
+ * from re-poisioning the BTB prediction.
+ */
+ .align 64
+- .skip 64 - (__ret - zen_untrain_ret), 0xcc
+-SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
++ .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
++SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ ANNOTATE_NOENDBR
+ /*
+- * As executed from zen_untrain_ret, this is:
++ * As executed from retbleed_untrain_ret, this is:
+ *
+ * TEST $0xcc, %bl
+ * LFENCE
+- * JMP __x86_return_thunk
++ * JMP retbleed_return_thunk
+ *
+ * Executing the TEST instruction has a side effect of evicting any BTB
+ * prediction (potentially attacker controlled) attached to the RET, as
+- * __x86_return_thunk + 1 isn't an instruction boundary at the moment.
++ * retbleed_return_thunk + 1 isn't an instruction boundary at the moment.
+ */
+ .byte 0xf6
+
+ /*
+- * As executed from __x86_return_thunk, this is a plain RET.
++ * As executed from retbleed_return_thunk, this is a plain RET.
+ *
+ * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8.
+ *
+@@ -213,13 +244,13 @@ SYM_START(zen_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ * With SMT enabled and STIBP active, a sibling thread cannot poison
+ * RET's prediction to a type of its choice, but can evict the
+ * prediction due to competitive sharing. If the prediction is
+- * evicted, __x86_return_thunk will suffer Straight Line Speculation
++ * evicted, retbleed_return_thunk will suffer Straight Line Speculation
+ * which will be contained safely by the INT3.
+ */
+-SYM_INNER_LABEL(__ret, SYM_L_GLOBAL)
++SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL)
+ ret
+ int3
+-SYM_CODE_END(__ret)
++SYM_CODE_END(retbleed_return_thunk)
+
+ /*
+ * Ensure the TEST decoding / BTB invalidation is complete.
+@@ -230,16 +261,16 @@ SYM_CODE_END(__ret)
+ * Jump back and execute the RET in the middle of the TEST instruction.
+ * INT3 is for SLS protection.
+ */
+- jmp __ret
++ jmp retbleed_return_thunk
+ int3
+-SYM_FUNC_END(zen_untrain_ret)
+-__EXPORT_THUNK(zen_untrain_ret)
++SYM_FUNC_END(retbleed_untrain_ret)
++__EXPORT_THUNK(retbleed_untrain_ret)
+
+ /*
+- * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret()
++ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+- * movabs $0xccccccc308c48348,%rax
++ * movabs $0xccccc30824648d48,%rax
+ *
+ * and when the return thunk executes the inner label srso_safe_ret()
+ * later, it is a stack manipulation and a RET which is mispredicted and
+@@ -251,22 +282,44 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+ ANNOTATE_NOENDBR
+ .byte 0x48, 0xb8
+
++/*
++ * This forces the function return instruction to speculate into a trap
++ * (UD2 in srso_return_thunk() below). This RET will then mispredict
++ * and execution will continue at the return site read from the top of
++ * the stack.
++ */
+ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+- add $8, %_ASM_SP
++ lea 8(%_ASM_SP), %_ASM_SP
+ ret
+ int3
+ int3
+- int3
++ /* end of movabs */
+ lfence
+ call srso_safe_ret
+- int3
++ ud2
+ SYM_CODE_END(srso_safe_ret)
+ SYM_FUNC_END(srso_untrain_ret)
+ __EXPORT_THUNK(srso_untrain_ret)
+
+-SYM_FUNC_START(__x86_return_thunk)
+- ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \
+- "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS
++SYM_CODE_START(srso_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
++ call srso_safe_ret
++ ud2
++SYM_CODE_END(srso_return_thunk)
++
++SYM_FUNC_START(entry_untrain_ret)
++ ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
++ "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
++ "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
++SYM_FUNC_END(entry_untrain_ret)
++__EXPORT_THUNK(entry_untrain_ret)
++
++SYM_CODE_START(__x86_return_thunk)
++ UNWIND_HINT_FUNC
++ ANNOTATE_NOENDBR
++ ANNOTATE_UNRET_SAFE
++ ret
+ int3
+ SYM_CODE_END(__x86_return_thunk)
+ EXPORT_SYMBOL(__x86_return_thunk)
+diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
+index fc49be622e05b..9faafcd10e177 100644
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -136,7 +136,9 @@ static void blkg_free_workfn(struct work_struct *work)
+ blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+ if (blkg->parent)
+ blkg_put(blkg->parent);
++ spin_lock_irq(&q->queue_lock);
+ list_del_init(&blkg->q_node);
++ spin_unlock_irq(&q->queue_lock);
+ mutex_unlock(&q->blkcg_mutex);
+
+ blk_put_queue(q);
+diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
+index ad9844c5b40cb..e6468eab2681e 100644
+--- a/block/blk-crypto-fallback.c
++++ b/block/blk-crypto-fallback.c
+@@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot {
+ struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
+ } *blk_crypto_keyslots;
+
+-static struct blk_crypto_profile blk_crypto_fallback_profile;
++static struct blk_crypto_profile *blk_crypto_fallback_profile;
+ static struct workqueue_struct *blk_crypto_wq;
+ static mempool_t *blk_crypto_bounce_page_pool;
+ static struct bio_set crypto_bio_split;
+@@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
+ * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
+ * this bio's algorithm and key.
+ */
+- blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
++ blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
+ bc->bc_key, &slot);
+ if (blk_st != BLK_STS_OK) {
+ src_bio->bi_status = blk_st;
+@@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
+ * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
+ * this bio's algorithm and key.
+ */
+- blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
++ blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
+ bc->bc_key, &slot);
+ if (blk_st != BLK_STS_OK) {
+ bio->bi_status = blk_st;
+@@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+ return false;
+ }
+
+- if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
++ if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
+ &bc->bc_key->crypto_cfg)) {
+ bio->bi_status = BLK_STS_NOTSUPP;
+ return false;
+@@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+
+ int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
+ {
+- return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
++ return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
+ }
+
+ static bool blk_crypto_fallback_inited;
+@@ -534,7 +534,6 @@ static int blk_crypto_fallback_init(void)
+ {
+ int i;
+ int err;
+- struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
+
+ if (blk_crypto_fallback_inited)
+ return 0;
+@@ -545,18 +544,27 @@ static int blk_crypto_fallback_init(void)
+ if (err)
+ goto out;
+
+- err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
+- if (err)
++ /* Dynamic allocation is needed because of lockdep_register_key(). */
++ blk_crypto_fallback_profile =
++ kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
++ if (!blk_crypto_fallback_profile) {
++ err = -ENOMEM;
+ goto fail_free_bioset;
++ }
++
++ err = blk_crypto_profile_init(blk_crypto_fallback_profile,
++ blk_crypto_num_keyslots);
++ if (err)
++ goto fail_free_profile;
+ err = -ENOMEM;
+
+- profile->ll_ops = blk_crypto_fallback_ll_ops;
+- profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
++ blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
++ blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+
+ /* All blk-crypto modes have a crypto API fallback. */
+ for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
+- profile->modes_supported[i] = 0xFFFFFFFF;
+- profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
++ blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
++ blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+
+ blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
+ WQ_UNBOUND | WQ_HIGHPRI |
+@@ -597,7 +605,9 @@ fail_free_keyslots:
+ fail_free_wq:
+ destroy_workqueue(blk_crypto_wq);
+ fail_destroy_profile:
+- blk_crypto_profile_destroy(profile);
++ blk_crypto_profile_destroy(blk_crypto_fallback_profile);
++fail_free_profile:
++ kfree(blk_crypto_fallback_profile);
+ fail_free_bioset:
+ bioset_exit(&crypto_bio_split);
+ out:
+diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c
+index fabfc501ef543..a39dd346a1678 100644
+--- a/drivers/accel/habanalabs/common/device.c
++++ b/drivers/accel/habanalabs/common/device.c
+@@ -981,6 +981,18 @@ static void device_early_fini(struct hl_device *hdev)
+ hdev->asic_funcs->early_fini(hdev);
+ }
+
++static bool is_pci_link_healthy(struct hl_device *hdev)
++{
++ u16 vendor_id;
++
++ if (!hdev->pdev)
++ return false;
++
++ pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);
++
++ return (vendor_id == PCI_VENDOR_ID_HABANALABS);
++}
++
+ static void hl_device_heartbeat(struct work_struct *work)
+ {
+ struct hl_device *hdev = container_of(work, struct hl_device,
+@@ -995,7 +1007,8 @@ static void hl_device_heartbeat(struct work_struct *work)
+ goto reschedule;
+
+ if (hl_device_operational(hdev, NULL))
+- dev_err(hdev->dev, "Device heartbeat failed!\n");
++ dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
++ is_pci_link_healthy(hdev) ? "healthy" : "broken");
+
+ info.err_type = HL_INFO_FW_HEARTBEAT_ERR;
+ info.event_mask = &event_mask;
+diff --git a/drivers/accel/habanalabs/common/habanalabs.h b/drivers/accel/habanalabs/common/habanalabs.h
+index eaae69a9f8178..7f5d1b6e3fb08 100644
+--- a/drivers/accel/habanalabs/common/habanalabs.h
++++ b/drivers/accel/habanalabs/common/habanalabs.h
+@@ -36,6 +36,8 @@
+ struct hl_device;
+ struct hl_fpriv;
+
++#define PCI_VENDOR_ID_HABANALABS 0x1da3
++
+ /* Use upper bits of mmap offset to store habana driver specific information.
+ * bits[63:59] - Encode mmap type
+ * bits[45:0] - mmap offset value
+diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c
+index d9df64e75f33a..70fb2df9a93b8 100644
+--- a/drivers/accel/habanalabs/common/habanalabs_drv.c
++++ b/drivers/accel/habanalabs/common/habanalabs_drv.c
+@@ -13,6 +13,7 @@
+
+ #include <linux/pci.h>
+ #include <linux/module.h>
++#include <linux/vmalloc.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/habanalabs.h>
+@@ -54,8 +55,6 @@ module_param(boot_error_status_mask, ulong, 0444);
+ MODULE_PARM_DESC(boot_error_status_mask,
+ "Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");
+
+-#define PCI_VENDOR_ID_HABANALABS 0x1da3
+-
+ #define PCI_IDS_GOYA 0x0001
+ #define PCI_IDS_GAUDI 0x1000
+ #define PCI_IDS_GAUDI_SEC 0x1010
+@@ -220,6 +219,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
+
+ hl_debugfs_add_file(hpriv);
+
++ vfree(hdev->captured_err_info.page_fault_info.user_mappings);
+ memset(&hdev->captured_err_info, 0, sizeof(hdev->captured_err_info));
+ atomic_set(&hdev->captured_err_info.cs_timeout.write_enable, 1);
+ hdev->captured_err_info.undef_opcode.write_enable = true;
+diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
+index cfbc92da426fa..388abd40024ba 100644
+--- a/drivers/accel/qaic/qaic_control.c
++++ b/drivers/accel/qaic/qaic_control.c
+@@ -392,18 +392,31 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
+ struct qaic_manage_trans_dma_xfer *in_trans,
+ struct ioctl_resources *resources, struct dma_xfer *xfer)
+ {
++ u64 xfer_start_addr, remaining, end, total;
+ unsigned long need_pages;
+ struct page **page_list;
+ unsigned long nr_pages;
+ struct sg_table *sgt;
+- u64 xfer_start_addr;
+ int ret;
+ int i;
+
+- xfer_start_addr = in_trans->addr + resources->xferred_dma_size;
++ if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr))
++ return -EINVAL;
+
+- need_pages = DIV_ROUND_UP(in_trans->size + offset_in_page(xfer_start_addr) -
+- resources->xferred_dma_size, PAGE_SIZE);
++ if (in_trans->size < resources->xferred_dma_size)
++ return -EINVAL;
++ remaining = in_trans->size - resources->xferred_dma_size;
++ if (remaining == 0)
++ return 0;
++
++ if (check_add_overflow(xfer_start_addr, remaining, &end))
++ return -EINVAL;
++
++ total = remaining + offset_in_page(xfer_start_addr);
++ if (total >= SIZE_MAX)
++ return -EINVAL;
++
++ need_pages = DIV_ROUND_UP(total, PAGE_SIZE);
+
+ nr_pages = need_pages;
+
+@@ -435,7 +448,7 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
+
+ ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
+ offset_in_page(xfer_start_addr),
+- in_trans->size - resources->xferred_dma_size, GFP_KERNEL);
++ remaining, GFP_KERNEL);
+ if (ret) {
+ ret = -ENOMEM;
+ goto free_sgt;
+@@ -566,9 +579,6 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
+ QAIC_MANAGE_EXT_MSG_LENGTH)
+ return -ENOMEM;
+
+- if (in_trans->addr + in_trans->size < in_trans->addr || !in_trans->size)
+- return -EINVAL;
+-
+ xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
+ if (!xfer)
+ return -ENOMEM;
+diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
+index e9a1cb779b305..6b6d981a71be7 100644
+--- a/drivers/accel/qaic/qaic_data.c
++++ b/drivers/accel/qaic/qaic_data.c
+@@ -1021,6 +1021,7 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
+ bo->dbc = dbc;
+ srcu_read_unlock(&dbc->ch_lock, rcu_id);
+ drm_gem_object_put(obj);
++ kfree(slice_ent);
+ srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
+ srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index 50e23762ec5e9..025e803ba55c2 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -613,6 +613,9 @@ static const struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 21fe9854703f9..4cb23b9e06ea4 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -2142,6 +2142,8 @@ static int sysc_reset(struct sysc *ddata)
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
++ /* Flush posted write */
++ sysc_val = sysc_read_sysconfig(ddata);
+ }
+
+ if (ddata->cfg.srst_udelay)
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index 538bd677c254a..7a4d1a478e33e 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -479,7 +479,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
+ struct sk_buff *skb, u16 source_node_id,
+ bool is_broadcast, u16 ether_type)
+ {
+- int status;
++ int status, len;
+
+ switch (ether_type) {
+ case ETH_P_ARP:
+@@ -533,13 +533,15 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
+ }
+ skb->protocol = protocol;
+ }
++
++ len = skb->len;
+ status = netif_rx(skb);
+ if (status == NET_RX_DROP) {
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+ } else {
+ net->stats.rx_packets++;
+- net->stats.rx_bytes += skb->len;
++ net->stats.rx_bytes += len;
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index a989ae72a58a9..0c023269aadaa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -189,7 +189,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ uint64_t *chunk_array_user;
+ uint64_t *chunk_array;
+ uint32_t uf_offset = 0;
+- unsigned int size;
++ size_t size;
+ int ret;
+ int i;
+
+@@ -1625,15 +1625,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+ continue;
+
+ r = dma_fence_wait_timeout(fence, true, timeout);
++ if (r > 0 && fence->error)
++ r = fence->error;
++
+ dma_fence_put(fence);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+-
+- if (fence->error)
+- return fence->error;
+ }
+
+ memset(wait, 0, sizeof(*wait));
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+index c6d4d41c4393e..23d054526e7c7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c
+@@ -106,3 +106,41 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ ttm_eu_backoff_reservation(&ticket, &list);
+ return 0;
+ }
++
++int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
++ struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
++ uint64_t csa_addr)
++{
++ struct ww_acquire_ctx ticket;
++ struct list_head list;
++ struct amdgpu_bo_list_entry pd;
++ struct ttm_validate_buffer csa_tv;
++ int r;
++
++ INIT_LIST_HEAD(&list);
++ INIT_LIST_HEAD(&csa_tv.head);
++ csa_tv.bo = &bo->tbo;
++ csa_tv.num_shared = 1;
++
++ list_add(&csa_tv.head, &list);
++ amdgpu_vm_get_pd_bo(vm, &list, &pd);
++
++ r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
++ if (r) {
++ DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r);
++ return r;
++ }
++
++ r = amdgpu_vm_bo_unmap(adev, bo_va, csa_addr);
++ if (r) {
++ DRM_ERROR("failed to do bo_unmap on static CSA, err=%d\n", r);
++ ttm_eu_backoff_reservation(&ticket, &list);
++ return r;
++ }
++
++ amdgpu_vm_bo_del(adev, bo_va);
++
++ ttm_eu_backoff_reservation(&ticket, &list);
++
++ return 0;
++}
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
+index 524b4437a0217..7dfc1f2012ebf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.h
+@@ -34,6 +34,9 @@ int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo
+ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
+ uint64_t csa_addr, uint32_t size);
++int amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm,
++ struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
++ uint64_t csa_addr);
+ void amdgpu_free_static_csa(struct amdgpu_bo **bo);
+
+ #endif
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 44a902d9b5c7b..3108f5219cf3b 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4250,6 +4250,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+
+ cancel_delayed_work_sync(&adev->delayed_init_work);
++ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
+ amdgpu_ras_suspend(adev);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index a7d250809da99..b9ba01b4c9925 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -555,6 +555,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
++/**
++ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
++ * fence driver interrupts need to be restored.
++ *
++ * @ring: ring to be checked
++ *
++ * Interrupts for rings that belong to GFX IP don't need to be restored
++ * when the target power state is s0ix.
++ *
++ * Return true if interrupts need to be restored, false otherwise.
++ */
++static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++ bool is_gfx_power_domain = false;
++
++ switch (ring->funcs->type) {
++ case AMDGPU_RING_TYPE_SDMA:
++ /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
++ if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
++ is_gfx_power_domain = true;
++ break;
++ case AMDGPU_RING_TYPE_GFX:
++ case AMDGPU_RING_TYPE_COMPUTE:
++ case AMDGPU_RING_TYPE_KIQ:
++ case AMDGPU_RING_TYPE_MES:
++ is_gfx_power_domain = true;
++ break;
++ default:
++ break;
++ }
++
++ return !(adev->in_s0ix && is_gfx_power_domain);
++}
++
+ /**
+ * amdgpu_fence_driver_hw_fini - tear down the fence driver
+ * for all possible rings.
+@@ -583,7 +618,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
+ amdgpu_fence_driver_force_completion(ring);
+
+ if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
+- ring->fence_drv.irq_src)
++ ring->fence_drv.irq_src &&
++ amdgpu_fence_need_ring_interrupt_restore(ring))
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+
+@@ -658,7 +694,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
+ continue;
+
+ /* enable the interrupt */
+- if (ring->fence_drv.irq_src)
++ if (ring->fence_drv.irq_src &&
++ amdgpu_fence_need_ring_interrupt_restore(ring))
+ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index f3f541ba0acaa..bff5b6eac39b5 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -589,15 +589,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+
+ if (adev->gfx.gfx_off_req_count == 0 &&
+ !adev->gfx.gfx_off_state) {
+- /* If going to s2idle, no need to wait */
+- if (adev->in_s0ix) {
+- if (!amdgpu_dpm_set_powergating_by_smu(adev,
+- AMD_IP_BLOCK_TYPE_GFX, true))
+- adev->gfx.gfx_off_state = true;
+- } else {
+- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
++ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ delay);
+- }
+ }
+ } else {
+ if (adev->gfx.gfx_off_req_count == 0) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index fafebec5b7b66..9581c020d815d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -124,7 +124,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+ continue;
+
+ for (k = 0; k < src->num_types; ++k) {
+- atomic_set(&src->enabled_types[k], 0);
+ r = src->funcs->set(adev, src, k,
+ AMDGPU_IRQ_STATE_DISABLE);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+index 0efb38539d70c..724e80c192973 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+@@ -1284,12 +1284,12 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
+ if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_VCE) != NULL)
+ amdgpu_vce_free_handles(adev, file_priv);
+
+- if (amdgpu_mcbp) {
+- /* TODO: how to handle reserve failure */
+- BUG_ON(amdgpu_bo_reserve(adev->virt.csa_obj, true));
+- amdgpu_vm_bo_del(adev, fpriv->csa_va);
++ if (fpriv->csa_va) {
++ uint64_t csa_addr = amdgpu_csa_vaddr(adev) & AMDGPU_GMC_HOLE_MASK;
++
++ WARN_ON(amdgpu_unmap_static_csa(adev, &fpriv->vm, adev->virt.csa_obj,
++ fpriv->csa_va, csa_addr));
+ fpriv->csa_va = NULL;
+- amdgpu_bo_unreserve(adev->virt.csa_obj);
+ }
+
+ pasid = fpriv->vm.pasid;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index db820331f2c61..39e54685653cc 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -520,6 +520,8 @@ static int psp_sw_fini(void *handle)
+ kfree(cmd);
+ cmd = NULL;
+
++ psp_free_shared_bufs(psp);
++
+ if (psp->km_ring.ring_mem)
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &psp->km_ring.ring_mem_mc_addr,
+@@ -2657,8 +2659,6 @@ static int psp_hw_fini(void *handle)
+
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
+
+- psp_free_shared_bufs(psp);
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index 49de3a3eebc78..de04606c2061e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -361,6 +361,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
++ } else {
++ kfree(ring->fence_drv.fences);
+ }
+
+ dma_fence_put(ring->vmid_wait);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index 23f52150ebef4..fd029d91a3402 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1367,6 +1367,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
+
+ bo_va->ref_count = 1;
++ bo_va->last_pt_update = dma_fence_get_stub();
+ INIT_LIST_HEAD(&bo_va->valids);
+ INIT_LIST_HEAD(&bo_va->invalids);
+
+@@ -2088,7 +2089,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ else
+ vm->update_funcs = &amdgpu_vm_sdma_funcs;
+- vm->last_update = NULL;
++
++ vm->last_update = dma_fence_get_stub();
+ vm->last_unlocked = dma_fence_get_stub();
+ vm->last_tlb_flush = dma_fence_get_stub();
+
+@@ -2213,7 +2215,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ goto unreserve_bo;
+
+ dma_fence_put(vm->last_update);
+- vm->last_update = NULL;
++ vm->last_update = dma_fence_get_stub();
+ vm->is_compute_context = true;
+
+ /* Free the shadow bo for compute VM */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index bdce367544368..4dd9a85f5c724 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -1653,11 +1653,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
+ if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0)
+ init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true;
+
+- /* Disable SubVP + DRR config by default */
+- init_data.flags.disable_subvp_drr = true;
+- if (amdgpu_dc_feature_mask & DC_ENABLE_SUBVP_DRR)
+- init_data.flags.disable_subvp_drr = false;
+-
+ init_data.flags.seamless_boot_edp_requested = false;
+
+ if (check_seamless_boot_capability(adev)) {
+diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+index 8d9444db092ab..eea103908b09f 100644
+--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
++++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+@@ -233,6 +233,32 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
+ DC_FP_END();
+ }
+
++static void dcn32_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
++ struct dc_state *context,
++ int ref_dtbclk_khz)
++{
++ struct dccg *dccg = clk_mgr->dccg;
++ uint32_t tg_mask = 0;
++ int i;
++
++ for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) {
++ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
++ struct dtbclk_dto_params dto_params = {0};
++
++ /* use mask to program DTO once per tg */
++ if (pipe_ctx->stream_res.tg &&
++ !(tg_mask & (1 << pipe_ctx->stream_res.tg->inst))) {
++ tg_mask |= (1 << pipe_ctx->stream_res.tg->inst);
++
++ dto_params.otg_inst = pipe_ctx->stream_res.tg->inst;
++ dto_params.ref_dtbclk_khz = ref_dtbclk_khz;
++
++ dccg->funcs->set_dtbclk_dto(clk_mgr->dccg, &dto_params);
++ //dccg->funcs->set_audio_dtbclk_dto(clk_mgr->dccg, &dto_params);
++ }
++ }
++}
++
+ /* Since DPPCLK request to PMFW needs to be exact (due to DPP DTO programming),
+ * update DPPCLK to be the exact frequency that will be set after the DPPCLK
+ * divider is updated. This will prevent rounding issues that could cause DPP
+@@ -570,6 +596,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
+ /* DCCG requires KHz precision for DTBCLK */
+ clk_mgr_base->clks.ref_dtbclk_khz =
+ dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
++ dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
+ }
+
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+index 1d8c5805ef20c..77ef474ced071 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+@@ -712,7 +712,7 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
++ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+index 4c2fdfea162f5..65c1d754e2d6b 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+@@ -47,6 +47,14 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+ {
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
++ if (dccg->dpp_clock_gated[dpp_inst]) {
++ /*
++ * Do not update the DPPCLK DTO if the clock is stopped.
++ * It is treated the same as if the pipe itself were in PG.
++ */
++ return;
++ }
++
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+index de7bfba2c1798..afeb9f4d53441 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+@@ -322,6 +322,9 @@ static void dccg314_dpp_root_clock_control(
+ {
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
++ if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
++ return;
++
+ if (clock_on) {
+ /* turn off the DTO and leave phase/modulo at max */
+ REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0);
+@@ -335,6 +338,8 @@ static void dccg314_dpp_root_clock_control(
+ DPPCLK0_DTO_PHASE, 0,
+ DPPCLK0_DTO_MODULO, 1);
+ }
++
++ dccg->dpp_clock_gated[dpp_inst] = !clock_on;
+ }
+
+ static const struct dccg_funcs dccg314_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index abeeede38fb39..653b5f15d4ca7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -921,6 +921,22 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .afmt = true,
+ }
+ },
++
++ .root_clock_optimization = {
++ .bits = {
++ .dpp = true,
++ .dsc = false,
++ .hdmistream = false,
++ .hdmichar = false,
++ .dpstream = false,
++ .symclk32_se = false,
++ .symclk32_le = false,
++ .symclk_fe = false,
++ .physymclk = false,
++ .dpiasymclk = false,
++ }
++ },
++
+ .seamless_boot_odm_combine = true
+ };
+
+@@ -1920,6 +1936,10 @@ static bool dcn314_resource_construct(
+ dc->debug = debug_defaults_drv;
+ else
+ dc->debug = debug_defaults_diags;
++
++ /* Disable root clock optimization */
++ dc->debug.root_clock_optimization.u32All = 0;
++
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+index 7661f8946aa31..9ec767ebf5d16 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+@@ -1097,10 +1097,6 @@ void dcn20_calculate_dlg_params(struct dc *dc,
+ context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz =
+ pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000;
+ context->res_ctx.pipe_ctx[i].pipe_dlg_param = pipes[pipe_idx].pipe.dest;
+- if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
+- dcn20_adjust_freesync_v_startup(
+- &context->res_ctx.pipe_ctx[i].stream->timing,
+- &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
+
+ pipe_idx++;
+ }
+@@ -1914,6 +1910,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
+ int vlevel = 0;
+ int pipe_split_from[MAX_PIPES];
+ int pipe_cnt = 0;
++ int i = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+@@ -1937,6 +1934,15 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
+ dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ if (!context->res_ctx.pipe_ctx[i].stream)
++ continue;
++ if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
++ dcn20_adjust_freesync_v_startup(
++ &context->res_ctx.pipe_ctx[i].stream->timing,
++ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
++ }
++
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+@@ -2209,6 +2215,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
+ int vlevel = 0;
+ int pipe_split_from[MAX_PIPES];
+ int pipe_cnt = 0;
++ int i = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+@@ -2237,6 +2244,15 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc,
+ dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate);
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+
++ for (i = 0; i < dc->res_pool->pipe_count; i++) {
++ if (!context->res_ctx.pipe_ctx[i].stream)
++ continue;
++ if (context->res_ctx.pipe_ctx[i].stream->adaptive_sync_infopacket.valid)
++ dcn20_adjust_freesync_v_startup(
++ &context->res_ctx.pipe_ctx[i].stream->timing,
++ &context->res_ctx.pipe_ctx[i].pipe_dlg_param.vstartup_start);
++ }
++
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index d8b4119820bfc..1bfda6e2b3070 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -880,10 +880,6 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context, struc
+ int16_t stretched_drr_us = 0;
+ int16_t drr_stretched_vblank_us = 0;
+ int16_t max_vblank_mallregion = 0;
+- const struct dc_config *config = &dc->config;
+-
+- if (config->disable_subvp_drr)
+- return false;
+
+ // Find SubVP pipe
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index d75248b6cae99..9a5150e96017a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -811,7 +811,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ v->SwathHeightC[k],
+ TWait,
+ (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
+- v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
++ v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ /* Output */
+ &v->DSTXAfterScaler[k],
+@@ -3311,7 +3311,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k], v->TWait,
+- (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
++ (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+
+ /* Output */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+index d98e36a9a09cc..c4745d63039bb 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+@@ -53,7 +53,7 @@
+ #define BPP_BLENDED_PIPE 0xffffffff
+
+ #define MEM_STROBE_FREQ_MHZ 1600
+-#define MIN_DCFCLK_FREQ_MHZ 200
++#define DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ 300
+ #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
+
+ struct display_mode_lib;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+index ad6acd1b34e1d..9651cccb084a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+@@ -68,6 +68,7 @@ struct dccg {
+ const struct dccg_funcs *funcs;
+ int pipe_dppclk_khz[MAX_PIPES];
+ int ref_dppclk;
++ bool dpp_clock_gated[MAX_PIPES];
+ //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */
+ //int audio_dtbclk_khz;/* TODO needs to be removed */
+ //int ref_dtbclk_khz;/* TODO needs to be removed */
+diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
+index e4a22c68517d1..f175e65b853a0 100644
+--- a/drivers/gpu/drm/amd/include/amd_shared.h
++++ b/drivers/gpu/drm/amd/include/amd_shared.h
+@@ -240,7 +240,6 @@ enum DC_FEATURE_MASK {
+ DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default
+ DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default
+ DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default
+- DC_ENABLE_SUBVP_DRR = (1 << 9), // 0x200, disabled by default
+ };
+
+ enum DC_DEBUG_MASK {
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index ea03e8d9a3f6c..818379276a582 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1573,9 +1573,9 @@ static int smu_disable_dpms(struct smu_context *smu)
+
+ /*
+ * For SMU 13.0.4/11, PMFW will handle the features disablement properly
+- * for gpu reset case. Driver involvement is unnecessary.
++ * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
+ */
+- if (amdgpu_in_reset(adev)) {
++ if (amdgpu_in_reset(adev) || adev->in_s0ix) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 11):
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 0cda3b276f611..f0800c0c5168c 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -588,7 +588,9 @@ err0_out:
+ return -ENOMEM;
+ }
+
+-static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
++static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu,
++ bool use_metrics_v3,
++ bool use_metrics_v2)
+ {
+ struct smu_table_context *smu_table= &smu->smu_table;
+ SmuMetricsExternal_t *metrics_ext =
+@@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
+ uint32_t throttler_status = 0;
+ int i;
+
+- if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+- (smu->smc_fw_version >= 0x3A4900)) {
++ if (use_metrics_v3) {
+ for (i = 0; i < THROTTLER_COUNT; i++)
+ throttler_status |=
+ (metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
+- } else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+- (smu->smc_fw_version >= 0x3A4300)) {
++ } else if (use_metrics_v2) {
+ for (i = 0; i < THROTTLER_COUNT; i++)
+ throttler_status |=
+ (metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0);
+@@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
+ metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_THROTTLER_STATUS:
+- *value = sienna_cichlid_get_throttler_status_locked(smu);
++ *value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
+ break;
+ case METRICS_CURR_FANSPEED:
+ *value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
+@@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
+ gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
+
+- gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
++ gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
+ sienna_cichlid_throttler_map);
+diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
+index 0454da505687b..e1a04461ba884 100644
+--- a/drivers/gpu/drm/drm_edid.c
++++ b/drivers/gpu/drm/drm_edid.c
+@@ -3424,6 +3424,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
+ connector->base.id, connector->name);
+ return NULL;
+ }
++ if (!(pt->misc & DRM_EDID_PT_SEPARATE_SYNC)) {
++ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Composite sync not supported\n",
++ connector->base.id, connector->name);
++ }
+
+ /* it is incorrect if hsync/vsync width is zero */
+ if (!hsync_pulse_width || !vsync_pulse_width) {
+@@ -3470,27 +3474,10 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
+ if (info->quirks & EDID_QUIRK_DETAILED_SYNC_PP) {
+ mode->flags |= DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC;
+ } else {
+- switch (pt->misc & DRM_EDID_PT_SYNC_MASK) {
+- case DRM_EDID_PT_ANALOG_CSYNC:
+- case DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC:
+- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Analog composite sync!\n",
+- connector->base.id, connector->name);
+- mode->flags |= DRM_MODE_FLAG_CSYNC | DRM_MODE_FLAG_NCSYNC;
+- break;
+- case DRM_EDID_PT_DIGITAL_CSYNC:
+- drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Digital composite sync!\n",
+- connector->base.id, connector->name);
+- mode->flags |= DRM_MODE_FLAG_CSYNC;
+- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+- DRM_MODE_FLAG_PCSYNC : DRM_MODE_FLAG_NCSYNC;
+- break;
+- case DRM_EDID_PT_DIGITAL_SEPARATE_SYNC:
+- mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
+- DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
+- mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
+- DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+- break;
+- }
++ mode->flags |= (pt->misc & DRM_EDID_PT_HSYNC_POSITIVE) ?
++ DRM_MODE_FLAG_PHSYNC : DRM_MODE_FLAG_NHSYNC;
++ mode->flags |= (pt->misc & DRM_EDID_PT_VSYNC_POSITIVE) ?
++ DRM_MODE_FLAG_PVSYNC : DRM_MODE_FLAG_NVSYNC;
+ }
+
+ set_size:
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index e12ba458636c1..5ee0479ae6de3 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -2752,7 +2752,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
+ __drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
+ &conn_state->base.base);
+
+- INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
++ intel_panel_init_alloc(&sdvo_connector->base);
+
+ return sdvo_connector;
+ }
+diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+index cc18e8f664864..78822331f1b7f 100644
+--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
++++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+@@ -470,12 +470,19 @@ int intel_guc_slpc_set_ignore_eff_freq(struct intel_guc_slpc *slpc, bool val)
+ ret = slpc_set_param(slpc,
+ SLPC_PARAM_IGNORE_EFFICIENT_FREQUENCY,
+ val);
+- if (ret)
++ if (ret) {
+ guc_probe_error(slpc_to_guc(slpc), "Failed to set efficient freq(%d): %pe\n",
+ val, ERR_PTR(ret));
+- else
++ } else {
+ slpc->ignore_eff_freq = val;
+
++ /* Set min to RPn when we disable efficient freq */
++ if (val)
++ ret = slpc_set_param(slpc,
++ SLPC_PARAM_GLOBAL_MIN_GT_UNSLICE_FREQ_MHZ,
++ slpc->min_freq);
++ }
++
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+ mutex_unlock(&slpc->lock);
+ return ret;
+@@ -602,9 +609,8 @@ static int slpc_set_softlimits(struct intel_guc_slpc *slpc)
+ return ret;
+
+ if (!slpc->min_freq_softlimit) {
+- ret = intel_guc_slpc_get_min_freq(slpc, &slpc->min_freq_softlimit);
+- if (unlikely(ret))
+- return ret;
++ /* Min softlimit is initialized to RPn */
++ slpc->min_freq_softlimit = slpc->min_freq;
+ slpc_to_gt(slpc)->defaults.min_freq = slpc->min_freq_softlimit;
+ } else {
+ return intel_guc_slpc_set_min_freq(slpc,
+@@ -755,6 +761,9 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
+ return ret;
+ }
+
++ /* Set cached value of ignore efficient freq */
++ intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
++
+ /* Revert SLPC min/max to softlimits if necessary */
+ ret = slpc_set_softlimits(slpc);
+ if (unlikely(ret)) {
+@@ -765,9 +774,6 @@ int intel_guc_slpc_enable(struct intel_guc_slpc *slpc)
+ /* Set cached media freq ratio mode */
+ intel_guc_slpc_set_media_ratio_mode(slpc, slpc->media_ratio_mode);
+
+- /* Set cached value of ignore efficient freq */
+- intel_guc_slpc_set_ignore_eff_freq(slpc, slpc->ignore_eff_freq);
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index a2e0033e8a260..622f6eb9a8bfd 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1408,8 +1408,7 @@ nouveau_connector_create(struct drm_device *dev,
+ ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
+ &nv_connector->conn);
+ if (ret) {
+- kfree(nv_connector);
+- return ERR_PTR(ret);
++ goto drm_conn_err;
+ }
+
+ ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug",
+@@ -1426,8 +1425,7 @@ nouveau_connector_create(struct drm_device *dev,
+ if (ret) {
+ nvif_event_dtor(&nv_connector->hpd);
+ nvif_conn_dtor(&nv_connector->conn);
+- kfree(nv_connector);
+- return ERR_PTR(ret);
++ goto drm_conn_err;
+ }
+ }
+ }
+@@ -1475,4 +1473,9 @@ nouveau_connector_create(struct drm_device *dev,
+
+ drm_connector_register(connector);
+ return connector;
++
++drm_conn_err:
++ drm_connector_cleanup(connector);
++ kfree(nv_connector);
++ return ERR_PTR(ret);
+ }
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index e02249b212c2a..cf6b146acc323 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -969,21 +969,21 @@ static const struct panel_desc auo_g104sn02 = {
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
+-static const struct drm_display_mode auo_g121ean01_mode = {
+- .clock = 66700,
+- .hdisplay = 1280,
+- .hsync_start = 1280 + 58,
+- .hsync_end = 1280 + 58 + 8,
+- .htotal = 1280 + 58 + 8 + 70,
+- .vdisplay = 800,
+- .vsync_start = 800 + 6,
+- .vsync_end = 800 + 6 + 4,
+- .vtotal = 800 + 6 + 4 + 10,
++static const struct display_timing auo_g121ean01_timing = {
++ .pixelclock = { 60000000, 74400000, 90000000 },
++ .hactive = { 1280, 1280, 1280 },
++ .hfront_porch = { 20, 50, 100 },
++ .hback_porch = { 20, 50, 100 },
++ .hsync_len = { 30, 100, 200 },
++ .vactive = { 800, 800, 800 },
++ .vfront_porch = { 2, 10, 25 },
++ .vback_porch = { 2, 10, 25 },
++ .vsync_len = { 4, 18, 50 },
+ };
+
+ static const struct panel_desc auo_g121ean01 = {
+- .modes = &auo_g121ean01_mode,
+- .num_modes = 1,
++ .timings = &auo_g121ean01_timing,
++ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 261,
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index ea993d7162e8c..307a890fde133 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -310,7 +310,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+ u32 domain,
+ size_t size,
+ struct qxl_surface *surf,
+- struct qxl_bo **qobj,
++ struct drm_gem_object **gobj,
+ uint32_t *handle);
+ void qxl_gem_object_free(struct drm_gem_object *gobj);
+ int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
+index d636ba6854513..17df5c7ccf691 100644
+--- a/drivers/gpu/drm/qxl/qxl_dumb.c
++++ b/drivers/gpu/drm/qxl/qxl_dumb.c
+@@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
+ {
+ struct qxl_device *qdev = to_qxl(dev);
+ struct qxl_bo *qobj;
++ struct drm_gem_object *gobj;
+ uint32_t handle;
+ int r;
+ struct qxl_surface surf;
+@@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
+
+ r = qxl_gem_object_create_with_handle(qdev, file_priv,
+ QXL_GEM_DOMAIN_CPU,
+- args->size, &surf, &qobj,
++ args->size, &surf, &gobj,
+ &handle);
+ if (r)
+ return r;
++ qobj = gem_to_qxl_bo(gobj);
+ qobj->is_dumb = true;
++ drm_gem_object_put(gobj);
+ args->pitch = pitch;
+ args->handle = handle;
+ return 0;
+diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
+index a08da0bd9098b..fc5e3763c3595 100644
+--- a/drivers/gpu/drm/qxl/qxl_gem.c
++++ b/drivers/gpu/drm/qxl/qxl_gem.c
+@@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
+ return 0;
+ }
+
++/*
++ * If the caller passed a valid gobj pointer, it is responsible for calling
++ * drm_gem_object_put() when it no longer needs to access the object.
++ *
++ * If gobj is NULL, it is handled internally.
++ */
+ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+ struct drm_file *file_priv,
+ u32 domain,
+ size_t size,
+ struct qxl_surface *surf,
+- struct qxl_bo **qobj,
++ struct drm_gem_object **gobj,
+ uint32_t *handle)
+ {
+- struct drm_gem_object *gobj;
+ int r;
++ struct drm_gem_object *local_gobj;
+
+- BUG_ON(!qobj);
+ BUG_ON(!handle);
+
+ r = qxl_gem_object_create(qdev, size, 0,
+ domain,
+ false, false, surf,
+- &gobj);
++ &local_gobj);
+ if (r)
+ return -ENOMEM;
+- r = drm_gem_handle_create(file_priv, gobj, handle);
++ r = drm_gem_handle_create(file_priv, local_gobj, handle);
+ if (r)
+ return r;
+- /* drop reference from allocate - handle holds it now */
+- *qobj = gem_to_qxl_bo(gobj);
+- drm_gem_object_put(gobj);
++
++ if (gobj)
++ *gobj = local_gobj;
++ else
++ /* drop reference from allocate - handle holds it now */
++ drm_gem_object_put(local_gobj);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 30f58b21372aa..dd0f834d881ce 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -38,7 +38,6 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
+ struct qxl_device *qdev = to_qxl(dev);
+ struct drm_qxl_alloc *qxl_alloc = data;
+ int ret;
+- struct qxl_bo *qobj;
+ uint32_t handle;
+ u32 domain = QXL_GEM_DOMAIN_VRAM;
+
+@@ -50,7 +49,7 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
+ domain,
+ qxl_alloc->size,
+ NULL,
+- &qobj, &handle);
++ NULL, &handle);
+ if (ret) {
+ DRM_ERROR("%s: failed to create gem ret=%d\n",
+ __func__, ret);
+@@ -386,7 +385,6 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
+ {
+ struct qxl_device *qdev = to_qxl(dev);
+ struct drm_qxl_alloc_surf *param = data;
+- struct qxl_bo *qobj;
+ int handle;
+ int ret;
+ int size, actual_stride;
+@@ -406,7 +404,7 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
+ QXL_GEM_DOMAIN_SURFACE,
+ size,
+ &surf,
+- &qobj, &handle);
++ NULL, &handle);
+ if (ret) {
+ DRM_ERROR("%s: failed to create gem ret=%d\n",
+ __func__, ret);
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index d6d29be6b4f48..7e175dbfd8924 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -223,20 +223,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ * DU channels that have a display PLL can't use the internal
+ * system clock, and have no internal clock divider.
+ */
+-
+- /*
+- * The H3 ES1.x exhibits dot clock duty cycle stability issues.
+- * We can work around them by configuring the DPLL to twice the
+- * desired frequency, coupled with a /2 post-divider. Restrict
+- * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
+- * no post-divider when a display PLL is present (as shown by
+- * the workaround breaking HDMI output on M3-W during testing).
+- */
+- if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY) {
+- target *= 2;
+- div = 1;
+- }
+-
+ extclk = clk_get_rate(rcrtc->extclock);
+ rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
+
+@@ -245,30 +231,13 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
+ | DPLLCR_STBY;
+
+- if (rcrtc->index == 1) {
++ if (rcrtc->index == 1)
+ dpllcr |= DPLLCR_PLCS1
+ | DPLLCR_INCS_DOTCLKIN1;
+- } else {
+- dpllcr |= DPLLCR_PLCS0_PLL
++ else
++ dpllcr |= DPLLCR_PLCS0
+ | DPLLCR_INCS_DOTCLKIN0;
+
+- /*
+- * On ES2.x we have a single mux controlled via bit 21,
+- * which selects between DCLKIN source (bit 21 = 0) and
+- * a PLL source (bit 21 = 1), where the PLL is always
+- * PLL1.
+- *
+- * On ES1.x we have an additional mux, controlled
+- * via bit 20, for choosing between PLL0 (bit 20 = 0)
+- * and PLL1 (bit 20 = 1). We always want to use PLL1,
+- * so on ES1.x, in addition to setting bit 21, we need
+- * to set the bit 20.
+- */
+-
+- if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PLL)
+- dpllcr |= DPLLCR_PLCS0_H3ES1X_PLL1;
+- }
+-
+ rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
+
+ escr = ESCR_DCLKSEL_DCLKIN | div;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+index b9a94c5260e9d..1ffde19cb87fe 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+@@ -16,7 +16,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/slab.h>
+-#include <linux/sys_soc.h>
+ #include <linux/wait.h>
+
+ #include <drm/drm_atomic_helper.h>
+@@ -387,43 +386,6 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
+ .dpll_mask = BIT(2) | BIT(1),
+ };
+
+-static const struct rcar_du_device_info rcar_du_r8a7795_es1_info = {
+- .gen = 3,
+- .features = RCAR_DU_FEATURE_CRTC_IRQ
+- | RCAR_DU_FEATURE_CRTC_CLOCK
+- | RCAR_DU_FEATURE_VSP1_SOURCE
+- | RCAR_DU_FEATURE_INTERLACED
+- | RCAR_DU_FEATURE_TVM_SYNC,
+- .quirks = RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY
+- | RCAR_DU_QUIRK_H3_ES1_PLL,
+- .channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+- .routes = {
+- /*
+- * R8A7795 has one RGB output, two HDMI outputs and one
+- * LVDS output.
+- */
+- [RCAR_DU_OUTPUT_DPAD0] = {
+- .possible_crtcs = BIT(3),
+- .port = 0,
+- },
+- [RCAR_DU_OUTPUT_HDMI0] = {
+- .possible_crtcs = BIT(1),
+- .port = 1,
+- },
+- [RCAR_DU_OUTPUT_HDMI1] = {
+- .possible_crtcs = BIT(2),
+- .port = 2,
+- },
+- [RCAR_DU_OUTPUT_LVDS0] = {
+- .possible_crtcs = BIT(0),
+- .port = 3,
+- },
+- },
+- .num_lvds = 1,
+- .num_rpf = 5,
+- .dpll_mask = BIT(2) | BIT(1),
+-};
+-
+ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+@@ -614,11 +576,6 @@ static const struct of_device_id rcar_du_of_table[] = {
+
+ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
+
+-static const struct soc_device_attribute rcar_du_soc_table[] = {
+- { .soc_id = "r8a7795", .revision = "ES1.*", .data = &rcar_du_r8a7795_es1_info },
+- { /* sentinel */ }
+-};
+-
+ const char *rcar_du_output_name(enum rcar_du_output output)
+ {
+ static const char * const names[] = {
+@@ -707,7 +664,6 @@ static void rcar_du_shutdown(struct platform_device *pdev)
+
+ static int rcar_du_probe(struct platform_device *pdev)
+ {
+- const struct soc_device_attribute *soc_attr;
+ struct rcar_du_device *rcdu;
+ unsigned int mask;
+ int ret;
+@@ -725,10 +681,6 @@ static int rcar_du_probe(struct platform_device *pdev)
+
+ rcdu->info = of_device_get_match_data(rcdu->dev);
+
+- soc_attr = soc_device_match(rcar_du_soc_table);
+- if (soc_attr)
+- rcdu->info = soc_attr->data;
+-
+ platform_set_drvdata(pdev, rcdu);
+
+ /* I/O resources */
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+index acc3673fefe18..5cfa2bb7ad93d 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+@@ -34,8 +34,6 @@ struct rcar_du_device;
+ #define RCAR_DU_FEATURE_NO_BLENDING BIT(5) /* PnMR.SPIM does not have ALP nor EOR bits */
+
+ #define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
+-#define RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY BIT(1) /* H3 ES1 has pclk stability issue */
+-#define RCAR_DU_QUIRK_H3_ES1_PLL BIT(2) /* H3 ES1 PLL setup differs from non-ES1 */
+
+ enum rcar_du_output {
+ RCAR_DU_OUTPUT_DPAD0,
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+index 6c750fab6ebb7..391de6661d8bc 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+@@ -283,8 +283,7 @@
+ #define DPLLCR 0x20044
+ #define DPLLCR_CODE (0x95 << 24)
+ #define DPLLCR_PLCS1 (1 << 23)
+-#define DPLLCR_PLCS0_PLL (1 << 21)
+-#define DPLLCR_PLCS0_H3ES1X_PLL1 (1 << 20)
++#define DPLLCR_PLCS0 (1 << 21)
+ #define DPLLCR_CLKE (1 << 18)
+ #define DPLLCR_FDPLL(n) ((n) << 12)
+ #define DPLLCR_N(n) ((n) << 5)
+diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
+index e0a8890a62e23..3e2a31d8190eb 100644
+--- a/drivers/gpu/drm/scheduler/sched_entity.c
++++ b/drivers/gpu/drm/scheduler/sched_entity.c
+@@ -448,6 +448,12 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
+ drm_sched_rq_update_fifo(entity, next->submit_ts);
+ }
+
++ /* Jobs and entities might have different lifecycles. Since we're
++ * removing the job from the entity's queue, set the job's entity pointer
++ * to NULL to prevent any future access of the entity through this job.
++ */
++ sched_job->entity = NULL;
++
+ return sched_job;
+ }
+
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index aea5a90ff98b9..cdd67676c3d1b 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -42,6 +42,10 @@
+ * the hardware.
+ *
+ * The jobs in a entity are always scheduled in the order that they were pushed.
++ *
++ * Note that once a job has been taken from the entity's queue and pushed to the
++ * hardware, i.e. the pending queue, the entity must not be referenced anymore
++ * through the job's entity pointer.
+ */
+
+ #include <linux/kthread.h>
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index 03c6becda795c..b8be4c1db4235 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -1145,7 +1145,7 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
+
+ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
+ {
+- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
++ struct ltdc_device *ldev;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+@@ -1153,6 +1153,8 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
+ if (!crtc)
+ return -ENODEV;
+
++ ldev = crtc_to_ltdc(crtc);
++
+ if (source && strcmp(source, "auto") == 0) {
+ ldev->crc_active = true;
+ ret = regmap_set_bits(ldev->regmap, LTDC_GCR, GCR_CRCEN);
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index f7e06d433a915..dfe8e09a18de0 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4608,6 +4608,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) },
+ { /* Logitech G903 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) },
++ { /* Logitech G915 TKL Keyboard over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC343) },
+ { /* Logitech G920 Wheel over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
+ .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
+@@ -4630,6 +4632,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ { /* MX5500 keyboard over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
++ { /* Logitech G915 TKL keyboard over Bluetooth */
++ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb35f) },
+ { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
+ { /* MX Master mouse over Bluetooth */
+diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
+index 0060e3dcd775d..db4639db98407 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
++++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
+@@ -28,6 +28,7 @@ struct i2c_hid_of_goodix {
+ struct regulator *vdd;
+ struct regulator *vddio;
+ struct gpio_desc *reset_gpio;
++ bool no_reset_during_suspend;
+ const struct goodix_i2c_hid_timing_data *timings;
+ };
+
+@@ -37,6 +38,14 @@ static int goodix_i2c_hid_power_up(struct i2chid_ops *ops)
+ container_of(ops, struct i2c_hid_of_goodix, ops);
+ int ret;
+
++ /*
++ * We assert the reset GPIO here (instead of during power-down) to ensure
++ * the device powers up in a clean state, just as it does in the normal
++ * power-on scenario.
++ */
++ if (ihid_goodix->no_reset_during_suspend)
++ gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
++
+ ret = regulator_enable(ihid_goodix->vdd);
+ if (ret)
+ return ret;
+@@ -60,7 +69,9 @@ static void goodix_i2c_hid_power_down(struct i2chid_ops *ops)
+ struct i2c_hid_of_goodix *ihid_goodix =
+ container_of(ops, struct i2c_hid_of_goodix, ops);
+
+- gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
++ if (!ihid_goodix->no_reset_during_suspend)
++ gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
++
+ regulator_disable(ihid_goodix->vddio);
+ regulator_disable(ihid_goodix->vdd);
+ }
+@@ -91,6 +102,9 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client)
+ if (IS_ERR(ihid_goodix->vddio))
+ return PTR_ERR(ihid_goodix->vddio);
+
++ ihid_goodix->no_reset_during_suspend =
++ of_property_read_bool(client->dev.of_node, "goodix,no-reset-during-suspend");
++
+ ihid_goodix->timings = device_get_match_data(&client->dev);
+
+ return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
+diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+index fc108f19a64c3..e99f3a3c65e15 100644
+--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
++++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+@@ -33,6 +33,7 @@
+ #define ADL_N_DEVICE_ID 0x54FC
+ #define RPL_S_DEVICE_ID 0x7A78
+ #define MTL_P_DEVICE_ID 0x7E45
++#define ARL_H_DEVICE_ID 0x7745
+
+ #define REVISION_ID_CHT_A0 0x6
+ #define REVISION_ID_CHT_Ax_SI 0x0
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 7120b30ac51d0..55cb25038e632 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -44,6 +44,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_H_DEVICE_ID)},
+ {0, }
+ };
+ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 85d8a6b048856..30a2a3200bed9 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -233,13 +233,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset)
+ {
+ u32 val;
++ unsigned long flags;
+
+ if (iproc_i2c->idm_base) {
+- spin_lock(&iproc_i2c->idm_lock);
++ spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ val = readl(iproc_i2c->base + offset);
+- spin_unlock(&iproc_i2c->idm_lock);
++ spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
+ } else {
+ val = readl(iproc_i2c->base + offset);
+ }
+@@ -250,12 +251,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset, u32 val)
+ {
++ unsigned long flags;
++
+ if (iproc_i2c->idm_base) {
+- spin_lock(&iproc_i2c->idm_lock);
++ spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ writel(val, iproc_i2c->base + offset);
+- spin_unlock(&iproc_i2c->idm_lock);
++ spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
+ } else {
+ writel(val, iproc_i2c->base + offset);
+ }
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index 55ea91a633829..c51fc1f4b97eb 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -526,9 +526,21 @@ i2c_dw_read(struct dw_i2c_dev *dev)
+ u32 flags = msgs[dev->msg_read_idx].flags;
+
+ regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
++ tmp &= DW_IC_DATA_CMD_DAT;
+ /* Ensure length byte is a valid value */
+- if (flags & I2C_M_RECV_LEN &&
+- (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
++ if (flags & I2C_M_RECV_LEN) {
++ /*
++ * If IC_EMPTYFIFO_HOLD_MASTER_EN is set (which cannot be
++ * detected from the registers), the controller can only be
++ * disabled once the STOP bit is set. But the STOP bit is only
++ * set after the block data response length is received in the
++ * I2C_FUNC_SMBUS_BLOCK_DATA case, so when that length is
++ * invalid we still need to read another byte with the STOP bit
++ * set to complete the transaction.
++ */
++ if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
++ tmp = 1;
++
+ len = i2c_dw_recv_len(dev, tmp);
+ }
+ *buf++ = tmp;
+diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
+index e067671b3ce2e..0980c773cb5b1 100644
+--- a/drivers/i2c/busses/i2c-hisi.c
++++ b/drivers/i2c/busses/i2c-hisi.c
+@@ -330,6 +330,14 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context)
+ struct hisi_i2c_controller *ctlr = context;
+ u32 int_stat;
+
++ /*
++ * Don't handle the interrupt if ctlr->completion is NULL. We may
++ * reach here because the interrupt is spurious or the transfer is
++ * started by another port (e.g. firmware) rather than us.
++ */
++ if (!ctlr->completion)
++ return IRQ_NONE;
++
+ int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT);
+ hisi_i2c_clear_int(ctlr, int_stat);
+ if (!(int_stat & HISI_I2C_INT_ALL))
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 157066f06a32d..d561cf066d705 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -449,7 +449,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
+ if (i2c_dev->is_vi)
+ return 0;
+
+- if (!i2c_dev->hw->has_apb_dma) {
++ if (i2c_dev->hw->has_apb_dma) {
+ if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
+ dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n");
+ return 0;
+diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+index 2c95e6f3d47ac..eef3ef3fabb42 100644
+--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
++++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+@@ -179,6 +179,8 @@ struct bnxt_re_dev {
+ #define BNXT_RE_ROCEV2_IPV4_PACKET 2
+ #define BNXT_RE_ROCEV2_IPV6_PACKET 3
+
++#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT))
++
+ static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
+ {
+ if (rdev)
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index ebe6852c40e8c..e7f153ee27541 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -614,12 +614,20 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
+ {
+ struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
+ struct bnxt_re_dev *rdev = ah->rdev;
++ bool block = true;
++ int rc = 0;
+
+- bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
+- !(flags & RDMA_DESTROY_AH_SLEEPABLE));
++ block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
++ rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
++ if (BNXT_RE_CHECK_RC(rc)) {
++ if (rc == -ETIMEDOUT)
++ rc = 0;
++ else
++ goto fail;
++ }
+ atomic_dec(&rdev->ah_count);
+-
+- return 0;
++fail:
++ return rc;
+ }
+
+ static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index b967a17a44beb..10919532bca29 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -468,13 +468,14 @@ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ return 0;
+ }
+
+-void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+- bool block)
++int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
++ bool block)
+ {
+ struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+ struct creq_destroy_ah_resp resp = {};
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct cmdq_destroy_ah req = {};
++ int rc;
+
+ /* Clean up the AH table in the device */
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+@@ -485,7 +486,8 @@ void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+
+ bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+ sizeof(resp), block);
+- bnxt_qplib_rcfw_send_message(rcfw, &msg);
++ rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
++ return rc;
+ }
+
+ /* MRW */
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+index 5de874659cdfa..4061616048e85 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -327,8 +327,8 @@ int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_ctx *ctx);
+ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+ bool block);
+-void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
+- bool block);
++int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
++ bool block);
+ int bnxt_qplib_alloc_mrw(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_mrw *mrw);
+ int bnxt_qplib_dereg_mrw(struct bnxt_qplib_res *res, struct bnxt_qplib_mrw *mrw,
+diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
+index 54b61930a7fdb..4b3b5b274e849 100644
+--- a/drivers/infiniband/hw/mana/qp.c
++++ b/drivers/infiniband/hw/mana/qp.c
+@@ -13,7 +13,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
+ u8 *rx_hash_key)
+ {
+ struct mana_port_context *mpc = netdev_priv(ndev);
+- struct mana_cfg_rx_steer_req *req = NULL;
++ struct mana_cfg_rx_steer_req_v2 *req;
+ struct mana_cfg_rx_steer_resp resp = {};
+ mana_handle_t *req_indir_tab;
+ struct gdma_context *gc;
+@@ -33,6 +33,8 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
+ mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
+ sizeof(resp));
+
++ req->hdr.req.msg_version = GDMA_MESSAGE_V2;
++
+ req->vport = mpc->port_handle;
+ req->rx_enable = 1;
+ req->update_default_rxobj = 1;
+@@ -46,6 +48,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
+ req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
+ req->indir_tab_offset = sizeof(*req);
+ req->update_indir_tab = true;
++ req->cqe_coalescing_enable = 1;
+
+ req_indir_tab = (mana_handle_t *)(req + 1);
+ /* The ind table passed to the hardware must have
+diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
+index bae0334d6e7f1..aec011557b4a7 100644
+--- a/drivers/infiniband/hw/mlx5/qpc.c
++++ b/drivers/infiniband/hw/mlx5/qpc.c
+@@ -298,8 +298,7 @@ int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, in, uid, qp->uid);
+- mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
+- return 0;
++ return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
+ }
+
+ int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
+@@ -551,14 +550,14 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
+ }
+
+-static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
++static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
+ {
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+ MLX5_SET(destroy_rq_in, in, uid, uid);
+- mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
++ return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
+ }
+
+ int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+@@ -589,8 +588,7 @@ int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq)
+ {
+ destroy_resource_common(dev, rq);
+- destroy_rq_tracked(dev, rq->qpn, rq->uid);
+- return 0;
++ return destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ }
+
+ static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index 2ddbda3a43746..5a224e244be8a 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -174,6 +174,7 @@
+ #define CONTROL_GAINT_EN 29
+ #define CONTROL_XT_EN 50
+ #define CONTROL_INTCAPXT_EN 51
++#define CONTROL_IRTCACHEDIS 59
+ #define CONTROL_SNPAVIC_EN 61
+
+ #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
+@@ -716,6 +717,9 @@ struct amd_iommu {
+ /* if one, we need to send a completion wait command */
+ bool need_sync;
+
++ /* true if IRTE caching is disabled */
++ bool irtcachedis_enabled;
++
+ /* Handle for IOMMU core code */
+ struct iommu_device iommu;
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index c2d80a4e5fb06..02846299af0ef 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -162,6 +162,7 @@ static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+ static bool amd_iommu_detected;
+ static bool amd_iommu_disabled __initdata;
+ static bool amd_iommu_force_enable __initdata;
++static bool amd_iommu_irtcachedis;
+ static int amd_iommu_target_ivhd_type;
+
+ /* Global EFR and EFR2 registers */
+@@ -484,6 +485,9 @@ static void iommu_disable(struct amd_iommu *iommu)
+
+ /* Disable IOMMU hardware itself */
+ iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
++
++ /* Clear IRTE cache disabling bit */
++ iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
+ }
+
+ /*
+@@ -2710,6 +2714,33 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
+ #endif
+ }
+
++static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
++{
++ iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
++}
++
++static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
++{
++ u64 ctrl;
++
++ if (!amd_iommu_irtcachedis)
++ return;
++
++ /*
++ * Note:
++ * The support for IRTCacheDis feature is determined by
++ * checking if the bit is writable.
++ */
++ iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
++ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
++ ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
++ if (ctrl)
++ iommu->irtcachedis_enabled = true;
++ pr_info("iommu%d (%#06x) : IRT cache is %s\n",
++ iommu->index, iommu->devid,
++ iommu->irtcachedis_enabled ? "disabled" : "enabled");
++}
++
+ static void early_enable_iommu(struct amd_iommu *iommu)
+ {
+ iommu_disable(iommu);
+@@ -2720,6 +2751,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
+ iommu_set_exclusion_range(iommu);
+ iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
++ iommu_enable_irtcachedis(iommu);
+ iommu_enable(iommu);
+ iommu_flush_all_caches(iommu);
+ }
+@@ -2770,10 +2802,12 @@ static void early_enable_iommus(void)
+ for_each_iommu(iommu) {
+ iommu_disable_command_buffer(iommu);
+ iommu_disable_event_buffer(iommu);
++ iommu_disable_irtcachedis(iommu);
+ iommu_enable_command_buffer(iommu);
+ iommu_enable_event_buffer(iommu);
+ iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
++ iommu_enable_irtcachedis(iommu);
+ iommu_set_device_table(iommu);
+ iommu_flush_all_caches(iommu);
+ }
+@@ -3426,6 +3460,8 @@ static int __init parse_amd_iommu_options(char *str)
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V2;
++ } else if (strncmp(str, "irtcachedis", 11) == 0) {
++ amd_iommu_irtcachedis = true;
+ } else {
+ pr_notice("Unknown option - '%s'\n", str);
+ }
+diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
+index 1c849814a4917..212df2e3d3502 100644
+--- a/drivers/leds/rgb/leds-qcom-lpg.c
++++ b/drivers/leds/rgb/leds-qcom-lpg.c
+@@ -1173,8 +1173,10 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+ i = 0;
+ for_each_available_child_of_node(np, child) {
+ ret = lpg_parse_channel(lpg, child, &led->channels[i]);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(child);
+ return ret;
++ }
+
+ info[i].color_index = led->channels[i]->color;
+ info[i].intensity = 0;
+@@ -1352,8 +1354,10 @@ static int lpg_probe(struct platform_device *pdev)
+
+ for_each_available_child_of_node(pdev->dev.of_node, np) {
+ ret = lpg_add_led(lpg, np);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+ }
+
+ for (i = 0; i < lpg->num_channels; i++)
+diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+index 40cb3cb87ba17..60425c99a2b8b 100644
+--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
++++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+@@ -1310,6 +1310,8 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
+ jpeg->dev = &pdev->dev;
+ jpeg->variant = of_device_get_match_data(jpeg->dev);
+
++ platform_set_drvdata(pdev, jpeg);
++
+ ret = devm_of_platform_populate(&pdev->dev);
+ if (ret) {
+ v4l2_err(&jpeg->v4l2_dev, "Master of platform populate failed.");
+@@ -1381,8 +1383,6 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
+ jpeg->variant->dev_name, jpeg->vdev->num,
+ VIDEO_MAJOR, jpeg->vdev->minor);
+
+- platform_set_drvdata(pdev, jpeg);
+-
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+index 5e2bc286f168e..1a95958a1f908 100644
+--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
++++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+@@ -562,15 +562,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
+ int vpu_load_firmware(struct platform_device *pdev)
+ {
+ struct mtk_vpu *vpu;
+- struct device *dev = &pdev->dev;
++ struct device *dev;
+ struct vpu_run *run;
+ int ret;
+
+ if (!pdev) {
+- dev_err(dev, "VPU platform device is invalid\n");
++ pr_err("VPU platform device is invalid\n");
+ return -EINVAL;
+ }
+
++ dev = &pdev->dev;
++
+ vpu = platform_get_drvdata(pdev);
+ run = &vpu->run;
+
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
+index e0832f3f4f25c..06c95568e5af4 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe.c
+@@ -1541,7 +1541,11 @@ int msm_vfe_register_entities(struct vfe_device *vfe,
+ }
+
+ video_out->ops = &vfe->video_ops;
+- video_out->bpl_alignment = 8;
++ if (vfe->camss->version == CAMSS_845 ||
++ vfe->camss->version == CAMSS_8250)
++ video_out->bpl_alignment = 16;
++ else
++ video_out->bpl_alignment = 8;
+ video_out->line_based = 0;
+ if (i == VFE_LINE_PIX) {
+ video_out->bpl_alignment = 16;
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index 35453f81c1d97..c06f8ca9e09ec 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -45,7 +45,7 @@ static int uvc_control_add_xu_mapping(struct uvc_video_chain *chain,
+ map->menu_names = NULL;
+ map->menu_mapping = NULL;
+
+- map->menu_mask = BIT_MASK(xmap->menu_count);
++ map->menu_mask = GENMASK(xmap->menu_count - 1, 0);
+
+ size = xmap->menu_count * sizeof(*map->menu_mapping);
+ map->menu_mapping = kzalloc(size, GFP_KERNEL);
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index e46330815484d..5d6c16adb50da 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2097,14 +2097,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ mmc_blk_urgent_bkops(mq, mqrq);
+ }
+
+-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
++static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
+ {
+ unsigned long flags;
+ bool put_card;
+
+ spin_lock_irqsave(&mq->lock, flags);
+
+- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
++ mq->in_flight[issue_type] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+@@ -2117,6 +2117,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ bool can_sleep)
+ {
++ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_host *host = mq->card->host;
+@@ -2136,7 +2137,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ blk_mq_complete_request(req);
+ }
+
+- mmc_blk_mq_dec_in_flight(mq, req);
++ mmc_blk_mq_dec_in_flight(mq, issue_type);
+ }
+
+ void mmc_blk_mq_recovery(struct mmc_queue *mq)
+diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
+index b01ffb4d09737..3215063bcf868 100644
+--- a/drivers/mmc/host/sdhci_f_sdh30.c
++++ b/drivers/mmc/host/sdhci_f_sdh30.c
+@@ -210,13 +210,16 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
+ {
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
+-
+- reset_control_assert(priv->rst);
+- clk_disable_unprepare(priv->clk);
+- clk_disable_unprepare(priv->clk_iface);
++ struct clk *clk_iface = priv->clk_iface;
++ struct reset_control *rst = priv->rst;
++ struct clk *clk = priv->clk;
+
+ sdhci_pltfm_unregister(pdev);
+
++ reset_control_assert(rst);
++ clk_disable_unprepare(clk);
++ clk_disable_unprepare(clk_iface);
++
+ return 0;
+ }
+
+diff --git a/drivers/mmc/host/sunplus-mmc.c b/drivers/mmc/host/sunplus-mmc.c
+index db5e0dcdfa7f3..2bdebeb1f8e49 100644
+--- a/drivers/mmc/host/sunplus-mmc.c
++++ b/drivers/mmc/host/sunplus-mmc.c
+@@ -863,11 +863,9 @@ static int spmmc_drv_probe(struct platform_device *pdev)
+ struct spmmc_host *host;
+ int ret = 0;
+
+- mmc = mmc_alloc_host(sizeof(*host), &pdev->dev);
+- if (!mmc) {
+- ret = -ENOMEM;
+- goto probe_free_host;
+- }
++ mmc = devm_mmc_alloc_host(&pdev->dev, sizeof(struct spmmc_host));
++ if (!mmc)
++ return -ENOMEM;
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+@@ -902,7 +900,7 @@ static int spmmc_drv_probe(struct platform_device *pdev)
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+- goto probe_free_host;
++ goto clk_disable;
+
+ mmc->ops = &spmmc_ops;
+ mmc->f_min = SPMMC_MIN_CLK;
+@@ -911,7 +909,7 @@ static int spmmc_drv_probe(struct platform_device *pdev)
+
+ ret = mmc_regulator_get_supply(mmc);
+ if (ret)
+- goto probe_free_host;
++ goto clk_disable;
+
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
+@@ -927,14 +925,17 @@ static int spmmc_drv_probe(struct platform_device *pdev)
+ host->tuning_info.enable_tuning = 1;
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+- mmc_add_host(mmc);
++ ret = mmc_add_host(mmc);
++ if (ret)
++ goto pm_disable;
+
+- return ret;
++ return 0;
+
+-probe_free_host:
+- if (mmc)
+- mmc_free_host(mmc);
++pm_disable:
++ pm_runtime_disable(&pdev->dev);
+
++clk_disable:
++ clk_disable_unprepare(host->clk);
+ return ret;
+ }
+
+@@ -948,7 +949,6 @@ static int spmmc_drv_remove(struct platform_device *dev)
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+ platform_set_drvdata(dev, NULL);
+- mmc_free_host(host->mmc);
+
+ return 0;
+ }
+diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
+index 521af9251f335..bf2a92fba0ed8 100644
+--- a/drivers/mmc/host/wbsd.c
++++ b/drivers/mmc/host/wbsd.c
+@@ -1705,8 +1705,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
+
+ wbsd_release_resources(host);
+ wbsd_free_mmc(dev);
+-
+- mmc_free_host(mmc);
+ return ret;
+ }
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 642e93e8623eb..8c9d05a1fe667 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3006,6 +3006,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+
+ /* If there is a GPIO connected to the reset pin, toggle it */
+ if (gpiod) {
++ /* If the switch has just been reset and not yet completed
++ * loading EEPROM, the reset may interrupt the I2C transaction
++ * mid-byte, causing the first EEPROM read after the reset
++ * to come from the wrong location, resulting in the switch
++ * booting into the wrong mode and becoming inoperable.
++ */
++ mv88e6xxx_g1_wait_eeprom_done(chip);
++
+ gpiod_set_value_cansleep(gpiod, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gpiod, 0);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index 29a1199dad146..3fbe15b3ac627 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -5159,6 +5159,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
+ unsigned int q;
+ int err;
+
++ if (!device_may_wakeup(&bp->dev->dev))
++ phy_exit(bp->sgmii_phy);
++
+ if (!netif_running(netdev))
+ return 0;
+
+@@ -5219,7 +5222,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
+ if (!(bp->wol & MACB_WOL_ENABLED)) {
+ rtnl_lock();
+ phylink_stop(bp->phylink);
+- phy_exit(bp->sgmii_phy);
+ rtnl_unlock();
+ spin_lock_irqsave(&bp->lock, flags);
+ macb_reset_hw(bp);
+@@ -5249,6 +5251,9 @@ static int __maybe_unused macb_resume(struct device *dev)
+ unsigned int q;
+ int err;
+
++ if (!device_may_wakeup(&bp->dev->dev))
++ phy_init(bp->sgmii_phy);
++
+ if (!netif_running(netdev))
+ return 0;
+
+@@ -5309,8 +5314,6 @@ static int __maybe_unused macb_resume(struct device *dev)
+ macb_set_rx_mode(netdev);
+ macb_restore_features(bp);
+ rtnl_lock();
+- if (!device_may_wakeup(&bp->dev->dev))
+- phy_init(bp->sgmii_phy);
+
+ phylink_start(bp->phylink);
+ rtnl_unlock();
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index 9da0c87f03288..f99c1f7fec406 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -210,11 +210,11 @@ read_nvm_exit:
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+- * @words: number of words to write
+- * @data: buffer with words to write to the Shadow RAM
++ * @words: number of words to read
++ * @data: buffer for words read from the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
++ * Reads a 16 bit words buffer from the Shadow RAM using the admin command.
+ **/
+ static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+@@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
++ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+- /* We can write only up to 4KB (one sector), in one AQ write */
++ /* We can read only up to 4KB (one sector), in one AQ read */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write fail error: tried to write %d words, limit is %d.\n",
++ "NVM read fail error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+- /* A single write cannot spread over two sectors */
++ /* A single read cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
++ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index 460ca561819a9..a34303ad057d0 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -1289,6 +1289,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
++ fltr->ip_ver = 4;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+@@ -1300,6 +1301,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
+ fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
++ fltr->ip_ver = 4;
+ break;
+ case IPV4_USER_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+@@ -1312,6 +1314,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
++ fltr->ip_ver = 4;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+@@ -1330,6 +1333,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
++ fltr->ip_ver = 6;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+@@ -1345,6 +1349,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ sizeof(struct in6_addr));
+ fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
+ fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
++ fltr->ip_ver = 6;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+@@ -1361,6 +1366,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
++ fltr->ip_ver = 6;
+ break;
+ case ETHER_FLOW:
+ fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
+@@ -1371,6 +1377,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ return -EINVAL;
+ }
+
++ err = iavf_validate_fdir_fltr_masks(adapter, fltr);
++ if (err)
++ return err;
++
+ if (iavf_fdir_is_dup_fltr(adapter, fltr))
+ return -EEXIST;
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+index 505e82ebafe47..03e774bd2a5b4 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+@@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = {
+ }
+ };
+
++static const struct in6_addr ipv6_addr_zero_mask = {
++ .in6_u = {
++ .u6_addr8 = {
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ }
++ }
++};
++
++/**
++ * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks
++ * @adapter: pointer to the VF adapter structure
++ * @fltr: Flow Director filter data structure
++ *
++ * Returns 0 if all masks of packet fields are either full or empty. Returns
++ * error on at least one partial mask.
++ */
++int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
++ struct iavf_fdir_fltr *fltr)
++{
++ if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_ver == 4) {
++ if (fltr->ip_mask.v4_addrs.src_ip &&
++ fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.v4_addrs.dst_ip &&
++ fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX)
++ goto partial_mask;
++ } else if (fltr->ip_ver == 6) {
++ if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask,
++ sizeof(struct in6_addr)) &&
++ memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
++ sizeof(struct in6_addr)))
++ goto partial_mask;
++
++ if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask,
++ sizeof(struct in6_addr)) &&
++ memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
++ sizeof(struct in6_addr)))
++ goto partial_mask;
++
++ if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX)
++ goto partial_mask;
++ }
++
++ if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX)
++ goto partial_mask;
++
++ if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.l4_header &&
++ fltr->ip_mask.l4_header != htonl(U32_MAX))
++ goto partial_mask;
++
++ return 0;
++
++partial_mask:
++ dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n");
++ return -EOPNOTSUPP;
++}
++
+ /**
+ * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
+ * @fltr: Flow Director filter data structure
+@@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ }
+
+- fltr->ip_ver = 4;
+-
+ return 0;
+ }
+
+@@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+ }
+
+- fltr->ip_ver = 6;
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+index 33c55c366315b..9eb9f73f6adf3 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+@@ -110,6 +110,8 @@ struct iavf_fdir_fltr {
+ struct virtchnl_fdir_add vc_add_msg;
+ };
+
++int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
++ struct iavf_fdir_fltr *fltr);
+ int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index f6dd3f8fd936e..03e5139849462 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -568,6 +568,12 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ {
++ if (ice_is_adq_active(pf)) {
++ dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
++ NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
++ return -EOPNOTSUPP;
++ }
++
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index 34e8e7cb1bc54..cfb76612bd2f9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -9065,6 +9065,11 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ ice_setup_tc_block_cb,
+ np, np, true);
+ case TC_SETUP_QDISC_MQPRIO:
++ if (ice_is_eswitch_mode_switchdev(pf)) {
++ netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
++ return -EOPNOTSUPP;
++ }
++
+ if (pf->adev) {
+ mutex_lock(&pf->adev_mutex);
+ device_lock(&pf->adev->dev);
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+index 1cc6af2feb38a..565320ec24f81 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_ctrl_net.c
+@@ -55,7 +55,7 @@ static int octep_send_mbox_req(struct octep_device *oct,
+ list_add_tail(&d->list, &oct->ctrl_req_wait_list);
+ ret = wait_event_interruptible_timeout(oct->ctrl_req_wait_q,
+ (d->done != 0),
+- jiffies + msecs_to_jiffies(500));
++ msecs_to_jiffies(500));
+ list_del(&d->list);
+ if (ret == 0 || ret == 1)
+ return -EAGAIN;
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 43eb6e8713511..4424de2ffd70c 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -1038,6 +1038,10 @@ static void octep_device_cleanup(struct octep_device *oct)
+ {
+ int i;
+
++ oct->poll_non_ioq_intr = false;
++ cancel_delayed_work_sync(&oct->intr_poll_task);
++ cancel_work_sync(&oct->ctrl_mbox_task);
++
+ dev_info(&oct->pdev->dev, "Cleaning up Octeon Device ...\n");
+
+ for (i = 0; i < OCTEP_MAX_VF; i++) {
+@@ -1200,14 +1204,11 @@ static void octep_remove(struct pci_dev *pdev)
+ if (!oct)
+ return;
+
+- cancel_work_sync(&oct->tx_timeout_task);
+- cancel_work_sync(&oct->ctrl_mbox_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
+- oct->poll_non_ioq_intr = false;
+- cancel_delayed_work_sync(&oct->intr_poll_task);
++ cancel_work_sync(&oct->tx_timeout_task);
+ octep_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+index 9e8e6184f9e43..ecfe93a479da8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+@@ -84,6 +84,8 @@ enum mlx5e_xdp_xmit_mode {
+ * MLX5E_XDP_XMIT_MODE_XSK:
+ * none.
+ */
++#define MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO 4
++
+ union mlx5e_xdp_info {
+ enum mlx5e_xdp_xmit_mode mode;
+ union {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 7e6d0489854e3..975c82df345cd 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -1298,11 +1298,13 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
+ {
+ struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+- int entries = wq_sz * MLX5_SEND_WQEBB_NUM_DS * 2; /* upper bound for maximum num of
+- * entries of all xmit_modes.
+- */
++ int entries;
+ size_t size;
+
++ /* upper bound for maximum num of entries of all xmit_modes. */
++ entries = roundup_pow_of_two(wq_sz * MLX5_SEND_WQEBB_NUM_DS *
++ MLX5E_XDP_FIFO_ENTRIES2DS_MAX_RATIO);
++
+ size = array_size(sizeof(*xdpi_fifo->xi), entries);
+ xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
+ if (!xdpi_fifo->xi)
+diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
+index 96c78f7db2543..7441577294bad 100644
+--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
++++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
+@@ -973,7 +973,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
+ bool update_tab)
+ {
+ u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
+- struct mana_cfg_rx_steer_req *req = NULL;
++ struct mana_cfg_rx_steer_req_v2 *req;
+ struct mana_cfg_rx_steer_resp resp = {};
+ struct net_device *ndev = apc->ndev;
+ mana_handle_t *req_indir_tab;
+@@ -988,6 +988,8 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
+ mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
+ sizeof(resp));
+
++ req->hdr.req.msg_version = GDMA_MESSAGE_V2;
++
+ req->vport = apc->port_handle;
+ req->num_indir_entries = num_entries;
+ req->indir_tab_offset = sizeof(*req);
+@@ -997,6 +999,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
+ req->update_hashkey = update_key;
+ req->update_indir_tab = update_tab;
+ req->default_rxobj = apc->default_rxobj;
++ req->cqe_coalescing_enable = 0;
+
+ if (update_key)
+ memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index 4b004a7281903..99df00c30b8c6 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -176,6 +176,15 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+ }
+ #endif
+
++static int __maybe_unused qede_suspend(struct device *dev)
++{
++ dev_info(dev, "Device does not support suspend operation\n");
++
++ return -EOPNOTSUPP;
++}
++
++static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);
++
+ static const struct pci_error_handlers qede_err_handler = {
+ .error_detected = qede_io_error_detected,
+ };
+@@ -190,6 +199,7 @@ static struct pci_driver qede_pci_driver = {
+ .sriov_configure = qede_sriov_configure,
+ #endif
+ .err_handler = &qede_err_handler,
++ .driver.pm = &qede_pm_ops,
+ };
+
+ static struct qed_eth_cb_ops qede_ll_ops = {
+diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
+index 7adde9639c8ab..35d8e9811998d 100644
+--- a/drivers/net/ethernet/sfc/ef100_nic.c
++++ b/drivers/net/ethernet/sfc/ef100_nic.c
+@@ -1194,7 +1194,7 @@ int ef100_probe_netdev_pf(struct efx_nic *efx)
+ net_dev->features |= NETIF_F_HW_TC;
+ efx->fixed_features |= NETIF_F_HW_TC;
+ }
+- return rc;
++ return 0;
+ }
+
+ int ef100_probe_vf(struct efx_nic *efx)
+diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
+index d7827ab3761f9..6c8dfe0a64824 100644
+--- a/drivers/net/ethernet/sfc/tc.c
++++ b/drivers/net/ethernet/sfc/tc.c
+@@ -1310,6 +1310,58 @@ void efx_tc_deconfigure_default_rule(struct efx_nic *efx,
+ rule->fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ }
+
++static int efx_tc_configure_fallback_acts(struct efx_nic *efx, u32 eg_port,
++ struct efx_tc_action_set_list *acts)
++{
++ struct efx_tc_action_set *act;
++ int rc;
++
++ act = kzalloc(sizeof(*act), GFP_KERNEL);
++ if (!act)
++ return -ENOMEM;
++ act->deliver = 1;
++ act->dest_mport = eg_port;
++ rc = efx_mae_alloc_action_set(efx, act);
++ if (rc)
++ goto fail1;
++ EFX_WARN_ON_PARANOID(!list_empty(&acts->list));
++ list_add_tail(&act->list, &acts->list);
++ rc = efx_mae_alloc_action_set_list(efx, acts);
++ if (rc)
++ goto fail2;
++ return 0;
++fail2:
++ list_del(&act->list);
++ efx_mae_free_action_set(efx, act->fw_id);
++fail1:
++ kfree(act);
++ return rc;
++}
++
++static int efx_tc_configure_fallback_acts_pf(struct efx_nic *efx)
++{
++ struct efx_tc_action_set_list *acts = &efx->tc->facts.pf;
++ u32 eg_port;
++
++ efx_mae_mport_uplink(efx, &eg_port);
++ return efx_tc_configure_fallback_acts(efx, eg_port, acts);
++}
++
++static int efx_tc_configure_fallback_acts_reps(struct efx_nic *efx)
++{
++ struct efx_tc_action_set_list *acts = &efx->tc->facts.reps;
++ u32 eg_port;
++
++ efx_mae_mport_mport(efx, efx->tc->reps_mport_id, &eg_port);
++ return efx_tc_configure_fallback_acts(efx, eg_port, acts);
++}
++
++static void efx_tc_deconfigure_fallback_acts(struct efx_nic *efx,
++ struct efx_tc_action_set_list *acts)
++{
++ efx_tc_free_action_set_list(efx, acts, true);
++}
++
+ static int efx_tc_configure_rep_mport(struct efx_nic *efx)
+ {
+ u32 rep_mport_label;
+@@ -1402,10 +1454,16 @@ int efx_init_tc(struct efx_nic *efx)
+ rc = efx_tc_configure_rep_mport(efx);
+ if (rc)
+ return rc;
+- efx->tc->up = true;
++ rc = efx_tc_configure_fallback_acts_pf(efx);
++ if (rc)
++ return rc;
++ rc = efx_tc_configure_fallback_acts_reps(efx);
++ if (rc)
++ return rc;
+ rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
+ if (rc)
+ return rc;
++ efx->tc->up = true;
+ return 0;
+ }
+
+@@ -1419,6 +1477,8 @@ void efx_fini_tc(struct efx_nic *efx)
+ efx_tc_deconfigure_rep_mport(efx);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.pf);
+ efx_tc_deconfigure_default_rule(efx, &efx->tc->dflt.wire);
++ efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.pf);
++ efx_tc_deconfigure_fallback_acts(efx, &efx->tc->facts.reps);
+ efx->tc->up = false;
+ }
+
+@@ -1483,6 +1543,10 @@ int efx_init_struct_tc(struct efx_nic *efx)
+ efx->tc->dflt.pf.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
+ INIT_LIST_HEAD(&efx->tc->dflt.wire.acts.list);
+ efx->tc->dflt.wire.fw_id = MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL;
++ INIT_LIST_HEAD(&efx->tc->facts.pf.list);
++ efx->tc->facts.pf.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL;
++ INIT_LIST_HEAD(&efx->tc->facts.reps.list);
++ efx->tc->facts.reps.fw_id = MC_CMD_MAE_ACTION_SET_ALLOC_OUT_ACTION_SET_ID_NULL;
+ efx->extra_channel_type[EFX_EXTRA_CHANNEL_TC] = &efx_tc_channel_type;
+ return 0;
+ fail_match_action_ht:
+@@ -1508,6 +1572,10 @@ void efx_fini_struct_tc(struct efx_nic *efx)
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
+ EFX_WARN_ON_PARANOID(efx->tc->dflt.wire.fw_id !=
+ MC_CMD_MAE_ACTION_RULE_INSERT_OUT_ACTION_RULE_ID_NULL);
++ EFX_WARN_ON_PARANOID(efx->tc->facts.pf.fw_id !=
++ MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
++ EFX_WARN_ON_PARANOID(efx->tc->facts.reps.fw_id !=
++ MC_CMD_MAE_ACTION_SET_LIST_ALLOC_OUT_ACTION_SET_LIST_ID_NULL);
+ rhashtable_free_and_destroy(&efx->tc->match_action_ht, efx_tc_flow_free,
+ efx);
+ rhashtable_free_and_destroy(&efx->tc->encap_match_ht,
+diff --git a/drivers/net/ethernet/sfc/tc.h b/drivers/net/ethernet/sfc/tc.h
+index 04cced6a2d39f..2b6782e9c7226 100644
+--- a/drivers/net/ethernet/sfc/tc.h
++++ b/drivers/net/ethernet/sfc/tc.h
+@@ -133,6 +133,11 @@ enum efx_tc_rule_prios {
+ * %EFX_TC_PRIO_DFLT. Named by *ingress* port
+ * @dflt.pf: rule for traffic ingressing from PF (egresses to wire)
+ * @dflt.wire: rule for traffic ingressing from wire (egresses to PF)
++ * @facts: Fallback action-set-lists for unready rules. Named by *egress* port
++ * @facts.pf: action-set-list for unready rules on PF netdev, hence applying to
++ * traffic from wire, and egressing to PF
++ * @facts.reps: action-set-list for unready rules on representors, hence
++ * applying to traffic from representees, and egressing to the reps mport
+ * @up: have TC datastructures been set up?
+ */
+ struct efx_tc_state {
+@@ -153,6 +158,10 @@ struct efx_tc_state {
+ struct efx_tc_flow_rule pf;
+ struct efx_tc_flow_rule wire;
+ } dflt;
++ struct {
++ struct efx_tc_action_set_list pf;
++ struct efx_tc_action_set_list reps;
++ } facts;
+ bool up;
+ };
+
+diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
+index 323bec5e57f83..3560991690038 100644
+--- a/drivers/net/pcs/pcs-rzn1-miic.c
++++ b/drivers/net/pcs/pcs-rzn1-miic.c
+@@ -313,15 +313,21 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
+
+ pdev = of_find_device_by_node(pcs_np);
+ of_node_put(pcs_np);
+- if (!pdev || !platform_get_drvdata(pdev))
++ if (!pdev || !platform_get_drvdata(pdev)) {
++ if (pdev)
++ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
++ }
+
+ miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
+- if (!miic_port)
++ if (!miic_port) {
++ put_device(&pdev->dev);
+ return ERR_PTR(-ENOMEM);
++ }
+
+ miic = platform_get_drvdata(pdev);
+ device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
++ put_device(&pdev->dev);
+
+ miic_port->miic = miic;
+ miic_port->port = port - 1;
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index ef6dc008e4c50..8a77ec33b4172 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -304,7 +304,6 @@ struct at803x_priv {
+ bool is_1000basex;
+ struct regulator_dev *vddio_rdev;
+ struct regulator_dev *vddh_rdev;
+- struct regulator *vddio;
+ u64 stats[ARRAY_SIZE(at803x_hw_stats)];
+ };
+
+@@ -460,21 +459,27 @@ static int at803x_set_wol(struct phy_device *phydev,
+ phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+ mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+
+- /* Enable WOL function */
+- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
+- 0, AT803X_WOL_EN);
+- if (ret)
+- return ret;
++ /* Enable WOL function for 1588 */
++ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
++ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++ AT803X_PHY_MMD3_WOL_CTRL,
++ 0, AT803X_WOL_EN);
++ if (ret)
++ return ret;
++ }
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+ if (ret)
+ return ret;
+ } else {
+- /* Disable WoL function */
+- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
+- AT803X_WOL_EN, 0);
+- if (ret)
+- return ret;
++ /* Disable WoL function for 1588 */
++ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
++ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++ AT803X_PHY_MMD3_WOL_CTRL,
++ AT803X_WOL_EN, 0);
++ if (ret)
++ return ret;
++ }
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+ if (ret)
+@@ -509,11 +514,11 @@ static void at803x_get_wol(struct phy_device *phydev,
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+- value = phy_read_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL);
++ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (value < 0)
+ return;
+
+- if (value & AT803X_WOL_EN)
++ if (value & AT803X_INTR_ENABLE_WOL)
+ wol->wolopts |= WAKE_MAGIC;
+ }
+
+@@ -824,11 +829,11 @@ static int at803x_parse_dt(struct phy_device *phydev)
+ if (ret < 0)
+ return ret;
+
+- priv->vddio = devm_regulator_get_optional(&phydev->mdio.dev,
+- "vddio");
+- if (IS_ERR(priv->vddio)) {
++ ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
++ "vddio");
++ if (ret) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+- return PTR_ERR(priv->vddio);
++ return ret;
+ }
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+@@ -856,23 +861,12 @@ static int at803x_probe(struct phy_device *phydev)
+ if (ret)
+ return ret;
+
+- if (priv->vddio) {
+- ret = regulator_enable(priv->vddio);
+- if (ret < 0)
+- return ret;
+- }
+-
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+ int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ int mode_cfg;
+- struct ethtool_wolinfo wol = {
+- .wolopts = 0,
+- };
+
+- if (ccr < 0) {
+- ret = ccr;
+- goto err;
+- }
++ if (ccr < 0)
++ return ccr;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+@@ -886,29 +880,17 @@ static int at803x_probe(struct phy_device *phydev)
+ break;
+ }
+
+- /* Disable WOL by default */
+- ret = at803x_set_wol(phydev, &wol);
+- if (ret < 0) {
+- phydev_err(phydev, "failed to disable WOL on probe: %d\n", ret);
+- goto err;
+- }
++ /* Disable WoL in 1588 register which is enabled
++ * by default
++ */
++ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++ AT803X_PHY_MMD3_WOL_CTRL,
++ AT803X_WOL_EN, 0);
++ if (ret)
++ return ret;
+ }
+
+ return 0;
+-
+-err:
+- if (priv->vddio)
+- regulator_disable(priv->vddio);
+-
+- return ret;
+-}
+-
+-static void at803x_remove(struct phy_device *phydev)
+-{
+- struct at803x_priv *priv = phydev->priv;
+-
+- if (priv->vddio)
+- regulator_disable(priv->vddio);
+ }
+
+ static int at803x_get_features(struct phy_device *phydev)
+@@ -2021,7 +2003,6 @@ static struct phy_driver at803x_driver[] = {
+ .name = "Qualcomm Atheros AR8035",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .config_aneg = at803x_config_aneg,
+ .config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
+@@ -2043,7 +2024,6 @@ static struct phy_driver at803x_driver[] = {
+ .name = "Qualcomm Atheros AR8030",
+ .phy_id_mask = AT8030_PHY_ID_MASK,
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+@@ -2059,7 +2039,6 @@ static struct phy_driver at803x_driver[] = {
+ .name = "Qualcomm Atheros AR8031/AR8033",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .config_init = at803x_config_init,
+ .config_aneg = at803x_config_aneg,
+ .soft_reset = genphy_soft_reset,
+@@ -2082,7 +2061,6 @@ static struct phy_driver at803x_driver[] = {
+ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+ .name = "Qualcomm Atheros AR8032",
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .flags = PHY_POLL_CABLE_TEST,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+@@ -2098,7 +2076,6 @@ static struct phy_driver at803x_driver[] = {
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+@@ -2115,7 +2092,6 @@ static struct phy_driver at803x_driver[] = {
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+@@ -2181,7 +2157,6 @@ static struct phy_driver at803x_driver[] = {
+ .name = "Qualcomm QCA8081",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index ad71c88c87e78..f9ad8902100f3 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -486,6 +486,17 @@ static int bcm54xx_resume(struct phy_device *phydev)
+ return bcm54xx_config_init(phydev);
+ }
+
++static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
++{
++ return -EOPNOTSUPP;
++}
++
++static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
++ u16 val)
++{
++ return -EOPNOTSUPP;
++}
++
+ static int bcm54811_config_init(struct phy_device *phydev)
+ {
+ int err, reg;
+@@ -981,6 +992,8 @@ static struct phy_driver broadcom_drivers[] = {
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
++ .read_mmd = bcm54810_read_mmd,
++ .write_mmd = bcm54810_write_mmd,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm5481_config_aneg,
+ .config_intr = bcm_phy_config_intr,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 2c4e6de8f4d9f..7958ea0e8714a 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3217,6 +3217,8 @@ static int phy_probe(struct device *dev)
+ goto out;
+ }
+
++ phy_disable_interrupts(phydev);
++
+ /* Start out supporting everything. Eventually,
+ * a controller will attach, and may modify one
+ * or both of these values
+@@ -3334,16 +3336,6 @@ static int phy_remove(struct device *dev)
+ return 0;
+ }
+
+-static void phy_shutdown(struct device *dev)
+-{
+- struct phy_device *phydev = to_phy_device(dev);
+-
+- if (phydev->state == PHY_READY || !phydev->attached_dev)
+- return;
+-
+- phy_disable_interrupts(phydev);
+-}
+-
+ /**
+ * phy_driver_register - register a phy_driver with the PHY layer
+ * @new_driver: new phy_driver to register
+@@ -3377,7 +3369,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
+ new_driver->mdiodrv.driver.bus = &mdio_bus_type;
+ new_driver->mdiodrv.driver.probe = phy_probe;
+ new_driver->mdiodrv.driver.remove = phy_remove;
+- new_driver->mdiodrv.driver.shutdown = phy_shutdown;
+ new_driver->mdiodrv.driver.owner = owner;
+ new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
+
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index d3dc22509ea58..382756c3fb837 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2200,7 +2200,9 @@ static void team_setup(struct net_device *dev)
+
+ dev->hw_features = TEAM_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_CTAG_RX |
+- NETIF_F_HW_VLAN_CTAG_FILTER;
++ NETIF_F_HW_VLAN_CTAG_FILTER |
++ NETIF_F_HW_VLAN_STAG_RX |
++ NETIF_F_HW_VLAN_STAG_FILTER;
+
+ dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+ dev->features |= dev->hw_features;
+diff --git a/drivers/net/veth.c b/drivers/net/veth.c
+index dce9f9d63e04e..76019949e3fe9 100644
+--- a/drivers/net/veth.c
++++ b/drivers/net/veth.c
+@@ -1071,8 +1071,9 @@ static int __veth_napi_enable_range(struct net_device *dev, int start, int end)
+ err_xdp_ring:
+ for (i--; i >= start; i--)
+ ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free);
++ i = end;
+ err_page_pool:
+- for (i = start; i < end; i++) {
++ for (i--; i >= start; i--) {
+ page_pool_destroy(priv->rq[i].page_pool);
+ priv->rq[i].page_pool = NULL;
+ }
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 2336a0e4befa5..9b310795617c8 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2652,7 +2652,7 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
+ vi->ctrl->rss.indirection_table[i] = indir_val;
+ }
+
+- vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
++ vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+ vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+
+ netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+@@ -4110,8 +4110,6 @@ static int virtnet_probe(struct virtio_device *vdev)
+ if (vi->has_rss || vi->has_rss_hash_report)
+ virtnet_init_default_rss(vi);
+
+- _virtnet_set_queues(vi, vi->curr_queue_pairs);
+-
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
+@@ -4124,6 +4122,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+
+ virtio_device_ready(vdev);
+
++ _virtnet_set_queues(vi, vi->curr_queue_pairs);
++
+ /* a random MAC address has been assigned, notify the device.
+ * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
+ * because many devices work fine without getting MAC explicitly
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 09825b4a075e5..e6eec85480ca9 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -223,6 +223,7 @@
+ #define EP_STATE_ENABLED 1
+
+ static const unsigned int pcie_gen_freq[] = {
++ GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */
+ GEN1_CORE_CLK_FREQ,
+ GEN2_CORE_CLK_FREQ,
+ GEN3_CORE_CLK_FREQ,
+@@ -459,7 +460,11 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
+
+ speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ PCI_EXP_LNKSTA_CLS;
+- clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
++
++ if (speed >= ARRAY_SIZE(pcie_gen_freq))
++ speed = 0;
++
++ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+
+ if (pcie->of_data->has_ltr_req_fix)
+ return IRQ_HANDLED;
+@@ -1020,7 +1025,11 @@ retry_link:
+
+ speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ PCI_EXP_LNKSTA_CLS;
+- clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
++
++ if (speed >= ARRAY_SIZE(pcie_gen_freq))
++ speed = 0;
++
++ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+
+ tegra_pcie_enable_interrupts(pp);
+
+diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
+index 471e0c5815f39..bf9d070a44966 100644
+--- a/drivers/pcmcia/rsrc_nonstatic.c
++++ b/drivers/pcmcia/rsrc_nonstatic.c
+@@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
+ q = p->next;
+ kfree(p);
+ }
++
++ kfree(data);
+ }
+
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index c5f52d4f7781b..1fb0a24356bf5 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -1039,6 +1039,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
+ const struct msm_pingroup *g;
++ u32 intr_target_mask = GENMASK(2, 0);
+ unsigned long flags;
+ bool was_enabled;
+ u32 val;
+@@ -1075,13 +1076,15 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ * With intr_target_use_scm interrupts are routed to
+ * application cpu using scm calls.
+ */
++ if (g->intr_target_width)
++ intr_target_mask = GENMASK(g->intr_target_width - 1, 0);
++
+ if (pctrl->intr_target_use_scm) {
+ u32 addr = pctrl->phys_base[0] + g->intr_target_reg;
+ int ret;
+
+ qcom_scm_io_readl(addr, &val);
+-
+- val &= ~(7 << g->intr_target_bit);
++ val &= ~(intr_target_mask << g->intr_target_bit);
+ val |= g->intr_target_kpss_val << g->intr_target_bit;
+
+ ret = qcom_scm_io_writel(addr, val);
+@@ -1091,7 +1094,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+ d->hwirq);
+ } else {
+ val = msm_readl_intr_target(pctrl, g);
+- val &= ~(7 << g->intr_target_bit);
++ val &= ~(intr_target_mask << g->intr_target_bit);
+ val |= g->intr_target_kpss_val << g->intr_target_bit;
+ msm_writel_intr_target(val, pctrl, g);
+ }
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
+index 985eceda25173..7f30416be127b 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.h
++++ b/drivers/pinctrl/qcom/pinctrl-msm.h
+@@ -51,6 +51,7 @@ struct msm_function {
+ * @intr_status_bit: Offset in @intr_status_reg for reading and acking the interrupt
+ * status.
+ * @intr_target_bit: Offset in @intr_target_reg for configuring the interrupt routing.
++ * @intr_target_width: Number of bits used for specifying interrupt routing target.
+ * @intr_target_kpss_val: Value in @intr_target_bit for specifying that the interrupt from
+ * this gpio should get routed to the KPSS processor.
+ * @intr_raw_status_bit: Offset in @intr_cfg_reg for the raw status bit.
+@@ -94,6 +95,7 @@ struct msm_pingroup {
+ unsigned intr_ack_high:1;
+
+ unsigned intr_target_bit:5;
++ unsigned intr_target_width:5;
+ unsigned intr_target_kpss_val:5;
+ unsigned intr_raw_status_bit:5;
+ unsigned intr_polarity_bit:5;
+diff --git a/drivers/pinctrl/qcom/pinctrl-sa8775p.c b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
+index 2ae7cdca65d3e..62f7a36d290cb 100644
+--- a/drivers/pinctrl/qcom/pinctrl-sa8775p.c
++++ b/drivers/pinctrl/qcom/pinctrl-sa8775p.c
+@@ -54,6 +54,7 @@
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
++ .intr_target_width = 4, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
+index dfd5ec9f75c90..a0621665a6d22 100644
+--- a/drivers/regulator/da9063-regulator.c
++++ b/drivers/regulator/da9063-regulator.c
+@@ -778,9 +778,6 @@ static int da9063_check_xvp_constraints(struct regulator_config *config)
+ const struct notification_limit *uv_l = &constr->under_voltage_limits;
+ const struct notification_limit *ov_l = &constr->over_voltage_limits;
+
+- if (!config->init_data) /* No config in DT, pointers will be invalid */
+- return 0;
+-
+ /* make sure that only one severity is used to clarify if unchanged, enabled or disabled */
+ if ((!!uv_l->prot + !!uv_l->err + !!uv_l->warn) > 1) {
+ dev_err(config->dev, "%s: at most one voltage monitoring severity allowed!\n",
+@@ -1031,9 +1028,12 @@ static int da9063_regulator_probe(struct platform_device *pdev)
+ config.of_node = da9063_reg_matches[id].of_node;
+ config.regmap = da9063->regmap;
+
+- ret = da9063_check_xvp_constraints(&config);
+- if (ret)
+- return ret;
++ /* Checking constraints requires init_data from DT. */
++ if (config.init_data) {
++ ret = da9063_check_xvp_constraints(&config);
++ if (ret)
++ return ret;
++ }
+
+ regl->rdev = devm_regulator_register(&pdev->dev, &regl->desc,
+ &config);
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index f3b280af07737..cd077b7c4aff3 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -1068,7 +1068,7 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, "vdd-l8-l9"),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_nldo515, "vdd-l1-l4-l10"),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_nldo515, "vdd-l11"),
+- RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, "vdd-l12"),
++ RPMH_VREG("ldo12", "ldo%s12", &pmic5_nldo515, "vdd-l12"),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_pldo, "vdd-l2-l13-l14"),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, "vdd-l2-l13-l14"),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo515, "vdd-l15"),
+diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
+index 1ca140356a084..3f759121dc00a 100644
+--- a/drivers/soc/aspeed/aspeed-socinfo.c
++++ b/drivers/soc/aspeed/aspeed-socinfo.c
+@@ -137,6 +137,7 @@ static int __init aspeed_socinfo_init(void)
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev)) {
++ kfree(attrs->machine);
+ kfree(attrs->soc_id);
+ kfree(attrs->serial_number);
+ kfree(attrs);
+diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
+index ef8b24fd18518..59123e1f27acb 100644
+--- a/drivers/soc/aspeed/aspeed-uart-routing.c
++++ b/drivers/soc/aspeed/aspeed-uart-routing.c
+@@ -524,7 +524,7 @@ static ssize_t aspeed_uart_routing_store(struct device *dev,
+ struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
+ int val;
+
+- val = match_string(sel->options, -1, buf);
++ val = __sysfs_match_string(sel->options, -1, buf);
+ if (val < 0) {
+ dev_err(dev, "invalid value \"%s\"\n", buf);
+ return -EINVAL;
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index e58beac442958..1257d1c41f8e5 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -1480,6 +1480,8 @@ static struct pci_device_id nhi_ids[] = {
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
+
+ /* Any USB4 compliant host */
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
+diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
+index b0718020c6f59..0f029ce758825 100644
+--- a/drivers/thunderbolt/nhi.h
++++ b/drivers/thunderbolt/nhi.h
+@@ -75,6 +75,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
+ #define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
+ #define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI 0x5781
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI 0x5784
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE 0x57a4
+ #define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2
+ #define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2
+ #define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 1157b8869bcca..8c2ee431fcde8 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -74,6 +74,14 @@ static const struct tb_quirk tb_quirks[] = {
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
++ quirk_usb3_maximum_bandwidth },
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
++ quirk_usb3_maximum_bandwidth },
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000,
++ quirk_usb3_maximum_bandwidth },
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
++ quirk_usb3_maximum_bandwidth },
+ /*
+ * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+ */
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index 9cc28197dbc45..edbd92435b41a 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -187,6 +187,21 @@ static ssize_t nvm_authenticate_show(struct device *dev,
+ return ret;
+ }
+
++static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
++{
++ int i;
++
++ tb_port_dbg(port, "reading NVM authentication status of retimers\n");
++
++ /*
++ * Before doing anything else, read the authentication status.
++ * If the retimer has it set, store it for the new retimer
++ * device instance.
++ */
++ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
++ usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
++}
++
+ static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
+ {
+ int i;
+@@ -455,18 +470,16 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ return ret;
+
+ /*
+- * Enable sideband channel for each retimer. We can do this
+- * regardless whether there is device connected or not.
++ * Immediately after sending enumerate retimers read the
++ * authentication status of each retimer.
+ */
+- tb_retimer_set_inbound_sbtx(port);
++ tb_retimer_nvm_authenticate_status(port, status);
+
+ /*
+- * Before doing anything else, read the authentication status.
+- * If the retimer has it set, store it for the new retimer
+- * device instance.
++ * Enable sideband channel for each retimer. We can do this
++ * regardless whether there is device connected or not.
+ */
+- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+- usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
++ tb_retimer_set_inbound_sbtx(port);
+
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
+ /*
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 1cdefac4dd1b5..739f522cb893c 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -3042,12 +3042,13 @@ static void gsm_error(struct gsm_mux *gsm)
+ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
+ {
+ int i;
+- struct gsm_dlci *dlci = gsm->dlci[0];
++ struct gsm_dlci *dlci;
+ struct gsm_msg *txq, *ntxq;
+
+ gsm->dead = true;
+ mutex_lock(&gsm->mutex);
+
++ dlci = gsm->dlci[0];
+ if (dlci) {
+ if (disc && dlci->state != DLCI_CLOSED) {
+ gsm_dlci_begin_close(dlci);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 053d44412e42f..0a67dff575f78 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -3288,6 +3288,7 @@ void serial8250_init_port(struct uart_8250_port *up)
+ struct uart_port *port = &up->port;
+
+ spin_lock_init(&port->lock);
++ port->pm = NULL;
+ port->ops = &serial8250_pops;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index f38606b750967..3e4992b281132 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1137,8 +1137,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
+ unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
+
+ if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
+- /* Read DR to clear the error flags */
+- lpuart32_read(&sport->port, UARTDATA);
++ /* Clear the error flags */
++ lpuart32_write(&sport->port, sr, UARTSTAT);
+
+ if (sr & UARTSTAT_PE)
+ sport->port.icount.parity++;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 1e38fc9b10c11..e9e11a2596211 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -1755,13 +1755,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+- int err;
+ u32 cr3;
+
+ pm_runtime_get_sync(&pdev->dev);
+- err = uart_remove_one_port(&stm32_usart_driver, port);
+- if (err)
+- return(err);
++ uart_remove_one_port(&stm32_usart_driver, port);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 2855ac3030014..f7577f2bd2c5d 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
+ CI_HDRC_PMQOS,
+ };
+
++static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
++ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
++};
++
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
+ { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
+@@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
+ { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
+ { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
++ { .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data},
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index c57c1a71a5132..681c2ddc83fa5 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -135,7 +135,7 @@
+ #define TXVREFTUNE0_MASK (0xf << 20)
+
+ #define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
+- MX6_BM_ID_WAKEUP)
++ MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN)
+
+ struct usbmisc_ops {
+ /* It's called once when probe a usb device */
+@@ -152,6 +152,7 @@ struct usbmisc_ops {
+ int (*charger_detection)(struct imx_usbmisc_data *data);
+ /* It's called when system resume from usb power lost */
+ int (*power_lost_check)(struct imx_usbmisc_data *data);
++ void (*vbus_comparator_on)(struct imx_usbmisc_data *data, bool on);
+ };
+
+ struct imx_usbmisc {
+@@ -875,6 +876,33 @@ static int imx7d_charger_detection(struct imx_usbmisc_data *data)
+ return ret;
+ }
+
++static void usbmisc_imx7d_vbus_comparator_on(struct imx_usbmisc_data *data,
++ bool on)
++{
++ unsigned long flags;
++ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
++ u32 val;
++
++ if (data->hsic)
++ return;
++
++ spin_lock_irqsave(&usbmisc->lock, flags);
++ /*
++	 * Disable the VBUS valid comparator when in suspend mode:
++	 * when OTG is disabled and DRVVBUS0 is asserted, the
++	 * Bandgap circuitry and VBUS Valid comparator remain
++	 * powered, even in Suspend or Sleep mode.
++ */
++ val = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
++ if (on)
++ val |= MX7D_USB_OTG_PHY_CFG2_DRVVBUS0;
++ else
++ val &= ~MX7D_USB_OTG_PHY_CFG2_DRVVBUS0;
++
++ writel(val, usbmisc->base + MX7D_USB_OTG_PHY_CFG2);
++ spin_unlock_irqrestore(&usbmisc->lock, flags);
++}
++
+ static int usbmisc_imx7ulp_init(struct imx_usbmisc_data *data)
+ {
+ struct imx_usbmisc *usbmisc = dev_get_drvdata(data->dev);
+@@ -1018,6 +1046,7 @@ static const struct usbmisc_ops imx7d_usbmisc_ops = {
+ .set_wakeup = usbmisc_imx7d_set_wakeup,
+ .charger_detection = imx7d_charger_detection,
+ .power_lost_check = usbmisc_imx7d_power_lost_check,
++ .vbus_comparator_on = usbmisc_imx7d_vbus_comparator_on,
+ };
+
+ static const struct usbmisc_ops imx7ulp_usbmisc_ops = {
+@@ -1132,6 +1161,9 @@ int imx_usbmisc_suspend(struct imx_usbmisc_data *data, bool wakeup)
+
+ usbmisc = dev_get_drvdata(data->dev);
+
++ if (usbmisc->ops->vbus_comparator_on)
++ usbmisc->ops->vbus_comparator_on(data, false);
++
+ if (wakeup && usbmisc->ops->set_wakeup)
+ ret = usbmisc->ops->set_wakeup(data, true);
+ if (ret) {
+@@ -1185,6 +1217,9 @@ int imx_usbmisc_resume(struct imx_usbmisc_data *data, bool wakeup)
+ goto hsic_set_clk_fail;
+ }
+
++ if (usbmisc->ops->vbus_comparator_on)
++ usbmisc->ops->vbus_comparator_on(data, true);
++
+ return 0;
+
+ hsic_set_clk_fail:
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index e5d522d54f6a3..97f07757d19e3 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -916,8 +916,11 @@ static void __gs_console_push(struct gs_console *cons)
+ }
+
+ req->length = size;
++
++ spin_unlock_irq(&cons->lock);
+ if (usb_ep_queue(ep, req, GFP_ATOMIC))
+ req->length = 0;
++ spin_lock_irq(&cons->lock);
+ }
+
+ static void gs_console_work(struct work_struct *work)
+diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
+index dd1c6b2ca7c6f..e81865978299c 100644
+--- a/drivers/usb/gadget/function/uvc_video.c
++++ b/drivers/usb/gadget/function/uvc_video.c
+@@ -386,6 +386,9 @@ static void uvcg_video_pump(struct work_struct *work)
+ struct uvc_buffer *buf;
+ unsigned long flags;
+ int ret;
++ bool buf_int;
++ /* video->max_payload_size is only set when using bulk transfer */
++ bool is_bulk = video->max_payload_size;
+
+ while (video->ep->enabled) {
+ /*
+@@ -408,20 +411,35 @@ static void uvcg_video_pump(struct work_struct *work)
+ */
+ spin_lock_irqsave(&queue->irqlock, flags);
+ buf = uvcg_queue_head(queue);
+- if (buf == NULL) {
++
++ if (buf != NULL) {
++ video->encode(req, video, buf);
++ /* Always interrupt for the last request of a video buffer */
++ buf_int = buf->state == UVC_BUF_STATE_DONE;
++ } else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
++ /*
++ * No video buffer available; the queue is still connected and
++		 * we're transferring over ISOC. Queue a 0 length request to
++ * prevent missed ISOC transfers.
++ */
++ req->length = 0;
++ buf_int = false;
++ } else {
++ /*
++			 * Either the queue has been disconnected or no video buffer
++			 * is available for bulk transfer. Either way, stop processing
++ * further.
++ */
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+ break;
+ }
+
+- video->encode(req, video, buf);
+-
+ /*
+ * With usb3 we have more requests. This will decrease the
+ * interrupt load to a quarter but also catches the corner
+ * cases, which needs to be handled.
+ */
+- if (list_empty(&video->req_free) ||
+- buf->state == UVC_BUF_STATE_DONE ||
++ if (list_empty(&video->req_free) || buf_int ||
+ !(video->req_int_count %
+ DIV_ROUND_UP(video->uvc_num_requests, 4))) {
+ video->req_int_count = 0;
+@@ -441,8 +459,7 @@ static void uvcg_video_pump(struct work_struct *work)
+
+ /* Endpoint now owns the request */
+ req = NULL;
+- if (buf->state != UVC_BUF_STATE_DONE)
+- video->req_int_count++;
++ video->req_int_count++;
+ }
+
+ if (!req)
+@@ -527,4 +544,3 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
+ V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
+ return 0;
+ }
+-
+diff --git a/drivers/usb/host/xhci-histb.c b/drivers/usb/host/xhci-histb.c
+index 91ce97821de51..7c20477550830 100644
+--- a/drivers/usb/host/xhci-histb.c
++++ b/drivers/usb/host/xhci-histb.c
+@@ -164,16 +164,6 @@ static void xhci_histb_host_disable(struct xhci_hcd_histb *histb)
+ clk_disable_unprepare(histb->bus_clk);
+ }
+
+-static void xhci_histb_quirks(struct device *dev, struct xhci_hcd *xhci)
+-{
+- /*
+- * As of now platform drivers don't provide MSI support so we ensure
+- * here that the generic code does not try to make a pci_dev from our
+- * dev struct in order to setup MSI
+- */
+- xhci->quirks |= XHCI_PLAT;
+-}
+-
+ /* called during probe() after chip reset completes */
+ static int xhci_histb_setup(struct usb_hcd *hcd)
+ {
+@@ -186,7 +176,7 @@ static int xhci_histb_setup(struct usb_hcd *hcd)
+ return ret;
+ }
+
+- return xhci_gen_setup(hcd, xhci_histb_quirks);
++ return xhci_gen_setup(hcd, NULL);
+ }
+
+ static const struct xhci_driver_overrides xhci_histb_overrides __initconst = {
+diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
+index b60521e1a9a63..9a40da3b0064b 100644
+--- a/drivers/usb/host/xhci-mtk.c
++++ b/drivers/usb/host/xhci-mtk.c
+@@ -418,12 +418,6 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+
+- /*
+- * As of now platform drivers don't provide MSI support so we ensure
+- * here that the generic code does not try to make a pci_dev from our
+- * dev struct in order to setup MSI
+- */
+- xhci->quirks |= XHCI_PLAT;
+ xhci->quirks |= XHCI_MTK_HOST;
+ /*
+ * MTK host controller gives a spurious successful event after a
+diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
+index db9826c38b20b..9540f0e48c215 100644
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -108,9 +108,6 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
+ struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+
+- if (xhci->quirks & XHCI_PLAT)
+- return;
+-
+ /* return if using legacy interrupt */
+ if (hcd->irq > 0)
+ return;
+@@ -208,10 +205,6 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
+ struct pci_dev *pdev;
+ int ret;
+
+- /* The xhci platform device has set up IRQs through usb_add_hcd. */
+- if (xhci->quirks & XHCI_PLAT)
+- return 0;
+-
+ pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ /*
+ * Some Fresco Logic host controllers advertise MSI, but fail to
+diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
+index f36633fa83624..80da67a6c3bf2 100644
+--- a/drivers/usb/host/xhci-plat.c
++++ b/drivers/usb/host/xhci-plat.c
+@@ -78,12 +78,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
+ {
+ struct xhci_plat_priv *priv = xhci_to_priv(xhci);
+
+- /*
+- * As of now platform drivers don't provide MSI support so we ensure
+- * here that the generic code does not try to make a pci_dev from our
+- * dev struct in order to setup MSI
+- */
+- xhci->quirks |= XHCI_PLAT | priv->quirks;
++ xhci->quirks |= priv->quirks;
+ }
+
+ /* called during probe() after chip reset completes */
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index d28fa892c2866..07a319db58034 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -2662,7 +2662,6 @@ static void tegra_xhci_quirks(struct device *dev, struct xhci_hcd *xhci)
+ {
+ struct tegra_xusb *tegra = dev_get_drvdata(dev);
+
+- xhci->quirks |= XHCI_PLAT;
+ if (tegra && tegra->soc->lpm_support)
+ xhci->quirks |= XHCI_LPM_SUPPORT;
+ }
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 4474d540f6b49..0b1928851a2a9 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1874,7 +1874,7 @@ struct xhci_hcd {
+ #define XHCI_SPURIOUS_REBOOT BIT_ULL(13)
+ #define XHCI_COMP_MODE_QUIRK BIT_ULL(14)
+ #define XHCI_AVOID_BEI BIT_ULL(15)
+-#define XHCI_PLAT BIT_ULL(16)
++#define XHCI_PLAT BIT_ULL(16) /* Deprecated */
+ #define XHCI_SLOW_SUSPEND BIT_ULL(17)
+ #define XHCI_SPURIOUS_WAKEUP BIT_ULL(18)
+ /* For controllers with a broken beyond repair streams implementation */
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 25fc4120b618d..b53420e874acb 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -31,6 +31,7 @@ struct mlx5_vdpa_mr {
+ struct list_head head;
+ unsigned long num_directs;
+ unsigned long num_klms;
++ /* state of dvq mr */
+ bool initialized;
+
+ /* serialize mkey creation and destruction */
+@@ -121,6 +122,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
+ int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ unsigned int asid);
+ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+
+ #define mlx5_vdpa_warn(__dev, format, ...) \
+ dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index 03e5432297912..5a1971fcd87b1 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -489,60 +489,103 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
+ }
+ }
+
+-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++ return;
++
++ prune_iotlb(mvdev);
++}
++
++static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+- mutex_lock(&mr->mkey_mtx);
++ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
++ return;
++
+ if (!mr->initialized)
+- goto out;
++ return;
+
+- prune_iotlb(mvdev);
+ if (mr->user_mr)
+ destroy_user_mr(mvdev, mr);
+ else
+ destroy_dma_mr(mvdev, mr);
+
+ mr->initialized = false;
+-out:
++}
++
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++ struct mlx5_vdpa_mr *mr = &mvdev->mr;
++
++ mutex_lock(&mr->mkey_mtx);
++
++ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
++ _mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
++
+ mutex_unlock(&mr->mkey_mtx);
+ }
+
+-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+- struct vhost_iotlb *iotlb, unsigned int asid)
++void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++{
++ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
++ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
++}
++
++static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb,
++ unsigned int asid)
++{
++ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++ return 0;
++
++ return dup_iotlb(mvdev, iotlb);
++}
++
++static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb,
++ unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err;
+
+- if (mr->initialized)
++ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
+ return 0;
+
+- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+- if (iotlb)
+- err = create_user_mr(mvdev, iotlb);
+- else
+- err = create_dma_mr(mvdev, mr);
++ if (mr->initialized)
++ return 0;
+
+- if (err)
+- return err;
+- }
++ if (iotlb)
++ err = create_user_mr(mvdev, iotlb);
++ else
++ err = create_dma_mr(mvdev, mr);
+
+- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
+- err = dup_iotlb(mvdev, iotlb);
+- if (err)
+- goto out_err;
+- }
++ if (err)
++ return err;
+
+ mr->initialized = true;
++
++ return 0;
++}
++
++static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb, unsigned int asid)
++{
++ int err;
++
++ err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
++ if (err)
++ return err;
++
++ err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
++ if (err)
++ goto out_err;
++
+ return 0;
+
+ out_err:
+- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+- if (iotlb)
+- destroy_user_mr(mvdev, mr);
+- else
+- destroy_dma_mr(mvdev, mr);
+- }
++ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
+
+ return err;
+ }
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index 279ac6a558d29..f18a9301ab94e 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2564,7 +2564,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ goto err_mr;
+
+ teardown_driver(ndev);
+- mlx5_vdpa_destroy_mr(mvdev);
++ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
+ if (err)
+ goto err_mr;
+@@ -2580,7 +2580,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ return 0;
+
+ err_setup:
+- mlx5_vdpa_destroy_mr(mvdev);
++ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ err_mr:
+ return err;
+ }
+diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
+index 965e32529eb85..a7612e0783b36 100644
+--- a/drivers/vdpa/vdpa.c
++++ b/drivers/vdpa/vdpa.c
+@@ -1247,44 +1247,41 @@ static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
+ [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
+ [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
+ [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
++ [VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 },
+ /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
+ [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
++ [VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 },
++ [VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 },
+ };
+
+ static const struct genl_ops vdpa_nl_ops[] = {
+ {
+ .cmd = VDPA_CMD_MGMTDEV_GET,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_mgmtdev_get_doit,
+ .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_NEW,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_add_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_DEL,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_del_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_GET,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_get_doit,
+ .dumpit = vdpa_nl_cmd_dev_get_dumpit,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_CONFIG_GET,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_config_get_doit,
+ .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_VSTATS_GET,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_stats_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index 0d84e6a9c3cca..76d4ab451f599 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -935,10 +935,10 @@ static void vduse_dev_irq_inject(struct work_struct *work)
+ {
+ struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
+
+- spin_lock_irq(&dev->irq_lock);
++ spin_lock_bh(&dev->irq_lock);
+ if (dev->config_cb.callback)
+ dev->config_cb.callback(dev->config_cb.private);
+- spin_unlock_irq(&dev->irq_lock);
++ spin_unlock_bh(&dev->irq_lock);
+ }
+
+ static void vduse_vq_irq_inject(struct work_struct *work)
+@@ -946,10 +946,10 @@ static void vduse_vq_irq_inject(struct work_struct *work)
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, inject);
+
+- spin_lock_irq(&vq->irq_lock);
++ spin_lock_bh(&vq->irq_lock);
+ if (vq->ready && vq->cb.callback)
+ vq->cb.callback(vq->cb.private);
+- spin_unlock_irq(&vq->irq_lock);
++ spin_unlock_bh(&vq->irq_lock);
+ }
+
+ static bool vduse_vq_signal_irqfd(struct vduse_virtqueue *vq)
+diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+index 51fbf02a03430..76b50b6c98ad9 100644
+--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
++++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+@@ -519,7 +519,9 @@ static int mmphw_probe(struct platform_device *pdev)
+ "unable to get clk %s\n", mi->clk_name);
+ goto failed;
+ }
+- clk_prepare_enable(ctrl->clk);
++ ret = clk_prepare_enable(ctrl->clk);
++ if (ret)
++ goto failed;
+
+ /* init global regs */
+ ctrl_set_default(ctrl);
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
+index a46a4a29e9295..97760f6112959 100644
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -607,9 +607,8 @@ static void virtio_mmio_release_dev(struct device *_d)
+ struct virtio_device *vdev =
+ container_of(_d, struct virtio_device, dev);
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+- struct platform_device *pdev = vm_dev->pdev;
+
+- devm_kfree(&pdev->dev, vm_dev);
++ kfree(vm_dev);
+ }
+
+ /* Platform device */
+@@ -620,7 +619,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ unsigned long magic;
+ int rc;
+
+- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
++ vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
+ if (!vm_dev)
+ return -ENOMEM;
+
+diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
+index 989e2d7184ce4..961161da59000 100644
+--- a/drivers/virtio/virtio_vdpa.c
++++ b/drivers/virtio/virtio_vdpa.c
+@@ -393,11 +393,13 @@ static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
+ cb.callback = virtio_vdpa_config_cb;
+ cb.private = vd_dev;
+ ops->set_config_cb(vdpa, &cb);
++ kfree(masks);
+
+ return 0;
+
+ err_setup_vq:
+ virtio_vdpa_del_vqs(vdev);
++ kfree(masks);
+ return err;
+ }
+
+diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
+index 14f8d8d90920f..2bd3dc25cb030 100644
+--- a/drivers/watchdog/sp5100_tco.c
++++ b/drivers/watchdog/sp5100_tco.c
+@@ -96,7 +96,7 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev)
+ sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+ sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) {
+ return efch_mmio;
+- } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
++ } else if ((dev->vendor == PCI_VENDOR_ID_AMD || dev->vendor == PCI_VENDOR_ID_HYGON) &&
+ ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ dev->revision >= 0x41) ||
+ (dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+@@ -579,6 +579,8 @@ static const struct pci_device_id sp5100_tco_pci_tbl[] = {
+ PCI_ANY_ID, },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID,
+ PCI_ANY_ID, },
++ { PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID,
++ PCI_ANY_ID, },
+ { 0, }, /* End of list */
+ };
+ MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 2a60033d907bf..a250afa655d5c 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1670,6 +1670,10 @@ void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
+ btrfs_get_block_group(bg);
+ trace_btrfs_add_unused_block_group(bg);
+ list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
++ } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
++ /* Pull out the block group from the reclaim_bgs list. */
++ trace_btrfs_add_unused_block_group(bg);
++ list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+ }
+@@ -2693,6 +2697,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
+ next:
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
+ list_del_init(&block_group->bg_list);
++ clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
+ }
+ btrfs_trans_release_chunk_metadata(trans);
+ }
+@@ -2732,6 +2737,13 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
+ if (!cache)
+ return ERR_PTR(-ENOMEM);
+
++ /*
++ * Mark it as new before adding it to the rbtree of block groups or any
++ * list, so that no other task finds it and calls btrfs_mark_bg_unused()
++ * before the new flag is set.
++ */
++ set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
++
+ cache->length = size;
+ set_free_space_tree_thresholds(cache);
+ cache->flags = type;
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index 471f591db7c0c..0852f6c101f82 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -70,6 +70,11 @@ enum btrfs_block_group_flags {
+ BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
+ /* Indicate that the block group is placed on a sequential zone */
+ BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
++ /*
++	 * Indicate that the block group is in the list of new block groups of a
++ * transaction.
++ */
++ BLOCK_GROUP_FLAG_NEW,
+ };
+
+ enum btrfs_caching_type {
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index 4c1986cd5bed5..cff98526679e7 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -443,6 +443,7 @@ struct btrfs_drop_extents_args {
+
+ struct btrfs_file_private {
+ void *filldir_buf;
++ u64 last_index;
+ struct extent_state *llseek_cached_state;
+ };
+
+diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
+index 6b457b010cbc4..6d51db066503b 100644
+--- a/fs/btrfs/delayed-inode.c
++++ b/fs/btrfs/delayed-inode.c
+@@ -1632,6 +1632,7 @@ int btrfs_inode_delayed_dir_index_count(struct btrfs_inode *inode)
+ }
+
+ bool btrfs_readdir_get_delayed_items(struct inode *inode,
++ u64 last_index,
+ struct list_head *ins_list,
+ struct list_head *del_list)
+ {
+@@ -1651,14 +1652,14 @@ bool btrfs_readdir_get_delayed_items(struct inode *inode,
+
+ mutex_lock(&delayed_node->mutex);
+ item = __btrfs_first_delayed_insertion_item(delayed_node);
+- while (item) {
++ while (item && item->index <= last_index) {
+ refcount_inc(&item->refs);
+ list_add_tail(&item->readdir_list, ins_list);
+ item = __btrfs_next_delayed_item(item);
+ }
+
+ item = __btrfs_first_delayed_deletion_item(delayed_node);
+- while (item) {
++ while (item && item->index <= last_index) {
+ refcount_inc(&item->refs);
+ list_add_tail(&item->readdir_list, del_list);
+ item = __btrfs_next_delayed_item(item);
+diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
+index 4f21daa3dbc7b..dc1085b2a3976 100644
+--- a/fs/btrfs/delayed-inode.h
++++ b/fs/btrfs/delayed-inode.h
+@@ -148,6 +148,7 @@ void btrfs_destroy_delayed_inodes(struct btrfs_fs_info *fs_info);
+
+ /* Used for readdir() */
+ bool btrfs_readdir_get_delayed_items(struct inode *inode,
++ u64 last_index,
+ struct list_head *ins_list,
+ struct list_head *del_list);
+ void btrfs_readdir_put_delayed_items(struct inode *inode,
+diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
+index 54eed5a8a412b..00f260c8bd60a 100644
+--- a/fs/btrfs/extent_io.c
++++ b/fs/btrfs/extent_io.c
+@@ -962,7 +962,30 @@ static void submit_extent_page(struct btrfs_bio_ctrl *bio_ctrl,
+ size -= len;
+ pg_offset += len;
+ disk_bytenr += len;
+- bio_ctrl->len_to_oe_boundary -= len;
++
++ /*
++ * len_to_oe_boundary defaults to U32_MAX, which isn't page or
++ * sector aligned. alloc_new_bio() then sets it to the end of
++ * our ordered extent for writes into zoned devices.
++ *
++ * When len_to_oe_boundary is tracking an ordered extent, we
++ * trust the ordered extent code to align things properly, and
++ * the check above to cap our write to the ordered extent
++ * boundary is correct.
++ *
++ * When len_to_oe_boundary is U32_MAX, the cap above would
++ * result in a 4095 byte IO for the last page right before
++ * we hit the bio limit of UINT_MAX. bio_add_page() has all
++ * the checks required to make sure we don't overflow the bio,
++ * and we should just ignore len_to_oe_boundary completely
++ * unless we're using it to track an ordered extent.
++ *
++ * It's pretty hard to make a bio sized U32_MAX, but it can
++ * happen when the page cache is able to feed us contiguous
++ * pages for large extents.
++ */
++ if (bio_ctrl->len_to_oe_boundary != U32_MAX)
++ bio_ctrl->len_to_oe_boundary -= len;
+
+ /* Ordered extent boundary: move on to a new bio. */
+ if (bio_ctrl->len_to_oe_boundary == 0)
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 138afa955370b..367ed73cb6c74 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -758,8 +758,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+
+ if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
+ start = em_end;
+- if (end != (u64)-1)
+- len = start + len - em_end;
+ goto next;
+ }
+
+@@ -827,8 +825,8 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ if (!split)
+ goto remove_em;
+ }
+- split->start = start + len;
+- split->len = em_end - (start + len);
++ split->start = end;
++ split->len = em_end - end;
+ split->block_start = em->block_start;
+ split->flags = flags;
+ split->compress_type = em->compress_type;
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index ace949bc75059..a446965d701db 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -5744,6 +5744,74 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
+ return d_splice_alias(inode, dentry);
+ }
+
++/*
++ * Find the highest existing sequence number in a directory and then set the
++ * in-memory index_cnt variable to the first free sequence number.
++ */
++static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
++{
++ struct btrfs_root *root = inode->root;
++ struct btrfs_key key, found_key;
++ struct btrfs_path *path;
++ struct extent_buffer *leaf;
++ int ret;
++
++ key.objectid = btrfs_ino(inode);
++ key.type = BTRFS_DIR_INDEX_KEY;
++ key.offset = (u64)-1;
++
++ path = btrfs_alloc_path();
++ if (!path)
++ return -ENOMEM;
++
++ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
++ if (ret < 0)
++ goto out;
++ /* FIXME: we should be able to handle this */
++ if (ret == 0)
++ goto out;
++ ret = 0;
++
++ if (path->slots[0] == 0) {
++ inode->index_cnt = BTRFS_DIR_START_INDEX;
++ goto out;
++ }
++
++ path->slots[0]--;
++
++ leaf = path->nodes[0];
++ btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
++
++ if (found_key.objectid != btrfs_ino(inode) ||
++ found_key.type != BTRFS_DIR_INDEX_KEY) {
++ inode->index_cnt = BTRFS_DIR_START_INDEX;
++ goto out;
++ }
++
++ inode->index_cnt = found_key.offset + 1;
++out:
++ btrfs_free_path(path);
++ return ret;
++}
++
++static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
++{
++ if (dir->index_cnt == (u64)-1) {
++ int ret;
++
++ ret = btrfs_inode_delayed_dir_index_count(dir);
++ if (ret) {
++ ret = btrfs_set_inode_index_count(dir);
++ if (ret)
++ return ret;
++ }
++ }
++
++ *index = dir->index_cnt;
++
++ return 0;
++}
++
+ /*
+ * All this infrastructure exists because dir_emit can fault, and we are holding
+ * the tree lock when doing readdir. For now just allocate a buffer and copy
+@@ -5756,10 +5824,17 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
+ static int btrfs_opendir(struct inode *inode, struct file *file)
+ {
+ struct btrfs_file_private *private;
++ u64 last_index;
++ int ret;
++
++ ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
++ if (ret)
++ return ret;
+
+ private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
+ if (!private)
+ return -ENOMEM;
++ private->last_index = last_index;
+ private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!private->filldir_buf) {
+ kfree(private);
+@@ -5826,7 +5901,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
+
+ INIT_LIST_HEAD(&ins_list);
+ INIT_LIST_HEAD(&del_list);
+- put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);
++ put = btrfs_readdir_get_delayed_items(inode, private->last_index,
++ &ins_list, &del_list);
+
+ again:
+ key.type = BTRFS_DIR_INDEX_KEY;
+@@ -5844,6 +5920,8 @@ again:
+ break;
+ if (found_key.offset < ctx->pos)
+ continue;
++ if (found_key.offset > private->last_index)
++ break;
+ if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
+ continue;
+ di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
+@@ -5979,57 +6057,6 @@ static int btrfs_update_time(struct inode *inode, struct timespec64 *now,
+ return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
+ }
+
+-/*
+- * find the highest existing sequence number in a directory
+- * and then set the in-memory index_cnt variable to reflect
+- * free sequence numbers
+- */
+-static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
+-{
+- struct btrfs_root *root = inode->root;
+- struct btrfs_key key, found_key;
+- struct btrfs_path *path;
+- struct extent_buffer *leaf;
+- int ret;
+-
+- key.objectid = btrfs_ino(inode);
+- key.type = BTRFS_DIR_INDEX_KEY;
+- key.offset = (u64)-1;
+-
+- path = btrfs_alloc_path();
+- if (!path)
+- return -ENOMEM;
+-
+- ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+- if (ret < 0)
+- goto out;
+- /* FIXME: we should be able to handle this */
+- if (ret == 0)
+- goto out;
+- ret = 0;
+-
+- if (path->slots[0] == 0) {
+- inode->index_cnt = BTRFS_DIR_START_INDEX;
+- goto out;
+- }
+-
+- path->slots[0]--;
+-
+- leaf = path->nodes[0];
+- btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+-
+- if (found_key.objectid != btrfs_ino(inode) ||
+- found_key.type != BTRFS_DIR_INDEX_KEY) {
+- inode->index_cnt = BTRFS_DIR_START_INDEX;
+- goto out;
+- }
+-
+- inode->index_cnt = found_key.offset + 1;
+-out:
+- btrfs_free_path(path);
+- return ret;
+-}
+-
+ /*
+ * helper to find a free sequence number in a given directory. This current
+ * code is very simple, later versions will do smarter things in the btree
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 16c228344cbb8..2feb7f2294233 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -655,7 +655,8 @@ static void scrub_verify_one_metadata(struct scrub_stripe *stripe, int sector_nr
+ btrfs_stack_header_bytenr(header), logical);
+ return;
+ }
+- if (memcmp(header->fsid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE) != 0) {
++ if (memcmp(header->fsid, fs_info->fs_devices->metadata_uuid,
++ BTRFS_FSID_SIZE) != 0) {
+ bitmap_set(&stripe->meta_error_bitmap, sector_nr, sectors_per_tree);
+ bitmap_set(&stripe->error_bitmap, sector_nr, sectors_per_tree);
+ btrfs_warn_rl(fs_info,
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 436e15e3759da..30977f10e36b7 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4631,8 +4631,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
+ }
+ }
+
+- BUG_ON(fs_info->balance_ctl ||
+- test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
++ ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
+ atomic_dec(&fs_info->balance_cancel_req);
+ mutex_unlock(&fs_info->balance_mutex);
+ return 0;
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 83c4abff496da..5fb367b1d4b06 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -645,6 +645,7 @@ bad:
+ err = -EIO;
+ out_bad:
+ pr_err("mds parse_reply err %d\n", err);
++ ceph_msg_dump(msg);
+ return err;
+ }
+
+@@ -3538,6 +3539,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
+
+ bad:
+ pr_err("mdsc_handle_forward decode error err=%d\n", err);
++ ceph_msg_dump(msg);
+ }
+
+ static int __decode_session_metadata(void **p, void *end,
+@@ -5258,6 +5260,7 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+ bad:
+ pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
+ ceph_umount_begin(mdsc->fsc->sb);
++ ceph_msg_dump(msg);
+ err_out:
+ mutex_lock(&mdsc->mutex);
+ mdsc->mdsmap_err = err;
+@@ -5326,6 +5329,7 @@ bad_unlock:
+ bad:
+ pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
+ ceph_umount_begin(mdsc->fsc->sb);
++ ceph_msg_dump(msg);
+ return;
+ }
+
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index a84bf6444bba9..204ba7f8417e6 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -1004,7 +1004,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ {
+ struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
+ struct gfs2_args *args = &sdp->sd_args;
+- int val;
++ unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
++
++ spin_lock(&sdp->sd_tune.gt_spin);
++ logd_secs = sdp->sd_tune.gt_logd_secs;
++ quota_quantum = sdp->sd_tune.gt_quota_quantum;
++ statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
++ statfs_slow = sdp->sd_tune.gt_statfs_slow;
++ spin_unlock(&sdp->sd_tune.gt_spin);
+
+ if (is_ancestor(root, sdp->sd_master_dir))
+ seq_puts(s, ",meta");
+@@ -1059,17 +1066,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ }
+ if (args->ar_discard)
+ seq_puts(s, ",discard");
+- val = sdp->sd_tune.gt_logd_secs;
+- if (val != 30)
+- seq_printf(s, ",commit=%d", val);
+- val = sdp->sd_tune.gt_statfs_quantum;
+- if (val != 30)
+- seq_printf(s, ",statfs_quantum=%d", val);
+- else if (sdp->sd_tune.gt_statfs_slow)
++ if (logd_secs != 30)
++ seq_printf(s, ",commit=%d", logd_secs);
++ if (statfs_quantum != 30)
++ seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
++ else if (statfs_slow)
+ seq_puts(s, ",statfs_quantum=0");
+- val = sdp->sd_tune.gt_quota_quantum;
+- if (val != 60)
+- seq_printf(s, ",quota_quantum=%d", val);
++ if (quota_quantum != 60)
++ seq_printf(s, ",quota_quantum=%d", quota_quantum);
+ if (args->ar_statfs_percent)
+ seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
+ if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
+diff --git a/fs/netfs/iterator.c b/fs/netfs/iterator.c
+index 8a4c866874297..facb84f262dc7 100644
+--- a/fs/netfs/iterator.c
++++ b/fs/netfs/iterator.c
+@@ -151,7 +151,7 @@ static ssize_t netfs_extract_user_to_sg(struct iov_iter *iter,
+
+ failed:
+ while (sgtable->nents > sgtable->orig_nents)
+- put_page(sg_page(&sgtable->sgl[--sgtable->nents]));
++ unpin_user_page(sg_page(&sgtable->sgl[--sgtable->nents]));
+ return res;
+ }
+
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 2bfcf1a989c95..50214b77c6a35 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -874,6 +874,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ if (err)
+ goto out1;
+
++ err = -EINVAL;
+ /* Call mi_remove_attr() in reverse order to keep pointers 'arr_move' valid. */
+ while (to_free > 0) {
+ struct ATTRIB *b = arr_move[--nb];
+@@ -882,7 +883,8 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+
+ attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
+ b->name_len, asize, name_off);
+- WARN_ON(!attr);
++ if (!attr)
++ goto out1;
+
+ mi_get_ref(mi, &le_b[nb]->ref);
+ le_b[nb]->id = attr->id;
+@@ -892,17 +894,20 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ attr->id = le_b[nb]->id;
+
+ /* Remove from primary record. */
+- WARN_ON(!mi_remove_attr(NULL, &ni->mi, b));
++ if (!mi_remove_attr(NULL, &ni->mi, b))
++ goto out1;
+
+ if (to_free <= asize)
+ break;
+ to_free -= asize;
+- WARN_ON(!nb);
++ if (!nb)
++ goto out1;
+ }
+
+ attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
+ lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
+- WARN_ON(!attr);
++ if (!attr)
++ goto out1;
+
+ attr->non_res = 0;
+ attr->flags = 0;
+@@ -922,9 +927,10 @@ out1:
+ kfree(ni->attr_list.le);
+ ni->attr_list.le = NULL;
+ ni->attr_list.size = 0;
++ return err;
+
+ out:
+- return err;
++ return 0;
+ }
+
+ /*
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index 28cc421102e59..21567e58265c4 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -178,7 +178,7 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
+ /* Check errors. */
+ if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
+ fn * SECTOR_SIZE > bytes) {
+- return -EINVAL; /* Native chkntfs returns ok! */
++ return -E_NTFS_CORRUPT;
+ }
+
+ /* Get fixup pointer. */
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 0a48d2d672198..b40da258e6848 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1113,6 +1113,12 @@ ok:
+ *node = in;
+
+ out:
++ if (err == -E_NTFS_CORRUPT) {
++ ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
++ ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++ err = -EINVAL;
++ }
++
+ if (ib != in->index)
+ kfree(ib);
+
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index eb01f7e76479a..2e4be773728df 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -53,6 +53,8 @@ enum utf16_endian;
+ #define E_NTFS_NONRESIDENT 556
+ /* NTFS specific error code about punch hole. */
+ #define E_NTFS_NOTALIGNED 557
++/* NTFS specific error code when on-disk struct is corrupted. */
++#define E_NTFS_CORRUPT 558
+
+
+ /* sbi->flags */
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index 2a281cead2bcc..7974ca35a15c6 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -124,7 +124,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
+ struct rw_semaphore *rw_lock = NULL;
+
+ if (is_mounted(sbi)) {
+- if (!is_mft) {
++ if (!is_mft && mft_ni) {
+ rw_lock = &mft_ni->file.run_lock;
+ down_read(rw_lock);
+ }
+@@ -148,7 +148,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
+ ni_lock(mft_ni);
+ down_write(rw_lock);
+ }
+- err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
++ err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
+ vbo >> sbi->cluster_bits);
+ if (rw_lock) {
+ up_write(rw_lock);
+@@ -180,6 +180,12 @@ ok:
+ return 0;
+
+ out:
++ if (err == -E_NTFS_CORRUPT) {
++ ntfs_err(sbi->sb, "mft corrupted");
++ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++ err = -EINVAL;
++ }
++
+ return err;
+ }
+
+diff --git a/fs/ntfs3/super.c b/fs/ntfs3/super.c
+index 5158dd31fd97f..ecf899d571d83 100644
+--- a/fs/ntfs3/super.c
++++ b/fs/ntfs3/super.c
+@@ -724,6 +724,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ struct MFT_REC *rec;
+ u16 fn, ao;
+ u8 cluster_bits;
++ u32 boot_off = 0;
++ const char *hint = "Primary boot";
+
+ sbi->volume.blocks = dev_size >> PAGE_SHIFT;
+
+@@ -731,11 +733,12 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ if (!bh)
+ return -EIO;
+
++check_boot:
+ err = -EINVAL;
+- boot = (struct NTFS_BOOT *)bh->b_data;
++ boot = (struct NTFS_BOOT *)Add2Ptr(bh->b_data, boot_off);
+
+ if (memcmp(boot->system_id, "NTFS ", sizeof("NTFS ") - 1)) {
+- ntfs_err(sb, "Boot's signature is not NTFS.");
++ ntfs_err(sb, "%s signature is not NTFS.", hint);
+ goto out;
+ }
+
+@@ -748,14 +751,16 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ boot->bytes_per_sector[0];
+ if (boot_sector_size < SECTOR_SIZE ||
+ !is_power_of_2(boot_sector_size)) {
+- ntfs_err(sb, "Invalid bytes per sector %u.", boot_sector_size);
++ ntfs_err(sb, "%s: invalid bytes per sector %u.", hint,
++ boot_sector_size);
+ goto out;
+ }
+
+ /* cluster size: 512, 1K, 2K, 4K, ... 2M */
+ sct_per_clst = true_sectors_per_clst(boot);
+ if ((int)sct_per_clst < 0 || !is_power_of_2(sct_per_clst)) {
+- ntfs_err(sb, "Invalid sectors per cluster %u.", sct_per_clst);
++ ntfs_err(sb, "%s: invalid sectors per cluster %u.", hint,
++ sct_per_clst);
+ goto out;
+ }
+
+@@ -771,8 +776,8 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+ if (mlcn * sct_per_clst >= sectors || mlcn2 * sct_per_clst >= sectors) {
+ ntfs_err(
+ sb,
+- "Start of MFT 0x%llx (0x%llx) is out of volume 0x%llx.",
+- mlcn, mlcn2, sectors);
++ "%s: start of MFT 0x%llx (0x%llx) is out of volume 0x%llx.",
++ hint, mlcn, mlcn2, sectors);
+ goto out;
+ }
+
+@@ -784,7 +789,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+
+ /* Check MFT record size. */
+ if (record_size < SECTOR_SIZE || !is_power_of_2(record_size)) {
+- ntfs_err(sb, "Invalid bytes per MFT record %u (%d).",
++ ntfs_err(sb, "%s: invalid bytes per MFT record %u (%d).", hint,
+ record_size, boot->record_size);
+ goto out;
+ }
+@@ -801,13 +806,13 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+
+ /* Check index record size. */
+ if (sbi->index_size < SECTOR_SIZE || !is_power_of_2(sbi->index_size)) {
+- ntfs_err(sb, "Invalid bytes per index %u(%d).", sbi->index_size,
+- boot->index_size);
++ ntfs_err(sb, "%s: invalid bytes per index %u(%d).", hint,
++ sbi->index_size, boot->index_size);
+ goto out;
+ }
+
+ if (sbi->index_size > MAXIMUM_BYTES_PER_INDEX) {
+- ntfs_err(sb, "Unsupported bytes per index %u.",
++ ntfs_err(sb, "%s: unsupported bytes per index %u.", hint,
+ sbi->index_size);
+ goto out;
+ }
+@@ -834,7 +839,7 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+
+ /* Compare boot's cluster and sector. */
+ if (sbi->cluster_size < boot_sector_size) {
+- ntfs_err(sb, "Invalid bytes per cluster (%u).",
++ ntfs_err(sb, "%s: invalid bytes per cluster (%u).", hint,
+ sbi->cluster_size);
+ goto out;
+ }
+@@ -930,7 +935,46 @@ static int ntfs_init_from_boot(struct super_block *sb, u32 sector_size,
+
+ err = 0;
+
++ if (bh->b_blocknr && !sb_rdonly(sb)) {
++ /*
++ * Alternative boot is ok but primary is not ok.
++ * Update primary boot.
++ */
++ struct buffer_head *bh0 = sb_getblk(sb, 0);
++ if (bh0) {
++ if (buffer_locked(bh0))
++ __wait_on_buffer(bh0);
++
++ lock_buffer(bh0);
++ memcpy(bh0->b_data, boot, sizeof(*boot));
++ set_buffer_uptodate(bh0);
++ mark_buffer_dirty(bh0);
++ unlock_buffer(bh0);
++ if (!sync_dirty_buffer(bh0))
++ ntfs_warn(sb, "primary boot is updated");
++ put_bh(bh0);
++ }
++ }
++
+ out:
++ if (err == -EINVAL && !bh->b_blocknr && dev_size > PAGE_SHIFT) {
++ u32 block_size = min_t(u32, sector_size, PAGE_SIZE);
++ u64 lbo = dev_size - sizeof(*boot);
++
++ /*
++ * Try alternative boot (last sector)
++ */
++ brelse(bh);
++
++ sb_set_blocksize(sb, block_size);
++ bh = ntfs_bread(sb, lbo >> blksize_bits(block_size));
++ if (!bh)
++ return -EINVAL;
++
++ boot_off = lbo & (block_size - 1);
++ hint = "Alternative boot";
++ goto check_boot;
++ }
+ brelse(bh);
+
+ return err;
+@@ -955,6 +999,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ struct ATTR_DEF_ENTRY *t;
+ u16 *shared;
+ struct MFT_REF ref;
++ bool ro = sb_rdonly(sb);
+
+ ref.high = 0;
+
+@@ -1035,6 +1080,10 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+ sbi->volume.minor_ver = info->minor_ver;
+ sbi->volume.flags = info->flags;
+ sbi->volume.ni = ni;
++ if (info->flags & VOLUME_FLAG_DIRTY) {
++ sbi->volume.real_dirty = true;
++ ntfs_info(sb, "It is recommened to use chkdsk.");
++ }
+
+ /* Load $MFTMirr to estimate recs_mirr. */
+ ref.low = cpu_to_le32(MFT_REC_MIRR);
+@@ -1069,21 +1118,16 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+
+ iput(inode);
+
+- if (sbi->flags & NTFS_FLAGS_NEED_REPLAY) {
+- if (!sb_rdonly(sb)) {
+- ntfs_warn(sb,
+- "failed to replay log file. Can't mount rw!");
+- err = -EINVAL;
+- goto out;
+- }
+- } else if (sbi->volume.flags & VOLUME_FLAG_DIRTY) {
+- if (!sb_rdonly(sb) && !options->force) {
+- ntfs_warn(
+- sb,
+- "volume is dirty and \"force\" flag is not set!");
+- err = -EINVAL;
+- goto out;
+- }
++ if ((sbi->flags & NTFS_FLAGS_NEED_REPLAY) && !ro) {
++ ntfs_warn(sb, "failed to replay log file. Can't mount rw!");
++ err = -EINVAL;
++ goto out;
++ }
++
++ if ((sbi->volume.flags & VOLUME_FLAG_DIRTY) && !ro && !options->force) {
++ ntfs_warn(sb, "volume is dirty and \"force\" flag is not set!");
++ err = -EINVAL;
++ goto out;
+ }
+
+ /* Load $MFT. */
+@@ -1173,7 +1217,7 @@ static int ntfs_fill_super(struct super_block *sb, struct fs_context *fc)
+
+ bad_len += len;
+ bad_frags += 1;
+- if (sb_rdonly(sb))
++ if (ro)
+ continue;
+
+ if (wnd_set_used_safe(&sbi->used.bitmap, lcn, len, &tt) || tt) {
+diff --git a/fs/ntfs3/xattr.c b/fs/ntfs3/xattr.c
+index fd02fcf4d4091..26787c2bbf758 100644
+--- a/fs/ntfs3/xattr.c
++++ b/fs/ntfs3/xattr.c
+@@ -141,6 +141,7 @@ static int ntfs_read_ea(struct ntfs_inode *ni, struct EA_FULL **ea,
+
+ memset(Add2Ptr(ea_p, size), 0, add_bytes);
+
++ err = -EINVAL;
+ /* Check all attributes for consistency. */
+ for (off = 0; off < size; off += ea_size) {
+ const struct EA_FULL *ef = Add2Ptr(ea_p, off);
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index ed0f71137584f..d14e88e14fb2e 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -153,6 +153,11 @@ cifs_dump_channel(struct seq_file *m, int i, struct cifs_chan *chan)
+ in_flight(server),
+ atomic_read(&server->in_send),
+ atomic_read(&server->num_waiters));
++#ifdef CONFIG_NET_NS
++ if (server->net)
++ seq_printf(m, " Net namespace: %u ", server->net->ns.inum);
++#endif /* NET_NS */
++
+ }
+
+ static inline const char *smb_speed_to_str(size_t bps)
+@@ -429,10 +434,15 @@ skip_rdma:
+ server->reconnect_instance,
+ server->srv_count,
+ server->sec_mode, in_flight(server));
++#ifdef CONFIG_NET_NS
++ if (server->net)
++ seq_printf(m, " Net namespace: %u ", server->net->ns.inum);
++#endif /* NET_NS */
+
+ seq_printf(m, "\nIn Send: %d In MaxReq Wait: %d",
+ atomic_read(&server->in_send),
+ atomic_read(&server->num_waiters));
++
+ if (server->leaf_fullpath) {
+ seq_printf(m, "\nDFS leaf full path: %s",
+ server->leaf_fullpath);
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 43a4d8603db34..30b03938f6d1d 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -884,11 +884,11 @@ struct dentry *
+ cifs_smb3_do_mount(struct file_system_type *fs_type,
+ int flags, struct smb3_fs_context *old_ctx)
+ {
+- int rc;
+- struct super_block *sb = NULL;
+- struct cifs_sb_info *cifs_sb = NULL;
+ struct cifs_mnt_data mnt_data;
++ struct cifs_sb_info *cifs_sb;
++ struct super_block *sb;
+ struct dentry *root;
++ int rc;
+
+ if (cifsFYI) {
+ cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
+@@ -897,11 +897,9 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
+ cifs_info("Attempting to mount %s\n", old_ctx->source);
+ }
+
+- cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
+- if (cifs_sb == NULL) {
+- root = ERR_PTR(-ENOMEM);
+- goto out;
+- }
++ cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
++ if (!cifs_sb)
++ return ERR_PTR(-ENOMEM);
+
+ cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
+ if (!cifs_sb->ctx) {
+@@ -938,10 +936,8 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
+
+ sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
+ if (IS_ERR(sb)) {
+- root = ERR_CAST(sb);
+ cifs_umount(cifs_sb);
+- cifs_sb = NULL;
+- goto out;
++ return ERR_CAST(sb);
+ }
+
+ if (sb->s_root) {
+@@ -972,13 +968,9 @@ out_super:
+ deactivate_locked_super(sb);
+ return root;
+ out:
+- if (cifs_sb) {
+- if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
+- kfree(cifs_sb->prepath);
+- smb3_cleanup_fs_context(cifs_sb->ctx);
+- kfree(cifs_sb);
+- }
+- }
++ kfree(cifs_sb->prepath);
++ smb3_cleanup_fs_context(cifs_sb->ctx);
++ kfree(cifs_sb);
+ return root;
+ }
+
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index d554bca7e07eb..855454ff6cede 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -4681,9 +4681,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
+
+ io_error:
+ kunmap(page);
+- unlock_page(page);
+
+ read_complete:
++ unlock_page(page);
+ return rc;
+ }
+
+@@ -4878,9 +4878,11 @@ void cifs_oplock_break(struct work_struct *work)
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ oplock_break);
+ struct inode *inode = d_inode(cfile->dentry);
++ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+- struct TCP_Server_Info *server = tcon->ses->server;
++ struct cifs_tcon *tcon;
++ struct TCP_Server_Info *server;
++ struct tcon_link *tlink;
+ int rc = 0;
+ bool purge_cache = false, oplock_break_cancelled;
+ __u64 persistent_fid, volatile_fid;
+@@ -4889,6 +4891,12 @@ void cifs_oplock_break(struct work_struct *work)
+ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ TASK_UNINTERRUPTIBLE);
+
++ tlink = cifs_sb_tlink(cifs_sb);
++ if (IS_ERR(tlink))
++ goto out;
++ tcon = tlink_tcon(tlink);
++ server = tcon->ses->server;
++
+ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+ cfile->oplock_epoch, &purge_cache);
+
+@@ -4938,18 +4946,19 @@ oplock_break_ack:
+ /*
+ * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
+ * an acknowledgment to be sent when the file has already been closed.
+- * check for server null, since can race with kill_sb calling tree disconnect.
+ */
+ spin_lock(&cinode->open_file_lock);
+- if (tcon->ses && tcon->ses->server && !oplock_break_cancelled &&
+- !list_empty(&cinode->openFileList)) {
++ /* check list empty since can race with kill_sb calling tree disconnect */
++ if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
+ spin_unlock(&cinode->open_file_lock);
+- rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+- volatile_fid, net_fid, cinode);
++ rc = server->ops->oplock_response(tcon, persistent_fid,
++ volatile_fid, net_fid, cinode);
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ } else
+ spin_unlock(&cinode->open_file_lock);
+
++ cifs_put_tlink(tlink);
++out:
+ cifs_done_oplock_break(cinode);
+ }
+
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index 4946a0c596009..67e16c2ac90e6 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -231,6 +231,8 @@ cifs_parse_security_flavors(struct fs_context *fc, char *value, struct smb3_fs_c
+ break;
+ case Opt_sec_none:
+ ctx->nullauth = 1;
++ kfree(ctx->username);
++ ctx->username = NULL;
+ break;
+ default:
+ cifs_errorf(fc, "bad security option: %s\n", value);
+@@ -1201,6 +1203,8 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
+ case Opt_user:
+ kfree(ctx->username);
+ ctx->username = NULL;
++ if (ctx->nullauth)
++ break;
+ if (strlen(param->string) == 0) {
+ /* null user, ie. anonymous authentication */
+ ctx->nullauth = 1;
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 17fe212ab895d..e04766fe6f803 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3797,6 +3797,12 @@ void smb2_reconnect_server(struct work_struct *work)
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ spin_lock(&ses->ses_lock);
++ if (ses->ses_status == SES_EXITING) {
++ spin_unlock(&ses->ses_lock);
++ continue;
++ }
++ spin_unlock(&ses->ses_lock);
+
+ tcon_selected = false;
+
+diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
+index 571885d32907b..70ae6c290bdc3 100644
+--- a/include/drm/drm_edid.h
++++ b/include/drm/drm_edid.h
+@@ -61,15 +61,9 @@ struct std_timing {
+ u8 vfreq_aspect;
+ } __attribute__((packed));
+
+-#define DRM_EDID_PT_SYNC_MASK (3 << 3)
+-# define DRM_EDID_PT_ANALOG_CSYNC (0 << 3)
+-# define DRM_EDID_PT_BIPOLAR_ANALOG_CSYNC (1 << 3)
+-# define DRM_EDID_PT_DIGITAL_CSYNC (2 << 3)
+-# define DRM_EDID_PT_CSYNC_ON_RGB (1 << 1) /* analog csync only */
+-# define DRM_EDID_PT_CSYNC_SERRATE (1 << 2)
+-# define DRM_EDID_PT_DIGITAL_SEPARATE_SYNC (3 << 3)
+-# define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1) /* also digital csync */
+-# define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
++#define DRM_EDID_PT_HSYNC_POSITIVE (1 << 1)
++#define DRM_EDID_PT_VSYNC_POSITIVE (1 << 2)
++#define DRM_EDID_PT_SEPARATE_SYNC (3 << 3)
+ #define DRM_EDID_PT_STEREO (1 << 5)
+ #define DRM_EDID_PT_INTERLACED (1 << 7)
+
+diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
+index 2c8860e406bd8..0417360a6db9b 100644
+--- a/include/linux/iopoll.h
++++ b/include/linux/iopoll.h
+@@ -53,6 +53,7 @@
+ } \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
++ cpu_relax(); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+ })
+@@ -95,6 +96,7 @@
+ } \
+ if (__delay_us) \
+ udelay(__delay_us); \
++ cpu_relax(); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+ })
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index bdf8de2cdd935..7b4dd69555e49 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -155,6 +155,10 @@ retry:
+ if (gso_type & SKB_GSO_UDP)
+ nh_off -= thlen;
+
++ /* Kernel has a special handling for GSO_BY_FRAGS. */
++ if (gso_size == GSO_BY_FRAGS)
++ return -EINVAL;
++
+ /* Too small packets are not really GSO ones. */
+ if (skb->len - nh_off > gso_size) {
+ shinfo->gso_size = gso_size;
+diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
+index bb9de6a899e07..d6c8eb2b52019 100644
+--- a/include/media/v4l2-mem2mem.h
++++ b/include/media/v4l2-mem2mem.h
+@@ -593,7 +593,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+ static inline
+ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+- return m2m_ctx->out_q_ctx.num_rdy;
++ unsigned int num_buf_rdy;
++ unsigned long flags;
++
++ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++ num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
++ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++
++ return num_buf_rdy;
+ }
+
+ /**
+@@ -605,7 +612,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ static inline
+ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+- return m2m_ctx->cap_q_ctx.num_rdy;
++ unsigned int num_buf_rdy;
++ unsigned long flags;
++
++ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++ num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
++ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++
++ return num_buf_rdy;
+ }
+
+ /**
+diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h
+index 9eef199728454..024ad8ddb27e5 100644
+--- a/include/net/mana/mana.h
++++ b/include/net/mana/mana.h
+@@ -579,7 +579,7 @@ struct mana_fence_rq_resp {
+ }; /* HW DATA */
+
+ /* Configure vPort Rx Steering */
+-struct mana_cfg_rx_steer_req {
++struct mana_cfg_rx_steer_req_v2 {
+ struct gdma_req_hdr hdr;
+ mana_handle_t vport;
+ u16 num_indir_entries;
+@@ -592,6 +592,8 @@ struct mana_cfg_rx_steer_req {
+ u8 reserved;
+ mana_handle_t default_rxobj;
+ u8 hashkey[MANA_HASH_KEY_SIZE];
++ u8 cqe_coalescing_enable;
++ u8 reserved2[7];
+ }; /* HW DATA */
+
+ struct mana_cfg_rx_steer_resp {
+diff --git a/include/net/sock.h b/include/net/sock.h
+index ad468fe71413a..415f3840a26aa 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1421,6 +1421,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ return sk->sk_prot->memory_pressure != NULL;
+ }
+
++static inline bool sk_under_global_memory_pressure(const struct sock *sk)
++{
++ return sk->sk_prot->memory_pressure &&
++ !!*sk->sk_prot->memory_pressure;
++}
++
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ {
+ if (!sk->sk_prot->memory_pressure)
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index 151ca95dd08db..363c7d5105542 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1984,6 +1984,7 @@ static inline void xfrm_dev_state_free(struct xfrm_state *x)
+ if (dev->xfrmdev_ops->xdo_dev_state_free)
+ dev->xfrmdev_ops->xdo_dev_state_free(x);
+ xso->dev = NULL;
++ xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
+ netdev_put(dev, &xso->dev_tracker);
+ }
+ }
+diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
+index b4526668072e7..27596f3b4aef3 100644
+--- a/kernel/dma/remap.c
++++ b/kernel/dma/remap.c
+@@ -43,13 +43,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
+ void *vaddr;
+ int i;
+
+- pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
++ pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+ for (i = 0; i < count; i++)
+ pages[i] = nth_page(page, i);
+ vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
+- kfree(pages);
++ kvfree(pages);
+
+ return vaddr;
+ }
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index 99634b29a8b82..46b4a3c7c3bf5 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -538,6 +538,7 @@ struct trace_buffer {
+ unsigned flags;
+ int cpus;
+ atomic_t record_disabled;
++ atomic_t resizing;
+ cpumask_var_t cpumask;
+
+ struct lock_class_key *reader_lock_key;
+@@ -2166,7 +2167,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
+-
++ atomic_inc(&buffer->resizing);
+
+ if (cpu_id == RING_BUFFER_ALL_CPUS) {
+ /*
+@@ -2321,6 +2322,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ atomic_dec(&buffer->record_disabled);
+ }
+
++ atomic_dec(&buffer->resizing);
+ mutex_unlock(&buffer->mutex);
+ return 0;
+
+@@ -2341,6 +2343,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ }
+ }
+ out_err_unlock:
++ atomic_dec(&buffer->resizing);
+ mutex_unlock(&buffer->mutex);
+ return err;
+ }
+@@ -5543,6 +5546,15 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ if (local_read(&cpu_buffer_b->committing))
+ goto out_dec;
+
++ /*
++	 * When a resize is in progress, we cannot swap it because
++	 * it will mess up the state of the cpu buffer.
++ */
++ if (atomic_read(&buffer_a->resizing))
++ goto out_dec;
++ if (atomic_read(&buffer_b->resizing))
++ goto out_dec;
++
+ buffer_a->buffers[cpu] = cpu_buffer_b;
+ buffer_b->buffers[cpu] = cpu_buffer_a;
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index c80ff6f5b2cc1..fd051f85efd4b 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1928,9 +1928,10 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ * place on this CPU. We fail to record, but we reset
+ * the max trace buffer (no one writes directly to it)
+ * and flag that it failed.
+	 * Another reason is that a resize is in progress.
+ */
+ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
+- "Failed to swap buffers due to commit in progress\n");
++ "Failed to swap buffers due to commit or resize in progress\n");
+ }
+
+ WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index c5e8798e297ca..17ca13e8c044c 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -6374,9 +6374,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
+ if (!chan)
+ goto done;
+
++ chan = l2cap_chan_hold_unless_zero(chan);
++ if (!chan)
++ goto done;
++
+ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, ECONNREFUSED);
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ done:
+ mutex_unlock(&conn->chan_lock);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 1e07d0f289723..d4498037fadc6 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7285,7 +7285,7 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+- memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
++ memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
+
+ status = mgmt_status(err);
+ if (status == MGMT_STATUS_SUCCESS) {
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 1f31a97100d4f..8451a95266bf0 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3107,7 +3107,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
+ if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+ mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
+
+- if (sk_under_memory_pressure(sk) &&
++ if (sk_under_global_memory_pressure(sk) &&
+ (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+ sk_leave_memory_pressure(sk);
+ }
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 53bfd8af69203..d1e7d0ceb7edd 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -287,12 +287,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ break;
+ case htons(ETH_P_IPV6):
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ break;
+ default:
+ goto tx_err;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 39eb947fe3920..366c3c25ebe20 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -586,7 +586,9 @@ out_reset_timer:
+ tcp_stream_is_thin(tp) &&
+ icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
+ icsk->icsk_backoff = 0;
+- icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
++ icsk->icsk_rto = clamp(__tcp_set_rto(tp),
++ tcp_rto_min(sk),
++ TCP_RTO_MAX);
+ } else {
+ /* Use normal (exponential) backoff */
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 10b222865d46a..73c85d4e0e9cd 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -568,12 +568,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ vti6_addr_conflict(t, ipv6_hdr(skb)))
+ goto tx_err;
+
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ break;
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ break;
+ default:
+ goto tx_err;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 31ab12fd720ae..203131ad0dfe1 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1848,9 +1848,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
+ if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+ struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+
+- if ((xfilter->sadb_x_filter_splen >=
++ if ((xfilter->sadb_x_filter_splen >
+ (sizeof(xfrm_address_t) << 3)) ||
+- (xfilter->sadb_x_filter_dplen >=
++ (xfilter->sadb_x_filter_dplen >
+ (sizeof(xfrm_address_t) << 3))) {
+ mutex_unlock(&pfk->dump_lock);
+ return -EINVAL;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 62606fb44d027..4bb0d90eca1cd 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1876,6 +1876,7 @@ static int
+ proc_do_sync_threshold(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+ {
++ struct netns_ipvs *ipvs = table->extra2;
+ int *valp = table->data;
+ int val[2];
+ int rc;
+@@ -1885,6 +1886,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
+ .mode = table->mode,
+ };
+
++ mutex_lock(&ipvs->sync_mutex);
+ memcpy(val, valp, sizeof(val));
+ rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+ if (write) {
+@@ -1894,6 +1896,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
+ else
+ memcpy(valp, val, sizeof(val));
+ }
++ mutex_unlock(&ipvs->sync_mutex);
+ return rc;
+ }
+
+@@ -4321,6 +4324,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
+ ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
+ ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
+ tbl[idx].data = &ipvs->sysctl_sync_threshold;
++ tbl[idx].extra2 = ipvs;
+ tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+ ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
+ tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 91eacc9b0b987..b6bcc8f2f46b7 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
+ [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
+- [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
+- [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
++ [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS,
++ [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS,
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+ };
+@@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ {
+ /* ORIGINAL */
+ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+-/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
++/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW},
+ /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
+ /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+ /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index c6de10f458fa4..b280b151a9e98 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7088,6 +7088,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
+ ret = __nft_set_catchall_flush(ctx, set, &elem);
+ if (ret < 0)
+ break;
++ nft_set_elem_change_active(ctx->net, set, ext);
+ }
+
+ return ret;
+@@ -9494,9 +9495,14 @@ struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set,
+ if (!trans)
+ return NULL;
+
++ trans->net = maybe_get_net(net);
++ if (!trans->net) {
++ kfree(trans);
++ return NULL;
++ }
++
+ refcount_inc(&set->refs);
+ trans->set = set;
+- trans->net = get_net(net);
+ trans->seq = gc_seq;
+
+ return trans;
+@@ -9752,6 +9758,22 @@ static void nft_set_commit_update(struct list_head *set_update_list)
+ }
+ }
+
++static unsigned int nft_gc_seq_begin(struct nftables_pernet *nft_net)
++{
++ unsigned int gc_seq;
++
++ /* Bump gc counter, it becomes odd, this is the busy mark. */
++ gc_seq = READ_ONCE(nft_net->gc_seq);
++ WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
++
++ return gc_seq;
++}
++
++static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq)
++{
++ WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
++}
++
+ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ {
+ struct nftables_pernet *nft_net = nft_pernet(net);
+@@ -9837,9 +9859,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+
+ WRITE_ONCE(nft_net->base_seq, base_seq);
+
+- /* Bump gc counter, it becomes odd, this is the busy mark. */
+- gc_seq = READ_ONCE(nft_net->gc_seq);
+- WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
++ gc_seq = nft_gc_seq_begin(nft_net);
+
+ /* step 3. Start new generation, rules_gen_X now in use. */
+ net->nft.gencursor = nft_gencursor_next(net);
+@@ -10049,7 +10069,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
+ nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
+ nf_tables_commit_audit_log(&adl, nft_net->base_seq);
+
+- WRITE_ONCE(nft_net->gc_seq, ++gc_seq);
++ nft_gc_seq_end(nft_net, gc_seq);
+ nf_tables_commit_release(net);
+
+ return 0;
+@@ -11050,6 +11070,7 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ struct net *net = n->net;
+ unsigned int deleted;
+ bool restart = false;
++ unsigned int gc_seq;
+
+ if (event != NETLINK_URELEASE || n->protocol != NETLINK_NETFILTER)
+ return NOTIFY_DONE;
+@@ -11057,6 +11078,9 @@ static int nft_rcv_nl_event(struct notifier_block *this, unsigned long event,
+ nft_net = nft_pernet(net);
+ deleted = 0;
+ mutex_lock(&nft_net->commit_mutex);
++
++ gc_seq = nft_gc_seq_begin(nft_net);
++
+ if (!list_empty(&nf_tables_destroy_list))
+ rcu_barrier();
+ again:
+@@ -11079,6 +11103,8 @@ again:
+ if (restart)
+ goto again;
+ }
++ nft_gc_seq_end(nft_net, gc_seq);
++
+ mutex_unlock(&nft_net->commit_mutex);
+
+ return NOTIFY_DONE;
+@@ -11116,12 +11142,20 @@ static void __net_exit nf_tables_pre_exit_net(struct net *net)
+ static void __net_exit nf_tables_exit_net(struct net *net)
+ {
+ struct nftables_pernet *nft_net = nft_pernet(net);
++ unsigned int gc_seq;
+
+ mutex_lock(&nft_net->commit_mutex);
++
++ gc_seq = nft_gc_seq_begin(nft_net);
++
+ if (!list_empty(&nft_net->commit_list) ||
+ !list_empty(&nft_net->module_list))
+ __nf_tables_abort(net, NFNL_ABORT_NONE);
++
+ __nft_release_tables(net);
++
++ nft_gc_seq_end(nft_net, gc_seq);
++
+ mutex_unlock(&nft_net->commit_mutex);
+ WARN_ON_ONCE(!list_empty(&nft_net->tables));
+ WARN_ON_ONCE(!list_empty(&nft_net->module_list));
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index bd19c7aec92ee..c98a273c3006d 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -191,6 +191,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
++ if (set->flags & NFT_SET_OBJECT)
++ return -EOPNOTSUPP;
++
+ if (set->ops->update == NULL)
+ return -EOPNOTSUPP;
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 92b108e3000eb..352180b123fc7 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -566,6 +566,8 @@ next_match:
+ goto out;
+
+ if (last) {
++ if (nft_set_elem_expired(&f->mt[b].e->ext))
++ goto next_match;
+ if ((genmask &&
+ !nft_set_elem_active(&f->mt[b].e->ext, genmask)))
+ goto next_match;
+@@ -600,17 +602,8 @@ out:
+ static void *nft_pipapo_get(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem, unsigned int flags)
+ {
+- struct nft_pipapo_elem *ret;
+-
+- ret = pipapo_get(net, set, (const u8 *)elem->key.val.data,
++ return pipapo_get(net, set, (const u8 *)elem->key.val.data,
+ nft_genmask_cur(net));
+- if (IS_ERR(ret))
+- return ret;
+-
+- if (nft_set_elem_expired(&ret->ext))
+- return ERR_PTR(-ENOENT);
+-
+- return ret;
+ }
+
+ /**
+@@ -1698,6 +1691,17 @@ static void nft_pipapo_commit(const struct nft_set *set)
+ priv->clone = new_clone;
+ }
+
++static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
++{
++#ifdef CONFIG_PROVE_LOCKING
++ const struct net *net = read_pnet(&set->net);
++
++ return lockdep_is_held(&nft_pernet(net)->commit_mutex);
++#else
++ return true;
++#endif
++}
++
+ static void nft_pipapo_abort(const struct nft_set *set)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+@@ -1706,7 +1710,7 @@ static void nft_pipapo_abort(const struct nft_set *set)
+ if (!priv->dirty)
+ return;
+
+- m = rcu_dereference(priv->match);
++ m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set));
+
+ new_clone = pipapo_clone(m);
+ if (IS_ERR(new_clone))
+@@ -1733,11 +1737,7 @@ static void nft_pipapo_activate(const struct net *net,
+ const struct nft_set *set,
+ const struct nft_set_elem *elem)
+ {
+- struct nft_pipapo_elem *e;
+-
+- e = pipapo_get(net, set, (const u8 *)elem->key.val.data, 0);
+- if (IS_ERR(e))
+- return;
++ struct nft_pipapo_elem *e = elem->priv;
+
+ nft_set_elem_change_active(net, set, &e->ext);
+ }
+@@ -1951,10 +1951,6 @@ static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
+
+ data = (const u8 *)nft_set_ext_key(&e->ext);
+
+- e = pipapo_get(net, set, data, 0);
+- if (IS_ERR(e))
+- return;
+-
+ while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+ const u8 *match_start, *match_end;
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index a6d2a0b1aa21e..3d7a91e64c88f 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1829,7 +1829,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ parms.port_no = OVSP_LOCAL;
+ parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
+ parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX]
+- ? nla_get_u32(a[OVS_DP_ATTR_IFINDEX]) : 0;
++ ? nla_get_s32(a[OVS_DP_ATTR_IFINDEX]) : 0;
+
+ /* So far only local changes have been made, now need the lock. */
+ ovs_lock();
+@@ -2049,7 +2049,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
+ [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
+ [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
+ PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
+- [OVS_DP_ATTR_IFINDEX] = {.type = NLA_U32 },
++ [OVS_DP_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
+ };
+
+ static const struct genl_small_ops dp_datapath_genl_ops[] = {
+@@ -2302,7 +2302,7 @@ restart:
+ parms.port_no = port_no;
+ parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
+ parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX]
+- ? nla_get_u32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
++ ? nla_get_s32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
+
+ vport = new_vport(&parms);
+ err = PTR_ERR(vport);
+@@ -2539,7 +2539,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+ [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
+ [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+- [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
++ [OVS_VPORT_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
+ [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
+ [OVS_VPORT_ATTR_UPCALL_STATS] = { .type = NLA_NESTED },
+ };
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 10615878e3961..714bd87f12d91 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2291,6 +2291,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+
+ if (false) {
+ alloc_skb:
++ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->iolock);
+ newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+@@ -2330,6 +2331,7 @@ alloc_skb:
+ init_scm = false;
+ }
+
++ spin_lock(&other->sk_receive_queue.lock);
+ skb = skb_peek_tail(&other->sk_receive_queue);
+ if (tail && tail == skb) {
+ skb = newskb;
+@@ -2360,14 +2362,11 @@ alloc_skb:
+ refcount_add(size, &sk->sk_wmem_alloc);
+
+ if (newskb) {
+- err = unix_scm_to_skb(&scm, skb, false);
+- if (err)
+- goto err_state_unlock;
+- spin_lock(&other->sk_receive_queue.lock);
++ unix_scm_to_skb(&scm, skb, false);
+ __skb_queue_tail(&other->sk_receive_queue, newskb);
+- spin_unlock(&other->sk_receive_queue.lock);
+ }
+
++ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->iolock);
+
+diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
+index 8cbf45a8bcdc2..655fe4ff86212 100644
+--- a/net/xfrm/xfrm_compat.c
++++ b/net/xfrm/xfrm_compat.c
+@@ -108,7 +108,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
+ [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
+ [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
+ [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
+- [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
++ [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
+ [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
+ [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
+ [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
+diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
+index 815b380804011..d5ee96789d4bf 100644
+--- a/net/xfrm/xfrm_input.c
++++ b/net/xfrm/xfrm_input.c
+@@ -180,6 +180,8 @@ static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
+ int optlen = 0;
+ int err = -EINVAL;
+
++ skb->protocol = htons(ETH_P_IP);
++
+ if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) {
+ struct ip_beet_phdr *ph;
+ int phlen;
+@@ -232,6 +234,8 @@ static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ int err = -EINVAL;
+
++ skb->protocol = htons(ETH_P_IP);
++
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ goto out;
+
+@@ -267,6 +271,8 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb)
+ {
+ int err = -EINVAL;
+
++ skb->protocol = htons(ETH_P_IPV6);
++
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ goto out;
+
+@@ -296,6 +302,8 @@ static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb)
+ int size = sizeof(struct ipv6hdr);
+ int err;
+
++ skb->protocol = htons(ETH_P_IPV6);
++
+ err = skb_cow_head(skb, size + skb->mac_len);
+ if (err)
+ goto out;
+@@ -346,6 +354,7 @@ xfrm_inner_mode_encap_remove(struct xfrm_state *x,
+ return xfrm6_remove_tunnel_encap(x, skb);
+ break;
+ }
++ return -EINVAL;
+ }
+
+ WARN_ON_ONCE(1);
+@@ -366,19 +375,6 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb)
+ return -EAFNOSUPPORT;
+ }
+
+- switch (XFRM_MODE_SKB_CB(skb)->protocol) {
+- case IPPROTO_IPIP:
+- case IPPROTO_BEETPH:
+- skb->protocol = htons(ETH_P_IP);
+- break;
+- case IPPROTO_IPV6:
+- skb->protocol = htons(ETH_P_IPV6);
+- break;
+- default:
+- WARN_ON_ONCE(1);
+- break;
+- }
+-
+ return xfrm_inner_mode_encap_remove(x, skb);
+ }
+
+diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
+index a3319965470a7..b864740846902 100644
+--- a/net/xfrm/xfrm_interface_core.c
++++ b/net/xfrm/xfrm_interface_core.c
+@@ -537,8 +537,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IPV6):
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ if (!dst) {
+ fl.u.ip6.flowi6_oif = dev->ifindex;
+ fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+@@ -552,8 +552,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+ break;
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ if (!dst) {
+ struct rtable *rt;
+
+diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
+index 49e63eea841dd..bda5327bf34df 100644
+--- a/net/xfrm/xfrm_state.c
++++ b/net/xfrm/xfrm_state.c
+@@ -1324,12 +1324,8 @@ found:
+ struct xfrm_dev_offload *xso = &x->xso;
+
+ if (xso->type == XFRM_DEV_OFFLOAD_PACKET) {
+- xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
+- xso->dir = 0;
+- netdev_put(xso->dev, &xso->dev_tracker);
+- xso->dev = NULL;
+- xso->real_dev = NULL;
+- xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED;
++ xfrm_dev_state_delete(x);
++ xfrm_dev_state_free(x);
+ }
+ #endif
+ x->km.state = XFRM_STATE_DEAD;
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index c34a2a06ca940..ad01997c3aa9d 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -628,7 +628,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
+ struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
+ struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
+
+- if (re) {
++ if (re && x->replay_esn && x->preplay_esn) {
+ struct xfrm_replay_state_esn *replay_esn;
+ replay_esn = nla_data(re);
+ memcpy(x->replay_esn, replay_esn,
+@@ -1267,6 +1267,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
+ sizeof(*filter), GFP_KERNEL);
+ if (filter == NULL)
+ return -ENOMEM;
++
++ /* see addr_match(), (prefix length >> 5) << 2
++ * will be used to compare xfrm_address_t
++ */
++ if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
++ filter->dplen > (sizeof(xfrm_address_t) << 3)) {
++ kfree(filter);
++ return -EINVAL;
++ }
+ }
+
+ if (attrs[XFRMA_PROTO])
+@@ -2336,6 +2345,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
+ NETLINK_CB(skb).portid);
+ }
+ } else {
++ xfrm_dev_policy_delete(xp);
+ xfrm_audit_policy_delete(xp, err ? 0 : 1, true);
+
+ if (err != 0)
+@@ -3015,7 +3025,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
+ [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
+ [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
+- [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
++ [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
+ [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
+ [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
+ [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
+@@ -3035,6 +3045,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ [XFRMA_SET_MARK] = { .type = NLA_U32 },
+ [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
+ [XFRMA_IF_ID] = { .type = NLA_U32 },
++ [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 },
+ };
+ EXPORT_SYMBOL_GPL(xfrma_policy);
+
+diff --git a/rust/macros/vtable.rs b/rust/macros/vtable.rs
+index 34d5e7fb5768a..ee06044fcd4f3 100644
+--- a/rust/macros/vtable.rs
++++ b/rust/macros/vtable.rs
+@@ -74,6 +74,7 @@ pub(crate) fn vtable(_attr: TokenStream, ts: TokenStream) -> TokenStream {
+ const {gen_const_name}: bool = false;",
+ )
+ .unwrap();
++ consts.insert(gen_const_name);
+ }
+ } else {
+ const_items = "const USE_VTABLE_ATTR: () = ();".to_owned();
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index fe3587547cfec..39610a15bcc98 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -597,10 +597,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once);
+ */
+ void snd_hdac_regmap_sync(struct hdac_device *codec)
+ {
+- if (codec->regmap) {
+- mutex_lock(&codec->regmap_lock);
++ mutex_lock(&codec->regmap_lock);
++ if (codec->regmap)
+ regcache_sync(codec->regmap);
+- mutex_unlock(&codec->regmap_lock);
+- }
++ mutex_unlock(&codec->regmap_lock);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index bcd548e247fc8..074aa06aa585c 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7081,6 +7081,9 @@ enum {
+ ALC285_FIXUP_SPEAKER2_TO_DAC1,
+ ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1,
+ ALC285_FIXUP_ASUS_HEADSET_MIC,
++ ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS,
++ ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1,
++ ALC285_FIXUP_ASUS_I2C_HEADSET_MIC,
+ ALC280_FIXUP_HP_HEADSET_MIC,
+ ALC221_FIXUP_HP_FRONT_MIC,
+ ALC292_FIXUP_TPT460,
+@@ -8073,6 +8076,31 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1
+ },
++ [ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x14, 0x90170120 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC285_FIXUP_ASUS_HEADSET_MIC
++ },
++ [ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc285_fixup_speaker2_to_dac1,
++ .chained = true,
++ .chain_id = ALC287_FIXUP_CS35L41_I2C_2
++ },
++ [ALC285_FIXUP_ASUS_I2C_HEADSET_MIC] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x03a11050 },
++ { 0x1b, 0x03a11c30 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1
++ },
+ [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -9578,7 +9606,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+- SND_PCI_QUIRK(0x103c, 0x8c26, "HP HP EliteBook 800G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -9598,10 +9632,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
++ SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650P", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1493, "ASUS GV601V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
++ SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+@@ -9627,7 +9664,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+- SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x1c9f, "ASUS G614JI", ALC285_FIXUP_ASUS_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+ SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+@@ -10595,6 +10633,7 @@ static int patch_alc269(struct hda_codec *codec)
+ spec = codec->spec;
+ spec->gen.shared_mic_vref_pin = 0x18;
+ codec->power_save_node = 0;
++ spec->en_3kpull_low = true;
+
+ #ifdef CONFIG_PM
+ codec->patch_ops.suspend = alc269_suspend;
+@@ -10677,14 +10716,16 @@ static int patch_alc269(struct hda_codec *codec)
+ spec->shutup = alc256_shutup;
+ spec->init_hook = alc256_init;
+ spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
+- if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD)
+- spec->en_3kpull_low = true;
++ if (codec->core.vendor_id == 0x10ec0236 &&
++ codec->bus->pci->vendor != PCI_VENDOR_ID_AMD)
++ spec->en_3kpull_low = false;
+ break;
+ case 0x10ec0257:
+ spec->codec_variant = ALC269_TYPE_ALC257;
+ spec->shutup = alc256_shutup;
+ spec->init_hook = alc256_init;
+ spec->gen.mixer_nid = 0;
++ spec->en_3kpull_low = false;
+ break;
+ case 0x10ec0215:
+ case 0x10ec0245:
+@@ -11316,6 +11357,7 @@ enum {
+ ALC897_FIXUP_HP_HSMIC_VERB,
+ ALC897_FIXUP_LENOVO_HEADSET_MODE,
+ ALC897_FIXUP_HEADSET_MIC_PIN2,
++ ALC897_FIXUP_UNIS_H3C_X500S,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -11755,6 +11797,13 @@ static const struct hda_fixup alc662_fixups[] = {
+ .chained = true,
+ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE
+ },
++ [ALC897_FIXUP_UNIS_H3C_X500S] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 },
++ {}
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -11916,6 +11965,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
+ {.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"},
+ {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
+ {.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"},
++ {.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"},
+ {}
+ };
+
+diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
+index 08e42082f5e96..e724cb3c70b74 100644
+--- a/sound/soc/amd/Kconfig
++++ b/sound/soc/amd/Kconfig
+@@ -81,6 +81,7 @@ config SND_SOC_AMD_VANGOGH_MACH
+ tristate "AMD Vangogh support for NAU8821 CS35L41"
+ select SND_SOC_NAU8821
+ select SND_SOC_CS35L41_SPI
++ select SND_AMD_ACP_CONFIG
+ depends on SND_SOC_AMD_ACP5x && I2C && SPI_MASTER
+ help
+ This option enables machine driver for Vangogh platform
+diff --git a/sound/soc/amd/vangogh/acp5x.h b/sound/soc/amd/vangogh/acp5x.h
+index bd9f1c5684d17..ac1936a8c43ff 100644
+--- a/sound/soc/amd/vangogh/acp5x.h
++++ b/sound/soc/amd/vangogh/acp5x.h
+@@ -147,6 +147,8 @@ static inline void acp_writel(u32 val, void __iomem *base_addr)
+ writel(val, base_addr - ACP5x_PHY_BASE_ADDRESS);
+ }
+
++int snd_amd_acp_find_config(struct pci_dev *pci);
++
+ static inline u64 acp_get_byte_count(struct i2s_stream_instance *rtd,
+ int direction)
+ {
+diff --git a/sound/soc/amd/vangogh/pci-acp5x.c b/sound/soc/amd/vangogh/pci-acp5x.c
+index e0df17c88e8e0..c4634a8a17cdc 100644
+--- a/sound/soc/amd/vangogh/pci-acp5x.c
++++ b/sound/soc/amd/vangogh/pci-acp5x.c
+@@ -125,10 +125,15 @@ static int snd_acp5x_probe(struct pci_dev *pci,
+ {
+ struct acp5x_dev_data *adata;
+ struct platform_device_info pdevinfo[ACP5x_DEVS];
+- unsigned int irqflags;
++ unsigned int irqflags, flag;
+ int ret, i;
+ u32 addr, val;
+
++ /* Return if acp config flag is defined */
++ flag = snd_amd_acp_find_config(pci);
++ if (flag)
++ return -ENODEV;
++
+ irqflags = IRQF_SHARED;
+ if (pci->revision != 0x50)
+ return -ENODEV;
+diff --git a/sound/soc/codecs/cs35l56.c b/sound/soc/codecs/cs35l56.c
+index e0d2b9bb23262..f3fee448d759e 100644
+--- a/sound/soc/codecs/cs35l56.c
++++ b/sound/soc/codecs/cs35l56.c
+@@ -834,12 +834,6 @@ static void cs35l56_dsp_work(struct work_struct *work)
+ if (!cs35l56->init_done)
+ return;
+
+- cs35l56->dsp.part = devm_kasprintf(cs35l56->dev, GFP_KERNEL, "cs35l56%s-%02x",
+- cs35l56->secured ? "s" : "", cs35l56->rev);
+-
+- if (!cs35l56->dsp.part)
+- return;
+-
+ pm_runtime_get_sync(cs35l56->dev);
+
+ /*
+@@ -1505,6 +1499,12 @@ int cs35l56_init(struct cs35l56_private *cs35l56)
+ dev_info(cs35l56->dev, "Cirrus Logic CS35L56%s Rev %02X OTP%d\n",
+ cs35l56->secured ? "s" : "", cs35l56->rev, otpid);
+
++ /* Populate the DSP information with the revision and security state */
++ cs35l56->dsp.part = devm_kasprintf(cs35l56->dev, GFP_KERNEL, "cs35l56%s-%02x",
++ cs35l56->secured ? "s" : "", cs35l56->rev);
++ if (!cs35l56->dsp.part)
++ return -ENOMEM;
++
+ /* Wake source and *_BLOCKED interrupts default to unmasked, so mask them */
+ regmap_write(cs35l56->regmap, CS35L56_IRQ1_MASK_20, 0xffffffff);
+ regmap_update_bits(cs35l56->regmap, CS35L56_IRQ1_MASK_1,
+diff --git a/sound/soc/codecs/max98363.c b/sound/soc/codecs/max98363.c
+index e6b84e222b504..169913ba76dd7 100644
+--- a/sound/soc/codecs/max98363.c
++++ b/sound/soc/codecs/max98363.c
+@@ -191,10 +191,10 @@ static int max98363_io_init(struct sdw_slave *slave)
+ pm_runtime_get_noresume(dev);
+
+ ret = regmap_read(max98363->regmap, MAX98363_R21FF_REV_ID, &reg);
+- if (!ret) {
++ if (!ret)
+ dev_info(dev, "Revision ID: %X\n", reg);
+- return ret;
+- }
++ else
++ goto out;
+
+ if (max98363->first_hw_init) {
+ regcache_cache_bypass(max98363->regmap, false);
+@@ -204,10 +204,11 @@ static int max98363_io_init(struct sdw_slave *slave)
+ max98363->first_hw_init = true;
+ max98363->hw_init = true;
+
++out:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+- return 0;
++ return ret;
+ }
+
+ #define MAX98363_RATES SNDRV_PCM_RATE_8000_192000
+diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
+index 17afaef85c77a..382bdbcf7b59b 100644
+--- a/sound/soc/codecs/rt5665.c
++++ b/sound/soc/codecs/rt5665.c
+@@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component)
+ struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
+
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
++
++ regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
+ }
+
+ #ifdef CONFIG_PM
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index 5fa204897a52b..a6d13aae8f720 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -367,6 +367,16 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ RT711_JD2),
+ },
+ /* RaptorLake devices */
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc"),
++ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "0BDA")
++ },
++ .driver_data = (void *)(SOF_SDW_TGL_HDMI |
++ RT711_JD2 |
++ SOF_SDW_FOUR_SPK),
++ },
+ {
+ .callback = sof_sdw_quirk_cb,
+ .matches = {
+@@ -415,6 +425,31 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ },
+ .driver_data = (void *)(RT711_JD1),
+ },
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Meteor Lake Client Platform"),
++ },
++ .driver_data = (void *)(RT711_JD2_100K),
++ },
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Rex"),
++ },
++ .driver_data = (void *)(SOF_SDW_PCH_DMIC),
++ },
++ /* LunarLake devices */
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Lunar Lake Client Platform"),
++ },
++ .driver_data = (void *)(RT711_JD2_100K),
++ },
+ {}
+ };
+
+diff --git a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
+index 7f16304d025be..cf8b9793fe0e5 100644
+--- a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
++++ b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
+@@ -143,6 +143,9 @@ int sof_sdw_rt711_sdca_exit(struct snd_soc_card *card, struct snd_soc_dai_link *
+ if (!ctx->headset_codec_dev)
+ return 0;
+
++ if (!SOF_RT711_JDSRC(sof_sdw_quirk))
++ return 0;
++
+ device_remove_software_node(ctx->headset_codec_dev);
+ put_device(ctx->headset_codec_dev);
+
+diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c
+index 9883dc777f630..63333a2b0a9c3 100644
+--- a/sound/soc/meson/axg-tdm-formatter.c
++++ b/sound/soc/meson/axg-tdm-formatter.c
+@@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ struct axg_tdm_stream *ts,
+ unsigned int offset)
+ {
+- unsigned int val, ch = ts->channels;
+- unsigned long mask;
+- int i, j;
++ unsigned int ch = ts->channels;
++ u32 val[AXG_TDM_NUM_LANES];
++ int i, j, k;
++
++ /*
++	 * We need to mimic the slot distribution used by the HW to keep the
++	 * channel placement consistent regardless of the number of channels
++	 * in the stream. This is why the odd algorithm below is used.
++ */
++ memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES);
+
+ /*
+ * Distribute the channels of the stream over the available slots
+- * of each TDM lane
++ * of each TDM lane. We need to go over the 32 slots ...
+ */
+- for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
+- val = 0;
+- mask = ts->mask[i];
+-
+- for (j = find_first_bit(&mask, 32);
+- (j < 32) && ch;
+- j = find_next_bit(&mask, 32, j + 1)) {
+- val |= 1 << j;
+- ch -= 1;
++ for (i = 0; (i < 32) && ch; i += 2) {
++ /* ... of all the lanes ... */
++ for (j = 0; j < AXG_TDM_NUM_LANES; j++) {
++ /* ... then distribute the channels in pairs */
++ for (k = 0; k < 2; k++) {
++ if ((BIT(i + k) & ts->mask[j]) && ch) {
++ val[j] |= BIT(i + k);
++ ch -= 1;
++ }
++ }
+ }
+-
+- regmap_write(map, offset, val);
+- offset += regmap_get_reg_stride(map);
+ }
+
+ /*
+@@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ return -EINVAL;
+ }
+
++ for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
++ regmap_write(map, offset, val[i]);
++ offset += regmap_get_reg_stride(map);
++ }
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks);
+diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
+index 1c535cc6c3a95..dc624f727aa37 100644
+--- a/sound/soc/sof/amd/acp.h
++++ b/sound/soc/sof/amd/acp.h
+@@ -55,6 +55,9 @@
+
+ #define ACP_DSP_TO_HOST_IRQ 0x04
+
++#define ACP_RN_PCI_ID 0x01
++#define ACP_RMB_PCI_ID 0x6F
++
+ #define HOST_BRIDGE_CZN 0x1630
+ #define HOST_BRIDGE_RMB 0x14B5
+ #define ACP_SHA_STAT 0x8000
+diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
+index eaf70ea6e556e..58b3092425f1a 100644
+--- a/sound/soc/sof/amd/pci-rmb.c
++++ b/sound/soc/sof/amd/pci-rmb.c
+@@ -65,6 +65,9 @@ static int acp_pci_rmb_probe(struct pci_dev *pci, const struct pci_device_id *pc
+ {
+ unsigned int flag;
+
++ if (pci->revision != ACP_RMB_PCI_ID)
++ return -ENODEV;
++
+ flag = snd_amd_acp_find_config(pci);
+ if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
+ return -ENODEV;
+diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c
+index 4809cb644152b..7409e21ce5aa7 100644
+--- a/sound/soc/sof/amd/pci-rn.c
++++ b/sound/soc/sof/amd/pci-rn.c
+@@ -65,6 +65,9 @@ static int acp_pci_rn_probe(struct pci_dev *pci, const struct pci_device_id *pci
+ {
+ unsigned int flag;
+
++ if (pci->revision != ACP_RN_PCI_ID)
++ return -ENODEV;
++
+ flag = snd_amd_acp_find_config(pci);
+ if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
+ return -ENODEV;
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 9a9d82220fd0d..30db685cc5f4b 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -504,8 +504,10 @@ int snd_sof_device_shutdown(struct device *dev)
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ cancel_work_sync(&sdev->probe_work);
+
+- if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
++ if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) {
++ sof_fw_trace_free(sdev);
+ return snd_sof_shutdown(sdev);
++ }
+
+ return 0;
+ }
+diff --git a/sound/soc/sof/intel/hda-dai-ops.c b/sound/soc/sof/intel/hda-dai-ops.c
+index 4b39cecacd68d..5938046f46b21 100644
+--- a/sound/soc/sof/intel/hda-dai-ops.c
++++ b/sound/soc/sof/intel/hda-dai-ops.c
+@@ -289,16 +289,27 @@ static const struct hda_dai_widget_dma_ops hda_ipc4_chain_dma_ops = {
+ static int hda_ipc3_post_trigger(struct snd_sof_dev *sdev, struct snd_soc_dai *cpu_dai,
+ struct snd_pcm_substream *substream, int cmd)
+ {
++ struct hdac_ext_stream *hext_stream = hda_get_hext_stream(sdev, cpu_dai, substream);
+ struct snd_soc_dapm_widget *w = snd_soc_dai_get_widget(cpu_dai, substream->stream);
++ struct snd_soc_pcm_runtime *rtd = asoc_substream_to_rtd(substream);
++ struct snd_soc_dai *codec_dai = asoc_rtd_to_codec(rtd, 0);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_STOP:
+ {
+ struct snd_sof_dai_config_data data = { 0 };
++ int ret;
+
+ data.dai_data = DMA_CHAN_INVALID;
+- return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data);
++ ret = hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_HW_FREE, &data);
++ if (ret < 0)
++ return ret;
++
++ if (cmd == SNDRV_PCM_TRIGGER_STOP)
++ return hda_link_dma_cleanup(substream, hext_stream, cpu_dai, codec_dai);
++
++ break;
+ }
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ return hda_dai_config(w, SOF_DAI_CONFIG_FLAGS_PAUSE, NULL);
+diff --git a/sound/soc/sof/intel/hda-dai.c b/sound/soc/sof/intel/hda-dai.c
+index 44a5d94c5050f..8a76320c3b993 100644
+--- a/sound/soc/sof/intel/hda-dai.c
++++ b/sound/soc/sof/intel/hda-dai.c
+@@ -91,10 +91,10 @@ hda_dai_get_ops(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai
+ return sdai->platform_private;
+ }
+
+-static int hda_link_dma_cleanup(struct snd_pcm_substream *substream,
+- struct hdac_ext_stream *hext_stream,
+- struct snd_soc_dai *cpu_dai,
+- struct snd_soc_dai *codec_dai)
++int hda_link_dma_cleanup(struct snd_pcm_substream *substream,
++ struct hdac_ext_stream *hext_stream,
++ struct snd_soc_dai *cpu_dai,
++ struct snd_soc_dai *codec_dai)
+ {
+ struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(cpu_dai->component);
+ const struct hda_dai_widget_dma_ops *ops = hda_dai_get_ops(substream, cpu_dai);
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 3153e21f100ab..3853582e32e12 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -1343,12 +1343,22 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev,
+ hda_mach->mach_params.dmic_num = dmic_num;
+ pdata->tplg_filename = tplg_filename;
+
+- if (codec_num == 2) {
++ if (codec_num == 2 ||
++ (codec_num == 1 && !HDA_IDISP_CODEC(bus->codec_mask))) {
+ /*
+ * Prevent SoundWire links from starting when an external
+ * HDaudio codec is used
+ */
+ hda_mach->mach_params.link_mask = 0;
++ } else {
++ /*
++ * Allow SoundWire links to start when no external HDaudio codec
++ * was detected. This will not create a SoundWire card but
++ * will help detect if any SoundWire codec reports as ATTACHED.
++ */
++ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
++
++ hda_mach->mach_params.link_mask = hdev->info.link_mask;
+ }
+
+ *mach = hda_mach;
+diff --git a/sound/soc/sof/intel/hda.h b/sound/soc/sof/intel/hda.h
+index c4befacde23e4..94c738eae751a 100644
+--- a/sound/soc/sof/intel/hda.h
++++ b/sound/soc/sof/intel/hda.h
+@@ -942,5 +942,7 @@ const struct hda_dai_widget_dma_ops *
+ hda_select_dai_widget_ops(struct snd_sof_dev *sdev, struct snd_sof_widget *swidget);
+ int hda_dai_config(struct snd_soc_dapm_widget *w, unsigned int flags,
+ struct snd_sof_dai_config_data *data);
++int hda_link_dma_cleanup(struct snd_pcm_substream *substream, struct hdac_ext_stream *hext_stream,
++ struct snd_soc_dai *cpu_dai, struct snd_soc_dai *codec_dai);
+
+ #endif
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index efb4a3311cc59..5d72dc8441cbb 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -4507,6 +4507,35 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
++{
++ /* Advanced modes of the Mythware XA001AU.
++ * For the standard mode, Mythware XA001AU has ID ffad:a001
++ */
++ USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001),
++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ .vendor_name = "Mythware",
++ .product_name = "XA001AU",
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 0,
++ .type = QUIRK_IGNORE_INTERFACE,
++ },
++ {
++ .ifnum = 1,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
++ },
++ {
++ .ifnum = 2,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
+
+ #undef USB_DEVICE_VENDOR_SPEC
+ #undef USB_AUDIO_DEVICE
+diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
+index a25fdc08c39e8..228b2bb086ac2 100644
+--- a/tools/objtool/arch/x86/decode.c
++++ b/tools/objtool/arch/x86/decode.c
+@@ -824,8 +824,11 @@ bool arch_is_retpoline(struct symbol *sym)
+
+ bool arch_is_rethunk(struct symbol *sym)
+ {
+- return !strcmp(sym->name, "__x86_return_thunk") ||
+- !strcmp(sym->name, "srso_untrain_ret") ||
+- !strcmp(sym->name, "srso_safe_ret") ||
+- !strcmp(sym->name, "__ret");
++ return !strcmp(sym->name, "__x86_return_thunk");
++}
++
++bool arch_is_embedded_insn(struct symbol *sym)
++{
++ return !strcmp(sym->name, "retbleed_return_thunk") ||
++ !strcmp(sym->name, "srso_safe_ret");
+ }
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 0fcf99c914000..f7f34a0b101e1 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -429,7 +429,7 @@ static int decode_instructions(struct objtool_file *file)
+ if (!strcmp(sec->name, ".noinstr.text") ||
+ !strcmp(sec->name, ".entry.text") ||
+ !strcmp(sec->name, ".cpuidle.text") ||
+- !strncmp(sec->name, ".text.__x86.", 12))
++ !strncmp(sec->name, ".text..__x86.", 13))
+ sec->noinstr = true;
+
+ /*
+@@ -495,7 +495,7 @@ static int decode_instructions(struct objtool_file *file)
+ return -1;
+ }
+
+- if (func->return_thunk || func->alias != func)
++ if (func->embedded_insn || func->alias != func)
+ continue;
+
+ if (!find_insn(file, sec, func->offset)) {
+@@ -1346,16 +1346,33 @@ static int add_ignore_alternatives(struct objtool_file *file)
+ return 0;
+ }
+
++/*
++ * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol
++ * will be added to the .retpoline_sites section.
++ */
+ __weak bool arch_is_retpoline(struct symbol *sym)
+ {
+ return false;
+ }
+
++/*
++ * Symbols that replace INSN_RETURN, every (tail) call to such a symbol
++ * will be added to the .return_sites section.
++ */
+ __weak bool arch_is_rethunk(struct symbol *sym)
+ {
+ return false;
+ }
+
++/*
++ * Symbols that are embedded inside other instructions, because sometimes crazy
++ * code exists. These are mostly ignored for validation purposes.
++ */
++__weak bool arch_is_embedded_insn(struct symbol *sym)
++{
++ return false;
++}
++
+ static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn)
+ {
+ struct reloc *reloc;
+@@ -1638,14 +1655,14 @@ static int add_jump_destinations(struct objtool_file *file)
+ struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off);
+
+ /*
+- * This is a special case for zen_untrain_ret().
++ * This is a special case for retbleed_untrain_ret().
+ * It jumps to __x86_return_thunk(), but objtool
+ * can't find the thunk's starting RET
+ * instruction, because the RET is also in the
+ * middle of another instruction. Objtool only
+ * knows about the outer instruction.
+ */
+- if (sym && sym->return_thunk) {
++ if (sym && sym->embedded_insn) {
+ add_return_call(file, insn, false);
+ continue;
+ }
+@@ -2550,6 +2567,9 @@ static int classify_symbols(struct objtool_file *file)
+ if (arch_is_rethunk(func))
+ func->return_thunk = true;
+
++ if (arch_is_embedded_insn(func))
++ func->embedded_insn = true;
++
+ if (arch_ftrace_match(func->name))
+ func->fentry = true;
+
+@@ -2678,12 +2698,17 @@ static int decode_sections(struct objtool_file *file)
+ return 0;
+ }
+
+-static bool is_fentry_call(struct instruction *insn)
++static bool is_special_call(struct instruction *insn)
+ {
+- if (insn->type == INSN_CALL &&
+- insn_call_dest(insn) &&
+- insn_call_dest(insn)->fentry)
+- return true;
++ if (insn->type == INSN_CALL) {
++ struct symbol *dest = insn_call_dest(insn);
++
++ if (!dest)
++ return false;
++
++ if (dest->fentry || dest->embedded_insn)
++ return true;
++ }
+
+ return false;
+ }
+@@ -3681,7 +3706,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ if (ret)
+ return ret;
+
+- if (opts.stackval && func && !is_fentry_call(insn) &&
++ if (opts.stackval && func && !is_special_call(insn) &&
+ !has_valid_stack_frame(&state)) {
+ WARN_INSN(insn, "call without frame pointer save/setup");
+ return 1;
+diff --git a/tools/objtool/include/objtool/arch.h b/tools/objtool/include/objtool/arch.h
+index 2b6d2ce4f9a5b..0b303eba660e4 100644
+--- a/tools/objtool/include/objtool/arch.h
++++ b/tools/objtool/include/objtool/arch.h
+@@ -90,6 +90,7 @@ int arch_decode_hint_reg(u8 sp_reg, int *base);
+
+ bool arch_is_retpoline(struct symbol *sym);
+ bool arch_is_rethunk(struct symbol *sym);
++bool arch_is_embedded_insn(struct symbol *sym);
+
+ int arch_rewrite_retpolines(struct objtool_file *file);
+
+diff --git a/tools/objtool/include/objtool/elf.h b/tools/objtool/include/objtool/elf.h
+index e1ca588eb69d1..bfb4a69d0e91e 100644
+--- a/tools/objtool/include/objtool/elf.h
++++ b/tools/objtool/include/objtool/elf.h
+@@ -61,6 +61,7 @@ struct symbol {
+ u8 return_thunk : 1;
+ u8 fentry : 1;
+ u8 profiling_func : 1;
++ u8 embedded_insn : 1;
+ struct list_head pv_target;
+ struct list_head reloc_list;
+ };
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 9e02e19c1b7a9..4d564e0698dfc 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -44,7 +44,6 @@
+ #include <linux/zalloc.h>
+
+ static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
+-static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip);
+
+ static struct dso *machine__kernel_dso(struct machine *machine)
+ {
+@@ -2371,10 +2370,6 @@ static int add_callchain_ip(struct thread *thread,
+ ms.maps = al.maps;
+ ms.map = al.map;
+ ms.sym = al.sym;
+-
+- if (!branch && append_inlines(cursor, &ms, ip) == 0)
+- return 0;
+-
+ srcline = callchain_srcline(&ms, al.addr);
+ err = callchain_cursor_append(cursor, ip, &ms,
+ branch, flags, nr_loop_iter,
+diff --git a/tools/perf/util/thread-stack.c b/tools/perf/util/thread-stack.c
+index 4b85c1728012c..e72bd059538c1 100644
+--- a/tools/perf/util/thread-stack.c
++++ b/tools/perf/util/thread-stack.c
+@@ -1037,9 +1037,7 @@ static int thread_stack__trace_end(struct thread_stack *ts,
+
+ static bool is_x86_retpoline(const char *name)
+ {
+- const char *p = strstr(name, "__x86_indirect_thunk_");
+-
+- return p == name || !strcmp(name, "__indirect_thunk_start");
++ return strstr(name, "__x86_indirect_thunk_") == name;
+ }
+
+ /*
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+index aff88f78e3391..5ea9d63915f77 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+@@ -72,7 +72,8 @@ test_span_gre_ttl()
+
+ RET=0
+
+- mirror_install $swp1 ingress $tundev "matchall $tcflags"
++ mirror_install $swp1 ingress $tundev \
++ "prot ip flower $tcflags ip_prot icmp"
+ tc filter add dev $h3 ingress pref 77 prot $prot \
+ flower skip_hw ip_ttl 50 action pass
+