author		Mike Pagano <mpagano@gentoo.org>	2021-06-10 08:14:48 -0400
committer	Mike Pagano <mpagano@gentoo.org>	2021-06-10 08:14:48 -0400
commit		b2e817b8ca49469e758b8db4e5985605cb670c1b (patch)
tree		c6428df6370878b3f826e2e12b6c1ecb6d33ba48
parent		Updates from gyakovlev (diff)
Linux patch 5.12.10 (tag: 5.12-11)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--	0000_README	4
-rw-r--r--	1009_linux-5.12.10.patch	6796
2 files changed, 6800 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index f16429c0..25657f9e 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch:  1008_linux-5.12.9.patch
 From:   http://www.kernel.org
 Desc:   Linux 5.12.9
 
+Patch:  1009_linux-5.12.10.patch
+From:   http://www.kernel.org
+Desc:   Linux 5.12.10
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1009_linux-5.12.10.patch b/1009_linux-5.12.10.patch
new file mode 100644
index 00000000..d2017851
--- /dev/null
+++ b/1009_linux-5.12.10.patch
@@ -0,0 +1,6796 @@
+diff --git a/Makefile b/Makefile
+index d53577db10858..ebc02c56db03c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 12
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Frozen Wasteland
+ 
+diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+index 7d2c72562c735..9148a01ed6d9f 100644
+--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+@@ -105,9 +105,13 @@
+ 	phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
+ 	phy-reset-duration = <20>;
+ 	phy-supply = <&sw2_reg>;
+-	phy-handle = <&ethphy0>;
+ 	status = "okay";
+ 
++	fixed-link {
++		speed = <1000>;
++		full-duplex;
++	};
++
+ 	mdio {
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+diff --git a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+index 236fc205c3890..d0768ae429faa 100644
+--- a/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
++++ b/arch/arm/boot/dts/imx6q-dhcom-som.dtsi
+@@ -406,6 +406,18 @@
+ 	vin-supply = <&sw1_reg>;
+ };
+ 
++&reg_pu {
++	vin-supply = <&sw1_reg>;
++};
++
++&reg_vdd1p1 {
++	vin-supply = <&sw2_reg>;
++};
++
++&reg_vdd2p5 {
++	vin-supply = <&sw2_reg>;
++};
++
+ &uart1 {
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_uart1>;
+diff --git a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+index 828cf3e39784a..c4e146f3341bb 100644
+--- a/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-emcon-avari.dtsi
+@@ -126,7 +126,7 @@
+ 		compatible = "nxp,pca8574";
+ 		reg = <0x3a>;
+ 		gpio-controller;
+-		#gpio-cells = <1>;
++		#gpio-cells = <2>;
+ 	};
+ };
+ 
+diff --git a/arch/arm/boot/dts/imx7d-meerkat96.dts b/arch/arm/boot/dts/imx7d-meerkat96.dts
+index 5339210b63d0f..dd8003bd1fc09 100644
+--- a/arch/arm/boot/dts/imx7d-meerkat96.dts
++++ b/arch/arm/boot/dts/imx7d-meerkat96.dts
+@@ -193,7 +193,7 @@
+ 	pinctrl-names = "default";
+ 	pinctrl-0 = <&pinctrl_usdhc1>;
+ 	keep-power-in-suspend;
+-	tuning-step = <2>;
++	fsl,tuning-step = <2>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	no-1-8-v;
+ 	broken-cd;
+diff --git a/arch/arm/boot/dts/imx7d-pico.dtsi b/arch/arm/boot/dts/imx7d-pico.dtsi
+index e57da0d32b98d..e519897fae082 100644
+--- a/arch/arm/boot/dts/imx7d-pico.dtsi
++++ b/arch/arm/boot/dts/imx7d-pico.dtsi
+@@ -351,7 +351,7 @@
+ 	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
+ 	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+ 	bus-width = <4>;
+-	tuning-step = <2>;
++	fsl,tuning-step = <2>;
+ 	vmmc-supply = <&reg_3p3v>;
+ 	wakeup-source;
+ 	no-1-8-v;
+diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
+index c40cf5ef86079..977b0b744c22a 100644
+--- a/arch/arm/mach-omap1/board-h2.c
++++ b/arch/arm/mach-omap1/board-h2.c
+@@ -320,7 +320,7 @@ static int tps_setup(struct i2c_client *client, void *context)
+ {
+ 	if (!IS_BUILTIN(CONFIG_TPS65010))
+ 		return -ENOSYS;
+-	
++
+ 	tps65010_config_vregs1(TPS_LDO2_ENABLE | TPS_VLDO2_3_0V |
+ 				TPS_LDO1_ENABLE | TPS_VLDO1_3_0V);
+ 
+@@ -394,6 +394,8 @@ static void __init h2_init(void)
+ 	BUG_ON(gpio_request(H2_NAND_RB_GPIO_PIN, "NAND ready") < 0);
+ 	gpio_direction_input(H2_NAND_RB_GPIO_PIN);
+ 
++	gpiod_add_lookup_table(&isp1301_gpiod_table);
++
+ 	omap_cfg_reg(L3_1610_FLASH_CS2B_OE);
+ 	omap_cfg_reg(M8_1610_FLASH_CS2B_WE);
+ 
+diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
+index cdfd5fed457ff..a3fdffcd1ce8b 100644
+--- a/arch/arm64/Kconfig.platforms
++++ b/arch/arm64/Kconfig.platforms
+@@ -168,6 +168,7 @@ config ARCH_MEDIATEK
+ 
+ config ARCH_MESON
+ 	bool "Amlogic Platforms"
++	select COMMON_CLK
+ 	select MESON_IRQ_GPIO
+ 	help
+ 	  This enables support for the arm64 based Amlogic SoCs
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
+index 6c309b97587df..e8d31279b7a34 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var1.dts
+@@ -46,7 +46,8 @@
+ 	eee-broken-100tx;
+ 	qca,clk-out-frequency = <125000000>;
+ 	qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+-	vddio-supply = <&vddh>;
++	qca,keep-pll-enabled;
++	vddio-supply = <&vddio>;
+ 
+ 	vddio: vddio-regulator {
+ 		regulator-name = "VDDIO";
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+index df212ed5bb942..e65d1c477e2ce 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28-var4.dts
+@@ -31,11 +31,10 @@
+ 	reg = <0x4>;
+ 	eee-broken-1000t;
+ 	eee-broken-100tx;
+-
+ 	qca,clk-out-frequency = <125000000>;
+ 	qca,clk-out-strength = <AR803X_STRENGTH_FULL>;
+-
+-	vddio-supply = <&vddh>;
++	qca,keep-pll-enabled;
++	vddio-supply = <&vddio>;
+ 
+ 	vddio: vddio-regulator {
+ 		regulator-name = "VDDIO";
+diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+index 262fbad8f0ec5..1b264e5e947ac 100644
+--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
++++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a.dtsi
+@@ -201,8 +201,8 @@
+ 		ddr: memory-controller@1080000 {
+ 			compatible = "fsl,qoriq-memory-controller";
+ 			reg = <0x0 0x1080000 0x0 0x1000>;
+-			interrupts = <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>;
+-			big-endian;
++			interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
++			little-endian;
+ 		};
+ 
+ 		dcfg: syscon@1e00000 {
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+index 631e01c1b9fd4..be1e7d6f0ecb5 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra-rmb3.dts
+@@ -88,11 +88,11 @@
+ 	pinctrl-0 = <&pinctrl_codec2>;
+ 	reg = <0x18>;
+ 	#sound-dai-cells = <0>;
+-	HPVDD-supply = <&reg_3p3v>;
+-	SPRVDD-supply = <&reg_3p3v>;
+-	SPLVDD-supply = <&reg_3p3v>;
+-	AVDD-supply = <&reg_3p3v>;
+-	IOVDD-supply = <&reg_3p3v>;
++	HPVDD-supply = <&reg_gen_3p3>;
++	SPRVDD-supply = <&reg_gen_3p3>;
++	SPLVDD-supply = <&reg_gen_3p3>;
++	AVDD-supply = <&reg_gen_3p3>;
++	IOVDD-supply = <&reg_gen_3p3>;
+ 	DVDD-supply = <&vgen4_reg>;
+ 	reset-gpios = <&gpio3 4 GPIO_ACTIVE_HIGH>;
+ };
+diff --git a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+index 4dc8383478ee2..a08a568c31d92 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
+@@ -45,8 +45,8 @@
+ 	reg_12p0_main: regulator-12p0-main {
+ 		compatible = "regulator-fixed";
+ 		regulator-name = "12V_MAIN";
+-		regulator-min-microvolt = <5000000>;
+-		regulator-max-microvolt = <5000000>;
++		regulator-min-microvolt = <12000000>;
++		regulator-max-microvolt = <12000000>;
+ 		regulator-always-on;
+ 	};
+ 
+@@ -77,15 +77,6 @@
+ 		regulator-always-on;
+ 	};
+ 
+-	reg_3p3v: regulator-3p3v {
+-		compatible = "regulator-fixed";
+-		vin-supply = <&reg_3p3_main>;
+-		regulator-name = "GEN_3V3";
+-		regulator-min-microvolt = <3300000>;
+-		regulator-max-microvolt = <3300000>;
+-		regulator-always-on;
+-	};
+-
+ 	reg_usdhc2_vmmc: regulator-vsd-3v3 {
+ 		pinctrl-names = "default";
+ 		pinctrl-0 = <&pinctrl_reg_usdhc2>;
+@@ -415,11 +406,11 @@
+ 	pinctrl-0 = <&pinctrl_codec1>;
+ 	reg = <0x18>;
+ 	#sound-dai-cells = <0>;
+-	HPVDD-supply = <&reg_3p3v>;
+-	SPRVDD-supply = <&reg_3p3v>;
+-	SPLVDD-supply = <&reg_3p3v>;
+-	AVDD-supply = <&reg_3p3v>;
+-	IOVDD-supply = <&reg_3p3v>;
++	HPVDD-supply = <&reg_gen_3p3>;
++	SPRVDD-supply = <&reg_gen_3p3>;
++	SPLVDD-supply = <&reg_gen_3p3>;
++	AVDD-supply = <&reg_gen_3p3>;
++	IOVDD-supply = <&reg_gen_3p3>;
+ 	DVDD-supply = <&vgen4_reg>;
+ 	reset-gpios = <&gpio3 3 GPIO_ACTIVE_LOW>;
+ };
+diff --git a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+index 17477ab0fd8e1..3398f174f09b3 100644
+--- a/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
++++ b/arch/arm64/boot/dts/ti/k3-j7200-main.dtsi
+@@ -85,6 +85,8 @@
+ 		#size-cells = <2>;
+ 		ranges = <0x00 0x30000000 0x00 0x30000000 0x00 0x0c400000>;
+ 		ti,sci-dev-id = <199>;
++		dma-coherent;
++		dma-ranges;
+ 
+ 		main_navss_intr: interrupt-controller1 {
+ 			compatible = "ti,sci-intr";
+diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
+index a8578d650bb67..f362f72bcb508 100644
+--- a/arch/arm64/include/asm/kvm_asm.h
++++ b/arch/arm64/include/asm/kvm_asm.h
+@@ -57,6 +57,7 @@
+ #define __KVM_HOST_SMCCC_FUNC___kvm_get_mdcr_el2	12
+ #define __KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs	13
+ #define __KVM_HOST_SMCCC_FUNC___vgic_v3_restore_aprs	14
++#define __KVM_HOST_SMCCC_FUNC___kvm_adjust_pc		15
+ 
+ #ifndef __ASSEMBLY__
+ 
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 84b5f79c9eab4..7730b81aad6d1 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -715,11 +715,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 			return ret;
+ 	}
+ 
+-	if (run->immediate_exit)
+-		return -EINTR;
+-
+ 	vcpu_load(vcpu);
+ 
++	if (run->immediate_exit) {
++		ret = -EINTR;
++		goto out;
++	}
++
+ 	kvm_sigset_activate(vcpu);
+ 
+ 	ret = 1;
+@@ -892,6 +894,18 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ 
+ 	kvm_sigset_deactivate(vcpu);
+ 
++out:
++	/*
++	 * In the unlikely event that we are returning to userspace
++	 * with pending exceptions or PC adjustment, commit these
++	 * adjustments in order to give userspace a consistent view of
++	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
++	 * being preempt-safe on VHE.
++	 */
++	if (unlikely(vcpu->arch.flags & (KVM_ARM64_PENDING_EXCEPTION |
++					 KVM_ARM64_INCREMENT_PC)))
++		kvm_call_hyp(__kvm_adjust_pc, vcpu);
++
+ 	vcpu_put(vcpu);
+ 	return ret;
+ }
+diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
+index 0812a496725f6..11541b94b328f 100644
+--- a/arch/arm64/kvm/hyp/exception.c
++++ b/arch/arm64/kvm/hyp/exception.c
+@@ -331,8 +331,8 @@ static void kvm_inject_exception(struct kvm_vcpu *vcpu)
+ }
+ 
+ /*
+- * Adjust the guest PC on entry, depending on flags provided by EL1
+- * for the purpose of emulation (MMIO, sysreg) or exception injection.
++ * Adjust the guest PC (and potentially exception state) depending on
++ * flags provided by the emulation code.
+  */
+ void __kvm_adjust_pc(struct kvm_vcpu *vcpu)
+ {
+diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+index 936328207bde0..e52582e140873 100644
+--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
++++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+@@ -25,6 +25,13 @@ static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
+ 	cpu_reg(host_ctxt, 1) =  __kvm_vcpu_run(kern_hyp_va(vcpu));
+ }
+ 
++static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
++{
++	DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
++
++	__kvm_adjust_pc(kern_hyp_va(vcpu));
++}
++
+ static void handle___kvm_flush_vm_context(struct kvm_cpu_context *host_ctxt)
+ {
+ 	__kvm_flush_vm_context();
+@@ -112,6 +119,7 @@ typedef void (*hcall_t)(struct kvm_cpu_context *);
+ 
+ static const hcall_t host_hcall[] = {
+ 	HANDLE_FUNC(__kvm_vcpu_run),
++	HANDLE_FUNC(__kvm_adjust_pc),
+ 	HANDLE_FUNC(__kvm_flush_vm_context),
+ 	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
+ 	HANDLE_FUNC(__kvm_tlb_flush_vmid),
+diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
+index 7719d632df8df..1754498b07174 100644
+--- a/arch/mips/mm/cache.c
++++ b/arch/mips/mm/cache.c
+@@ -157,31 +157,29 @@ unsigned long _page_cachable_default;
+ EXPORT_SYMBOL(_page_cachable_default);
+ 
+ #define PM(p)	__pgprot(_page_cachable_default | (p))
+-#define PVA(p)	PM(_PAGE_VALID | _PAGE_ACCESSED | (p))
+ 
+ static inline void setup_protection_map(void)
+ {
+ 	protection_map[0]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+-	protection_map[1]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+-	protection_map[2]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+-	protection_map[3]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+-	protection_map[4]  = PVA(_PAGE_PRESENT);
+-	protection_map[5]  = PVA(_PAGE_PRESENT);
+-	protection_map[6]  = PVA(_PAGE_PRESENT);
+-	protection_map[7]  = PVA(_PAGE_PRESENT);
++	protection_map[1]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
++	protection_map[2]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
++	protection_map[3]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
++	protection_map[4]  = PM(_PAGE_PRESENT);
++	protection_map[5]  = PM(_PAGE_PRESENT);
++	protection_map[6]  = PM(_PAGE_PRESENT);
++	protection_map[7]  = PM(_PAGE_PRESENT);
+ 
+ 	protection_map[8]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
+-	protection_map[9]  = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC);
+-	protection_map[10] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
++	protection_map[9]  = PM(_PAGE_PRESENT | _PAGE_NO_EXEC);
++	protection_map[10] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE |
+ 				_PAGE_NO_READ);
+-	protection_map[11] = PVA(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
+-	protection_map[12] = PVA(_PAGE_PRESENT);
+-	protection_map[13] = PVA(_PAGE_PRESENT);
+-	protection_map[14] = PVA(_PAGE_PRESENT);
+-	protection_map[15] = PVA(_PAGE_PRESENT);
++	protection_map[11] = PM(_PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
++	protection_map[12] = PM(_PAGE_PRESENT);
++	protection_map[13] = PM(_PAGE_PRESENT);
++	protection_map[14] = PM(_PAGE_PRESENT | _PAGE_WRITE);
++	protection_map[15] = PM(_PAGE_PRESENT | _PAGE_WRITE);
+ }
+ 
+-#undef _PVA
+ #undef PM
+ 
+ void cpu_cache_init(void)
+diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
+index 01ab2163659e4..e8c2a6373157d 100644
+--- a/arch/powerpc/kernel/kprobes.c
++++ b/arch/powerpc/kernel/kprobes.c
+@@ -108,7 +108,6 @@ int arch_prepare_kprobe(struct kprobe *p)
+ 	int ret = 0;
+ 	struct kprobe *prev;
+ 	struct ppc_inst insn = ppc_inst_read((struct ppc_inst *)p->addr);
+-	struct ppc_inst prefix = ppc_inst_read((struct ppc_inst *)(p->addr - 1));
+ 
+ 	if ((unsigned long)p->addr & 0x03) {
+ 		printk("Attempt to register kprobe at an unaligned address\n");
+@@ -116,7 +115,8 @@ int arch_prepare_kprobe(struct kprobe *p)
+ 	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
+ 		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
+ 		ret = -EINVAL;
+-	} else if (ppc_inst_prefixed(prefix)) {
++	} else if ((unsigned long)p->addr & ~PAGE_MASK &&
++		   ppc_inst_prefixed(ppc_inst_read((struct ppc_inst *)(p->addr - 1)))) {
+ 		printk("Cannot register a kprobe on the second word of prefixed instruction\n");
+ 		ret = -EINVAL;
+ 	}
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 208a053c9adfd..60c5bc0c130cf 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -4418,7 +4418,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
+ 		mtspr(SPRN_EBBRR, ebb_regs[1]);
+ 		mtspr(SPRN_BESCR, ebb_regs[2]);
+ 		mtspr(SPRN_TAR, user_tar);
+-		mtspr(SPRN_FSCR, current->thread.fscr);
+ 	}
+ 	mtspr(SPRN_VRSAVE, user_vrsave);
+ 
+diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+index 5e634db4809bf..004f0d4e665f8 100644
+--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
++++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+@@ -59,6 +59,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
+ #define STACK_SLOT_UAMOR	(SFS-88)
+ #define STACK_SLOT_DAWR1	(SFS-96)
+ #define STACK_SLOT_DAWRX1	(SFS-104)
++#define STACK_SLOT_FSCR		(SFS-112)
+ /* the following is used by the P9 short path */
+ #define STACK_SLOT_NVGPRS	(SFS-152)	/* 18 gprs */
+ 
+@@ -686,6 +687,8 @@ BEGIN_FTR_SECTION
+ 	std	r6, STACK_SLOT_DAWR0(r1)
+ 	std	r7, STACK_SLOT_DAWRX0(r1)
+ 	std	r8, STACK_SLOT_IAMR(r1)
++	mfspr	r5, SPRN_FSCR
++	std	r5, STACK_SLOT_FSCR(r1)
+ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ BEGIN_FTR_SECTION
+ 	mfspr	r6, SPRN_DAWR1
+@@ -1663,6 +1666,10 @@ FTR_SECTION_ELSE
+ 	ld	r7, STACK_SLOT_HFSCR(r1)
+ 	mtspr	SPRN_HFSCR, r7
+ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
++BEGIN_FTR_SECTION
++	ld	r5, STACK_SLOT_FSCR(r1)
++	mtspr	SPRN_FSCR, r5
++END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+ 	/*
+ 	 * Restore various registers to 0, where non-zero values
+ 	 * set by the guest could disrupt the host.
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index ca2b40dfd24b8..24d936c147cdf 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -23,7 +23,7 @@ ifneq ($(c-gettimeofday-y),)
+ endif
+ 
+ # Build rules
+-targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
++targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S
+ obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+ 
+ obj-y += vdso.o vdso-syms.o
+@@ -41,7 +41,7 @@ KASAN_SANITIZE := n
+ $(obj)/vdso.o: $(obj)/vdso.so
+ 
+ # link rule for the .so file, .lds has to be first
+-$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
++$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ 	$(call if_changed,vdsold)
+ LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
+ 	--build-id=sha1 --hash-style=both --eh-frame-hdr
+diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
+index 412b51e059c80..48067af946785 100644
+--- a/arch/x86/include/asm/apic.h
++++ b/arch/x86/include/asm/apic.h
+@@ -174,6 +174,7 @@ static inline int apic_is_clustered_box(void)
+ extern int setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask);
+ extern void lapic_assign_system_vectors(void);
+ extern void lapic_assign_legacy_vector(unsigned int isairq, bool replace);
++extern void lapic_update_legacy_vectors(void);
+ extern void lapic_online(void);
+ extern void lapic_offline(void);
+ extern bool apic_needs_pit(void);
+diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h
+index b7dd944dc8673..8f28fafa98b32 100644
+--- a/arch/x86/include/asm/disabled-features.h
++++ b/arch/x86/include/asm/disabled-features.h
+@@ -56,11 +56,8 @@
+ # define DISABLE_PTI		(1 << (X86_FEATURE_PTI & 31))
+ #endif
+ 
+-#ifdef CONFIG_IOMMU_SUPPORT
+-# define DISABLE_ENQCMD	0
+-#else
+-# define DISABLE_ENQCMD	(1 << (X86_FEATURE_ENQCMD & 31))
+-#endif
++/* Force disable because it's broken beyond repair */
++#define DISABLE_ENQCMD	(1 << (X86_FEATURE_ENQCMD & 31))
+ 
+ #ifdef CONFIG_X86_SGX
+ # define DISABLE_SGX	0
+diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
+index ed33a14188f66..23bef08a83880 100644
+--- a/arch/x86/include/asm/fpu/api.h
++++ b/arch/x86/include/asm/fpu/api.h
+@@ -106,10 +106,6 @@ extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
+  */
+ #define PASID_DISABLED	0
+ 
+-#ifdef CONFIG_IOMMU_SUPPORT
+-/* Update current's PASID MSR/state by mm's PASID. */
+-void update_pasid(void);
+-#else
+ static inline void update_pasid(void) { }
+-#endif
++
+ #endif /* _ASM_X86_FPU_API_H */
+diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
+index 8d33ad80704f2..ceeba9f631722 100644
+--- a/arch/x86/include/asm/fpu/internal.h
++++ b/arch/x86/include/asm/fpu/internal.h
+@@ -584,13 +584,6 @@ static inline void switch_fpu_finish(struct fpu *new_fpu)
+ 			pkru_val = pk->pkru;
+ 	}
+ 	__write_pkru(pkru_val);
+-
+-	/*
+-	 * Expensive PASID MSR write will be avoided in update_pasid() because
+-	 * TIF_NEED_FPU_LOAD was set. And the PASID state won't be updated
+-	 * unless it's different from mm->pasid to reduce overhead.
+-	 */
+-	update_pasid();
+ }
+ 
+ #endif /* _ASM_X86_FPU_INTERNAL_H */
+diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
+index 3381198525126..69299878b200a 100644
+--- a/arch/x86/include/asm/kvm_para.h
++++ b/arch/x86/include/asm/kvm_para.h
+@@ -7,8 +7,6 @@
+ #include <linux/interrupt.h>
+ #include <uapi/asm/kvm_para.h>
+ 
+-extern void kvmclock_init(void);
+-
+ #ifdef CONFIG_KVM_GUEST
+ bool kvm_check_and_clear_guest_paused(void);
+ #else
+@@ -86,13 +84,14 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
+ }
+ 
+ #ifdef CONFIG_KVM_GUEST
++void kvmclock_init(void);
++void kvmclock_disable(void);
+ bool kvm_para_available(void);
+ unsigned int kvm_arch_para_features(void);
+ unsigned int kvm_arch_para_hints(void);
+ void kvm_async_pf_task_wait_schedule(u32 token);
+ void kvm_async_pf_task_wake(u32 token);
+ u32 kvm_read_and_reset_apf_flags(void);
+-void kvm_disable_steal_time(void);
+ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
+ 
+ DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+@@ -137,11 +136,6 @@ static inline u32 kvm_read_and_reset_apf_flags(void)
+ 	return 0;
+ }
+ 
+-static inline void kvm_disable_steal_time(void)
+-{
+-	return;
+-}
+-
+ static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+ {
+ 	return false;
+diff --git a/arch/x86/include/asm/thermal.h b/arch/x86/include/asm/thermal.h
+index ddbdefd5b94f1..91a7b6687c3b9 100644
+--- a/arch/x86/include/asm/thermal.h
++++ b/arch/x86/include/asm/thermal.h
+@@ -3,11 +3,13 @@
+ #define _ASM_X86_THERMAL_H
+ 
+ #ifdef CONFIG_X86_THERMAL_VECTOR
++void therm_lvt_init(void);
+ void intel_init_thermal(struct cpuinfo_x86 *c);
+ bool x86_thermal_enabled(void);
+ void intel_thermal_interrupt(void);
+ #else
+-static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
++static inline void therm_lvt_init(void) { }
++static inline void intel_init_thermal(struct cpuinfo_x86 *c) { }
+ #endif
+ 
+ #endif /* _ASM_X86_THERMAL_H */
+diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
+index 4f26700f314d9..b967a2ba7494a 100644
+--- a/arch/x86/kernel/apic/apic.c
++++ b/arch/x86/kernel/apic/apic.c
+@@ -2604,6 +2604,7 @@ static void __init apic_bsp_setup(bool upmode)
+ 	end_local_APIC_setup();
+ 	irq_remap_enable_fault_handling();
+ 	setup_IO_APIC();
++	lapic_update_legacy_vectors();
+ }
+ 
+ #ifdef CONFIG_UP_LATE_INIT
+diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
+index 3c9c7492252f8..8c97a3468affa 100644
+--- a/arch/x86/kernel/apic/vector.c
++++ b/arch/x86/kernel/apic/vector.c
+@@ -730,6 +730,26 @@ void lapic_assign_legacy_vector(unsigned int irq, bool replace)
+ 	irq_matrix_assign_system(vector_matrix, ISA_IRQ_VECTOR(irq), replace);
+ }
+ 
++void __init lapic_update_legacy_vectors(void)
++{
++	unsigned int i;
++
++	if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0)
++		return;
++
++	/*
++	 * If the IO/APIC is disabled via config, kernel command line or
++	 * lack of enumeration then all legacy interrupts are routed
++	 * through the PIC. Make sure that they are marked as legacy
++	 * vectors. PIC_CASCADE_IRQ has already been marked in
++	 * lapic_assign_system_vectors().
++	 */
++	for (i = 0; i < nr_legacy_irqs(); i++) {
++		if (i != PIC_CASCADE_IR)
++			lapic_assign_legacy_vector(i, true);
++	}
++}
++
+ void __init lapic_assign_system_vectors(void)
+ {
+ 	unsigned int i, vector = 0;
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 683749b80ae28..2ad57cc14b83f 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -1402,60 +1402,3 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
+ 	return 0;
+ }
+ #endif /* CONFIG_PROC_PID_ARCH_STATUS */
+-
+-#ifdef CONFIG_IOMMU_SUPPORT
+-void update_pasid(void)
+-{
+-	u64 pasid_state;
+-	u32 pasid;
+-
+-	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
+-		return;
+-
+-	if (!current->mm)
+-		return;
+-
+-	pasid = READ_ONCE(current->mm->pasid);
+-	/* Set the valid bit in the PASID MSR/state only for valid pasid. */
+-	pasid_state = pasid == PASID_DISABLED ?
+-		      pasid : pasid | MSR_IA32_PASID_VALID;
+-
+-	/*
+-	 * No need to hold fregs_lock() since the task's fpstate won't
+-	 * be changed by others (e.g. ptrace) while the task is being
+-	 * switched to or is in IPI.
+-	 */
+-	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+-		/* The MSR is active and can be directly updated. */
+-		wrmsrl(MSR_IA32_PASID, pasid_state);
+-	} else {
+-		struct fpu *fpu = &current->thread.fpu;
+-		struct ia32_pasid_state *ppasid_state;
+-		struct xregs_state *xsave;
+-
+-		/*
+-		 * The CPU's xstate registers are not currently active. Just
+-		 * update the PASID state in the memory buffer here. The
+-		 * PASID MSR will be loaded when returning to user mode.
+-		 */
+-		xsave = &fpu->state.xsave;
+-		xsave->header.xfeatures |= XFEATURE_MASK_PASID;
+-		ppasid_state = get_xsave_addr(xsave, XFEATURE_PASID);
+-		/*
+-		 * Since XFEATURE_MASK_PASID is set in xfeatures, ppasid_state
+-		 * won't be NULL and no need to check its value.
+-		 *
+-		 * Only update the task's PASID state when it's different
+-		 * from the mm's pasid.
+-		 */
+-		if (ppasid_state->pasid != pasid_state) {
+-			/*
+-			 * Invalid fpregs so that state restoring will pick up
+-			 * the PASID state.
+-			 */
+-			__fpu_invalidate_fpregs_state(fpu);
+-			ppasid_state->pasid = pasid_state;
+-		}
+-	}
+-}
+-#endif /* CONFIG_IOMMU_SUPPORT */
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index 78bb0fae39826..919411a0117df 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -26,6 +26,7 @@
+ #include <linux/kprobes.h>
+ #include <linux/nmi.h>
+ #include <linux/swait.h>
++#include <linux/syscore_ops.h>
+ #include <asm/timer.h>
+ #include <asm/cpu.h>
+ #include <asm/traps.h>
+@@ -37,6 +38,7 @@
+ #include <asm/tlb.h>
+ #include <asm/cpuidle_haltpoll.h>
+ #include <asm/ptrace.h>
++#include <asm/reboot.h>
+ #include <asm/svm.h>
+ 
+ DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
+@@ -374,6 +376,14 @@ static void kvm_pv_disable_apf(void)
+ 	pr_info("Unregister pv shared memory for cpu %d\n", smp_processor_id());
+ }
+ 
++static void kvm_disable_steal_time(void)
++{
++	if (!has_steal_clock)
++		return;
++
++	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
++}
++
+ static void kvm_pv_guest_cpu_reboot(void *unused)
+ {
+ 	/*
+@@ -416,14 +426,6 @@ static u64 kvm_steal_clock(int cpu)
+ 	return steal;
+ }
+ 
+-void kvm_disable_steal_time(void)
+-{
+-	if (!has_steal_clock)
+-		return;
+-
+-	wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
+-}
+-
+ static inline void __set_percpu_decrypted(void *ptr, unsigned long size)
+ {
+ 	early_set_memory_decrypted((unsigned long) ptr, size);
+@@ -460,6 +462,27 @@ static bool pv_tlb_flush_supported(void)
+ 
+ static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+ 
++static void kvm_guest_cpu_offline(bool shutdown)
++{
++	kvm_disable_steal_time();
++	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
++		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
++	kvm_pv_disable_apf();
++	if (!shutdown)
++		apf_task_wake_all();
++	kvmclock_disable();
++}
++
++static int kvm_cpu_online(unsigned int cpu)
++{
++	unsigned long flags;
++
++	local_irq_save(flags);
++	kvm_guest_cpu_init();
++	local_irq_restore(flags);
++	return 0;
++}
++
+ #ifdef CONFIG_SMP
+ 
+ static bool pv_ipi_supported(void)
+@@ -587,29 +610,46 @@ static void __init kvm_smp_prepare_boot_cpu(void)
+ 	kvm_spinlock_init();
+ }
+ 
+-static void kvm_guest_cpu_offline(void)
++static int kvm_cpu_down_prepare(unsigned int cpu)
+ {
+-	kvm_disable_steal_time();
+-	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+-		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+-	kvm_pv_disable_apf();
+-	apf_task_wake_all();
++	unsigned long flags;
++
++	local_irq_save(flags);
++	kvm_guest_cpu_offline(false);
++	local_irq_restore(flags);
++	return 0;
+ }
+ 
+-static int kvm_cpu_online(unsigned int cpu)
++#endif
++
++static int kvm_suspend(void)
+ {
+-	local_irq_disable();
+-	kvm_guest_cpu_init();
+-	local_irq_enable();
++	kvm_guest_cpu_offline(false);
++
+ 	return 0;
+ }
+ 
+-static int kvm_cpu_down_prepare(unsigned int cpu)
++static void kvm_resume(void)
+ {
+-	local_irq_disable();
+-	kvm_guest_cpu_offline();
+-	local_irq_enable();
+-	return 0;
++	kvm_cpu_online(raw_smp_processor_id());
++}
++
++static struct syscore_ops kvm_syscore_ops = {
++	.suspend	= kvm_suspend,
++	.resume		= kvm_resume,
++};
++
++/*
++ * After a PV feature is registered, the host will keep writing to the
++ * registered memory location. If the guest happens to shutdown, this memory
++ * won't be valid. In cases like kexec, in which you install a new kernel, this
++ * means a random memory location will be kept being written.
++ */
++#ifdef CONFIG_KEXEC_CORE
++static void kvm_crash_shutdown(struct pt_regs *regs)
++{
++	kvm_guest_cpu_offline(true);
++	native_machine_crash_shutdown(regs);
+ }
+ #endif
+ 
+@@ -681,6 +721,12 @@ static void __init kvm_guest_init(void)
+ 	kvm_guest_cpu_init();
+ #endif
+ 
++#ifdef CONFIG_KEXEC_CORE
++	machine_ops.crash_shutdown = kvm_crash_shutdown;
++#endif
++
++	register_syscore_ops(&kvm_syscore_ops);
++
+ 	/*
+ 	 * Hard lockup detection is enabled by default. Disable it, as guests
+ 	 * can get false positives too easily, for example if the host is
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 1fc0962c89c08..b825c87c12ef7 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -20,7 +20,6 @@
+ #include <asm/hypervisor.h>
+ #include <asm/mem_encrypt.h>
+ #include <asm/x86_init.h>
+-#include <asm/reboot.h>
+ #include <asm/kvmclock.h>
+ 
+ static int kvmclock __initdata = 1;
+@@ -203,28 +202,9 @@ static void kvm_setup_secondary_clock(void)
+ }
+ #endif
+ 
+-/*
+- * After the clock is registered, the host will keep writing to the
+- * registered memory location. If the guest happens to shutdown, this memory
+- * won't be valid. In cases like kexec, in which you install a new kernel, this
+- * means a random memory location will be kept being written. So before any
+- * kind of shutdown from our side, we unregister the clock by writing anything
+- * that does not have the 'enable' bit set in the msr
+- */
+-#ifdef CONFIG_KEXEC_CORE
+-static void kvm_crash_shutdown(struct pt_regs *regs)
+-{
+-	native_write_msr(msr_kvm_system_time, 0, 0);
+-	kvm_disable_steal_time();
+-	native_machine_crash_shutdown(regs);
+-}
+-#endif
+-
+-static void kvm_shutdown(void)
++void kvmclock_disable(void)
+ {
+ 	native_write_msr(msr_kvm_system_time, 0, 0);
+-	kvm_disable_steal_time();
+-	native_machine_shutdown();
+ }
+ 
+ static void __init kvmclock_init_mem(void)
+@@ -351,10 +331,6 @@ void __init kvmclock_init(void)
+ #endif
+ 	x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
+ 	x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
+-	machine_ops.shutdown  = kvm_shutdown;
+-#ifdef CONFIG_KEXEC_CORE
+-	machine_ops.crash_shutdown  = kvm_crash_shutdown;
+-#endif
+ 	kvm_get_preset_lpj();
+ 
+ 	/*
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index ccab6cf91283d..e79f21d13a0d7 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -44,6 +44,7 @@
+ #include <asm/pci-direct.h>
+ #include <asm/prom.h>
+ #include <asm/proto.h>
++#include <asm/thermal.h>
+ #include <asm/unwind.h>
+ #include <asm/vsyscall.h>
+ #include <linux/vmalloc.h>
+@@ -1220,6 +1221,14 @@ void __init setup_arch(char **cmdline_p)
+ 
+ 	x86_init.timers.wallclock_init();
+ 
++	/*
++	 * This needs to run before setup_local_APIC() which soft-disables the
++	 * local APIC temporarily and that masks the thermal LVT interrupt,
++	 * leading to softlockups on machines which have configured SMI
++	 * interrupt delivery.
++	 */
++	therm_lvt_init();
++
+ 	mcheck_init();
+ 
+ 	register_refined_jiffies(CLOCK_TICK_RATE);
+diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
+index 9a6825feaf53f..30569bbbca9ac 100644
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -2532,7 +2532,7 @@ static int cr_interception(struct vcpu_svm *svm)
+ 	err = 0;
+ 	if (cr >= 16) { /* mov to cr */
+ 		cr -= 16;
+-		val = kvm_register_read(&svm->vcpu, reg);
++		val = kvm_register_readl(&svm->vcpu, reg);
+ 		trace_kvm_cr_write(cr, val);
+ 		switch (cr) {
+ 		case 0:
+@@ -2578,7 +2578,7 @@ static int cr_interception(struct vcpu_svm *svm)
+ 			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ 			return 1;
+ 		}
+-		kvm_register_write(&svm->vcpu, reg, val);
++		kvm_register_writel(&svm->vcpu, reg, val);
+ 		trace_kvm_cr_read(cr, val);
+ 	}
+ 	return kvm_complete_insn_gp(&svm->vcpu, err);
+@@ -2643,11 +2643,11 @@ static int dr_interception(struct vcpu_svm *svm)
+ 	dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
+ 	if (dr >= 16) { /* mov to DRn */
+ 		dr -= 16;
+-		val = kvm_register_read(&svm->vcpu, reg);
++		val = kvm_register_readl(&svm->vcpu, reg);
+ 		err = kvm_set_dr(&svm->vcpu, dr, val);
+ 	} else {
+ 		kvm_get_dr(&svm->vcpu, dr, &val);
+-		kvm_register_write(&svm->vcpu, reg, val);
++		kvm_register_writel(&svm->vcpu, reg, val);
+ 	}
+ 
+ 	return kvm_complete_insn_gp(&svm->vcpu, err);
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index a73347e2cdfc5..ea3d0b73731bc 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -836,8 +836,8 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
+ 
+ 	if (si_code == SEGV_PKUERR)
+ 		force_sig_pkuerr((void __user *)address, pkey);
+-
+-	force_sig_fault(SIGSEGV, si_code, (void __user *)address);
++	else
++		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+ 
+ 	local_irq_disable();
+ }
+diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
+index a19374d261013..65f599e9075bc 100644
+--- a/arch/x86/mm/mem_encrypt_identity.c
++++ b/arch/x86/mm/mem_encrypt_identity.c
+@@ -504,10 +504,6 @@ void __init sme_enable(struct boot_params *bp)
+ #define AMD_SME_BIT	BIT(0)
+ #define AMD_SEV_BIT	BIT(1)
+ 
+-	/* Check the SEV MSR whether SEV or SME is enabled */
+-	sev_status   = __rdmsr(MSR_AMD64_SEV);
+-	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
+-
+ 	/*
+ 	 * Check for the SME/SEV feature:
+ 	 *   CPUID Fn8000_001F[EAX]
+@@ -519,11 +515,16 @@ void __init sme_enable(struct boot_params *bp)
+ 	eax = 0x8000001f;
+ 	ecx = 0;
+ 	native_cpuid(&eax, &ebx, &ecx, &edx);
+-	if (!(eax & feature_mask))
++	/* Check whether SEV or SME is supported */
++	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
+ 		return;
+ 
+ 	me_mask = 1UL << (ebx & 0x3f);
+ 
++	/* Check the SEV MSR whether SEV or SME is enabled */
++	sev_status   = __rdmsr(MSR_AMD64_SEV);
++	feature_mask = (sev_status & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;
++
+ 	/* Check if memory encryption is enabled */
+ 	if (feature_mask == AMD_SME_BIT) {
+ 		/*
+diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
+index 624a26794d558..e5ba9795ec696 100644
+--- a/drivers/acpi/acpica/utdelete.c
++++ b/drivers/acpi/acpica/utdelete.c
+@@ -285,6 +285,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
+ 		}
+ 		break;
+ 
++	case ACPI_TYPE_LOCAL_ADDRESS_HANDLER:
++
++		ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
++				  "***** Address handler %p\n", object));
++
++		acpi_os_delete_mutex(object->address_space.context_mutex);
++		break;
++
+ 	default:
+ 
+ 		break;
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index 68145e326eb90..30e9b700273e1 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -1334,6 +1334,34 @@ err_allow_idle:
+ 	return error;
+ }
+ 
++static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
++{
++	struct device *dev = ddata->dev;
++	int error;
++
++	/* Disable target module if it is enabled */
++	if (ddata->enabled) {
++		error = sysc_runtime_suspend(dev);
++		if (error)
++			dev_warn(dev, "reinit suspend failed: %i\n", error);
++	}
++
++	/* Enable target module */
++	error = sysc_runtime_resume(dev);
++	if (error)
++		dev_warn(dev, "reinit resume failed: %i\n", error);
++
++	if (leave_enabled)
++		return error;
++
++	/* Disable target module if no leave_enabled was set */
++	error = sysc_runtime_suspend(dev);
++	if (error)
++		dev_warn(dev, "reinit suspend failed: %i\n", error);
++
++	return error;
++}
++
+ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+ {
+ 	struct sysc *ddata;
+@@ -1344,12 +1372,18 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
+ 	    (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ 		return 0;
+ 
+-	return pm_runtime_force_suspend(dev);
++	if (!ddata->enabled)
++		return 0;
++
++	ddata->needs_resume = 1;
++
++	return sysc_runtime_suspend(dev);
+ }
+ 
+ static int __maybe_unused sysc_noirq_resume(struct device *dev)
+ {
+ 	struct sysc *ddata;
++	int error = 0;
+ 
+ 	ddata = dev_get_drvdata(dev);
+ 
+@@ -1357,7 +1391,19 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
+ 	    (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
+ 		return 0;
+ 
+-	return pm_runtime_force_resume(dev);
++	if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_RESUME) {
++		error = sysc_reinit_module(ddata, ddata->needs_resume);
++		if (error)
++			dev_warn(dev, "noirq_resume failed: %i\n", error);
++	} else if (ddata->needs_resume) {
++		error = sysc_runtime_resume(dev);
++		if (error)
++			dev_warn(dev, "noirq_resume failed: %i\n", error);
++	}
++
++	ddata->needs_resume = 0;
++
++	return error;
+ }
+ 
+ static const struct dev_pm_ops sysc_pm_ops = {
+@@ -1408,9 +1454,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 	/* Uarts on omap4 and later */
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x50411e03, 0xffff00ff,
+-		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 	SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x47422e03, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE_ACT | SYSC_QUIRK_LEGACY_IDLE),
++		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
+ 
+ 	/* Quirks that need to be set based on the module address */
+ 	SYSC_QUIRK("mcpdm", 0x40132000, 0, 0x10, -ENODEV, 0x50000800, 0xffffffff,
+@@ -1466,7 +1512,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ 	SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
+ 		   0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ 	SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
+-		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
++		   SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
++		   SYSC_QUIRK_REINIT_ON_RESUME),
+ 	SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+ 		   SYSC_MODULE_QUIRK_WDT),
+ 	/* PRUSS on am3, am4 and am5 */
+diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
+index 07cf7977a0450..59f2104ffc771 100644
+--- a/drivers/dma/idxd/init.c
++++ b/drivers/dma/idxd/init.c
+@@ -675,12 +675,12 @@ static int __init idxd_init_module(void)
+ 	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
+ 	 * enumerating the device. We can not utilize it.
+ 	 */
+-	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
++	if (!cpu_feature_enabled(X86_FEATURE_MOVDIR64B)) {
+ 		pr_warn("idxd driver failed to load without MOVDIR64B.\n");
+ 		return -ENODEV;
+ 	}
+ 
+-	if (!boot_cpu_has(X86_FEATURE_ENQCMD))
++	if (!cpu_feature_enabled(X86_FEATURE_ENQCMD))
+ 		pr_warn("Platform does not have ENQCMD(S) support.\n");
+ 	else
+ 		support_enqcmd = true;
+diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
+index e15d484b6a5a7..ea7ca74fc1730 100644
+--- a/drivers/firmware/efi/cper.c
++++ b/drivers/firmware/efi/cper.c
+@@ -276,8 +276,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+ 	if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
+ 		return 0;
+ 
+-	n = 0;
+-	len = CPER_REC_LEN - 1;
++	len = CPER_REC_LEN;
+ 	dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
+ 	if (bank && device)
+ 		n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
+@@ -286,7 +285,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
+ 			     "DIMM location: not present. DMI handle: 0x%.4x ",
+ 			     mem->mem_dev_handle);
+ 
+-	msg[n] = '\0';
+ 	return n;
+ }
+ 
+diff --git a/drivers/firmware/efi/fdtparams.c b/drivers/firmware/efi/fdtparams.c
+index bb042ab7c2be6..e901f8564ca0c 100644
+--- a/drivers/firmware/efi/fdtparams.c
++++ b/drivers/firmware/efi/fdtparams.c
+@@ -98,6 +98,9 @@ u64 __init efi_get_fdt_params(struct efi_memory_map_data *mm)
+ 	BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(name));
+ 	BUILD_BUG_ON(ARRAY_SIZE(target) != ARRAY_SIZE(dt_params[0].params));
+ 
++	if (!fdt)
++		return 0;
++
+ 	for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
+ 		node = fdt_path_offset(fdt, dt_params[i].path);
+ 		if (node < 0)
+diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
+index 4e81c6077188e..dd95f330fe6e1 100644
+--- a/drivers/firmware/efi/libstub/file.c
++++ b/drivers/firmware/efi/libstub/file.c
+@@ -103,7 +103,7 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
+ 		return 0;
+ 
+ 	/* Skip any leading slashes */
+-	while (cmdline[i] == L'/' || cmdline[i] == L'\\')
++	while (i < cmdline_len && (cmdline[i] == L'/' || cmdline[i] == L'\\'))
+ 		i++;
+ 
+ 	while (--result_len > 0 && i < cmdline_len) {
+diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
+index 5737cb0fcd44e..0a9aba5f9ceff 100644
+--- a/drivers/firmware/efi/memattr.c
++++ b/drivers/firmware/efi/memattr.c
+@@ -67,11 +67,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+ 		return false;
+ 	}
+ 
+-	if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+-		pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+-		return false;
+-	}
+-
+ 	if (PAGE_SIZE > EFI_PAGE_SIZE &&
+ 	    (!PAGE_ALIGNED(in->phys_addr) ||
+ 	     !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+index 0350205c48974..6819fe5612d9e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+@@ -337,7 +337,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ {
+ 	struct amdgpu_ctx *ctx;
+ 	struct amdgpu_ctx_mgr *mgr;
+-	unsigned long ras_counter;
+ 
+ 	if (!fpriv)
+ 		return -EINVAL;
+@@ -362,21 +361,6 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
+ 	if (atomic_read(&ctx->guilty))
+ 		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_GUILTY;
+ 
+-	/*query ue count*/
+-	ras_counter = amdgpu_ras_query_error_count(adev, false);
+-	/*ras counter is monotonic increasing*/
+-	if (ras_counter != ctx->ras_counter_ue) {
+-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_UE;
+-		ctx->ras_counter_ue = ras_counter;
+-	}
+-
+-	/*query ce count*/
+-	ras_counter = amdgpu_ras_query_error_count(adev, true);
+-	if (ras_counter != ctx->ras_counter_ce) {
+-		out->state.flags |= AMDGPU_CTX_QUERY2_FLAGS_RAS_CE;
+-		ctx->ras_counter_ce = ras_counter;
+-	}
+-
+ 	mutex_unlock(&mgr->lock);
+ 	return 0;
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+index a2ac44cc2a6da..e80cc2928b583 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+@@ -944,6 +944,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
+ 	domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags);
+ 	if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) {
+ 		drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n");
++		drm_gem_object_put(obj);
+ 		return ERR_PTR(-EINVAL);
+ 	}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+index dc947c8ffe213..e6c4a36eaf9ae 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+@@ -187,14 +187,14 @@ static int jpeg_v2_5_hw_init(void *handle)
+ static int jpeg_v2_5_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-	struct amdgpu_ring *ring;
+ 	int i;
+ 
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+ 		if (adev->jpeg.harvest_config & (1 << i))
+ 			continue;
+ 
+-		ring = &adev->jpeg.inst[i].ring_dec;
+ 		if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ 		    RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
+ 			jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);
+diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+index 1d354245678d5..2ea68c84e6b48 100644
+--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
+@@ -159,9 +159,9 @@ static int jpeg_v3_0_hw_init(void *handle)
+ static int jpeg_v3_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-	struct amdgpu_ring *ring;
+ 
+-	ring = &adev->jpeg.inst->ring_dec;
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
+ 	    RREG32_SOC15(JPEG, 0, mmUVD_JRBC_STATUS))
+ 		jpeg_v3_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
+diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+index 760859880c1ed..4eebf973a0658 100644
+--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+@@ -357,6 +357,7 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
+ 
+ error:
+ 	dma_fence_put(fence);
++	amdgpu_bo_unpin(bo);
+ 	amdgpu_bo_unreserve(bo);
+ 	amdgpu_bo_unref(&bo);
+ 	return r;
+diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+index ebbc04ff5da06..90138469648a9 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+@@ -367,15 +367,14 @@ done:
+ static int vcn_v3_0_hw_fini(void *handle)
+ {
+ 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+-	struct amdgpu_ring *ring;
+ 	int i;
+ 
++	cancel_delayed_work_sync(&adev->vcn.idle_work);
++
+ 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+ 		if (adev->vcn.harvest_config & (1 << i))
+ 			continue;
+ 
+-		ring = &adev->vcn.inst[i].ring_dec;
+-
+ 		if (!amdgpu_sriov_vf(adev)) {
+ 			if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
+ 					(adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
+index d2a678a2497e4..411494005f0ec 100644
+--- a/drivers/gpu/drm/i915/selftests/i915_request.c
++++ b/drivers/gpu/drm/i915/selftests/i915_request.c
+@@ -1392,8 +1392,8 @@ static int live_breadcrumbs_smoketest(void *arg)
+ 
+ 	for (n = 0; n < smoke[0].ncontexts; n++) {
+ 		smoke[0].contexts[n] = live_context(i915, file);
+-		if (!smoke[0].contexts[n]) {
+-			ret = -ENOMEM;
++		if (IS_ERR(smoke[0].contexts[n])) {
++			ret = PTR_ERR(smoke[0].contexts[n]);
+ 			goto out_contexts;
+ 		}
+ 	}
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+index 85f2c3564c966..fb061e666faa7 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+@@ -933,8 +933,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
+ 		DPU_DEBUG("REG_DMA is not defined");
+ 	}
+ 
+-	if (of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss"))
+-		dpu_kms_parse_data_bus_icc_path(dpu_kms);
++	dpu_kms_parse_data_bus_icc_path(dpu_kms);
+ 
+ 	pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ 
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+index cd4078807db1b..3416e9617ee9a 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+@@ -31,40 +31,8 @@ struct dpu_mdss {
+ 	void __iomem *mmio;
+ 	struct dss_module_power mp;
+ 	struct dpu_irq_controller irq_controller;
+-	struct icc_path *path[2];
+-	u32 num_paths;
+ };
+ 
+-static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
+-					    struct dpu_mdss *dpu_mdss)
+-{
+-	struct icc_path *path0 = of_icc_get(dev->dev, "mdp0-mem");
+-	struct icc_path *path1 = of_icc_get(dev->dev, "mdp1-mem");
+-
+-	if (IS_ERR_OR_NULL(path0))
+-		return PTR_ERR_OR_ZERO(path0);
+-
+-	dpu_mdss->path[0] = path0;
+-	dpu_mdss->num_paths = 1;
+-
+-	if (!IS_ERR_OR_NULL(path1)) {
+-		dpu_mdss->path[1] = path1;
+-		dpu_mdss->num_paths++;
+-	}
+-
+-	return 0;
+-}
+-
+-static void dpu_mdss_icc_request_bw(struct msm_mdss *mdss)
+-{
+-	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+-	int i;
+-	u64 avg_bw = dpu_mdss->num_paths ? MAX_BW / dpu_mdss->num_paths : 0;
+-
+-	for (i = 0; i < dpu_mdss->num_paths; i++)
+-		icc_set_bw(dpu_mdss->path[i], avg_bw, kBps_to_icc(MAX_BW));
+-}
+-
+ static void dpu_mdss_irq(struct irq_desc *desc)
+ {
+ 	struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
+@@ -178,8 +146,6 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
+ 	struct dss_module_power *mp = &dpu_mdss->mp;
+ 	int ret;
+ 
+-	dpu_mdss_icc_request_bw(mdss);
+-
+ 	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ 	if (ret) {
+ 		DPU_ERROR("clock enable failed, ret:%d\n", ret);
+@@ -213,15 +179,12 @@ static int dpu_mdss_disable(struct msm_mdss *mdss)
+ {
+ 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ 	struct dss_module_power *mp = &dpu_mdss->mp;
+-	int ret, i;
++	int ret;
+ 
+ 	ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ 	if (ret)
+ 		DPU_ERROR("clock disable failed, ret:%d\n", ret);
+ 
+-	for (i = 0; i < dpu_mdss->num_paths; i++)
+-		icc_set_bw(dpu_mdss->path[i], 0, 0);
+-
+ 	return ret;
+ }
+ 
+@@ -232,7 +195,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
+ 	struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ 	struct dss_module_power *mp = &dpu_mdss->mp;
+ 	int irq;
+-	int i;
+ 
+ 	pm_runtime_suspend(dev->dev);
+ 	pm_runtime_disable(dev->dev);
+@@ -242,9 +204,6 @@ static void dpu_mdss_destroy(struct drm_device *dev)
+ 	msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ 	devm_kfree(&pdev->dev, mp->clk_config);
+ 
+-	for (i = 0; i < dpu_mdss->num_paths; i++)
+-		icc_put(dpu_mdss->path[i]);
+-
+ 	if (dpu_mdss->mmio)
+ 		devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ 	dpu_mdss->mmio = NULL;
+@@ -276,12 +235,6 @@ int dpu_mdss_init(struct drm_device *dev)
+ 
+ 	DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+ 
+-	if (!of_device_is_compatible(dev->dev->of_node, "qcom,sc7180-mdss")) {
+-		ret = dpu_mdss_parse_data_bus_icc_path(dev, dpu_mdss);
+-		if (ret)
+-			return ret;
+-	}
+-
+ 	mp = &dpu_mdss->mp;
+ 	ret = msm_dss_parse_clock(pdev, mp);
+ 	if (ret) {
+@@ -307,8 +260,6 @@ int dpu_mdss_init(struct drm_device *dev)
+ 
+ 	pm_runtime_enable(dev->dev);
+ 
+-	dpu_mdss_icc_request_bw(priv->mdss);
+-
+ 	return ret;
+ 
+ irq_error:
+diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_client.c b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+index 2ab38b7153477..ea9a4913932d6 100644
+--- a/drivers/hid/amd-sfh-hid/amd_sfh_client.c
++++ b/drivers/hid/amd-sfh-hid/amd_sfh_client.c
+@@ -88,6 +88,7 @@ static void amd_sfh_work(struct work_struct *work)
+ 	sensor_index = req_node->sensor_idx;
+ 	report_id = req_node->report_id;
+ 	node_type = req_node->report_type;
++	kfree(req_node);
+ 
+ 	if (node_type == HID_FEATURE_REPORT) {
+ 		report_size = get_feature_report(sensor_index, report_id,
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index d459e2dbe6474..f7710fb2f48d2 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -1262,6 +1262,7 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
+ 	int status;
+ 
+ 	long flags = (long) data[2];
++	*level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
+ 
+ 	if (flags & 0x80)
+ 		switch (flags & 0x07) {
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index abd86903875f0..fc4c074597539 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -597,7 +597,7 @@ static int magicmouse_probe(struct hid_device *hdev,
+ 	if (id->vendor == USB_VENDOR_ID_APPLE &&
+ 	    id->product == USB_DEVICE_ID_APPLE_MAGICTRACKPAD2 &&
+ 	    hdev->type != HID_TYPE_USBMOUSE)
+-		return 0;
++		return -ENODEV;
+ 
+ 	msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
+ 	if (msc == NULL) {
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 9d9f3e1bd5f41..55dcb8536286b 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -604,9 +604,13 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+ 		if (!(HID_MAIN_ITEM_VARIABLE & field->flags))
+ 			continue;
+ 
+-		for (n = 0; n < field->report_count; n++) {
+-			if (field->usage[n].hid == HID_DG_CONTACTID)
+-				rdata->is_mt_collection = true;
++		if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
++			for (n = 0; n < field->report_count; n++) {
++				if (field->usage[n].hid == HID_DG_CONTACTID) {
++					rdata->is_mt_collection = true;
++					break;
++				}
++			}
+ 		}
+ 	}
+ 
+diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
+index 9993133989a58..46474612e73c6 100644
+--- a/drivers/hid/i2c-hid/i2c-hid-core.c
++++ b/drivers/hid/i2c-hid/i2c-hid-core.c
+@@ -45,6 +45,7 @@
+ #define I2C_HID_QUIRK_BOGUS_IRQ			BIT(4)
+ #define I2C_HID_QUIRK_RESET_ON_RESUME		BIT(5)
+ #define I2C_HID_QUIRK_BAD_INPUT_SIZE		BIT(6)
++#define I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET	BIT(7)
+ 
+ 
+ /* flags */
+@@ -178,6 +179,11 @@ static const struct i2c_hid_quirks {
+ 		 I2C_HID_QUIRK_RESET_ON_RESUME },
+ 	{ USB_VENDOR_ID_ITE, I2C_DEVICE_ID_ITE_LENOVO_LEGION_Y720,
+ 		I2C_HID_QUIRK_BAD_INPUT_SIZE },
++	/*
++	 * Sending the wakeup after reset actually break ELAN touchscreen controller
++	 */
++	{ USB_VENDOR_ID_ELAN, HID_ANY_ID,
++		 I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET },
+ 	{ 0, 0 }
+ };
+ 
+@@ -461,7 +467,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
+ 	}
+ 
+ 	/* At least some SIS devices need this after reset */
+-	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
++	if (!(ihid->quirks & I2C_HID_QUIRK_NO_WAKEUP_AFTER_RESET))
++		ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+ 
+ out_unlock:
+ 	mutex_unlock(&ihid->reset_lock);
+@@ -990,8 +997,8 @@ int i2c_hid_core_probe(struct i2c_client *client, struct i2chid_ops *ops,
+ 	hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
+ 	hid->product = le16_to_cpu(ihid->hdesc.wProductID);
+ 
+-	snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
+-		 client->name, hid->vendor, hid->product);
++	snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
++		 client->name, (u16)hid->vendor, (u16)hid->product);
+ 	strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));
+ 
+ 	ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);
+diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
+index fddac7c72f645..07a9fe97d2e05 100644
+--- a/drivers/hid/usbhid/hid-pidff.c
++++ b/drivers/hid/usbhid/hid-pidff.c
+@@ -1292,6 +1292,7 @@ int hid_pidff_init(struct hid_device *hid)
+ 
+ 	if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
+ 	    pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
++		error = -EPERM;
+ 		hid_notice(hid,
+ 			   "device does not support device managed pool\n");
+ 		goto fail;
+diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
+index 73b9db9e3aab6..63b74e781c5d9 100644
+--- a/drivers/hwmon/dell-smm-hwmon.c
++++ b/drivers/hwmon/dell-smm-hwmon.c
+@@ -838,10 +838,10 @@ static struct attribute *i8k_attrs[] = {
+ static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
+ 			      int index)
+ {
+-	if (disallow_fan_support && index >= 8)
++	if (disallow_fan_support && index >= 20)
+ 		return 0;
+ 	if (disallow_fan_type_call &&
+-	    (index == 9 || index == 12 || index == 15))
++	    (index == 21 || index == 25 || index == 28))
+ 		return 0;
+ 	if (index >= 0 && index <= 1 &&
+ 	    !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
+diff --git a/drivers/hwmon/pmbus/isl68137.c b/drivers/hwmon/pmbus/isl68137.c
+index 2bee930d39002..789242ed72e5d 100644
+--- a/drivers/hwmon/pmbus/isl68137.c
++++ b/drivers/hwmon/pmbus/isl68137.c
+@@ -244,8 +244,8 @@ static int isl68137_probe(struct i2c_client *client)
+ 		info->read_word_data = raa_dmpvr2_read_word_data;
+ 		break;
+ 	case raa_dmpvr2_2rail_nontc:
+-		info->func[0] &= ~PMBUS_HAVE_TEMP;
+-		info->func[1] &= ~PMBUS_HAVE_TEMP;
++		info->func[0] &= ~PMBUS_HAVE_TEMP3;
++		info->func[1] &= ~PMBUS_HAVE_TEMP3;
+ 		fallthrough;
+ 	case raa_dmpvr2_2rail:
+ 		info->pages = 2;
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index 214b4c913a139..671f4a52275ec 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -650,6 +650,14 @@ static int geni_i2c_remove(struct platform_device *pdev)
+ 	return 0;
+ }
+ 
++static void geni_i2c_shutdown(struct platform_device *pdev)
++{
++	struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
++
++	/* Make client i2c transfers start failing */
++	i2c_mark_adapter_suspended(&gi2c->adap);
++}
++
+ static int __maybe_unused geni_i2c_runtime_suspend(struct device *dev)
+ {
+ 	int ret;
+@@ -690,6 +698,8 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+ {
+ 	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
+ 
++	i2c_mark_adapter_suspended(&gi2c->adap);
++
+ 	if (!gi2c->suspended) {
+ 		geni_i2c_runtime_suspend(dev);
+ 		pm_runtime_disable(dev);
+@@ -699,8 +709,16 @@ static int __maybe_unused geni_i2c_suspend_noirq(struct device *dev)
+ 	return 0;
+ }
+ 
++static int __maybe_unused geni_i2c_resume_noirq(struct device *dev)
++{
++	struct geni_i2c_dev *gi2c = dev_get_drvdata(dev);
++
++	i2c_mark_adapter_resumed(&gi2c->adap);
++	return 0;
++}
++
+ static const struct dev_pm_ops geni_i2c_pm_ops = {
+-	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, NULL)
++	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(geni_i2c_suspend_noirq, geni_i2c_resume_noirq)
+ 	SET_RUNTIME_PM_OPS(geni_i2c_runtime_suspend, geni_i2c_runtime_resume,
+ 			   NULL)
+ };
+@@ -714,6 +732,7 @@ MODULE_DEVICE_TABLE(of, geni_i2c_dt_match);
+ static struct platform_driver geni_i2c_driver = {
+ 	.probe  = geni_i2c_probe,
+ 	.remove = geni_i2c_remove,
++	.shutdown = geni_i2c_shutdown,
+ 	.driver = {
+ 		.name = "geni_i2c",
+ 		.pm = &geni_i2c_pm_ops,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+index 314f8d8067231..9058f09f921ee 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+@@ -2177,8 +2177,6 @@ int cxgb4_update_mac_filt(struct port_info *pi, unsigned int viid,
+ 			  bool persistent, u8 *smt_idx);
+ int cxgb4_get_msix_idx_from_bmap(struct adapter *adap);
+ void cxgb4_free_msix_idx_in_bmap(struct adapter *adap, u32 msix_idx);
+-int cxgb_open(struct net_device *dev);
+-int cxgb_close(struct net_device *dev);
+ void cxgb4_enable_rx(struct adapter *adap, struct sge_rspq *q);
+ void cxgb4_quiesce_rx(struct sge_rspq *q);
+ int cxgb4_port_mirror_alloc(struct net_device *dev);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 421bd9b88028d..1f601de02e706 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -2834,7 +2834,7 @@ static void cxgb_down(struct adapter *adapter)
+ /*
+  * net_device operations
+  */
+-int cxgb_open(struct net_device *dev)
++static int cxgb_open(struct net_device *dev)
+ {
+ 	struct port_info *pi = netdev_priv(dev);
+ 	struct adapter *adapter = pi->adapter;
+@@ -2882,7 +2882,7 @@ out_unlock:
+ 	return err;
+ }
+ 
+-int cxgb_close(struct net_device *dev)
++static int cxgb_close(struct net_device *dev)
+ {
+ 	struct port_info *pi = netdev_priv(dev);
+ 	struct adapter *adapter = pi->adapter;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+index 1b88bd1c2dbe4..dd9be229819a5 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+@@ -997,20 +997,16 @@ int cxgb4_tc_flower_destroy(struct net_device *dev,
+ 	if (!ch_flower)
+ 		return -ENOENT;
+ 
++	rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
++			       adap->flower_ht_params);
++
+ 	ret = cxgb4_flow_rule_destroy(dev, ch_flower->fs.tc_prio,
+ 				      &ch_flower->fs, ch_flower->filter_id);
+ 	if (ret)
+-		goto err;
++		netdev_err(dev, "Flow rule destroy failed for tid: %u, ret: %d",
++			   ch_flower->filter_id, ret);
+ 
+-	ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
+-				     adap->flower_ht_params);
+-	if (ret) {
+-		netdev_err(dev, "Flow remove from rhashtable failed");
+-		goto err;
+-	}
+ 	kfree_rcu(ch_flower, rcu);
+-
+-err:
+ 	return ret;
+ }
+ 
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+index 6c259de96f969..338b04f339b3d 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+@@ -589,7 +589,8 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ 	 * down before configuring tc params.
+ 	 */
+ 	if (netif_running(dev)) {
+-		cxgb_close(dev);
++		netif_tx_stop_all_queues(dev);
++		netif_carrier_off(dev);
+ 		needs_bring_up = true;
+ 	}
+ 
+@@ -615,8 +616,10 @@ int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ 	}
+ 
+ out:
+-	if (needs_bring_up)
+-		cxgb_open(dev);
++	if (needs_bring_up) {
++		netif_tx_start_all_queues(dev);
++		netif_carrier_on(dev);
++	}
+ 
+ 	mutex_unlock(&adap->tc_mqprio->mqprio_mutex);
+ 	return ret;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 1e5f2edb70cf4..6a099cb34b122 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2556,6 +2556,12 @@ int cxgb4_ethofld_send_flowc(struct net_device *dev, u32 eotid, u32 tc)
+ 	if (!eosw_txq)
+ 		return -ENOMEM;
+ 
++	if (!(adap->flags & CXGB4_FW_OK)) {
++		/* Don't stall caller when access to FW is lost */
++		complete(&eosw_txq->completion);
++		return -EIO;
++	}
++
+ 	skb = alloc_skb(len, GFP_KERNEL);
+ 	if (!skb)
+ 		return -ENOMEM;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 70b515049540f..c358d90498813 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -2313,15 +2313,20 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+ 	case XDP_TX:
+ 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
++		if (result == I40E_XDP_CONSUMED)
++			goto out_failure;
+ 		break;
+ 	case XDP_REDIRECT:
+ 		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+-		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
++		if (err)
++			goto out_failure;
++		result = I40E_XDP_REDIR;
+ 		break;
+ 	default:
+ 		bpf_warn_invalid_xdp_action(act);
+ 		fallthrough;
+ 	case XDP_ABORTED:
++out_failure:
+ 		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
+ 		fallthrough; /* handle aborts by dropping packet */
+ 	case XDP_DROP:
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index 12ca84113587d..5b39c457bd77b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -160,21 +160,28 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
+ 	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ 	act = bpf_prog_run_xdp(xdp_prog, xdp);
+ 
++	if (likely(act == XDP_REDIRECT)) {
++		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
++		if (err)
++			goto out_failure;
++		rcu_read_unlock();
++		return I40E_XDP_REDIR;
++	}
++
+ 	switch (act) {
+ 	case XDP_PASS:
+ 		break;
+ 	case XDP_TX:
+ 		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
+ 		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
+-		break;
+-	case XDP_REDIRECT:
+-		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
+-		result = !err ?
I40E_XDP_REDIR : I40E_XDP_CONSUMED; ++ if (result == I40E_XDP_CONSUMED) ++ goto out_failure; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: ++out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; /* handle aborts by dropping packet */ + case XDP_DROP: +diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h +index 17101c45cbcd8..f668296ca6779 100644 +--- a/drivers/net/ethernet/intel/ice/ice.h ++++ b/drivers/net/ethernet/intel/ice/ice.h +@@ -325,6 +325,7 @@ struct ice_vsi { + struct ice_tc_cfg tc_cfg; + struct bpf_prog *xdp_prog; + struct ice_ring **xdp_rings; /* XDP ring array */ ++ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ + u16 num_xdp_txq; /* Used XDP queues */ + u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ + +@@ -534,15 +535,16 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring) + */ + static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring) + { ++ struct ice_vsi *vsi = ring->vsi; + u16 qid = ring->q_index; + + if (ice_ring_is_xdp(ring)) +- qid -= ring->vsi->num_xdp_txq; ++ qid -= vsi->num_xdp_txq; + +- if (!ice_is_xdp_ena_vsi(ring->vsi)) ++ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) + return NULL; + +- return xsk_get_pool_from_qid(ring->vsi->netdev, qid); ++ return xsk_get_pool_from_qid(vsi->netdev, qid); + } + + /** +diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c +index 32ba71a161652..f80fff97d8dce 100644 +--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c ++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c +@@ -1797,49 +1797,6 @@ ice_phy_type_to_ethtool(struct net_device *netdev, + ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB, + 100000baseKR4_Full); + } +- +- /* Autoneg PHY types */ +- if (phy_types_low & ICE_PHY_TYPE_LOW_100BASE_TX || +- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_T || +- phy_types_low & ICE_PHY_TYPE_LOW_1000BASE_KX || +- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_T || +- phy_types_low & ICE_PHY_TYPE_LOW_2500BASE_KX || +- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_T || +- phy_types_low & ICE_PHY_TYPE_LOW_5GBASE_KR || +- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_T || +- phy_types_low & ICE_PHY_TYPE_LOW_10GBASE_KR_CR1 || +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_T || +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR || +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR_S || +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_CR1 || +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR || +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR_S || +- phy_types_low & ICE_PHY_TYPE_LOW_25GBASE_KR1 || +- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_CR4 || +- phy_types_low & ICE_PHY_TYPE_LOW_40GBASE_KR4) { +- ethtool_link_ksettings_add_link_mode(ks, supported, +- Autoneg); +- ethtool_link_ksettings_add_link_mode(ks, advertising, +- Autoneg); +- } +- if (phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CR2 || +- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR2 || +- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_CP || +- phy_types_low & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) { +- ethtool_link_ksettings_add_link_mode(ks, supported, +- Autoneg); +- ethtool_link_ksettings_add_link_mode(ks, advertising, +- Autoneg); +- } +- if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CR4 || +- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR4 || +- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4 || +- phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) { +- ethtool_link_ksettings_add_link_mode(ks, 
supported, +- Autoneg); +- ethtool_link_ksettings_add_link_mode(ks, advertising, +- Autoneg); +- } + } + + #define TEST_SET_BITS_TIMEOUT 50 +@@ -1996,9 +1953,7 @@ ice_get_link_ksettings(struct net_device *netdev, + ks->base.port = PORT_TP; + break; + case ICE_MEDIA_BACKPLANE: +- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, supported, Backplane); +- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); + ethtool_link_ksettings_add_link_mode(ks, advertising, + Backplane); + ks->base.port = PORT_NONE; +@@ -2073,6 +2028,12 @@ ice_get_link_ksettings(struct net_device *netdev, + if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN) + ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS); + ++ /* Set supported and advertised autoneg */ ++ if (ice_is_phy_caps_an_enabled(caps)) { ++ ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg); ++ ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg); ++ } ++ + done: + kfree(caps); + return err; +diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +index 093a1818a3929..1998821896c0f 100644 +--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h ++++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +@@ -31,6 +31,7 @@ + #define PF_FW_ATQLEN_ATQOVFL_M BIT(29) + #define PF_FW_ATQLEN_ATQCRIT_M BIT(30) + #define VF_MBX_ARQLEN(_VF) (0x0022BC00 + ((_VF) * 4)) ++#define VF_MBX_ATQLEN(_VF) (0x0022A800 + ((_VF) * 4)) + #define PF_FW_ATQLEN_ATQENABLE_M BIT(31) + #define PF_FW_ATQT 0x00080400 + #define PF_MBX_ARQBAH 0x0022E400 +diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c +index 195d122c9cb22..27e439853c3b0 100644 +--- a/drivers/net/ethernet/intel/ice/ice_lib.c ++++ b/drivers/net/ethernet/intel/ice/ice_lib.c +@@ -105,8 +105,14 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) + if (!vsi->q_vectors) + goto err_vectors; + ++ vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL); ++ if (!vsi->af_xdp_zc_qps) ++ goto err_zc_qps; ++ + return 0; + ++err_zc_qps: ++ devm_kfree(dev, vsi->q_vectors); + err_vectors: + devm_kfree(dev, vsi->rxq_map); + err_rxq_map: +@@ -192,6 +198,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) + break; + case ICE_VSI_VF: + vf = &pf->vf[vsi->vf_id]; ++ if (vf->num_req_qs) ++ vf->num_vf_qs = vf->num_req_qs; + vsi->alloc_txq = vf->num_vf_qs; + vsi->alloc_rxq = vf->num_vf_qs; + /* pf->num_msix_per_vf includes (VF miscellaneous vector + +@@ -286,6 +294,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) + + dev = ice_pf_to_dev(pf); + ++ if (vsi->af_xdp_zc_qps) { ++ bitmap_free(vsi->af_xdp_zc_qps); ++ vsi->af_xdp_zc_qps = NULL; ++ } + /* free the ring and vector containers */ + if (vsi->q_vectors) { + devm_kfree(dev, vsi->q_vectors); +diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c +index b91dcfd12727d..113e53efffd71 100644 +--- a/drivers/net/ethernet/intel/ice/ice_txrx.c ++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c +@@ -523,7 +523,7 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, + struct bpf_prog *xdp_prog) + { + struct ice_ring *xdp_ring; +- int err; ++ int err, result; + u32 act; + + act = bpf_prog_run_xdp(xdp_prog, xdp); +@@ -532,14 +532,20 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp, + return ICE_XDP_PASS; + case XDP_TX: + xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()]; +- 
return ice_xmit_xdp_buff(xdp, xdp_ring); ++ result = ice_xmit_xdp_buff(xdp, xdp_ring); ++ if (result == ICE_XDP_CONSUMED) ++ goto out_failure; ++ return result; + case XDP_REDIRECT: + err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); +- return !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; ++ if (err) ++ goto out_failure; ++ return ICE_XDP_REDIR; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: ++out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: +@@ -2331,6 +2337,7 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) + struct ice_tx_offload_params offload = { 0 }; + struct ice_vsi *vsi = tx_ring->vsi; + struct ice_tx_buf *first; ++ struct ethhdr *eth; + unsigned int count; + int tso, csum; + +@@ -2377,7 +2384,9 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring) + goto out_drop; + + /* allow CONTROL frames egress from main VSI if FW LLDP disabled */ +- if (unlikely(skb->priority == TC_PRIO_CONTROL && ++ eth = (struct ethhdr *)skb_mac_header(skb); ++ if (unlikely((skb->priority == TC_PRIO_CONTROL || ++ eth->h_proto == htons(ETH_P_LLDP)) && + vsi->type == ICE_VSI_PF && + vsi->port_info->qos_cfg.is_sw_lldp)) + offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX | +diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +index 1f38a8d0c5254..48dee9c5d534b 100644 +--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +@@ -435,13 +435,15 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr) + */ + clear_bit(ICE_VF_STATE_INIT, vf->vf_states); + +- /* VF_MBX_ARQLEN is cleared by PFR, so the driver needs to clear it +- * in the case of VFR. If this is done for PFR, it can mess up VF +- * resets because the VF driver may already have started cleanup +- * by the time we get here. ++ /* VF_MBX_ARQLEN and VF_MBX_ATQLEN are cleared by PFR, so the driver ++ * needs to clear them in the case of VFR/VFLR. If this is done for ++ * PFR, it can mess up VF resets because the VF driver may already ++ * have started cleanup by the time we get here. + */ +- if (!is_pfr) ++ if (!is_pfr) { + wr32(hw, VF_MBX_ARQLEN(vf->vf_id), 0); ++ wr32(hw, VF_MBX_ATQLEN(vf->vf_id), 0); ++ } + + /* In the case of a VFLR, the HW has already reset the VF and we + * just need to clean up, so don't hit the VFRTRIG register. 
+@@ -1375,7 +1377,12 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr) + } + + ice_vf_pre_vsi_rebuild(vf); +- ice_vf_rebuild_vsi_with_release(vf); ++ ++ if (ice_vf_rebuild_vsi_with_release(vf)) { ++ dev_err(dev, "Failed to release and setup the VF%u's VSI\n", vf->vf_id); ++ return false; ++ } ++ + ice_vf_post_vsi_rebuild(vf); + + return true; +diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c +index 9f94d9159acde..f1d4240e57df3 100644 +--- a/drivers/net/ethernet/intel/ice/ice_xsk.c ++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c +@@ -273,6 +273,7 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid) + if (!pool) + return -EINVAL; + ++ clear_bit(qid, vsi->af_xdp_zc_qps); + xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR); + + return 0; +@@ -303,6 +304,8 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid) + if (err) + return err; + ++ set_bit(qid, vsi->af_xdp_zc_qps); ++ + return 0; + } + +@@ -473,21 +476,29 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp) + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + + act = bpf_prog_run_xdp(xdp_prog, xdp); ++ ++ if (likely(act == XDP_REDIRECT)) { ++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); ++ if (err) ++ goto out_failure; ++ rcu_read_unlock(); ++ return ICE_XDP_REDIR; ++ } ++ + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index]; + result = ice_xmit_xdp_buff(xdp, xdp_ring); +- break; +- case XDP_REDIRECT: +- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); +- result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED; ++ if (result == ICE_XDP_CONSUMED) ++ goto out_failure; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: ++out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: +diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h +index 7bda8c5edea5d..2d3daf022651c 100644 +--- a/drivers/net/ethernet/intel/igb/igb.h ++++ b/drivers/net/ethernet/intel/igb/igb.h +@@ -749,7 +749,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter); + void igb_ptp_tx_hang(struct igb_adapter *adapter); + void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb); + int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, +- struct sk_buff *skb); ++ ktime_t *timestamp); + int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); + int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); + void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index a45cd2b416c89..caa8929289ae7 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -8281,7 +8281,7 @@ static void igb_add_rx_frag(struct igb_ring *rx_ring, + static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, +- union e1000_adv_rx_desc *rx_desc) ++ ktime_t timestamp) + { + #if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +@@ -8301,12 +8301,8 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, + if (unlikely(!skb)) + return NULL; + +- if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) { +- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, xdp->data, skb)) { +- xdp->data += IGB_TS_HDR_LEN; +- size -= 
IGB_TS_HDR_LEN; +- } +- } ++ if (timestamp) ++ skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* Determine available headroom for copy */ + headlen = size; +@@ -8337,7 +8333,7 @@ static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring, + static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, + struct igb_rx_buffer *rx_buffer, + struct xdp_buff *xdp, +- union e1000_adv_rx_desc *rx_desc) ++ ktime_t timestamp) + { + #if (PAGE_SIZE < 8192) + unsigned int truesize = igb_rx_pg_size(rx_ring) / 2; +@@ -8364,11 +8360,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, + if (metasize) + skb_metadata_set(skb, metasize); + +- /* pull timestamp out of packet data */ +- if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { +- if (!igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb)) +- __skb_pull(skb, IGB_TS_HDR_LEN); +- } ++ if (timestamp) ++ skb_hwtstamps(skb)->hwtstamp = timestamp; + + /* update buffer offset */ + #if (PAGE_SIZE < 8192) +@@ -8402,18 +8395,20 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, + break; + case XDP_TX: + result = igb_xdp_xmit_back(adapter, xdp); ++ if (result == IGB_XDP_CONSUMED) ++ goto out_failure; + break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); +- if (!err) +- result = IGB_XDP_REDIR; +- else +- result = IGB_XDP_CONSUMED; ++ if (err) ++ goto out_failure; ++ result = IGB_XDP_REDIR; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: ++out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; + case XDP_DROP: +@@ -8683,7 +8678,10 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) + while (likely(total_packets < budget)) { + union e1000_adv_rx_desc *rx_desc; + struct igb_rx_buffer *rx_buffer; ++ ktime_t timestamp = 0; ++ int pkt_offset = 0; + unsigned int size; ++ void *pktbuf; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IGB_RX_BUFFER_WRITE) { +@@ -8703,14 +8701,24 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) + dma_rmb(); + + rx_buffer = igb_get_rx_buffer(rx_ring, size, &rx_buf_pgcnt); ++ pktbuf = page_address(rx_buffer->page) + rx_buffer->page_offset; ++ ++ /* pull rx packet timestamp if available and valid */ ++ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) { ++ int ts_hdr_len; ++ ++ ts_hdr_len = igb_ptp_rx_pktstamp(rx_ring->q_vector, ++ pktbuf, ×tamp); ++ ++ pkt_offset += ts_hdr_len; ++ size -= ts_hdr_len; ++ } + + /* retrieve a buffer from the ring */ + if (!skb) { +- unsigned int offset = igb_rx_offset(rx_ring); +- unsigned char *hard_start; ++ unsigned char *hard_start = pktbuf - igb_rx_offset(rx_ring); ++ unsigned int offset = pkt_offset + igb_rx_offset(rx_ring); + +- hard_start = page_address(rx_buffer->page) + +- rx_buffer->page_offset - offset; + xdp_prepare_buff(&xdp, hard_start, offset, size, true); + #if (PAGE_SIZE > 4096) + /* At larger PAGE_SIZE, frame_sz depend on len size */ +@@ -8733,10 +8741,11 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) + } else if (skb) + igb_add_rx_frag(rx_ring, rx_buffer, skb, size); + else if (ring_uses_build_skb(rx_ring)) +- skb = igb_build_skb(rx_ring, rx_buffer, &xdp, rx_desc); ++ skb = igb_build_skb(rx_ring, rx_buffer, &xdp, ++ timestamp); + else + skb = igb_construct_skb(rx_ring, rx_buffer, +- &xdp, rx_desc); ++ &xdp, timestamp); + + /* exit if we failed to retrieve a buffer */ + if (!skb) { +diff --git 
a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c +index 86a576201f5ff..58b25f26ea7f2 100644 +--- a/drivers/net/ethernet/intel/igb/igb_ptp.c ++++ b/drivers/net/ethernet/intel/igb/igb_ptp.c +@@ -856,30 +856,28 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) + dev_kfree_skb_any(skb); + } + +-#define IGB_RET_PTP_DISABLED 1 +-#define IGB_RET_PTP_INVALID 2 +- + /** + * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp + * @q_vector: Pointer to interrupt specific structure + * @va: Pointer to address containing Rx buffer +- * @skb: Buffer containing timestamp and packet ++ * @timestamp: Pointer where timestamp will be stored + * + * This function is meant to retrieve a timestamp from the first buffer of an + * incoming frame. The value is stored in little endian format starting on + * byte 8 + * +- * Returns: 0 if success, nonzero if failure ++ * Returns: The timestamp header length or 0 if not available + **/ + int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, +- struct sk_buff *skb) ++ ktime_t *timestamp) + { + struct igb_adapter *adapter = q_vector->adapter; ++ struct skb_shared_hwtstamps ts; + __le64 *regval = (__le64 *)va; + int adjust = 0; + + if (!(adapter->ptp_flags & IGB_PTP_ENABLED)) +- return IGB_RET_PTP_DISABLED; ++ return 0; + + /* The timestamp is recorded in little endian format. + * DWORD: 0 1 2 3 +@@ -888,10 +886,9 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + + /* check reserved dwords are zero, be/le doesn't matter for zero */ + if (regval[0]) +- return IGB_RET_PTP_INVALID; ++ return 0; + +- igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), +- le64_to_cpu(regval[1])); ++ igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1])); + + /* adjust timestamp for the RX latency based on link speed */ + if (adapter->hw.mac.type == e1000_i210) { +@@ -907,10 +904,10 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, + break; + } + } +- skb_hwtstamps(skb)->hwtstamp = +- ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust); + +- return 0; ++ *timestamp = ktime_sub_ns(ts.hwtstamp, adjust); ++ ++ return IGB_TS_HDR_LEN; + } + + /** +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index cffb95f8f6326..c194158a421c7 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -2213,23 +2213,23 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, + break; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); +- if (unlikely(!xdpf)) { +- result = IXGBE_XDP_CONSUMED; +- break; +- } ++ if (unlikely(!xdpf)) ++ goto out_failure; + result = ixgbe_xmit_xdp_ring(adapter, xdpf); ++ if (result == IXGBE_XDP_CONSUMED) ++ goto out_failure; + break; + case XDP_REDIRECT: + err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); +- if (!err) +- result = IXGBE_XDP_REDIR; +- else +- result = IXGBE_XDP_CONSUMED; ++ if (err) ++ goto out_failure; ++ result = IXGBE_XDP_REDIR; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: ++out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; /* handle aborts by dropping packet */ + case XDP_DROP: +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +index 3771857cf887c..f72d2978263b9 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +@@ 
-104,25 +104,30 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, + xdp_prog = READ_ONCE(rx_ring->xdp_prog); + act = bpf_prog_run_xdp(xdp_prog, xdp); + ++ if (likely(act == XDP_REDIRECT)) { ++ err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); ++ if (err) ++ goto out_failure; ++ rcu_read_unlock(); ++ return IXGBE_XDP_REDIR; ++ } ++ + switch (act) { + case XDP_PASS: + break; + case XDP_TX: + xdpf = xdp_convert_buff_to_frame(xdp); +- if (unlikely(!xdpf)) { +- result = IXGBE_XDP_CONSUMED; +- break; +- } ++ if (unlikely(!xdpf)) ++ goto out_failure; + result = ixgbe_xmit_xdp_ring(adapter, xdpf); +- break; +- case XDP_REDIRECT: +- err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); +- result = !err ? IXGBE_XDP_REDIR : IXGBE_XDP_CONSUMED; ++ if (result == IXGBE_XDP_CONSUMED) ++ goto out_failure; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: ++out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; /* handle aborts by dropping packet */ + case XDP_DROP: +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +index 449d7d5b280dd..b38860c485986 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +@@ -1067,11 +1067,14 @@ static struct sk_buff *ixgbevf_run_xdp(struct ixgbevf_adapter *adapter, + case XDP_TX: + xdp_ring = adapter->xdp_ring[rx_ring->queue_index]; + result = ixgbevf_xmit_xdp_ring(xdp_ring, xdp); ++ if (result == IXGBEVF_XDP_CONSUMED) ++ goto out_failure; + break; + default: + bpf_warn_invalid_xdp_action(act); + fallthrough; + case XDP_ABORTED: ++out_failure: + trace_xdp_exception(rx_ring->netdev, xdp_prog, act); + fallthrough; /* handle aborts by dropping packet */ + case XDP_DROP: +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +index 53802e18af900..04b49cb3adb32 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +@@ -1632,12 +1632,13 @@ static int mlx5e_set_fecparam(struct net_device *netdev, + { + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; ++ unsigned long fec_bitmap; + u16 fec_policy = 0; + int mode; + int err; + +- if (bitmap_weight((unsigned long *)&fecparam->fec, +- ETHTOOL_FEC_LLRS_BIT + 1) > 1) ++ bitmap_from_arr32(&fec_bitmap, &fecparam->fec, sizeof(fecparam->fec) * BITS_PER_BYTE); ++ if (bitmap_weight(&fec_bitmap, ETHTOOL_FEC_LLRS_BIT + 1) > 1) + return -EOPNOTSUPP; + + for (mode = 0; mode < ARRAY_SIZE(pplm_fec_2_ethtool); mode++) { +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +index 78a1403c98026..b633f669ea57f 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +@@ -1964,11 +1964,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, + misc_parameters); + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_dissector *dissector = rule->match.dissector; ++ enum fs_flow_table_type fs_type; + u16 addr_type = 0; + u8 ip_proto = 0; + u8 *match_level; + int err; + ++ fs_type = mlx5e_is_eswitch_flow(flow) ? 
FS_FT_FDB : FS_FT_NIC_RX; + match_level = outer_match_level; + + if (dissector->used_keys & +@@ -2093,6 +2095,13 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, + if (match.mask->vlan_id || + match.mask->vlan_priority || + match.mask->vlan_tpid) { ++ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid, ++ fs_type)) { ++ NL_SET_ERR_MSG_MOD(extack, ++ "Matching on CVLAN is not supported"); ++ return -EOPNOTSUPP; ++ } ++ + if (match.key->vlan_tpid == htons(ETH_P_8021AD)) { + MLX5_SET(fte_match_set_misc, misc_c, + outer_second_svlan_tag, 1); +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +index d4a2f8d1ee9f1..3719452a78035 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +@@ -349,7 +349,8 @@ esw_setup_slow_path_dest(struct mlx5_flow_destination *dest, + struct mlx5_fs_chains *chains, + int i) + { +- flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; ++ if (mlx5_chains_ignore_flow_level_supported(chains)) ++ flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[i].ft = mlx5_chains_get_tc_end_ft(chains); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +index f9042e147c7f6..ee710ce007950 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +@@ -354,6 +354,9 @@ static void mlx5_sync_reset_abort_event(struct work_struct *work) + reset_abort_work); + struct mlx5_core_dev *dev = fw_reset->dev; + ++ if (!test_bit(MLX5_FW_RESET_FLAGS_RESET_REQUESTED, &fw_reset->reset_flags)) ++ return; ++ + mlx5_sync_reset_clear_reset_requested(dev, true); + mlx5_core_warn(dev, "PCI Sync FW Update Reset Aborted.\n"); + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +index 381325b4a863e..b607ed5a74bb4 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +@@ -111,7 +111,7 @@ bool mlx5_chains_prios_supported(struct mlx5_fs_chains *chains) + return chains->flags & MLX5_CHAINS_AND_PRIOS_SUPPORTED; + } + +-static bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains) ++bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains) + { + return chains->flags & MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h +index 6d5be31b05dd7..9f53a08235582 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.h +@@ -27,6 +27,7 @@ struct mlx5_chains_attr { + + bool + mlx5_chains_prios_supported(struct mlx5_fs_chains *chains); ++bool mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains); + bool + mlx5_chains_backwards_supported(struct mlx5_fs_chains *chains); + u32 +@@ -72,6 +73,10 @@ mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains, + + #else /* CONFIG_MLX5_CLS_ACT */ + ++static inline bool ++mlx5_chains_ignore_flow_level_supported(struct mlx5_fs_chains *chains) ++{ return false; } ++ + static inline struct mlx5_flow_table * + mlx5_chains_get_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio, + u32 level) { return 
ERR_PTR(-EOPNOTSUPP); } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c +index 1fbcd012bb855..7ccfd40586cee 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c +@@ -112,7 +112,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, + int ret; + + ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB; +- ft_attr.level = dmn->info.caps.max_ft_level - 2; ++ ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2, ++ MLX5_FT_MAX_MULTIPATH_LEVEL); + ft_attr.reformat_en = reformat_req; + ft_attr.decap_en = reformat_req; + +diff --git a/drivers/net/wireguard/Makefile b/drivers/net/wireguard/Makefile +index fc52b2cb500b3..dbe1f8514efc3 100644 +--- a/drivers/net/wireguard/Makefile ++++ b/drivers/net/wireguard/Makefile +@@ -1,5 +1,4 @@ +-ccflags-y := -O3 +-ccflags-y += -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' ++ccflags-y := -D'pr_fmt(fmt)=KBUILD_MODNAME ": " fmt' + ccflags-$(CONFIG_WIREGUARD_DEBUG) += -DDEBUG + wireguard-y := main.o + wireguard-y += noise.o +diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c +index 3725e9cd85f4f..b7197e80f2264 100644 +--- a/drivers/net/wireguard/allowedips.c ++++ b/drivers/net/wireguard/allowedips.c +@@ -6,6 +6,8 @@ + #include "allowedips.h" + #include "peer.h" + ++static struct kmem_cache *node_cache; ++ + static void swap_endian(u8 *dst, const u8 *src, u8 bits) + { + if (bits == 32) { +@@ -28,8 +30,11 @@ static void copy_and_assign_cidr(struct allowedips_node *node, const u8 *src, + node->bitlen = bits; + memcpy(node->bits, src, bits / 8U); + } +-#define CHOOSE_NODE(parent, key) \ +- parent->bit[(key[parent->bit_at_a] >> parent->bit_at_b) & 1] ++ ++static inline u8 choose(struct allowedips_node *node, const u8 *key) ++{ ++ return (key[node->bit_at_a] >> node->bit_at_b) & 1; ++} + + static void push_rcu(struct allowedips_node **stack, + struct allowedips_node __rcu *p, unsigned int *len) +@@ -40,6 +45,11 @@ static void push_rcu(struct allowedips_node **stack, + } + } + ++static void node_free_rcu(struct rcu_head *rcu) ++{ ++ kmem_cache_free(node_cache, container_of(rcu, struct allowedips_node, rcu)); ++} ++ + static void root_free_rcu(struct rcu_head *rcu) + { + struct allowedips_node *node, *stack[128] = { +@@ -49,7 +59,7 @@ static void root_free_rcu(struct rcu_head *rcu) + while (len > 0 && (node = stack[--len])) { + push_rcu(stack, node->bit[0], &len); + push_rcu(stack, node->bit[1], &len); +- kfree(node); ++ kmem_cache_free(node_cache, node); + } + } + +@@ -66,60 +76,6 @@ static void root_remove_peer_lists(struct allowedips_node *root) + } + } + +-static void walk_remove_by_peer(struct allowedips_node __rcu **top, +- struct wg_peer *peer, struct mutex *lock) +-{ +-#define REF(p) rcu_access_pointer(p) +-#define DEREF(p) rcu_dereference_protected(*(p), lockdep_is_held(lock)) +-#define PUSH(p) ({ \ +- WARN_ON(IS_ENABLED(DEBUG) && len >= 128); \ +- stack[len++] = p; \ +- }) +- +- struct allowedips_node __rcu **stack[128], **nptr; +- struct allowedips_node *node, *prev; +- unsigned int len; +- +- if (unlikely(!peer || !REF(*top))) +- return; +- +- for (prev = NULL, len = 0, PUSH(top); len > 0; prev = node) { +- nptr = stack[len - 1]; +- node = DEREF(nptr); +- if (!node) { +- --len; +- continue; +- } +- if (!prev || REF(prev->bit[0]) == node || +- REF(prev->bit[1]) == node) { +- if (REF(node->bit[0])) +- PUSH(&node->bit[0]); +- else if (REF(node->bit[1])) +- 
PUSH(&node->bit[1]); +- } else if (REF(node->bit[0]) == prev) { +- if (REF(node->bit[1])) +- PUSH(&node->bit[1]); +- } else { +- if (rcu_dereference_protected(node->peer, +- lockdep_is_held(lock)) == peer) { +- RCU_INIT_POINTER(node->peer, NULL); +- list_del_init(&node->peer_list); +- if (!node->bit[0] || !node->bit[1]) { +- rcu_assign_pointer(*nptr, DEREF( +- &node->bit[!REF(node->bit[0])])); +- kfree_rcu(node, rcu); +- node = DEREF(nptr); +- } +- } +- --len; +- } +- } +- +-#undef REF +-#undef DEREF +-#undef PUSH +-} +- + static unsigned int fls128(u64 a, u64 b) + { + return a ? fls64(a) + 64U : fls64(b); +@@ -159,7 +115,7 @@ static struct allowedips_node *find_node(struct allowedips_node *trie, u8 bits, + found = node; + if (node->cidr == bits) + break; +- node = rcu_dereference_bh(CHOOSE_NODE(node, key)); ++ node = rcu_dereference_bh(node->bit[choose(node, key)]); + } + return found; + } +@@ -191,8 +147,7 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, + u8 cidr, u8 bits, struct allowedips_node **rnode, + struct mutex *lock) + { +- struct allowedips_node *node = rcu_dereference_protected(trie, +- lockdep_is_held(lock)); ++ struct allowedips_node *node = rcu_dereference_protected(trie, lockdep_is_held(lock)); + struct allowedips_node *parent = NULL; + bool exact = false; + +@@ -202,13 +157,24 @@ static bool node_placement(struct allowedips_node __rcu *trie, const u8 *key, + exact = true; + break; + } +- node = rcu_dereference_protected(CHOOSE_NODE(parent, key), +- lockdep_is_held(lock)); ++ node = rcu_dereference_protected(parent->bit[choose(parent, key)], lockdep_is_held(lock)); + } + *rnode = parent; + return exact; + } + ++static inline void connect_node(struct allowedips_node **parent, u8 bit, struct allowedips_node *node) ++{ ++ node->parent_bit_packed = (unsigned long)parent | bit; ++ rcu_assign_pointer(*parent, node); ++} ++ ++static inline void choose_and_connect_node(struct allowedips_node *parent, struct allowedips_node *node) ++{ ++ u8 bit = choose(parent, node->bits); ++ connect_node(&parent->bit[bit], bit, node); ++} ++ + static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, + u8 cidr, struct wg_peer *peer, struct mutex *lock) + { +@@ -218,13 +184,13 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, + return -EINVAL; + + if (!rcu_access_pointer(*trie)) { +- node = kzalloc(sizeof(*node), GFP_KERNEL); ++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL); + if (unlikely(!node)) + return -ENOMEM; + RCU_INIT_POINTER(node->peer, peer); + list_add_tail(&node->peer_list, &peer->allowedips_list); + copy_and_assign_cidr(node, key, cidr, bits); +- rcu_assign_pointer(*trie, node); ++ connect_node(trie, 2, node); + return 0; + } + if (node_placement(*trie, key, cidr, bits, &node, lock)) { +@@ -233,7 +199,7 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, + return 0; + } + +- newnode = kzalloc(sizeof(*newnode), GFP_KERNEL); ++ newnode = kmem_cache_zalloc(node_cache, GFP_KERNEL); + if (unlikely(!newnode)) + return -ENOMEM; + RCU_INIT_POINTER(newnode->peer, peer); +@@ -243,10 +209,10 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, + if (!node) { + down = rcu_dereference_protected(*trie, lockdep_is_held(lock)); + } else { +- down = rcu_dereference_protected(CHOOSE_NODE(node, key), +- lockdep_is_held(lock)); ++ const u8 bit = choose(node, key); ++ down = rcu_dereference_protected(node->bit[bit], lockdep_is_held(lock)); + if (!down) { +- 
rcu_assign_pointer(CHOOSE_NODE(node, key), newnode); ++ connect_node(&node->bit[bit], bit, newnode); + return 0; + } + } +@@ -254,30 +220,29 @@ static int add(struct allowedips_node __rcu **trie, u8 bits, const u8 *key, + parent = node; + + if (newnode->cidr == cidr) { +- rcu_assign_pointer(CHOOSE_NODE(newnode, down->bits), down); ++ choose_and_connect_node(newnode, down); + if (!parent) +- rcu_assign_pointer(*trie, newnode); ++ connect_node(trie, 2, newnode); + else +- rcu_assign_pointer(CHOOSE_NODE(parent, newnode->bits), +- newnode); +- } else { +- node = kzalloc(sizeof(*node), GFP_KERNEL); +- if (unlikely(!node)) { +- list_del(&newnode->peer_list); +- kfree(newnode); +- return -ENOMEM; +- } +- INIT_LIST_HEAD(&node->peer_list); +- copy_and_assign_cidr(node, newnode->bits, cidr, bits); ++ choose_and_connect_node(parent, newnode); ++ return 0; ++ } + +- rcu_assign_pointer(CHOOSE_NODE(node, down->bits), down); +- rcu_assign_pointer(CHOOSE_NODE(node, newnode->bits), newnode); +- if (!parent) +- rcu_assign_pointer(*trie, node); +- else +- rcu_assign_pointer(CHOOSE_NODE(parent, node->bits), +- node); ++ node = kmem_cache_zalloc(node_cache, GFP_KERNEL); ++ if (unlikely(!node)) { ++ list_del(&newnode->peer_list); ++ kmem_cache_free(node_cache, newnode); ++ return -ENOMEM; + } ++ INIT_LIST_HEAD(&node->peer_list); ++ copy_and_assign_cidr(node, newnode->bits, cidr, bits); ++ ++ choose_and_connect_node(node, down); ++ choose_and_connect_node(node, newnode); ++ if (!parent) ++ connect_node(trie, 2, node); ++ else ++ choose_and_connect_node(parent, node); + return 0; + } + +@@ -335,9 +300,41 @@ int wg_allowedips_insert_v6(struct allowedips *table, const struct in6_addr *ip, + void wg_allowedips_remove_by_peer(struct allowedips *table, + struct wg_peer *peer, struct mutex *lock) + { ++ struct allowedips_node *node, *child, **parent_bit, *parent, *tmp; ++ bool free_parent; ++ ++ if (list_empty(&peer->allowedips_list)) ++ return; + ++table->seq; +- walk_remove_by_peer(&table->root4, peer, lock); +- walk_remove_by_peer(&table->root6, peer, lock); ++ list_for_each_entry_safe(node, tmp, &peer->allowedips_list, peer_list) { ++ list_del_init(&node->peer_list); ++ RCU_INIT_POINTER(node->peer, NULL); ++ if (node->bit[0] && node->bit[1]) ++ continue; ++ child = rcu_dereference_protected(node->bit[!rcu_access_pointer(node->bit[0])], ++ lockdep_is_held(lock)); ++ if (child) ++ child->parent_bit_packed = node->parent_bit_packed; ++ parent_bit = (struct allowedips_node **)(node->parent_bit_packed & ~3UL); ++ *parent_bit = child; ++ parent = (void *)parent_bit - ++ offsetof(struct allowedips_node, bit[node->parent_bit_packed & 1]); ++ free_parent = !rcu_access_pointer(node->bit[0]) && ++ !rcu_access_pointer(node->bit[1]) && ++ (node->parent_bit_packed & 3) <= 1 && ++ !rcu_access_pointer(parent->peer); ++ if (free_parent) ++ child = rcu_dereference_protected( ++ parent->bit[!(node->parent_bit_packed & 1)], ++ lockdep_is_held(lock)); ++ call_rcu(&node->rcu, node_free_rcu); ++ if (!free_parent) ++ continue; ++ if (child) ++ child->parent_bit_packed = parent->parent_bit_packed; ++ *(struct allowedips_node **)(parent->parent_bit_packed & ~3UL) = child; ++ call_rcu(&parent->rcu, node_free_rcu); ++ } + } + + int wg_allowedips_read_node(struct allowedips_node *node, u8 ip[16], u8 *cidr) +@@ -374,4 +371,16 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, + return NULL; + } + ++int __init wg_allowedips_slab_init(void) ++{ ++ node_cache = KMEM_CACHE(allowedips_node, 0); ++ return node_cache ? 
0 : -ENOMEM; ++} ++ ++void wg_allowedips_slab_uninit(void) ++{ ++ rcu_barrier(); ++ kmem_cache_destroy(node_cache); ++} ++ + #include "selftest/allowedips.c" +diff --git a/drivers/net/wireguard/allowedips.h b/drivers/net/wireguard/allowedips.h +index e5c83cafcef4c..2346c797eb4d8 100644 +--- a/drivers/net/wireguard/allowedips.h ++++ b/drivers/net/wireguard/allowedips.h +@@ -15,14 +15,11 @@ struct wg_peer; + struct allowedips_node { + struct wg_peer __rcu *peer; + struct allowedips_node __rcu *bit[2]; +- /* While it may seem scandalous that we waste space for v4, +- * we're alloc'ing to the nearest power of 2 anyway, so this +- * doesn't actually make a difference. +- */ +- u8 bits[16] __aligned(__alignof(u64)); + u8 cidr, bit_at_a, bit_at_b, bitlen; ++ u8 bits[16] __aligned(__alignof(u64)); + +- /* Keep rarely used list at bottom to be beyond cache line. */ ++ /* Keep rarely used members at bottom to be beyond cache line. */ ++ unsigned long parent_bit_packed; + union { + struct list_head peer_list; + struct rcu_head rcu; +@@ -33,7 +30,7 @@ struct allowedips { + struct allowedips_node __rcu *root4; + struct allowedips_node __rcu *root6; + u64 seq; +-}; ++} __aligned(4); /* We pack the lower 2 bits of &root, but m68k only gives 16-bit alignment. */ + + void wg_allowedips_init(struct allowedips *table); + void wg_allowedips_free(struct allowedips *table, struct mutex *mutex); +@@ -56,4 +53,7 @@ struct wg_peer *wg_allowedips_lookup_src(struct allowedips *table, + bool wg_allowedips_selftest(void); + #endif + ++int wg_allowedips_slab_init(void); ++void wg_allowedips_slab_uninit(void); ++ + #endif /* _WG_ALLOWEDIPS_H */ +diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c +index 7a7d5f1a80fc7..75dbe77b0b4b4 100644 +--- a/drivers/net/wireguard/main.c ++++ b/drivers/net/wireguard/main.c +@@ -21,13 +21,22 @@ static int __init mod_init(void) + { + int ret; + ++ ret = wg_allowedips_slab_init(); ++ if (ret < 0) ++ goto err_allowedips; ++ + #ifdef DEBUG ++ ret = -ENOTRECOVERABLE; + if (!wg_allowedips_selftest() || !wg_packet_counter_selftest() || + !wg_ratelimiter_selftest()) +- return -ENOTRECOVERABLE; ++ goto err_peer; + #endif + wg_noise_init(); + ++ ret = wg_peer_init(); ++ if (ret < 0) ++ goto err_peer; ++ + ret = wg_device_init(); + if (ret < 0) + goto err_device; +@@ -44,6 +53,10 @@ static int __init mod_init(void) + err_netlink: + wg_device_uninit(); + err_device: ++ wg_peer_uninit(); ++err_peer: ++ wg_allowedips_slab_uninit(); ++err_allowedips: + return ret; + } + +@@ -51,6 +64,8 @@ static void __exit mod_exit(void) + { + wg_genetlink_uninit(); + wg_device_uninit(); ++ wg_peer_uninit(); ++ wg_allowedips_slab_uninit(); + } + + module_init(mod_init); +diff --git a/drivers/net/wireguard/peer.c b/drivers/net/wireguard/peer.c +index cd5cb0292cb67..1acd00ab2fbcb 100644 +--- a/drivers/net/wireguard/peer.c ++++ b/drivers/net/wireguard/peer.c +@@ -15,6 +15,7 @@ + #include <linux/rcupdate.h> + #include <linux/list.h> + ++static struct kmem_cache *peer_cache; + static atomic64_t peer_counter = ATOMIC64_INIT(0); + + struct wg_peer *wg_peer_create(struct wg_device *wg, +@@ -29,10 +30,10 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, + if (wg->num_peers >= MAX_PEERS_PER_DEVICE) + return ERR_PTR(ret); + +- peer = kzalloc(sizeof(*peer), GFP_KERNEL); ++ peer = kmem_cache_zalloc(peer_cache, GFP_KERNEL); + if (unlikely(!peer)) + return ERR_PTR(ret); +- if (dst_cache_init(&peer->endpoint_cache, GFP_KERNEL)) ++ if (unlikely(dst_cache_init(&peer->endpoint_cache, GFP_KERNEL))) + goto 
err; + + peer->device = wg; +@@ -64,7 +65,7 @@ struct wg_peer *wg_peer_create(struct wg_device *wg, + return peer; + + err: +- kfree(peer); ++ kmem_cache_free(peer_cache, peer); + return ERR_PTR(ret); + } + +@@ -88,7 +89,7 @@ static void peer_make_dead(struct wg_peer *peer) + /* Mark as dead, so that we don't allow jumping contexts after. */ + WRITE_ONCE(peer->is_dead, true); + +- /* The caller must now synchronize_rcu() for this to take effect. */ ++ /* The caller must now synchronize_net() for this to take effect. */ + } + + static void peer_remove_after_dead(struct wg_peer *peer) +@@ -160,7 +161,7 @@ void wg_peer_remove(struct wg_peer *peer) + lockdep_assert_held(&peer->device->device_update_lock); + + peer_make_dead(peer); +- synchronize_rcu(); ++ synchronize_net(); + peer_remove_after_dead(peer); + } + +@@ -178,7 +179,7 @@ void wg_peer_remove_all(struct wg_device *wg) + peer_make_dead(peer); + list_add_tail(&peer->peer_list, &dead_peers); + } +- synchronize_rcu(); ++ synchronize_net(); + list_for_each_entry_safe(peer, temp, &dead_peers, peer_list) + peer_remove_after_dead(peer); + } +@@ -193,7 +194,8 @@ static void rcu_release(struct rcu_head *rcu) + /* The final zeroing takes care of clearing any remaining handshake key + * material and other potentially sensitive information. + */ +- kfree_sensitive(peer); ++ memzero_explicit(peer, sizeof(*peer)); ++ kmem_cache_free(peer_cache, peer); + } + + static void kref_release(struct kref *refcount) +@@ -225,3 +227,14 @@ void wg_peer_put(struct wg_peer *peer) + return; + kref_put(&peer->refcount, kref_release); + } ++ ++int __init wg_peer_init(void) ++{ ++ peer_cache = KMEM_CACHE(wg_peer, 0); ++ return peer_cache ? 0 : -ENOMEM; ++} ++ ++void wg_peer_uninit(void) ++{ ++ kmem_cache_destroy(peer_cache); ++} +diff --git a/drivers/net/wireguard/peer.h b/drivers/net/wireguard/peer.h +index 8d53b687a1d16..76e4d3128ad4e 100644 +--- a/drivers/net/wireguard/peer.h ++++ b/drivers/net/wireguard/peer.h +@@ -80,4 +80,7 @@ void wg_peer_put(struct wg_peer *peer); + void wg_peer_remove(struct wg_peer *peer); + void wg_peer_remove_all(struct wg_device *wg); + ++int wg_peer_init(void); ++void wg_peer_uninit(void); ++ + #endif /* _WG_PEER_H */ +diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c +index 846db14cb046b..e173204ae7d78 100644 +--- a/drivers/net/wireguard/selftest/allowedips.c ++++ b/drivers/net/wireguard/selftest/allowedips.c +@@ -19,32 +19,22 @@ + + #include <linux/siphash.h> + +-static __init void swap_endian_and_apply_cidr(u8 *dst, const u8 *src, u8 bits, +- u8 cidr) +-{ +- swap_endian(dst, src, bits); +- memset(dst + (cidr + 7) / 8, 0, bits / 8 - (cidr + 7) / 8); +- if (cidr) +- dst[(cidr + 7) / 8 - 1] &= ~0U << ((8 - (cidr % 8)) % 8); +-} +- + static __init void print_node(struct allowedips_node *node, u8 bits) + { + char *fmt_connection = KERN_DEBUG "\t\"%p/%d\" -> \"%p/%d\";\n"; +- char *fmt_declaration = KERN_DEBUG +- "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; ++ char *fmt_declaration = KERN_DEBUG "\t\"%p/%d\"[style=%s, color=\"#%06x\"];\n"; ++ u8 ip1[16], ip2[16], cidr1, cidr2; + char *style = "dotted"; +- u8 ip1[16], ip2[16]; + u32 color = 0; + ++ if (node == NULL) ++ return; + if (bits == 32) { + fmt_connection = KERN_DEBUG "\t\"%pI4/%d\" -> \"%pI4/%d\";\n"; +- fmt_declaration = KERN_DEBUG +- "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; ++ fmt_declaration = KERN_DEBUG "\t\"%pI4/%d\"[style=%s, color=\"#%06x\"];\n"; + } else if (bits == 128) { + fmt_connection = KERN_DEBUG "\t\"%pI6/%d\" 
-> \"%pI6/%d\";\n"; +- fmt_declaration = KERN_DEBUG +- "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; ++ fmt_declaration = KERN_DEBUG "\t\"%pI6/%d\"[style=%s, color=\"#%06x\"];\n"; + } + if (node->peer) { + hsiphash_key_t key = { { 0 } }; +@@ -55,24 +45,20 @@ static __init void print_node(struct allowedips_node *node, u8 bits) + hsiphash_1u32(0xabad1dea, &key) % 200; + style = "bold"; + } +- swap_endian_and_apply_cidr(ip1, node->bits, bits, node->cidr); +- printk(fmt_declaration, ip1, node->cidr, style, color); ++ wg_allowedips_read_node(node, ip1, &cidr1); ++ printk(fmt_declaration, ip1, cidr1, style, color); + if (node->bit[0]) { +- swap_endian_and_apply_cidr(ip2, +- rcu_dereference_raw(node->bit[0])->bits, bits, +- node->cidr); +- printk(fmt_connection, ip1, node->cidr, ip2, +- rcu_dereference_raw(node->bit[0])->cidr); +- print_node(rcu_dereference_raw(node->bit[0]), bits); ++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[0]), ip2, &cidr2); ++ printk(fmt_connection, ip1, cidr1, ip2, cidr2); + } + if (node->bit[1]) { +- swap_endian_and_apply_cidr(ip2, +- rcu_dereference_raw(node->bit[1])->bits, +- bits, node->cidr); +- printk(fmt_connection, ip1, node->cidr, ip2, +- rcu_dereference_raw(node->bit[1])->cidr); +- print_node(rcu_dereference_raw(node->bit[1]), bits); ++ wg_allowedips_read_node(rcu_dereference_raw(node->bit[1]), ip2, &cidr2); ++ printk(fmt_connection, ip1, cidr1, ip2, cidr2); + } ++ if (node->bit[0]) ++ print_node(rcu_dereference_raw(node->bit[0]), bits); ++ if (node->bit[1]) ++ print_node(rcu_dereference_raw(node->bit[1]), bits); + } + + static __init void print_tree(struct allowedips_node __rcu *top, u8 bits) +@@ -121,8 +107,8 @@ static __init inline union nf_inet_addr horrible_cidr_to_mask(u8 cidr) + { + union nf_inet_addr mask; + +- memset(&mask, 0x00, 128 / 8); +- memset(&mask, 0xff, cidr / 8); ++ memset(&mask, 0, sizeof(mask)); ++ memset(&mask.all, 0xff, cidr / 8); + if (cidr % 32) + mask.all[cidr / 32] = (__force u32)htonl( + (0xFFFFFFFFUL << (32 - (cidr % 32))) & 0xFFFFFFFFUL); +@@ -149,42 +135,36 @@ horrible_mask_self(struct horrible_allowedips_node *node) + } + + static __init inline bool +-horrible_match_v4(const struct horrible_allowedips_node *node, +- struct in_addr *ip) ++horrible_match_v4(const struct horrible_allowedips_node *node, struct in_addr *ip) + { + return (ip->s_addr & node->mask.ip) == node->ip.ip; + } + + static __init inline bool +-horrible_match_v6(const struct horrible_allowedips_node *node, +- struct in6_addr *ip) ++horrible_match_v6(const struct horrible_allowedips_node *node, struct in6_addr *ip) + { +- return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == +- node->ip.ip6[0] && +- (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == +- node->ip.ip6[1] && +- (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == +- node->ip.ip6[2] && ++ return (ip->in6_u.u6_addr32[0] & node->mask.ip6[0]) == node->ip.ip6[0] && ++ (ip->in6_u.u6_addr32[1] & node->mask.ip6[1]) == node->ip.ip6[1] && ++ (ip->in6_u.u6_addr32[2] & node->mask.ip6[2]) == node->ip.ip6[2] && + (ip->in6_u.u6_addr32[3] & node->mask.ip6[3]) == node->ip.ip6[3]; + } + + static __init void +-horrible_insert_ordered(struct horrible_allowedips *table, +- struct horrible_allowedips_node *node) ++horrible_insert_ordered(struct horrible_allowedips *table, struct horrible_allowedips_node *node) + { + struct horrible_allowedips_node *other = NULL, *where = NULL; + u8 my_cidr = horrible_mask_to_cidr(node->mask); + + hlist_for_each_entry(other, &table->head, table) { +- if (!memcmp(&other->mask, 
&node->mask, +- sizeof(union nf_inet_addr)) && +- !memcmp(&other->ip, &node->ip, +- sizeof(union nf_inet_addr)) && +- other->ip_version == node->ip_version) { ++ if (other->ip_version == node->ip_version && ++ !memcmp(&other->mask, &node->mask, sizeof(union nf_inet_addr)) && ++ !memcmp(&other->ip, &node->ip, sizeof(union nf_inet_addr))) { + other->value = node->value; + kfree(node); + return; + } ++ } ++ hlist_for_each_entry(other, &table->head, table) { + where = other; + if (horrible_mask_to_cidr(other->mask) <= my_cidr) + break; +@@ -201,8 +181,7 @@ static __init int + horrible_allowedips_insert_v4(struct horrible_allowedips *table, + struct in_addr *ip, u8 cidr, void *value) + { +- struct horrible_allowedips_node *node = kzalloc(sizeof(*node), +- GFP_KERNEL); ++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); + + if (unlikely(!node)) + return -ENOMEM; +@@ -219,8 +198,7 @@ static __init int + horrible_allowedips_insert_v6(struct horrible_allowedips *table, + struct in6_addr *ip, u8 cidr, void *value) + { +- struct horrible_allowedips_node *node = kzalloc(sizeof(*node), +- GFP_KERNEL); ++ struct horrible_allowedips_node *node = kzalloc(sizeof(*node), GFP_KERNEL); + + if (unlikely(!node)) + return -ENOMEM; +@@ -234,39 +212,43 @@ horrible_allowedips_insert_v6(struct horrible_allowedips *table, + } + + static __init void * +-horrible_allowedips_lookup_v4(struct horrible_allowedips *table, +- struct in_addr *ip) ++horrible_allowedips_lookup_v4(struct horrible_allowedips *table, struct in_addr *ip) + { + struct horrible_allowedips_node *node; +- void *ret = NULL; + + hlist_for_each_entry(node, &table->head, table) { +- if (node->ip_version != 4) +- continue; +- if (horrible_match_v4(node, ip)) { +- ret = node->value; +- break; +- } ++ if (node->ip_version == 4 && horrible_match_v4(node, ip)) ++ return node->value; + } +- return ret; ++ return NULL; + } + + static __init void * +-horrible_allowedips_lookup_v6(struct horrible_allowedips *table, +- struct in6_addr *ip) ++horrible_allowedips_lookup_v6(struct horrible_allowedips *table, struct in6_addr *ip) + { + struct horrible_allowedips_node *node; +- void *ret = NULL; + + hlist_for_each_entry(node, &table->head, table) { +- if (node->ip_version != 6) ++ if (node->ip_version == 6 && horrible_match_v6(node, ip)) ++ return node->value; ++ } ++ return NULL; ++} ++ ++ ++static __init void ++horrible_allowedips_remove_by_value(struct horrible_allowedips *table, void *value) ++{ ++ struct horrible_allowedips_node *node; ++ struct hlist_node *h; ++ ++ hlist_for_each_entry_safe(node, h, &table->head, table) { ++ if (node->value != value) + continue; +- if (horrible_match_v6(node, ip)) { +- ret = node->value; +- break; +- } ++ hlist_del(&node->table); ++ kfree(node); + } +- return ret; ++ + } + + static __init bool randomized_test(void) +@@ -296,6 +278,7 @@ static __init bool randomized_test(void) + goto free; + } + kref_init(&peers[i]->refcount); ++ INIT_LIST_HEAD(&peers[i]->allowedips_list); + } + + mutex_lock(&mutex); +@@ -333,7 +316,7 @@ static __init bool randomized_test(void) + if (wg_allowedips_insert_v4(&t, + (struct in_addr *)mutated, + cidr, peer, &mutex) < 0) { +- pr_err("allowedips random malloc: FAIL\n"); ++ pr_err("allowedips random self-test malloc: FAIL\n"); + goto free_locked; + } + if (horrible_allowedips_insert_v4(&h, +@@ -396,23 +379,33 @@ static __init bool randomized_test(void) + print_tree(t.root6, 128); + } + +- for (i = 0; i < NUM_QUERIES; ++i) { +- prandom_bytes(ip, 4); +- if (lookup(t.root4, 32, ip) 
!= +- horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { +- pr_err("allowedips random self-test: FAIL\n"); +- goto free; ++ for (j = 0;; ++j) { ++ for (i = 0; i < NUM_QUERIES; ++i) { ++ prandom_bytes(ip, 4); ++ if (lookup(t.root4, 32, ip) != horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip)) { ++ horrible_allowedips_lookup_v4(&h, (struct in_addr *)ip); ++ pr_err("allowedips random v4 self-test: FAIL\n"); ++ goto free; ++ } ++ prandom_bytes(ip, 16); ++ if (lookup(t.root6, 128, ip) != horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { ++ pr_err("allowedips random v6 self-test: FAIL\n"); ++ goto free; ++ } + } ++ if (j >= NUM_PEERS) ++ break; ++ mutex_lock(&mutex); ++ wg_allowedips_remove_by_peer(&t, peers[j], &mutex); ++ mutex_unlock(&mutex); ++ horrible_allowedips_remove_by_value(&h, peers[j]); + } + +- for (i = 0; i < NUM_QUERIES; ++i) { +- prandom_bytes(ip, 16); +- if (lookup(t.root6, 128, ip) != +- horrible_allowedips_lookup_v6(&h, (struct in6_addr *)ip)) { +- pr_err("allowedips random self-test: FAIL\n"); +- goto free; +- } ++ if (t.root4 || t.root6) { ++ pr_err("allowedips random self-test removal: FAIL\n"); ++ goto free; + } ++ + ret = true; + + free: +diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c +index d9ad850daa793..8c496b7471082 100644 +--- a/drivers/net/wireguard/socket.c ++++ b/drivers/net/wireguard/socket.c +@@ -430,7 +430,7 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4, + if (new4) + wg->incoming_port = ntohs(inet_sk(new4)->inet_sport); + mutex_unlock(&wg->socket_update_lock); +- synchronize_rcu(); ++ synchronize_net(); + sock_free(old4); + sock_free(old6); + } +diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +index 02d0aa0b815e9..d2489dc9dc139 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c ++++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +@@ -87,7 +87,7 @@ static const struct ieee80211_ops mt76x0e_ops = { + .reconfig_complete = mt76x02_reconfig_complete, + }; + +-static int mt76x0e_register_device(struct mt76x02_dev *dev) ++static int mt76x0e_init_hardware(struct mt76x02_dev *dev, bool resume) + { + int err; + +@@ -100,9 +100,11 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev) + if (err < 0) + return err; + +- err = mt76x02_dma_init(dev); +- if (err < 0) +- return err; ++ if (!resume) { ++ err = mt76x02_dma_init(dev); ++ if (err < 0) ++ return err; ++ } + + err = mt76x0_init_hardware(dev); + if (err < 0) +@@ -123,6 +125,17 @@ static int mt76x0e_register_device(struct mt76x02_dev *dev) + mt76_clear(dev, 0x110, BIT(9)); + mt76_set(dev, MT_MAX_LEN_CFG, BIT(13)); + ++ return 0; ++} ++ ++static int mt76x0e_register_device(struct mt76x02_dev *dev) ++{ ++ int err; ++ ++ err = mt76x0e_init_hardware(dev, false); ++ if (err < 0) ++ return err; ++ + err = mt76x0_register_device(dev); + if (err < 0) + return err; +@@ -167,6 +180,8 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) + if (ret) + return ret; + ++ mt76_pci_disable_aspm(pdev); ++ + mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt76x0e_ops, + &drv_ops); + if (!mdev) +@@ -220,6 +235,60 @@ mt76x0e_remove(struct pci_dev *pdev) + mt76_free_device(mdev); + } + ++#ifdef CONFIG_PM ++static int mt76x0e_suspend(struct pci_dev *pdev, pm_message_t state) ++{ ++ struct mt76_dev *mdev = pci_get_drvdata(pdev); ++ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); ++ int i; ++ ++ mt76_worker_disable(&mdev->tx_worker); ++ for (i 
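
The reworked randomized_test() above is a differential test: every random address is resolved by both the production trie and the deliberately naive linear-scan table, any disagreement is a failure, and between query rounds one peer is removed from both structures so the removal paths get the same scrutiny. Stripped to its shape, with hypothetical lookup helpers:

	#include <linux/prandom.h>
	#include <linux/types.h>

	/* Differential testing: check an optimized implementation against a
	 * trivially correct reference model on random inputs. */
	static bool differential_round(void)
	{
		u8 ip[16];
		int i;

		for (i = 0; i < 1000; ++i) {
			prandom_bytes(ip, sizeof(ip));
			if (fast_lookup(ip) != reference_lookup(ip))
				return false;	/* the two models diverged */
		}
		return true;
	}
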
= 0; i < ARRAY_SIZE(mdev->phy.q_tx); i++) ++ mt76_queue_tx_cleanup(dev, mdev->phy.q_tx[i], true); ++ for (i = 0; i < ARRAY_SIZE(mdev->q_mcu); i++) ++ mt76_queue_tx_cleanup(dev, mdev->q_mcu[i], true); ++ napi_disable(&mdev->tx_napi); ++ ++ mt76_for_each_q_rx(mdev, i) ++ napi_disable(&mdev->napi[i]); ++ ++ mt76x02_dma_disable(dev); ++ mt76x02_mcu_cleanup(dev); ++ mt76x0_chip_onoff(dev, false, false); ++ ++ pci_enable_wake(pdev, pci_choose_state(pdev, state), true); ++ pci_save_state(pdev); ++ ++ return pci_set_power_state(pdev, pci_choose_state(pdev, state)); ++} ++ ++static int mt76x0e_resume(struct pci_dev *pdev) ++{ ++ struct mt76_dev *mdev = pci_get_drvdata(pdev); ++ struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); ++ int err, i; ++ ++ err = pci_set_power_state(pdev, PCI_D0); ++ if (err) ++ return err; ++ ++ pci_restore_state(pdev); ++ ++ mt76_worker_enable(&mdev->tx_worker); ++ ++ mt76_for_each_q_rx(mdev, i) { ++ mt76_queue_rx_reset(dev, i); ++ napi_enable(&mdev->napi[i]); ++ napi_schedule(&mdev->napi[i]); ++ } ++ ++ napi_enable(&mdev->tx_napi); ++ napi_schedule(&mdev->tx_napi); ++ ++ return mt76x0e_init_hardware(dev, true); ++} ++#endif /* CONFIG_PM */ ++ + static const struct pci_device_id mt76x0e_device_table[] = { + { PCI_DEVICE(0x14c3, 0x7610) }, + { PCI_DEVICE(0x14c3, 0x7630) }, +@@ -237,6 +306,10 @@ static struct pci_driver mt76x0e_driver = { + .id_table = mt76x0e_device_table, + .probe = mt76x0e_probe, + .remove = mt76x0e_remove, ++#ifdef CONFIG_PM ++ .suspend = mt76x0e_suspend, ++ .resume = mt76x0e_resume, ++#endif /* CONFIG_PM */ + }; + + module_pci_driver(mt76x0e_driver); +diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +index 62afbad77596b..be88c9f5637a5 100644 +--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c ++++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +@@ -391,29 +391,37 @@ static void + mt7921_mcu_tx_rate_report(struct mt7921_dev *dev, struct sk_buff *skb, + u16 wlan_idx) + { +- struct mt7921_mcu_wlan_info_event *wtbl_info = +- (struct mt7921_mcu_wlan_info_event *)(skb->data); +- struct rate_info rate = {}; +- u8 curr_idx = wtbl_info->rate_info.rate_idx; +- u16 curr = le16_to_cpu(wtbl_info->rate_info.rate[curr_idx]); +- struct mt7921_mcu_peer_cap peer = wtbl_info->peer_cap; ++ struct mt7921_mcu_wlan_info_event *wtbl_info; + struct mt76_phy *mphy = &dev->mphy; + struct mt7921_sta_stats *stats; ++ struct rate_info rate = {}; + struct mt7921_sta *msta; + struct mt76_wcid *wcid; ++ u8 idx; + + if (wlan_idx >= MT76_N_WCIDS) + return; ++ ++ wtbl_info = (struct mt7921_mcu_wlan_info_event *)skb->data; ++ idx = wtbl_info->rate_info.rate_idx; ++ if (idx >= ARRAY_SIZE(wtbl_info->rate_info.rate)) ++ return; ++ ++ rcu_read_lock(); ++ + wcid = rcu_dereference(dev->mt76.wcid[wlan_idx]); + if (!wcid) +- return; ++ goto out; + + msta = container_of(wcid, struct mt7921_sta, wcid); + stats = &msta->stats; + + /* current rate */ +- mt7921_mcu_tx_rate_parse(mphy, &peer, &rate, curr); ++ mt7921_mcu_tx_rate_parse(mphy, &wtbl_info->peer_cap, &rate, ++ le16_to_cpu(wtbl_info->rate_info.rate[idx])); + stats->tx_rate = rate; ++out: ++ rcu_read_unlock(); + } + + static void +diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c +index 193b723fe3bd7..c58996c1e2309 100644 +--- a/drivers/net/xen-netback/interface.c ++++ b/drivers/net/xen-netback/interface.c +@@ -684,6 +684,7 @@ static void xenvif_disconnect_queue(struct xenvif_queue *queue) + { + if (queue->task) { + 
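
Two defenses are added to mt7921_mcu_tx_rate_report() above: the firmware-supplied rate index is bounds-checked before use, and the wcid lookup now happens under rcu_read_lock() so the object cannot be freed while it is being read. The combined shape, with hypothetical names for everything outside the patch:

	/* Never trust a device-provided index, and hold the RCU read lock
	 * across any use of an rcu_dereference()'d pointer. */
	idx = event->rate_idx;
	if (idx >= ARRAY_SIZE(event->rate))
		return;

	rcu_read_lock();
	wcid = rcu_dereference(dev->wcid_table[wlan_idx]);
	if (wcid)
		update_rate(wcid, le16_to_cpu(event->rate[idx]));
	rcu_read_unlock();
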
kthread_stop(queue->task); ++ put_task_struct(queue->task); + queue->task = NULL; + } + +@@ -745,6 +746,11 @@ int xenvif_connect_data(struct xenvif_queue *queue, + if (IS_ERR(task)) + goto kthread_err; + queue->task = task; ++ /* ++ * Take a reference to the task in order to prevent it from being freed ++ * if the thread function returns before kthread_stop is called. ++ */ ++ get_task_struct(task); + + task = kthread_run(xenvif_dealloc_kthread, queue, + "%s-dealloc", queue->name); +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c +index be905d4fdb47f..ce8b3ce7582be 100644 +--- a/drivers/nvme/host/rdma.c ++++ b/drivers/nvme/host/rdma.c +@@ -1319,16 +1319,17 @@ static int nvme_rdma_map_sg_inline(struct nvme_rdma_queue *queue, + int count) + { + struct nvme_sgl_desc *sg = &c->common.dptr.sgl; +- struct scatterlist *sgl = req->data_sgl.sg_table.sgl; + struct ib_sge *sge = &req->sge[1]; ++ struct scatterlist *sgl; + u32 len = 0; + int i; + +- for (i = 0; i < count; i++, sgl++, sge++) { ++ for_each_sg(req->data_sgl.sg_table.sgl, sgl, count, i) { + sge->addr = sg_dma_address(sgl); + sge->length = sg_dma_len(sgl); + sge->lkey = queue->device->pd->local_dma_lkey; + len += sge->length; ++ sge++; + } + + sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); +diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c +index 348057fdc568f..7d16cb4cd8acf 100644 +--- a/drivers/nvme/target/core.c ++++ b/drivers/nvme/target/core.c +@@ -999,19 +999,23 @@ static unsigned int nvmet_data_transfer_len(struct nvmet_req *req) + return req->transfer_len - req->metadata_len; + } + +-static int nvmet_req_alloc_p2pmem_sgls(struct nvmet_req *req) ++static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev, ++ struct nvmet_req *req) + { +- req->sg = pci_p2pmem_alloc_sgl(req->p2p_dev, &req->sg_cnt, ++ req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt, + nvmet_data_transfer_len(req)); + if (!req->sg) + goto out_err; + + if (req->metadata_len) { +- req->metadata_sg = pci_p2pmem_alloc_sgl(req->p2p_dev, ++ req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev, + &req->metadata_sg_cnt, req->metadata_len); + if (!req->metadata_sg) + goto out_free_sg; + } ++ ++ req->p2p_dev = p2p_dev; ++ + return 0; + out_free_sg: + pci_p2pmem_free_sgl(req->p2p_dev, req->sg); +@@ -1019,25 +1023,19 @@ out_err: + return -ENOMEM; + } + +-static bool nvmet_req_find_p2p_dev(struct nvmet_req *req) ++static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req) + { +- if (!IS_ENABLED(CONFIG_PCI_P2PDMA)) +- return false; +- +- if (req->sq->ctrl && req->sq->qid && req->ns) { +- req->p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, +- req->ns->nsid); +- if (req->p2p_dev) +- return true; +- } +- +- req->p2p_dev = NULL; +- return false; ++ if (!IS_ENABLED(CONFIG_PCI_P2PDMA) || ++ !req->sq->ctrl || !req->sq->qid || !req->ns) ++ return NULL; ++ return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid); + } + + int nvmet_req_alloc_sgls(struct nvmet_req *req) + { +- if (nvmet_req_find_p2p_dev(req) && !nvmet_req_alloc_p2pmem_sgls(req)) ++ struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req); ++ ++ if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req)) + return 0; + + req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL, +@@ -1066,6 +1064,7 @@ void nvmet_req_free_sgls(struct nvmet_req *req) + pci_p2pmem_free_sgl(req->p2p_dev, req->sg); + if (req->metadata_sg) + pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg); ++ req->p2p_dev = NULL; + } else { + sgl_free(req->sg); + if (req->metadata_sg) +diff 
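
The xen-netback hunk above pairs kthread_run() with get_task_struct(): if the thread function returns on its own, the task can be reaped, and a later kthread_stop() would touch freed memory. The matching put_task_struct() in xenvif_disconnect_queue() drops the pin after the stop. As a self-contained sketch (worker_fn and the demo wrappers are hypothetical):

	#include <linux/err.h>
	#include <linux/kthread.h>
	#include <linux/sched/task.h>

	static struct task_struct *worker;

	static int start_worker(void *data)
	{
		worker = kthread_run(worker_fn, data, "demo-worker");
		if (IS_ERR(worker))
			return PTR_ERR(worker);
		/* Pin the task_struct so kthread_stop() below stays safe even
		 * if worker_fn() has already returned by then. */
		get_task_struct(worker);
		return 0;
	}

	static void stop_worker(void)
	{
		kthread_stop(worker);
		put_task_struct(worker);	/* drop the pin taken at start */
	}
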
--git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c +index 920cf329268b5..f8a5a4eb5bcef 100644 +--- a/drivers/scsi/lpfc/lpfc_sli.c ++++ b/drivers/scsi/lpfc/lpfc_sli.c +@@ -20591,10 +20591,8 @@ lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + abtswqe = &abtsiocb->wqe; + memset(abtswqe, 0, sizeof(*abtswqe)); + +- if (lpfc_is_link_up(phba)) ++ if (!lpfc_is_link_up(phba)) + bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1); +- else +- bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 0); + bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG); + abtswqe->abort_cmd.rsrvd5 = 0; + abtswqe->abort_cmd.wqe_com.abort_tag = xritag; +diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c +index 7a77e375b503c..6b52f0c526baa 100644 +--- a/drivers/tee/optee/call.c ++++ b/drivers/tee/optee/call.c +@@ -216,6 +216,7 @@ int optee_open_session(struct tee_context *ctx, + struct optee_msg_arg *msg_arg; + phys_addr_t msg_parg; + struct optee_session *sess = NULL; ++ uuid_t client_uuid; + + /* +2 for the meta parameters added below */ + shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg); +@@ -236,10 +237,11 @@ int optee_open_session(struct tee_context *ctx, + memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid)); + msg_arg->params[1].u.value.c = arg->clnt_login; + +- rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value, +- arg->clnt_login, arg->clnt_uuid); ++ rc = tee_session_calc_client_uuid(&client_uuid, arg->clnt_login, ++ arg->clnt_uuid); + if (rc) + goto out; ++ export_uuid(msg_arg->params[1].u.octets, &client_uuid); + + rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param); + if (rc) +diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h +index 81ff593ac4ec2..e3d72d09c4848 100644 +--- a/drivers/tee/optee/optee_msg.h ++++ b/drivers/tee/optee/optee_msg.h +@@ -9,7 +9,7 @@ + #include <linux/types.h> + + /* +- * This file defines the OP-TEE message protocol used to communicate ++ * This file defines the OP-TEE message protocol (ABI) used to communicate + * with an instance of OP-TEE running in secure world. + * + * This file is divided into two sections. +@@ -144,9 +144,10 @@ struct optee_msg_param_value { + * @tmem: parameter by temporary memory reference + * @rmem: parameter by registered memory reference + * @value: parameter by opaque value ++ * @octets: parameter by octet string + * + * @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in +- * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value, ++ * the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets, + * OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and + * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem, + * OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used. +@@ -157,6 +158,7 @@ struct optee_msg_param { + struct optee_msg_param_tmem tmem; + struct optee_msg_param_rmem rmem; + struct optee_msg_param_value value; ++ u8 octets[24]; + } u; + }; + +diff --git a/drivers/thermal/intel/therm_throt.c b/drivers/thermal/intel/therm_throt.c +index f8e882592ba5d..99abdc03c44ce 100644 +--- a/drivers/thermal/intel/therm_throt.c ++++ b/drivers/thermal/intel/therm_throt.c +@@ -621,6 +621,17 @@ bool x86_thermal_enabled(void) + return atomic_read(&therm_throt_en); + } + ++void __init therm_lvt_init(void) ++{ ++ /* ++ * This function is only called on boot CPU. 
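
In the optee_open_session() fix above, the client UUID is computed into a properly typed local and then serialized with export_uuid() into the new octets member, instead of casting the message's value union to uuid_t. The message layout is an ABI shared with secure world, so a UUID must cross it as an explicit octet string, never as a host struct. A minimal sketch (wrapper name hypothetical):

	#include <linux/uuid.h>

	static void uuid_to_wire(u8 wire[UUID_SIZE], const uuid_t *id)
	{
		/* Explicit, layout-stable serialization into the ABI buffer. */
		export_uuid(wire, id);
	}
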
Save the init thermal ++ * LVT value on BSP and use that value to restore APs' thermal LVT ++ * entry BIOS programmed later ++ */ ++ if (intel_thermal_supported(&boot_cpu_data)) ++ lvtthmr_init = apic_read(APIC_LVTTHMR); ++} ++ + void intel_init_thermal(struct cpuinfo_x86 *c) + { + unsigned int cpu = smp_processor_id(); +@@ -630,10 +641,6 @@ void intel_init_thermal(struct cpuinfo_x86 *c) + if (!intel_thermal_supported(c)) + return; + +- /* On the BSP? */ +- if (c == &boot_cpu_data) +- lvtthmr_init = apic_read(APIC_LVTTHMR); +- + /* + * First check if its enabled already, in which case there might + * be some SMM goo which handles it, so we can't even put a handler +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c +index 99dfa884cbefb..68c6535bbf7f0 100644 +--- a/drivers/tty/serial/stm32-usart.c ++++ b/drivers/tty/serial/stm32-usart.c +@@ -214,14 +214,11 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded) + struct tty_port *tport = &port->state->port; + struct stm32_port *stm32_port = to_stm32_port(port); + const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs; +- unsigned long c, flags; ++ unsigned long c; + u32 sr; + char flag; + +- if (threaded) +- spin_lock_irqsave(&port->lock, flags); +- else +- spin_lock(&port->lock); ++ spin_lock(&port->lock); + + while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res, + threaded)) { +@@ -278,10 +275,7 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded) + uart_insert_char(port, sr, USART_SR_ORE, c, flag); + } + +- if (threaded) +- spin_unlock_irqrestore(&port->lock, flags); +- else +- spin_unlock(&port->lock); ++ spin_unlock(&port->lock); + + tty_flip_buffer_push(tport); + } +@@ -654,7 +648,8 @@ static int stm32_usart_startup(struct uart_port *port) + + ret = request_threaded_irq(port->irq, stm32_usart_interrupt, + stm32_usart_threaded_interrupt, +- IRQF_NO_SUSPEND, name, port); ++ IRQF_ONESHOT | IRQF_NO_SUSPEND, ++ name, port); + if (ret) + return ret; + +@@ -1136,6 +1131,13 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port, + struct dma_async_tx_descriptor *desc = NULL; + int ret; + ++ /* ++ * Using DMA and threaded handler for the console could lead to ++ * deadlocks. 
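
The stm32-usart startup change adds IRQF_ONESHOT to request_threaded_irq(); the flag keeps the interrupt line masked from the hard handler's return until the threaded handler completes, so a level-triggered source cannot re-fire in a storm before the thread has drained it. Generic usage, handler names hypothetical:

	#include <linux/interrupt.h>

	/* The line stays masked until slow_handler() returns; mandatory when
	 * the hard handler is NULL, and often wanted even when it is not. */
	ret = request_threaded_irq(irq, quick_handler, slow_handler,
				   IRQF_ONESHOT, "demo-uart", port);
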
++ */ ++ if (uart_console(port)) ++ return -ENODEV; ++ + /* Request DMA RX channel */ + stm32port->rx_ch = dma_request_slave_channel(dev, "rx"); + if (!stm32port->rx_ch) { +diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c +index 510fd0572feb1..e3f429f1575e9 100644 +--- a/drivers/usb/dwc2/core_intr.c ++++ b/drivers/usb/dwc2/core_intr.c +@@ -707,7 +707,11 @@ static inline void dwc_handle_gpwrdn_disc_det(struct dwc2_hsotg *hsotg, + dwc2_writel(hsotg, gpwrdn_tmp, GPWRDN); + + hsotg->hibernated = 0; ++ ++#if IS_ENABLED(CONFIG_USB_DWC2_HOST) || \ ++ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) + hsotg->bus_suspended = 0; ++#endif + + if (gpwrdn & GPWRDN_IDSTS) { + hsotg->op_state = OTG_STATE_B_PERIPHERAL; +diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig +index 4abddbebd4b23..c691127bc805a 100644 +--- a/drivers/vfio/pci/Kconfig ++++ b/drivers/vfio/pci/Kconfig +@@ -2,6 +2,7 @@ + config VFIO_PCI + tristate "VFIO support for PCI devices" + depends on VFIO && PCI && EVENTFD ++ depends on MMU + select VFIO_VIRQFD + select IRQ_BYPASS_MANAGER + help +diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c +index a402adee8a215..47f21a6ca7fe9 100644 +--- a/drivers/vfio/pci/vfio_pci_config.c ++++ b/drivers/vfio/pci/vfio_pci_config.c +@@ -1581,7 +1581,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev) + if (len == 0xFF) { + len = vfio_ext_cap_len(vdev, ecap, epos); + if (len < 0) +- return ret; ++ return len; + } + } + +diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c +index fb4b385191f28..e83a7cd15c956 100644 +--- a/drivers/vfio/platform/vfio_platform_common.c ++++ b/drivers/vfio/platform/vfio_platform_common.c +@@ -289,7 +289,7 @@ err_irq: + vfio_platform_regions_cleanup(vdev); + err_reg: + mutex_unlock(&driver_lock); +- module_put(THIS_MODULE); ++ module_put(vdev->parent_module); + return ret; + } + +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index 5b82050b871a7..27c3680074814 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -1868,7 +1868,7 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, + trace_run_delayed_ref_head(fs_info, head, 0); + btrfs_delayed_ref_unlock(head); + btrfs_put_delayed_ref_head(head); +- return 0; ++ return ret; + } + + static struct btrfs_delayed_ref_head *btrfs_obtain_ref_head( +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c +index 47cd3a6dc6351..eed75bb0fedbf 100644 +--- a/fs/btrfs/file-item.c ++++ b/fs/btrfs/file-item.c +@@ -787,7 +787,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + u64 end_byte = bytenr + len; + u64 csum_end; + struct extent_buffer *leaf; +- int ret; ++ int ret = 0; + const u32 csum_size = fs_info->csum_size; + u32 blocksize_bits = fs_info->sectorsize_bits; + +@@ -805,6 +805,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + + ret = btrfs_search_slot(trans, root, &key, path, -1, 1); + if (ret > 0) { ++ ret = 0; + if (path->slots[0] == 0) + break; + path->slots[0]--; +@@ -861,7 +862,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + ret = btrfs_del_items(trans, root, path, + path->slots[0], del_nr); + if (ret) +- goto out; ++ break; + if (key.offset == bytenr) + break; + } else if (key.offset < bytenr && csum_end > end_byte) { +@@ -905,8 +906,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + ret = btrfs_split_item(trans, root, path, &key, offset); + if (ret && ret != -EAGAIN) { + btrfs_abort_transaction(trans, ret); +- goto out; 
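
The one-line vfio_pci_config fix above is a stale-error bug: on failure the function returned ret, left over from an earlier call, rather than the negative errno just produced in len. The reliable idiom is to return exactly the value you tested:

	len = vfio_ext_cap_len(vdev, ecap, epos);
	if (len < 0)
		return len;	/* propagate this call's error, not a stale variable */
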
++ break; + } ++ ret = 0; + + key.offset = end_byte - 1; + } else { +@@ -916,12 +918,41 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, + } + btrfs_release_path(path); + } +- ret = 0; +-out: + btrfs_free_path(path); + return ret; + } + ++static int find_next_csum_offset(struct btrfs_root *root, ++ struct btrfs_path *path, ++ u64 *next_offset) ++{ ++ const u32 nritems = btrfs_header_nritems(path->nodes[0]); ++ struct btrfs_key found_key; ++ int slot = path->slots[0] + 1; ++ int ret; ++ ++ if (nritems == 0 || slot >= nritems) { ++ ret = btrfs_next_leaf(root, path); ++ if (ret < 0) { ++ return ret; ++ } else if (ret > 0) { ++ *next_offset = (u64)-1; ++ return 0; ++ } ++ slot = path->slots[0]; ++ } ++ ++ btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); ++ ++ if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || ++ found_key.type != BTRFS_EXTENT_CSUM_KEY) ++ *next_offset = (u64)-1; ++ else ++ *next_offset = found_key.offset; ++ ++ return 0; ++} ++ + int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct btrfs_ordered_sum *sums) +@@ -937,7 +968,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, + u64 total_bytes = 0; + u64 csum_offset; + u64 bytenr; +- u32 nritems; + u32 ins_size; + int index = 0; + int found_next; +@@ -980,26 +1010,10 @@ again: + goto insert; + } + } else { +- int slot = path->slots[0] + 1; +- /* we didn't find a csum item, insert one */ +- nritems = btrfs_header_nritems(path->nodes[0]); +- if (!nritems || (path->slots[0] >= nritems - 1)) { +- ret = btrfs_next_leaf(root, path); +- if (ret < 0) { +- goto out; +- } else if (ret > 0) { +- found_next = 1; +- goto insert; +- } +- slot = path->slots[0]; +- } +- btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); +- if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || +- found_key.type != BTRFS_EXTENT_CSUM_KEY) { +- found_next = 1; +- goto insert; +- } +- next_offset = found_key.offset; ++ /* We didn't find a csum item, insert one. */ ++ ret = find_next_csum_offset(root, path, &next_offset); ++ if (ret < 0) ++ goto out; + found_next = 1; + goto insert; + } +@@ -1055,8 +1069,48 @@ extend_csum: + tmp = sums->len - total_bytes; + tmp >>= fs_info->sectorsize_bits; + WARN_ON(tmp < 1); ++ extend_nr = max_t(int, 1, tmp); ++ ++ /* ++ * A log tree can already have checksum items with a subset of ++ * the checksums we are trying to log. This can happen after ++ * doing a sequence of partial writes into prealloc extents and ++ * fsyncs in between, with a full fsync logging a larger subrange ++ * of an extent for which a previous fast fsync logged a smaller ++ * subrange. And this happens in particular due to merging file ++ * extent items when we complete an ordered extent for a range ++ * covered by a prealloc extent - this is done at ++ * btrfs_mark_extent_written(). ++ * ++ * So if we try to extend the previous checksum item, which has ++ * a range that ends at the start of the range we want to insert, ++ * make sure we don't extend beyond the start offset of the next ++ * checksum item. If we are at the last item in the leaf, then ++ * forget the optimization of extending and add a new checksum ++ * item - it is not worth the complexity of releasing the path, ++ * getting the first key for the next leaf, repeat the btree ++ * search, etc, because log trees are temporary anyway and it ++ * would only save a few bytes of leaf space. 
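
btrfs_del_csums() is reshaped above so that every failure breaks out of the loop instead of jumping over the cleanup: the function converges on a single exit where btrfs_free_path() always runs and ret carries whatever the last step produced (with ret explicitly reset to 0 on the paths that recover). The general shape, helpers hypothetical:

	int ret = 0;

	while (have_work()) {
		ret = do_one_step();
		if (ret)
			break;		/* converge on the one cleanup path */
	}
	release_resources();		/* runs exactly once, success or failure */
	return ret;
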
++ */ ++ if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) { ++ if (path->slots[0] + 1 >= ++ btrfs_header_nritems(path->nodes[0])) { ++ ret = find_next_csum_offset(root, path, &next_offset); ++ if (ret < 0) ++ goto out; ++ found_next = 1; ++ goto insert; ++ } ++ ++ ret = find_next_csum_offset(root, path, &next_offset); ++ if (ret < 0) ++ goto out; ++ ++ tmp = (next_offset - bytenr) >> fs_info->sectorsize_bits; ++ if (tmp <= INT_MAX) ++ extend_nr = min_t(int, extend_nr, tmp); ++ } + +- extend_nr = max_t(int, 1, (int)tmp); + diff = (csum_offset + extend_nr) * csum_size; + diff = min(diff, + MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 81b93c9c659b7..3bb8ce4969f31 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -3011,6 +3011,18 @@ out: + if (ret || truncated) { + u64 unwritten_start = start; + ++ /* ++ * If we failed to finish this ordered extent for any reason we ++ * need to make sure BTRFS_ORDERED_IOERR is set on the ordered ++ * extent, and mark the inode with the error if it wasn't ++ * already set. Any error during writeback would have already ++ * set the mapping error, so we need to set it if we're the ones ++ * marking this ordered extent as failed. ++ */ ++ if (ret && !test_and_set_bit(BTRFS_ORDERED_IOERR, ++ &ordered_extent->flags)) ++ mapping_set_error(ordered_extent->inode->i_mapping, -EIO); ++ + if (truncated) + unwritten_start += logical_len; + clear_extent_uptodate(io_tree, unwritten_start, end, NULL); +@@ -9076,6 +9088,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, + int ret2; + bool root_log_pinned = false; + bool dest_log_pinned = false; ++ bool need_abort = false; + + /* we only allow rename subvolume link between subvolumes */ + if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) +@@ -9132,6 +9145,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, + old_idx); + if (ret) + goto out_fail; ++ need_abort = true; + } + + /* And now for the dest. */ +@@ -9147,8 +9161,11 @@ static int btrfs_rename_exchange(struct inode *old_dir, + new_ino, + btrfs_ino(BTRFS_I(old_dir)), + new_idx); +- if (ret) ++ if (ret) { ++ if (need_abort) ++ btrfs_abort_transaction(trans, ret); + goto out_fail; ++ } + } + + /* Update inode version and ctime/mtime. */ +diff --git a/fs/btrfs/reflink.c b/fs/btrfs/reflink.c +index 53ee17f5e382c..238e713635d79 100644 +--- a/fs/btrfs/reflink.c ++++ b/fs/btrfs/reflink.c +@@ -207,10 +207,7 @@ static int clone_copy_inline_extent(struct inode *dst, + * inline extent's data to the page. + */ + ASSERT(key.offset > 0); +- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, +- inline_data, size, datal, +- comp_type); +- goto out; ++ goto copy_to_page; + } + } else if (i_size_read(dst) <= datal) { + struct btrfs_file_extent_item *ei; +@@ -226,13 +223,10 @@ static int clone_copy_inline_extent(struct inode *dst, + BTRFS_FILE_EXTENT_INLINE) + goto copy_inline_extent; + +- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, +- inline_data, size, datal, comp_type); +- goto out; ++ goto copy_to_page; + } + + copy_inline_extent: +- ret = 0; + /* + * We have no extent items, or we have an extent at offset 0 which may + * or may not be inlined. All these cases are dealt the same way. +@@ -244,11 +238,13 @@ copy_inline_extent: + * clone. Deal with all these cases by copying the inline extent + * data into the respective page at the destination inode. 
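
The btrfs ordered-extent hunk above reports an I/O error exactly once: test_and_set_bit() is atomic, so whichever path fails first both marks the ordered extent and sets the mapping error, while any later failure finds the bit already set and skips the redundant report. The core of the pattern, with hypothetical object names:

	/* The atomic test-and-set guarantees only the first failer reports. */
	if (ret && !test_and_set_bit(OBJ_IOERR, &obj->flags))
		mapping_set_error(mapping, -EIO);
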
+ */ +- ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, +- inline_data, size, datal, comp_type); +- goto out; ++ goto copy_to_page; + } + ++ /* ++ * Release path before starting a new transaction so we don't hold locks ++ * that would confuse lockdep. ++ */ + btrfs_release_path(path); + /* + * If we end up here it means were copy the inline extent into a leaf +@@ -285,11 +281,6 @@ copy_inline_extent: + ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end); + out: + if (!ret && !trans) { +- /* +- * Release path before starting a new transaction so we don't +- * hold locks that would confuse lockdep. +- */ +- btrfs_release_path(path); + /* + * No transaction here means we copied the inline extent into a + * page of the destination inode. +@@ -310,6 +301,21 @@ out: + *trans_out = trans; + + return ret; ++ ++copy_to_page: ++ /* ++ * Release our path because we don't need it anymore and also because ++ * copy_inline_to_page() needs to reserve data and metadata, which may ++ * need to flush delalloc when we are low on available space and ++ * therefore cause a deadlock if writeback of an inline extent needs to ++ * write to the same leaf or an ordered extent completion needs to write ++ * to the same leaf. ++ */ ++ btrfs_release_path(path); ++ ++ ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, ++ inline_data, size, datal, comp_type); ++ goto out; + } + + /** +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index d7f1599e69b1f..faae6ebd8a279 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -1574,7 +1574,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, + if (ret) + goto out; + +- btrfs_update_inode(trans, root, BTRFS_I(inode)); ++ ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); ++ if (ret) ++ goto out; + } + + ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; +@@ -1749,7 +1751,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, + + if (nlink != inode->i_nlink) { + set_nlink(inode, nlink); +- btrfs_update_inode(trans, root, BTRFS_I(inode)); ++ ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); ++ if (ret) ++ goto out; + } + BTRFS_I(inode)->index_cnt = (u64)-1; + +@@ -1787,6 +1791,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, + break; + + if (ret == 1) { ++ ret = 0; + if (path->slots[0] == 0) + break; + path->slots[0]--; +@@ -1799,17 +1804,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, + + ret = btrfs_del_item(trans, root, path); + if (ret) +- goto out; ++ break; + + btrfs_release_path(path); + inode = read_one_inode(root, key.offset); +- if (!inode) +- return -EIO; ++ if (!inode) { ++ ret = -EIO; ++ break; ++ } + + ret = fixup_inode_link_count(trans, root, inode); + iput(inode); + if (ret) +- goto out; ++ break; + + /* + * fixup on a directory may create new entries, +@@ -1818,8 +1825,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, + */ + key.offset = (u64)-1; + } +- ret = 0; +-out: + btrfs_release_path(path); + return ret; + } +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 77c84d6f1af6b..cbf37b2cf871e 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -3206,7 +3206,10 @@ static int ext4_split_extent_at(handle_t *handle, + ext4_ext_mark_unwritten(ex2); + + err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags); +- if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { ++ if (err != -ENOSPC && err != -EDQUOT) ++ goto 
out; ++ ++ if (EXT4_EXT_MAY_ZEROOUT & split_flag) { + if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { + if (split_flag & EXT4_EXT_DATA_VALID1) { + err = ext4_ext_zeroout(inode, ex2); +@@ -3232,25 +3235,22 @@ static int ext4_split_extent_at(handle_t *handle, + ext4_ext_pblock(&orig_ex)); + } + +- if (err) +- goto fix_extent_len; +- /* update the extent length and mark as initialized */ +- ex->ee_len = cpu_to_le16(ee_len); +- ext4_ext_try_to_merge(handle, inode, path, ex); +- err = ext4_ext_dirty(handle, inode, path + path->p_depth); +- if (err) +- goto fix_extent_len; +- +- /* update extent status tree */ +- err = ext4_zeroout_es(inode, &zero_ex); +- +- goto out; +- } else if (err) +- goto fix_extent_len; +- +-out: +- ext4_ext_show_leaf(inode, path); +- return err; ++ if (!err) { ++ /* update the extent length and mark as initialized */ ++ ex->ee_len = cpu_to_le16(ee_len); ++ ext4_ext_try_to_merge(handle, inode, path, ex); ++ err = ext4_ext_dirty(handle, inode, path + path->p_depth); ++ if (!err) ++ /* update extent status tree */ ++ err = ext4_zeroout_es(inode, &zero_ex); ++ /* If we failed at this point, we don't know in which ++ * state the extent tree exactly is so don't try to fix ++ * length of the original extent as it may do even more ++ * damage. ++ */ ++ goto out; ++ } ++ } + + fix_extent_len: + ex->ee_len = orig_ex.ee_len; +@@ -3260,6 +3260,9 @@ fix_extent_len: + */ + ext4_ext_dirty(handle, inode, path + path->p_depth); + return err; ++out: ++ ext4_ext_show_leaf(inode, path); ++ return err; + } + + /* +diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c +index eda14f630def4..c1c962b118012 100644 +--- a/fs/ext4/fast_commit.c ++++ b/fs/ext4/fast_commit.c +@@ -1288,28 +1288,29 @@ struct dentry_info_args { + }; + + static inline void tl_to_darg(struct dentry_info_args *darg, +- struct ext4_fc_tl *tl) ++ struct ext4_fc_tl *tl, u8 *val) + { +- struct ext4_fc_dentry_info *fcd; ++ struct ext4_fc_dentry_info fcd; + +- fcd = (struct ext4_fc_dentry_info *)ext4_fc_tag_val(tl); ++ memcpy(&fcd, val, sizeof(fcd)); + +- darg->parent_ino = le32_to_cpu(fcd->fc_parent_ino); +- darg->ino = le32_to_cpu(fcd->fc_ino); +- darg->dname = fcd->fc_dname; +- darg->dname_len = ext4_fc_tag_len(tl) - +- sizeof(struct ext4_fc_dentry_info); ++ darg->parent_ino = le32_to_cpu(fcd.fc_parent_ino); ++ darg->ino = le32_to_cpu(fcd.fc_ino); ++ darg->dname = val + offsetof(struct ext4_fc_dentry_info, fc_dname); ++ darg->dname_len = le16_to_cpu(tl->fc_len) - ++ sizeof(struct ext4_fc_dentry_info); + } + + /* Unlink replay function */ +-static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl) ++static int ext4_fc_replay_unlink(struct super_block *sb, struct ext4_fc_tl *tl, ++ u8 *val) + { + struct inode *inode, *old_parent; + struct qstr entry; + struct dentry_info_args darg; + int ret = 0; + +- tl_to_darg(&darg, tl); ++ tl_to_darg(&darg, tl, val); + + trace_ext4_fc_replay(sb, EXT4_FC_TAG_UNLINK, darg.ino, + darg.parent_ino, darg.dname_len); +@@ -1399,13 +1400,14 @@ out: + } + + /* Link replay function */ +-static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl) ++static int ext4_fc_replay_link(struct super_block *sb, struct ext4_fc_tl *tl, ++ u8 *val) + { + struct inode *inode; + struct dentry_info_args darg; + int ret = 0; + +- tl_to_darg(&darg, tl); ++ tl_to_darg(&darg, tl, val); + trace_ext4_fc_replay(sb, EXT4_FC_TAG_LINK, darg.ino, + darg.parent_ino, darg.dname_len); + +@@ -1450,9 +1452,10 @@ static int ext4_fc_record_modified_inode(struct super_block 
*sb, int ino) + /* + * Inode replay function + */ +-static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl) ++static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl, ++ u8 *val) + { +- struct ext4_fc_inode *fc_inode; ++ struct ext4_fc_inode fc_inode; + struct ext4_inode *raw_inode; + struct ext4_inode *raw_fc_inode; + struct inode *inode = NULL; +@@ -1460,9 +1463,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl) + int inode_len, ino, ret, tag = le16_to_cpu(tl->fc_tag); + struct ext4_extent_header *eh; + +- fc_inode = (struct ext4_fc_inode *)ext4_fc_tag_val(tl); ++ memcpy(&fc_inode, val, sizeof(fc_inode)); + +- ino = le32_to_cpu(fc_inode->fc_ino); ++ ino = le32_to_cpu(fc_inode.fc_ino); + trace_ext4_fc_replay(sb, tag, ino, 0, 0); + + inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL); +@@ -1474,12 +1477,13 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl) + + ext4_fc_record_modified_inode(sb, ino); + +- raw_fc_inode = (struct ext4_inode *)fc_inode->fc_raw_inode; ++ raw_fc_inode = (struct ext4_inode *) ++ (val + offsetof(struct ext4_fc_inode, fc_raw_inode)); + ret = ext4_get_fc_inode_loc(sb, ino, &iloc); + if (ret) + goto out; + +- inode_len = ext4_fc_tag_len(tl) - sizeof(struct ext4_fc_inode); ++ inode_len = le16_to_cpu(tl->fc_len) - sizeof(struct ext4_fc_inode); + raw_inode = ext4_raw_inode(&iloc); + + memcpy(raw_inode, raw_fc_inode, offsetof(struct ext4_inode, i_block)); +@@ -1547,14 +1551,15 @@ out: + * inode for which we are trying to create a dentry here, should already have + * been replayed before we start here. + */ +-static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl) ++static int ext4_fc_replay_create(struct super_block *sb, struct ext4_fc_tl *tl, ++ u8 *val) + { + int ret = 0; + struct inode *inode = NULL; + struct inode *dir = NULL; + struct dentry_info_args darg; + +- tl_to_darg(&darg, tl); ++ tl_to_darg(&darg, tl, val); + + trace_ext4_fc_replay(sb, EXT4_FC_TAG_CREAT, darg.ino, + darg.parent_ino, darg.dname_len); +@@ -1633,9 +1638,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino, + + /* Replay add range tag */ + static int ext4_fc_replay_add_range(struct super_block *sb, +- struct ext4_fc_tl *tl) ++ struct ext4_fc_tl *tl, u8 *val) + { +- struct ext4_fc_add_range *fc_add_ex; ++ struct ext4_fc_add_range fc_add_ex; + struct ext4_extent newex, *ex; + struct inode *inode; + ext4_lblk_t start, cur; +@@ -1645,15 +1650,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb, + struct ext4_ext_path *path = NULL; + int ret; + +- fc_add_ex = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl); +- ex = (struct ext4_extent *)&fc_add_ex->fc_ex; ++ memcpy(&fc_add_ex, val, sizeof(fc_add_ex)); ++ ex = (struct ext4_extent *)&fc_add_ex.fc_ex; + + trace_ext4_fc_replay(sb, EXT4_FC_TAG_ADD_RANGE, +- le32_to_cpu(fc_add_ex->fc_ino), le32_to_cpu(ex->ee_block), ++ le32_to_cpu(fc_add_ex.fc_ino), le32_to_cpu(ex->ee_block), + ext4_ext_get_actual_len(ex)); + +- inode = ext4_iget(sb, le32_to_cpu(fc_add_ex->fc_ino), +- EXT4_IGET_NORMAL); ++ inode = ext4_iget(sb, le32_to_cpu(fc_add_ex.fc_ino), EXT4_IGET_NORMAL); + if (IS_ERR(inode)) { + jbd_debug(1, "Inode not found."); + return 0; +@@ -1762,32 +1766,33 @@ next: + + /* Replay DEL_RANGE tag */ + static int +-ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl) ++ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl, ++ u8 *val) + { + struct inode *inode; +- struct 
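
Running through all of the fast-commit replay changes above is one fix: the tag-length-value payloads inside a journal block carry no alignment guarantee, so casting into them (the old ext4_fc_tag_val() style) can trap on strict-alignment architectures. Each record is now memcpy()'d into an aligned stack copy before any field is read:

	struct ext4_fc_tl tl;	/* aligned local copy of the on-disk header */

	/* memcpy() is safe for any source alignment; dereferencing a cast
	 * pointer into the block buffer is not. */
	memcpy(&tl, cur, sizeof(tl));
	len = le16_to_cpu(tl.fc_len);
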
ext4_fc_del_range *lrange; ++ struct ext4_fc_del_range lrange; + struct ext4_map_blocks map; + ext4_lblk_t cur, remaining; + int ret; + +- lrange = (struct ext4_fc_del_range *)ext4_fc_tag_val(tl); +- cur = le32_to_cpu(lrange->fc_lblk); +- remaining = le32_to_cpu(lrange->fc_len); ++ memcpy(&lrange, val, sizeof(lrange)); ++ cur = le32_to_cpu(lrange.fc_lblk); ++ remaining = le32_to_cpu(lrange.fc_len); + + trace_ext4_fc_replay(sb, EXT4_FC_TAG_DEL_RANGE, +- le32_to_cpu(lrange->fc_ino), cur, remaining); ++ le32_to_cpu(lrange.fc_ino), cur, remaining); + +- inode = ext4_iget(sb, le32_to_cpu(lrange->fc_ino), EXT4_IGET_NORMAL); ++ inode = ext4_iget(sb, le32_to_cpu(lrange.fc_ino), EXT4_IGET_NORMAL); + if (IS_ERR(inode)) { +- jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange->fc_ino)); ++ jbd_debug(1, "Inode %d not found", le32_to_cpu(lrange.fc_ino)); + return 0; + } + + ret = ext4_fc_record_modified_inode(sb, inode->i_ino); + + jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n", +- inode->i_ino, le32_to_cpu(lrange->fc_lblk), +- le32_to_cpu(lrange->fc_len)); ++ inode->i_ino, le32_to_cpu(lrange.fc_lblk), ++ le32_to_cpu(lrange.fc_len)); + while (remaining > 0) { + map.m_lblk = cur; + map.m_len = remaining; +@@ -1808,8 +1813,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl) + } + + ret = ext4_punch_hole(inode, +- le32_to_cpu(lrange->fc_lblk) << sb->s_blocksize_bits, +- le32_to_cpu(lrange->fc_len) << sb->s_blocksize_bits); ++ le32_to_cpu(lrange.fc_lblk) << sb->s_blocksize_bits, ++ le32_to_cpu(lrange.fc_len) << sb->s_blocksize_bits); + if (ret) + jbd_debug(1, "ext4_punch_hole returned %d", ret); + ext4_ext_replay_shrink_inode(inode, +@@ -1925,11 +1930,11 @@ static int ext4_fc_replay_scan(journal_t *journal, + struct ext4_sb_info *sbi = EXT4_SB(sb); + struct ext4_fc_replay_state *state; + int ret = JBD2_FC_REPLAY_CONTINUE; +- struct ext4_fc_add_range *ext; +- struct ext4_fc_tl *tl; +- struct ext4_fc_tail *tail; +- __u8 *start, *end; +- struct ext4_fc_head *head; ++ struct ext4_fc_add_range ext; ++ struct ext4_fc_tl tl; ++ struct ext4_fc_tail tail; ++ __u8 *start, *end, *cur, *val; ++ struct ext4_fc_head head; + struct ext4_extent *ex; + + state = &sbi->s_fc_replay_state; +@@ -1956,15 +1961,17 @@ static int ext4_fc_replay_scan(journal_t *journal, + } + + state->fc_replay_expected_off++; +- fc_for_each_tl(start, end, tl) { ++ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) { ++ memcpy(&tl, cur, sizeof(tl)); ++ val = cur + sizeof(tl); + jbd_debug(3, "Scan phase, tag:%s, blk %lld\n", +- tag2str(le16_to_cpu(tl->fc_tag)), bh->b_blocknr); +- switch (le16_to_cpu(tl->fc_tag)) { ++ tag2str(le16_to_cpu(tl.fc_tag)), bh->b_blocknr); ++ switch (le16_to_cpu(tl.fc_tag)) { + case EXT4_FC_TAG_ADD_RANGE: +- ext = (struct ext4_fc_add_range *)ext4_fc_tag_val(tl); +- ex = (struct ext4_extent *)&ext->fc_ex; ++ memcpy(&ext, val, sizeof(ext)); ++ ex = (struct ext4_extent *)&ext.fc_ex; + ret = ext4_fc_record_regions(sb, +- le32_to_cpu(ext->fc_ino), ++ le32_to_cpu(ext.fc_ino), + le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex), + ext4_ext_get_actual_len(ex)); + if (ret < 0) +@@ -1978,18 +1985,18 @@ static int ext4_fc_replay_scan(journal_t *journal, + case EXT4_FC_TAG_INODE: + case EXT4_FC_TAG_PAD: + state->fc_cur_tag++; +- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl, +- sizeof(*tl) + ext4_fc_tag_len(tl)); ++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur, ++ sizeof(tl) + le16_to_cpu(tl.fc_len)); + break; + case EXT4_FC_TAG_TAIL: + state->fc_cur_tag++; +- tail 
= (struct ext4_fc_tail *)ext4_fc_tag_val(tl); +- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl, +- sizeof(*tl) + ++ memcpy(&tail, val, sizeof(tail)); ++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur, ++ sizeof(tl) + + offsetof(struct ext4_fc_tail, + fc_crc)); +- if (le32_to_cpu(tail->fc_tid) == expected_tid && +- le32_to_cpu(tail->fc_crc) == state->fc_crc) { ++ if (le32_to_cpu(tail.fc_tid) == expected_tid && ++ le32_to_cpu(tail.fc_crc) == state->fc_crc) { + state->fc_replay_num_tags = state->fc_cur_tag; + state->fc_regions_valid = + state->fc_regions_used; +@@ -2000,19 +2007,19 @@ static int ext4_fc_replay_scan(journal_t *journal, + state->fc_crc = 0; + break; + case EXT4_FC_TAG_HEAD: +- head = (struct ext4_fc_head *)ext4_fc_tag_val(tl); +- if (le32_to_cpu(head->fc_features) & ++ memcpy(&head, val, sizeof(head)); ++ if (le32_to_cpu(head.fc_features) & + ~EXT4_FC_SUPPORTED_FEATURES) { + ret = -EOPNOTSUPP; + break; + } +- if (le32_to_cpu(head->fc_tid) != expected_tid) { ++ if (le32_to_cpu(head.fc_tid) != expected_tid) { + ret = JBD2_FC_REPLAY_STOP; + break; + } + state->fc_cur_tag++; +- state->fc_crc = ext4_chksum(sbi, state->fc_crc, tl, +- sizeof(*tl) + ext4_fc_tag_len(tl)); ++ state->fc_crc = ext4_chksum(sbi, state->fc_crc, cur, ++ sizeof(tl) + le16_to_cpu(tl.fc_len)); + break; + default: + ret = state->fc_replay_num_tags ? +@@ -2036,11 +2043,11 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, + { + struct super_block *sb = journal->j_private; + struct ext4_sb_info *sbi = EXT4_SB(sb); +- struct ext4_fc_tl *tl; +- __u8 *start, *end; ++ struct ext4_fc_tl tl; ++ __u8 *start, *end, *cur, *val; + int ret = JBD2_FC_REPLAY_CONTINUE; + struct ext4_fc_replay_state *state = &sbi->s_fc_replay_state; +- struct ext4_fc_tail *tail; ++ struct ext4_fc_tail tail; + + if (pass == PASS_SCAN) { + state->fc_current_pass = PASS_SCAN; +@@ -2067,49 +2074,52 @@ static int ext4_fc_replay(journal_t *journal, struct buffer_head *bh, + start = (u8 *)bh->b_data; + end = (__u8 *)bh->b_data + journal->j_blocksize - 1; + +- fc_for_each_tl(start, end, tl) { ++ for (cur = start; cur < end; cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) { ++ memcpy(&tl, cur, sizeof(tl)); ++ val = cur + sizeof(tl); ++ + if (state->fc_replay_num_tags == 0) { + ret = JBD2_FC_REPLAY_STOP; + ext4_fc_set_bitmaps_and_counters(sb); + break; + } + jbd_debug(3, "Replay phase, tag:%s\n", +- tag2str(le16_to_cpu(tl->fc_tag))); ++ tag2str(le16_to_cpu(tl.fc_tag))); + state->fc_replay_num_tags--; +- switch (le16_to_cpu(tl->fc_tag)) { ++ switch (le16_to_cpu(tl.fc_tag)) { + case EXT4_FC_TAG_LINK: +- ret = ext4_fc_replay_link(sb, tl); ++ ret = ext4_fc_replay_link(sb, &tl, val); + break; + case EXT4_FC_TAG_UNLINK: +- ret = ext4_fc_replay_unlink(sb, tl); ++ ret = ext4_fc_replay_unlink(sb, &tl, val); + break; + case EXT4_FC_TAG_ADD_RANGE: +- ret = ext4_fc_replay_add_range(sb, tl); ++ ret = ext4_fc_replay_add_range(sb, &tl, val); + break; + case EXT4_FC_TAG_CREAT: +- ret = ext4_fc_replay_create(sb, tl); ++ ret = ext4_fc_replay_create(sb, &tl, val); + break; + case EXT4_FC_TAG_DEL_RANGE: +- ret = ext4_fc_replay_del_range(sb, tl); ++ ret = ext4_fc_replay_del_range(sb, &tl, val); + break; + case EXT4_FC_TAG_INODE: +- ret = ext4_fc_replay_inode(sb, tl); ++ ret = ext4_fc_replay_inode(sb, &tl, val); + break; + case EXT4_FC_TAG_PAD: + trace_ext4_fc_replay(sb, EXT4_FC_TAG_PAD, 0, +- ext4_fc_tag_len(tl), 0); ++ le16_to_cpu(tl.fc_len), 0); + break; + case EXT4_FC_TAG_TAIL: + trace_ext4_fc_replay(sb, EXT4_FC_TAG_TAIL, 0, +- 
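
The fc_for_each_tl() macro is dropped in favor of this open-coded TLV walk, since the macro could not copy the header to an aligned buffer first. The walk reads the fixed header, derives the payload pointer, and advances by header size plus payload length:

	for (cur = start; cur < end;
	     cur = cur + sizeof(tl) + le16_to_cpu(tl.fc_len)) {
		memcpy(&tl, cur, sizeof(tl));	/* fixed tag+length header */
		val = cur + sizeof(tl);		/* payload begins right after */
		/* ... dispatch on le16_to_cpu(tl.fc_tag) ... */
	}
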
ext4_fc_tag_len(tl), 0); +- tail = (struct ext4_fc_tail *)ext4_fc_tag_val(tl); +- WARN_ON(le32_to_cpu(tail->fc_tid) != expected_tid); ++ le16_to_cpu(tl.fc_len), 0); ++ memcpy(&tail, val, sizeof(tail)); ++ WARN_ON(le32_to_cpu(tail.fc_tid) != expected_tid); + break; + case EXT4_FC_TAG_HEAD: + break; + default: +- trace_ext4_fc_replay(sb, le16_to_cpu(tl->fc_tag), 0, +- ext4_fc_tag_len(tl), 0); ++ trace_ext4_fc_replay(sb, le16_to_cpu(tl.fc_tag), 0, ++ le16_to_cpu(tl.fc_len), 0); + ret = -ECANCELED; + break; + } +diff --git a/fs/ext4/fast_commit.h b/fs/ext4/fast_commit.h +index b77f70f55a622..937c381b4c85e 100644 +--- a/fs/ext4/fast_commit.h ++++ b/fs/ext4/fast_commit.h +@@ -153,13 +153,6 @@ struct ext4_fc_replay_state { + #define region_last(__region) (((__region)->lblk) + ((__region)->len) - 1) + #endif + +-#define fc_for_each_tl(__start, __end, __tl) \ +- for (tl = (struct ext4_fc_tl *)(__start); \ +- (__u8 *)tl < (__u8 *)(__end); \ +- tl = (struct ext4_fc_tl *)((__u8 *)tl + \ +- sizeof(struct ext4_fc_tl) + \ +- + le16_to_cpu(tl->fc_len))) +- + static inline const char *tag2str(__u16 tag) + { + switch (tag) { +@@ -186,16 +179,4 @@ static inline const char *tag2str(__u16 tag) + } + } + +-/* Get length of a particular tlv */ +-static inline int ext4_fc_tag_len(struct ext4_fc_tl *tl) +-{ +- return le16_to_cpu(tl->fc_len); +-} +- +-/* Get a pointer to "value" of a tlv */ +-static inline __u8 *ext4_fc_tag_val(struct ext4_fc_tl *tl) +-{ +- return (__u8 *)tl + sizeof(*tl); +-} +- + #endif /* __FAST_COMMIT_H__ */ +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c +index 71d321b3b9844..edbaed073ac5c 100644 +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -322,14 +322,16 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) + if (is_directory) { + count = ext4_used_dirs_count(sb, gdp) - 1; + ext4_used_dirs_set(sb, gdp, count); +- percpu_counter_dec(&sbi->s_dirs_counter); ++ if (percpu_counter_initialized(&sbi->s_dirs_counter)) ++ percpu_counter_dec(&sbi->s_dirs_counter); + } + ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh, + EXT4_INODES_PER_GROUP(sb) / 8); + ext4_group_desc_csum_set(sb, block_group, gdp); + ext4_unlock_group(sb, block_group); + +- percpu_counter_inc(&sbi->s_freeinodes_counter); ++ if (percpu_counter_initialized(&sbi->s_freeinodes_counter)) ++ percpu_counter_inc(&sbi->s_freeinodes_counter); + if (sbi->s_log_groups_per_flex) { + struct flex_groups *fg; + +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index a02fadf4fc84e..d24cb3dc79fff 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -2715,7 +2715,7 @@ static int ext4_mb_init_backend(struct super_block *sb) + */ + if (sbi->s_es->s_log_groups_per_flex >= 32) { + ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); +- goto err_freesgi; ++ goto err_freebuddy; + } + sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, + BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 77c1cb2582623..0e3a847b5d279 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -4449,14 +4449,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + } + + if (sb->s_blocksize != blocksize) { ++ /* ++ * bh must be released before kill_bdev(), otherwise ++ * it won't be freed and its page also. kill_bdev() ++ * is called by sb_set_blocksize(). 
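
Both ext4_fill_super() hunks that follow enforce the same ordering rule, spelled out in the patch's new comment: sb_set_blocksize() and ext4_blkdev_remove() reach kill_bdev(), which tears down the block device's page cache, so any buffer_head still held must be released first or its page is never freed. In outline:

	/* Release the buffer_head before anything that reaches kill_bdev(). */
	brelse(bh);
	bh = NULL;
	if (!sb_set_blocksize(sb, blocksize))
		goto failed_mount;
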
++ */ ++ brelse(bh); + /* Validate the filesystem blocksize */ + if (!sb_set_blocksize(sb, blocksize)) { + ext4_msg(sb, KERN_ERR, "bad block size %d", + blocksize); ++ bh = NULL; + goto failed_mount; + } + +- brelse(bh); + logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; + offset = do_div(logical_sb_block, blocksize); + bh = ext4_sb_bread_unmovable(sb, logical_sb_block); +@@ -5176,8 +5182,9 @@ failed_mount: + kfree(get_qf_name(sb, sbi, i)); + #endif + fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); +- ext4_blkdev_remove(sbi); ++ /* ext4_blkdev_remove() calls kill_bdev(), release bh before it. */ + brelse(bh); ++ ext4_blkdev_remove(sbi); + out_fail: + sb->s_fs_info = NULL; + kfree(sbi->s_blockgroup_lock); +diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c +index 9567520d79f79..7c2ba81213da0 100644 +--- a/fs/gfs2/glock.c ++++ b/fs/gfs2/glock.c +@@ -1465,9 +1465,11 @@ void gfs2_glock_dq(struct gfs2_holder *gh) + glock_blocked_by_withdraw(gl) && + gh->gh_gl != sdp->sd_jinode_gl) { + sdp->sd_glock_dqs_held++; ++ spin_unlock(&gl->gl_lockref.lock); + might_sleep(); + wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_RECOVERY, + TASK_UNINTERRUPTIBLE); ++ spin_lock(&gl->gl_lockref.lock); + } + if (gh->gh_flags & GL_NOCACHE) + handle_callback(gl, LM_ST_UNLOCKED, 0, false); +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 144056b0cac92..359d1abb089c4 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -653,7 +653,7 @@ struct io_unlink { + struct io_completion { + struct file *file; + struct list_head list; +- int cflags; ++ u32 cflags; + }; + + struct io_async_connect { +@@ -1476,7 +1476,33 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, + return ret; + } + +-static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) ++static inline bool req_ref_inc_not_zero(struct io_kiocb *req) ++{ ++ return refcount_inc_not_zero(&req->refs); ++} ++ ++static inline bool req_ref_sub_and_test(struct io_kiocb *req, int refs) ++{ ++ return refcount_sub_and_test(refs, &req->refs); ++} ++ ++static inline bool req_ref_put_and_test(struct io_kiocb *req) ++{ ++ return refcount_dec_and_test(&req->refs); ++} ++ ++static inline void req_ref_put(struct io_kiocb *req) ++{ ++ refcount_dec(&req->refs); ++} ++ ++static inline void req_ref_get(struct io_kiocb *req) ++{ ++ refcount_inc(&req->refs); ++} ++ ++static void __io_cqring_fill_event(struct io_kiocb *req, long res, ++ unsigned int cflags) + { + struct io_ring_ctx *ctx = req->ctx; + struct io_uring_cqe *cqe; +@@ -1511,7 +1537,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) + io_clean_op(req); + req->result = res; + req->compl.cflags = cflags; +- refcount_inc(&req->refs); ++ req_ref_get(req); + list_add_tail(&req->compl.list, &ctx->cq_overflow_list); + } + } +@@ -1533,7 +1559,7 @@ static void io_req_complete_post(struct io_kiocb *req, long res, + * If we're the last reference to this request, add to our locked + * free_list cache. 
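
The gfs2_glock_dq() fix above follows from a hard rule: wait_on_bit() with TASK_UNINTERRUPTIBLE sleeps, and sleeping while holding a spinlock is a bug (the adjacent might_sleep() is precisely the debug assertion for that). The lock is dropped around the wait and re-taken afterwards; anything computed under the lock before the unlock must be treated as stale once it is re-acquired. With hypothetical names:

	spin_unlock(&obj->lock);	/* never sleep with a spinlock held */
	wait_on_bit(&obj->flags, MY_WAIT_BIT, TASK_UNINTERRUPTIBLE);
	spin_lock(&obj->lock);		/* re-taken: revalidate cached state */
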
+ */ +- if (refcount_dec_and_test(&req->refs)) { ++ if (req_ref_put_and_test(req)) { + struct io_comp_state *cs = &ctx->submit_state.comp; + + if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { +@@ -2112,7 +2138,7 @@ static void io_submit_flush_completions(struct io_comp_state *cs, + req = cs->reqs[i]; + + /* submission and completion refs */ +- if (refcount_sub_and_test(2, &req->refs)) ++ if (req_ref_sub_and_test(req, 2)) + io_req_free_batch(&rb, req, &ctx->submit_state); + } + +@@ -2128,7 +2154,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) + { + struct io_kiocb *nxt = NULL; + +- if (refcount_dec_and_test(&req->refs)) { ++ if (req_ref_put_and_test(req)) { + nxt = io_req_find_next(req); + __io_free_req(req); + } +@@ -2137,7 +2163,7 @@ static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) + + static void io_put_req(struct io_kiocb *req) + { +- if (refcount_dec_and_test(&req->refs)) ++ if (req_ref_put_and_test(req)) + io_free_req(req); + } + +@@ -2160,14 +2186,14 @@ static void io_free_req_deferred(struct io_kiocb *req) + + static inline void io_put_req_deferred(struct io_kiocb *req, int refs) + { +- if (refcount_sub_and_test(refs, &req->refs)) ++ if (req_ref_sub_and_test(req, refs)) + io_free_req_deferred(req); + } + + static void io_double_put_req(struct io_kiocb *req) + { + /* drop both submit and complete references */ +- if (refcount_sub_and_test(2, &req->refs)) ++ if (req_ref_sub_and_test(req, 2)) + io_free_req(req); + } + +@@ -2253,7 +2279,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, + __io_cqring_fill_event(req, req->result, cflags); + (*nr_events)++; + +- if (refcount_dec_and_test(&req->refs)) ++ if (req_ref_put_and_test(req)) + io_req_free_batch(&rb, req, &ctx->submit_state); + } + +@@ -2495,7 +2521,7 @@ static bool io_rw_reissue(struct io_kiocb *req) + lockdep_assert_held(&req->ctx->uring_lock); + + if (io_resubmit_prep(req)) { +- refcount_inc(&req->refs); ++ req_ref_get(req); + io_queue_async_work(req); + return true; + } +@@ -3208,7 +3234,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, + list_del_init(&wait->entry); + + /* submit ref gets dropped, acquire a new one */ +- refcount_inc(&req->refs); ++ req_ref_get(req); + io_req_task_queue(req); + return 1; + } +@@ -4953,7 +4979,7 @@ static void io_poll_remove_double(struct io_kiocb *req) + spin_lock(&head->lock); + list_del_init(&poll->wait.entry); + if (poll->wait.private) +- refcount_dec(&req->refs); ++ req_ref_put(req); + poll->head = NULL; + spin_unlock(&head->lock); + } +@@ -5019,7 +5045,7 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, + poll->wait.func(&poll->wait, mode, sync, key); + } + } +- refcount_dec(&req->refs); ++ req_ref_put(req); + return 1; + } + +@@ -5062,7 +5088,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, + return; + } + io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake); +- refcount_inc(&req->refs); ++ req_ref_get(req); + poll->wait.private = req; + *poll_ptr = poll; + } +@@ -6211,7 +6237,7 @@ static void io_wq_submit_work(struct io_wq_work *work) + /* avoid locking problems by failing it from a clean context */ + if (ret) { + /* io-wq is going to take one down */ +- refcount_inc(&req->refs); ++ req_ref_get(req); + io_req_task_queue_fail(req, ret); + } + } +@@ -6263,15 +6289,17 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) + * We don't expect the list to be empty, that will only happen if 
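
The long series of io_uring hunks here is mechanical: every raw refcount_t operation on a request is funneled through small req_ref_*() inline wrappers. That gives the request reference model a single audit point and lets its representation change later without touching dozens of call sites. The wrappers are deliberately thin, for example:

	static inline bool req_ref_put_and_test(struct io_kiocb *req)
	{
		return refcount_dec_and_test(&req->refs);
	}

	/* call sites then read as intent, not mechanism: */
	if (req_ref_put_and_test(req))
		io_free_req(req);
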
we + * race with the completion of the linked work. + */ +- if (prev && refcount_inc_not_zero(&prev->refs)) ++ if (prev) { + io_remove_next_linked(prev); +- else +- prev = NULL; ++ if (!req_ref_inc_not_zero(prev)) ++ prev = NULL; ++ } + spin_unlock_irqrestore(&ctx->completion_lock, flags); + + if (prev) { + io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); + io_put_req_deferred(prev, 1); ++ io_put_req_deferred(req, 1); + } else { + io_req_complete_post(req, -ETIME, 0); + io_put_req_deferred(req, 1); +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index 5edc1d0cf115f..0e45893c0cda0 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -1857,6 +1857,45 @@ out: + return ret; + } + ++/* ++ * zero out partial blocks of one cluster. ++ * ++ * start: file offset where zero starts, will be made upper block aligned. ++ * len: it will be trimmed to the end of current cluster if "start + len" ++ * is bigger than it. ++ */ ++static int ocfs2_zeroout_partial_cluster(struct inode *inode, ++ u64 start, u64 len) ++{ ++ int ret; ++ u64 start_block, end_block, nr_blocks; ++ u64 p_block, offset; ++ u32 cluster, p_cluster, nr_clusters; ++ struct super_block *sb = inode->i_sb; ++ u64 end = ocfs2_align_bytes_to_clusters(sb, start); ++ ++ if (start + len < end) ++ end = start + len; ++ ++ start_block = ocfs2_blocks_for_bytes(sb, start); ++ end_block = ocfs2_blocks_for_bytes(sb, end); ++ nr_blocks = end_block - start_block; ++ if (!nr_blocks) ++ return 0; ++ ++ cluster = ocfs2_bytes_to_clusters(sb, start); ++ ret = ocfs2_get_clusters(inode, cluster, &p_cluster, ++ &nr_clusters, NULL); ++ if (ret) ++ return ret; ++ if (!p_cluster) ++ return 0; ++ ++ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster); ++ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset; ++ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS); ++} ++ + /* + * Parts of this function taken from xfs_change_file_space() + */ +@@ -1867,7 +1906,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, + { + int ret; + s64 llen; +- loff_t size; ++ loff_t size, orig_isize; + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct buffer_head *di_bh = NULL; + handle_t *handle; +@@ -1898,6 +1937,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, + goto out_inode_unlock; + } + ++ orig_isize = i_size_read(inode); + switch (sr->l_whence) { + case 0: /*SEEK_SET*/ + break; +@@ -1905,7 +1945,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, + sr->l_start += f_pos; + break; + case 2: /*SEEK_END*/ +- sr->l_start += i_size_read(inode); ++ sr->l_start += orig_isize; + break; + default: + ret = -EINVAL; +@@ -1959,6 +1999,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, + default: + ret = -EINVAL; + } ++ ++ /* zeroout eof blocks in the cluster. 
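
ocfs2_zeroout_partial_cluster() above translates the byte range to a physical block run (via ocfs2_get_clusters()) and zeroes it with sb_issue_zeroout(), which pushes the work to the block layer rather than dirtying pages through the page cache; a hole (p_cluster == 0) needs no zeroing at all. The final call, in isolation:

	/* Zero nr_blocks on-disk blocks starting at p_block; the block layer
	 * can use hardware write-zeroes support where available. */
	ret = sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
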
*/ ++ if (!ret && change_size && orig_isize < size) { ++ ret = ocfs2_zeroout_partial_cluster(inode, orig_isize, ++ size - orig_isize); ++ if (!ret) ++ i_size_write(inode, size); ++ } + up_write(&OCFS2_I(inode)->ip_alloc_sem); + if (ret) { + mlog_errno(ret); +@@ -1975,9 +2023,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode, + goto out_inode_unlock; + } + +- if (change_size && i_size_read(inode) < size) +- i_size_write(inode, size); +- + inode->i_ctime = inode->i_mtime = current_time(inode); + ret = ocfs2_mark_inode_dirty(handle, inode, di_bh); + if (ret < 0) +diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h +index 9c68b2da14c63..e5a4c68093fc2 100644 +--- a/include/linux/mlx5/mlx5_ifc.h ++++ b/include/linux/mlx5/mlx5_ifc.h +@@ -1260,6 +1260,8 @@ enum mlx5_fc_bulk_alloc_bitmask { + + #define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum)) + ++#define MLX5_FT_MAX_MULTIPATH_LEVEL 63 ++ + enum { + MLX5_STEERING_FORMAT_CONNECTX_5 = 0, + MLX5_STEERING_FORMAT_CONNECTX_6DX = 1, +diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h +index 5e772392a3795..136b1d996075c 100644 +--- a/include/linux/pgtable.h ++++ b/include/linux/pgtable.h +@@ -432,6 +432,14 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres + * To be differentiate with macro pte_mkyoung, this macro is used on platforms + * where software maintains page access bit. + */ ++#ifndef pte_sw_mkyoung ++static inline pte_t pte_sw_mkyoung(pte_t pte) ++{ ++ return pte; ++} ++#define pte_sw_mkyoung pte_sw_mkyoung ++#endif ++ + #ifndef pte_savedwrite + #define pte_savedwrite pte_write + #endif +diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h +index fafc1beea504a..9837fb011f2fb 100644 +--- a/include/linux/platform_data/ti-sysc.h ++++ b/include/linux/platform_data/ti-sysc.h +@@ -50,6 +50,7 @@ struct sysc_regbits { + s8 emufree_shift; + }; + ++#define SYSC_QUIRK_REINIT_ON_RESUME BIT(27) + #define SYSC_QUIRK_GPMC_DEBUG BIT(26) + #define SYSC_MODULE_QUIRK_ENA_RESETDONE BIT(25) + #define SYSC_MODULE_QUIRK_PRUSS BIT(24) +diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h +index 48ecca8530ffa..b655d8666f555 100644 +--- a/include/net/caif/caif_dev.h ++++ b/include/net/caif/caif_dev.h +@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer); + * The link_support layer is used to add any Link Layer specific + * framing. + */ +-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, ++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + struct cflayer *link_support, int head_room, + struct cflayer **layer, int (**rcv_func)( + struct sk_buff *, struct net_device *, +diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h +index 2aa5e91d84576..8819ff4db35a6 100644 +--- a/include/net/caif/cfcnfg.h ++++ b/include/net/caif/cfcnfg.h +@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg); + * @fcs: Specify if checksum is used in CAIF Framing Layer. + * @head_room: Head space needed by link specific protocol. 
+ */ +-void ++int + cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + struct net_device *dev, struct cflayer *phy_layer, + enum cfcnfg_phy_preference pref, +diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h +index 14a55e03bb3ce..67cce8757175a 100644 +--- a/include/net/caif/cfserl.h ++++ b/include/net/caif/cfserl.h +@@ -9,4 +9,5 @@ + #include <net/caif/caif_layer.h> + + struct cflayer *cfserl_create(int instance, bool use_stx); ++void cfserl_release(struct cflayer *layer); + #endif +diff --git a/include/net/tls.h b/include/net/tls.h +index 3eccb525e8f79..8341a8d1e8073 100644 +--- a/include/net/tls.h ++++ b/include/net/tls.h +@@ -193,7 +193,11 @@ struct tls_offload_context_tx { + (sizeof(struct tls_offload_context_tx) + TLS_DRIVER_STATE_SIZE_TX) + + enum tls_context_flags { +- TLS_RX_SYNC_RUNNING = 0, ++ /* tls_device_down was called after the netdev went down, device state ++ * was released, and kTLS works in software, even though rx_conf is ++ * still TLS_HW (needed for transition). ++ */ ++ TLS_RX_DEV_DEGRADED = 0, + /* Unlike RX where resync is driven entirely by the core in TX only + * the driver knows when things went out of sync, so we need the flag + * to be atomic. +@@ -266,6 +270,7 @@ struct tls_context { + + /* cache cold stuff */ + struct proto *sk_proto; ++ struct sock *sk; + + void (*sk_destruct)(struct sock *sk); + +@@ -448,6 +453,9 @@ static inline u16 tls_user_config(struct tls_context *ctx, bool tx) + struct sk_buff * + tls_validate_xmit_skb(struct sock *sk, struct net_device *dev, + struct sk_buff *skb); ++struct sk_buff * ++tls_validate_xmit_skb_sw(struct sock *sk, struct net_device *dev, ++ struct sk_buff *skb); + + static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk) + { +diff --git a/init/main.c b/init/main.c +index 53b278845b886..5bd1a25f1d6f5 100644 +--- a/init/main.c ++++ b/init/main.c +@@ -1514,7 +1514,7 @@ static noinline void __init kernel_init_freeable(void) + */ + set_mems_allowed(node_states[N_MEMORY]); + +- cad_pid = task_pid(current); ++ cad_pid = get_pid(task_pid(current)); + + smp_prepare_cpus(setup_max_cpus); + +diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c +index 308427fe03a3a..6140e91e9c891 100644 +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -14,6 +14,7 @@ + #include <linux/jiffies.h> + #include <linux/pid_namespace.h> + #include <linux/proc_ns.h> ++#include <linux/security.h> + + #include "../../lib/kstrtox.h" + +@@ -741,11 +742,13 @@ bpf_base_func_proto(enum bpf_func_id func_id) + case BPF_FUNC_probe_read_user: + return &bpf_probe_read_user_proto; + case BPF_FUNC_probe_read_kernel: +- return &bpf_probe_read_kernel_proto; ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? ++ NULL : &bpf_probe_read_kernel_proto; + case BPF_FUNC_probe_read_user_str: + return &bpf_probe_read_user_str_proto; + case BPF_FUNC_probe_read_kernel_str: +- return &bpf_probe_read_kernel_str_proto; ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? 
++ NULL : &bpf_probe_read_kernel_str_proto; + case BPF_FUNC_snprintf_btf: + return &bpf_snprintf_btf_proto; + default: +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index b0c45d923f0f9..9bb3d2823f442 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -215,16 +215,11 @@ const struct bpf_func_proto bpf_probe_read_user_str_proto = { + static __always_inline int + bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) + { +- int ret = security_locked_down(LOCKDOWN_BPF_READ); ++ int ret; + +- if (unlikely(ret < 0)) +- goto fail; + ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) +- goto fail; +- return ret; +-fail: +- memset(dst, 0, size); ++ memset(dst, 0, size); + return ret; + } + +@@ -246,10 +241,7 @@ const struct bpf_func_proto bpf_probe_read_kernel_proto = { + static __always_inline int + bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) + { +- int ret = security_locked_down(LOCKDOWN_BPF_READ); +- +- if (unlikely(ret < 0)) +- goto fail; ++ int ret; + + /* + * The strncpy_from_kernel_nofault() call will likely not fill the +@@ -262,11 +254,7 @@ bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr) + */ + ret = strncpy_from_kernel_nofault(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) +- goto fail; +- +- return ret; +-fail: +- memset(dst, 0, size); ++ memset(dst, 0, size); + return ret; + } + +@@ -1322,16 +1310,20 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) + case BPF_FUNC_probe_read_user: + return &bpf_probe_read_user_proto; + case BPF_FUNC_probe_read_kernel: +- return &bpf_probe_read_kernel_proto; ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? ++ NULL : &bpf_probe_read_kernel_proto; + case BPF_FUNC_probe_read_user_str: + return &bpf_probe_read_user_str_proto; + case BPF_FUNC_probe_read_kernel_str: +- return &bpf_probe_read_kernel_str_proto; ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? ++ NULL : &bpf_probe_read_kernel_str_proto; + #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE + case BPF_FUNC_probe_read: +- return &bpf_probe_read_compat_proto; ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? ++ NULL : &bpf_probe_read_compat_proto; + case BPF_FUNC_probe_read_str: +- return &bpf_probe_read_compat_str_proto; ++ return security_locked_down(LOCKDOWN_BPF_READ) < 0 ? 
++ NULL : &bpf_probe_read_compat_str_proto; + #endif + #ifdef CONFIG_CGROUPS + case BPF_FUNC_get_current_cgroup_id: +diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c +index a9bd6ce1ba02b..726fd2030f645 100644 +--- a/mm/debug_vm_pgtable.c ++++ b/mm/debug_vm_pgtable.c +@@ -192,7 +192,7 @@ static void __init pmd_advanced_tests(struct mm_struct *mm, + + pr_debug("Validating PMD advanced\n"); + /* Align the address wrt HPAGE_PMD_SIZE */ +- vaddr = (vaddr & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE; ++ vaddr &= HPAGE_PMD_MASK; + + pgtable_trans_huge_deposit(mm, pmdp, pgtable); + +@@ -330,7 +330,7 @@ static void __init pud_advanced_tests(struct mm_struct *mm, + + pr_debug("Validating PUD advanced\n"); + /* Align the address wrt HPAGE_PUD_SIZE */ +- vaddr = (vaddr & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE; ++ vaddr &= HPAGE_PUD_MASK; + + set_pud_at(mm, vaddr, pudp, pud); + pudp_set_wrprotect(mm, vaddr, pudp); +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 96b722af092e7..ce63ec0187c55 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -4705,10 +4705,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, + struct page *page; + + if (!*pagep) { +- ret = -ENOMEM; ++ /* If a page already exists, then it's UFFDIO_COPY for ++ * a non-missing case. Return -EEXIST. ++ */ ++ if (vm_shared && ++ hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) { ++ ret = -EEXIST; ++ goto out; ++ } ++ + page = alloc_huge_page(dst_vma, dst_addr, 0); +- if (IS_ERR(page)) ++ if (IS_ERR(page)) { ++ ret = -ENOMEM; + goto out; ++ } + + ret = copy_huge_page_from_user(page, + (const void __user *) src_addr, +diff --git a/mm/kfence/core.c b/mm/kfence/core.c +index f0be2c5038b5d..6f29981e317fe 100644 +--- a/mm/kfence/core.c ++++ b/mm/kfence/core.c +@@ -20,6 +20,7 @@ + #include <linux/moduleparam.h> + #include <linux/random.h> + #include <linux/rcupdate.h> ++#include <linux/sched/sysctl.h> + #include <linux/seq_file.h> + #include <linux/slab.h> + #include <linux/spinlock.h> +@@ -620,7 +621,16 @@ static void toggle_allocation_gate(struct work_struct *work) + /* Enable static key, and await allocation to happen. */ + static_branch_enable(&kfence_allocation_key); + +- wait_event_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), HZ); ++ if (sysctl_hung_task_timeout_secs) { ++ /* ++ * During low activity with no allocations we might wait a ++ * while; let's avoid the hung task warning. ++ */ ++ wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate), ++ sysctl_hung_task_timeout_secs * HZ / 2); ++ } else { ++ wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate)); ++ } + + /* Disable static key and reset timer. 
*/ + static_branch_disable(&kfence_allocation_key); +diff --git a/mm/memory.c b/mm/memory.c +index 550405fc3b5e6..14a6c66b37483 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -2896,6 +2896,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) + } + flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); + entry = mk_pte(new_page, vma->vm_page_prot); ++ entry = pte_sw_mkyoung(entry); + entry = maybe_mkwrite(pte_mkdirty(entry), vma); + + /* +@@ -3561,6 +3562,7 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) + __SetPageUptodate(page); + + entry = mk_pte(page, vma->vm_page_prot); ++ entry = pte_sw_mkyoung(entry); + if (vma->vm_flags & VM_WRITE) + entry = pte_mkwrite(pte_mkdirty(entry)); + +@@ -3745,6 +3747,8 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) + + if (prefault && arch_wants_old_prefaulted_pte()) + entry = pte_mkold(entry); ++ else ++ entry = pte_sw_mkyoung(entry); + + if (write) + entry = maybe_mkwrite(pte_mkdirty(entry), vma); +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 4bb3cdfc47f87..d9dbf45f7590e 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -8951,6 +8951,8 @@ bool take_page_off_buddy(struct page *page) + del_page_from_free_list(page_head, zone, page_order); + break_down_buddy_pages(zone, page_head, page, 0, + page_order, migratetype); ++ if (!is_migrate_isolate(migratetype)) ++ __mod_zone_freepage_state(zone, -1, migratetype); + ret = true; + break; + } +diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c +index b0d9c36acc033..4fad2ca661ed9 100644 +--- a/net/bluetooth/hci_core.c ++++ b/net/bluetooth/hci_core.c +@@ -1608,8 +1608,13 @@ setup_failed: + } else { + /* Init failed, cleanup */ + flush_work(&hdev->tx_work); +- flush_work(&hdev->cmd_work); ++ ++ /* Since hci_rx_work() is possible to awake new cmd_work ++ * it should be flushed first to avoid unexpected call of ++ * hci_cmd_work() ++ */ + flush_work(&hdev->rx_work); ++ flush_work(&hdev->cmd_work); + + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->rx_q); +diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c +index 251b9128f530a..eed0dd066e12c 100644 +--- a/net/bluetooth/hci_sock.c ++++ b/net/bluetooth/hci_sock.c +@@ -762,7 +762,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) + /* Detach sockets from device */ + read_lock(&hci_sk_list.lock); + sk_for_each(sk, &hci_sk_list.head) { +- bh_lock_sock_nested(sk); ++ lock_sock(sk); + if (hci_pi(sk)->hdev == hdev) { + hci_pi(sk)->hdev = NULL; + sk->sk_err = EPIPE; +@@ -771,7 +771,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event) + + hci_dev_put(hdev); + } +- bh_unlock_sock(sk); ++ release_sock(sk); + } + read_unlock(&hci_sk_list.lock); + } +diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c +index c10e5a55758d2..440139706130a 100644 +--- a/net/caif/caif_dev.c ++++ b/net/caif/caif_dev.c +@@ -308,7 +308,7 @@ static void dev_flowctrl(struct net_device *dev, int on) + caifd_put(caifd); + } + +-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, ++int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + struct cflayer *link_support, int head_room, + struct cflayer **layer, + int (**rcv_func)(struct sk_buff *, struct net_device *, +@@ -319,11 +319,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + enum cfcnfg_phy_preference pref; + struct cfcnfg *cfg = get_cfcnfg(dev_net(dev)); + struct caif_device_entry_list *caifdevs; ++ int res; + + caifdevs = 
caif_device_list(dev_net(dev)); + caifd = caif_device_alloc(dev); + if (!caifd) +- return; ++ return -ENOMEM; + *layer = &caifd->layer; + spin_lock_init(&caifd->flow_lock); + +@@ -344,7 +345,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + strlcpy(caifd->layer.name, dev->name, + sizeof(caifd->layer.name)); + caifd->layer.transmit = transmit; +- cfcnfg_add_phy_layer(cfg, ++ res = cfcnfg_add_phy_layer(cfg, + dev, + &caifd->layer, + pref, +@@ -354,6 +355,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev, + mutex_unlock(&caifdevs->lock); + if (rcv_func) + *rcv_func = receive; ++ return res; + } + EXPORT_SYMBOL(caif_enroll_dev); + +@@ -368,6 +370,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, + struct cflayer *layer, *link_support; + int head_room = 0; + struct caif_device_entry_list *caifdevs; ++ int res; + + cfg = get_cfcnfg(dev_net(dev)); + caifdevs = caif_device_list(dev_net(dev)); +@@ -393,8 +396,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, + break; + } + } +- caif_enroll_dev(dev, caifdev, link_support, head_room, ++ res = caif_enroll_dev(dev, caifdev, link_support, head_room, + &layer, NULL); ++ if (res) ++ cfserl_release(link_support); + caifdev->flowctrl = dev_flowctrl; + break; + +diff --git a/net/caif/caif_usb.c b/net/caif/caif_usb.c +index a0116b9503d9d..b02e1292f7f19 100644 +--- a/net/caif/caif_usb.c ++++ b/net/caif/caif_usb.c +@@ -115,6 +115,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN], + return (struct cflayer *) this; + } + ++static void cfusbl_release(struct cflayer *layer) ++{ ++ kfree(layer); ++} ++ + static struct packet_type caif_usb_type __read_mostly = { + .type = cpu_to_be16(ETH_P_802_EX1), + }; +@@ -127,6 +132,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, + struct cflayer *layer, *link_support; + struct usbnet *usbnet; + struct usb_device *usbdev; ++ int res; + + /* Check whether we have a NCM device, and find its VID/PID. 
*/ + if (!(dev->dev.parent && dev->dev.parent->driver && +@@ -169,8 +175,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, + if (dev->num_tx_queues > 1) + pr_warn("USB device uses more than one tx queue\n"); + +- caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN, ++ res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN, + &layer, &caif_usb_type.func); ++ if (res) ++ goto err; ++ + if (!pack_added) + dev_add_pack(&caif_usb_type); + pack_added = true; +@@ -178,6 +187,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what, + strlcpy(layer->name, dev->name, sizeof(layer->name)); + + return 0; ++err: ++ cfusbl_release(link_support); ++ return res; + } + + static struct notifier_block caif_device_notifier = { +diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c +index 399239a14420f..cac30e676ac94 100644 +--- a/net/caif/cfcnfg.c ++++ b/net/caif/cfcnfg.c +@@ -450,7 +450,7 @@ unlock: + rcu_read_unlock(); + } + +-void ++int + cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + struct net_device *dev, struct cflayer *phy_layer, + enum cfcnfg_phy_preference pref, +@@ -459,7 +459,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + { + struct cflayer *frml; + struct cfcnfg_phyinfo *phyinfo = NULL; +- int i; ++ int i, res = 0; + u8 phyid; + + mutex_lock(&cnfg->lock); +@@ -473,12 +473,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg, + goto got_phyid; + } + pr_warn("Too many CAIF Link Layers (max 6)\n"); ++ res = -EEXIST; + goto out; + + got_phyid: + phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC); +- if (!phyinfo) ++ if (!phyinfo) { ++ res = -ENOMEM; + goto out_err; ++ } + + phy_layer->id = phyid; + phyinfo->pref = pref; +@@ -492,8 +495,10 @@ got_phyid: + + frml = cffrml_create(phyid, fcs); + +- if (!frml) ++ if (!frml) { ++ res = -ENOMEM; + goto out_err; ++ } + phyinfo->frm_layer = frml; + layer_set_up(frml, cnfg->mux); + +@@ -511,11 +516,12 @@ got_phyid: + list_add_rcu(&phyinfo->node, &cnfg->phys); + out: + mutex_unlock(&cnfg->lock); +- return; ++ return res; + + out_err: + kfree(phyinfo); + mutex_unlock(&cnfg->lock); ++ return res; + } + EXPORT_SYMBOL(cfcnfg_add_phy_layer); + +diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c +index e11725a4bb0ed..40cd57ad0a0f4 100644 +--- a/net/caif/cfserl.c ++++ b/net/caif/cfserl.c +@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt); + static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, + int phyid); + ++void cfserl_release(struct cflayer *layer) ++{ ++ kfree(layer); ++} ++ + struct cflayer *cfserl_create(int instance, bool use_stx) + { + struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC); +diff --git a/net/core/devlink.c b/net/core/devlink.c +index 737b61c2976e1..4c363fa7d4d11 100644 +--- a/net/core/devlink.c ++++ b/net/core/devlink.c +@@ -705,7 +705,6 @@ static int devlink_nl_port_attrs_put(struct sk_buff *msg, + case DEVLINK_PORT_FLAVOUR_PHYSICAL: + case DEVLINK_PORT_FLAVOUR_CPU: + case DEVLINK_PORT_FLAVOUR_DSA: +- case DEVLINK_PORT_FLAVOUR_VIRTUAL: + if (nla_put_u32(msg, DEVLINK_ATTR_PORT_NUMBER, + attrs->phys.port_number)) + return -EMSGSIZE; +@@ -8629,7 +8628,6 @@ static int __devlink_port_phys_port_name_get(struct devlink_port *devlink_port, + + switch (attrs->flavour) { + case DEVLINK_PORT_FLAVOUR_PHYSICAL: +- case DEVLINK_PORT_FLAVOUR_VIRTUAL: + if (!attrs->split) + n = snprintf(name, len, "p%u", attrs->phys.port_number); + else +@@ -8670,6 +8668,8 @@ static int 
__devlink_port_phys_port_name_get(struct devlink_port *devlink_port,
+ n = snprintf(name, len, "pf%usf%u", attrs->pci_sf.pf,
+ attrs->pci_sf.sf);
+ break;
++ case DEVLINK_PORT_FLAVOUR_VIRTUAL:
++ return -EOPNOTSUPP;
+ }
+
+ if (n >= len)
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index 98f20efbfadf2..bf774575ad716 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -238,6 +238,7 @@ static int neigh_forced_gc(struct neigh_table *tbl)
+
+ write_lock(&n->lock);
+ if ((n->nud_state == NUD_FAILED) ||
++ (n->nud_state == NUD_NOARP) ||
+ (tbl->is_multicast &&
+ tbl->is_multicast(n->primary_key)) ||
+ time_after(tref, n->updated))
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 9c7b143e7a964..a266760cd65ea 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -815,10 +815,18 @@ void sock_set_rcvbuf(struct sock *sk, int val)
+ }
+ EXPORT_SYMBOL(sock_set_rcvbuf);
+
++static void __sock_set_mark(struct sock *sk, u32 val)
++{
++ if (val != sk->sk_mark) {
++ sk->sk_mark = val;
++ sk_dst_reset(sk);
++ }
++}
++
+ void sock_set_mark(struct sock *sk, u32 val)
+ {
+ lock_sock(sk);
+- sk->sk_mark = val;
++ __sock_set_mark(sk, val);
+ release_sock(sk);
+ }
+ EXPORT_SYMBOL(sock_set_mark);
+@@ -1126,10 +1134,10 @@ set_sndbuf:
+ case SO_MARK:
+ if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+ ret = -EPERM;
+- } else if (val != sk->sk_mark) {
+- sk->sk_mark = val;
+- sk_dst_reset(sk);
++ break;
+ }
++
++ __sock_set_mark(sk, val);
+ break;
+
+ case SO_RXQ_OVFL:
+diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
+index 008c1ec6e20c1..122ad5833fb1c 100644
+--- a/net/dsa/tag_8021q.c
++++ b/net/dsa/tag_8021q.c
+@@ -64,7 +64,7 @@
+ #define DSA_8021Q_SUBVLAN_HI_SHIFT 9
+ #define DSA_8021Q_SUBVLAN_HI_MASK GENMASK(9, 9)
+ #define DSA_8021Q_SUBVLAN_LO_SHIFT 4
+-#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(4, 3)
++#define DSA_8021Q_SUBVLAN_LO_MASK GENMASK(5, 4)
+ #define DSA_8021Q_SUBVLAN_HI(x) (((x) & GENMASK(2, 2)) >> 2)
+ #define DSA_8021Q_SUBVLAN_LO(x) ((x) & GENMASK(1, 0))
+ #define DSA_8021Q_SUBVLAN(x) \
+diff --git a/net/ieee802154/nl-mac.c b/net/ieee802154/nl-mac.c
+index 0c1b0770c59ea..c23c152860b73 100644
+--- a/net/ieee802154/nl-mac.c
++++ b/net/ieee802154/nl-mac.c
+@@ -680,8 +680,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
+ nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
+ nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
+ be32_to_cpu(params.frame_counter)) ||
+- ieee802154_llsec_fill_key_id(msg, &params.out_key))
++ ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
++ rc = -ENOBUFS;
+ goto out_free;
++ }
+
+ dev_put(dev);
+
+diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
+index 2cdc7e63fe172..88215b5c93aa4 100644
+--- a/net/ieee802154/nl-phy.c
++++ b/net/ieee802154/nl-phy.c
+@@ -241,8 +241,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
+ }
+
+ if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
+- nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
++ nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
++ rc = -EMSGSIZE;
+ goto nla_put_failure;
++ }
+ dev_put(dev);
+
+ wpan_phy_put(phy);
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 373d48073106f..36e80b3598b01 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3676,11 +3676,11 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
+ if (nh) {
+ if (rt->fib6_src.plen) {
+ NL_SET_ERR_MSG(extack, "Nexthops can not be used with source
routing"); +- goto out; ++ goto out_free; + } + if (!nexthop_get(nh)) { + NL_SET_ERR_MSG(extack, "Nexthop has been deleted"); +- goto out; ++ goto out_free; + } + rt->nh = nh; + fib6_nh = nexthop_fib6_nh(rt->nh); +@@ -3717,6 +3717,10 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, + out: + fib6_info_release(rt); + return ERR_PTR(err); ++out_free: ++ ip_fib_metrics_put(rt->fib6_metrics); ++ kfree(rt); ++ return ERR_PTR(err); + } + + int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags, +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index 228dd40828c4b..225b988215171 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -937,6 +937,10 @@ static void __mptcp_update_wmem(struct sock *sk) + { + struct mptcp_sock *msk = mptcp_sk(sk); + ++#ifdef CONFIG_LOCKDEP ++ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock)); ++#endif ++ + if (!msk->wmem_reserved) + return; + +@@ -1075,10 +1079,20 @@ out: + + static void __mptcp_clean_una_wakeup(struct sock *sk) + { ++#ifdef CONFIG_LOCKDEP ++ WARN_ON_ONCE(!lockdep_is_held(&sk->sk_lock.slock)); ++#endif + __mptcp_clean_una(sk); + mptcp_write_space(sk); + } + ++static void mptcp_clean_una_wakeup(struct sock *sk) ++{ ++ mptcp_data_lock(sk); ++ __mptcp_clean_una_wakeup(sk); ++ mptcp_data_unlock(sk); ++} ++ + static void mptcp_enter_memory_pressure(struct sock *sk) + { + struct mptcp_subflow_context *subflow; +@@ -2288,7 +2302,7 @@ static void __mptcp_retrans(struct sock *sk) + struct sock *ssk; + int ret; + +- __mptcp_clean_una_wakeup(sk); ++ mptcp_clean_una_wakeup(sk); + dfrag = mptcp_rtx_head(sk); + if (!dfrag) { + if (mptcp_data_fin_enabled(msk)) { +diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c +index 1936db3574d2e..8425cd393bf3e 100644 +--- a/net/mptcp/subflow.c ++++ b/net/mptcp/subflow.c +@@ -608,21 +608,20 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, + + /* if the sk is MP_CAPABLE, we try to fetch the client key */ + if (subflow_req->mp_capable) { +- if (TCP_SKB_CB(skb)->seq != subflow_req->ssn_offset + 1) { +- /* here we can receive and accept an in-window, +- * out-of-order pkt, which will not carry the MP_CAPABLE +- * opt even on mptcp enabled paths +- */ +- goto create_msk; +- } +- ++ /* we can receive and accept an in-window, out-of-order pkt, ++ * which may not carry the MP_CAPABLE opt even on mptcp enabled ++ * paths: always try to extract the peer key, and fallback ++ * for packets missing it. ++ * Even OoO DSS packets coming legitly after dropped or ++ * reordered MPC will cause fallback, but we don't have other ++ * options. 
++ */ + mptcp_get_options(skb, &mp_opt); + if (!mp_opt.mp_capable) { + fallback = true; + goto create_child; + } + +-create_msk: + new_msk = mptcp_sk_clone(listener->conn, &mp_opt, req); + if (!new_msk) + fallback = true; +@@ -985,22 +984,11 @@ static bool subflow_check_data_avail(struct sock *ssk) + u64 old_ack; + + status = get_mapping_status(ssk, msk); +- pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status); +- if (status == MAPPING_INVALID) { +- ssk->sk_err = EBADMSG; +- goto fatal; +- } +- if (status == MAPPING_DUMMY) { +- __mptcp_do_fallback(msk); +- skb = skb_peek(&ssk->sk_receive_queue); +- subflow->map_valid = 1; +- subflow->map_seq = READ_ONCE(msk->ack_seq); +- subflow->map_data_len = skb->len; +- subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - +- subflow->ssn_offset; +- subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL; +- return true; +- } ++ if (unlikely(status == MAPPING_INVALID)) ++ goto fallback; ++ ++ if (unlikely(status == MAPPING_DUMMY)) ++ goto fallback; + + if (status != MAPPING_OK) + goto no_data; +@@ -1013,10 +1001,8 @@ static bool subflow_check_data_avail(struct sock *ssk) + * MP_CAPABLE-based mapping + */ + if (unlikely(!READ_ONCE(msk->can_ack))) { +- if (!subflow->mpc_map) { +- ssk->sk_err = EBADMSG; +- goto fatal; +- } ++ if (!subflow->mpc_map) ++ goto fallback; + WRITE_ONCE(msk->remote_key, subflow->remote_key); + WRITE_ONCE(msk->ack_seq, subflow->map_seq); + WRITE_ONCE(msk->can_ack, true); +@@ -1044,15 +1030,29 @@ static bool subflow_check_data_avail(struct sock *ssk) + no_data: + subflow_sched_work_if_closed(msk, ssk); + return false; +-fatal: +- /* fatal protocol error, close the socket */ +- /* This barrier is coupled with smp_rmb() in tcp_poll() */ +- smp_wmb(); +- ssk->sk_error_report(ssk); +- tcp_set_state(ssk, TCP_CLOSE); +- tcp_send_active_reset(ssk, GFP_ATOMIC); +- subflow->data_avail = 0; +- return false; ++ ++fallback: ++ /* RFC 8684 section 3.7. */ ++ if (subflow->mp_join || subflow->fully_established) { ++ /* fatal protocol error, close the socket. 
++ * subflow_error_report() will introduce the appropriate barriers ++ */ ++ ssk->sk_err = EBADMSG; ++ ssk->sk_error_report(ssk); ++ tcp_set_state(ssk, TCP_CLOSE); ++ tcp_send_active_reset(ssk, GFP_ATOMIC); ++ subflow->data_avail = 0; ++ return false; ++ } ++ ++ __mptcp_do_fallback(msk); ++ skb = skb_peek(&ssk->sk_receive_queue); ++ subflow->map_valid = 1; ++ subflow->map_seq = READ_ONCE(msk->ack_seq); ++ subflow->map_data_len = skb->len; ++ subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset; ++ subflow->data_avail = MPTCP_SUBFLOW_DATA_AVAIL; ++ return true; + } + + bool mptcp_subflow_data_available(struct sock *sk) +diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c +index d45dbcba8b49c..c25097092a060 100644 +--- a/net/netfilter/ipvs/ip_vs_ctl.c ++++ b/net/netfilter/ipvs/ip_vs_ctl.c +@@ -1367,7 +1367,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, + ip_vs_addr_copy(svc->af, &svc->addr, &u->addr); + svc->port = u->port; + svc->fwmark = u->fwmark; +- svc->flags = u->flags; ++ svc->flags = u->flags & ~IP_VS_SVC_F_HASHED; + svc->timeout = u->timeout * HZ; + svc->netmask = u->netmask; + svc->ipvs = ipvs; +diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c +index 47e9319d2cf31..71892822bbf5d 100644 +--- a/net/netfilter/nf_conntrack_proto.c ++++ b/net/netfilter/nf_conntrack_proto.c +@@ -660,7 +660,7 @@ int nf_conntrack_proto_init(void) + + #if IS_ENABLED(CONFIG_IPV6) + cleanup_sockopt: +- nf_unregister_sockopt(&so_getorigdst6); ++ nf_unregister_sockopt(&so_getorigdst); + #endif + return ret; + } +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 878ed49d0c569..31016c144c48b 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -3288,8 +3288,10 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, + if (n == NFT_RULE_MAXEXPRS) + goto err1; + err = nf_tables_expr_parse(&ctx, tmp, &info[n]); +- if (err < 0) ++ if (err < 0) { ++ NL_SET_BAD_ATTR(extack, tmp); + goto err1; ++ } + size += info[n].ops->size; + n++; + } +diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c +index 0f94fce1d3ed3..04a12a264cf74 100644 +--- a/net/netfilter/nfnetlink_cthelper.c ++++ b/net/netfilter/nfnetlink_cthelper.c +@@ -380,10 +380,14 @@ static int + nfnl_cthelper_update(const struct nlattr * const tb[], + struct nf_conntrack_helper *helper) + { ++ u32 size; + int ret; + +- if (tb[NFCTH_PRIV_DATA_LEN]) +- return -EBUSY; ++ if (tb[NFCTH_PRIV_DATA_LEN]) { ++ size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN])); ++ if (size != helper->data_len) ++ return -EBUSY; ++ } + + if (tb[NFCTH_POLICY]) { + ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]); +diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c +index 882fe8648653d..6d2b382f5e075 100644 +--- a/net/netfilter/nft_ct.c ++++ b/net/netfilter/nft_ct.c +@@ -1216,7 +1216,7 @@ static void nft_ct_expect_obj_eval(struct nft_object *obj, + struct nf_conn *ct; + + ct = nf_ct_get(pkt->skb, &ctinfo); +- if (!ct || ctinfo == IP_CT_UNTRACKED) { ++ if (!ct || nf_ct_is_confirmed(ct) || nf_ct_is_template(ct)) { + regs->verdict.code = NFT_BREAK; + return; + } +diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c +index 53dbe733f9981..6cfd30fc07985 100644 +--- a/net/nfc/llcp_sock.c ++++ b/net/nfc/llcp_sock.c +@@ -110,6 +110,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) + if (!llcp_sock->service_name) { + 
nfc_llcp_local_put(llcp_sock->local); + llcp_sock->local = NULL; ++ llcp_sock->dev = NULL; + ret = -ENOMEM; + goto put_dev; + } +@@ -119,6 +120,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) + llcp_sock->local = NULL; + kfree(llcp_sock->service_name); + llcp_sock->service_name = NULL; ++ llcp_sock->dev = NULL; + ret = -EADDRINUSE; + goto put_dev; + } +diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c +index 48fdf7293deaa..ba7f57cb41c30 100644 +--- a/net/sched/act_ct.c ++++ b/net/sched/act_ct.c +@@ -984,7 +984,7 @@ static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, + */ + cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force); + if (!cached) { +- if (!commit && tcf_ct_flow_table_lookup(p, skb, family)) { ++ if (tcf_ct_flow_table_lookup(p, skb, family)) { + skip_add = true; + goto do_nat; + } +@@ -1024,10 +1024,11 @@ do_nat: + * even if the connection is already confirmed. + */ + nf_conntrack_confirm(skb); +- } else if (!skip_add) { +- tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo); + } + ++ if (!skip_add) ++ tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo); ++ + out_push: + skb_push_rcsum(skb, nh_ofs); + +@@ -1204,9 +1205,6 @@ static int tcf_ct_fill_params(struct net *net, + sizeof(p->zone)); + } + +- if (p->zone == NF_CT_DEFAULT_ZONE_ID) +- return 0; +- + nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0); + tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL); + if (!tmpl) { +diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c +index 081c11d5717c4..8827987ba9034 100644 +--- a/net/sched/sch_htb.c ++++ b/net/sched/sch_htb.c +@@ -1488,7 +1488,8 @@ static void htb_parent_to_leaf_offload(struct Qdisc *sch, + struct Qdisc *old_q; + + /* One ref for cl->leaf.q, the other for dev_queue->qdisc. 
*/ +- qdisc_refcount_inc(new_q); ++ if (new_q) ++ qdisc_refcount_inc(new_q); + old_q = htb_graft_helper(dev_queue, new_q); + WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); + } +@@ -1675,10 +1676,9 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg, + cl->parent->common.classid, + NULL); + if (q->offload) { +- if (new_q) { ++ if (new_q) + htb_set_lockdep_class_child(new_q); +- htb_parent_to_leaf_offload(sch, dev_queue, new_q); +- } ++ htb_parent_to_leaf_offload(sch, dev_queue, new_q); + } + } + +diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c +index a4389ef08a980..0c8882052ba08 100644 +--- a/net/tipc/bearer.c ++++ b/net/tipc/bearer.c +@@ -243,7 +243,8 @@ void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest) + */ + static int tipc_enable_bearer(struct net *net, const char *name, + u32 disc_domain, u32 prio, +- struct nlattr *attr[]) ++ struct nlattr *attr[], ++ struct netlink_ext_ack *extack) + { + struct tipc_net *tn = tipc_net(net); + struct tipc_bearer_names b_names; +@@ -254,20 +255,24 @@ static int tipc_enable_bearer(struct net *net, const char *name, + int bearer_id = 0; + int res = -EINVAL; + char *errstr = ""; ++ u32 i; + + if (!bearer_name_validate(name, &b_names)) { + errstr = "illegal name"; ++ NL_SET_ERR_MSG(extack, "Illegal name"); + goto rejected; + } + + if (prio > TIPC_MAX_LINK_PRI && prio != TIPC_MEDIA_LINK_PRI) { + errstr = "illegal priority"; ++ NL_SET_ERR_MSG(extack, "Illegal priority"); + goto rejected; + } + + m = tipc_media_find(b_names.media_name); + if (!m) { + errstr = "media not registered"; ++ NL_SET_ERR_MSG(extack, "Media not registered"); + goto rejected; + } + +@@ -275,33 +280,43 @@ static int tipc_enable_bearer(struct net *net, const char *name, + prio = m->priority; + + /* Check new bearer vs existing ones and find free bearer id if any */ +- while (bearer_id < MAX_BEARERS) { +- b = rtnl_dereference(tn->bearer_list[bearer_id]); +- if (!b) +- break; ++ bearer_id = MAX_BEARERS; ++ i = MAX_BEARERS; ++ while (i-- != 0) { ++ b = rtnl_dereference(tn->bearer_list[i]); ++ if (!b) { ++ bearer_id = i; ++ continue; ++ } + if (!strcmp(name, b->name)) { + errstr = "already enabled"; ++ NL_SET_ERR_MSG(extack, "Already enabled"); + goto rejected; + } +- bearer_id++; +- if (b->priority != prio) +- continue; +- if (++with_this_prio <= 2) +- continue; +- pr_warn("Bearer <%s>: already 2 bearers with priority %u\n", +- name, prio); +- if (prio == TIPC_MIN_LINK_PRI) { +- errstr = "cannot adjust to lower"; +- goto rejected; ++ ++ if (b->priority == prio && ++ (++with_this_prio > 2)) { ++ pr_warn("Bearer <%s>: already 2 bearers with priority %u\n", ++ name, prio); ++ ++ if (prio == TIPC_MIN_LINK_PRI) { ++ errstr = "cannot adjust to lower"; ++ NL_SET_ERR_MSG(extack, "Cannot adjust to lower"); ++ goto rejected; ++ } ++ ++ pr_warn("Bearer <%s>: trying with adjusted priority\n", ++ name); ++ prio--; ++ bearer_id = MAX_BEARERS; ++ i = MAX_BEARERS; ++ with_this_prio = 1; + } +- pr_warn("Bearer <%s>: trying with adjusted priority\n", name); +- prio--; +- bearer_id = 0; +- with_this_prio = 1; + } + + if (bearer_id >= MAX_BEARERS) { + errstr = "max 3 bearers permitted"; ++ NL_SET_ERR_MSG(extack, "Max 3 bearers permitted"); + goto rejected; + } + +@@ -315,6 +330,7 @@ static int tipc_enable_bearer(struct net *net, const char *name, + if (res) { + kfree(b); + errstr = "failed to enable media"; ++ NL_SET_ERR_MSG(extack, "Failed to enable media"); + goto rejected; + } + +@@ -331,6 +347,7 @@ static int tipc_enable_bearer(struct net *net, const char *name, + if (res) { + 
bearer_disable(net, b); + errstr = "failed to create discoverer"; ++ NL_SET_ERR_MSG(extack, "Failed to create discoverer"); + goto rejected; + } + +@@ -909,6 +926,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info) + bearer = tipc_bearer_find(net, name); + if (!bearer) { + err = -EINVAL; ++ NL_SET_ERR_MSG(info->extack, "Bearer not found"); + goto err_out; + } + +@@ -948,8 +966,10 @@ int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + bearer = tipc_bearer_find(net, name); +- if (!bearer) ++ if (!bearer) { ++ NL_SET_ERR_MSG(info->extack, "Bearer not found"); + return -EINVAL; ++ } + + bearer_disable(net, bearer); + +@@ -1007,7 +1027,8 @@ int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) + prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); + } + +- return tipc_enable_bearer(net, bearer, domain, prio, attrs); ++ return tipc_enable_bearer(net, bearer, domain, prio, attrs, ++ info->extack); + } + + int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) +@@ -1046,6 +1067,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) + b = tipc_bearer_find(net, name); + if (!b) { + rtnl_unlock(); ++ NL_SET_ERR_MSG(info->extack, "Bearer not found"); + return -EINVAL; + } + +@@ -1086,8 +1108,10 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) + name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); + + b = tipc_bearer_find(net, name); +- if (!b) ++ if (!b) { ++ NL_SET_ERR_MSG(info->extack, "Bearer not found"); + return -EINVAL; ++ } + + if (attrs[TIPC_NLA_BEARER_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; +@@ -1106,12 +1130,18 @@ int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) + if (props[TIPC_NLA_PROP_WIN]) + b->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + if (props[TIPC_NLA_PROP_MTU]) { +- if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) ++ if (b->media->type_id != TIPC_MEDIA_TYPE_UDP) { ++ NL_SET_ERR_MSG(info->extack, ++ "MTU property is unsupported"); + return -EINVAL; ++ } + #ifdef CONFIG_TIPC_MEDIA_UDP + if (tipc_udp_mtu_bad(nla_get_u32 +- (props[TIPC_NLA_PROP_MTU]))) ++ (props[TIPC_NLA_PROP_MTU]))) { ++ NL_SET_ERR_MSG(info->extack, ++ "MTU value is out-of-range"); + return -EINVAL; ++ } + b->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]); + tipc_node_apply_property(net, b, TIPC_NLA_PROP_MTU); + #endif +@@ -1239,6 +1269,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info) + rtnl_lock(); + media = tipc_media_find(name); + if (!media) { ++ NL_SET_ERR_MSG(info->extack, "Media not found"); + err = -EINVAL; + goto err_out; + } +@@ -1275,9 +1306,10 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) + name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); + + m = tipc_media_find(name); +- if (!m) ++ if (!m) { ++ NL_SET_ERR_MSG(info->extack, "Media not found"); + return -EINVAL; +- ++ } + if (attrs[TIPC_NLA_MEDIA_PROP]) { + struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; + +@@ -1293,12 +1325,18 @@ int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) + if (props[TIPC_NLA_PROP_WIN]) + m->max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]); + if (props[TIPC_NLA_PROP_MTU]) { +- if (m->type_id != TIPC_MEDIA_TYPE_UDP) ++ if (m->type_id != TIPC_MEDIA_TYPE_UDP) { ++ NL_SET_ERR_MSG(info->extack, ++ "MTU property is unsupported"); + return -EINVAL; ++ } + #ifdef CONFIG_TIPC_MEDIA_UDP + if (tipc_udp_mtu_bad(nla_get_u32 +- (props[TIPC_NLA_PROP_MTU]))) ++ (props[TIPC_NLA_PROP_MTU]))) { ++ 
NL_SET_ERR_MSG(info->extack, ++ "MTU value is out-of-range"); + return -EINVAL; ++ } + m->mtu = nla_get_u32(props[TIPC_NLA_PROP_MTU]); + #endif + } +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c +index d9cd229aa111b..9b1ea17f3b1dc 100644 +--- a/net/tls/tls_device.c ++++ b/net/tls/tls_device.c +@@ -50,6 +50,7 @@ static void tls_device_gc_task(struct work_struct *work); + static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task); + static LIST_HEAD(tls_device_gc_list); + static LIST_HEAD(tls_device_list); ++static LIST_HEAD(tls_device_down_list); + static DEFINE_SPINLOCK(tls_device_lock); + + static void tls_device_free_ctx(struct tls_context *ctx) +@@ -680,15 +681,13 @@ static void tls_device_resync_rx(struct tls_context *tls_ctx, + struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx); + struct net_device *netdev; + +- if (WARN_ON(test_and_set_bit(TLS_RX_SYNC_RUNNING, &tls_ctx->flags))) +- return; +- + trace_tls_device_rx_resync_send(sk, seq, rcd_sn, rx_ctx->resync_type); ++ rcu_read_lock(); + netdev = READ_ONCE(tls_ctx->netdev); + if (netdev) + netdev->tlsdev_ops->tls_dev_resync(netdev, sk, seq, rcd_sn, + TLS_OFFLOAD_CTX_DIR_RX); +- clear_bit_unlock(TLS_RX_SYNC_RUNNING, &tls_ctx->flags); ++ rcu_read_unlock(); + TLS_INC_STATS(sock_net(sk), LINUX_MIB_TLSRXDEVICERESYNC); + } + +@@ -761,6 +760,8 @@ void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) + + if (tls_ctx->rx_conf != TLS_HW) + return; ++ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) ++ return; + + prot = &tls_ctx->prot_info; + rx_ctx = tls_offload_ctx_rx(tls_ctx); +@@ -963,6 +964,17 @@ int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx, + + ctx->sw.decrypted |= is_decrypted; + ++ if (unlikely(test_bit(TLS_RX_DEV_DEGRADED, &tls_ctx->flags))) { ++ if (likely(is_encrypted || is_decrypted)) ++ return 0; ++ ++ /* After tls_device_down disables the offload, the next SKB will ++ * likely have initial fragments decrypted, and final ones not ++ * decrypted. We need to reencrypt that single SKB. ++ */ ++ return tls_device_reencrypt(sk, skb); ++ } ++ + /* Return immediately if the record is either entirely plaintext or + * entirely ciphertext. Otherwise handle reencrypt partially decrypted + * record. +@@ -1292,6 +1304,26 @@ static int tls_device_down(struct net_device *netdev) + spin_unlock_irqrestore(&tls_device_lock, flags); + + list_for_each_entry_safe(ctx, tmp, &list, list) { ++ /* Stop offloaded TX and switch to the fallback. ++ * tls_is_sk_tx_device_offloaded will return false. ++ */ ++ WRITE_ONCE(ctx->sk->sk_validate_xmit_skb, tls_validate_xmit_skb_sw); ++ ++ /* Stop the RX and TX resync. ++ * tls_dev_resync must not be called after tls_dev_del. ++ */ ++ WRITE_ONCE(ctx->netdev, NULL); ++ ++ /* Start skipping the RX resync logic completely. */ ++ set_bit(TLS_RX_DEV_DEGRADED, &ctx->flags); ++ ++ /* Sync with inflight packets. After this point: ++ * TX: no non-encrypted packets will be passed to the driver. ++ * RX: resync requests from the driver will be ignored. ++ */ ++ synchronize_net(); ++ ++ /* Release the offload context on the driver side. 
*/ + if (ctx->tx_conf == TLS_HW) + netdev->tlsdev_ops->tls_dev_del(netdev, ctx, + TLS_OFFLOAD_CTX_DIR_TX); +@@ -1299,15 +1331,21 @@ static int tls_device_down(struct net_device *netdev) + !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags)) + netdev->tlsdev_ops->tls_dev_del(netdev, ctx, + TLS_OFFLOAD_CTX_DIR_RX); +- WRITE_ONCE(ctx->netdev, NULL); +- smp_mb__before_atomic(); /* pairs with test_and_set_bit() */ +- while (test_bit(TLS_RX_SYNC_RUNNING, &ctx->flags)) +- usleep_range(10, 200); ++ + dev_put(netdev); +- list_del_init(&ctx->list); + +- if (refcount_dec_and_test(&ctx->refcount)) +- tls_device_free_ctx(ctx); ++ /* Move the context to a separate list for two reasons: ++ * 1. When the context is deallocated, list_del is called. ++ * 2. It's no longer an offloaded context, so we don't want to ++ * run offload-specific code on this context. ++ */ ++ spin_lock_irqsave(&tls_device_lock, flags); ++ list_move_tail(&ctx->list, &tls_device_down_list); ++ spin_unlock_irqrestore(&tls_device_lock, flags); ++ ++ /* Device contexts for RX and TX will be freed in on sk_destruct ++ * by tls_device_free_ctx. rx_conf and tx_conf stay in TLS_HW. ++ */ + } + + up_write(&device_offload_lock); +diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c +index cacf040872c74..e40bedd112b68 100644 +--- a/net/tls/tls_device_fallback.c ++++ b/net/tls/tls_device_fallback.c +@@ -431,6 +431,13 @@ struct sk_buff *tls_validate_xmit_skb(struct sock *sk, + } + EXPORT_SYMBOL_GPL(tls_validate_xmit_skb); + ++struct sk_buff *tls_validate_xmit_skb_sw(struct sock *sk, ++ struct net_device *dev, ++ struct sk_buff *skb) ++{ ++ return tls_sw_fallback(sk, skb); ++} ++ + struct sk_buff *tls_encrypt_skb(struct sk_buff *skb) + { + return tls_sw_fallback(skb->sk, skb); +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c +index 47b7c5334c346..fde56ff491637 100644 +--- a/net/tls/tls_main.c ++++ b/net/tls/tls_main.c +@@ -636,6 +636,7 @@ struct tls_context *tls_ctx_create(struct sock *sk) + mutex_init(&ctx->tx_lock); + rcu_assign_pointer(icsk->icsk_ulp_data, ctx); + ctx->sk_proto = READ_ONCE(sk->sk_prot); ++ ctx->sk = sk; + return ctx; + } + +diff --git a/samples/vfio-mdev/mdpy-fb.c b/samples/vfio-mdev/mdpy-fb.c +index 21dbf63d6e415..9ec93d90e8a5a 100644 +--- a/samples/vfio-mdev/mdpy-fb.c ++++ b/samples/vfio-mdev/mdpy-fb.c +@@ -117,22 +117,27 @@ static int mdpy_fb_probe(struct pci_dev *pdev, + if (format != DRM_FORMAT_XRGB8888) { + pci_err(pdev, "format mismatch (0x%x != 0x%x)\n", + format, DRM_FORMAT_XRGB8888); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_release_regions; + } + if (width < 100 || width > 10000) { + pci_err(pdev, "width (%d) out of range\n", width); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_release_regions; + } + if (height < 100 || height > 10000) { + pci_err(pdev, "height (%d) out of range\n", height); +- return -EINVAL; ++ ret = -EINVAL; ++ goto err_release_regions; + } + pci_info(pdev, "mdpy found: %dx%d framebuffer\n", + width, height); + + info = framebuffer_alloc(sizeof(struct mdpy_fb_par), &pdev->dev); +- if (!info) ++ if (!info) { ++ ret = -ENOMEM; + goto err_release_regions; ++ } + pci_set_drvdata(pdev, info); + par = info->par; + +diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal +index 735e11e9041b9..19468831fcc73 100644 +--- a/scripts/Makefile.modfinal ++++ b/scripts/Makefile.modfinal +@@ -59,7 +59,7 @@ quiet_cmd_ld_ko_o = LD [M] $@ + quiet_cmd_btf_ko = BTF [M] $@ + cmd_btf_ko = \ + if [ -f vmlinux ]; then \ +- LLVM_OBJCOPY=$(OBJCOPY) $(PAHOLE) -J --btf_base vmlinux $@; 
\ ++ LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J --btf_base vmlinux $@; \ + else \ + printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \ + fi; +diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh +index 3b261b0f74f0a..0a16928e495b9 100755 +--- a/scripts/link-vmlinux.sh ++++ b/scripts/link-vmlinux.sh +@@ -228,7 +228,7 @@ gen_btf() + vmlinux_link ${1} + + info "BTF" ${2} +- LLVM_OBJCOPY=${OBJCOPY} ${PAHOLE} -J ${1} ++ LLVM_OBJCOPY="${OBJCOPY}" ${PAHOLE} -J ${1} + + # Create ${2} which contains just .BTF section but no symbols. Add + # SHF_ALLOC because .BTF will be part of the vmlinux image. --strip-all +diff --git a/sound/core/timer.c b/sound/core/timer.c +index 6898b1ac0d7f4..92b7008fcdb86 100644 +--- a/sound/core/timer.c ++++ b/sound/core/timer.c +@@ -520,9 +520,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event) + return; + if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE) + return; ++ event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */ + list_for_each_entry(ts, &ti->slave_active_head, active_list) + if (ts->ccallback) +- ts->ccallback(ts, event + 100, &tstamp, resolution); ++ ts->ccallback(ts, event, &tstamp, resolution); + } + + /* start/continue a master timer */ +diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c +index 2026f1ccaf5a7..a220f7ac81263 100644 +--- a/sound/pci/hda/hda_codec.c ++++ b/sound/pci/hda/hda_codec.c +@@ -2973,6 +2973,7 @@ static int hda_codec_runtime_resume(struct device *dev) + #ifdef CONFIG_PM_SLEEP + static int hda_codec_pm_prepare(struct device *dev) + { ++ dev->power.power_state = PMSG_SUSPEND; + return pm_runtime_suspended(dev); + } + +@@ -2980,6 +2981,10 @@ static void hda_codec_pm_complete(struct device *dev) + { + struct hda_codec *codec = dev_to_hda_codec(dev); + ++ /* If no other pm-functions are called between prepare() and complete() */ ++ if (dev->power.power_state.event == PM_EVENT_SUSPEND) ++ dev->power.power_state = PMSG_RESUME; ++ + if (pm_runtime_suspended(dev) && (codec->jackpoll_interval || + hda_codec_need_resume(codec) || codec->forced_resume)) + pm_request_resume(dev); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index d8424d226714f..cc13a68197f3c 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -8289,6 +8289,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), ++ SND_PCI_QUIRK(0x103c, 0x841c, "HP Pavilion 15-CK0xx", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), + SND_PCI_QUIRK(0x103c, 0x84da, "HP OMEN dc0019-ur", ALC295_FIXUP_HP_OMEN), + SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3), +diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c +index 7b2d471a6419d..4343356f3cf9a 100644 +--- a/tools/perf/util/dwarf-aux.c ++++ b/tools/perf/util/dwarf-aux.c +@@ -975,9 +975,13 @@ static int __die_find_variable_cb(Dwarf_Die *die_mem, void *data) + if ((tag == DW_TAG_formal_parameter || + tag == DW_TAG_variable) && + die_compare_name(die_mem, fvp->name) && +- /* Does the DIE have location information or external instance? */ ++ /* ++ * Does the DIE have location information or const value ++ * or external instance? 
++ */
+ (dwarf_attr(die_mem, DW_AT_external, &attr) ||
+- dwarf_attr(die_mem, DW_AT_location, &attr))
++ dwarf_attr(die_mem, DW_AT_location, &attr) ||
++ dwarf_attr(die_mem, DW_AT_const_value, &attr)))
+ return DIE_FIND_CB_END;
+ if (dwarf_haspc(die_mem, fvp->addr))
+ return DIE_FIND_CB_CONTINUE;
+diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
+index 1b118c9c86a69..bba61b95a37a8 100644
+--- a/tools/perf/util/probe-finder.c
++++ b/tools/perf/util/probe-finder.c
+@@ -190,6 +190,9 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
+ immediate_value_is_supported()) {
+ Dwarf_Sword snum;
+
++ if (!tvar)
++ return 0;
++
+ dwarf_formsdata(&attr, &snum);
+ ret = asprintf(&tvar->value, "\\%ld", (long)snum);
+
+diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
+index 7ed7cd95e58fe..ebc4ee0fe179f 100755
+--- a/tools/testing/selftests/wireguard/netns.sh
++++ b/tools/testing/selftests/wireguard/netns.sh
+@@ -363,6 +363,7 @@ ip1 -6 rule add table main suppress_prefixlength 0
+ ip1 -4 route add default dev wg0 table 51820
+ ip1 -4 rule add not fwmark 51820 table 51820
+ ip1 -4 rule add table main suppress_prefixlength 0
++n1 bash -c 'printf 0 > /proc/sys/net/ipv4/conf/vethc/rp_filter'
+ # Flood the pings instead of sending just one, to trigger routing table reference counting bugs.
+ n1 ping -W 1 -c 100 -f 192.168.99.7
+ n1 ping -W 1 -c 100 -f abab::1111
+diff --git a/tools/testing/selftests/wireguard/qemu/kernel.config b/tools/testing/selftests/wireguard/qemu/kernel.config
+index 4eecb432a66c1..74db83a0aedd8 100644
+--- a/tools/testing/selftests/wireguard/qemu/kernel.config
++++ b/tools/testing/selftests/wireguard/qemu/kernel.config
+@@ -19,7 +19,6 @@ CONFIG_NETFILTER_XTABLES=y
+ CONFIG_NETFILTER_XT_NAT=y
+ CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+ CONFIG_NETFILTER_XT_MARK=y
+-CONFIG_NF_CONNTRACK_IPV4=y
+ CONFIG_NF_NAT_IPV4=y
+ CONFIG_IP_NF_IPTABLES=y
+ CONFIG_IP_NF_FILTER=y
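
A note on the refcounting pattern in the io_uring hunks above: they replace direct refcount_dec_and_test()/refcount_sub_and_test() calls on req->refs with req_ref_put_and_test()-style helpers, so every request lifetime transition funnels through one small set of wrappers. The following is an illustrative userspace reduction of that pattern, not kernel code: the struct and helper names merely mirror the patch, and C11 <stdatomic.h> stands in for the kernel's refcount_t.

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for struct io_kiocb; only the refcount matters here. */
	struct req {
		atomic_int refs;
	};

	/* Drop one reference; returns true when the caller held the last one. */
	static bool req_ref_put_and_test(struct req *r)
	{
		return atomic_fetch_sub(&r->refs, 1) == 1;
	}

	/* Drop several references at once (e.g. submission + completion). */
	static bool req_ref_sub_and_test(struct req *r, int n)
	{
		return atomic_fetch_sub(&r->refs, n) == n;
	}

	/* Take an extra reference before handing the request to a worker. */
	static void req_ref_get(struct req *r)
	{
		atomic_fetch_add(&r->refs, 1);
	}

	int main(void)
	{
		struct req *r = malloc(sizeof(*r));

		if (!r)
			return 1;
		atomic_init(&r->refs, 2);		/* submit ref + complete ref */
		req_ref_get(r);				/* hand-off keeps it alive */
		assert(!req_ref_sub_and_test(r, 2));	/* both base refs dropped */
		if (req_ref_put_and_test(r)) {		/* last ref: safe to free */
			free(r);
			puts("freed on last reference");
		}
		return 0;
	}

Funneling the operations through named wrappers keeps behavior identical while giving a single place to add debug checks or swap the underlying counter type later, which is what makes a mechanical conversion like the hunks above straightforward to review.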