author      Mike Pagano <mpagano@gentoo.org>    2024-02-23 07:37:18 -0500
committer   Mike Pagano <mpagano@gentoo.org>    2024-02-23 07:37:18 -0500
commit      c01acf6f609ce49d27c723598d558377d83d6681 (patch)
tree        120904b4be60934095aa86e17fcdee0b3d64bd3e
parent      Linux patch 6.1.78 (diff)
download    linux-patches-c01acf6f609ce49d27c723598d558377d83d6681.tar.gz
            linux-patches-c01acf6f609ce49d27c723598d558377d83d6681.tar.bz2
            linux-patches-c01acf6f609ce49d27c723598d558377d83d6681.zip
Linux patch 6.1.79
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
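
The most pervasive change pulled in below is mechanical: every per-architecture jump-label, uaccess, and KVM header in the patch renames the kernel's asm_volatile_goto() wrapper to a plain asm goto statement. Asm statements with label operands are implicitly volatile, so the wrapper (a workaround for an old GCC bug) is no longer needed; call sites whose asm also produces outputs switch to asm_goto_output() instead. The following is a minimal sketch of the static-branch pattern those headers implement. It is hypothetical userspace C for x86-64 with GCC or Clang, not code from the patch, and the section name .rodata.jump_sketch is invented for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    static bool branch_sketch(void)
    {
            /* Emit a NOP and record "address of the NOP, address of the
             * target" in a side table, the same shape as the kernel's
             * __jump_table entries; a runtime patcher could rewrite the
             * NOP into "jmp l_yes" to flip the branch. */
            asm goto("1: nop\n\t"
                     ".pushsection .rodata.jump_sketch, \"a\"\n\t"
                     ".quad 1b, %l[l_yes]\n\t"
                     ".popsection\n\t"
                     : /* no outputs */ : /* no inputs */
                     : /* no clobbers */ : l_yes);
            return false;           /* default: branch not taken */
    l_yes:
            return true;
    }

    int main(void)
    {
            printf("branch taken: %d\n", branch_sketch());
            return 0;
    }

In the kernel, toggling a static key patches the recorded NOP sites into direct jumps (or back), which is what makes a disabled static branch effectively free at runtime.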
-rw-r--r--   0000_README               |    4
-rw-r--r--   1078_linux-6.1.79.patch   | 9468
2 files changed, 9472 insertions, 0 deletions
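
Also notable below: arm64 gains errata coverage for Microsoft's Azure Cobalt 100 core, which the patch handles by defining implementer ID 0x6D and part number 0xD49 and appending the resulting MIDR to the existing Neoverse N2 lists for errata #2139208, #2067961, and #2253138. The sketch that follows shows how those two constants compose into the MIDR value being matched; it is hypothetical standalone C modeled on the macros in arch/arm64/include/asm/cputype.h, not code from the patch.

    #include <stdint.h>
    #include <stdio.h>

    /* MIDR_EL1 field layout: implementer [31:24], variant [23:20],
     * architecture [19:16], part number [15:4], revision [3:0]. */
    #define MIDR_IMPLEMENTOR_SHIFT  24
    #define MIDR_ARCHITECTURE_SHIFT 16
    #define MIDR_PARTNUM_SHIFT       4

    #define MIDR_CPU_MODEL(imp, part)                           \
            (((uint32_t)(imp)  << MIDR_IMPLEMENTOR_SHIFT) |     \
             (0xfU             << MIDR_ARCHITECTURE_SHIFT) |    \
             ((uint32_t)(part) << MIDR_PARTNUM_SHIFT))

    /* Constants added by the cputype.h hunk in this patch. */
    #define ARM_CPU_IMP_MICROSOFT               0x6D
    #define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Neoverse N2 r0p0 based */

    int main(void)
    {
            /* Prints 0x6d0fd490; MIDR_ALL_VERSIONS() then matches it for
             * any variant/revision when the erratum lists are walked. */
            printf("MIDR_MICROSOFT_AZURE_COBALT_100 = 0x%08x\n",
                   MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT,
                                  MICROSOFT_CPU_PART_AZURE_COBALT_100));
            return 0;
    }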
diff --git a/0000_README b/0000_README index 7d732a84..4d09930e 100644 --- a/0000_README +++ b/0000_README @@ -355,6 +355,10 @@ Patch: 1077_linux-6.1.78.patch From: https://www.kernel.org Desc: Linux 6.1.78 +Patch: 1078_linux-6.1.79.patch +From: https://www.kernel.org +Desc: Linux 6.1.79 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1078_linux-6.1.79.patch b/1078_linux-6.1.79.patch new file mode 100644 index 00000000..1136d033 --- /dev/null +++ b/1078_linux-6.1.79.patch @@ -0,0 +1,9468 @@ +diff --git a/Documentation/ABI/testing/sysfs-class-net-statistics b/Documentation/ABI/testing/sysfs-class-net-statistics +index 55db27815361b..53e508c6936a5 100644 +--- a/Documentation/ABI/testing/sysfs-class-net-statistics ++++ b/Documentation/ABI/testing/sysfs-class-net-statistics +@@ -1,4 +1,4 @@ +-What: /sys/class/<iface>/statistics/collisions ++What: /sys/class/net/<iface>/statistics/collisions + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -6,7 +6,7 @@ Description: + Indicates the number of collisions seen by this network device. + This value might not be relevant with all MAC layers. + +-What: /sys/class/<iface>/statistics/multicast ++What: /sys/class/net/<iface>/statistics/multicast + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -14,7 +14,7 @@ Description: + Indicates the number of multicast packets received by this + network device. + +-What: /sys/class/<iface>/statistics/rx_bytes ++What: /sys/class/net/<iface>/statistics/rx_bytes + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -23,7 +23,7 @@ Description: + See the network driver for the exact meaning of when this + value is incremented. + +-What: /sys/class/<iface>/statistics/rx_compressed ++What: /sys/class/net/<iface>/statistics/rx_compressed + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -32,7 +32,7 @@ Description: + network device. This value might only be relevant for interfaces + that support packet compression (e.g: PPP). + +-What: /sys/class/<iface>/statistics/rx_crc_errors ++What: /sys/class/net/<iface>/statistics/rx_crc_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -41,7 +41,7 @@ Description: + by this network device. Note that the specific meaning might + depend on the MAC layer used by the interface. + +-What: /sys/class/<iface>/statistics/rx_dropped ++What: /sys/class/net/<iface>/statistics/rx_dropped + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -51,7 +51,7 @@ Description: + packet processing. See the network driver for the exact + meaning of this value. + +-What: /sys/class/<iface>/statistics/rx_errors ++What: /sys/class/net/<iface>/statistics/rx_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -59,7 +59,7 @@ Description: + Indicates the number of receive errors on this network device. + See the network driver for the exact meaning of this value. + +-What: /sys/class/<iface>/statistics/rx_fifo_errors ++What: /sys/class/net/<iface>/statistics/rx_fifo_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -68,7 +68,7 @@ Description: + network device. See the network driver for the exact + meaning of this value. 
+ +-What: /sys/class/<iface>/statistics/rx_frame_errors ++What: /sys/class/net/<iface>/statistics/rx_frame_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -78,7 +78,7 @@ Description: + on the MAC layer protocol used. See the network driver for + the exact meaning of this value. + +-What: /sys/class/<iface>/statistics/rx_length_errors ++What: /sys/class/net/<iface>/statistics/rx_length_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -87,7 +87,7 @@ Description: + error, oversized or undersized. See the network driver for the + exact meaning of this value. + +-What: /sys/class/<iface>/statistics/rx_missed_errors ++What: /sys/class/net/<iface>/statistics/rx_missed_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -96,7 +96,7 @@ Description: + due to lack of capacity in the receive side. See the network + driver for the exact meaning of this value. + +-What: /sys/class/<iface>/statistics/rx_nohandler ++What: /sys/class/net/<iface>/statistics/rx_nohandler + Date: February 2016 + KernelVersion: 4.6 + Contact: netdev@vger.kernel.org +@@ -104,7 +104,7 @@ Description: + Indicates the number of received packets that were dropped on + an inactive device by the network core. + +-What: /sys/class/<iface>/statistics/rx_over_errors ++What: /sys/class/net/<iface>/statistics/rx_over_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -114,7 +114,7 @@ Description: + (e.g: larger than MTU). See the network driver for the exact + meaning of this value. + +-What: /sys/class/<iface>/statistics/rx_packets ++What: /sys/class/net/<iface>/statistics/rx_packets + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -122,7 +122,7 @@ Description: + Indicates the total number of good packets received by this + network device. + +-What: /sys/class/<iface>/statistics/tx_aborted_errors ++What: /sys/class/net/<iface>/statistics/tx_aborted_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -132,7 +132,7 @@ Description: + a medium collision). See the network driver for the exact + meaning of this value. + +-What: /sys/class/<iface>/statistics/tx_bytes ++What: /sys/class/net/<iface>/statistics/tx_bytes + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -143,7 +143,7 @@ Description: + transmitted packets or all packets that have been queued for + transmission. + +-What: /sys/class/<iface>/statistics/tx_carrier_errors ++What: /sys/class/net/<iface>/statistics/tx_carrier_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -152,7 +152,7 @@ Description: + because of carrier errors (e.g: physical link down). See the + network driver for the exact meaning of this value. + +-What: /sys/class/<iface>/statistics/tx_compressed ++What: /sys/class/net/<iface>/statistics/tx_compressed + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -161,7 +161,7 @@ Description: + this might only be relevant for devices that support + compression (e.g: PPP). + +-What: /sys/class/<iface>/statistics/tx_dropped ++What: /sys/class/net/<iface>/statistics/tx_dropped + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -170,7 +170,7 @@ Description: + See the driver for the exact reasons as to why the packets were + dropped. 
+ +-What: /sys/class/<iface>/statistics/tx_errors ++What: /sys/class/net/<iface>/statistics/tx_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -179,7 +179,7 @@ Description: + a network device. See the driver for the exact reasons as to + why the packets were dropped. + +-What: /sys/class/<iface>/statistics/tx_fifo_errors ++What: /sys/class/net/<iface>/statistics/tx_fifo_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -188,7 +188,7 @@ Description: + FIFO error. See the driver for the exact reasons as to why the + packets were dropped. + +-What: /sys/class/<iface>/statistics/tx_heartbeat_errors ++What: /sys/class/net/<iface>/statistics/tx_heartbeat_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -197,7 +197,7 @@ Description: + reported as heartbeat errors. See the driver for the exact + reasons as to why the packets were dropped. + +-What: /sys/class/<iface>/statistics/tx_packets ++What: /sys/class/net/<iface>/statistics/tx_packets + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +@@ -206,7 +206,7 @@ Description: + device. See the driver for whether this reports the number of all + attempted or successful transmissions. + +-What: /sys/class/<iface>/statistics/tx_window_errors ++What: /sys/class/net/<iface>/statistics/tx_window_errors + Date: April 2005 + KernelVersion: 2.6.12 + Contact: netdev@vger.kernel.org +diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst +index d9fce65b2f047..27135b9c07acb 100644 +--- a/Documentation/arm64/silicon-errata.rst ++++ b/Documentation/arm64/silicon-errata.rst +@@ -221,3 +221,10 @@ stable kernels. + +----------------+-----------------+-----------------+-----------------------------+ + | Fujitsu | A64FX | E#010001 | FUJITSU_ERRATUM_010001 | + +----------------+-----------------+-----------------+-----------------------------+ +++----------------+-----------------+-----------------+-----------------------------+ ++| Microsoft | Azure Cobalt 100| #2139208 | ARM64_ERRATUM_2139208 | +++----------------+-----------------+-----------------+-----------------------------+ ++| Microsoft | Azure Cobalt 100| #2067961 | ARM64_ERRATUM_2067961 | +++----------------+-----------------+-----------------+-----------------------------+ ++| Microsoft | Azure Cobalt 100| #2253138 | ARM64_ERRATUM_2253138 | +++----------------+-----------------+-----------------+-----------------------------+ +diff --git a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt +index 9bf9bbac16e25..cdc303caf5f45 100644 +--- a/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt ++++ b/Documentation/devicetree/bindings/net/wireless/marvell-8xxx.txt +@@ -1,4 +1,4 @@ +-Marvell 8787/8897/8997 (sd8787/sd8897/sd8997/pcie8997) SDIO/PCIE devices ++Marvell 8787/8897/8978/8997 (sd8787/sd8897/sd8978/sd8997/pcie8997) SDIO/PCIE devices + ------ + + This node provides properties for controlling the Marvell SDIO/PCIE wireless device. 
+@@ -10,7 +10,9 @@ Required properties: + - compatible : should be one of the following: + * "marvell,sd8787" + * "marvell,sd8897" ++ * "marvell,sd8978" + * "marvell,sd8997" ++ * "nxp,iw416" + * "pci11ab,2b42" + * "pci1b4b,2b42" + +diff --git a/Makefile b/Makefile +index e93554269e474..d6bc9f597e8b8 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 6 + PATCHLEVEL = 1 +-SUBLEVEL = 78 ++SUBLEVEL = 79 + EXTRAVERSION = + NAME = Curry Ramen + +@@ -459,8 +459,7 @@ HOSTRUSTC = rustc + HOSTPKG_CONFIG = pkg-config + + KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \ +- -O2 -fomit-frame-pointer -std=gnu11 \ +- -Wdeclaration-after-statement ++ -O2 -fomit-frame-pointer -std=gnu11 + KBUILD_USERCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS) + KBUILD_USERLDFLAGS := $(USERLDFLAGS) + +@@ -1018,9 +1017,6 @@ endif + # arch Makefile may override CC so keep this after arch Makefile is included + NOSTDINC_FLAGS += -nostdinc + +-# warn about C99 declaration after statement +-KBUILD_CFLAGS += -Wdeclaration-after-statement +- + # Variable Length Arrays (VLAs) should not be used anywhere in the kernel + KBUILD_CFLAGS += -Wvla + +diff --git a/arch/Kconfig b/arch/Kconfig +index 14273a6203dfc..f99fd9a4ca778 100644 +--- a/arch/Kconfig ++++ b/arch/Kconfig +@@ -642,6 +642,7 @@ config SHADOW_CALL_STACK + bool "Shadow Call Stack" + depends on ARCH_SUPPORTS_SHADOW_CALL_STACK + depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER ++ depends on MMU + help + This option enables the compiler's Shadow Call Stack, which + uses a shadow stack to protect function return addresses from +diff --git a/arch/arc/include/asm/jump_label.h b/arch/arc/include/asm/jump_label.h +index 9d96180797396..a339223d9e052 100644 +--- a/arch/arc/include/asm/jump_label.h ++++ b/arch/arc/include/asm/jump_label.h +@@ -31,7 +31,7 @@ + static __always_inline bool arch_static_branch(struct static_key *key, + bool branch) + { +- asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n" ++ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n" + "1: \n" + "nop \n" + ".pushsection __jump_table, \"aw\" \n" +@@ -47,7 +47,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, + static __always_inline bool arch_static_branch_jump(struct static_key *key, + bool branch) + { +- asm_volatile_goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n" ++ asm goto(".balign "__stringify(JUMP_LABEL_NOP_SIZE)" \n" + "1: \n" + "b %l[l_yes] \n" + ".pushsection __jump_table, \"aw\" \n" +diff --git a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.2.dts b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.2.dts +index f9f7d99bd4db8..76f3e07bc8826 100644 +--- a/arch/arm/boot/dts/imx6q-apalis-ixora-v1.2.dts ++++ b/arch/arm/boot/dts/imx6q-apalis-ixora-v1.2.dts +@@ -76,6 +76,7 @@ reg_can1_supply: regulator-can1-supply { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enable_can1_power>; + regulator-name = "can1_supply"; ++ startup-delay-us = <1000>; + }; + + reg_can2_supply: regulator-can2-supply { +@@ -85,6 +86,7 @@ reg_can2_supply: regulator-can2-supply { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_enable_can2_power>; + regulator-name = "can2_supply"; ++ startup-delay-us = <1000>; + }; + }; + +diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h +index e12d7d096fc03..e4eb54f6cd9fe 100644 +--- a/arch/arm/include/asm/jump_label.h ++++ b/arch/arm/include/asm/jump_label.h +@@ -11,7 +11,7 @@ + + static __always_inline 
bool arch_static_branch(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + WASM(nop) "\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".word 1b, %l[l_yes], %c0\n\t" +@@ -25,7 +25,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran + + static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + WASM(b) " %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".word 1b, %l[l_yes], %c0\n\t" +diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts +index 9d116e1fbe10c..1ac4f8c24e231 100644 +--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dts ++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dts +@@ -169,10 +169,6 @@ led@6 { + }; + }; + +-&blsp_dma { +- status = "okay"; +-}; +- + &blsp_i2c2 { + /* On Low speed expansion */ + status = "okay"; +diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi +index bafac2cf7e3d6..987cebbda0571 100644 +--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi ++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi +@@ -1522,7 +1522,7 @@ blsp_dma: dma-controller@7884000 { + clock-names = "bam_clk"; + #dma-cells = <1>; + qcom,ee = <0>; +- status = "disabled"; ++ qcom,controlled-remotely; + }; + + blsp1_uart1: serial@78af000 { +diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi +index 4d5905ef0b411..95c515da9f2e0 100644 +--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi ++++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi +@@ -4049,7 +4049,7 @@ usb_1: usb@a6f8800 { + assigned-clock-rates = <19200000>, <150000000>; + + interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, +- <&intc GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>, ++ <&pdc_intc 6 IRQ_TYPE_LEVEL_HIGH>, + <&pdc_intc 8 IRQ_TYPE_EDGE_BOTH>, + <&pdc_intc 9 IRQ_TYPE_EDGE_BOTH>; + interrupt-names = "hs_phy_irq", "ss_phy_irq", +@@ -4100,7 +4100,7 @@ usb_2: usb@a8f8800 { + assigned-clock-rates = <19200000>, <150000000>; + + interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, +- <&intc GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>, ++ <&pdc_intc 7 IRQ_TYPE_LEVEL_HIGH>, + <&pdc_intc 10 IRQ_TYPE_EDGE_BOTH>, + <&pdc_intc 11 IRQ_TYPE_EDGE_BOTH>; + interrupt-names = "hs_phy_irq", "ss_phy_irq", +diff --git a/arch/arm64/boot/dts/qcom/sm8150.dtsi b/arch/arm64/boot/dts/qcom/sm8150.dtsi +index 8efd0e227d780..eb1a9369926d2 100644 +--- a/arch/arm64/boot/dts/qcom/sm8150.dtsi ++++ b/arch/arm64/boot/dts/qcom/sm8150.dtsi +@@ -3629,7 +3629,7 @@ usb_1: usb@a6f8800 { + assigned-clock-rates = <19200000>, <200000000>; + + interrupts-extended = <&intc GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>, +- <&intc GIC_SPI 486 IRQ_TYPE_LEVEL_HIGH>, ++ <&pdc 6 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 8 IRQ_TYPE_EDGE_BOTH>, + <&pdc 9 IRQ_TYPE_EDGE_BOTH>; + interrupt-names = "hs_phy_irq", "ss_phy_irq", +@@ -3678,7 +3678,7 @@ usb_2: usb@a8f8800 { + assigned-clock-rates = <19200000>, <200000000>; + + interrupts-extended = <&intc GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>, +- <&intc GIC_SPI 487 IRQ_TYPE_LEVEL_HIGH>, ++ <&pdc 7 IRQ_TYPE_LEVEL_HIGH>, + <&pdc 10 IRQ_TYPE_EDGE_BOTH>, + <&pdc 11 IRQ_TYPE_EDGE_BOTH>; + interrupt-names = "hs_phy_irq", "ss_phy_irq", +diff --git a/arch/arm64/include/asm/alternative-macros.h b/arch/arm64/include/asm/alternative-macros.h +index 3622e9f4fb442..51738c56e96cd 100644 +--- a/arch/arm64/include/asm/alternative-macros.h ++++ b/arch/arm64/include/asm/alternative-macros.h +@@ -229,7 +229,7 @@ 
alternative_has_feature_likely(unsigned long feature) + compiletime_assert(feature < ARM64_NCAPS, + "feature must be < ARM64_NCAPS"); + +- asm_volatile_goto( ++ asm goto( + ALTERNATIVE_CB("b %l[l_no]", %[feature], alt_cb_patch_nops) + : + : [feature] "i" (feature) +@@ -247,7 +247,7 @@ alternative_has_feature_unlikely(unsigned long feature) + compiletime_assert(feature < ARM64_NCAPS, + "feature must be < ARM64_NCAPS"); + +- asm_volatile_goto( ++ asm goto( + ALTERNATIVE("nop", "b %l[l_yes]", %[feature]) + : + : [feature] "i" (feature) +diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h +index 7dce9c0aa7836..af3a678a76b3a 100644 +--- a/arch/arm64/include/asm/cputype.h ++++ b/arch/arm64/include/asm/cputype.h +@@ -61,6 +61,7 @@ + #define ARM_CPU_IMP_HISI 0x48 + #define ARM_CPU_IMP_APPLE 0x61 + #define ARM_CPU_IMP_AMPERE 0xC0 ++#define ARM_CPU_IMP_MICROSOFT 0x6D + + #define ARM_CPU_PART_AEM_V8 0xD0F + #define ARM_CPU_PART_FOUNDATION 0xD00 +@@ -128,6 +129,8 @@ + + #define AMPERE_CPU_PART_AMPERE1 0xAC3 + ++#define MICROSOFT_CPU_PART_AZURE_COBALT_100 0xD49 /* Based on r0p0 of ARM Neoverse N2 */ ++ + #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) + #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) + #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) +@@ -179,6 +182,7 @@ + #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX) + #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX) + #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1) ++#define MIDR_MICROSOFT_AZURE_COBALT_100 MIDR_CPU_MODEL(ARM_CPU_IMP_MICROSOFT, MICROSOFT_CPU_PART_AZURE_COBALT_100) + + /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */ + #define MIDR_FUJITSU_ERRATUM_010001 MIDR_FUJITSU_A64FX +diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h +index cea441b6aa5dc..b5bd3c38a01b2 100644 +--- a/arch/arm64/include/asm/jump_label.h ++++ b/arch/arm64/include/asm/jump_label.h +@@ -18,7 +18,7 @@ + static __always_inline bool arch_static_branch(struct static_key *key, + bool branch) + { +- asm_volatile_goto( ++ asm goto( + "1: nop \n\t" + " .pushsection __jump_table, \"aw\" \n\t" + " .align 3 \n\t" +@@ -35,7 +35,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, + static __always_inline bool arch_static_branch_jump(struct static_key *key, + bool branch) + { +- asm_volatile_goto( ++ asm goto( + "1: b %l[l_yes] \n\t" + " .pushsection __jump_table, \"aw\" \n\t" + " .align 3 \n\t" +diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c +index 61f22e9c92b4c..74584597bfb82 100644 +--- a/arch/arm64/kernel/cpu_errata.c ++++ b/arch/arm64/kernel/cpu_errata.c +@@ -390,6 +390,7 @@ static const struct midr_range erratum_1463225[] = { + static const struct midr_range trbe_overwrite_fill_mode_cpus[] = { + #ifdef CONFIG_ARM64_ERRATUM_2139208 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), ++ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100), + #endif + #ifdef CONFIG_ARM64_ERRATUM_2119858 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), +@@ -403,6 +404,7 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = { + static const struct midr_range tsb_flush_fail_cpus[] = { + #ifdef CONFIG_ARM64_ERRATUM_2067961 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), ++ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100), + #endif + #ifdef 
CONFIG_ARM64_ERRATUM_2054223 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), +@@ -415,6 +417,7 @@ static const struct midr_range tsb_flush_fail_cpus[] = { + static struct midr_range trbe_write_out_of_range_cpus[] = { + #ifdef CONFIG_ARM64_ERRATUM_2253138 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), ++ MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100), + #endif + #ifdef CONFIG_ARM64_ERRATUM_2224489 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), +diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile +index 36c8f66cad251..d513533cc922f 100644 +--- a/arch/arm64/kernel/vdso32/Makefile ++++ b/arch/arm64/kernel/vdso32/Makefile +@@ -68,11 +68,9 @@ VDSO_CFLAGS += -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ + -fno-strict-aliasing -fno-common \ + -Werror-implicit-function-declaration \ + -Wno-format-security \ +- -Wdeclaration-after-statement \ + -std=gnu11 + VDSO_CFLAGS += -O2 + # Some useful compiler-dependent flags from top-level Makefile +-VDSO_CFLAGS += $(call cc32-option,-Wdeclaration-after-statement,) + VDSO_CFLAGS += $(call cc32-option,-Wno-pointer-sign) + VDSO_CFLAGS += -fno-strict-overflow + VDSO_CFLAGS += $(call cc32-option,-Werror=strict-prototypes) +diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h +index 98a3f4b168bd2..ef2e37a10a0fe 100644 +--- a/arch/csky/include/asm/jump_label.h ++++ b/arch/csky/include/asm/jump_label.h +@@ -12,7 +12,7 @@ + static __always_inline bool arch_static_branch(struct static_key *key, + bool branch) + { +- asm_volatile_goto( ++ asm goto( + "1: nop32 \n" + " .pushsection __jump_table, \"aw\" \n" + " .align 2 \n" +@@ -29,7 +29,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, + static __always_inline bool arch_static_branch_jump(struct static_key *key, + bool branch) + { +- asm_volatile_goto( ++ asm goto( + "1: bsr32 %l[label] \n" + " .pushsection __jump_table, \"aw\" \n" + " .align 2 \n" +diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h +index 4044eaf989ac7..0921ddda11a4b 100644 +--- a/arch/mips/include/asm/checksum.h ++++ b/arch/mips/include/asm/checksum.h +@@ -241,7 +241,8 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + " .set pop" + : "=&r" (sum), "=&r" (tmp) + : "r" (saddr), "r" (daddr), +- "0" (htonl(len)), "r" (htonl(proto)), "r" (sum)); ++ "0" (htonl(len)), "r" (htonl(proto)), "r" (sum) ++ : "memory"); + + return csum_fold(sum); + } +diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h +index c5c6864e64bc4..405c85173f2c1 100644 +--- a/arch/mips/include/asm/jump_label.h ++++ b/arch/mips/include/asm/jump_label.h +@@ -36,7 +36,7 @@ + + static __always_inline bool arch_static_branch(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\t" B_INSN " 2f\n\t" ++ asm goto("1:\t" B_INSN " 2f\n\t" + "2:\t.insn\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + WORD_INSN " 1b, %l[l_yes], %0\n\t" +@@ -50,7 +50,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran + + static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\t" J_INSN " %l[l_yes]\n\t" ++ asm goto("1:\t" J_INSN " %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + WORD_INSN " 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" +diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig +index 345d5e021484c..abf39ecda6fb1 100644 +--- a/arch/parisc/Kconfig ++++ b/arch/parisc/Kconfig +@@ -24,7 +24,6 @@ config PARISC + select RTC_DRV_GENERIC + 
select INIT_ALL_POSSIBLE + select BUG +- select BUILDTIME_TABLE_SORT + select HAVE_PCI + select HAVE_PERF_EVENTS + select HAVE_KERNEL_BZIP2 +diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h +index 74d17d7e759da..5937d5edaba1e 100644 +--- a/arch/parisc/include/asm/assembly.h ++++ b/arch/parisc/include/asm/assembly.h +@@ -576,6 +576,7 @@ + .section __ex_table,"aw" ! \ + .align 4 ! \ + .word (fault_addr - .), (except_addr - .) ! \ ++ or %r0,%r0,%r0 ! \ + .previous + + +diff --git a/arch/parisc/include/asm/extable.h b/arch/parisc/include/asm/extable.h +new file mode 100644 +index 0000000000000..4ea23e3d79dc9 +--- /dev/null ++++ b/arch/parisc/include/asm/extable.h +@@ -0,0 +1,64 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __PARISC_EXTABLE_H ++#define __PARISC_EXTABLE_H ++ ++#include <asm/ptrace.h> ++#include <linux/compiler.h> ++ ++/* ++ * The exception table consists of three addresses: ++ * ++ * - A relative address to the instruction that is allowed to fault. ++ * - A relative address at which the program should continue (fixup routine) ++ * - An asm statement which specifies which CPU register will ++ * receive -EFAULT when an exception happens if the lowest bit in ++ * the fixup address is set. ++ * ++ * Note: The register specified in the err_opcode instruction will be ++ * modified at runtime if a fault happens. Register %r0 will be ignored. ++ * ++ * Since relative addresses are used, 32bit values are sufficient even on ++ * 64bit kernel. ++ */ ++ ++struct pt_regs; ++int fixup_exception(struct pt_regs *regs); ++ ++#define ARCH_HAS_RELATIVE_EXTABLE ++struct exception_table_entry { ++ int insn; /* relative address of insn that is allowed to fault. */ ++ int fixup; /* relative address of fixup routine */ ++ int err_opcode; /* sample opcode with register which holds error code */ ++}; ++ ++#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr, opcode )\ ++ ".section __ex_table,\"aw\"\n" \ ++ ".align 4\n" \ ++ ".word (" #fault_addr " - .), (" #except_addr " - .)\n" \ ++ opcode "\n" \ ++ ".previous\n" ++ ++/* ++ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry ++ * (with lowest bit set) for which the fault handler in fixup_exception() will ++ * load -EFAULT on fault into the register specified by the err_opcode instruction, ++ * and zeroes the target register in case of a read fault in get_user(). 
++ */ ++#define ASM_EXCEPTIONTABLE_VAR(__err_var) \ ++ int __err_var = 0 ++#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr, register )\ ++ ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1, "or %%r0,%%r0," register) ++ ++static inline void swap_ex_entry_fixup(struct exception_table_entry *a, ++ struct exception_table_entry *b, ++ struct exception_table_entry tmp, ++ int delta) ++{ ++ a->fixup = b->fixup + delta; ++ b->fixup = tmp.fixup - delta; ++ a->err_opcode = b->err_opcode; ++ b->err_opcode = tmp.err_opcode; ++} ++#define swap_ex_entry_fixup swap_ex_entry_fixup ++ ++#endif +diff --git a/arch/parisc/include/asm/jump_label.h b/arch/parisc/include/asm/jump_label.h +index 94428798b6aa6..317ebc5edc9fe 100644 +--- a/arch/parisc/include/asm/jump_label.h ++++ b/arch/parisc/include/asm/jump_label.h +@@ -12,7 +12,7 @@ + + static __always_inline bool arch_static_branch(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align %1\n\t" +@@ -29,7 +29,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran + + static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + "b,n %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align %1\n\t" +diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h +index c822bd0c0e3c6..51f40eaf77806 100644 +--- a/arch/parisc/include/asm/special_insns.h ++++ b/arch/parisc/include/asm/special_insns.h +@@ -8,7 +8,8 @@ + "copy %%r0,%0\n" \ + "8:\tlpa %%r0(%1),%0\n" \ + "9:\n" \ +- ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \ ++ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \ ++ "or %%r0,%%r0,%%r0") \ + : "=&r" (pa) \ + : "r" (va) \ + : "memory" \ +@@ -22,7 +23,8 @@ + "copy %%r0,%0\n" \ + "8:\tlpa %%r0(%%sr3,%1),%0\n" \ + "9:\n" \ +- ASM_EXCEPTIONTABLE_ENTRY(8b, 9b) \ ++ ASM_EXCEPTIONTABLE_ENTRY(8b, 9b, \ ++ "or %%r0,%%r0,%%r0") \ + : "=&r" (pa) \ + : "r" (va) \ + : "memory" \ +diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h +index 4165079898d9e..88d0ae5769dde 100644 +--- a/arch/parisc/include/asm/uaccess.h ++++ b/arch/parisc/include/asm/uaccess.h +@@ -7,6 +7,7 @@ + */ + #include <asm/page.h> + #include <asm/cache.h> ++#include <asm/extable.h> + + #include <linux/bug.h> + #include <linux/string.h> +@@ -26,37 +27,6 @@ + #define STD_USER(sr, x, ptr) __put_user_asm(sr, "std", x, ptr) + #endif + +-/* +- * The exception table contains two values: the first is the relative offset to +- * the address of the instruction that is allowed to fault, and the second is +- * the relative offset to the address of the fixup routine. Since relative +- * addresses are used, 32bit values are sufficient even on 64bit kernel. +- */ +- +-#define ARCH_HAS_RELATIVE_EXTABLE +-struct exception_table_entry { +- int insn; /* relative address of insn that is allowed to fault. 
*/ +- int fixup; /* relative address of fixup routine */ +-}; +- +-#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\ +- ".section __ex_table,\"aw\"\n" \ +- ".align 4\n" \ +- ".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \ +- ".previous\n" +- +-/* +- * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry +- * (with lowest bit set) for which the fault handler in fixup_exception() will +- * load -EFAULT into %r29 for a read or write fault, and zeroes the target +- * register in case of a read fault in get_user(). +- */ +-#define ASM_EXCEPTIONTABLE_REG 29 +-#define ASM_EXCEPTIONTABLE_VAR(__variable) \ +- register long __variable __asm__ ("r29") = 0 +-#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\ +- ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1) +- + #define __get_user_internal(sr, val, ptr) \ + ({ \ + ASM_EXCEPTIONTABLE_VAR(__gu_err); \ +@@ -83,7 +53,7 @@ struct exception_table_entry { + \ + __asm__("1: " ldx " 0(%%sr%2,%3),%0\n" \ + "9:\n" \ +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \ + : "=r"(__gu_val), "+r"(__gu_err) \ + : "i"(sr), "r"(ptr)); \ + \ +@@ -115,8 +85,8 @@ struct exception_table_entry { + "1: ldw 0(%%sr%2,%3),%0\n" \ + "2: ldw 4(%%sr%2,%3),%R0\n" \ + "9:\n" \ +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%1") \ ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%1") \ + : "=&r"(__gu_tmp.l), "+r"(__gu_err) \ + : "i"(sr), "r"(ptr)); \ + \ +@@ -174,7 +144,7 @@ struct exception_table_entry { + __asm__ __volatile__ ( \ + "1: " stx " %1,0(%%sr%2,%3)\n" \ + "9:\n" \ +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \ + : "+r"(__pu_err) \ + : "r"(x), "i"(sr), "r"(ptr)) + +@@ -186,15 +156,14 @@ struct exception_table_entry { + "1: stw %1,0(%%sr%2,%3)\n" \ + "2: stw %R1,4(%%sr%2,%3)\n" \ + "9:\n" \ +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b, "%0") \ ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b, "%0") \ + : "+r"(__pu_err) \ + : "r"(__val), "i"(sr), "r"(ptr)); \ + } while (0) + + #endif /* !defined(CONFIG_64BIT) */ + +- + /* + * Complex access routines -- external declarations + */ +@@ -216,7 +185,4 @@ unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src, + #define INLINE_COPY_TO_USER + #define INLINE_COPY_FROM_USER + +-struct pt_regs; +-int fixup_exception(struct pt_regs *regs); +- + #endif /* __PARISC_UACCESS_H */ +diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c +index 8f12b9f318ae6..a582928739dd5 100644 +--- a/arch/parisc/kernel/drivers.c ++++ b/arch/parisc/kernel/drivers.c +@@ -1003,6 +1003,9 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data) + + pr_info("\n"); + ++ /* Prevent hung task messages when printing on serial console */ ++ cond_resched(); ++ + pr_info("#define HPA_%08lx_DESCRIPTION \"%s\"\n", + hpa, parisc_hardware_description(&dev->id)); + +diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c +index e8a4d77cff53a..8a8e7d7224a26 100644 +--- a/arch/parisc/kernel/unaligned.c ++++ b/arch/parisc/kernel/unaligned.c +@@ -118,8 +118,8 @@ static int emulate_ldh(struct pt_regs *regs, int toreg) + "2: ldbs 1(%%sr1,%3), %0\n" + " depw %2, 23, 24, %0\n" + "3: \n" +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) ++ 
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") + : "+r" (val), "+r" (ret), "=&r" (temp1) + : "r" (saddr), "r" (regs->isr) ); + +@@ -150,8 +150,8 @@ static int emulate_ldw(struct pt_regs *regs, int toreg, int flop) + " mtctl %2,11\n" + " vshd %0,%3,%0\n" + "3: \n" +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") + : "+r" (val), "+r" (ret), "=&r" (temp1), "=&r" (temp2) + : "r" (saddr), "r" (regs->isr) ); + +@@ -187,8 +187,8 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) + " mtsar %%r19\n" + " shrpd %0,%%r20,%%sar,%0\n" + "3: \n" +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%1") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%1") + : "=r" (val), "+r" (ret) + : "0" (val), "r" (saddr), "r" (regs->isr) + : "r19", "r20" ); +@@ -207,9 +207,9 @@ static int emulate_ldd(struct pt_regs *regs, int toreg, int flop) + " vshd %0,%R0,%0\n" + " vshd %R0,%4,%R0\n" + "4: \n" +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b) ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 4b, "%1") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 4b, "%1") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 4b, "%1") + : "+r" (val), "+r" (ret), "+r" (saddr), "=&r" (shift), "=&r" (temp1) + : "r" (regs->isr) ); + } +@@ -242,8 +242,8 @@ static int emulate_sth(struct pt_regs *regs, int frreg) + "1: stb %1, 0(%%sr1, %3)\n" + "2: stb %2, 1(%%sr1, %3)\n" + "3: \n" +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0") + : "+r" (ret), "=&r" (temp1) + : "r" (val), "r" (regs->ior), "r" (regs->isr) ); + +@@ -283,8 +283,8 @@ static int emulate_stw(struct pt_regs *regs, int frreg, int flop) + " stw %%r20,0(%%sr1,%2)\n" + " stw %%r21,4(%%sr1,%2)\n" + "3: \n" +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b) ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 3b, "%0") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 3b, "%0") + : "+r" (ret) + : "r" (val), "r" (regs->ior), "r" (regs->isr) + : "r19", "r20", "r21", "r22", "r1" ); +@@ -327,10 +327,10 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) + "3: std %%r20,0(%%sr1,%2)\n" + "4: std %%r21,8(%%sr1,%2)\n" + "5: \n" +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b) ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 5b, "%0") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 5b, "%0") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 5b, "%0") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 5b, "%0") + : "+r" (ret) + : "r" (val), "r" (regs->ior), "r" (regs->isr) + : "r19", "r20", "r21", "r22", "r1" ); +@@ -356,11 +356,11 @@ static int emulate_std(struct pt_regs *regs, int frreg, int flop) + "4: stw %%r1,4(%%sr1,%3)\n" + "5: stw %2,8(%%sr1,%3)\n" + "6: \n" +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b) +- ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b) ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 6b, "%0") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 6b, "%0") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(3b, 6b, "%0") ++ 
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(4b, 6b, "%0") ++ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(5b, 6b, "%0") + : "+r" (ret) + : "r" (valh), "r" (vall), "r" (regs->ior), "r" (regs->isr) + : "r19", "r20", "r21", "r1" ); +diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c +index b00aa98b582c2..fbd9ada5e527e 100644 +--- a/arch/parisc/mm/fault.c ++++ b/arch/parisc/mm/fault.c +@@ -150,11 +150,16 @@ int fixup_exception(struct pt_regs *regs) + * Fix up get_user() and put_user(). + * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant + * bit in the relative address of the fixup routine to indicate +- * that gr[ASM_EXCEPTIONTABLE_REG] should be loaded with +- * -EFAULT to report a userspace access error. ++ * that the register encoded in the "or %r0,%r0,register" ++ * opcode should be loaded with -EFAULT to report a userspace ++ * access error. + */ + if (fix->fixup & 1) { +- regs->gr[ASM_EXCEPTIONTABLE_REG] = -EFAULT; ++ int fault_error_reg = fix->err_opcode & 0x1f; ++ if (!WARN_ON(!fault_error_reg)) ++ regs->gr[fault_error_reg] = -EFAULT; ++ pr_debug("Unalignment fixup of register %d at %pS\n", ++ fault_error_reg, (void*)regs->iaoq[0]); + + /* zero target register for get_user() */ + if (parisc_acctyp(0, regs->iir) == VM_READ) { +diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h +index 61a4736355c24..20d5052e22925 100644 +--- a/arch/powerpc/include/asm/bug.h ++++ b/arch/powerpc/include/asm/bug.h +@@ -74,7 +74,7 @@ + ##__VA_ARGS__) + + #define WARN_ENTRY(insn, flags, label, ...) \ +- asm_volatile_goto( \ ++ asm goto( \ + "1: " insn "\n" \ + EX_TABLE(1b, %l[label]) \ + _EMIT_BUG_ENTRY \ +diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h +index 93ce3ec253877..2f2a86ed2280a 100644 +--- a/arch/powerpc/include/asm/jump_label.h ++++ b/arch/powerpc/include/asm/jump_label.h +@@ -17,7 +17,7 @@ + + static __always_inline bool arch_static_branch(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + "nop # arch_static_branch\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".long 1b - ., %l[l_yes] - .\n\t" +@@ -32,7 +32,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran + + static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + "b %l[l_yes] # arch_static_branch_jump\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".long 1b - ., %l[l_yes] - .\n\t" +diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h +index af58f1ed3952e..c4b798aa6ce80 100644 +--- a/arch/powerpc/include/asm/thread_info.h ++++ b/arch/powerpc/include/asm/thread_info.h +@@ -14,7 +14,7 @@ + + #ifdef __KERNEL__ + +-#ifdef CONFIG_KASAN ++#if defined(CONFIG_KASAN) && CONFIG_THREAD_SHIFT < 15 + #define MIN_THREAD_SHIFT (CONFIG_THREAD_SHIFT + 1) + #else + #define MIN_THREAD_SHIFT CONFIG_THREAD_SHIFT +diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h +index 3ddc65c63a49e..45d4c9cf3f3a2 100644 +--- a/arch/powerpc/include/asm/uaccess.h ++++ b/arch/powerpc/include/asm/uaccess.h +@@ -72,7 +72,7 @@ __pu_failed: \ + * are no aliasing issues. 
+ */ + #define __put_user_asm_goto(x, addr, label, op) \ +- asm_volatile_goto( \ ++ asm goto( \ + "1: " op "%U1%X1 %0,%1 # put_user\n" \ + EX_TABLE(1b, %l2) \ + : \ +@@ -85,7 +85,7 @@ __pu_failed: \ + __put_user_asm_goto(x, ptr, label, "std") + #else /* __powerpc64__ */ + #define __put_user_asm2_goto(x, addr, label) \ +- asm_volatile_goto( \ ++ asm goto( \ + "1: stw%X1 %0, %1\n" \ + "2: stw%X1 %L0, %L1\n" \ + EX_TABLE(1b, %l2) \ +@@ -132,7 +132,7 @@ do { \ + #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT + + #define __get_user_asm_goto(x, addr, label, op) \ +- asm_volatile_goto( \ ++ asm_goto_output( \ + "1: "op"%U1%X1 %0, %1 # get_user\n" \ + EX_TABLE(1b, %l2) \ + : "=r" (x) \ +@@ -145,7 +145,7 @@ do { \ + __get_user_asm_goto(x, addr, label, "ld") + #else /* __powerpc64__ */ + #define __get_user_asm2_goto(x, addr, label) \ +- asm_volatile_goto( \ ++ asm_goto_output( \ + "1: lwz%X1 %0, %1\n" \ + "2: lwz%X1 %L0, %L1\n" \ + EX_TABLE(1b, %l2) \ +diff --git a/arch/powerpc/kernel/cpu_specs_e500mc.h b/arch/powerpc/kernel/cpu_specs_e500mc.h +index ceb06b109f831..2ae8e9a7b461c 100644 +--- a/arch/powerpc/kernel/cpu_specs_e500mc.h ++++ b/arch/powerpc/kernel/cpu_specs_e500mc.h +@@ -8,7 +8,8 @@ + + #ifdef CONFIG_PPC64 + #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ +- PPC_FEATURE_HAS_FPU | PPC_FEATURE_64) ++ PPC_FEATURE_HAS_FPU | PPC_FEATURE_64 | \ ++ PPC_FEATURE_BOOKE) + #else + #define COMMON_USER_BOOKE (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \ + PPC_FEATURE_BOOKE) +diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S +index a019ed6fc8393..26c151e2a7942 100644 +--- a/arch/powerpc/kernel/interrupt_64.S ++++ b/arch/powerpc/kernel/interrupt_64.S +@@ -52,7 +52,8 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) + mr r10,r1 + ld r1,PACAKSAVE(r13) + std r10,0(r1) +- std r11,_NIP(r1) ++ std r11,_LINK(r1) ++ std r11,_NIP(r1) /* Saved LR is also the next instruction */ + std r12,_MSR(r1) + std r0,GPR0(r1) + std r10,GPR1(r1) +@@ -70,7 +71,6 @@ _ASM_NOKPROBE_SYMBOL(system_call_vectored_\name) + std r9,GPR13(r1) + SAVE_NVGPRS(r1) + std r11,_XER(r1) +- std r11,_LINK(r1) + std r11,_CTR(r1) + + li r11,\trapnr +diff --git a/arch/powerpc/kernel/irq_64.c b/arch/powerpc/kernel/irq_64.c +index 9dc0ad3c533a8..5a6e44e4d36f5 100644 +--- a/arch/powerpc/kernel/irq_64.c ++++ b/arch/powerpc/kernel/irq_64.c +@@ -230,7 +230,7 @@ notrace void arch_local_irq_restore(unsigned long mask) + * This allows interrupts to be unmasked without hard disabling, and + * also without new hard interrupts coming in ahead of pending ones. 
+ */ +- asm_volatile_goto( ++ asm goto( + "1: \n" + " lbz 9,%0(13) \n" + " cmpwi 9,0 \n" +diff --git a/arch/powerpc/mm/kasan/init_32.c b/arch/powerpc/mm/kasan/init_32.c +index a70828a6d9357..aa9aa11927b2f 100644 +--- a/arch/powerpc/mm/kasan/init_32.c ++++ b/arch/powerpc/mm/kasan/init_32.c +@@ -64,6 +64,7 @@ int __init __weak kasan_init_region(void *start, size_t size) + if (ret) + return ret; + ++ k_start = k_start & PAGE_MASK; + block = memblock_alloc(k_end - k_start, PAGE_SIZE); + if (!block) + return -ENOMEM; +diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c +index 541199c6a587d..5186d65d772e2 100644 +--- a/arch/powerpc/platforms/pseries/lpar.c ++++ b/arch/powerpc/platforms/pseries/lpar.c +@@ -660,8 +660,12 @@ u64 pseries_paravirt_steal_clock(int cpu) + { + struct lppaca *lppaca = &lppaca_of(cpu); + +- return be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) + +- be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb)); ++ /* ++ * VPA steal time counters are reported at TB frequency. Hence do a ++ * conversion to ns before returning ++ */ ++ return tb_to_ns(be64_to_cpu(READ_ONCE(lppaca->enqueue_dispatch_tb)) + ++ be64_to_cpu(READ_ONCE(lppaca->ready_enqueue_tb))); + } + #endif + +diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h +index 14a5ea8d8ef0f..4a35d787c0191 100644 +--- a/arch/riscv/include/asm/jump_label.h ++++ b/arch/riscv/include/asm/jump_label.h +@@ -17,7 +17,7 @@ + static __always_inline bool arch_static_branch(struct static_key * const key, + const bool branch) + { +- asm_volatile_goto( ++ asm goto( + " .align 2 \n\t" + " .option push \n\t" + " .option norelax \n\t" +@@ -39,7 +39,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key, + static __always_inline bool arch_static_branch_jump(struct static_key * const key, + const bool branch) + { +- asm_volatile_goto( ++ asm goto( + " .align 2 \n\t" + " .option push \n\t" + " .option norelax \n\t" +diff --git a/arch/s390/include/asm/jump_label.h b/arch/s390/include/asm/jump_label.h +index 895f774bbcc55..bf78cf381dfcd 100644 +--- a/arch/s390/include/asm/jump_label.h ++++ b/arch/s390/include/asm/jump_label.h +@@ -25,7 +25,7 @@ + */ + static __always_inline bool arch_static_branch(struct static_key *key, bool branch) + { +- asm_volatile_goto("0: brcl 0,%l[label]\n" ++ asm goto("0: brcl 0,%l[label]\n" + ".pushsection __jump_table,\"aw\"\n" + ".balign 8\n" + ".long 0b-.,%l[label]-.\n" +@@ -39,7 +39,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran + + static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) + { +- asm_volatile_goto("0: brcl 15,%l[label]\n" ++ asm goto("0: brcl 15,%l[label]\n" + ".pushsection __jump_table,\"aw\"\n" + ".balign 8\n" + ".long 0b-.,%l[label]-.\n" +diff --git a/arch/sparc/include/asm/jump_label.h b/arch/sparc/include/asm/jump_label.h +index 94eb529dcb776..2718cbea826a7 100644 +--- a/arch/sparc/include/asm/jump_label.h ++++ b/arch/sparc/include/asm/jump_label.h +@@ -10,7 +10,7 @@ + + static __always_inline bool arch_static_branch(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + "nop\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" +@@ -26,7 +26,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran + + static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + "b 
%l[l_yes]\n\t" + "nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" +diff --git a/arch/um/Makefile b/arch/um/Makefile +index 3dbd0e3b660ea..778c50f273992 100644 +--- a/arch/um/Makefile ++++ b/arch/um/Makefile +@@ -118,7 +118,9 @@ archprepare: + $(Q)$(MAKE) $(build)=$(HOST_DIR)/um include/generated/user_constants.h + + LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static +-LINK-$(CONFIG_LD_SCRIPT_DYN) += $(call cc-option, -no-pie) ++ifdef CONFIG_LD_SCRIPT_DYN ++LINK-$(call gcc-min-version, 60100)$(CONFIG_CC_IS_CLANG) += -no-pie ++endif + LINK-$(CONFIG_LD_SCRIPT_DYN_RPATH) += -Wl,-rpath,/lib + + CFLAGS_NO_HARDENING := $(call cc-option, -fno-PIC,) $(call cc-option, -fno-pic,) \ +diff --git a/arch/um/include/asm/cpufeature.h b/arch/um/include/asm/cpufeature.h +index 4b6d1b526bc12..66fe06db872f0 100644 +--- a/arch/um/include/asm/cpufeature.h ++++ b/arch/um/include/asm/cpufeature.h +@@ -75,7 +75,7 @@ extern void setup_clear_cpu_cap(unsigned int bit); + */ + static __always_inline bool _static_cpu_has(u16 bit) + { +- asm_volatile_goto("1: jmp 6f\n" ++ asm goto("1: jmp 6f\n" + "2:\n" + ".skip -(((5f-4f) - (2b-1b)) > 0) * " + "((5f-4f) - (2b-1b)),0x90\n" +diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu +index 542377cd419d7..ce5ed2c2db0c9 100644 +--- a/arch/x86/Kconfig.cpu ++++ b/arch/x86/Kconfig.cpu +@@ -375,7 +375,7 @@ config X86_CMOV + config X86_MINIMUM_CPU_FAMILY + int + default "64" if X86_64 +- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8) ++ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCORE2 || MK7 || MK8) + default "5" if X86_32 && X86_CMPXCHG64 + default "4" + +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h +index ce0c8f7d32186..f835b328ba24f 100644 +--- a/arch/x86/include/asm/cpufeature.h ++++ b/arch/x86/include/asm/cpufeature.h +@@ -173,7 +173,7 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); + */ + static __always_inline bool _static_cpu_has(u16 bit) + { +- asm_volatile_goto( ++ asm goto( + ALTERNATIVE_TERNARY("jmp 6f", %P[feature], "", "jmp %l[t_no]") + ".pushsection .altinstr_aux,\"ax\"\n" + "6:\n" +diff --git a/arch/x86/include/asm/jump_label.h b/arch/x86/include/asm/jump_label.h +index 071572e23d3a0..cbbef32517f00 100644 +--- a/arch/x86/include/asm/jump_label.h ++++ b/arch/x86/include/asm/jump_label.h +@@ -24,7 +24,7 @@ + + static __always_inline bool arch_static_branch(struct static_key *key, bool branch) + { +- asm_volatile_goto("1:" ++ asm goto("1:" + "jmp %l[l_yes] # objtool NOPs this \n\t" + JUMP_TABLE_ENTRY + : : "i" (key), "i" (2 | branch) : : l_yes); +@@ -38,7 +38,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran + + static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch) + { +- asm_volatile_goto("1:" ++ asm goto("1:" + ".byte " __stringify(BYTES_NOP5) "\n\t" + JUMP_TABLE_ENTRY + : : "i" (key), "i" (branch) : : l_yes); +@@ -52,7 +52,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key, co + + static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch) + { +- asm_volatile_goto("1:" ++ asm goto("1:" + "jmp %l[l_yes]\n\t" + JUMP_TABLE_ENTRY + : : "i" (key), "i" (branch) : : l_yes); +diff --git a/arch/x86/include/asm/rmwcc.h b/arch/x86/include/asm/rmwcc.h +index 
7fa6112164172..1919ccf493cd1 100644 +--- a/arch/x86/include/asm/rmwcc.h ++++ b/arch/x86/include/asm/rmwcc.h +@@ -18,7 +18,7 @@ + #define __GEN_RMWcc(fullop, _var, cc, clobbers, ...) \ + ({ \ + bool c = false; \ +- asm_volatile_goto (fullop "; j" #cc " %l[cc_label]" \ ++ asm goto (fullop "; j" #cc " %l[cc_label]" \ + : : [var] "m" (_var), ## __VA_ARGS__ \ + : clobbers : cc_label); \ + if (0) { \ +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h +index 6ca0c661cb637..c638535eedd55 100644 +--- a/arch/x86/include/asm/uaccess.h ++++ b/arch/x86/include/asm/uaccess.h +@@ -155,7 +155,7 @@ extern int __get_user_bad(void); + + #ifdef CONFIG_X86_32 + #define __put_user_goto_u64(x, addr, label) \ +- asm_volatile_goto("\n" \ ++ asm goto("\n" \ + "1: movl %%eax,0(%1)\n" \ + "2: movl %%edx,4(%1)\n" \ + _ASM_EXTABLE_UA(1b, %l2) \ +@@ -317,7 +317,7 @@ do { \ + } while (0) + + #define __get_user_asm(x, addr, itype, ltype, label) \ +- asm_volatile_goto("\n" \ ++ asm_goto_output("\n" \ + "1: mov"itype" %[umem],%[output]\n" \ + _ASM_EXTABLE_UA(1b, %l2) \ + : [output] ltype(x) \ +@@ -397,7 +397,7 @@ do { \ + __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ + __typeof__(*(_ptr)) __old = *_old; \ + __typeof__(*(_ptr)) __new = (_new); \ +- asm_volatile_goto("\n" \ ++ asm_goto_output("\n" \ + "1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\ + _ASM_EXTABLE_UA(1b, %l[label]) \ + : CC_OUT(z) (success), \ +@@ -416,7 +416,7 @@ do { \ + __typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \ + __typeof__(*(_ptr)) __old = *_old; \ + __typeof__(*(_ptr)) __new = (_new); \ +- asm_volatile_goto("\n" \ ++ asm_goto_output("\n" \ + "1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \ + _ASM_EXTABLE_UA(1b, %l[label]) \ + : CC_OUT(z) (success), \ +@@ -499,7 +499,7 @@ struct __large_struct { unsigned long buf[100]; }; + * aliasing issues. + */ + #define __put_user_goto(x, addr, itype, ltype, label) \ +- asm_volatile_goto("\n" \ ++ asm goto("\n" \ + "1: mov"itype" %0,%1\n" \ + _ASM_EXTABLE_UA(1b, %l2) \ + : : ltype(x), "m" (__m(addr)) \ +diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h +index 6c2e3ff3cb28f..724ce44809ed2 100644 +--- a/arch/x86/include/asm/virtext.h ++++ b/arch/x86/include/asm/virtext.h +@@ -43,9 +43,9 @@ static inline int cpu_has_vmx(void) + */ + static inline int cpu_vmxoff(void) + { +- asm_volatile_goto("1: vmxoff\n\t" +- _ASM_EXTABLE(1b, %l[fault]) +- ::: "cc", "memory" : fault); ++ asm goto("1: vmxoff\n\t" ++ _ASM_EXTABLE(1b, %l[fault]) ++ ::: "cc", "memory" : fault); + + cr4_clear_bits(X86_CR4_VMXE); + return 0; +@@ -129,9 +129,9 @@ static inline void cpu_svm_disable(void) + * case, GIF must already be set, otherwise the NMI would have + * been blocked, so just eat the fault. + */ +- asm_volatile_goto("1: stgi\n\t" +- _ASM_EXTABLE(1b, %l[fault]) +- ::: "memory" : fault); ++ asm goto("1: stgi\n\t" ++ _ASM_EXTABLE(1b, %l[fault]) ++ ::: "memory" : fault); + fault: + wrmsrl(MSR_EFER, efer & ~EFER_SVME); + } +diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c +index 558076dbde5bf..247f2225aa9f3 100644 +--- a/arch/x86/kernel/fpu/signal.c ++++ b/arch/x86/kernel/fpu/signal.c +@@ -274,12 +274,13 @@ static int __restore_fpregs_from_user(void __user *buf, u64 ufeatures, + * Attempt to restore the FPU registers directly from user memory. + * Pagefaults are handled and any errors returned are fatal. 
+ */ +-static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, +- bool fx_only, unsigned int size) ++static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, bool fx_only) + { + struct fpu *fpu = ¤t->thread.fpu; + int ret; + ++ /* Restore enabled features only. */ ++ xrestore &= fpu->fpstate->user_xfeatures; + retry: + fpregs_lock(); + /* Ensure that XFD is up to date */ +@@ -309,7 +310,7 @@ static bool restore_fpregs_from_user(void __user *buf, u64 xrestore, + if (ret != X86_TRAP_PF) + return false; + +- if (!fault_in_readable(buf, size)) ++ if (!fault_in_readable(buf, fpu->fpstate->user_size)) + goto retry; + return false; + } +@@ -339,7 +340,6 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, + struct user_i387_ia32_struct env; + bool success, fx_only = false; + union fpregs_state *fpregs; +- unsigned int state_size; + u64 user_xfeatures = 0; + + if (use_xsave()) { +@@ -349,17 +349,14 @@ static bool __fpu_restore_sig(void __user *buf, void __user *buf_fx, + return false; + + fx_only = !fx_sw_user.magic1; +- state_size = fx_sw_user.xstate_size; + user_xfeatures = fx_sw_user.xfeatures; + } else { + user_xfeatures = XFEATURE_MASK_FPSSE; +- state_size = fpu->fpstate->user_size; + } + + if (likely(!ia32_fxstate)) { + /* Restore the FPU registers directly from user memory. */ +- return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only, +- state_size); ++ return restore_fpregs_from_user(buf_fx, user_xfeatures, fx_only); + } + + /* +diff --git a/arch/x86/kvm/svm/svm_ops.h b/arch/x86/kvm/svm/svm_ops.h +index 36c8af87a707a..4e725854c63a1 100644 +--- a/arch/x86/kvm/svm/svm_ops.h ++++ b/arch/x86/kvm/svm/svm_ops.h +@@ -8,7 +8,7 @@ + + #define svm_asm(insn, clobber...) \ + do { \ +- asm_volatile_goto("1: " __stringify(insn) "\n\t" \ ++ asm goto("1: " __stringify(insn) "\n\t" \ + _ASM_EXTABLE(1b, %l[fault]) \ + ::: clobber : fault); \ + return; \ +@@ -18,7 +18,7 @@ fault: \ + + #define svm_asm1(insn, op1, clobber...) \ + do { \ +- asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \ ++ asm goto("1: " __stringify(insn) " %0\n\t" \ + _ASM_EXTABLE(1b, %l[fault]) \ + :: op1 : clobber : fault); \ + return; \ +@@ -28,7 +28,7 @@ fault: \ + + #define svm_asm2(insn, op1, op2, clobber...) 
\ + do { \ +- asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \ ++ asm goto("1: " __stringify(insn) " %1, %0\n\t" \ + _ASM_EXTABLE(1b, %l[fault]) \ + :: op1, op2 : clobber : fault); \ + return; \ +diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c +index 9a75a0d5deae1..220cdbe1e286e 100644 +--- a/arch/x86/kvm/vmx/pmu_intel.c ++++ b/arch/x86/kvm/vmx/pmu_intel.c +@@ -38,7 +38,7 @@ static int fixed_pmc_events[] = {1, 0, 7}; + static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data) + { + struct kvm_pmc *pmc; +- u8 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl; ++ u64 old_fixed_ctr_ctrl = pmu->fixed_ctr_ctrl; + int i; + + pmu->fixed_ctr_ctrl = data; +diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c +index 98d732b9418f1..57c1374fdfd49 100644 +--- a/arch/x86/kvm/vmx/vmx.c ++++ b/arch/x86/kvm/vmx/vmx.c +@@ -2469,10 +2469,10 @@ static int kvm_cpu_vmxon(u64 vmxon_pointer) + + cr4_set_bits(X86_CR4_VMXE); + +- asm_volatile_goto("1: vmxon %[vmxon_pointer]\n\t" +- _ASM_EXTABLE(1b, %l[fault]) +- : : [vmxon_pointer] "m"(vmxon_pointer) +- : : fault); ++ asm goto("1: vmxon %[vmxon_pointer]\n\t" ++ _ASM_EXTABLE(1b, %l[fault]) ++ : : [vmxon_pointer] "m"(vmxon_pointer) ++ : : fault); + return 0; + + fault: +diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h +index ec268df83ed67..5edab28dfb2ef 100644 +--- a/arch/x86/kvm/vmx/vmx_ops.h ++++ b/arch/x86/kvm/vmx/vmx_ops.h +@@ -73,7 +73,7 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field) + + #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT + +- asm_volatile_goto("1: vmread %[field], %[output]\n\t" ++ asm_goto_output("1: vmread %[field], %[output]\n\t" + "jna %l[do_fail]\n\t" + + _ASM_EXTABLE(1b, %l[do_exception]) +@@ -166,7 +166,7 @@ static __always_inline unsigned long vmcs_readl(unsigned long field) + + #define vmx_asm1(insn, op1, error_args...) \ + do { \ +- asm_volatile_goto("1: " __stringify(insn) " %0\n\t" \ ++ asm goto("1: " __stringify(insn) " %0\n\t" \ + ".byte 0x2e\n\t" /* branch not taken hint */ \ + "jna %l[error]\n\t" \ + _ASM_EXTABLE(1b, %l[fault]) \ +@@ -183,7 +183,7 @@ fault: \ + + #define vmx_asm2(insn, op1, op2, error_args...) \ + do { \ +- asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t" \ ++ asm goto("1: " __stringify(insn) " %1, %0\n\t" \ + ".byte 0x2e\n\t" /* branch not taken hint */ \ + "jna %l[error]\n\t" \ + _ASM_EXTABLE(1b, %l[fault]) \ +diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c +index 968d7005f4a72..f50cc210a9818 100644 +--- a/arch/x86/mm/ident_map.c ++++ b/arch/x86/mm/ident_map.c +@@ -26,18 +26,31 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page, + for (; addr < end; addr = next) { + pud_t *pud = pud_page + pud_index(addr); + pmd_t *pmd; ++ bool use_gbpage; + + next = (addr & PUD_MASK) + PUD_SIZE; + if (next > end) + next = end; + +- if (info->direct_gbpages) { +- pud_t pudval; ++ /* if this is already a gbpage, this portion is already mapped */ ++ if (pud_large(*pud)) ++ continue; ++ ++ /* Is using a gbpage allowed? */ ++ use_gbpage = info->direct_gbpages; + +- if (pud_present(*pud)) +- continue; ++ /* Don't use gbpage if it maps more than the requested region. */ ++ /* at the begining: */ ++ use_gbpage &= ((addr & ~PUD_MASK) == 0); ++ /* ... 
or at the end: */ ++ use_gbpage &= ((next & ~PUD_MASK) == 0); ++ ++ /* Never overwrite existing mappings */ ++ use_gbpage &= !pud_present(*pud); ++ ++ if (use_gbpage) { ++ pud_t pudval; + +- addr &= PUD_MASK; + pudval = __pud((addr - info->offset) | info->page_flag); + set_pud(pud, pudval); + continue; +diff --git a/arch/xtensa/include/asm/jump_label.h b/arch/xtensa/include/asm/jump_label.h +index c812bf85021c0..46c8596259d2d 100644 +--- a/arch/xtensa/include/asm/jump_label.h ++++ b/arch/xtensa/include/asm/jump_label.h +@@ -13,7 +13,7 @@ + static __always_inline bool arch_static_branch(struct static_key *key, + bool branch) + { +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + "_nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".word 1b, %l[l_yes], %c0\n\t" +@@ -38,7 +38,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, + * make it reachable and wrap both into a no-transform block + * to avoid any assembler interference with this. + */ +- asm_volatile_goto("1:\n\t" ++ asm goto("1:\n\t" + ".begin no-transform\n\t" + "_j %l[l_yes]\n\t" + "2:\n\t" +diff --git a/block/blk-mq.c b/block/blk-mq.c +index c07e5eebcbd85..7ed6b9469f979 100644 +--- a/block/blk-mq.c ++++ b/block/blk-mq.c +@@ -747,11 +747,16 @@ static void req_bio_endio(struct request *rq, struct bio *bio, + /* + * Partial zone append completions cannot be supported as the + * BIO fragments may end up not being written sequentially. ++ * For such case, force the completed nbytes to be equal to ++ * the BIO size so that bio_advance() sets the BIO remaining ++ * size to 0 and we end up calling bio_endio() before returning. + */ +- if (bio->bi_iter.bi_size != nbytes) ++ if (bio->bi_iter.bi_size != nbytes) { + bio->bi_status = BLK_STS_IOERR; +- else ++ nbytes = bio->bi_iter.bi_size; ++ } else { + bio->bi_iter.bi_sector = rq->__sector; ++ } + } + + bio_advance(bio, nbytes); +diff --git a/drivers/android/binder.c b/drivers/android/binder.c +index d933ef6cc65af..55cd17a13e758 100644 +--- a/drivers/android/binder.c ++++ b/drivers/android/binder.c +@@ -477,6 +477,16 @@ binder_enqueue_thread_work_ilocked(struct binder_thread *thread, + { + WARN_ON(!list_empty(&thread->waiting_thread_node)); + binder_enqueue_work_ilocked(work, &thread->todo); ++ ++ /* (e)poll-based threads require an explicit wakeup signal when ++ * queuing their own work; they rely on these events to consume ++ * messages without I/O block. Without it, threads risk waiting ++ * indefinitely without handling the work. ++ */ ++ if (thread->looper & BINDER_LOOPER_STATE_POLL && ++ thread->pid == current->pid && !thread->process_todo) ++ wake_up_interruptible_sync(&thread->wait); ++ + thread->process_todo = true; + } + +diff --git a/drivers/base/core.c b/drivers/base/core.c +index af90bfb0cc3d8..3078f44dc1861 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -337,10 +337,12 @@ static bool device_is_ancestor(struct device *dev, struct device *target) + return false; + } + ++#define DL_MARKER_FLAGS (DL_FLAG_INFERRED | \ ++ DL_FLAG_CYCLE | \ ++ DL_FLAG_MANAGED) + static inline bool device_link_flag_is_sync_state_only(u32 flags) + { +- return (flags & ~(DL_FLAG_INFERRED | DL_FLAG_CYCLE)) == +- (DL_FLAG_SYNC_STATE_ONLY | DL_FLAG_MANAGED); ++ return (flags & ~DL_MARKER_FLAGS) == DL_FLAG_SYNC_STATE_ONLY; + } + + /** +@@ -2054,9 +2056,14 @@ static int fw_devlink_create_devlink(struct device *con, + + /* + * SYNC_STATE_ONLY device links don't block probing and supports cycles. +- * So cycle detection isn't necessary and shouldn't be done. 
++ * So, one might expect that cycle detection isn't necessary for them. ++ * However, if the device link was marked as SYNC_STATE_ONLY because ++ * it's part of a cycle, then we still need to do cycle detection. This ++ * is because the consumer and supplier might be part of multiple cycles ++ * and we need to detect all those cycles. + */ +- if (!(flags & DL_FLAG_SYNC_STATE_ONLY)) { ++ if (!device_link_flag_is_sync_state_only(flags) || ++ flags & DL_FLAG_CYCLE) { + device_links_write_lock(); + if (__fw_devlink_relax_cycles(con, sup_handle)) { + __fwnode_link_cycle(link); +diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c +index 56ceba4698024..d238b47f74c34 100644 +--- a/drivers/base/power/domain.c ++++ b/drivers/base/power/domain.c +@@ -1052,7 +1052,7 @@ static int __init genpd_power_off_unused(void) + + return 0; + } +-late_initcall(genpd_power_off_unused); ++late_initcall_sync(genpd_power_off_unused); + + #ifdef CONFIG_PM_SLEEP + +diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c +index 5eb0fe73ddc45..79fc96c8d8364 100644 +--- a/drivers/bus/moxtet.c ++++ b/drivers/bus/moxtet.c +@@ -830,6 +830,12 @@ static void moxtet_remove(struct spi_device *spi) + mutex_destroy(&moxtet->lock); + } + ++static const struct spi_device_id moxtet_spi_ids[] = { ++ { "moxtet" }, ++ { }, ++}; ++MODULE_DEVICE_TABLE(spi, moxtet_spi_ids); ++ + static const struct of_device_id moxtet_dt_ids[] = { + { .compatible = "cznic,moxtet" }, + {}, +@@ -841,6 +847,7 @@ static struct spi_driver moxtet_spi_driver = { + .name = "moxtet", + .of_match_table = moxtet_dt_ids, + }, ++ .id_table = moxtet_spi_ids, + .probe = moxtet_probe, + .remove = moxtet_remove, + }; +diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c +index b8e02c3a19610..bbfb0f288dc35 100644 +--- a/drivers/crypto/ccp/sev-dev.c ++++ b/drivers/crypto/ccp/sev-dev.c +@@ -515,10 +515,16 @@ EXPORT_SYMBOL_GPL(sev_platform_init); + + static int __sev_platform_shutdown_locked(int *error) + { +- struct sev_device *sev = psp_master->sev_data; ++ struct psp_device *psp = psp_master; ++ struct sev_device *sev; + int ret; + +- if (!sev || sev->state == SEV_STATE_UNINIT) ++ if (!psp || !psp->sev_data) ++ return 0; ++ ++ sev = psp->sev_data; ++ ++ if (sev->state == SEV_STATE_UNINIT) + return 0; + + ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); +diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c +index e2070df6cad28..0b846c605d4bd 100644 +--- a/drivers/dma/ioat/dma.c ++++ b/drivers/dma/ioat/dma.c +@@ -584,11 +584,11 @@ desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) + } + + /** +- * __cleanup - reclaim used descriptors ++ * __ioat_cleanup - reclaim used descriptors + * @ioat_chan: channel (ring) to clean + * @phys_complete: zeroed (or not) completion address (from status) + */ +-static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) ++static void __ioat_cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete) + { + struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma; + struct ioat_ring_ent *desc; +@@ -675,7 +675,7 @@ static void ioat_cleanup(struct ioatdma_chan *ioat_chan) + spin_lock_bh(&ioat_chan->cleanup_lock); + + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) +- __cleanup(ioat_chan, phys_complete); ++ __ioat_cleanup(ioat_chan, phys_complete); + + if (is_ioat_halted(*ioat_chan->completion)) { + u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); +@@ -712,7 +712,7 @@ static void ioat_restart_channel(struct ioatdma_chan 
*ioat_chan) + + ioat_quiesce(ioat_chan, 0); + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) +- __cleanup(ioat_chan, phys_complete); ++ __ioat_cleanup(ioat_chan, phys_complete); + + __ioat_restart_chan(ioat_chan); + } +@@ -786,7 +786,7 @@ static void ioat_eh(struct ioatdma_chan *ioat_chan) + + /* cleanup so tail points to descriptor that caused the error */ + if (ioat_cleanup_preamble(ioat_chan, &phys_complete)) +- __cleanup(ioat_chan, phys_complete); ++ __ioat_cleanup(ioat_chan, phys_complete); + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); +@@ -943,7 +943,7 @@ void ioat_timer_event(struct timer_list *t) + /* timer restarted in ioat_cleanup_preamble + * and IOAT_COMPLETION_ACK cleared + */ +- __cleanup(ioat_chan, phys_complete); ++ __ioat_cleanup(ioat_chan, phys_complete); + goto unlock_out; + } + +diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c +index 74bab06283b71..1879ec27c0236 100644 +--- a/drivers/firewire/core-device.c ++++ b/drivers/firewire/core-device.c +@@ -100,10 +100,9 @@ static int textual_leaf_to_string(const u32 *block, char *buf, size_t size) + * @buf: where to put the string + * @size: size of @buf, in bytes + * +- * The string is taken from a minimal ASCII text descriptor leaf after +- * the immediate entry with @key. The string is zero-terminated. +- * An overlong string is silently truncated such that it and the +- * zero byte fit into @size. ++ * The string is taken from a minimal ASCII text descriptor leaf just after the entry with the ++ * @key. The string is zero-terminated. An overlong string is silently truncated such that it ++ * and the zero byte fit into @size. + * + * Returns strlen(buf) or a negative error code. 
+ */ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +index 4b91f95066eca..6a4749c0c5a58 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +@@ -4203,7 +4203,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) + drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); + + cancel_delayed_work_sync(&adev->delayed_init_work); +- flush_delayed_work(&adev->gfx.gfx_off_delay_work); + + amdgpu_ras_suspend(adev); + +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +index 23f0067f92e4e..b803e785d3aff 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +@@ -585,8 +585,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) + + if (adev->gfx.gfx_off_req_count == 0 && + !adev->gfx.gfx_off_state) { +- schedule_delayed_work(&adev->gfx.gfx_off_delay_work, ++ /* If going to s2idle, no need to wait */ ++ if (adev->in_s0ix) { ++ if (!amdgpu_dpm_set_powergating_by_smu(adev, ++ AMD_IP_BLOCK_TYPE_GFX, true)) ++ adev->gfx.gfx_off_state = true; ++ } else { ++ schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + delay); ++ } + } + } else { + if (adev->gfx.gfx_off_req_count == 0) { +diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +index f02e509d5facb..a826c92933199 100644 +--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c ++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +@@ -6001,7 +6001,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, + if (recalculate_timing) { + freesync_mode = get_highest_refresh_rate_mode(aconnector, false); + drm_mode_copy(&saved_mode, &mode); ++ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio; + drm_mode_copy(&mode, freesync_mode); ++ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio; + } else { + decide_crtc_timing_for_drm_display_mode( + &mode, preferred_mode, scale); +diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile +index ca7d240006213..6fdf87a6e240f 100644 +--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile ++++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile +@@ -60,11 +60,11 @@ ifdef CONFIG_DRM_AMD_DC_DCN + CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags) + CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags) + CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags) +-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) ++CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag) + CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags) +-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) ++CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag) + CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags) +-CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) ++CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag) + CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags) + CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag) + CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) +diff --git a/drivers/gpu/drm/drm_prime.c 
b/drivers/gpu/drm/drm_prime.c +index eb09e86044c6d..68a6d4b0ead75 100644 +--- a/drivers/gpu/drm/drm_prime.c ++++ b/drivers/gpu/drm/drm_prime.c +@@ -828,7 +828,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, + if (max_segment == 0) + max_segment = UINT_MAX; + err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0, +- nr_pages << PAGE_SHIFT, ++ (unsigned long)nr_pages << PAGE_SHIFT, + max_segment, GFP_KERNEL); + if (err) { + kfree(sg); +diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c +index d12ba47b37c4f..0de3612135e96 100644 +--- a/drivers/gpu/drm/msm/msm_iommu.c ++++ b/drivers/gpu/drm/msm/msm_iommu.c +@@ -21,6 +21,8 @@ struct msm_iommu_pagetable { + struct msm_mmu base; + struct msm_mmu *parent; + struct io_pgtable_ops *pgtbl_ops; ++ const struct iommu_flush_ops *tlb; ++ struct device *iommu_dev; + unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ + phys_addr_t ttbr; + u32 asid; +@@ -194,11 +196,33 @@ static const struct msm_mmu_funcs pagetable_funcs = { + + static void msm_iommu_tlb_flush_all(void *cookie) + { ++ struct msm_iommu_pagetable *pagetable = cookie; ++ struct adreno_smmu_priv *adreno_smmu; ++ ++ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) ++ return; ++ ++ adreno_smmu = dev_get_drvdata(pagetable->parent->dev); ++ ++ pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie); ++ ++ pm_runtime_put_autosuspend(pagetable->iommu_dev); + } + + static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size, + size_t granule, void *cookie) + { ++ struct msm_iommu_pagetable *pagetable = cookie; ++ struct adreno_smmu_priv *adreno_smmu; ++ ++ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev)) ++ return; ++ ++ adreno_smmu = dev_get_drvdata(pagetable->parent->dev); ++ ++ pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie); ++ ++ pm_runtime_put_autosuspend(pagetable->iommu_dev); + } + + static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, +@@ -206,7 +230,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, + { + } + +-static const struct iommu_flush_ops null_tlb_ops = { ++static const struct iommu_flush_ops tlb_ops = { + .tlb_flush_all = msm_iommu_tlb_flush_all, + .tlb_flush_walk = msm_iommu_tlb_flush_walk, + .tlb_add_page = msm_iommu_tlb_add_page, +@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) + + /* The incoming cfg will have the TTBR1 quirk enabled */ + ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1; +- ttbr0_cfg.tlb = &null_tlb_ops; ++ ttbr0_cfg.tlb = &tlb_ops; + + pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, +- &ttbr0_cfg, iommu->domain); ++ &ttbr0_cfg, pagetable); + + if (!pagetable->pgtbl_ops) { + kfree(pagetable); +@@ -282,6 +306,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent) + + /* Needed later for TLB flush */ + pagetable->parent = parent; ++ pagetable->tlb = ttbr1_cfg->tlb; ++ pagetable->iommu_dev = ttbr1_cfg->iommu_dev; + pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap; + pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr; + +diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c +index 31a5b81ee9fc4..be6674fb1af71 100644 +--- a/drivers/gpu/drm/nouveau/nouveau_svm.c ++++ b/drivers/gpu/drm/nouveau/nouveau_svm.c +@@ -997,7 +997,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id) + if (ret) + return ret; + +- buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, 
GFP_KERNEL); ++ buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL); + if (!buffer->fault) + return -ENOMEM; + +diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c +index 0035affc3e590..9b2d235168bb6 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_drv.c ++++ b/drivers/gpu/drm/virtio/virtgpu_drv.c +@@ -93,6 +93,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev) + goto err_free; + } + ++ dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX); + ret = virtio_gpu_init(vdev, dev); + if (ret) + goto err_free; +diff --git a/drivers/hid/i2c-hid/i2c-hid-of.c b/drivers/hid/i2c-hid/i2c-hid-of.c +index 97a27a803f58d..6feb812fce375 100644 +--- a/drivers/hid/i2c-hid/i2c-hid-of.c ++++ b/drivers/hid/i2c-hid/i2c-hid-of.c +@@ -80,6 +80,7 @@ static int i2c_hid_of_probe(struct i2c_client *client, + if (!ihid_of) + return -ENOMEM; + ++ ihid_of->client = client; + ihid_of->ops.power_up = i2c_hid_of_power_up; + ihid_of->ops.power_down = i2c_hid_of_power_down; + +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c +index af163e8dfec07..12d4c28741d7e 100644 +--- a/drivers/hid/wacom_sys.c ++++ b/drivers/hid/wacom_sys.c +@@ -2080,7 +2080,7 @@ static int wacom_allocate_inputs(struct wacom *wacom) + return 0; + } + +-static int wacom_register_inputs(struct wacom *wacom) ++static int wacom_setup_inputs(struct wacom *wacom) + { + struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; + struct wacom_wac *wacom_wac = &(wacom->wacom_wac); +@@ -2099,10 +2099,6 @@ static int wacom_register_inputs(struct wacom *wacom) + input_free_device(pen_input_dev); + wacom_wac->pen_input = NULL; + pen_input_dev = NULL; +- } else { +- error = input_register_device(pen_input_dev); +- if (error) +- goto fail; + } + + error = wacom_setup_touch_input_capabilities(touch_input_dev, wacom_wac); +@@ -2111,10 +2107,6 @@ static int wacom_register_inputs(struct wacom *wacom) + input_free_device(touch_input_dev); + wacom_wac->touch_input = NULL; + touch_input_dev = NULL; +- } else { +- error = input_register_device(touch_input_dev); +- if (error) +- goto fail; + } + + error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac); +@@ -2123,7 +2115,34 @@ static int wacom_register_inputs(struct wacom *wacom) + input_free_device(pad_input_dev); + wacom_wac->pad_input = NULL; + pad_input_dev = NULL; +- } else { ++ } ++ ++ return 0; ++} ++ ++static int wacom_register_inputs(struct wacom *wacom) ++{ ++ struct input_dev *pen_input_dev, *touch_input_dev, *pad_input_dev; ++ struct wacom_wac *wacom_wac = &(wacom->wacom_wac); ++ int error = 0; ++ ++ pen_input_dev = wacom_wac->pen_input; ++ touch_input_dev = wacom_wac->touch_input; ++ pad_input_dev = wacom_wac->pad_input; ++ ++ if (pen_input_dev) { ++ error = input_register_device(pen_input_dev); ++ if (error) ++ goto fail; ++ } ++ ++ if (touch_input_dev) { ++ error = input_register_device(touch_input_dev); ++ if (error) ++ goto fail; ++ } ++ ++ if (pad_input_dev) { + error = input_register_device(pad_input_dev); + if (error) + goto fail; +@@ -2379,6 +2398,20 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) + goto fail; + } + ++ error = wacom_setup_inputs(wacom); ++ if (error) ++ goto fail; ++ ++ if (features->type == HID_GENERIC) ++ connect_mask |= HID_CONNECT_DRIVER; ++ ++ /* Regular HID work starts now */ ++ error = hid_hw_start(hdev, connect_mask); ++ if (error) { ++ hid_err(hdev, "hw start failed\n"); ++ goto fail; ++ } ++ + error = wacom_register_inputs(wacom); + 
if (error) + goto fail; +@@ -2393,16 +2426,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless) + goto fail; + } + +- if (features->type == HID_GENERIC) +- connect_mask |= HID_CONNECT_DRIVER; +- +- /* Regular HID work starts now */ +- error = hid_hw_start(hdev, connect_mask); +- if (error) { +- hid_err(hdev, "hw start failed\n"); +- goto fail; +- } +- + if (!wireless) { + /* Note that if query fails it is not a hard failure */ + wacom_query_tablet_data(wacom); +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index 165ed872fa4e7..53235b276bb24 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -2571,7 +2571,14 @@ static void wacom_wac_pen_report(struct hid_device *hdev, + wacom_wac->hid_data.tipswitch); + input_report_key(input, wacom_wac->tool[0], sense); + if (wacom_wac->serial[0]) { +- input_event(input, EV_MSC, MSC_SERIAL, wacom_wac->serial[0]); ++ /* ++ * xf86-input-wacom does not accept a serial number ++ * of '0'. Report the low 32 bits if possible, but ++ * if they are zero, report the upper ones instead. ++ */ ++ __u32 serial_lo = wacom_wac->serial[0] & 0xFFFFFFFFu; ++ __u32 serial_hi = wacom_wac->serial[0] >> 32; ++ input_event(input, EV_MSC, MSC_SERIAL, (int)(serial_lo ? serial_lo : serial_hi)); + input_report_abs(input, ABS_MISC, sense ? id : 0); + } + +diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile +index e73cdb1d2b5a8..784a803279d99 100644 +--- a/drivers/i2c/busses/Makefile ++++ b/drivers/i2c/busses/Makefile +@@ -89,10 +89,8 @@ obj-$(CONFIG_I2C_NPCM) += i2c-npcm7xx.o + obj-$(CONFIG_I2C_OCORES) += i2c-ocores.o + obj-$(CONFIG_I2C_OMAP) += i2c-omap.o + obj-$(CONFIG_I2C_OWL) += i2c-owl.o +-i2c-pasemi-objs := i2c-pasemi-core.o i2c-pasemi-pci.o +-obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi.o +-i2c-apple-objs := i2c-pasemi-core.o i2c-pasemi-platform.o +-obj-$(CONFIG_I2C_APPLE) += i2c-apple.o ++obj-$(CONFIG_I2C_PASEMI) += i2c-pasemi-core.o i2c-pasemi-pci.o ++obj-$(CONFIG_I2C_APPLE) += i2c-pasemi-core.o i2c-pasemi-platform.o + obj-$(CONFIG_I2C_PCA_PLATFORM) += i2c-pca-platform.o + obj-$(CONFIG_I2C_PNX) += i2c-pnx.o + obj-$(CONFIG_I2C_PXA) += i2c-pxa.o +diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c +index 3159ffbb77a20..9a4e9bf304c28 100644 +--- a/drivers/i2c/busses/i2c-i801.c ++++ b/drivers/i2c/busses/i2c-i801.c +@@ -500,11 +500,10 @@ static int i801_block_transaction_by_block(struct i801_priv *priv, + /* Set block buffer mode */ + outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv)); + +- inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ +- + if (read_write == I2C_SMBUS_WRITE) { + len = data->block[0]; + outb_p(len, SMBHSTDAT0(priv)); ++ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ + for (i = 0; i < len; i++) + outb_p(data->block[i+1], SMBBLKDAT(priv)); + } +@@ -520,6 +519,7 @@ static int i801_block_transaction_by_block(struct i801_priv *priv, + return -EPROTO; + + data->block[0] = len; ++ inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */ + for (i = 0; i < len; i++) + data->block[i + 1] = inb_p(SMBBLKDAT(priv)); + } +diff --git a/drivers/i2c/busses/i2c-pasemi-core.c b/drivers/i2c/busses/i2c-pasemi-core.c +index 9028ffb58cc07..f297e41352e7a 100644 +--- a/drivers/i2c/busses/i2c-pasemi-core.c ++++ b/drivers/i2c/busses/i2c-pasemi-core.c +@@ -356,3 +356,8 @@ int pasemi_i2c_common_probe(struct pasemi_smbus *smbus) + + return 0; + } ++EXPORT_SYMBOL_GPL(pasemi_i2c_common_probe); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Olof Johansson 
<olof@lixom.net>"); ++MODULE_DESCRIPTION("PA Semi PWRficient SMBus driver"); +diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c +index 8fce98bb77ff9..75b9c3f26bba6 100644 +--- a/drivers/i2c/busses/i2c-qcom-geni.c ++++ b/drivers/i2c/busses/i2c-qcom-geni.c +@@ -605,20 +605,20 @@ static int geni_i2c_gpi_xfer(struct geni_i2c_dev *gi2c, struct i2c_msg msgs[], i + + peripheral.addr = msgs[i].addr; + ++ ret = geni_i2c_gpi(gi2c, &msgs[i], &config, ++ &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c); ++ if (ret) ++ goto err; ++ + if (msgs[i].flags & I2C_M_RD) { + ret = geni_i2c_gpi(gi2c, &msgs[i], &config, + &rx_addr, &rx_buf, I2C_READ, gi2c->rx_c); + if (ret) + goto err; +- } +- +- ret = geni_i2c_gpi(gi2c, &msgs[i], &config, +- &tx_addr, &tx_buf, I2C_WRITE, gi2c->tx_c); +- if (ret) +- goto err; + +- if (msgs[i].flags & I2C_M_RD) + dma_async_issue_pending(gi2c->rx_c); ++ } ++ + dma_async_issue_pending(gi2c->tx_c); + + timeout = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT); +diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig +index ffac66db7ac92..1f34747a68bfe 100644 +--- a/drivers/iio/accel/Kconfig ++++ b/drivers/iio/accel/Kconfig +@@ -219,10 +219,12 @@ config BMA400 + + config BMA400_I2C + tristate ++ select REGMAP_I2C + depends on BMA400 + + config BMA400_SPI + tristate ++ select REGMAP_SPI + depends on BMA400 + + config BMC150_ACCEL +diff --git a/drivers/iio/imu/bno055/Kconfig b/drivers/iio/imu/bno055/Kconfig +index 83e53acfbe880..c7f5866a177d9 100644 +--- a/drivers/iio/imu/bno055/Kconfig ++++ b/drivers/iio/imu/bno055/Kconfig +@@ -8,6 +8,7 @@ config BOSCH_BNO055 + config BOSCH_BNO055_SERIAL + tristate "Bosch BNO055 attached via UART" + depends on SERIAL_DEV_BUS ++ select REGMAP + select BOSCH_BNO055 + help + Enable this to support Bosch BNO055 IMUs attached via UART. 
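The three Kconfig hunks just above (BMA400_I2C, BMA400_SPI, BOSCH_BNO055_SERIAL) fix the same class of randconfig build failure: a driver whose register I/O sits on top of regmap must select the regmap flavour it links against (REGMAP_I2C, REGMAP_SPI, or the core REGMAP symbol), rather than relying on some other enabled driver to drag it in. A minimal sketch of the dependency being repaired, using a hypothetical I2C driver (the demo_* names are not from the patch):

/*
 * Hypothetical sketch: devm_regmap_init_i2c() resolves to code in
 * drivers/base/regmap/regmap-i2c.c, which is only compiled when
 * CONFIG_REGMAP_I2C is set -- hence the "select REGMAP_I2C" above.
 */
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

static const struct regmap_config demo_regmap_cfg = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int demo_probe(struct i2c_client *client)
{
	struct regmap *map;

	map = devm_regmap_init_i2c(client, &demo_regmap_cfg);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* All further register access goes through the regmap core. */
	return regmap_write(map, 0x00, 0x01);
}

Using "select" rather than "depends on" keeps these drivers visible in menuconfig while forcing the needed regmap helper on whenever the driver itself is enabled.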
+diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c +index c9614982cb671..a2f8278f00856 100644 +--- a/drivers/iio/industrialio-core.c ++++ b/drivers/iio/industrialio-core.c +@@ -1601,10 +1601,13 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev) + ret = iio_device_register_sysfs_group(indio_dev, + &iio_dev_opaque->chan_attr_group); + if (ret) +- goto error_clear_attrs; ++ goto error_free_chan_attrs; + + return 0; + ++error_free_chan_attrs: ++ kfree(iio_dev_opaque->chan_attr_group.attrs); ++ iio_dev_opaque->chan_attr_group.attrs = NULL; + error_clear_attrs: + iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); + +diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c +index 5a1a625d8d16d..85097b769c209 100644 +--- a/drivers/iio/light/hid-sensor-als.c ++++ b/drivers/iio/light/hid-sensor-als.c +@@ -228,6 +228,7 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev, + case HID_USAGE_SENSOR_TIME_TIMESTAMP: + als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes, + *(s64 *)raw_data); ++ ret = 0; + break; + default: + break; +diff --git a/drivers/iio/magnetometer/rm3100-core.c b/drivers/iio/magnetometer/rm3100-core.c +index 69938204456f8..42b70cd42b393 100644 +--- a/drivers/iio/magnetometer/rm3100-core.c ++++ b/drivers/iio/magnetometer/rm3100-core.c +@@ -530,6 +530,7 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq) + struct rm3100_data *data; + unsigned int tmp; + int ret; ++ int samp_rate_index; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!indio_dev) +@@ -586,9 +587,14 @@ int rm3100_common_probe(struct device *dev, struct regmap *regmap, int irq) + ret = regmap_read(regmap, RM3100_REG_TMRC, &tmp); + if (ret < 0) + return ret; ++ ++ samp_rate_index = tmp - RM3100_TMRC_OFFSET; ++ if (samp_rate_index < 0 || samp_rate_index >= RM3100_SAMP_NUM) { ++ dev_err(dev, "The value read from RM3100_REG_TMRC is invalid!\n"); ++ return -EINVAL; ++ } + /* Initializing max wait time, which is double conversion time. */ +- data->conversion_time = rm3100_samp_rates[tmp - RM3100_TMRC_OFFSET][2] +- * 2; ++ data->conversion_time = rm3100_samp_rates[samp_rate_index][2] * 2; + + /* Cycle count values may not be what we want. 
*/ + if ((tmp - RM3100_TMRC_OFFSET) == 0) +diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c +index 4859b99d54fc2..01faec6ea5285 100644 +--- a/drivers/infiniband/hw/irdma/verbs.c ++++ b/drivers/infiniband/hw/irdma/verbs.c +@@ -2845,6 +2845,13 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len, + + switch (req.reg_type) { + case IRDMA_MEMREG_TYPE_QP: ++ /* iWarp: Catch page not starting on OS page boundary */ ++ if (!rdma_protocol_roce(&iwdev->ibdev, 1) && ++ ib_umem_offset(iwmr->region)) { ++ err = -EINVAL; ++ goto error; ++ } ++ + total = req.sq_pages + req.rq_pages + shadow_pgcnt; + if (total > iwmr->page_cnt) { + err = -EINVAL; +diff --git a/drivers/interconnect/qcom/sc8180x.c b/drivers/interconnect/qcom/sc8180x.c +index 83461e31774ec..d9ee193fb18bd 100644 +--- a/drivers/interconnect/qcom/sc8180x.c ++++ b/drivers/interconnect/qcom/sc8180x.c +@@ -1387,6 +1387,7 @@ static struct qcom_icc_bcm bcm_mm0 = { + + static struct qcom_icc_bcm bcm_co0 = { + .name = "CO0", ++ .keepalive = true, + .num_nodes = 1, + .nodes = { &slv_qns_cdsp_mem_noc } + }; +diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c +index 091b0fe7e3242..5d4421f75b43a 100644 +--- a/drivers/irqchip/irq-brcmstb-l2.c ++++ b/drivers/irqchip/irq-brcmstb-l2.c +@@ -2,7 +2,7 @@ + /* + * Generic Broadcom Set Top Box Level 2 Interrupt controller driver + * +- * Copyright (C) 2014-2017 Broadcom ++ * Copyright (C) 2014-2024 Broadcom + */ + + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +@@ -113,6 +113,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) + generic_handle_domain_irq(b->domain, irq); + } while (status); + out: ++ /* Don't ack parent before all device writes are done */ ++ wmb(); ++ + chained_irq_exit(chip, desc); + } + +diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c +index 8956881503d9a..b83b39e93e1a9 100644 +--- a/drivers/irqchip/irq-gic-v3-its.c ++++ b/drivers/irqchip/irq-gic-v3-its.c +@@ -3805,8 +3805,9 @@ static int its_vpe_set_affinity(struct irq_data *d, + bool force) + { + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); +- int from, cpu = cpumask_first(mask_val); ++ struct cpumask common, *table_mask; + unsigned long flags; ++ int from, cpu; + + /* + * Changing affinity is mega expensive, so let's be as lazy as +@@ -3822,19 +3823,22 @@ static int its_vpe_set_affinity(struct irq_data *d, + * taken on any vLPI handling path that evaluates vpe->col_idx. + */ + from = vpe_to_cpuid_lock(vpe, &flags); +- if (from == cpu) +- goto out; +- +- vpe->col_idx = cpu; ++ table_mask = gic_data_rdist_cpu(from)->vpe_table_mask; + + /* +- * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD +- * is sharing its VPE table with the current one. ++ * If we are offered another CPU in the same GICv4.1 ITS ++ * affinity, pick this one. Otherwise, any CPU will do. + */ +- if (gic_data_rdist_cpu(cpu)->vpe_table_mask && +- cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) ++ if (table_mask && cpumask_and(&common, mask_val, table_mask)) ++ cpu = cpumask_test_cpu(from, &common) ? 
from : cpumask_first(&common); ++ else ++ cpu = cpumask_first(mask_val); ++ ++ if (from == cpu) + goto out; + ++ vpe->col_idx = cpu; ++ + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + +diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c +index 3d99b8bdd8ef1..de115ee6e9ec7 100644 +--- a/drivers/irqchip/irq-loongson-eiointc.c ++++ b/drivers/irqchip/irq-loongson-eiointc.c +@@ -242,7 +242,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq, + int ret; + unsigned int i, type; + unsigned long hwirq = 0; +- struct eiointc *priv = domain->host_data; ++ struct eiointc_priv *priv = domain->host_data; + + ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type); + if (ret) +diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h +index 71dcd8fd4050a..6314210d36971 100644 +--- a/drivers/md/dm-core.h ++++ b/drivers/md/dm-core.h +@@ -21,6 +21,8 @@ + #include "dm-ima.h" + + #define DM_RESERVED_MAX_IOS 1024 ++#define DM_MAX_TARGETS 1048576 ++#define DM_MAX_TARGET_PARAMS 1024 + + struct dm_io; + +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index ff515437d81e7..0e6068ee783e7 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -72,10 +72,8 @@ struct dm_crypt_io { + struct bio *base_bio; + u8 *integrity_metadata; + bool integrity_metadata_from_pool:1; +- bool in_tasklet:1; + + struct work_struct work; +- struct tasklet_struct tasklet; + + struct convert_context ctx; + +@@ -1729,7 +1727,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, + io->ctx.r.req = NULL; + io->integrity_metadata = NULL; + io->integrity_metadata_from_pool = false; +- io->in_tasklet = false; + atomic_set(&io->io_pending, 0); + } + +@@ -1738,12 +1735,6 @@ static void crypt_inc_pending(struct dm_crypt_io *io) + atomic_inc(&io->io_pending); + } + +-static void kcryptd_io_bio_endio(struct work_struct *work) +-{ +- struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); +- bio_endio(io->base_bio); +-} +- + /* + * One of the bios was finished. Check for completion of + * the whole request and correctly clean up the buffer. +@@ -1767,20 +1758,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io) + + base_bio->bi_status = error; + +- /* +- * If we are running this function from our tasklet, +- * we can't call bio_endio() here, because it will call +- * clone_endio() from dm.c, which in turn will +- * free the current struct dm_crypt_io structure with +- * our tasklet. In this case we need to delay bio_endio() +- * execution to after the tasklet is done and dequeued. +- */ +- if (io->in_tasklet) { +- INIT_WORK(&io->work, kcryptd_io_bio_endio); +- queue_work(cc->io_queue, &io->work); +- return; +- } +- + bio_endio(base_bio); + } + +@@ -2213,11 +2190,6 @@ static void kcryptd_crypt(struct work_struct *work) + kcryptd_crypt_write_convert(io); + } + +-static void kcryptd_crypt_tasklet(unsigned long work) +-{ +- kcryptd_crypt((struct work_struct *)work); +-} +- + static void kcryptd_queue_crypt(struct dm_crypt_io *io) + { + struct crypt_config *cc = io->cc; +@@ -2229,15 +2201,10 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io) + * irqs_disabled(): the kernel may run some IO completion from the idle thread, but + * it is being executed with irqs disabled. 
+ */ +- if (in_hardirq() || irqs_disabled()) { +- io->in_tasklet = true; +- tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work); +- tasklet_schedule(&io->tasklet); ++ if (!(in_hardirq() || irqs_disabled())) { ++ kcryptd_crypt(&io->work); + return; + } +- +- kcryptd_crypt(&io->work); +- return; + } + + INIT_WORK(&io->work, kcryptd_crypt); +diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c +index 206e6ce554dc7..4376754816abe 100644 +--- a/drivers/md/dm-ioctl.c ++++ b/drivers/md/dm-ioctl.c +@@ -1877,7 +1877,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern + minimum_data_size - sizeof(param_kernel->version))) + return -EFAULT; + +- if (param_kernel->data_size < minimum_data_size) { ++ if (unlikely(param_kernel->data_size < minimum_data_size) || ++ unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS)) { + DMERR("Invalid data size in the ioctl structure: %u", + param_kernel->data_size); + return -EINVAL; +diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c +index dac6a5f25f2be..e0367a672eabf 100644 +--- a/drivers/md/dm-table.c ++++ b/drivers/md/dm-table.c +@@ -128,7 +128,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num) + int dm_table_create(struct dm_table **result, fmode_t mode, + unsigned int num_targets, struct mapped_device *md) + { +- struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL); ++ struct dm_table *t; ++ ++ if (num_targets > DM_MAX_TARGETS) ++ return -EOVERFLOW; ++ ++ t = kzalloc(sizeof(*t), GFP_KERNEL); + + if (!t) + return -ENOMEM; +@@ -143,7 +148,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode, + + if (!num_targets) { + kfree(t); +- return -ENOMEM; ++ return -EOVERFLOW; + } + + if (alloc_targets(t, num_targets)) { +diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c +index 24df610a2c438..4669923f4cfb4 100644 +--- a/drivers/md/dm-verity-target.c ++++ b/drivers/md/dm-verity-target.c +@@ -634,23 +634,6 @@ static void verity_work(struct work_struct *w) + verity_finish_io(io, errno_to_blk_status(verity_verify_io(io))); + } + +-static void verity_tasklet(unsigned long data) +-{ +- struct dm_verity_io *io = (struct dm_verity_io *)data; +- int err; +- +- io->in_tasklet = true; +- err = verity_verify_io(io); +- if (err == -EAGAIN || err == -ENOMEM) { +- /* fallback to retrying with work-queue */ +- INIT_WORK(&io->work, verity_work); +- queue_work(io->v->verify_wq, &io->work); +- return; +- } +- +- verity_finish_io(io, errno_to_blk_status(err)); +-} +- + static void verity_end_io(struct bio *bio) + { + struct dm_verity_io *io = bio->bi_private; +@@ -663,13 +646,8 @@ static void verity_end_io(struct bio *bio) + return; + } + +- if (static_branch_unlikely(&use_tasklet_enabled) && io->v->use_tasklet) { +- tasklet_init(&io->tasklet, verity_tasklet, (unsigned long)io); +- tasklet_schedule(&io->tasklet); +- } else { +- INIT_WORK(&io->work, verity_work); +- queue_work(io->v->verify_wq, &io->work); +- } ++ INIT_WORK(&io->work, verity_work); ++ queue_work(io->v->verify_wq, &io->work); + } + + /* +diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h +index f9d522c870e61..f3f6070084196 100644 +--- a/drivers/md/dm-verity.h ++++ b/drivers/md/dm-verity.h +@@ -83,7 +83,6 @@ struct dm_verity_io { + struct bvec_iter iter; + + struct work_struct work; +- struct tasklet_struct tasklet; + + /* + * Three variably-size fields follow this struct: +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 3ccf1920682cb..c7efe15229514 100644 +--- 
a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -963,9 +963,10 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, + return; + + bio = bio_alloc_bioset(rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev, +- 1, +- REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA, +- GFP_NOIO, &mddev->sync_set); ++ 1, ++ REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META ++ | REQ_PREFLUSH | REQ_FUA, ++ GFP_NOIO, &mddev->sync_set); + + atomic_inc(&rdev->nr_pending); + +diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c +index aeb6bb63667eb..41abb18b00acb 100644 +--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c ++++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c +@@ -559,7 +559,7 @@ static int rkisp1_probe(struct platform_device *pdev) + rkisp1->irqs[il] = irq; + } + +- ret = devm_request_irq(dev, irq, info->isrs[i].isr, 0, ++ ret = devm_request_irq(dev, irq, info->isrs[i].isr, IRQF_SHARED, + dev_driver_string(dev), dev); + if (ret) { + dev_err(dev, "request irq failed: %d\n", ret); +diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c +index fe17c7f98e810..52d82cbe7685f 100644 +--- a/drivers/media/rc/bpf-lirc.c ++++ b/drivers/media/rc/bpf-lirc.c +@@ -253,7 +253,7 @@ int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) + if (attr->attach_flags) + return -EINVAL; + +- rcdev = rc_dev_get_from_fd(attr->target_fd); ++ rcdev = rc_dev_get_from_fd(attr->target_fd, true); + if (IS_ERR(rcdev)) + return PTR_ERR(rcdev); + +@@ -278,7 +278,7 @@ int lirc_prog_detach(const union bpf_attr *attr) + if (IS_ERR(prog)) + return PTR_ERR(prog); + +- rcdev = rc_dev_get_from_fd(attr->target_fd); ++ rcdev = rc_dev_get_from_fd(attr->target_fd, true); + if (IS_ERR(rcdev)) { + bpf_prog_put(prog); + return PTR_ERR(rcdev); +@@ -303,7 +303,7 @@ int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) + if (attr->query.query_flags) + return -EINVAL; + +- rcdev = rc_dev_get_from_fd(attr->query.target_fd); ++ rcdev = rc_dev_get_from_fd(attr->query.target_fd, false); + if (IS_ERR(rcdev)) + return PTR_ERR(rcdev); + +diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c +index 1968067092594..69e630d85262f 100644 +--- a/drivers/media/rc/ir_toy.c ++++ b/drivers/media/rc/ir_toy.c +@@ -332,6 +332,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count) + sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP); + if (err) { + dev_err(irtoy->dev, "exit sample mode: %d\n", err); ++ kfree(buf); + return err; + } + +@@ -339,6 +340,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count) + sizeof(COMMAND_SMODE_ENTER), STATE_COMMAND); + if (err) { + dev_err(irtoy->dev, "enter sample mode: %d\n", err); ++ kfree(buf); + return err; + } + +diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c +index 184e0b35744f3..adb8c794a2d7b 100644 +--- a/drivers/media/rc/lirc_dev.c ++++ b/drivers/media/rc/lirc_dev.c +@@ -814,7 +814,7 @@ void __exit lirc_dev_exit(void) + unregister_chrdev_region(lirc_base_dev, RC_DEV_MAX); + } + +-struct rc_dev *rc_dev_get_from_fd(int fd) ++struct rc_dev *rc_dev_get_from_fd(int fd, bool write) + { + struct fd f = fdget(fd); + struct lirc_fh *fh; +@@ -828,6 +828,9 @@ struct rc_dev *rc_dev_get_from_fd(int fd) + return ERR_PTR(-EINVAL); + } + ++ if (write && !(f.file->f_mode & FMODE_WRITE)) ++ return ERR_PTR(-EPERM); ++ + fh = f.file->private_data; + dev = fh->rc; + +diff --git a/drivers/media/rc/rc-core-priv.h b/drivers/media/rc/rc-core-priv.h +index 
ef1e95e1af7fc..7df949fc65e2b 100644 +--- a/drivers/media/rc/rc-core-priv.h ++++ b/drivers/media/rc/rc-core-priv.h +@@ -325,7 +325,7 @@ void lirc_raw_event(struct rc_dev *dev, struct ir_raw_event ev); + void lirc_scancode_event(struct rc_dev *dev, struct lirc_scancode *lsc); + int lirc_register(struct rc_dev *dev); + void lirc_unregister(struct rc_dev *dev); +-struct rc_dev *rc_dev_get_from_fd(int fd); ++struct rc_dev *rc_dev_get_from_fd(int fd, bool write); + #else + static inline int lirc_dev_init(void) { return 0; } + static inline void lirc_dev_exit(void) {} +diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c +index cc57cc8204328..69cc24962706c 100644 +--- a/drivers/misc/fastrpc.c ++++ b/drivers/misc/fastrpc.c +@@ -1990,7 +1990,7 @@ static int fastrpc_cb_remove(struct platform_device *pdev) + int i; + + spin_lock_irqsave(&cctx->lock, flags); +- for (i = 1; i < FASTRPC_MAX_SESSIONS; i++) { ++ for (i = 0; i < FASTRPC_MAX_SESSIONS; i++) { + if (cctx->session[i].sid == sess->sid) { + cctx->session[i].valid = false; + cctx->sesscount--; +diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c +index dd2a4b6ab6adb..e3c69c6b85a6c 100644 +--- a/drivers/mmc/core/slot-gpio.c ++++ b/drivers/mmc/core/slot-gpio.c +@@ -62,11 +62,15 @@ int mmc_gpio_alloc(struct mmc_host *host) + int mmc_gpio_get_ro(struct mmc_host *host) + { + struct mmc_gpio *ctx = host->slot.handler_priv; ++ int cansleep; + + if (!ctx || !ctx->ro_gpio) + return -ENOSYS; + +- return gpiod_get_value_cansleep(ctx->ro_gpio); ++ cansleep = gpiod_cansleep(ctx->ro_gpio); ++ return cansleep ? ++ gpiod_get_value_cansleep(ctx->ro_gpio) : ++ gpiod_get_value(ctx->ro_gpio); + } + EXPORT_SYMBOL(mmc_gpio_get_ro); + +diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c +index bca1d095b7597..24bb0e9809e76 100644 +--- a/drivers/mmc/host/sdhci-pci-o2micro.c ++++ b/drivers/mmc/host/sdhci-pci-o2micro.c +@@ -602,6 +602,35 @@ static void sdhci_pci_o2_set_clock(struct sdhci_host *host, unsigned int clock) + sdhci_o2_enable_clk(host, clk); + } + ++static void sdhci_pci_o2_set_power(struct sdhci_host *host, unsigned char mode, unsigned short vdd) ++{ ++ struct sdhci_pci_chip *chip; ++ struct sdhci_pci_slot *slot = sdhci_priv(host); ++ u32 scratch_32 = 0; ++ u8 scratch_8 = 0; ++ ++ chip = slot->chip; ++ ++ if (mode == MMC_POWER_OFF) { ++ /* UnLock WP */ ++ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8); ++ scratch_8 &= 0x7f; ++ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); ++ ++ /* Set PCR 0x354[16] to switch Clock Source back to OPE Clock */ ++ pci_read_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, &scratch_32); ++ scratch_32 &= ~(O2_SD_SEL_DLL); ++ pci_write_config_dword(chip->pdev, O2_SD_OUTPUT_CLK_SOURCE_SWITCH, scratch_32); ++ ++ /* Lock WP */ ++ pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch_8); ++ scratch_8 |= 0x80; ++ pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch_8); ++ } ++ ++ sdhci_set_power(host, mode, vdd); ++} ++ + static int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) + { + struct sdhci_pci_chip *chip; +@@ -911,6 +940,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = { + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, ++ .set_power = sdhci_pci_o2_set_power, + }; + + const struct sdhci_pci_fixes sdhci_o2 = { +diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c +index 8efa22d9f214d..053d375eae4f5 100644 +--- 
a/drivers/net/can/dev/netlink.c ++++ b/drivers/net/can/dev/netlink.c +@@ -311,7 +311,7 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], + /* Neither of TDC parameters nor TDC flags are + * provided: do calculation + */ +- can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming, ++ can_calc_tdco(&priv->tdc, priv->tdc_const, &dbt, + &priv->ctrlmode, priv->ctrlmode_supported); + } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly + * turned off. TDC is disabled: do nothing +diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c +index 63d43ef86f9b9..76455405a6d8e 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_main.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c +@@ -5333,7 +5333,7 @@ static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf) + { + int v, ret = 0; + +- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { ++ for (v = 0; v < pf->num_alloc_vsi; v++) { + if (pf->vsi[v]) { + ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]); + if (ret) +diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +index 3d3db58090ed1..ed4be80fec2a5 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +@@ -2846,6 +2846,24 @@ static int i40e_vc_get_stats_msg(struct i40e_vf *vf, u8 *msg) + (u8 *)&stats, sizeof(stats)); + } + ++/** ++ * i40e_can_vf_change_mac ++ * @vf: pointer to the VF info ++ * ++ * Return true if the VF is allowed to change its MAC filters, false otherwise ++ */ ++static bool i40e_can_vf_change_mac(struct i40e_vf *vf) ++{ ++ /* If the VF MAC address has been set administratively (via the ++ * ndo_set_vf_mac command), then deny permission to the VF to ++ * add/delete unicast MAC addresses, unless the VF is trusted ++ */ ++ if (vf->pf_set_mac && !vf->trusted) ++ return false; ++ ++ return true; ++} ++ + #define I40E_MAX_MACVLAN_PER_HW 3072 + #define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \ + (num_ports)) +@@ -2905,8 +2923,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, + * The VF may request to set the MAC address filter already + * assigned to it so do not return an error in that case. + */ +- if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && +- !is_multicast_ether_addr(addr) && vf->pf_set_mac && ++ if (!i40e_can_vf_change_mac(vf) && ++ !is_multicast_ether_addr(addr) && + !ether_addr_equal(addr, vf->default_lan_addr.addr)) { + dev_err(&pf->pdev->dev, + "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n"); +@@ -3049,19 +3067,29 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) + ret = I40E_ERR_INVALID_MAC_ADDR; + goto error_param; + } +- if (ether_addr_equal(al->list[i].addr, vf->default_lan_addr.addr)) +- was_unimac_deleted = true; + } + vsi = pf->vsi[vf->lan_vsi_idx]; + + spin_lock_bh(&vsi->mac_filter_hash_lock); + /* delete addresses from the list */ +- for (i = 0; i < al->num_elements; i++) ++ for (i = 0; i < al->num_elements; i++) { ++ const u8 *addr = al->list[i].addr; ++ ++ /* Allow to delete VF primary MAC only if it was not set ++ * administratively by PF or if VF is trusted. 
++ */ ++ if (ether_addr_equal(addr, vf->default_lan_addr.addr) && ++ i40e_can_vf_change_mac(vf)) ++ was_unimac_deleted = true; ++ else ++ continue; ++ + if (i40e_del_mac_filter(vsi, al->list[i].addr)) { + ret = I40E_ERR_INVALID_MAC_ADDR; + spin_unlock_bh(&vsi->mac_filter_hash_lock); + goto error_param; + } ++ } + + spin_unlock_bh(&vsi->mac_filter_hash_lock); + +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +index 3b9ba8fa247ab..dc2e204bcd727 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +@@ -65,6 +65,8 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, + tcam->max_groups = max_groups; + tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, + ACL_MAX_GROUP_SIZE); ++ tcam->max_group_size = min_t(unsigned int, tcam->max_group_size, ++ MLXSW_REG_PAGT_ACL_MAX_NUM); + + err = ops->init(mlxsw_sp, tcam->priv, tcam); + if (err) +diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c +index 41fa2523d91d3..5f2cd9a8cf8fb 100644 +--- a/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c ++++ b/drivers/net/ethernet/microchip/lan966x/lan966x_lag.c +@@ -37,19 +37,24 @@ static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x) + + /* Now, set PGIDs for each active LAG */ + for (lag = 0; lag < lan966x->num_phys_ports; ++lag) { +- struct net_device *bond = lan966x->ports[lag]->bond; ++ struct lan966x_port *port = lan966x->ports[lag]; + int num_active_ports = 0; ++ struct net_device *bond; + unsigned long bond_mask; + u8 aggr_idx[16]; + +- if (!bond || (visited & BIT(lag))) ++ if (!port || !port->bond || (visited & BIT(lag))) + continue; + ++ bond = port->bond; + bond_mask = lan966x_lag_get_mask(lan966x, bond); + + for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) { + struct lan966x_port *port = lan966x->ports[p]; + ++ if (!port) ++ continue; ++ + lan_wr(ANA_PGID_PGID_SET(bond_mask), + lan966x, ANA_PGID(p)); + if (port->lag_tx_active) +diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +index f7492be452aed..7af03b45555dd 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +@@ -1379,10 +1379,30 @@ static void nfp_nft_ct_translate_mangle_action(struct flow_action_entry *mangle_ + mangle_action->mangle.mask = (__force u32)cpu_to_be32(mangle_action->mangle.mask); + return; + ++ /* Both struct tcphdr and struct udphdr start with ++ * __be16 source; ++ * __be16 dest; ++ * so we can use the same code for both. ++ */ + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: +- mangle_action->mangle.val = (__force u16)cpu_to_be16(mangle_action->mangle.val); +- mangle_action->mangle.mask = (__force u16)cpu_to_be16(mangle_action->mangle.mask); ++ if (mangle_action->mangle.offset == offsetof(struct tcphdr, source)) { ++ mangle_action->mangle.val = ++ (__force u32)cpu_to_be32(mangle_action->mangle.val << 16); ++ /* The mask of mangle action is inverse mask, ++ * so clear the dest tp port with 0xFFFF to ++ * instead of rotate-left operation. 
++ */ ++ mangle_action->mangle.mask = ++ (__force u32)cpu_to_be32(mangle_action->mangle.mask << 16 | 0xFFFF); ++ } ++ if (mangle_action->mangle.offset == offsetof(struct tcphdr, dest)) { ++ mangle_action->mangle.offset = 0; ++ mangle_action->mangle.val = ++ (__force u32)cpu_to_be32(mangle_action->mangle.val); ++ mangle_action->mangle.mask = ++ (__force u32)cpu_to_be32(mangle_action->mangle.mask); ++ } + return; + + default: +diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +index 52f67157bd0f7..a3c52c91a575d 100644 +--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c ++++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +@@ -980,7 +980,7 @@ nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev, + u16 nfp_mac_idx = 0; + + entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr); +- if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) { ++ if (entry && (nfp_tunnel_is_mac_idx_global(entry->index) || netif_is_lag_port(netdev))) { + if (entry->bridge_count || + !nfp_flower_is_supported_bridge(netdev)) { + nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, +diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +index 33b4c28563162..3f10c5365c80e 100644 +--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c ++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +@@ -537,11 +537,13 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) + const u32 barcfg_msix_general = + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) | +- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT; ++ NFP_PCIE_BAR_PCIE2CPP_LengthSelect( ++ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT); + const u32 barcfg_msix_xpb = + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) | +- NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT | ++ NFP_PCIE_BAR_PCIE2CPP_LengthSelect( ++ NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT) | + NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress( + NFP_CPP_TARGET_ISLAND_XPB); + const u32 barcfg_explicit[4] = { +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index e988a60c8561b..66178ce6d000e 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -3826,6 +3826,9 @@ static int __stmmac_open(struct net_device *dev, + priv->rx_copybreak = STMMAC_RX_COPYBREAK; + + buf_sz = dma_conf->dma_buf_sz; ++ for (int i = 0; i < MTL_MAX_TX_QUEUES; i++) ++ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) ++ dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; + memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); + + stmmac_reset_queues_param(priv); +diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c +index 13c9c2d6b79bb..d95771ca4e5a3 100644 +--- a/drivers/net/ethernet/ti/cpsw.c ++++ b/drivers/net/ethernet/ti/cpsw.c +@@ -631,6 +631,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv) + } + } + ++ phy->mac_managed_pm = true; ++ + slave->phy = phy; + + phy_attached_info(slave->phy); +diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c +index 83596ec0c7cb9..6e70aa1cc7bf1 100644 +--- a/drivers/net/ethernet/ti/cpsw_new.c ++++ b/drivers/net/ethernet/ti/cpsw_new.c +@@ -772,6 +772,9 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv 
*priv) + slave->slave_num); + return; + } ++ ++ phy->mac_managed_pm = true; ++ + slave->phy = phy; + + phy_attached_info(slave->phy); +diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c +index da737d959e81c..3a834d4e1c842 100644 +--- a/drivers/net/hyperv/netvsc.c ++++ b/drivers/net/hyperv/netvsc.c +@@ -740,7 +740,10 @@ void netvsc_device_remove(struct hv_device *device) + /* Disable NAPI and disassociate its context from the device. */ + for (i = 0; i < net_device->num_chn; i++) { + /* See also vmbus_reset_channel_cb(). */ +- napi_disable(&net_device->chan_table[i].napi); ++ /* only disable enabled NAPI channel */ ++ if (i < ndev->real_num_rx_queues) ++ napi_disable(&net_device->chan_table[i].napi); ++ + netif_napi_del(&net_device->chan_table[i].napi); + } + +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index c1aac6ceb29e6..1b74055399840 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -42,6 +42,10 @@ + #define LINKCHANGE_INT (2 * HZ) + #define VF_TAKEOVER_INT (HZ / 10) + ++/* Macros to define the context of vf registration */ ++#define VF_REG_IN_PROBE 1 ++#define VF_REG_IN_NOTIFIER 2 ++ + static unsigned int ring_size __ro_after_init = 128; + module_param(ring_size, uint, 0444); + MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)"); +@@ -2181,7 +2185,7 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb) + } + + static int netvsc_vf_join(struct net_device *vf_netdev, +- struct net_device *ndev) ++ struct net_device *ndev, int context) + { + struct net_device_context *ndev_ctx = netdev_priv(ndev); + int ret; +@@ -2204,7 +2208,11 @@ static int netvsc_vf_join(struct net_device *vf_netdev, + goto upper_link_failed; + } + +- schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); ++ /* If this registration is called from probe context vf_takeover ++ * is taken care of later in probe itself. 
++ */ ++ if (context == VF_REG_IN_NOTIFIER) ++ schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT); + + call_netdevice_notifiers(NETDEV_JOIN, vf_netdev); + +@@ -2342,7 +2350,7 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev) + return NOTIFY_DONE; + } + +-static int netvsc_register_vf(struct net_device *vf_netdev) ++static int netvsc_register_vf(struct net_device *vf_netdev, int context) + { + struct net_device_context *net_device_ctx; + struct netvsc_device *netvsc_dev; +@@ -2382,7 +2390,7 @@ static int netvsc_register_vf(struct net_device *vf_netdev) + + netdev_info(ndev, "VF registering: %s\n", vf_netdev->name); + +- if (netvsc_vf_join(vf_netdev, ndev) != 0) ++ if (netvsc_vf_join(vf_netdev, ndev, context) != 0) + return NOTIFY_DONE; + + dev_hold(vf_netdev); +@@ -2480,10 +2488,31 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev) + return NOTIFY_OK; + } + ++static int check_dev_is_matching_vf(struct net_device *event_ndev) ++{ ++ /* Skip NetVSC interfaces */ ++ if (event_ndev->netdev_ops == &device_ops) ++ return -ENODEV; ++ ++ /* Avoid non-Ethernet type devices */ ++ if (event_ndev->type != ARPHRD_ETHER) ++ return -ENODEV; ++ ++ /* Avoid Vlan dev with same MAC registering as VF */ ++ if (is_vlan_dev(event_ndev)) ++ return -ENODEV; ++ ++ /* Avoid Bonding master dev with same MAC registering as VF */ ++ if (netif_is_bond_master(event_ndev)) ++ return -ENODEV; ++ ++ return 0; ++} ++ + static int netvsc_probe(struct hv_device *dev, + const struct hv_vmbus_device_id *dev_id) + { +- struct net_device *net = NULL; ++ struct net_device *net = NULL, *vf_netdev; + struct net_device_context *net_device_ctx; + struct netvsc_device_info *device_info = NULL; + struct netvsc_device *nvdev; +@@ -2592,6 +2621,30 @@ static int netvsc_probe(struct hv_device *dev, + } + + list_add(&net_device_ctx->list, &netvsc_dev_list); ++ ++ /* When the hv_netvsc driver is unloaded and reloaded, the ++ * NET_DEVICE_REGISTER for the vf device is replayed before probe ++ * is complete. This is because register_netdevice_notifier() gets ++ * registered before vmbus_driver_register() so that callback func ++ * is set before probe and we don't miss events like NETDEV_POST_INIT ++ * So, in this section we try to register the matching vf device that ++ * is present as a netdevice, knowing that its register call is not ++ * processed in the netvsc_netdev_notifier(as probing is progress and ++ * get_netvsc_byslot fails). 
++ */ ++ for_each_netdev(dev_net(net), vf_netdev) { ++ ret = check_dev_is_matching_vf(vf_netdev); ++ if (ret != 0) ++ continue; ++ ++ if (net != get_netvsc_byslot(vf_netdev)) ++ continue; ++ ++ netvsc_prepare_bonding(vf_netdev); ++ netvsc_register_vf(vf_netdev, VF_REG_IN_PROBE); ++ __netvsc_vf_setup(net, vf_netdev); ++ break; ++ } + rtnl_unlock(); + + netvsc_devinfo_put(device_info); +@@ -2748,28 +2801,17 @@ static int netvsc_netdev_event(struct notifier_block *this, + unsigned long event, void *ptr) + { + struct net_device *event_dev = netdev_notifier_info_to_dev(ptr); ++ int ret = 0; + +- /* Skip our own events */ +- if (event_dev->netdev_ops == &device_ops) +- return NOTIFY_DONE; +- +- /* Avoid non-Ethernet type devices */ +- if (event_dev->type != ARPHRD_ETHER) +- return NOTIFY_DONE; +- +- /* Avoid Vlan dev with same MAC registering as VF */ +- if (is_vlan_dev(event_dev)) +- return NOTIFY_DONE; +- +- /* Avoid Bonding master dev with same MAC registering as VF */ +- if (netif_is_bond_master(event_dev)) ++ ret = check_dev_is_matching_vf(event_dev); ++ if (ret != 0) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_POST_INIT: + return netvsc_prepare_bonding(event_dev); + case NETDEV_REGISTER: +- return netvsc_register_vf(event_dev); ++ return netvsc_register_vf(event_dev, VF_REG_IN_NOTIFIER); + case NETDEV_UNREGISTER: + return netvsc_unregister_vf(event_dev); + case NETDEV_UP: +diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +index 585e8cd2d332d..f5fcc547de391 100644 +--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c ++++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +@@ -576,7 +576,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 2) { +- ret = PTR_ERR(wifi_pkg); ++ ret = -EINVAL; + goto out_free; + } + +@@ -592,7 +592,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 1) { +- ret = PTR_ERR(wifi_pkg); ++ ret = -EINVAL; + goto out_free; + } + +@@ -608,7 +608,7 @@ int iwl_sar_get_wrds_table(struct iwl_fw_runtime *fwrt) + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0) { +- ret = PTR_ERR(wifi_pkg); ++ ret = -EINVAL; + goto out_free; + } + +@@ -665,7 +665,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 2) { +- ret = PTR_ERR(wifi_pkg); ++ ret = -EINVAL; + goto out_free; + } + +@@ -681,7 +681,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 1) { +- ret = PTR_ERR(wifi_pkg); ++ ret = -EINVAL; + goto out_free; + } + +@@ -697,7 +697,7 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) + &tbl_rev); + if (!IS_ERR(wifi_pkg)) { + if (tbl_rev != 0) { +- ret = PTR_ERR(wifi_pkg); ++ ret = -EINVAL; + goto out_free; + } + +@@ -1044,6 +1044,9 @@ int iwl_acpi_get_ppag_table(struct iwl_fw_runtime *fwrt) + goto read_table; + } + ++ ret = PTR_ERR(wifi_pkg); ++ goto out_free; ++ + read_table: + fwrt->ppag_ver = tbl_rev; + flags = &wifi_pkg->package.elements[1]; +diff --git a/drivers/net/wireless/marvell/mwifiex/Kconfig b/drivers/net/wireless/marvell/mwifiex/Kconfig +index 2b4ff2b78a7e1..b182f7155d66f 100644 +--- a/drivers/net/wireless/marvell/mwifiex/Kconfig ++++ b/drivers/net/wireless/marvell/mwifiex/Kconfig +@@ -10,13 +10,14 @@ config MWIFIEX + mwifiex. 
+ + config MWIFIEX_SDIO +- tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8987/SD8997" ++ tristate "Marvell WiFi-Ex Driver for SD8786/SD8787/SD8797/SD8887/SD8897/SD8977/SD8978/SD8987/SD8997" + depends on MWIFIEX && MMC + select FW_LOADER + select WANT_DEV_COREDUMP + help + This adds support for wireless adapters based on Marvell +- 8786/8787/8797/8887/8897/8977/8987/8997 chipsets with SDIO interface. ++ 8786/8787/8797/8887/8897/8977/8978/8987/8997 chipsets with ++ SDIO interface. SD8978 is also known as NXP IW416. + + If you choose to build it as a module, it will be called + mwifiex_sdio. +diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c +index ea1c1c2412e72..2c9b70e9a7263 100644 +--- a/drivers/net/wireless/marvell/mwifiex/sdio.c ++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c +@@ -263,7 +263,7 @@ static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8887 = { + 0x68, 0x69, 0x6a}, + }; + +-static const struct mwifiex_sdio_card_reg mwifiex_reg_sd8987 = { ++static const struct mwifiex_sdio_card_reg mwifiex_reg_sd89xx = { + .start_rd_port = 0, + .start_wr_port = 0, + .base_0_reg = 0xF8, +@@ -331,6 +331,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8786 = { + .can_dump_fw = false, + .can_auto_tdls = false, + .can_ext_scan = false, ++ .fw_ready_extra_delay = false, + }; + + static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { +@@ -346,6 +347,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8787 = { + .can_dump_fw = false, + .can_auto_tdls = false, + .can_ext_scan = true, ++ .fw_ready_extra_delay = false, + }; + + static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { +@@ -361,6 +363,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8797 = { + .can_dump_fw = false, + .can_auto_tdls = false, + .can_ext_scan = true, ++ .fw_ready_extra_delay = false, + }; + + static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { +@@ -376,6 +379,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8897 = { + .can_dump_fw = true, + .can_auto_tdls = false, + .can_ext_scan = true, ++ .fw_ready_extra_delay = false, + }; + + static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = { +@@ -392,6 +396,24 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8977 = { + .fw_dump_enh = true, + .can_auto_tdls = false, + .can_ext_scan = true, ++ .fw_ready_extra_delay = false, ++}; ++ ++static const struct mwifiex_sdio_device mwifiex_sdio_sd8978 = { ++ .firmware_sdiouart = SD8978_SDIOUART_FW_NAME, ++ .reg = &mwifiex_reg_sd89xx, ++ .max_ports = 32, ++ .mp_agg_pkt_limit = 16, ++ .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_4K, ++ .mp_tx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, ++ .mp_rx_agg_buf_size = MWIFIEX_MP_AGGR_BUF_SIZE_MAX, ++ .supports_sdio_new_mode = true, ++ .has_control_mask = false, ++ .can_dump_fw = true, ++ .fw_dump_enh = true, ++ .can_auto_tdls = false, ++ .can_ext_scan = true, ++ .fw_ready_extra_delay = true, + }; + + static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = { +@@ -409,6 +431,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8997 = { + .fw_dump_enh = true, + .can_auto_tdls = false, + .can_ext_scan = true, ++ .fw_ready_extra_delay = false, + }; + + static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { +@@ -424,11 +447,12 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8887 = { + .can_dump_fw = false, + .can_auto_tdls = true, + .can_ext_scan = true, ++ .fw_ready_extra_delay = false, + }; + + static 
const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = { + .firmware = SD8987_DEFAULT_FW_NAME, +- .reg = &mwifiex_reg_sd8987, ++ .reg = &mwifiex_reg_sd89xx, + .max_ports = 32, + .mp_agg_pkt_limit = 16, + .tx_buf_size = MWIFIEX_TX_DATA_BUF_SIZE_2K, +@@ -440,6 +464,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8987 = { + .fw_dump_enh = true, + .can_auto_tdls = true, + .can_ext_scan = true, ++ .fw_ready_extra_delay = false, + }; + + static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = { +@@ -455,6 +480,7 @@ static const struct mwifiex_sdio_device mwifiex_sdio_sd8801 = { + .can_dump_fw = false, + .can_auto_tdls = false, + .can_ext_scan = true, ++ .fw_ready_extra_delay = false, + }; + + static struct memory_type_mapping generic_mem_type_map[] = { +@@ -482,7 +508,9 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = { + static const struct of_device_id mwifiex_sdio_of_match_table[] __maybe_unused = { + { .compatible = "marvell,sd8787" }, + { .compatible = "marvell,sd8897" }, ++ { .compatible = "marvell,sd8978" }, + { .compatible = "marvell,sd8997" }, ++ { .compatible = "nxp,iw416" }, + { } + }; + +@@ -545,6 +573,7 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) + card->fw_dump_enh = data->fw_dump_enh; + card->can_auto_tdls = data->can_auto_tdls; + card->can_ext_scan = data->can_ext_scan; ++ card->fw_ready_extra_delay = data->fw_ready_extra_delay; + INIT_WORK(&card->work, mwifiex_sdio_work); + } + +@@ -748,8 +777,9 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat) + static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, + u32 poll_num) + { ++ struct sdio_mmc_card *card = adapter->card; + int ret = 0; +- u16 firmware_stat; ++ u16 firmware_stat = 0; + u32 tries; + + for (tries = 0; tries < poll_num; tries++) { +@@ -765,6 +795,13 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter, + ret = -1; + } + ++ if (card->fw_ready_extra_delay && ++ firmware_stat == FIRMWARE_READY_SDIO) ++ /* firmware might pretend to be ready, when it's not. ++ * Wait a little bit more as a workaround. 
++ */ ++ msleep(100); ++ + return ret; + } + +@@ -920,6 +957,8 @@ static const struct sdio_device_id mwifiex_ids[] = { + .driver_data = (unsigned long)&mwifiex_sdio_sd8801}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977_WLAN), + .driver_data = (unsigned long)&mwifiex_sdio_sd8977}, ++ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8978_WLAN), ++ .driver_data = (unsigned long)&mwifiex_sdio_sd8978}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987_WLAN), + .driver_data = (unsigned long)&mwifiex_sdio_sd8987}, + {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997_WLAN), +@@ -3164,6 +3203,7 @@ MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME); + MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME); + MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME); + MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME); ++MODULE_FIRMWARE(SD8978_SDIOUART_FW_NAME); + MODULE_FIRMWARE(SD8987_DEFAULT_FW_NAME); + MODULE_FIRMWARE(SD8997_DEFAULT_FW_NAME); + MODULE_FIRMWARE(SD8997_SDIOUART_FW_NAME); +diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.h b/drivers/net/wireless/marvell/mwifiex/sdio.h +index 3a24bb48b2996..a5112cb35cdcd 100644 +--- a/drivers/net/wireless/marvell/mwifiex/sdio.h ++++ b/drivers/net/wireless/marvell/mwifiex/sdio.h +@@ -25,6 +25,7 @@ + #define SD8887_DEFAULT_FW_NAME "mrvl/sd8887_uapsta.bin" + #define SD8801_DEFAULT_FW_NAME "mrvl/sd8801_uapsta.bin" + #define SD8977_DEFAULT_FW_NAME "mrvl/sdsd8977_combo_v2.bin" ++#define SD8978_SDIOUART_FW_NAME "mrvl/sdiouartiw416_combo_v0.bin" + #define SD8987_DEFAULT_FW_NAME "mrvl/sd8987_uapsta.bin" + #define SD8997_DEFAULT_FW_NAME "mrvl/sdsd8997_combo_v4.bin" + #define SD8997_SDIOUART_FW_NAME "mrvl/sdiouart8997_combo_v4.bin" +@@ -257,6 +258,7 @@ struct sdio_mmc_card { + bool fw_dump_enh; + bool can_auto_tdls; + bool can_ext_scan; ++ bool fw_ready_extra_delay; + + struct mwifiex_sdio_mpa_tx mpa_tx; + struct mwifiex_sdio_mpa_rx mpa_rx; +@@ -280,6 +282,7 @@ struct mwifiex_sdio_device { + bool fw_dump_enh; + bool can_auto_tdls; + bool can_ext_scan; ++ bool fw_ready_extra_delay; + }; + + /* +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c +index 2716040985748..0d51c900c5538 100644 +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -104,13 +104,12 @@ bool provides_xdp_headroom = true; + module_param(provides_xdp_headroom, bool, 0644); + + static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, +- u8 status); ++ s8 status); + + static void make_tx_response(struct xenvif_queue *queue, +- struct xen_netif_tx_request *txp, ++ const struct xen_netif_tx_request *txp, + unsigned int extra_count, +- s8 st); +-static void push_tx_responses(struct xenvif_queue *queue); ++ s8 status); + + static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx); + +@@ -208,13 +207,9 @@ static void xenvif_tx_err(struct xenvif_queue *queue, + unsigned int extra_count, RING_IDX end) + { + RING_IDX cons = queue->tx.req_cons; +- unsigned long flags; + + do { +- spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR); +- push_tx_responses(queue); +- spin_unlock_irqrestore(&queue->response_lock, flags); + if (cons == end) + break; + RING_COPY_REQUEST(&queue->tx, cons++, txp); +@@ -465,12 +460,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue, + for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; + nr_slots--) { + if (unlikely(!txp->size)) { +- unsigned long flags; +- +- 
spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY); +- push_tx_responses(queue); +- spin_unlock_irqrestore(&queue->response_lock, flags); + ++txp; + continue; + } +@@ -496,14 +486,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, + + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) { + if (unlikely(!txp->size)) { +- unsigned long flags; +- +- spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, txp, 0, + XEN_NETIF_RSP_OKAY); +- push_tx_responses(queue); +- spin_unlock_irqrestore(&queue->response_lock, +- flags); + continue; + } + +@@ -997,7 +981,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, + (ret == 0) ? + XEN_NETIF_RSP_OKAY : + XEN_NETIF_RSP_ERROR); +- push_tx_responses(queue); + continue; + } + +@@ -1009,7 +992,6 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, + + make_tx_response(queue, &txreq, extra_count, + XEN_NETIF_RSP_OKAY); +- push_tx_responses(queue); + continue; + } + +@@ -1444,8 +1426,35 @@ int xenvif_tx_action(struct xenvif_queue *queue, int budget) + return work_done; + } + ++static void _make_tx_response(struct xenvif_queue *queue, ++ const struct xen_netif_tx_request *txp, ++ unsigned int extra_count, ++ s8 status) ++{ ++ RING_IDX i = queue->tx.rsp_prod_pvt; ++ struct xen_netif_tx_response *resp; ++ ++ resp = RING_GET_RESPONSE(&queue->tx, i); ++ resp->id = txp->id; ++ resp->status = status; ++ ++ while (extra_count-- != 0) ++ RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; ++ ++ queue->tx.rsp_prod_pvt = ++i; ++} ++ ++static void push_tx_responses(struct xenvif_queue *queue) ++{ ++ int notify; ++ ++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); ++ if (notify) ++ notify_remote_via_irq(queue->tx_irq); ++} ++ + static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, +- u8 status) ++ s8 status) + { + struct pending_tx_info *pending_tx_info; + pending_ring_idx_t index; +@@ -1455,8 +1464,8 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, + + spin_lock_irqsave(&queue->response_lock, flags); + +- make_tx_response(queue, &pending_tx_info->req, +- pending_tx_info->extra_count, status); ++ _make_tx_response(queue, &pending_tx_info->req, ++ pending_tx_info->extra_count, status); + + /* Release the pending index before pusing the Tx response so + * its available before a new Tx request is pushed by the +@@ -1470,32 +1479,19 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, + spin_unlock_irqrestore(&queue->response_lock, flags); + } + +- + static void make_tx_response(struct xenvif_queue *queue, +- struct xen_netif_tx_request *txp, ++ const struct xen_netif_tx_request *txp, + unsigned int extra_count, +- s8 st) ++ s8 status) + { +- RING_IDX i = queue->tx.rsp_prod_pvt; +- struct xen_netif_tx_response *resp; +- +- resp = RING_GET_RESPONSE(&queue->tx, i); +- resp->id = txp->id; +- resp->status = st; +- +- while (extra_count-- != 0) +- RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; ++ unsigned long flags; + +- queue->tx.rsp_prod_pvt = ++i; +-} ++ spin_lock_irqsave(&queue->response_lock, flags); + +-static void push_tx_responses(struct xenvif_queue *queue) +-{ +- int notify; ++ _make_tx_response(queue, txp, extra_count, status); ++ push_tx_responses(queue); + +- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); +- if (notify) +- notify_remote_via_irq(queue->tx_irq); ++ spin_unlock_irqrestore(&queue->response_lock, flags); + 
} + + static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) +diff --git a/drivers/of/property.c b/drivers/of/property.c +index b636777e6f7c8..33d5f16c81204 100644 +--- a/drivers/of/property.c ++++ b/drivers/of/property.c +@@ -762,7 +762,9 @@ struct device_node *of_graph_get_port_parent(struct device_node *node) + /* Walk 3 levels up only if there is 'ports' node. */ + for (depth = 3; depth && node; depth--) { + node = of_get_next_parent(node); +- if (depth == 2 && !of_node_name_eq(node, "ports")) ++ if (depth == 2 && !of_node_name_eq(node, "ports") && ++ !of_node_name_eq(node, "in-ports") && ++ !of_node_name_eq(node, "out-ports")) + break; + } + return node; +@@ -1243,7 +1245,7 @@ DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells") + DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells") + DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells") + DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells") +-DEFINE_SIMPLE_PROP(io_channels, "io-channel", "#io-channel-cells") ++DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells") + DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL) + DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells") + DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells") +@@ -1261,7 +1263,6 @@ DEFINE_SIMPLE_PROP(pinctrl5, "pinctrl-5", NULL) + DEFINE_SIMPLE_PROP(pinctrl6, "pinctrl-6", NULL) + DEFINE_SIMPLE_PROP(pinctrl7, "pinctrl-7", NULL) + DEFINE_SIMPLE_PROP(pinctrl8, "pinctrl-8", NULL) +-DEFINE_SIMPLE_PROP(remote_endpoint, "remote-endpoint", NULL) + DEFINE_SIMPLE_PROP(pwms, "pwms", "#pwm-cells") + DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") + DEFINE_SIMPLE_PROP(leds, "leds", NULL) +@@ -1326,6 +1327,17 @@ static struct device_node *parse_interrupts(struct device_node *np, + return of_irq_parse_one(np, index, &sup_args) ? NULL : sup_args.np; + } + ++static struct device_node *parse_remote_endpoint(struct device_node *np, ++ const char *prop_name, ++ int index) ++{ ++ /* Return NULL for index > 0 to signify end of remote-endpoints. */ ++ if (!index || strcmp(prop_name, "remote-endpoint")) ++ return NULL; ++ ++ return of_graph_get_remote_port_parent(np); ++} ++ + static const struct supplier_bindings of_supplier_bindings[] = { + { .parse_prop = parse_clocks, }, + { .parse_prop = parse_interconnects, }, +diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c +index e541a8960f1de..ce1386074e66b 100644 +--- a/drivers/of/unittest.c ++++ b/drivers/of/unittest.c +@@ -49,6 +49,12 @@ static struct unittest_results { + failed; \ + }) + ++#ifdef CONFIG_OF_KOBJ ++#define OF_KREF_READ(NODE) kref_read(&(NODE)->kobj.kref) ++#else ++#define OF_KREF_READ(NODE) 1 ++#endif ++ + /* + * Expected message may have a message level other than KERN_INFO. 
+ * Print the expected message only if the current loglevel will allow +@@ -562,7 +568,7 @@ static void __init of_unittest_parse_phandle_with_args_map(void) + pr_err("missing testcase data\n"); + return; + } +- prefs[i] = kref_read(&p[i]->kobj.kref); ++ prefs[i] = OF_KREF_READ(p[i]); + } + + rc = of_count_phandle_with_args(np, "phandle-list", "#phandle-cells"); +@@ -685,9 +691,9 @@ static void __init of_unittest_parse_phandle_with_args_map(void) + unittest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + + for (i = 0; i < ARRAY_SIZE(p); ++i) { +- unittest(prefs[i] == kref_read(&p[i]->kobj.kref), ++ unittest(prefs[i] == OF_KREF_READ(p[i]), + "provider%d: expected:%d got:%d\n", +- i, prefs[i], kref_read(&p[i]->kobj.kref)); ++ i, prefs[i], OF_KREF_READ(p[i])); + of_node_put(p[i]); + } + } +diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c +index c0f30cefec102..a416011391856 100644 +--- a/drivers/s390/net/qeth_l3_main.c ++++ b/drivers/s390/net/qeth_l3_main.c +@@ -254,9 +254,10 @@ static void qeth_l3_clear_ip_htable(struct qeth_card *card, int recover) + if (!recover) { + hash_del(&addr->hnode); + kfree(addr); +- continue; ++ } else { ++ /* prepare for recovery */ ++ addr->disp_flag = QETH_DISP_ADDR_ADD; + } +- addr->disp_flag = QETH_DISP_ADDR_ADD; + } + + mutex_unlock(&card->ip_lock); +@@ -277,9 +278,11 @@ static void qeth_l3_recover_ip(struct qeth_card *card) + if (addr->disp_flag == QETH_DISP_ADDR_ADD) { + rc = qeth_l3_register_addr_entry(card, addr); + +- if (!rc) { ++ if (!rc || rc == -EADDRINUSE || rc == -ENETDOWN) { ++ /* keep it in the records */ + addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING; + } else { ++ /* bad address */ + hash_del(&addr->hnode); + kfree(addr); + } +diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c +index 8a4124e7d2043..ddc048069af25 100644 +--- a/drivers/scsi/fcoe/fcoe_ctlr.c ++++ b/drivers/scsi/fcoe/fcoe_ctlr.c +@@ -319,17 +319,16 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip) + { + struct fcoe_fcf *sel; + struct fcoe_fcf *fcf; +- unsigned long flags; + + mutex_lock(&fip->ctlr_mutex); +- spin_lock_irqsave(&fip->ctlr_lock, flags); ++ spin_lock_bh(&fip->ctlr_lock); + + kfree_skb(fip->flogi_req); + fip->flogi_req = NULL; + list_for_each_entry(fcf, &fip->fcfs, list) + fcf->flogi_sent = 0; + +- spin_unlock_irqrestore(&fip->ctlr_lock, flags); ++ spin_unlock_bh(&fip->ctlr_lock); + sel = fip->sel_fcf; + + if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr)) +@@ -700,7 +699,6 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, + { + struct fc_frame *fp; + struct fc_frame_header *fh; +- unsigned long flags; + u16 old_xid; + u8 op; + u8 mac[ETH_ALEN]; +@@ -734,11 +732,11 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct fc_lport *lport, + op = FIP_DT_FLOGI; + if (fip->mode == FIP_MODE_VN2VN) + break; +- spin_lock_irqsave(&fip->ctlr_lock, flags); ++ spin_lock_bh(&fip->ctlr_lock); + kfree_skb(fip->flogi_req); + fip->flogi_req = skb; + fip->flogi_req_send = 1; +- spin_unlock_irqrestore(&fip->ctlr_lock, flags); ++ spin_unlock_bh(&fip->ctlr_lock); + schedule_work(&fip->timer_work); + return -EINPROGRESS; + case ELS_FDISC: +@@ -1707,11 +1705,10 @@ static int fcoe_ctlr_flogi_send_locked(struct fcoe_ctlr *fip) + static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) + { + struct fcoe_fcf *fcf; +- unsigned long flags; + int error; + + mutex_lock(&fip->ctlr_mutex); +- spin_lock_irqsave(&fip->ctlr_lock, flags); ++ spin_lock_bh(&fip->ctlr_lock); + LIBFCOE_FIP_DBG(fip, "re-sending FLOGI 
- reselect\n"); + fcf = fcoe_ctlr_select(fip); + if (!fcf || fcf->flogi_sent) { +@@ -1722,7 +1719,7 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) + fcoe_ctlr_solicit(fip, NULL); + error = fcoe_ctlr_flogi_send_locked(fip); + } +- spin_unlock_irqrestore(&fip->ctlr_lock, flags); ++ spin_unlock_bh(&fip->ctlr_lock); + mutex_unlock(&fip->ctlr_mutex); + return error; + } +@@ -1739,9 +1736,8 @@ static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *fip) + static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) + { + struct fcoe_fcf *fcf; +- unsigned long flags; + +- spin_lock_irqsave(&fip->ctlr_lock, flags); ++ spin_lock_bh(&fip->ctlr_lock); + fcf = fip->sel_fcf; + if (!fcf || !fip->flogi_req_send) + goto unlock; +@@ -1768,7 +1764,7 @@ static void fcoe_ctlr_flogi_send(struct fcoe_ctlr *fip) + } else /* XXX */ + LIBFCOE_FIP_DBG(fip, "No FCF selected - defer send\n"); + unlock: +- spin_unlock_irqrestore(&fip->ctlr_lock, flags); ++ spin_unlock_bh(&fip->ctlr_lock); + } + + /** +diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c +index c2d981d5a2dd5..4fad9d85bd6f9 100644 +--- a/drivers/scsi/storvsc_drv.c ++++ b/drivers/scsi/storvsc_drv.c +@@ -326,6 +326,7 @@ enum storvsc_request_type { + */ + + static int storvsc_ringbuffer_size = (128 * 1024); ++static int aligned_ringbuffer_size; + static u32 max_outstanding_req_per_channel; + static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth); + +@@ -683,8 +684,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc) + new_sc->next_request_id_callback = storvsc_next_request_id; + + ret = vmbus_open(new_sc, +- storvsc_ringbuffer_size, +- storvsc_ringbuffer_size, ++ aligned_ringbuffer_size, ++ aligned_ringbuffer_size, + (void *)&props, + sizeof(struct vmstorage_channel_properties), + storvsc_on_channel_callback, new_sc); +@@ -1964,7 +1965,7 @@ static int storvsc_probe(struct hv_device *device, + dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1); + + stor_device->port_number = host->host_no; +- ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc); ++ ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc); + if (ret) + goto err_out1; + +@@ -2157,7 +2158,7 @@ static int storvsc_resume(struct hv_device *hv_dev) + { + int ret; + +- ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size, ++ ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size, + hv_dev_is_fc(hv_dev)); + return ret; + } +@@ -2191,8 +2192,9 @@ static int __init storvsc_drv_init(void) + * the ring buffer indices) by the max request size (which is + * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64) + */ ++ aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size); + max_outstanding_req_per_channel = +- ((storvsc_ringbuffer_size - PAGE_SIZE) / ++ ((aligned_ringbuffer_size - PAGE_SIZE) / + ALIGN(MAX_MULTIPAGE_BUFFER_PACKET + + sizeof(struct vstor_packet) + sizeof(u64), + sizeof(u64))); +diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c +index d65f047b6c823..1179a1115137f 100644 +--- a/drivers/spi/spi-ppc4xx.c ++++ b/drivers/spi/spi-ppc4xx.c +@@ -166,10 +166,8 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) + int scr; + u8 cdm = 0; + u32 speed; +- u8 bits_per_word; + + /* Start with the generic configuration for this device. 
*/ +- bits_per_word = spi->bits_per_word; + speed = spi->max_speed_hz; + + /* +@@ -177,9 +175,6 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) + * the transfer to overwrite the generic configuration with zeros. + */ + if (t) { +- if (t->bits_per_word) +- bits_per_word = t->bits_per_word; +- + if (t->speed_hz) + speed = min(t->speed_hz, spi->max_speed_hz); + } +diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c +index f177b20f0f2d9..ceba632138940 100644 +--- a/drivers/staging/iio/impedance-analyzer/ad5933.c ++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c +@@ -608,7 +608,7 @@ static void ad5933_work(struct work_struct *work) + struct ad5933_state, work.work); + struct iio_dev *indio_dev = i2c_get_clientdata(st->client); + __be16 buf[2]; +- int val[2]; ++ u16 val[2]; + unsigned char status; + int ret; + +diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c +index 338cb19dec23c..163a89f84c9c2 100644 +--- a/drivers/tty/serial/max310x.c ++++ b/drivers/tty/serial/max310x.c +@@ -237,6 +237,14 @@ + #define MAX310x_REV_MASK (0xf8) + #define MAX310X_WRITE_BIT 0x80 + ++/* Port startup definitions */ ++#define MAX310X_PORT_STARTUP_WAIT_RETRIES 20 /* Number of retries */ ++#define MAX310X_PORT_STARTUP_WAIT_DELAY_MS 10 /* Delay between retries */ ++ ++/* Crystal-related definitions */ ++#define MAX310X_XTAL_WAIT_RETRIES 20 /* Number of retries */ ++#define MAX310X_XTAL_WAIT_DELAY_MS 10 /* Delay between retries */ ++ + /* MAX3107 specific */ + #define MAX3107_REV_ID (0xa0) + +@@ -583,7 +591,7 @@ static int max310x_update_best_err(unsigned long f, long *besterr) + return 1; + } + +-static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s, ++static s32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s, + unsigned long freq, bool xtal) + { + unsigned int div, clksrc, pllcfg = 0; +@@ -641,12 +649,20 @@ static u32 max310x_set_ref_clk(struct device *dev, struct max310x_port *s, + + /* Wait for crystal */ + if (xtal) { +- unsigned int val; +- msleep(10); +- regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val); +- if (!(val & MAX310X_STS_CLKREADY_BIT)) { +- dev_warn(dev, "clock is not stable yet\n"); +- } ++ bool stable = false; ++ unsigned int try = 0, val = 0; ++ ++ do { ++ msleep(MAX310X_XTAL_WAIT_DELAY_MS); ++ regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &val); ++ ++ if (val & MAX310X_STS_CLKREADY_BIT) ++ stable = true; ++ } while (!stable && (++try < MAX310X_XTAL_WAIT_RETRIES)); ++ ++ if (!stable) ++ return dev_err_probe(dev, -EAGAIN, ++ "clock is not stable\n"); + } + + return bestfreq; +@@ -1274,7 +1290,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty + { + int i, ret, fmin, fmax, freq; + struct max310x_port *s; +- u32 uartclk = 0; ++ s32 uartclk = 0; + bool xtal; + + for (i = 0; i < devtype->nr; i++) +@@ -1337,6 +1353,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty + goto out_clk; + + for (i = 0; i < devtype->nr; i++) { ++ bool started = false; ++ unsigned int try = 0, val = 0; ++ + /* Reset port */ + regmap_write(regmaps[i], MAX310X_MODE2_REG, + MAX310X_MODE2_RST_BIT); +@@ -1345,13 +1364,27 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty + + /* Wait for port startup */ + do { +- regmap_read(regmaps[i], MAX310X_BRGDIVLSB_REG, &ret); +- } while (ret != 0x01); ++ msleep(MAX310X_PORT_STARTUP_WAIT_DELAY_MS); ++ regmap_read(regmaps[i], 
MAX310X_BRGDIVLSB_REG, &val);
++
++			if (val == 0x01)
++				started = true;
++		} while (!started && (++try < MAX310X_PORT_STARTUP_WAIT_RETRIES));
++
++		if (!started) {
++			ret = dev_err_probe(dev, -EAGAIN, "port reset failed\n");
++			goto out_uart;
++		}
+ 
+ 		regmap_write(regmaps[i], MAX310X_MODE1_REG, devtype->mode1);
+ 	}
+ 
+ 	uartclk = max310x_set_ref_clk(dev, s, freq, xtal);
++	if (uartclk < 0) {
++		ret = uartclk;
++		goto out_uart;
++	}
++
+ 	dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
+ 
+ 	for (i = 0; i < devtype->nr; i++) {
+diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
+index 38703781ee2d1..1283c427cdf88 100644
+--- a/drivers/usb/common/ulpi.c
++++ b/drivers/usb/common/ulpi.c
+@@ -301,7 +301,7 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
+ 		return ret;
+ 	}
+ 
+-	root = debugfs_create_dir(dev_name(dev), ulpi_root);
++	root = debugfs_create_dir(dev_name(&ulpi->dev), ulpi_root);
+ 	debugfs_create_file("regs", 0444, root, ulpi, &ulpi_regs_fops);
+ 
+ 	dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 4f181110d00db..d960a56b760ec 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -2389,17 +2389,25 @@ static int usb_enumerate_device_otg(struct usb_device *udev)
+ 			}
+ 		} else if (desc->bLength == sizeof
+ 				(struct usb_otg_descriptor)) {
+-			/* Set a_alt_hnp_support for legacy otg device */
+-			err = usb_control_msg(udev,
+-				usb_sndctrlpipe(udev, 0),
+-				USB_REQ_SET_FEATURE, 0,
+-				USB_DEVICE_A_ALT_HNP_SUPPORT,
+-				0, NULL, 0,
+-				USB_CTRL_SET_TIMEOUT);
+-			if (err < 0)
+-				dev_err(&udev->dev,
+-					"set a_alt_hnp_support failed: %d\n",
+-					err);
++			/*
++			 * We are operating on a legacy OTG device.
++			 * These should be told that they are operating
++			 * on the wrong port if we have another port that does
++			 * support HNP.
++			 */
++			if (bus->otg_port != 0) {
++				/* Set a_alt_hnp_support for legacy otg device */
++				err = usb_control_msg(udev,
++					usb_sndctrlpipe(udev, 0),
++					USB_REQ_SET_FEATURE, 0,
++					USB_DEVICE_A_ALT_HNP_SUPPORT,
++					0, NULL, 0,
++					USB_CTRL_SET_TIMEOUT);
++				if (err < 0)
++					dev_err(&udev->dev,
++						"set a_alt_hnp_support failed: %d\n",
++						err);
++			}
+ 		}
+ 	}
+ #endif
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index c4703f6b20894..576c21bf77cda 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -4583,15 +4583,13 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
+ 	unsigned long flags;
+ 	int ret;
+ 
+-	if (!dwc->gadget_driver)
+-		return 0;
+-
+ 	ret = dwc3_gadget_soft_disconnect(dwc);
+ 	if (ret)
+ 		goto err;
+ 
+ 	spin_lock_irqsave(&dwc->lock, flags);
+-	dwc3_disconnect_gadget(dwc);
++	if (dwc->gadget_driver)
++		dwc3_disconnect_gadget(dwc);
+ 	spin_unlock_irqrestore(&dwc->lock, flags);
+ 
+ 	return 0;
+diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
+index 7b9a4cf9b100c..d35f30a9cae2c 100644
+--- a/drivers/usb/gadget/function/f_mass_storage.c
++++ b/drivers/usb/gadget/function/f_mass_storage.c
+@@ -544,21 +544,37 @@ static int start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
+ 
+ static bool start_in_transfer(struct fsg_common *common, struct fsg_buffhd *bh)
+ {
++	int rc;
++
+ 	if (!fsg_is_set(common))
+ 		return false;
+ 	bh->state = BUF_STATE_SENDING;
+-	if (start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq))
++	rc = start_transfer(common->fsg, common->fsg->bulk_in, bh->inreq);
++	if (rc) {
+ 		bh->state = BUF_STATE_EMPTY;
++		if (rc == -ESHUTDOWN) {
++ common->running = 0; ++ return false; ++ } ++ } + return true; + } + + static bool start_out_transfer(struct fsg_common *common, struct fsg_buffhd *bh) + { ++ int rc; ++ + if (!fsg_is_set(common)) + return false; + bh->state = BUF_STATE_RECEIVING; +- if (start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq)) ++ rc = start_transfer(common->fsg, common->fsg->bulk_out, bh->outreq); ++ if (rc) { + bh->state = BUF_STATE_FULL; ++ if (rc == -ESHUTDOWN) { ++ common->running = 0; ++ return false; ++ } ++ } + return true; + } + +diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c +index dc2dea3768fb6..0695ee54ff781 100644 +--- a/drivers/usb/typec/ucsi/ucsi.c ++++ b/drivers/usb/typec/ucsi/ucsi.c +@@ -831,7 +831,9 @@ static void ucsi_handle_connector_change(struct work_struct *work) + + clear_bit(EVENT_PENDING, &con->ucsi->flags); + ++ mutex_lock(&ucsi->ppm_lock); + ret = ucsi_acknowledge_connector_change(ucsi); ++ mutex_unlock(&ucsi->ppm_lock); + if (ret) + dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret); + +diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c +index 217355f1f9b94..26171c5d3c61c 100644 +--- a/drivers/usb/typec/ucsi/ucsi_acpi.c ++++ b/drivers/usb/typec/ucsi/ucsi_acpi.c +@@ -73,9 +73,13 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset, + const void *val, size_t val_len) + { + struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi); ++ bool ack = UCSI_COMMAND(*(u64 *)val) == UCSI_ACK_CC_CI; + int ret; + +- set_bit(COMMAND_PENDING, &ua->flags); ++ if (ack) ++ set_bit(ACK_PENDING, &ua->flags); ++ else ++ set_bit(COMMAND_PENDING, &ua->flags); + + ret = ucsi_acpi_async_write(ucsi, offset, val, val_len); + if (ret) +@@ -85,7 +89,10 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset, + ret = -ETIMEDOUT; + + out_clear_bit: +- clear_bit(COMMAND_PENDING, &ua->flags); ++ if (ack) ++ clear_bit(ACK_PENDING, &ua->flags); ++ else ++ clear_bit(COMMAND_PENDING, &ua->flags); + + return ret; + } +@@ -142,8 +149,10 @@ static void ucsi_acpi_notify(acpi_handle handle, u32 event, void *data) + if (UCSI_CCI_CONNECTOR(cci)) + ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci)); + +- if (test_bit(COMMAND_PENDING, &ua->flags) && +- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE)) ++ if (cci & UCSI_CCI_ACK_COMPLETE && test_bit(ACK_PENDING, &ua->flags)) ++ complete(&ua->complete); ++ if (cci & UCSI_CCI_COMMAND_COMPLETE && ++ test_bit(COMMAND_PENDING, &ua->flags)) + complete(&ua->complete); + } + +diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c +index 08017b180a10d..9f77565bd7f5a 100644 +--- a/fs/btrfs/block-group.c ++++ b/fs/btrfs/block-group.c +@@ -1318,6 +1318,7 @@ static bool clean_pinned_extents(struct btrfs_trans_handle *trans, + */ + void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) + { ++ LIST_HEAD(retry_list); + struct btrfs_block_group *block_group; + struct btrfs_space_info *space_info; + struct btrfs_trans_handle *trans; +@@ -1339,6 +1340,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) + + spin_lock(&fs_info->unused_bgs_lock); + while (!list_empty(&fs_info->unused_bgs)) { ++ u64 used; + int trimming; + + block_group = list_first_entry(&fs_info->unused_bgs, +@@ -1374,9 +1376,9 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) + goto next; + } + ++ spin_lock(&space_info->lock); + spin_lock(&block_group->lock); +- if (block_group->reserved || block_group->pinned || +- block_group->used || block_group->ro || ++ if 
(btrfs_is_block_group_used(block_group) || block_group->ro ||
+ 		    list_is_singular(&block_group->list)) {
+ 			/*
+ 			 * We want to bail if we made new allocations or have
+@@ -1386,10 +1388,49 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+ 			 */
+ 			trace_btrfs_skip_unused_block_group(block_group);
+ 			spin_unlock(&block_group->lock);
++			spin_unlock(&space_info->lock);
++			up_write(&space_info->groups_sem);
++			goto next;
++		}
++
++		/*
++		 * The block group may be unused but there may be space reserved
++		 * in accounting due to the existence of that block group, that is,
++		 * space_info->bytes_may_use was incremented by a task but no
++		 * space was yet allocated from the block group by the task.
++		 * That space may or may not be allocated, as we are generally
++		 * pessimistic about space reservation for metadata as well as
++		 * for data when using compression (as we reserve space based on
++		 * the worst case, when data can't be compressed, and before
++		 * actually attempting compression, before starting writeback).
++		 *
++		 * So check if the total space of the space_info minus the size
++		 * of this block group is less than the used space of the
++		 * space_info - if that's the case, then it means we have tasks
++		 * that might be relying on the block group in order to allocate
++		 * extents, and add back the block group to the unused list when
++		 * we finish, so that we retry later in case no tasks ended up
++		 * needing to allocate extents from the block group.
++		 */
++		used = btrfs_space_info_used(space_info, true);
++		if (space_info->total_bytes - block_group->length < used) {
++			/*
++			 * Add a reference for the list, compensate for the ref
++			 * drop under the "next" label for the
++			 * fs_info->unused_bgs list.
++			 */
++			btrfs_get_block_group(block_group);
++			list_add_tail(&block_group->bg_list, &retry_list);
++
++			trace_btrfs_skip_unused_block_group(block_group);
++			spin_unlock(&block_group->lock);
++			spin_unlock(&space_info->lock);
+ 			up_write(&space_info->groups_sem);
+ 			goto next;
+ 		}
++
+ 		spin_unlock(&block_group->lock);
++		spin_unlock(&space_info->lock);
+ 
+ 		/* We don't want to force the issue, only flip if it's ok. 
*/ + ret = inc_block_group_ro(block_group, 0); +@@ -1513,12 +1554,16 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) + btrfs_put_block_group(block_group); + spin_lock(&fs_info->unused_bgs_lock); + } ++ list_splice_tail(&retry_list, &fs_info->unused_bgs); + spin_unlock(&fs_info->unused_bgs_lock); + mutex_unlock(&fs_info->reclaim_bgs_lock); + return; + + flip_async: + btrfs_end_transaction(trans); ++ spin_lock(&fs_info->unused_bgs_lock); ++ list_splice_tail(&retry_list, &fs_info->unused_bgs); ++ spin_unlock(&fs_info->unused_bgs_lock); + mutex_unlock(&fs_info->reclaim_bgs_lock); + btrfs_put_block_group(block_group); + btrfs_discard_punt_unused_bgs_list(fs_info); +diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h +index 47a2dcbfee255..bace40a006379 100644 +--- a/fs/btrfs/block-group.h ++++ b/fs/btrfs/block-group.h +@@ -241,6 +241,13 @@ static inline u64 btrfs_block_group_end(struct btrfs_block_group *block_group) + return (block_group->start + block_group->length); + } + ++static inline bool btrfs_is_block_group_used(const struct btrfs_block_group *bg) ++{ ++ lockdep_assert_held(&bg->lock); ++ ++ return (bg->used > 0 || bg->reserved > 0 || bg->pinned > 0); ++} ++ + static inline bool btrfs_is_block_group_data_only( + struct btrfs_block_group *block_group) + { +diff --git a/fs/btrfs/delalloc-space.c b/fs/btrfs/delalloc-space.c +index f2bc5563c0f92..63b7fa7067434 100644 +--- a/fs/btrfs/delalloc-space.c ++++ b/fs/btrfs/delalloc-space.c +@@ -243,7 +243,6 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, + struct btrfs_block_rsv *block_rsv = &inode->block_rsv; + u64 reserve_size = 0; + u64 qgroup_rsv_size = 0; +- u64 csum_leaves; + unsigned outstanding_extents; + + lockdep_assert_held(&inode->lock); +@@ -258,10 +257,12 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, + outstanding_extents); + reserve_size += btrfs_calc_metadata_size(fs_info, 1); + } +- csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, +- inode->csum_bytes); +- reserve_size += btrfs_calc_insert_metadata_size(fs_info, +- csum_leaves); ++ if (!(inode->flags & BTRFS_INODE_NODATASUM)) { ++ u64 csum_leaves; ++ ++ csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); ++ reserve_size += btrfs_calc_insert_metadata_size(fs_info, csum_leaves); ++ } + /* + * For qgroup rsv, the calculation is very simple: + * account one nodesize for each outstanding extent +@@ -276,14 +277,20 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, + spin_unlock(&block_rsv->lock); + } + +-static void calc_inode_reservations(struct btrfs_fs_info *fs_info, ++static void calc_inode_reservations(struct btrfs_inode *inode, + u64 num_bytes, u64 disk_num_bytes, + u64 *meta_reserve, u64 *qgroup_reserve) + { ++ struct btrfs_fs_info *fs_info = inode->root->fs_info; + u64 nr_extents = count_max_extents(fs_info, num_bytes); +- u64 csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes); ++ u64 csum_leaves; + u64 inode_update = btrfs_calc_metadata_size(fs_info, 1); + ++ if (inode->flags & BTRFS_INODE_NODATASUM) ++ csum_leaves = 0; ++ else ++ csum_leaves = btrfs_csum_bytes_to_leaves(fs_info, disk_num_bytes); ++ + *meta_reserve = btrfs_calc_insert_metadata_size(fs_info, + nr_extents + csum_leaves); + +@@ -335,7 +342,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, + * everything out and try again, which is bad. This way we just + * over-reserve slightly, and clean up the mess when we are done. 
+ */ +- calc_inode_reservations(fs_info, num_bytes, disk_num_bytes, ++ calc_inode_reservations(inode, num_bytes, disk_num_bytes, + &meta_reserve, &qgroup_reserve); + ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true, + noflush); +@@ -356,7 +363,8 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes, + spin_lock(&inode->lock); + nr_extents = count_max_extents(fs_info, num_bytes); + btrfs_mod_outstanding_extents(inode, nr_extents); +- inode->csum_bytes += disk_num_bytes; ++ if (!(inode->flags & BTRFS_INODE_NODATASUM)) ++ inode->csum_bytes += disk_num_bytes; + btrfs_calculate_inode_block_rsv_size(fs_info, inode); + spin_unlock(&inode->lock); + +@@ -390,7 +398,8 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes, + + num_bytes = ALIGN(num_bytes, fs_info->sectorsize); + spin_lock(&inode->lock); +- inode->csum_bytes -= num_bytes; ++ if (!(inode->flags & BTRFS_INODE_NODATASUM)) ++ inode->csum_bytes -= num_bytes; + btrfs_calculate_inode_block_rsv_size(fs_info, inode); + spin_unlock(&inode->lock); + +diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c +index 40152458e7b74..0d1b05ded1e35 100644 +--- a/fs/btrfs/disk-io.c ++++ b/fs/btrfs/disk-io.c +@@ -1662,8 +1662,17 @@ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, + again: + root = btrfs_lookup_fs_root(fs_info, objectid); + if (root) { +- /* Shouldn't get preallocated anon_dev for cached roots */ +- ASSERT(!anon_dev); ++ /* ++ * Some other caller may have read out the newly inserted ++ * subvolume already (for things like backref walk etc). Not ++ * that common but still possible. In that case, we just need ++ * to free the anon_dev. ++ */ ++ if (unlikely(anon_dev)) { ++ free_anon_bdev(anon_dev); ++ anon_dev = 0; ++ } ++ + if (check_ref && btrfs_root_refs(&root->root_item) == 0) { + btrfs_put_root(root); + return ERR_PTR(-ENOENT); +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 82f92b5652a77..f7f4bcc094642 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -3364,8 +3364,23 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) + unwritten_start += logical_len; + clear_extent_uptodate(io_tree, unwritten_start, end, NULL); + +- /* Drop extent maps for the part of the extent we didn't write. */ +- btrfs_drop_extent_map_range(inode, unwritten_start, end, false); ++ /* ++ * Drop extent maps for the part of the extent we didn't write. ++ * ++ * We have an exception here for the free_space_inode, this is ++ * because when we do btrfs_get_extent() on the free space inode ++ * we will search the commit root. If this is a new block group ++ * we won't find anything, and we will trip over the assert in ++ * writepage where we do ASSERT(em->block_start != ++ * EXTENT_MAP_HOLE). ++ * ++ * Theoretically we could also skip this for any NOCOW extent as ++ * we don't mess with the extent map tree in the NOCOW case, but ++ * for now simply skip this if we are the free space inode. ++ */ ++ if (!btrfs_is_free_space_inode(inode)) ++ btrfs_drop_extent_map_range(inode, unwritten_start, ++ end, false); + + /* + * If the ordered extent had an IOERR or something else went +@@ -10774,6 +10789,13 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, + if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE) + return -EINVAL; + ++ /* ++ * Compressed extents should always have checksums, so error out if we ++ * have a NOCOW file or inode was created while mounted with NODATASUM. 
++ */ ++ if (inode->flags & BTRFS_INODE_NODATASUM) ++ return -EINVAL; ++ + orig_count = iov_iter_count(from); + + /* The extent size must be sane. */ +diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c +index 8516c70b5edc1..196e222749ccd 100644 +--- a/fs/btrfs/ioctl.c ++++ b/fs/btrfs/ioctl.c +@@ -4695,6 +4695,11 @@ static long btrfs_ioctl_qgroup_create(struct file *file, void __user *arg) + goto out; + } + ++ if (sa->create && is_fstree(sa->qgroupid)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ + trans = btrfs_join_transaction(root); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index 96ec9ccc2ef61..b3472bf6b288f 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -1635,6 +1635,15 @@ int btrfs_create_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) + return ret; + } + ++static bool qgroup_has_usage(struct btrfs_qgroup *qgroup) ++{ ++ return (qgroup->rfer > 0 || qgroup->rfer_cmpr > 0 || ++ qgroup->excl > 0 || qgroup->excl_cmpr > 0 || ++ qgroup->rsv.values[BTRFS_QGROUP_RSV_DATA] > 0 || ++ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC] > 0 || ++ qgroup->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > 0); ++} ++ + int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) + { + struct btrfs_fs_info *fs_info = trans->fs_info; +@@ -1654,6 +1663,11 @@ int btrfs_remove_qgroup(struct btrfs_trans_handle *trans, u64 qgroupid) + goto out; + } + ++ if (is_fstree(qgroupid) && qgroup_has_usage(qgroup)) { ++ ret = -EBUSY; ++ goto out; ++ } ++ + /* Check if there are no children of this qgroup */ + if (!list_empty(&qgroup->members)) { + ret = -EBUSY; +diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c +index 4a4d65b5e24f7..a75669972dc73 100644 +--- a/fs/btrfs/send.c ++++ b/fs/btrfs/send.c +@@ -7852,7 +7852,7 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg) + } + + if (arg->flags & ~BTRFS_SEND_FLAG_MASK) { +- ret = -EINVAL; ++ ret = -EOPNOTSUPP; + goto out; + } + +diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c +index 111938a6307e6..57603782e7e2a 100644 +--- a/fs/ceph/caps.c ++++ b/fs/ceph/caps.c +@@ -1391,7 +1391,7 @@ static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap, + if (flushing & CEPH_CAP_XATTR_EXCL) { + arg->old_xattr_buf = __ceph_build_xattrs_blob(ci); + arg->xattr_version = ci->i_xattrs.version; +- arg->xattr_buf = ci->i_xattrs.blob; ++ arg->xattr_buf = ceph_buffer_get(ci->i_xattrs.blob); + } else { + arg->xattr_buf = NULL; + arg->old_xattr_buf = NULL; +@@ -1457,6 +1457,7 @@ static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci) + encode_cap_msg(msg, arg); + ceph_con_send(&arg->session->s_con, msg); + ceph_buffer_put(arg->old_xattr_buf); ++ ceph_buffer_put(arg->xattr_buf); + if (arg->wake) + wake_up_all(&ci->i_cap_wq); + } +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 40903c172a34f..1a310ee7d9e55 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -1785,11 +1785,6 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, + mb_check_buddy(e4b); + mb_free_blocks_double(inode, e4b, first, count); + +- this_cpu_inc(discard_pa_seq); +- e4b->bd_info->bb_free += count; +- if (first < e4b->bd_info->bb_first_free) +- e4b->bd_info->bb_first_free = first; +- + /* access memory sequentially: check left neighbour, + * clear range and then check right neighbour + */ +@@ -1803,23 +1798,31 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, + struct ext4_sb_info *sbi = EXT4_SB(sb); + ext4_fsblk_t blocknr; + ++ 
/* ++ * Fastcommit replay can free already freed blocks which ++ * corrupts allocation info. Regenerate it. ++ */ ++ if (sbi->s_mount_state & EXT4_FC_REPLAY) { ++ mb_regenerate_buddy(e4b); ++ goto check; ++ } ++ + blocknr = ext4_group_first_block_no(sb, e4b->bd_group); + blocknr += EXT4_C2B(sbi, block); +- if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { +- ext4_grp_locked_error(sb, e4b->bd_group, +- inode ? inode->i_ino : 0, +- blocknr, +- "freeing already freed block (bit %u); block bitmap corrupt.", +- block); +- ext4_mark_group_bitmap_corrupted( +- sb, e4b->bd_group, ++ ext4_grp_locked_error(sb, e4b->bd_group, ++ inode ? inode->i_ino : 0, blocknr, ++ "freeing already freed block (bit %u); block bitmap corrupt.", ++ block); ++ ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, + EXT4_GROUP_INFO_BBITMAP_CORRUPT); +- } else { +- mb_regenerate_buddy(e4b); +- } +- goto done; ++ return; + } + ++ this_cpu_inc(discard_pa_seq); ++ e4b->bd_info->bb_free += count; ++ if (first < e4b->bd_info->bb_first_free) ++ e4b->bd_info->bb_first_free = first; ++ + /* let's maintain fragments counter */ + if (left_is_free && right_is_free) + e4b->bd_info->bb_fragments--; +@@ -1844,9 +1847,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, + if (first <= last) + mb_buddy_mark_free(e4b, first >> 1, last >> 1); + +-done: + mb_set_largest_free_order(sb, e4b->bd_info); + mb_update_avg_fragment_size(sb, e4b->bd_info); ++check: + mb_check_buddy(e4b); + } + +diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c +index dedc9d445f243..8e3ff150bc36b 100644 +--- a/fs/ext4/move_extent.c ++++ b/fs/ext4/move_extent.c +@@ -621,6 +621,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, + goto out; + o_end = o_start + len; + ++ *moved_len = 0; + while (o_start < o_end) { + struct ext4_extent *ex; + ext4_lblk_t cur_blk, next_blk; +@@ -675,7 +676,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, + */ + ext4_double_up_write_data_sem(orig_inode, donor_inode); + /* Swap original branches with new branches */ +- move_extent_per_page(o_filp, donor_inode, ++ *moved_len += move_extent_per_page(o_filp, donor_inode, + orig_page_index, donor_page_index, + offset_in_page, cur_len, + unwritten, &ret); +@@ -685,9 +686,6 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, + o_start += cur_len; + d_start += cur_len; + } +- *moved_len = o_start - orig_blk; +- if (*moved_len > len) +- *moved_len = len; + + out: + if (*moved_len) { +diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c +index 8eea709e36599..4fe4b3393e71c 100644 +--- a/fs/hugetlbfs/inode.c ++++ b/fs/hugetlbfs/inode.c +@@ -123,6 +123,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) + loff_t len, vma_len; + int ret; + struct hstate *h = hstate_file(file); ++ vm_flags_t vm_flags; + + /* + * vma address alignment (but not the pgoff alignment) has +@@ -164,10 +165,20 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) + file_accessed(file); + + ret = -ENOMEM; ++ ++ vm_flags = vma->vm_flags; ++ /* ++ * for SHM_HUGETLB, the pages are reserved in the shmget() call so skip ++ * reserving here. Note: only for SHM hugetlbfs file, the inode ++ * flag S_PRIVATE is set. 
++ */ ++ if (inode->i_flags & S_PRIVATE) ++ vm_flags |= VM_NORESERVE; ++ + if (!hugetlb_reserve_pages(inode, + vma->vm_pgoff >> huge_page_order(h), + len >> huge_page_shift(h), vma, +- vma->vm_flags)) ++ vm_flags)) + goto out; + + ret = 0; +@@ -1350,6 +1361,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par + { + struct hugetlbfs_fs_context *ctx = fc->fs_private; + struct fs_parse_result result; ++ struct hstate *h; + char *rest; + unsigned long ps; + int opt; +@@ -1394,11 +1406,12 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *par + + case Opt_pagesize: + ps = memparse(param->string, &rest); +- ctx->hstate = size_to_hstate(ps); +- if (!ctx->hstate) { ++ h = size_to_hstate(ps); ++ if (!h) { + pr_err("Unsupported page size %lu MB\n", ps / SZ_1M); + return -EINVAL; + } ++ ctx->hstate = h; + return 0; + + case Opt_min_size: +diff --git a/fs/namespace.c b/fs/namespace.c +index 29a8d90dd1072..1533550f73567 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -4172,10 +4172,15 @@ static int do_mount_setattr(struct path *path, struct mount_kattr *kattr) + /* + * If this is an attached mount make sure it's located in the callers + * mount namespace. If it's not don't let the caller interact with it. +- * If this is a detached mount make sure it has an anonymous mount +- * namespace attached to it, i.e. we've created it via OPEN_TREE_CLONE. ++ * ++ * If this mount doesn't have a parent it's most often simply a ++ * detached mount with an anonymous mount namespace. IOW, something ++ * that's simply not attached yet. But there are apparently also users ++ * that do change mount properties on the rootfs itself. That obviously ++ * neither has a parent nor is it a detached mount so we cannot ++ * unconditionally check for detached mounts. + */ +- if (!(mnt_has_parent(mnt) ? 
check_mnt(mnt) : is_anon_ns(mnt->mnt_ns)))
++	if ((mnt_has_parent(mnt) || !is_anon_ns(mnt->mnt_ns)) && !check_mnt(mnt))
+ 		goto out;
+ 
+ 	/*
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index faecdbfa01a29..b3f6dda930d8b 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -4908,10 +4908,8 @@ nfsd_break_deleg_cb(struct file_lock *fl)
+ 	 */
+ 	fl->fl_break_time = 0;
+ 
+-	spin_lock(&fp->fi_lock);
+ 	fp->fi_had_conflict = true;
+ 	nfsd_break_one_deleg(dp);
+-	spin_unlock(&fp->fi_lock);
+ 	return false;
+ }
+ 
+@@ -5499,12 +5497,13 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
+ 	if (status)
+ 		goto out_unlock;
+ 
++	status = -EAGAIN;
++	if (fp->fi_had_conflict)
++		goto out_unlock;
++
+ 	spin_lock(&state_lock);
+ 	spin_lock(&fp->fi_lock);
+-	if (fp->fi_had_conflict)
+-		status = -EAGAIN;
+-	else
+-		status = hash_delegation_locked(dp, fp);
++	status = hash_delegation_locked(dp, fp);
+ 	spin_unlock(&fp->fi_lock);
+ 	spin_unlock(&state_lock);
+ 
+@@ -7736,14 +7735,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ {
+ 	struct file_lock *fl;
+ 	int status = false;
+-	struct nfsd_file *nf = find_any_file(fp);
++	struct nfsd_file *nf;
+ 	struct inode *inode;
+ 	struct file_lock_context *flctx;
+ 
++	spin_lock(&fp->fi_lock);
++	nf = find_any_file_locked(fp);
+ 	if (!nf) {
+ 		/* Any valid lock stateid should have some sort of access */
+ 		WARN_ON_ONCE(1);
+-		return status;
++		goto out;
+ 	}
+ 
+ 	inode = locks_inode(nf->nf_file);
+@@ -7759,7 +7760,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+ 		}
+ 		spin_unlock(&flctx->flc_lock);
+ 	}
+-	nfsd_file_put(nf);
++out:
++	spin_unlock(&fp->fi_lock);
+ 	return status;
+ }
+ 
+@@ -7769,10 +7771,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
+  * @cstate: NFSv4 COMPOUND state
+  * @u: RELEASE_LOCKOWNER arguments
+  *
+- * The lockowner's so_count is bumped when a lock record is added
+- * or when copying a conflicting lock. The latter case is brief,
+- * but can lead to fleeting false positives when looking for
+- * locks-in-use.
++ * Check if there are any locks still held and, if not, free the lockowner
++ * and any lock state that is owned.
+ * + * Return values: + * %nfs_ok: lockowner released or not found +@@ -7808,10 +7808,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp, + spin_unlock(&clp->cl_lock); + return nfs_ok; + } +- if (atomic_read(&lo->lo_owner.so_count) != 2) { +- spin_unlock(&clp->cl_lock); +- nfs4_put_stateowner(&lo->lo_owner); +- return nfserr_locks_held; ++ ++ list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) { ++ if (check_for_locks(stp->st_stid.sc_file, lo)) { ++ spin_unlock(&clp->cl_lock); ++ nfs4_put_stateowner(&lo->lo_owner); ++ return nfserr_locks_held; ++ } + } + unhash_lockowner_locked(lo); + while (!list_empty(&lo->lo_owner.so_stateids)) { +diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c +index 9930fa901039f..1e7f653c1df7e 100644 +--- a/fs/nilfs2/dat.c ++++ b/fs/nilfs2/dat.c +@@ -40,8 +40,21 @@ static inline struct nilfs_dat_info *NILFS_DAT_I(struct inode *dat) + static int nilfs_dat_prepare_entry(struct inode *dat, + struct nilfs_palloc_req *req, int create) + { +- return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr, +- create, &req->pr_entry_bh); ++ int ret; ++ ++ ret = nilfs_palloc_get_entry_block(dat, req->pr_entry_nr, ++ create, &req->pr_entry_bh); ++ if (unlikely(ret == -ENOENT)) { ++ nilfs_err(dat->i_sb, ++ "DAT doesn't have a block to manage vblocknr = %llu", ++ (unsigned long long)req->pr_entry_nr); ++ /* ++ * Return internal code -EINVAL to notify bmap layer of ++ * metadata corruption. ++ */ ++ ret = -EINVAL; ++ } ++ return ret; + } + + static void nilfs_dat_commit_entry(struct inode *dat, +@@ -123,11 +136,7 @@ static void nilfs_dat_commit_free(struct inode *dat, + + int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req) + { +- int ret; +- +- ret = nilfs_dat_prepare_entry(dat, req, 0); +- WARN_ON(ret == -ENOENT); +- return ret; ++ return nilfs_dat_prepare_entry(dat, req, 0); + } + + void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req, +@@ -154,10 +163,8 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req) + int ret; + + ret = nilfs_dat_prepare_entry(dat, req, 0); +- if (ret < 0) { +- WARN_ON(ret == -ENOENT); ++ if (ret < 0) + return ret; +- } + + kaddr = kmap_atomic(req->pr_entry_bh->b_page); + entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, +diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c +index a265d391ffe92..822e8d95d31ef 100644 +--- a/fs/nilfs2/file.c ++++ b/fs/nilfs2/file.c +@@ -105,7 +105,13 @@ static vm_fault_t nilfs_page_mkwrite(struct vm_fault *vmf) + nilfs_transaction_commit(inode->i_sb); + + mapped: +- wait_for_stable_page(page); ++ /* ++ * Since checksumming including data blocks is performed to determine ++ * the validity of the log to be written and used for recovery, it is ++ * necessary to wait for writeback to finish here, regardless of the ++ * stable write requirement of the backing device. 
++ */ ++ wait_on_page_writeback(page); + out: + sb_end_pagefault(inode->i_sb); + return block_page_mkwrite_return(ret); +diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c +index 0955b657938ff..a9b8d77c8c1d5 100644 +--- a/fs/nilfs2/recovery.c ++++ b/fs/nilfs2/recovery.c +@@ -472,9 +472,10 @@ static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs, + + static int nilfs_recovery_copy_block(struct the_nilfs *nilfs, + struct nilfs_recovery_block *rb, +- struct page *page) ++ loff_t pos, struct page *page) + { + struct buffer_head *bh_org; ++ size_t from = pos & ~PAGE_MASK; + void *kaddr; + + bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize); +@@ -482,7 +483,7 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs, + return -EIO; + + kaddr = kmap_atomic(page); +- memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size); ++ memcpy(kaddr + from, bh_org->b_data, bh_org->b_size); + kunmap_atomic(kaddr); + brelse(bh_org); + return 0; +@@ -521,7 +522,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, + goto failed_inode; + } + +- err = nilfs_recovery_copy_block(nilfs, rb, page); ++ err = nilfs_recovery_copy_block(nilfs, rb, pos, page); + if (unlikely(err)) + goto failed_page; + +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index a4a147a983e0a..0a84613960dbf 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -1702,7 +1702,6 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) + + list_for_each_entry(bh, &segbuf->sb_payload_buffers, + b_assoc_buffers) { +- set_buffer_async_write(bh); + if (bh == segbuf->sb_super_root) { + if (bh->b_page != bd_page) { + lock_page(bd_page); +@@ -1713,6 +1712,7 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci) + } + break; + } ++ set_buffer_async_write(bh); + if (bh->b_page != fs_page) { + nilfs_begin_page_io(fs_page); + fs_page = bh->b_page; +@@ -1798,7 +1798,6 @@ static void nilfs_abort_logs(struct list_head *logs, int err) + + list_for_each_entry(bh, &segbuf->sb_payload_buffers, + b_assoc_buffers) { +- clear_buffer_async_write(bh); + if (bh == segbuf->sb_super_root) { + clear_buffer_uptodate(bh); + if (bh->b_page != bd_page) { +@@ -1807,6 +1806,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err) + } + break; + } ++ clear_buffer_async_write(bh); + if (bh->b_page != fs_page) { + nilfs_end_page_io(fs_page, err); + fs_page = bh->b_page; +@@ -1894,8 +1894,9 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) + BIT(BH_Delay) | BIT(BH_NILFS_Volatile) | + BIT(BH_NILFS_Redirected)); + +- set_mask_bits(&bh->b_state, clear_bits, set_bits); + if (bh == segbuf->sb_super_root) { ++ set_buffer_uptodate(bh); ++ clear_buffer_dirty(bh); + if (bh->b_page != bd_page) { + end_page_writeback(bd_page); + bd_page = bh->b_page; +@@ -1903,6 +1904,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) + update_sr = true; + break; + } ++ set_mask_bits(&bh->b_state, clear_bits, set_bits); + if (bh->b_page != fs_page) { + nilfs_end_page_io(fs_page, 0); + fs_page = bh->b_page; +diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c +index 873b1434a9989..4b72bc7f12ca3 100644 +--- a/fs/ntfs3/fsntfs.c ++++ b/fs/ntfs3/fsntfs.c +@@ -1842,10 +1842,12 @@ int ntfs_security_init(struct ntfs_sb_info *sbi) + goto out; + } + +- root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT)); +- if (root_sdh->type != ATTR_ZERO || ++ if(!(root_sdh = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) || ++ root_sdh->type != ATTR_ZERO || + 
root_sdh->rule != NTFS_COLLATION_TYPE_SECURITY_HASH || +- offsetof(struct INDEX_ROOT, ihdr) + root_sdh->ihdr.used > attr->res.data_size) { ++ offsetof(struct INDEX_ROOT, ihdr) + ++ le32_to_cpu(root_sdh->ihdr.used) > ++ le32_to_cpu(attr->res.data_size)) { + err = -EINVAL; + goto out; + } +@@ -1861,10 +1863,12 @@ int ntfs_security_init(struct ntfs_sb_info *sbi) + goto out; + } + +- root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT)); +- if (root_sii->type != ATTR_ZERO || ++ if(!(root_sii = resident_data_ex(attr, sizeof(struct INDEX_ROOT))) || ++ root_sii->type != ATTR_ZERO || + root_sii->rule != NTFS_COLLATION_TYPE_UINT || +- offsetof(struct INDEX_ROOT, ihdr) + root_sii->ihdr.used > attr->res.data_size) { ++ offsetof(struct INDEX_ROOT, ihdr) + ++ le32_to_cpu(root_sii->ihdr.used) > ++ le32_to_cpu(attr->res.data_size)) { + err = -EINVAL; + goto out; + } +diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c +index b89a33f5761ef..7371f7855e4c4 100644 +--- a/fs/ntfs3/index.c ++++ b/fs/ntfs3/index.c +@@ -1097,7 +1097,8 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn, + } + + /* check for index header length */ +- if (offsetof(struct INDEX_BUFFER, ihdr) + ib->ihdr.used > bytes) { ++ if (offsetof(struct INDEX_BUFFER, ihdr) + le32_to_cpu(ib->ihdr.used) > ++ bytes) { + err = -EINVAL; + goto out; + } +diff --git a/fs/proc/array.c b/fs/proc/array.c +index 49283b8103c7e..1b0d78dfd20f9 100644 +--- a/fs/proc/array.c ++++ b/fs/proc/array.c +@@ -501,7 +501,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + + sigemptyset(&sigign); + sigemptyset(&sigcatch); +- cutime = cstime = utime = stime = 0; ++ cutime = cstime = 0; + cgtime = gtime = 0; + + if (lock_task_sighand(task, &flags)) { +@@ -535,7 +535,6 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + + min_flt += sig->min_flt; + maj_flt += sig->maj_flt; +- thread_group_cputime_adjusted(task, &utime, &stime); + gtime += sig->gtime; + + if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED)) +@@ -551,10 +550,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + + if (permitted && (!whole || num_threads < 2)) + wchan = !task_is_running(task); +- if (!whole) { ++ ++ if (whole) { ++ thread_group_cputime_adjusted(task, &utime, &stime); ++ } else { ++ task_cputime_adjusted(task, &utime, &stime); + min_flt = task->min_flt; + maj_flt = task->maj_flt; +- task_cputime_adjusted(task, &utime, &stime); + gtime = task_gtime(task); + } + +diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c +index 5a132c1e6f6c4..6f4d7aa70e5a2 100644 +--- a/fs/smb/client/cached_dir.c ++++ b/fs/smb/client/cached_dir.c +@@ -268,10 +268,12 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, + if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) + goto oshr_free; + +- smb2_parse_contexts(server, o_rsp, ++ rc = smb2_parse_contexts(server, rsp_iov, + &oparms.fid->epoch, +- oparms.fid->lease_key, &oplock, +- NULL, NULL); ++ oparms.fid->lease_key, ++ &oplock, NULL, NULL); ++ if (rc) ++ goto oshr_free; + if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) + goto oshr_free; + qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; +diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c +index 5a157000bdfe6..34d1262004dfb 100644 +--- a/fs/smb/client/smb2ops.c ++++ b/fs/smb/client/smb2ops.c +@@ -613,7 +613,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf, + goto out; + } + +- while (bytes_left >= sizeof(*p)) { ++ while (bytes_left >= 
(ssize_t)sizeof(*p)) { + memset(&tmp_iface, 0, sizeof(tmp_iface)); + tmp_iface.speed = le64_to_cpu(p->LinkSpeed); + tmp_iface.rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE) ? 1 : 0; +diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c +index e65f998ea4cfc..c1fc1651d8b69 100644 +--- a/fs/smb/client/smb2pdu.c ++++ b/fs/smb/client/smb2pdu.c +@@ -2145,17 +2145,18 @@ parse_posix_ctxt(struct create_context *cc, struct smb2_file_all_info *info, + posix->nlink, posix->mode, posix->reparse_tag); + } + +-void +-smb2_parse_contexts(struct TCP_Server_Info *server, +- struct smb2_create_rsp *rsp, +- unsigned int *epoch, char *lease_key, __u8 *oplock, +- struct smb2_file_all_info *buf, +- struct create_posix_rsp *posix) ++int smb2_parse_contexts(struct TCP_Server_Info *server, ++ struct kvec *rsp_iov, ++ unsigned int *epoch, ++ char *lease_key, __u8 *oplock, ++ struct smb2_file_all_info *buf, ++ struct create_posix_rsp *posix) + { +- char *data_offset; ++ struct smb2_create_rsp *rsp = rsp_iov->iov_base; + struct create_context *cc; +- unsigned int next; +- unsigned int remaining; ++ size_t rem, off, len; ++ size_t doff, dlen; ++ size_t noff, nlen; + char *name; + static const char smb3_create_tag_posix[] = { + 0x93, 0xAD, 0x25, 0x50, 0x9C, +@@ -2164,45 +2165,63 @@ smb2_parse_contexts(struct TCP_Server_Info *server, + }; + + *oplock = 0; +- data_offset = (char *)rsp + le32_to_cpu(rsp->CreateContextsOffset); +- remaining = le32_to_cpu(rsp->CreateContextsLength); +- cc = (struct create_context *)data_offset; ++ ++ off = le32_to_cpu(rsp->CreateContextsOffset); ++ rem = le32_to_cpu(rsp->CreateContextsLength); ++ if (check_add_overflow(off, rem, &len) || len > rsp_iov->iov_len) ++ return -EINVAL; ++ cc = (struct create_context *)((u8 *)rsp + off); + + /* Initialize inode number to 0 in case no valid data in qfid context */ + if (buf) + buf->IndexNumber = 0; + +- while (remaining >= sizeof(struct create_context)) { +- name = le16_to_cpu(cc->NameOffset) + (char *)cc; +- if (le16_to_cpu(cc->NameLength) == 4 && +- strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4) == 0) +- *oplock = server->ops->parse_lease_buf(cc, epoch, +- lease_key); +- else if (buf && (le16_to_cpu(cc->NameLength) == 4) && +- strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4) == 0) +- parse_query_id_ctxt(cc, buf); +- else if ((le16_to_cpu(cc->NameLength) == 16)) { +- if (posix && +- memcmp(name, smb3_create_tag_posix, 16) == 0) ++ while (rem >= sizeof(*cc)) { ++ doff = le16_to_cpu(cc->DataOffset); ++ dlen = le32_to_cpu(cc->DataLength); ++ if (check_add_overflow(doff, dlen, &len) || len > rem) ++ return -EINVAL; ++ ++ noff = le16_to_cpu(cc->NameOffset); ++ nlen = le16_to_cpu(cc->NameLength); ++ if (noff + nlen > doff) ++ return -EINVAL; ++ ++ name = (char *)cc + noff; ++ switch (nlen) { ++ case 4: ++ if (!strncmp(name, SMB2_CREATE_REQUEST_LEASE, 4)) { ++ *oplock = server->ops->parse_lease_buf(cc, epoch, ++ lease_key); ++ } else if (buf && ++ !strncmp(name, SMB2_CREATE_QUERY_ON_DISK_ID, 4)) { ++ parse_query_id_ctxt(cc, buf); ++ } ++ break; ++ case 16: ++ if (posix && !memcmp(name, smb3_create_tag_posix, 16)) + parse_posix_ctxt(cc, buf, posix); ++ break; ++ default: ++ cifs_dbg(FYI, "%s: unhandled context (nlen=%zu dlen=%zu)\n", ++ __func__, nlen, dlen); ++ if (IS_ENABLED(CONFIG_CIFS_DEBUG2)) ++ cifs_dump_mem("context data: ", cc, dlen); ++ break; + } +- /* else { +- cifs_dbg(FYI, "Context not matched with len %d\n", +- le16_to_cpu(cc->NameLength)); +- cifs_dump_mem("Cctxt name: ", name, 4); +- } */ +- +- next = le32_to_cpu(cc->Next); 
+- if (!next) ++ ++ off = le32_to_cpu(cc->Next); ++ if (!off) + break; +- remaining -= next; +- cc = (struct create_context *)((char *)cc + next); ++ if (check_sub_overflow(rem, off, &rem)) ++ return -EINVAL; ++ cc = (struct create_context *)((u8 *)cc + off); + } + + if (rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) + *oplock = rsp->OplockLevel; + +- return; ++ return 0; + } + + static int +@@ -3082,8 +3101,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path, + } + + +- smb2_parse_contexts(server, rsp, &oparms->fid->epoch, +- oparms->fid->lease_key, oplock, buf, posix); ++ rc = smb2_parse_contexts(server, &rsp_iov, &oparms->fid->epoch, ++ oparms->fid->lease_key, oplock, buf, posix); + creat_exit: + SMB2_open_free(&rqst); + free_rsp_buf(resp_buftype, rsp); +diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h +index be21b5d26f67e..b325fde010adc 100644 +--- a/fs/smb/client/smb2proto.h ++++ b/fs/smb/client/smb2proto.h +@@ -249,11 +249,13 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *); + + extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *, + enum securityEnum); +-extern void smb2_parse_contexts(struct TCP_Server_Info *server, +- struct smb2_create_rsp *rsp, +- unsigned int *epoch, char *lease_key, +- __u8 *oplock, struct smb2_file_all_info *buf, +- struct create_posix_rsp *posix); ++int smb2_parse_contexts(struct TCP_Server_Info *server, ++ struct kvec *rsp_iov, ++ unsigned int *epoch, ++ char *lease_key, __u8 *oplock, ++ struct smb2_file_all_info *buf, ++ struct create_posix_rsp *posix); ++ + extern int smb3_encryption_required(const struct cifs_tcon *tcon); + extern int smb2_validate_iov(unsigned int offset, unsigned int buffer_length, + struct kvec *iov, unsigned int min_buf_size); +diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c +index 4cfa45c2727ea..66d25d0e34d8b 100644 +--- a/fs/smb/server/smb2pdu.c ++++ b/fs/smb/server/smb2pdu.c +@@ -6171,8 +6171,10 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work) + err = ksmbd_iov_pin_rsp_read(work, (void *)rsp, + offsetof(struct smb2_read_rsp, Buffer), + aux_payload_buf, nbytes); +- if (err) ++ if (err) { ++ kvfree(aux_payload_buf); + goto out; ++ } + kvfree(rpc_resp); + } else { + err = ksmbd_iov_pin_rsp(work, (void *)rsp, +@@ -6382,8 +6384,10 @@ int smb2_read(struct ksmbd_work *work) + err = ksmbd_iov_pin_rsp_read(work, (void *)rsp, + offsetof(struct smb2_read_rsp, Buffer), + aux_payload_buf, nbytes); +- if (err) ++ if (err) { ++ kvfree(aux_payload_buf); + goto out; ++ } + ksmbd_fd_put(work, fp); + return 0; + +diff --git a/fs/zonefs/file.c b/fs/zonefs/file.c +index 63cd50840419c..8d5f4a5a74e65 100644 +--- a/fs/zonefs/file.c ++++ b/fs/zonefs/file.c +@@ -349,7 +349,12 @@ static int zonefs_file_write_dio_end_io(struct kiocb *iocb, ssize_t size, + struct zonefs_inode_info *zi = ZONEFS_I(inode); + + if (error) { +- zonefs_io_error(inode, true); ++ /* ++ * For Sync IOs, error recovery is called from ++ * zonefs_file_dio_write(). ++ */ ++ if (!is_sync_kiocb(iocb)) ++ zonefs_io_error(inode, true); + return error; + } + +@@ -577,6 +582,14 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) + ret = -EINVAL; + goto inode_unlock; + } ++ /* ++ * Advance the zone write pointer offset. This assumes that the ++ * IO will succeed, which is OK to do because we do not allow ++ * partial writes (IOMAP_DIO_PARTIAL is not set) and if the IO ++ * fails, the error path will correct the write pointer offset. 
++ */ ++ z->z_wpoffset += count; ++ zonefs_inode_account_active(inode); + mutex_unlock(&zi->i_truncate_mutex); + append = sync; + } +@@ -596,20 +609,19 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from) + ret = -EBUSY; + } + +- if (zonefs_zone_is_seq(z) && +- (ret > 0 || ret == -EIOCBQUEUED)) { +- if (ret > 0) +- count = ret; +- +- /* +- * Update the zone write pointer offset assuming the write +- * operation succeeded. If it did not, the error recovery path +- * will correct it. Also do active seq file accounting. +- */ +- mutex_lock(&zi->i_truncate_mutex); +- z->z_wpoffset += count; +- zonefs_inode_account_active(inode); +- mutex_unlock(&zi->i_truncate_mutex); ++ /* ++ * For a failed IO or partial completion, trigger error recovery ++ * to update the zone write pointer offset to a correct value. ++ * For asynchronous IOs, zonefs_file_write_dio_end_io() may already ++ * have executed error recovery if the IO already completed when we ++ * reach here. However, we cannot know that and execute error recovery ++ * again (that will not change anything). ++ */ ++ if (zonefs_zone_is_seq(z)) { ++ if (ret > 0 && ret != count) ++ ret = -EIO; ++ if (ret < 0 && ret != -EIOCBQUEUED) ++ zonefs_io_error(inode, true); + } + + inode_unlock: +diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c +index 270ded209dde5..f6b701261078c 100644 +--- a/fs/zonefs/super.c ++++ b/fs/zonefs/super.c +@@ -245,16 +245,18 @@ static void zonefs_inode_update_mode(struct inode *inode) + z->z_flags &= ~ZONEFS_ZONE_INIT_MODE; + } + +-struct zonefs_ioerr_data { +- struct inode *inode; +- bool write; +-}; +- + static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx, + void *data) + { +- struct zonefs_ioerr_data *err = data; +- struct inode *inode = err->inode; ++ struct blk_zone *z = data; ++ ++ *z = *zone; ++ return 0; ++} ++ ++static void zonefs_handle_io_error(struct inode *inode, struct blk_zone *zone, ++ bool write) ++{ + struct zonefs_zone *z = zonefs_inode_zone(inode); + struct super_block *sb = inode->i_sb; + struct zonefs_sb_info *sbi = ZONEFS_SB(sb); +@@ -269,8 +271,8 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx, + data_size = zonefs_check_zone_condition(sb, z, zone); + isize = i_size_read(inode); + if (!(z->z_flags & (ZONEFS_ZONE_READONLY | ZONEFS_ZONE_OFFLINE)) && +- !err->write && isize == data_size) +- return 0; ++ !write && isize == data_size) ++ return; + + /* + * At this point, we detected either a bad zone or an inconsistency +@@ -291,7 +293,7 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx, + * In all cases, warn about inode size inconsistency and handle the + * IO error according to the zone condition and to the mount options. 
+ */
+- if (zonefs_zone_is_seq(z) && isize != data_size)
++ if (isize != data_size)
+ zonefs_warn(sb,
+ "inode %lu: invalid size %lld (should be %lld)\n",
+ inode->i_ino, isize, data_size);
+@@ -351,8 +353,6 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
+ zonefs_i_size_write(inode, data_size);
+ z->z_wpoffset = data_size;
+ zonefs_inode_account_active(inode);
+-
+- return 0;
+ }
+
+ /*
+@@ -366,23 +366,25 @@ void __zonefs_io_error(struct inode *inode, bool write)
+ {
+ struct zonefs_zone *z = zonefs_inode_zone(inode);
+ struct super_block *sb = inode->i_sb;
+- struct zonefs_sb_info *sbi = ZONEFS_SB(sb);
+ unsigned int noio_flag;
+- unsigned int nr_zones = 1;
+- struct zonefs_ioerr_data err = {
+- .inode = inode,
+- .write = write,
+- };
++ struct blk_zone zone;
+ int ret;
+
+ /*
+- * The only files that have more than one zone are conventional zone
+- * files with aggregated conventional zones, for which the inode zone
+- * size is always larger than the device zone size.
++ * Conventional zones have no write pointer and cannot become read-only
++ * or offline. So simply fake a report for a single or aggregated zone
++ * and let zonefs_handle_io_error() correct the zone inode information
++ * according to the mount options.
+ */
+- if (z->z_size > bdev_zone_sectors(sb->s_bdev))
+- nr_zones = z->z_size >>
+- (sbi->s_zone_sectors_shift + SECTOR_SHIFT);
++ if (!zonefs_zone_is_seq(z)) {
++ zone.start = z->z_sector;
++ zone.len = z->z_size >> SECTOR_SHIFT;
++ zone.wp = zone.start + zone.len;
++ zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
++ zone.cond = BLK_ZONE_COND_NOT_WP;
++ zone.capacity = zone.len;
++ goto handle_io_error;
++ }
+
+ /*
+ * Memory allocations in blkdev_report_zones() can trigger a memory
+@@ -393,12 +395,20 @@ void __zonefs_io_error(struct inode *inode, bool write)
+ * the GFP_NOIO context avoids both problems.
+ */
+ noio_flag = memalloc_noio_save();
+- ret = blkdev_report_zones(sb->s_bdev, z->z_sector, nr_zones,
+- zonefs_io_error_cb, &err);
+- if (ret != nr_zones)
++ ret = blkdev_report_zones(sb->s_bdev, z->z_sector, 1,
++ zonefs_io_error_cb, &zone);
++ memalloc_noio_restore(noio_flag);
++
++ if (ret != 1) {
+ zonefs_err(sb, "Get inode %lu zone information failed %d\n",
+ inode->i_ino, ret);
+- memalloc_noio_restore(noio_flag);
++ zonefs_warn(sb, "remounting filesystem read-only\n");
++ sb->s_flags |= SB_RDONLY;
++ return;
++ }
++
++handle_io_error:
++ zonefs_handle_io_error(inode, &zone, write);
+ }
+
+ static struct kmem_cache *zonefs_inode_cachep;
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 7ad6f51b3d914..1d1f480a5e9e4 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -351,7 +351,6 @@
+ *(.ref.data) \
+ *(.data..shared_aligned) /* percpu related */ \
+ MEM_KEEP(init.data*) \
+- MEM_KEEP(exit.data*) \
+ *(.data.unlikely) \
+ __start_once = .; \
+ *(.data.once) \
+@@ -546,7 +545,6 @@
+ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
+ *(.ref.rodata) \
+ MEM_KEEP(init.rodata) \
+- MEM_KEEP(exit.rodata) \
+ } \
+ \
+ /* Built-in module parameters. 
*/ \ +@@ -601,7 +599,6 @@ + *(.ref.text) \ + *(.text.asan.* .text.tsan.*) \ + MEM_KEEP(init.text*) \ +- MEM_KEEP(exit.text*) \ + + + /* sched.text is aling to function alignment to secure we have same +@@ -751,13 +748,10 @@ + *(.exit.data .exit.data.*) \ + *(.fini_array .fini_array.*) \ + *(.dtors .dtors.*) \ +- MEM_DISCARD(exit.data*) \ +- MEM_DISCARD(exit.rodata*) + + #define EXIT_TEXT \ + *(.exit.text) \ + *(.text.exit) \ +- MEM_DISCARD(exit.text) + + #define EXIT_CALL \ + *(.exitcall.exit) +diff --git a/include/linux/bpf.h b/include/linux/bpf.h +index c04a61ffac8ae..1ca1902af23e9 100644 +--- a/include/linux/bpf.h ++++ b/include/linux/bpf.h +@@ -2739,10 +2739,18 @@ struct btf_id_set; + bool btf_id_set_contains(const struct btf_id_set *set, u32 id); + + #define MAX_BPRINTF_VARARGS 12 ++#define MAX_BPRINTF_BUF 1024 ++ ++struct bpf_bprintf_data { ++ u32 *bin_args; ++ char *buf; ++ bool get_bin_args; ++ bool get_buf; ++}; + + int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, +- u32 **bin_buf, u32 num_args); +-void bpf_bprintf_cleanup(void); ++ u32 num_args, struct bpf_bprintf_data *data); ++void bpf_bprintf_cleanup(struct bpf_bprintf_data *data); + + /* the implementation of the opaque uapi struct bpf_dynptr */ + struct bpf_dynptr_kern { +diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h +new file mode 100644 +index 0000000000000..53f1a7a932b08 +--- /dev/null ++++ b/include/linux/cleanup.h +@@ -0,0 +1,171 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++#ifndef __LINUX_GUARDS_H ++#define __LINUX_GUARDS_H ++ ++#include <linux/compiler.h> ++ ++/* ++ * DEFINE_FREE(name, type, free): ++ * simple helper macro that defines the required wrapper for a __free() ++ * based cleanup function. @free is an expression using '_T' to access ++ * the variable. ++ * ++ * __free(name): ++ * variable attribute to add a scoped based cleanup to the variable. ++ * ++ * no_free_ptr(var): ++ * like a non-atomic xchg(var, NULL), such that the cleanup function will ++ * be inhibited -- provided it sanely deals with a NULL value. ++ * ++ * return_ptr(p): ++ * returns p while inhibiting the __free(). ++ * ++ * Ex. ++ * ++ * DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) ++ * ++ * struct obj *p __free(kfree) = kmalloc(...); ++ * if (!p) ++ * return NULL; ++ * ++ * if (!init_obj(p)) ++ * return NULL; ++ * ++ * return_ptr(p); ++ */ ++ ++#define DEFINE_FREE(_name, _type, _free) \ ++ static inline void __free_##_name(void *p) { _type _T = *(_type *)p; _free; } ++ ++#define __free(_name) __cleanup(__free_##_name) ++ ++#define no_free_ptr(p) \ ++ ({ __auto_type __ptr = (p); (p) = NULL; __ptr; }) ++ ++#define return_ptr(p) return no_free_ptr(p) ++ ++ ++/* ++ * DEFINE_CLASS(name, type, exit, init, init_args...): ++ * helper to define the destructor and constructor for a type. ++ * @exit is an expression using '_T' -- similar to FREE above. ++ * @init is an expression in @init_args resulting in @type ++ * ++ * EXTEND_CLASS(name, ext, init, init_args...): ++ * extends class @name to @name@ext with the new constructor ++ * ++ * CLASS(name, var)(args...): ++ * declare the variable @var as an instance of the named class ++ * ++ * Ex. ++ * ++ * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd) ++ * ++ * CLASS(fdget, f)(fd); ++ * if (!f.file) ++ * return -EBADF; ++ * ++ * // use 'f' without concern ++ */ ++ ++#define DEFINE_CLASS(_name, _type, _exit, _init, _init_args...) 
\
++typedef _type class_##_name##_t; \
++static inline void class_##_name##_destructor(_type *p) \
++{ _type _T = *p; _exit; } \
++static inline _type class_##_name##_constructor(_init_args) \
++{ _type t = _init; return t; }
++
++#define EXTEND_CLASS(_name, ext, _init, _init_args...) \
++typedef class_##_name##_t class_##_name##ext##_t; \
++static inline void class_##_name##ext##_destructor(class_##_name##_t *p)\
++{ class_##_name##_destructor(p); } \
++static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
++{ class_##_name##_t t = _init; return t; }
++
++#define CLASS(_name, var) \
++ class_##_name##_t var __cleanup(class_##_name##_destructor) = \
++ class_##_name##_constructor
++
++
++/*
++ * DEFINE_GUARD(name, type, lock, unlock):
++ * trivial wrapper around DEFINE_CLASS() above specifically
++ * for locks.
++ *
++ * guard(name):
++ * an anonymous instance of the (guard) class
++ *
++ * scoped_guard (name, args...) { }:
++ * similar to CLASS(name, scope)(args), except the variable (with the
++ * explicit name 'scope') is declared in a for-loop such that its scope is
++ * bound to the next (compound) statement.
++ *
++ */
++
++#define DEFINE_GUARD(_name, _type, _lock, _unlock) \
++ DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
++
++#define guard(_name) \
++ CLASS(_name, __UNIQUE_ID(guard))
++
++#define scoped_guard(_name, args...) \
++ for (CLASS(_name, scope)(args), \
++ *done = NULL; !done; done = (void *)1)
++
++/*
++ * Additional helper macros for generating lock guards with types, either for
++ * locks that don't have a native type (eg. RCU, preempt) or those that need a
++ * 'fat' pointer (eg. spin_lock_irqsave).
++ *
++ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
++ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
++ *
++ * will result in the following type:
++ *
++ * typedef struct {
++ * type *lock; // 'type := void' for the _0 variant
++ * __VA_ARGS__;
++ * } class_##name##_t;
++ *
++ * As above, both _lock and _unlock are statements, except this time '_T' will
++ * be a pointer to the above struct.
++ */
++
++#define __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, ...) \
++typedef struct { \
++ _type *lock; \
++ __VA_ARGS__; \
++} class_##_name##_t; \
++ \
++static inline void class_##_name##_destructor(class_##_name##_t *_T) \
++{ \
++ if (_T->lock) { _unlock; } \
++}
++
++
++#define __DEFINE_LOCK_GUARD_1(_name, _type, _lock) \
++static inline class_##_name##_t class_##_name##_constructor(_type *l) \
++{ \
++ class_##_name##_t _t = { .lock = l }, *_T = &_t; \
++ _lock; \
++ return _t; \
++}
++
++#define __DEFINE_LOCK_GUARD_0(_name, _lock) \
++static inline class_##_name##_t class_##_name##_constructor(void) \
++{ \
++ class_##_name##_t _t = { .lock = (void*)1 }, \
++ *_T __maybe_unused = &_t; \
++ _lock; \
++ return _t; \
++}
++
++#define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
++__DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
++__DEFINE_LOCK_GUARD_1(_name, _type, _lock)
++
++#define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) 
\ ++__DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \ ++__DEFINE_LOCK_GUARD_0(_name, _lock) ++ ++#endif /* __LINUX_GUARDS_H */ +diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h +index 6cfd6902bd5b9..9b673fefcef8a 100644 +--- a/include/linux/compiler-clang.h ++++ b/include/linux/compiler-clang.h +@@ -5,6 +5,15 @@ + + /* Compiler specific definitions for Clang compiler */ + ++/* ++ * Clang prior to 17 is being silly and considers many __cleanup() variables ++ * as unused (because they are, their sole purpose is to go out of scope). ++ * ++ * https://reviews.llvm.org/D152180 ++ */ ++#undef __cleanup ++#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func))) ++ + /* same as gcc, this was present in clang-2.6 so we can assume it works + * with any version that can compile the kernel + */ +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h +index f55a37efdb974..149a520515e1d 100644 +--- a/include/linux/compiler-gcc.h ++++ b/include/linux/compiler-gcc.h +@@ -66,6 +66,26 @@ + __builtin_unreachable(); \ + } while (0) + ++/* ++ * GCC 'asm goto' with outputs miscompiles certain code sequences: ++ * ++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921 ++ * ++ * Work around it via the same compiler barrier quirk that we used ++ * to use for the old 'asm goto' workaround. ++ * ++ * Also, always mark such 'asm goto' statements as volatile: all ++ * asm goto statements are supposed to be volatile as per the ++ * documentation, but some versions of gcc didn't actually do ++ * that for asms with outputs: ++ * ++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98619 ++ */ ++#ifdef CONFIG_GCC_ASM_GOTO_OUTPUT_WORKAROUND ++#define asm_goto_output(x...) \ ++ do { asm volatile goto(x); asm (""); } while (0) ++#endif ++ + #if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) + #define __HAVE_BUILTIN_BSWAP32__ + #define __HAVE_BUILTIN_BSWAP64__ +diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h +index 898b3458b24a0..ae4c9579ca5f0 100644 +--- a/include/linux/compiler_attributes.h ++++ b/include/linux/compiler_attributes.h +@@ -75,6 +75,12 @@ + # define __assume_aligned(a, ...) + #endif + ++/* ++ * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-cleanup-variable-attribute ++ * clang: https://clang.llvm.org/docs/AttributeReference.html#cleanup ++ */ ++#define __cleanup(func) __attribute__((__cleanup__(func))) ++ + /* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute +diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h +index eb0466236661f..574b4121ebe3e 100644 +--- a/include/linux/compiler_types.h ++++ b/include/linux/compiler_types.h +@@ -284,8 +284,15 @@ struct ftrace_likely_data { + # define __realloc_size(x, ...) + #endif + +-#ifndef asm_volatile_goto +-#define asm_volatile_goto(x...) asm goto(x) ++/* ++ * Some versions of gcc do not mark 'asm goto' volatile: ++ * ++ * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103979 ++ * ++ * We do it here by hand, because it doesn't hurt. ++ */ ++#ifndef asm_goto_output ++#define asm_goto_output(x...) 
asm volatile goto(x) + #endif + + #ifdef CONFIG_CC_HAS_ASM_INLINE +diff --git a/include/linux/device.h b/include/linux/device.h +index 7cf24330d6814..5520bb546a4ac 100644 +--- a/include/linux/device.h ++++ b/include/linux/device.h +@@ -30,6 +30,7 @@ + #include <linux/device/bus.h> + #include <linux/device/class.h> + #include <linux/device/driver.h> ++#include <linux/cleanup.h> + #include <asm/device.h> + + struct device; +@@ -898,6 +899,9 @@ void device_unregister(struct device *dev); + void device_initialize(struct device *dev); + int __must_check device_add(struct device *dev); + void device_del(struct device *dev); ++ ++DEFINE_FREE(device_del, struct device *, if (_T) device_del(_T)) ++ + int device_for_each_child(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); + int device_for_each_child_reverse(struct device *dev, void *data, +@@ -1071,6 +1075,9 @@ extern int (*platform_notify_remove)(struct device *dev); + */ + struct device *get_device(struct device *dev); + void put_device(struct device *dev); ++ ++DEFINE_FREE(put_device, struct device *, if (_T) put_device(_T)) ++ + bool kill_device(struct device *dev); + + #ifdef CONFIG_DEVTMPFS +diff --git a/include/linux/file.h b/include/linux/file.h +index 39704eae83e27..6e9099d293436 100644 +--- a/include/linux/file.h ++++ b/include/linux/file.h +@@ -10,6 +10,7 @@ + #include <linux/types.h> + #include <linux/posix_types.h> + #include <linux/errno.h> ++#include <linux/cleanup.h> + + struct file; + +@@ -80,6 +81,8 @@ static inline void fdput_pos(struct fd f) + fdput(f); + } + ++DEFINE_CLASS(fd, struct fd, fdput(_T), fdget(fd), int fd) ++ + extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); + extern int replace_fd(unsigned fd, struct file *file, unsigned flags); + extern void set_close_on_exec(unsigned int fd, int flag); +@@ -88,6 +91,9 @@ extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile); + extern int get_unused_fd_flags(unsigned flags); + extern void put_unused_fd(unsigned int fd); + ++DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T), ++ get_unused_fd_flags(flags), unsigned flags) ++ + extern void fd_install(unsigned int fd, struct file *file); + + extern int __receive_fd(struct file *file, int __user *ufd, +diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h +index 7852f6c9a714c..719cf9cc6e1ac 100644 +--- a/include/linux/iio/adc/ad_sigma_delta.h ++++ b/include/linux/iio/adc/ad_sigma_delta.h +@@ -8,6 +8,8 @@ + #ifndef __AD_SIGMA_DELTA_H__ + #define __AD_SIGMA_DELTA_H__ + ++#include <linux/iio/iio.h> ++ + enum ad_sigma_delta_mode { + AD_SD_MODE_CONTINUOUS = 0, + AD_SD_MODE_SINGLE = 1, +@@ -99,7 +101,7 @@ struct ad_sigma_delta { + * 'rx_buf' is up to 32 bits per sample + 64 bit timestamp, + * rounded to 16 bytes to take into account padding. 
+ */ +- uint8_t tx_buf[4] ____cacheline_aligned; ++ uint8_t tx_buf[4] __aligned(IIO_DMA_MINALIGN); + uint8_t rx_buf[16] __aligned(8); + }; + +diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h +index db4a1b260348c..c34e648f07e28 100644 +--- a/include/linux/iio/common/st_sensors.h ++++ b/include/linux/iio/common/st_sensors.h +@@ -261,9 +261,9 @@ struct st_sensor_data { + bool hw_irq_trigger; + s64 hw_timestamp; + +- char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] ____cacheline_aligned; +- + struct mutex odr_lock; ++ ++ char buffer_data[ST_SENSORS_MAX_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN); + }; + + #ifdef CONFIG_IIO_BUFFER +diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h +index bcbefb7574751..af083aa0c4317 100644 +--- a/include/linux/iio/imu/adis.h ++++ b/include/linux/iio/imu/adis.h +@@ -11,6 +11,7 @@ + + #include <linux/spi/spi.h> + #include <linux/interrupt.h> ++#include <linux/iio/iio.h> + #include <linux/iio/types.h> + + #define ADIS_WRITE_REG(reg) ((0x80 | (reg))) +@@ -131,7 +132,7 @@ struct adis { + unsigned long irq_flag; + void *buffer; + +- u8 tx[10] ____cacheline_aligned; ++ u8 tx[10] __aligned(IIO_DMA_MINALIGN); + u8 rx[4]; + }; + +diff --git a/include/linux/init.h b/include/linux/init.h +index 077d7f93b402f..c96aea3229ca1 100644 +--- a/include/linux/init.h ++++ b/include/linux/init.h +@@ -87,9 +87,6 @@ + __latent_entropy + #define __meminitdata __section(".meminit.data") + #define __meminitconst __section(".meminit.rodata") +-#define __memexit __section(".memexit.text") __exitused __cold notrace +-#define __memexitdata __section(".memexit.data") +-#define __memexitconst __section(".memexit.rodata") + + /* For assembly routines */ + #define __HEAD .section ".head.text","ax" +diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h +index 5ec0fa71399e4..2b665c32f5fe6 100644 +--- a/include/linux/irqflags.h ++++ b/include/linux/irqflags.h +@@ -13,6 +13,7 @@ + #define _LINUX_TRACE_IRQFLAGS_H + + #include <linux/typecheck.h> ++#include <linux/cleanup.h> + #include <asm/irqflags.h> + #include <asm/percpu.h> + +@@ -267,4 +268,10 @@ extern void warn_bogus_irq_restore(void); + + #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) + ++DEFINE_LOCK_GUARD_0(irq, local_irq_disable(), local_irq_enable()) ++DEFINE_LOCK_GUARD_0(irqsave, ++ local_irq_save(_T->flags), ++ local_irq_restore(_T->flags), ++ unsigned long flags) ++ + #endif +diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h +index 74f9d9a6d3307..0e4ef9c5127ad 100644 +--- a/include/linux/mmc/sdio_ids.h ++++ b/include/linux/mmc/sdio_ids.h +@@ -102,6 +102,7 @@ + #define SDIO_DEVICE_ID_MARVELL_8977_BT 0x9146 + #define SDIO_DEVICE_ID_MARVELL_8987_WLAN 0x9149 + #define SDIO_DEVICE_ID_MARVELL_8987_BT 0x914a ++#define SDIO_DEVICE_ID_MARVELL_8978_WLAN 0x9159 + + #define SDIO_VENDOR_ID_MEDIATEK 0x037a + #define SDIO_DEVICE_ID_MEDIATEK_MT7663 0x7663 +diff --git a/include/linux/mutex.h b/include/linux/mutex.h +index 8f226d460f51c..a33aa9eb9fc3b 100644 +--- a/include/linux/mutex.h ++++ b/include/linux/mutex.h +@@ -19,6 +19,7 @@ + #include <asm/processor.h> + #include <linux/osq_lock.h> + #include <linux/debug_locks.h> ++#include <linux/cleanup.h> + + #ifdef CONFIG_DEBUG_LOCK_ALLOC + # define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ +@@ -219,4 +220,7 @@ extern void mutex_unlock(struct mutex *lock); + + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + ++DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), 
mutex_unlock(_T))
++DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
++
+ #endif /* __LINUX_MUTEX_H */
+diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
+index 72f5ebc5c97a9..0b217d4ae2a48 100644
+--- a/include/linux/netfilter/ipset/ip_set.h
++++ b/include/linux/netfilter/ipset/ip_set.h
+@@ -186,6 +186,8 @@ struct ip_set_type_variant {
+ /* Return true if "b" set is the same as "a"
+ * according to the create set parameters */
+ bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
++ /* Cancel ongoing garbage collectors before destroying the set */
++ void (*cancel_gc)(struct ip_set *set);
+ /* Region-locking is used */
+ bool region_lock;
+ };
+@@ -242,6 +244,8 @@ extern void ip_set_type_unregister(struct ip_set_type *set_type);
+
+ /* A generic IP set */
+ struct ip_set {
++ /* For call_rcu in destroy */
++ struct rcu_head rcu;
+ /* The name of the set */
+ char name[IPSET_MAXNAMELEN];
+ /* Lock protecting the set data */
+diff --git a/include/linux/percpu.h b/include/linux/percpu.h
+index f1ec5ad1351cc..ba00a49369cae 100644
+--- a/include/linux/percpu.h
++++ b/include/linux/percpu.h
+@@ -8,6 +8,7 @@
+ #include <linux/cpumask.h>
+ #include <linux/pfn.h>
+ #include <linux/init.h>
++#include <linux/cleanup.h>
+
+ #include <asm/percpu.h>
+
+@@ -128,6 +129,9 @@ extern void __init setup_per_cpu_areas(void);
+ extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
+ extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
+ extern void free_percpu(void __percpu *__pdata);
++
++DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
++
+ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
+
+ #define alloc_percpu_gfp(type, gfp) \
+diff --git a/include/linux/preempt.h b/include/linux/preempt.h
+index 8cfcc5d454512..9aa6358a1a16b 100644
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -8,6 +8,7 @@
+ */
+
+ #include <linux/linkage.h>
++#include <linux/cleanup.h>
+ #include <linux/list.h>
+
+ /*
+@@ -474,4 +475,8 @@ static __always_inline void preempt_enable_nested(void)
+ preempt_enable();
+ }
+
++DEFINE_LOCK_GUARD_0(preempt, preempt_disable(), preempt_enable())
++DEFINE_LOCK_GUARD_0(preempt_notrace, preempt_disable_notrace(), preempt_enable_notrace())
++DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())
++
+ #endif /* __LINUX_PREEMPT_H */
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 46bd9a331fd5d..d2507168b9c7b 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -27,6 +27,7 @@
+ #include <linux/preempt.h>
+ #include <linux/bottom_half.h>
+ #include <linux/lockdep.h>
++#include <linux/cleanup.h>
+ #include <asm/processor.h>
+ #include <linux/cpumask.h>
+ #include <linux/context_tracking_irq.h>
+@@ -1077,4 +1078,6 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
+ extern int rcu_expedited;
+ extern int rcu_normal;
+
++DEFINE_LOCK_GUARD_0(rcu, rcu_read_lock(), rcu_read_unlock())
++
+ #endif /* __LINUX_RCUPDATE_H */
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index efa5c324369a2..1dd530ce8b45b 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -15,6 +15,7 @@
+ #include <linux/spinlock.h>
+ #include <linux/atomic.h>
+ #include <linux/err.h>
++#include <linux/cleanup.h>
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define __RWSEM_DEP_MAP_INIT(lockname) \
+@@ -201,6 +202,13 @@ extern void up_read(struct rw_semaphore *sem);
+ */
+ extern void 
up_write(struct rw_semaphore *sem); + ++DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T)) ++DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T)) ++ ++DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T)) ++DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T)) ++ ++ + /* + * downgrade write lock to read lock + */ +diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h +index 7291fb6399d2a..aaa25ed1a8fe0 100644 +--- a/include/linux/sched/task.h ++++ b/include/linux/sched/task.h +@@ -145,6 +145,8 @@ static inline void put_task_struct(struct task_struct *t) + __put_task_struct(t); + } + ++DEFINE_FREE(put_task, struct task_struct *, if (_T) put_task_struct(_T)) ++ + static inline void put_task_struct_many(struct task_struct *t, int nr) + { + if (refcount_sub_and_test(nr, &t->usage)) +diff --git a/include/linux/slab.h b/include/linux/slab.h +index 45efc6c553b82..cb4b5deca9a9c 100644 +--- a/include/linux/slab.h ++++ b/include/linux/slab.h +@@ -17,6 +17,7 @@ + #include <linux/types.h> + #include <linux/workqueue.h> + #include <linux/percpu-refcount.h> ++#include <linux/cleanup.h> + + + /* +@@ -197,6 +198,8 @@ void kfree(const void *objp); + void kfree_sensitive(const void *objp); + size_t __ksize(const void *objp); + ++DEFINE_FREE(kfree, void *, if (_T) kfree(_T)) ++ + /** + * ksize - Report actual allocation size of associated object + * +diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h +index 1341f7d62da44..83377540c369a 100644 +--- a/include/linux/spinlock.h ++++ b/include/linux/spinlock.h +@@ -61,6 +61,7 @@ + #include <linux/stringify.h> + #include <linux/bottom_half.h> + #include <linux/lockdep.h> ++#include <linux/cleanup.h> + #include <asm/barrier.h> + #include <asm/mmiowb.h> + +@@ -493,5 +494,35 @@ int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, + + void free_bucket_spinlocks(spinlock_t *locks); + ++DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t, ++ raw_spin_lock(_T->lock), ++ raw_spin_unlock(_T->lock)) ++ ++DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t, ++ raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING), ++ raw_spin_unlock(_T->lock)) ++ ++DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t, ++ raw_spin_lock_irq(_T->lock), ++ raw_spin_unlock_irq(_T->lock)) ++ ++DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t, ++ raw_spin_lock_irqsave(_T->lock, _T->flags), ++ raw_spin_unlock_irqrestore(_T->lock, _T->flags), ++ unsigned long flags) ++ ++DEFINE_LOCK_GUARD_1(spinlock, spinlock_t, ++ spin_lock(_T->lock), ++ spin_unlock(_T->lock)) ++ ++DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t, ++ spin_lock_irq(_T->lock), ++ spin_unlock_irq(_T->lock)) ++ ++DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t, ++ spin_lock_irqsave(_T->lock, _T->flags), ++ spin_unlock_irqrestore(_T->lock, _T->flags), ++ unsigned long flags) ++ + #undef __LINUX_INSIDE_SPINLOCK_H + #endif /* __LINUX_SPINLOCK_H */ +diff --git a/include/linux/srcu.h b/include/linux/srcu.h +index 01226e4d960a0..f9e1fa7ff86fc 100644 +--- a/include/linux/srcu.h ++++ b/include/linux/srcu.h +@@ -212,4 +212,9 @@ static inline void smp_mb__after_srcu_read_unlock(void) + /* __srcu_read_unlock has smp_mb() internally so nothing to do here. 
*/ + } + ++DEFINE_LOCK_GUARD_1(srcu, struct srcu_struct, ++ _T->idx = srcu_read_lock(_T->lock), ++ srcu_read_unlock(_T->lock, _T->idx), ++ int idx) ++ + #endif +diff --git a/include/net/tls.h b/include/net/tls.h +index c36bf4c50027e..899c863aba02c 100644 +--- a/include/net/tls.h ++++ b/include/net/tls.h +@@ -108,9 +108,6 @@ struct tls_sw_context_tx { + struct tls_rec *open_rec; + struct list_head tx_list; + atomic_t encrypt_pending; +- /* protect crypto_wait with encrypt_pending */ +- spinlock_t encrypt_compl_lock; +- int async_notify; + u8 async_capable:1; + + #define BIT_TX_SCHEDULED 0 +@@ -147,8 +144,6 @@ struct tls_sw_context_rx { + struct tls_strparser strp; + + atomic_t decrypt_pending; +- /* protect crypto_wait with decrypt_pending*/ +- spinlock_t decrypt_compl_lock; + struct sk_buff_head async_hold; + struct wait_queue_head wq; + }; +diff --git a/init/Kconfig b/init/Kconfig +index 148704640252e..ffb927bf6034f 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -89,6 +89,15 @@ config CC_HAS_ASM_GOTO_TIED_OUTPUT + # Detect buggy gcc and clang, fixed in gcc-11 clang-14. + def_bool $(success,echo 'int foo(int *x) { asm goto (".long (%l[bar]) - .": "+m"(*x) ::: bar); return *x; bar: return 0; }' | $CC -x c - -c -o /dev/null) + ++config GCC_ASM_GOTO_OUTPUT_WORKAROUND ++ bool ++ depends on CC_IS_GCC && CC_HAS_ASM_GOTO_OUTPUT ++ # Fixed in GCC 14, 13.3, 12.4 and 11.5 ++ # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=113921 ++ default y if GCC_VERSION < 110500 ++ default y if GCC_VERSION >= 120000 && GCC_VERSION < 120400 ++ default y if GCC_VERSION >= 130000 && GCC_VERSION < 130300 ++ + config TOOLS_SUPPORT_RELR + def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) + +diff --git a/io_uring/net.c b/io_uring/net.c +index 618ab186fe036..c062ce66af12c 100644 +--- a/io_uring/net.c ++++ b/io_uring/net.c +@@ -1326,7 +1326,7 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags) + * has already been done + */ + if (issue_flags & IO_URING_F_MULTISHOT) +- ret = IOU_ISSUE_SKIP_COMPLETE; ++ return IOU_ISSUE_SKIP_COMPLETE; + return ret; + } + if (ret == -ERESTARTSYS) +@@ -1350,7 +1350,8 @@ int io_accept(struct io_kiocb *req, unsigned int issue_flags) + if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false)) + goto retry; + +- return -ECANCELED; ++ io_req_set_res(req, ret, 0); ++ return IOU_STOP_MULTISHOT; + } + + int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) +diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c +index 34135fbd6097e..6a61a98d602cd 100644 +--- a/kernel/bpf/helpers.c ++++ b/kernel/bpf/helpers.c +@@ -753,19 +753,20 @@ static int bpf_trace_copy_string(char *buf, void *unsafe_ptr, char fmt_ptype, + /* Per-cpu temp buffers used by printf-like helpers to store the bprintf binary + * arguments representation. 
+ */ +-#define MAX_BPRINTF_BUF_LEN 512 ++#define MAX_BPRINTF_BIN_ARGS 512 + + /* Support executing three nested bprintf helper calls on a given CPU */ + #define MAX_BPRINTF_NEST_LEVEL 3 + struct bpf_bprintf_buffers { +- char tmp_bufs[MAX_BPRINTF_NEST_LEVEL][MAX_BPRINTF_BUF_LEN]; ++ char bin_args[MAX_BPRINTF_BIN_ARGS]; ++ char buf[MAX_BPRINTF_BUF]; + }; +-static DEFINE_PER_CPU(struct bpf_bprintf_buffers, bpf_bprintf_bufs); ++ ++static DEFINE_PER_CPU(struct bpf_bprintf_buffers[MAX_BPRINTF_NEST_LEVEL], bpf_bprintf_bufs); + static DEFINE_PER_CPU(int, bpf_bprintf_nest_level); + +-static int try_get_fmt_tmp_buf(char **tmp_buf) ++static int try_get_buffers(struct bpf_bprintf_buffers **bufs) + { +- struct bpf_bprintf_buffers *bufs; + int nest_level; + + preempt_disable(); +@@ -775,18 +776,19 @@ static int try_get_fmt_tmp_buf(char **tmp_buf) + preempt_enable(); + return -EBUSY; + } +- bufs = this_cpu_ptr(&bpf_bprintf_bufs); +- *tmp_buf = bufs->tmp_bufs[nest_level - 1]; ++ *bufs = this_cpu_ptr(&bpf_bprintf_bufs[nest_level - 1]); + + return 0; + } + +-void bpf_bprintf_cleanup(void) ++void bpf_bprintf_cleanup(struct bpf_bprintf_data *data) + { +- if (this_cpu_read(bpf_bprintf_nest_level)) { +- this_cpu_dec(bpf_bprintf_nest_level); +- preempt_enable(); +- } ++ if (!data->bin_args && !data->buf) ++ return; ++ if (WARN_ON_ONCE(this_cpu_read(bpf_bprintf_nest_level) == 0)) ++ return; ++ this_cpu_dec(bpf_bprintf_nest_level); ++ preempt_enable(); + } + + /* +@@ -795,18 +797,20 @@ void bpf_bprintf_cleanup(void) + * Returns a negative value if fmt is an invalid format string or 0 otherwise. + * + * This can be used in two ways: +- * - Format string verification only: when bin_args is NULL ++ * - Format string verification only: when data->get_bin_args is false + * - Arguments preparation: in addition to the above verification, it writes in +- * bin_args a binary representation of arguments usable by bstr_printf where +- * pointers from BPF have been sanitized. ++ * data->bin_args a binary representation of arguments usable by bstr_printf ++ * where pointers from BPF have been sanitized. + * + * In argument preparation mode, if 0 is returned, safe temporary buffers are + * allocated and bpf_bprintf_cleanup should be called to free them after use. 
+ */ + int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, +- u32 **bin_args, u32 num_args) ++ u32 num_args, struct bpf_bprintf_data *data) + { ++ bool get_buffers = (data->get_bin_args && num_args) || data->get_buf; + char *unsafe_ptr = NULL, *tmp_buf = NULL, *tmp_buf_end, *fmt_end; ++ struct bpf_bprintf_buffers *buffers = NULL; + size_t sizeof_cur_arg, sizeof_cur_ip; + int err, i, num_spec = 0; + u64 cur_arg; +@@ -817,14 +821,19 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, + return -EINVAL; + fmt_size = fmt_end - fmt; + +- if (bin_args) { +- if (num_args && try_get_fmt_tmp_buf(&tmp_buf)) +- return -EBUSY; ++ if (get_buffers && try_get_buffers(&buffers)) ++ return -EBUSY; + +- tmp_buf_end = tmp_buf + MAX_BPRINTF_BUF_LEN; +- *bin_args = (u32 *)tmp_buf; ++ if (data->get_bin_args) { ++ if (num_args) ++ tmp_buf = buffers->bin_args; ++ tmp_buf_end = tmp_buf + MAX_BPRINTF_BIN_ARGS; ++ data->bin_args = (u32 *)tmp_buf; + } + ++ if (data->get_buf) ++ data->buf = buffers->buf; ++ + for (i = 0; i < fmt_size; i++) { + if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) { + err = -EINVAL; +@@ -1018,31 +1027,33 @@ int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, + err = 0; + out: + if (err) +- bpf_bprintf_cleanup(); ++ bpf_bprintf_cleanup(data); + return err; + } + + BPF_CALL_5(bpf_snprintf, char *, str, u32, str_size, char *, fmt, +- const void *, data, u32, data_len) ++ const void *, args, u32, data_len) + { ++ struct bpf_bprintf_data data = { ++ .get_bin_args = true, ++ }; + int err, num_args; +- u32 *bin_args; + + if (data_len % 8 || data_len > MAX_BPRINTF_VARARGS * 8 || +- (data_len && !data)) ++ (data_len && !args)) + return -EINVAL; + num_args = data_len / 8; + + /* ARG_PTR_TO_CONST_STR guarantees that fmt is zero-terminated so we + * can safely give an unbounded size. + */ +- err = bpf_bprintf_prepare(fmt, UINT_MAX, data, &bin_args, num_args); ++ err = bpf_bprintf_prepare(fmt, UINT_MAX, args, num_args, &data); + if (err < 0) + return err; + +- err = bstr_printf(str, str_size, fmt, bin_args); ++ err = bstr_printf(str, str_size, fmt, data.bin_args); + +- bpf_bprintf_cleanup(); ++ bpf_bprintf_cleanup(&data); + + return err + 1; + } +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 23b6d57b5eef2..1a29ac4db6eae 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -7448,6 +7448,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env, + struct bpf_reg_state *fmt_reg = ®s[BPF_REG_3]; + struct bpf_reg_state *data_len_reg = ®s[BPF_REG_5]; + struct bpf_map *fmt_map = fmt_reg->map_ptr; ++ struct bpf_bprintf_data data = {}; + int err, fmt_map_off, num_args; + u64 fmt_addr; + char *fmt; +@@ -7472,7 +7473,7 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env, + /* We are also guaranteed that fmt+fmt_map_off is NULL terminated, we + * can focus on validating the format specifiers. 
+ */ +- err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, NULL, num_args); ++ err = bpf_bprintf_prepare(fmt, UINT_MAX, NULL, num_args, &data); + if (err < 0) + verbose(env, "Invalid format string\n"); + +diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c +index 0c5be7ebb1dca..08b16d20c85bb 100644 +--- a/kernel/sched/membarrier.c ++++ b/kernel/sched/membarrier.c +@@ -161,6 +161,9 @@ + | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \ + | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK) + ++static DEFINE_MUTEX(membarrier_ipi_mutex); ++#define SERIALIZE_IPI() guard(mutex)(&membarrier_ipi_mutex) ++ + static void ipi_mb(void *info) + { + smp_mb(); /* IPIs should be serializing but paranoid. */ +@@ -258,6 +261,7 @@ static int membarrier_global_expedited(void) + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + ++ SERIALIZE_IPI(); + cpus_read_lock(); + rcu_read_lock(); + for_each_online_cpu(cpu) { +@@ -346,6 +350,7 @@ static int membarrier_private_expedited(int flags, int cpu_id) + if (cpu_id < 0 && !zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + ++ SERIALIZE_IPI(); + cpus_read_lock(); + + if (cpu_id >= 0) { +@@ -459,6 +464,7 @@ static int sync_runqueues_membarrier_state(struct mm_struct *mm) + * between threads which are users of @mm has its membarrier state + * updated. + */ ++ SERIALIZE_IPI(); + cpus_read_lock(); + rcu_read_lock(); + for_each_online_cpu(cpu) { +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 8e0aff1d1ea4f..9bb88836c42e6 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -2266,7 +2266,7 @@ void __init hrtimers_init(void) + /** + * schedule_hrtimeout_range_clock - sleep until timeout + * @expires: timeout value (ktime_t) +- * @delta: slack in expires timeout (ktime_t) ++ * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks + * @mode: timer mode + * @clock_id: timer clock to be used + */ +@@ -2293,6 +2293,13 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, + return -EINTR; + } + ++ /* ++ * Override any slack passed by the user if under ++ * rt contraints. ++ */ ++ if (rt_task(current)) ++ delta = 0; ++ + hrtimer_init_sleeper_on_stack(&t, clock_id, mode); + hrtimer_set_expires_range_ns(&t.timer, *expires, delta); + hrtimer_sleeper_start_expires(&t, mode); +@@ -2312,7 +2319,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock); + /** + * schedule_hrtimeout_range - sleep until timeout + * @expires: timeout value (ktime_t) +- * @delta: slack in expires timeout (ktime_t) ++ * @delta: slack in expires timeout (ktime_t) for SCHED_OTHER tasks + * @mode: timer mode + * + * Make the current task sleep until the given expiry time has +@@ -2320,7 +2327,8 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_range_clock); + * the current task state has been set (see set_current_state()). + * + * The @delta argument gives the kernel the freedom to schedule the +- * actual wakeup to a time that is both power and performance friendly. ++ * actual wakeup to a time that is both power and performance friendly ++ * for regular (non RT/DL) tasks. + * The kernel give the normal best effort behavior for "@expires+@delta", + * but may decide to fire the timer earlier, but no earlier than @expires. 
+ * +diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c +index f4a494a457c52..3fdde232eaa92 100644 +--- a/kernel/trace/bpf_trace.c ++++ b/kernel/trace/bpf_trace.c +@@ -368,8 +368,6 @@ static const struct bpf_func_proto *bpf_get_probe_write_proto(void) + return &bpf_probe_write_user_proto; + } + +-static DEFINE_RAW_SPINLOCK(trace_printk_lock); +- + #define MAX_TRACE_PRINTK_VARARGS 3 + #define BPF_TRACE_PRINTK_SIZE 1024 + +@@ -377,23 +375,22 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1, + u64, arg2, u64, arg3) + { + u64 args[MAX_TRACE_PRINTK_VARARGS] = { arg1, arg2, arg3 }; +- u32 *bin_args; +- static char buf[BPF_TRACE_PRINTK_SIZE]; +- unsigned long flags; ++ struct bpf_bprintf_data data = { ++ .get_bin_args = true, ++ .get_buf = true, ++ }; + int ret; + +- ret = bpf_bprintf_prepare(fmt, fmt_size, args, &bin_args, +- MAX_TRACE_PRINTK_VARARGS); ++ ret = bpf_bprintf_prepare(fmt, fmt_size, args, ++ MAX_TRACE_PRINTK_VARARGS, &data); + if (ret < 0) + return ret; + +- raw_spin_lock_irqsave(&trace_printk_lock, flags); +- ret = bstr_printf(buf, sizeof(buf), fmt, bin_args); ++ ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); + +- trace_bpf_trace_printk(buf); +- raw_spin_unlock_irqrestore(&trace_printk_lock, flags); ++ trace_bpf_trace_printk(data.buf); + +- bpf_bprintf_cleanup(); ++ bpf_bprintf_cleanup(&data); + + return ret; + } +@@ -426,30 +423,29 @@ const struct bpf_func_proto *bpf_get_trace_printk_proto(void) + return &bpf_trace_printk_proto; + } + +-BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, data, ++BPF_CALL_4(bpf_trace_vprintk, char *, fmt, u32, fmt_size, const void *, args, + u32, data_len) + { +- static char buf[BPF_TRACE_PRINTK_SIZE]; +- unsigned long flags; ++ struct bpf_bprintf_data data = { ++ .get_bin_args = true, ++ .get_buf = true, ++ }; + int ret, num_args; +- u32 *bin_args; + + if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || +- (data_len && !data)) ++ (data_len && !args)) + return -EINVAL; + num_args = data_len / 8; + +- ret = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args); ++ ret = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); + if (ret < 0) + return ret; + +- raw_spin_lock_irqsave(&trace_printk_lock, flags); +- ret = bstr_printf(buf, sizeof(buf), fmt, bin_args); ++ ret = bstr_printf(data.buf, MAX_BPRINTF_BUF, fmt, data.bin_args); + +- trace_bpf_trace_printk(buf); +- raw_spin_unlock_irqrestore(&trace_printk_lock, flags); ++ trace_bpf_trace_printk(data.buf); + +- bpf_bprintf_cleanup(); ++ bpf_bprintf_cleanup(&data); + + return ret; + } +@@ -471,23 +467,25 @@ const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void) + } + + BPF_CALL_5(bpf_seq_printf, struct seq_file *, m, char *, fmt, u32, fmt_size, +- const void *, data, u32, data_len) ++ const void *, args, u32, data_len) + { ++ struct bpf_bprintf_data data = { ++ .get_bin_args = true, ++ }; + int err, num_args; +- u32 *bin_args; + + if (data_len & 7 || data_len > MAX_BPRINTF_VARARGS * 8 || +- (data_len && !data)) ++ (data_len && !args)) + return -EINVAL; + num_args = data_len / 8; + +- err = bpf_bprintf_prepare(fmt, fmt_size, data, &bin_args, num_args); ++ err = bpf_bprintf_prepare(fmt, fmt_size, args, num_args, &data); + if (err < 0) + return err; + +- seq_bprintf(m, fmt, bin_args); ++ seq_bprintf(m, fmt, data.bin_args); + +- bpf_bprintf_cleanup(); ++ bpf_bprintf_cleanup(&data); + + return seq_has_overflowed(m) ? 
-EOVERFLOW : 0; + } +diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c +index 1285e7fb597ee..e019a9278794f 100644 +--- a/kernel/trace/ring_buffer.c ++++ b/kernel/trace/ring_buffer.c +@@ -1095,7 +1095,7 @@ __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, + full = 0; + } else { + if (!cpumask_test_cpu(cpu, buffer->cpumask)) +- return -EINVAL; ++ return EPOLLERR; + + cpu_buffer = buffer->buffers[cpu]; + work = &cpu_buffer->irq_work; +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index 2b3c4cd8382b3..f667d6bdddda5 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -40,6 +40,7 @@ + #include <linux/ctype.h> + #include <linux/init.h> + #include <linux/panic_notifier.h> ++#include <linux/kmemleak.h> + #include <linux/poll.h> + #include <linux/nmi.h> + #include <linux/fs.h> +@@ -2249,7 +2250,7 @@ struct saved_cmdlines_buffer { + unsigned *map_cmdline_to_pid; + unsigned cmdline_num; + int cmdline_idx; +- char *saved_cmdlines; ++ char saved_cmdlines[]; + }; + static struct saved_cmdlines_buffer *savedcmd; + +@@ -2263,47 +2264,60 @@ static inline void set_cmdline(int idx, const char *cmdline) + strncpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN); + } + +-static int allocate_cmdlines_buffer(unsigned int val, +- struct saved_cmdlines_buffer *s) ++static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) + { ++ int order = get_order(sizeof(*s) + s->cmdline_num * TASK_COMM_LEN); ++ ++ kfree(s->map_cmdline_to_pid); ++ kmemleak_free(s); ++ free_pages((unsigned long)s, order); ++} ++ ++static struct saved_cmdlines_buffer *allocate_cmdlines_buffer(unsigned int val) ++{ ++ struct saved_cmdlines_buffer *s; ++ struct page *page; ++ int orig_size, size; ++ int order; ++ ++ /* Figure out how much is needed to hold the given number of cmdlines */ ++ orig_size = sizeof(*s) + val * TASK_COMM_LEN; ++ order = get_order(orig_size); ++ size = 1 << (order + PAGE_SHIFT); ++ page = alloc_pages(GFP_KERNEL, order); ++ if (!page) ++ return NULL; ++ ++ s = page_address(page); ++ kmemleak_alloc(s, size, 1, GFP_KERNEL); ++ memset(s, 0, sizeof(*s)); ++ ++ /* Round up to actual allocation */ ++ val = (size - sizeof(*s)) / TASK_COMM_LEN; ++ s->cmdline_num = val; ++ + s->map_cmdline_to_pid = kmalloc_array(val, + sizeof(*s->map_cmdline_to_pid), + GFP_KERNEL); +- if (!s->map_cmdline_to_pid) +- return -ENOMEM; +- +- s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL); +- if (!s->saved_cmdlines) { +- kfree(s->map_cmdline_to_pid); +- return -ENOMEM; ++ if (!s->map_cmdline_to_pid) { ++ free_saved_cmdlines_buffer(s); ++ return NULL; + } + + s->cmdline_idx = 0; +- s->cmdline_num = val; + memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP, + sizeof(s->map_pid_to_cmdline)); + memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP, + val * sizeof(*s->map_cmdline_to_pid)); + +- return 0; ++ return s; + } + + static int trace_create_savedcmd(void) + { +- int ret; +- +- savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL); +- if (!savedcmd) +- return -ENOMEM; ++ savedcmd = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT); + +- ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd); +- if (ret < 0) { +- kfree(savedcmd); +- savedcmd = NULL; +- return -ENOMEM; +- } +- +- return 0; ++ return savedcmd ? 
0 : -ENOMEM; + } + + int is_tracing_stopped(void) +@@ -5972,26 +5986,14 @@ tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf, + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); + } + +-static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s) +-{ +- kfree(s->saved_cmdlines); +- kfree(s->map_cmdline_to_pid); +- kfree(s); +-} +- + static int tracing_resize_saved_cmdlines(unsigned int val) + { + struct saved_cmdlines_buffer *s, *savedcmd_temp; + +- s = kmalloc(sizeof(*s), GFP_KERNEL); ++ s = allocate_cmdlines_buffer(val); + if (!s) + return -ENOMEM; + +- if (allocate_cmdlines_buffer(val, s) < 0) { +- kfree(s); +- return -ENOMEM; +- } +- + preempt_disable(); + arch_spin_lock(&trace_cmdline_lock); + savedcmd_temp = savedcmd; +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c +index 918730d749325..f941ce01ee351 100644 +--- a/kernel/trace/trace_events_trigger.c ++++ b/kernel/trace/trace_events_trigger.c +@@ -1455,8 +1455,10 @@ register_snapshot_trigger(char *glob, + struct event_trigger_data *data, + struct trace_event_file *file) + { +- if (tracing_alloc_snapshot_instance(file->tr) != 0) +- return 0; ++ int ret = tracing_alloc_snapshot_instance(file->tr); ++ ++ if (ret < 0) ++ return ret; + + return register_trigger(glob, data, file); + } +diff --git a/lib/mpi/ec.c b/lib/mpi/ec.c +index 40f5908e57a4f..e16dca1e23d52 100644 +--- a/lib/mpi/ec.c ++++ b/lib/mpi/ec.c +@@ -584,6 +584,9 @@ void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model, + ctx->a = mpi_copy(a); + ctx->b = mpi_copy(b); + ++ ctx->d = NULL; ++ ctx->t.two_inv_p = NULL; ++ + ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL; + + mpi_ec_get_reset(ctx); +diff --git a/mm/page-writeback.c b/mm/page-writeback.c +index de5f69921b946..d3e9d12860b9f 100644 +--- a/mm/page-writeback.c ++++ b/mm/page-writeback.c +@@ -1526,7 +1526,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc) + */ + dtc->wb_thresh = __wb_calc_thresh(dtc); + dtc->wb_bg_thresh = dtc->thresh ? +- div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0; ++ div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0; + + /* + * In order to avoid the stacked BDI deadlock we need +diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c +index 650ab6cfd5f49..992a0a16846f7 100644 +--- a/mm/userfaultfd.c ++++ b/mm/userfaultfd.c +@@ -327,6 +327,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, + unsigned long dst_start, + unsigned long src_start, + unsigned long len, ++ atomic_t *mmap_changing, + enum mcopy_atomic_mode mode, + bool wp_copy) + { +@@ -445,6 +446,15 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, + goto out; + } + mmap_read_lock(dst_mm); ++ /* ++ * If memory mappings are changing because of non-cooperative ++ * operation (e.g. 
mremap) running in parallel, bail out and ++ * request the user to retry later ++ */ ++ if (mmap_changing && atomic_read(mmap_changing)) { ++ err = -EAGAIN; ++ break; ++ } + + dst_vma = NULL; + goto retry; +@@ -480,6 +490,7 @@ extern ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm, + unsigned long dst_start, + unsigned long src_start, + unsigned long len, ++ atomic_t *mmap_changing, + enum mcopy_atomic_mode mode, + bool wp_copy); + #endif /* CONFIG_HUGETLB_PAGE */ +@@ -601,8 +612,8 @@ static __always_inline ssize_t __mcopy_atomic(struct mm_struct *dst_mm, + */ + if (is_vm_hugetlb_page(dst_vma)) + return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start, +- src_start, len, mcopy_mode, +- wp_copy); ++ src_start, len, mmap_changing, ++ mcopy_mode, wp_copy); + + if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma)) + goto out_unlock; +diff --git a/net/can/j1939/j1939-priv.h b/net/can/j1939/j1939-priv.h +index 16af1a7f80f60..31a93cae5111b 100644 +--- a/net/can/j1939/j1939-priv.h ++++ b/net/can/j1939/j1939-priv.h +@@ -86,7 +86,7 @@ struct j1939_priv { + unsigned int tp_max_packet_size; + + /* lock for j1939_socks list */ +- spinlock_t j1939_socks_lock; ++ rwlock_t j1939_socks_lock; + struct list_head j1939_socks; + + struct kref rx_kref; +@@ -301,6 +301,7 @@ struct j1939_sock { + + int ifindex; + struct j1939_addr addr; ++ spinlock_t filters_lock; + struct j1939_filter *filters; + int nfilters; + pgn_t pgn_rx_filter; +diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c +index ecff1c947d683..a6fb89fa62785 100644 +--- a/net/can/j1939/main.c ++++ b/net/can/j1939/main.c +@@ -274,7 +274,7 @@ struct j1939_priv *j1939_netdev_start(struct net_device *ndev) + return ERR_PTR(-ENOMEM); + + j1939_tp_init(priv); +- spin_lock_init(&priv->j1939_socks_lock); ++ rwlock_init(&priv->j1939_socks_lock); + INIT_LIST_HEAD(&priv->j1939_socks); + + mutex_lock(&j1939_netdev_lock); +diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c +index b0be23559243c..58909b36561a6 100644 +--- a/net/can/j1939/socket.c ++++ b/net/can/j1939/socket.c +@@ -80,16 +80,16 @@ static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk) + jsk->state |= J1939_SOCK_BOUND; + j1939_priv_get(priv); + +- spin_lock_bh(&priv->j1939_socks_lock); ++ write_lock_bh(&priv->j1939_socks_lock); + list_add_tail(&jsk->list, &priv->j1939_socks); +- spin_unlock_bh(&priv->j1939_socks_lock); ++ write_unlock_bh(&priv->j1939_socks_lock); + } + + static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk) + { +- spin_lock_bh(&priv->j1939_socks_lock); ++ write_lock_bh(&priv->j1939_socks_lock); + list_del_init(&jsk->list); +- spin_unlock_bh(&priv->j1939_socks_lock); ++ write_unlock_bh(&priv->j1939_socks_lock); + + j1939_priv_put(priv); + jsk->state &= ~J1939_SOCK_BOUND; +@@ -262,12 +262,17 @@ static bool j1939_sk_match_dst(struct j1939_sock *jsk, + static bool j1939_sk_match_filter(struct j1939_sock *jsk, + const struct j1939_sk_buff_cb *skcb) + { +- const struct j1939_filter *f = jsk->filters; +- int nfilter = jsk->nfilters; ++ const struct j1939_filter *f; ++ int nfilter; ++ ++ spin_lock_bh(&jsk->filters_lock); ++ ++ f = jsk->filters; ++ nfilter = jsk->nfilters; + + if (!nfilter) + /* receive all when no filters are assigned */ +- return true; ++ goto filter_match_found; + + for (; nfilter; ++f, --nfilter) { + if ((skcb->addr.pgn & f->pgn_mask) != f->pgn) +@@ -276,9 +281,15 @@ static bool j1939_sk_match_filter(struct j1939_sock *jsk, + continue; + if ((skcb->addr.src_name & f->name_mask) != f->name) + continue; +- 
return true; ++ goto filter_match_found; + } ++ ++ spin_unlock_bh(&jsk->filters_lock); + return false; ++ ++filter_match_found: ++ spin_unlock_bh(&jsk->filters_lock); ++ return true; + } + + static bool j1939_sk_recv_match_one(struct j1939_sock *jsk, +@@ -329,13 +340,13 @@ bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb) + struct j1939_sock *jsk; + bool match = false; + +- spin_lock_bh(&priv->j1939_socks_lock); ++ read_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + match = j1939_sk_recv_match_one(jsk, skcb); + if (match) + break; + } +- spin_unlock_bh(&priv->j1939_socks_lock); ++ read_unlock_bh(&priv->j1939_socks_lock); + + return match; + } +@@ -344,11 +355,11 @@ void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb) + { + struct j1939_sock *jsk; + +- spin_lock_bh(&priv->j1939_socks_lock); ++ read_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + j1939_sk_recv_one(jsk, skb); + } +- spin_unlock_bh(&priv->j1939_socks_lock); ++ read_unlock_bh(&priv->j1939_socks_lock); + } + + static void j1939_sk_sock_destruct(struct sock *sk) +@@ -401,6 +412,7 @@ static int j1939_sk_init(struct sock *sk) + atomic_set(&jsk->skb_pending, 0); + spin_lock_init(&jsk->sk_session_queue_lock); + INIT_LIST_HEAD(&jsk->sk_session_queue); ++ spin_lock_init(&jsk->filters_lock); + + /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */ + sock_set_flag(sk, SOCK_RCU_FREE); +@@ -703,9 +715,11 @@ static int j1939_sk_setsockopt(struct socket *sock, int level, int optname, + } + + lock_sock(&jsk->sk); ++ spin_lock_bh(&jsk->filters_lock); + ofilters = jsk->filters; + jsk->filters = filters; + jsk->nfilters = count; ++ spin_unlock_bh(&jsk->filters_lock); + release_sock(&jsk->sk); + kfree(ofilters); + return 0; +@@ -1080,12 +1094,12 @@ void j1939_sk_errqueue(struct j1939_session *session, + } + + /* spread RX notifications to all sockets subscribed to this session */ +- spin_lock_bh(&priv->j1939_socks_lock); ++ read_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + if (j1939_sk_recv_match_one(jsk, &session->skcb)) + __j1939_sk_errqueue(session, &jsk->sk, type); + } +- spin_unlock_bh(&priv->j1939_socks_lock); ++ read_unlock_bh(&priv->j1939_socks_lock); + }; + + void j1939_sk_send_loop_abort(struct sock *sk, int err) +@@ -1273,7 +1287,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv) + struct j1939_sock *jsk; + int error_code = ENETDOWN; + +- spin_lock_bh(&priv->j1939_socks_lock); ++ read_lock_bh(&priv->j1939_socks_lock); + list_for_each_entry(jsk, &priv->j1939_socks, list) { + jsk->sk.sk_err = error_code; + if (!sock_flag(&jsk->sk, SOCK_DEAD)) +@@ -1281,7 +1295,7 @@ void j1939_sk_netdev_event_netdown(struct j1939_priv *priv) + + j1939_sk_queue_drop_all(priv, jsk, error_code); + } +- spin_unlock_bh(&priv->j1939_socks_lock); ++ read_unlock_bh(&priv->j1939_socks_lock); + } + + static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd, +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 8a819d0a7bfb0..d4bd10f8723df 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -4213,8 +4213,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb, + /* GSO partial only requires that we trim off any excess that + * doesn't fit into an MSS sized block, so take care of that + * now. ++ * Cap len to not accidentally hit GSO_BY_FRAGS. 
+ */ +- partial_segs = len / mss; ++ partial_segs = min(len, GSO_BY_FRAGS - 1U) / mss; + if (partial_segs > 1) + mss *= partial_segs; + else +diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c +index b1e86a7265b32..83906d093f0ae 100644 +--- a/net/hsr/hsr_device.c ++++ b/net/hsr/hsr_device.c +@@ -291,7 +291,7 @@ static void send_hsr_supervision_frame(struct hsr_port *master, + + skb = hsr_init_skb(master); + if (!skb) { +- WARN_ONCE(1, "HSR: Could not send supervision frame\n"); ++ netdev_warn_once(master->dev, "HSR: Could not send supervision frame\n"); + return; + } + +@@ -338,7 +338,7 @@ static void send_prp_supervision_frame(struct hsr_port *master, + + skb = hsr_init_skb(master); + if (!skb) { +- WARN_ONCE(1, "PRP: Could not send supervision frame\n"); ++ netdev_warn_once(master->dev, "PRP: Could not send supervision frame\n"); + return; + } + +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index 2db103a56a28f..322a035f75929 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -5,7 +5,7 @@ + * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> + * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> + * Copyright 2013-2014 Intel Mobile Communications GmbH +- * Copyright (C) 2018-2022 Intel Corporation ++ * Copyright (C) 2018-2024 Intel Corporation + * + * Transmit and frame generation functions. + */ +@@ -3838,6 +3838,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, + goto begin; + + skb = __skb_dequeue(&tx.skbs); ++ info = IEEE80211_SKB_CB(skb); + + if (!skb_queue_empty(&tx.skbs)) { + spin_lock_bh(&fq->lock); +@@ -3882,7 +3883,7 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw, + } + + encap_out: +- IEEE80211_SKB_CB(skb)->control.vif = vif; ++ info->control.vif = vif; + + if (tx.sta && + wiphy_ext_feature_isset(local->hw.wiphy, NL80211_EXT_FEATURE_AQL)) { +diff --git a/net/mptcp/pm_userspace.c b/net/mptcp/pm_userspace.c +index 38cbdc66d8bff..2e1e0d0e3ec60 100644 +--- a/net/mptcp/pm_userspace.c ++++ b/net/mptcp/pm_userspace.c +@@ -132,10 +132,21 @@ int mptcp_userspace_pm_get_flags_and_ifindex_by_id(struct mptcp_sock *msk, + int mptcp_userspace_pm_get_local_id(struct mptcp_sock *msk, + struct mptcp_addr_info *skc) + { +- struct mptcp_pm_addr_entry new_entry; ++ struct mptcp_pm_addr_entry *entry = NULL, *e, new_entry; + __be16 msk_sport = ((struct inet_sock *) + inet_sk((struct sock *)msk))->inet_sport; + ++ spin_lock_bh(&msk->pm.lock); ++ list_for_each_entry(e, &msk->pm.userspace_pm_local_addr_list, list) { ++ if (mptcp_addresses_equal(&e->addr, skc, false)) { ++ entry = e; ++ break; ++ } ++ } ++ spin_unlock_bh(&msk->pm.lock); ++ if (entry) ++ return entry->addr.id; ++ + memset(&new_entry, 0, sizeof(struct mptcp_pm_addr_entry)); + new_entry.addr = *skc; + new_entry.addr.id = 0; +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index 76539d1004ebb..859b18cb8e4f6 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -1582,8 +1582,11 @@ static void mptcp_update_post_push(struct mptcp_sock *msk, + + void mptcp_check_and_set_pending(struct sock *sk) + { +- if (mptcp_send_head(sk)) +- mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING); ++ if (mptcp_send_head(sk)) { ++ mptcp_data_lock(sk); ++ mptcp_sk(sk)->cb_flags |= BIT(MPTCP_PUSH_PENDING); ++ mptcp_data_unlock(sk); ++ } + } + + void __mptcp_push_pending(struct sock *sk, unsigned int flags) +@@ -2336,9 +2339,6 @@ bool __mptcp_retransmit_pending_data(struct sock *sk) + if (__mptcp_check_fallback(mptcp_sk(sk))) + return false; + +- if 
(tcp_rtx_and_write_queues_empty(sk)) +- return false; +- + /* the closing socket has some data untransmitted and/or unacked: + * some data in the mptcp rtx queue has not really xmitted yet. + * keep it simple and re-inject the whole mptcp level rtx queue +@@ -2422,7 +2422,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, + goto out_release; + } + +- dispose_it = !msk->subflow || ssk != msk->subflow->sk; ++ dispose_it = msk->free_first || ssk != msk->first; + if (dispose_it) + list_del(&subflow->node); + +@@ -2440,7 +2440,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, + need_push = (flags & MPTCP_CF_PUSH) && __mptcp_retransmit_pending_data(sk); + if (!dispose_it) { + __mptcp_subflow_disconnect(ssk, subflow, flags); +- msk->subflow->state = SS_UNCONNECTED; + release_sock(ssk); + + goto out; +@@ -3144,7 +3143,6 @@ static int mptcp_disconnect(struct sock *sk, int flags) + msk->last_snd = NULL; + WRITE_ONCE(msk->flags, 0); + msk->cb_flags = 0; +- msk->push_pending = 0; + msk->recovery = false; + msk->can_ack = false; + msk->fully_established = false; +@@ -3341,10 +3339,10 @@ static void mptcp_destroy(struct sock *sk) + { + struct mptcp_sock *msk = mptcp_sk(sk); + +- /* clears msk->subflow, allowing the following to close +- * even the initial subflow +- */ + mptcp_dispose_initial_subflow(msk); ++ ++ /* allow the following to close even the initial subflow */ ++ msk->free_first = 1; + mptcp_destroy_common(msk, 0); + sk_sockets_allocated_dec(sk); + } +@@ -3388,8 +3386,7 @@ static void mptcp_release_cb(struct sock *sk) + struct mptcp_sock *msk = mptcp_sk(sk); + + for (;;) { +- unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED) | +- msk->push_pending; ++ unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED); + struct list_head join_list; + + if (!flags) +@@ -3405,7 +3402,6 @@ static void mptcp_release_cb(struct sock *sk) + * datapath acquires the msk socket spinlock while helding + * the subflow socket lock + */ +- msk->push_pending = 0; + msk->cb_flags &= ~flags; + spin_unlock_bh(&sk->sk_lock.slock); + +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 4ec8e0a81b5a4..259672cc344f3 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -272,7 +272,6 @@ struct mptcp_sock { + int rmem_released; + unsigned long flags; + unsigned long cb_flags; +- unsigned long push_pending; + bool recovery; /* closing subflow write queue reinjected */ + bool can_ack; + bool fully_established; +@@ -287,7 +286,8 @@ struct mptcp_sock { + cork:1, + nodelay:1, + fastopening:1, +- in_accept_queue:1; ++ in_accept_queue:1, ++ free_first:1; + struct work_struct work; + struct sk_buff *ooo_last_skb; + struct rb_root out_of_order_queue; +diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h +index 26ab0e9612d82..9523104a90da4 100644 +--- a/net/netfilter/ipset/ip_set_bitmap_gen.h ++++ b/net/netfilter/ipset/ip_set_bitmap_gen.h +@@ -28,6 +28,7 @@ + #define mtype_del IPSET_TOKEN(MTYPE, _del) + #define mtype_list IPSET_TOKEN(MTYPE, _list) + #define mtype_gc IPSET_TOKEN(MTYPE, _gc) ++#define mtype_cancel_gc IPSET_TOKEN(MTYPE, _cancel_gc) + #define mtype MTYPE + + #define get_ext(set, map, id) ((map)->extensions + ((set)->dsize * (id))) +@@ -57,9 +58,6 @@ mtype_destroy(struct ip_set *set) + { + struct mtype *map = set->data; + +- if (SET_WITH_TIMEOUT(set)) +- del_timer_sync(&map->gc); +- + if (set->dsize && set->extensions & IPSET_EXT_DESTROY) + mtype_ext_cleanup(set); + 
ip_set_free(map->members); +@@ -288,6 +286,15 @@ mtype_gc(struct timer_list *t) + add_timer(&map->gc); + } + ++static void ++mtype_cancel_gc(struct ip_set *set) ++{ ++ struct mtype *map = set->data; ++ ++ if (SET_WITH_TIMEOUT(set)) ++ del_timer_sync(&map->gc); ++} ++ + static const struct ip_set_type_variant mtype = { + .kadt = mtype_kadt, + .uadt = mtype_uadt, +@@ -301,6 +308,7 @@ static const struct ip_set_type_variant mtype = { + .head = mtype_head, + .list = mtype_list, + .same_set = mtype_same_set, ++ .cancel_gc = mtype_cancel_gc, + }; + + #endif /* __IP_SET_BITMAP_IP_GEN_H */ +diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c +index d47dfdcb899b0..f645da82d826e 100644 +--- a/net/netfilter/ipset/ip_set_core.c ++++ b/net/netfilter/ipset/ip_set_core.c +@@ -1156,6 +1156,7 @@ static int ip_set_create(struct sk_buff *skb, const struct nfnl_info *info, + return ret; + + cleanup: ++ set->variant->cancel_gc(set); + set->variant->destroy(set); + put_out: + module_put(set->type->me); +@@ -1184,6 +1185,14 @@ ip_set_destroy_set(struct ip_set *set) + kfree(set); + } + ++static void ++ip_set_destroy_set_rcu(struct rcu_head *head) ++{ ++ struct ip_set *set = container_of(head, struct ip_set, rcu); ++ ++ ip_set_destroy_set(set); ++} ++ + static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, + const struct nlattr * const attr[]) + { +@@ -1195,8 +1204,6 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, + if (unlikely(protocol_min_failed(attr))) + return -IPSET_ERR_PROTOCOL; + +- /* Must wait for flush to be really finished in list:set */ +- rcu_barrier(); + + /* Commands are serialized and references are + * protected by the ip_set_ref_lock. +@@ -1208,8 +1215,10 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, + * counter, so if it's already zero, we can proceed + * without holding the lock. 
+ */ +- read_lock_bh(&ip_set_ref_lock); + if (!attr[IPSET_ATTR_SETNAME]) { ++ /* Must wait for flush to be really finished in list:set */ ++ rcu_barrier(); ++ read_lock_bh(&ip_set_ref_lock); + for (i = 0; i < inst->ip_set_max; i++) { + s = ip_set(inst, i); + if (s && (s->ref || s->ref_netlink)) { +@@ -1223,6 +1232,8 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, + s = ip_set(inst, i); + if (s) { + ip_set(inst, i) = NULL; ++ /* Must cancel garbage collectors */ ++ s->variant->cancel_gc(s); + ip_set_destroy_set(s); + } + } +@@ -1230,6 +1241,9 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, + inst->is_destroyed = false; + } else { + u32 flags = flag_exist(info->nlh); ++ u16 features = 0; ++ ++ read_lock_bh(&ip_set_ref_lock); + s = find_set_and_id(inst, nla_data(attr[IPSET_ATTR_SETNAME]), + &i); + if (!s) { +@@ -1240,10 +1254,16 @@ static int ip_set_destroy(struct sk_buff *skb, const struct nfnl_info *info, + ret = -IPSET_ERR_BUSY; + goto out; + } ++ features = s->type->features; + ip_set(inst, i) = NULL; + read_unlock_bh(&ip_set_ref_lock); +- +- ip_set_destroy_set(s); ++ if (features & IPSET_TYPE_NAME) { ++ /* Must wait for flush to be really finished */ ++ rcu_barrier(); ++ } ++ /* Must cancel garbage collectors */ ++ s->variant->cancel_gc(s); ++ call_rcu(&s->rcu, ip_set_destroy_set_rcu); + } + return 0; + out: +@@ -1396,9 +1416,6 @@ static int ip_set_swap(struct sk_buff *skb, const struct nfnl_info *info, + ip_set(inst, to_id) = from; + write_unlock_bh(&ip_set_ref_lock); + +- /* Make sure all readers of the old set pointers are completed. */ +- synchronize_rcu(); +- + return 0; + } + +@@ -2364,6 +2381,7 @@ ip_set_net_exit(struct net *net) + set = ip_set(inst, i); + if (set) { + ip_set(inst, i) = NULL; ++ set->variant->cancel_gc(set); + ip_set_destroy_set(set); + } + } +@@ -2411,8 +2429,11 @@ ip_set_fini(void) + { + nf_unregister_sockopt(&so_set); + nfnetlink_subsys_unregister(&ip_set_netlink_subsys); +- + unregister_pernet_subsys(&ip_set_net_ops); ++ ++ /* Wait for call_rcu() in destroy */ ++ rcu_barrier(); ++ + pr_debug("these are the famous last words\n"); + } + +diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h +index 7499192af5866..ef04e556aadb4 100644 +--- a/net/netfilter/ipset/ip_set_hash_gen.h ++++ b/net/netfilter/ipset/ip_set_hash_gen.h +@@ -210,6 +210,7 @@ htable_size(u8 hbits) + #undef mtype_gc_do + #undef mtype_gc + #undef mtype_gc_init ++#undef mtype_cancel_gc + #undef mtype_variant + #undef mtype_data_match + +@@ -254,6 +255,7 @@ htable_size(u8 hbits) + #define mtype_gc_do IPSET_TOKEN(MTYPE, _gc_do) + #define mtype_gc IPSET_TOKEN(MTYPE, _gc) + #define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init) ++#define mtype_cancel_gc IPSET_TOKEN(MTYPE, _cancel_gc) + #define mtype_variant IPSET_TOKEN(MTYPE, _variant) + #define mtype_data_match IPSET_TOKEN(MTYPE, _data_match) + +@@ -417,7 +419,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy) + u32 i; + + for (i = 0; i < jhash_size(t->htable_bits); i++) { +- n = __ipset_dereference(hbucket(t, i)); ++ n = (__force struct hbucket *)hbucket(t, i); + if (!n) + continue; + if (set->extensions & IPSET_EXT_DESTROY && ext_destroy) +@@ -437,10 +439,7 @@ mtype_destroy(struct ip_set *set) + struct htype *h = set->data; + struct list_head *l, *lt; + +- if (SET_WITH_TIMEOUT(set)) +- cancel_delayed_work_sync(&h->gc.dwork); +- +- mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true); ++ mtype_ahash_destroy(set, 
(__force struct htable *)h->table, true); + list_for_each_safe(l, lt, &h->ad) { + list_del(l); + kfree(l); +@@ -586,6 +585,15 @@ mtype_gc_init(struct htable_gc *gc) + queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ); + } + ++static void ++mtype_cancel_gc(struct ip_set *set) ++{ ++ struct htype *h = set->data; ++ ++ if (SET_WITH_TIMEOUT(set)) ++ cancel_delayed_work_sync(&h->gc.dwork); ++} ++ + static int + mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext, + struct ip_set_ext *mext, u32 flags); +@@ -1416,6 +1424,7 @@ static const struct ip_set_type_variant mtype_variant = { + .uref = mtype_uref, + .resize = mtype_resize, + .same_set = mtype_same_set, ++ .cancel_gc = mtype_cancel_gc, + .region_lock = true, + }; + +diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c +index 5a67f79665742..6bc7019982b05 100644 +--- a/net/netfilter/ipset/ip_set_list_set.c ++++ b/net/netfilter/ipset/ip_set_list_set.c +@@ -426,9 +426,6 @@ list_set_destroy(struct ip_set *set) + struct list_set *map = set->data; + struct set_elem *e, *n; + +- if (SET_WITH_TIMEOUT(set)) +- del_timer_sync(&map->gc); +- + list_for_each_entry_safe(e, n, &map->members, list) { + list_del(&e->list); + ip_set_put_byindex(map->net, e->id); +@@ -545,6 +542,15 @@ list_set_same_set(const struct ip_set *a, const struct ip_set *b) + a->extensions == b->extensions; + } + ++static void ++list_set_cancel_gc(struct ip_set *set) ++{ ++ struct list_set *map = set->data; ++ ++ if (SET_WITH_TIMEOUT(set)) ++ del_timer_sync(&map->gc); ++} ++ + static const struct ip_set_type_variant set_variant = { + .kadt = list_set_kadt, + .uadt = list_set_uadt, +@@ -558,6 +564,7 @@ static const struct ip_set_type_variant set_variant = { + .head = list_set_head, + .list = list_set_list, + .same_set = list_set_same_set, ++ .cancel_gc = list_set_cancel_gc, + }; + + static void +diff --git a/net/netfilter/nft_set_pipapo_avx2.c b/net/netfilter/nft_set_pipapo_avx2.c +index 90e275bb3e5d7..a3a8ddca99189 100644 +--- a/net/netfilter/nft_set_pipapo_avx2.c ++++ b/net/netfilter/nft_set_pipapo_avx2.c +@@ -57,7 +57,7 @@ + + /* Jump to label if @reg is zero */ + #define NFT_PIPAPO_AVX2_NOMATCH_GOTO(reg, label) \ +- asm_volatile_goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \ ++ asm goto("vptest %%ymm" #reg ", %%ymm" #reg ";" \ + "je %l[" #label "]" : : : : label) + + /* Store 256 bits from YMM register into memory. 
Contrary to bucket load +diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c +index 7535afd1537e9..b5071a2f597d4 100644 +--- a/net/nfc/nci/core.c ++++ b/net/nfc/nci/core.c +@@ -1207,6 +1207,10 @@ void nci_free_device(struct nci_dev *ndev) + { + nfc_free_device(ndev->nfc_dev); + nci_hci_deallocate(ndev); ++ ++ /* drop partial rx data packet if present */ ++ if (ndev->rx_data_reassembly) ++ kfree_skb(ndev->rx_data_reassembly); + kfree(ndev); + } + EXPORT_SYMBOL(nci_free_device); +diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c +index ead5418c126e3..e3c85ceb1f0a5 100644 +--- a/net/openvswitch/flow_netlink.c ++++ b/net/openvswitch/flow_netlink.c +@@ -47,6 +47,7 @@ struct ovs_len_tbl { + + #define OVS_ATTR_NESTED -1 + #define OVS_ATTR_VARIABLE -2 ++#define OVS_COPY_ACTIONS_MAX_DEPTH 16 + + static bool actions_may_change_flow(const struct nlattr *actions) + { +@@ -2543,13 +2544,15 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, +- u32 mpls_label_count, bool log); ++ u32 mpls_label_count, bool log, ++ u32 depth); + + static int validate_and_copy_sample(struct net *net, const struct nlattr *attr, + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, +- u32 mpls_label_count, bool log, bool last) ++ u32 mpls_label_count, bool log, bool last, ++ u32 depth) + { + const struct nlattr *attrs[OVS_SAMPLE_ATTR_MAX + 1]; + const struct nlattr *probability, *actions; +@@ -2600,7 +2603,8 @@ static int validate_and_copy_sample(struct net *net, const struct nlattr *attr, + return err; + + err = __ovs_nla_copy_actions(net, actions, key, sfa, +- eth_type, vlan_tci, mpls_label_count, log); ++ eth_type, vlan_tci, mpls_label_count, log, ++ depth + 1); + + if (err) + return err; +@@ -2615,7 +2619,8 @@ static int validate_and_copy_dec_ttl(struct net *net, + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, +- u32 mpls_label_count, bool log) ++ u32 mpls_label_count, bool log, ++ u32 depth) + { + const struct nlattr *attrs[OVS_DEC_TTL_ATTR_MAX + 1]; + int start, action_start, err, rem; +@@ -2658,7 +2663,8 @@ static int validate_and_copy_dec_ttl(struct net *net, + return action_start; + + err = __ovs_nla_copy_actions(net, actions, key, sfa, eth_type, +- vlan_tci, mpls_label_count, log); ++ vlan_tci, mpls_label_count, log, ++ depth + 1); + if (err) + return err; + +@@ -2672,7 +2678,8 @@ static int validate_and_copy_clone(struct net *net, + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, +- u32 mpls_label_count, bool log, bool last) ++ u32 mpls_label_count, bool log, bool last, ++ u32 depth) + { + int start, err; + u32 exec; +@@ -2692,7 +2699,8 @@ static int validate_and_copy_clone(struct net *net, + return err; + + err = __ovs_nla_copy_actions(net, attr, key, sfa, +- eth_type, vlan_tci, mpls_label_count, log); ++ eth_type, vlan_tci, mpls_label_count, log, ++ depth + 1); + if (err) + return err; + +@@ -3061,7 +3069,7 @@ static int validate_and_copy_check_pkt_len(struct net *net, + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, + u32 mpls_label_count, +- bool log, bool last) ++ bool log, bool last, u32 depth) + { + const struct nlattr *acts_if_greater, *acts_if_lesser_eq; + struct nlattr *a[OVS_CHECK_PKT_LEN_ATTR_MAX + 1]; +@@ -3109,7 +3117,8 @@ static int validate_and_copy_check_pkt_len(struct net 
*net, + return nested_acts_start; + + err = __ovs_nla_copy_actions(net, acts_if_lesser_eq, key, sfa, +- eth_type, vlan_tci, mpls_label_count, log); ++ eth_type, vlan_tci, mpls_label_count, log, ++ depth + 1); + + if (err) + return err; +@@ -3122,7 +3131,8 @@ static int validate_and_copy_check_pkt_len(struct net *net, + return nested_acts_start; + + err = __ovs_nla_copy_actions(net, acts_if_greater, key, sfa, +- eth_type, vlan_tci, mpls_label_count, log); ++ eth_type, vlan_tci, mpls_label_count, log, ++ depth + 1); + + if (err) + return err; +@@ -3150,12 +3160,16 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, + const struct sw_flow_key *key, + struct sw_flow_actions **sfa, + __be16 eth_type, __be16 vlan_tci, +- u32 mpls_label_count, bool log) ++ u32 mpls_label_count, bool log, ++ u32 depth) + { + u8 mac_proto = ovs_key_mac_proto(key); + const struct nlattr *a; + int rem, err; + ++ if (depth > OVS_COPY_ACTIONS_MAX_DEPTH) ++ return -EOVERFLOW; ++ + nla_for_each_nested(a, attr, rem) { + /* Expected argument lengths, (u32)-1 for variable length. */ + static const u32 action_lens[OVS_ACTION_ATTR_MAX + 1] = { +@@ -3350,7 +3364,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, + err = validate_and_copy_sample(net, a, key, sfa, + eth_type, vlan_tci, + mpls_label_count, +- log, last); ++ log, last, depth); + if (err) + return err; + skip_copy = true; +@@ -3421,7 +3435,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, + err = validate_and_copy_clone(net, a, key, sfa, + eth_type, vlan_tci, + mpls_label_count, +- log, last); ++ log, last, depth); + if (err) + return err; + skip_copy = true; +@@ -3435,7 +3449,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, + eth_type, + vlan_tci, + mpls_label_count, +- log, last); ++ log, last, ++ depth); + if (err) + return err; + skip_copy = true; +@@ -3445,7 +3460,8 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, + case OVS_ACTION_ATTR_DEC_TTL: + err = validate_and_copy_dec_ttl(net, a, key, sfa, + eth_type, vlan_tci, +- mpls_label_count, log); ++ mpls_label_count, log, ++ depth); + if (err) + return err; + skip_copy = true; +@@ -3485,7 +3501,8 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, + + (*sfa)->orig_len = nla_len(attr); + err = __ovs_nla_copy_actions(net, attr, key, sfa, key->eth.type, +- key->eth.vlan.tci, mpls_label_count, log); ++ key->eth.vlan.tci, mpls_label_count, log, ++ 0); + if (err) + ovs_nla_free_flow_actions(*sfa); + +diff --git a/net/tls/tls.h b/net/tls/tls.h +index 0672acab27731..4922668fefaa8 100644 +--- a/net/tls/tls.h ++++ b/net/tls/tls.h +@@ -97,6 +97,7 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx); + void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx); + void tls_sw_strparser_done(struct tls_context *tls_ctx); + int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size); ++void tls_sw_splice_eof(struct socket *sock); + int tls_sw_sendpage_locked(struct sock *sk, struct page *page, + int offset, size_t size, int flags); + int tls_sw_sendpage(struct sock *sk, struct page *page, +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c +index 338a443fa47b2..80b42a3e78830 100644 +--- a/net/tls/tls_main.c ++++ b/net/tls/tls_main.c +@@ -922,6 +922,7 @@ static void build_proto_ops(struct proto_ops ops[TLS_NUM_CONFIG][TLS_NUM_CONFIG] + ops[TLS_BASE][TLS_BASE] = *base; + + ops[TLS_SW ][TLS_BASE] = ops[TLS_BASE][TLS_BASE]; 
++ ops[TLS_SW ][TLS_BASE].splice_eof = tls_sw_splice_eof; + ops[TLS_SW ][TLS_BASE].sendpage_locked = tls_sw_sendpage_locked; + + ops[TLS_BASE][TLS_SW ] = ops[TLS_BASE][TLS_BASE]; +@@ -990,6 +991,7 @@ static void build_protos(struct proto prot[TLS_NUM_CONFIG][TLS_NUM_CONFIG], + + prot[TLS_SW][TLS_BASE] = prot[TLS_BASE][TLS_BASE]; + prot[TLS_SW][TLS_BASE].sendmsg = tls_sw_sendmsg; ++ prot[TLS_SW][TLS_BASE].splice_eof = tls_sw_splice_eof; + prot[TLS_SW][TLS_BASE].sendpage = tls_sw_sendpage; + + prot[TLS_BASE][TLS_SW] = prot[TLS_BASE][TLS_BASE]; +diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c +index 0323040d34bc6..c8cbdd02a784e 100644 +--- a/net/tls/tls_sw.c ++++ b/net/tls/tls_sw.c +@@ -62,6 +62,7 @@ struct tls_decrypt_ctx { + u8 iv[MAX_IV_SIZE]; + u8 aad[TLS_MAX_AAD_SIZE]; + u8 tail; ++ bool free_sgout; + struct scatterlist sg[]; + }; + +@@ -186,7 +187,6 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err) + struct aead_request *aead_req = crypto_get_completion_data(data); + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); + struct scatterlist *sgout = aead_req->dst; +- struct scatterlist *sgin = aead_req->src; + struct tls_sw_context_rx *ctx; + struct tls_decrypt_ctx *dctx; + struct tls_context *tls_ctx; +@@ -212,7 +212,7 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err) + } + + /* Free the destination pages if skb was not decrypted inplace */ +- if (sgout != sgin) { ++ if (dctx->free_sgout) { + /* Skip the first S/G entry as it points to AAD */ + for_each_sg(sg_next(sgout), sg, UINT_MAX, pages) { + if (!sg) +@@ -223,10 +223,17 @@ static void tls_decrypt_done(crypto_completion_data_t *data, int err) + + kfree(aead_req); + +- spin_lock_bh(&ctx->decrypt_compl_lock); +- if (!atomic_dec_return(&ctx->decrypt_pending)) ++ if (atomic_dec_and_test(&ctx->decrypt_pending)) + complete(&ctx->async_wait.completion); +- spin_unlock_bh(&ctx->decrypt_compl_lock); ++} ++ ++static int tls_decrypt_async_wait(struct tls_sw_context_rx *ctx) ++{ ++ if (!atomic_dec_and_test(&ctx->decrypt_pending)) ++ crypto_wait_req(-EINPROGRESS, &ctx->async_wait); ++ atomic_inc(&ctx->decrypt_pending); ++ ++ return ctx->async_wait.err; + } + + static int tls_do_decryption(struct sock *sk, +@@ -252,6 +259,7 @@ static int tls_do_decryption(struct sock *sk, + aead_request_set_callback(aead_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + tls_decrypt_done, aead_req); ++ DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->decrypt_pending) < 1); + atomic_inc(&ctx->decrypt_pending); + } else { + aead_request_set_callback(aead_req, +@@ -441,7 +449,6 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err) + struct tls_rec *rec; + bool ready = false; + struct sock *sk; +- int pending; + + rec = container_of(aead_req, struct tls_rec, aead_req); + msg_en = &rec->msg_encrypted; +@@ -481,12 +488,8 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err) + ready = true; + } + +- spin_lock_bh(&ctx->encrypt_compl_lock); +- pending = atomic_dec_return(&ctx->encrypt_pending); +- +- if (!pending && ctx->async_notify) ++ if (atomic_dec_and_test(&ctx->encrypt_pending)) + complete(&ctx->async_wait.completion); +- spin_unlock_bh(&ctx->encrypt_compl_lock); + + if (!ready) + return; +@@ -496,6 +499,15 @@ static void tls_encrypt_done(crypto_completion_data_t *data, int err) + schedule_delayed_work(&ctx->tx_work.work, 1); + } + ++static int tls_encrypt_async_wait(struct tls_sw_context_tx *ctx) ++{ ++ if (!atomic_dec_and_test(&ctx->encrypt_pending)) ++ crypto_wait_req(-EINPROGRESS, 
&ctx->async_wait); ++ atomic_inc(&ctx->encrypt_pending); ++ ++ return ctx->async_wait.err; ++} ++ + static int tls_do_encryption(struct sock *sk, + struct tls_context *tls_ctx, + struct tls_sw_context_tx *ctx, +@@ -542,6 +554,7 @@ static int tls_do_encryption(struct sock *sk, + + /* Add the record in tx_list */ + list_add_tail((struct list_head *)&rec->list, &ctx->tx_list); ++ DEBUG_NET_WARN_ON_ONCE(atomic_read(&ctx->encrypt_pending) < 1); + atomic_inc(&ctx->encrypt_pending); + + rc = crypto_aead_encrypt(aead_req); +@@ -953,7 +966,6 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) + int num_zc = 0; + int orig_size; + int ret = 0; +- int pending; + + if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | + MSG_CMSG_COMPAT)) +@@ -1122,24 +1134,12 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) + if (!num_async) { + goto send_end; + } else if (num_zc) { +- /* Wait for pending encryptions to get completed */ +- spin_lock_bh(&ctx->encrypt_compl_lock); +- ctx->async_notify = true; +- +- pending = atomic_read(&ctx->encrypt_pending); +- spin_unlock_bh(&ctx->encrypt_compl_lock); +- if (pending) +- crypto_wait_req(-EINPROGRESS, &ctx->async_wait); +- else +- reinit_completion(&ctx->async_wait.completion); +- +- /* There can be no concurrent accesses, since we have no +- * pending encrypt operations +- */ +- WRITE_ONCE(ctx->async_notify, false); ++ int err; + +- if (ctx->async_wait.err) { +- ret = ctx->async_wait.err; ++ /* Wait for pending encryptions to get completed */ ++ err = tls_encrypt_async_wait(ctx); ++ if (err) { ++ ret = err; + copied = 0; + } + } +@@ -1158,6 +1158,67 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) + return copied > 0 ? copied : ret; + } + ++/* ++ * Handle unexpected EOF during splice without SPLICE_F_MORE set. ++ */ ++void tls_sw_splice_eof(struct socket *sock) ++{ ++ struct sock *sk = sock->sk; ++ struct tls_context *tls_ctx = tls_get_ctx(sk); ++ struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); ++ struct tls_rec *rec; ++ struct sk_msg *msg_pl; ++ ssize_t copied = 0; ++ bool retrying = false; ++ int ret = 0; ++ ++ if (!ctx->open_rec) ++ return; ++ ++ mutex_lock(&tls_ctx->tx_lock); ++ lock_sock(sk); ++ ++retry: ++ /* same checks as in tls_sw_push_pending_record() */ ++ rec = ctx->open_rec; ++ if (!rec) ++ goto unlock; ++ ++ msg_pl = &rec->msg_plaintext; ++ if (msg_pl->sg.size == 0) ++ goto unlock; ++ ++ /* Check the BPF advisor and perform transmission. 
*/ ++ ret = bpf_exec_tx_verdict(msg_pl, sk, false, TLS_RECORD_TYPE_DATA, ++ &copied, 0); ++ switch (ret) { ++ case 0: ++ case -EAGAIN: ++ if (retrying) ++ goto unlock; ++ retrying = true; ++ goto retry; ++ case -EINPROGRESS: ++ break; ++ default: ++ goto unlock; ++ } ++ ++ /* Wait for pending encryptions to get completed */ ++ if (tls_encrypt_async_wait(ctx)) ++ goto unlock; ++ ++ /* Transmit if any encryptions have completed */ ++ if (test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) { ++ cancel_delayed_work(&ctx->tx_work.work); ++ tls_tx_records(sk, 0); ++ } ++ ++unlock: ++ release_sock(sk); ++ mutex_unlock(&tls_ctx->tx_lock); ++} ++ + static int tls_sw_do_sendpage(struct sock *sk, struct page *page, + int offset, size_t size, int flags) + { +@@ -1595,6 +1656,7 @@ static int tls_decrypt_sg(struct sock *sk, struct iov_iter *out_iov, + } else if (out_sg) { + memcpy(sgout, out_sg, n_sgout * sizeof(*sgout)); + } ++ dctx->free_sgout = !!pages; + + /* Prepare and submit AEAD request */ + err = tls_do_decryption(sk, sgin, sgout, dctx->iv, +@@ -2123,16 +2185,10 @@ int tls_sw_recvmsg(struct sock *sk, + + recv_end: + if (async) { +- int ret, pending; ++ int ret; + + /* Wait for all previously submitted records to be decrypted */ +- spin_lock_bh(&ctx->decrypt_compl_lock); +- reinit_completion(&ctx->async_wait.completion); +- pending = atomic_read(&ctx->decrypt_pending); +- spin_unlock_bh(&ctx->decrypt_compl_lock); +- ret = 0; +- if (pending) +- ret = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); ++ ret = tls_decrypt_async_wait(ctx); + __skb_queue_purge(&ctx->async_hold); + + if (ret) { +@@ -2149,7 +2205,6 @@ int tls_sw_recvmsg(struct sock *sk, + else + err = process_rx_list(ctx, msg, &control, 0, + async_copy_bytes, is_peek); +- decrypted += max(err, 0); + } + + copied += decrypted; +@@ -2351,16 +2406,9 @@ void tls_sw_release_resources_tx(struct sock *sk) + struct tls_context *tls_ctx = tls_get_ctx(sk); + struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); + struct tls_rec *rec, *tmp; +- int pending; + + /* Wait for any pending async encryptions to complete */ +- spin_lock_bh(&ctx->encrypt_compl_lock); +- ctx->async_notify = true; +- pending = atomic_read(&ctx->encrypt_pending); +- spin_unlock_bh(&ctx->encrypt_compl_lock); +- +- if (pending) +- crypto_wait_req(-EINPROGRESS, &ctx->async_wait); ++ tls_encrypt_async_wait(ctx); + + tls_tx_records(sk, -1); + +@@ -2513,6 +2561,48 @@ void tls_update_rx_zc_capable(struct tls_context *tls_ctx) + tls_ctx->prot_info.version != TLS_1_3_VERSION; + } + ++static struct tls_sw_context_tx *init_ctx_tx(struct tls_context *ctx, struct sock *sk) ++{ ++ struct tls_sw_context_tx *sw_ctx_tx; ++ ++ if (!ctx->priv_ctx_tx) { ++ sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); ++ if (!sw_ctx_tx) ++ return NULL; ++ } else { ++ sw_ctx_tx = ctx->priv_ctx_tx; ++ } ++ ++ crypto_init_wait(&sw_ctx_tx->async_wait); ++ atomic_set(&sw_ctx_tx->encrypt_pending, 1); ++ INIT_LIST_HEAD(&sw_ctx_tx->tx_list); ++ INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); ++ sw_ctx_tx->tx_work.sk = sk; ++ ++ return sw_ctx_tx; ++} ++ ++static struct tls_sw_context_rx *init_ctx_rx(struct tls_context *ctx) ++{ ++ struct tls_sw_context_rx *sw_ctx_rx; ++ ++ if (!ctx->priv_ctx_rx) { ++ sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); ++ if (!sw_ctx_rx) ++ return NULL; ++ } else { ++ sw_ctx_rx = ctx->priv_ctx_rx; ++ } ++ ++ crypto_init_wait(&sw_ctx_rx->async_wait); ++ atomic_set(&sw_ctx_rx->decrypt_pending, 1); ++ init_waitqueue_head(&sw_ctx_rx->wq); ++ 
skb_queue_head_init(&sw_ctx_rx->rx_list); ++ skb_queue_head_init(&sw_ctx_rx->async_hold); ++ ++ return sw_ctx_rx; ++} ++ + int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) + { + struct tls_context *tls_ctx = tls_get_ctx(sk); +@@ -2534,48 +2624,22 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx) + } + + if (tx) { +- if (!ctx->priv_ctx_tx) { +- sw_ctx_tx = kzalloc(sizeof(*sw_ctx_tx), GFP_KERNEL); +- if (!sw_ctx_tx) { +- rc = -ENOMEM; +- goto out; +- } +- ctx->priv_ctx_tx = sw_ctx_tx; +- } else { +- sw_ctx_tx = +- (struct tls_sw_context_tx *)ctx->priv_ctx_tx; +- } +- } else { +- if (!ctx->priv_ctx_rx) { +- sw_ctx_rx = kzalloc(sizeof(*sw_ctx_rx), GFP_KERNEL); +- if (!sw_ctx_rx) { +- rc = -ENOMEM; +- goto out; +- } +- ctx->priv_ctx_rx = sw_ctx_rx; +- } else { +- sw_ctx_rx = +- (struct tls_sw_context_rx *)ctx->priv_ctx_rx; +- } +- } ++ ctx->priv_ctx_tx = init_ctx_tx(ctx, sk); ++ if (!ctx->priv_ctx_tx) ++ return -ENOMEM; + +- if (tx) { +- crypto_init_wait(&sw_ctx_tx->async_wait); +- spin_lock_init(&sw_ctx_tx->encrypt_compl_lock); ++ sw_ctx_tx = ctx->priv_ctx_tx; + crypto_info = &ctx->crypto_send.info; + cctx = &ctx->tx; + aead = &sw_ctx_tx->aead_send; +- INIT_LIST_HEAD(&sw_ctx_tx->tx_list); +- INIT_DELAYED_WORK(&sw_ctx_tx->tx_work.work, tx_work_handler); +- sw_ctx_tx->tx_work.sk = sk; + } else { +- crypto_init_wait(&sw_ctx_rx->async_wait); +- spin_lock_init(&sw_ctx_rx->decrypt_compl_lock); +- init_waitqueue_head(&sw_ctx_rx->wq); ++ ctx->priv_ctx_rx = init_ctx_rx(ctx); ++ if (!ctx->priv_ctx_rx) ++ return -ENOMEM; ++ ++ sw_ctx_rx = ctx->priv_ctx_rx; + crypto_info = &ctx->crypto_recv.info; + cctx = &ctx->rx; +- skb_queue_head_init(&sw_ctx_rx->rx_list); +- skb_queue_head_init(&sw_ctx_rx->async_hold); + aead = &sw_ctx_rx->aead_recv; + } + +diff --git a/net/wireless/core.c b/net/wireless/core.c +index 8809e668ed912..3fcddc8687ed4 100644 +--- a/net/wireless/core.c ++++ b/net/wireless/core.c +@@ -1671,6 +1671,7 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy, + unsigned long delay) + { + if (!delay) { ++ del_timer(&dwork->timer); + wiphy_work_queue(wiphy, &dwork->work); + return; + } +diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c +index ac1a645afa8df..d0320e35accbf 100644 +--- a/net/xfrm/xfrm_input.c ++++ b/net/xfrm/xfrm_input.c +@@ -180,6 +180,8 @@ static int xfrm4_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb) + int optlen = 0; + int err = -EINVAL; + ++ skb->protocol = htons(ETH_P_IP); ++ + if (unlikely(XFRM_MODE_SKB_CB(skb)->protocol == IPPROTO_BEETPH)) { + struct ip_beet_phdr *ph; + int phlen; +@@ -232,8 +234,7 @@ static int xfrm4_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb) + { + int err = -EINVAL; + +- if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP) +- goto out; ++ skb->protocol = htons(ETH_P_IP); + + if (!pskb_may_pull(skb, sizeof(struct iphdr))) + goto out; +@@ -270,8 +271,8 @@ static int xfrm6_remove_tunnel_encap(struct xfrm_state *x, struct sk_buff *skb) + { + int err = -EINVAL; + +- if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPV6) +- goto out; ++ skb->protocol = htons(ETH_P_IPV6); ++ + if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) + goto out; + +@@ -301,6 +302,8 @@ static int xfrm6_remove_beet_encap(struct xfrm_state *x, struct sk_buff *skb) + int size = sizeof(struct ipv6hdr); + int err; + ++ skb->protocol = htons(ETH_P_IPV6); ++ + err = skb_cow_head(skb, size + skb->mac_len); + if (err) + goto out; +@@ -332,22 +335,26 @@ static int xfrm6_remove_beet_encap(struct xfrm_state *x, 
struct sk_buff *skb) + */ + static int + xfrm_inner_mode_encap_remove(struct xfrm_state *x, +- const struct xfrm_mode *inner_mode, + struct sk_buff *skb) + { +- switch (inner_mode->encap) { ++ switch (x->props.mode) { + case XFRM_MODE_BEET: +- if (inner_mode->family == AF_INET) ++ switch (x->sel.family) { ++ case AF_INET: + return xfrm4_remove_beet_encap(x, skb); +- if (inner_mode->family == AF_INET6) ++ case AF_INET6: + return xfrm6_remove_beet_encap(x, skb); ++ } + break; + case XFRM_MODE_TUNNEL: +- if (inner_mode->family == AF_INET) ++ switch (XFRM_MODE_SKB_CB(skb)->protocol) { ++ case IPPROTO_IPIP: + return xfrm4_remove_tunnel_encap(x, skb); +- if (inner_mode->family == AF_INET6) ++ case IPPROTO_IPV6: + return xfrm6_remove_tunnel_encap(x, skb); + break; ++ } ++ return -EINVAL; + } + + WARN_ON_ONCE(1); +@@ -356,9 +363,7 @@ xfrm_inner_mode_encap_remove(struct xfrm_state *x, + + static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) + { +- const struct xfrm_mode *inner_mode = &x->inner_mode; +- +- switch (x->outer_mode.family) { ++ switch (x->props.family) { + case AF_INET: + xfrm4_extract_header(skb); + break; +@@ -370,25 +375,7 @@ static int xfrm_prepare_input(struct xfrm_state *x, struct sk_buff *skb) + return -EAFNOSUPPORT; + } + +- if (x->sel.family == AF_UNSPEC) { +- inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol); +- if (!inner_mode) +- return -EAFNOSUPPORT; +- } +- +- switch (inner_mode->family) { +- case AF_INET: +- skb->protocol = htons(ETH_P_IP); +- break; +- case AF_INET6: +- skb->protocol = htons(ETH_P_IPV6); +- break; +- default: +- WARN_ON_ONCE(1); +- break; +- } +- +- return xfrm_inner_mode_encap_remove(x, inner_mode, skb); ++ return xfrm_inner_mode_encap_remove(x, skb); + } + + /* Remove encapsulation header. +@@ -434,17 +421,16 @@ static int xfrm6_transport_input(struct xfrm_state *x, struct sk_buff *skb) + } + + static int xfrm_inner_mode_input(struct xfrm_state *x, +- const struct xfrm_mode *inner_mode, + struct sk_buff *skb) + { +- switch (inner_mode->encap) { ++ switch (x->props.mode) { + case XFRM_MODE_BEET: + case XFRM_MODE_TUNNEL: + return xfrm_prepare_input(x, skb); + case XFRM_MODE_TRANSPORT: +- if (inner_mode->family == AF_INET) ++ if (x->props.family == AF_INET) + return xfrm4_transport_input(x, skb); +- if (inner_mode->family == AF_INET6) ++ if (x->props.family == AF_INET6) + return xfrm6_transport_input(x, skb); + break; + case XFRM_MODE_ROUTEOPTIMIZATION: +@@ -462,7 +448,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) + { + const struct xfrm_state_afinfo *afinfo; + struct net *net = dev_net(skb->dev); +- const struct xfrm_mode *inner_mode; + int err; + __be32 seq; + __be32 seq_hi; +@@ -492,7 +477,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) + goto drop; + } + +- family = x->outer_mode.family; ++ family = x->props.family; + + /* An encap_type of -1 indicates async resumption. 
*/ + if (encap_type == -1) { +@@ -676,17 +661,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) + + XFRM_MODE_SKB_CB(skb)->protocol = nexthdr; + +- inner_mode = &x->inner_mode; +- +- if (x->sel.family == AF_UNSPEC) { +- inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol); +- if (inner_mode == NULL) { +- XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR); +- goto drop; +- } +- } +- +- if (xfrm_inner_mode_input(x, inner_mode, skb)) { ++ if (xfrm_inner_mode_input(x, skb)) { + XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR); + goto drop; + } +@@ -701,7 +676,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) + * transport mode so the outer address is identical. + */ + daddr = &x->id.daddr; +- family = x->outer_mode.family; ++ family = x->props.family; + + err = xfrm_parse_spi(skb, nexthdr, &spi, &seq); + if (err < 0) { +@@ -732,7 +707,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) + + err = -EAFNOSUPPORT; + rcu_read_lock(); +- afinfo = xfrm_state_afinfo_get_rcu(x->inner_mode.family); ++ afinfo = xfrm_state_afinfo_get_rcu(x->props.family); + if (likely(afinfo)) + err = afinfo->transport_finish(skb, xfrm_gro || async); + rcu_read_unlock(); +diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c +index 9a5e79a38c679..07a7ee43b8ae2 100644 +--- a/net/xfrm/xfrm_output.c ++++ b/net/xfrm/xfrm_output.c +@@ -414,7 +414,7 @@ static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb) + IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE; + skb->protocol = htons(ETH_P_IP); + +- switch (x->outer_mode.encap) { ++ switch (x->props.mode) { + case XFRM_MODE_BEET: + return xfrm4_beet_encap_add(x, skb); + case XFRM_MODE_TUNNEL: +@@ -437,7 +437,7 @@ static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) + skb->ignore_df = 1; + skb->protocol = htons(ETH_P_IPV6); + +- switch (x->outer_mode.encap) { ++ switch (x->props.mode) { + case XFRM_MODE_BEET: + return xfrm6_beet_encap_add(x, skb); + case XFRM_MODE_TUNNEL: +@@ -453,22 +453,22 @@ static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb) + + static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb) + { +- switch (x->outer_mode.encap) { ++ switch (x->props.mode) { + case XFRM_MODE_BEET: + case XFRM_MODE_TUNNEL: +- if (x->outer_mode.family == AF_INET) ++ if (x->props.family == AF_INET) + return xfrm4_prepare_output(x, skb); +- if (x->outer_mode.family == AF_INET6) ++ if (x->props.family == AF_INET6) + return xfrm6_prepare_output(x, skb); + break; + case XFRM_MODE_TRANSPORT: +- if (x->outer_mode.family == AF_INET) ++ if (x->props.family == AF_INET) + return xfrm4_transport_output(x, skb); +- if (x->outer_mode.family == AF_INET6) ++ if (x->props.family == AF_INET6) + return xfrm6_transport_output(x, skb); + break; + case XFRM_MODE_ROUTEOPTIMIZATION: +- if (x->outer_mode.family == AF_INET6) ++ if (x->props.family == AF_INET6) + return xfrm6_ro_output(x, skb); + WARN_ON_ONCE(1); + break; +@@ -866,21 +866,10 @@ static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb) + + static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb) + { +- const struct xfrm_mode *inner_mode; +- +- if (x->sel.family == AF_UNSPEC) +- inner_mode = xfrm_ip2inner_mode(x, +- xfrm_af2proto(skb_dst(skb)->ops->family)); +- else +- inner_mode = &x->inner_mode; +- +- if (inner_mode == NULL) +- return -EAFNOSUPPORT; +- +- switch (inner_mode->family) { +- case AF_INET: ++ switch 
(skb->protocol) { ++ case htons(ETH_P_IP): + return xfrm4_extract_output(x, skb); +- case AF_INET6: ++ case htons(ETH_P_IPV6): + return xfrm6_extract_output(x, skb); + } + +diff --git a/samples/bpf/asm_goto_workaround.h b/samples/bpf/asm_goto_workaround.h +index 7048bb3594d65..634e81d83efd9 100644 +--- a/samples/bpf/asm_goto_workaround.h ++++ b/samples/bpf/asm_goto_workaround.h +@@ -4,14 +4,14 @@ + #define __ASM_GOTO_WORKAROUND_H + + /* +- * This will bring in asm_volatile_goto and asm_inline macro definitions ++ * This will bring in asm_goto_output and asm_inline macro definitions + * if enabled by compiler and config options. + */ + #include <linux/types.h> + +-#ifdef asm_volatile_goto +-#undef asm_volatile_goto +-#define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto") ++#ifdef asm_goto_output ++#undef asm_goto_output ++#define asm_goto_output(x...) asm volatile("invalid use of asm_goto_output") + #endif + + /* +diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost +index e41dee64d429c..39aea753d0bdc 100644 +--- a/scripts/Makefile.modpost ++++ b/scripts/Makefile.modpost +@@ -44,6 +44,7 @@ modpost-args = \ + $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \ + $(if $(KBUILD_NSDEPS),-d $(MODULES_NSDEPS)) \ + $(if $(CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS)$(KBUILD_NSDEPS),-N) \ ++ $(if $(findstring 1, $(KBUILD_EXTRA_WARN)),-W) \ + -o $@ + + # 'make -i -k' ignores compile errors, and builds as many modules as possible. +diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl +index 1e5e66ae5a522..ecf4250b0d2d2 100755 +--- a/scripts/checkpatch.pl ++++ b/scripts/checkpatch.pl +@@ -4971,7 +4971,7 @@ sub process { + if|for|while|switch|return|case| + volatile|__volatile__| + __attribute__|format|__extension__| +- asm|__asm__)$/x) ++ asm|__asm__|scoped_guard)$/x) + { + # cpp #define statements have non-optional spaces, ie + # if there is a space between the name and the open +diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh +index 32e573943cf03..458b2948b580d 100755 +--- a/scripts/link-vmlinux.sh ++++ b/scripts/link-vmlinux.sh +@@ -135,8 +135,13 @@ gen_btf() + ${OBJCOPY} --only-section=.BTF --set-section-flags .BTF=alloc,readonly \ + --strip-all ${1} ${2} 2>/dev/null + # Change e_type to ET_REL so that it can be used to link final vmlinux. +- # Unlike GNU ld, lld does not allow an ET_EXEC input. +- printf '\1' | dd of=${2} conv=notrunc bs=1 seek=16 status=none ++ # GNU ld 2.35+ and lld do not allow an ET_EXEC input. ++ if is_enabled CONFIG_CPU_BIG_ENDIAN; then ++ et_rel='\0\1' ++ else ++ et_rel='\1\0' ++ fi ++ printf "${et_rel}" | dd of=${2} conv=notrunc bs=1 seek=16 status=none + } + + # Create ${2} .S file with all symbols from the ${1} object file +diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c +index e6be7fc2625fd..686eed37f9781 100644 +--- a/scripts/mod/modpost.c ++++ b/scripts/mod/modpost.c +@@ -41,6 +41,8 @@ static bool allow_missing_ns_imports; + + static bool error_occurred; + ++static bool extra_warn; ++ + /* + * Cut off the warnings when there are too many. This typically occurs when + * vmlinux is missing. ('make modules' without building vmlinux.) 
+@@ -809,7 +811,7 @@ static void check_section(const char *modname, struct elf_info *elf, + #define ALL_INIT_TEXT_SECTIONS \ + ".init.text", ".meminit.text" + #define ALL_EXIT_TEXT_SECTIONS \ +- ".exit.text", ".memexit.text" ++ ".exit.text" + + #define ALL_PCI_INIT_SECTIONS \ + ".pci_fixup_early", ".pci_fixup_header", ".pci_fixup_final", \ +@@ -817,23 +819,22 @@ static void check_section(const char *modname, struct elf_info *elf, + ".pci_fixup_resume_early", ".pci_fixup_suspend" + + #define ALL_XXXINIT_SECTIONS MEM_INIT_SECTIONS +-#define ALL_XXXEXIT_SECTIONS MEM_EXIT_SECTIONS + + #define ALL_INIT_SECTIONS INIT_SECTIONS, ALL_XXXINIT_SECTIONS +-#define ALL_EXIT_SECTIONS EXIT_SECTIONS, ALL_XXXEXIT_SECTIONS ++#define ALL_EXIT_SECTIONS EXIT_SECTIONS + + #define DATA_SECTIONS ".data", ".data.rel" +-#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \ +- ".kprobes.text", ".cpuidle.text", ".noinstr.text" ++#define TEXT_SECTIONS ".text", ".text.*", ".sched.text", \ ++ ".kprobes.text", ".cpuidle.text", ".noinstr.text", \ ++ ".ltext", ".ltext.*" + #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \ +- ".fixup", ".entry.text", ".exception.text", ".text.*", \ ++ ".fixup", ".entry.text", ".exception.text", \ + ".coldtext", ".softirqentry.text" + + #define INIT_SECTIONS ".init.*" + #define MEM_INIT_SECTIONS ".meminit.*" + + #define EXIT_SECTIONS ".exit.*" +-#define MEM_EXIT_SECTIONS ".memexit.*" + + #define ALL_TEXT_SECTIONS ALL_INIT_TEXT_SECTIONS, ALL_EXIT_TEXT_SECTIONS, \ + TEXT_SECTIONS, OTHER_TEXT_SECTIONS +@@ -862,7 +863,6 @@ enum mismatch { + TEXT_TO_ANY_EXIT, + DATA_TO_ANY_EXIT, + XXXINIT_TO_SOME_INIT, +- XXXEXIT_TO_SOME_EXIT, + ANY_INIT_TO_ANY_EXIT, + ANY_EXIT_TO_ANY_INIT, + EXPORT_TO_INIT_EXIT, +@@ -937,12 +937,6 @@ static const struct sectioncheck sectioncheck[] = { + .bad_tosec = { INIT_SECTIONS, NULL }, + .mismatch = XXXINIT_TO_SOME_INIT, + }, +-/* Do not reference exit code/data from memexit code/data */ +-{ +- .fromsec = { ALL_XXXEXIT_SECTIONS, NULL }, +- .bad_tosec = { EXIT_SECTIONS, NULL }, +- .mismatch = XXXEXIT_TO_SOME_EXIT, +-}, + /* Do not use exit code/data from init code */ + { + .fromsec = { ALL_INIT_SECTIONS, NULL }, +@@ -1085,9 +1079,20 @@ static int secref_whitelist(const struct sectioncheck *mismatch, + "*_console"))) + return 0; + +- /* symbols in data sections that may refer to meminit/exit sections */ ++ /* symbols in data sections that may refer to meminit sections */ + if (match(fromsec, PATTERNS(DATA_SECTIONS)) && +- match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS, ALL_EXIT_SECTIONS)) && ++ match(tosec, PATTERNS(ALL_XXXINIT_SECTIONS)) && ++ match(fromsym, PATTERNS("*driver"))) ++ return 0; ++ ++ /* ++ * symbols in data sections must not refer to .exit.*, but there are ++ * quite a few offenders, so hide these unless for W=1 builds until ++ * these are fixed. 
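/*
 * A sketch of how the reshuffled modpost section lists behave: each list is
 * a set of shell-style globs, and moving ".text.*" from OTHER_TEXT_SECTIONS
 * into TEXT_SECTIONS (plus the new ".ltext"/".ltext.*" entries) widens what
 * counts as ordinary text. fnmatch() stands in for modpost's own match()
 * helper here; the list is abbreviated.
 */
#include <fnmatch.h>
#include <stdio.h>

static const char *const text_sections[] = {
	".text", ".text.*", ".sched.text", ".kprobes.text",
	".cpuidle.text", ".noinstr.text", ".ltext", ".ltext.*", NULL,
};

static int in_list(const char *const *list, const char *sec)
{
	for (; *list; list++)
		if (!fnmatch(*list, sec, 0))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", in_list(text_sections, ".text.unlikely"));	/* 1 */
	printf("%d\n", in_list(text_sections, ".ltext.hot"));		/* 1 */
	printf("%d\n", in_list(text_sections, ".exit.text"));		/* 0 */
	return 0;
}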
++ */ ++ if (!extra_warn && ++ match(fromsec, PATTERNS(DATA_SECTIONS)) && ++ match(tosec, PATTERNS(EXIT_SECTIONS)) && + match(fromsym, PATTERNS("*driver"))) + return 0; + +@@ -1254,7 +1259,6 @@ static void report_sec_mismatch(const char *modname, + case TEXT_TO_ANY_EXIT: + case DATA_TO_ANY_EXIT: + case XXXINIT_TO_SOME_INIT: +- case XXXEXIT_TO_SOME_EXIT: + case ANY_INIT_TO_ANY_EXIT: + case ANY_EXIT_TO_ANY_INIT: + warn("%s: section mismatch in reference: %s (section: %s) -> %s (section: %s)\n", +@@ -2290,7 +2294,7 @@ int main(int argc, char **argv) + LIST_HEAD(dump_lists); + struct dump_list *dl, *dl2; + +- while ((opt = getopt(argc, argv, "ei:mnT:o:awENd:")) != -1) { ++ while ((opt = getopt(argc, argv, "ei:mnT:o:aWwENd:")) != -1) { + switch (opt) { + case 'e': + external_module = true; +@@ -2315,6 +2319,9 @@ int main(int argc, char **argv) + case 'T': + files_source = optarg; + break; ++ case 'W': ++ extra_warn = true; ++ break; + case 'w': + warn_unresolved = true; + break; +diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c +index 6bf9caca09684..a72e6cf61a1f0 100644 +--- a/scripts/mod/sumversion.c ++++ b/scripts/mod/sumversion.c +@@ -326,7 +326,12 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md) + + /* Sum all files in the same dir or subdirs. */ + while ((line = get_line(&pos))) { +- char* p = line; ++ char* p; ++ ++ /* trim the leading spaces away */ ++ while (isspace(*line)) ++ line++; ++ p = line; + + if (strncmp(line, "source_", sizeof("source_")-1) == 0) { + p = strrchr(line, ' '); +diff --git a/security/apparmor/include/lib.h b/security/apparmor/include/lib.h +index f42359f58eb58..d468c8b90298d 100644 +--- a/security/apparmor/include/lib.h ++++ b/security/apparmor/include/lib.h +@@ -226,7 +226,7 @@ void aa_policy_destroy(struct aa_policy *policy); + */ + #define fn_label_build(L, P, GFP, FN) \ + ({ \ +- __label__ __cleanup, __done; \ ++ __label__ __do_cleanup, __done; \ + struct aa_label *__new_; \ + \ + if ((L)->size > 1) { \ +@@ -244,7 +244,7 @@ void aa_policy_destroy(struct aa_policy *policy); + __new_ = (FN); \ + AA_BUG(!__new_); \ + if (IS_ERR(__new_)) \ +- goto __cleanup; \ ++ goto __do_cleanup; \ + __lvec[__j++] = __new_; \ + } \ + for (__j = __count = 0; __j < (L)->size; __j++) \ +@@ -266,7 +266,7 @@ void aa_policy_destroy(struct aa_policy *policy); + vec_cleanup(profile, __pvec, __count); \ + } else \ + __new_ = NULL; \ +-__cleanup: \ ++__do_cleanup: \ + vec_cleanup(label, __lvec, (L)->size); \ + } else { \ + (P) = labels_profile(L); \ +diff --git a/security/security.c b/security/security.c +index fc15b963e1028..1b504c296551c 100644 +--- a/security/security.c ++++ b/security/security.c +@@ -2186,7 +2186,19 @@ EXPORT_SYMBOL(security_inode_setsecctx); + + int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) + { +- return call_int_hook(inode_getsecctx, -EOPNOTSUPP, inode, ctx, ctxlen); ++ struct security_hook_list *hp; ++ int rc; ++ ++ /* ++ * Only one module will provide a security context. 
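/*
 * A sketch of the parse_source_files() fix from the sumversion.c hunk: lines
 * in generated .cmd files may carry leading whitespace, so the parser must
 * trim it before matching the "source_..." key (the companion "deps_..."
 * check in the surrounding function is assumed to follow the same pattern;
 * it is not quoted in the hunk).
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static const char *classify(const char *line)
{
	while (isspace((unsigned char)*line))	/* trim the leading spaces */
		line++;
	if (strncmp(line, "source_", sizeof("source_") - 1) == 0)
		return "source";
	if (strncmp(line, "deps_", sizeof("deps_") - 1) == 0)
		return "deps";
	return "other";
}

int main(void)
{
	printf("%s\n", classify("   source_foo.o := foo.c"));	/* source */
	printf("%s\n", classify("deps_foo.o := \\"));		/* deps */
	return 0;
}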
++ */ ++ hlist_for_each_entry(hp, &security_hook_heads.inode_getsecctx, list) { ++ rc = hp->hook.inode_getsecctx(inode, ctx, ctxlen); ++ if (rc != LSM_RET_DEFAULT(inode_getsecctx)) ++ return rc; ++ } ++ ++ return LSM_RET_DEFAULT(inode_getsecctx); + } + EXPORT_SYMBOL(security_inode_getsecctx); + +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index e8819e8a98763..e8209178d87bb 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -344,6 +344,7 @@ enum { + CXT_FIXUP_HP_ZBOOK_MUTE_LED, + CXT_FIXUP_HEADSET_MIC, + CXT_FIXUP_HP_MIC_NO_PRESENCE, ++ CXT_PINCFG_SWS_JS201D, + }; + + /* for hda_fixup_thinkpad_acpi() */ +@@ -841,6 +842,17 @@ static const struct hda_pintbl cxt_pincfg_lemote[] = { + {} + }; + ++/* SuoWoSi/South-holding JS201D with sn6140 */ ++static const struct hda_pintbl cxt_pincfg_sws_js201d[] = { ++ { 0x16, 0x03211040 }, /* hp out */ ++ { 0x17, 0x91170110 }, /* SPK/Class_D */ ++ { 0x18, 0x95a70130 }, /* Internal mic */ ++ { 0x19, 0x03a11020 }, /* Headset Mic */ ++ { 0x1a, 0x40f001f0 }, /* Not used */ ++ { 0x21, 0x40f001f0 }, /* Not used */ ++ {} ++}; ++ + static const struct hda_fixup cxt_fixups[] = { + [CXT_PINCFG_LENOVO_X200] = { + .type = HDA_FIXUP_PINS, +@@ -996,6 +1008,10 @@ static const struct hda_fixup cxt_fixups[] = { + .chained = true, + .chain_id = CXT_FIXUP_HEADSET_MIC, + }, ++ [CXT_PINCFG_SWS_JS201D] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = cxt_pincfg_sws_js201d, ++ }, + }; + + static const struct snd_pci_quirk cxt5045_fixups[] = { +@@ -1069,6 +1085,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), ++ SND_PCI_QUIRK(0x14f1, 0x0265, "SWS JS201D", CXT_PINCFG_SWS_JS201D), + SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410), +@@ -1109,6 +1126,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = { + { .id = CXT_FIXUP_HP_ZBOOK_MUTE_LED, .name = "hp-zbook-mute-led" }, + { .id = CXT_FIXUP_HP_MIC_NO_PRESENCE, .name = "hp-mic-fix" }, + { .id = CXT_PINCFG_LENOVO_NOTEBOOK, .name = "lenovo-20149" }, ++ { .id = CXT_PINCFG_SWS_JS201D, .name = "sws-js201d" }, + {} + }; + +diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c +index 627899959ffe8..e41316e2e9833 100644 +--- a/sound/pci/hda/patch_cs8409.c ++++ b/sound/pci/hda/patch_cs8409.c +@@ -1371,6 +1371,7 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac + spec->scodecs[CS8409_CODEC1] = &dolphin_cs42l42_1; + spec->scodecs[CS8409_CODEC1]->codec = codec; + spec->num_scodecs = 2; ++ spec->gen.suppress_vmaster = 1; + + codec->patch_ops = cs8409_dolphin_patch_ops; + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 62f2137044923..92a656fb53212 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -9377,7 +9377,7 @@ static const struct hda_fixup alc269_fixups[] = { + .type = HDA_FIXUP_FUNC, + .v.func = cs35l41_fixup_i2c_two, + .chained = true, +- .chain_id = ALC269_FIXUP_THINKPAD_ACPI, ++ .chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK, + }, + [ALC245_FIXUP_HP_MUTE_LED_COEFBIT] = { + .type = HDA_FIXUP_FUNC, +@@ -9392,6 +9392,8 @@ static 
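/*
 * A userspace sketch of the security_inode_getsecctx() rework above: rather
 * than call_int_hook(), which stops at the first non-zero return, the hook
 * list is walked and the first result that differs from the hook's default
 * is returned, so exactly one LSM provides the security context. An array of
 * function pointers stands in for the kernel's hlist of
 * struct security_hook_list, and RET_DEFAULT stands in for
 * LSM_RET_DEFAULT(inode_getsecctx).
 */
#include <stdio.h>

#define RET_DEFAULT (-95)			/* -EOPNOTSUPP */

typedef int (*getsecctx_fn)(void);

static int lsm_that_declines(void) { return RET_DEFAULT; }
static int lsm_that_provides(void) { return 0; }

static int inode_getsecctx(const getsecctx_fn *hooks, int n)
{
	for (int i = 0; i < n; i++) {
		int rc = hooks[i]();

		if (rc != RET_DEFAULT)
			return rc;		/* first real answer wins */
	}
	return RET_DEFAULT;
}

int main(void)
{
	const getsecctx_fn hooks[] = { lsm_that_declines, lsm_that_provides };

	printf("%d\n", inode_getsecctx(hooks, 2));	/* 0 */
	return 0;
}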
const struct hda_fixup alc269_fixups[] = { + [ALC287_FIXUP_THINKPAD_I2S_SPK] = { + .type = HDA_FIXUP_FUNC, + .v.func = alc287_fixup_bind_dacs, ++ .chained = true, ++ .chain_id = ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK, + }, + [ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD] = { + .type = HDA_FIXUP_FUNC, +@@ -9431,6 +9433,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), + SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1269, "Acer SWIFT SF314-54", ALC256_FIXUP_ACER_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1025, 0x126a, "Acer Swift SF114-32", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), +@@ -9617,6 +9620,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8786, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8787, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), + SND_PCI_QUIRK(0x103c, 0x8788, "HP OMEN 15", ALC285_FIXUP_HP_MUTE_LED), ++ SND_PCI_QUIRK(0x103c, 0x87b7, "HP Laptop 14-fq0xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x87c8, "HP", ALC287_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x87e5, "HP ProBook 440 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x87e7, "HP ProBook 450 G8 Notebook PC", ALC236_FIXUP_HP_GPIO_LED), +@@ -9686,6 +9690,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8abb, "HP ZBook Firefly 14 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8ad1, "HP EliteBook 840 14 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), ++ SND_PCI_QUIRK(0x103c, 0x8b0f, "HP Elite mt645 G7 Mobile Thin Client U81", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8b2f, "HP 255 15.6 inch G10 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), + SND_PCI_QUIRK(0x103c, 0x8b42, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8b43, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), +@@ -9693,6 +9698,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8b45, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8b46, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8b47, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), ++ SND_PCI_QUIRK(0x103c, 0x8b59, "HP Elite mt645 G7 Mobile Thin Client U89", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8b63, "HP Elite Dragonfly 13.5 inch G4", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), +@@ -9722,6 +9728,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), + SND_PCI_QUIRK(0x103c, 0x8c97, "HP ZBook", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), ++ SND_PCI_QUIRK(0x103c, 0x8ca1, "HP ZBook Power", 
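/*
 * A decoder sketch for the kind of values the new cxt_pincfg_sws_js201d[]
 * table carries: each 32-bit pin default config packs jack properties into
 * fixed bit fields (the HDA "Configuration Default" register). The field
 * offsets below are quoted from memory of the HDA specification and should
 * be treated as an assumption; the device-name table is abbreviated.
 */
#include <stdint.h>
#include <stdio.h>

static const char *const dev_names[16] = {
	[0x0] = "Line Out", [0x1] = "Speaker", [0x2] = "HP Out",
	[0x8] = "Line In",  [0xa] = "Mic In",  [0xf] = "Other",
};

static void decode_pincfg(uint32_t cfg)
{
	unsigned int port  = (cfg >> 30) & 0x3;	/* 0=jack 1=none 2=fixed */
	unsigned int dev   = (cfg >> 20) & 0xf;	/* default device */
	unsigned int assoc = (cfg >> 4) & 0xf;
	unsigned int seq   = cfg & 0xf;

	printf("0x%08x: port=%u dev=%s assoc=%u seq=%u\n", cfg, port,
	       dev_names[dev] ? dev_names[dev] : "?", assoc, seq);
}

int main(void)
{
	decode_pincfg(0x03211040);	/* jack, HP Out   -> "hp out" */
	decode_pincfg(0x91170110);	/* fixed, Speaker -> "SPK/Class_D" */
	decode_pincfg(0x95a70130);	/* fixed, Mic In  -> "Internal mic" */
	return 0;
}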
ALC236_FIXUP_HP_GPIO_LED), ++ SND_PCI_QUIRK(0x103c, 0x8ca2, "HP ZBook Power", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8ca4, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8ca7, "HP ZBook Fury", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8cf5, "HP ZBook Studio 16", ALC245_FIXUP_CS35L41_SPI_4_HP_GPIO_LED), +@@ -10049,6 +10057,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO), ++ SND_PCI_QUIRK(0x2782, 0x1707, "Vaio VJFE-ADL", ALC298_FIXUP_SPK_VOLUME), + SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC), + SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED), + SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", ALC256_FIXUP_INTEL_NUC10), +diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c +index 808d002826233..28da4e1858d7e 100644 +--- a/sound/soc/amd/yc/acp6x-mach.c ++++ b/sound/soc/amd/yc/acp6x-mach.c +@@ -241,6 +241,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "82YM"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "83AS"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +@@ -297,6 +304,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 15 2022"), + } + }, ++ { ++ .driver_data = &acp6x_card, ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "Micro-Star International Co., Ltd."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "Bravo 15 C7VF"), ++ } ++ }, + { + .driver_data = &acp6x_card, + .matches = { +diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c +index fd3dca08460ba..844d14d4c9a51 100644 +--- a/sound/soc/codecs/rt5645.c ++++ b/sound/soc/codecs/rt5645.c +@@ -3288,6 +3288,7 @@ static void rt5645_jack_detect_work(struct work_struct *work) + report, SND_JACK_HEADPHONE); + snd_soc_jack_report(rt5645->mic_jack, + report, SND_JACK_MICROPHONE); ++ mutex_unlock(&rt5645->jd_mutex); + return; + case 4: + val = snd_soc_component_read(rt5645->component, RT5645_A_JD_CTRL1) & 0x0020; +diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c +index a2abd1a111612..e80be4e4fa8b4 100644 +--- a/sound/soc/codecs/wcd938x.c ++++ b/sound/soc/codecs/wcd938x.c +@@ -3588,7 +3588,7 @@ static int wcd938x_probe(struct platform_device *pdev) + ret = wcd938x_populate_dt_data(wcd938x, dev); + if (ret) { + dev_err(dev, "%s: Fail to obtain platform data\n", __func__); +- return -EINVAL; ++ return ret; + } + + ret = wcd938x_add_slave_components(wcd938x, dev, &match); +diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h +index 11ff975242cac..e2ff22b379a44 100644 +--- a/tools/arch/x86/include/asm/rmwcc.h ++++ b/tools/arch/x86/include/asm/rmwcc.h +@@ -4,7 +4,7 @@ + + #define __GEN_RMWcc(fullop, var, cc, ...) 
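/*
 * A minimal, x86-only sketch of the construct behind the
 * asm_volatile_goto -> "asm goto" rename: __GEN_RMWcc() expands to an asm
 * statement that jumps straight to a C label on a condition code. Requires a
 * GCC or Clang with asm-goto support; dec_and_test is an illustrative name,
 * not the kernel helper.
 */
#include <stdio.h>

static int dec_and_test(int *v)
{
	/* decrement *v in memory; branch to the C label if it hit zero */
	asm goto("decl %0; je %l[hit_zero]"
		 : /* no outputs */
		 : "m" (*v)
		 : "cc", "memory"
		 : hit_zero);
	return 0;
hit_zero:
	return 1;
}

int main(void)
{
	int v = 2;
	int first = dec_and_test(&v);	/* 2 -> 1, no branch */
	int second = dec_and_test(&v);	/* 1 -> 0, branch taken */

	printf("%d %d\n", first, second);	/* 0 1 */
	return 0;
}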
\ + do { \ +- asm_volatile_goto (fullop "; j" cc " %l[cc_label]" \ ++ asm goto (fullop "; j" cc " %l[cc_label]" \ + : : "m" (var), ## __VA_ARGS__ \ + : "memory" : cc_label); \ + return 0; \ +diff --git a/tools/include/linux/compiler_types.h b/tools/include/linux/compiler_types.h +index 1bdd834bdd571..d09f9dc172a48 100644 +--- a/tools/include/linux/compiler_types.h ++++ b/tools/include/linux/compiler_types.h +@@ -36,8 +36,8 @@ + #include <linux/compiler-gcc.h> + #endif + +-#ifndef asm_volatile_goto +-#define asm_volatile_goto(x...) asm goto(x) ++#ifndef asm_goto_output ++#define asm_goto_output(x...) asm goto(x) + #endif + + #endif /* __LINUX_COMPILER_TYPES_H */ +diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh +index 7bf56ea161e35..616d3581419ca 100755 +--- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh ++++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh +@@ -11,7 +11,7 @@ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \ + multiple_masks_test ctcam_edge_cases_test delta_simple_test \ + delta_two_masks_one_key_test delta_simple_rehash_test \ + bloom_simple_test bloom_complex_test bloom_delta_test \ +- max_erp_entries_test" ++ max_erp_entries_test max_group_size_test" + NUM_NETIFS=2 + source $lib_dir/lib.sh + source $lib_dir/tc_common.sh +@@ -1033,6 +1033,60 @@ max_erp_entries_test() + "max chain $chain_failed, mask $mask_failed" + } + ++max_group_size_test() ++{ ++ # The number of ACLs in an ACL group is limited. Once the maximum ++ # number of ACLs has been reached, filters cannot be added. This test ++ # verifies that when this limit is reached, insertion fails without ++ # crashing. ++ ++ RET=0 ++ ++ local num_acls=32 ++ local max_size ++ local ret ++ ++ if [[ "$tcflags" != "skip_sw" ]]; then ++ return 0; ++ fi ++ ++ for ((i=1; i < $num_acls; i++)); do ++ if [[ $(( i % 2 )) == 1 ]]; then ++ tc filter add dev $h2 ingress pref $i proto ipv4 \ ++ flower $tcflags dst_ip 198.51.100.1/32 \ ++ ip_proto tcp tcp_flags 0x01/0x01 \ ++ action drop &> /dev/null ++ else ++ tc filter add dev $h2 ingress pref $i proto ipv6 \ ++ flower $tcflags dst_ip 2001:db8:1::1/128 \ ++ action drop &> /dev/null ++ fi ++ ++ ret=$? ++ [[ $ret -ne 0 ]] && max_size=$((i - 1)) && break ++ done ++ ++ # We expect to exceed the maximum number of ACLs in a group, so that ++ # insertion eventually fails. Otherwise, the test should be adjusted to ++ # add more filters. ++ check_fail $ret "expected to exceed number of ACLs in a group" ++ ++ for ((; i >= 1; i--)); do ++ if [[ $(( i % 2 )) == 1 ]]; then ++ tc filter del dev $h2 ingress pref $i proto ipv4 \ ++ flower $tcflags dst_ip 198.51.100.1/32 \ ++ ip_proto tcp tcp_flags 0x01/0x01 \ ++ action drop &> /dev/null ++ else ++ tc filter del dev $h2 ingress pref $i proto ipv6 \ ++ flower $tcflags dst_ip 2001:db8:1::1/128 \ ++ action drop &> /dev/null ++ fi ++ done ++ ++ log_test "max ACL group size test ($tcflags). 
max size $max_size" ++} ++ + setup_prepare() + { + h1=${NETIFS[p1]} +diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c +index b5234d6efbe15..ec40a33c29fda 100644 +--- a/tools/testing/selftests/kvm/dirty_log_test.c ++++ b/tools/testing/selftests/kvm/dirty_log_test.c +@@ -226,13 +226,15 @@ static void clear_log_create_vm_done(struct kvm_vm *vm) + } + + static void dirty_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, +- void *bitmap, uint32_t num_pages) ++ void *bitmap, uint32_t num_pages, ++ uint32_t *unused) + { + kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap); + } + + static void clear_log_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, +- void *bitmap, uint32_t num_pages) ++ void *bitmap, uint32_t num_pages, ++ uint32_t *unused) + { + kvm_vm_get_dirty_log(vcpu->vm, slot, bitmap); + kvm_vm_clear_dirty_log(vcpu->vm, slot, bitmap, 0, num_pages); +@@ -329,10 +331,9 @@ static void dirty_ring_continue_vcpu(void) + } + + static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, +- void *bitmap, uint32_t num_pages) ++ void *bitmap, uint32_t num_pages, ++ uint32_t *ring_buf_idx) + { +- /* We only have one vcpu */ +- static uint32_t fetch_index = 0; + uint32_t count = 0, cleared; + bool continued_vcpu = false; + +@@ -349,11 +350,15 @@ static void dirty_ring_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, + + /* Only have one vcpu */ + count = dirty_ring_collect_one(vcpu_map_dirty_ring(vcpu), +- slot, bitmap, num_pages, &fetch_index); ++ slot, bitmap, num_pages, ++ ring_buf_idx); + + cleared = kvm_vm_reset_dirty_ring(vcpu->vm); + +- /* Cleared pages should be the same as collected */ ++ /* ++ * Cleared pages should be the same as collected, as KVM is supposed to ++ * clear only the entries that have been harvested. 
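/*
 * A sketch of the state change in dirty_ring_collect_dirty_pages(): the
 * fetch index used to be a function-local static, so it silently carried
 * over into later test runs; the fix makes the caller own it (ring_buf_idx)
 * so every run starts from a fresh index. The collector below is heavily
 * simplified.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8

/* harvest consecutive used slots; *fetch_index is owned by the caller */
static uint32_t collect(const int *ring, uint32_t *fetch_index)
{
	uint32_t count = 0;

	while (ring[*fetch_index % RING_SIZE]) {
		(*fetch_index)++;
		count++;
	}
	return count;
}

int main(void)
{
	int ring[RING_SIZE] = { 1, 1, 1, 0 };
	uint32_t ring_buf_idx = 0;	/* reset per run, unlike a static */

	printf("%u\n", collect(ring, &ring_buf_idx));	/* 3 */
	printf("%u\n", ring_buf_idx);			/* 3 */
	return 0;
}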
++ */ + TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch " + "with collected (%u)", cleared, count); + +@@ -392,12 +397,6 @@ static void dirty_ring_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err) + } + } + +-static void dirty_ring_before_vcpu_join(void) +-{ +- /* Kick another round of vcpu just to make sure it will quit */ +- sem_post(&sem_vcpu_cont); +-} +- + struct log_mode { + const char *name; + /* Return true if this mode is supported, otherwise false */ +@@ -406,10 +405,10 @@ struct log_mode { + void (*create_vm_done)(struct kvm_vm *vm); + /* Hook to collect the dirty pages into the bitmap provided */ + void (*collect_dirty_pages) (struct kvm_vcpu *vcpu, int slot, +- void *bitmap, uint32_t num_pages); ++ void *bitmap, uint32_t num_pages, ++ uint32_t *ring_buf_idx); + /* Hook to call when after each vcpu run */ + void (*after_vcpu_run)(struct kvm_vcpu *vcpu, int ret, int err); +- void (*before_vcpu_join) (void); + } log_modes[LOG_MODE_NUM] = { + { + .name = "dirty-log", +@@ -428,7 +427,6 @@ struct log_mode { + .supported = dirty_ring_supported, + .create_vm_done = dirty_ring_create_vm_done, + .collect_dirty_pages = dirty_ring_collect_dirty_pages, +- .before_vcpu_join = dirty_ring_before_vcpu_join, + .after_vcpu_run = dirty_ring_after_vcpu_run, + }, + }; +@@ -471,13 +469,14 @@ static void log_mode_create_vm_done(struct kvm_vm *vm) + } + + static void log_mode_collect_dirty_pages(struct kvm_vcpu *vcpu, int slot, +- void *bitmap, uint32_t num_pages) ++ void *bitmap, uint32_t num_pages, ++ uint32_t *ring_buf_idx) + { + struct log_mode *mode = &log_modes[host_log_mode]; + + TEST_ASSERT(mode->collect_dirty_pages != NULL, + "collect_dirty_pages() is required for any log mode!"); +- mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages); ++ mode->collect_dirty_pages(vcpu, slot, bitmap, num_pages, ring_buf_idx); + } + + static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err) +@@ -488,14 +487,6 @@ static void log_mode_after_vcpu_run(struct kvm_vcpu *vcpu, int ret, int err) + mode->after_vcpu_run(vcpu, ret, err); + } + +-static void log_mode_before_vcpu_join(void) +-{ +- struct log_mode *mode = &log_modes[host_log_mode]; +- +- if (mode->before_vcpu_join) +- mode->before_vcpu_join(); +-} +- + static void generate_random_array(uint64_t *guest_array, uint64_t size) + { + uint64_t i; +@@ -696,6 +687,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + unsigned long *bmap; ++ uint32_t ring_buf_idx = 0; ++ int sem_val; + + if (!log_mode_supported()) { + print_skip("Log mode '%s' not supported", +@@ -767,10 +760,21 @@ static void run_test(enum vm_guest_mode mode, void *arg) + /* Start the iterations */ + iteration = 1; + sync_global_to_guest(vm, iteration); +- host_quit = false; ++ WRITE_ONCE(host_quit, false); + host_dirty_count = 0; + host_clear_count = 0; + host_track_next_count = 0; ++ WRITE_ONCE(dirty_ring_vcpu_ring_full, false); ++ ++ /* ++ * Ensure the previous iteration didn't leave a dangling semaphore, i.e. ++ * that the main task and vCPU worker were synchronized and completed ++ * verification of all iterations. 
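/*
 * A standalone sketch of the new per-run sanity check: before the vCPU
 * worker is spawned, both semaphores must sit at zero, otherwise a previous
 * iteration left a dangling post behind. POSIX semaphores; build with
 * -pthread.
 */
#include <assert.h>
#include <semaphore.h>
#include <stdio.h>

int main(void)
{
	sem_t sem_vcpu_stop, sem_vcpu_cont;
	int sem_val;

	sem_init(&sem_vcpu_stop, 0, 0);
	sem_init(&sem_vcpu_cont, 0, 0);

	sem_getvalue(&sem_vcpu_stop, &sem_val);
	assert(sem_val == 0);		/* mirrors TEST_ASSERT_EQ(sem_val, 0) */
	sem_getvalue(&sem_vcpu_cont, &sem_val);
	assert(sem_val == 0);

	sem_post(&sem_vcpu_cont);	/* a dangling post from a prior run... */
	sem_getvalue(&sem_vcpu_cont, &sem_val);
	printf("dangling=%d\n", sem_val);	/* ...would show up as 1 here */

	sem_destroy(&sem_vcpu_stop);
	sem_destroy(&sem_vcpu_cont);
	return 0;
}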
++ */ ++ sem_getvalue(&sem_vcpu_stop, &sem_val); ++ TEST_ASSERT_EQ(sem_val, 0); ++ sem_getvalue(&sem_vcpu_cont, &sem_val); ++ TEST_ASSERT_EQ(sem_val, 0); + + pthread_create(&vcpu_thread, NULL, vcpu_worker, vcpu); + +@@ -778,7 +782,8 @@ static void run_test(enum vm_guest_mode mode, void *arg) + /* Give the vcpu thread some time to dirty some pages */ + usleep(p->interval * 1000); + log_mode_collect_dirty_pages(vcpu, TEST_MEM_SLOT_INDEX, +- bmap, host_num_pages); ++ bmap, host_num_pages, ++ &ring_buf_idx); + + /* + * See vcpu_sync_stop_requested definition for details on why +@@ -796,15 +801,21 @@ static void run_test(enum vm_guest_mode mode, void *arg) + assert(host_log_mode == LOG_MODE_DIRTY_RING || + atomic_read(&vcpu_sync_stop_requested) == false); + vm_dirty_log_verify(mode, bmap); +- sem_post(&sem_vcpu_cont); + +- iteration++; ++ /* ++ * Set host_quit before sem_vcpu_cont in the final iteration to ++ * ensure that the vCPU worker doesn't resume the guest. As ++ * above, the dirty ring test may stop and wait even when not ++ * explicitly request to do so, i.e. would hang waiting for a ++ * "continue" if it's allowed to resume the guest. ++ */ ++ if (++iteration == p->iterations) ++ WRITE_ONCE(host_quit, true); ++ ++ sem_post(&sem_vcpu_cont); + sync_global_to_guest(vm, iteration); + } + +- /* Tell the vcpu thread to quit */ +- host_quit = true; +- log_mode_before_vcpu_join(); + pthread_join(vcpu_thread, NULL); + + pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), " +diff --git a/tools/testing/selftests/net/mptcp/config b/tools/testing/selftests/net/mptcp/config +index e317c2e44dae8..4f80014cae494 100644 +--- a/tools/testing/selftests/net/mptcp/config ++++ b/tools/testing/selftests/net/mptcp/config +@@ -22,8 +22,11 @@ CONFIG_NFT_TPROXY=m + CONFIG_NFT_SOCKET=m + CONFIG_IP_ADVANCED_ROUTER=y + CONFIG_IP_MULTIPLE_TABLES=y ++CONFIG_IP_NF_FILTER=m ++CONFIG_IP_NF_MANGLE=m + CONFIG_IP_NF_TARGET_REJECT=m + CONFIG_IPV6_MULTIPLE_TABLES=y ++CONFIG_IP6_NF_FILTER=m + CONFIG_NET_ACT_CSUM=m + CONFIG_NET_ACT_PEDIT=m + CONFIG_NET_CLS_ACT=y +diff --git a/tools/testing/selftests/net/mptcp/settings b/tools/testing/selftests/net/mptcp/settings +index 79b65bdf05db6..abc5648b59abd 100644 +--- a/tools/testing/selftests/net/mptcp/settings ++++ b/tools/testing/selftests/net/mptcp/settings +@@ -1 +1 @@ +-timeout=1200 ++timeout=1800 +diff --git a/tools/testing/selftests/vm/ksm_tests.c b/tools/testing/selftests/vm/ksm_tests.c +index 0d85be2350fa3..a811659307855 100644 +--- a/tools/testing/selftests/vm/ksm_tests.c ++++ b/tools/testing/selftests/vm/ksm_tests.c +@@ -470,7 +470,7 @@ static int ksm_merge_hugepages_time(int mapping, int prot, int timeout, size_t m + if (map_ptr_orig == MAP_FAILED) + err(2, "initial mmap"); + +- if (madvise(map_ptr, len + HPAGE_SIZE, MADV_HUGEPAGE)) ++ if (madvise(map_ptr, len, MADV_HUGEPAGE)) + err(2, "MADV_HUGEPAGE"); + + pagemap_fd = open("/proc/self/pagemap", O_RDONLY); +diff --git a/tools/testing/selftests/vm/va_128TBswitch.sh b/tools/testing/selftests/vm/va_128TBswitch.sh +index 41580751dc511..231622b3a2327 100755 +--- a/tools/testing/selftests/vm/va_128TBswitch.sh ++++ b/tools/testing/selftests/vm/va_128TBswitch.sh +@@ -29,9 +29,15 @@ check_supported_x86_64() + # See man 1 gzip under '-f'. 
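/*
 * A sketch of the shutdown ordering the dirty_log_test hunk establishes: the
 * main thread must publish host_quit BEFORE posting sem_vcpu_cont on the
 * final iteration, so the worker wakes, sees the flag and exits instead of
 * re-entering the guest and blocking again. C11 atomics stand in for the
 * selftest's WRITE_ONCE/READ_ONCE; build with -pthread.
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool host_quit;
static sem_t sem_vcpu_cont;

static void *vcpu_worker(void *arg)
{
	(void)arg;
	for (;;) {
		sem_wait(&sem_vcpu_cont);	/* wait for "continue" */
		if (atomic_load(&host_quit))	/* flag is already visible */
			break;
		/* ... would resume the guest here ... */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&sem_vcpu_cont, 0, 0);
	pthread_create(&t, NULL, vcpu_worker, NULL);

	atomic_store(&host_quit, true);	/* 1) publish the quit flag */
	sem_post(&sem_vcpu_cont);	/* 2) then wake the worker */

	pthread_join(t, NULL);		/* joins without an extra kick */
	puts("joined");
	sem_destroy(&sem_vcpu_cont);
	return 0;
}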
+ local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2) + ++ local cpu_supports_pl5=$(awk '/^flags/ {if (/la57/) {print 0;} ++ else {print 1}; exit}' /proc/cpuinfo 2>/dev/null) ++ + if [[ "${pg_table_levels}" -lt 5 ]]; then + echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test" + exit $ksft_skip ++ elif [[ "${cpu_supports_pl5}" -ne 0 ]]; then ++ echo "$0: CPU does not have the necessary la57 flag to support page table level 5" ++ exit $ksft_skip + fi + } + +diff --git a/tools/tracing/rtla/Makefile b/tools/tracing/rtla/Makefile +index 22e28b76f8004..6912e9577b658 100644 +--- a/tools/tracing/rtla/Makefile ++++ b/tools/tracing/rtla/Makefile +@@ -28,10 +28,15 @@ FOPTS := -flto=auto -ffat-lto-objects -fexceptions -fstack-protector-strong \ + -fasynchronous-unwind-tables -fstack-clash-protection + WOPTS := -Wall -Werror=format-security -Wp,-D_FORTIFY_SOURCE=2 -Wp,-D_GLIBCXX_ASSERTIONS -Wno-maybe-uninitialized + ++ifeq ($(CC),clang) ++ FOPTS := $(filter-out -ffat-lto-objects, $(FOPTS)) ++ WOPTS := $(filter-out -Wno-maybe-uninitialized, $(WOPTS)) ++endif ++ + TRACEFS_HEADERS := $$($(PKG_CONFIG) --cflags libtracefs) + + CFLAGS := -O -g -DVERSION=\"$(VERSION)\" $(FOPTS) $(MOPTS) $(WOPTS) $(TRACEFS_HEADERS) $(EXTRA_CFLAGS) +-LDFLAGS := -ggdb $(EXTRA_LDFLAGS) ++LDFLAGS := -flto=auto -ggdb $(EXTRA_LDFLAGS) + LIBS := $$($(PKG_CONFIG) --libs libtracefs) + + SRC := $(wildcard src/*.c) +diff --git a/tools/tracing/rtla/src/osnoise_hist.c b/tools/tracing/rtla/src/osnoise_hist.c +index fe34452fc4ec0..b9658f213cb55 100644 +--- a/tools/tracing/rtla/src/osnoise_hist.c ++++ b/tools/tracing/rtla/src/osnoise_hist.c +@@ -129,8 +129,7 @@ static void osnoise_hist_update_multiple(struct osnoise_tool *tool, int cpu, + if (params->output_divisor) + duration = duration / params->output_divisor; + +- if (data->bucket_size) +- bucket = duration / data->bucket_size; ++ bucket = duration / data->bucket_size; + + total_duration = duration * count; + +@@ -472,7 +471,11 @@ static void osnoise_hist_usage(char *usage) + + for (i = 0; msg[i]; i++) + fprintf(stderr, "%s\n", msg[i]); +- exit(1); ++ ++ if (usage) ++ exit(EXIT_FAILURE); ++ ++ exit(EXIT_SUCCESS); + } + + /* +diff --git a/tools/tracing/rtla/src/osnoise_top.c b/tools/tracing/rtla/src/osnoise_top.c +index 76479bfb29224..6c07f360de72c 100644 +--- a/tools/tracing/rtla/src/osnoise_top.c ++++ b/tools/tracing/rtla/src/osnoise_top.c +@@ -282,7 +282,11 @@ void osnoise_top_usage(char *usage) + + for (i = 0; msg[i]; i++) + fprintf(stderr, "%s\n", msg[i]); +- exit(1); ++ ++ if (usage) ++ exit(EXIT_FAILURE); ++ ++ exit(EXIT_SUCCESS); + } + + /* +diff --git a/tools/tracing/rtla/src/timerlat_hist.c b/tools/tracing/rtla/src/timerlat_hist.c +index 4b48af8a83096..ed08295bfa12c 100644 +--- a/tools/tracing/rtla/src/timerlat_hist.c ++++ b/tools/tracing/rtla/src/timerlat_hist.c +@@ -151,8 +151,7 @@ timerlat_hist_update(struct osnoise_tool *tool, int cpu, + if (params->output_divisor) + latency = latency / params->output_divisor; + +- if (data->bucket_size) +- bucket = latency / data->bucket_size; ++ bucket = latency / data->bucket_size; + + if (!thread) { + hist = data->hist[cpu].irq; +@@ -475,7 +474,11 @@ static void timerlat_hist_usage(char *usage) + + for (i = 0; msg[i]; i++) + fprintf(stderr, "%s\n", msg[i]); +- exit(1); ++ ++ if (usage) ++ exit(EXIT_FAILURE); ++ ++ exit(EXIT_SUCCESS); + } + + /* +diff --git a/tools/tracing/rtla/src/timerlat_top.c b/tools/tracing/rtla/src/timerlat_top.c +index 3342719352222..8fc0f6aa19dad 100644 +--- 
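/*
 * A sketch of the exit-status fix applied to all four rtla usage() helpers
 * below: the same function prints the help text both for -h (a successful
 * run) and for bad arguments (an error), so the status must depend on
 * whether an error message was passed in. Tool name and options here are
 * illustrative.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void usage(const char *err)
{
	static const char *const msg[] = {
		"usage: rtla-sketch [-h] [-d seconds]",
		NULL,
	};

	if (err)
		fprintf(stderr, "%s\n", err);
	for (int i = 0; msg[i]; i++)
		fprintf(stderr, "%s\n", msg[i]);

	exit(err ? EXIT_FAILURE : EXIT_SUCCESS);
}

int main(int argc, char **argv)
{
	if (argc > 1 && strcmp(argv[1], "-h") == 0)
		usage(NULL);			/* help requested: exit 0 */
	usage("missing required argument");	/* misuse: exit 1 */
}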
a/tools/tracing/rtla/src/timerlat_top.c ++++ b/tools/tracing/rtla/src/timerlat_top.c +@@ -305,7 +305,11 @@ static void timerlat_top_usage(char *usage) + + for (i = 0; msg[i]; i++) + fprintf(stderr, "%s\n", msg[i]); +- exit(1); ++ ++ if (usage) ++ exit(EXIT_FAILURE); ++ ++ exit(EXIT_SUCCESS); + } + + /* +diff --git a/tools/tracing/rtla/src/utils.c b/tools/tracing/rtla/src/utils.c +index 663a047f794d2..8c8d63c7196cf 100644 +--- a/tools/tracing/rtla/src/utils.c ++++ b/tools/tracing/rtla/src/utils.c +@@ -243,12 +243,6 @@ static inline int sched_setattr(pid_t pid, const struct sched_attr *attr, + return syscall(__NR_sched_setattr, pid, attr, flags); + } + +-static inline int sched_getattr(pid_t pid, struct sched_attr *attr, +- unsigned int size, unsigned int flags) +-{ +- return syscall(__NR_sched_getattr, pid, attr, size, flags); +-} +- + int __set_sched_attr(int pid, struct sched_attr *attr) + { + int flags = 0; +@@ -484,13 +478,13 @@ int parse_prio(char *arg, struct sched_attr *sched_param) + if (prio == INVALID_VAL) + return -1; + +- if (prio < sched_get_priority_min(SCHED_OTHER)) ++ if (prio < MIN_NICE) + return -1; +- if (prio > sched_get_priority_max(SCHED_OTHER)) ++ if (prio > MAX_NICE) + return -1; + + sched_param->sched_policy = SCHED_OTHER; +- sched_param->sched_priority = prio; ++ sched_param->sched_nice = prio; + break; + default: + return -1; +diff --git a/tools/tracing/rtla/src/utils.h b/tools/tracing/rtla/src/utils.h +index 5571afd3b5498..92da41aaf4c4c 100644 +--- a/tools/tracing/rtla/src/utils.h ++++ b/tools/tracing/rtla/src/utils.h +@@ -7,6 +7,8 @@ + */ + #define BUFF_U64_STR_SIZE 24 + #define MAX_PATH 1024 ++#define MAX_NICE 20 ++#define MIN_NICE -19 + + #define container_of(ptr, type, member)({ \ + const typeof(((type *)0)->member) *__mptr = (ptr); \ |
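/*
 * A sketch of the parse_prio() change for SCHED_OTHER in rtla's utils.c:
 * SCHED_OTHER has a single static priority, so the user-supplied value is a
 * nice level and belongs in sched_attr.sched_nice, bounded by the MAX_NICE /
 * MIN_NICE limits the hunk adds to utils.h. The struct here is a trimmed
 * stand-in for the kernel's struct sched_attr.
 */
#include <stdio.h>

#define SCHED_OTHER	0
#define MAX_NICE	20
#define MIN_NICE	-19

struct sched_attr_sketch {
	unsigned int sched_policy;
	int sched_nice;		/* used instead of sched_priority */
};

static int parse_other_prio(int nice, struct sched_attr_sketch *attr)
{
	if (nice < MIN_NICE || nice > MAX_NICE)
		return -1;
	attr->sched_policy = SCHED_OTHER;
	attr->sched_nice = nice;
	return 0;
}

int main(void)
{
	struct sched_attr_sketch attr;

	printf("%d\n", parse_other_prio(-5, &attr));	/*  0 */
	printf("%d\n", attr.sched_nice);		/* -5 */
	printf("%d\n", parse_other_prio(99, &attr));	/* -1 */
	return 0;
}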