author     Mike Pagano <mpagano@gentoo.org>  2020-01-27 09:25:03 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2020-01-27 09:25:03 -0500
commit     6fac29ab3b07663ad8abad37e43b857ad95a2261 (patch)
tree       127190d01df36ef7e0f2cff5f2d105d2fb4f4ba0
parent     Linux patch 4.19.98 (diff)
download   linux-patches-4.19-98.tar.gz
           linux-patches-4.19-98.tar.bz2
           linux-patches-4.19-98.zip
Linux patch 4.19.99
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README              |     4
-rw-r--r--  1098_linux-4.19.99.patch | 20747
2 files changed, 20751 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index b2dcc203..cae34388 100644
--- a/0000_README
+++ b/0000_README
@@ -431,6 +431,10 @@ Patch: 1097_linux-4.19.98.patch
From: https://www.kernel.org
Desc: Linux 4.19.98
+Patch: 1098_linux-4.19.99.patch
+From: https://www.kernel.org
+Desc: Linux 4.19.99
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1098_linux-4.19.99.patch b/1098_linux-4.19.99.patch
new file mode 100644
index 00000000..4c7a117a
--- /dev/null
+++ b/1098_linux-4.19.99.patch
@@ -0,0 +1,20747 @@
+diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
+index a5b4f223641d..8127a08e366d 100644
+--- a/Documentation/ABI/testing/sysfs-bus-iio
++++ b/Documentation/ABI/testing/sysfs-bus-iio
+@@ -199,7 +199,7 @@ Description:
+
+ What: /sys/bus/iio/devices/iio:deviceX/in_positionrelative_x_raw
+ What: /sys/bus/iio/devices/iio:deviceX/in_positionrelative_y_raw
+-KernelVersion: 4.18
++KernelVersion: 4.19
+ Contact: linux-iio@vger.kernel.org
+ Description:
+ Relative position in direction x or y on a pad (may be
+diff --git a/Documentation/devicetree/bindings/bus/ti-sysc.txt b/Documentation/devicetree/bindings/bus/ti-sysc.txt
+index 91dc2333af01..85a23f551f02 100644
+--- a/Documentation/devicetree/bindings/bus/ti-sysc.txt
++++ b/Documentation/devicetree/bindings/bus/ti-sysc.txt
+@@ -35,6 +35,7 @@ Required standard properties:
+ "ti,sysc-omap3-sham"
+ "ti,sysc-omap-aes"
+ "ti,sysc-mcasp"
++ "ti,sysc-dra7-mcasp"
+ "ti,sysc-usb-host-fs"
+ "ti,sysc-dra7-mcan"
+
+diff --git a/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt b/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt
+new file mode 100644
+index 000000000000..f315c9723bd2
+--- /dev/null
++++ b/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt
+@@ -0,0 +1,27 @@
++OMAP ROM RNG driver binding
++
++Secure SoCs may provide RNG via secure ROM calls like Nokia N900 does. The
++implementation can depend on the SoC secure ROM used.
++
++- compatible:
++ Usage: required
++ Value type: <string>
++ Definition: must be "nokia,n900-rom-rng"
++
++- clocks:
++ Usage: required
++ Value type: <prop-encoded-array>
++ Definition: reference to the RNG interface clock
++
++- clock-names:
++ Usage: required
++ Value type: <stringlist>
++ Definition: must be "ick"
++
++Example:
++
++ rom_rng: rng {
++ compatible = "nokia,n900-rom-rng";
++ clocks = <&rng_ick>;
++ clock-names = "ick";
++ };
+diff --git a/Makefile b/Makefile
+index 48dbafb790ff..a2be0c79eeb8 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 19
+-SUBLEVEL = 98
++SUBLEVEL = 99
+ EXTRAVERSION =
+ NAME = "People's Front"
+
+@@ -964,6 +964,7 @@ ifdef CONFIG_STACK_VALIDATION
+ endif
+ endif
+
++PHONY += prepare0
+
+ ifeq ($(KBUILD_EXTMOD),)
+ core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
+@@ -1072,8 +1073,7 @@ scripts: scripts_basic asm-generic gcc-plugins $(autoksyms_h)
+ # archprepare is used in arch Makefiles and when processed asm symlink,
+ # version.h and scripts_basic is processed / created.
+
+-# Listed in dependency order
+-PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
++PHONY += prepare archprepare prepare1 prepare2 prepare3
+
+ # prepare3 is used to check if we are building in a separate output directory,
+ # and if so do:
+diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
+index d107459fc0f8..f2e1015d75ab 100644
+--- a/arch/arm/boot/dts/aspeed-g5.dtsi
++++ b/arch/arm/boot/dts/aspeed-g5.dtsi
+@@ -247,7 +247,7 @@
+ compatible = "aspeed,ast2500-gpio";
+ reg = <0x1e780000 0x1000>;
+ interrupts = <20>;
+- gpio-ranges = <&pinctrl 0 0 220>;
++ gpio-ranges = <&pinctrl 0 0 232>;
+ clocks = <&syscon ASPEED_CLK_APB>;
+ interrupt-controller;
+ };
+diff --git a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
+index af9f38456d04..4308a07b792e 100644
+--- a/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
++++ b/arch/arm/boot/dts/at91-nattis-2-natte-2.dts
+@@ -38,14 +38,16 @@
+ atmel,pins =
+ <AT91_PIOA 21
+ AT91_PERIPH_GPIO
+- AT91_PINCTRL_OUTPUT_VAL(0)>;
++ (AT91_PINCTRL_OUTPUT |
++ AT91_PINCTRL_OUTPUT_VAL(0))>;
+ };
+
+ pinctrl_lcd_hipow0: lcd_hipow0 {
+ atmel,pins =
+ <AT91_PIOA 23
+ AT91_PERIPH_GPIO
+- AT91_PINCTRL_OUTPUT_VAL(0)>;
++ (AT91_PINCTRL_OUTPUT |
++ AT91_PINCTRL_OUTPUT_VAL(0))>;
+ };
+ };
+ };
+@@ -219,6 +221,7 @@
+ reg = <0>;
+ bus-width = <4>;
+ cd-gpios = <&pioD 5 GPIO_ACTIVE_HIGH>;
++ cd-inverted;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi
+index cb2d6d78a7fb..c481eab1bd7c 100644
+--- a/arch/arm/boot/dts/bcm2835-rpi.dtsi
++++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi
+@@ -32,7 +32,7 @@
+
+ mailbox@7e00b840 {
+ compatible = "brcm,bcm2835-vchiq";
+- reg = <0x7e00b840 0xf>;
++ reg = <0x7e00b840 0x3c>;
+ interrupts = <0 2>;
+ };
+ };
+diff --git a/arch/arm/boot/dts/iwg20d-q7-common.dtsi b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+index 5cae74eb6cdd..a2c9a1e88c1a 100644
+--- a/arch/arm/boot/dts/iwg20d-q7-common.dtsi
++++ b/arch/arm/boot/dts/iwg20d-q7-common.dtsi
+@@ -87,7 +87,7 @@
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+
+- gpios = <&gpio2 30 GPIO_ACTIVE_LOW>;
++ gpios = <&gpio2 30 GPIO_ACTIVE_HIGH>;
+ gpios-states = <1>;
+ states = <3300000 1
+ 1800000 0>;
+diff --git a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
+index 4990ed90dcea..3e39b9a1f35d 100644
+--- a/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
++++ b/arch/arm/boot/dts/logicpd-som-lv-baseboard.dtsi
+@@ -153,7 +153,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
+- cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */
++ cd-gpios = <&gpio4 14 GPIO_ACTIVE_LOW>; /* gpio_110 */
+ vmmc-supply = <&vmmc1>;
+ bus-width = <4>;
+ cap-power-off-card;
+diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+index 98b682a8080c..c5d54c4d3747 100644
+--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
++++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+@@ -232,6 +232,20 @@
+ >;
+ };
+
++ i2c2_pins: pinmux_i2c2_pins {
++ pinctrl-single,pins = <
++ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
++ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
++ >;
++ };
++
++ i2c3_pins: pinmux_i2c3_pins {
++ pinctrl-single,pins = <
++ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
++ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
++ >;
++ };
++
+ tsc2004_pins: pinmux_tsc2004_pins {
+ pinctrl-single,pins = <
+ OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE4) /* mcbsp4_dr.gpio_153 */
+@@ -253,18 +267,6 @@
+ OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
+ >;
+ };
+- i2c2_pins: pinmux_i2c2_pins {
+- pinctrl-single,pins = <
+- OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
+- OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
+- >;
+- };
+- i2c3_pins: pinmux_i2c3_pins {
+- pinctrl-single,pins = <
+- OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
+- OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
+- >;
+- };
+ };
+
+ &omap3_pmx_core2 {
+diff --git a/arch/arm/boot/dts/lpc3250-phy3250.dts b/arch/arm/boot/dts/lpc3250-phy3250.dts
+index 1e1c2f517a82..ffcf78631b22 100644
+--- a/arch/arm/boot/dts/lpc3250-phy3250.dts
++++ b/arch/arm/boot/dts/lpc3250-phy3250.dts
+@@ -49,8 +49,8 @@
+ sd_reg: regulator@2 {
+ compatible = "regulator-fixed";
+ regulator-name = "sd_reg";
+- regulator-min-microvolt = <1800000>;
+- regulator-max-microvolt = <1800000>;
++ regulator-min-microvolt = <3300000>;
++ regulator-max-microvolt = <3300000>;
+ gpio = <&gpio 5 5 0>;
+ enable-active-high;
+ };
+diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
+index ed0d6fb20122..9ad3df11db0d 100644
+--- a/arch/arm/boot/dts/lpc32xx.dtsi
++++ b/arch/arm/boot/dts/lpc32xx.dtsi
+@@ -139,11 +139,11 @@
+ };
+
+ clcd: clcd@31040000 {
+- compatible = "arm,pl110", "arm,primecell";
++ compatible = "arm,pl111", "arm,primecell";
+ reg = <0x31040000 0x1000>;
+ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&clk LPC32XX_CLK_LCD>;
+- clock-names = "apb_pclk";
++ clocks = <&clk LPC32XX_CLK_LCD>, <&clk LPC32XX_CLK_LCD>;
++ clock-names = "clcdclk", "apb_pclk";
+ status = "disabled";
+ };
+
+@@ -462,7 +462,9 @@
+ key: key@40050000 {
+ compatible = "nxp,lpc3220-key";
+ reg = <0x40050000 0x1000>;
+- interrupts = <54 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&clk LPC32XX_CLK_KEY>;
++ interrupt-parent = <&sic1>;
++ interrupts = <22 IRQ_TYPE_LEVEL_HIGH>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/ls1021a-twr.dts b/arch/arm/boot/dts/ls1021a-twr.dts
+index f0c949d74833..ec5afad3efd8 100644
+--- a/arch/arm/boot/dts/ls1021a-twr.dts
++++ b/arch/arm/boot/dts/ls1021a-twr.dts
+@@ -143,7 +143,7 @@
+ };
+
+ &enet0 {
+- tbi-handle = <&tbi1>;
++ tbi-handle = <&tbi0>;
+ phy-handle = <&sgmii_phy2>;
+ phy-connection-type = "sgmii";
+ status = "okay";
+@@ -222,6 +222,13 @@
+ sgmii_phy2: ethernet-phy@2 {
+ reg = <0x2>;
+ };
++ tbi0: tbi-phy@1f {
++ reg = <0x1f>;
++ device_type = "tbi-phy";
++ };
++};
++
++&mdio1 {
+ tbi1: tbi-phy@1f {
+ reg = <0x1f>;
+ device_type = "tbi-phy";
+diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
+index f18490548c78..7e22309bccac 100644
+--- a/arch/arm/boot/dts/ls1021a.dtsi
++++ b/arch/arm/boot/dts/ls1021a.dtsi
+@@ -584,7 +584,7 @@
+ };
+
+ mdio0: mdio@2d24000 {
+- compatible = "gianfar";
++ compatible = "fsl,etsec2-mdio";
+ device_type = "mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -592,6 +592,15 @@
+ <0x0 0x2d10030 0x0 0x4>;
+ };
+
++ mdio1: mdio@2d64000 {
++ compatible = "fsl,etsec2-mdio";
++ device_type = "mdio";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ reg = <0x0 0x2d64000 0x0 0x4000>,
++ <0x0 0x2d50030 0x0 0x4>;
++ };
++
+ ptp_clock@2d10e00 {
+ compatible = "fsl,etsec-ptp";
+ reg = <0x0 0x2d10e00 0x0 0xb0>;
+diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
+index 182a53991c90..37785e7d1238 100644
+--- a/arch/arm/boot/dts/omap3-n900.dts
++++ b/arch/arm/boot/dts/omap3-n900.dts
+@@ -158,6 +158,12 @@
+ pwms = <&pwm9 0 26316 0>; /* 38000 Hz */
+ };
+
++ rom_rng: rng {
++ compatible = "nokia,n900-rom-rng";
++ clocks = <&rng_ick>;
++ clock-names = "ick";
++ };
++
+ /* controlled (enabled/disabled) directly by bcm2048 and wl1251 */
+ vctcxo: vctcxo {
+ compatible = "fixed-clock";
+diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi
+index 24715f74ae08..5015e2273d82 100644
+--- a/arch/arm/boot/dts/r8a7743.dtsi
++++ b/arch/arm/boot/dts/r8a7743.dtsi
+@@ -565,9 +565,7 @@
+ /* doesn't need pinmux */
+ #address-cells = <1>;
+ #size-cells = <0>;
+- compatible = "renesas,iic-r8a7743",
+- "renesas,rcar-gen2-iic",
+- "renesas,rmobile-iic";
++ compatible = "renesas,iic-r8a7743";
+ reg = <0 0xe60b0000 0 0x425>;
+ interrupts = <GIC_SPI 173 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 926>;
+diff --git a/arch/arm/boot/dts/stm32h743i-eval.dts b/arch/arm/boot/dts/stm32h743i-eval.dts
+index 3f8e0c4a998d..5bf64e63cdf3 100644
+--- a/arch/arm/boot/dts/stm32h743i-eval.dts
++++ b/arch/arm/boot/dts/stm32h743i-eval.dts
+@@ -79,6 +79,7 @@
+ };
+
+ &adc_12 {
++ vdda-supply = <&vdda>;
+ vref-supply = <&vdda>;
+ status = "okay";
+ adc1: adc@0 {
+diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+index c16ffcc4db7d..1efad1a6bcfd 100644
+--- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi
++++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi
+@@ -155,6 +155,21 @@
+ #dma-cells = <1>;
+ };
+
++ nfc: nand@1c03000 {
++ compatible = "allwinner,sun4i-a10-nand";
++ reg = <0x01c03000 0x1000>;
++ interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
++ clocks = <&ccu CLK_BUS_NAND>, <&ccu CLK_NAND>;
++ clock-names = "ahb", "mod";
++ resets = <&ccu RST_BUS_NAND>;
++ reset-names = "ahb";
++ pinctrl-names = "default";
++ pinctrl-0 = <&nand_pins &nand_pins_cs0 &nand_pins_rb0>;
++ status = "disabled";
++ #address-cells = <1>;
++ #size-cells = <0>;
++ };
++
+ mmc0: mmc@1c0f000 {
+ compatible = "allwinner,sun7i-a20-mmc";
+ reg = <0x01c0f000 0x1000>;
+@@ -212,21 +227,6 @@
+ #size-cells = <0>;
+ };
+
+- nfc: nand@1c03000 {
+- compatible = "allwinner,sun4i-a10-nand";
+- reg = <0x01c03000 0x1000>;
+- interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&ccu CLK_BUS_NAND>, <&ccu CLK_NAND>;
+- clock-names = "ahb", "mod";
+- resets = <&ccu RST_BUS_NAND>;
+- reset-names = "ahb";
+- pinctrl-names = "default";
+- pinctrl-0 = <&nand_pins &nand_pins_cs0 &nand_pins_rb0>;
+- status = "disabled";
+- #address-cells = <1>;
+- #size-cells = <0>;
+- };
+-
+ usb_otg: usb@1c19000 {
+ /* compatible gets set in SoC specific dtsi file */
+ reg = <0x01c19000 0x0400>;
+diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+index 25540b7694d5..6523d81dd9c4 100644
+--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
++++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+@@ -90,6 +90,8 @@
+ wifi_pwrseq: wifi_pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ reset-gpios = <&r_pio 0 7 GPIO_ACTIVE_LOW>; /* PL7 */
++ clocks = <&rtc 1>;
++ clock-names = "ext_clock";
+ };
+
+ sound_spdif {
+@@ -155,6 +157,8 @@
+
+ &mmc1 {
+ vmmc-supply = <&reg_vcc3v3>;
++ vqmmc-supply = <&reg_vcc3v3>;
++ mmc-pwrseq = <&wifi_pwrseq>;
+ bus-width = <4>;
+ non-removable;
+ status = "okay";
+diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
+index 58a199b0e494..d1e58a6a4343 100644
+--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
++++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
+@@ -82,7 +82,7 @@
+
+ reg_usb1_vbus: usb1-vbus {
+ compatible = "regulator-fixed";
+- pinctrl-names = "default";
++ regulator-name = "usb1-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+@@ -91,7 +91,7 @@
+
+ reg_usb3_vbus: usb3-vbus {
+ compatible = "regulator-fixed";
+- pinctrl-names = "default";
++ regulator-name = "usb3-vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ enable-active-high;
+diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
+index ad574d20415c..1b1b82b37ce0 100644
+--- a/arch/arm/common/mcpm_entry.c
++++ b/arch/arm/common/mcpm_entry.c
+@@ -381,7 +381,7 @@ static int __init nocache_trampoline(unsigned long _arg)
+ unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+ phys_reset_t phys_reset;
+
+- mcpm_set_entry_vector(cpu, cluster, cpu_resume);
++ mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
+ setup_mm_for_reboot();
+
+ __mcpm_cpu_going_down(cpu, cluster);
+diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
+index 6aa7046fb91f..bd6440f23493 100644
+--- a/arch/arm/configs/qcom_defconfig
++++ b/arch/arm/configs/qcom_defconfig
+@@ -207,6 +207,7 @@ CONFIG_MSM_MMCC_8974=y
+ CONFIG_MSM_IOMMU=y
+ CONFIG_HWSPINLOCK=y
+ CONFIG_HWSPINLOCK_QCOM=y
++CONFIG_MAILBOX=y
+ CONFIG_REMOTEPROC=y
+ CONFIG_QCOM_ADSP_PIL=y
+ CONFIG_QCOM_Q6V5_PIL=y
+diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
+index 452bbdcbcc83..506314265c6f 100644
+--- a/arch/arm/include/asm/suspend.h
++++ b/arch/arm/include/asm/suspend.h
+@@ -10,6 +10,7 @@ struct sleep_save_sp {
+ };
+
+ extern void cpu_resume(void);
++extern void cpu_resume_no_hyp(void);
+ extern void cpu_resume_arm(void);
+ extern int cpu_suspend(unsigned long, int (*)(unsigned long));
+
+diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
+index 326a97aa3ea0..22efcf48604c 100644
+--- a/arch/arm/kernel/head-nommu.S
++++ b/arch/arm/kernel/head-nommu.S
+@@ -441,8 +441,8 @@ M_CLASS(str r6, [r12, #PMSAv8_RLAR_A(3)])
+ str r5, [r12, #PMSAv8_RBAR_A(0)]
+ str r6, [r12, #PMSAv8_RLAR_A(0)]
+ #else
+- mcr p15, 0, r5, c6, c10, 1 @ PRBAR4
+- mcr p15, 0, r6, c6, c10, 2 @ PRLAR4
++ mcr p15, 0, r5, c6, c10, 0 @ PRBAR4
++ mcr p15, 0, r6, c6, c10, 1 @ PRLAR4
+ #endif
+ #endif
+ ret lr
+diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
+index 60146e32619a..82a942894fc0 100644
+--- a/arch/arm/kernel/hyp-stub.S
++++ b/arch/arm/kernel/hyp-stub.S
+@@ -180,8 +180,8 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
+ @ Check whether GICv3 system registers are available
+ mrc p15, 0, r7, c0, c1, 1 @ ID_PFR1
+ ubfx r7, r7, #28, #4
+- cmp r7, #1
+- bne 2f
++ teq r7, #0
++ beq 2f
+
+ @ Enable system register accesses
+ mrc p15, 4, r7, c12, c9, 5 @ ICC_HSRE
+diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
+index a8257fc9cf2a..5dc8b80bb693 100644
+--- a/arch/arm/kernel/sleep.S
++++ b/arch/arm/kernel/sleep.S
+@@ -120,6 +120,14 @@ ENDPROC(cpu_resume_after_mmu)
+ .text
+ .align
+
++#ifdef CONFIG_MCPM
++ .arm
++THUMB( .thumb )
++ENTRY(cpu_resume_no_hyp)
++ARM_BE8(setend be) @ ensure we are in BE mode
++ b no_hyp
++#endif
++
+ #ifdef CONFIG_MMU
+ .arm
+ ENTRY(cpu_resume_arm)
+@@ -135,6 +143,7 @@ ARM_BE8(setend be) @ ensure we are in BE mode
+ bl __hyp_stub_install_secondary
+ #endif
+ safe_svcmode_maskall r1
++no_hyp:
+ mov r1, #0
+ ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+ ALT_UP_B(1f)
+@@ -163,6 +172,9 @@ ENDPROC(cpu_resume)
+
+ #ifdef CONFIG_MMU
+ ENDPROC(cpu_resume_arm)
++#endif
++#ifdef CONFIG_MCPM
++ENDPROC(cpu_resume_no_hyp)
+ #endif
+
+ .align 2
+diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
+index f4dd7f9663c1..e8cda5e02b4e 100644
+--- a/arch/arm/kernel/vdso.c
++++ b/arch/arm/kernel/vdso.c
+@@ -205,7 +205,6 @@ static int __init vdso_init(void)
+ }
+
+ text_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+- pr_debug("vdso: %i text pages at base %p\n", text_pages, vdso_start);
+
+ /* Allocate the VDSO text pagelist */
+ vdso_text_pagelist = kcalloc(text_pages, sizeof(struct page *),
+diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
+index ec3789ba17b8..a8269f0a87ce 100644
+--- a/arch/arm/mach-omap2/omap_hwmod.c
++++ b/arch/arm/mach-omap2/omap_hwmod.c
+@@ -2430,7 +2430,7 @@ static void _setup_iclk_autoidle(struct omap_hwmod *oh)
+ */
+ static int _setup_reset(struct omap_hwmod *oh)
+ {
+- int r;
++ int r = 0;
+
+ if (oh->_state != _HWMOD_STATE_INITIALIZED)
+ return -EINVAL;
+diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
+index dae726228770..b57faa2310a2 100644
+--- a/arch/arm/mach-omap2/pdata-quirks.c
++++ b/arch/arm/mach-omap2/pdata-quirks.c
+@@ -263,14 +263,6 @@ static void __init am3517_evm_legacy_init(void)
+ am35xx_emac_reset();
+ }
+
+-static struct platform_device omap3_rom_rng_device = {
+- .name = "omap3-rom-rng",
+- .id = -1,
+- .dev = {
+- .platform_data = rx51_secure_rng_call,
+- },
+-};
+-
+ static void __init nokia_n900_legacy_init(void)
+ {
+ hsmmc2_internal_input_clk();
+@@ -286,9 +278,6 @@ static void __init nokia_n900_legacy_init(void)
+ pr_warn("RX-51: Not enabling ARM errata 430973 workaround\n");
+ pr_warn("Thumb binaries may crash randomly without this workaround\n");
+ }
+-
+- pr_info("RX-51: Registering OMAP3 HWRNG device\n");
+- platform_device_register(&omap3_rom_rng_device);
+ }
+ }
+
+@@ -466,6 +455,7 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
+ OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
+ OF_DEV_AUXDATA("ti,am3517-emac", 0x5c000000, "davinci_emac.0",
+ &am35xx_emac_pdata),
++ OF_DEV_AUXDATA("nokia,n900-rom-rng", 0, NULL, rx51_secure_rng_call),
+ /* McBSP modules with sidetone core */
+ #if IS_ENABLED(CONFIG_SND_OMAP_SOC_MCBSP)
+ OF_DEV_AUXDATA("ti,omap3-mcbsp", 0x49022000, "49022000.mcbsp", &mcbsp_pdata),
+diff --git a/arch/arm/mach-rpc/irq.c b/arch/arm/mach-rpc/irq.c
+index b8a61cb11207..7f0f40178634 100644
+--- a/arch/arm/mach-rpc/irq.c
++++ b/arch/arm/mach-rpc/irq.c
+@@ -118,7 +118,7 @@ extern unsigned char rpc_default_fiq_start, rpc_default_fiq_end;
+
+ void __init rpc_init_irq(void)
+ {
+- unsigned int irq, clr, set = 0;
++ unsigned int irq, clr, set;
+
+ iomd_writeb(0, IOMD_IRQMASKA);
+ iomd_writeb(0, IOMD_IRQMASKB);
+@@ -130,6 +130,7 @@ void __init rpc_init_irq(void)
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ clr = IRQ_NOREQUEST;
++ set = 0;
+
+ if (irq <= 6 || (irq >= 9 && irq <= 15))
+ clr |= IRQ_NOPROBE;
+diff --git a/arch/arm/mach-stm32/Kconfig b/arch/arm/mach-stm32/Kconfig
+index 713c068b953f..adca4368d67c 100644
+--- a/arch/arm/mach-stm32/Kconfig
++++ b/arch/arm/mach-stm32/Kconfig
+@@ -1,5 +1,6 @@
+ menuconfig ARCH_STM32
+- bool "STMicroelectronics STM32 family" if ARM_SINGLE_ARMV7M || ARCH_MULTI_V7
++ bool "STMicroelectronics STM32 family"
++ depends on ARM_SINGLE_ARMV7M || ARCH_MULTI_V7
+ select ARMV7M_SYSTICK if ARM_SINGLE_ARMV7M
+ select HAVE_ARM_ARCH_TIMER if ARCH_MULTI_V7
+ select ARM_GIC if ARCH_MULTI_V7
+diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
+index f51919974183..bf25f780c1c9 100644
+--- a/arch/arm/plat-pxa/ssp.c
++++ b/arch/arm/plat-pxa/ssp.c
+@@ -183,18 +183,12 @@ static int pxa_ssp_probe(struct platform_device *pdev)
+
+ static int pxa_ssp_remove(struct platform_device *pdev)
+ {
+- struct resource *res;
+ struct ssp_device *ssp;
+
+ ssp = platform_get_drvdata(pdev);
+ if (ssp == NULL)
+ return -ENODEV;
+
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- release_mem_region(res->start, resource_size(res));
+-
+- clk_put(ssp->clk);
+-
+ mutex_lock(&ssp_lock);
+ list_del(&ssp->node);
+ mutex_unlock(&ssp_lock);
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+index d3daf90a8715..7abc4ea30541 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+@@ -366,7 +366,8 @@
+ interrupts = <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
+- clocks = <&ccu 58>;
++ clocks = <&ccu 58>, <&osc24M>, <&rtc 0>;
++ clock-names = "apb", "hosc", "losc";
+ gpio-controller;
+ #gpio-cells = <3>;
+ interrupt-controller;
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+index 48daec7f78ba..6c3a47d90c79 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6-pine-h64.dts
+@@ -176,6 +176,8 @@
+ pcf8563: rtc@51 {
+ compatible = "nxp,pcf8563";
+ reg = <0x51>;
++ interrupt-parent = <&r_intc>;
++ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ #clock-cells = <0>;
+ };
+ };
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+index cfa5fffcf62b..72813e7aefb8 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+@@ -101,17 +101,6 @@
+ #reset-cells = <1>;
+ };
+
+- gic: interrupt-controller@3021000 {
+- compatible = "arm,gic-400";
+- reg = <0x03021000 0x1000>,
+- <0x03022000 0x2000>,
+- <0x03024000 0x2000>,
+- <0x03026000 0x2000>;
+- interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
+- interrupt-controller;
+- #interrupt-cells = <3>;
+- };
+-
+ pio: pinctrl@300b000 {
+ compatible = "allwinner,sun50i-h6-pinctrl";
+ reg = <0x0300b000 0x400>;
+@@ -149,6 +138,17 @@
+ };
+ };
+
++ gic: interrupt-controller@3021000 {
++ compatible = "arm,gic-400";
++ reg = <0x03021000 0x1000>,
++ <0x03022000 0x2000>,
++ <0x03024000 0x2000>,
++ <0x03026000 0x2000>;
++ interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
++ interrupt-controller;
++ #interrupt-cells = <3>;
++ };
++
+ mmc0: mmc@4020000 {
+ compatible = "allwinner,sun50i-h6-mmc",
+ "allwinner,sun50i-a64-mmc";
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+index 765247bc4f24..e14e0ce7e89f 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi
+@@ -125,6 +125,7 @@
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
++ hdmi-supply = <&hdmi_5v>;
+ };
+
+ &hdmi_tx_tmds_port {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
+index 9d858eb193ca..062e12aa4677 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-khadas-vim.dts
+@@ -76,6 +76,7 @@
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
++ hdmi-supply = <&hdmi_5v>;
+ };
+
+ &hdmi_tx_tmds_port {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+index b4dfb9afdef8..daad007fac1f 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+@@ -155,6 +155,7 @@
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
++ hdmi-supply = <&hdmi_5v>;
+ };
+
+ &hdmi_tx_tmds_port {
+@@ -255,7 +256,6 @@
+ cap-mmc-highspeed;
+ mmc-ddr-3_3v;
+ max-frequency = <50000000>;
+- non-removable;
+ disable-wp;
+
+ mmc-pwrseq = <&emmc_pwrseq>;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
+index 5896e8a5d86b..2602940c2077 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dts
+@@ -51,6 +51,7 @@
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
++ hdmi-supply = <&hdmi_5v>;
+ };
+
+ &hdmi_tx_tmds_port {
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+index 313f88f8759e..bdf7c6c5983c 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+@@ -18,7 +18,6 @@
+
+ aliases {
+ serial0 = &uart_AO;
+- serial1 = &uart_A;
+ serial2 = &uart_AO_B;
+ };
+
+@@ -63,11 +62,9 @@
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+- #address-cells = <1>;
+- #size-cells = <0>;
+ poll-interval = <100>;
+
+- button@0 {
++ power-button {
+ label = "power";
+ linux,code = <KEY_POWER>;
+ gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_LOW>;
+@@ -271,6 +268,7 @@
+ status = "okay";
+ pinctrl-0 = <&hdmi_hpd_pins>, <&hdmi_i2c_pins>;
+ pinctrl-names = "default";
++ hdmi-supply = <&hdmi_5v>;
+ };
+
+ &hdmi_tx_tmds_port {
+@@ -408,8 +406,17 @@
+ /* This one is connected to the Bluetooth module */
+ &uart_A {
+ status = "okay";
+- pinctrl-0 = <&uart_a_pins>;
++ pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+ pinctrl-names = "default";
++ uart-has-rtscts;
++
++ bluetooth {
++ compatible = "brcm,bcm43438-bt";
++ shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
++ max-speed = <2000000>;
++ clocks = <&wifi32k>;
++ clock-names = "lpo";
++ };
+ };
+
+ /* This is brought out on the Linux_RX (18) and Linux_TX (19) pins: */
+diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+index e5e265dfa902..2870b5eeb198 100644
+--- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi
++++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi
+@@ -8,10 +8,10 @@
+ */
+ / {
+ /* SoC fixed clocks */
+- soc_uartclk: refclk7273800hz {
++ soc_uartclk: refclk7372800hz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+- clock-frequency = <7273800>;
++ clock-frequency = <7372800>;
+ clock-output-names = "juno:uartclk";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+index 78ce3979ef09..f38b815e696d 100644
+--- a/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
++++ b/arch/arm64/boot/dts/qcom/apq8016-sbc.dtsi
+@@ -630,6 +630,8 @@
+ l11 {
+ regulator-min-microvolt = <1750000>;
+ regulator-max-microvolt = <3337000>;
++ regulator-allow-set-load;
++ regulator-system-load = <200000>;
+ };
+
+ l12 {
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 7b32b8990d62..8011e564a234 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -114,7 +114,7 @@
+ next-level-cache = <&L2_0>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SPC>;
+- clocks = <&apcs 0>;
++ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>;
+ };
+@@ -126,7 +126,7 @@
+ next-level-cache = <&L2_0>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SPC>;
+- clocks = <&apcs 0>;
++ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>;
+ };
+@@ -138,7 +138,7 @@
+ next-level-cache = <&L2_0>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SPC>;
+- clocks = <&apcs 0>;
++ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>;
+ };
+@@ -150,7 +150,7 @@
+ next-level-cache = <&L2_0>;
+ enable-method = "psci";
+ cpu-idle-states = <&CPU_SPC>;
+- clocks = <&apcs 0>;
++ clocks = <&apcs>;
+ operating-points-v2 = <&cpu_opp_table>;
+ #cooling-cells = <2>;
+ };
+diff --git a/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi b/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi
+index 7b2fbaec9aef..3dc61b7e1d08 100644
+--- a/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a7795-es1.dtsi
+@@ -28,6 +28,7 @@
+ compatible = "renesas,ipmmu-r8a7795";
+ reg = <0 0xec680000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 5>;
++ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+@@ -35,6 +36,7 @@
+ compatible = "renesas,ipmmu-r8a7795";
+ reg = <0 0xe7730000 0 0x1000>;
+ renesas,ipmmu-main = <&ipmmu_mm 8>;
++ power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
+ #iommu-cells = <1>;
+ };
+
+diff --git a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+index 2bc3a4884b00..470c2a35a5af 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
++++ b/arch/arm64/boot/dts/renesas/r8a77990-ebisu.dts
+@@ -33,7 +33,6 @@
+ &avb {
+ pinctrl-0 = <&avb_pins>;
+ pinctrl-names = "default";
+- renesas,no-ether-link;
+ phy-handle = <&phy0>;
+ phy-mode = "rgmii-txid";
+ status = "okay";
+diff --git a/arch/arm64/boot/dts/renesas/r8a77995.dtsi b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+index fe77bc43c447..fb3ecb2c385d 100644
+--- a/arch/arm64/boot/dts/renesas/r8a77995.dtsi
++++ b/arch/arm64/boot/dts/renesas/r8a77995.dtsi
+@@ -938,7 +938,7 @@
+
+ du: display@feb00000 {
+ compatible = "renesas,du-r8a77995";
+- reg = <0 0xfeb00000 0 0x80000>;
++ reg = <0 0xfeb00000 0 0x40000>;
+ interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&cpg CPG_MOD 724>,
+diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
+index db8d364f8476..1a4f8b67bbe8 100644
+--- a/arch/arm64/configs/defconfig
++++ b/arch/arm64/configs/defconfig
+@@ -365,6 +365,7 @@ CONFIG_THERMAL_EMULATION=y
+ CONFIG_ROCKCHIP_THERMAL=m
+ CONFIG_RCAR_GEN3_THERMAL=y
+ CONFIG_ARMADA_THERMAL=y
++CONFIG_BCM2835_THERMAL=m
+ CONFIG_BRCMSTB_THERMAL=m
+ CONFIG_EXYNOS_THERMAL=y
+ CONFIG_TEGRA_BPMP_THERMAL=m
+diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
+index 9859e1178e6b..dbeeeffdb9c9 100644
+--- a/arch/arm64/kernel/hibernate.c
++++ b/arch/arm64/kernel/hibernate.c
+@@ -202,6 +202,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
+ gfp_t mask)
+ {
+ int rc = 0;
++ pgd_t *trans_pgd;
+ pgd_t *pgdp;
+ pud_t *pudp;
+ pmd_t *pmdp;
+@@ -216,7 +217,13 @@ static int create_safe_exec_page(void *src_start, size_t length,
+ memcpy((void *)dst, src_start, length);
+ __flush_icache_range(dst, dst + length);
+
+- pgdp = pgd_offset_raw(allocator(mask), dst_addr);
++ trans_pgd = allocator(mask);
++ if (!trans_pgd) {
++ rc = -ENOMEM;
++ goto out;
++ }
++
++ pgdp = pgd_offset_raw(trans_pgd, dst_addr);
+ if (pgd_none(READ_ONCE(*pgdp))) {
+ pudp = allocator(mask);
+ if (!pudp) {
+diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
+index ec0bb588d755..42b7082029e1 100644
+--- a/arch/arm64/kernel/vdso.c
++++ b/arch/arm64/kernel/vdso.c
+@@ -146,8 +146,6 @@ static int __init vdso_init(void)
+ }
+
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+- pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+- vdso_pages + 1, vdso_pages, vdso_start, 1L, vdso_data);
+
+ /* Allocate the vDSO pagelist, plus a page for the data. */
+ vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
+diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
+index d1234a5ba4c5..9a960829a01d 100644
+--- a/arch/ia64/kernel/signal.c
++++ b/arch/ia64/kernel/signal.c
+@@ -110,7 +110,6 @@ ia64_rt_sigreturn (struct sigscratch *scr)
+ {
+ extern char ia64_strace_leave_kernel, ia64_leave_kernel;
+ struct sigcontext __user *sc;
+- struct siginfo si;
+ sigset_t set;
+ long retval;
+
+@@ -153,14 +152,7 @@ ia64_rt_sigreturn (struct sigscratch *scr)
+ return retval;
+
+ give_sigsegv:
+- clear_siginfo(&si);
+- si.si_signo = SIGSEGV;
+- si.si_errno = 0;
+- si.si_code = SI_KERNEL;
+- si.si_pid = task_pid_vnr(current);
+- si.si_uid = from_kuid_munged(current_user_ns(), current_uid());
+- si.si_addr = sc;
+- force_sig_info(SIGSEGV, &si, current);
++ force_sig(SIGSEGV, current);
+ return retval;
+ }
+
+@@ -231,37 +223,6 @@ rbs_on_sig_stack (unsigned long bsp)
+ return (bsp - current->sas_ss_sp < current->sas_ss_size);
+ }
+
+-static long
+-force_sigsegv_info (int sig, void __user *addr)
+-{
+- unsigned long flags;
+- struct siginfo si;
+-
+- clear_siginfo(&si);
+- if (sig == SIGSEGV) {
+- /*
+- * Acquiring siglock around the sa_handler-update is almost
+- * certainly overkill, but this isn't a
+- * performance-critical path and I'd rather play it safe
+- * here than having to debug a nasty race if and when
+- * something changes in kernel/signal.c that would make it
+- * no longer safe to modify sa_handler without holding the
+- * lock.
+- */
+- spin_lock_irqsave(&current->sighand->siglock, flags);
+- current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
+- spin_unlock_irqrestore(&current->sighand->siglock, flags);
+- }
+- si.si_signo = SIGSEGV;
+- si.si_errno = 0;
+- si.si_code = SI_KERNEL;
+- si.si_pid = task_pid_vnr(current);
+- si.si_uid = from_kuid_munged(current_user_ns(), current_uid());
+- si.si_addr = addr;
+- force_sig_info(SIGSEGV, &si, current);
+- return 1;
+-}
+-
+ static long
+ setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr)
+ {
+@@ -295,15 +256,18 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr)
+ * instead so we will die with SIGSEGV.
+ */
+ check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN;
+- if (!likely(on_sig_stack(check_sp)))
+- return force_sigsegv_info(ksig->sig, (void __user *)
+- check_sp);
++ if (!likely(on_sig_stack(check_sp))) {
++ force_sigsegv(ksig->sig, current);
++ return 1;
++ }
+ }
+ }
+ frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
+
+- if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
+- return force_sigsegv_info(ksig->sig, frame);
++ if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) {
++ force_sigsegv(ksig->sig, current);
++ return 1;
++ }
+
+ err = __put_user(ksig->sig, &frame->arg0);
+ err |= __put_user(&frame->info, &frame->arg1);
+@@ -317,8 +281,10 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr)
+ err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12);
+ err |= setup_sigcontext(&frame->sc, set, scr);
+
+- if (unlikely(err))
+- return force_sigsegv_info(ksig->sig, frame);
++ if (unlikely(err)) {
++ force_sigsegv(ksig->sig, current);
++ return 1;
++ }
+
+ scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */
+ scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */
+diff --git a/arch/m68k/amiga/cia.c b/arch/m68k/amiga/cia.c
+index 2081b8cd5591..b9aee983e6f4 100644
+--- a/arch/m68k/amiga/cia.c
++++ b/arch/m68k/amiga/cia.c
+@@ -88,10 +88,19 @@ static irqreturn_t cia_handler(int irq, void *dev_id)
+ struct ciabase *base = dev_id;
+ int mach_irq;
+ unsigned char ints;
++ unsigned long flags;
+
++ /* Interrupts get disabled while the timer irq flag is cleared and
++ * the timer interrupt serviced.
++ */
+ mach_irq = base->cia_irq;
++ local_irq_save(flags);
+ ints = cia_set_irq(base, CIA_ICR_ALL);
+ amiga_custom.intreq = base->int_mask;
++ if (ints & 1)
++ generic_handle_irq(mach_irq);
++ local_irq_restore(flags);
++ mach_irq++, ints >>= 1;
+ for (; ints; mach_irq++, ints >>= 1) {
+ if (ints & 1)
+ generic_handle_irq(mach_irq);
+diff --git a/arch/m68k/atari/ataints.c b/arch/m68k/atari/ataints.c
+index 3d2b63bedf05..56f02ea2c248 100644
+--- a/arch/m68k/atari/ataints.c
++++ b/arch/m68k/atari/ataints.c
+@@ -142,7 +142,7 @@ struct mfptimerbase {
+ .name = "MFP Timer D"
+ };
+
+-static irqreturn_t mfptimer_handler(int irq, void *dev_id)
++static irqreturn_t mfp_timer_d_handler(int irq, void *dev_id)
+ {
+ struct mfptimerbase *base = dev_id;
+ int mach_irq;
+@@ -344,7 +344,7 @@ void __init atari_init_IRQ(void)
+ st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 0xf0) | 0x6;
+
+ /* request timer D dispatch handler */
+- if (request_irq(IRQ_MFP_TIMD, mfptimer_handler, IRQF_SHARED,
++ if (request_irq(IRQ_MFP_TIMD, mfp_timer_d_handler, IRQF_SHARED,
+ stmfp_base.name, &stmfp_base))
+ pr_err("Couldn't register %s interrupt\n", stmfp_base.name);
+
+diff --git a/arch/m68k/atari/time.c b/arch/m68k/atari/time.c
+index 9cca64286464..fafa20f75ab9 100644
+--- a/arch/m68k/atari/time.c
++++ b/arch/m68k/atari/time.c
+@@ -24,6 +24,18 @@
+ DEFINE_SPINLOCK(rtc_lock);
+ EXPORT_SYMBOL_GPL(rtc_lock);
+
++static irqreturn_t mfp_timer_c_handler(int irq, void *dev_id)
++{
++ irq_handler_t timer_routine = dev_id;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ timer_routine(0, NULL);
++ local_irq_restore(flags);
++
++ return IRQ_HANDLED;
++}
++
+ void __init
+ atari_sched_init(irq_handler_t timer_routine)
+ {
+@@ -32,7 +44,8 @@ atari_sched_init(irq_handler_t timer_routine)
+ /* start timer C, div = 1:100 */
+ st_mfp.tim_ct_cd = (st_mfp.tim_ct_cd & 15) | 0x60;
+ /* install interrupt service routine for MFP Timer C */
+- if (request_irq(IRQ_MFP_TIMC, timer_routine, 0, "timer", timer_routine))
++ if (request_irq(IRQ_MFP_TIMC, mfp_timer_c_handler, 0, "timer",
++ timer_routine))
+ pr_err("Couldn't register timer interrupt\n");
+ }
+
+diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c
+index 143ee9fa3893..0e5efed4da86 100644
+--- a/arch/m68k/bvme6000/config.c
++++ b/arch/m68k/bvme6000/config.c
+@@ -44,11 +44,6 @@ extern int bvme6000_hwclk (int, struct rtc_time *);
+ extern void bvme6000_reset (void);
+ void bvme6000_set_vectors (void);
+
+-/* Save tick handler routine pointer, will point to xtime_update() in
+- * kernel/timer/timekeeping.c, called via bvme6000_process_int() */
+-
+-static irq_handler_t tick_handler;
+-
+
+ int __init bvme6000_parse_bootinfo(const struct bi_record *bi)
+ {
+@@ -157,12 +152,18 @@ irqreturn_t bvme6000_abort_int (int irq, void *dev_id)
+
+ static irqreturn_t bvme6000_timer_int (int irq, void *dev_id)
+ {
++ irq_handler_t timer_routine = dev_id;
++ unsigned long flags;
+ volatile RtcPtr_t rtc = (RtcPtr_t)BVME_RTC_BASE;
+- unsigned char msr = rtc->msr & 0xc0;
++ unsigned char msr;
+
++ local_irq_save(flags);
++ msr = rtc->msr & 0xc0;
+ rtc->msr = msr | 0x20; /* Ack the interrupt */
++ timer_routine(0, NULL);
++ local_irq_restore(flags);
+
+- return tick_handler(irq, dev_id);
++ return IRQ_HANDLED;
+ }
+
+ /*
+@@ -181,9 +182,8 @@ void bvme6000_sched_init (irq_handler_t timer_routine)
+
+ rtc->msr = 0; /* Ensure timer registers accessible */
+
+- tick_handler = timer_routine;
+- if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, 0,
+- "timer", bvme6000_timer_int))
++ if (request_irq(BVME_IRQ_RTC, bvme6000_timer_int, 0, "timer",
++ timer_routine))
+ panic ("Couldn't register timer int");
+
+ rtc->t1cr_omr = 0x04; /* Mode 2, ext clk */
+diff --git a/arch/m68k/hp300/time.c b/arch/m68k/hp300/time.c
+index 289d928a46cb..d30b03ea93a2 100644
+--- a/arch/m68k/hp300/time.c
++++ b/arch/m68k/hp300/time.c
+@@ -38,13 +38,19 @@
+
+ static irqreturn_t hp300_tick(int irq, void *dev_id)
+ {
++ irq_handler_t timer_routine = dev_id;
++ unsigned long flags;
+ unsigned long tmp;
+- irq_handler_t vector = dev_id;
++
++ local_irq_save(flags);
+ in_8(CLOCKBASE + CLKSR);
+ asm volatile ("movpw %1@(5),%0" : "=d" (tmp) : "a" (CLOCKBASE));
++ timer_routine(0, NULL);
++ local_irq_restore(flags);
++
+ /* Turn off the network and SCSI leds */
+ blinken_leds(0, 0xe0);
+- return vector(irq, NULL);
++ return IRQ_HANDLED;
+ }
+
+ u32 hp300_gettimeoffset(void)
+diff --git a/arch/m68k/mac/via.c b/arch/m68k/mac/via.c
+index acdabbeecfd2..038d5a1c4d48 100644
+--- a/arch/m68k/mac/via.c
++++ b/arch/m68k/mac/via.c
+@@ -54,16 +54,6 @@ static __u8 rbv_clear;
+
+ static int gIER,gIFR,gBufA,gBufB;
+
+-/*
+- * Timer defs.
+- */
+-
+-#define TICK_SIZE 10000
+-#define MAC_CLOCK_TICK (783300/HZ) /* ticks per HZ */
+-#define MAC_CLOCK_LOW (MAC_CLOCK_TICK&0xFF)
+-#define MAC_CLOCK_HIGH (MAC_CLOCK_TICK>>8)
+-
+-
+ /*
+ * On Macs with a genuine VIA chip there is no way to mask an individual slot
+ * interrupt. This limitation also seems to apply to VIA clone logic cores in
+@@ -267,22 +257,6 @@ void __init via_init(void)
+ }
+ }
+
+-/*
+- * Start the 100 Hz clock
+- */
+-
+-void __init via_init_clock(irq_handler_t func)
+-{
+- via1[vACR] |= 0x40;
+- via1[vT1LL] = MAC_CLOCK_LOW;
+- via1[vT1LH] = MAC_CLOCK_HIGH;
+- via1[vT1CL] = MAC_CLOCK_LOW;
+- via1[vT1CH] = MAC_CLOCK_HIGH;
+-
+- if (request_irq(IRQ_MAC_TIMER_1, func, 0, "timer", func))
+- pr_err("Couldn't register %s interrupt\n", "timer");
+-}
+-
+ /*
+ * Debugging dump, used in various places to see what's going on.
+ */
+@@ -310,29 +284,6 @@ void via_debug_dump(void)
+ }
+ }
+
+-/*
+- * This is always executed with interrupts disabled.
+- *
+- * TBI: get time offset between scheduling timer ticks
+- */
+-
+-u32 mac_gettimeoffset(void)
+-{
+- unsigned long ticks, offset = 0;
+-
+- /* read VIA1 timer 2 current value */
+- ticks = via1[vT1CL] | (via1[vT1CH] << 8);
+- /* The probability of underflow is less than 2% */
+- if (ticks > MAC_CLOCK_TICK - MAC_CLOCK_TICK / 50)
+- /* Check for pending timer interrupt in VIA1 IFR */
+- if (via1[vIFR] & 0x40) offset = TICK_SIZE;
+-
+- ticks = MAC_CLOCK_TICK - ticks;
+- ticks = ticks * 10000L / MAC_CLOCK_TICK;
+-
+- return (ticks + offset) * 1000;
+-}
+-
+ /*
+ * Flush the L2 cache on Macs that have it by flipping
+ * the system into 24-bit mode for an instant.
+@@ -436,6 +387,8 @@ void via_nubus_irq_shutdown(int irq)
+ * via6522.c :-), disable/pending masks added.
+ */
+
++#define VIA_TIMER_1_INT BIT(6)
++
+ void via1_irq(struct irq_desc *desc)
+ {
+ int irq_num;
+@@ -445,6 +398,21 @@ void via1_irq(struct irq_desc *desc)
+ if (!events)
+ return;
+
++ irq_num = IRQ_MAC_TIMER_1;
++ irq_bit = VIA_TIMER_1_INT;
++ if (events & irq_bit) {
++ unsigned long flags;
++
++ local_irq_save(flags);
++ via1[vIFR] = irq_bit;
++ generic_handle_irq(irq_num);
++ local_irq_restore(flags);
++
++ events &= ~irq_bit;
++ if (!events)
++ return;
++ }
++
+ irq_num = VIA1_SOURCE_BASE;
+ irq_bit = 1;
+ do {
+@@ -601,3 +569,56 @@ int via2_scsi_drq_pending(void)
+ return via2[gIFR] & (1 << IRQ_IDX(IRQ_MAC_SCSIDRQ));
+ }
+ EXPORT_SYMBOL(via2_scsi_drq_pending);
++
++/* timer and clock source */
++
++#define VIA_CLOCK_FREQ 783360 /* VIA "phase 2" clock in Hz */
++#define VIA_TIMER_INTERVAL (1000000 / HZ) /* microseconds per jiffy */
++#define VIA_TIMER_CYCLES (VIA_CLOCK_FREQ / HZ) /* clock cycles per jiffy */
++
++#define VIA_TC (VIA_TIMER_CYCLES - 2) /* including 0 and -1 */
++#define VIA_TC_LOW (VIA_TC & 0xFF)
++#define VIA_TC_HIGH (VIA_TC >> 8)
++
++void __init via_init_clock(irq_handler_t timer_routine)
++{
++ if (request_irq(IRQ_MAC_TIMER_1, timer_routine, 0, "timer", NULL)) {
++ pr_err("Couldn't register %s interrupt\n", "timer");
++ return;
++ }
++
++ via1[vT1LL] = VIA_TC_LOW;
++ via1[vT1LH] = VIA_TC_HIGH;
++ via1[vT1CL] = VIA_TC_LOW;
++ via1[vT1CH] = VIA_TC_HIGH;
++ via1[vACR] |= 0x40;
++}
++
++u32 mac_gettimeoffset(void)
++{
++ unsigned long flags;
++ u8 count_high;
++ u16 count, offset = 0;
++
++ /*
++ * Timer counter wrap-around is detected with the timer interrupt flag
++ * but reading the counter low byte (vT1CL) would reset the flag.
++ * Also, accessing both counter registers is essentially a data race.
++ * These problems are avoided by ignoring the low byte. Clock accuracy
++ * is 256 times worse (error can reach 0.327 ms) but CPU overhead is
++ * reduced by avoiding slow VIA register accesses.
++ */
++
++ local_irq_save(flags);
++ count_high = via1[vT1CH];
++ if (count_high == 0xFF)
++ count_high = 0;
++ if (count_high > 0 && (via1[vIFR] & VIA_TIMER_1_INT))
++ offset = VIA_TIMER_CYCLES;
++ local_irq_restore(flags);
++
++ count = count_high << 8;
++ count = VIA_TIMER_CYCLES - count + offset;
++
++ return ((count * VIA_TIMER_INTERVAL) / VIA_TIMER_CYCLES) * 1000;
++}
+diff --git a/arch/m68k/mvme147/config.c b/arch/m68k/mvme147/config.c
+index adea549d240e..93c68d2b8e0e 100644
+--- a/arch/m68k/mvme147/config.c
++++ b/arch/m68k/mvme147/config.c
+@@ -45,11 +45,6 @@ extern void mvme147_reset (void);
+
+ static int bcd2int (unsigned char b);
+
+-/* Save tick handler routine pointer, will point to xtime_update() in
+- * kernel/time/timekeeping.c, called via mvme147_process_int() */
+-
+-irq_handler_t tick_handler;
+-
+
+ int __init mvme147_parse_bootinfo(const struct bi_record *bi)
+ {
+@@ -104,16 +99,23 @@ void __init config_mvme147(void)
+
+ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
+ {
++ irq_handler_t timer_routine = dev_id;
++ unsigned long flags;
++
++ local_irq_save(flags);
+ m147_pcc->t1_int_cntrl = PCC_TIMER_INT_CLR;
+ m147_pcc->t1_int_cntrl = PCC_INT_ENAB|PCC_LEVEL_TIMER1;
+- return tick_handler(irq, dev_id);
++ timer_routine(0, NULL);
++ local_irq_restore(flags);
++
++ return IRQ_HANDLED;
+ }
+
+
+ void mvme147_sched_init (irq_handler_t timer_routine)
+ {
+- tick_handler = timer_routine;
+- if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1", NULL))
++ if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, 0, "timer 1",
++ timer_routine))
+ pr_err("Couldn't register timer interrupt\n");
+
+ /* Init the clock with a value */
+diff --git a/arch/m68k/mvme16x/config.c b/arch/m68k/mvme16x/config.c
+index 6ee36a5b528d..5feb3ab484d0 100644
+--- a/arch/m68k/mvme16x/config.c
++++ b/arch/m68k/mvme16x/config.c
+@@ -50,11 +50,6 @@ extern void mvme16x_reset (void);
+
+ int bcd2int (unsigned char b);
+
+-/* Save tick handler routine pointer, will point to xtime_update() in
+- * kernel/time/timekeeping.c, called via mvme16x_process_int() */
+-
+-static irq_handler_t tick_handler;
+-
+
+ unsigned short mvme16x_config;
+ EXPORT_SYMBOL(mvme16x_config);
+@@ -352,8 +347,15 @@ static irqreturn_t mvme16x_abort_int (int irq, void *dev_id)
+
+ static irqreturn_t mvme16x_timer_int (int irq, void *dev_id)
+ {
+- *(volatile unsigned char *)0xfff4201b |= 8;
+- return tick_handler(irq, dev_id);
++ irq_handler_t timer_routine = dev_id;
++ unsigned long flags;
++
++ local_irq_save(flags);
++ *(volatile unsigned char *)0xfff4201b |= 8;
++ timer_routine(0, NULL);
++ local_irq_restore(flags);
++
++ return IRQ_HANDLED;
+ }
+
+ void mvme16x_sched_init (irq_handler_t timer_routine)
+@@ -361,14 +363,13 @@ void mvme16x_sched_init (irq_handler_t timer_routine)
+ uint16_t brdno = be16_to_cpu(mvme_bdid.brdno);
+ int irq;
+
+- tick_handler = timer_routine;
+ /* Using PCCchip2 or MC2 chip tick timer 1 */
+ *(volatile unsigned long *)0xfff42008 = 0;
+ *(volatile unsigned long *)0xfff42004 = 10000; /* 10ms */
+ *(volatile unsigned char *)0xfff42017 |= 3;
+ *(volatile unsigned char *)0xfff4201b = 0x16;
+- if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, 0,
+- "timer", mvme16x_timer_int))
++ if (request_irq(MVME16x_IRQ_TIMER, mvme16x_timer_int, 0, "timer",
++ timer_routine))
+ panic ("Couldn't register timer int");
+
+ if (brdno == 0x0162 || brdno == 0x172)
+diff --git a/arch/m68k/q40/q40ints.c b/arch/m68k/q40/q40ints.c
+index 3e7603202977..1c696906c159 100644
+--- a/arch/m68k/q40/q40ints.c
++++ b/arch/m68k/q40/q40ints.c
+@@ -127,10 +127,10 @@ void q40_mksound(unsigned int hz, unsigned int ticks)
+ sound_ticks = ticks << 1;
+ }
+
+-static irq_handler_t q40_timer_routine;
+-
+-static irqreturn_t q40_timer_int (int irq, void * dev)
++static irqreturn_t q40_timer_int(int irq, void *dev_id)
+ {
++ irq_handler_t timer_routine = dev_id;
++
+ ql_ticks = ql_ticks ? 0 : 1;
+ if (sound_ticks) {
+ unsigned char sval=(sound_ticks & 1) ? 128-SVOL : 128+SVOL;
+@@ -139,8 +139,13 @@ static irqreturn_t q40_timer_int (int irq, void * dev)
+ *DAC_RIGHT=sval;
+ }
+
+- if (!ql_ticks)
+- q40_timer_routine(irq, dev);
++ if (!ql_ticks) {
++ unsigned long flags;
++
++ local_irq_save(flags);
++ timer_routine(0, NULL);
++ local_irq_restore(flags);
++ }
+ return IRQ_HANDLED;
+ }
+
+@@ -148,11 +153,9 @@ void q40_sched_init (irq_handler_t timer_routine)
+ {
+ int timer_irq;
+
+- q40_timer_routine = timer_routine;
+ timer_irq = Q40_IRQ_FRAME;
+
+- if (request_irq(timer_irq, q40_timer_int, 0,
+- "timer", q40_timer_int))
++ if (request_irq(timer_irq, q40_timer_int, 0, "timer", timer_routine))
+ panic("Couldn't register timer int");
+
+ master_outb(-1, FRAME_CLEAR_REG);
+diff --git a/arch/m68k/sun3/sun3ints.c b/arch/m68k/sun3/sun3ints.c
+index 6bbca30c9188..a5824abb4a39 100644
+--- a/arch/m68k/sun3/sun3ints.c
++++ b/arch/m68k/sun3/sun3ints.c
+@@ -61,8 +61,10 @@ static irqreturn_t sun3_int7(int irq, void *dev_id)
+
+ static irqreturn_t sun3_int5(int irq, void *dev_id)
+ {
++ unsigned long flags;
+ unsigned int cnt;
+
++ local_irq_save(flags);
+ #ifdef CONFIG_SUN3
+ intersil_clear();
+ #endif
+@@ -76,6 +78,7 @@ static irqreturn_t sun3_int5(int irq, void *dev_id)
+ cnt = kstat_irqs_cpu(irq, 0);
+ if (!(cnt % 20))
+ sun3_leds(led_pattern[cnt % 160 / 20]);
++ local_irq_restore(flags);
+ return IRQ_HANDLED;
+ }
+
+diff --git a/arch/m68k/sun3x/time.c b/arch/m68k/sun3x/time.c
+index 047e2bcee3d7..3c8a86d08508 100644
+--- a/arch/m68k/sun3x/time.c
++++ b/arch/m68k/sun3x/time.c
+@@ -80,15 +80,19 @@ u32 sun3x_gettimeoffset(void)
+ }
+
+ #if 0
+-static void sun3x_timer_tick(int irq, void *dev_id, struct pt_regs *regs)
++static irqreturn_t sun3x_timer_tick(int irq, void *dev_id)
+ {
+- void (*vector)(int, void *, struct pt_regs *) = dev_id;
++ irq_handler_t timer_routine = dev_id;
++ unsigned long flags;
+
+- /* Clear the pending interrupt - pulse the enable line low */
+- disable_irq(5);
+- enable_irq(5);
++ local_irq_save(flags);
++ /* Clear the pending interrupt - pulse the enable line low */
++ disable_irq(5);
++ enable_irq(5);
++ timer_routine(0, NULL);
++ local_irq_restore(flags);
+
+- vector(irq, NULL, regs);
++ return IRQ_HANDLED;
+ }
+ #endif
+
+diff --git a/arch/mips/bcm63xx/Makefile b/arch/mips/bcm63xx/Makefile
+index c69f297fc1df..d89651e538f6 100644
+--- a/arch/mips/bcm63xx/Makefile
++++ b/arch/mips/bcm63xx/Makefile
+@@ -1,8 +1,8 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-y += clk.o cpu.o cs.o gpio.o irq.o nvram.o prom.o reset.o \
+- setup.o timer.o dev-dsp.o dev-enet.o dev-flash.o \
+- dev-pcmcia.o dev-rng.o dev-spi.o dev-hsspi.o dev-uart.o \
+- dev-wdt.o dev-usb-usbd.o
++ setup.o timer.o dev-enet.o dev-flash.o dev-pcmcia.o \
++ dev-rng.o dev-spi.o dev-hsspi.o dev-uart.o dev-wdt.o \
++ dev-usb-usbd.o
+ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+
+ obj-y += boards/
+diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
+index b2097c0d2ed7..36ec3dc2c999 100644
+--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
++++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
+@@ -23,7 +23,6 @@
+ #include <bcm63xx_nvram.h>
+ #include <bcm63xx_dev_pci.h>
+ #include <bcm63xx_dev_enet.h>
+-#include <bcm63xx_dev_dsp.h>
+ #include <bcm63xx_dev_flash.h>
+ #include <bcm63xx_dev_hsspi.h>
+ #include <bcm63xx_dev_pcmcia.h>
+@@ -289,14 +288,6 @@ static struct board_info __initdata board_96348gw_10 = {
+ .has_pccard = 1,
+ .has_ehci0 = 1,
+
+- .has_dsp = 1,
+- .dsp = {
+- .gpio_rst = 6,
+- .gpio_int = 34,
+- .cs = 2,
+- .ext_irq = 2,
+- },
+-
+ .leds = {
+ {
+ .name = "adsl-fail",
+@@ -401,14 +392,6 @@ static struct board_info __initdata board_96348gw = {
+
+ .has_ohci0 = 1,
+
+- .has_dsp = 1,
+- .dsp = {
+- .gpio_rst = 6,
+- .gpio_int = 34,
+- .ext_irq = 2,
+- .cs = 2,
+- },
+-
+ .leds = {
+ {
+ .name = "adsl-fail",
+@@ -898,9 +881,6 @@ int __init board_register_devices(void)
+ if (board.has_usbd)
+ bcm63xx_usbd_register(&board.usbd);
+
+- if (board.has_dsp)
+- bcm63xx_dsp_register(&board.dsp);
+-
+ /* Generate MAC address for WLAN and register our SPROM,
+ * do this after registering enet devices
+ */
+diff --git a/arch/mips/bcm63xx/dev-dsp.c b/arch/mips/bcm63xx/dev-dsp.c
+deleted file mode 100644
+index 5bb5b154c9bd..000000000000
+--- a/arch/mips/bcm63xx/dev-dsp.c
++++ /dev/null
+@@ -1,56 +0,0 @@
+-/*
+- * Broadcom BCM63xx VoIP DSP registration
+- *
+- * This file is subject to the terms and conditions of the GNU General Public
+- * License. See the file "COPYING" in the main directory of this archive
+- * for more details.
+- *
+- * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org>
+- */
+-
+-#include <linux/init.h>
+-#include <linux/kernel.h>
+-#include <linux/platform_device.h>
+-
+-#include <bcm63xx_cpu.h>
+-#include <bcm63xx_dev_dsp.h>
+-#include <bcm63xx_regs.h>
+-#include <bcm63xx_io.h>
+-
+-static struct resource voip_dsp_resources[] = {
+- {
+- .start = -1, /* filled at runtime */
+- .end = -1, /* filled at runtime */
+- .flags = IORESOURCE_MEM,
+- },
+- {
+- .start = -1, /* filled at runtime */
+- .flags = IORESOURCE_IRQ,
+- },
+-};
+-
+-static struct platform_device bcm63xx_voip_dsp_device = {
+- .name = "bcm63xx-voip-dsp",
+- .id = -1,
+- .num_resources = ARRAY_SIZE(voip_dsp_resources),
+- .resource = voip_dsp_resources,
+-};
+-
+-int __init bcm63xx_dsp_register(const struct bcm63xx_dsp_platform_data *pd)
+-{
+- struct bcm63xx_dsp_platform_data *dpd;
+- u32 val;
+-
+- /* Get the memory window */
+- val = bcm_mpi_readl(MPI_CSBASE_REG(pd->cs - 1));
+- val &= MPI_CSBASE_BASE_MASK;
+- voip_dsp_resources[0].start = val;
+- voip_dsp_resources[0].end = val + 0xFFFFFFF;
+- voip_dsp_resources[1].start = pd->ext_irq;
+-
+- /* copy given platform data */
+- dpd = bcm63xx_voip_dsp_device.dev.platform_data;
+- memcpy(dpd, pd, sizeof (*pd));
+-
+- return platform_device_register(&bcm63xx_voip_dsp_device);
+-}
+diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
+index 54c730aed327..df1eaa365279 100644
+--- a/arch/mips/include/asm/io.h
++++ b/arch/mips/include/asm/io.h
+@@ -62,21 +62,11 @@
+ * instruction, so the lower 16 bits must be zero. Should be true on
+ * on any sane architecture; generic code does not use this assumption.
+ */
+-extern const unsigned long mips_io_port_base;
++extern unsigned long mips_io_port_base;
+
+-/*
+- * Gcc will generate code to load the value of mips_io_port_base after each
+- * function call which may be fairly wasteful in some cases. So we don't
+- * play quite by the book. We tell gcc mips_io_port_base is a long variable
+- * which solves the code generation issue. Now we need to violate the
+- * aliasing rules a little to make initialization possible and finally we
+- * will need the barrier() to fight side effects of the aliasing chat.
+- * This trickery will eventually collapse under gcc's optimizer. Oh well.
+- */
+ static inline void set_io_port_base(unsigned long base)
+ {
+- * (unsigned long *) &mips_io_port_base = base;
+- barrier();
++ mips_io_port_base = base;
+ }
+
+ /*
+diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_dsp.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_dsp.h
+deleted file mode 100644
+index 4e4970787371..000000000000
+--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_dsp.h
++++ /dev/null
+@@ -1,14 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __BCM63XX_DSP_H
+-#define __BCM63XX_DSP_H
+-
+-struct bcm63xx_dsp_platform_data {
+- unsigned gpio_rst;
+- unsigned gpio_int;
+- unsigned cs;
+- unsigned ext_irq;
+-};
+-
+-int __init bcm63xx_dsp_register(const struct bcm63xx_dsp_platform_data *pd);
+-
+-#endif /* __BCM63XX_DSP_H */
+diff --git a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
+index 5e5b1bc4a324..830f53f28e3f 100644
+--- a/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
++++ b/arch/mips/include/asm/mach-bcm63xx/board_bcm963xx.h
+@@ -7,7 +7,6 @@
+ #include <linux/leds.h>
+ #include <bcm63xx_dev_enet.h>
+ #include <bcm63xx_dev_usb_usbd.h>
+-#include <bcm63xx_dev_dsp.h>
+
+ /*
+ * flash mapping
+@@ -31,7 +30,6 @@ struct board_info {
+ unsigned int has_ohci0:1;
+ unsigned int has_ehci0:1;
+ unsigned int has_usbd:1;
+- unsigned int has_dsp:1;
+ unsigned int has_uart0:1;
+ unsigned int has_uart1:1;
+
+@@ -43,9 +41,6 @@ struct board_info {
+ /* USB config */
+ struct bcm63xx_usbd_platform_data usbd;
+
+- /* DSP config */
+- struct bcm63xx_dsp_platform_data dsp;
+-
+ /* GPIO LEDs */
+ struct gpio_led leds[5];
+
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 8aaaa42f91ed..e87c98b8a72c 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -76,7 +76,7 @@ static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
+ * mips_io_port_base is the begin of the address space to which x86 style
+ * I/O ports are mapped.
+ */
+-const unsigned long mips_io_port_base = -1;
++unsigned long mips_io_port_base = -1;
+ EXPORT_SYMBOL(mips_io_port_base);
+
+ static struct resource code_resource = { .name = "Kernel code", };
+diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c
+index bf2f55d10a4d..4e704046a150 100644
+--- a/arch/nios2/kernel/nios2_ksyms.c
++++ b/arch/nios2/kernel/nios2_ksyms.c
+@@ -9,12 +9,20 @@
+ #include <linux/export.h>
+ #include <linux/string.h>
+
++#include <asm/cacheflush.h>
++#include <asm/pgtable.h>
++
+ /* string functions */
+
+ EXPORT_SYMBOL(memcpy);
+ EXPORT_SYMBOL(memset);
+ EXPORT_SYMBOL(memmove);
+
++/* memory management */
++
++EXPORT_SYMBOL(empty_zero_page);
++EXPORT_SYMBOL(flush_icache_range);
++
+ /*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (prototypes are not correct though, but that
+@@ -31,3 +39,7 @@ DECLARE_EXPORT(__udivsi3);
+ DECLARE_EXPORT(__umoddi3);
+ DECLARE_EXPORT(__umodsi3);
+ DECLARE_EXPORT(__muldi3);
++DECLARE_EXPORT(__ucmpdi2);
++DECLARE_EXPORT(__lshrdi3);
++DECLARE_EXPORT(__ashldi3);
++DECLARE_EXPORT(__ashrdi3);
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index e43321f46a3b..8954108df457 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -412,7 +412,9 @@ vdso_install:
+ ifdef CONFIG_PPC64
+ $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso64 $@
+ endif
++ifdef CONFIG_VDSO32
+ $(Q)$(MAKE) $(build)=arch/$(ARCH)/kernel/vdso32 $@
++endif
+
+ archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
+index 9c63b596e6ce..a09595f00cab 100644
+--- a/arch/powerpc/include/asm/archrandom.h
++++ b/arch/powerpc/include/asm/archrandom.h
+@@ -28,7 +28,7 @@ static inline int arch_get_random_seed_int(unsigned int *v)
+ unsigned long val;
+ int rc;
+
+- rc = arch_get_random_long(&val);
++ rc = arch_get_random_seed_long(&val);
+ if (rc)
+ *v = val;
+
+diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h
+index 9db24e77b9f4..a9e098a3b881 100644
+--- a/arch/powerpc/include/asm/kgdb.h
++++ b/arch/powerpc/include/asm/kgdb.h
+@@ -26,9 +26,12 @@
+ #define BREAK_INSTR_SIZE 4
+ #define BUFMAX ((NUMREGBYTES * 2) + 512)
+ #define OUTBUFMAX ((NUMREGBYTES * 2) + 512)
++
++#define BREAK_INSTR 0x7d821008 /* twge r2, r2 */
++
+ static inline void arch_kgdb_breakpoint(void)
+ {
+- asm(".long 0x7d821008"); /* twge r2, r2 */
++ asm(stringify_in_c(.long BREAK_INSTR));
+ }
+ #define CACHE_FLUSH_IS_SAFE 1
+ #define DBG_MAX_REG_NUM 70
+diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
+index a8f20e5928e1..9edb45430133 100644
+--- a/arch/powerpc/kernel/cacheinfo.c
++++ b/arch/powerpc/kernel/cacheinfo.c
+@@ -865,4 +865,25 @@ void cacheinfo_cpu_offline(unsigned int cpu_id)
+ if (cache)
+ cache_cpu_clear(cache, cpu_id);
+ }
++
++void cacheinfo_teardown(void)
++{
++ unsigned int cpu;
++
++ lockdep_assert_cpus_held();
++
++ for_each_online_cpu(cpu)
++ cacheinfo_cpu_offline(cpu);
++}
++
++void cacheinfo_rebuild(void)
++{
++ unsigned int cpu;
++
++ lockdep_assert_cpus_held();
++
++ for_each_online_cpu(cpu)
++ cacheinfo_cpu_online(cpu);
++}
++
+ #endif /* (CONFIG_PPC_PSERIES && CONFIG_SUSPEND) || CONFIG_HOTPLUG_CPU */
+diff --git a/arch/powerpc/kernel/cacheinfo.h b/arch/powerpc/kernel/cacheinfo.h
+index 955f5e999f1b..52bd3fc6642d 100644
+--- a/arch/powerpc/kernel/cacheinfo.h
++++ b/arch/powerpc/kernel/cacheinfo.h
+@@ -6,4 +6,8 @@
+ extern void cacheinfo_cpu_online(unsigned int cpu_id);
+ extern void cacheinfo_cpu_offline(unsigned int cpu_id);
+
++/* Allow migration/suspend to tear down and rebuild the hierarchy. */
++extern void cacheinfo_teardown(void);
++extern void cacheinfo_rebuild(void);
++
+ #endif /* _PPC_CACHEINFO_H */
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index c6f41907f0d7..a4b31e17492d 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -666,8 +666,10 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+ m = &dt_cpu_feature_match_table[i];
+ if (!strcmp(f->name, m->name)) {
+ known = true;
+- if (m->enable(f))
++ if (m->enable(f)) {
++ cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
+ break;
++ }
+
+ pr_info("not enabling: %s (disabled or unsupported by kernel)\n",
+ f->name);
+@@ -675,17 +677,12 @@ static bool __init cpufeatures_process_feature(struct dt_cpu_feature *f)
+ }
+ }
+
+- if (!known && enable_unknown) {
+- if (!feat_try_enable_unknown(f)) {
+- pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
+- f->name);
+- return false;
+- }
++ if (!known && (!enable_unknown || !feat_try_enable_unknown(f))) {
++ pr_info("not enabling: %s (unknown and unsupported by kernel)\n",
++ f->name);
++ return false;
+ }
+
+- if (m->cpu_ftr_bit_mask)
+- cur_cpu_spec->cpu_features |= m->cpu_ftr_bit_mask;
+-
+ if (known)
+ pr_debug("enabling: %s\n", f->name);
+ else
+diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
+index 35e240a0a408..59c578f865aa 100644
+--- a/arch/powerpc/kernel/kgdb.c
++++ b/arch/powerpc/kernel/kgdb.c
+@@ -24,6 +24,7 @@
+ #include <asm/processor.h>
+ #include <asm/machdep.h>
+ #include <asm/debug.h>
++#include <asm/code-patching.h>
+ #include <linux/slab.h>
+
+ /*
+@@ -144,7 +145,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
+ if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
+ return 0;
+
+- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
++ if (*(u32 *)regs->nip == BREAK_INSTR)
+ regs->nip += BREAK_INSTR_SIZE;
+
+ return 1;
+@@ -441,16 +442,42 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ return -1;
+ }
+
++int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
++{
++ int err;
++ unsigned int instr;
++ unsigned int *addr = (unsigned int *)bpt->bpt_addr;
++
++ err = probe_kernel_address(addr, instr);
++ if (err)
++ return err;
++
++ err = patch_instruction(addr, BREAK_INSTR);
++ if (err)
++ return -EFAULT;
++
++ *(unsigned int *)bpt->saved_instr = instr;
++
++ return 0;
++}
++
++int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
++{
++ int err;
++ unsigned int instr = *(unsigned int *)bpt->saved_instr;
++ unsigned int *addr = (unsigned int *)bpt->bpt_addr;
++
++ err = patch_instruction(addr, instr);
++ if (err)
++ return -EFAULT;
++
++ return 0;
++}
++
+ /*
+ * Global data
+ */
+-struct kgdb_arch arch_kgdb_ops = {
+-#ifdef __LITTLE_ENDIAN__
+- .gdb_bpt_instr = {0x08, 0x10, 0x82, 0x7d},
+-#else
+- .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
+-#endif
+-};
++struct kgdb_arch arch_kgdb_ops;
+
+ static int kgdb_not_implemented(struct pt_regs *regs)
+ {
+diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
+index 37a110b8e7e1..ecb375040637 100644
+--- a/arch/powerpc/kernel/mce_power.c
++++ b/arch/powerpc/kernel/mce_power.c
+@@ -40,7 +40,7 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
+ {
+ pte_t *ptep;
+ unsigned int shift;
+- unsigned long flags;
++ unsigned long pfn, flags;
+ struct mm_struct *mm;
+
+ if (user_mode(regs))
+@@ -50,18 +50,22 @@ static unsigned long addr_to_pfn(struct pt_regs *regs, unsigned long addr)
+
+ local_irq_save(flags);
+ ptep = __find_linux_pte(mm->pgd, addr, NULL, &shift);
+- local_irq_restore(flags);
+
+- if (!ptep || pte_special(*ptep))
+- return ULONG_MAX;
++ if (!ptep || pte_special(*ptep)) {
++ pfn = ULONG_MAX;
++ goto out;
++ }
+
+- if (shift > PAGE_SHIFT) {
++ if (shift <= PAGE_SHIFT)
++ pfn = pte_pfn(*ptep);
++ else {
+ unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
+-
+- return pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
++ pfn = pte_pfn(__pte(pte_val(*ptep) | (addr & rpnmask)));
+ }
+
+- return pte_pfn(*ptep);
++out:
++ local_irq_restore(flags);
++ return pfn;
+ }
+
+ /* flush SLBs and reload */
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index 9b38a2e5dd35..af1e38febe49 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -904,7 +904,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
+ .reserved2 = 0,
+ .reserved3 = 0,
+ .subprocessors = 1,
+- .byte22 = OV5_FEAT(OV5_DRMEM_V2),
++ .byte22 = OV5_FEAT(OV5_DRMEM_V2) | OV5_FEAT(OV5_DRC_INFO),
+ .intarch = 0,
+ .mmu = 0,
+ .hash_ext = 0,
+diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
+index 65486c3d029b..26b03af71abd 100644
+--- a/arch/powerpc/kvm/book3s_64_vio.c
++++ b/arch/powerpc/kvm/book3s_64_vio.c
+@@ -133,7 +133,6 @@ extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
+ continue;
+
+ kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
+- return;
+ }
+ }
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 3ae3e8d141e3..dbfe32327212 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -2993,25 +2993,26 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
+ }
+ }
+
+- /*
+- * Interrupts will be enabled once we get into the guest,
+- * so tell lockdep that we're about to enable interrupts.
+- */
+- trace_hardirqs_on();
+-
+ guest_enter_irqoff();
+
+ srcu_idx = srcu_read_lock(&vc->kvm->srcu);
+
+ this_cpu_disable_ftrace();
+
++ /*
++ * Interrupts will be enabled once we get into the guest,
++ * so tell lockdep that we're about to enable interrupts.
++ */
++ trace_hardirqs_on();
++
+ trap = __kvmppc_vcore_entry();
+
++ trace_hardirqs_off();
++
+ this_cpu_enable_ftrace();
+
+ srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
+
+- trace_hardirqs_off();
+ set_irq_happened(trap);
+
+ spin_lock(&vc->lock);
+diff --git a/arch/powerpc/mm/dump_hashpagetable.c b/arch/powerpc/mm/dump_hashpagetable.c
+index 869294695048..b430e4e08af6 100644
+--- a/arch/powerpc/mm/dump_hashpagetable.c
++++ b/arch/powerpc/mm/dump_hashpagetable.c
+@@ -342,7 +342,7 @@ static unsigned long hpte_find(struct pg_state *st, unsigned long ea, int psize)
+
+ /* Look in secondary table */
+ if (slot == -1)
+- slot = base_hpte_find(ea, psize, true, &v, &r);
++ slot = base_hpte_find(ea, psize, false, &v, &r);
+
+ /* No entry found */
+ if (slot == -1)
+diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
+index 69caeb5bccb2..5404a631d583 100644
+--- a/arch/powerpc/mm/pgtable-radix.c
++++ b/arch/powerpc/mm/pgtable-radix.c
+@@ -717,8 +717,8 @@ static int __meminit stop_machine_change_mapping(void *data)
+
+ spin_unlock(&init_mm.page_table_lock);
+ pte_clear(&init_mm, params->aligned_start, params->pte);
+- create_physical_mapping(params->aligned_start, params->start, -1);
+- create_physical_mapping(params->end, params->aligned_end, -1);
++ create_physical_mapping(__pa(params->aligned_start), __pa(params->start), -1);
++ create_physical_mapping(__pa(params->end), __pa(params->aligned_end), -1);
+ spin_lock(&init_mm.page_table_lock);
+ return 0;
+ }
+diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
+index 7f86bc3eaade..62d3c72cd931 100644
+--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
++++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
+@@ -101,11 +101,12 @@ static struct property *dlpar_clone_property(struct property *prop,
+ return new_prop;
+ }
+
+-static u32 find_aa_index(struct device_node *dr_node,
+- struct property *ala_prop, const u32 *lmb_assoc)
++static bool find_aa_index(struct device_node *dr_node,
++ struct property *ala_prop,
++ const u32 *lmb_assoc, u32 *aa_index)
+ {
+- u32 *assoc_arrays;
+- u32 aa_index;
++ u32 *assoc_arrays, new_prop_size;
++ struct property *new_prop;
+ int aa_arrays, aa_array_entries, aa_array_sz;
+ int i, index;
+
+@@ -121,46 +122,39 @@ static u32 find_aa_index(struct device_node *dr_node,
+ aa_array_entries = be32_to_cpu(assoc_arrays[1]);
+ aa_array_sz = aa_array_entries * sizeof(u32);
+
+- aa_index = -1;
+ for (i = 0; i < aa_arrays; i++) {
+ index = (i * aa_array_entries) + 2;
+
+ if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
+ continue;
+
+- aa_index = i;
+- break;
++ *aa_index = i;
++ return true;
+ }
+
+- if (aa_index == -1) {
+- struct property *new_prop;
+- u32 new_prop_size;
+-
+- new_prop_size = ala_prop->length + aa_array_sz;
+- new_prop = dlpar_clone_property(ala_prop, new_prop_size);
+- if (!new_prop)
+- return -1;
+-
+- assoc_arrays = new_prop->value;
++ new_prop_size = ala_prop->length + aa_array_sz;
++ new_prop = dlpar_clone_property(ala_prop, new_prop_size);
++ if (!new_prop)
++ return false;
+
+- /* increment the number of entries in the lookup array */
+- assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
++ assoc_arrays = new_prop->value;
+
+- /* copy the new associativity into the lookup array */
+- index = aa_arrays * aa_array_entries + 2;
+- memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
++ /* increment the number of entries in the lookup array */
++ assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);
+
+- of_update_property(dr_node, new_prop);
++ /* copy the new associativity into the lookup array */
++ index = aa_arrays * aa_array_entries + 2;
++ memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);
+
+- /*
+- * The associativity lookup array index for this lmb is
+- * number of entries - 1 since we added its associativity
+- * to the end of the lookup array.
+- */
+- aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
+- }
++ of_update_property(dr_node, new_prop);
+
+- return aa_index;
++ /*
++ * The associativity lookup array index for this lmb is
++ * number of entries - 1 since we added its associativity
++ * to the end of the lookup array.
++ */
++ *aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
++ return true;
+ }
+
+ static int update_lmb_associativity_index(struct drmem_lmb *lmb)
+@@ -169,6 +163,7 @@ static int update_lmb_associativity_index(struct drmem_lmb *lmb)
+ struct property *ala_prop;
+ const u32 *lmb_assoc;
+ u32 aa_index;
++ bool found;
+
+ parent = of_find_node_by_path("/");
+ if (!parent)
+@@ -200,12 +195,12 @@ static int update_lmb_associativity_index(struct drmem_lmb *lmb)
+ return -ENODEV;
+ }
+
+- aa_index = find_aa_index(dr_node, ala_prop, lmb_assoc);
++ found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);
+
+ of_node_put(dr_node);
+ dlpar_free_cc_nodes(lmb_node);
+
+- if (aa_index < 0) {
++ if (!found) {
+ pr_err("Could not find LMB associativity\n");
+ return -1;
+ }
+diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
+index e4ea71383383..70744b4fbd9e 100644
+--- a/arch/powerpc/platforms/pseries/mobility.c
++++ b/arch/powerpc/platforms/pseries/mobility.c
+@@ -24,6 +24,7 @@
+ #include <asm/machdep.h>
+ #include <asm/rtas.h>
+ #include "pseries.h"
++#include "../../kernel/cacheinfo.h"
+
+ static struct kobject *mobility_kobj;
+
+@@ -360,11 +361,20 @@ void post_mobility_fixup(void)
+ */
+ cpus_read_lock();
+
++ /*
++ * It's common for the destination firmware to replace cache
++ * nodes. Release all of the cacheinfo hierarchy's references
++ * before updating the device tree.
++ */
++ cacheinfo_teardown();
++
+ rc = pseries_devicetree_update(MIGRATION_SCOPE);
+ if (rc)
+ printk(KERN_ERR "Post-mobility device tree update "
+ "failed: %d\n", rc);
+
++ cacheinfo_rebuild();
++
+ cpus_read_unlock();
+
+ /* Possibly switch to a new RFI flush type */
+diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
+index 602e7cc26d11..5cf340b778f1 100644
+--- a/arch/s390/kernel/kexec_elf.c
++++ b/arch/s390/kernel/kexec_elf.c
+@@ -58,7 +58,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
+ if (ret)
+ return ret;
+
+- data->memsz += buf.memsz;
++ data->memsz = ALIGN(data->memsz, phdr->p_align) + buf.memsz;
+ }
+
+ return 0;
+diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
+index 254f2c662703..6cd3cd468047 100644
+--- a/arch/sh/boards/mach-migor/setup.c
++++ b/arch/sh/boards/mach-migor/setup.c
+@@ -5,6 +5,7 @@
+ * Copyright (C) 2008 Magnus Damm
+ */
+ #include <linux/clkdev.h>
++#include <linux/dma-mapping.h>
+ #include <linux/init.h>
+ #include <linux/platform_device.h>
+ #include <linux/interrupt.h>
+diff --git a/arch/um/drivers/chan_kern.c b/arch/um/drivers/chan_kern.c
+index 05588f9466c7..13ba195f9c9c 100644
+--- a/arch/um/drivers/chan_kern.c
++++ b/arch/um/drivers/chan_kern.c
+@@ -171,19 +171,55 @@ int enable_chan(struct line *line)
+ return err;
+ }
+
++/* Items are added in IRQ context, when free_irq can't be called, and
++ * removed in process context, when it can.
++ * This handles interrupt sources which disappear, and which need to
++ * be permanently disabled. This is discovered in IRQ context, but
++ * the freeing of the IRQ must be done later.
++ */
++static DEFINE_SPINLOCK(irqs_to_free_lock);
++static LIST_HEAD(irqs_to_free);
++
++void free_irqs(void)
++{
++ struct chan *chan;
++ LIST_HEAD(list);
++ struct list_head *ele;
++ unsigned long flags;
++
++ spin_lock_irqsave(&irqs_to_free_lock, flags);
++ list_splice_init(&irqs_to_free, &list);
++ spin_unlock_irqrestore(&irqs_to_free_lock, flags);
++
++ list_for_each(ele, &list) {
++ chan = list_entry(ele, struct chan, free_list);
++
++ if (chan->input && chan->enabled)
++ um_free_irq(chan->line->driver->read_irq, chan);
++ if (chan->output && chan->enabled)
++ um_free_irq(chan->line->driver->write_irq, chan);
++ chan->enabled = 0;
++ }
++}
++
+ static void close_one_chan(struct chan *chan, int delay_free_irq)
+ {
++ unsigned long flags;
++
+ if (!chan->opened)
+ return;
+
+- /* we can safely call free now - it will be marked
+- * as free and freed once the IRQ stopped processing
+- */
+- if (chan->input && chan->enabled)
+- um_free_irq(chan->line->driver->read_irq, chan);
+- if (chan->output && chan->enabled)
+- um_free_irq(chan->line->driver->write_irq, chan);
+- chan->enabled = 0;
++ if (delay_free_irq) {
++ spin_lock_irqsave(&irqs_to_free_lock, flags);
++ list_add(&chan->free_list, &irqs_to_free);
++ spin_unlock_irqrestore(&irqs_to_free_lock, flags);
++ } else {
++ if (chan->input && chan->enabled)
++ um_free_irq(chan->line->driver->read_irq, chan);
++ if (chan->output && chan->enabled)
++ um_free_irq(chan->line->driver->write_irq, chan);
++ chan->enabled = 0;
++ }
+ if (chan->ops->close != NULL)
+ (*chan->ops->close)(chan->fd, chan->data);
+
+diff --git a/arch/um/include/asm/irq.h b/arch/um/include/asm/irq.h
+index 49ed3e35b35a..ce7a78c3bcf2 100644
+--- a/arch/um/include/asm/irq.h
++++ b/arch/um/include/asm/irq.h
+@@ -23,7 +23,7 @@
+ #define VECTOR_BASE_IRQ 15
+ #define VECTOR_IRQ_SPACE 8
+
+-#define LAST_IRQ (VECTOR_IRQ_SPACE + VECTOR_BASE_IRQ)
++#define LAST_IRQ (VECTOR_IRQ_SPACE + VECTOR_BASE_IRQ - 1)
+
+ #else
+
+diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
+index 6b7f3827d6e4..2753718d31b9 100644
+--- a/arch/um/kernel/irq.c
++++ b/arch/um/kernel/irq.c
+@@ -21,6 +21,8 @@
+ #include <irq_user.h>
+
+
++extern void free_irqs(void);
++
+ /* When epoll triggers we do not know why it did so
+ * we can also have different IRQs for read and write.
+ * This is why we keep a small irq_fd array for each fd -
+@@ -100,6 +102,8 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
+ }
+ }
+ }
++
++ free_irqs();
+ }
+
+ static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
+diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
+index 7d68f0c7cfb1..687cd1a213d5 100644
+--- a/arch/x86/Kconfig.debug
++++ b/arch/x86/Kconfig.debug
+@@ -181,7 +181,7 @@ config HAVE_MMIOTRACE_SUPPORT
+
+ config X86_DECODER_SELFTEST
+ bool "x86 instruction decoder selftest"
+- depends on DEBUG_KERNEL && KPROBES
++ depends on DEBUG_KERNEL && INSTRUCTION_DECODER
+ depends on !COMPILE_TEST
+ ---help---
+ Perform x86 instruction decoder selftests at build time.
+diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
+index 8f4c98fdd03c..f03100bc5fd1 100644
+--- a/arch/x86/events/intel/pt.c
++++ b/arch/x86/events/intel/pt.c
+@@ -1213,7 +1213,8 @@ static int pt_event_addr_filters_validate(struct list_head *filters)
+ static void pt_event_addr_filters_sync(struct perf_event *event)
+ {
+ struct perf_addr_filters_head *head = perf_event_addr_filters(event);
+- unsigned long msr_a, msr_b, *offs = event->addr_filters_offs;
++ unsigned long msr_a, msr_b;
++ struct perf_addr_filter_range *fr = event->addr_filter_ranges;
+ struct pt_filters *filters = event->hw.addr_filters;
+ struct perf_addr_filter *filter;
+ int range = 0;
+@@ -1222,12 +1223,12 @@ static void pt_event_addr_filters_sync(struct perf_event *event)
+ return;
+
+ list_for_each_entry(filter, &head->list, entry) {
+- if (filter->path.dentry && !offs[range]) {
++ if (filter->path.dentry && !fr[range].start) {
+ msr_a = msr_b = 0;
+ } else {
+ /* apply the offset */
+- msr_a = filter->offset + offs[range];
+- msr_b = filter->size + msr_a - 1;
++ msr_a = fr[range].start;
++ msr_b = msr_a + fr[range].size - 1;
+ }
+
+ filters->filter[range].msr_a = msr_a;
+diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
+index b3ec519e3982..71e1df860176 100644
+--- a/arch/x86/include/asm/pgtable_32.h
++++ b/arch/x86/include/asm/pgtable_32.h
+@@ -106,6 +106,6 @@ do { \
+ * with only a host target support using a 32-bit type for internal
+ * representation.
+ */
+-#define LOWMEM_PAGES ((((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
++#define LOWMEM_PAGES ((((_ULL(2)<<31) - __PAGE_OFFSET) >> PAGE_SHIFT))
+
+ #endif /* _ASM_X86_PGTABLE_32_H */
+diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
+index 8e36f249646e..904e18bb38c5 100644
+--- a/arch/x86/kernel/kgdb.c
++++ b/arch/x86/kernel/kgdb.c
+@@ -438,7 +438,7 @@ static void kgdb_disable_hw_debug(struct pt_regs *regs)
+ */
+ void kgdb_roundup_cpus(unsigned long flags)
+ {
+- apic->send_IPI_allbutself(APIC_DM_NMI);
++ apic->send_IPI_allbutself(NMI_VECTOR);
+ }
+ #endif
+
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index a6836ab0fcc7..b72296bd04a2 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -664,9 +664,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
+ * that UV should be updated so that smp_call_function_many(),
+ * etc, are optimal on UV.
+ */
+- unsigned int cpu;
+-
+- cpu = smp_processor_id();
+ cpumask = uv_flush_tlb_others(cpumask, info);
+ if (cpumask)
+ smp_call_function_many(cpumask, flush_tlb_func_remote,
+diff --git a/block/blk-merge.c b/block/blk-merge.c
+index 2776ee6c5c3d..7efa8c3e2b72 100644
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -309,13 +309,7 @@ void blk_recalc_rq_segments(struct request *rq)
+
+ void blk_recount_segments(struct request_queue *q, struct bio *bio)
+ {
+- unsigned short seg_cnt;
+-
+- /* estimate segment number by bi_vcnt for non-cloned bio */
+- if (bio_flagged(bio, BIO_CLONED))
+- seg_cnt = bio_segments(bio);
+- else
+- seg_cnt = bio->bi_vcnt;
++ unsigned short seg_cnt = bio_segments(bio);
+
+ if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
+ (seg_cnt < queue_max_segments(q)))
+diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
+index f8ec3d4ba4a8..a5718c0a3dc4 100644
+--- a/crypto/pcrypt.c
++++ b/crypto/pcrypt.c
+@@ -394,7 +394,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
+ int ret;
+
+ pinst->kobj.kset = pcrypt_kset;
+- ret = kobject_add(&pinst->kobj, NULL, name);
++ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
+ if (!ret)
+ kobject_uevent(&pinst->kobj, KOBJ_ADD);
+
+diff --git a/crypto/tgr192.c b/crypto/tgr192.c
+index 022d3dd76c3b..f8e1d9f9938f 100644
+--- a/crypto/tgr192.c
++++ b/crypto/tgr192.c
+@@ -25,8 +25,9 @@
+ #include <linux/init.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
+-#include <asm/byteorder.h>
+ #include <linux/types.h>
++#include <asm/byteorder.h>
++#include <asm/unaligned.h>
+
+ #define TGR192_DIGEST_SIZE 24
+ #define TGR160_DIGEST_SIZE 20
+@@ -468,10 +469,9 @@ static void tgr192_transform(struct tgr192_ctx *tctx, const u8 * data)
+ u64 a, b, c, aa, bb, cc;
+ u64 x[8];
+ int i;
+- const __le64 *ptr = (const __le64 *)data;
+
+ for (i = 0; i < 8; i++)
+- x[i] = le64_to_cpu(ptr[i]);
++ x[i] = get_unaligned_le64(data + i * sizeof(__le64));
+
+ /* save */
+ a = aa = tctx->a;
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 30ccd94f87d2..ded6c5c17fd7 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -1056,6 +1056,13 @@ static int acpi_lpss_suspend_noirq(struct device *dev)
+ int ret;
+
+ if (pdata->dev_desc->resume_from_noirq) {
++ /*
++ * The driver's ->suspend_late callback will be invoked by
++ * acpi_lpss_do_suspend_late(), with the assumption that the
++ * driver really wanted to run that code in ->suspend_noirq, but
++ * it could not run after acpi_dev_suspend() and the driver
++ * expected the latter to be called in the "late" phase.
++ */
+ ret = acpi_lpss_do_suspend_late(dev);
+ if (ret)
+ return ret;
+@@ -1086,16 +1093,99 @@ static int acpi_lpss_resume_noirq(struct device *dev)
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+ int ret;
+
+- ret = acpi_subsys_resume_noirq(dev);
++ /* Follow acpi_subsys_resume_noirq(). */
++ if (dev_pm_may_skip_resume(dev))
++ return 0;
++
++ if (dev_pm_smart_suspend_and_suspended(dev))
++ pm_runtime_set_active(dev);
++
++ ret = pm_generic_resume_noirq(dev);
+ if (ret)
+ return ret;
+
+- if (!dev_pm_may_skip_resume(dev) && pdata->dev_desc->resume_from_noirq)
+- ret = acpi_lpss_do_resume_early(dev);
++ if (!pdata->dev_desc->resume_from_noirq)
++ return 0;
+
+- return ret;
++ /*
++ * The driver's ->resume_early callback will be invoked by
++ * acpi_lpss_do_resume_early(), with the assumption that the driver
++ * really wanted to run that code in ->resume_noirq, but it could not
++ * run before acpi_dev_resume() and the driver expected the latter to be
++ * called in the "early" phase.
++ */
++ return acpi_lpss_do_resume_early(dev);
++}
++
++static int acpi_lpss_do_restore_early(struct device *dev)
++{
++ int ret = acpi_lpss_resume(dev);
++
++ return ret ? ret : pm_generic_restore_early(dev);
+ }
+
++static int acpi_lpss_restore_early(struct device *dev)
++{
++ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
++
++ if (pdata->dev_desc->resume_from_noirq)
++ return 0;
++
++ return acpi_lpss_do_restore_early(dev);
++}
++
++static int acpi_lpss_restore_noirq(struct device *dev)
++{
++ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
++ int ret;
++
++ ret = pm_generic_restore_noirq(dev);
++ if (ret)
++ return ret;
++
++ if (!pdata->dev_desc->resume_from_noirq)
++ return 0;
++
++ /* This is analogous to what happens in acpi_lpss_resume_noirq(). */
++ return acpi_lpss_do_restore_early(dev);
++}
++
++static int acpi_lpss_do_poweroff_late(struct device *dev)
++{
++ int ret = pm_generic_poweroff_late(dev);
++
++ return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
++}
++
++static int acpi_lpss_poweroff_late(struct device *dev)
++{
++ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
++
++ if (dev_pm_smart_suspend_and_suspended(dev))
++ return 0;
++
++ if (pdata->dev_desc->resume_from_noirq)
++ return 0;
++
++ return acpi_lpss_do_poweroff_late(dev);
++}
++
++static int acpi_lpss_poweroff_noirq(struct device *dev)
++{
++ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
++
++ if (dev_pm_smart_suspend_and_suspended(dev))
++ return 0;
++
++ if (pdata->dev_desc->resume_from_noirq) {
++ /* This is analogous to the acpi_lpss_suspend_noirq() case. */
++ int ret = acpi_lpss_do_poweroff_late(dev);
++ if (ret)
++ return ret;
++ }
++
++ return pm_generic_poweroff_noirq(dev);
++}
+ #endif /* CONFIG_PM_SLEEP */
+
+ static int acpi_lpss_runtime_suspend(struct device *dev)
+@@ -1129,14 +1219,11 @@ static struct dev_pm_domain acpi_lpss_pm_domain = {
+ .resume_noirq = acpi_lpss_resume_noirq,
+ .resume_early = acpi_lpss_resume_early,
+ .freeze = acpi_subsys_freeze,
+- .freeze_late = acpi_subsys_freeze_late,
+- .freeze_noirq = acpi_subsys_freeze_noirq,
+- .thaw_noirq = acpi_subsys_thaw_noirq,
+- .poweroff = acpi_subsys_suspend,
+- .poweroff_late = acpi_lpss_suspend_late,
+- .poweroff_noirq = acpi_lpss_suspend_noirq,
+- .restore_noirq = acpi_lpss_resume_noirq,
+- .restore_early = acpi_lpss_resume_early,
++ .poweroff = acpi_subsys_poweroff,
++ .poweroff_late = acpi_lpss_poweroff_late,
++ .poweroff_noirq = acpi_lpss_poweroff_noirq,
++ .restore_noirq = acpi_lpss_restore_noirq,
++ .restore_early = acpi_lpss_restore_early,
+ #endif
+ .runtime_suspend = acpi_lpss_runtime_suspend,
+ .runtime_resume = acpi_lpss_runtime_resume,
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index 870eb5c7516a..a25d77b3a16a 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -467,8 +467,11 @@ static int acpi_button_resume(struct device *dev)
+ struct acpi_button *button = acpi_driver_data(device);
+
+ button->suspended = false;
+- if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users)
++ if (button->type == ACPI_BUTTON_TYPE_LID && button->input->users) {
++ button->last_state = !!acpi_lid_evaluate_state(device);
++ button->last_time = ktime_get();
+ acpi_lid_initialize_state(device);
++ }
+ return 0;
+ }
+ #endif
+diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
+index e0927c5fd282..54b6547d32b2 100644
+--- a/drivers/acpi/device_pm.c
++++ b/drivers/acpi/device_pm.c
+@@ -1077,7 +1077,7 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq);
+ * acpi_subsys_resume_noirq - Run the device driver's "noirq" resume callback.
+ * @dev: Device to handle.
+ */
+-int acpi_subsys_resume_noirq(struct device *dev)
++static int acpi_subsys_resume_noirq(struct device *dev)
+ {
+ if (dev_pm_may_skip_resume(dev))
+ return 0;
+@@ -1092,7 +1092,6 @@ int acpi_subsys_resume_noirq(struct device *dev)
+
+ return pm_generic_resume_noirq(dev);
+ }
+-EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq);
+
+ /**
+ * acpi_subsys_resume_early - Resume device using ACPI.
+@@ -1102,12 +1101,11 @@ EXPORT_SYMBOL_GPL(acpi_subsys_resume_noirq);
+ * generic early resume procedure for it during system transition into the
+ * working state.
+ */
+-int acpi_subsys_resume_early(struct device *dev)
++static int acpi_subsys_resume_early(struct device *dev)
+ {
+ int ret = acpi_dev_resume(dev);
+ return ret ? ret : pm_generic_resume_early(dev);
+ }
+-EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
+
+ /**
+ * acpi_subsys_freeze - Run the device driver's freeze callback.
+@@ -1116,65 +1114,81 @@ EXPORT_SYMBOL_GPL(acpi_subsys_resume_early);
+ int acpi_subsys_freeze(struct device *dev)
+ {
+ /*
+- * This used to be done in acpi_subsys_prepare() for all devices and
+- * some drivers may depend on it, so do it here. Ideally, however,
+- * runtime-suspended devices should not be touched during freeze/thaw
+- * transitions.
++ * Resume all runtime-suspended devices before creating a snapshot
++ * image of system memory, because the restore kernel generally cannot
++ * be expected to always handle them consistently and they need to be
++ * put into the runtime-active metastate during system resume anyway,
++ * so it is better to ensure that the state saved in the image will be
++ * always consistent with that.
+ */
+- if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
+- pm_runtime_resume(dev);
++ pm_runtime_resume(dev);
+
+ return pm_generic_freeze(dev);
+ }
+ EXPORT_SYMBOL_GPL(acpi_subsys_freeze);
+
+ /**
+- * acpi_subsys_freeze_late - Run the device driver's "late" freeze callback.
+- * @dev: Device to handle.
++ * acpi_subsys_restore_early - Restore device using ACPI.
++ * @dev: Device to restore.
+ */
+-int acpi_subsys_freeze_late(struct device *dev)
++int acpi_subsys_restore_early(struct device *dev)
+ {
++ int ret = acpi_dev_resume(dev);
++ return ret ? ret : pm_generic_restore_early(dev);
++}
++EXPORT_SYMBOL_GPL(acpi_subsys_restore_early);
+
+- if (dev_pm_smart_suspend_and_suspended(dev))
+- return 0;
++/**
++ * acpi_subsys_poweroff - Run the device driver's poweroff callback.
++ * @dev: Device to handle.
++ *
++ * Follow PCI and resume devices from runtime suspend before running their
++ * system poweroff callbacks, unless the driver can cope with runtime-suspended
++ * devices during system suspend and there are no ACPI-specific reasons for
++ * resuming them.
++ */
++int acpi_subsys_poweroff(struct device *dev)
++{
++ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
++ acpi_dev_needs_resume(dev, ACPI_COMPANION(dev)))
++ pm_runtime_resume(dev);
+
+- return pm_generic_freeze_late(dev);
++ return pm_generic_poweroff(dev);
+ }
+-EXPORT_SYMBOL_GPL(acpi_subsys_freeze_late);
++EXPORT_SYMBOL_GPL(acpi_subsys_poweroff);
+
+ /**
+- * acpi_subsys_freeze_noirq - Run the device driver's "noirq" freeze callback.
++ * acpi_subsys_poweroff_late - Run the device driver's poweroff callback.
+ * @dev: Device to handle.
++ *
++ * Carry out the generic late poweroff procedure for @dev and use ACPI to put
++ * it into a low-power state during system transition into a sleep state.
+ */
+-int acpi_subsys_freeze_noirq(struct device *dev)
++static int acpi_subsys_poweroff_late(struct device *dev)
+ {
++ int ret;
+
+ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+
+- return pm_generic_freeze_noirq(dev);
++ ret = pm_generic_poweroff_late(dev);
++ if (ret)
++ return ret;
++
++ return acpi_dev_suspend(dev, device_may_wakeup(dev));
+ }
+-EXPORT_SYMBOL_GPL(acpi_subsys_freeze_noirq);
+
+ /**
+- * acpi_subsys_thaw_noirq - Run the device driver's "noirq" thaw callback.
+- * @dev: Device to handle.
++ * acpi_subsys_poweroff_noirq - Run the driver's "noirq" poweroff callback.
++ * @dev: Device to suspend.
+ */
+-int acpi_subsys_thaw_noirq(struct device *dev)
++static int acpi_subsys_poweroff_noirq(struct device *dev)
+ {
+- /*
+- * If the device is in runtime suspend, the "thaw" code may not work
+- * correctly with it, so skip the driver callback and make the PM core
+- * skip all of the subsequent "thaw" callbacks for the device.
+- */
+- if (dev_pm_smart_suspend_and_suspended(dev)) {
+- dev_pm_skip_next_resume_phases(dev);
++ if (dev_pm_smart_suspend_and_suspended(dev))
+ return 0;
+- }
+
+- return pm_generic_thaw_noirq(dev);
++ return pm_generic_poweroff_noirq(dev);
+ }
+-EXPORT_SYMBOL_GPL(acpi_subsys_thaw_noirq);
+ #endif /* CONFIG_PM_SLEEP */
+
+ static struct dev_pm_domain acpi_general_pm_domain = {
+@@ -1190,14 +1204,10 @@ static struct dev_pm_domain acpi_general_pm_domain = {
+ .resume_noirq = acpi_subsys_resume_noirq,
+ .resume_early = acpi_subsys_resume_early,
+ .freeze = acpi_subsys_freeze,
+- .freeze_late = acpi_subsys_freeze_late,
+- .freeze_noirq = acpi_subsys_freeze_noirq,
+- .thaw_noirq = acpi_subsys_thaw_noirq,
+- .poweroff = acpi_subsys_suspend,
+- .poweroff_late = acpi_subsys_suspend_late,
+- .poweroff_noirq = acpi_subsys_suspend_noirq,
+- .restore_noirq = acpi_subsys_resume_noirq,
+- .restore_early = acpi_subsys_resume_early,
++ .poweroff = acpi_subsys_poweroff,
++ .poweroff_late = acpi_subsys_poweroff_late,
++ .poweroff_noirq = acpi_subsys_poweroff_noirq,
++ .restore_early = acpi_subsys_restore_early,
+ #endif
+ },
+ };
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index b5f57c69c487..2bdb250a2142 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -191,7 +191,6 @@ struct ata_port_operations ahci_pmp_retry_srst_ops = {
+ EXPORT_SYMBOL_GPL(ahci_pmp_retry_srst_ops);
+
+ static bool ahci_em_messages __read_mostly = true;
+-EXPORT_SYMBOL_GPL(ahci_em_messages);
+ module_param(ahci_em_messages, bool, 0444);
+ /* add other LED protocol types when they become supported */
+ MODULE_PARM_DESC(ahci_em_messages,
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index 985ccced33a2..742bc60e9cca 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -179,10 +179,14 @@ void device_pm_move_to_tail(struct device *dev)
+ * of the link. If DL_FLAG_PM_RUNTIME is not set, DL_FLAG_RPM_ACTIVE will be
+ * ignored.
+ *
+- * If the DL_FLAG_AUTOREMOVE_CONSUMER is set, the link will be removed
+- * automatically when the consumer device driver unbinds from it.
+- * The combination of both DL_FLAG_AUTOREMOVE_CONSUMER and DL_FLAG_STATELESS
+- * set is invalid and will cause NULL to be returned.
++ * If the DL_FLAG_AUTOREMOVE_CONSUMER flag is set, the link will be removed
++ * automatically when the consumer device driver unbinds from it. Analogously,
++ * if DL_FLAG_AUTOREMOVE_SUPPLIER is set in @flags, the link will be removed
++ * automatically when the supplier device driver unbinds from it.
++ *
++ * The combination of DL_FLAG_STATELESS and either DL_FLAG_AUTOREMOVE_CONSUMER
++ * or DL_FLAG_AUTOREMOVE_SUPPLIER set in @flags at the same time is invalid and
++ * will cause NULL to be returned upfront.
+ *
+ * A side effect of the link creation is re-ordering of dpm_list and the
+ * devices_kset list by moving the consumer device and all devices depending
+@@ -199,10 +203,17 @@ struct device_link *device_link_add(struct device *consumer,
+ struct device_link *link;
+
+ if (!consumer || !supplier ||
+- ((flags & DL_FLAG_STATELESS) &&
+- (flags & DL_FLAG_AUTOREMOVE_CONSUMER)))
++ (flags & DL_FLAG_STATELESS &&
++ flags & (DL_FLAG_AUTOREMOVE_CONSUMER | DL_FLAG_AUTOREMOVE_SUPPLIER)))
+ return NULL;
+
++ if (flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) {
++ if (pm_runtime_get_sync(supplier) < 0) {
++ pm_runtime_put_noidle(supplier);
++ return NULL;
++ }
++ }
++
+ device_links_write_lock();
+ device_pm_lock();
+
+@@ -217,35 +228,51 @@ struct device_link *device_link_add(struct device *consumer,
+ goto out;
+ }
+
+- list_for_each_entry(link, &supplier->links.consumers, s_node)
+- if (link->consumer == consumer) {
+- kref_get(&link->kref);
++ list_for_each_entry(link, &supplier->links.consumers, s_node) {
++ if (link->consumer != consumer)
++ continue;
++
++ /*
++ * Don't return a stateless link if the caller wants a stateful
++ * one and vice versa.
++ */
++ if (WARN_ON((flags & DL_FLAG_STATELESS) != (link->flags & DL_FLAG_STATELESS))) {
++ link = NULL;
+ goto out;
+ }
+
++ if (flags & DL_FLAG_AUTOREMOVE_CONSUMER)
++ link->flags |= DL_FLAG_AUTOREMOVE_CONSUMER;
++
++ if (flags & DL_FLAG_AUTOREMOVE_SUPPLIER)
++ link->flags |= DL_FLAG_AUTOREMOVE_SUPPLIER;
++
++ if (flags & DL_FLAG_PM_RUNTIME) {
++ if (!(link->flags & DL_FLAG_PM_RUNTIME)) {
++ pm_runtime_new_link(consumer);
++ link->flags |= DL_FLAG_PM_RUNTIME;
++ }
++ if (flags & DL_FLAG_RPM_ACTIVE)
++ refcount_inc(&link->rpm_active);
++ }
++
++ kref_get(&link->kref);
++ goto out;
++ }
++
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (!link)
+ goto out;
+
++ refcount_set(&link->rpm_active, 1);
++
+ if (flags & DL_FLAG_PM_RUNTIME) {
+- if (flags & DL_FLAG_RPM_ACTIVE) {
+- if (pm_runtime_get_sync(supplier) < 0) {
+- pm_runtime_put_noidle(supplier);
+- kfree(link);
+- link = NULL;
+- goto out;
+- }
+- link->rpm_active = true;
+- }
++ if (flags & DL_FLAG_RPM_ACTIVE)
++ refcount_inc(&link->rpm_active);
++
+ pm_runtime_new_link(consumer);
+- /*
+- * If the link is being added by the consumer driver at probe
+- * time, balance the decrementation of the supplier's runtime PM
+- * usage counter after consumer probe in driver_probe_device().
+- */
+- if (consumer->links.status == DL_DEV_PROBING)
+- pm_runtime_get_noresume(supplier);
+ }
++
+ get_device(supplier);
+ link->supplier = supplier;
+ INIT_LIST_HEAD(&link->s_node);
+@@ -307,12 +334,19 @@ struct device_link *device_link_add(struct device *consumer,
+ out:
+ device_pm_unlock();
+ device_links_write_unlock();
++
++ if ((flags & DL_FLAG_PM_RUNTIME && flags & DL_FLAG_RPM_ACTIVE) && !link)
++ pm_runtime_put(supplier);
++
+ return link;
+ }
+ EXPORT_SYMBOL_GPL(device_link_add);
+
+ static void device_link_free(struct device_link *link)
+ {
++ while (refcount_dec_not_one(&link->rpm_active))
++ pm_runtime_put(link->supplier);
++
+ put_device(link->consumer);
+ put_device(link->supplier);
+ kfree(link);
+@@ -539,11 +573,11 @@ void device_links_no_driver(struct device *dev)
+ */
+ void device_links_driver_cleanup(struct device *dev)
+ {
+- struct device_link *link;
++ struct device_link *link, *ln;
+
+ device_links_write_lock();
+
+- list_for_each_entry(link, &dev->links.consumers, s_node) {
++ list_for_each_entry_safe(link, ln, &dev->links.consumers, s_node) {
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
+index beb85c31f3fa..303ce7d54a30 100644
+--- a/drivers/base/power/runtime.c
++++ b/drivers/base/power/runtime.c
+@@ -268,11 +268,8 @@ static int rpm_get_suppliers(struct device *dev)
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
+ int retval;
+
+- if (!(link->flags & DL_FLAG_PM_RUNTIME))
+- continue;
+-
+- if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND ||
+- link->rpm_active)
++ if (!(link->flags & DL_FLAG_PM_RUNTIME) ||
++ READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
+ continue;
+
+ retval = pm_runtime_get_sync(link->supplier);
+@@ -281,7 +278,7 @@ static int rpm_get_suppliers(struct device *dev)
+ pm_runtime_put_noidle(link->supplier);
+ return retval;
+ }
+- link->rpm_active = true;
++ refcount_inc(&link->rpm_active);
+ }
+ return 0;
+ }
+@@ -290,12 +287,13 @@ static void rpm_put_suppliers(struct device *dev)
+ {
+ struct device_link *link;
+
+- list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+- if (link->rpm_active &&
+- READ_ONCE(link->status) != DL_STATE_SUPPLIER_UNBIND) {
++ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
++ if (READ_ONCE(link->status) == DL_STATE_SUPPLIER_UNBIND)
++ continue;
++
++ while (refcount_dec_not_one(&link->rpm_active))
+ pm_runtime_put(link->supplier);
+- link->rpm_active = false;
+- }
++ }
+ }
+
+ /**
+@@ -1531,7 +1529,7 @@ void pm_runtime_remove(struct device *dev)
+ *
+ * Check links from this device to any consumers and if any of them have active
+ * runtime PM references to the device, drop the usage counter of the device
+- * (once per link).
++ * (as many times as needed).
+ *
+ * Links with the DL_FLAG_STATELESS flag set are ignored.
+ *
+@@ -1553,10 +1551,8 @@ void pm_runtime_clean_up_links(struct device *dev)
+ if (link->flags & DL_FLAG_STATELESS)
+ continue;
+
+- if (link->rpm_active) {
++ while (refcount_dec_not_one(&link->rpm_active))
+ pm_runtime_put_noidle(dev);
+- link->rpm_active = false;
+- }
+ }
+
+ device_links_read_unlock(idx);
+@@ -1574,8 +1570,11 @@ void pm_runtime_get_suppliers(struct device *dev)
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+- if (link->flags & DL_FLAG_PM_RUNTIME)
++ if (link->flags & DL_FLAG_PM_RUNTIME) {
++ link->supplier_preactivated = true;
++ refcount_inc(&link->rpm_active);
+ pm_runtime_get_sync(link->supplier);
++ }
+
+ device_links_read_unlock(idx);
+ }
+@@ -1592,8 +1591,11 @@ void pm_runtime_put_suppliers(struct device *dev)
+ idx = device_links_read_lock();
+
+ list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
+- if (link->flags & DL_FLAG_PM_RUNTIME)
+- pm_runtime_put(link->supplier);
++ if (link->supplier_preactivated) {
++ link->supplier_preactivated = false;
++ if (refcount_dec_not_one(&link->rpm_active))
++ pm_runtime_put(link->supplier);
++ }
+
+ device_links_read_unlock(idx);
+ }
+@@ -1607,8 +1609,6 @@ void pm_runtime_new_link(struct device *dev)
+
+ void pm_runtime_drop_link(struct device *dev)
+ {
+- rpm_put_suppliers(dev);
+-
+ spin_lock_irq(&dev->power.lock);
+ WARN_ON(dev->power.links_count == 0);
+ dev->power.links_count--;
+diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
+index 7c84f64c74f7..2dfa2e048745 100644
+--- a/drivers/base/power/wakeup.c
++++ b/drivers/base/power/wakeup.c
+@@ -875,7 +875,7 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup);
+
+ void pm_system_cancel_wakeup(void)
+ {
+- atomic_dec(&pm_abort_suspend);
++ atomic_dec_if_positive(&pm_abort_suspend);
+ }
+
+ void pm_wakeup_clear(bool reset)
+diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
+index f499a469e66d..12b2cc9a3fbe 100644
+--- a/drivers/bcma/driver_pci.c
++++ b/drivers/bcma/driver_pci.c
+@@ -78,7 +78,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
+ v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
+ }
+
+- v = BCMA_CORE_PCI_MDIODATA_START;
++ v |= BCMA_CORE_PCI_MDIODATA_START;
+ v |= BCMA_CORE_PCI_MDIODATA_READ;
+ v |= BCMA_CORE_PCI_MDIODATA_TA;
+
+@@ -121,7 +121,7 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
+ v |= (address << BCMA_CORE_PCI_MDIODATA_REGADDR_SHF_OLD);
+ }
+
+- v = BCMA_CORE_PCI_MDIODATA_START;
++ v |= BCMA_CORE_PCI_MDIODATA_START;
+ v |= BCMA_CORE_PCI_MDIODATA_WRITE;
+ v |= BCMA_CORE_PCI_MDIODATA_TA;
+ v |= data;
+diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
+index a49a8d91a599..5e3885f5729b 100644
+--- a/drivers/block/drbd/drbd_main.c
++++ b/drivers/block/drbd/drbd_main.c
+@@ -334,6 +334,8 @@ static int drbd_thread_setup(void *arg)
+ thi->name[0],
+ resource->name);
+
++ allow_kernel_signal(DRBD_SIGKILL);
++ allow_kernel_signal(SIGXCPU);
+ restart:
+ retval = thi->function(thi);
+
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 585378bc988c..b942f4c8cea8 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -2506,6 +2506,7 @@ again:
+ ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
+ if (ret) {
+ obj_req->result = ret;
++ obj_req->xferred = 0;
+ return true;
+ }
+ return false;
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index b6f63e762021..54c8c8644df2 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -888,10 +888,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
+ SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff,
+- SYSC_QUIRK_LEGACY_IDLE),
++ 0),
+ /* Some timers on omap4 and later */
+ SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffffffff,
+- SYSC_QUIRK_LEGACY_IDLE),
++ 0),
+ SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
+ SYSC_QUIRK_LEGACY_IDLE),
+ /* Uarts on omap4 and later */
+@@ -1400,6 +1400,9 @@ static void sysc_unprepare(struct sysc *ddata)
+ {
+ int i;
+
++ if (!ddata->clocks)
++ return;
++
+ for (i = 0; i < SYSC_MAX_CLOCKS; i++) {
+ if (!IS_ERR_OR_NULL(ddata->clocks[i]))
+ clk_unprepare(ddata->clocks[i]);
+@@ -1593,6 +1596,16 @@ static const struct sysc_regbits sysc_regbits_omap4_mcasp = {
+ static const struct sysc_capabilities sysc_omap4_mcasp = {
+ .type = TI_SYSC_OMAP4_MCASP,
+ .regbits = &sysc_regbits_omap4_mcasp,
++ .mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
++};
++
++/*
++ * McASP found on dra7 and later
++ */
++static const struct sysc_capabilities sysc_dra7_mcasp = {
++ .type = TI_SYSC_OMAP4_SIMPLE,
++ .regbits = &sysc_regbits_omap4_simple,
++ .mod_quirks = SYSC_QUIRK_OPT_CLKS_NEEDED,
+ };
+
+ /*
+@@ -1821,6 +1834,7 @@ static const struct of_device_id sysc_match[] = {
+ { .compatible = "ti,sysc-omap3-sham", .data = &sysc_omap3_sham, },
+ { .compatible = "ti,sysc-omap-aes", .data = &sysc_omap3_aes, },
+ { .compatible = "ti,sysc-mcasp", .data = &sysc_omap4_mcasp, },
++ { .compatible = "ti,sysc-dra7-mcasp", .data = &sysc_dra7_mcasp, },
+ { .compatible = "ti,sysc-usb-host-fs",
+ .data = &sysc_omap4_usb_host_fs, },
+ { .compatible = "ti,sysc-dra7-mcan", .data = &sysc_dra7_mcan, },
+diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c
+index 6767d965c36c..19bde680aee1 100644
+--- a/drivers/char/hw_random/bcm2835-rng.c
++++ b/drivers/char/hw_random/bcm2835-rng.c
+@@ -171,14 +171,16 @@ static int bcm2835_rng_probe(struct platform_device *pdev)
+ priv->rng.read = bcm2835_rng_read;
+ priv->rng.cleanup = bcm2835_rng_cleanup;
+
+- rng_id = of_match_node(bcm2835_rng_of_match, np);
+- if (!rng_id)
+- return -EINVAL;
+-
+- /* Check for rng init function, execute it */
+- of_data = rng_id->data;
+- if (of_data)
+- priv->mask_interrupts = of_data->mask_interrupts;
++ if (dev_of_node(dev)) {
++ rng_id = of_match_node(bcm2835_rng_of_match, np);
++ if (!rng_id)
++ return -EINVAL;
++
++ /* Check for rng init function, execute it */
++ of_data = rng_id->data;
++ if (of_data)
++ priv->mask_interrupts = of_data->mask_interrupts;
++ }
+
+ /* register driver */
+ err = devm_hwrng_register(dev, &priv->rng);
+diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
+index 648e39ce6bd9..8df3cad7c97a 100644
+--- a/drivers/char/hw_random/omap3-rom-rng.c
++++ b/drivers/char/hw_random/omap3-rom-rng.c
+@@ -20,6 +20,8 @@
+ #include <linux/workqueue.h>
+ #include <linux/clk.h>
+ #include <linux/err.h>
++#include <linux/of.h>
++#include <linux/of_device.h>
+ #include <linux/platform_device.h>
+
+ #define RNG_RESET 0x01
+@@ -86,14 +88,18 @@ static int omap3_rom_rng_read(struct hwrng *rng, void *data, size_t max, bool w)
+
+ static struct hwrng omap3_rom_rng_ops = {
+ .name = "omap3-rom",
+- .read = omap3_rom_rng_read,
+ };
+
+ static int omap3_rom_rng_probe(struct platform_device *pdev)
+ {
+ int ret = 0;
+
+- pr_info("initializing\n");
++ omap3_rom_rng_ops.read = of_device_get_match_data(&pdev->dev);
++ if (!omap3_rom_rng_ops.read) {
++ dev_err(&pdev->dev, "missing rom code handler\n");
++
++ return -ENODEV;
++ }
+
+ omap3_rom_rng_call = pdev->dev.platform_data;
+ if (!omap3_rom_rng_call) {
+@@ -126,9 +132,16 @@ static int omap3_rom_rng_remove(struct platform_device *pdev)
+ return 0;
+ }
+
++static const struct of_device_id omap_rom_rng_match[] = {
++ { .compatible = "nokia,n900-rom-rng", .data = omap3_rom_rng_read, },
++ { /* sentinel */ },
++};
++MODULE_DEVICE_TABLE(of, omap_rom_rng_match);
++
+ static struct platform_driver omap3_rom_rng_driver = {
+ .driver = {
+ .name = "omap3-rom-rng",
++ .of_match_table = omap_rom_rng_match,
+ },
+ .probe = omap3_rom_rng_probe,
+ .remove = omap3_rom_rng_remove,
+diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
+index 91f2d9219489..980eb7c60952 100644
+--- a/drivers/char/ipmi/ipmi_msghandler.c
++++ b/drivers/char/ipmi/ipmi_msghandler.c
+@@ -2965,8 +2965,11 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf,
+ bmc->pdev.name = "ipmi_bmc";
+
+ rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
+- if (rv < 0)
++ if (rv < 0) {
++ kfree(bmc);
+ goto out;
++ }
++
+ bmc->pdev.dev.driver = &ipmidriver.driver;
+ bmc->pdev.id = rv;
+ bmc->pdev.dev.release = release_bmc_device;
+diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
+index e6124bd548df..ed4dc3b1843e 100644
+--- a/drivers/char/ipmi/kcs_bmc.c
++++ b/drivers/char/ipmi/kcs_bmc.c
+@@ -440,12 +440,13 @@ struct kcs_bmc *kcs_bmc_alloc(struct device *dev, int sizeof_priv, u32 channel)
+ kcs_bmc->data_in = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ kcs_bmc->data_out = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+ kcs_bmc->kbuffer = devm_kmalloc(dev, KCS_MSG_BUFSIZ, GFP_KERNEL);
+- if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer)
+- return NULL;
+
+ kcs_bmc->miscdev.minor = MISC_DYNAMIC_MINOR;
+ kcs_bmc->miscdev.name = devm_kasprintf(dev, GFP_KERNEL, "%s%u",
+ DEVICE_NAME, channel);
++ if (!kcs_bmc->data_in || !kcs_bmc->data_out || !kcs_bmc->kbuffer ||
++ !kcs_bmc->miscdev.name)
++ return NULL;
+ kcs_bmc->miscdev.fops = &kcs_bmc_fops;
+
+ return kcs_bmc;
+diff --git a/drivers/clk/actions/owl-factor.c b/drivers/clk/actions/owl-factor.c
+index 317d4a9e112e..f15e2621fa18 100644
+--- a/drivers/clk/actions/owl-factor.c
++++ b/drivers/clk/actions/owl-factor.c
+@@ -64,11 +64,10 @@ static unsigned int _get_table_val(const struct clk_factor_table *table,
+ return val;
+ }
+
+-static int clk_val_best(struct clk_hw *hw, unsigned long rate,
++static int owl_clk_val_best(const struct owl_factor_hw *factor_hw,
++ struct clk_hw *hw, unsigned long rate,
+ unsigned long *best_parent_rate)
+ {
+- struct owl_factor *factor = hw_to_owl_factor(hw);
+- struct owl_factor_hw *factor_hw = &factor->factor_hw;
+ const struct clk_factor_table *clkt = factor_hw->table;
+ unsigned long parent_rate, try_parent_rate, best = 0, cur_rate;
+ unsigned long parent_rate_saved = *best_parent_rate;
+@@ -126,7 +125,7 @@ long owl_factor_helper_round_rate(struct owl_clk_common *common,
+ const struct clk_factor_table *clkt = factor_hw->table;
+ unsigned int val, mul = 0, div = 1;
+
+- val = clk_val_best(&common->hw, rate, parent_rate);
++ val = owl_clk_val_best(factor_hw, &common->hw, rate, parent_rate);
+ _get_table_div_mul(clkt, val, &mul, &div);
+
+ return *parent_rate * mul / div;
+diff --git a/drivers/clk/clk-highbank.c b/drivers/clk/clk-highbank.c
+index 727ed8e1bb72..8e4581004695 100644
+--- a/drivers/clk/clk-highbank.c
++++ b/drivers/clk/clk-highbank.c
+@@ -293,6 +293,7 @@ static __init struct clk *hb_clk_init(struct device_node *node, const struct clk
+ /* Map system registers */
+ srnp = of_find_compatible_node(NULL, NULL, "calxeda,hb-sregs");
+ hb_clk->reg = of_iomap(srnp, 0);
++ of_node_put(srnp);
+ BUG_ON(!hb_clk->reg);
+ hb_clk->reg += reg;
+
+diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
+index 8abc5c8cb8b8..a0713b2a12f3 100644
+--- a/drivers/clk/clk-qoriq.c
++++ b/drivers/clk/clk-qoriq.c
+@@ -1389,6 +1389,7 @@ static void __init clockgen_init(struct device_node *np)
+ pr_err("%s: Couldn't map %pOF regs\n", __func__,
+ guts);
+ }
++ of_node_put(guts);
+ }
+
+ }
+diff --git a/drivers/clk/imx/clk-imx6q.c b/drivers/clk/imx/clk-imx6q.c
+index c509324f6338..8ddf9b1f1677 100644
+--- a/drivers/clk/imx/clk-imx6q.c
++++ b/drivers/clk/imx/clk-imx6q.c
+@@ -424,6 +424,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
+ anatop_base = base = of_iomap(np, 0);
+ WARN_ON(!base);
++ of_node_put(np);
+
+ /* Audio/video PLL post dividers do not work on i.MX6q revision 1.0 */
+ if (clk_on_imx6q() && imx_get_soc_revision() == IMX_CHIP_REVISION_1_0) {
+diff --git a/drivers/clk/imx/clk-imx6sx.c b/drivers/clk/imx/clk-imx6sx.c
+index d9f2890ffe62..57ab96a47775 100644
+--- a/drivers/clk/imx/clk-imx6sx.c
++++ b/drivers/clk/imx/clk-imx6sx.c
+@@ -151,6 +151,7 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx6sx-anatop");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
++ of_node_put(np);
+
+ clks[IMX6SX_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+ clks[IMX6SX_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", base + 0x30, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
+diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c
+index 881b772c4ac9..83412bc36ebf 100644
+--- a/drivers/clk/imx/clk-imx7d.c
++++ b/drivers/clk/imx/clk-imx7d.c
+@@ -413,6 +413,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node)
+ np = of_find_compatible_node(NULL, NULL, "fsl,imx7d-anatop");
+ base = of_iomap(np, 0);
+ WARN_ON(!base);
++ of_node_put(np);
+
+ clks[IMX7D_PLL_ARM_MAIN_SRC] = imx_clk_mux("pll_arm_main_src", base + 0x60, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
+ clks[IMX7D_PLL_DRAM_MAIN_SRC] = imx_clk_mux("pll_dram_main_src", base + 0x70, 14, 2, pll_bypass_src_sel, ARRAY_SIZE(pll_bypass_src_sel));
+diff --git a/drivers/clk/imx/clk-vf610.c b/drivers/clk/imx/clk-vf610.c
+index 6dae54325a91..a334667c450a 100644
+--- a/drivers/clk/imx/clk-vf610.c
++++ b/drivers/clk/imx/clk-vf610.c
+@@ -203,6 +203,7 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
+ np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
+ anatop_base = of_iomap(np, 0);
+ BUG_ON(!anatop_base);
++ of_node_put(np);
+
+ np = ccm_node;
+ ccm_base = of_iomap(np, 0);
+diff --git a/drivers/clk/ingenic/jz4740-cgu.c b/drivers/clk/ingenic/jz4740-cgu.c
+index 4479c102e899..b86edd328249 100644
+--- a/drivers/clk/ingenic/jz4740-cgu.c
++++ b/drivers/clk/ingenic/jz4740-cgu.c
+@@ -165,7 +165,7 @@ static const struct ingenic_cgu_clk_info jz4740_cgu_clocks[] = {
+ .parents = { JZ4740_CLK_EXT, JZ4740_CLK_PLL_HALF, -1, -1 },
+ .mux = { CGU_REG_CPCCR, 29, 1 },
+ .div = { CGU_REG_CPCCR, 23, 1, 6, -1, -1, -1 },
+- .gate = { CGU_REG_SCR, 6 },
++ .gate = { CGU_REG_SCR, 6, true },
+ },
+
+ /* Gate-only clocks */
+diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
+index 02229d051d77..4e7dac24948b 100644
+--- a/drivers/clk/meson/axg.c
++++ b/drivers/clk/meson/axg.c
+@@ -461,11 +461,6 @@ static struct clk_regmap axg_mpll0_div = {
+ .shift = 16,
+ .width = 9,
+ },
+- .ssen = {
+- .reg_off = HHI_MPLL_CNTL,
+- .shift = 25,
+- .width = 1,
+- },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 0,
+@@ -560,6 +555,11 @@ static struct clk_regmap axg_mpll2_div = {
+ .shift = 16,
+ .width = 9,
+ },
++ .ssen = {
++ .reg_off = HHI_MPLL_CNTL,
++ .shift = 25,
++ .width = 1,
++ },
+ .misc = {
+ .reg_off = HHI_PLL_TOP_MISC,
+ .shift = 2,
+diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
+index b039909e03cf..38ffa51a5bad 100644
+--- a/drivers/clk/meson/gxbb.c
++++ b/drivers/clk/meson/gxbb.c
+@@ -650,11 +650,6 @@ static struct clk_regmap gxbb_mpll0_div = {
+ .shift = 16,
+ .width = 9,
+ },
+- .ssen = {
+- .reg_off = HHI_MPLL_CNTL,
+- .shift = 25,
+- .width = 1,
+- },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
+index 2c7c1085f883..8fdfa97900cd 100644
+--- a/drivers/clk/mvebu/armada-370.c
++++ b/drivers/clk/mvebu/armada-370.c
+@@ -177,8 +177,10 @@ static void __init a370_clk_init(struct device_node *np)
+
+ mvebu_coreclk_setup(np, &a370_coreclks);
+
+- if (cgnp)
++ if (cgnp) {
+ mvebu_clk_gating_setup(cgnp, a370_gating_desc);
++ of_node_put(cgnp);
++ }
+ }
+ CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
+
+diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
+index 0ec44ae9a2a2..df529982adc9 100644
+--- a/drivers/clk/mvebu/armada-xp.c
++++ b/drivers/clk/mvebu/armada-xp.c
+@@ -228,7 +228,9 @@ static void __init axp_clk_init(struct device_node *np)
+
+ mvebu_coreclk_setup(np, &axp_coreclks);
+
+- if (cgnp)
++ if (cgnp) {
+ mvebu_clk_gating_setup(cgnp, axp_gating_desc);
++ of_node_put(cgnp);
++ }
+ }
+ CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
+diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
+index 59fad9546c84..5f258c9bb68b 100644
+--- a/drivers/clk/mvebu/dove.c
++++ b/drivers/clk/mvebu/dove.c
+@@ -190,10 +190,14 @@ static void __init dove_clk_init(struct device_node *np)
+
+ mvebu_coreclk_setup(np, &dove_coreclks);
+
+- if (ddnp)
++ if (ddnp) {
+ dove_divider_clk_init(ddnp);
++ of_node_put(ddnp);
++ }
+
+- if (cgnp)
++ if (cgnp) {
+ mvebu_clk_gating_setup(cgnp, dove_gating_desc);
++ of_node_put(cgnp);
++ }
+ }
+ CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
+diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
+index a2a8d614039d..890ebf623261 100644
+--- a/drivers/clk/mvebu/kirkwood.c
++++ b/drivers/clk/mvebu/kirkwood.c
+@@ -333,6 +333,8 @@ static void __init kirkwood_clk_init(struct device_node *np)
+ if (cgnp) {
+ mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
+ kirkwood_clk_muxing_setup(cgnp, kirkwood_mux_desc);
++
++ of_node_put(cgnp);
+ }
+ }
+ CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
+diff --git a/drivers/clk/mvebu/mv98dx3236.c b/drivers/clk/mvebu/mv98dx3236.c
+index 6e203af73cac..c8a0d03d2cd6 100644
+--- a/drivers/clk/mvebu/mv98dx3236.c
++++ b/drivers/clk/mvebu/mv98dx3236.c
+@@ -174,7 +174,9 @@ static void __init mv98dx3236_clk_init(struct device_node *np)
+
+ mvebu_coreclk_setup(np, &mv98dx3236_core_clocks);
+
+- if (cgnp)
++ if (cgnp) {
+ mvebu_clk_gating_setup(cgnp, mv98dx3236_gating_desc);
++ of_node_put(cgnp);
++ }
+ }
+ CLK_OF_DECLARE(mv98dx3236_clk, "marvell,mv98dx3236-core-clock", mv98dx3236_clk_init);
+diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
+index 9a3290fdd01b..bea55c461cee 100644
+--- a/drivers/clk/qcom/gcc-msm8996.c
++++ b/drivers/clk/qcom/gcc-msm8996.c
+@@ -138,22 +138,6 @@ static const char * const gcc_xo_gpll0_gpll4_gpll0_early_div[] = {
+ "gpll0_early_div"
+ };
+
+-static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div_map[] = {
+- { P_XO, 0 },
+- { P_GPLL0, 1 },
+- { P_GPLL2, 2 },
+- { P_GPLL3, 3 },
+- { P_GPLL0_EARLY_DIV, 6 }
+-};
+-
+-static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll0_early_div[] = {
+- "xo",
+- "gpll0",
+- "gpll2",
+- "gpll3",
+- "gpll0_early_div"
+-};
+-
+ static const struct parent_map gcc_xo_gpll0_gpll1_early_div_gpll1_gpll4_gpll0_early_div_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+@@ -192,26 +176,6 @@ static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll2_early_gpll0_early
+ "gpll0_early_div"
+ };
+
+-static const struct parent_map gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div_map[] = {
+- { P_XO, 0 },
+- { P_GPLL0, 1 },
+- { P_GPLL2, 2 },
+- { P_GPLL3, 3 },
+- { P_GPLL1, 4 },
+- { P_GPLL4, 5 },
+- { P_GPLL0_EARLY_DIV, 6 }
+-};
+-
+-static const char * const gcc_xo_gpll0_gpll2_gpll3_gpll1_gpll4_gpll0_early_div[] = {
+- "xo",
+- "gpll0",
+- "gpll2",
+- "gpll3",
+- "gpll1",
+- "gpll4",
+- "gpll0_early_div"
+-};
+-
+ static struct clk_fixed_factor xo = {
+ .mult = 1,
+ .div = 1,
+diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
+index 4e23973b6cd1..772a08101ddf 100644
+--- a/drivers/clk/qcom/gcc-msm8998.c
++++ b/drivers/clk/qcom/gcc-msm8998.c
+@@ -2144,7 +2144,7 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+
+ static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x6b018,
+- .halt_check = BRANCH_HALT,
++ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x6b018,
+ .enable_mask = BIT(0),
+diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
+index 0421960eb963..442309b56920 100644
+--- a/drivers/clk/samsung/clk-exynos4.c
++++ b/drivers/clk/samsung/clk-exynos4.c
+@@ -1226,6 +1226,7 @@ static unsigned long __init exynos4_get_xom(void)
+ xom = readl(chipid_base + 8);
+
+ iounmap(chipid_base);
++ of_node_put(np);
+ }
+
+ return xom;
+diff --git a/drivers/clk/socfpga/clk-pll-a10.c b/drivers/clk/socfpga/clk-pll-a10.c
+index 35fabe1a32c3..269467e8e07e 100644
+--- a/drivers/clk/socfpga/clk-pll-a10.c
++++ b/drivers/clk/socfpga/clk-pll-a10.c
+@@ -95,6 +95,7 @@ static struct clk * __init __socfpga_pll_init(struct device_node *node,
+
+ clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
+ clk_mgr_a10_base_addr = of_iomap(clkmgr_np, 0);
++ of_node_put(clkmgr_np);
+ BUG_ON(!clk_mgr_a10_base_addr);
+ pll_clk->hw.reg = clk_mgr_a10_base_addr + reg;
+
+diff --git a/drivers/clk/socfpga/clk-pll.c b/drivers/clk/socfpga/clk-pll.c
+index c7f463172e4b..b4b44e9b5901 100644
+--- a/drivers/clk/socfpga/clk-pll.c
++++ b/drivers/clk/socfpga/clk-pll.c
+@@ -100,6 +100,7 @@ static __init struct clk *__socfpga_pll_init(struct device_node *node,
+
+ clkmgr_np = of_find_compatible_node(NULL, NULL, "altr,clk-mgr");
+ clk_mgr_base_addr = of_iomap(clkmgr_np, 0);
++ of_node_put(clkmgr_np);
+ BUG_ON(!clk_mgr_base_addr);
+ pll_clk->hw.reg = clk_mgr_base_addr + reg;
+
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+index 27554eaf6929..8d05d4f1f8a1 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+@@ -104,7 +104,7 @@ static SUNXI_CCU_GATE(r_apb2_i2c_clk, "r-apb2-i2c", "r-apb2",
+ static SUNXI_CCU_GATE(r_apb1_ir_clk, "r-apb1-ir", "r-apb1",
+ 0x1cc, BIT(0), 0);
+ static SUNXI_CCU_GATE(r_apb1_w1_clk, "r-apb1-w1", "r-apb1",
+- 0x1cc, BIT(0), 0);
++ 0x1ec, BIT(0), 0);
+
+ /* Information of IR(RX) mod clock is gathered from BSP source code */
+ static const char * const r_mod0_default_parents[] = { "osc32k", "osc24M" };
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+index a4fa2945f230..4b5f8f4e4ab8 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+@@ -144,7 +144,7 @@ static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_mipi_clk, "pll-mipi",
+ 8, 4, /* N */
+ 4, 2, /* K */
+ 0, 4, /* M */
+- BIT(31), /* gate */
++ BIT(31) | BIT(23) | BIT(22), /* gate */
+ BIT(28), /* lock */
+ CLK_SET_RATE_UNGATE);
+
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+index 9e3f4088724b..c7f9d974b10d 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+@@ -84,7 +84,7 @@ static SUNXI_CCU_NM_WITH_FRAC_GATE_LOCK(pll_ve_clk, "pll-ve",
+ BIT(28), /* lock */
+ 0);
+
+-static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr_clk, "pll-ddr",
++static SUNXI_CCU_NKM_WITH_GATE_LOCK(pll_ddr0_clk, "pll-ddr0",
+ "osc24M", 0x020,
+ 8, 5, /* N */
+ 4, 2, /* K */
+@@ -123,6 +123,14 @@ static SUNXI_CCU_NK_WITH_GATE_LOCK_POSTDIV(pll_periph1_clk, "pll-periph1",
+ 2, /* post-div */
+ 0);
+
++static SUNXI_CCU_NM_WITH_GATE_LOCK(pll_ddr1_clk, "pll-ddr1",
++ "osc24M", 0x04c,
++ 8, 7, /* N */
++ 0, 2, /* M */
++ BIT(31), /* gate */
++ BIT(28), /* lock */
++ 0);
++
+ static const char * const cpu_parents[] = { "osc32k", "osc24M",
+ "pll-cpu", "pll-cpu" };
+ static SUNXI_CCU_MUX(cpu_clk, "cpu", cpu_parents,
+@@ -310,7 +318,8 @@ static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
+ static SUNXI_CCU_GATE(usb_ohci0_clk, "usb-ohci0", "osc24M",
+ 0x0cc, BIT(16), 0);
+
+-static const char * const dram_parents[] = { "pll-ddr", "pll-periph0-2x" };
++static const char * const dram_parents[] = { "pll-ddr0", "pll-ddr1",
++ "pll-periph0-2x" };
+ static SUNXI_CCU_M_WITH_MUX(dram_clk, "dram", dram_parents,
+ 0x0f4, 0, 4, 20, 2, CLK_IS_CRITICAL);
+
+@@ -369,10 +378,11 @@ static struct ccu_common *sun8i_v3s_ccu_clks[] = {
+ &pll_audio_base_clk.common,
+ &pll_video_clk.common,
+ &pll_ve_clk.common,
+- &pll_ddr_clk.common,
++ &pll_ddr0_clk.common,
+ &pll_periph0_clk.common,
+ &pll_isp_clk.common,
+ &pll_periph1_clk.common,
++ &pll_ddr1_clk.common,
+ &cpu_clk.common,
+ &axi_clk.common,
+ &ahb1_clk.common,
+@@ -457,11 +467,12 @@ static struct clk_hw_onecell_data sun8i_v3s_hw_clks = {
+ [CLK_PLL_AUDIO_8X] = &pll_audio_8x_clk.hw,
+ [CLK_PLL_VIDEO] = &pll_video_clk.common.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+- [CLK_PLL_DDR] = &pll_ddr_clk.common.hw,
++ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.hw,
+ [CLK_PLL_ISP] = &pll_isp_clk.common.hw,
+ [CLK_PLL_PERIPH1] = &pll_periph1_clk.common.hw,
++ [CLK_PLL_DDR1] = &pll_ddr1_clk.common.hw,
+ [CLK_CPU] = &cpu_clk.common.hw,
+ [CLK_AXI] = &axi_clk.common.hw,
+ [CLK_AHB1] = &ahb1_clk.common.hw,
+diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
+index 4a4d36fdad96..a091b7217dfd 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
++++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.h
+@@ -29,7 +29,7 @@
+ #define CLK_PLL_AUDIO_8X 5
+ #define CLK_PLL_VIDEO 6
+ #define CLK_PLL_VE 7
+-#define CLK_PLL_DDR 8
++#define CLK_PLL_DDR0 8
+ #define CLK_PLL_PERIPH0 9
+ #define CLK_PLL_PERIPH0_2X 10
+ #define CLK_PLL_ISP 11
+@@ -58,6 +58,8 @@
+
+ /* And the GPU module clock is exported */
+
+-#define CLK_NUMBER (CLK_MIPI_CSI + 1)
++#define CLK_PLL_DDR1 74
++
++#define CLK_NUMBER (CLK_PLL_DDR1 + 1)
+
+ #endif /* _CCU_SUN8I_H3_H_ */
+diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
+index 27e0979b3158..0cc87c6ae91c 100644
+--- a/drivers/clk/ti/clk.c
++++ b/drivers/clk/ti/clk.c
+@@ -188,9 +188,13 @@ void __init ti_dt_clocks_register(struct ti_dt_clk oclks[])
+ clkdev_add(&c->lk);
+ } else {
+ if (num_args && !has_clkctrl_data) {
+- if (of_find_compatible_node(NULL, NULL,
+- "ti,clkctrl")) {
++ struct device_node *np;
++
++ np = of_find_compatible_node(NULL, NULL,
++ "ti,clkctrl");
++ if (np) {
+ has_clkctrl_data = true;
++ of_node_put(np);
+ } else {
+ clkctrl_nodes_missing = true;
+
+diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
+index aaf5bfa9bd9c..e3ae041ac30e 100644
+--- a/drivers/clocksource/exynos_mct.c
++++ b/drivers/clocksource/exynos_mct.c
+@@ -563,7 +563,19 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
+ return 0;
+
+ out_irq:
+- free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
++ if (mct_int_type == MCT_INT_PPI) {
++ free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
++ } else {
++ for_each_possible_cpu(cpu) {
++ struct mct_clock_event_device *pcpu_mevt =
++ per_cpu_ptr(&percpu_mct_tick, cpu);
++
++ if (pcpu_mevt->evt.irq != -1) {
++ free_irq(pcpu_mevt->evt.irq, pcpu_mevt);
++ pcpu_mevt->evt.irq = -1;
++ }
++ }
++ }
+ return err;
+ }
+
+diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
+index 3b56ea3f52af..552c5254390c 100644
+--- a/drivers/clocksource/timer-sun5i.c
++++ b/drivers/clocksource/timer-sun5i.c
+@@ -202,6 +202,11 @@ static int __init sun5i_setup_clocksource(struct device_node *node,
+ }
+
+ rate = clk_get_rate(clk);
++ if (!rate) {
++ pr_err("Couldn't get parent clock rate\n");
++ ret = -EINVAL;
++ goto err_disable_clk;
++ }
+
+ cs->timer.base = base;
+ cs->timer.clk = clk;
+@@ -275,6 +280,11 @@ static int __init sun5i_setup_clockevent(struct device_node *node, void __iomem
+ }
+
+ rate = clk_get_rate(clk);
++ if (!rate) {
++ pr_err("Couldn't get parent clock rate\n");
++ ret = -EINVAL;
++ goto err_disable_clk;
++ }
+
+ ce->timer.base = base;
+ ce->timer.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
+diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
+index 3ecf84706640..23414dddc3ba 100644
+--- a/drivers/clocksource/timer-ti-dm.c
++++ b/drivers/clocksource/timer-ti-dm.c
+@@ -868,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
+ timer->pdev = pdev;
+
+ pm_runtime_enable(dev);
+- pm_runtime_irq_safe(dev);
+
+ if (!timer->reserved) {
+ ret = pm_runtime_get_sync(dev);
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index e6f9cbe5835f..77b0e5d0fb13 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -384,12 +384,12 @@ static int brcm_avs_set_pstate(struct private_data *priv, unsigned int pstate)
+ return __issue_avs_command(priv, AVS_CMD_SET_PSTATE, true, args);
+ }
+
+-static unsigned long brcm_avs_get_voltage(void __iomem *base)
++static u32 brcm_avs_get_voltage(void __iomem *base)
+ {
+ return readl(base + AVS_MBOX_VOLTAGE1);
+ }
+
+-static unsigned long brcm_avs_get_frequency(void __iomem *base)
++static u32 brcm_avs_get_frequency(void __iomem *base)
+ {
+ return readl(base + AVS_MBOX_FREQUENCY) * 1000; /* in kHz */
+ }
+@@ -446,8 +446,8 @@ static bool brcm_avs_is_firmware_loaded(struct private_data *priv)
+ rc = brcm_avs_get_pmap(priv, NULL);
+ magic = readl(priv->base + AVS_MBOX_MAGIC);
+
+- return (magic == AVS_FIRMWARE_MAGIC) && (rc != -ENOTSUPP) &&
+- (rc != -EINVAL);
++ return (magic == AVS_FIRMWARE_MAGIC) && ((rc != -ENOTSUPP) ||
++ (rc != -EINVAL));
+ }
+
+ static unsigned int brcm_avs_cpufreq_get(unsigned int cpu)
+@@ -653,14 +653,14 @@ static ssize_t show_brcm_avs_voltage(struct cpufreq_policy *policy, char *buf)
+ {
+ struct private_data *priv = policy->driver_data;
+
+- return sprintf(buf, "0x%08lx\n", brcm_avs_get_voltage(priv->base));
++ return sprintf(buf, "0x%08x\n", brcm_avs_get_voltage(priv->base));
+ }
+
+ static ssize_t show_brcm_avs_frequency(struct cpufreq_policy *policy, char *buf)
+ {
+ struct private_data *priv = policy->driver_data;
+
+- return sprintf(buf, "0x%08lx\n", brcm_avs_get_frequency(priv->base));
++ return sprintf(buf, "0x%08x\n", brcm_avs_get_frequency(priv->base));
+ }
+
+ cpufreq_freq_attr_ro(brcm_avs_pstate);
+diff --git a/drivers/crypto/amcc/crypto4xx_trng.h b/drivers/crypto/amcc/crypto4xx_trng.h
+index 931d22531f51..7bbda51b7337 100644
+--- a/drivers/crypto/amcc/crypto4xx_trng.h
++++ b/drivers/crypto/amcc/crypto4xx_trng.h
+@@ -26,9 +26,9 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
+ void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
+ #else
+ static inline void ppc4xx_trng_probe(
+- struct crypto4xx_device *dev __maybe_unused) { }
++ struct crypto4xx_core_device *dev __maybe_unused) { }
+ static inline void ppc4xx_trng_remove(
+- struct crypto4xx_device *dev __maybe_unused) { }
++ struct crypto4xx_core_device *dev __maybe_unused) { }
+ #endif
+
+ #endif
+diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
+index 49c0097fa474..0b1fc5664b1d 100644
+--- a/drivers/crypto/bcm/cipher.c
++++ b/drivers/crypto/bcm/cipher.c
+@@ -717,7 +717,7 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
+ */
+ unsigned int new_data_len;
+
+- unsigned int chunk_start = 0;
++ unsigned int __maybe_unused chunk_start = 0;
+ u32 db_size; /* Length of data field, incl gcm and hash padding */
+ int pad_len = 0; /* total pad len, including gcm, hash, stat padding */
+ u32 data_pad_len = 0; /* length of GCM/CCM padding */
+@@ -1675,8 +1675,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
+ struct spu_hw *spu = &iproc_priv.spu;
+ struct brcm_message *mssg = msg;
+ struct iproc_reqctx_s *rctx;
+- struct iproc_ctx_s *ctx;
+- struct crypto_async_request *areq;
+ int err = 0;
+
+ rctx = mssg->ctx;
+@@ -1686,8 +1684,6 @@ static void spu_rx_callback(struct mbox_client *cl, void *msg)
+ err = -EFAULT;
+ goto cb_finish;
+ }
+- areq = rctx->parent;
+- ctx = rctx->ctx;
+
+ /* process the SPU status */
+ err = spu->spu_status_process(rctx->msg_buf.rx_stat);
+diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
+index fde07d4ff019..ff6718a11e9e 100644
+--- a/drivers/crypto/caam/caamrng.c
++++ b/drivers/crypto/caam/caamrng.c
+@@ -353,7 +353,10 @@ static int __init caam_rng_init(void)
+ goto free_rng_ctx;
+
+ dev_info(dev, "registering rng-caam\n");
+- return hwrng_register(&caam_rng);
++
++ err = hwrng_register(&caam_rng);
++ if (!err)
++ return err;
+
+ free_rng_ctx:
+ kfree(rng_ctx);
+diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
+index 8da88beb1abb..832ba2afdcd5 100644
+--- a/drivers/crypto/caam/error.c
++++ b/drivers/crypto/caam/error.c
+@@ -22,7 +22,7 @@ void caam_dump_sg(const char *level, const char *prefix_str, int prefix_type,
+ size_t len;
+ void *buf;
+
+- for (it = sg; it && tlen > 0 ; it = sg_next(sg)) {
++ for (it = sg; it && tlen > 0 ; it = sg_next(it)) {
+ /*
+ * make sure the scatterlist's page
+ * has a valid virtual memory mapping
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes.c b/drivers/crypto/ccp/ccp-crypto-aes.c
+index 89291c15015c..3f768699332b 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes.c
+@@ -1,7 +1,8 @@
++// SPDX-License-Identifier: GPL-2.0
+ /*
+ * AMD Cryptographic Coprocessor (CCP) AES crypto API support
+ *
+- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
++ * Copyright (C) 2013-2019 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+@@ -79,8 +80,7 @@ static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt)
+ return -EINVAL;
+
+ if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) ||
+- (ctx->u.aes.mode == CCP_AES_MODE_CBC) ||
+- (ctx->u.aes.mode == CCP_AES_MODE_CFB)) &&
++ (ctx->u.aes.mode == CCP_AES_MODE_CBC)) &&
+ (req->nbytes & (AES_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+@@ -291,7 +291,7 @@ static struct ccp_aes_def aes_algs[] = {
+ .version = CCP_VERSION(3, 0),
+ .name = "cfb(aes)",
+ .driver_name = "cfb-aes-ccp",
+- .blocksize = AES_BLOCK_SIZE,
++ .blocksize = 1,
+ .ivsize = AES_BLOCK_SIZE,
+ .alg_defaults = &ccp_aes_defaults,
+ },
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 1e2e42106dee..330853a2702f 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -458,8 +458,8 @@ static int ccp_copy_from_sb(struct ccp_cmd_queue *cmd_q,
+ return ccp_copy_to_from_sb(cmd_q, wa, jobid, sb, byte_swap, true);
+ }
+
+-static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
+- struct ccp_cmd *cmd)
++static noinline_for_stack int
++ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ {
+ struct ccp_aes_engine *aes = &cmd->u.aes;
+ struct ccp_dm_workarea key, ctx;
+@@ -614,8 +614,8 @@ e_key:
+ return ret;
+ }
+
+-static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
+- struct ccp_cmd *cmd)
++static noinline_for_stack int
++ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ {
+ struct ccp_aes_engine *aes = &cmd->u.aes;
+ struct ccp_dm_workarea key, ctx, final_wa, tag;
+@@ -897,7 +897,8 @@ e_key:
+ return ret;
+ }
+
+-static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
++static noinline_for_stack int
++ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ {
+ struct ccp_aes_engine *aes = &cmd->u.aes;
+ struct ccp_dm_workarea key, ctx;
+@@ -907,12 +908,6 @@ static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ bool in_place = false;
+ int ret;
+
+- if (aes->mode == CCP_AES_MODE_CMAC)
+- return ccp_run_aes_cmac_cmd(cmd_q, cmd);
+-
+- if (aes->mode == CCP_AES_MODE_GCM)
+- return ccp_run_aes_gcm_cmd(cmd_q, cmd);
+-
+ if (!((aes->key_len == AES_KEYSIZE_128) ||
+ (aes->key_len == AES_KEYSIZE_192) ||
+ (aes->key_len == AES_KEYSIZE_256)))
+@@ -1080,8 +1075,8 @@ e_key:
+ return ret;
+ }
+
+-static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+- struct ccp_cmd *cmd)
++static noinline_for_stack int
++ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ {
+ struct ccp_xts_aes_engine *xts = &cmd->u.xts;
+ struct ccp_dm_workarea key, ctx;
+@@ -1280,7 +1275,8 @@ e_key:
+ return ret;
+ }
+
+-static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
++static noinline_for_stack int
++ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ {
+ struct ccp_des3_engine *des3 = &cmd->u.des3;
+
+@@ -1293,6 +1289,9 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ int ret;
+
+ /* Error checks */
++ if (cmd_q->ccp->vdata->version < CCP_VERSION(5, 0))
++ return -EINVAL;
++
+ if (!cmd_q->ccp->vdata->perform->des3)
+ return -EINVAL;
+
+@@ -1375,8 +1374,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ * passthru option to convert from big endian to little endian.
+ */
+ if (des3->mode != CCP_DES3_MODE_ECB) {
+- u32 load_mode;
+-
+ op.sb_ctx = cmd_q->sb_ctx;
+
+ ret = ccp_init_dm_workarea(&ctx, cmd_q,
+@@ -1392,12 +1389,8 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ if (ret)
+ goto e_ctx;
+
+- if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+- load_mode = CCP_PASSTHRU_BYTESWAP_NOOP;
+- else
+- load_mode = CCP_PASSTHRU_BYTESWAP_256BIT;
+ ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+- load_mode);
++ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+ cmd->engine_error = cmd_q->cmd_error;
+ goto e_ctx;
+@@ -1459,10 +1452,6 @@ static int ccp_run_des3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ }
+
+ /* ...but we only need the last DES3_EDE_BLOCK_SIZE bytes */
+- if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
+- dm_offset = CCP_SB_BYTES - des3->iv_len;
+- else
+- dm_offset = 0;
+ ccp_get_dm_area(&ctx, dm_offset, des3->iv, 0,
+ DES3_EDE_BLOCK_SIZE);
+ }
+@@ -1483,7 +1472,8 @@ e_key:
+ return ret;
+ }
+
+-static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
++static noinline_for_stack int
++ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ {
+ struct ccp_sha_engine *sha = &cmd->u.sha;
+ struct ccp_dm_workarea ctx;
+@@ -1827,7 +1817,8 @@ e_ctx:
+ return ret;
+ }
+
+-static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
++static noinline_for_stack int
++ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ {
+ struct ccp_rsa_engine *rsa = &cmd->u.rsa;
+ struct ccp_dm_workarea exp, src, dst;
+@@ -1958,8 +1949,8 @@ e_sb:
+ return ret;
+ }
+
+-static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
+- struct ccp_cmd *cmd)
++static noinline_for_stack int
++ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ {
+ struct ccp_passthru_engine *pt = &cmd->u.passthru;
+ struct ccp_dm_workarea mask;
+@@ -2090,7 +2081,8 @@ e_mask:
+ return ret;
+ }
+
+-static int ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
++static noinline_for_stack int
++ccp_run_passthru_nomap_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_cmd *cmd)
+ {
+ struct ccp_passthru_nomap_engine *pt = &cmd->u.passthru_nomap;
+@@ -2431,7 +2423,8 @@ e_src:
+ return ret;
+ }
+
+-static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
++static noinline_for_stack int
++ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+ {
+ struct ccp_ecc_engine *ecc = &cmd->u.ecc;
+
+@@ -2468,7 +2461,17 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
+
+ switch (cmd->engine) {
+ case CCP_ENGINE_AES:
+- ret = ccp_run_aes_cmd(cmd_q, cmd);
++ switch (cmd->u.aes.mode) {
++ case CCP_AES_MODE_CMAC:
++ ret = ccp_run_aes_cmac_cmd(cmd_q, cmd);
++ break;
++ case CCP_AES_MODE_GCM:
++ ret = ccp_run_aes_gcm_cmd(cmd_q, cmd);
++ break;
++ default:
++ ret = ccp_run_aes_cmd(cmd_q, cmd);
++ break;
++ }
+ break;
+ case CCP_ENGINE_XTS_AES_128:
+ ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
+diff --git a/drivers/crypto/ccree/cc_cipher.c b/drivers/crypto/ccree/cc_cipher.c
+index 54a39164aab8..28a5b8b38fa2 100644
+--- a/drivers/crypto/ccree/cc_cipher.c
++++ b/drivers/crypto/ccree/cc_cipher.c
+@@ -306,7 +306,6 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
+ struct crypto_tfm *tfm = crypto_skcipher_tfm(sktfm);
+ struct cc_cipher_ctx *ctx_p = crypto_tfm_ctx(tfm);
+ struct device *dev = drvdata_to_dev(ctx_p->drvdata);
+- u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ struct cc_crypto_alg *cc_alg =
+ container_of(tfm->__crt_alg, struct cc_crypto_alg,
+ skcipher_alg.base);
+@@ -332,6 +331,7 @@ static int cc_cipher_setkey(struct crypto_skcipher *sktfm, const u8 *key,
+ * HW does the expansion on its own.
+ */
+ if (ctx_p->flow_mode == S_DIN_to_DES) {
++ u32 tmp[DES3_EDE_EXPKEY_WORDS];
+ if (keylen == DES3_EDE_KEY_SIZE &&
+ __des3_ede_setkey(tmp, &tfm->crt_flags, key,
+ DES3_EDE_KEY_SIZE)) {
+diff --git a/drivers/crypto/hisilicon/sec/sec_algs.c b/drivers/crypto/hisilicon/sec/sec_algs.c
+index db2983c51f1e..bf9658800bda 100644
+--- a/drivers/crypto/hisilicon/sec/sec_algs.c
++++ b/drivers/crypto/hisilicon/sec/sec_algs.c
+@@ -153,6 +153,24 @@ static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm,
+ ctx->cipher_alg);
+ }
+
++static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
++ dma_addr_t psec_sgl, struct sec_dev_info *info)
++{
++ struct sec_hw_sgl *sgl_current, *sgl_next;
++ dma_addr_t sgl_next_dma;
++
++ sgl_current = hw_sgl;
++ while (sgl_current) {
++ sgl_next = sgl_current->next;
++ sgl_next_dma = sgl_current->next_sgl;
++
++ dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
++
++ sgl_current = sgl_next;
++ psec_sgl = sgl_next_dma;
++ }
++}
++
+ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
+ dma_addr_t *psec_sgl,
+ struct scatterlist *sgl,
+@@ -199,36 +217,12 @@ static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
+ return 0;
+
+ err_free_hw_sgls:
+- sgl_current = *sec_sgl;
+- while (sgl_current) {
+- sgl_next = sgl_current->next;
+- dma_pool_free(info->hw_sgl_pool, sgl_current,
+- sgl_current->next_sgl);
+- sgl_current = sgl_next;
+- }
++ sec_free_hw_sgl(*sec_sgl, *psec_sgl, info);
+ *psec_sgl = 0;
+
+ return ret;
+ }
+
+-static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
+- dma_addr_t psec_sgl, struct sec_dev_info *info)
+-{
+- struct sec_hw_sgl *sgl_current, *sgl_next;
+- dma_addr_t sgl_next_dma;
+-
+- sgl_current = hw_sgl;
+- while (sgl_current) {
+- sgl_next = sgl_current->next;
+- sgl_next_dma = sgl_current->next_sgl;
+-
+- dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);
+-
+- sgl_current = sgl_next;
+- psec_sgl = sgl_next_dma;
+- }
+-}
+-
+ static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen,
+ enum sec_cipher_alg alg)
+diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
+index ac9282c1a5ec..f3b02c00b784 100644
+--- a/drivers/crypto/inside-secure/safexcel_hash.c
++++ b/drivers/crypto/inside-secure/safexcel_hash.c
+@@ -50,10 +50,12 @@ struct safexcel_ahash_req {
+
+ static inline u64 safexcel_queued_len(struct safexcel_ahash_req *req)
+ {
+- if (req->len[1] > req->processed[1])
+- return 0xffffffff - (req->len[0] - req->processed[0]);
++ u64 len, processed;
+
+- return req->len[0] - req->processed[0];
++ len = (0xffffffff * req->len[1]) + req->len[0];
++ processed = (0xffffffff * req->processed[1]) + req->processed[0];
++
++ return len - processed;
+ }
+
+ static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
+@@ -486,7 +488,7 @@ static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
+ struct safexcel_inv_result result = {};
+ int ring = ctx->base.ring;
+
+- memset(req, 0, sizeof(struct ahash_request));
++ memset(req, 0, EIP197_AHASH_REQ_SIZE);
+
+ /* create invalidation request */
+ init_completion(&result.completion);
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+index 1a724263761b..2d178e013535 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
+@@ -179,7 +179,7 @@ static int sun4i_hash(struct ahash_request *areq)
+ */
+ unsigned int i = 0, end, fill, min_fill, nwait, nbw = 0, j = 0, todo;
+ unsigned int in_i = 0;
+- u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, wb = 0, v, ivmode = 0;
++ u32 spaces, rx_cnt = SS_RX_DEFAULT, bf[32] = {0}, v, ivmode = 0;
+ struct sun4i_req_ctx *op = ahash_request_ctx(areq);
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
+ struct sun4i_tfm_ctx *tfmctx = crypto_ahash_ctx(tfm);
+@@ -188,6 +188,7 @@ static int sun4i_hash(struct ahash_request *areq)
+ struct sg_mapping_iter mi;
+ int in_r, err = 0;
+ size_t copied = 0;
++ __le32 wb = 0;
+
+ dev_dbg(ss->dev, "%s %s bc=%llu len=%u mode=%x wl=%u h0=%0x",
+ __func__, crypto_tfm_alg_name(areq->base.tfm),
+@@ -399,7 +400,7 @@ hash_final:
+
+ nbw = op->len - 4 * nwait;
+ if (nbw) {
+- wb = *(u32 *)(op->buf + nwait * 4);
++ wb = cpu_to_le32(*(u32 *)(op->buf + nwait * 4));
+ wb &= GENMASK((nbw * 8) - 1, 0);
+
+ op->byte_count += nbw;
+@@ -408,7 +409,7 @@ hash_final:
+
+ /* write the remaining bytes of the nbw buffer */
+ wb |= ((1 << 7) << (nbw * 8));
+- bf[j++] = wb;
++ bf[j++] = le32_to_cpu(wb);
+
+ /*
+ * number of space to pad to obtain 64o minus 8(size) minus 4 (final 1)
+@@ -427,13 +428,13 @@ hash_final:
+
+ /* write the length of data */
+ if (op->mode == SS_OP_SHA1) {
+- __be64 bits = cpu_to_be64(op->byte_count << 3);
+- bf[j++] = lower_32_bits(bits);
+- bf[j++] = upper_32_bits(bits);
++ __be64 *bits = (__be64 *)&bf[j];
++ *bits = cpu_to_be64(op->byte_count << 3);
++ j += 2;
+ } else {
+- __le64 bits = op->byte_count << 3;
+- bf[j++] = lower_32_bits(bits);
+- bf[j++] = upper_32_bits(bits);
++ __le64 *bits = (__le64 *)&bf[j];
++ *bits = cpu_to_le64(op->byte_count << 3);
++ j += 2;
+ }
+ writesl(ss->base + SS_RXFIFO, bf, j);
+
+@@ -475,7 +476,7 @@ hash_final:
+ }
+ } else {
+ for (i = 0; i < 4; i++) {
+- v = readl(ss->base + SS_MD0 + i * 4);
++ v = cpu_to_le32(readl(ss->base + SS_MD0 + i * 4));
+ memcpy(areq->result + i * 4, &v, 4);
+ }
+ }
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index 634ae487c372..db5f939f5aa3 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -974,8 +974,8 @@ static void ipsec_esp_unmap(struct device *dev,
+ DMA_FROM_DEVICE);
+ unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);
+
+- talitos_sg_unmap(dev, edesc, areq->src, areq->dst, cryptlen,
+- areq->assoclen);
++ talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
++ cryptlen + authsize, areq->assoclen);
+
+ if (edesc->dma_len)
+ dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
+@@ -996,30 +996,15 @@ static void ipsec_esp_encrypt_done(struct device *dev,
+ struct talitos_desc *desc, void *context,
+ int err)
+ {
+- struct talitos_private *priv = dev_get_drvdata(dev);
+- bool is_sec1 = has_ftr_sec1(priv);
+ struct aead_request *areq = context;
+ struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
+- unsigned int authsize = crypto_aead_authsize(authenc);
+ unsigned int ivsize = crypto_aead_ivsize(authenc);
+ struct talitos_edesc *edesc;
+- void *icvdata;
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
+ ipsec_esp_unmap(dev, edesc, areq, true);
+
+- /* copy the generated ICV to dst */
+- if (edesc->icv_ool) {
+- if (is_sec1)
+- icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
+- else
+- icvdata = &edesc->link_tbl[edesc->src_nents +
+- edesc->dst_nents + 2];
+- sg_pcopy_from_buffer(areq->dst, edesc->dst_nents ? : 1, icvdata,
+- authsize, areq->assoclen + areq->cryptlen);
+- }
+-
+ dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);
+
+ kfree(edesc);
+@@ -1036,39 +1021,15 @@ static void ipsec_esp_decrypt_swauth_done(struct device *dev,
+ unsigned int authsize = crypto_aead_authsize(authenc);
+ struct talitos_edesc *edesc;
+ char *oicv, *icv;
+- struct talitos_private *priv = dev_get_drvdata(dev);
+- bool is_sec1 = has_ftr_sec1(priv);
+
+ edesc = container_of(desc, struct talitos_edesc, desc);
+
+ ipsec_esp_unmap(dev, edesc, req, false);
+
+ if (!err) {
+- char icvdata[SHA512_DIGEST_SIZE];
+- int nents = edesc->dst_nents ? : 1;
+- unsigned int len = req->assoclen + req->cryptlen;
+-
+ /* auth check */
+- if (nents > 1) {
+- sg_pcopy_to_buffer(req->dst, nents, icvdata, authsize,
+- len - authsize);
+- icv = icvdata;
+- } else {
+- icv = (char *)sg_virt(req->dst) + len - authsize;
+- }
+-
+- if (edesc->dma_len) {
+- if (is_sec1)
+- oicv = (char *)&edesc->dma_link_tbl +
+- req->assoclen + req->cryptlen;
+- else
+- oicv = (char *)
+- &edesc->link_tbl[edesc->src_nents +
+- edesc->dst_nents + 2];
+- if (edesc->icv_ool)
+- icv = oicv + authsize;
+- } else
+- oicv = (char *)&edesc->link_tbl[0];
++ oicv = edesc->buf + edesc->dma_len;
++ icv = oicv - authsize;
+
+ err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
+ }
+@@ -1104,11 +1065,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+ * stop at cryptlen bytes
+ */
+ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+- unsigned int offset, int cryptlen,
++ unsigned int offset, int datalen, int elen,
+ struct talitos_ptr *link_tbl_ptr)
+ {
+- int n_sg = sg_count;
++ int n_sg = elen ? sg_count + 1 : sg_count;
+ int count = 0;
++ int cryptlen = datalen + elen;
+
+ while (cryptlen && sg && n_sg--) {
+ unsigned int len = sg_dma_len(sg);
+@@ -1123,11 +1085,20 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ if (len > cryptlen)
+ len = cryptlen;
+
++ if (datalen > 0 && len > datalen) {
++ to_talitos_ptr(link_tbl_ptr + count,
++ sg_dma_address(sg) + offset, datalen, 0);
++ to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
++ count++;
++ len -= datalen;
++ offset += datalen;
++ }
+ to_talitos_ptr(link_tbl_ptr + count,
+ sg_dma_address(sg) + offset, len, 0);
+ to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
+ count++;
+ cryptlen -= len;
++ datalen -= len;
+ offset = 0;
+
+ next:
+@@ -1137,7 +1108,7 @@ next:
+ /* tag end of link table */
+ if (count > 0)
+ to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
+- DESC_PTR_LNKTBL_RETURN, 0);
++ DESC_PTR_LNKTBL_RET, 0);
+
+ return count;
+ }
+@@ -1145,7 +1116,8 @@ next:
+ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+- unsigned int offset, int tbl_off, int elen)
++ unsigned int offset, int tbl_off, int elen,
++ bool force)
+ {
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
+@@ -1155,7 +1127,7 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ return 1;
+ }
+ to_talitos_ptr_ext_set(ptr, elen, is_sec1);
+- if (sg_count == 1) {
++ if (sg_count == 1 && !force) {
+ to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
+ return sg_count;
+ }
+@@ -1163,9 +1135,9 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
+ return sg_count;
+ }
+- sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
++ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
+ &edesc->link_tbl[tbl_off]);
+- if (sg_count == 1) {
++ if (sg_count == 1 && !force) {
+ /* Only one segment now, so no link tbl needed*/
+ copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
+ return sg_count;
+@@ -1183,7 +1155,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int offset, int tbl_off)
+ {
+ return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+- tbl_off, 0);
++ tbl_off, 0, false);
+ }
+
+ /*
+@@ -1211,6 +1183,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
+ struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
+ struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
++ dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;
+
+ /* hmac key */
+ to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);
+@@ -1250,7 +1223,8 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ elen = authsize;
+
+ ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+- sg_count, areq->assoclen, tbl_off, elen);
++ sg_count, areq->assoclen, tbl_off, elen,
++ false);
+
+ if (ret > 1) {
+ tbl_off += ret;
+@@ -1264,55 +1238,35 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
+ }
+
+- ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+- sg_count, areq->assoclen, tbl_off);
+-
+- if (is_ipsec_esp)
+- to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
++ if (is_ipsec_esp && encrypt)
++ elen = authsize;
++ else
++ elen = 0;
++ ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
++ sg_count, areq->assoclen, tbl_off, elen,
++ is_ipsec_esp && !encrypt);
++ tbl_off += ret;
+
+ /* ICV data */
+- if (ret > 1) {
+- tbl_off += ret;
+- edesc->icv_ool = true;
+- sync_needed = true;
++ edesc->icv_ool = !encrypt;
+
+- if (is_ipsec_esp) {
+- struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+- int offset = (edesc->src_nents + edesc->dst_nents + 2) *
+- sizeof(struct talitos_ptr) + authsize;
++ if (!encrypt && is_ipsec_esp) {
++ struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
+
+- /* Add an entry to the link table for ICV data */
+- to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
+- to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
+- is_sec1);
++ /* Add an entry to the link table for ICV data */
++ to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
++ to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);
+
+- /* icv data follows link tables */
+- to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
+- authsize, is_sec1);
+- } else {
+- dma_addr_t addr = edesc->dma_link_tbl;
+-
+- if (is_sec1)
+- addr += areq->assoclen + cryptlen;
+- else
+- addr += sizeof(struct talitos_ptr) * tbl_off;
+-
+- to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
+- }
++ /* icv data follows link tables */
++ to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
++ to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
++ sync_needed = true;
++ } else if (!encrypt) {
++ to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
++ sync_needed = true;
+ } else if (!is_ipsec_esp) {
+- ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
+- &desc->ptr[6], sg_count, areq->assoclen +
+- cryptlen,
+- tbl_off);
+- if (ret > 1) {
+- tbl_off += ret;
+- edesc->icv_ool = true;
+- sync_needed = true;
+- } else {
+- edesc->icv_ool = false;
+- }
+- } else {
+- edesc->icv_ool = false;
++ talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
++ sg_count, areq->assoclen + cryptlen, tbl_off);
+ }
+
+ /* iv out */
+@@ -1395,18 +1349,18 @@ static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
+ * and space for two sets of ICVs (stashed and generated)
+ */
+ alloc_len = sizeof(struct talitos_edesc);
+- if (src_nents || dst_nents) {
++ if (src_nents || dst_nents || !encrypt) {
+ if (is_sec1)
+ dma_len = (src_nents ? src_len : 0) +
+- (dst_nents ? dst_len : 0);
++ (dst_nents ? dst_len : 0) + authsize;
+ else
+ dma_len = (src_nents + dst_nents + 2) *
+- sizeof(struct talitos_ptr) + authsize * 2;
++ sizeof(struct talitos_ptr) + authsize;
+ alloc_len += dma_len;
+ } else {
+ dma_len = 0;
+- alloc_len += icv_stashing ? authsize : 0;
+ }
++ alloc_len += icv_stashing ? authsize : 0;
+
+ /* if its a ahash, add space for a second desc next to the first one */
+ if (is_sec1 && !dst)
+@@ -1500,11 +1454,7 @@ static int aead_decrypt(struct aead_request *req)
+ edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
+
+ /* stash incoming ICV for later cmp with ICV generated by the h/w */
+- if (edesc->dma_len)
+- icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
+- edesc->dst_nents + 2];
+- else
+- icvdata = &edesc->link_tbl[0];
++ icvdata = edesc->buf + edesc->dma_len;
+
+ sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
+ req->assoclen + req->cryptlen - authsize);
+diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
+index 979f6a61e545..cb0137e131cc 100644
+--- a/drivers/crypto/talitos.h
++++ b/drivers/crypto/talitos.h
+@@ -442,5 +442,5 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
+
+ /* link table extent field bits */
+ #define DESC_PTR_LNKTBL_JUMP 0x80
+-#define DESC_PTR_LNKTBL_RETURN 0x02
++#define DESC_PTR_LNKTBL_RET 0x02
+ #define DESC_PTR_LNKTBL_NEXT 0x01
+diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c
+index 15b2453d2647..b2c755b74bea 100644
+--- a/drivers/dma/dma-axi-dmac.c
++++ b/drivers/dma/dma-axi-dmac.c
+@@ -486,7 +486,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
+
+ if (chan->hw_2d) {
+ if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
+- !axi_dmac_check_len(chan, xt->numf))
++ xt->numf == 0)
+ return NULL;
+ if (xt->sgl[0].size + dst_icg > chan->max_length ||
+ xt->sgl[0].size + src_icg > chan->max_length)
+diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
+index c299ff181bb6..62218ea0894c 100644
+--- a/drivers/dma/dw/platform.c
++++ b/drivers/dma/dw/platform.c
+@@ -87,13 +87,20 @@ static void dw_dma_acpi_controller_register(struct dw_dma *dw)
+ dma_cap_set(DMA_SLAVE, info->dma_cap);
+ info->filter_fn = dw_dma_acpi_filter;
+
+- ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate,
+- info);
++ ret = acpi_dma_controller_register(dev, acpi_dma_simple_xlate, info);
+ if (ret)
+ dev_err(dev, "could not register acpi_dma_controller\n");
+ }
++
++static void dw_dma_acpi_controller_free(struct dw_dma *dw)
++{
++ struct device *dev = dw->dma.dev;
++
++ acpi_dma_controller_free(dev);
++}
+ #else /* !CONFIG_ACPI */
+ static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {}
++static inline void dw_dma_acpi_controller_free(struct dw_dma *dw) {}
+ #endif /* !CONFIG_ACPI */
+
+ #ifdef CONFIG_OF
+@@ -249,6 +256,9 @@ static int dw_remove(struct platform_device *pdev)
+ {
+ struct dw_dma_chip *chip = platform_get_drvdata(pdev);
+
++ if (ACPI_HANDLE(&pdev->dev))
++ dw_dma_acpi_controller_free(chip->dw);
++
+ if (pdev->dev.of_node)
+ of_dma_controller_free(pdev->dev.of_node);
+
+diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
+index 202ffa9f7611..18f155a974db 100644
+--- a/drivers/dma/hsu/hsu.c
++++ b/drivers/dma/hsu/hsu.c
+@@ -64,10 +64,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
+
+ if (hsuc->direction == DMA_MEM_TO_DEV) {
+ bsr = config->dst_maxburst;
+- mtsr = config->src_addr_width;
++ mtsr = config->dst_addr_width;
+ } else if (hsuc->direction == DMA_DEV_TO_MEM) {
+ bsr = config->src_maxburst;
+- mtsr = config->dst_addr_width;
++ mtsr = config->src_addr_width;
+ }
+
+ hsu_chan_disable(hsuc);
+diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
+index 3f5a01cb4ab4..ceb82e74f5b4 100644
+--- a/drivers/dma/imx-sdma.c
++++ b/drivers/dma/imx-sdma.c
+@@ -1662,6 +1662,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
+ if (!sdma->script_number)
+ sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+
++ if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
++ / sizeof(s32)) {
++ dev_err(sdma->dev,
++ "SDMA script number %d not match with firmware.\n",
++ sdma->script_number);
++ return;
++ }
++
+ for (i = 0; i < sdma->script_number; i++)
+ if (addr_arr[i] > 0)
+ saddr_arr[i] = addr_arr[i];
+diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
+index 969534c1a6c6..abc8d3e0487b 100644
+--- a/drivers/dma/mv_xor.c
++++ b/drivers/dma/mv_xor.c
+@@ -1059,6 +1059,7 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
+ mv_chan->op_in_desc = XOR_MODE_IN_DESC;
+
+ dma_dev = &mv_chan->dmadev;
++ dma_dev->dev = &pdev->dev;
+ mv_chan->xordev = xordev;
+
+ /*
+@@ -1091,7 +1092,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev,
+ dma_dev->device_free_chan_resources = mv_xor_free_chan_resources;
+ dma_dev->device_tx_status = mv_xor_status;
+ dma_dev->device_issue_pending = mv_xor_issue_pending;
+- dma_dev->dev = &pdev->dev;
+
+ /* set prep routines based on capability */
+ if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
+diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
+index 09b6756366c3..4f4733d831a1 100644
+--- a/drivers/dma/tegra210-adma.c
++++ b/drivers/dma/tegra210-adma.c
+@@ -98,6 +98,7 @@ struct tegra_adma_chan_regs {
+ unsigned int src_addr;
+ unsigned int trg_addr;
+ unsigned int fifo_ctrl;
++ unsigned int cmd;
+ unsigned int tc;
+ };
+
+@@ -127,6 +128,7 @@ struct tegra_adma_chan {
+ enum dma_transfer_direction sreq_dir;
+ unsigned int sreq_index;
+ bool sreq_reserved;
++ struct tegra_adma_chan_regs ch_regs;
+
+ /* Transfer count and position info */
+ unsigned int tx_buf_count;
+@@ -635,8 +637,30 @@ static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
+ static int tegra_adma_runtime_suspend(struct device *dev)
+ {
+ struct tegra_adma *tdma = dev_get_drvdata(dev);
++ struct tegra_adma_chan_regs *ch_reg;
++ struct tegra_adma_chan *tdc;
++ int i;
+
+ tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
++ if (!tdma->global_cmd)
++ goto clk_disable;
++
++ for (i = 0; i < tdma->nr_channels; i++) {
++ tdc = &tdma->channels[i];
++ ch_reg = &tdc->ch_regs;
++ ch_reg->cmd = tdma_ch_read(tdc, ADMA_CH_CMD);
++ /* skip if channel is not active */
++ if (!ch_reg->cmd)
++ continue;
++ ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
++ ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
++ ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
++ ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
++ ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
++ ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
++ }
++
++clk_disable:
+ clk_disable_unprepare(tdma->ahub_clk);
+
+ return 0;
+@@ -645,7 +669,9 @@ static int tegra_adma_runtime_suspend(struct device *dev)
+ static int tegra_adma_runtime_resume(struct device *dev)
+ {
+ struct tegra_adma *tdma = dev_get_drvdata(dev);
+- int ret;
++ struct tegra_adma_chan_regs *ch_reg;
++ struct tegra_adma_chan *tdc;
++ int ret, i;
+
+ ret = clk_prepare_enable(tdma->ahub_clk);
+ if (ret) {
+@@ -654,6 +680,24 @@ static int tegra_adma_runtime_resume(struct device *dev)
+ }
+ tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
+
++ if (!tdma->global_cmd)
++ return 0;
++
++ for (i = 0; i < tdma->nr_channels; i++) {
++ tdc = &tdma->channels[i];
++ ch_reg = &tdc->ch_regs;
++ /* skip if channel was not active earlier */
++ if (!ch_reg->cmd)
++ continue;
++ tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
++ tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
++ tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
++ tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
++ tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
++ tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
++ tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
++ }
++
+ return 0;
+ }
+
+@@ -700,16 +744,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
+ return PTR_ERR(tdma->ahub_clk);
+ }
+
+- pm_runtime_enable(&pdev->dev);
+-
+- ret = pm_runtime_get_sync(&pdev->dev);
+- if (ret < 0)
+- goto rpm_disable;
+-
+- ret = tegra_adma_init(tdma);
+- if (ret)
+- goto rpm_put;
+-
+ INIT_LIST_HEAD(&tdma->dma_dev.channels);
+ for (i = 0; i < tdma->nr_channels; i++) {
+ struct tegra_adma_chan *tdc = &tdma->channels[i];
+@@ -727,6 +761,16 @@ static int tegra_adma_probe(struct platform_device *pdev)
+ tdc->tdma = tdma;
+ }
+
++ pm_runtime_enable(&pdev->dev);
++
++ ret = pm_runtime_get_sync(&pdev->dev);
++ if (ret < 0)
++ goto rpm_disable;
++
++ ret = tegra_adma_init(tdma);
++ if (ret)
++ goto rpm_put;
++
+ dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
+ dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
+@@ -768,13 +812,13 @@ static int tegra_adma_probe(struct platform_device *pdev)
+
+ dma_remove:
+ dma_async_device_unregister(&tdma->dma_dev);
+-irq_dispose:
+- while (--i >= 0)
+- irq_dispose_mapping(tdma->channels[i].irq);
+ rpm_put:
+ pm_runtime_put_sync(&pdev->dev);
+ rpm_disable:
+ pm_runtime_disable(&pdev->dev);
++irq_dispose:
++ while (--i >= 0)
++ irq_dispose_mapping(tdma->channels[i].irq);
+
+ return ret;
+ }
+diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
+index 982631d4e1f8..44158fa85973 100644
+--- a/drivers/dma/ti/edma.c
++++ b/drivers/dma/ti/edma.c
+@@ -2345,8 +2345,10 @@ static int edma_probe(struct platform_device *pdev)
+
+ ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
+ sizeof(*ecc->tc_list), GFP_KERNEL);
+- if (!ecc->tc_list)
+- return -ENOMEM;
++ if (!ecc->tc_list) {
++ ret = -ENOMEM;
++ goto err_reg1;
++ }
+
+ for (i = 0;; i++) {
+ ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index f59511bd9926..fd440b35d76e 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -681,22 +681,18 @@ static int del_mc_from_global_list(struct mem_ctl_info *mci)
+
+ struct mem_ctl_info *edac_mc_find(int idx)
+ {
+- struct mem_ctl_info *mci = NULL;
++ struct mem_ctl_info *mci;
+ struct list_head *item;
+
+ mutex_lock(&mem_ctls_mutex);
+
+ list_for_each(item, &mc_devices) {
+ mci = list_entry(item, struct mem_ctl_info, link);
+-
+- if (mci->mc_idx >= idx) {
+- if (mci->mc_idx == idx) {
+- goto unlock;
+- }
+- break;
+- }
++ if (mci->mc_idx == idx)
++ goto unlock;
+ }
+
++ mci = NULL;
+ unlock:
+ mutex_unlock(&mem_ctls_mutex);
+ return mci;
+diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c
+index 30fc04e28431..0a194af92438 100644
+--- a/drivers/firmware/arm_scmi/clock.c
++++ b/drivers/firmware/arm_scmi/clock.c
+@@ -185,6 +185,8 @@ scmi_clock_describe_rates_get(const struct scmi_handle *handle, u32 clk_id,
+ if (rate_discrete)
+ clk->list.num_rates = tot_rate_cnt;
+
++ clk->rate_discrete = rate_discrete;
++
+ err:
+ scmi_xfer_put(handle, t);
+ return ret;
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 09119e3f5c01..effc4c17e0fb 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -662,9 +662,7 @@ static int scmi_xfer_info_init(struct scmi_info *sinfo)
+
+ static int scmi_mailbox_check(struct device_node *np)
+ {
+- struct of_phandle_args arg;
+-
+- return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, &arg);
++ return of_parse_phandle_with_args(np, "mboxes", "#mbox-cells", 0, NULL);
+ }
+
+ static int scmi_mbox_free_channel(int id, void *p, void *data)
+diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
+index b53d5cc9c9f6..c00287b5f2c2 100644
+--- a/drivers/firmware/arm_scmi/sensors.c
++++ b/drivers/firmware/arm_scmi/sensors.c
+@@ -30,10 +30,10 @@ struct scmi_msg_resp_sensor_description {
+ __le32 id;
+ __le32 attributes_low;
+ #define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31))
+-#define NUM_TRIP_POINTS(x) (((x) >> 4) & 0xff)
++#define NUM_TRIP_POINTS(x) ((x) & 0xff)
+ __le32 attributes_high;
+ #define SENSOR_TYPE(x) ((x) & 0xff)
+-#define SENSOR_SCALE(x) (((x) >> 11) & 0x3f)
++#define SENSOR_SCALE(x) (((x) >> 11) & 0x1f)
+ #define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f)
+ #define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f)
+ u8 name[SCMI_MAX_STR_SIZE];
+diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
+index f2483548cde9..0dc0c78f1fdb 100644
+--- a/drivers/firmware/dmi_scan.c
++++ b/drivers/firmware/dmi_scan.c
+@@ -407,7 +407,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
+ bytes = ~0ull;
+ else if (size & 0x8000)
+ bytes = (u64)(size & 0x7fff) << 10;
+- else if (size != 0x7fff)
++ else if (size != 0x7fff || dm->length < 0x20)
+ bytes = (u64)size << 20;
+ else
+ bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
+diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
+index 1606abead22c..b0aeffd4e269 100644
+--- a/drivers/firmware/efi/runtime-wrappers.c
++++ b/drivers/firmware/efi/runtime-wrappers.c
+@@ -95,7 +95,7 @@ struct efi_runtime_work {
+ efi_rts_work.status = EFI_ABORTED; \
+ \
+ init_completion(&efi_rts_work.efi_rts_comp); \
+- INIT_WORK(&efi_rts_work.work, efi_call_rts); \
++ INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts); \
+ efi_rts_work.arg1 = _arg1; \
+ efi_rts_work.arg2 = _arg2; \
+ efi_rts_work.arg3 = _arg3; \
+diff --git a/drivers/firmware/google/coreboot_table-of.c b/drivers/firmware/google/coreboot_table-of.c
+index f15bf404c579..9b90c0fa4a0b 100644
+--- a/drivers/firmware/google/coreboot_table-of.c
++++ b/drivers/firmware/google/coreboot_table-of.c
+@@ -19,7 +19,6 @@
+ #include <linux/io.h>
+ #include <linux/module.h>
+ #include <linux/of_address.h>
+-#include <linux/of_platform.h>
+ #include <linux/platform_device.h>
+
+ #include "coreboot_table.h"
+@@ -30,7 +29,6 @@ static int coreboot_table_of_probe(struct platform_device *pdev)
+ void __iomem *ptr;
+
+ ptr = of_iomap(fw_dn, 0);
+- of_node_put(fw_dn);
+ if (!ptr)
+ return -ENOMEM;
+
+@@ -44,8 +42,9 @@ static int coreboot_table_of_remove(struct platform_device *pdev)
+
+ static const struct of_device_id coreboot_of_match[] = {
+ { .compatible = "coreboot" },
+- {},
++ {}
+ };
++MODULE_DEVICE_TABLE(of, coreboot_of_match);
+
+ static struct platform_driver coreboot_table_of_driver = {
+ .probe = coreboot_table_of_probe,
+@@ -55,28 +54,7 @@ static struct platform_driver coreboot_table_of_driver = {
+ .of_match_table = coreboot_of_match,
+ },
+ };
+-
+-static int __init platform_coreboot_table_of_init(void)
+-{
+- struct platform_device *pdev;
+- struct device_node *of_node;
+-
+- /* Limit device creation to the presence of /firmware/coreboot node */
+- of_node = of_find_node_by_path("/firmware/coreboot");
+- if (!of_node)
+- return -ENODEV;
+-
+- if (!of_match_node(coreboot_of_match, of_node))
+- return -ENODEV;
+-
+- pdev = of_platform_device_create(of_node, "coreboot_table_of", NULL);
+- if (!pdev)
+- return -ENODEV;
+-
+- return platform_driver_register(&coreboot_table_of_driver);
+-}
+-
+-module_init(platform_coreboot_table_of_init);
++module_platform_driver(coreboot_table_of_driver);
+
+ MODULE_AUTHOR("Google, Inc.");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c
+index c6fa9b393e84..bd62236d3f97 100644
+--- a/drivers/fsi/fsi-core.c
++++ b/drivers/fsi/fsi-core.c
+@@ -1060,6 +1060,14 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
+
+ }
+
++ rc = fsi_slave_set_smode(slave);
++ if (rc) {
++ dev_warn(&master->dev,
++ "can't set smode on slave:%02x:%02x %d\n",
++ link, id, rc);
++ goto err_free;
++ }
++
+ /* Allocate a minor in the FSI space */
+ rc = __fsi_get_new_minor(slave, fsi_dev_cfam, &slave->dev.devt,
+ &slave->cdev_idx);
+@@ -1071,17 +1079,14 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
+ rc = cdev_device_add(&slave->cdev, &slave->dev);
+ if (rc) {
+ dev_err(&slave->dev, "Error %d creating slave device\n", rc);
+- goto err_free;
++ goto err_free_ida;
+ }
+
+- rc = fsi_slave_set_smode(slave);
+- if (rc) {
+- dev_warn(&master->dev,
+- "can't set smode on slave:%02x:%02x %d\n",
+- link, id, rc);
+- kfree(slave);
+- return -ENODEV;
+- }
++ /* Now that we have the cdev registered with the core, any fatal
++ * failures beyond this point will need to clean up through
++ * cdev_device_del(). Fortunately though, nothing past here is fatal.
++ */
++
+ if (master->link_config)
+ master->link_config(master, link,
+ slave->t_send_delay,
+@@ -1098,10 +1103,13 @@ static int fsi_slave_init(struct fsi_master *master, int link, uint8_t id)
+ dev_dbg(&master->dev, "failed during slave scan with: %d\n",
+ rc);
+
+- return rc;
++ return 0;
+
+- err_free:
+- put_device(&slave->dev);
++err_free_ida:
++ fsi_free_minor(slave->dev.devt);
++err_free:
++ of_node_put(slave->dev.of_node);
++ kfree(slave);
+ return rc;
+ }
+
+diff --git a/drivers/fsi/fsi-sbefifo.c b/drivers/fsi/fsi-sbefifo.c
+index ae861342626e..9fa3959e0855 100644
+--- a/drivers/fsi/fsi-sbefifo.c
++++ b/drivers/fsi/fsi-sbefifo.c
+@@ -289,11 +289,11 @@ static int sbefifo_check_sbe_state(struct sbefifo *sbefifo)
+ switch ((sbm & CFAM_SBM_SBE_STATE_MASK) >> CFAM_SBM_SBE_STATE_SHIFT) {
+ case SBE_STATE_UNKNOWN:
+ return -ESHUTDOWN;
++ case SBE_STATE_DMT:
++ return -EBUSY;
+ case SBE_STATE_IPLING:
+ case SBE_STATE_ISTEP:
+ case SBE_STATE_MPIPL:
+- case SBE_STATE_DMT:
+- return -EBUSY;
+ case SBE_STATE_RUNTIME:
+ case SBE_STATE_DUMP: /* Not sure about that one */
+ break;
+diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
+index b696ec35efb3..e627e0e9001a 100644
+--- a/drivers/gpio/gpio-aspeed.c
++++ b/drivers/gpio/gpio-aspeed.c
+@@ -1199,7 +1199,7 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
+ gpio->chip.irq.need_valid_mask = true;
+
+ /* Allocate a cache of the output registers */
+- banks = gpio->config->nr_gpios >> 5;
++ banks = DIV_ROUND_UP(gpio->config->nr_gpios, 32);
+ gpio->dcache = devm_kcalloc(&pdev->dev,
+ banks, sizeof(u32), GFP_KERNEL);
+ if (!gpio->dcache)
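
The one-line fix above sizes the output-register cache with DIV_ROUND_UP() instead of a right shift, so a controller whose GPIO count is not a multiple of 32 still gets a cache word for its final, partially populated bank. The arithmetic in isolation (helper name invented):

#include <linux/kernel.h>	/* DIV_ROUND_UP() */

/*
 * One 32-bit cache word per bank of up to 32 lines.  For a controller
 * with, say, 232 lines, "232 >> 5" yields 7 and silently drops the last
 * eight GPIOs, while DIV_ROUND_UP(232, 32) yields the required 8 words.
 */
static unsigned int gpio_cache_words(unsigned int nr_gpios)
{
	return DIV_ROUND_UP(nr_gpios, 32);
}
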
+diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
+index f973d287696a..da5abf24f59f 100644
+--- a/drivers/gpu/drm/drm_context.c
++++ b/drivers/gpu/drm/drm_context.c
+@@ -361,23 +361,26 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
+ {
+ struct drm_ctx_list *ctx_entry;
+ struct drm_ctx *ctx = data;
++ int tmp_handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_KMS_LEGACY_CONTEXT) &&
+ !drm_core_check_feature(dev, DRIVER_LEGACY))
+ return -EINVAL;
+
+- ctx->handle = drm_legacy_ctxbitmap_next(dev);
+- if (ctx->handle == DRM_KERNEL_CONTEXT) {
++ tmp_handle = drm_legacy_ctxbitmap_next(dev);
++ if (tmp_handle == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+- ctx->handle = drm_legacy_ctxbitmap_next(dev);
++ tmp_handle = drm_legacy_ctxbitmap_next(dev);
+ }
+- DRM_DEBUG("%d\n", ctx->handle);
+- if (ctx->handle < 0) {
++ DRM_DEBUG("%d\n", tmp_handle);
++ if (tmp_handle < 0) {
+ DRM_DEBUG("Not enough free contexts.\n");
+ /* Should this return -EBUSY instead? */
+- return -ENOMEM;
++ return tmp_handle;
+ }
+
++ ctx->handle = tmp_handle;
++
+ ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
+ if (!ctx_entry) {
+ DRM_DEBUG("out of memory\n");
+diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
+index 4d7715845306..58fe3945494c 100644
+--- a/drivers/gpu/drm/drm_dp_mst_topology.c
++++ b/drivers/gpu/drm/drm_dp_mst_topology.c
+@@ -1022,9 +1022,20 @@ static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_
+ static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+ {
+ struct drm_dp_mst_port *rport = NULL;
++
+ mutex_lock(&mgr->lock);
+- if (mgr->mst_primary)
+- rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
++ /*
++ * Port may or may not be 'valid' but we don't care about that when
++ * destroying the port and we are guaranteed that the port pointer
++ * will be valid until we've finished
++ */
++ if (current_work() == &mgr->destroy_connector_work) {
++ kref_get(&port->kref);
++ rport = port;
++ } else if (mgr->mst_primary) {
++ rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary,
++ port);
++ }
+ mutex_unlock(&mgr->lock);
+ return rport;
+ }
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index f57fc1450b61..da9a381d6b57 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -2979,18 +2979,16 @@ static int drm_fbdev_fb_release(struct fb_info *info, int user)
+ return 0;
+ }
+
+-/*
+- * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
+- * unregister_framebuffer() or fb_release().
+- */
+-static void drm_fbdev_fb_destroy(struct fb_info *info)
++static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
+ {
+- struct drm_fb_helper *fb_helper = info->par;
+ struct fb_info *fbi = fb_helper->fbdev;
+ struct fb_ops *fbops = NULL;
+ void *shadow = NULL;
+
+- if (fbi->fbdefio) {
++ if (!fb_helper->dev)
++ return;
++
++ if (fbi && fbi->fbdefio) {
+ fb_deferred_io_cleanup(fbi);
+ shadow = fbi->screen_buffer;
+ fbops = fbi->fbops;
+@@ -3004,6 +3002,12 @@ static void drm_fbdev_fb_destroy(struct fb_info *info)
+ }
+
+ drm_client_framebuffer_delete(fb_helper->buffer);
++}
++
++static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
++{
++ drm_fbdev_cleanup(fb_helper);
++
+ /*
+ * FIXME:
+ * Remove conditional when all CMA drivers have been moved over to using
+@@ -3015,6 +3019,15 @@ static void drm_fbdev_fb_destroy(struct fb_info *info)
+ }
+ }
+
++/*
++ * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
++ * unregister_framebuffer() or fb_release().
++ */
++static void drm_fbdev_fb_destroy(struct fb_info *info)
++{
++ drm_fbdev_release(info->par);
++}
++
+ static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+ {
+ struct drm_fb_helper *fb_helper = info->par;
+@@ -3065,7 +3078,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_framebuffer *fb;
+ struct fb_info *fbi;
+ u32 format;
+- int ret;
+
+ DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+@@ -3082,10 +3094,8 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ fb = buffer->fb;
+
+ fbi = drm_fb_helper_alloc_fbi(fb_helper);
+- if (IS_ERR(fbi)) {
+- ret = PTR_ERR(fbi);
+- goto err_free_buffer;
+- }
++ if (IS_ERR(fbi))
++ return PTR_ERR(fbi);
+
+ fbi->par = fb_helper;
+ fbi->fbops = &drm_fbdev_fb_ops;
+@@ -3116,8 +3126,7 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ if (!fbops || !shadow) {
+ kfree(fbops);
+ vfree(shadow);
+- ret = -ENOMEM;
+- goto err_fb_info_destroy;
++ return -ENOMEM;
+ }
+
+ *fbops = *fbi->fbops;
+@@ -3129,13 +3138,6 @@ int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ }
+
+ return 0;
+-
+-err_fb_info_destroy:
+- drm_fb_helper_fini(fb_helper);
+-err_free_buffer:
+- drm_client_framebuffer_delete(buffer);
+-
+- return ret;
+ }
+ EXPORT_SYMBOL(drm_fb_helper_generic_probe);
+
+@@ -3147,18 +3149,11 @@ static void drm_fbdev_client_unregister(struct drm_client_dev *client)
+ {
+ struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
+
+- if (fb_helper->fbdev) {
+- drm_fb_helper_unregister_fbi(fb_helper);
++ if (fb_helper->fbdev)
+ /* drm_fbdev_fb_destroy() takes care of cleanup */
+- return;
+- }
+-
+- /* Did drm_fb_helper_fbdev_setup() run? */
+- if (fb_helper->dev)
+- drm_fb_helper_fini(fb_helper);
+-
+- drm_client_release(client);
+- kfree(fb_helper);
++ drm_fb_helper_unregister_fbi(fb_helper);
++ else
++ drm_fbdev_release(fb_helper);
+ }
+
+ static int drm_fbdev_client_restore(struct drm_client_dev *client)
+@@ -3174,7 +3169,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+ struct drm_device *dev = client->dev;
+ int ret;
+
+- /* If drm_fb_helper_fbdev_setup() failed, we only try once */
++ /* Setup is not retried if it has failed */
+ if (!fb_helper->dev && fb_helper->funcs)
+ return 0;
+
+@@ -3184,15 +3179,34 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
+ if (!dev->mode_config.num_connector)
+ return 0;
+
+- ret = drm_fb_helper_fbdev_setup(dev, fb_helper, &drm_fb_helper_generic_funcs,
+- fb_helper->preferred_bpp, 0);
+- if (ret) {
+- fb_helper->dev = NULL;
+- fb_helper->fbdev = NULL;
+- return ret;
+- }
++ drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
++
++ ret = drm_fb_helper_init(dev, fb_helper, dev->mode_config.num_connector);
++ if (ret)
++ goto err;
++
++ ret = drm_fb_helper_single_add_all_connectors(fb_helper);
++ if (ret)
++ goto err_cleanup;
++
++ if (!drm_drv_uses_atomic_modeset(dev))
++ drm_helper_disable_unused_functions(dev);
++
++ ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
++ if (ret)
++ goto err_cleanup;
+
+ return 0;
++
++err_cleanup:
++ drm_fbdev_cleanup(fb_helper);
++err:
++ fb_helper->dev = NULL;
++ fb_helper->fbdev = NULL;
++
++ DRM_DEV_ERROR(dev->dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);
++
++ return ret;
+ }
+
+ static const struct drm_client_funcs drm_fbdev_client_funcs = {
+@@ -3243,12 +3257,16 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+ return ret;
+ }
+
+- drm_client_add(&fb_helper->client);
+-
++ if (!preferred_bpp)
++ preferred_bpp = dev->mode_config.preferred_depth;
++ if (!preferred_bpp)
++ preferred_bpp = 32;
+ fb_helper->preferred_bpp = preferred_bpp;
+
+ drm_fbdev_client_hotplug(&fb_helper->client);
+
++ drm_client_add(&fb_helper->client);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(drm_fbdev_generic_setup);
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+index 468dff2f7904..9d839b4fd8f7 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
+@@ -217,7 +217,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
+ mutex_lock(&obj->lock);
+ pages = etnaviv_gem_get_pages(obj);
+ mutex_unlock(&obj->lock);
+- if (pages) {
++ if (!IS_ERR(pages)) {
+ int j;
+
+ iter.hdr->data[0] = bomap - bomap_start;
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+index 0566171f8df2..f21529e635e3 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c
+@@ -15,7 +15,7 @@ struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj)
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (WARN_ON(!etnaviv_obj->pages)) /* should have already pinned! */
+- return NULL;
++ return ERR_PTR(-EINVAL);
+
+ return drm_prime_pages_to_sg(etnaviv_obj->pages, npages);
+ }
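
Both etnaviv hunks above move from a bare NULL to the ERR_PTR()/IS_ERR() convention, so the core-dump caller can tell a failure apart from a valid page array. The convention in miniature, with my_obj as a stand-in type rather than an etnaviv structure:

#include <linux/err.h>
#include <linux/errno.h>

struct page;

struct my_obj {
	struct page **pages;	/* set once the object has been pinned */
};

/* Return a valid pointer or an encoded errno - never NULL on failure. */
static struct page **my_obj_get_pages(struct my_obj *obj)
{
	if (!obj->pages)
		return ERR_PTR(-EINVAL);
	return obj->pages;
}

static int my_obj_use_pages(struct my_obj *obj)
{
	struct page **pages = my_obj_get_pages(obj);

	if (IS_ERR(pages))
		return PTR_ERR(pages);	/* propagate the encoded errno */
	/* ... walk pages ... */
	return 0;
}
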
+diff --git a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+index 9980d81a26e3..4227a4006c34 100644
+--- a/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
++++ b/drivers/gpu/drm/etnaviv/etnaviv_perfmon.c
+@@ -113,7 +113,7 @@ static const struct etnaviv_pm_domain doms_3d[] = {
+ .name = "PE",
+ .profile_read = VIVS_MC_PROFILE_PE_READ,
+ .profile_config = VIVS_MC_PROFILE_CONFIG0,
+- .nr_signals = 5,
++ .nr_signals = 4,
+ .signal = (const struct etnaviv_pm_signal[]) {
+ {
+ "PIXEL_COUNT_KILLED_BY_COLOR_PIPE",
+@@ -435,7 +435,7 @@ int etnaviv_pm_query_sig(struct etnaviv_gpu *gpu,
+
+ dom = meta->domains + signal->domain;
+
+- if (signal->iter > dom->nr_signals)
++ if (signal->iter >= dom->nr_signals)
+ return -EINVAL;
+
+ sig = &dom->signal[signal->iter];
+@@ -461,7 +461,7 @@ int etnaviv_pm_req_validate(const struct drm_etnaviv_gem_submit_pmr *r,
+
+ dom = meta->domains + r->domain;
+
+- if (r->signal > dom->nr_signals)
++ if (r->signal >= dom->nr_signals)
+ return -EINVAL;
+
+ return 0;
+diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+index 8bd29075ae4e..edcca1761500 100644
+--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
++++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c
+@@ -71,7 +71,6 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper,
+ DRM_DEBUG_DRIVER("surface width(%d), height(%d) and bpp(%d)\n",
+ sizes->surface_width, sizes->surface_height,
+ sizes->surface_bpp);
+- sizes->surface_depth = 32;
+
+ bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
+
+diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+index 669c2d4b070d..5c068301d817 100644
+--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+@@ -395,19 +395,17 @@ static const unsigned int a3xx_registers[] = {
+ 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
+ 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
+ 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
+- 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
+- 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
+- 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
+- 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
+- 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
+- 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
+- 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
+- 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
+- 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
+- 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
+- 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
+- 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
+- 0x303c, 0x303c, 0x305e, 0x305f,
++ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2440, 0x2440, 0x2444, 0x2444,
++ 0x2448, 0x244d, 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470,
++ 0x2472, 0x2472, 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3,
++ 0x24e4, 0x24ef, 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e,
++ 0x2510, 0x2511, 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea,
++ 0x25ec, 0x25ed, 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617,
++ 0x261a, 0x261a, 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0,
++ 0x26c4, 0x26ce, 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9,
++ 0x26ec, 0x26ec, 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743,
++ 0x300c, 0x300e, 0x301c, 0x301d, 0x302a, 0x302a, 0x302c, 0x302d,
++ 0x3030, 0x3031, 0x3034, 0x3036, 0x303c, 0x303c, 0x305e, 0x305f,
+ ~0 /* sentinel */
+ };
+
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+index 06be7cf7ce50..79bafea66354 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
+@@ -310,7 +310,7 @@ static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+ u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+ u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+ int i, j;
+- u8 stages;
++ int stages;
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+index 824067d2d427..42f0ecb0cf35 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+@@ -635,7 +635,7 @@ fail:
+ if (cfg_handler)
+ mdp5_cfg_destroy(cfg_handler);
+
+- return NULL;
++ return ERR_PTR(ret);
+ }
+
+ static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index cc4ea5502d6c..3b78bca0bb4d 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -34,6 +34,8 @@
+ #include "dsi_cfg.h"
+ #include "msm_kms.h"
+
++#define DSI_RESET_TOGGLE_DELAY_MS 20
++
+ static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
+ {
+ u32 ver;
+@@ -994,7 +996,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
+ wmb(); /* clocks need to be enabled before reset */
+
+ dsi_write(msm_host, REG_DSI_RESET, 1);
+- wmb(); /* make sure reset happen */
++	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure the reset happens */
+ dsi_write(msm_host, REG_DSI_RESET, 0);
+ }
+
+@@ -1402,7 +1404,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
+
+ /* dsi controller can only be reset while clocks are running */
+ dsi_write(msm_host, REG_DSI_RESET, 1);
+- wmb(); /* make sure reset happen */
++	msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure the reset happens */
+ dsi_write(msm_host, REG_DSI_RESET, 0);
+ wmb(); /* controller out of reset */
+ dsi_write(msm_host, REG_DSI_CTRL, data0);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
+index e67a471331b5..6ec745873bc5 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
++++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
+@@ -214,6 +214,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
+ WARN_ON(1);
+ break;
+ }
++ break;
+ case NOUVEAU_GETPARAM_FB_SIZE:
+ getparam->value = drm->gem.vram_available;
+ break;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+index 60ece0a8a2e1..1d2d6bae73cd 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gddr3.c
+@@ -87,7 +87,7 @@ nvkm_gddr3_calc(struct nvkm_ram *ram)
+ WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
+ /* XXX: Get these values from the VBIOS instead */
+ DLL = !(ram->mr[1] & 0x1);
+- RON = !(ram->mr[1] & 0x300) >> 8;
++ RON = !((ram->mr[1] & 0x300) >> 8);
+ break;
+ default:
+ return -ENOSYS;
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+index 11b28b086a06..7b052879af72 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/memx.c
+@@ -88,10 +88,10 @@ nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
+ if (exec) {
+ nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
+ memx->base, finish);
++ nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
++ reply[0], reply[1]);
+ }
+
+- nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
+- reply[0], reply[1]);
+ kfree(memx);
+ return 0;
+ }
+diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c
+index 8a1687887ae9..bd704a36c5d0 100644
+--- a/drivers/gpu/drm/panel/panel-lvds.c
++++ b/drivers/gpu/drm/panel/panel-lvds.c
+@@ -199,7 +199,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds)
+ static int panel_lvds_probe(struct platform_device *pdev)
+ {
+ struct panel_lvds *lvds;
+- struct device_node *np;
+ int ret;
+
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+@@ -245,14 +244,9 @@ static int panel_lvds_probe(struct platform_device *pdev)
+ return ret;
+ }
+
+- np = of_parse_phandle(lvds->dev->of_node, "backlight", 0);
+- if (np) {
+- lvds->backlight = of_find_backlight_by_node(np);
+- of_node_put(np);
+-
+- if (!lvds->backlight)
+- return -EPROBE_DEFER;
+- }
++ lvds->backlight = devm_of_find_backlight(lvds->dev);
++ if (IS_ERR(lvds->backlight))
++ return PTR_ERR(lvds->backlight);
+
+ /*
+ * TODO: Handle all power supplies specified in the DT node in a generic
+@@ -268,14 +262,10 @@ static int panel_lvds_probe(struct platform_device *pdev)
+
+ ret = drm_panel_add(&lvds->panel);
+ if (ret < 0)
+- goto error;
++ return ret;
+
+ dev_set_drvdata(lvds->dev, lvds);
+ return 0;
+-
+-error:
+- put_device(&lvds->backlight->dev);
+- return ret;
+ }
+
+ static int panel_lvds_remove(struct platform_device *pdev)
+@@ -286,9 +276,6 @@ static int panel_lvds_remove(struct platform_device *pdev)
+
+ panel_lvds_disable(&lvds->panel);
+
+- if (lvds->backlight)
+- put_device(&lvds->backlight->dev);
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
+index ebce4601a305..827d551962d9 100644
+--- a/drivers/gpu/drm/radeon/cik.c
++++ b/drivers/gpu/drm/radeon/cik.c
+@@ -6965,8 +6965,8 @@ static int cik_irq_init(struct radeon_device *rdev)
+ }
+
+ /* setup interrupt control */
+- /* XXX this should actually be a bus address, not an MC address. same on older asics */
+- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
++ /* set dummy read address to dummy page address */
++ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+ * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
+index e06e2d8feab3..a724bb87cfad 100644
+--- a/drivers/gpu/drm/radeon/r600.c
++++ b/drivers/gpu/drm/radeon/r600.c
+@@ -3690,8 +3690,8 @@ int r600_irq_init(struct radeon_device *rdev)
+ }
+
+ /* setup interrupt control */
+- /* set dummy read address to ring address */
+- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
++ /* set dummy read address to dummy page address */
++ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+ * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
+index 85c604d29235..639f0698f961 100644
+--- a/drivers/gpu/drm/radeon/si.c
++++ b/drivers/gpu/drm/radeon/si.c
+@@ -5993,8 +5993,8 @@ static int si_irq_init(struct radeon_device *rdev)
+ }
+
+ /* setup interrupt control */
+- /* set dummy read address to ring address */
+- WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
++ /* set dummy read address to dummy page address */
++ WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8);
+ interrupt_cntl = RREG32(INTERRUPT_CNTL);
+ /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
+ * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index 15dc9caa128b..212e5e11e4b7 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -837,7 +837,7 @@ unlock:
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+- return 0;
++ return ret;
+ }
+
+ static const struct drm_crtc_funcs crtc_funcs_gen2 = {
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+index 0386b454e221..6a9578159c2b 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+@@ -544,7 +544,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
+ * Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+- ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
++ ret = drm_vblank_init(dev, rcdu->num_crtcs);
+ if (ret < 0)
+ return ret;
+
+diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
+index 4c39de3f4f0f..b6dc91cdff68 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
++++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
+@@ -59,11 +59,11 @@ struct rcar_lvds {
+ enum rcar_lvds_mode mode;
+ };
+
+-#define bridge_to_rcar_lvds(bridge) \
+- container_of(bridge, struct rcar_lvds, bridge)
++#define bridge_to_rcar_lvds(b) \
++ container_of(b, struct rcar_lvds, bridge)
+
+-#define connector_to_rcar_lvds(connector) \
+- container_of(connector, struct rcar_lvds, connector)
++#define connector_to_rcar_lvds(c) \
++ container_of(c, struct rcar_lvds, connector)
+
+ static void rcar_lvds_write(struct rcar_lvds *lvds, u32 reg, u32 data)
+ {
+diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+index 592572554eb0..58d8a98c749b 100644
+--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
++++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+@@ -233,8 +233,8 @@ static int shmob_drm_probe(struct platform_device *pdev)
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sdev->mmio = devm_ioremap_resource(&pdev->dev, res);
+- if (sdev->mmio == NULL)
+- return -ENOMEM;
++ if (IS_ERR(sdev->mmio))
++ return PTR_ERR(sdev->mmio);
+
+ ret = shmob_drm_setup_clocks(sdev, pdata->clk_source);
+ if (ret < 0)
+diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
+index 49438337f70d..19b9b5ed1297 100644
+--- a/drivers/gpu/drm/sti/sti_hda.c
++++ b/drivers/gpu/drm/sti/sti_hda.c
+@@ -721,7 +721,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
+ return 0;
+
+ err_sysfs:
+- drm_bridge_remove(bridge);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
+index 34cdc4644435..ccf718404a1c 100644
+--- a/drivers/gpu/drm/sti/sti_hdmi.c
++++ b/drivers/gpu/drm/sti/sti_hdmi.c
+@@ -1315,7 +1315,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
+ return 0;
+
+ err_sysfs:
+- drm_bridge_remove(bridge);
+ hdmi->drm_connector = NULL;
+ return -EINVAL;
+ }
+diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+index 3ecffa52c814..a74adec6c5dc 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
++++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_tmds_clk.c
+@@ -52,7 +52,7 @@ static unsigned long sun4i_tmds_calc_divider(unsigned long rate,
+ (rate - tmp_rate) < (rate - best_rate)) {
+ best_rate = tmp_rate;
+ best_m = m;
+- is_double = d;
++ is_double = (d == 2) ? true : false;
+ }
+ }
+ }
+diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
+index c8a581b1f4c4..608906f06ced 100644
+--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
++++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
+@@ -650,11 +650,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
+ {
+ struct virtio_gpu_get_capset *cmd_p;
+ struct virtio_gpu_vbuffer *vbuf;
+- int max_size = vgdev->capsets[idx].max_size;
++ int max_size;
+ struct virtio_gpu_drv_cap_cache *cache_ent;
+ void *resp_buf;
+
+- if (idx > vgdev->num_capsets)
++ if (idx >= vgdev->num_capsets)
+ return -EINVAL;
+
+ if (version > vgdev->capsets[idx].max_version)
+@@ -664,6 +664,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
+ if (!cache_ent)
+ return -ENOMEM;
+
++ max_size = vgdev->capsets[idx].max_size;
+ cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
+ if (!cache_ent->caps_cache) {
+ kfree(cache_ent);
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+index e7e4655d3f36..ce1ad7cd7899 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+@@ -511,17 +511,14 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
+ container_of(work, struct vmw_cmdbuf_man, work);
+ struct vmw_cmdbuf_header *entry, *next;
+ uint32_t dummy;
+- bool restart[SVGA_CB_CONTEXT_MAX];
+ bool send_fence = false;
+ struct list_head restart_head[SVGA_CB_CONTEXT_MAX];
+ int i;
+ struct vmw_cmdbuf_context *ctx;
+ bool global_block = false;
+
+- for_each_cmdbuf_ctx(man, i, ctx) {
++ for_each_cmdbuf_ctx(man, i, ctx)
+ INIT_LIST_HEAD(&restart_head[i]);
+- restart[i] = false;
+- }
+
+ mutex_lock(&man->error_mutex);
+ spin_lock(&man->lock);
+@@ -533,7 +530,6 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
+ const char *cmd_name;
+
+ list_del_init(&entry->list);
+- restart[entry->cb_context] = true;
+ global_block = true;
+
+ if (!vmw_cmd_describe(header, &error_cmd_size, &cmd_name)) {
+diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
+index c85bfe7571cb..802662839e7e 100644
+--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
++++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
+@@ -236,8 +236,14 @@ static int gem_mmap_obj(struct xen_gem_object *xen_obj,
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+ vma->vm_pgoff = 0;
+- vma->vm_page_prot =
+- pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
++ /*
++ * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
++ * all memory which is shared with other entities in the system
++ * (including the hypervisor and other guests) must reside in memory
++ * which is mapped as Normal Inner Write-Back Outer Write-Back
++ * Inner-Shareable.
++ */
++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+ /*
+ * vm_operations_struct.fault handler will be called if CPU access
+@@ -283,8 +289,9 @@ void *xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj)
+ if (!xen_obj->pages)
+ return NULL;
+
++ /* Please see comment in gem_mmap_obj on mapping and attributes. */
+ return vmap(xen_obj->pages, xen_obj->num_pages,
+- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
++ VM_MAP, PAGE_KERNEL);
+ }
+
+ void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
+diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
+index 49f4b33a5685..7f28912c9abc 100644
+--- a/drivers/hwmon/lm75.c
++++ b/drivers/hwmon/lm75.c
+@@ -165,7 +165,7 @@ static int lm75_write(struct device *dev, enum hwmon_sensor_types type,
+ temp = DIV_ROUND_CLOSEST(temp << (resolution - 8),
+ 1000) << (16 - resolution);
+
+- return regmap_write(data->regmap, reg, temp);
++ return regmap_write(data->regmap, reg, (u16)temp);
+ }
+
+ static umode_t lm75_is_visible(const void *data, enum hwmon_sensor_types type,
+diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c
+index 85b515cd9df0..2bc352c5357f 100644
+--- a/drivers/hwmon/pmbus/tps53679.c
++++ b/drivers/hwmon/pmbus/tps53679.c
+@@ -80,7 +80,14 @@ static struct pmbus_driver_info tps53679_info = {
+ static int tps53679_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+ {
+- return pmbus_do_probe(client, id, &tps53679_info);
++ struct pmbus_driver_info *info;
++
++ info = devm_kmemdup(&client->dev, &tps53679_info, sizeof(*info),
++ GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
++
++ return pmbus_do_probe(client, id, info);
+ }
+
+ static const struct i2c_device_id tps53679_id[] = {
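
The tps53679 fix duplicates the driver-info template per client with devm_kmemdup() instead of handing pmbus a single shared static structure that would then be modified for every bound device. The same pattern reduced to its core, with invented demo_* names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_info {
	int pages;
	unsigned int flags;
};

/* Shared, read-only template; never passed to the core directly. */
static const struct demo_info demo_template = {
	.pages = 1,
};

/*
 * Per-device copy: the core may tweak it freely, other instances are
 * unaffected, and the devm allocation is freed automatically on unbind.
 */
static struct demo_info *demo_info_dup(struct device *dev)
{
	return devm_kmemdup(dev, &demo_template, sizeof(demo_template),
			    GFP_KERNEL);
}
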
+diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
+index decd7df995ab..2a18539591ea 100644
+--- a/drivers/hwmon/shtc1.c
++++ b/drivers/hwmon/shtc1.c
+@@ -38,7 +38,7 @@ static const unsigned char shtc1_cmd_read_id_reg[] = { 0xef, 0xc8 };
+
+ /* constants for reading the ID register */
+ #define SHTC1_ID 0x07
+-#define SHTC1_ID_REG_MASK 0x1f
++#define SHTC1_ID_REG_MASK 0x3f
+
+ /* delays for non-blocking i2c commands, both in us */
+ #define SHTC1_NONBLOCKING_WAIT_TIME_HPM 14400
+diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
+index 8ac89d0781cc..a575e1cdb81a 100644
+--- a/drivers/hwmon/w83627hf.c
++++ b/drivers/hwmon/w83627hf.c
+@@ -130,17 +130,23 @@ superio_select(struct w83627hf_sio_data *sio, int ld)
+ outb(ld, sio->sioaddr + 1);
+ }
+
+-static inline void
++static inline int
+ superio_enter(struct w83627hf_sio_data *sio)
+ {
++ if (!request_muxed_region(sio->sioaddr, 2, DRVNAME))
++ return -EBUSY;
++
+ outb(0x87, sio->sioaddr);
+ outb(0x87, sio->sioaddr);
++
++ return 0;
+ }
+
+ static inline void
+ superio_exit(struct w83627hf_sio_data *sio)
+ {
+ outb(0xAA, sio->sioaddr);
++ release_region(sio->sioaddr, 2);
+ }
+
+ #define W627_DEVID 0x52
+@@ -1278,7 +1284,7 @@ static DEVICE_ATTR_RO(name);
+ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
+ struct w83627hf_sio_data *sio_data)
+ {
+- int err = -ENODEV;
++ int err;
+ u16 val;
+
+ static __initconst char *const names[] = {
+@@ -1290,7 +1296,11 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr,
+ };
+
+ sio_data->sioaddr = sioaddr;
+- superio_enter(sio_data);
++ err = superio_enter(sio_data);
++ if (err)
++ return err;
++
++ err = -ENODEV;
+ val = force_id ? force_id : superio_inb(sio_data, DEVID);
+ switch (val) {
+ case W627_DEVID:
+@@ -1644,9 +1654,21 @@ static int w83627thf_read_gpio5(struct platform_device *pdev)
+ struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
+ int res = 0xff, sel;
+
+- superio_enter(sio_data);
++ if (superio_enter(sio_data)) {
++ /*
++ * Some other driver reserved the address space for itself.
++ * We don't want to fail driver instantiation because of that,
++ * so display a warning and keep going.
++ */
++ dev_warn(&pdev->dev,
++ "Can not read VID data: Failed to enable SuperIO access\n");
++ return res;
++ }
++
+ superio_select(sio_data, W83627HF_LD_GPIO5);
+
++ res = 0xff;
++
+ /* Make sure these GPIO pins are enabled */
+ if (!(superio_inb(sio_data, W83627THF_GPIO5_EN) & (1<<3))) {
+ dev_dbg(&pdev->dev, "GPIO5 disabled, no VID function\n");
+@@ -1677,7 +1699,17 @@ static int w83687thf_read_vid(struct platform_device *pdev)
+ struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
+ int res = 0xff;
+
+- superio_enter(sio_data);
++ if (superio_enter(sio_data)) {
++ /*
++ * Some other driver reserved the address space for itself.
++ * We don't want to fail driver instantiation because of that,
++ * so display a warning and keep going.
++ */
++ dev_warn(&pdev->dev,
++ "Can not read VID data: Failed to enable SuperIO access\n");
++ return res;
++ }
++
+ superio_select(sio_data, W83627HF_LD_HWM);
+
+ /* Make sure these GPIO pins are enabled */
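
The w83627hf change wraps the Super-I/O config ports in request_muxed_region() and releases them on exit, so drivers sharing those ports are serialized and a busy region turns into -EBUSY instead of silent corruption. A stripped-down enter/exit pair; the 0x2e port and "demo-sio" name are placeholders:

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/errno.h>

#define DEMO_SIO_ADDR	0x2e	/* typical Super-I/O config port */

static int demo_superio_enter(void)
{
	/* Muxed: other muxed claimants wait their turn rather than clash. */
	if (!request_muxed_region(DEMO_SIO_ADDR, 2, "demo-sio"))
		return -EBUSY;

	/* Chip-specific unlock sequence (Winbond-style 0x87, 0x87). */
	outb(0x87, DEMO_SIO_ADDR);
	outb(0x87, DEMO_SIO_ADDR);
	return 0;
}

static void demo_superio_exit(void)
{
	outb(0xaa, DEMO_SIO_ADDR);
	release_region(DEMO_SIO_ADDR, 2);
}
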
+diff --git a/drivers/hwtracing/coresight/coresight-catu.h b/drivers/hwtracing/coresight/coresight-catu.h
+index 1b281f0dcccc..1d2ad183fd92 100644
+--- a/drivers/hwtracing/coresight/coresight-catu.h
++++ b/drivers/hwtracing/coresight/coresight-catu.h
+@@ -109,11 +109,6 @@ static inline bool coresight_is_catu_device(struct coresight_device *csdev)
+ return true;
+ }
+
+-#ifdef CONFIG_CORESIGHT_CATU
+ extern const struct etr_buf_operations etr_catu_buf_ops;
+-#else
+-/* Dummy declaration for the CATU ops */
+-static const struct etr_buf_operations etr_catu_buf_ops;
+-#endif
+
+ #endif
+diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
+index c3c645201514..567f46ca2599 100644
+--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
++++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
+@@ -437,15 +437,16 @@ static int etm_addr_filters_validate(struct list_head *filters)
+ static void etm_addr_filters_sync(struct perf_event *event)
+ {
+ struct perf_addr_filters_head *head = perf_event_addr_filters(event);
+- unsigned long start, stop, *offs = event->addr_filters_offs;
++ unsigned long start, stop;
++ struct perf_addr_filter_range *fr = event->addr_filter_ranges;
+ struct etm_filters *filters = event->hw.addr_filters;
+ struct etm_filter *etm_filter;
+ struct perf_addr_filter *filter;
+ int i = 0;
+
+ list_for_each_entry(filter, &head->list, entry) {
+- start = filter->offset + offs[i];
+- stop = start + filter->size;
++ start = fr[i].start;
++ stop = start + fr[i].size;
+ etm_filter = &filters->etm_filter[i];
+
+ switch (filter->action) {
+diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+index 2d6f428176ff..3b684687b5a7 100644
+--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
++++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
+@@ -747,7 +747,8 @@ static inline void tmc_etr_disable_catu(struct tmc_drvdata *drvdata)
+ static const struct etr_buf_operations *etr_buf_ops[] = {
+ [ETR_MODE_FLAT] = &etr_flat_buf_ops,
+ [ETR_MODE_ETR_SG] = &etr_sg_buf_ops,
+- [ETR_MODE_CATU] = &etr_catu_buf_ops,
++ [ETR_MODE_CATU] = IS_ENABLED(CONFIG_CORESIGHT_CATU)
++ ? &etr_catu_buf_ops : NULL,
+ };
+
+ static inline int tmc_etr_mode_alloc_buf(int mode,
+@@ -761,7 +762,7 @@ static inline int tmc_etr_mode_alloc_buf(int mode,
+ case ETR_MODE_FLAT:
+ case ETR_MODE_ETR_SG:
+ case ETR_MODE_CATU:
+- if (etr_buf_ops[mode]->alloc)
++ if (etr_buf_ops[mode] && etr_buf_ops[mode]->alloc)
+ rc = etr_buf_ops[mode]->alloc(drvdata, etr_buf,
+ node, pages);
+ if (!rc)
+diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
+index d75fbcbf02ef..667f8032f8ef 100644
+--- a/drivers/i2c/busses/i2c-stm32.c
++++ b/drivers/i2c/busses/i2c-stm32.c
+@@ -21,13 +21,13 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
+
+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL);
+ if (!dma)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ /* Request and configure I2C TX dma channel */
+- dma->chan_tx = dma_request_slave_channel(dev, "tx");
+- if (!dma->chan_tx) {
++ dma->chan_tx = dma_request_chan(dev, "tx");
++ if (IS_ERR(dma->chan_tx)) {
+ dev_dbg(dev, "can't request DMA tx channel\n");
+- ret = -EINVAL;
++ ret = PTR_ERR(dma->chan_tx);
+ goto fail_al;
+ }
+
+@@ -43,10 +43,10 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
+ }
+
+ /* Request and configure I2C RX dma channel */
+- dma->chan_rx = dma_request_slave_channel(dev, "rx");
+- if (!dma->chan_rx) {
++ dma->chan_rx = dma_request_chan(dev, "rx");
++ if (IS_ERR(dma->chan_rx)) {
+ dev_err(dev, "can't request DMA rx channel\n");
+- ret = -EINVAL;
++ ret = PTR_ERR(dma->chan_rx);
+ goto fail_tx;
+ }
+
+@@ -76,7 +76,7 @@ fail_al:
+ devm_kfree(dev, dma);
+ dev_info(dev, "can't use DMA\n");
+
+- return NULL;
++ return ERR_PTR(ret);
+ }
+
+ void stm32_i2c_dma_free(struct stm32_i2c_dma *dma)
+diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
+index f4e3613f9361..eb7e533b0dd4 100644
+--- a/drivers/i2c/busses/i2c-stm32f7.c
++++ b/drivers/i2c/busses/i2c-stm32f7.c
+@@ -1252,8 +1252,8 @@ static int stm32f7_i2c_get_free_slave_id(struct stm32f7_i2c_dev *i2c_dev,
+ * slave[0] supports 7-bit and 10-bit slave address
+ * slave[1] supports 7-bit slave address only
+ */
+- for (i = 0; i < STM32F7_I2C_MAX_SLAVE; i++) {
+- if (i == 1 && (slave->flags & I2C_CLIENT_PEC))
++ for (i = STM32F7_I2C_MAX_SLAVE - 1; i >= 0; i--) {
++ if (i == 1 && (slave->flags & I2C_CLIENT_TEN))
+ continue;
+ if (!i2c_dev->slave[i]) {
+ *id = i;
+@@ -1914,6 +1914,15 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
+ i2c_dev->dma = stm32_i2c_dma_request(i2c_dev->dev, phy_addr,
+ STM32F7_I2C_TXDR,
+ STM32F7_I2C_RXDR);
++ if (PTR_ERR(i2c_dev->dma) == -ENODEV)
++ i2c_dev->dma = NULL;
++ else if (IS_ERR(i2c_dev->dma)) {
++ ret = PTR_ERR(i2c_dev->dma);
++ if (ret != -EPROBE_DEFER)
++ dev_err(&pdev->dev,
++ "Failed to request dma error %i\n", ret);
++ goto clk_free;
++ }
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
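
The stm32 I2C hunks switch from dma_request_slave_channel() to dma_request_chan(), whose ERR_PTR return lets the caller distinguish "no DMA described for this device" (-ENODEV, fall back to PIO) from errors worth propagating, notably -EPROBE_DEFER. A sketch of that optional-resource handling, with an invented helper name:

#include <linux/dmaengine.h>
#include <linux/err.h>

/*
 * Returns a channel, NULL when the device simply has no DMA described
 * (the caller then runs in PIO mode), or an ERR_PTR to propagate -
 * including -EPROBE_DEFER while the DMA controller is not ready yet.
 */
static struct dma_chan *demo_request_tx_dma(struct device *dev)
{
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan) && PTR_ERR(chan) == -ENODEV)
		return NULL;

	return chan;
}
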
+diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
+index 873c2bf637c0..617c9f7fe59a 100644
+--- a/drivers/iio/dac/ad5380.c
++++ b/drivers/iio/dac/ad5380.c
+@@ -221,7 +221,7 @@ static int ad5380_read_raw(struct iio_dev *indio_dev,
+ if (ret)
+ return ret;
+ *val >>= chan->scan_type.shift;
+- val -= (1 << chan->scan_type.realbits) / 2;
++ *val -= (1 << chan->scan_type.realbits) / 2;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 2 * st->vref;
+diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
+index df5b2a0da96c..f2e308c6d6d7 100644
+--- a/drivers/iio/light/tsl2772.c
++++ b/drivers/iio/light/tsl2772.c
+@@ -716,6 +716,13 @@ static int tsl2772_chip_off(struct iio_dev *indio_dev)
+ return tsl2772_write_control_reg(chip, 0x00);
+ }
+
++static void tsl2772_chip_off_action(void *data)
++{
++ struct iio_dev *indio_dev = data;
++
++ tsl2772_chip_off(indio_dev);
++}
++
+ /**
+ * tsl2772_invoke_change - power cycle the device to implement the user
+ * parameters
+@@ -1711,9 +1718,14 @@ static int tsl2772_probe(struct i2c_client *clientp,
+ if (ret < 0)
+ return ret;
+
++ ret = devm_add_action_or_reset(&clientp->dev,
++ tsl2772_chip_off_action,
++ indio_dev);
++ if (ret < 0)
++ return ret;
++
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+- tsl2772_chip_off(indio_dev);
+ dev_err(&clientp->dev,
+ "%s: iio registration failed\n", __func__);
+ return ret;
+@@ -1740,8 +1752,6 @@ static int tsl2772_remove(struct i2c_client *client)
+ {
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+- tsl2772_chip_off(indio_dev);
+-
+ iio_device_unregister(indio_dev);
+
+ return 0;
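
The tsl2772 fix registers the chip power-off step with devm_add_action_or_reset(), so it runs automatically both when a later probe step fails and on remove, which is why the explicit tsl2772_chip_off() calls disappear. The pattern on its own, with invented demo_* names:

#include <linux/device.h>

struct demo_chip {
	bool powered;
};

static int demo_chip_on(struct demo_chip *chip)
{
	chip->powered = true;
	return 0;
}

static void demo_chip_off(void *data)
{
	struct demo_chip *chip = data;

	chip->powered = false;
}

static int demo_probe_tail(struct device *dev, struct demo_chip *chip)
{
	int ret = demo_chip_on(chip);

	if (ret)
		return ret;

	/*
	 * On failure the action runs immediately ("_or_reset"); on success
	 * it is queued and runs on unbind or on any later devm unwind, so
	 * remove() no longer needs a manual power-off call.
	 */
	return devm_add_action_or_reset(dev, demo_chip_off, chip);
}
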
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 319bfef00a4a..e16872e0724f 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2889,7 +2889,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
+ if (status)
+ pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
+ status);
+- } else {
++ } else if (status) {
+ pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
+ }
+
+diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c
+index be854628a7c6..959a3418a192 100644
+--- a/drivers/infiniband/core/uverbs_uapi.c
++++ b/drivers/infiniband/core/uverbs_uapi.c
+@@ -17,6 +17,8 @@ static void *uapi_add_elm(struct uverbs_api *uapi, u32 key, size_t alloc_size)
+ return ERR_PTR(-EOVERFLOW);
+
+ elm = kzalloc(alloc_size, GFP_KERNEL);
++ if (!elm)
++ return ERR_PTR(-ENOMEM);
+ rc = radix_tree_insert(&uapi->radix, key, elm);
+ if (rc) {
+ kfree(elm);
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index a69632f1fab0..c9af2d139f5c 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -2664,6 +2664,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev,
+ nq->budget++;
+
+ atomic_inc(&rdev->cq_count);
++ spin_lock_init(&cq->cq_lock);
+
+ if (context) {
+ struct bnxt_re_cq_resp resp;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index c15335dc8f61..60f2fb7e7dbf 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -1970,6 +1970,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
+ INIT_LIST_HEAD(&cq->sqf_head);
+ INIT_LIST_HEAD(&cq->rqf_head);
+ spin_lock_init(&cq->compl_lock);
++ spin_lock_init(&cq->flush_lock);
+
+ bnxt_qplib_arm_cq_enable(cq);
+ return 0;
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 4dcc92d11609..16145b0a1458 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2076,7 +2076,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
+ } else {
+ pdev = get_real_dev(n->dev);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+- n, pdev, 0);
++ n, pdev, rt_tos2priority(tos));
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = dst_mtu(dst);
+@@ -2166,7 +2166,8 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
+ laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port,
+- raddr6->sin6_port, 0,
++ raddr6->sin6_port,
++ ep->com.cm_id->tos,
+ raddr6->sin6_scope_id);
+ iptype = 6;
+ ra = (__u8 *)&raddr6->sin6_addr;
+@@ -2947,15 +2948,18 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
+
+ ep = get_ep_from_tid(dev, tid);
+
+- if (ep && ep->com.qp) {
+- pr_warn("TERM received tid %u qpid %u\n",
+- tid, ep->com.qp->wq.sq.qid);
+- attrs.next_state = C4IW_QP_STATE_TERMINATE;
+- c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+- C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
++ if (ep) {
++ if (ep->com.qp) {
++ pr_warn("TERM received tid %u qpid %u\n", tid,
++ ep->com.qp->wq.sq.qid);
++ attrs.next_state = C4IW_QP_STATE_TERMINATE;
++ c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
++ C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
++ }
++
++ c4iw_put_ep(&ep->com);
+ } else
+ pr_warn("TERM received tid %u no ep/qp\n", tid);
+- c4iw_put_ep(&ep->com);
+
+ return 0;
+ }
+@@ -3323,7 +3327,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port,
+- raddr6->sin6_port, 0,
++ raddr6->sin6_port, cm_id->tos,
+ raddr6->sin6_scope_id);
+ }
+ if (!ep->dst) {
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index 6b8935361133..b09a4b1cf397 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -10580,12 +10580,29 @@ void set_link_down_reason(struct hfi1_pportdata *ppd, u8 lcl_reason,
+ }
+ }
+
+-/*
+- * Verify if BCT for data VLs is non-zero.
++/**
++ * data_vls_operational() - Verify if data VL BCT credits and MTU
++ * are both set.
++ * @ppd: pointer to hfi1_pportdata structure
++ *
++ * Return: true - OK, false - otherwise.
+ */
+ static inline bool data_vls_operational(struct hfi1_pportdata *ppd)
+ {
+- return !!ppd->actual_vls_operational;
++ int i;
++ u64 reg;
++
++ if (!ppd->actual_vls_operational)
++ return false;
++
++ for (i = 0; i < ppd->vls_supported; i++) {
++ reg = read_csr(ppd->dd, SEND_CM_CREDIT_VL + (8 * i));
++ if ((reg && !ppd->dd->vld[i].mtu) ||
++ (!reg && ppd->dd->vld[i].mtu))
++ return false;
++ }
++
++ return true;
+ }
+
+ /*
+@@ -10698,7 +10715,8 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
+
+ if (!data_vls_operational(ppd)) {
+ dd_dev_err(dd,
+- "%s: data VLs not operational\n", __func__);
++ "%s: Invalid data VL credits or mtu\n",
++ __func__);
+ ret = -EINVAL;
+ break;
+ }
+diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
+index a41f85558312..d5277c23cba6 100644
+--- a/drivers/infiniband/hw/hfi1/driver.c
++++ b/drivers/infiniband/hw/hfi1/driver.c
+@@ -430,40 +430,60 @@ static const hfi1_handle_cnp hfi1_handle_cnp_tbl[2] = {
+ [HFI1_PKT_TYPE_16B] = &return_cnp_16B
+ };
+
+-void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+- bool do_cnp)
++/**
++ * hfi1_process_ecn_slowpath - Process FECN or BECN bits
++ * @qp: The packet's destination QP
++ * @pkt: The packet itself.
++ * @prescan: Is the caller the RXQ prescan
++ *
++ * Process the packet's FECN or BECN bits. By the time this is
++ * called, it has already been evaluated whether those bits should
++ * be processed.
++ * The significance of the @prescan argument is that if the caller
++ * is the RXQ prescan, a CNP will be sent out instead of waiting for the
++ * normal packet processing to send an ACK with BECN set (or a CNP).
++ */
++bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
++ bool prescan)
+ {
+ struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ struct ib_other_headers *ohdr = pkt->ohdr;
+ struct ib_grh *grh = pkt->grh;
+- u32 rqpn = 0, bth1;
++ u32 rqpn = 0;
+ u16 pkey;
+ u32 rlid, slid, dlid = 0;
+- u8 hdr_type, sc, svc_type;
+- bool is_mcast = false;
++ u8 hdr_type, sc, svc_type, opcode;
++ bool is_mcast = false, ignore_fecn = false, do_cnp = false,
++ fecn, becn;
+
+ /* can be called from prescan */
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+- is_mcast = hfi1_is_16B_mcast(dlid);
+ pkey = hfi1_16B_get_pkey(pkt->hdr);
+ sc = hfi1_16B_get_sc(pkt->hdr);
+ dlid = hfi1_16B_get_dlid(pkt->hdr);
+ slid = hfi1_16B_get_slid(pkt->hdr);
++ is_mcast = hfi1_is_16B_mcast(dlid);
++ opcode = ib_bth_get_opcode(ohdr);
+ hdr_type = HFI1_PKT_TYPE_16B;
++ fecn = hfi1_16B_get_fecn(pkt->hdr);
++ becn = hfi1_16B_get_becn(pkt->hdr);
+ } else {
+- is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
+- (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
+ pkey = ib_bth_get_pkey(ohdr);
+ sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
+- dlid = ib_get_dlid(pkt->hdr);
++ dlid = qp->ibqp.qp_type != IB_QPT_UD ? ib_get_dlid(pkt->hdr) :
++ ppd->lid;
+ slid = ib_get_slid(pkt->hdr);
++ is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
++ (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
++ opcode = ib_bth_get_opcode(ohdr);
+ hdr_type = HFI1_PKT_TYPE_9B;
++ fecn = ib_bth_get_fecn(ohdr);
++ becn = ib_bth_get_becn(ohdr);
+ }
+
+ switch (qp->ibqp.qp_type) {
+ case IB_QPT_UD:
+- dlid = ppd->lid;
+ rlid = slid;
+ rqpn = ib_get_sqpn(pkt->ohdr);
+ svc_type = IB_CC_SVCTYPE_UD;
+@@ -485,22 +505,31 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ svc_type = IB_CC_SVCTYPE_RC;
+ break;
+ default:
+- return;
++ return false;
+ }
+
+- bth1 = be32_to_cpu(ohdr->bth[1]);
++ ignore_fecn = is_mcast || (opcode == IB_OPCODE_CNP) ||
++ (opcode == IB_OPCODE_RC_ACKNOWLEDGE);
++ /*
++ * ACKNOWLEDGE packets do not get a CNP but this will be
++ * guarded by ignore_fecn above.
++ */
++ do_cnp = prescan ||
++ (opcode >= IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST &&
++ opcode <= IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE);
++
+ /* Call appropriate CNP handler */
+- if (do_cnp && (bth1 & IB_FECN_SMASK))
++ if (!ignore_fecn && do_cnp && fecn)
+ hfi1_handle_cnp_tbl[hdr_type](ibp, qp, rqpn, pkey,
+ dlid, rlid, sc, grh);
+
+- if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
+- u32 lqpn = bth1 & RVT_QPN_MASK;
++ if (becn) {
++ u32 lqpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK;
+ u8 sl = ibp->sc_to_sl[sc];
+
+ process_becn(ppd, sl, rlid, lqpn, rqpn, svc_type);
+ }
+-
++ return !ignore_fecn && fecn;
+ }
+
+ struct ps_mdata {
+@@ -599,7 +628,6 @@ static void __prescan_rxq(struct hfi1_packet *packet)
+ struct rvt_dev_info *rdi = &rcd->dd->verbs_dev.rdi;
+ u64 rhf = rhf_to_cpu(rhf_addr);
+ u32 etype = rhf_rcv_type(rhf), qpn, bth1;
+- int is_ecn = 0;
+ u8 lnh;
+
+ if (ps_done(&mdata, rhf, rcd))
+@@ -625,12 +653,10 @@ static void __prescan_rxq(struct hfi1_packet *packet)
+ goto next; /* just in case */
+ }
+
+- bth1 = be32_to_cpu(packet->ohdr->bth[1]);
+- is_ecn = !!(bth1 & (IB_FECN_SMASK | IB_BECN_SMASK));
+-
+- if (!is_ecn)
++ if (!hfi1_may_ecn(packet))
+ goto next;
+
++ bth1 = be32_to_cpu(packet->ohdr->bth[1]);
+ qpn = bth1 & RVT_QPN_MASK;
+ rcu_read_lock();
+ qp = rvt_lookup_qpn(rdi, &ibp->rvp, qpn);
+@@ -640,7 +666,7 @@ static void __prescan_rxq(struct hfi1_packet *packet)
+ goto next;
+ }
+
+- process_ecn(qp, packet, true);
++ hfi1_process_ecn_slowpath(qp, packet, true);
+ rcu_read_unlock();
+
+ /* turn off BECN, FECN */
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index 2ea42c04cfd2..232fc4b59a98 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1797,13 +1797,20 @@ static inline struct hfi1_ibport *rcd_to_iport(struct hfi1_ctxtdata *rcd)
+ return &rcd->ppd->ibport_data;
+ }
+
+-void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
+- bool do_cnp);
+-static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
+- bool do_cnp)
++/**
++ * hfi1_may_ecn - Check whether FECN or BECN processing should be done
++ * @pkt: the packet to be evaluated
++ *
++ * Check whether the FECN or BECN bits in the packet's header are
++ * enabled, depending on packet type.
++ *
++ * This function only checks for FECN and BECN bits. Additional checks
++ * are done in the slowpath (hfi1_process_ecn_slowpath()) in order to
++ * ensure correct handling.
++ */
++static inline bool hfi1_may_ecn(struct hfi1_packet *pkt)
+ {
+- bool becn;
+- bool fecn;
++ bool fecn, becn;
+
+ if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
+ fecn = hfi1_16B_get_fecn(pkt->hdr);
+@@ -1812,10 +1819,18 @@ static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt,
+ fecn = ib_bth_get_fecn(pkt->ohdr);
+ becn = ib_bth_get_becn(pkt->ohdr);
+ }
+- if (unlikely(fecn || becn)) {
+- hfi1_process_ecn_slowpath(qp, pkt, do_cnp);
+- return fecn;
+- }
++ return fecn || becn;
++}
++
++bool hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
++ bool prescan);
++static inline bool process_ecn(struct rvt_qp *qp, struct hfi1_packet *pkt)
++{
++ bool do_work;
++
++ do_work = hfi1_may_ecn(pkt);
++ if (unlikely(do_work))
++ return hfi1_process_ecn_slowpath(qp, pkt, false);
+ return false;
+ }
+
+diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
+index 752057647f09..3fcbf56f8be2 100644
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -1434,7 +1434,8 @@ void sc_stop(struct send_context *sc, int flag)
+ * @cb: optional callback to call when the buffer is finished sending
+ * @arg: argument for cb
+ *
+- * Return a pointer to a PIO buffer if successful, NULL if not enough room.
++ * Return a pointer to a PIO buffer, NULL if not enough room, -ECOMM
++ * when link is down.
+ */
+ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
+ pio_release_cb cb, void *arg)
+@@ -1450,7 +1451,7 @@ struct pio_buf *sc_buffer_alloc(struct send_context *sc, u32 dw_len,
+ spin_lock_irqsave(&sc->alloc_lock, flags);
+ if (!(sc->flags & SCF_ENABLED)) {
+ spin_unlock_irqrestore(&sc->alloc_lock, flags);
+- goto done;
++ return ERR_PTR(-ECOMM);
+ }
+
+ retry:
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index 770c78c65730..7ed6fb407a68 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -914,7 +914,7 @@ void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
+ sc_to_vlt(ppd->dd, sc5), plen);
+ pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
+- if (!pbuf) {
++ if (IS_ERR_OR_NULL(pbuf)) {
+ /*
+ * We have no room to send at the moment. Pass
+ * responsibility for sending the ACK to the send engine
+@@ -2049,8 +2049,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
+ struct ib_reth *reth;
+ unsigned long flags;
+ int ret;
+- bool is_fecn = false;
+- bool copy_last = false;
++ bool copy_last = false, fecn;
+ u32 rkey;
+ u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
+
+@@ -2059,7 +2058,7 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+- is_fecn = process_ecn(qp, packet, false);
++ fecn = process_ecn(qp, packet);
+
+ /*
+ * Process responses (ACKs) before anything else. Note that the
+@@ -2070,8 +2069,6 @@ void hfi1_rc_rcv(struct hfi1_packet *packet)
+ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
+ opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
+ rc_rcv_resp(packet);
+- if (is_fecn)
+- goto send_ack;
+ return;
+ }
+
+@@ -2347,11 +2344,11 @@ send_last:
+
+ /* Schedule the send engine. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
++ if (fecn)
++ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+- if (is_fecn)
+- goto send_ack;
+ return;
+ }
+
+@@ -2413,11 +2410,11 @@ send_last:
+
+ /* Schedule the send engine. */
+ qp->s_flags |= RVT_S_RESP_PENDING;
++ if (fecn)
++ qp->s_flags |= RVT_S_ECN;
+ hfi1_schedule_send(qp);
+
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+- if (is_fecn)
+- goto send_ack;
+ return;
+ }
+
+@@ -2430,16 +2427,9 @@ send_last:
+ qp->r_ack_psn = psn;
+ qp->r_nak_state = 0;
+ /* Send an ACK if requested or required. */
+- if (psn & IB_BTH_REQ_ACK) {
+- if (packet->numpkt == 0) {
+- rc_cancel_ack(qp);
+- goto send_ack;
+- }
+- if (qp->r_adefered >= HFI1_PSN_CREDIT) {
+- rc_cancel_ack(qp);
+- goto send_ack;
+- }
+- if (unlikely(is_fecn)) {
++ if (psn & IB_BTH_REQ_ACK || fecn) {
++ if (packet->numpkt == 0 || fecn ||
++ qp->r_adefered >= HFI1_PSN_CREDIT) {
+ rc_cancel_ack(qp);
+ goto send_ack;
+ }
+@@ -2480,7 +2470,7 @@ nack_acc:
+ qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
+ qp->r_ack_psn = qp->r_psn;
+ send_ack:
+- hfi1_send_rc_ack(packet, is_fecn);
++ hfi1_send_rc_ack(packet, fecn);
+ }
+
+ void hfi1_rc_hdrerr(
+diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
+index e254dcec6f64..4121d1a93b1b 100644
+--- a/drivers/infiniband/hw/hfi1/uc.c
++++ b/drivers/infiniband/hw/hfi1/uc.c
+@@ -321,7 +321,7 @@ void hfi1_uc_rcv(struct hfi1_packet *packet)
+ if (hfi1_ruc_check_hdr(ibp, packet))
+ return;
+
+- process_ecn(qp, packet, true);
++ process_ecn(qp, packet);
+
+ psn = ib_bth_get_psn(ohdr);
+ /* Compare the PSN verses the expected PSN. */
+diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
+index 54eb69564264..839593641e3f 100644
+--- a/drivers/infiniband/hw/hfi1/ud.c
++++ b/drivers/infiniband/hw/hfi1/ud.c
+@@ -51,6 +51,7 @@
+ #include "hfi.h"
+ #include "mad.h"
+ #include "verbs_txreq.h"
++#include "trace_ibhdrs.h"
+ #include "qp.h"
+
+ /* We support only two types - 9B and 16B for now */
+@@ -656,18 +657,19 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ u32 bth0, plen, vl, hwords = 7;
+ u16 len;
+ u8 l4;
+- struct hfi1_16b_header hdr;
++ struct hfi1_opa_header hdr;
+ struct ib_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+ u32 nwords;
+
++ hdr.hdr_type = HFI1_PKT_TYPE_16B;
+ /* Populate length */
+ nwords = ((hfi1_get_16b_padding(hwords << 2, 0) +
+ SIZE_OF_LT) >> 2) + SIZE_OF_CRC;
+ if (old_grh) {
+- struct ib_grh *grh = &hdr.u.l.grh;
++ struct ib_grh *grh = &hdr.opah.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16(
+@@ -675,11 +677,11 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+- ohdr = &hdr.u.l.oth;
++ ohdr = &hdr.opah.u.l.oth;
+ l4 = OPA_16B_L4_IB_GLOBAL;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+- ohdr = &hdr.u.oth;
++ ohdr = &hdr.opah.u.oth;
+ l4 = OPA_16B_L4_IB_LOCAL;
+ }
+
+@@ -693,7 +695,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+
+ /* Convert dwords to flits */
+ len = (hwords + nwords) >> 1;
+- hfi1_make_16b_hdr(&hdr, slid, dlid, len, pkey, 1, 0, l4, sc5);
++ hfi1_make_16b_hdr(&hdr.opah, slid, dlid, len, pkey, 1, 0, l4, sc5);
+
+ plen = 2 /* PBC */ + hwords + nwords;
+ pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
+@@ -701,9 +703,11 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+- if (pbuf)
++ if (!IS_ERR_OR_NULL(pbuf)) {
++ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
++ }
+ }
+ }
+
+@@ -715,14 +719,15 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
+ u32 bth0, plen, vl, hwords = 5;
+ u16 lrh0;
+ u8 sl = ibp->sc_to_sl[sc5];
+- struct ib_header hdr;
++ struct hfi1_opa_header hdr;
+ struct ib_other_headers *ohdr;
+ struct pio_buf *pbuf;
+ struct send_context *ctxt = qp_to_send_context(qp, sc5);
+ struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
+
++ hdr.hdr_type = HFI1_PKT_TYPE_9B;
+ if (old_grh) {
+- struct ib_grh *grh = &hdr.u.l.grh;
++ struct ib_grh *grh = &hdr.ibh.u.l.grh;
+
+ grh->version_tclass_flow = old_grh->version_tclass_flow;
+ grh->paylen = cpu_to_be16(
+@@ -730,11 +735,11 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
+ grh->hop_limit = 0xff;
+ grh->sgid = old_grh->dgid;
+ grh->dgid = old_grh->sgid;
+- ohdr = &hdr.u.l.oth;
++ ohdr = &hdr.ibh.u.l.oth;
+ lrh0 = HFI1_LRH_GRH;
+ hwords += sizeof(struct ib_grh) / sizeof(u32);
+ } else {
+- ohdr = &hdr.u.oth;
++ ohdr = &hdr.ibh.u.oth;
+ lrh0 = HFI1_LRH_BTH;
+ }
+
+@@ -746,16 +751,18 @@ void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
+ ohdr->bth[1] = cpu_to_be32(remote_qpn | (1 << IB_BECN_SHIFT));
+ ohdr->bth[2] = 0; /* PSN 0 */
+
+- hfi1_make_ib_hdr(&hdr, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
++ hfi1_make_ib_hdr(&hdr.ibh, lrh0, hwords + SIZE_OF_CRC, dlid, slid);
+ plen = 2 /* PBC */ + hwords;
+ pbc_flags |= (ib_is_sc5(sc5) << PBC_DC_INFO_SHIFT);
+ vl = sc_to_vlt(ppd->dd, sc5);
+ pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps, vl, plen);
+ if (ctxt) {
+ pbuf = sc_buffer_alloc(ctxt, plen, NULL, NULL);
+- if (pbuf)
++ if (!IS_ERR_OR_NULL(pbuf)) {
++ trace_pio_output_ibhdr(ppd->dd, &hdr, sc5);
+ ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
+ &hdr, hwords);
++ }
+ }
+ }
+
+@@ -912,7 +919,7 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
+ src_qp = hfi1_16B_get_src_qpn(packet->mgmt);
+ }
+
+- process_ecn(qp, packet, (opcode != IB_OPCODE_CNP));
++ process_ecn(qp, packet);
+ /*
+ * Get the number of bytes the message was padded by
+ * and drop incomplete packets.
+diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
+index 4e7b3c027901..90e12f9433a3 100644
+--- a/drivers/infiniband/hw/hfi1/verbs.c
++++ b/drivers/infiniband/hw/hfi1/verbs.c
+@@ -1096,10 +1096,10 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
+ if (cb)
+ iowait_pio_inc(&priv->s_iowait);
+ pbuf = sc_buffer_alloc(sc, plen, cb, qp);
+- if (unlikely(!pbuf)) {
++ if (unlikely(IS_ERR_OR_NULL(pbuf))) {
+ if (cb)
+ verbs_pio_complete(qp, 0);
+- if (ppd->host_link_state != HLS_UP_ACTIVE) {
++ if (IS_ERR(pbuf)) {
+ /*
+ * If we have filled the PIO buffers to capacity and are
+ * not in an active state this request is not going to
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index a73d388b7093..31b9b99f81cb 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -54,12 +54,13 @@ bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
+ EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);
+
+ static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
+- u32 bt_chunk_num)
++ u32 bt_chunk_num, u64 hem_max_num)
+ {
+- int i;
++ u64 check_max_num = start_idx + bt_chunk_num;
++ u64 i;
+
+- for (i = 0; i < bt_chunk_num; i++)
+- if (hem[start_idx + i])
++ for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
++ if (hem[i])
+ return false;
+
+ return true;
+@@ -413,6 +414,12 @@ static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
+ return -EINVAL;
+ }
+
++ if (unlikely(hem_idx >= table->num_hem)) {
++ dev_err(dev, "Table %d exceeds hem limit, idx = %llu, max = %lu!\n",
++ table->type, hem_idx, table->num_hem);
++ return -EINVAL;
++ }
++
+ mutex_lock(&table->mutex);
+
+ if (table->hem[hem_idx]) {
+@@ -649,7 +656,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ if (check_whether_bt_num_2(table->type, hop_num)) {
+ start_idx = mhop.l0_idx * chunk_ba_num;
+ if (hns_roce_check_hem_null(table->hem, start_idx,
+- chunk_ba_num)) {
++ chunk_ba_num, table->num_hem)) {
+ if (table->type < HEM_TYPE_MTT &&
+ hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+@@ -663,7 +670,7 @@ static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
+ start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
+ mhop.l1_idx * chunk_ba_num;
+ if (hns_roce_check_hem_null(table->hem, start_idx,
+- chunk_ba_num)) {
++ chunk_ba_num, table->num_hem)) {
+ if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
+ dev_warn(dev, "Clear HEM base address failed.\n");
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 7021444f18b4..417de7ac0d5e 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -4833,7 +4833,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+- eq->nxt_eqe_ba = eq->buf_dma[1];
++ if (ba_num > 1)
++ eq->nxt_eqe_ba = eq->buf_dma[1];
+
+ } else if (mhop_num == 2) {
+ /* alloc L1 BT and buf */
+@@ -4875,7 +4876,8 @@ static int hns_roce_mhop_alloc_eq(struct hns_roce_dev *hr_dev,
+ break;
+ }
+ eq->cur_eqe_ba = eq->buf_dma[0];
+- eq->nxt_eqe_ba = eq->buf_dma[1];
++ if (ba_num > 1)
++ eq->nxt_eqe_ba = eq->buf_dma[1];
+ }
+
+ eq->l0_last_num = i + 1;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index af24698ff226..3012d7eb4ccb 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -262,7 +262,6 @@ void hns_roce_qp_free(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
+ hns_roce_table_put(hr_dev, &qp_table->trrl_table,
+ hr_qp->qpn);
+ hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
+- hns_roce_table_put(hr_dev, &qp_table->qp_table, hr_qp->qpn);
+ }
+ }
+ EXPORT_SYMBOL_GPL(hns_roce_qp_free);
+diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
+index 35a0e04c38f2..b841589c27c9 100644
+--- a/drivers/infiniband/hw/mlx5/ib_rep.c
++++ b/drivers/infiniband/hw/mlx5/ib_rep.c
+@@ -69,8 +69,10 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+ ibdev->mdev = dev;
+ ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
+ MLX5_CAP_GEN(dev, num_vhca_ports));
+- if (!__mlx5_ib_add(ibdev, &rep_profile))
++ if (!__mlx5_ib_add(ibdev, &rep_profile)) {
++ ib_dealloc_device(&ibdev->ib_dev);
+ return -EINVAL;
++ }
+
+ rep->rep_if[REP_IB].priv = ibdev;
+
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index df5be462dd28..2db34f7b5ced 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -2390,10 +2390,29 @@ static u8 get_match_criteria_enable(u32 *match_criteria)
+ return match_criteria_enable;
+ }
+
+-static void set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
++static int set_proto(void *outer_c, void *outer_v, u8 mask, u8 val)
+ {
+- MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
+- MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
++ u8 entry_mask;
++ u8 entry_val;
++ int err = 0;
++
++ if (!mask)
++ goto out;
++
++ entry_mask = MLX5_GET(fte_match_set_lyr_2_4, outer_c,
++ ip_protocol);
++ entry_val = MLX5_GET(fte_match_set_lyr_2_4, outer_v,
++ ip_protocol);
++ if (!entry_mask) {
++ MLX5_SET(fte_match_set_lyr_2_4, outer_c, ip_protocol, mask);
++ MLX5_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, val);
++ goto out;
++ }
++ /* Don't override existing ip protocol */
++ if (mask != entry_mask || val != entry_val)
++ err = -EINVAL;
++out:
++ return err;
+ }
+
+ static void set_flow_label(void *misc_c, void *misc_v, u32 mask, u32 val,
+@@ -2597,8 +2616,10 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
+ set_tos(headers_c, headers_v,
+ ib_spec->ipv4.mask.tos, ib_spec->ipv4.val.tos);
+
+- set_proto(headers_c, headers_v,
+- ib_spec->ipv4.mask.proto, ib_spec->ipv4.val.proto);
++ if (set_proto(headers_c, headers_v,
++ ib_spec->ipv4.mask.proto,
++ ib_spec->ipv4.val.proto))
++ return -EINVAL;
+ break;
+ case IB_FLOW_SPEC_IPV6:
+ if (FIELDS_NOT_SUPPORTED(ib_spec->ipv6.mask, LAST_IPV6_FIELD))
+@@ -2637,9 +2658,10 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
+ ib_spec->ipv6.mask.traffic_class,
+ ib_spec->ipv6.val.traffic_class);
+
+- set_proto(headers_c, headers_v,
+- ib_spec->ipv6.mask.next_hdr,
+- ib_spec->ipv6.val.next_hdr);
++ if (set_proto(headers_c, headers_v,
++ ib_spec->ipv6.mask.next_hdr,
++ ib_spec->ipv6.val.next_hdr))
++ return -EINVAL;
+
+ set_flow_label(misc_params_c, misc_params_v,
+ ntohl(ib_spec->ipv6.mask.flow_label),
+@@ -2660,10 +2682,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
+ LAST_TCP_UDP_FIELD))
+ return -EOPNOTSUPP;
+
+- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+- 0xff);
+- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+- IPPROTO_TCP);
++ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_TCP))
++ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_sport,
+ ntohs(ib_spec->tcp_udp.mask.src_port));
+@@ -2680,10 +2700,8 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
+ LAST_TCP_UDP_FIELD))
+ return -EOPNOTSUPP;
+
+- MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+- 0xff);
+- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+- IPPROTO_UDP);
++ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_UDP))
++ return -EINVAL;
+
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport,
+ ntohs(ib_spec->tcp_udp.mask.src_port));
+@@ -2699,6 +2717,9 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
+ if (ib_spec->gre.mask.c_ks_res0_ver)
+ return -EOPNOTSUPP;
+
++ if (set_proto(headers_c, headers_v, 0xff, IPPROTO_GRE))
++ return -EINVAL;
++
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
+ 0xff);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
+diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
+index ef0f710587ad..4c0f0ce02d2f 100644
+--- a/drivers/infiniband/hw/mlx5/qp.c
++++ b/drivers/infiniband/hw/mlx5/qp.c
+@@ -2598,6 +2598,11 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
+ MLX5_QP_OPTPAR_Q_KEY |
+ MLX5_QP_OPTPAR_PRI_PORT,
++ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
++ MLX5_QP_OPTPAR_RAE |
++ MLX5_QP_OPTPAR_RWE |
++ MLX5_QP_OPTPAR_PKEY_INDEX |
++ MLX5_QP_OPTPAR_PRI_PORT,
+ },
+ [MLX5_QP_STATE_RTR] = {
+ [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
+@@ -2631,6 +2636,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_PM_STATE,
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
++ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
++ MLX5_QP_OPTPAR_RRE |
++ MLX5_QP_OPTPAR_RAE |
++ MLX5_QP_OPTPAR_RWE |
++ MLX5_QP_OPTPAR_PM_STATE |
++ MLX5_QP_OPTPAR_RNR_TIMEOUT,
+ },
+ },
+ [MLX5_QP_STATE_RTS] = {
+@@ -2647,6 +2658,12 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
+ [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
+ MLX5_QP_OPTPAR_SRQN |
+ MLX5_QP_OPTPAR_CQN_RCV,
++ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RRE |
++ MLX5_QP_OPTPAR_RAE |
++ MLX5_QP_OPTPAR_RWE |
++ MLX5_QP_OPTPAR_RNR_TIMEOUT |
++ MLX5_QP_OPTPAR_PM_STATE |
++ MLX5_QP_OPTPAR_ALT_ADDR_PATH,
+ },
+ },
+ [MLX5_QP_STATE_SQER] = {
+@@ -2658,6 +2675,10 @@ static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_Q
+ MLX5_QP_OPTPAR_RWE |
+ MLX5_QP_OPTPAR_RAE |
+ MLX5_QP_OPTPAR_RRE,
++ [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
++ MLX5_QP_OPTPAR_RWE |
++ MLX5_QP_OPTPAR_RAE |
++ MLX5_QP_OPTPAR_RRE,
+ },
+ },
+ };
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+index c158ca9fde6d..08271fce0b9e 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+@@ -55,7 +55,7 @@
+
+ int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+ {
+- if (index > 1)
++ if (index > 0)
+ return -EINVAL;
+
+ *pkey = 0xffff;
+diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
+index 9167a1c40bcf..38fe2f741375 100644
+--- a/drivers/infiniband/hw/qedr/verbs.c
++++ b/drivers/infiniband/hw/qedr/verbs.c
+@@ -67,7 +67,7 @@ static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src,
+
+ int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
+ {
+- if (index > QEDR_ROCE_PKEY_TABLE_LEN)
++ if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
+ return -EINVAL;
+
+ *pkey = QEDR_ROCE_PKEY_DEFAULT;
+@@ -158,54 +158,47 @@ int qedr_query_device(struct ib_device *ibdev,
+ return 0;
+ }
+
+-#define QEDR_SPEED_SDR (1)
+-#define QEDR_SPEED_DDR (2)
+-#define QEDR_SPEED_QDR (4)
+-#define QEDR_SPEED_FDR10 (8)
+-#define QEDR_SPEED_FDR (16)
+-#define QEDR_SPEED_EDR (32)
+-
+ static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
+ u8 *ib_width)
+ {
+ switch (speed) {
+ case 1000:
+- *ib_speed = QEDR_SPEED_SDR;
++ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+ case 10000:
+- *ib_speed = QEDR_SPEED_QDR;
++ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 20000:
+- *ib_speed = QEDR_SPEED_DDR;
++ *ib_speed = IB_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 25000:
+- *ib_speed = QEDR_SPEED_EDR;
++ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 40000:
+- *ib_speed = QEDR_SPEED_QDR;
++ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case 50000:
+- *ib_speed = QEDR_SPEED_QDR;
+- *ib_width = IB_WIDTH_4X;
++ *ib_speed = IB_SPEED_HDR;
++ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case 100000:
+- *ib_speed = QEDR_SPEED_EDR;
++ *ib_speed = IB_SPEED_EDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+- *ib_speed = QEDR_SPEED_SDR;
++ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ }
+ }
+diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+index 3db232429630..e611f133aa97 100644
+--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
++++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+@@ -447,7 +447,7 @@ struct net_device *usnic_get_netdev(struct ib_device *device, u8 port_num)
+ int usnic_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
+ u16 *pkey)
+ {
+- if (index > 1)
++ if (index > 0)
+ return -EINVAL;
+
+ *pkey = 0xffff;
+diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
+index 2ee4b08b00ea..a57276f2cb84 100644
+--- a/drivers/infiniband/sw/rxe/rxe_cq.c
++++ b/drivers/infiniband/sw/rxe/rxe_cq.c
+@@ -30,7 +30,7 @@
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+-
++#include <linux/vmalloc.h>
+ #include "rxe.h"
+ #include "rxe_loc.h"
+ #include "rxe_queue.h"
+@@ -97,7 +97,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
+ err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, context,
+ cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
+ if (err) {
+- kvfree(cq->queue->buf);
++ vfree(cq->queue->buf);
+ kfree(cq->queue);
+ return err;
+ }
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 8094cbaa54a9..54add70c22b5 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -533,8 +533,9 @@ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
+ if (unlikely(!skb))
+ goto out;
+
+- skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));
++ skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(ndev));
+
++ /* FIXME: hold a reference to this netdev for the lifetime of this skb. */
+ skb->dev = ndev;
+ if (av->network_type == RDMA_NETWORK_IPV4)
+ skb->protocol = htons(ETH_P_IP);
+diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
+index b4a8acc7bb7d..0e2425f28233 100644
+--- a/drivers/infiniband/sw/rxe/rxe_pool.c
++++ b/drivers/infiniband/sw/rxe/rxe_pool.c
+@@ -112,6 +112,18 @@ static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
+ return rxe_type_info[pool->type].cache;
+ }
+
++static void rxe_cache_clean(size_t cnt)
++{
++ int i;
++ struct rxe_type_info *type;
++
++ for (i = 0; i < cnt; i++) {
++ type = &rxe_type_info[i];
++ kmem_cache_destroy(type->cache);
++ type->cache = NULL;
++ }
++}
++
+ int rxe_cache_init(void)
+ {
+ int err;
+@@ -136,24 +148,14 @@ int rxe_cache_init(void)
+ return 0;
+
+ err1:
+- while (--i >= 0) {
+- kmem_cache_destroy(type->cache);
+- type->cache = NULL;
+- }
++ rxe_cache_clean(i);
+
+ return err;
+ }
+
+ void rxe_cache_exit(void)
+ {
+- int i;
+- struct rxe_type_info *type;
+-
+- for (i = 0; i < RXE_NUM_TYPES; i++) {
+- type = &rxe_type_info[i];
+- kmem_cache_destroy(type->cache);
+- type->cache = NULL;
+- }
++ rxe_cache_clean(RXE_NUM_TYPES);
+ }
+
+ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index c58452daffc7..230697fa31fe 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -34,6 +34,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/delay.h>
+ #include <linux/sched.h>
++#include <linux/vmalloc.h>
+
+ #include "rxe.h"
+ #include "rxe_loc.h"
+@@ -247,7 +248,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
+ &qp->sq.queue->ip);
+
+ if (err) {
+- kvfree(qp->sq.queue->buf);
++ vfree(qp->sq.queue->buf);
+ kfree(qp->sq.queue);
+ return err;
+ }
+@@ -300,7 +301,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
+ qp->rq.queue->buf, qp->rq.queue->buf_size,
+ &qp->rq.queue->ip);
+ if (err) {
+- kvfree(qp->rq.queue->buf);
++ vfree(qp->rq.queue->buf);
+ kfree(qp->rq.queue);
+ return err;
+ }
+diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
+index 120b40829560..a7aeaa0c6fbc 100644
+--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
+@@ -197,7 +197,7 @@ struct iser_data_buf {
+ struct scatterlist *sg;
+ int size;
+ unsigned long data_len;
+- unsigned int dma_nents;
++ int dma_nents;
+ };
+
+ /* fwd declarations */
+diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
+index 009be8889d71..379bc0dfc388 100644
+--- a/drivers/infiniband/ulp/iser/iser_memory.c
++++ b/drivers/infiniband/ulp/iser/iser_memory.c
+@@ -240,8 +240,8 @@ int iser_fast_reg_fmr(struct iscsi_iser_task *iser_task,
+ page_vec->npages = 0;
+ page_vec->fake_mr.page_size = SIZE_4K;
+ plen = ib_sg_to_pages(&page_vec->fake_mr, mem->sg,
+- mem->size, NULL, iser_set_page);
+- if (unlikely(plen < mem->size)) {
++ mem->dma_nents, NULL, iser_set_page);
++ if (unlikely(plen < mem->dma_nents)) {
+ iser_err("page vec too short to hold this SG\n");
+ iser_data_buf_dump(mem, device->ib_device);
+ iser_dump_page_vec(page_vec);
+@@ -451,10 +451,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
+
+ ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
+
+- n = ib_map_mr_sg(mr, mem->sg, mem->size, NULL, SIZE_4K);
+- if (unlikely(n != mem->size)) {
++ n = ib_map_mr_sg(mr, mem->sg, mem->dma_nents, NULL, SIZE_4K);
++ if (unlikely(n != mem->dma_nents)) {
+ iser_err("failed to map sg (%d/%d)\n",
+- n, mem->size);
++ n, mem->dma_nents);
+ return n < 0 ? n : -EINVAL;
+ }
+
+diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
+index 8567ee47761e..ae3b04557074 100644
+--- a/drivers/input/keyboard/nomadik-ske-keypad.c
++++ b/drivers/input/keyboard/nomadik-ske-keypad.c
+@@ -100,7 +100,7 @@ static int __init ske_keypad_chip_init(struct ske_keypad *keypad)
+ while ((readl(keypad->reg_base + SKE_RIS) != 0x00000000) && timeout--)
+ cpu_relax();
+
+- if (!timeout)
++ if (timeout == -1)
+ return -EINVAL;
+
+ /*
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 9991386fb700..bea19aa33758 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2153,6 +2153,8 @@ skip_ats_check:
+ */
+ domain_flush_tlb_pde(domain);
+
++ domain_flush_complete(domain);
++
+ return ret;
+ }
+
+diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
+index 1e9a5da562f0..465f28a7844c 100644
+--- a/drivers/iommu/amd_iommu_init.c
++++ b/drivers/iommu/amd_iommu_init.c
+@@ -422,6 +422,9 @@ static void iommu_enable(struct amd_iommu *iommu)
+
+ static void iommu_disable(struct amd_iommu *iommu)
+ {
++ if (!iommu->mmio_base)
++ return;
++
+ /* Disable command buffer */
+ iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
+
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index b9af2419006f..9df3b8441227 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -387,7 +387,6 @@ struct dmar_rmrr_unit {
+ u64 end_address; /* reserved end address */
+ struct dmar_dev_scope *devices; /* target devices */
+ int devices_cnt; /* target device count */
+- struct iommu_resv_region *resv; /* reserved region handle */
+ };
+
+ struct dmar_atsr_unit {
+@@ -3387,9 +3386,12 @@ static int __init init_dmars(void)
+ iommu_identity_mapping |= IDENTMAP_ALL;
+
+ #ifdef CONFIG_INTEL_IOMMU_BROKEN_GFX_WA
+- iommu_identity_mapping |= IDENTMAP_GFX;
++ dmar_map_gfx = 0;
+ #endif
+
++ if (!dmar_map_gfx)
++ iommu_identity_mapping |= IDENTMAP_GFX;
++
+ check_tylersburg_isoch();
+
+ if (iommu_identity_mapping) {
+@@ -4182,7 +4184,6 @@ static inline void init_iommu_pm_ops(void) {}
+ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
+ {
+ struct acpi_dmar_reserved_memory *rmrr;
+- int prot = DMA_PTE_READ|DMA_PTE_WRITE;
+ struct dmar_rmrr_unit *rmrru;
+ size_t length;
+
+@@ -4196,22 +4197,16 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
+ rmrru->end_address = rmrr->end_address;
+
+ length = rmrr->end_address - rmrr->base_address + 1;
+- rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+- IOMMU_RESV_DIRECT);
+- if (!rmrru->resv)
+- goto free_rmrru;
+
+ rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
+ ((void *)rmrr) + rmrr->header.length,
+ &rmrru->devices_cnt);
+ if (rmrru->devices_cnt && rmrru->devices == NULL)
+- goto free_all;
++ goto free_rmrru;
+
+ list_add(&rmrru->list, &dmar_rmrr_units);
+
+ return 0;
+-free_all:
+- kfree(rmrru->resv);
+ free_rmrru:
+ kfree(rmrru);
+ out:
+@@ -4429,7 +4424,6 @@ static void intel_iommu_free_dmars(void)
+ list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
+ list_del(&rmrru->list);
+ dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+- kfree(rmrru->resv);
+ kfree(rmrru);
+ }
+
+@@ -5203,22 +5197,33 @@ static void intel_iommu_remove_device(struct device *dev)
+ static void intel_iommu_get_resv_regions(struct device *device,
+ struct list_head *head)
+ {
++ int prot = DMA_PTE_READ | DMA_PTE_WRITE;
+ struct iommu_resv_region *reg;
+ struct dmar_rmrr_unit *rmrr;
+ struct device *i_dev;
+ int i;
+
+- rcu_read_lock();
++ down_read(&dmar_global_lock);
+ for_each_rmrr_units(rmrr) {
+ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+ i, i_dev) {
++ struct iommu_resv_region *resv;
++ size_t length;
++
+ if (i_dev != device)
+ continue;
+
+- list_add_tail(&rmrr->resv->list, head);
++ length = rmrr->end_address - rmrr->base_address + 1;
++ resv = iommu_alloc_resv_region(rmrr->base_address,
++ length, prot,
++ IOMMU_RESV_DIRECT);
++ if (!resv)
++ break;
++
++ list_add_tail(&resv->list, head);
+ }
+ }
+- rcu_read_unlock();
++ up_read(&dmar_global_lock);
+
+ reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+ IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+@@ -5233,10 +5238,8 @@ static void intel_iommu_put_resv_regions(struct device *dev,
+ {
+ struct iommu_resv_region *entry, *next;
+
+- list_for_each_entry_safe(entry, next, head, list) {
+- if (entry->type == IOMMU_RESV_MSI)
+- kfree(entry);
+- }
++ list_for_each_entry_safe(entry, next, head, list)
++ kfree(entry);
+ }
+
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
+index 188f4eaed6e5..fd8730b2cd46 100644
+--- a/drivers/iommu/intel-svm.c
++++ b/drivers/iommu/intel-svm.c
+@@ -293,7 +293,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
+ int pasid_max;
+ int ret;
+
+- if (!iommu)
++ if (!iommu || dmar_disabled)
+ return -EINVAL;
+
+ if (dev_is_pci(dev)) {
+diff --git a/drivers/iommu/iommu-debugfs.c b/drivers/iommu/iommu-debugfs.c
+index 3b1bf88fd1b0..f03548942096 100644
+--- a/drivers/iommu/iommu-debugfs.c
++++ b/drivers/iommu/iommu-debugfs.c
+@@ -12,6 +12,7 @@
+ #include <linux/debugfs.h>
+
+ struct dentry *iommu_debugfs_dir;
++EXPORT_SYMBOL_GPL(iommu_debugfs_dir);
+
+ /**
+ * iommu_debugfs_setup - create the top-level iommu directory in debugfs
+@@ -23,9 +24,9 @@ struct dentry *iommu_debugfs_dir;
+ * Emit a strong warning at boot time to indicate that this feature is
+ * enabled.
+ *
+- * This function is called from iommu_init; drivers may then call
+- * iommu_debugfs_new_driver_dir() to instantiate a vendor-specific
+- * directory to be used to expose internal data.
++ * This function is called from iommu_init; drivers may then use
++ * iommu_debugfs_dir to instantiate a vendor-specific directory to be used
++ * to expose internal data.
+ */
+ void iommu_debugfs_setup(void)
+ {
+@@ -48,19 +49,3 @@ void iommu_debugfs_setup(void)
+ pr_warn("*************************************************************\n");
+ }
+ }
+-
+-/**
+- * iommu_debugfs_new_driver_dir - create a vendor directory under debugfs/iommu
+- * @vendor: name of the vendor-specific subdirectory to create
+- *
+- * This function is called by an IOMMU driver to create the top-level debugfs
+- * directory for that driver.
+- *
+- * Return: upon success, a pointer to the dentry for the new directory.
+- * NULL in case of failure.
+- */
+-struct dentry *iommu_debugfs_new_driver_dir(const char *vendor)
+-{
+- return debugfs_create_dir(vendor, iommu_debugfs_dir);
+-}
+-EXPORT_SYMBOL_GPL(iommu_debugfs_new_driver_dir);
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index ee25ec22778e..00e1c908cd8e 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -317,7 +317,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
+ type = "unmanaged\n";
+ break;
+ case IOMMU_DOMAIN_DMA:
+- type = "DMA";
++ type = "DMA\n";
+ break;
+ }
+ }
+@@ -1900,9 +1900,9 @@ int iommu_request_dm_for_dev(struct device *dev)
+ int ret;
+
+ /* Device must already be in a group before calling this function */
+- group = iommu_group_get_for_dev(dev);
+- if (IS_ERR(group))
+- return PTR_ERR(group);
++ group = iommu_group_get(dev);
++ if (!group)
++ return -EINVAL;
+
+ mutex_lock(&group->mutex);
+
+diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
+index 154cf44439cb..8e75f34ac886 100644
+--- a/drivers/iommu/mtk_iommu.c
++++ b/drivers/iommu/mtk_iommu.c
+@@ -115,6 +115,30 @@ struct mtk_iommu_domain {
+
+ static struct iommu_ops mtk_iommu_ops;
+
++/*
++ * In M4U 4GB mode, the physical address is remapped as below:
++ *
++ * CPU Physical address:
++ * ====================
++ *
++ * 0 1G 2G 3G 4G 5G
++ * |---A---|---B---|---C---|---D---|---E---|
++ * +--I/O--+------------Memory-------------+
++ *
++ * IOMMU output physical address:
++ * =============================
++ *
++ * 4G 5G 6G 7G 8G
++ * |---E---|---B---|---C---|---D---|
++ * +------------Memory-------------+
++ *
++ * The Region 'A'(I/O) can NOT be mapped by M4U; For Region 'B'/'C'/'D', the
++ * bit32 of the CPU physical address always is needed to set, and for Region
++ * 'E', the CPU physical address keep as is.
++ * Additionally, The iommu consumers always use the CPU phyiscal address.
++ */
++#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x40000000
++
+ static LIST_HEAD(m4ulist); /* List all the M4U HWs */
+
+ #define for_each_m4u(data) list_for_each_entry(data, &m4ulist, list)
+@@ -409,7 +433,7 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
+ pa = dom->iop->iova_to_phys(dom->iop, iova);
+ spin_unlock_irqrestore(&dom->pgtlock, flags);
+
+- if (data->enable_4GB)
++ if (data->enable_4GB && pa < MTK_IOMMU_4GB_MODE_REMAP_BASE)
+ pa |= BIT_ULL(32);
+
+ return pa;
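
A minimal userspace model of the remap rule applied to mtk_iommu_iova_to_phys() above, assuming a hypothetical helper name to_cpu_pa(); only the constant and the comparison come from the hunk:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MTK_IOMMU_4GB_MODE_REMAP_BASE 0x40000000ULL
#define BIT_ULL(nr) (1ULL << (nr))

/* Page-table results below the remap base get bit 32 set (Region 'E',
 * CPU 4G-5G); results at or above it are already the CPU physical
 * address (Regions 'B'/'C'/'D', CPU 1G-4G). */
static uint64_t to_cpu_pa(uint64_t pa, int enable_4gb)
{
	if (enable_4gb && pa < MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa |= BIT_ULL(32);
	return pa;
}

int main(void)
{
	assert(to_cpu_pa(0x00100000ULL, 1) == 0x100100000ULL); /* remapped */
	assert(to_cpu_pa(0x80000000ULL, 1) == 0x080000000ULL); /* kept as is */
	assert(to_cpu_pa(0x00100000ULL, 0) == 0x000100000ULL); /* 4GB mode off */
	printf("4GB-mode remap rule ok\n");
	return 0;
}
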
+diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
+index e4cb3811e82a..005b839f6eb9 100644
+--- a/drivers/leds/led-triggers.c
++++ b/drivers/leds/led-triggers.c
+@@ -171,11 +171,11 @@ err_add_groups:
+ trig->deactivate(led_cdev);
+ err_activate:
+
+- led_cdev->trigger = NULL;
+- led_cdev->trigger_data = NULL;
+ write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
+ list_del(&led_cdev->trig_list);
+ write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
++ led_cdev->trigger = NULL;
++ led_cdev->trigger_data = NULL;
+ led_set_brightness(led_cdev, LED_OFF);
+ kfree(event);
+
+diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c
+index f6eec0212dfc..d22c13b55622 100644
+--- a/drivers/lightnvm/pblk-rb.c
++++ b/drivers/lightnvm/pblk-rb.c
+@@ -784,8 +784,8 @@ int pblk_rb_tear_down_check(struct pblk_rb *rb)
+ }
+
+ out:
+- spin_unlock(&rb->w_lock);
+ spin_unlock_irq(&rb->s_lock);
++ spin_unlock(&rb->w_lock);
+
+ return ret;
+ }
+diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
+index aec46d5d3506..f7cc29c00302 100644
+--- a/drivers/mailbox/mtk-cmdq-mailbox.c
++++ b/drivers/mailbox/mtk-cmdq-mailbox.c
+@@ -363,6 +363,9 @@ static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
+ WARN_ON(cmdq->suspended);
+
+ task = kzalloc(sizeof(*task), GFP_ATOMIC);
++ if (!task)
++ return -ENOMEM;
++
+ task->cmdq = cmdq;
+ INIT_LIST_HEAD(&task->list_entry);
+ task->pa_base = pkt->pa_base;
+diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+index 5255dcb551a7..d8b4f08f613b 100644
+--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
++++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+@@ -36,7 +36,7 @@ static const struct regmap_config apcs_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+- .max_register = 0x1000,
++ .max_register = 0xFFC,
+ .fast_io = true,
+ };
+
+diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
+index 5bceafbf6699..01e9e462512b 100644
+--- a/drivers/mailbox/ti-msgmgr.c
++++ b/drivers/mailbox/ti-msgmgr.c
+@@ -547,7 +547,7 @@ static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
+ }
+
+ if (d->is_sproxy) {
+- if (req_pid > d->num_valid_queues)
++ if (req_pid >= d->num_valid_queues)
+ goto err;
+ qinst = &inst->qinsts[req_pid];
+ return qinst->chan;
+diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
+index 8c53d874ada4..f6b60d5908f7 100644
+--- a/drivers/md/bcache/debug.c
++++ b/drivers/md/bcache/debug.c
+@@ -178,10 +178,9 @@ static ssize_t bch_dump_read(struct file *file, char __user *buf,
+ while (size) {
+ struct keybuf_key *w;
+ unsigned int bytes = min(i->bytes, size);
+- int err = copy_to_user(buf, i->buf, bytes);
+
+- if (err)
+- return err;
++ if (copy_to_user(buf, i->buf, bytes))
++ return -EFAULT;
+
+ ret += bytes;
+ buf += bytes;
+diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
+index 4b6be3b0fd52..5cdda9d6ca31 100644
+--- a/drivers/media/i2c/ov2659.c
++++ b/drivers/media/i2c/ov2659.c
+@@ -1136,7 +1136,7 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
+ mf = v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
+ *mf = fmt->format;
+ #else
+- return -ENOTTY;
++ ret = -ENOTTY;
+ #endif
+ } else {
+ s64 val;
+diff --git a/drivers/media/i2c/tw9910.c b/drivers/media/i2c/tw9910.c
+index a54548cc4285..c7321a70e3ed 100644
+--- a/drivers/media/i2c/tw9910.c
++++ b/drivers/media/i2c/tw9910.c
+@@ -1000,7 +1000,7 @@ static int tw9910_remove(struct i2c_client *client)
+ if (priv->pdn_gpio)
+ gpiod_put(priv->pdn_gpio);
+ clk_put(priv->clk);
+- v4l2_device_unregister_subdev(&priv->subdev);
++ v4l2_async_unregister_subdev(&priv->subdev);
+
+ return 0;
+ }
+diff --git a/drivers/media/pci/cx18/cx18-fileops.c b/drivers/media/pci/cx18/cx18-fileops.c
+index a3f44e30f821..88c2f3bea2b6 100644
+--- a/drivers/media/pci/cx18/cx18-fileops.c
++++ b/drivers/media/pci/cx18/cx18-fileops.c
+@@ -484,7 +484,7 @@ static ssize_t cx18_read_pos(struct cx18_stream *s, char __user *ubuf,
+
+ CX18_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
+ if (rc > 0)
+- pos += rc;
++ *pos += rc;
+ return rc;
+ }
+
+diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
+index 7d52173073d6..bafcb3982274 100644
+--- a/drivers/media/pci/cx23885/cx23885-dvb.c
++++ b/drivers/media/pci/cx23885/cx23885-dvb.c
+@@ -1474,8 +1474,9 @@ static int dvb_register(struct cx23885_tsport *port)
+ if (fe0->dvb.frontend != NULL) {
+ struct i2c_adapter *tun_i2c;
+
+- fe0->dvb.frontend->sec_priv = kmalloc(sizeof(dib7000p_ops), GFP_KERNEL);
+- memcpy(fe0->dvb.frontend->sec_priv, &dib7000p_ops, sizeof(dib7000p_ops));
++ fe0->dvb.frontend->sec_priv = kmemdup(&dib7000p_ops, sizeof(dib7000p_ops), GFP_KERNEL);
++ if (!fe0->dvb.frontend->sec_priv)
++ return -ENOMEM;
+ tun_i2c = dib7000p_ops.get_i2c_master(fe0->dvb.frontend, DIBX000_I2C_INTERFACE_TUNER, 1);
+ if (!dvb_attach(dib0070_attach, fe0->dvb.frontend, tun_i2c, &dib7070p_dib0070_config))
+ return -ENODEV;
+diff --git a/drivers/media/pci/ivtv/ivtv-fileops.c b/drivers/media/pci/ivtv/ivtv-fileops.c
+index 6196daae4b3e..043ac0ae9ed0 100644
+--- a/drivers/media/pci/ivtv/ivtv-fileops.c
++++ b/drivers/media/pci/ivtv/ivtv-fileops.c
+@@ -420,7 +420,7 @@ static ssize_t ivtv_read_pos(struct ivtv_stream *s, char __user *ubuf, size_t co
+
+ IVTV_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
+ if (rc > 0)
+- pos += rc;
++ *pos += rc;
+ return rc;
+ }
+
+diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
+index 7f878fc41b7e..93fecffb36ee 100644
+--- a/drivers/media/pci/pt1/pt1.c
++++ b/drivers/media/pci/pt1/pt1.c
+@@ -200,16 +200,10 @@ static const u8 va1j5jf8007t_25mhz_configs[][2] = {
+ static int config_demod(struct i2c_client *cl, enum pt1_fe_clk clk)
+ {
+ int ret;
+- u8 buf[2] = {0x01, 0x80};
+ bool is_sat;
+ const u8 (*cfg_data)[2];
+ int i, len;
+
+- ret = i2c_master_send(cl, buf, 2);
+- if (ret < 0)
+- return ret;
+- usleep_range(30000, 50000);
+-
+ is_sat = !strncmp(cl->name, TC90522_I2C_DEV_SAT,
+ strlen(TC90522_I2C_DEV_SAT));
+ if (is_sat) {
+@@ -260,6 +254,46 @@ static int config_demod(struct i2c_client *cl, enum pt1_fe_clk clk)
+ return 0;
+ }
+
++/*
++ * Init registers for (each pair of) terrestrial/satellite block in demod.
++ * Note that resetting a terr. block also resets its peer sat. block.
++ * This function must be called before configuring any demod block
++ * (before pt1_wakeup(), fe->ops.init()).
++ */
++static int pt1_demod_block_init(struct pt1 *pt1)
++{
++ struct i2c_client *cl;
++ u8 buf[2] = {0x01, 0x80};
++ int ret;
++ int i;
++
++ /* reset all terr. & sat. pairs first */
++ for (i = 0; i < PT1_NR_ADAPS; i++) {
++ cl = pt1->adaps[i]->demod_i2c_client;
++ if (strncmp(cl->name, TC90522_I2C_DEV_TER,
++ strlen(TC90522_I2C_DEV_TER)))
++ continue;
++
++ ret = i2c_master_send(cl, buf, 2);
++ if (ret < 0)
++ return ret;
++ usleep_range(30000, 50000);
++ }
++
++ for (i = 0; i < PT1_NR_ADAPS; i++) {
++ cl = pt1->adaps[i]->demod_i2c_client;
++ if (strncmp(cl->name, TC90522_I2C_DEV_SAT,
++ strlen(TC90522_I2C_DEV_SAT)))
++ continue;
++
++ ret = i2c_master_send(cl, buf, 2);
++ if (ret < 0)
++ return ret;
++ usleep_range(30000, 50000);
++ }
++ return 0;
++}
++
+ static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
+ {
+ writel(data, pt1->regs + reg * 4);
+@@ -987,6 +1021,10 @@ static int pt1_init_frontends(struct pt1 *pt1)
+ goto tuner_release;
+ }
+
++ ret = pt1_demod_block_init(pt1);
++ if (ret < 0)
++ goto fe_unregister;
++
+ return 0;
+
+ tuner_release:
+@@ -1245,6 +1283,10 @@ static int pt1_resume(struct device *dev)
+ pt1_update_power(pt1);
+ usleep_range(1000, 2000);
+
++ ret = pt1_demod_block_init(pt1);
++ if (ret < 0)
++ goto resume_err;
++
+ for (i = 0; i < PT1_NR_ADAPS; i++)
+ dvb_frontend_reinitialise(pt1->adaps[i]->fe);
+
+diff --git a/drivers/media/pci/tw5864/tw5864-video.c b/drivers/media/pci/tw5864/tw5864-video.c
+index ff2b7da90c08..6c40e60ac993 100644
+--- a/drivers/media/pci/tw5864/tw5864-video.c
++++ b/drivers/media/pci/tw5864/tw5864-video.c
+@@ -1395,13 +1395,13 @@ static void tw5864_handle_frame(struct tw5864_h264_frame *frame)
+ input->vb = NULL;
+ spin_unlock_irqrestore(&input->slock, flags);
+
+- v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
+-
+ if (!vb) { /* Gone because of disabling */
+ dev_dbg(&dev->pci->dev, "vb is empty, dropping frame\n");
+ return;
+ }
+
++ v4l2_buf = to_vb2_v4l2_buffer(&vb->vb.vb2_buf);
++
+ /*
+ * Check for space.
+ * Mind the overhead of startcode emulation prevention.
+diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
+index e8db4df1e7c4..1a0e5233ae28 100644
+--- a/drivers/media/platform/atmel/atmel-isi.c
++++ b/drivers/media/platform/atmel/atmel-isi.c
+@@ -496,7 +496,7 @@ static void stop_streaming(struct vb2_queue *vq)
+ spin_unlock_irq(&isi->irqlock);
+
+ if (!isi->enable_preview_path) {
+- timeout = jiffies + FRAME_INTERVAL_MILLI_SEC * HZ;
++ timeout = jiffies + (FRAME_INTERVAL_MILLI_SEC * HZ) / 1000;
+ /* Wait until the end of the current frame. */
+ while ((isi_readl(isi, ISI_STATUS) & ISI_CTRL_CDC) &&
+ time_before(jiffies, timeout))
+diff --git a/drivers/media/platform/davinci/isif.c b/drivers/media/platform/davinci/isif.c
+index 340f8218f54d..80fa60a4c448 100644
+--- a/drivers/media/platform/davinci/isif.c
++++ b/drivers/media/platform/davinci/isif.c
+@@ -884,9 +884,7 @@ static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
+ static int isif_config_ycbcr(void)
+ {
+ struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
+- struct vpss_pg_frame_size frame_size;
+ u32 modeset = 0, ccdcfg = 0;
+- struct vpss_sync_pol sync;
+
+ dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
+
+@@ -974,13 +972,6 @@ static int isif_config_ycbcr(void)
+ /* two fields are interleaved in memory */
+ regw(0x00000249, SDOFST);
+
+- /* Setup test pattern if enabled */
+- if (isif_cfg.bayer.config_params.test_pat_gen) {
+- sync.ccdpg_hdpol = params->hd_pol;
+- sync.ccdpg_vdpol = params->vd_pol;
+- dm365_vpss_set_sync_pol(sync);
+- dm365_vpss_set_pg_frame_size(frame_size);
+- }
+ return 0;
+ }
+
+diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
+index df1ae6b5c854..e45e062f4442 100644
+--- a/drivers/media/platform/davinci/vpbe.c
++++ b/drivers/media/platform/davinci/vpbe.c
+@@ -126,7 +126,7 @@ static int vpbe_enum_outputs(struct vpbe_device *vpbe_dev,
+ struct v4l2_output *output)
+ {
+ struct vpbe_config *cfg = vpbe_dev->cfg;
+- int temp_index = output->index;
++ unsigned int temp_index = output->index;
+
+ if (temp_index >= cfg->num_outputs)
+ return -EINVAL;
+diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
+index 5700b7818621..45511d24d570 100644
+--- a/drivers/media/platform/omap/omap_vout.c
++++ b/drivers/media/platform/omap/omap_vout.c
+@@ -1527,23 +1527,20 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *b)
+ unsigned long size;
+ struct videobuf_buffer *vb;
+
+- vb = q->bufs[b->index];
+-
+ if (!vout->streaming)
+ return -EINVAL;
+
+- if (file->f_flags & O_NONBLOCK)
+- /* Call videobuf_dqbuf for non blocking mode */
+- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 1);
+- else
+- /* Call videobuf_dqbuf for blocking mode */
+- ret = videobuf_dqbuf(q, (struct v4l2_buffer *)b, 0);
++ ret = videobuf_dqbuf(q, b, !!(file->f_flags & O_NONBLOCK));
++ if (ret)
++ return ret;
++
++ vb = q->bufs[b->index];
+
+ addr = (unsigned long) vout->buf_phy_addr[vb->i];
+ size = (unsigned long) vb->size;
+ dma_unmap_single(vout->vid_dev->v4l2_dev.dev, addr,
+ size, DMA_TO_DEVICE);
+- return ret;
++ return 0;
+ }
+
+ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
+diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
+index 485fa3fa8b49..c389ba9ba74d 100644
+--- a/drivers/media/platform/rcar-vin/rcar-core.c
++++ b/drivers/media/platform/rcar-vin/rcar-core.c
+@@ -631,7 +631,7 @@ static int rvin_parallel_init(struct rvin_dev *vin)
+ ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
+ if (ret < 0) {
+ vin_err(vin, "Notifier registration failed\n");
+- v4l2_async_notifier_cleanup(&vin->group->notifier);
++ v4l2_async_notifier_cleanup(&vin->notifier);
+ return ret;
+ }
+
+diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
+index 350afaa29a62..fa7c42cf4b4e 100644
+--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
++++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
+@@ -2005,7 +2005,7 @@ static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx)
+
+ v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops,
+ V4L2_CID_JPEG_RESTART_INTERVAL,
+- 0, 3, 0xffff, 0);
++ 0, 0xffff, 1, 0);
+ if (ctx->jpeg->variant->version == SJPEG_S5P)
+ mask = ~0x06; /* 422, 420 */
+ }
+diff --git a/drivers/media/platform/vivid/vivid-osd.c b/drivers/media/platform/vivid/vivid-osd.c
+index bbbc1b6938a5..b24596697f57 100644
+--- a/drivers/media/platform/vivid/vivid-osd.c
++++ b/drivers/media/platform/vivid/vivid-osd.c
+@@ -155,7 +155,7 @@ static int _vivid_fb_check_var(struct fb_var_screeninfo *var, struct vivid_dev *
+ var->nonstd = 0;
+
+ var->vmode &= ~FB_VMODE_MASK;
+- var->vmode = FB_VMODE_NONINTERLACED;
++ var->vmode |= FB_VMODE_NONINTERLACED;
+
+ /* Dummy values */
+ var->hsync_len = 24;
+diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
+index 1cf4019689a5..cccf1a743f4e 100644
+--- a/drivers/media/radio/wl128x/fmdrv_common.c
++++ b/drivers/media/radio/wl128x/fmdrv_common.c
+@@ -1271,8 +1271,9 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
+
+ switch (action->type) {
+ case ACTION_SEND_COMMAND: /* Send */
+- if (fmc_send_cmd(fmdev, 0, 0, action->data,
+- action->size, NULL, NULL))
++ ret = fmc_send_cmd(fmdev, 0, 0, action->data,
++ action->size, NULL, NULL);
++ if (ret)
+ goto rel_fw;
+
+ cmd_cnt++;
+diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
+index 5657f8710ca6..69445c8e38e2 100644
+--- a/drivers/media/usb/em28xx/em28xx-core.c
++++ b/drivers/media/usb/em28xx/em28xx-core.c
+@@ -930,7 +930,7 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
+
+ usb_bufs->buf = kcalloc(num_bufs, sizeof(void *), GFP_KERNEL);
+ if (!usb_bufs->buf) {
+- kfree(usb_bufs->buf);
++ kfree(usb_bufs->urb);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
+index c8f16666256c..346d8eadb44b 100644
+--- a/drivers/memory/tegra/mc.c
++++ b/drivers/memory/tegra/mc.c
+@@ -664,12 +664,13 @@ static int tegra_mc_probe(struct platform_device *pdev)
+ }
+
+ isr = tegra_mc_irq;
+- }
+
+- err = tegra_mc_setup_timings(mc);
+- if (err < 0) {
+- dev_err(&pdev->dev, "failed to setup timings: %d\n", err);
+- return err;
++ err = tegra_mc_setup_timings(mc);
++ if (err < 0) {
++ dev_err(&pdev->dev, "failed to setup timings: %d\n",
++ err);
++ return err;
++ }
+ }
+
+ mc->irq = platform_get_irq(pdev, 0);
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index c37c8bb86068..742d6c1973f4 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -126,6 +126,18 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
+ .properties = apl_i2c_properties,
+ };
+
++static struct property_entry glk_i2c_properties[] = {
++ PROPERTY_ENTRY_U32("i2c-sda-hold-time-ns", 313),
++ PROPERTY_ENTRY_U32("i2c-sda-falling-time-ns", 171),
++ PROPERTY_ENTRY_U32("i2c-scl-falling-time-ns", 290),
++ { },
++};
++
++static const struct intel_lpss_platform_info glk_i2c_info = {
++ .clk_rate = 133000000,
++ .properties = glk_i2c_properties,
++};
++
+ static const struct intel_lpss_platform_info cnl_i2c_info = {
+ .clk_rate = 216000000,
+ .properties = spt_i2c_properties,
+@@ -165,14 +177,14 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
+ { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
+ /* GLK */
+- { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&bxt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&bxt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&bxt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&bxt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&bxt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&bxt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&bxt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&bxt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x31b0), (kernel_ulong_t)&glk_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x31b2), (kernel_ulong_t)&glk_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x31b4), (kernel_ulong_t)&glk_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x31b6), (kernel_ulong_t)&glk_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x31b8), (kernel_ulong_t)&glk_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x31ba), (kernel_ulong_t)&glk_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x31bc), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x31be), (kernel_ulong_t)&bxt_uart_info },
+ { PCI_VDEVICE(INTEL, 0x31c0), (kernel_ulong_t)&bxt_uart_info },
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index ff3fba16e735..95e217e6b6d7 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -545,6 +545,7 @@ module_init(intel_lpss_init);
+
+ static void __exit intel_lpss_exit(void)
+ {
++ ida_destroy(&intel_lpss_devid_ida);
+ debugfs_remove(intel_lpss_debugfs);
+ }
+ module_exit(intel_lpss_exit);
+diff --git a/drivers/misc/aspeed-lpc-snoop.c b/drivers/misc/aspeed-lpc-snoop.c
+index 2feb4347d67f..c10be21a1663 100644
+--- a/drivers/misc/aspeed-lpc-snoop.c
++++ b/drivers/misc/aspeed-lpc-snoop.c
+@@ -101,13 +101,13 @@ static ssize_t snoop_file_read(struct file *file, char __user *buffer,
+ return ret ? ret : copied;
+ }
+
+-static unsigned int snoop_file_poll(struct file *file,
++static __poll_t snoop_file_poll(struct file *file,
+ struct poll_table_struct *pt)
+ {
+ struct aspeed_lpc_snoop_channel *chan = snoop_file_to_chan(file);
+
+ poll_wait(file, &chan->wq, pt);
+- return !kfifo_is_empty(&chan->fifo) ? POLLIN : 0;
++ return !kfifo_is_empty(&chan->fifo) ? EPOLLIN : 0;
+ }
+
+ static const struct file_operations snoop_fops = {
+diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
+index 4d77a6ae183a..87281b3695e6 100644
+--- a/drivers/misc/mei/main.c
++++ b/drivers/misc/mei/main.c
+@@ -599,10 +599,10 @@ static __poll_t mei_poll(struct file *file, poll_table *wait)
+ mei_cl_read_start(cl, mei_cl_mtu(cl), file);
+ }
+
+- if (req_events & (POLLOUT | POLLWRNORM)) {
++ if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
+ poll_wait(file, &cl->tx_wait, wait);
+ if (cl->tx_cb_queued < dev->tx_queue_limit)
+- mask |= POLLOUT | POLLWRNORM;
++ mask |= EPOLLOUT | EPOLLWRNORM;
+ }
+
+ out:
+diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c
+index b9f0710ffa6b..4007adc666f3 100644
+--- a/drivers/misc/mic/card/mic_x100.c
++++ b/drivers/misc/mic/card/mic_x100.c
+@@ -249,6 +249,9 @@ static int __init mic_probe(struct platform_device *pdev)
+ mdrv->dev = &pdev->dev;
+ snprintf(mdrv->name, sizeof(mic_driver_name), mic_driver_name);
+
++ /* FIXME: use dma_set_mask_and_coherent() and check result */
++ dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
++
+ mdev->mmio.pa = MIC_X100_MMIO_BASE;
+ mdev->mmio.len = MIC_X100_MMIO_LEN;
+ mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE,
+@@ -294,18 +297,6 @@ static void mic_platform_shutdown(struct platform_device *pdev)
+ mic_remove(pdev);
+ }
+
+-static u64 mic_dma_mask = DMA_BIT_MASK(64);
+-
+-static struct platform_device mic_platform_dev = {
+- .name = mic_driver_name,
+- .id = 0,
+- .num_resources = 0,
+- .dev = {
+- .dma_mask = &mic_dma_mask,
+- .coherent_dma_mask = DMA_BIT_MASK(64),
+- },
+-};
+-
+ static struct platform_driver __refdata mic_platform_driver = {
+ .probe = mic_probe,
+ .remove = mic_remove,
+@@ -315,6 +306,8 @@ static struct platform_driver __refdata mic_platform_driver = {
+ },
+ };
+
++static struct platform_device *mic_platform_dev;
++
+ static int __init mic_init(void)
+ {
+ int ret;
+@@ -328,9 +321,12 @@ static int __init mic_init(void)
+
+ request_module("mic_x100_dma");
+ mic_init_card_debugfs();
+- ret = platform_device_register(&mic_platform_dev);
++
++ mic_platform_dev = platform_device_register_simple(mic_driver_name,
++ 0, NULL, 0);
++ ret = PTR_ERR_OR_ZERO(mic_platform_dev);
+ if (ret) {
+- pr_err("platform_device_register ret %d\n", ret);
++ pr_err("platform_device_register_full ret %d\n", ret);
+ goto cleanup_debugfs;
+ }
+ ret = platform_driver_register(&mic_platform_driver);
+@@ -341,7 +337,7 @@ static int __init mic_init(void)
+ return ret;
+
+ device_unregister:
+- platform_device_unregister(&mic_platform_dev);
++ platform_device_unregister(mic_platform_dev);
+ cleanup_debugfs:
+ mic_exit_card_debugfs();
+ done:
+@@ -351,7 +347,7 @@ done:
+ static void __exit mic_exit(void)
+ {
+ platform_driver_unregister(&mic_platform_driver);
+- platform_device_unregister(&mic_platform_dev);
++ platform_device_unregister(mic_platform_dev);
+ mic_exit_card_debugfs();
+ }
+
+diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
+index 0c3ef6f1df54..519826ba1378 100644
+--- a/drivers/misc/sgi-xp/xpc_partition.c
++++ b/drivers/misc/sgi-xp/xpc_partition.c
+@@ -70,7 +70,7 @@ xpc_get_rsvd_page_pa(int nasid)
+ unsigned long rp_pa = nasid; /* seed with nasid */
+ size_t len = 0;
+ size_t buf_len = 0;
+- void *buf = buf;
++ void *buf = NULL;
+ void *buf_base = NULL;
+ enum xp_retval (*get_partition_rsvd_page_pa)
+ (void *, u64 *, unsigned long *, size_t *) =
+diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
+index f57f5de54206..dd1c14d8f686 100644
+--- a/drivers/mmc/core/host.c
++++ b/drivers/mmc/core/host.c
+@@ -385,8 +385,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
+
+ if (mmc_gpio_alloc(host)) {
+ put_device(&host->class_dev);
+- ida_simple_remove(&mmc_host_ida, host->index);
+- kfree(host);
+ return NULL;
+ }
+
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index dd2f73af8f2c..d5bbe8e544de 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -119,7 +119,14 @@ static const struct mmc_fixup mmc_ext_csd_fixups[] = {
+ END_FIXUP
+ };
+
++
+ static const struct mmc_fixup sdio_fixup_methods[] = {
++ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
++ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
++
++ SDIO_FIXUP(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251,
++ add_quirk, MMC_QUIRK_DISABLE_CD),
++
+ SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
+ add_quirk, MMC_QUIRK_NONSTD_FUNC_IF),
+
+diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c
+index 552bddc5096c..1cd10356fc14 100644
+--- a/drivers/mmc/host/sdhci-brcmstb.c
++++ b/drivers/mmc/host/sdhci-brcmstb.c
+@@ -55,7 +55,9 @@ static int sdhci_brcmstb_probe(struct platform_device *pdev)
+ }
+
+ sdhci_get_of_property(pdev);
+- mmc_of_parse(host->mmc);
++ res = mmc_of_parse(host->mmc);
++ if (res)
++ goto err;
+
+ /*
+ * Supply the existing CAPS, but clear the UHS modes. This
+diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
+index 2d3a2cb026d2..9f21e710fc38 100644
+--- a/drivers/net/dsa/b53/b53_common.c
++++ b/drivers/net/dsa/b53/b53_common.c
+@@ -343,7 +343,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable)
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt);
+ }
+
+-static void b53_enable_vlan(struct b53_device *dev, bool enable)
++static void b53_enable_vlan(struct b53_device *dev, bool enable,
++ bool enable_filtering)
+ {
+ u8 mgmt, vc0, vc1, vc4 = 0, vc5;
+
+@@ -368,8 +369,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
+ vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID;
+ vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN;
+ vc4 &= ~VC4_ING_VID_CHECK_MASK;
+- vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
+- vc5 |= VC5_DROP_VTABLE_MISS;
++ if (enable_filtering) {
++ vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S;
++ vc5 |= VC5_DROP_VTABLE_MISS;
++ } else {
++ vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S;
++ vc5 &= ~VC5_DROP_VTABLE_MISS;
++ }
+
+ if (is5325(dev))
+ vc0 &= ~VC0_RESERVED_1;
+@@ -419,6 +425,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable)
+ }
+
+ b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt);
++
++ dev->vlan_enabled = enable;
++ dev->vlan_filtering_enabled = enable_filtering;
+ }
+
+ static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100)
+@@ -622,25 +631,35 @@ static void b53_enable_mib(struct b53_device *dev)
+ b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc);
+ }
+
++static u16 b53_default_pvid(struct b53_device *dev)
++{
++ if (is5325(dev) || is5365(dev))
++ return 1;
++ else
++ return 0;
++}
++
+ int b53_configure_vlan(struct dsa_switch *ds)
+ {
+ struct b53_device *dev = ds->priv;
+ struct b53_vlan vl = { 0 };
+- int i;
++ int i, def_vid;
++
++ def_vid = b53_default_pvid(dev);
+
+ /* clear all vlan entries */
+ if (is5325(dev) || is5365(dev)) {
+- for (i = 1; i < dev->num_vlans; i++)
++ for (i = def_vid; i < dev->num_vlans; i++)
+ b53_set_vlan_entry(dev, i, &vl);
+ } else {
+ b53_do_vlan_op(dev, VTA_CMD_CLEAR);
+ }
+
+- b53_enable_vlan(dev, false);
++ b53_enable_vlan(dev, false, dev->vlan_filtering_enabled);
+
+ b53_for_each_port(dev, i)
+ b53_write16(dev, B53_VLAN_PAGE,
+- B53_VLAN_PORT_DEF_TAG(i), 1);
++ B53_VLAN_PORT_DEF_TAG(i), def_vid);
+
+ if (!is5325(dev) && !is5365(dev))
+ b53_set_jumbo(dev, dev->enable_jumbo, false);
+@@ -1071,6 +1090,46 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
+
+ int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
+ {
++ struct b53_device *dev = ds->priv;
++ struct net_device *bridge_dev;
++ unsigned int i;
++ u16 pvid, new_pvid;
++
++ /* Handle the case where multiple bridges span the same switch device
++ * and one of them has a different setting than what is being requested,
++ * which would break filtering semantics for any of the other
++ * bridge devices.
++ */
++ b53_for_each_port(dev, i) {
++ bridge_dev = dsa_to_port(ds, i)->bridge_dev;
++ if (bridge_dev &&
++ bridge_dev != dsa_to_port(ds, port)->bridge_dev &&
++ br_vlan_enabled(bridge_dev) != vlan_filtering) {
++ netdev_err(bridge_dev,
++ "VLAN filtering is global to the switch!\n");
++ return -EINVAL;
++ }
++ }
++
++ b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid);
++ new_pvid = pvid;
++ if (dev->vlan_filtering_enabled && !vlan_filtering) {
++ /* Filtering is currently enabled, use the default PVID since
++ * the bridge does not expect tagging anymore
++ */
++ dev->ports[port].pvid = pvid;
++ new_pvid = b53_default_pvid(dev);
++ } else if (!dev->vlan_filtering_enabled && vlan_filtering) {
++ /* Filtering is currently disabled, restore the previous PVID */
++ new_pvid = dev->ports[port].pvid;
++ }
++
++ if (pvid != new_pvid)
++ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
++ new_pvid);
++
++ b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering);
++
+ return 0;
+ }
+ EXPORT_SYMBOL(b53_vlan_filtering);
+@@ -1086,7 +1145,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port,
+ if (vlan->vid_end > dev->num_vlans)
+ return -ERANGE;
+
+- b53_enable_vlan(dev, true);
++ b53_enable_vlan(dev, true, dev->vlan_filtering_enabled);
+
+ return 0;
+ }
+@@ -1116,7 +1175,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
+ b53_fast_age_vlan(dev, vid);
+ }
+
+- if (pvid) {
++ if (pvid && !dsa_is_cpu_port(ds, port)) {
+ b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
+ vlan->vid_end);
+ b53_fast_age_vlan(dev, vid);
+@@ -1142,12 +1201,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
+
+ vl->members &= ~BIT(port);
+
+- if (pvid == vid) {
+- if (is5325(dev) || is5365(dev))
+- pvid = 1;
+- else
+- pvid = 0;
+- }
++ if (pvid == vid)
++ pvid = b53_default_pvid(dev);
+
+ if (untagged && !dsa_is_cpu_port(ds, port))
+ vl->untag &= ~(BIT(port));
+@@ -1460,10 +1515,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
+ b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
+ dev->ports[port].vlan_ctl_mask = pvlan;
+
+- if (is5325(dev) || is5365(dev))
+- pvid = 1;
+- else
+- pvid = 0;
++ pvid = b53_default_pvid(dev);
+
+ /* Make this port join all VLANs without VLAN entries */
+ if (is58xx(dev)) {
+diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
+index df149756c282..e87af5db0d6d 100644
+--- a/drivers/net/dsa/b53/b53_priv.h
++++ b/drivers/net/dsa/b53/b53_priv.h
+@@ -73,6 +73,7 @@ enum {
+ struct b53_port {
+ u16 vlan_ctl_mask;
+ struct ethtool_eee eee;
++ u16 pvid;
+ };
+
+ struct b53_vlan {
+@@ -118,6 +119,8 @@ struct b53_device {
+
+ unsigned int num_vlans;
+ struct b53_vlan *vlans;
++ bool vlan_enabled;
++ bool vlan_filtering_enabled;
+ unsigned int num_ports;
+ struct b53_port *ports;
+ };
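The b53 hunks above remember the bridge-managed PVID while VLAN filtering is switched off and restore it when filtering is turned back on, falling back to the chip's default PVID in between. A minimal userspace sketch of that save/restore logic, with a stand-in port structure and a hard-coded default PVID (illustration only, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

struct port_state {
	unsigned int hw_pvid;    /* what the switch register holds */
	unsigned int saved_pvid; /* PVID remembered while filtering is off */
};

static unsigned int default_pvid(void)
{
	return 0; /* 1 on the 5325/5365 family, 0 otherwise */
}

static void set_vlan_filtering(struct port_state *p, bool currently_on, bool want_on)
{
	unsigned int new_pvid = p->hw_pvid;

	if (currently_on && !want_on) {
		/* bridge stops expecting tagging: park the PVID, use default */
		p->saved_pvid = p->hw_pvid;
		new_pvid = default_pvid();
	} else if (!currently_on && want_on) {
		/* filtering re-enabled: bring back the remembered PVID */
		new_pvid = p->saved_pvid;
	}

	if (new_pvid != p->hw_pvid)
		p->hw_pvid = new_pvid; /* would be a register write in the driver */
}

int main(void)
{
	struct port_state p = { .hw_pvid = 42, .saved_pvid = 0 };

	set_vlan_filtering(&p, true, false);
	printf("filtering off -> pvid %u (saved %u)\n", p.hw_pvid, p.saved_pvid);
	set_vlan_filtering(&p, false, true);
	printf("filtering on  -> pvid %u\n", p.hw_pvid);
	return 0;
}

This is the same shape as b53_vlan_filtering() above, which uses b53_default_pvid() and the new per-port pvid field for the same purpose.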
+diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
+index 33232cc9fb04..6c04f32e9641 100644
+--- a/drivers/net/dsa/qca8k.c
++++ b/drivers/net/dsa/qca8k.c
+@@ -451,6 +451,18 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode)
+ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
+ break;
++ case PHY_INTERFACE_MODE_RGMII_ID:
++ /* RGMII_ID needs an internal delay. This is enabled through
++ * PORT5_PAD_CTRL for all ports, rather than through individual
++ * port registers.
++ */
++ qca8k_write(priv, reg,
++ QCA8K_PORT_PAD_RGMII_EN |
++ QCA8K_PORT_PAD_RGMII_TX_DELAY(QCA8K_MAX_DELAY) |
++ QCA8K_PORT_PAD_RGMII_RX_DELAY(QCA8K_MAX_DELAY));
++ qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
++ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
++ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_SGMII_EN);
+ break;
+diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
+index 613fe5c50236..d146e54c8a6c 100644
+--- a/drivers/net/dsa/qca8k.h
++++ b/drivers/net/dsa/qca8k.h
+@@ -40,6 +40,7 @@
+ ((0x8 + (x & 0x3)) << 22)
+ #define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) \
+ ((0x10 + (x & 0x3)) << 20)
++#define QCA8K_MAX_DELAY 3
+ #define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
+ #define QCA8K_PORT_PAD_SGMII_EN BIT(7)
+ #define QCA8K_REG_MODULE_EN 0x030
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 7635c38e77dd..92261c946e2a 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -2008,7 +2008,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ if (unlikely(ret))
+ return ret;
+
+- if (get_resp.u.flow_hash_func.supported_func & (1 << rss->hash_func)) {
++ if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
+ pr_err("Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
+ return -EOPNOTSUPP;
+@@ -2093,6 +2093,7 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ return -EINVAL;
+ }
+
++ rss->hash_func = func;
+ rc = ena_com_set_hash_function(ena_dev);
+
+ /* Restore the old function */
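The ena_com change above inverts the capability test: the request is rejected only when the bit for the chosen hash function is absent from the device's supported mask. A standalone sketch of that mask check, with made-up hash function IDs:

#include <stdio.h>

#define BIT(n) (1U << (n))

/* hypothetical hash function IDs, only to mirror the supported-mask idea */
enum { HASH_TOEPLITZ = 0, HASH_CRC32 = 1 };

static int check_supported(unsigned int supported_mask, unsigned int func)
{
	/* reject only when the function's bit is missing from the mask */
	if (!(supported_mask & BIT(func))) {
		fprintf(stderr, "hash func %u not supported\n", func);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned int mask = BIT(HASH_TOEPLITZ);

	printf("toeplitz: %d\n", check_supported(mask, HASH_TOEPLITZ)); /* 0 */
	printf("crc32:    %d\n", check_supported(mask, HASH_CRC32));    /* -1 */
	return 0;
}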
+diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+index 521607bc4393..eb9e07fa427e 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
++++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+@@ -695,8 +695,8 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
+ if (indir) {
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
+ rc = ena_com_indirect_table_fill_entry(ena_dev,
+- ENA_IO_RXQ_IDX(indir[i]),
+- i);
++ i,
++ ENA_IO_RXQ_IDX(indir[i]));
+ if (unlikely(rc)) {
+ netif_err(adapter, drv, netdev,
+ "Cannot fill indirect table (index is too large)\n");
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index e26c195fec83..9afb19ebba58 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1800,6 +1800,7 @@ err_setup_rx:
+ err_setup_tx:
+ ena_free_io_irq(adapter);
+ err_req_irq:
++ ena_del_napi(adapter);
+
+ return rc;
+ }
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+index 82582fa54d5d..72aa2a4c4d66 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+@@ -309,15 +309,13 @@ irqreturn_t aq_vec_isr_legacy(int irq, void *private)
+ {
+ struct aq_vec_s *self = private;
+ u64 irq_mask = 0U;
+- irqreturn_t err = 0;
++ int err;
+
+- if (!self) {
+- err = -EINVAL;
+- goto err_exit;
+- }
++ if (!self)
++ return IRQ_NONE;
+ err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
+ if (err < 0)
+- goto err_exit;
++ return IRQ_NONE;
+
+ if (irq_mask) {
+ self->aq_hw_ops->hw_irq_disable(self->aq_hw,
+@@ -325,11 +323,10 @@ irqreturn_t aq_vec_isr_legacy(int irq, void *private)
+ napi_schedule(&self->napi);
+ } else {
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
+- err = IRQ_NONE;
++ return IRQ_NONE;
+ }
+
+-err_exit:
+- return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
++ return IRQ_HANDLED;
+ }
+
+ cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
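The aq_vec hunk stops funnelling error codes and irqreturn_t values through one variable; since IRQ_NONE is 0, the old "err >= 0 ? IRQ_HANDLED : IRQ_NONE" expression reported spurious interrupts as handled. A standalone sketch of the corrected flow with a faked hardware read (not the driver's code):

#include <stdio.h>

/* same numeric values as the kernel's enum irqreturn */
enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

/* fake "read the interrupt mask" helper; pending == 0 means spurious IRQ */
static int hw_irq_read(unsigned long *mask, unsigned long pending)
{
	*mask = pending;
	return 0; /* success */
}

static enum irqreturn isr(unsigned long pending)
{
	unsigned long mask;

	if (hw_irq_read(&mask, pending) < 0)
		return IRQ_NONE;        /* read failed: not ours */
	if (!mask)
		return IRQ_NONE;        /* nothing pending: spurious */
	return IRQ_HANDLED;             /* work scheduled, claim the IRQ */
}

int main(void)
{
	printf("pending=0 -> %d (expect IRQ_NONE=0)\n", isr(0));
	printf("pending=1 -> %d (expect IRQ_HANDLED=1)\n", isr(1));
	return 0;
}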
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+index 97addfa6f895..dab5891b9714 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+@@ -207,8 +207,8 @@ static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
+ u32 i = 0U;
+ u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+ int err = 0;
+- u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX *
+- HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
++ u16 bitary[1 + (HW_ATL_A0_RSS_REDIRECTION_MAX *
++ HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+
+ memset(bitary, 0, sizeof(bitary));
+
+diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+index 51cd1f98bcf0..c4f914a29c38 100644
+--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
++++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+@@ -192,8 +192,8 @@ static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
+ u32 i = 0U;
+ u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+ int err = 0;
+- u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
+- HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
++ u16 bitary[1 + (HW_ATL_B0_RSS_REDIRECTION_MAX *
++ HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+
+ memset(bitary, 0, sizeof(bitary));
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 0bdbc72605e1..49aa3b5ea57c 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -2470,7 +2470,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+
+ priv->phy_interface = of_get_phy_mode(dn);
+ /* Default to GMII interface mode */
+- if (priv->phy_interface < 0)
++ if ((int)priv->phy_interface < 0)
+ priv->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+ /* In the case of a fixed PHY, the DT node associated
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+index f9e253b705ec..585f5aef0a45 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -527,6 +527,7 @@ struct rx_tpa_end_cmp_ext {
+ #define DFLT_HWRM_CMD_TIMEOUT 500
+ #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout)
+ #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4)
++#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12)
+ #define HWRM_RESP_ERR_CODE_MASK 0xffff
+ #define HWRM_RESP_LEN_OFFSET 4
+ #define HWRM_RESP_LEN_MASK 0xffff0000
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+index a85d2be986af..0e4e0b47f5d8 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+@@ -396,7 +396,7 @@ static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp)
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1);
+ mutex_lock(&bp->hwrm_cmd_lock);
+- rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1;
+ if (bp->max_dscp_value < 0x3f)
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+index 0a409ba4012a..047024717d65 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+@@ -1778,21 +1778,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
+ mutex_lock(&bp->hwrm_cmd_lock);
+ hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+- if (hwrm_err)
+- goto flash_pkg_exit;
+-
+- if (resp->error_code) {
++ if (hwrm_err) {
+ u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
+
+- if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
++ if (resp->error_code && error_code ==
++ NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
+ install.flags |= cpu_to_le16(
+ NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
+ hwrm_err = _hwrm_send_message(bp, &install,
+ sizeof(install),
+ INSTALL_PACKAGE_TIMEOUT);
+- if (hwrm_err)
+- goto flash_pkg_exit;
+ }
++ if (hwrm_err)
++ goto flash_pkg_exit;
+ }
+
+ if (resp->result) {
+@@ -2600,7 +2598,7 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ bool offline = false;
+ u8 test_results = 0;
+ u8 test_mask = 0;
+- int rc, i;
++ int rc = 0, i;
+
+ if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
+ return;
+@@ -2671,9 +2669,9 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
+ }
+ bnxt_hwrm_phy_loopback(bp, false, false);
+ bnxt_half_close_nic(bp);
+- bnxt_open_nic(bp, false, true);
++ rc = bnxt_open_nic(bp, false, true);
+ }
+- if (bnxt_test_irq(bp)) {
++ if (rc || bnxt_test_irq(bp)) {
+ buf[BNXT_IRQ_TEST_IDX] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+@@ -2835,7 +2833,7 @@ static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
+ req.component_id = cpu_to_le16(component_id);
+ req.segment_id = cpu_to_le16(segment_id);
+
+- return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
++ return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
+ }
+
+ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index bb04c695ab9f..c81d6c330548 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -5452,7 +5452,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ whoami = t4_read_reg(adapter, PL_WHOAMI_A);
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
+ chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
+- if (chip < 0) {
++ if ((int)chip < 0) {
+ dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
+ err = chip;
+ goto out_free_adapter;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/smt.c b/drivers/net/ethernet/chelsio/cxgb4/smt.c
+index 7b2207a2a130..9b3f4205cb4d 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/smt.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/smt.c
+@@ -98,11 +98,9 @@ found_reuse:
+
+ static void t4_smte_free(struct smt_entry *e)
+ {
+- spin_lock_bh(&e->lock);
+ if (atomic_read(&e->refcnt) == 0) { /* hasn't been recycled */
+ e->state = SMT_STATE_UNUSED;
+ }
+- spin_unlock_bh(&e->lock);
+ }
+
+ /**
+@@ -112,8 +110,10 @@ static void t4_smte_free(struct smt_entry *e)
+ */
+ void cxgb4_smt_release(struct smt_entry *e)
+ {
++ spin_lock_bh(&e->lock);
+ if (atomic_dec_and_test(&e->refcnt))
+ t4_smte_free(e);
++ spin_unlock_bh(&e->lock);
+ }
+ EXPORT_SYMBOL(cxgb4_smt_release);
+
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 462bb8c4f80c..d7736c9c6339 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -1600,13 +1600,15 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
+ * Skb freeing is not handled here.
+ *
+ * This function may be called on error paths in the Tx function, so guard
+- * against cases when not all fd relevant fields were filled in.
++ * against cases when not all fd relevant fields were filled in. To avoid
++ * reading an invalid transmission timestamp on the error paths, set ts
++ * to false.
+ *
+ * Return the skb backpointer, since for S/G frames the buffer containing it
+ * gets freed here.
+ */
+ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
+- const struct qm_fd *fd)
++ const struct qm_fd *fd, bool ts)
+ {
+ const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
+ struct device *dev = priv->net_dev->dev.parent;
+@@ -1620,18 +1622,6 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
+ skbh = (struct sk_buff **)phys_to_virt(addr);
+ skb = *skbh;
+
+- if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+- memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+-
+- if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+- &ns)) {
+- shhwtstamps.hwtstamp = ns_to_ktime(ns);
+- skb_tstamp_tx(skb, &shhwtstamps);
+- } else {
+- dev_warn(dev, "fman_port_get_tstamp failed!\n");
+- }
+- }
+-
+ if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ dma_unmap_single(dev, addr,
+@@ -1654,14 +1644,29 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
+ dma_unmap_page(dev, qm_sg_addr(&sgt[i]),
+ qm_sg_entry_get_len(&sgt[i]), dma_dir);
+ }
+-
+- /* Free the page frag that we allocated on Tx */
+- skb_free_frag(phys_to_virt(addr));
+ } else {
+ dma_unmap_single(dev, addr,
+ skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+ }
+
++ /* DMA unmapping is required before accessing the HW provided info */
++ if (ts && priv->tx_tstamp &&
++ skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
++ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
++
++ if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
++ &ns)) {
++ shhwtstamps.hwtstamp = ns_to_ktime(ns);
++ skb_tstamp_tx(skb, &shhwtstamps);
++ } else {
++ dev_warn(dev, "fman_port_get_tstamp failed!\n");
++ }
++ }
++
++ if (qm_fd_get_format(fd) == qm_fd_sg)
++ /* Free the page frag that we allocated on Tx */
++ skb_free_frag(phys_to_virt(addr));
++
+ return skb;
+ }
+
+@@ -2116,7 +2121,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+ if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
+ return NETDEV_TX_OK;
+
+- dpaa_cleanup_tx_fd(priv, &fd);
++ dpaa_cleanup_tx_fd(priv, &fd, false);
+ skb_to_fd_failed:
+ enomem:
+ percpu_stats->tx_errors++;
+@@ -2162,7 +2167,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
+
+ percpu_priv->stats.tx_errors++;
+
+- skb = dpaa_cleanup_tx_fd(priv, fd);
++ skb = dpaa_cleanup_tx_fd(priv, fd, false);
+ dev_kfree_skb(skb);
+ }
+
+@@ -2203,7 +2208,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
+
+ percpu_priv->tx_confirm++;
+
+- skb = dpaa_cleanup_tx_fd(priv, fd);
++ skb = dpaa_cleanup_tx_fd(priv, fd, true);
+
+ consume_skb(skb);
+ }
+@@ -2433,7 +2438,7 @@ static void egress_ern(struct qman_portal *portal,
+ percpu_priv->stats.tx_fifo_errors++;
+ count_ern(percpu_priv, msg);
+
+- skb = dpaa_cleanup_tx_fd(priv, fd);
++ skb = dpaa_cleanup_tx_fd(priv, fd, false);
+ dev_kfree_skb_any(skb);
+ }
+
+diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+index 471805ea363b..b63871ef8a40 100644
+--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
++++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+@@ -1201,7 +1201,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
+ goto err_free_mdio;
+
+ priv->phy_mode = of_get_phy_mode(node);
+- if (priv->phy_mode < 0) {
++ if ((int)priv->phy_mode < 0) {
+ netdev_err(ndev, "not find phy-mode\n");
+ ret = -EINVAL;
+ goto err_mdiobus;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+index 1aaf6e2a3b39..3eb8b85f6afb 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+@@ -1464,7 +1464,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+ int i;
+
+ /* Find the stopped queue the same way the stack does */
+- for (i = 0; i < ndev->real_num_tx_queues; i++) {
++ for (i = 0; i < ndev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
+@@ -2605,9 +2605,10 @@ err_free_chain:
+ cur_chain = head->next;
+ while (cur_chain) {
+ chain = cur_chain->next;
+- devm_kfree(&pdev->dev, chain);
++ devm_kfree(&pdev->dev, cur_chain);
+ cur_chain = chain;
+ }
++ head->next = NULL;
+
+ return -ENOMEM;
+ }
+@@ -2642,7 +2643,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int ret = 0;
+- u16 i;
++ int i;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+@@ -2679,7 +2680,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+ ret = hns3_get_vector_ring_chain(tqp_vector,
+ &vector_ring_chain);
+ if (ret)
+- return ret;
++ goto map_ring_fail;
+
+ ret = h->ae_algo->ops->map_ring_to_vector(h,
+ tqp_vector->vector_irq, &vector_ring_chain);
+@@ -2687,13 +2688,19 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+ hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+
+ if (ret)
+- return ret;
++ goto map_ring_fail;
+
+ netif_napi_add(priv->netdev, &tqp_vector->napi,
+ hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+ }
+
+ return 0;
++
++map_ring_fail:
++ while (i--)
++ netif_napi_del(&priv->tqp_vector[i].napi);
++
++ return ret;
+ }
+
+ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+index f8cc8d1f0b20..d575dd9a329d 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+@@ -4833,6 +4833,7 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
++ u16 bmap_index;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+@@ -4855,8 +4856,10 @@ static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+- req->vf_bitmap[req->vf_offset] =
+- 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
++ bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
++ HCLGE_VF_NUM_PER_BYTE;
++ req->vf_bitmap[bmap_index] =
++ 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+@@ -4873,6 +4876,7 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
++ u16 bmap_index;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+@@ -4888,8 +4892,10 @@ static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+- req->vf_bitmap[req->vf_offset] =
+- 1 << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
++ bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
++ HCLGE_VF_NUM_PER_BYTE;
++ req->vf_bitmap[bmap_index] =
++ 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+@@ -5922,18 +5928,17 @@ static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+- return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
++ return min_t(u32, hdev->rss_size_max,
++ vport->alloc_tqps / kinfo->num_tc);
+ }
+
+ static void hclge_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+ {
+- struct hclge_vport *vport = hclge_get_vport(handle);
+-
+ ch->max_combined = hclge_get_max_channels(handle);
+ ch->other_count = 1;
+ ch->max_other = 1;
+- ch->combined_count = vport->alloc_tqps;
++ ch->combined_count = handle->kinfo.rss_size;
+ }
+
+ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
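In the hclge VLAN offload hunks, the byte index into vf_bitmap[] is now derived from the vport id's position within the current command rather than from vf_offset. A standalone sketch of that index arithmetic; the constants here are placeholders, the real HCLGE_VF_NUM_PER_CMD and HCLGE_VF_NUM_PER_BYTE values live in the driver headers:

#include <stdio.h>

/* placeholder values, standing in for the driver's constants */
#define VF_NUM_PER_CMD  64
#define VF_NUM_PER_BYTE 8

struct vf_bitmap_pos {
	unsigned int cmd;   /* which command descriptor */
	unsigned int byte;  /* which byte inside that descriptor's bitmap */
	unsigned int bit;   /* which bit inside that byte */
};

static struct vf_bitmap_pos vf_pos(unsigned int vport_id)
{
	struct vf_bitmap_pos p;

	p.cmd  = vport_id / VF_NUM_PER_CMD;
	p.byte = vport_id % VF_NUM_PER_CMD / VF_NUM_PER_BYTE;
	p.bit  = vport_id % VF_NUM_PER_BYTE;
	return p;
}

int main(void)
{
	unsigned int ids[] = { 0, 7, 8, 63, 64, 130 };

	for (unsigned int i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
		struct vf_bitmap_pos p = vf_pos(ids[i]);

		printf("vport %3u -> cmd %u, byte %u, bit %u\n",
		       ids[i], p.cmd, p.byte, p.bit);
	}
	return 0;
}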
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+index 260b1e779690..d14b7018fdf3 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+@@ -600,7 +600,7 @@ struct hclge_vport {
+ u16 alloc_rss_size;
+
+ u16 qs_offset;
+- u16 bw_limit; /* VSI BW Limit (0 = disabled) */
++ u32 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u8 dwrr;
+
+ struct hclge_tx_vtag_cfg txvlan_cfg;
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+index e08e82020402..997ca79ed892 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+@@ -181,12 +181,10 @@ static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+ return ret;
+
+ ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+- if (ret)
+- return ret;
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+- return 0;
++ return ret;
+ }
+
+ static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+index 67db19709dea..fd5375b5991b 100644
+--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
++++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+@@ -1957,7 +1957,8 @@ static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+
+- return min_t(u32, hdev->rss_size_max * kinfo->num_tc, hdev->num_tqps);
++ return min_t(u32, hdev->rss_size_max,
++ hdev->num_tqps / kinfo->num_tc);
+ }
+
+ /**
+@@ -1978,7 +1979,7 @@ static void hclgevf_get_channels(struct hnae3_handle *handle,
+ ch->max_combined = hclgevf_get_max_channels(hdev);
+ ch->other_count = 0;
+ ch->max_other = 0;
+- ch->combined_count = hdev->num_tqps;
++ ch->combined_count = handle->kinfo.rss_size;
+ }
+
+ static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
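Both the hclge and hclgevf hunks now report max_combined as the per-TC queue budget capped by rss_size_max, and combined_count as the active RSS size. A small sketch of that ethtool channel accounting, with example numbers only:

#include <stdio.h>

static unsigned int min_u32(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* ethtool -l style accounting: the per-TC queue budget caps the combined
 * channel count, and the current count is the active RSS size */
static void report_channels(unsigned int rss_size_max, unsigned int num_tqps,
			    unsigned int num_tc, unsigned int rss_size)
{
	unsigned int max_combined = min_u32(rss_size_max, num_tqps / num_tc);

	printf("max combined: %u, combined: %u\n", max_combined, rss_size);
}

int main(void)
{
	report_channels(16, 16, 4, 4);  /* 4 TCs share 16 queues -> max 4 */
	report_channels(16, 32, 2, 8);  /* 2 TCs, 32 queues -> capped at 16 */
	return 0;
}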
+diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+index e8ee69d4e4d3..0f799e8e093c 100644
+--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
+@@ -1464,7 +1464,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
+
+ memset(pr, 0, sizeof(struct ehea_port_res));
+
+- pr->tx_bytes = rx_bytes;
++ pr->tx_bytes = tx_bytes;
+ pr->tx_packets = tx_packets;
+ pr->rx_bytes = rx_bytes;
+ pr->rx_packets = rx_packets;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
+index 85f75b5978fc..eb0ae6ab01e2 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
+@@ -1668,25 +1668,15 @@ enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+ return status;
+ }
+
+-/**
+- * i40e_set_fc
+- * @hw: pointer to the hw struct
+- * @aq_failures: buffer to return AdminQ failure information
+- * @atomic_restart: whether to enable atomic link restart
+- *
+- * Set the requested flow control mode using set_phy_config.
+- **/
+-enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+- bool atomic_restart)
++static noinline_for_stack enum i40e_status_code
++i40e_set_fc_status(struct i40e_hw *hw,
++ struct i40e_aq_get_phy_abilities_resp *abilities,
++ bool atomic_restart)
+ {
+- enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+- struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+- enum i40e_status_code status;
++ enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+ u8 pause_mask = 0x0;
+
+- *aq_failures = 0x0;
+-
+ switch (fc_mode) {
+ case I40E_FC_FULL:
+ pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+@@ -1702,6 +1692,48 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ break;
+ }
+
++ memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
++ /* clear the old pause settings */
++ config.abilities = abilities->abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
++ ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
++ /* set the new abilities */
++ config.abilities |= pause_mask;
++ /* If the abilities have changed, then set the new config */
++ if (config.abilities == abilities->abilities)
++ return 0;
++
++ /* Auto restart link so settings take effect */
++ if (atomic_restart)
++ config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
++ /* Copy over all the old settings */
++ config.phy_type = abilities->phy_type;
++ config.phy_type_ext = abilities->phy_type_ext;
++ config.link_speed = abilities->link_speed;
++ config.eee_capability = abilities->eee_capability;
++ config.eeer = abilities->eeer_val;
++ config.low_power_ctrl = abilities->d3_lpan;
++ config.fec_config = abilities->fec_cfg_curr_mod_ext_info &
++ I40E_AQ_PHY_FEC_CONFIG_MASK;
++
++ return i40e_aq_set_phy_config(hw, &config, NULL);
++}
++
++/**
++ * i40e_set_fc
++ * @hw: pointer to the hw struct
++ * @aq_failures: buffer to return AdminQ failure information
++ * @atomic_restart: whether to enable atomic link restart
++ *
++ * Set the requested flow control mode using set_phy_config.
++ **/
++enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
++ bool atomic_restart)
++{
++ struct i40e_aq_get_phy_abilities_resp abilities;
++ enum i40e_status_code status;
++
++ *aq_failures = 0x0;
++
+ /* Get the current phy config */
+ status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+@@ -1710,31 +1742,10 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+ return status;
+ }
+
+- memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+- /* clear the old pause settings */
+- config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+- ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+- /* set the new abilities */
+- config.abilities |= pause_mask;
+- /* If the abilities have changed, then set the new config */
+- if (config.abilities != abilities.abilities) {
+- /* Auto restart link so settings take effect */
+- if (atomic_restart)
+- config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+- /* Copy over all the old settings */
+- config.phy_type = abilities.phy_type;
+- config.phy_type_ext = abilities.phy_type_ext;
+- config.link_speed = abilities.link_speed;
+- config.eee_capability = abilities.eee_capability;
+- config.eeer = abilities.eeer_val;
+- config.low_power_ctrl = abilities.d3_lpan;
+- config.fec_config = abilities.fec_cfg_curr_mod_ext_info &
+- I40E_AQ_PHY_FEC_CONFIG_MASK;
+- status = i40e_aq_set_phy_config(hw, &config, NULL);
++ status = i40e_set_fc_status(hw, &abilities, atomic_restart);
++ if (status)
++ *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+
+- if (status)
+- *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+- }
+ /* Update the link info */
+ status = i40e_update_link_info(hw);
+ if (status) {
+@@ -2563,7 +2574,7 @@ i40e_status i40e_get_link_status(struct i40e_hw *hw, bool *link_up)
+ * i40e_updatelink_status - update status of the HW network link
+ * @hw: pointer to the hw struct
+ **/
+-i40e_status i40e_update_link_info(struct i40e_hw *hw)
++noinline_for_stack i40e_status i40e_update_link_info(struct i40e_hw *hw)
+ {
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ i40e_status status = 0;
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+index b27f7a968820..49e6d66ccf80 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+@@ -114,7 +114,6 @@ static void ixgbe_ipsec_set_rx_ip(struct ixgbe_hw *hw, u16 idx, __be32 addr[])
+ **/
+ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
+ {
+- struct ixgbe_ipsec *ipsec = adapter->ipsec;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 buf[4] = {0, 0, 0, 0};
+ u16 idx;
+@@ -133,9 +132,6 @@ static void ixgbe_ipsec_clear_hw_tables(struct ixgbe_adapter *adapter)
+ ixgbe_ipsec_set_tx_sa(hw, idx, buf, 0);
+ ixgbe_ipsec_set_rx_sa(hw, idx, 0, buf, 0, 0, 0);
+ }
+-
+- ipsec->num_rx_sa = 0;
+- ipsec->num_tx_sa = 0;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index de65ca1e6558..51cd58fbab69 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -1822,13 +1822,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
+ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+ struct sk_buff *skb)
+ {
+- /* if the page was released unmap it, else just sync our portion */
+- if (unlikely(IXGBE_CB(skb)->page_released)) {
+- dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
+- ixgbe_rx_pg_size(rx_ring),
+- DMA_FROM_DEVICE,
+- IXGBE_RX_DMA_ATTR);
+- } else if (ring_uses_build_skb(rx_ring)) {
++ if (ring_uses_build_skb(rx_ring)) {
+ unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK;
+
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+@@ -1845,6 +1839,14 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
+ skb_frag_size(frag),
+ DMA_FROM_DEVICE);
+ }
++
++ /* If the page was released, just unmap it. */
++ if (unlikely(IXGBE_CB(skb)->page_released)) {
++ dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma,
++ ixgbe_rx_pg_size(rx_ring),
++ DMA_FROM_DEVICE,
++ IXGBE_RX_DMA_ATTR);
++ }
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 9cbc4173973e..044687a1f27c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1364,8 +1364,14 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
+
+ skb->protocol = *((__be16 *)(skb->data));
+
+- skb->ip_summed = CHECKSUM_COMPLETE;
+- skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
++ if (netdev->features & NETIF_F_RXCSUM) {
++ skb->ip_summed = CHECKSUM_COMPLETE;
++ skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
++ stats->csum_complete++;
++ } else {
++ skb->ip_summed = CHECKSUM_NONE;
++ stats->csum_none++;
++ }
+
+ if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
+ skb_hwtstamps(skb)->hwtstamp =
+@@ -1384,7 +1390,6 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
+
+ skb->dev = netdev;
+
+- stats->csum_complete++;
+ stats->packets++;
+ stats->bytes += cqe_bcnt;
+ }
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+index 436a8136f26f..310f9e7d8320 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+@@ -289,7 +289,6 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
+ const char *event_name;
+ bool teardown = false;
+ unsigned long flags;
+- u32 fpga_qpn;
+ u8 syndrome;
+
+ switch (event) {
+@@ -300,7 +299,6 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data)
+ case MLX5_EVENT_TYPE_FPGA_QP_ERROR:
+ syndrome = MLX5_GET(fpga_qp_error_event, data, syndrome);
+ event_name = mlx5_fpga_qp_syndrome_to_string(syndrome);
+- fpga_qpn = MLX5_GET(fpga_qp_error_event, data, fpga_qpn);
+ break;
+ default:
+ mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n",
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 82a53317285d..b16e0f45d28c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -469,6 +469,7 @@ static void del_hw_fte(struct fs_node *node)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
++ node->active = 0;
+ }
+ }
+
+@@ -1597,6 +1598,11 @@ lookup_fte_locked(struct mlx5_flow_group *g,
+ fte_tmp = NULL;
+ goto out;
+ }
++ if (!fte_tmp->node.active) {
++ tree_put_node(&fte_tmp->node);
++ fte_tmp = NULL;
++ goto out;
++ }
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+ out:
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+index f33707ce8b6b..479ac21cdbc6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+@@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev,
+ {
+ struct mlx5_qp_table *table = &dev->priv.qp_table;
+ struct mlx5_core_rsc_common *common;
++ unsigned long flags;
+
+- spin_lock(&table->lock);
++ spin_lock_irqsave(&table->lock, flags);
+
+ common = radix_tree_lookup(&table->tree, rsn);
+ if (common)
+ atomic_inc(&common->refcount);
+
+- spin_unlock(&table->lock);
++ spin_unlock_irqrestore(&table->lock, flags);
+
+ if (!common) {
+ mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n",
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+index aee58b3892f2..c9895876a231 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
++++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
+@@ -3215,7 +3215,7 @@ static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
+ * Configures the ETS elements.
+ */
+ #define MLXSW_REG_QEEC_ID 0x400D
+-#define MLXSW_REG_QEEC_LEN 0x1C
++#define MLXSW_REG_QEEC_LEN 0x20
+
+ MLXSW_REG_DEFINE(qeec, MLXSW_REG_QEEC_ID, MLXSW_REG_QEEC_LEN);
+
+@@ -3257,6 +3257,15 @@ MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
+ */
+ MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
+
++/* reg_qeec_mise
++ * Min shaper configuration enable. Enables configuration of the min
++ * shaper on this ETS element
++ * 0 - Disable
++ * 1 - Enable
++ * Access: RW
++ */
++MLXSW_ITEM32(reg, qeec, mise, 0x0C, 31, 1);
++
+ enum {
+ MLXSW_REG_QEEC_BYTES_MODE,
+ MLXSW_REG_QEEC_PACKETS_MODE,
+@@ -3273,6 +3282,17 @@ enum {
+ */
+ MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
+
++/* The smallest permitted min shaper rate. */
++#define MLXSW_REG_QEEC_MIS_MIN 200000 /* Kbps */
++
++/* reg_qeec_min_shaper_rate
++ * Min shaper information rate.
++ * For CPU port, can only be configured for port hierarchy.
++ * When in bytes mode, value is specified in units of 1000bps.
++ * Access: RW
++ */
++MLXSW_ITEM32(reg, qeec, min_shaper_rate, 0x0C, 0, 28);
++
+ /* reg_qeec_mase
+ * Max shaper configuration enable. Enables configuration of the max
+ * shaper on this ETS element.
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+index 30ef318b3d68..5df9b25cab27 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+@@ -2753,6 +2753,21 @@ int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+ }
+
++static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
++ enum mlxsw_reg_qeec_hr hr, u8 index,
++ u8 next_index, u32 minrate)
++{
++ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
++ char qeec_pl[MLXSW_REG_QEEC_LEN];
++
++ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
++ next_index);
++ mlxsw_reg_qeec_mise_set(qeec_pl, true);
++ mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);
++
++ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
++}
++
+ int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 switch_prio, u8 tclass)
+ {
+@@ -2830,6 +2845,16 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+ return err;
+ }
+
++ /* Configure the min shaper for multicast TCs. */
++ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
++ err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
++ MLXSW_REG_QEEC_HIERARCY_TC,
++ i + 8, i,
++ MLXSW_REG_QEEC_MIS_MIN);
++ if (err)
++ return err;
++ }
++
+ /* Map all priorities to traffic class 0. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
+diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
+index c805dcbebd02..5f1875fe47cd 100644
+--- a/drivers/net/ethernet/natsemi/sonic.c
++++ b/drivers/net/ethernet/natsemi/sonic.c
+@@ -231,9 +231,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
+
+ laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
+ if (!laddr) {
+- printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
+- dev_kfree_skb(skb);
+- return NETDEV_TX_BUSY;
++ pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
++ dev_kfree_skb_any(skb);
++ return NETDEV_TX_OK;
+ }
+
+ sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
+diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+index 4e18d95e548f..c3ce0fb47a0f 100644
+--- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c
++++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c
+@@ -326,7 +326,18 @@ __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
+ return;
+ }
+
+- if (sc == SHF_SC_L_SHF)
++ /* The NFP shift instruction has a quirk: if the shift direction is
++ * left, then a shift amount of 1 to 31 is specified as 32 minus the
++ * amount to shift.
++ *
++ * There is no need to do this for an indirect shift, whose shift
++ * amount is 0. Even after this subtraction, a shift amount of 0 would
++ * become 32, which would eventually be encoded the same as 0 because
++ * only the low 5 bits are encoded; however, a shift amount of 32 fails
++ * the FIELD_PREP check done later on the shift mask (0x1f), since 32
++ * is out of the mask range.
++ */
++ if (sc == SHF_SC_L_SHF && shift)
+ shift = 32 - shift;
+
+ insn = OP_SHF_BASE |
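The comment added to __emit_shf() describes the left-shift encoding; below is a standalone sketch of that rule and of why the zero (indirect) case must be skipped. Only the 0x1f field mask is taken from the code above, everything else is illustrative:

#include <stdio.h>

#define SHIFT_MASK 0x1f

/* left-shift amounts 1..31 are encoded as (32 - amount); 0, used for
 * indirect shifts, must stay 0 or it would become 32 and overflow the
 * 5-bit field */
static int encode_left_shift(unsigned int shift, unsigned int *encoded)
{
	if (shift)
		shift = 32 - shift;
	if (shift & ~SHIFT_MASK)
		return -1;	/* would fail the field width check */
	*encoded = shift;
	return 0;
}

int main(void)
{
	unsigned int samples[] = { 0, 1, 5, 31 };
	unsigned int enc;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		if (encode_left_shift(samples[i], &enc) == 0)
			printf("shift %2u -> encoded %2u\n", samples[i], enc);
		else
			printf("shift %2u -> rejected\n", samples[i]);
	}
	return 0;
}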
+diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+index 44d3ea75d043..ab602a79b084 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
++++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+@@ -423,7 +423,7 @@
+ #define NFP_NET_CFG_MBOX_SIMPLE_CMD 0x0
+ #define NFP_NET_CFG_MBOX_SIMPLE_RET 0x4
+ #define NFP_NET_CFG_MBOX_SIMPLE_VAL 0x8
+-#define NFP_NET_CFG_MBOX_SIMPLE_LEN 0x12
++#define NFP_NET_CFG_MBOX_SIMPLE_LEN 12
+
+ #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1
+ #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2
+diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
+index 76efed058f33..a791d7932b0e 100644
+--- a/drivers/net/ethernet/ni/nixge.c
++++ b/drivers/net/ethernet/ni/nixge.c
+@@ -1233,7 +1233,7 @@ static int nixge_probe(struct platform_device *pdev)
+ }
+
+ priv->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+- if (priv->phy_mode < 0) {
++ if ((int)priv->phy_mode < 0) {
+ netdev_err(ndev, "not find \"phy-mode\" property\n");
+ err = -EINVAL;
+ goto unregister_mdio;
+diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
+index 8a31a02c9f47..65f69e562618 100644
+--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
++++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
+@@ -1053,7 +1053,6 @@ static int pasemi_mac_phy_init(struct net_device *dev)
+
+ dn = pci_device_to_OF_node(mac->pdev);
+ phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+- of_node_put(phy_dn);
+
+ mac->link = 0;
+ mac->speed = 0;
+@@ -1062,6 +1061,7 @@ static int pasemi_mac_phy_init(struct net_device *dev)
+ phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
+ PHY_INTERFACE_MODE_SGMII);
+
++ of_node_put(phy_dn);
+ if (!phydev) {
+ printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
+ return -ENODEV;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+index 7002a660b6b4..39787bb885c8 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+@@ -532,7 +532,8 @@ int qed_iwarp_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
+
+ /* Make sure ep is closed before returning and freeing memory. */
+ if (ep) {
+- while (ep->state != QED_IWARP_EP_CLOSED && wait_count++ < 200)
++ while (READ_ONCE(ep->state) != QED_IWARP_EP_CLOSED &&
++ wait_count++ < 200)
+ msleep(100);
+
+ if (ep->state != QED_IWARP_EP_CLOSED)
+@@ -1023,8 +1024,6 @@ qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
+
+ params.ep_context = ep;
+
+- ep->state = QED_IWARP_EP_CLOSED;
+-
+ switch (fw_return_code) {
+ case RDMA_RETURN_OK:
+ ep->qp->max_rd_atomic_req = ep->cm_info.ord;
+@@ -1084,6 +1083,10 @@ qed_iwarp_mpa_complete(struct qed_hwfn *p_hwfn,
+ break;
+ }
+
++ if (fw_return_code != RDMA_RETURN_OK)
++ /* paired with READ_ONCE in destroy_qp */
++ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
++
+ ep->event_cb(ep->cb_context, &params);
+
+ /* on passive side, if there is no associated QP (REJECT) we need to
+@@ -2638,6 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
+ cbs.rx_release_cb = qed_iwarp_ll2_rel_rx_pkt;
+ cbs.tx_comp_cb = qed_iwarp_ll2_comp_tx_pkt;
+ cbs.tx_release_cb = qed_iwarp_ll2_rel_tx_pkt;
++ cbs.slowpath_cb = NULL;
+ cbs.cookie = p_hwfn;
+
+ memset(&data, 0, sizeof(data));
+@@ -2828,7 +2832,9 @@ static void qed_iwarp_qp_in_error(struct qed_hwfn *p_hwfn,
+ params.status = (fw_return_code == IWARP_QP_IN_ERROR_GOOD_CLOSE) ?
+ 0 : -ECONNRESET;
+
+- ep->state = QED_IWARP_EP_CLOSED;
++ /* paired with READ_ONCE in destroy_qp */
++ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
++
+ spin_lock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+ list_del(&ep->list_entry);
+ spin_unlock_bh(&p_hwfn->p_rdma_info->iwarp.iw_lock);
+@@ -2917,7 +2923,8 @@ qed_iwarp_tcp_connect_unsuccessful(struct qed_hwfn *p_hwfn,
+ params.event = QED_IWARP_EVENT_ACTIVE_COMPLETE;
+ params.ep_context = ep;
+ params.cm_info = &ep->cm_info;
+- ep->state = QED_IWARP_EP_CLOSED;
++ /* paired with READ_ONCE in destroy_qp */
++ smp_store_release(&ep->state, QED_IWARP_EP_CLOSED);
+
+ switch (fw_return_code) {
+ case IWARP_CONN_ERROR_TCP_CONNECT_INVALID_PACKET:
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+index 64ac95ca4df2..d921b991dbdb 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
+@@ -1631,10 +1631,9 @@ static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
+ }
+ }
+
+-static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
+- struct qed_ptt *p_ptt,
+- struct qed_eth_stats *p_stats,
+- u16 statistics_bin)
++static noinline_for_stack void
++__qed_get_vport_pstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
++ struct qed_eth_stats *p_stats, u16 statistics_bin)
+ {
+ struct eth_pstorm_per_queue_stat pstats;
+ u32 pstats_addr = 0, pstats_len = 0;
+@@ -1661,10 +1660,9 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
+ HILO_64_REGPAIR(pstats.error_drop_pkts);
+ }
+
+-static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
+- struct qed_ptt *p_ptt,
+- struct qed_eth_stats *p_stats,
+- u16 statistics_bin)
++static noinline_for_stack void
++__qed_get_vport_tstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
++ struct qed_eth_stats *p_stats, u16 statistics_bin)
+ {
+ struct tstorm_per_port_stat tstats;
+ u32 tstats_addr, tstats_len;
+@@ -1709,10 +1707,9 @@ static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
+ }
+ }
+
+-static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
+- struct qed_ptt *p_ptt,
+- struct qed_eth_stats *p_stats,
+- u16 statistics_bin)
++static noinline_for_stack
++void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
++ struct qed_eth_stats *p_stats, u16 statistics_bin)
+ {
+ struct eth_ustorm_per_queue_stat ustats;
+ u32 ustats_addr = 0, ustats_len = 0;
+@@ -1751,10 +1748,9 @@ static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
+ }
+ }
+
+-static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
+- struct qed_ptt *p_ptt,
+- struct qed_eth_stats *p_stats,
+- u16 statistics_bin)
++static noinline_for_stack void
++__qed_get_vport_mstats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
++ struct qed_eth_stats *p_stats, u16 statistics_bin)
+ {
+ struct eth_mstorm_per_queue_stat mstats;
+ u32 mstats_addr = 0, mstats_len = 0;
+@@ -1780,9 +1776,9 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
+ HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
+ }
+
+-static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
+- struct qed_ptt *p_ptt,
+- struct qed_eth_stats *p_stats)
++static noinline_for_stack void
++__qed_get_vport_port_stats(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
++ struct qed_eth_stats *p_stats)
+ {
+ struct qed_eth_stats_common *p_common = &p_stats->common;
+ struct port_stats port_stats;
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 66b775d462fd..9d188931bc09 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -475,7 +475,6 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
+ u16 signature = 0;
+ u16 spi_config;
+ u16 wrbuf_space = 0;
+- static u16 reset_count;
+
+ if (event == QCASPI_EVENT_CPUON) {
+ /* Read signature twice, if not valid
+@@ -528,13 +527,13 @@ qcaspi_qca7k_sync(struct qcaspi *qca, int event)
+
+ qca->sync = QCASPI_SYNC_RESET;
+ qca->stats.trig_reset++;
+- reset_count = 0;
++ qca->reset_count = 0;
+ break;
+ case QCASPI_SYNC_RESET:
+- reset_count++;
++ qca->reset_count++;
+ netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
+- reset_count);
+- if (reset_count >= QCASPI_RESET_TIMEOUT) {
++ qca->reset_count);
++ if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
+ /* reset did not seem to take place, try again */
+ qca->sync = QCASPI_SYNC_UNKNOWN;
+ qca->stats.reset_timeout++;
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
+index fc0e98726b36..719c41227f22 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.h
++++ b/drivers/net/ethernet/qualcomm/qca_spi.h
+@@ -92,6 +92,7 @@ struct qcaspi {
+
+ unsigned int intr_req;
+ unsigned int intr_svc;
++ u16 reset_count;
+
+ #ifdef CONFIG_DEBUG_FS
+ struct dentry *device_root;
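The qca_spi hunks move reset_count from a function-local static into struct qcaspi, so the retry counter is no longer shared between adapters. A tiny contrast of the two patterns, with generic names rather than the driver's:

#include <stdio.h>

struct dev_state {
	unsigned int reset_count; /* per-device, like the new qcaspi member */
};

/* buggy pattern: one counter shared by every device that calls this */
static unsigned int count_reset_shared(void)
{
	static unsigned int reset_count;

	return ++reset_count;
}

/* fixed pattern: the counter lives in the device's own state */
static unsigned int count_reset_per_dev(struct dev_state *dev)
{
	return ++dev->reset_count;
}

int main(void)
{
	struct dev_state a = { 0 }, b = { 0 };
	unsigned int s1 = count_reset_shared();
	unsigned int s2 = count_reset_shared();

	printf("shared:  dev A %u, dev B %u (B inherits A's count)\n", s1, s2);
	printf("per-dev: dev A %u, dev B %u\n",
	       count_reset_per_dev(&a), count_reset_per_dev(&b));
	return 0;
}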
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 6068e96f5ac1..441643670ac0 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -3133,12 +3133,16 @@ static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+ struct device_node *np = dev->of_node;
+ struct sh_eth_plat_data *pdata;
+ const char *mac_addr;
++ int ret;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+- pdata->phy_interface = of_get_phy_mode(np);
++ ret = of_get_phy_mode(np);
++ if (ret < 0)
++ return NULL;
++ pdata->phy_interface = ret;
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
+index 28d582c18afb..027367b9cc48 100644
+--- a/drivers/net/ethernet/socionext/netsec.c
++++ b/drivers/net/ethernet/socionext/netsec.c
+@@ -432,9 +432,12 @@ static int netsec_mac_update_to_phy_state(struct netsec_priv *priv)
+ return 0;
+ }
+
++static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr);
++
+ static int netsec_phy_write(struct mii_bus *bus,
+ int phy_addr, int reg, u16 val)
+ {
++ int status;
+ struct netsec_priv *priv = bus->priv;
+
+ if (netsec_mac_write(priv, GMAC_REG_GDR, val))
+@@ -447,8 +450,19 @@ static int netsec_phy_write(struct mii_bus *bus,
+ GMAC_REG_SHIFT_CR_GAR)))
+ return -ETIMEDOUT;
+
+- return netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
+- NETSEC_GMAC_GAR_REG_GB);
++ status = netsec_mac_wait_while_busy(priv, GMAC_REG_GAR,
++ NETSEC_GMAC_GAR_REG_GB);
++
++ /* Developerbox implements RTL8211E PHY and there is
++ * a compatibility problem with F_GMAC4.
++ * RTL8211E expects the MDC clock to keep toggling for several
++ * clock cycles with MDIO high before entering the IDLE state.
++ * To meet this requirement, the netsec driver needs to issue a dummy
++ * read (e.g. of the PHYID1 register at offset 0x2) right after a write.
++ */
++ netsec_phy_read(bus, phy_addr, MII_PHYSID1);
++
++ return status;
+ }
+
+ static int netsec_phy_read(struct mii_bus *bus, int phy_addr, int reg_addr)
+@@ -1590,7 +1604,7 @@ static int netsec_probe(struct platform_device *pdev)
+ NETIF_MSG_LINK | NETIF_MSG_PROBE;
+
+ priv->phy_interface = device_get_phy_mode(&pdev->dev);
+- if (priv->phy_interface < 0) {
++ if ((int)priv->phy_interface < 0) {
+ dev_err(&pdev->dev, "missing required property 'phy-mode'\n");
+ ret = -ENODEV;
+ goto free_ndev;
+diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
+index 09d25b87cf7c..c309accc6797 100644
+--- a/drivers/net/ethernet/socionext/sni_ave.c
++++ b/drivers/net/ethernet/socionext/sni_ave.c
+@@ -1575,7 +1575,7 @@ static int ave_probe(struct platform_device *pdev)
+
+ np = dev->of_node;
+ phy_mode = of_get_phy_mode(np);
+- if (phy_mode < 0) {
++ if ((int)phy_mode < 0) {
+ dev_err(dev, "phy-mode not found\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+index 2c6d7c69c8f7..0d21082ceb93 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+@@ -191,7 +191,7 @@ static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+ struct device *dev = &gmac->pdev->dev;
+
+ gmac->phy_mode = of_get_phy_mode(dev->of_node);
+- if (gmac->phy_mode < 0) {
++ if ((int)gmac->phy_mode < 0) {
+ dev_err(dev, "missing phy mode property\n");
+ return -EINVAL;
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+index 94b46258e8ff..0a17535f13ae 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+@@ -355,7 +355,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
+
+ dwmac->dev = &pdev->dev;
+ dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+- if (dwmac->phy_mode < 0) {
++ if ((int)dwmac->phy_mode < 0) {
+ dev_err(&pdev->dev, "missing phy-mode property\n");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+index 48cf5e2b2441..bc8871e7351f 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+@@ -443,7 +443,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
+ }
+
+ /* Handle multiple unicast addresses */
+- if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
++ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
+ /* Switch to promiscuous mode if more than 128 addrs
+ * are required
+ */
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+index 3f4f3132e16b..e436fa160c7d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
+@@ -515,6 +515,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
+
+ if (!enable) {
+ val |= PPSCMDx(index, 0x5);
++ val |= PPSEN0;
+ writel(val, ioaddr + MAC_PPS_CONTROL);
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+index cc60b3fb0892..8f8b8f381ffd 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+@@ -174,7 +174,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
+ /* structure describing a PTP hardware clock */
+ static struct ptp_clock_info stmmac_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+- .name = "stmmac_ptp_clock",
++ .name = "stmmac ptp",
+ .max_adj = 62500000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 28764268a44f..b093f14eeec3 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1573,7 +1573,7 @@ static int axienet_probe(struct platform_device *pdev)
+ }
+ } else {
+ lp->phy_mode = of_get_phy_mode(pdev->dev.of_node);
+- if (lp->phy_mode < 0) {
++ if ((int)lp->phy_mode < 0) {
+ ret = -EINVAL;
+ goto free_netdev;
+ }
+diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
+index 50709c76b672..dfa801315da6 100644
+--- a/drivers/net/hyperv/hyperv_net.h
++++ b/drivers/net/hyperv/hyperv_net.h
+@@ -616,7 +616,8 @@ struct nvsp_5_send_indirect_table {
+ /* The number of entries in the send indirection table */
+ u32 count;
+
+- /* The offset of the send indirection table from top of this struct.
++ /* The offset of the send indirection table from the beginning of
++ * struct nvsp_message.
+ * The send indirection table tells which channel to put the send
+ * traffic on. Each entry is a channel number.
+ */
+diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
+index 35413041dcf8..dbfd3a0c97d3 100644
+--- a/drivers/net/hyperv/netvsc.c
++++ b/drivers/net/hyperv/netvsc.c
+@@ -1182,20 +1182,39 @@ static int netvsc_receive(struct net_device *ndev,
+ }
+
+ static void netvsc_send_table(struct net_device *ndev,
+- const struct nvsp_message *nvmsg)
++ struct netvsc_device *nvscdev,
++ const struct nvsp_message *nvmsg,
++ u32 msglen)
+ {
+ struct net_device_context *net_device_ctx = netdev_priv(ndev);
+- u32 count, *tab;
++ u32 count, offset, *tab;
+ int i;
+
+ count = nvmsg->msg.v5_msg.send_table.count;
++ offset = nvmsg->msg.v5_msg.send_table.offset;
++
+ if (count != VRSS_SEND_TAB_SIZE) {
+ netdev_err(ndev, "Received wrong send-table size:%u\n", count);
+ return;
+ }
+
+- tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
+- nvmsg->msg.v5_msg.send_table.offset);
++ /* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
++ * wrong due to a host bug. So fix the offset here.
++ */
++ if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
++ msglen >= sizeof(struct nvsp_message_header) +
++ sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
++ offset = sizeof(struct nvsp_message_header) +
++ sizeof(union nvsp_6_message_uber);
++
++ /* Boundary check for all versions */
++ if (offset > msglen - count * sizeof(u32)) {
++ netdev_err(ndev, "Received send-table offset too big:%u\n",
++ offset);
++ return;
++ }
++
++ tab = (void *)nvmsg + offset;
+
+ for (i = 0; i < count; i++)
+ net_device_ctx->tx_table[i] = tab[i];
+@@ -1213,12 +1232,14 @@ static void netvsc_send_vf(struct net_device *ndev,
+ net_device_ctx->vf_alloc ? "added" : "removed");
+ }
+
+-static void netvsc_receive_inband(struct net_device *ndev,
+- const struct nvsp_message *nvmsg)
++static void netvsc_receive_inband(struct net_device *ndev,
++ struct netvsc_device *nvscdev,
++ const struct nvsp_message *nvmsg,
++ u32 msglen)
+ {
+ switch (nvmsg->hdr.msg_type) {
+ case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
+- netvsc_send_table(ndev, nvmsg);
++ netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
+ break;
+
+ case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
+@@ -1235,6 +1256,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
+ int budget)
+ {
+ const struct nvsp_message *nvmsg = hv_pkt_data(desc);
++ u32 msglen = hv_pkt_datalen(desc);
+
+ trace_nvsp_recv(ndev, channel, nvmsg);
+
+@@ -1250,7 +1272,7 @@ static int netvsc_process_raw_pkt(struct hv_device *device,
+ break;
+
+ case VM_PKT_DATA_INBAND:
+- netvsc_receive_inband(ndev, nvmsg);
++ netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
+ break;
+
+ default:
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 1f9f7fcdb0eb..7ab576d8b622 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -295,9 +295,9 @@ static inline u32 netvsc_get_hash(
+ else if (flow.basic.n_proto == htons(ETH_P_IPV6))
+ hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
+ else
+- hash = 0;
++ return 0;
+
+- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
++ __skb_set_sw_hash(skb, hash, false);
+ }
+
+ return hash;
+@@ -804,8 +804,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
+ skb->protocol == htons(ETH_P_IP))
+ netvsc_comp_ipcsum(skb);
+
+- /* Do L4 checksum offload if enabled and present.
+- */
++ /* Do L4 checksum offload if enabled and present. */
+ if (csum_info && (net->features & NETIF_F_RXCSUM)) {
+ if (csum_info->receive.tcp_checksum_succeeded ||
+ csum_info->receive.udp_checksum_succeeded)
+@@ -2004,6 +2003,12 @@ static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
+ struct netvsc_vf_pcpu_stats *pcpu_stats
+ = this_cpu_ptr(ndev_ctx->vf_stats);
+
++ skb = skb_share_check(skb, GFP_ATOMIC);
++ if (unlikely(!skb))
++ return RX_HANDLER_CONSUMED;
++
++ *pskb = skb;
++
+ skb->dev = ndev;
+
+ u64_stats_update_begin(&pcpu_stats->syncp);
+diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
+index 67b260877f30..59820164502e 100644
+--- a/drivers/net/phy/fixed_phy.c
++++ b/drivers/net/phy/fixed_phy.c
+@@ -67,11 +67,11 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num)
+ do {
+ s = read_seqcount_begin(&fp->seqcount);
+ /* Issue callback if user registered it. */
+- if (fp->link_update) {
++ if (fp->link_update)
+ fp->link_update(fp->phydev->attached_dev,
+ &fp->status);
+- fixed_phy_update(fp);
+- }
++ /* Check the GPIO for change in status */
++ fixed_phy_update(fp);
+ state = fp->status;
+ } while (read_seqcount_retry(&fp->seqcount, s));
+
+diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
+index c5588d4508f9..5c89a310359d 100644
+--- a/drivers/net/phy/mdio_bus.c
++++ b/drivers/net/phy/mdio_bus.c
+@@ -56,11 +56,12 @@ static int mdiobus_register_gpiod(struct mdio_device *mdiodev)
+ gpiod = fwnode_get_named_gpiod(&mdiodev->dev.of_node->fwnode,
+ "reset-gpios", 0, GPIOD_OUT_LOW,
+ "PHY reset");
+- if (PTR_ERR(gpiod) == -ENOENT ||
+- PTR_ERR(gpiod) == -ENOSYS)
+- gpiod = NULL;
+- else if (IS_ERR(gpiod))
+- return PTR_ERR(gpiod);
++ if (IS_ERR(gpiod)) {
++ if (PTR_ERR(gpiod) == -ENOENT || PTR_ERR(gpiod) == -ENOSYS)
++ gpiod = NULL;
++ else
++ return PTR_ERR(gpiod);
++ }
+
+ mdiodev->reset = gpiod;
+
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 05a6ae32ff65..b4c67c3a928b 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -977,6 +977,7 @@ static struct phy_driver ksphy_driver[] = {
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .config_init = ksz9031_config_init,
++ .soft_reset = genphy_soft_reset,
+ .read_status = ksz9031_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 43c4f358eeb8..ae40d8137fd2 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -76,7 +76,7 @@ static LIST_HEAD(phy_fixup_list);
+ static DEFINE_MUTEX(phy_fixup_lock);
+
+ #ifdef CONFIG_PM
+-static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
++static bool mdio_bus_phy_may_suspend(struct phy_device *phydev, bool suspend)
+ {
+ struct device_driver *drv = phydev->mdio.dev.driver;
+ struct phy_driver *phydrv = to_phy_driver(drv);
+@@ -88,10 +88,11 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
+ /* PHY not attached? May suspend if the PHY has not already been
+ * suspended as part of a prior call to phy_disconnect() ->
+ * phy_detach() -> phy_suspend() because the parent netdev might be the
+- * MDIO bus driver and clock gated at this point.
++ * MDIO bus driver and clock gated at this point. Also may resume if
++ * PHY is not attached.
+ */
+ if (!netdev)
+- return !phydev->suspended;
++ return suspend ? !phydev->suspended : phydev->suspended;
+
+ if (netdev->wol_enabled)
+ return false;
+@@ -126,7 +127,7 @@ static int mdio_bus_phy_suspend(struct device *dev)
+ if (phydev->attached_dev && phydev->adjust_link)
+ phy_stop_machine(phydev);
+
+- if (!mdio_bus_phy_may_suspend(phydev))
++ if (!mdio_bus_phy_may_suspend(phydev, true))
+ return 0;
+
+ return phy_suspend(phydev);
+@@ -137,7 +138,7 @@ static int mdio_bus_phy_resume(struct device *dev)
+ struct phy_device *phydev = to_phy_device(dev);
+ int ret;
+
+- if (!mdio_bus_phy_may_suspend(phydev))
++ if (!mdio_bus_phy_may_suspend(phydev, false))
+ goto no_resume;
+
+ ret = phy_resume(phydev);
+@@ -1656,7 +1657,7 @@ int genphy_soft_reset(struct phy_device *phydev)
+ {
+ int ret;
+
+- ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
++ ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 613f36681853..df88981e796a 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -3496,7 +3496,6 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct vxlan_rdst *dst = &vxlan->default_dst;
+ struct vxlan_rdst old_dst;
+ struct vxlan_config conf;
+- struct vxlan_fdb *f = NULL;
+ int err;
+
+ err = vxlan_nl2conf(tb, data,
+@@ -3522,19 +3521,19 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
+ old_dst.remote_ifindex, 0);
+
+ if (!vxlan_addr_any(&dst->remote_ip)) {
+- err = vxlan_fdb_create(vxlan, all_zeros_mac,
++ err = vxlan_fdb_update(vxlan, all_zeros_mac,
+ &dst->remote_ip,
+ NUD_REACHABLE | NUD_PERMANENT,
++ NLM_F_APPEND | NLM_F_CREATE,
+ vxlan->cfg.dst_port,
+ dst->remote_vni,
+ dst->remote_vni,
+ dst->remote_ifindex,
+- NTF_SELF, &f);
++ NTF_SELF);
+ if (err) {
+ spin_unlock_bh(&vxlan->hash_lock);
+ return err;
+ }
+- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+ }
+ spin_unlock_bh(&vxlan->hash_lock);
+ }
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index 448e3a8c33a6..a09d7a07e90a 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -1,7 +1,7 @@
+ /*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+@@ -3853,7 +3853,7 @@ void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
+ ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
+ ret);
+ dma_unmap_single(ar->dev, paddr, skb->len,
+- DMA_FROM_DEVICE);
++ DMA_TO_DEVICE);
+ ieee80211_free_txskb(ar->hw, skb);
+ }
+ } else {
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index 686759b5613f..0ecaba824fb2 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -392,16 +392,11 @@ static int ath10k_sdio_mbox_rx_process_packet(struct ath10k *ar,
+ struct ath10k_htc_hdr *htc_hdr = (struct ath10k_htc_hdr *)skb->data;
+ bool trailer_present = htc_hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
+ enum ath10k_htc_ep_id eid;
+- u16 payload_len;
+ u8 *trailer;
+ int ret;
+
+- payload_len = le16_to_cpu(htc_hdr->len);
+- skb->len = payload_len + sizeof(struct ath10k_htc_hdr);
+-
+ if (trailer_present) {
+- trailer = skb->data + sizeof(*htc_hdr) +
+- payload_len - htc_hdr->trailer_len;
++ trailer = skb->data + skb->len - htc_hdr->trailer_len;
+
+ eid = pipe_id_to_eid(htc_hdr->eid);
+
+@@ -638,13 +633,31 @@ static int ath10k_sdio_mbox_rx_packet(struct ath10k *ar,
+ {
+ struct ath10k_sdio *ar_sdio = ath10k_sdio_priv(ar);
+ struct sk_buff *skb = pkt->skb;
++ struct ath10k_htc_hdr *htc_hdr;
+ int ret;
+
+ ret = ath10k_sdio_readsb(ar, ar_sdio->mbox_info.htc_addr,
+ skb->data, pkt->alloc_len);
++ if (ret)
++ goto out;
++
++ /* Update actual length. The original length may be incorrect,
++ * as the FW will bundle multiple packets as long as their sizes
++ * fit within the same aligned length (pkt->alloc_len).
++ */
++ htc_hdr = (struct ath10k_htc_hdr *)skb->data;
++ pkt->act_len = le16_to_cpu(htc_hdr->len) + sizeof(*htc_hdr);
++ if (pkt->act_len > pkt->alloc_len) {
++ ath10k_warn(ar, "rx packet too large (%zu > %zu)\n",
++ pkt->act_len, pkt->alloc_len);
++ ret = -EMSGSIZE;
++ goto out;
++ }
++
++ skb_put(skb, pkt->act_len);
++
++out:
+ pkt->status = ret;
+- if (!ret)
+- skb_put(skb, pkt->act_len);
+
+ return ret;
+ }
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+index cdc1e64d52ad..248decb494c2 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+@@ -2693,7 +2693,7 @@ ath10k_wmi_tlv_op_gen_mgmt_tx_send(struct ath10k *ar, struct sk_buff *msdu,
+ ieee80211_is_deauth(hdr->frame_control) ||
+ ieee80211_is_disassoc(hdr->frame_control)) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+- len += IEEE80211_CCMP_MIC_LEN;
++ skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
+ buf_len += IEEE80211_CCMP_MIC_LEN;
+ }
+
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index aefc92d2c09b..0f6ff7a78e49 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -1,7 +1,7 @@
+ /*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+@@ -2340,7 +2340,7 @@ static int wmi_process_mgmt_tx_comp(struct ath10k *ar, u32 desc_id,
+
+ msdu = pkt_addr->vaddr;
+ dma_unmap_single(ar->dev, pkt_addr->paddr,
+- msdu->len, DMA_FROM_DEVICE);
++ msdu->len, DMA_TO_DEVICE);
+ info = IEEE80211_SKB_CB(msdu);
+
+ if (status)
+diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
+index 6e236a485431..71b4888b30e7 100644
+--- a/drivers/net/wireless/ath/ath9k/dynack.c
++++ b/drivers/net/wireless/ath/ath9k/dynack.c
+@@ -300,9 +300,9 @@ void ath_dynack_node_init(struct ath_hw *ah, struct ath_node *an)
+
+ an->ackto = ackto;
+
+- spin_lock(&da->qlock);
++ spin_lock_bh(&da->qlock);
+ list_add_tail(&an->list, &da->nodes);
+- spin_unlock(&da->qlock);
++ spin_unlock_bh(&da->qlock);
+ }
+ EXPORT_SYMBOL(ath_dynack_node_init);
+
+@@ -316,9 +316,9 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an)
+ {
+ struct ath_dynack *da = &ah->dynack;
+
+- spin_lock(&da->qlock);
++ spin_lock_bh(&da->qlock);
+ list_del(&an->list);
+- spin_unlock(&da->qlock);
++ spin_unlock_bh(&da->qlock);
+ }
+ EXPORT_SYMBOL(ath_dynack_node_deinit);
+
+diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
+index 00098f24116d..6cd113b3b3e4 100644
+--- a/drivers/net/wireless/ath/wcn36xx/smd.c
++++ b/drivers/net/wireless/ath/wcn36xx/smd.c
+@@ -641,52 +641,58 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+ {
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+- struct wcn36xx_hal_start_scan_offload_req_msg msg_body;
++ struct wcn36xx_hal_start_scan_offload_req_msg *msg_body;
+ int ret, i;
+
+ if (req->ie_len > WCN36XX_MAX_SCAN_IE_LEN)
+ return -EINVAL;
+
+ mutex_lock(&wcn->hal_mutex);
+- INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
++ msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
++ if (!msg_body) {
++ ret = -ENOMEM;
++ goto out;
++ }
+
+- msg_body.scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
+- msg_body.min_ch_time = 30;
+- msg_body.max_ch_time = 100;
+- msg_body.scan_hidden = 1;
+- memcpy(msg_body.mac, vif->addr, ETH_ALEN);
+- msg_body.bss_type = vif_priv->bss_type;
+- msg_body.p2p_search = vif->p2p;
++ INIT_HAL_MSG((*msg_body), WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
+
+- msg_body.num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body.ssids));
+- for (i = 0; i < msg_body.num_ssid; i++) {
+- msg_body.ssids[i].length = min_t(u8, req->ssids[i].ssid_len,
+- sizeof(msg_body.ssids[i].ssid));
+- memcpy(msg_body.ssids[i].ssid, req->ssids[i].ssid,
+- msg_body.ssids[i].length);
++ msg_body->scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
++ msg_body->min_ch_time = 30;
++ msg_body->max_ch_time = 100;
++ msg_body->scan_hidden = 1;
++ memcpy(msg_body->mac, vif->addr, ETH_ALEN);
++ msg_body->bss_type = vif_priv->bss_type;
++ msg_body->p2p_search = vif->p2p;
++
++ msg_body->num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body->ssids));
++ for (i = 0; i < msg_body->num_ssid; i++) {
++ msg_body->ssids[i].length = min_t(u8, req->ssids[i].ssid_len,
++ sizeof(msg_body->ssids[i].ssid));
++ memcpy(msg_body->ssids[i].ssid, req->ssids[i].ssid,
++ msg_body->ssids[i].length);
+ }
+
+- msg_body.num_channel = min_t(u8, req->n_channels,
+- sizeof(msg_body.channels));
+- for (i = 0; i < msg_body.num_channel; i++)
+- msg_body.channels[i] = req->channels[i]->hw_value;
++ msg_body->num_channel = min_t(u8, req->n_channels,
++ sizeof(msg_body->channels));
++ for (i = 0; i < msg_body->num_channel; i++)
++ msg_body->channels[i] = req->channels[i]->hw_value;
+
+- msg_body.header.len -= WCN36XX_MAX_SCAN_IE_LEN;
++ msg_body->header.len -= WCN36XX_MAX_SCAN_IE_LEN;
+
+ if (req->ie_len > 0) {
+- msg_body.ie_len = req->ie_len;
+- msg_body.header.len += req->ie_len;
+- memcpy(msg_body.ie, req->ie, req->ie_len);
++ msg_body->ie_len = req->ie_len;
++ msg_body->header.len += req->ie_len;
++ memcpy(msg_body->ie, req->ie, req->ie_len);
+ }
+
+- PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
++ PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal start hw-scan (channels: %u; ssids: %u; p2p: %s)\n",
+- msg_body.num_channel, msg_body.num_ssid,
+- msg_body.p2p_search ? "yes" : "no");
++ msg_body->num_channel, msg_body->num_ssid,
++ msg_body->p2p_search ? "yes" : "no");
+
+- ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
++ ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_start_scan_offload failed\n");
+ goto out;
+@@ -698,6 +704,7 @@ int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ goto out;
+ }
+ out:
++ kfree(msg_body);
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+ }
+@@ -1257,96 +1264,104 @@ out:
+ static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+ const struct wcn36xx_hal_config_bss_req_msg *orig)
+ {
+- struct wcn36xx_hal_config_bss_req_msg_v1 msg_body;
+- struct wcn36xx_hal_config_bss_params_v1 *bss = &msg_body.bss_params;
+- struct wcn36xx_hal_config_sta_params_v1 *sta = &bss->sta;
++ struct wcn36xx_hal_config_bss_req_msg_v1 *msg_body;
++ struct wcn36xx_hal_config_bss_params_v1 *bss;
++ struct wcn36xx_hal_config_sta_params_v1 *sta;
++ int ret;
++
++ msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
++ if (!msg_body)
++ return -ENOMEM;
++
++ INIT_HAL_MSG((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ);
+
+- INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_BSS_REQ);
++ bss = &msg_body->bss_params;
++ sta = &bss->sta;
+
+ /* convert orig to v1 */
+- memcpy(&msg_body.bss_params.bssid,
++ memcpy(&msg_body->bss_params.bssid,
+ &orig->bss_params.bssid, ETH_ALEN);
+- memcpy(&msg_body.bss_params.self_mac_addr,
++ memcpy(&msg_body->bss_params.self_mac_addr,
+ &orig->bss_params.self_mac_addr, ETH_ALEN);
+
+- msg_body.bss_params.bss_type = orig->bss_params.bss_type;
+- msg_body.bss_params.oper_mode = orig->bss_params.oper_mode;
+- msg_body.bss_params.nw_type = orig->bss_params.nw_type;
++ msg_body->bss_params.bss_type = orig->bss_params.bss_type;
++ msg_body->bss_params.oper_mode = orig->bss_params.oper_mode;
++ msg_body->bss_params.nw_type = orig->bss_params.nw_type;
+
+- msg_body.bss_params.short_slot_time_supported =
++ msg_body->bss_params.short_slot_time_supported =
+ orig->bss_params.short_slot_time_supported;
+- msg_body.bss_params.lla_coexist = orig->bss_params.lla_coexist;
+- msg_body.bss_params.llb_coexist = orig->bss_params.llb_coexist;
+- msg_body.bss_params.llg_coexist = orig->bss_params.llg_coexist;
+- msg_body.bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
+- msg_body.bss_params.lln_non_gf_coexist =
++ msg_body->bss_params.lla_coexist = orig->bss_params.lla_coexist;
++ msg_body->bss_params.llb_coexist = orig->bss_params.llb_coexist;
++ msg_body->bss_params.llg_coexist = orig->bss_params.llg_coexist;
++ msg_body->bss_params.ht20_coexist = orig->bss_params.ht20_coexist;
++ msg_body->bss_params.lln_non_gf_coexist =
+ orig->bss_params.lln_non_gf_coexist;
+
+- msg_body.bss_params.lsig_tx_op_protection_full_support =
++ msg_body->bss_params.lsig_tx_op_protection_full_support =
+ orig->bss_params.lsig_tx_op_protection_full_support;
+- msg_body.bss_params.rifs_mode = orig->bss_params.rifs_mode;
+- msg_body.bss_params.beacon_interval = orig->bss_params.beacon_interval;
+- msg_body.bss_params.dtim_period = orig->bss_params.dtim_period;
+- msg_body.bss_params.tx_channel_width_set =
++ msg_body->bss_params.rifs_mode = orig->bss_params.rifs_mode;
++ msg_body->bss_params.beacon_interval = orig->bss_params.beacon_interval;
++ msg_body->bss_params.dtim_period = orig->bss_params.dtim_period;
++ msg_body->bss_params.tx_channel_width_set =
+ orig->bss_params.tx_channel_width_set;
+- msg_body.bss_params.oper_channel = orig->bss_params.oper_channel;
+- msg_body.bss_params.ext_channel = orig->bss_params.ext_channel;
++ msg_body->bss_params.oper_channel = orig->bss_params.oper_channel;
++ msg_body->bss_params.ext_channel = orig->bss_params.ext_channel;
+
+- msg_body.bss_params.reserved = orig->bss_params.reserved;
++ msg_body->bss_params.reserved = orig->bss_params.reserved;
+
+- memcpy(&msg_body.bss_params.ssid,
++ memcpy(&msg_body->bss_params.ssid,
+ &orig->bss_params.ssid,
+ sizeof(orig->bss_params.ssid));
+
+- msg_body.bss_params.action = orig->bss_params.action;
+- msg_body.bss_params.rateset = orig->bss_params.rateset;
+- msg_body.bss_params.ht = orig->bss_params.ht;
+- msg_body.bss_params.obss_prot_enabled =
++ msg_body->bss_params.action = orig->bss_params.action;
++ msg_body->bss_params.rateset = orig->bss_params.rateset;
++ msg_body->bss_params.ht = orig->bss_params.ht;
++ msg_body->bss_params.obss_prot_enabled =
+ orig->bss_params.obss_prot_enabled;
+- msg_body.bss_params.rmf = orig->bss_params.rmf;
+- msg_body.bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
+- msg_body.bss_params.dual_cts_protection =
++ msg_body->bss_params.rmf = orig->bss_params.rmf;
++ msg_body->bss_params.ht_oper_mode = orig->bss_params.ht_oper_mode;
++ msg_body->bss_params.dual_cts_protection =
+ orig->bss_params.dual_cts_protection;
+
+- msg_body.bss_params.max_probe_resp_retry_limit =
++ msg_body->bss_params.max_probe_resp_retry_limit =
+ orig->bss_params.max_probe_resp_retry_limit;
+- msg_body.bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
+- msg_body.bss_params.proxy_probe_resp =
++ msg_body->bss_params.hidden_ssid = orig->bss_params.hidden_ssid;
++ msg_body->bss_params.proxy_probe_resp =
+ orig->bss_params.proxy_probe_resp;
+- msg_body.bss_params.edca_params_valid =
++ msg_body->bss_params.edca_params_valid =
+ orig->bss_params.edca_params_valid;
+
+- memcpy(&msg_body.bss_params.acbe,
++ memcpy(&msg_body->bss_params.acbe,
+ &orig->bss_params.acbe,
+ sizeof(orig->bss_params.acbe));
+- memcpy(&msg_body.bss_params.acbk,
++ memcpy(&msg_body->bss_params.acbk,
+ &orig->bss_params.acbk,
+ sizeof(orig->bss_params.acbk));
+- memcpy(&msg_body.bss_params.acvi,
++ memcpy(&msg_body->bss_params.acvi,
+ &orig->bss_params.acvi,
+ sizeof(orig->bss_params.acvi));
+- memcpy(&msg_body.bss_params.acvo,
++ memcpy(&msg_body->bss_params.acvo,
+ &orig->bss_params.acvo,
+ sizeof(orig->bss_params.acvo));
+
+- msg_body.bss_params.ext_set_sta_key_param_valid =
++ msg_body->bss_params.ext_set_sta_key_param_valid =
+ orig->bss_params.ext_set_sta_key_param_valid;
+
+- memcpy(&msg_body.bss_params.ext_set_sta_key_param,
++ memcpy(&msg_body->bss_params.ext_set_sta_key_param,
+ &orig->bss_params.ext_set_sta_key_param,
+ sizeof(orig->bss_params.acvo));
+
+- msg_body.bss_params.wcn36xx_hal_persona =
++ msg_body->bss_params.wcn36xx_hal_persona =
+ orig->bss_params.wcn36xx_hal_persona;
+- msg_body.bss_params.spectrum_mgt_enable =
++ msg_body->bss_params.spectrum_mgt_enable =
+ orig->bss_params.spectrum_mgt_enable;
+- msg_body.bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
+- msg_body.bss_params.max_tx_power = orig->bss_params.max_tx_power;
++ msg_body->bss_params.tx_mgmt_power = orig->bss_params.tx_mgmt_power;
++ msg_body->bss_params.max_tx_power = orig->bss_params.max_tx_power;
+
+ wcn36xx_smd_convert_sta_to_v1(wcn, &orig->bss_params.sta,
+- &msg_body.bss_params.sta);
++ &msg_body->bss_params.sta);
+
+- PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
++ PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
+@@ -1358,7 +1373,10 @@ static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
+ sta->bssid, sta->action, sta->sta_index,
+ sta->bssid_index, sta->aid, sta->type, sta->mac);
+
+- return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
++ ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
++ kfree(msg_body);
++
++ return ret;
+ }
+
+
+@@ -1410,16 +1428,21 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, const u8 *bssid,
+ bool update)
+ {
+- struct wcn36xx_hal_config_bss_req_msg msg;
++ struct wcn36xx_hal_config_bss_req_msg *msg;
+ struct wcn36xx_hal_config_bss_params *bss;
+ struct wcn36xx_hal_config_sta_params *sta_params;
+ struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+- INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_BSS_REQ);
++ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
++ if (!msg) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ INIT_HAL_MSG((*msg), WCN36XX_HAL_CONFIG_BSS_REQ);
+
+- bss = &msg.bss_params;
++ bss = &msg->bss_params;
+ sta_params = &bss->sta;
+
+ WARN_ON(is_zero_ether_addr(bssid));
+@@ -1514,11 +1537,11 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ sta_params->mac);
+
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+- ret = wcn36xx_smd_config_bss_v1(wcn, &msg);
++ ret = wcn36xx_smd_config_bss_v1(wcn, msg);
+ } else {
+- PREPARE_HAL_BUF(wcn->hal_buf, msg);
++ PREPARE_HAL_BUF(wcn->hal_buf, (*msg));
+
+- ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
++ ret = wcn36xx_smd_send_and_wait(wcn, msg->header.len);
+ }
+ if (ret) {
+ wcn36xx_err("Sending hal_config_bss failed\n");
+@@ -1534,6 +1557,7 @@ int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
+ goto out;
+ }
+ out:
++ kfree(msg);
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+ }
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+index d2f788d88668..710dc59c5d34 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+@@ -617,15 +617,13 @@ int brcmf_sdiod_send_buf(struct brcmf_sdio_dev *sdiodev, u8 *buf, uint nbytes)
+
+ err = brcmf_sdiod_set_backplane_window(sdiodev, addr);
+ if (err)
+- return err;
++ goto out;
+
+ addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+- if (!err)
+- err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr,
+- mypkt);
+-
++ err = brcmf_sdiod_skbuff_write(sdiodev, sdiodev->func2, addr, mypkt);
++out:
+ brcmu_pkt_buf_free_skb(mypkt);
+
+ return err;
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+index c4965184cdf3..3d441c5c745c 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bus.h
+@@ -90,6 +90,7 @@ struct brcmf_bus_ops {
+ int (*get_memdump)(struct device *dev, void *data, size_t len);
+ int (*get_fwname)(struct device *dev, const char *ext,
+ unsigned char *fw_name);
++ void (*debugfs_create)(struct device *dev);
+ };
+
+
+@@ -235,6 +236,15 @@ int brcmf_bus_get_fwname(struct brcmf_bus *bus, const char *ext,
+ return bus->ops->get_fwname(bus->dev, ext, fw_name);
+ }
+
++static inline
++void brcmf_bus_debugfs_create(struct brcmf_bus *bus)
++{
++ if (!bus->ops->debugfs_create)
++ return;
++
++ return bus->ops->debugfs_create(bus->dev);
++}
++
+ /*
+ * interface functions from common layer
+ */
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+index 584e05fdca6a..9d7b8834b854 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+@@ -1105,6 +1105,7 @@ static int brcmf_bus_started(struct brcmf_pub *drvr, struct cfg80211_ops *ops)
+ brcmf_debugfs_add_entry(drvr, "revinfo", brcmf_revinfo_read);
+ brcmf_feat_debugfs_create(drvr);
+ brcmf_proto_debugfs_create(drvr);
++ brcmf_bus_debugfs_create(bus_if);
+
+ return 0;
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index abaed2fa2def..5c3b62e61980 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -3131,9 +3131,12 @@ static int brcmf_debugfs_sdio_count_read(struct seq_file *seq, void *data)
+ return 0;
+ }
+
+-static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
++static void brcmf_sdio_debugfs_create(struct device *dev)
+ {
+- struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr;
++ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
++ struct brcmf_pub *drvr = bus_if->drvr;
++ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
++ struct brcmf_sdio *bus = sdiodev->bus;
+ struct dentry *dentry = brcmf_debugfs_get_devdir(drvr);
+
+ if (IS_ERR_OR_NULL(dentry))
+@@ -3153,7 +3156,7 @@ static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
+ return 0;
+ }
+
+-static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
++static void brcmf_sdio_debugfs_create(struct device *dev)
+ {
+ }
+ #endif /* DEBUG */
+@@ -3438,8 +3441,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
+ if (bus->rxbuf)
+ bus->rxblen = value;
+
+- brcmf_sdio_debugfs_create(bus);
+-
+ /* the commands below use the terms tx and rx from
+ * a device perspective, ie. bus:txglom affects the
+ * bus transfers from device to host.
+@@ -4050,6 +4051,7 @@ static const struct brcmf_bus_ops brcmf_sdio_bus_ops = {
+ .get_ramsize = brcmf_sdio_bus_get_ramsize,
+ .get_memdump = brcmf_sdio_bus_get_memdump,
+ .get_fwname = brcmf_sdio_get_fwname,
++ .debugfs_create = brcmf_sdio_debugfs_create
+ };
+
+ #define BRCMF_SDIO_FW_CODE 0
+diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+index 6c5338364794..d22c1eefba6a 100644
+--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
++++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+@@ -165,7 +165,7 @@ struct iwl_nvm_access_resp {
+ */
+ struct iwl_nvm_get_info {
+ __le32 reserved;
+-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_S_VER_1 */
++} __packed; /* REGULATORY_NVM_GET_INFO_CMD_API_S_VER_1 */
+
+ /**
+ * enum iwl_nvm_info_general_flags - flags in NVM_GET_INFO resp
+@@ -180,14 +180,14 @@ enum iwl_nvm_info_general_flags {
+ * @flags: bit 0: 1 - empty, 0 - non-empty
+ * @nvm_version: nvm version
+ * @board_type: board type
+- * @reserved: reserved
++ * @n_hw_addrs: number of reserved MAC addresses
+ */
+ struct iwl_nvm_get_info_general {
+ __le32 flags;
+ __le16 nvm_version;
+ u8 board_type;
+- u8 reserved;
+-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_GENERAL_S_VER_1 */
++ u8 n_hw_addrs;
++} __packed; /* REGULATORY_NVM_GET_INFO_GENERAL_S_VER_2 */
+
+ /**
+ * enum iwl_nvm_mac_sku_flags - flags in &iwl_nvm_get_info_sku
+@@ -231,7 +231,7 @@ struct iwl_nvm_get_info_sku {
+ struct iwl_nvm_get_info_phy {
+ __le32 tx_chains;
+ __le32 rx_chains;
+-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
++} __packed; /* REGULATORY_NVM_GET_INFO_PHY_SKU_SECTION_S_VER_1 */
+
+ #define IWL_NUM_CHANNELS (51)
+
+@@ -245,7 +245,7 @@ struct iwl_nvm_get_info_regulatory {
+ __le32 lar_enabled;
+ __le16 channel_profile[IWL_NUM_CHANNELS];
+ __le16 reserved;
+-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
++} __packed; /* REGULATORY_NVM_GET_INFO_REGULATORY_S_VER_1 */
+
+ /**
+ * struct iwl_nvm_get_info_rsp - response to get NVM data
+@@ -259,7 +259,7 @@ struct iwl_nvm_get_info_rsp {
+ struct iwl_nvm_get_info_sku mac_sku;
+ struct iwl_nvm_get_info_phy phy_sku;
+ struct iwl_nvm_get_info_regulatory regulatory;
+-} __packed; /* GRP_REGULATORY_NVM_GET_INFO_CMD_RSP_S_VER_2 */
++} __packed; /* REGULATORY_NVM_GET_INFO_RSP_API_S_VER_3 */
+
+ /**
+ * struct iwl_nvm_access_complete_cmd - NVM_ACCESS commands are completed
+diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+index 73969dbeb5c5..b850cca9853c 100644
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+@@ -1315,6 +1315,7 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
+ bool lar_fw_supported = !iwlwifi_mod_params.lar_disable &&
+ fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
++ bool empty_otp;
+ u32 mac_flags;
+ u32 sbands_flags = 0;
+
+@@ -1330,7 +1331,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
+ }
+
+ rsp = (void *)hcmd.resp_pkt->data;
+- if (le32_to_cpu(rsp->general.flags) & NVM_GENERAL_FLAGS_EMPTY_OTP)
++ empty_otp = !!(le32_to_cpu(rsp->general.flags) &
++ NVM_GENERAL_FLAGS_EMPTY_OTP);
++ if (empty_otp)
+ IWL_INFO(trans, "OTP is empty\n");
+
+ nvm = kzalloc(sizeof(*nvm) +
+@@ -1354,6 +1357,11 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
+
+ /* Initialize general data */
+ nvm->nvm_version = le16_to_cpu(rsp->general.nvm_version);
++ nvm->n_hw_addrs = rsp->general.n_hw_addrs;
++ if (nvm->n_hw_addrs == 0)
++ IWL_WARN(trans,
++ "Firmware declares no reserved mac addresses. OTP is empty: %d\n",
++ empty_otp);
+
+ /* Initialize MAC sku data */
+ mac_flags = le32_to_cpu(rsp->mac_sku.mac_sku_flags);
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+index 9808d954dca2..c7e2b88cd5ab 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+@@ -110,12 +110,12 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
+ int i;
+ struct iwl_rss_config_cmd cmd = {
+ .flags = cpu_to_le32(IWL_RSS_ENABLE),
+- .hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
+- IWL_RSS_HASH_TYPE_IPV4_UDP |
+- IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
+- IWL_RSS_HASH_TYPE_IPV6_TCP |
+- IWL_RSS_HASH_TYPE_IPV6_UDP |
+- IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
++ .hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
++ BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
++ BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
++ BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
++ BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
++ BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
+ };
+
+ if (mvm->trans->num_rx_queues == 1)
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+index 036d1d82d93e..77e369453642 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+@@ -1077,7 +1077,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
+ rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
+- rx_status->ampdu_reference = mvm->ampdu_ref;
+ /* toggle is switched whenever new aggregation starts */
+ if (toggle_bit != mvm->ampdu_toggle) {
+ mvm->ampdu_ref++;
+@@ -1092,6 +1091,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
+ RX_FLAG_AMPDU_EOF_BIT;
+ }
+ }
++ rx_status->ampdu_reference = mvm->ampdu_ref;
+ }
+
+ rcu_read_lock();
+diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+index e850aa504b60..69057701641e 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+@@ -2462,7 +2462,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_tid_data *tid_data;
+ u16 normalized_ssn;
+- int txq_id;
++ u16 txq_id;
+ int ret;
+
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+@@ -2506,17 +2506,24 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ */
+ txq_id = mvmsta->tid_data[tid].txq_id;
+ if (txq_id == IWL_MVM_INVALID_QUEUE) {
+- txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+- IWL_MVM_DQA_MIN_DATA_QUEUE,
+- IWL_MVM_DQA_MAX_DATA_QUEUE);
+- if (txq_id < 0) {
+- ret = txq_id;
++ ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
++ IWL_MVM_DQA_MIN_DATA_QUEUE,
++ IWL_MVM_DQA_MAX_DATA_QUEUE);
++ if (ret < 0) {
+ IWL_ERR(mvm, "Failed to allocate agg queue\n");
+ goto release_locks;
+ }
+
++ txq_id = ret;
++
+ /* TXQ hasn't yet been enabled, so mark it only as reserved */
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
++ } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
++ ret = -ENXIO;
++ IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
++ tid, IWL_MAX_HW_QUEUES - 1);
++ goto out;
++
+ } else if (unlikely(mvm->queue_info[txq_id].status ==
+ IWL_MVM_QUEUE_SHARED)) {
+ ret = -ENXIO;
+diff --git a/drivers/net/wireless/marvell/libertas_tf/cmd.c b/drivers/net/wireless/marvell/libertas_tf/cmd.c
+index 909ac3685010..2b193f1257a5 100644
+--- a/drivers/net/wireless/marvell/libertas_tf/cmd.c
++++ b/drivers/net/wireless/marvell/libertas_tf/cmd.c
+@@ -69,7 +69,7 @@ static void lbtf_geo_init(struct lbtf_private *priv)
+ break;
+ }
+
+- for (ch = priv->range.start; ch < priv->range.end; ch++)
++ for (ch = range->start; ch < range->end; ch++)
+ priv->channels[CHAN_TO_IDX(ch)].flags = 0;
+ }
+
+diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
+index 8d40e92fb6f2..dcf927de65f3 100644
+--- a/drivers/net/wireless/mediatek/mt76/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/usb.c
+@@ -273,10 +273,16 @@ EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
+ void mt76u_buf_free(struct mt76u_buf *buf)
+ {
+ struct urb *urb = buf->urb;
++ struct scatterlist *sg;
+ int i;
+
+- for (i = 0; i < urb->num_sgs; i++)
+- skb_free_frag(sg_virt(&urb->sg[i]));
++ for (i = 0; i < urb->num_sgs; i++) {
++ sg = &urb->sg[i];
++ if (!sg)
++ continue;
++
++ skb_free_frag(sg_virt(sg));
++ }
+ usb_free_urb(buf->urb);
+ }
+ EXPORT_SYMBOL_GPL(mt76u_buf_free);
+@@ -478,7 +484,8 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
+ nsgs = 1;
+ }
+
+- for (i = 0; i < MT_NUM_RX_ENTRIES; i++) {
++ q->ndesc = MT_NUM_RX_ENTRIES;
++ for (i = 0; i < q->ndesc; i++) {
+ err = mt76u_buf_alloc(dev, &q->entry[i].ubuf,
+ nsgs, q->buf_size,
+ SKB_WITH_OVERHEAD(q->buf_size),
+@@ -486,7 +493,6 @@ static int mt76u_alloc_rx(struct mt76_dev *dev)
+ if (err < 0)
+ return err;
+ }
+- q->ndesc = MT_NUM_RX_ENTRIES;
+
+ return mt76u_submit_rx_buffers(dev);
+ }
+diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
+index b804abd464ae..539f78149e43 100644
+--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
++++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
+@@ -221,7 +221,7 @@ int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev)
+
+ do {
+ val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION);
+- if (val && ~val)
++ if (val && val != 0xff)
+ break;
+ } while (--i);
+
+diff --git a/drivers/net/wireless/realtek/rtlwifi/debug.c b/drivers/net/wireless/realtek/rtlwifi/debug.c
+index d70385be9976..498994041bbc 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/debug.c
++++ b/drivers/net/wireless/realtek/rtlwifi/debug.c
+@@ -109,7 +109,7 @@ static const struct file_operations file_ops_common = {
+ .open = dl_debug_open_common,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = seq_release,
++ .release = single_release,
+ };
+
+ static int rtl_debug_get_mac_page(struct seq_file *m, void *v)
+diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+index 317c1b3101da..ba258318ee9f 100644
+--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
++++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
+@@ -3404,75 +3404,6 @@ static void rtl8821ae_update_hal_rate_table(struct ieee80211_hw *hw,
+ "%x\n", rtl_read_dword(rtlpriv, REG_ARFR0));
+ }
+
+-static u8 _rtl8821ae_mrate_idx_to_arfr_id(
+- struct ieee80211_hw *hw, u8 rate_index,
+- enum wireless_mode wirelessmode)
+-{
+- struct rtl_priv *rtlpriv = rtl_priv(hw);
+- struct rtl_phy *rtlphy = &rtlpriv->phy;
+- u8 ret = 0;
+- switch (rate_index) {
+- case RATR_INX_WIRELESS_NGB:
+- if (rtlphy->rf_type == RF_1T1R)
+- ret = 1;
+- else
+- ret = 0;
+- ; break;
+- case RATR_INX_WIRELESS_N:
+- case RATR_INX_WIRELESS_NG:
+- if (rtlphy->rf_type == RF_1T1R)
+- ret = 5;
+- else
+- ret = 4;
+- ; break;
+- case RATR_INX_WIRELESS_NB:
+- if (rtlphy->rf_type == RF_1T1R)
+- ret = 3;
+- else
+- ret = 2;
+- ; break;
+- case RATR_INX_WIRELESS_GB:
+- ret = 6;
+- break;
+- case RATR_INX_WIRELESS_G:
+- ret = 7;
+- break;
+- case RATR_INX_WIRELESS_B:
+- ret = 8;
+- break;
+- case RATR_INX_WIRELESS_MC:
+- if ((wirelessmode == WIRELESS_MODE_B)
+- || (wirelessmode == WIRELESS_MODE_G)
+- || (wirelessmode == WIRELESS_MODE_N_24G)
+- || (wirelessmode == WIRELESS_MODE_AC_24G))
+- ret = 6;
+- else
+- ret = 7;
+- case RATR_INX_WIRELESS_AC_5N:
+- if (rtlphy->rf_type == RF_1T1R)
+- ret = 10;
+- else
+- ret = 9;
+- break;
+- case RATR_INX_WIRELESS_AC_24N:
+- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_80) {
+- if (rtlphy->rf_type == RF_1T1R)
+- ret = 10;
+- else
+- ret = 9;
+- } else {
+- if (rtlphy->rf_type == RF_1T1R)
+- ret = 11;
+- else
+- ret = 12;
+- }
+- break;
+- default:
+- ret = 0; break;
+- }
+- return ret;
+-}
+-
+ static u32 _rtl8821ae_rate_to_bitmap_2ssvht(__le16 vht_rate)
+ {
+ u8 i, j, tmp_rate;
+@@ -3761,7 +3692,7 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
+ break;
+ }
+
+- ratr_index = _rtl8821ae_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
++ ratr_index = rtl_mrate_idx_to_arfr_id(hw, ratr_index, wirelessmode);
+ sta_entry->ratr_index = ratr_index;
+ ratr_bitmap = _rtl8821ae_set_ra_vht_ratr_bitmap(hw, wirelessmode,
+ ratr_bitmap);
+diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
+index dbe72f116017..a67ef23e81bc 100644
+--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
++++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
+@@ -1105,9 +1105,9 @@ static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port,
+ }
+
+ /* Allocate memory for memory window descriptors */
+- ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt,
+- sizeof(*ret_mws), GFP_KERNEL);
+- if (IS_ERR_OR_NULL(ret_mws))
++ ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, sizeof(*ret_mws),
++ GFP_KERNEL);
++ if (!ret_mws)
+ return ERR_PTR(-ENOMEM);
+
+ /* Copy the info of detected memory windows */
+@@ -2390,7 +2390,7 @@ static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev,
+
+ /* Allocate memory for the IDT PCIe-device descriptor */
+ ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL);
+- if (IS_ERR_OR_NULL(ndev)) {
++ if (!ndev) {
+ dev_err(&pdev->dev, "Memory allocation failed for descriptor");
+ return ERR_PTR(-ENOMEM);
+ }
+diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+index 5ee5f40b4dfc..313f6258c424 100644
+--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
++++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+@@ -899,7 +899,7 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
+ }
+
+ sndev->peer_partition = ffs(tpart_vec) - 1;
+- if (!(part_map & (1 << sndev->peer_partition))) {
++ if (!(part_map & (1ULL << sndev->peer_partition))) {
+ dev_err(&sndev->stdev->dev,
+ "ntb target partition is not NT partition\n");
+ return -ENODEV;
+@@ -1120,7 +1120,7 @@ static int crosslink_enum_partition(struct switchtec_ntb *sndev,
+
+ dev_dbg(&sndev->stdev->dev,
+ "Crosslink BAR%d addr: %llx\n",
+- i, bar_addr);
++ i*2, bar_addr);
+
+ if (bar_addr != bar_space * i)
+ continue;
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 124f41157173..3c68a5b35ec1 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2396,7 +2396,7 @@ static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
+
+ static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
+ {
+- *val = readq(to_nvme_dev(ctrl)->bar + off);
++ *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
+ return 0;
+ }
+
+diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
+index 926d9cc080cf..09281aca86c2 100644
+--- a/drivers/nvmem/imx-ocotp.c
++++ b/drivers/nvmem/imx-ocotp.c
+@@ -50,7 +50,9 @@
+ #define IMX_OCOTP_BM_CTRL_ERROR 0x00000200
+ #define IMX_OCOTP_BM_CTRL_REL_SHADOWS 0x00000400
+
+-#define DEF_RELAX 20 /* > 16.5ns */
++#define TIMING_STROBE_PROG_US 10 /* Min time to blow a fuse */
++#define TIMING_STROBE_READ_NS 37 /* Min time before read */
++#define TIMING_RELAX_NS 17
+ #define DEF_FSOURCE 1001 /* > 1000 ns */
+ #define DEF_STROBE_PROG 10000 /* IPG clocks */
+ #define IMX_OCOTP_WR_UNLOCK 0x3E770000
+@@ -182,14 +184,41 @@ static void imx_ocotp_set_imx6_timing(struct ocotp_priv *priv)
+ * fields with timing values to match the current frequency of the
+ * ipg_clk. OTP writes will work at maximum bus frequencies as long
+ * as the HW_OCOTP_TIMING parameters are set correctly.
++ *
++ * Note: there are minimum timings required to ensure an OTP fuse burns
++ * correctly that are independent of the ipg_clk. Those values are not
++ * formally documented anywhere however, working from the minimum
++ * timings given in u-boot we can say:
++ *
++ * - Minimum STROBE_PROG time is 10 microseconds. Intuitively 10
++ * microseconds feels about right as representative of a minimum time
++ * to physically burn out a fuse.
++ *
++ * - Minimum STROBE_READ i.e. the time to wait post OTP fuse burn before
++ * performing another read is 37 nanoseconds
++ *
++ * - Minimum RELAX timing is 17 nanoseconds. This final RELAX minimum
++ * timing is not entirely clear the documentation says "This
++ * count value specifies the time to add to all default timing
++ * parameters other than the Tpgm and Trd. It is given in number
++ * of ipg_clk periods." where Tpgm and Trd refer to STROBE_PROG
++ * and STROBE_READ respectively. What the other timing parameters
++ * are though, is not specified. Experience shows a zero RELAX
++ * value will mess up a re-load of the shadow registers post OTP
++ * burn.
+ */
+ clk_rate = clk_get_rate(priv->clk);
+
+- relax = clk_rate / (1000000000 / DEF_RELAX) - 1;
+- strobe_prog = clk_rate / (1000000000 / 10000) + 2 * (DEF_RELAX + 1) - 1;
+- strobe_read = clk_rate / (1000000000 / 40) + 2 * (DEF_RELAX + 1) - 1;
++ relax = DIV_ROUND_UP(clk_rate * TIMING_RELAX_NS, 1000000000) - 1;
++ strobe_read = DIV_ROUND_UP(clk_rate * TIMING_STROBE_READ_NS,
++ 1000000000);
++ strobe_read += 2 * (relax + 1) - 1;
++ strobe_prog = DIV_ROUND_CLOSEST(clk_rate * TIMING_STROBE_PROG_US,
++ 1000000);
++ strobe_prog += 2 * (relax + 1) - 1;
+
+- timing = strobe_prog & 0x00000FFF;
++ timing = readl(priv->base + IMX_OCOTP_ADDR_TIMING) & 0x0FC00000;
++ timing |= strobe_prog & 0x00000FFF;
+ timing |= (relax << 12) & 0x0000F000;
+ timing |= (strobe_read << 16) & 0x003F0000;
+
+diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
+index 5ad1342f5682..7d2bc22680d9 100644
+--- a/drivers/of/of_mdio.c
++++ b/drivers/of/of_mdio.c
+@@ -370,7 +370,7 @@ struct phy_device *of_phy_get_and_connect(struct net_device *dev,
+ int ret;
+
+ iface = of_get_phy_mode(np);
+- if (iface < 0)
++ if ((int)iface < 0)
+ return NULL;
+ if (of_phy_is_fixed_link(np)) {
+ ret = of_phy_register_fixed_link(np);
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 1e80f9ec1aa6..34515f432375 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -793,6 +793,9 @@ static struct opp_table *_allocate_opp_table(struct device *dev)
+
+ INIT_LIST_HEAD(&opp_table->dev_list);
+
++ /* Mark regulator count uninitialized */
++ opp_table->regulator_count = -1;
++
+ opp_dev = _add_opp_dev(dev, opp_table);
+ if (!opp_dev) {
+ kfree(opp_table);
+@@ -955,7 +958,7 @@ struct dev_pm_opp *_opp_allocate(struct opp_table *table)
+ int count, supply_size;
+
+ /* Allocate space for at least one supply */
+- count = table->regulator_count ? table->regulator_count : 1;
++ count = table->regulator_count > 0 ? table->regulator_count : 1;
+ supply_size = sizeof(*opp->supplies) * count;
+
+ /* allocate new OPP node and supplies structures */
+@@ -1363,7 +1366,7 @@ free_regulators:
+
+ kfree(opp_table->regulators);
+ opp_table->regulators = NULL;
+- opp_table->regulator_count = 0;
++ opp_table->regulator_count = -1;
+ err:
+ dev_pm_opp_put_opp_table(opp_table);
+
+@@ -1392,7 +1395,7 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
+
+ kfree(opp_table->regulators);
+ opp_table->regulators = NULL;
+- opp_table->regulator_count = 0;
++ opp_table->regulator_count = -1;
+
+ put_opp_table:
+ dev_pm_opp_put_opp_table(opp_table);
+@@ -1545,6 +1548,9 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
+ if (!opp_table)
+ return -ENOMEM;
+
++ /* Fix regulator count for dynamic OPPs */
++ opp_table->regulator_count = 1;
++
+ ret = _opp_add_v1(opp_table, dev, freq, u_volt, true);
+
+ dev_pm_opp_put_opp_table(opp_table);
+diff --git a/drivers/opp/of.c b/drivers/opp/of.c
+index 20988c426650..d64a13d7881b 100644
+--- a/drivers/opp/of.c
++++ b/drivers/opp/of.c
+@@ -113,12 +113,10 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ struct opp_table *opp_table)
+ {
+ u32 *microvolt, *microamp = NULL;
+- int supplies, vcount, icount, ret, i, j;
++ int supplies = opp_table->regulator_count, vcount, icount, ret, i, j;
+ struct property *prop = NULL;
+ char name[NAME_MAX];
+
+- supplies = opp_table->regulator_count ? opp_table->regulator_count : 1;
+-
+ /* Search for "opp-microvolt-<name>" */
+ if (opp_table->prop_name) {
+ snprintf(name, sizeof(name), "opp-microvolt-%s",
+@@ -133,7 +131,13 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!prop) {
+- if (!opp_table->regulator_count)
++ if (unlikely(supplies == -1)) {
++ /* Initialize regulator_count */
++ opp_table->regulator_count = 0;
++ return 0;
++ }
++
++ if (!supplies)
+ return 0;
+
+ dev_err(dev, "%s: opp-microvolt missing although OPP managing regulators\n",
+@@ -142,6 +146,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
+ }
+ }
+
++ if (unlikely(supplies == -1)) {
++ /* Initialize regulator_count */
++ supplies = opp_table->regulator_count = 1;
++ } else if (unlikely(!supplies)) {
++ dev_err(dev, "%s: opp-microvolt wasn't expected\n", __func__);
++ return -EINVAL;
++ }
++
+ vcount = of_property_count_u32_elems(opp->np, name);
+ if (vcount < 0) {
+ dev_err(dev, "%s: Invalid %s property (%d)\n",
+diff --git a/drivers/opp/opp.h b/drivers/opp/opp.h
+index 7c540fd063b2..c9e65964ed84 100644
+--- a/drivers/opp/opp.h
++++ b/drivers/opp/opp.h
+@@ -136,7 +136,9 @@ enum opp_table_access {
+ * @prop_name: A name to postfix to many DT properties, while parsing them.
+ * @clk: Device's clock handle
+ * @regulators: Supply regulators
+- * @regulator_count: Number of power supply regulators
++ * @regulator_count: Number of power supply regulators. Its value can be -1
++ * (uninitialized), 0 (no opp-microvolt property) or > 0 (has opp-microvolt
++ * property).
+ * @genpd_performance_state: Device's power domain support performance state.
+ * @set_opp: Platform specific set_opp callback
+ * @set_opp_data: Data to be passed to set_opp callback
+@@ -172,7 +174,7 @@ struct opp_table {
+ const char *prop_name;
+ struct clk *clk;
+ struct regulator **regulators;
+- unsigned int regulator_count;
++ int regulator_count;
+ bool genpd_performance_state;
+
+ int (*set_opp)(struct dev_pm_set_opp_data *data);
+diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c
+index 739d97080d3b..a3d07d9c598b 100644
+--- a/drivers/pci/controller/dwc/pcie-designware-ep.c
++++ b/drivers/pci/controller/dwc/pcie-designware-ep.c
+@@ -46,16 +46,19 @@ static u8 __dw_pcie_ep_find_next_cap(struct dw_pcie *pci, u8 cap_ptr,
+ u8 cap_id, next_cap_ptr;
+ u16 reg;
+
++ if (!cap_ptr)
++ return 0;
++
+ reg = dw_pcie_readw_dbi(pci, cap_ptr);
+- next_cap_ptr = (reg & 0xff00) >> 8;
+ cap_id = (reg & 0x00ff);
+
+- if (!next_cap_ptr || cap_id > PCI_CAP_ID_MAX)
++ if (cap_id > PCI_CAP_ID_MAX)
+ return 0;
+
+ if (cap_id == cap)
+ return cap_ptr;
+
++ next_cap_ptr = (reg & 0xff00) >> 8;
+ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+ }
+
+@@ -67,9 +70,6 @@ static u8 dw_pcie_ep_find_capability(struct dw_pcie *pci, u8 cap)
+ reg = dw_pcie_readw_dbi(pci, PCI_CAPABILITY_LIST);
+ next_cap_ptr = (reg & 0x00ff);
+
+- if (!next_cap_ptr)
+- return 0;
+-
+ return __dw_pcie_ep_find_next_cap(pci, next_cap_ptr, cap);
+ }
+
+diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
+index 3160e9342a2f..9d5cbc75d5ae 100644
+--- a/drivers/pci/controller/pcie-iproc.c
++++ b/drivers/pci/controller/pcie-iproc.c
+@@ -630,14 +630,6 @@ static void __iomem *iproc_pcie_map_cfg_bus(struct iproc_pcie *pcie,
+ return (pcie->base + offset);
+ }
+
+- /*
+- * PAXC is connected to an internally emulated EP within the SoC. It
+- * allows only one device.
+- */
+- if (pcie->ep_is_internal)
+- if (slot > 0)
+- return NULL;
+-
+ return iproc_pcie_map_ep_cfg_reg(pcie, busno, slot, fn, where);
+ }
+
+@@ -1355,7 +1347,6 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
+ break;
+ case IPROC_PCIE_PAXB:
+ regs = iproc_pcie_reg_paxb;
+- pcie->iproc_cfg_read = true;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_ob_map;
+@@ -1364,6 +1355,7 @@ static int iproc_pcie_rev_init(struct iproc_pcie *pcie)
+ break;
+ case IPROC_PCIE_PAXB_V2:
+ regs = iproc_pcie_reg_paxb_v2;
++ pcie->iproc_cfg_read = true;
+ pcie->has_apb_err_disable = true;
+ if (pcie->need_ob_cfg) {
+ pcie->ob_map = paxb_v2_ob_map;
+diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
+index a2d1e89d4867..14f816591e84 100644
+--- a/drivers/pci/controller/pcie-mobiveil.c
++++ b/drivers/pci/controller/pcie-mobiveil.c
+@@ -174,7 +174,7 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+ * Do not read more than one device on the bus directly
+ * attached to RC
+ */
+- if ((bus->primary == pcie->root_bus_nr) && (devfn > 0))
++ if ((bus->primary == pcie->root_bus_nr) && (PCI_SLOT(devfn) > 0))
+ return false;
+
+ return true;
+@@ -395,7 +395,7 @@ static void program_ib_windows(struct mobiveil_pcie *pcie, int win_num,
+ int amap_ctrl_dw;
+ u64 size64 = ~(size - 1);
+
+- if ((pcie->ib_wins_configured + 1) > pcie->ppio_wins) {
++ if (win_num >= pcie->ppio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max inbound windows reached !\n");
+ return;
+@@ -429,7 +429,7 @@ static void program_ob_windows(struct mobiveil_pcie *pcie, int win_num,
+ u32 value, type;
+ u64 size64 = ~(size - 1);
+
+- if ((pcie->ob_wins_configured + 1) > pcie->apio_wins) {
++ if (win_num >= pcie->apio_wins) {
+ dev_err(&pcie->pdev->dev,
+ "ERROR: max outbound windows reached !\n");
+ return;
+@@ -643,7 +643,7 @@ static struct irq_chip mobiveil_msi_irq_chip = {
+
+ static struct msi_domain_info mobiveil_msi_domain_info = {
+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
++ MSI_FLAG_PCI_MSIX),
+ .chip = &mobiveil_msi_irq_chip,
+ };
+
+diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c
+index b8163c56a142..caf34661d38d 100644
+--- a/drivers/pci/controller/pcie-rockchip-ep.c
++++ b/drivers/pci/controller/pcie-rockchip-ep.c
+@@ -350,7 +350,7 @@ static void rockchip_pcie_ep_assert_intx(struct rockchip_pcie_ep *ep, u8 fn,
+ struct rockchip_pcie *rockchip = &ep->rockchip;
+ u32 r = ep->max_regions - 1;
+ u32 offset;
+- u16 status;
++ u32 status;
+ u8 msg_code;
+
+ if (unlikely(ep->irq_pci_addr != ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR ||
+diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c
+index 3e86fa3c7da3..4bbd26e8a9e2 100644
+--- a/drivers/pci/endpoint/functions/pci-epf-test.c
++++ b/drivers/pci/endpoint/functions/pci-epf-test.c
+@@ -175,7 +175,7 @@ static int pci_epf_test_read(struct pci_epf_test *epf_test)
+ goto err_map_addr;
+ }
+
+- memcpy(buf, src_addr, reg->size);
++ memcpy_fromio(buf, src_addr, reg->size);
+
+ crc32 = crc32_le(~0, buf, reg->size);
+ if (crc32 != reg->checksum)
+@@ -230,7 +230,7 @@ static int pci_epf_test_write(struct pci_epf_test *epf_test)
+ get_random_bytes(buf, reg->size);
+ reg->checksum = crc32_le(~0, buf, reg->size);
+
+- memcpy(dst_addr, buf, reg->size);
++ memcpy_toio(dst_addr, buf, reg->size);
+
+ /*
+ * wait 1ms inorder for the write to complete. Without this delay L3
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index bc1ff41ce3d3..78ae1cab9af7 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -959,15 +959,15 @@ static int pci_pm_freeze(struct device *dev)
+ }
+
+ /*
+- * This used to be done in pci_pm_prepare() for all devices and some
+- * drivers may depend on it, so do it here. Ideally, runtime-suspended
+- * devices should not be touched during freeze/thaw transitions,
+- * however.
++ * Resume all runtime-suspended devices before creating a snapshot
++ * image of system memory, because the restore kernel generally cannot
++ * be expected to always handle them consistently and they need to be
++ * put into the runtime-active metastate during system resume anyway,
++ * so it is better to ensure that the state saved in the image will be
++ * always consistent with that.
+ */
+- if (!dev_pm_smart_suspend_and_suspended(dev)) {
+- pm_runtime_resume(dev);
+- pci_dev->state_saved = false;
+- }
++ pm_runtime_resume(dev);
++ pci_dev->state_saved = false;
+
+ if (pm->freeze) {
+ int error;
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index c9f51fc24563..57a87a001b4f 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -5039,39 +5039,42 @@ unlock:
+ return 0;
+ }
+
+-/* Save and disable devices from the top of the tree down */
+-static void pci_bus_save_and_disable(struct pci_bus *bus)
++/*
++ * Save and disable devices from the top of the tree down while holding
++ * the @dev mutex lock for the entire tree.
++ */
++static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
+ {
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+- pci_dev_lock(dev);
+ pci_dev_save_and_disable(dev);
+- pci_dev_unlock(dev);
+ if (dev->subordinate)
+- pci_bus_save_and_disable(dev->subordinate);
++ pci_bus_save_and_disable_locked(dev->subordinate);
+ }
+ }
+
+ /*
+- * Restore devices from top of the tree down - parent bridges need to be
+- * restored before we can get to subordinate devices.
++ * Restore devices from top of the tree down while holding @dev mutex lock
++ * for the entire tree. Parent bridges need to be restored before we can
++ * get to subordinate devices.
+ */
+-static void pci_bus_restore(struct pci_bus *bus)
++static void pci_bus_restore_locked(struct pci_bus *bus)
+ {
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+- pci_dev_lock(dev);
+ pci_dev_restore(dev);
+- pci_dev_unlock(dev);
+ if (dev->subordinate)
+- pci_bus_restore(dev->subordinate);
++ pci_bus_restore_locked(dev->subordinate);
+ }
+ }
+
+-/* Save and disable devices from the top of the tree down */
+-static void pci_slot_save_and_disable(struct pci_slot *slot)
++/*
++ * Save and disable devices from the top of the tree down while holding
++ * the @dev mutex lock for the entire tree.
++ */
++static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
+ {
+ struct pci_dev *dev;
+
+@@ -5080,26 +5083,25 @@ static void pci_slot_save_and_disable(struct pci_slot *slot)
+ continue;
+ pci_dev_save_and_disable(dev);
+ if (dev->subordinate)
+- pci_bus_save_and_disable(dev->subordinate);
++ pci_bus_save_and_disable_locked(dev->subordinate);
+ }
+ }
+
+ /*
+- * Restore devices from top of the tree down - parent bridges need to be
+- * restored before we can get to subordinate devices.
++ * Restore devices from top of the tree down while holding @dev mutex lock
++ * for the entire tree. Parent bridges need to be restored before we can
++ * get to subordinate devices.
+ */
+-static void pci_slot_restore(struct pci_slot *slot)
++static void pci_slot_restore_locked(struct pci_slot *slot)
+ {
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+- pci_dev_lock(dev);
+ pci_dev_restore(dev);
+- pci_dev_unlock(dev);
+ if (dev->subordinate)
+- pci_bus_restore(dev->subordinate);
++ pci_bus_restore_locked(dev->subordinate);
+ }
+ }
+
+@@ -5158,17 +5160,15 @@ static int __pci_reset_slot(struct pci_slot *slot)
+ if (rc)
+ return rc;
+
+- pci_slot_save_and_disable(slot);
+-
+ if (pci_slot_trylock(slot)) {
++ pci_slot_save_and_disable_locked(slot);
+ might_sleep();
+ rc = pci_reset_hotplug_slot(slot->hotplug, 0);
++ pci_slot_restore_locked(slot);
+ pci_slot_unlock(slot);
+ } else
+ rc = -EAGAIN;
+
+- pci_slot_restore(slot);
+-
+ return rc;
+ }
+
+@@ -5254,17 +5254,15 @@ static int __pci_reset_bus(struct pci_bus *bus)
+ if (rc)
+ return rc;
+
+- pci_bus_save_and_disable(bus);
+-
+ if (pci_bus_trylock(bus)) {
++ pci_bus_save_and_disable_locked(bus);
+ might_sleep();
+ rc = pci_bridge_secondary_bus_reset(bus->self);
++ pci_bus_restore_locked(bus);
+ pci_bus_unlock(bus);
+ } else
+ rc = -EAGAIN;
+
+- pci_bus_restore(bus);
+-
+ return rc;
+ }
+
+diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
+index 5aaa4ce04ec3..ceb7ab3ba3d0 100644
+--- a/drivers/pci/switch/switchtec.c
++++ b/drivers/pci/switch/switchtec.c
+@@ -134,10 +134,6 @@ static void mrpc_cmd_submit(struct switchtec_dev *stdev)
+ stuser->data, stuser->data_len);
+ iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);
+
+- stuser->status = ioread32(&stdev->mmio_mrpc->status);
+- if (stuser->status != SWITCHTEC_MRPC_STATUS_INPROGRESS)
+- mrpc_complete_cmd(stdev);
+-
+ schedule_delayed_work(&stdev->mrpc_timeout,
+ msecs_to_jiffies(500));
+ }
+diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
+index d1dab36fa5b7..e2455ffb8597 100644
+--- a/drivers/phy/broadcom/phy-brcm-usb.c
++++ b/drivers/phy/broadcom/phy-brcm-usb.c
+@@ -378,6 +378,13 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
+ return 0;
+ }
+
++static int brcm_usb_phy_remove(struct platform_device *pdev)
++{
++ sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group);
++
++ return 0;
++}
++
+ #ifdef CONFIG_PM_SLEEP
+ static int brcm_usb_phy_suspend(struct device *dev)
+ {
+@@ -443,6 +450,7 @@ MODULE_DEVICE_TABLE(of, brcm_usb_dt_ids);
+
+ static struct platform_driver brcm_usb_driver = {
+ .probe = brcm_usb_phy_probe,
++ .remove = brcm_usb_phy_remove,
+ .driver = {
+ .name = "brcmstb-usb-phy",
+ .owner = THIS_MODULE,
+diff --git a/drivers/phy/qualcomm/phy-qcom-qusb2.c b/drivers/phy/qualcomm/phy-qcom-qusb2.c
+index 69c92843eb3b..9b7ae93e9df1 100644
+--- a/drivers/phy/qualcomm/phy-qcom-qusb2.c
++++ b/drivers/phy/qualcomm/phy-qcom-qusb2.c
+@@ -526,7 +526,7 @@ static int __maybe_unused qusb2_phy_runtime_resume(struct device *dev)
+ }
+
+ if (!qphy->has_se_clk_scheme) {
+- clk_prepare_enable(qphy->ref_clk);
++ ret = clk_prepare_enable(qphy->ref_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable ref clk, %d\n", ret);
+ goto disable_ahb_clk;
+diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+index b70058caee50..20b9864adce0 100644
+--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
++++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+@@ -54,8 +54,12 @@
+ /* drive strength control for ASIU GPIO */
+ #define IPROC_GPIO_ASIU_DRV0_CTRL_OFFSET 0x58
+
+-/* drive strength control for CCM/CRMU (AON) GPIO */
+-#define IPROC_GPIO_DRV0_CTRL_OFFSET 0x00
++/* pinconf for CCM GPIO */
++#define IPROC_GPIO_PULL_DN_OFFSET 0x10
++#define IPROC_GPIO_PULL_UP_OFFSET 0x14
++
++/* pinconf for CRMU(aon) GPIO and CCM GPIO*/
++#define IPROC_GPIO_DRV_CTRL_OFFSET 0x00
+
+ #define GPIO_BANK_SIZE 0x200
+ #define NGPIOS_PER_BANK 32
+@@ -76,6 +80,12 @@ enum iproc_pinconf_param {
+ IPROC_PINCON_MAX,
+ };
+
++enum iproc_pinconf_ctrl_type {
++ IOCTRL_TYPE_AON = 1,
++ IOCTRL_TYPE_CDRU,
++ IOCTRL_TYPE_INVALID,
++};
++
+ /*
+ * Iproc GPIO core
+ *
+@@ -100,6 +110,7 @@ struct iproc_gpio {
+
+ void __iomem *base;
+ void __iomem *io_ctrl;
++ enum iproc_pinconf_ctrl_type io_ctrl_type;
+
+ raw_spinlock_t lock;
+
+@@ -461,20 +472,44 @@ static const struct pinctrl_ops iproc_pctrl_ops = {
+ static int iproc_gpio_set_pull(struct iproc_gpio *chip, unsigned gpio,
+ bool disable, bool pull_up)
+ {
++ void __iomem *base;
+ unsigned long flags;
++ unsigned int shift;
++ u32 val_1, val_2;
+
+ raw_spin_lock_irqsave(&chip->lock, flags);
+-
+- if (disable) {
+- iproc_set_bit(chip, IPROC_GPIO_RES_EN_OFFSET, gpio, false);
++ if (chip->io_ctrl_type == IOCTRL_TYPE_CDRU) {
++ base = chip->io_ctrl;
++ shift = IPROC_GPIO_SHIFT(gpio);
++
++ val_1 = readl(base + IPROC_GPIO_PULL_UP_OFFSET);
++ val_2 = readl(base + IPROC_GPIO_PULL_DN_OFFSET);
++ if (disable) {
++ /* no pull-up or pull-down */
++ val_1 &= ~BIT(shift);
++ val_2 &= ~BIT(shift);
++ } else if (pull_up) {
++ val_1 |= BIT(shift);
++ val_2 &= ~BIT(shift);
++ } else {
++ val_1 &= ~BIT(shift);
++ val_2 |= BIT(shift);
++ }
++ writel(val_1, base + IPROC_GPIO_PULL_UP_OFFSET);
++ writel(val_2, base + IPROC_GPIO_PULL_DN_OFFSET);
+ } else {
+- iproc_set_bit(chip, IPROC_GPIO_PAD_RES_OFFSET, gpio,
+- pull_up);
+- iproc_set_bit(chip, IPROC_GPIO_RES_EN_OFFSET, gpio, true);
++ if (disable) {
++ iproc_set_bit(chip, IPROC_GPIO_RES_EN_OFFSET, gpio,
++ false);
++ } else {
++ iproc_set_bit(chip, IPROC_GPIO_PAD_RES_OFFSET, gpio,
++ pull_up);
++ iproc_set_bit(chip, IPROC_GPIO_RES_EN_OFFSET, gpio,
++ true);
++ }
+ }
+
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
+-
+ dev_dbg(chip->dev, "gpio:%u set pullup:%d\n", gpio, pull_up);
+
+ return 0;
+@@ -483,14 +518,35 @@ static int iproc_gpio_set_pull(struct iproc_gpio *chip, unsigned gpio,
+ static void iproc_gpio_get_pull(struct iproc_gpio *chip, unsigned gpio,
+ bool *disable, bool *pull_up)
+ {
++ void __iomem *base;
+ unsigned long flags;
++ unsigned int shift;
++ u32 val_1, val_2;
+
+ raw_spin_lock_irqsave(&chip->lock, flags);
+- *disable = !iproc_get_bit(chip, IPROC_GPIO_RES_EN_OFFSET, gpio);
+- *pull_up = iproc_get_bit(chip, IPROC_GPIO_PAD_RES_OFFSET, gpio);
++ if (chip->io_ctrl_type == IOCTRL_TYPE_CDRU) {
++ base = chip->io_ctrl;
++ shift = IPROC_GPIO_SHIFT(gpio);
++
++ val_1 = readl(base + IPROC_GPIO_PULL_UP_OFFSET) & BIT(shift);
++ val_2 = readl(base + IPROC_GPIO_PULL_DN_OFFSET) & BIT(shift);
++
++ *pull_up = val_1 ? true : false;
++ *disable = (val_1 | val_2) ? false : true;
++
++ } else {
++ *disable = !iproc_get_bit(chip, IPROC_GPIO_RES_EN_OFFSET, gpio);
++ *pull_up = iproc_get_bit(chip, IPROC_GPIO_PAD_RES_OFFSET, gpio);
++ }
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
+ }
+
++#define DRV_STRENGTH_OFFSET(gpio, bit, type) ((type) == IOCTRL_TYPE_AON ? \
++ ((2 - (bit)) * 4 + IPROC_GPIO_DRV_CTRL_OFFSET) : \
++ ((type) == IOCTRL_TYPE_CDRU) ? \
++ ((bit) * 4 + IPROC_GPIO_DRV_CTRL_OFFSET) : \
++ ((bit) * 4 + IPROC_GPIO_REG(gpio, IPROC_GPIO_ASIU_DRV0_CTRL_OFFSET)))
++
+ static int iproc_gpio_set_strength(struct iproc_gpio *chip, unsigned gpio,
+ unsigned strength)
+ {
+@@ -505,11 +561,8 @@ static int iproc_gpio_set_strength(struct iproc_gpio *chip, unsigned gpio,
+
+ if (chip->io_ctrl) {
+ base = chip->io_ctrl;
+- offset = IPROC_GPIO_DRV0_CTRL_OFFSET;
+ } else {
+ base = chip->base;
+- offset = IPROC_GPIO_REG(gpio,
+- IPROC_GPIO_ASIU_DRV0_CTRL_OFFSET);
+ }
+
+ shift = IPROC_GPIO_SHIFT(gpio);
+@@ -520,11 +573,11 @@ static int iproc_gpio_set_strength(struct iproc_gpio *chip, unsigned gpio,
+ raw_spin_lock_irqsave(&chip->lock, flags);
+ strength = (strength / 2) - 1;
+ for (i = 0; i < GPIO_DRV_STRENGTH_BITS; i++) {
++ offset = DRV_STRENGTH_OFFSET(gpio, i, chip->io_ctrl_type);
+ val = readl(base + offset);
+ val &= ~BIT(shift);
+ val |= ((strength >> i) & 0x1) << shift;
+ writel(val, base + offset);
+- offset += 4;
+ }
+ raw_spin_unlock_irqrestore(&chip->lock, flags);
+
+@@ -541,11 +594,8 @@ static int iproc_gpio_get_strength(struct iproc_gpio *chip, unsigned gpio,
+
+ if (chip->io_ctrl) {
+ base = chip->io_ctrl;
+- offset = IPROC_GPIO_DRV0_CTRL_OFFSET;
+ } else {
+ base = chip->base;
+- offset = IPROC_GPIO_REG(gpio,
+- IPROC_GPIO_ASIU_DRV0_CTRL_OFFSET);
+ }
+
+ shift = IPROC_GPIO_SHIFT(gpio);
+@@ -553,10 +603,10 @@ static int iproc_gpio_get_strength(struct iproc_gpio *chip, unsigned gpio,
+ raw_spin_lock_irqsave(&chip->lock, flags);
+ *strength = 0;
+ for (i = 0; i < GPIO_DRV_STRENGTH_BITS; i++) {
++ offset = DRV_STRENGTH_OFFSET(gpio, i, chip->io_ctrl_type);
+ val = readl(base + offset) & BIT(shift);
+ val >>= shift;
+ *strength += (val << i);
+- offset += 4;
+ }
+
+ /* convert to mA */
+@@ -734,6 +784,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
+ u32 ngpios, pinconf_disable_mask = 0;
+ int irq, ret;
+ bool no_pinconf = false;
++ enum iproc_pinconf_ctrl_type io_ctrl_type = IOCTRL_TYPE_INVALID;
+
+ /* NSP does not support drive strength config */
+ if (of_device_is_compatible(dev->of_node, "brcm,iproc-nsp-gpio"))
+@@ -764,8 +815,15 @@ static int iproc_gpio_probe(struct platform_device *pdev)
+ dev_err(dev, "unable to map I/O memory\n");
+ return PTR_ERR(chip->io_ctrl);
+ }
++ if (of_device_is_compatible(dev->of_node,
++ "brcm,cygnus-ccm-gpio"))
++ io_ctrl_type = IOCTRL_TYPE_CDRU;
++ else
++ io_ctrl_type = IOCTRL_TYPE_AON;
+ }
+
++ chip->io_ctrl_type = io_ctrl_type;
++
+ if (of_property_read_u32(dev->of_node, "ngpios", &ngpios)) {
+ dev_err(&pdev->dev, "missing ngpios DT property\n");
+ return -ENODEV;
+diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+index 158f618f1695..0c0a5018102b 100644
+--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
++++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+@@ -239,13 +239,9 @@ static const unsigned int eth_link_led_pins[] = { GPIOZ_14 };
+ static const unsigned int eth_act_led_pins[] = { GPIOZ_15 };
+
+ static const unsigned int tsin_a_d0_pins[] = { GPIODV_0 };
+-static const unsigned int tsin_a_d0_x_pins[] = { GPIOX_10 };
+ static const unsigned int tsin_a_clk_pins[] = { GPIODV_8 };
+-static const unsigned int tsin_a_clk_x_pins[] = { GPIOX_11 };
+ static const unsigned int tsin_a_sop_pins[] = { GPIODV_9 };
+-static const unsigned int tsin_a_sop_x_pins[] = { GPIOX_8 };
+ static const unsigned int tsin_a_d_valid_pins[] = { GPIODV_10 };
+-static const unsigned int tsin_a_d_valid_x_pins[] = { GPIOX_9 };
+ static const unsigned int tsin_a_fail_pins[] = { GPIODV_11 };
+ static const unsigned int tsin_a_dp_pins[] = {
+ GPIODV_1, GPIODV_2, GPIODV_3, GPIODV_4, GPIODV_5, GPIODV_6, GPIODV_7,
+@@ -432,10 +428,6 @@ static struct meson_pmx_group meson_gxl_periphs_groups[] = {
+ GROUP(spi_miso, 5, 2),
+ GROUP(spi_ss0, 5, 1),
+ GROUP(spi_sclk, 5, 0),
+- GROUP(tsin_a_sop_x, 6, 3),
+- GROUP(tsin_a_d_valid_x, 6, 2),
+- GROUP(tsin_a_d0_x, 6, 1),
+- GROUP(tsin_a_clk_x, 6, 0),
+
+ /* Bank Z */
+ GROUP(eth_mdio, 4, 23),
+@@ -698,8 +690,8 @@ static const char * const eth_led_groups[] = {
+ };
+
+ static const char * const tsin_a_groups[] = {
+- "tsin_a_clk", "tsin_a_clk_x", "tsin_a_sop", "tsin_a_sop_x",
+- "tsin_a_d_valid", "tsin_a_d_valid_x", "tsin_a_d0", "tsin_a_d0_x",
++ "tsin_a_clk", "tsin_a_sop",
++ "tsin_a_d_valid", "tsin_a_d0",
+ "tsin_a_dp", "tsin_a_fail",
+ };
+
+diff --git a/drivers/pinctrl/sh-pfc/pfc-emev2.c b/drivers/pinctrl/sh-pfc/pfc-emev2.c
+index 1cbbe04d7df6..eafd8edbcbe9 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-emev2.c
++++ b/drivers/pinctrl/sh-pfc/pfc-emev2.c
+@@ -1263,6 +1263,14 @@ static const char * const dtv_groups[] = {
+ "dtv_b",
+ };
+
++static const char * const err_rst_reqb_groups[] = {
++ "err_rst_reqb",
++};
++
++static const char * const ext_clki_groups[] = {
++ "ext_clki",
++};
++
+ static const char * const iic0_groups[] = {
+ "iic0",
+ };
+@@ -1285,6 +1293,10 @@ static const char * const lcd_groups[] = {
+ "yuv3",
+ };
+
++static const char * const lowpwr_groups[] = {
++ "lowpwr",
++};
++
+ static const char * const ntsc_groups[] = {
+ "ntsc_clk",
+ "ntsc_data",
+@@ -1298,6 +1310,10 @@ static const char * const pwm1_groups[] = {
+ "pwm1",
+ };
+
++static const char * const ref_clko_groups[] = {
++ "ref_clko",
++};
++
+ static const char * const sd_groups[] = {
+ "sd_cki",
+ };
+@@ -1391,13 +1407,17 @@ static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(cam),
+ SH_PFC_FUNCTION(cf),
+ SH_PFC_FUNCTION(dtv),
++ SH_PFC_FUNCTION(err_rst_reqb),
++ SH_PFC_FUNCTION(ext_clki),
+ SH_PFC_FUNCTION(iic0),
+ SH_PFC_FUNCTION(iic1),
+ SH_PFC_FUNCTION(jtag),
+ SH_PFC_FUNCTION(lcd),
++ SH_PFC_FUNCTION(lowpwr),
+ SH_PFC_FUNCTION(ntsc),
+ SH_PFC_FUNCTION(pwm0),
+ SH_PFC_FUNCTION(pwm1),
++ SH_PFC_FUNCTION(ref_clko),
+ SH_PFC_FUNCTION(sd),
+ SH_PFC_FUNCTION(sdi0),
+ SH_PFC_FUNCTION(sdi1),
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+index 35f436bcb849..e9739dbcb356 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+@@ -1982,7 +1982,7 @@ static const unsigned int gether_gmii_pins[] = {
+ */
+ 185, 186, 187, 188, 189, 190, 191, 192, 174, 161, 204,
+ 171, 170, 169, 168, 167, 166, 173, 172, 176, 184, 183, 203,
+- 205, 163, 206, 207,
++ 205, 163, 206, 207, 158,
+ };
+ static const unsigned int gether_gmii_mux[] = {
+ ET_ERXD0_MARK, ET_ERXD1_MARK, ET_ERXD2_MARK, ET_ERXD3_MARK,
+@@ -2154,6 +2154,7 @@ static const unsigned int lcd0_data24_1_mux[] = {
+ LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK,
+ LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK,
+ LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK,
++ LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK,
+ LCD0_D16_MARK, LCD0_D17_MARK, LCD0_D18_PORT163_MARK,
+ LCD0_D19_PORT162_MARK, LCD0_D20_PORT161_MARK, LCD0_D21_PORT158_MARK,
+ LCD0_D22_PORT160_MARK, LCD0_D23_PORT159_MARK,
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+index 5811784d88cb..9eb860c8f917 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+@@ -3220,8 +3220,7 @@ static const unsigned int qspi_data4_b_pins[] = {
+ RCAR_GP_PIN(6, 4),
+ };
+ static const unsigned int qspi_data4_b_mux[] = {
+- SPCLK_B_MARK, MOSI_IO0_B_MARK, MISO_IO1_B_MARK,
+- IO2_B_MARK, IO3_B_MARK, SSL_B_MARK,
++ MOSI_IO0_B_MARK, MISO_IO1_B_MARK, IO2_B_MARK, IO3_B_MARK,
+ };
+ /* - SCIF0 ------------------------------------------------------------------ */
+ static const unsigned int scif0_data_pins[] = {
+@@ -4375,17 +4374,14 @@ static const unsigned int vin1_b_data18_pins[] = {
+ };
+ static const unsigned int vin1_b_data18_mux[] = {
+ /* B */
+- VI1_DATA0_B_MARK, VI1_DATA1_B_MARK,
+ VI1_DATA2_B_MARK, VI1_DATA3_B_MARK,
+ VI1_DATA4_B_MARK, VI1_DATA5_B_MARK,
+ VI1_DATA6_B_MARK, VI1_DATA7_B_MARK,
+ /* G */
+- VI1_G0_B_MARK, VI1_G1_B_MARK,
+ VI1_G2_B_MARK, VI1_G3_B_MARK,
+ VI1_G4_B_MARK, VI1_G5_B_MARK,
+ VI1_G6_B_MARK, VI1_G7_B_MARK,
+ /* R */
+- VI1_R0_B_MARK, VI1_R1_B_MARK,
+ VI1_R2_B_MARK, VI1_R3_B_MARK,
+ VI1_R4_B_MARK, VI1_R5_B_MARK,
+ VI1_R6_B_MARK, VI1_R7_B_MARK,
+@@ -5243,7 +5239,7 @@ static const char * const scifb2_groups[] = {
+ "scifb2_data_b",
+ "scifb2_clk_b",
+ "scifb2_ctrl_b",
+- "scifb0_data_c",
++ "scifb2_data_c",
+ "scifb2_clk_c",
+ "scifb2_data_d",
+ };
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
+index cc3597f66605..46c41ca6ea38 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7792.c
+@@ -1916,6 +1916,7 @@ static const char * const vin1_groups[] = {
+ "vin1_data8",
+ "vin1_data24_b",
+ "vin1_data20_b",
++ "vin1_data18_b",
+ "vin1_data16_b",
+ "vin1_sync",
+ "vin1_field",
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+index 164002437594..24b9bb1ee1fe 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a7794.c
+@@ -5215,7 +5215,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ FN_AVB_MDC, FN_SSI_SDATA6_B, 0, 0, }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32,
+- 1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3, 3) {
++ 1, 3, 3, 3, 3, 2, 2, 3, 3, 3, 3, 3) {
+ /* IP9_31 [1] */
+ 0, 0,
+ /* IP9_30_28 [3] */
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+index eeb58b3bbc9a..53fae9fd682b 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+@@ -2354,7 +2354,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ #define F_(x, y) x,
+ #define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
+- 4, 4, 4, 4,
++ 4, 4, 4, 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* RESERVED 31, 30, 29, 28 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
+index 3f6967331f64..81a710bb8555 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c
+@@ -2751,7 +2751,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ #define F_(x, y) x,
+ #define FM(x) FN_##x,
+ { PINMUX_CFG_REG_VAR("MOD_SEL0", 0xe6060500, 32,
+- 4, 4, 4, 4,
++ 4, 4, 4, 4, 4,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) {
+ /* RESERVED 31, 30, 29, 28 */
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
+index adade5b7ffbc..337c80bde8f9 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
++++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
+@@ -391,10 +391,10 @@ FM(IP12_31_28) IP12_31_28 \
+ #define MOD_SEL0_27 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1)
+ #define MOD_SEL0_26 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1)
+ #define MOD_SEL0_25 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1)
+-#define MOD_SEL0_24_23 FM(SEL_PWM0_0) FM(SEL_PWM0_1) FM(SEL_PWM0_2) FM(SEL_PWM0_3)
+-#define MOD_SEL0_22_21 FM(SEL_PWM1_0) FM(SEL_PWM1_1) FM(SEL_PWM1_2) FM(SEL_PWM1_3)
+-#define MOD_SEL0_20_19 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) FM(SEL_PWM2_3)
+-#define MOD_SEL0_18_17 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) FM(SEL_PWM3_3)
++#define MOD_SEL0_24_23 FM(SEL_PWM0_0) FM(SEL_PWM0_1) FM(SEL_PWM0_2) F_(0, 0)
++#define MOD_SEL0_22_21 FM(SEL_PWM1_0) FM(SEL_PWM1_1) FM(SEL_PWM1_2) F_(0, 0)
++#define MOD_SEL0_20_19 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0)
++#define MOD_SEL0_18_17 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0)
+ #define MOD_SEL0_15 FM(SEL_IRQ_0_0) FM(SEL_IRQ_0_1)
+ #define MOD_SEL0_14 FM(SEL_IRQ_1_0) FM(SEL_IRQ_1_1)
+ #define MOD_SEL0_13 FM(SEL_IRQ_2_0) FM(SEL_IRQ_2_1)
+diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+index a50d22bef1f4..cfdb4fc177c3 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
++++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+@@ -2119,7 +2119,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ },
+
+ { PINMUX_CFG_REG("PCIOR0", 0xfffe3852, 16, 1) {
+- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ PC8_IN, PC8_OUT,
+ PC7_IN, PC7_OUT,
+ PC6_IN, PC6_OUT,
+diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+index d25e6f674d0a..6dca760f9f28 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
++++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+@@ -3086,6 +3086,7 @@ static const unsigned int tpu4_to2_mux[] = {
+ };
+ static const unsigned int tpu4_to3_pins[] = {
+ /* TO */
++ PIN_NUMBER(6, 26),
+ };
+ static const unsigned int tpu4_to3_mux[] = {
+ TPU4TO3_MARK,
+@@ -3366,7 +3367,8 @@ static const char * const fsic_groups[] = {
+ "fsic_sclk_out",
+ "fsic_data_in",
+ "fsic_data_out",
+- "fsic_spdif",
++ "fsic_spdif_0",
++ "fsic_spdif_1",
+ };
+
+ static const char * const fsid_groups[] = {
+diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+index 3eccc9b3ca84..c691e5e9d9de 100644
+--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
++++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+@@ -2231,13 +2231,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
+ FN_LCD_CL1_B, 0, 0,
+ /* IP10_5_3 [3] */
+ FN_SSI_WS23, FN_VI1_5_B, FN_TX1_D, FN_HSCK0_C, FN_FALE_B,
+- FN_LCD_DON_B, 0, 0, 0,
++ FN_LCD_DON_B, 0, 0,
+ /* IP10_2_0 [3] */
+ FN_SSI_SCK23, FN_VI1_4_B, FN_RX1_D, FN_FCLE_B,
+ FN_LCD_DATA15_B, 0, 0, 0 }
+ },
+ { PINMUX_CFG_REG_VAR("IPSR11", 0xFFFC0048, 32,
+- 3, 1, 2, 2, 2, 3, 3, 1, 2, 3, 3, 1, 1, 1, 1) {
++ 3, 1, 2, 3, 2, 2, 3, 3, 1, 2, 3, 3, 1, 1, 1, 1) {
+ /* IP11_31_29 [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ /* IP11_28 [1] */
+diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c
+index f66521c7f846..98128374d710 100644
+--- a/drivers/platform/mips/cpu_hwmon.c
++++ b/drivers/platform/mips/cpu_hwmon.c
+@@ -159,7 +159,7 @@ static int __init loongson_hwmon_init(void)
+
+ cpu_hwmon_dev = hwmon_device_register(NULL);
+ if (IS_ERR(cpu_hwmon_dev)) {
+- ret = -ENOMEM;
++ ret = PTR_ERR(cpu_hwmon_dev);
+ pr_err("hwmon_device_register fail!\n");
+ goto fail_hwmon_device_register;
+ }
+diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
+index f10af5c383c5..83fd7677af24 100644
+--- a/drivers/platform/x86/alienware-wmi.c
++++ b/drivers/platform/x86/alienware-wmi.c
+@@ -522,23 +522,22 @@ static acpi_status alienware_wmax_command(struct wmax_basic_args *in_args,
+
+ input.length = (acpi_size) sizeof(*in_args);
+ input.pointer = in_args;
+- if (out_data != NULL) {
++ if (out_data) {
+ output.length = ACPI_ALLOCATE_BUFFER;
+ output.pointer = NULL;
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
+ command, &input, &output);
+- } else
++ if (ACPI_SUCCESS(status)) {
++ obj = (union acpi_object *)output.pointer;
++ if (obj && obj->type == ACPI_TYPE_INTEGER)
++ *out_data = (u32)obj->integer.value;
++ }
++ kfree(output.pointer);
++ } else {
+ status = wmi_evaluate_method(WMAX_CONTROL_GUID, 0,
+ command, &input, NULL);
+-
+- if (ACPI_SUCCESS(status) && out_data != NULL) {
+- obj = (union acpi_object *)output.pointer;
+- if (obj && obj->type == ACPI_TYPE_INTEGER)
+- *out_data = (u32) obj->integer.value;
+ }
+- kfree(output.pointer);
+ return status;
+-
+ }
+
+ /*
+@@ -588,7 +587,7 @@ static ssize_t show_hdmi_source(struct device *dev,
+ return scnprintf(buf, PAGE_SIZE,
+ "input [gpu] unknown\n");
+ }
+- pr_err("alienware-wmi: unknown HDMI source status: %d\n", out_data);
++ pr_err("alienware-wmi: unknown HDMI source status: %u\n", status);
+ return scnprintf(buf, PAGE_SIZE, "input gpu [unknown]\n");
+ }
+
+diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
+index 04791ea5d97b..35cdc3998eb5 100644
+--- a/drivers/platform/x86/wmi.c
++++ b/drivers/platform/x86/wmi.c
+@@ -768,6 +768,9 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
+ struct wmi_block *wblock = dev_to_wblock(dev);
+ const struct wmi_device_id *id = wmi_driver->id_table;
+
++ if (id == NULL)
++ return 0;
++
+ while (id->guid_string) {
+ uuid_le driver_guid;
+
+diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
+index e85361878450..e43a7b3b570c 100644
+--- a/drivers/power/supply/power_supply_core.c
++++ b/drivers/power/supply/power_supply_core.c
+@@ -902,14 +902,14 @@ __power_supply_register(struct device *parent,
+ }
+
+ spin_lock_init(&psy->changed_lock);
+- rc = device_init_wakeup(dev, ws);
+- if (rc)
+- goto wakeup_init_failed;
+-
+ rc = device_add(dev);
+ if (rc)
+ goto device_add_failed;
+
++ rc = device_init_wakeup(dev, ws);
++ if (rc)
++ goto wakeup_init_failed;
++
+ rc = psy_register_thermal(psy);
+ if (rc)
+ goto register_thermal_failed;
+@@ -946,8 +946,8 @@ register_cooler_failed:
+ psy_unregister_thermal(psy);
+ register_thermal_failed:
+ device_del(dev);
+-device_add_failed:
+ wakeup_init_failed:
++device_add_failed:
+ check_supplies_failed:
+ dev_set_name_failed:
+ put_device(dev);
+diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
+index 1e69c1c9ec09..7a4a6406cf69 100644
+--- a/drivers/pwm/pwm-lpss.c
++++ b/drivers/pwm/pwm-lpss.c
+@@ -216,6 +216,12 @@ EXPORT_SYMBOL_GPL(pwm_lpss_probe);
+
+ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
+ {
++ int i;
++
++ for (i = 0; i < lpwm->info->npwm; i++) {
++ if (pwm_is_enabled(&lpwm->chip.pwms[i]))
++ pm_runtime_put(lpwm->chip.dev);
++ }
+ return pwmchip_remove(&lpwm->chip);
+ }
+ EXPORT_SYMBOL_GPL(pwm_lpss_remove);
+diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
+index f6e738ad7bd9..e247ab632530 100644
+--- a/drivers/pwm/pwm-meson.c
++++ b/drivers/pwm/pwm-meson.c
+@@ -188,7 +188,7 @@ static int meson_pwm_calc(struct meson_pwm *meson,
+ do_div(fin_ps, fin_freq);
+
+ /* Calc pre_div with the period */
+- for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) {
++ for (pre_div = 0; pre_div <= MISC_CLK_DIV_MASK; pre_div++) {
+ cnt = DIV_ROUND_CLOSEST_ULL((u64)period * 1000,
+ fin_ps * (pre_div + 1));
+ dev_dbg(meson->chip.dev, "fin_ps=%llu pre_div=%u cnt=%u\n",
+@@ -197,7 +197,7 @@ static int meson_pwm_calc(struct meson_pwm *meson,
+ break;
+ }
+
+- if (pre_div == MISC_CLK_DIV_MASK) {
++ if (pre_div > MISC_CLK_DIV_MASK) {
+ dev_err(meson->chip.dev, "unable to get period pre_div\n");
+ return -EINVAL;
+ }
+@@ -325,11 +325,6 @@ static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ if (state->period != channel->state.period ||
+ state->duty_cycle != channel->state.duty_cycle ||
+ state->polarity != channel->state.polarity) {
+- if (channel->state.enabled) {
+- meson_pwm_disable(meson, pwm->hwpwm);
+- channel->state.enabled = false;
+- }
+-
+ if (state->polarity != channel->state.polarity) {
+ if (state->polarity == PWM_POLARITY_NORMAL)
+ meson->inverter_mask |= BIT(pwm->hwpwm);
+diff --git a/drivers/rapidio/rio_cm.c b/drivers/rapidio/rio_cm.c
+index ef989a15aefc..b29fc258eeba 100644
+--- a/drivers/rapidio/rio_cm.c
++++ b/drivers/rapidio/rio_cm.c
+@@ -1215,7 +1215,9 @@ static int riocm_ch_listen(u16 ch_id)
+ riocm_debug(CHOP, "(ch_%d)", ch_id);
+
+ ch = riocm_get_channel(ch_id);
+- if (!ch || !riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
++ if (!ch)
++ return -EINVAL;
++ if (!riocm_cmp_exch(ch, RIO_CM_CHAN_BOUND, RIO_CM_LISTEN))
+ ret = -EINVAL;
+ riocm_put_channel(ch);
+ return ret;
+diff --git a/drivers/regulator/lp87565-regulator.c b/drivers/regulator/lp87565-regulator.c
+index c192357d1dea..7cd6862406b7 100644
+--- a/drivers/regulator/lp87565-regulator.c
++++ b/drivers/regulator/lp87565-regulator.c
+@@ -193,7 +193,7 @@ static int lp87565_regulator_probe(struct platform_device *pdev)
+ struct lp87565 *lp87565 = dev_get_drvdata(pdev->dev.parent);
+ struct regulator_config config = { };
+ struct regulator_dev *rdev;
+- int i, min_idx = LP87565_BUCK_1, max_idx = LP87565_BUCK_3;
++ int i, min_idx = LP87565_BUCK_0, max_idx = LP87565_BUCK_3;
+
+ platform_set_drvdata(pdev, lp87565);
+
+diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c
+index a9446056435f..1f2d8180506b 100644
+--- a/drivers/regulator/pv88060-regulator.c
++++ b/drivers/regulator/pv88060-regulator.c
+@@ -135,7 +135,7 @@ static int pv88060_set_current_limit(struct regulator_dev *rdev, int min,
+ int i;
+
+ /* search for closest to maximum */
+- for (i = info->n_current_limits; i >= 0; i--) {
++ for (i = info->n_current_limits - 1; i >= 0; i--) {
+ if (min <= info->current_limits[i]
+ && max >= info->current_limits[i]) {
+ return regmap_update_bits(rdev->regmap,
+diff --git a/drivers/regulator/pv88080-regulator.c b/drivers/regulator/pv88080-regulator.c
+index 9a08cb2de501..6770e4de2097 100644
+--- a/drivers/regulator/pv88080-regulator.c
++++ b/drivers/regulator/pv88080-regulator.c
+@@ -279,7 +279,7 @@ static int pv88080_set_current_limit(struct regulator_dev *rdev, int min,
+ int i;
+
+ /* search for closest to maximum */
+- for (i = info->n_current_limits; i >= 0; i--) {
++ for (i = info->n_current_limits - 1; i >= 0; i--) {
+ if (min <= info->current_limits[i]
+ && max >= info->current_limits[i]) {
+ return regmap_update_bits(rdev->regmap,
+diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c
+index 7a0c15957bd0..2302b0df7630 100644
+--- a/drivers/regulator/pv88090-regulator.c
++++ b/drivers/regulator/pv88090-regulator.c
+@@ -157,7 +157,7 @@ static int pv88090_set_current_limit(struct regulator_dev *rdev, int min,
+ int i;
+
+ /* search for closest to maximum */
+- for (i = info->n_current_limits; i >= 0; i--) {
++ for (i = info->n_current_limits - 1; i >= 0; i--) {
+ if (min <= info->current_limits[i]
+ && max >= info->current_limits[i]) {
+ return regmap_update_bits(rdev->regmap,
+diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
+index 45e96e154690..5a5e9b5bf4be 100644
+--- a/drivers/regulator/tps65086-regulator.c
++++ b/drivers/regulator/tps65086-regulator.c
+@@ -90,8 +90,8 @@ static const struct regulator_linear_range tps65086_buck345_25mv_ranges[] = {
+ static const struct regulator_linear_range tps65086_ldoa1_ranges[] = {
+ REGULATOR_LINEAR_RANGE(1350000, 0x0, 0x0, 0),
+ REGULATOR_LINEAR_RANGE(1500000, 0x1, 0x7, 100000),
+- REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xA, 100000),
+- REGULATOR_LINEAR_RANGE(2700000, 0xB, 0xD, 150000),
++ REGULATOR_LINEAR_RANGE(2300000, 0x8, 0xB, 100000),
++ REGULATOR_LINEAR_RANGE(2850000, 0xC, 0xD, 150000),
+ REGULATOR_LINEAR_RANGE(3300000, 0xE, 0xE, 0),
+ };
+
+diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
+index 5a5bc4bb08d2..df591435d12a 100644
+--- a/drivers/regulator/wm831x-dcdc.c
++++ b/drivers/regulator/wm831x-dcdc.c
+@@ -327,8 +327,8 @@ static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev)
+ }
+
+ /* Current limit options */
+-static u16 wm831x_dcdc_ilim[] = {
+- 125, 250, 375, 500, 625, 750, 875, 1000
++static const unsigned int wm831x_dcdc_ilim[] = {
++ 125000, 250000, 375000, 500000, 625000, 750000, 875000, 1000000
+ };
+
+ static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev,
+diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
+index 6a84b6372897..cc475dcbf27f 100644
+--- a/drivers/remoteproc/qcom_q6v5_pil.c
++++ b/drivers/remoteproc/qcom_q6v5_pil.c
+@@ -1268,16 +1268,26 @@ static const struct rproc_hexagon_res sdm845_mss = {
+
+ static const struct rproc_hexagon_res msm8996_mss = {
+ .hexagon_mba_image = "mba.mbn",
++ .proxy_supply = (struct qcom_mss_reg_res[]) {
++ {
++ .supply = "pll",
++ .uA = 100000,
++ },
++ {}
++ },
+ .proxy_clk_names = (char*[]){
+ "xo",
+ "pnoc",
++ "qdss",
+ NULL
+ },
+ .active_clk_names = (char*[]){
+ "iface",
+ "bus",
+ "mem",
+- "gpll0_mss_clk",
++ "gpll0_mss",
++ "snoc_axi",
++ "mnoc_axi",
+ NULL
+ },
+ .need_mem_protection = true,
+diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
+index cab293cb2bf0..1fc48ebd3cd0 100644
+--- a/drivers/rtc/rtc-88pm80x.c
++++ b/drivers/rtc/rtc-88pm80x.c
+@@ -114,12 +114,14 @@ static int pm80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ unsigned char buf[4];
+ unsigned long ticks, base, data;
+ regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
+- base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
+
+ /* load 32-bit read-only counter */
+ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
+- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ ticks = base + data;
+ dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+@@ -137,7 +139,8 @@ static int pm80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+
+ /* load 32-bit read-only counter */
+ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
+- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ base = ticks - data;
+ dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+@@ -158,11 +161,13 @@ static int pm80x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ int ret;
+
+ regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
+- base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
+
+ regmap_raw_read(info->map, PM800_RTC_EXPIRE1_1, buf, 4);
+- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ ticks = base + data;
+ dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+@@ -185,12 +190,14 @@ static int pm80x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ regmap_update_bits(info->map, PM800_RTC_CONTROL, PM800_ALARM1_EN, 0);
+
+ regmap_raw_read(info->map, PM800_RTC_EXPIRE2_1, buf, 4);
+- base = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ base = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ dev_dbg(info->dev, "%x-%x-%x-%x\n", buf[0], buf[1], buf[2], buf[3]);
+
+ /* load 32-bit read-only counter */
+ regmap_raw_read(info->map, PM800_RTC_COUNTER1, buf, 4);
+- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ ticks = base + data;
+ dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
+index fbcf13bbbd8d..73697e4b18a9 100644
+--- a/drivers/rtc/rtc-88pm860x.c
++++ b/drivers/rtc/rtc-88pm860x.c
+@@ -115,11 +115,13 @@ static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
+ dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
+ buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+- base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
++ base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
++ (buf[5] << 8) | buf[7];
+
+ /* load 32-bit read-only counter */
+ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
+- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ ticks = base + data;
+ dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+@@ -145,7 +147,8 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+
+ /* load 32-bit read-only counter */
+ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
+- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ base = ticks - data;
+ dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+@@ -170,10 +173,12 @@ static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
+ dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
+ buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+- base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
++ base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
++ (buf[5] << 8) | buf[7];
+
+ pm860x_bulk_read(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
+- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ ticks = base + data;
+ dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+@@ -198,11 +203,13 @@ static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+ pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
+ dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
+ buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
+- base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];
++ base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) |
++ (buf[5] << 8) | buf[7];
+
+ /* load 32-bit read-only counter */
+ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
+- data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+ ticks = base + data;
+ dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
+ base, data, ticks);
+diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
+index 71396b62dc52..ebd59e86a567 100644
+--- a/drivers/rtc/rtc-ds1307.c
++++ b/drivers/rtc/rtc-ds1307.c
+@@ -749,8 +749,8 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+ if (ret < 0)
+ return ret;
+
+- ctl[0] &= ~RX8130_REG_EXTENSION_WADA;
+- ctl[1] |= RX8130_REG_FLAG_AF;
++ ctl[0] &= RX8130_REG_EXTENSION_WADA;
++ ctl[1] &= ~RX8130_REG_FLAG_AF;
+ ctl[2] &= ~RX8130_REG_CONTROL0_AIE;
+
+ ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl,
+@@ -773,8 +773,7 @@ static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t)
+
+ ctl[2] |= RX8130_REG_CONTROL0_AIE;
+
+- return regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl,
+- sizeof(ctl));
++ return regmap_write(ds1307->regmap, RX8130_REG_CONTROL0, ctl[2]);
+ }
+
+ static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled)
+diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
+index 9caaccccaa57..b1ebca099b0d 100644
+--- a/drivers/rtc/rtc-ds1672.c
++++ b/drivers/rtc/rtc-ds1672.c
+@@ -58,7 +58,8 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
+ "%s: raw read data - counters=%02x,%02x,%02x,%02x\n",
+ __func__, buf[0], buf[1], buf[2], buf[3]);
+
+- time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
++ time = ((unsigned long)buf[3] << 24) | (buf[2] << 16) |
++ (buf[1] << 8) | buf[0];
+
+ rtc_time_to_tm(time, tm);
+
+diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
+index 2f1772a358ca..18a6f15e313d 100644
+--- a/drivers/rtc/rtc-mc146818-lib.c
++++ b/drivers/rtc/rtc-mc146818-lib.c
+@@ -82,7 +82,7 @@ unsigned int mc146818_get_time(struct rtc_time *time)
+ time->tm_year += real_year - 72;
+ #endif
+
+- if (century)
++ if (century > 20)
+ time->tm_year += (century - 19) * 100;
+
+ /*
+diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
+index f85f1fc29e32..964ed91416e1 100644
+--- a/drivers/rtc/rtc-mt6397.c
++++ b/drivers/rtc/rtc-mt6397.c
+@@ -362,7 +362,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n",
+ rtc->irq, ret);
+- goto out_dispose_irq;
++ return ret;
+ }
+
+ device_init_wakeup(&pdev->dev, 1);
+@@ -378,9 +378,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
+ return 0;
+
+ out_free_irq:
+- free_irq(rtc->irq, rtc->rtc_dev);
+-out_dispose_irq:
+- irq_dispose_mapping(rtc->irq);
++ free_irq(rtc->irq, rtc);
+ return ret;
+ }
+
+@@ -388,8 +386,7 @@ static int mtk_rtc_remove(struct platform_device *pdev)
+ {
+ struct mt6397_rtc *rtc = platform_get_drvdata(pdev);
+
+- free_irq(rtc->irq, rtc->rtc_dev);
+- irq_dispose_mapping(rtc->irq);
++ free_irq(rtc->irq, rtc);
+
+ return 0;
+ }
+diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
+index 7cb786d76e3c..8c62406f92dd 100644
+--- a/drivers/rtc/rtc-pcf2127.c
++++ b/drivers/rtc/rtc-pcf2127.c
+@@ -57,20 +57,14 @@ static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ struct pcf2127 *pcf2127 = dev_get_drvdata(dev);
+ unsigned char buf[10];
+ int ret;
+- int i;
+
+- for (i = 0; i <= PCF2127_REG_CTRL3; i++) {
+- ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL1 + i,
+- (unsigned int *)(buf + i));
+- if (ret) {
+- dev_err(dev, "%s: read error\n", __func__);
+- return ret;
+- }
+- }
+-
+- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_SC,
+- (buf + PCF2127_REG_SC),
+- ARRAY_SIZE(buf) - PCF2127_REG_SC);
++ /*
++ * Avoid reading CTRL2 register as it causes WD_VAL register
++ * value to reset to 0 which means watchdog is stopped.
++ */
++ ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_CTRL3,
++ (buf + PCF2127_REG_CTRL3),
++ ARRAY_SIZE(buf) - PCF2127_REG_CTRL3);
+ if (ret) {
+ dev_err(dev, "%s: read error\n", __func__);
+ return ret;
+@@ -91,14 +85,12 @@ static int pcf2127_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ }
+
+ dev_dbg(dev,
+- "%s: raw data is cr1=%02x, cr2=%02x, cr3=%02x, "
+- "sec=%02x, min=%02x, hr=%02x, "
++ "%s: raw data is cr3=%02x, sec=%02x, min=%02x, hr=%02x, "
+ "mday=%02x, wday=%02x, mon=%02x, year=%02x\n",
+- __func__,
+- buf[0], buf[1], buf[2],
+- buf[3], buf[4], buf[5],
+- buf[6], buf[7], buf[8], buf[9]);
+-
++ __func__, buf[PCF2127_REG_CTRL3], buf[PCF2127_REG_SC],
++ buf[PCF2127_REG_MN], buf[PCF2127_REG_HR],
++ buf[PCF2127_REG_DM], buf[PCF2127_REG_DW],
++ buf[PCF2127_REG_MO], buf[PCF2127_REG_YR]);
+
+ tm->tm_sec = bcd2bin(buf[PCF2127_REG_SC] & 0x7F);
+ tm->tm_min = bcd2bin(buf[PCF2127_REG_MN] & 0x7F);
+diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
+index 3efc86c25d27..d8adf69b6697 100644
+--- a/drivers/rtc/rtc-pcf8563.c
++++ b/drivers/rtc/rtc-pcf8563.c
+@@ -563,7 +563,6 @@ static int pcf8563_probe(struct i2c_client *client,
+ struct pcf8563 *pcf8563;
+ int err;
+ unsigned char buf;
+- unsigned char alm_pending;
+
+ dev_dbg(&client->dev, "%s\n", __func__);
+
+@@ -587,13 +586,13 @@ static int pcf8563_probe(struct i2c_client *client,
+ return err;
+ }
+
+- err = pcf8563_get_alarm_mode(client, NULL, &alm_pending);
+- if (err) {
+- dev_err(&client->dev, "%s: read error\n", __func__);
++ /* Clear flags and disable interrupts */
++ buf = 0;
++ err = pcf8563_write_block_data(client, PCF8563_REG_ST2, 1, &buf);
++ if (err < 0) {
++ dev_err(&client->dev, "%s: write error\n", __func__);
+ return err;
+ }
+- if (alm_pending)
+- pcf8563_set_alarm_mode(client, 0);
+
+ pcf8563->rtc = devm_rtc_device_register(&client->dev,
+ pcf8563_driver.driver.name,
+@@ -605,7 +604,7 @@ static int pcf8563_probe(struct i2c_client *client,
+ if (client->irq > 0) {
+ err = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, pcf8563_irq,
+- IRQF_SHARED|IRQF_ONESHOT|IRQF_TRIGGER_FALLING,
++ IRQF_SHARED | IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ pcf8563_driver.driver.name, client);
+ if (err) {
+ dev_err(&client->dev, "unable to request IRQ %d\n",
+diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
+index 29358a045925..e03104b734fc 100644
+--- a/drivers/rtc/rtc-pm8xxx.c
++++ b/drivers/rtc/rtc-pm8xxx.c
+@@ -213,7 +213,8 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
+ }
+ }
+
+- secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24);
++ secs = value[0] | (value[1] << 8) | (value[2] << 16) |
++ ((unsigned long)value[3] << 24);
+
+ rtc_time_to_tm(secs, tm);
+
+@@ -288,7 +289,8 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+ return rc;
+ }
+
+- secs = value[0] | (value[1] << 8) | (value[2] << 16) | (value[3] << 24);
++ secs = value[0] | (value[1] << 8) | (value[2] << 16) |
++ ((unsigned long)value[3] << 24);
+
+ rtc_time_to_tm(secs, &alarm->time);
+
+diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
+index 3d6174eb32f6..cfe3aece51d1 100644
+--- a/drivers/rtc/rtc-rv3029c2.c
++++ b/drivers/rtc/rtc-rv3029c2.c
+@@ -282,13 +282,13 @@ static int rv3029_eeprom_read(struct device *dev, u8 reg,
+ static int rv3029_eeprom_write(struct device *dev, u8 reg,
+ u8 const buf[], size_t len)
+ {
+- int ret;
++ int ret, err;
+ size_t i;
+ u8 tmp;
+
+- ret = rv3029_eeprom_enter(dev);
+- if (ret < 0)
+- return ret;
++ err = rv3029_eeprom_enter(dev);
++ if (err < 0)
++ return err;
+
+ for (i = 0; i < len; i++, reg++) {
+ ret = rv3029_read_regs(dev, reg, &tmp, 1);
+@@ -304,11 +304,11 @@ static int rv3029_eeprom_write(struct device *dev, u8 reg,
+ break;
+ }
+
+- ret = rv3029_eeprom_exit(dev);
+- if (ret < 0)
+- return ret;
++ err = rv3029_eeprom_exit(dev);
++ if (err < 0)
++ return err;
+
+- return 0;
++ return ret;
+ }
+
+ static int rv3029_eeprom_update_bits(struct device *dev,
+diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
+index c238b190b3c9..aa90004f49e2 100644
+--- a/drivers/s390/net/qeth_l2_main.c
++++ b/drivers/s390/net/qeth_l2_main.c
+@@ -2330,10 +2330,10 @@ static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
+ static void qeth_l2_vnicc_init(struct qeth_card *card)
+ {
+ u32 *timeout = &card->options.vnicc.learning_timeout;
++ bool enable, error = false;
+ unsigned int chars_len, i;
+ unsigned long chars_tmp;
+ u32 sup_cmds, vnicc;
+- bool enable, error;
+
+ QETH_CARD_TEXT(card, 2, "vniccini");
+ /* reset rx_bcast */
+@@ -2354,17 +2354,24 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
+ chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
+ for_each_set_bit(i, &chars_tmp, chars_len) {
+ vnicc = BIT(i);
+- qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds);
+- if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) ||
+- !(sup_cmds & IPA_VNICC_GET_TIMEOUT))
++ if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
++ sup_cmds = 0;
++ error = true;
++ }
++ if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
++ (sup_cmds & IPA_VNICC_GET_TIMEOUT))
++ card->options.vnicc.getset_timeout_sup |= vnicc;
++ else
+ card->options.vnicc.getset_timeout_sup &= ~vnicc;
+- if (!(sup_cmds & IPA_VNICC_ENABLE) ||
+- !(sup_cmds & IPA_VNICC_DISABLE))
++ if ((sup_cmds & IPA_VNICC_ENABLE) &&
++ (sup_cmds & IPA_VNICC_DISABLE))
++ card->options.vnicc.set_char_sup |= vnicc;
++ else
+ card->options.vnicc.set_char_sup &= ~vnicc;
+ }
+ /* enforce assumed default values and recover settings, if changed */
+- error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
+- timeout);
++ error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
++ timeout);
+ chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
+ chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
+ for_each_set_bit(i, &chars_tmp, chars_len) {
+diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c
+index 4e3a50202e8c..d28088218c36 100644
+--- a/drivers/scsi/fnic/fnic_isr.c
++++ b/drivers/scsi/fnic/fnic_isr.c
+@@ -254,7 +254,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
+ int vecs = n + m + o + 1;
+
+ if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs,
+- PCI_IRQ_MSIX) < 0) {
++ PCI_IRQ_MSIX) == vecs) {
+ fnic->rq_count = n;
+ fnic->raw_wq_count = m;
+ fnic->wq_copy_count = o;
+@@ -280,7 +280,7 @@ int fnic_set_intr_mode(struct fnic *fnic)
+ fnic->wq_copy_count >= 1 &&
+ fnic->cq_count >= 3 &&
+ fnic->intr_count >= 1 &&
+- pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) < 0) {
++ pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) == 1) {
+ fnic->rq_count = 1;
+ fnic->raw_wq_count = 1;
+ fnic->wq_copy_count = 1;
+diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
+index 42bcf7f3a0f9..6ba257cbc6d9 100644
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -2603,7 +2603,7 @@ void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
+
+ /* lport lock ? */
+ if (!lport || lport->state == LPORT_ST_DISABLED) {
+- FC_LPORT_DBG(lport, "Receiving frames for an lport that "
++ FC_LIBFC_DBG("Receiving frames for an lport that "
+ "has not been initialized correctly\n");
+ fc_frame_free(fp);
+ return;
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index 2f31d266339f..99469f9057ee 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -3894,12 +3894,12 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
+ /*
+ * The cur_state should not last for more than max_wait secs
+ */
+- for (i = 0; i < max_wait; i++) {
++ for (i = 0; i < max_wait * 50; i++) {
+ curr_abs_state = instance->instancet->
+ read_fw_status_reg(instance->reg_set);
+
+ if (abs_state == curr_abs_state) {
+- msleep(1000);
++ msleep(20);
+ } else
+ break;
+ }
+diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
+index bb20a4a228cf..fff20a370767 100644
+--- a/drivers/scsi/qla2xxx/qla_os.c
++++ b/drivers/scsi/qla2xxx/qla_os.c
+@@ -6967,8 +6967,7 @@ qla2x00_module_init(void)
+ /* Initialize target kmem_cache and mem_pools */
+ ret = qlt_init();
+ if (ret < 0) {
+- kmem_cache_destroy(srb_cachep);
+- return ret;
++ goto destroy_cache;
+ } else if (ret > 0) {
+ /*
+ * If initiator mode is explictly disabled by qlt_init(),
+@@ -6989,11 +6988,10 @@ qla2x00_module_init(void)
+ qla2xxx_transport_template =
+ fc_attach_transport(&qla2xxx_transport_functions);
+ if (!qla2xxx_transport_template) {
+- kmem_cache_destroy(srb_cachep);
+ ql_log(ql_log_fatal, NULL, 0x0002,
+ "fc_attach_transport failed...Failing load!.\n");
+- qlt_exit();
+- return -ENODEV;
++ ret = -ENODEV;
++ goto qlt_exit;
+ }
+
+ apidev_major = register_chrdev(0, QLA2XXX_APIDEV, &apidev_fops);
+@@ -7005,27 +7003,37 @@ qla2x00_module_init(void)
+ qla2xxx_transport_vport_template =
+ fc_attach_transport(&qla2xxx_transport_vport_functions);
+ if (!qla2xxx_transport_vport_template) {
+- kmem_cache_destroy(srb_cachep);
+- qlt_exit();
+- fc_release_transport(qla2xxx_transport_template);
+ ql_log(ql_log_fatal, NULL, 0x0004,
+ "fc_attach_transport vport failed...Failing load!.\n");
+- return -ENODEV;
++ ret = -ENODEV;
++ goto unreg_chrdev;
+ }
+ ql_log(ql_log_info, NULL, 0x0005,
+ "QLogic Fibre Channel HBA Driver: %s.\n",
+ qla2x00_version_str);
+ ret = pci_register_driver(&qla2xxx_pci_driver);
+ if (ret) {
+- kmem_cache_destroy(srb_cachep);
+- qlt_exit();
+- fc_release_transport(qla2xxx_transport_template);
+- fc_release_transport(qla2xxx_transport_vport_template);
+ ql_log(ql_log_fatal, NULL, 0x0006,
+ "pci_register_driver failed...ret=%d Failing load!.\n",
+ ret);
++ goto release_vport_transport;
+ }
+ return ret;
++
++release_vport_transport:
++ fc_release_transport(qla2xxx_transport_vport_template);
++
++unreg_chrdev:
++ if (apidev_major >= 0)
++ unregister_chrdev(apidev_major, QLA2XXX_APIDEV);
++ fc_release_transport(qla2xxx_transport_template);
++
++qlt_exit:
++ qlt_exit();
++
++destroy_cache:
++ kmem_cache_destroy(srb_cachep);
++ return ret;
+ }
+
+ /**
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index e9545411ec5a..95206e227730 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -2233,14 +2233,14 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd,
+ ctio->u.status1.scsi_status |=
+ cpu_to_le16(SS_RESIDUAL_UNDER);
+
+- /* Response code and sense key */
+- put_unaligned_le32(((0x70 << 24) | (sense_key << 8)),
+- (&ctio->u.status1.sense_data)[0]);
++ /* Fixed format sense data. */
++ ctio->u.status1.sense_data[0] = 0x70;
++ ctio->u.status1.sense_data[2] = sense_key;
+ /* Additional sense length */
+- put_unaligned_le32(0x0a, (&ctio->u.status1.sense_data)[1]);
++ ctio->u.status1.sense_data[7] = 0xa;
+ /* ASC and ASCQ */
+- put_unaligned_le32(((asc << 24) | (ascq << 16)),
+- (&ctio->u.status1.sense_data)[3]);
++ ctio->u.status1.sense_data[12] = asc;
++ ctio->u.status1.sense_data[13] = ascq;
+
+ /* Memory Barrier */
+ wmb();
+@@ -2290,7 +2290,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
+ mcmd->orig_iocb.imm_ntfy.u.isp24.status_subcode ==
+ ELS_TPRLO) {
+ ql_dbg(ql_dbg_disc, vha, 0x2106,
+- "TM response logo %phC status %#x state %#x",
++ "TM response logo %8phC status %#x state %#x",
+ mcmd->sess->port_name, mcmd->fc_tm_rsp,
+ mcmd->flags);
+ qlt_schedule_sess_for_deletion(mcmd->sess);
+@@ -5334,11 +5334,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ se_sess = sess->se_sess;
+
+ tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
+- if (tag < 0)
+- return;
+-
+- cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+- if (!cmd) {
++ if (tag < 0) {
+ ql_dbg(ql_dbg_io, vha, 0x3009,
+ "qla_target(%d): %s: Allocation of cmd failed\n",
+ vha->vp_idx, __func__);
+@@ -5353,6 +5349,7 @@ qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+ return;
+ }
+
++ cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+ memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+ qlt_incr_num_pend_cmds(vha);
+diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+index 6289965c42e9..05421d029dff 100644
+--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
++++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+@@ -54,12 +54,12 @@ static int meson_gx_pwrc_vpu_power_off(struct generic_pm_domain *genpd)
+ /* Power Down Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+- 0x2 << i, 0x3 << i);
++ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+- 0x2 << i, 0x3 << i);
++ 0x3 << i, 0x3 << i);
+ udelay(5);
+ }
+ for (i = 8; i < 16; i++) {
+@@ -108,13 +108,13 @@ static int meson_gx_pwrc_vpu_power_on(struct generic_pm_domain *genpd)
+ /* Power Up Memories */
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG0,
+- 0x2 << i, 0);
++ 0x3 << i, 0);
+ udelay(5);
+ }
+
+ for (i = 0; i < 32; i += 2) {
+ regmap_update_bits(pd->regmap_hhi, HHI_VPU_MEM_PD_REG1,
+- 0x2 << i, 0);
++ 0x3 << i, 0);
+ udelay(5);
+ }
+
+diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
+index 37ea0a1c24c8..1ae339f5eadb 100644
+--- a/drivers/soc/amlogic/meson-gx-socinfo.c
++++ b/drivers/soc/amlogic/meson-gx-socinfo.c
+@@ -43,20 +43,21 @@ static const struct meson_gx_package_id {
+ const char *name;
+ unsigned int major_id;
+ unsigned int pack_id;
++ unsigned int pack_mask;
+ } soc_packages[] = {
+- { "S905", 0x1f, 0 },
+- { "S905H", 0x1f, 0x13 },
+- { "S905M", 0x1f, 0x20 },
+- { "S905D", 0x21, 0 },
+- { "S905X", 0x21, 0x80 },
+- { "S905W", 0x21, 0xa0 },
+- { "S905L", 0x21, 0xc0 },
+- { "S905M2", 0x21, 0xe0 },
+- { "S912", 0x22, 0 },
+- { "962X", 0x24, 0x10 },
+- { "962E", 0x24, 0x20 },
+- { "A113X", 0x25, 0x37 },
+- { "A113D", 0x25, 0x22 },
++ { "S905", 0x1f, 0, 0x20 }, /* pack_id != 0x20 */
++ { "S905H", 0x1f, 0x3, 0xf }, /* pack_id & 0xf == 0x3 */
++ { "S905M", 0x1f, 0x20, 0xf0 }, /* pack_id == 0x20 */
++ { "S905D", 0x21, 0, 0xf0 },
++ { "S905X", 0x21, 0x80, 0xf0 },
++ { "S905W", 0x21, 0xa0, 0xf0 },
++ { "S905L", 0x21, 0xc0, 0xf0 },
++ { "S905M2", 0x21, 0xe0, 0xf0 },
++ { "S912", 0x22, 0, 0x0 }, /* Only S912 is known for GXM */
++ { "962X", 0x24, 0x10, 0xf0 },
++ { "962E", 0x24, 0x20, 0xf0 },
++ { "A113X", 0x25, 0x37, 0xff },
++ { "A113D", 0x25, 0x22, 0xff },
+ };
+
+ static inline unsigned int socinfo_to_major(u32 socinfo)
+@@ -81,13 +82,14 @@ static inline unsigned int socinfo_to_misc(u32 socinfo)
+
+ static const char *socinfo_to_package_id(u32 socinfo)
+ {
+- unsigned int pack = socinfo_to_pack(socinfo) & 0xf0;
++ unsigned int pack = socinfo_to_pack(socinfo);
+ unsigned int major = socinfo_to_major(socinfo);
+ int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(soc_packages) ; ++i) {
+ if (soc_packages[i].major_id == major &&
+- soc_packages[i].pack_id == pack)
++ soc_packages[i].pack_id ==
++ (pack & soc_packages[i].pack_mask))
+ return soc_packages[i].name;
+ }
+
+diff --git a/drivers/soc/fsl/qe/gpio.c b/drivers/soc/fsl/qe/gpio.c
+index 819bed0f5667..51b3a47b5a55 100644
+--- a/drivers/soc/fsl/qe/gpio.c
++++ b/drivers/soc/fsl/qe/gpio.c
+@@ -179,8 +179,10 @@ struct qe_pin *qe_pin_request(struct device_node *np, int index)
+ if (err < 0)
+ goto err0;
+ gc = gpio_to_chip(err);
+- if (WARN_ON(!gc))
++ if (WARN_ON(!gc)) {
++ err = -ENODEV;
+ goto err0;
++ }
+
+ if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
+ pr_debug("%s: tried to get a non-qe pin\n", __func__);
+diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
+index a6f646295f06..78d73ec587e1 100644
+--- a/drivers/soc/qcom/cmd-db.c
++++ b/drivers/soc/qcom/cmd-db.c
+@@ -283,8 +283,8 @@ static int cmd_db_dev_probe(struct platform_device *pdev)
+ }
+
+ cmd_db_header = memremap(rmem->base, rmem->size, MEMREMAP_WB);
+- if (IS_ERR_OR_NULL(cmd_db_header)) {
+- ret = PTR_ERR(cmd_db_header);
++ if (!cmd_db_header) {
++ ret = -ENOMEM;
+ cmd_db_header = NULL;
+ return ret;
+ }
+diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
+index 584bcb018a62..285a6f463013 100644
+--- a/drivers/spi/spi-bcm-qspi.c
++++ b/drivers/spi/spi-bcm-qspi.c
+@@ -354,7 +354,7 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
+ {
+ int bpc = 0, bpp = 0;
+ u8 command = op->cmd.opcode;
+- int width = op->cmd.buswidth ? op->cmd.buswidth : SPI_NBITS_SINGLE;
++ int width = op->data.buswidth ? op->data.buswidth : SPI_NBITS_SINGLE;
+ int addrlen = op->addr.nbytes;
+ int flex_mode = 1;
+
+@@ -992,7 +992,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
+ if (mspi_read)
+ return bcm_qspi_mspi_exec_mem_op(spi, op);
+
+- ret = bcm_qspi_bspi_set_mode(qspi, op, -1);
++ ret = bcm_qspi_bspi_set_mode(qspi, op, 0);
+
+ if (!ret)
+ ret = bcm_qspi_bspi_exec_mem_op(spi, op);
+diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
+index 12c1fa5b06c5..c63ed402cf86 100644
+--- a/drivers/spi/spi-bcm2835aux.c
++++ b/drivers/spi/spi-bcm2835aux.c
+@@ -416,7 +416,18 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, master);
+ master->mode_bits = (SPI_CPOL | SPI_CS_HIGH | SPI_NO_CS);
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+- master->num_chipselect = -1;
++ /* even though the driver never officially supported native CS
++ * allow a single native CS for legacy DT support purposes when
++ * no cs-gpio is configured.
++ * Known limitations for native cs are:
++	 * * multiple chip-selects: cs0-cs2 are all simultaneously asserted
++	 *   whenever there is a transfer - this even includes SPI_NO_CS
++	 * * SPI_CS_HIGH: is ignored - cs is always asserted low
++ * * cs_change: cs is deasserted after each spi_transfer
++ * * cs_delay_usec: cs is always deasserted one SCK cycle after
++ * a spi_transfer
++ */
++ master->num_chipselect = 1;
+ master->transfer_one = bcm2835aux_spi_transfer_one;
+ master->handle_err = bcm2835aux_spi_handle_err;
+ master->prepare_message = bcm2835aux_spi_prepare_message;
+diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
+index 7c88f74f7f47..94cc0a152449 100644
+--- a/drivers/spi/spi-cadence.c
++++ b/drivers/spi/spi-cadence.c
+@@ -584,11 +584,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
+ goto clk_dis_apb;
+ }
+
+- pm_runtime_use_autosuspend(&pdev->dev);
+- pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+- pm_runtime_set_active(&pdev->dev);
+- pm_runtime_enable(&pdev->dev);
+-
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+ if (ret < 0)
+ master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
+@@ -603,8 +598,10 @@ static int cdns_spi_probe(struct platform_device *pdev)
+ /* SPI controller initializations */
+ cdns_spi_init_hw(xspi);
+
+- pm_runtime_mark_last_busy(&pdev->dev);
+- pm_runtime_put_autosuspend(&pdev->dev);
++ pm_runtime_set_active(&pdev->dev);
++ pm_runtime_enable(&pdev->dev);
++ pm_runtime_use_autosuspend(&pdev->dev);
++ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
+index 8b79e36fab21..cd784552de7f 100644
+--- a/drivers/spi/spi-fsl-spi.c
++++ b/drivers/spi/spi-fsl-spi.c
+@@ -407,7 +407,6 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
+ }
+
+ m->status = status;
+- spi_finalize_current_message(master);
+
+ if (status || !cs_change) {
+ ndelay(nsecs);
+@@ -415,6 +414,7 @@ static int fsl_spi_do_one_msg(struct spi_master *master,
+ }
+
+ fsl_spi_setup_transfer(spi, NULL);
++ spi_finalize_current_message(master);
+ return 0;
+ }
+
+diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
+index a1888dc6a938..09cfae3abce2 100644
+--- a/drivers/spi/spi-tegra114.c
++++ b/drivers/spi/spi-tegra114.c
+@@ -307,10 +307,16 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
++
++ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ } else {
++ unsigned int write_bytes;
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
++ if (nbytes > t->len - tspi->cur_pos)
++ nbytes = t->len - tspi->cur_pos;
++ write_bytes = nbytes;
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+
+@@ -319,8 +325,10 @@ static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
++
++ tspi->cur_tx_pos += write_bytes;
+ }
+- tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
++
+ return written_words;
+ }
+
+@@ -344,20 +352,27 @@ static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+- tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
++ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ } else {
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
++ u8 bytes_per_word = tspi->bytes_per_word;
++ unsigned int read_bytes;
+
++ len = rx_full_count * bytes_per_word;
++ if (len > t->len - tspi->cur_pos)
++ len = t->len - tspi->cur_pos;
++ read_bytes = len;
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
+
+- for (i = 0; (i < tspi->bytes_per_word); i++)
++ for (i = 0; len && (i < bytes_per_word); i++, len--)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+- tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
++ tspi->cur_rx_pos += read_bytes;
+ }
++
+ return read_words;
+ }
+
+@@ -372,12 +387,17 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
++ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
++ unsigned int write_bytes;
+
++ if (consume > t->len - tspi->cur_pos)
++ consume = t->len - tspi->cur_pos;
++ write_bytes = consume;
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = 0;
+
+@@ -386,8 +406,9 @@ static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
++
++ tspi->cur_tx_pos += write_bytes;
+ }
+- tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Make the dma buffer to read by dma */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+@@ -405,20 +426,28 @@ static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
++ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
++ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
++ unsigned int read_bytes;
+
++ if (consume > t->len - tspi->cur_pos)
++ consume = t->len - tspi->cur_pos;
++ read_bytes = consume;
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
+
+- for (i = 0; (i < tspi->bytes_per_word); i++)
++ for (i = 0; consume && (i < tspi->bytes_per_word);
++ i++, consume--)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
++
++ tspi->cur_rx_pos += read_bytes;
+ }
+- tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Make the dma buffer to read by dma */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+@@ -470,22 +499,39 @@ static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
+ return 0;
+ }
+
+-static int tegra_spi_start_dma_based_transfer(
+- struct tegra_spi_data *tspi, struct spi_transfer *t)
++static int tegra_spi_flush_fifos(struct tegra_spi_data *tspi)
+ {
+- u32 val;
+- unsigned int len;
+- int ret = 0;
++ unsigned long timeout = jiffies + HZ;
+ u32 status;
+
+- /* Make sure that Rx and Tx fifo are empty */
+ status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
+- dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+- (unsigned)status);
+- return -EIO;
++ status |= SPI_RX_FIFO_FLUSH | SPI_TX_FIFO_FLUSH;
++ tegra_spi_writel(tspi, status, SPI_FIFO_STATUS);
++ while ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
++ status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
++ if (time_after(jiffies, timeout)) {
++ dev_err(tspi->dev,
++ "timeout waiting for fifo flush\n");
++ return -EIO;
++ }
++
++ udelay(1);
++ }
+ }
+
++ return 0;
++}
++
++static int tegra_spi_start_dma_based_transfer(
++ struct tegra_spi_data *tspi, struct spi_transfer *t)
++{
++ u32 val;
++ unsigned int len;
++ int ret = 0;
++ u8 dma_burst;
++ struct dma_slave_config dma_sconfig = {0};
++
+ val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
+ tegra_spi_writel(tspi, val, SPI_DMA_BLK);
+
+@@ -496,12 +542,16 @@ static int tegra_spi_start_dma_based_transfer(
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
+- if (len & 0xF)
++ if (len & 0xF) {
+ val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
+- else if (((len) >> 4) & 0x1)
++ dma_burst = 1;
++ } else if (((len) >> 4) & 0x1) {
+ val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
+- else
++ dma_burst = 4;
++ } else {
+ val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
++ dma_burst = 8;
++ }
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
+@@ -512,7 +562,18 @@ static int tegra_spi_start_dma_based_transfer(
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ tspi->dma_control_reg = val;
+
++ dma_sconfig.device_fc = true;
+ if (tspi->cur_direction & DATA_DIR_TX) {
++ dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
++ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ dma_sconfig.dst_maxburst = dma_burst;
++ ret = dmaengine_slave_config(tspi->tx_dma_chan, &dma_sconfig);
++ if (ret < 0) {
++ dev_err(tspi->dev,
++ "DMA slave config failed: %d\n", ret);
++ return ret;
++ }
++
+ tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ ret = tegra_spi_start_tx_dma(tspi, len);
+ if (ret < 0) {
+@@ -523,6 +584,16 @@ static int tegra_spi_start_dma_based_transfer(
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
++ dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
++ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
++ dma_sconfig.src_maxburst = dma_burst;
++ ret = dmaengine_slave_config(tspi->rx_dma_chan, &dma_sconfig);
++ if (ret < 0) {
++ dev_err(tspi->dev,
++ "DMA slave config failed: %d\n", ret);
++ return ret;
++ }
++
+ /* Make the dma buffer to read by dma */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+@@ -582,7 +653,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+- struct dma_slave_config dma_sconfig;
+
+ dma_chan = dma_request_slave_channel_reason(tspi->dev,
+ dma_to_memory ? "rx" : "tx");
+@@ -602,19 +672,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
+ return -ENOMEM;
+ }
+
+- if (dma_to_memory) {
+- dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
+- dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+- dma_sconfig.src_maxburst = 0;
+- } else {
+- dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
+- dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+- dma_sconfig.dst_maxburst = 0;
+- }
+-
+- ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+- if (ret)
+- goto scrub;
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+@@ -625,11 +682,6 @@ static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+-
+-scrub:
+- dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+- dma_release_channel(dma_chan);
+- return ret;
+ }
+
+ static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
+@@ -730,6 +782,8 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
+
+ if (tspi->is_packed)
+ command1 |= SPI_PACKED;
++ else
++ command1 &= ~SPI_PACKED;
+
+ command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
+ tspi->cur_direction = 0;
+@@ -748,6 +802,9 @@ static int tegra_spi_start_transfer_one(struct spi_device *spi,
+ dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
+ tspi->def_command1_reg, (unsigned)command1);
+
++ ret = tegra_spi_flush_fifos(tspi);
++ if (ret < 0)
++ return ret;
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ ret = tegra_spi_start_dma_based_transfer(tspi, t);
+ else
+@@ -838,7 +895,17 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi transfer timeout, err %d\n", ret);
++ if (tspi->is_curr_dma_xfer &&
++ (tspi->cur_direction & DATA_DIR_TX))
++ dmaengine_terminate_all(tspi->tx_dma_chan);
++ if (tspi->is_curr_dma_xfer &&
++ (tspi->cur_direction & DATA_DIR_RX))
++ dmaengine_terminate_all(tspi->rx_dma_chan);
+ ret = -EIO;
++ tegra_spi_flush_fifos(tspi);
++ reset_control_assert(tspi->rst);
++ udelay(2);
++ reset_control_deassert(tspi->rst);
+ goto complete_xfer;
+ }
+
+@@ -889,6 +956,7 @@ static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
+ tspi->status_reg);
+ dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
++ tegra_spi_flush_fifos(tspi);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+@@ -961,6 +1029,7 @@ static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
+ tspi->status_reg);
+ dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
++ tegra_spi_flush_fifos(tspi);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
+index 4389ab80c23e..fa730a871d25 100644
+--- a/drivers/spi/spi-topcliff-pch.c
++++ b/drivers/spi/spi-topcliff-pch.c
+@@ -1008,6 +1008,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+
+ /* RX */
+ dma->sg_rx_p = kcalloc(num, sizeof(*dma->sg_rx_p), GFP_ATOMIC);
++ if (!dma->sg_rx_p)
++ return;
++
+ sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_rx_p;
+@@ -1068,6 +1071,9 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+ }
+
+ dma->sg_tx_p = kcalloc(num, sizeof(*dma->sg_tx_p), GFP_ATOMIC);
++ if (!dma->sg_tx_p)
++ return;
++
+ sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_tx_p;
+diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
+index 22571abcaa4e..034d86869772 100644
+--- a/drivers/staging/android/vsoc.c
++++ b/drivers/staging/android/vsoc.c
+@@ -260,7 +260,8 @@ do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
+ atomic_t *owner_ptr = NULL;
+ struct vsoc_device_region *managed_region_p;
+
+- if (copy_from_user(&np->permission, &arg->perm, sizeof(*np)) ||
++ if (copy_from_user(&np->permission,
++ &arg->perm, sizeof(np->permission)) ||
+ copy_from_user(&managed_fd,
+ &arg->managed_region_fd, sizeof(managed_fd))) {
+ return -EFAULT;
+diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
+index d799b1b55de3..747518c29542 100644
+--- a/drivers/staging/comedi/drivers/ni_mio_common.c
++++ b/drivers/staging/comedi/drivers/ni_mio_common.c
+@@ -4984,7 +4984,10 @@ static int ni_valid_rtsi_output_source(struct comedi_device *dev,
+ case NI_RTSI_OUTPUT_G_SRC0:
+ case NI_RTSI_OUTPUT_G_GATE0:
+ case NI_RTSI_OUTPUT_RGOUT0:
+- case NI_RTSI_OUTPUT_RTSI_BRD_0:
++ case NI_RTSI_OUTPUT_RTSI_BRD(0):
++ case NI_RTSI_OUTPUT_RTSI_BRD(1):
++ case NI_RTSI_OUTPUT_RTSI_BRD(2):
++ case NI_RTSI_OUTPUT_RTSI_BRD(3):
+ return 1;
+ case NI_RTSI_OUTPUT_RTSI_OSC:
+ return (devpriv->is_m_series) ? 1 : 0;
+@@ -5005,11 +5008,18 @@ static int ni_set_rtsi_routing(struct comedi_device *dev,
+ devpriv->rtsi_trig_a_output_reg |= NISTC_RTSI_TRIG(chan, src);
+ ni_stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
+ NISTC_RTSI_TRIGA_OUT_REG);
+- } else if (chan < 8) {
++ } else if (chan < NISTC_RTSI_TRIG_NUM_CHAN(devpriv->is_m_series)) {
+ devpriv->rtsi_trig_b_output_reg &= ~NISTC_RTSI_TRIG_MASK(chan);
+ devpriv->rtsi_trig_b_output_reg |= NISTC_RTSI_TRIG(chan, src);
+ ni_stc_writew(dev, devpriv->rtsi_trig_b_output_reg,
+ NISTC_RTSI_TRIGB_OUT_REG);
++ } else if (chan != NISTC_RTSI_TRIG_OLD_CLK_CHAN) {
++ /* probably should never reach this, since the
++ * ni_valid_rtsi_output_source above errors out if chan is too
++ * high
++ */
++ dev_err(dev->class_dev, "%s: unknown rtsi channel\n", __func__);
++ return -EINVAL;
+ }
+ return 2;
+ }
+@@ -5025,12 +5035,12 @@ static unsigned int ni_get_rtsi_routing(struct comedi_device *dev,
+ } else if (chan < NISTC_RTSI_TRIG_NUM_CHAN(devpriv->is_m_series)) {
+ return NISTC_RTSI_TRIG_TO_SRC(chan,
+ devpriv->rtsi_trig_b_output_reg);
+- } else {
+- if (chan == NISTC_RTSI_TRIG_OLD_CLK_CHAN)
+- return NI_RTSI_OUTPUT_RTSI_OSC;
+- dev_err(dev->class_dev, "bug! should never get here?\n");
+- return 0;
++ } else if (chan == NISTC_RTSI_TRIG_OLD_CLK_CHAN) {
++ return NI_RTSI_OUTPUT_RTSI_OSC;
+ }
++
++ dev_err(dev->class_dev, "%s: unknown rtsi channel\n", __func__);
++ return -EINVAL;
+ }
+
+ static int ni_rtsi_insn_config(struct comedi_device *dev,
+diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
+index 010ae1e9c7fb..40680eaf3974 100644
+--- a/drivers/staging/greybus/light.c
++++ b/drivers/staging/greybus/light.c
+@@ -1098,21 +1098,21 @@ static void gb_lights_channel_release(struct gb_channel *channel)
+ static void gb_lights_light_release(struct gb_light *light)
+ {
+ int i;
+- int count;
+
+ light->ready = false;
+
+- count = light->channels_count;
+-
+ if (light->has_flash)
+ gb_lights_light_v4l2_unregister(light);
++ light->has_flash = false;
+
+- for (i = 0; i < count; i++) {
++ for (i = 0; i < light->channels_count; i++)
+ gb_lights_channel_release(&light->channels[i]);
+- light->channels_count--;
+- }
++ light->channels_count = 0;
++
+ kfree(light->channels);
++ light->channels = NULL;
+ kfree(light->name);
++ light->name = NULL;
+ }
+
+ static void gb_lights_release(struct gb_lights *glights)
+diff --git a/drivers/staging/most/cdev/cdev.c b/drivers/staging/most/cdev/cdev.c
+index 0b48677fa958..27d58b55b810 100644
+--- a/drivers/staging/most/cdev/cdev.c
++++ b/drivers/staging/most/cdev/cdev.c
+@@ -453,7 +453,9 @@ static int comp_probe(struct most_interface *iface, int channel_id,
+ c->devno = MKDEV(comp.major, current_minor);
+ cdev_init(&c->cdev, &channel_fops);
+ c->cdev.owner = THIS_MODULE;
+- cdev_add(&c->cdev, c->devno, 1);
++ retval = cdev_add(&c->cdev, c->devno, 1);
++ if (retval < 0)
++ goto err_free_c;
+ c->iface = iface;
+ c->cfg = cfg;
+ c->channel_id = channel_id;
+@@ -485,6 +487,7 @@ error_create_device:
+ list_del(&c->list);
+ error_alloc_kfifo:
+ cdev_del(&c->cdev);
++err_free_c:
+ kfree(c);
+ error_alloc_channel:
+ ida_simple_remove(&comp.minor_id, current_minor);
+diff --git a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
+index 53f55f129a76..ddbeff8224ab 100644
+--- a/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
++++ b/drivers/staging/rtlwifi/halmac/halmac_88xx/halmac_func_88xx.c
+@@ -2466,8 +2466,11 @@ halmac_parse_psd_data_88xx(struct halmac_adapter *halmac_adapter, u8 *c2h_buf,
+ segment_size = (u8)PSD_DATA_GET_SEGMENT_SIZE(c2h_buf);
+ psd_set->data_size = total_size;
+
+- if (!psd_set->data)
++ if (!psd_set->data) {
+ psd_set->data = kzalloc(psd_set->data_size, GFP_KERNEL);
++ if (!psd_set->data)
++ return HALMAC_RET_MALLOC_FAIL;
++ }
+
+ if (segment_id == 0)
+ psd_set->segment_size = segment_size;
+diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+index 455082867246..edf25922b12d 100644
+--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
++++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+@@ -47,6 +47,7 @@ MODULE_DESCRIPTION("Broadcom 2835 MMAL video capture");
+ MODULE_AUTHOR("Vincent Sanders");
+ MODULE_LICENSE("GPL");
+ MODULE_VERSION(BM2835_MMAL_VERSION);
++MODULE_ALIAS("platform:bcm2835-camera");
+
+ int bcm2835_v4l2_debug;
+ module_param_named(debug, bcm2835_v4l2_debug, int, 0644);
+@@ -1854,6 +1855,12 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
+ num_cameras = get_num_cameras(instance,
+ resolutions,
+ MAX_BCM2835_CAMERAS);
++
++ if (num_cameras < 1) {
++ ret = -ENODEV;
++ goto cleanup_mmal;
++ }
++
+ if (num_cameras > MAX_BCM2835_CAMERAS)
+ num_cameras = MAX_BCM2835_CAMERAS;
+
+@@ -1953,6 +1960,9 @@ cleanup_gdev:
+ pr_info("%s: error %d while loading driver\n",
+ BM2835_MMAL_MODULE_NAME, ret);
+
++cleanup_mmal:
++ vchiq_mmal_finalise(instance);
++
+ return ret;
+ }
+
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index e9ff2a7c0c0e..22e97a93728d 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -85,7 +85,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
+ goto out_unlock;
+ }
+
+- se_cmd->se_lun = rcu_dereference(deve->se_lun);
++ se_cmd->se_lun = se_lun;
+ se_cmd->pr_res_key = deve->pr_res_key;
+ se_cmd->orig_fe_lun = unpacked_lun;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+@@ -176,7 +176,7 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
+ goto out_unlock;
+ }
+
+- se_cmd->se_lun = rcu_dereference(deve->se_lun);
++ se_cmd->se_lun = se_lun;
+ se_cmd->pr_res_key = deve->pr_res_key;
+ se_cmd->orig_fe_lun = unpacked_lun;
+ se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
+diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
+index dfd23245f778..9df6b7260466 100644
+--- a/drivers/thermal/cpu_cooling.c
++++ b/drivers/thermal/cpu_cooling.c
+@@ -458,7 +458,7 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev,
+ load = 0;
+
+ total_load += load;
+- if (trace_thermal_power_cpu_limit_enabled() && load_cpu)
++ if (load_cpu)
+ load_cpu[i] = load;
+
+ i++;
+diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
+index 0691f260f6ea..f64643629d8b 100644
+--- a/drivers/thermal/mtk_thermal.c
++++ b/drivers/thermal/mtk_thermal.c
+@@ -431,7 +431,8 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
+ u32 raw;
+
+ for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
+- raw = readl(mt->thermal_base + conf->msr[i]);
++ raw = readl(mt->thermal_base +
++ conf->msr[conf->bank_data[bank->id].sensors[i]]);
+
+ temp = raw_to_mcelsius(mt,
+ conf->bank_data[bank->id].sensors[i],
+@@ -568,7 +569,8 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
+
+ for (i = 0; i < conf->bank_data[num].num_sensors; i++)
+ writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
+- mt->thermal_base + conf->adcpnp[i]);
++ mt->thermal_base +
++ conf->adcpnp[conf->bank_data[num].sensors[i]]);
+
+ writel((1 << conf->bank_data[num].num_sensors) - 1,
+ mt->thermal_base + TEMP_MONCTL0);
+diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
+index 704c8ad045bb..8f553453dd7f 100644
+--- a/drivers/thermal/rcar_gen3_thermal.c
++++ b/drivers/thermal/rcar_gen3_thermal.c
+@@ -14,7 +14,6 @@
+ #include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/pm_runtime.h>
+-#include <linux/spinlock.h>
+ #include <linux/sys_soc.h>
+ #include <linux/thermal.h>
+
+@@ -81,7 +80,6 @@ struct rcar_gen3_thermal_tsc {
+ struct rcar_gen3_thermal_priv {
+ struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM];
+ unsigned int num_tscs;
+- spinlock_t lock; /* Protect interrupts on and off */
+ void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc);
+ };
+
+@@ -231,38 +229,16 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
+ {
+ struct rcar_gen3_thermal_priv *priv = data;
+ u32 status;
+- int i, ret = IRQ_HANDLED;
++ int i;
+
+- spin_lock(&priv->lock);
+ for (i = 0; i < priv->num_tscs; i++) {
+ status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
+ rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
+ if (status)
+- ret = IRQ_WAKE_THREAD;
++ thermal_zone_device_update(priv->tscs[i]->zone,
++ THERMAL_EVENT_UNSPECIFIED);
+ }
+
+- if (ret == IRQ_WAKE_THREAD)
+- rcar_thermal_irq_set(priv, false);
+-
+- spin_unlock(&priv->lock);
+-
+- return ret;
+-}
+-
+-static irqreturn_t rcar_gen3_thermal_irq_thread(int irq, void *data)
+-{
+- struct rcar_gen3_thermal_priv *priv = data;
+- unsigned long flags;
+- int i;
+-
+- for (i = 0; i < priv->num_tscs; i++)
+- thermal_zone_device_update(priv->tscs[i]->zone,
+- THERMAL_EVENT_UNSPECIFIED);
+-
+- spin_lock_irqsave(&priv->lock, flags);
+- rcar_thermal_irq_set(priv, true);
+- spin_unlock_irqrestore(&priv->lock, flags);
+-
+ return IRQ_HANDLED;
+ }
+
+@@ -364,8 +340,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
+ if (soc_device_match(r8a7795es1))
+ priv->thermal_init = rcar_gen3_thermal_init_r8a7795es1;
+
+- spin_lock_init(&priv->lock);
+-
+ platform_set_drvdata(pdev, priv);
+
+ /*
+@@ -383,9 +357,9 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
+ if (!irqname)
+ return -ENOMEM;
+
+- ret = devm_request_threaded_irq(dev, irq, rcar_gen3_thermal_irq,
+- rcar_gen3_thermal_irq_thread,
+- IRQF_SHARED, irqname, priv);
++ ret = devm_request_threaded_irq(dev, irq, NULL,
++ rcar_gen3_thermal_irq,
++ IRQF_ONESHOT, irqname, priv);
+ if (ret)
+ return ret;
+ }
+diff --git a/drivers/tty/ipwireless/hardware.c b/drivers/tty/ipwireless/hardware.c
+index b0baa4ce10f9..6bbf35682d53 100644
+--- a/drivers/tty/ipwireless/hardware.c
++++ b/drivers/tty/ipwireless/hardware.c
+@@ -1516,6 +1516,8 @@ static void ipw_send_setup_packet(struct ipw_hardware *hw)
+ sizeof(struct ipw_setup_get_version_query_packet),
+ ADDR_SETUP_PROT, TL_PROTOCOLID_SETUP,
+ TL_SETUP_SIGNO_GET_VERSION_QRY);
++ if (!ver_packet)
++ return;
+ ver_packet->header.length = sizeof(struct tl_setup_get_version_qry);
+
+ /*
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index ee8a5cb61a5f..2daccb10ae2f 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -528,26 +528,26 @@ static int lpuart32_poll_init(struct uart_port *port)
+ spin_lock_irqsave(&sport->port.lock, flags);
+
+ /* Disable Rx & Tx */
+- writel(0, sport->port.membase + UARTCTRL);
++ lpuart32_write(&sport->port, UARTCTRL, 0);
+
+- temp = readl(sport->port.membase + UARTFIFO);
++ temp = lpuart32_read(&sport->port, UARTFIFO);
+
+ /* Enable Rx and Tx FIFO */
+- writel(temp | UARTFIFO_RXFE | UARTFIFO_TXFE,
+- sport->port.membase + UARTFIFO);
++ lpuart32_write(&sport->port, UARTFIFO,
++ temp | UARTFIFO_RXFE | UARTFIFO_TXFE);
+
+ /* flush Tx and Rx FIFO */
+- writel(UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH,
+- sport->port.membase + UARTFIFO);
++ lpuart32_write(&sport->port, UARTFIFO,
++ UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH);
+
+ /* explicitly clear RDRF */
+- if (readl(sport->port.membase + UARTSTAT) & UARTSTAT_RDRF) {
+- readl(sport->port.membase + UARTDATA);
+- writel(UARTFIFO_RXUF, sport->port.membase + UARTFIFO);
++ if (lpuart32_read(&sport->port, UARTSTAT) & UARTSTAT_RDRF) {
++ lpuart32_read(&sport->port, UARTDATA);
++ lpuart32_write(&sport->port, UARTFIFO, UARTFIFO_RXUF);
+ }
+
+ /* Enable Rx and Tx */
+- writel(UARTCTRL_RE | UARTCTRL_TE, sport->port.membase + UARTCTRL);
++ lpuart32_write(&sport->port, UARTCTRL, UARTCTRL_RE | UARTCTRL_TE);
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ return 0;
+@@ -555,18 +555,18 @@ static int lpuart32_poll_init(struct uart_port *port)
+
+ static void lpuart32_poll_put_char(struct uart_port *port, unsigned char c)
+ {
+- while (!(readl(port->membase + UARTSTAT) & UARTSTAT_TDRE))
++ while (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_TDRE))
+ barrier();
+
+- writel(c, port->membase + UARTDATA);
++ lpuart32_write(port, UARTDATA, c);
+ }
+
+ static int lpuart32_poll_get_char(struct uart_port *port)
+ {
+- if (!(readl(port->membase + UARTSTAT) & UARTSTAT_RDRF))
++ if (!(lpuart32_read(port, UARTSTAT) & UARTSTAT_RDRF))
+ return NO_POLL_CHAR;
+
+- return readl(port->membase + UARTDATA);
++ return lpuart32_read(port, UARTDATA);
+ }
+ #endif
+
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index e8d7a7bb4339..bce4ac1787ad 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -105,9 +105,7 @@ static int stm32_config_rs485(struct uart_port *port,
+ struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ u32 usartdiv, baud, cr1, cr3;
+ bool over8;
+- unsigned long flags;
+
+- spin_lock_irqsave(&port->lock, flags);
+ stm32_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+
+ port->rs485 = *rs485conf;
+@@ -147,7 +145,6 @@ static int stm32_config_rs485(struct uart_port *port,
+ }
+
+ stm32_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+- spin_unlock_irqrestore(&port->lock, flags);
+
+ return 0;
+ }
+@@ -194,8 +191,8 @@ static int stm32_pending_rx(struct uart_port *port, u32 *sr, int *last_res,
+ return 0;
+ }
+
+-static unsigned long
+-stm32_get_char(struct uart_port *port, u32 *sr, int *last_res)
++static unsigned long stm32_get_char(struct uart_port *port, u32 *sr,
++ int *last_res)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+@@ -205,10 +202,13 @@ stm32_get_char(struct uart_port *port, u32 *sr, int *last_res)
+ c = stm32_port->rx_buf[RX_BUF_L - (*last_res)--];
+ if ((*last_res) == 0)
+ *last_res = RX_BUF_L;
+- return c;
+ } else {
+- return readl_relaxed(port->membase + ofs->rdr);
++ c = readl_relaxed(port->membase + ofs->rdr);
++ /* apply RDR data mask */
++ c &= stm32_port->rdr_mask;
+ }
++
++ return c;
+ }
+
+ static void stm32_receive_chars(struct uart_port *port, bool threaded)
+@@ -225,35 +225,51 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
+
+ while (stm32_pending_rx(port, &sr, &stm32_port->last_res, threaded)) {
+ sr |= USART_SR_DUMMY_RX;
+- c = stm32_get_char(port, &sr, &stm32_port->last_res);
+ flag = TTY_NORMAL;
+- port->icount.rx++;
+
++ /*
++		 * Status bits have to be cleared before reading the RDR:
++ * In FIFO mode, reading the RDR will pop the next data
++ * (if any) along with its status bits into the SR.
++		 * Not doing so leads to misalignment between RDR and SR,
++		 * and clears the status bits of the next rx data.
++ *
++		 * Clear error flags for stm32f7 and stm32h7 compatible
++ * devices. On stm32f4 compatible devices, the error bit is
++ * cleared by the sequence [read SR - read DR].
++ */
++ if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
++ writel_relaxed(sr & USART_SR_ERR_MASK,
++ port->membase + ofs->icr);
++
++ c = stm32_get_char(port, &sr, &stm32_port->last_res);
++ port->icount.rx++;
+ if (sr & USART_SR_ERR_MASK) {
+- if (sr & USART_SR_LBD) {
+- port->icount.brk++;
+- if (uart_handle_break(port))
+- continue;
+- } else if (sr & USART_SR_ORE) {
+- if (ofs->icr != UNDEF_REG)
+- writel_relaxed(USART_ICR_ORECF,
+- port->membase +
+- ofs->icr);
++ if (sr & USART_SR_ORE) {
+ port->icount.overrun++;
+ } else if (sr & USART_SR_PE) {
+ port->icount.parity++;
+ } else if (sr & USART_SR_FE) {
+- port->icount.frame++;
++ /* Break detection if character is null */
++ if (!c) {
++ port->icount.brk++;
++ if (uart_handle_break(port))
++ continue;
++ } else {
++ port->icount.frame++;
++ }
+ }
+
+ sr &= port->read_status_mask;
+
+- if (sr & USART_SR_LBD)
+- flag = TTY_BREAK;
+- else if (sr & USART_SR_PE)
++ if (sr & USART_SR_PE) {
+ flag = TTY_PARITY;
+- else if (sr & USART_SR_FE)
+- flag = TTY_FRAME;
++ } else if (sr & USART_SR_FE) {
++ if (!c)
++ flag = TTY_BREAK;
++ else
++ flag = TTY_FRAME;
++ }
+ }
+
+ if (uart_handle_sysrq_char(port, c))
+@@ -271,21 +287,6 @@ static void stm32_tx_dma_complete(void *arg)
+ struct uart_port *port = arg;
+ struct stm32_port *stm32port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+- unsigned int isr;
+- int ret;
+-
+- ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
+- isr,
+- (isr & USART_SR_TC),
+- 10, 100000);
+-
+- if (ret)
+- dev_err(port->dev, "terminal count not set\n");
+-
+- if (ofs->icr == UNDEF_REG)
+- stm32_clr_bits(port, ofs->isr, USART_SR_TC);
+- else
+- stm32_set_bits(port, ofs->icr, USART_CR_TC);
+
+ stm32_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+ stm32port->tx_dma_busy = false;
+@@ -377,7 +378,6 @@ static void stm32_transmit_chars_dma(struct uart_port *port)
+ /* Issue pending DMA TX requests */
+ dma_async_issue_pending(stm32port->tx_ch);
+
+- stm32_clr_bits(port, ofs->isr, USART_SR_TC);
+ stm32_set_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
+@@ -401,15 +401,15 @@ static void stm32_transmit_chars(struct uart_port *port)
+ return;
+ }
+
+- if (uart_tx_stopped(port)) {
+- stm32_stop_tx(port);
++ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
++ stm32_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ return;
+ }
+
+- if (uart_circ_empty(xmit)) {
+- stm32_stop_tx(port);
+- return;
+- }
++ if (ofs->icr == UNDEF_REG)
++ stm32_clr_bits(port, ofs->isr, USART_SR_TC);
++ else
++ writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
+
+ if (stm32_port->tx_ch)
+ stm32_transmit_chars_dma(port);
+@@ -420,7 +420,7 @@ static void stm32_transmit_chars(struct uart_port *port)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+- stm32_stop_tx(port);
++ stm32_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ }
+
+ static irqreturn_t stm32_interrupt(int irq, void *ptr)
+@@ -554,7 +554,6 @@ static int stm32_startup(struct uart_port *port)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+- struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ const char *name = to_platform_device(port->dev)->name;
+ u32 val;
+ int ret;
+@@ -565,15 +564,6 @@ static int stm32_startup(struct uart_port *port)
+ if (ret)
+ return ret;
+
+- if (cfg->has_wakeup && stm32_port->wakeirq >= 0) {
+- ret = dev_pm_set_dedicated_wake_irq(port->dev,
+- stm32_port->wakeirq);
+- if (ret) {
+- free_irq(port->irq, port);
+- return ret;
+- }
+- }
+-
+ val = USART_CR1_RXNEIE | USART_CR1_TE | USART_CR1_RE;
+ if (stm32_port->fifoen)
+ val |= USART_CR1_FIFOEN;
+@@ -587,18 +577,56 @@ static void stm32_shutdown(struct uart_port *port)
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+- u32 val;
++ u32 val, isr;
++ int ret;
+
+ val = USART_CR1_TXEIE | USART_CR1_RXNEIE | USART_CR1_TE | USART_CR1_RE;
+ val |= BIT(cfg->uart_enable_bit);
+ if (stm32_port->fifoen)
+ val |= USART_CR1_FIFOEN;
++
++ ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
++ isr, (isr & USART_SR_TC),
++ 10, 100000);
++
++ if (ret)
++ dev_err(port->dev, "transmission complete not set\n");
++
+ stm32_clr_bits(port, ofs->cr1, val);
+
+- dev_pm_clear_wake_irq(port->dev);
+ free_irq(port->irq, port);
+ }
+
++unsigned int stm32_get_databits(struct ktermios *termios)
++{
++ unsigned int bits;
++
++ tcflag_t cflag = termios->c_cflag;
++
++ switch (cflag & CSIZE) {
++ /*
++ * CSIZE settings are not necessarily supported in hardware.
++ * CSIZE unsupported configurations are handled here to set word length
++	 * Unsupported CSIZE configurations are handled here by defaulting the
++	 * word length to 8 bits and printing a debug message.
++ case CS5:
++ bits = 5;
++ break;
++ case CS6:
++ bits = 6;
++ break;
++ case CS7:
++ bits = 7;
++ break;
++ /* default including CS8 */
++ default:
++ bits = 8;
++ break;
++ }
++
++ return bits;
++}
++
+ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+ {
+@@ -606,7 +634,7 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ struct stm32_usart_config *cfg = &stm32_port->info->cfg;
+ struct serial_rs485 *rs485conf = &port->rs485;
+- unsigned int baud;
++ unsigned int baud, bits;
+ u32 usartdiv, mantissa, fraction, oversampling;
+ tcflag_t cflag = termios->c_cflag;
+ u32 cr1, cr2, cr3;
+@@ -632,16 +660,29 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+ if (cflag & CSTOPB)
+ cr2 |= USART_CR2_STOP_2B;
+
++ bits = stm32_get_databits(termios);
++ stm32_port->rdr_mask = (BIT(bits) - 1);
++
+ if (cflag & PARENB) {
++ bits++;
+ cr1 |= USART_CR1_PCE;
+- if ((cflag & CSIZE) == CS8) {
+- if (cfg->has_7bits_data)
+- cr1 |= USART_CR1_M0;
+- else
+- cr1 |= USART_CR1_M;
+- }
+ }
+
++ /*
++ * Word length configuration:
++ * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
++ * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
++ * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
++ * M0 and M1 already cleared by cr1 initialization.
++ */
++ if (bits == 9)
++ cr1 |= USART_CR1_M0;
++ else if ((bits == 7) && cfg->has_7bits_data)
++ cr1 |= USART_CR1_M1;
++ else if (bits != 8)
++ dev_dbg(port->dev, "Unsupported data bits config: %u bits\n"
++ , bits);
++
+ if (cflag & PARODD)
+ cr1 |= USART_CR1_PS;
+
+@@ -679,14 +720,14 @@ static void stm32_set_termios(struct uart_port *port, struct ktermios *termios,
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= USART_SR_PE | USART_SR_FE;
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
+- port->read_status_mask |= USART_SR_LBD;
++ port->read_status_mask |= USART_SR_FE;
+
+ /* Characters to ignore */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
+ if (termios->c_iflag & IGNBRK) {
+- port->ignore_status_mask |= USART_SR_LBD;
++ port->ignore_status_mask |= USART_SR_FE;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+@@ -1024,11 +1065,18 @@ static int stm32_serial_probe(struct platform_device *pdev)
+ ret = device_init_wakeup(&pdev->dev, true);
+ if (ret)
+ goto err_uninit;
++
++ ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
++ stm32port->wakeirq);
++ if (ret)
++ goto err_nowup;
++
++ device_set_wakeup_enable(&pdev->dev, false);
+ }
+
+ ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
+ if (ret)
+- goto err_nowup;
++ goto err_wirq;
+
+ ret = stm32_of_dma_rx_probe(stm32port, pdev);
+ if (ret)
+@@ -1042,6 +1090,10 @@ static int stm32_serial_probe(struct platform_device *pdev)
+
+ return 0;
+
++err_wirq:
++ if (stm32port->info->cfg.has_wakeup && stm32port->wakeirq >= 0)
++ dev_pm_clear_wake_irq(&pdev->dev);
++
+ err_nowup:
+ if (stm32port->info->cfg.has_wakeup && stm32port->wakeirq >= 0)
+ device_init_wakeup(&pdev->dev, false);
+@@ -1079,8 +1131,10 @@ static int stm32_serial_remove(struct platform_device *pdev)
+ TX_BUF_L, stm32_port->tx_buf,
+ stm32_port->tx_dma_buf);
+
+- if (cfg->has_wakeup && stm32_port->wakeirq >= 0)
++ if (cfg->has_wakeup && stm32_port->wakeirq >= 0) {
++ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
++ }
+
+ clk_disable_unprepare(stm32_port->clk);
+
+diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
+index 6f294e280ea3..30d2433e27c3 100644
+--- a/drivers/tty/serial/stm32-usart.h
++++ b/drivers/tty/serial/stm32-usart.h
+@@ -108,7 +108,6 @@ struct stm32_usart_info stm32h7_info = {
+ #define USART_SR_RXNE BIT(5)
+ #define USART_SR_TC BIT(6)
+ #define USART_SR_TXE BIT(7)
+-#define USART_SR_LBD BIT(8)
+ #define USART_SR_CTSIF BIT(9)
+ #define USART_SR_CTS BIT(10) /* F7 */
+ #define USART_SR_RTOF BIT(11) /* F7 */
+@@ -120,8 +119,7 @@ struct stm32_usart_info stm32h7_info = {
+ #define USART_SR_SBKF BIT(18) /* F7 */
+ #define USART_SR_WUF BIT(20) /* H7 */
+ #define USART_SR_TEACK BIT(21) /* F7 */
+-#define USART_SR_ERR_MASK (USART_SR_LBD | USART_SR_ORE | \
+- USART_SR_FE | USART_SR_PE)
++#define USART_SR_ERR_MASK (USART_SR_ORE | USART_SR_FE | USART_SR_PE)
+ /* Dummy bits */
+ #define USART_SR_DUMMY_RX BIT(16)
+
+@@ -151,8 +149,7 @@ struct stm32_usart_info stm32h7_info = {
+ #define USART_CR1_PS BIT(9)
+ #define USART_CR1_PCE BIT(10)
+ #define USART_CR1_WAKE BIT(11)
+-#define USART_CR1_M BIT(12)
+-#define USART_CR1_M0 BIT(12) /* F7 */
++#define USART_CR1_M0 BIT(12) /* F7 (CR1_M for F4) */
+ #define USART_CR1_MME BIT(13) /* F7 */
+ #define USART_CR1_CMIE BIT(14) /* F7 */
+ #define USART_CR1_OVER8 BIT(15)
+@@ -169,8 +166,6 @@ struct stm32_usart_info stm32h7_info = {
+ /* USART_CR2 */
+ #define USART_CR2_ADD_MASK GENMASK(3, 0) /* F4 */
+ #define USART_CR2_ADDM7 BIT(4) /* F7 */
+-#define USART_CR2_LBDL BIT(5)
+-#define USART_CR2_LBDIE BIT(6)
+ #define USART_CR2_LBCL BIT(8)
+ #define USART_CR2_CPHA BIT(9)
+ #define USART_CR2_CPOL BIT(10)
+@@ -227,12 +222,10 @@ struct stm32_usart_info stm32h7_info = {
+
+ /* USART_ICR */
+ #define USART_ICR_PECF BIT(0) /* F7 */
+-#define USART_ICR_FFECF BIT(1) /* F7 */
+-#define USART_ICR_NCF BIT(2) /* F7 */
++#define USART_ICR_FECF BIT(1) /* F7 */
+ #define USART_ICR_ORECF BIT(3) /* F7 */
+ #define USART_ICR_IDLECF BIT(4) /* F7 */
+ #define USART_ICR_TCCF BIT(6) /* F7 */
+-#define USART_ICR_LBDCF BIT(8) /* F7 */
+ #define USART_ICR_CTSCF BIT(9) /* F7 */
+ #define USART_ICR_RTOCF BIT(11) /* F7 */
+ #define USART_ICR_EOBCF BIT(12) /* F7 */
+@@ -261,6 +254,7 @@ struct stm32_port {
+ bool hw_flow_control;
+ bool fifoen;
+ int wakeirq;
++ int rdr_mask; /* receive data register mask */
+ };
+
+ static struct stm32_port stm32_ports[STM32_MAX_PORTS];
+diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
+index 2762148c169d..9c788748bdc6 100644
+--- a/drivers/uio/uio.c
++++ b/drivers/uio/uio.c
+@@ -938,9 +938,12 @@ int __uio_register_device(struct module *owner,
+ atomic_set(&idev->event, 0);
+
+ ret = uio_get_minor(idev);
+- if (ret)
++ if (ret) {
++ kfree(idev);
+ return ret;
++ }
+
++ device_initialize(&idev->dev);
+ idev->dev.devt = MKDEV(uio_major, idev->minor);
+ idev->dev.class = &uio_class;
+ idev->dev.parent = parent;
+@@ -951,7 +954,7 @@ int __uio_register_device(struct module *owner,
+ if (ret)
+ goto err_device_create;
+
+- ret = device_register(&idev->dev);
++ ret = device_add(&idev->dev);
+ if (ret)
+ goto err_device_create;
+
+@@ -983,9 +986,10 @@ int __uio_register_device(struct module *owner,
+ err_request_irq:
+ uio_dev_del_attributes(idev);
+ err_uio_dev_add_attributes:
+- device_unregister(&idev->dev);
++ device_del(&idev->dev);
+ err_device_create:
+ uio_free_minor(idev);
++ put_device(&idev->dev);
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(__uio_register_device);
+diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
+index b8a1fdefb515..4929c5883068 100644
+--- a/drivers/usb/class/cdc-wdm.c
++++ b/drivers/usb/class/cdc-wdm.c
+@@ -1107,7 +1107,7 @@ static int wdm_post_reset(struct usb_interface *intf)
+ rv = recover_from_urb_loss(desc);
+ mutex_unlock(&desc->wlock);
+ mutex_unlock(&desc->rlock);
+- return 0;
++ return rv;
+ }
+
+ static struct usb_driver wdm_driver = {
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 3f68edde0f03..f64d1cd08fb6 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -2230,6 +2230,7 @@ static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep)
+ if (status & DEV_DMA_STS_MASK)
+ dev_err(hsotg->dev, "descriptor %d closed with %x\n",
+ i, status & DEV_DMA_STS_MASK);
++ desc++;
+ }
+
+ return bytes_rem;
+diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
+index 518ead12458d..5d22f4bf2a9f 100644
+--- a/drivers/usb/dwc3/Kconfig
++++ b/drivers/usb/dwc3/Kconfig
+@@ -52,7 +52,8 @@ comment "Platform Glue Driver Support"
+
+ config USB_DWC3_OMAP
+ tristate "Texas Instruments OMAP5 and similar Platforms"
+- depends on EXTCON && (ARCH_OMAP2PLUS || COMPILE_TEST)
++ depends on ARCH_OMAP2PLUS || COMPILE_TEST
++ depends on EXTCON || !EXTCON
+ depends on OF
+ default USB_DWC3
+ help
+@@ -114,6 +115,7 @@ config USB_DWC3_ST
+ config USB_DWC3_QCOM
+ tristate "Qualcomm Platform"
+ depends on ARCH_QCOM || COMPILE_TEST
++ depends on EXTCON || !EXTCON
+ depends on OF
+ default USB_DWC3
+ help
+diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
+index d44b26d5b2a2..367697144cda 100644
+--- a/drivers/usb/gadget/udc/fsl_udc_core.c
++++ b/drivers/usb/gadget/udc/fsl_udc_core.c
+@@ -2247,8 +2247,10 @@ static int struct_udc_setup(struct fsl_udc *udc,
+ udc->phy_mode = pdata->phy_mode;
+
+ udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
+- if (!udc->eps)
+- return -1;
++ if (!udc->eps) {
++ ERR("kmalloc udc endpoint status failed\n");
++ goto eps_alloc_failed;
++ }
+
+ /* initialized QHs, take care of alignment */
+ size = udc->max_ep * sizeof(struct ep_queue_head);
+@@ -2262,8 +2264,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
+ &udc->ep_qh_dma, GFP_KERNEL);
+ if (!udc->ep_qh) {
+ ERR("malloc QHs for udc failed\n");
+- kfree(udc->eps);
+- return -1;
++ goto ep_queue_alloc_failed;
+ }
+
+ udc->ep_qh_size = size;
+@@ -2272,8 +2273,17 @@ static int struct_udc_setup(struct fsl_udc *udc,
+ /* FIXME: fsl_alloc_request() ignores ep argument */
+ udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
+ struct fsl_req, req);
++ if (!udc->status_req) {
++ ERR("kzalloc for udc status request failed\n");
++ goto udc_status_alloc_failed;
++ }
++
+ /* allocate a small amount of memory to get valid address */
+ udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
++ if (!udc->status_req->req.buf) {
++ ERR("kzalloc for udc request buffer failed\n");
++ goto udc_req_buf_alloc_failed;
++ }
+
+ udc->resume_state = USB_STATE_NOTATTACHED;
+ udc->usb_state = USB_STATE_POWERED;
+@@ -2281,6 +2291,18 @@ static int struct_udc_setup(struct fsl_udc *udc,
+ udc->remote_wakeup = 0; /* default to 0 on reset */
+
+ return 0;
++
++udc_req_buf_alloc_failed:
++ kfree(udc->status_req);
++udc_status_alloc_failed:
++ kfree(udc->ep_qh);
++ udc->ep_qh_size = 0;
++ep_queue_alloc_failed:
++ kfree(udc->eps);
++eps_alloc_failed:
++ udc->phy_mode = 0;
++ return -1;
++
+ }
+
+ /*----------------------------------------------------------------
+diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
+index 8f180bf7561a..9772c0de59b7 100644
+--- a/drivers/usb/host/xhci-hub.c
++++ b/drivers/usb/host/xhci-hub.c
+@@ -1104,7 +1104,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ }
+ port_li = readl(ports[wIndex]->addr + PORTLI);
+ status = xhci_get_ext_port_status(temp, port_li);
+- put_unaligned_le32(cpu_to_le32(status), &buf[4]);
++ put_unaligned_le32(status, &buf[4]);
+ }
+ break;
+ case SetPortFeature:
+diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
+index 91ea3083e7ad..affb5393c4c6 100644
+--- a/drivers/usb/phy/Kconfig
++++ b/drivers/usb/phy/Kconfig
+@@ -20,7 +20,7 @@ config AB8500_USB
+ in host mode, low speed.
+
+ config FSL_USB2_OTG
+- bool "Freescale USB OTG Transceiver Driver"
++ tristate "Freescale USB OTG Transceiver Driver"
+ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
+ depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
+ select USB_PHY
+diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
+index 183550b63faa..dade34d70419 100644
+--- a/drivers/usb/phy/phy-twl6030-usb.c
++++ b/drivers/usb/phy/phy-twl6030-usb.c
+@@ -400,7 +400,7 @@ static int twl6030_usb_remove(struct platform_device *pdev)
+ {
+ struct twl6030_usb *twl = platform_get_drvdata(pdev);
+
+- cancel_delayed_work(&twl->get_status_work);
++ cancel_delayed_work_sync(&twl->get_status_work);
+ twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
+ REG_INT_MSK_LINE_C);
+ twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
+diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
+index 00878c386dd0..8445890accdf 100644
+--- a/drivers/usb/typec/Kconfig
++++ b/drivers/usb/typec/Kconfig
+@@ -95,6 +95,7 @@ source "drivers/usb/typec/ucsi/Kconfig"
+ config TYPEC_TPS6598X
+ tristate "TI TPS6598x USB Power Delivery controller driver"
+ depends on I2C
++ select REGMAP_I2C
+ help
+ Say Y or M here if your system has TI TPS65982 or TPS65983 USB Power
+ Delivery controller.
+diff --git a/drivers/usb/typec/fusb302/fusb302.c b/drivers/usb/typec/fusb302/fusb302.c
+index 62a0060d39d8..c749ce486ec8 100644
+--- a/drivers/usb/typec/fusb302/fusb302.c
++++ b/drivers/usb/typec/fusb302/fusb302.c
+@@ -990,13 +990,17 @@ done:
+ return ret;
+ }
+
+-static int tcpm_start_drp_toggling(struct tcpc_dev *dev,
+- enum typec_cc_status cc)
++static int tcpm_start_toggling(struct tcpc_dev *dev,
++ enum typec_port_type port_type,
++ enum typec_cc_status cc)
+ {
+ struct fusb302_chip *chip = container_of(dev, struct fusb302_chip,
+ tcpc_dev);
+ int ret = 0;
+
++ if (port_type != TYPEC_PORT_DRP)
++ return -EOPNOTSUPP;
++
+ mutex_lock(&chip->lock);
+ ret = fusb302_set_src_current(chip, cc_src_current[cc]);
+ if (ret < 0) {
+@@ -1206,7 +1210,7 @@ static void init_tcpc_dev(struct tcpc_dev *fusb302_tcpc_dev)
+ fusb302_tcpc_dev->set_vbus = tcpm_set_vbus;
+ fusb302_tcpc_dev->set_pd_rx = tcpm_set_pd_rx;
+ fusb302_tcpc_dev->set_roles = tcpm_set_roles;
+- fusb302_tcpc_dev->start_drp_toggling = tcpm_start_drp_toggling;
++ fusb302_tcpc_dev->start_toggling = tcpm_start_toggling;
+ fusb302_tcpc_dev->pd_transmit = tcpm_pd_transmit;
+ }
+
+diff --git a/drivers/usb/typec/tcpci.c b/drivers/usb/typec/tcpci.c
+index ac6b418b15f1..c1f7073a56de 100644
+--- a/drivers/usb/typec/tcpci.c
++++ b/drivers/usb/typec/tcpci.c
+@@ -100,13 +100,17 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
+ return 0;
+ }
+
+-static int tcpci_start_drp_toggling(struct tcpc_dev *tcpc,
+- enum typec_cc_status cc)
++static int tcpci_start_toggling(struct tcpc_dev *tcpc,
++ enum typec_port_type port_type,
++ enum typec_cc_status cc)
+ {
+ int ret;
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ unsigned int reg = TCPC_ROLE_CTRL_DRP;
+
++ if (port_type != TYPEC_PORT_DRP)
++ return -EOPNOTSUPP;
++
+ /* Handle vendor drp toggling */
+ if (tcpci->data->start_drp_toggling) {
+ ret = tcpci->data->start_drp_toggling(tcpci, tcpci->data, cc);
+@@ -511,7 +515,7 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
+ tcpci->tcpc.get_cc = tcpci_get_cc;
+ tcpci->tcpc.set_polarity = tcpci_set_polarity;
+ tcpci->tcpc.set_vconn = tcpci_set_vconn;
+- tcpci->tcpc.start_drp_toggling = tcpci_start_drp_toggling;
++ tcpci->tcpc.start_toggling = tcpci_start_toggling;
+
+ tcpci->tcpc.set_pd_rx = tcpci_set_pd_rx;
+ tcpci->tcpc.set_roles = tcpci_set_roles;
+diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
+index 39cf19001239..29d72e9b0f01 100644
+--- a/drivers/usb/typec/tcpm.c
++++ b/drivers/usb/typec/tcpm.c
+@@ -31,7 +31,7 @@
+
+ #define FOREACH_STATE(S) \
+ S(INVALID_STATE), \
+- S(DRP_TOGGLING), \
++ S(TOGGLING), \
+ S(SRC_UNATTACHED), \
+ S(SRC_ATTACH_WAIT), \
+ S(SRC_ATTACHED), \
+@@ -473,7 +473,7 @@ static void tcpm_log(struct tcpm_port *port, const char *fmt, ...)
+ /* Do not log while disconnected and unattached */
+ if (tcpm_port_is_disconnected(port) &&
+ (port->state == SRC_UNATTACHED || port->state == SNK_UNATTACHED ||
+- port->state == DRP_TOGGLING))
++ port->state == TOGGLING))
+ return;
+
+ va_start(args, fmt);
+@@ -2561,20 +2561,16 @@ static int tcpm_set_charge(struct tcpm_port *port, bool charge)
+ return 0;
+ }
+
+-static bool tcpm_start_drp_toggling(struct tcpm_port *port,
+- enum typec_cc_status cc)
++static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
+ {
+ int ret;
+
+- if (port->tcpc->start_drp_toggling &&
+- port->port_type == TYPEC_PORT_DRP) {
+- tcpm_log_force(port, "Start DRP toggling");
+- ret = port->tcpc->start_drp_toggling(port->tcpc, cc);
+- if (!ret)
+- return true;
+- }
++ if (!port->tcpc->start_toggling)
++ return false;
+
+- return false;
++ tcpm_log_force(port, "Start toggling");
++ ret = port->tcpc->start_toggling(port->tcpc, port->port_type, cc);
++ return ret == 0;
+ }
+
+ static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
+@@ -2868,15 +2864,15 @@ static void run_state_machine(struct tcpm_port *port)
+
+ port->enter_state = port->state;
+ switch (port->state) {
+- case DRP_TOGGLING:
++ case TOGGLING:
+ break;
+ /* SRC states */
+ case SRC_UNATTACHED:
+ if (!port->non_pd_role_swap)
+ tcpm_swap_complete(port, -ENOTCONN);
+ tcpm_src_detach(port);
+- if (tcpm_start_drp_toggling(port, tcpm_rp_cc(port))) {
+- tcpm_set_state(port, DRP_TOGGLING, 0);
++ if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
++ tcpm_set_state(port, TOGGLING, 0);
+ break;
+ }
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+@@ -3074,8 +3070,8 @@ static void run_state_machine(struct tcpm_port *port)
+ tcpm_swap_complete(port, -ENOTCONN);
+ tcpm_pps_complete(port, -ENOTCONN);
+ tcpm_snk_detach(port);
+- if (tcpm_start_drp_toggling(port, TYPEC_CC_RD)) {
+- tcpm_set_state(port, DRP_TOGGLING, 0);
++ if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
++ tcpm_set_state(port, TOGGLING, 0);
+ break;
+ }
+ tcpm_set_cc(port, TYPEC_CC_RD);
+@@ -3642,7 +3638,7 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1,
+ : "connected");
+
+ switch (port->state) {
+- case DRP_TOGGLING:
++ case TOGGLING:
+ if (tcpm_port_is_debug(port) || tcpm_port_is_audio(port) ||
+ tcpm_port_is_source(port))
+ tcpm_set_state(port, SRC_ATTACH_WAIT, 0);
+diff --git a/drivers/usb/typec/typec_wcove.c b/drivers/usb/typec/typec_wcove.c
+index 6770afd40765..f1f8f45e2f3d 100644
+--- a/drivers/usb/typec/typec_wcove.c
++++ b/drivers/usb/typec/typec_wcove.c
+@@ -416,12 +416,16 @@ static int wcove_pd_transmit(struct tcpc_dev *tcpc,
+ return regmap_write(wcove->regmap, USBC_TXCMD, cmd | USBC_TXCMD_START);
+ }
+
+-static int wcove_start_drp_toggling(struct tcpc_dev *tcpc,
+- enum typec_cc_status cc)
++static int wcove_start_toggling(struct tcpc_dev *tcpc,
++ enum typec_port_type port_type,
++ enum typec_cc_status cc)
+ {
+ struct wcove_typec *wcove = tcpc_to_wcove(tcpc);
+ unsigned int usbc_ctrl;
+
++ if (port_type != TYPEC_PORT_DRP)
++ return -EOPNOTSUPP;
++
+ usbc_ctrl = USBC_CONTROL1_MODE_DRP | USBC_CONTROL1_DRPTOGGLE_RANDOM;
+
+ switch (cc) {
+@@ -642,7 +646,7 @@ static int wcove_typec_probe(struct platform_device *pdev)
+ wcove->tcpc.set_polarity = wcove_set_polarity;
+ wcove->tcpc.set_vconn = wcove_set_vconn;
+ wcove->tcpc.set_current_limit = wcove_set_current_limit;
+- wcove->tcpc.start_drp_toggling = wcove_start_drp_toggling;
++ wcove->tcpc.start_toggling = wcove_start_toggling;
+
+ wcove->tcpc.set_pd_rx = wcove_set_pd_rx;
+ wcove->tcpc.set_roles = wcove_set_roles;
+diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
+index 0212f0ee8aea..e052f62fdea7 100644
+--- a/drivers/vfio/mdev/mdev_core.c
++++ b/drivers/vfio/mdev/mdev_core.c
+@@ -150,10 +150,10 @@ static int mdev_device_remove_ops(struct mdev_device *mdev, bool force_remove)
+
+ static int mdev_device_remove_cb(struct device *dev, void *data)
+ {
+- if (!dev_is_mdev(dev))
+- return 0;
++ if (dev_is_mdev(dev))
++ mdev_device_remove(dev, true);
+
+- return mdev_device_remove(dev, data ? *(bool *)data : true);
++ return 0;
+ }
+
+ /*
+@@ -182,6 +182,7 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
+ /* Check for duplicate */
+ parent = __find_parent_device(dev);
+ if (parent) {
++ parent = NULL;
+ ret = -EEXIST;
+ goto add_dev_err;
+ }
+@@ -240,7 +241,6 @@ EXPORT_SYMBOL(mdev_register_device);
+ void mdev_unregister_device(struct device *dev)
+ {
+ struct mdev_parent *parent;
+- bool force_remove = true;
+
+ mutex_lock(&parent_list_lock);
+ parent = __find_parent_device(dev);
+@@ -254,8 +254,7 @@ void mdev_unregister_device(struct device *dev)
+ list_del(&parent->next);
+ class_compat_remove_link(mdev_bus_compat_class, dev, NULL);
+
+- device_for_each_child(dev, (void *)&force_remove,
+- mdev_device_remove_cb);
++ device_for_each_child(dev, NULL, mdev_device_remove_cb);
+
+ parent_remove_sysfs_files(parent);
+
+diff --git a/drivers/vfio/mdev/mdev_sysfs.c b/drivers/vfio/mdev/mdev_sysfs.c
+index 249472f05509..e7770b511d03 100644
+--- a/drivers/vfio/mdev/mdev_sysfs.c
++++ b/drivers/vfio/mdev/mdev_sysfs.c
+@@ -280,7 +280,7 @@ type_link_failed:
+
+ void mdev_remove_sysfs_files(struct device *dev, struct mdev_type *type)
+ {
++ sysfs_remove_files(&dev->kobj, mdev_device_attrs);
+ sysfs_remove_link(&dev->kobj, "mdev_type");
+ sysfs_remove_link(type->devices_kobj, dev_name(dev));
+- sysfs_remove_files(&dev->kobj, mdev_device_attrs);
+ }
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 0a6eb53e79fb..66783a37f450 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -696,6 +696,7 @@ static long vfio_pci_ioctl(void *device_data,
+ {
+ void __iomem *io;
+ size_t size;
++ u16 orig_cmd;
+
+ info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
+ info.flags = 0;
+@@ -711,15 +712,23 @@ static long vfio_pci_ioctl(void *device_data,
+ break;
+ }
+
+- /* Is it really there? */
++ /*
++ * Is it really there? Enable memory decode for
++ * implicit access in pci_map_rom().
++ */
++ pci_read_config_word(pdev, PCI_COMMAND, &orig_cmd);
++ pci_write_config_word(pdev, PCI_COMMAND,
++ orig_cmd | PCI_COMMAND_MEMORY);
++
+ io = pci_map_rom(pdev, &size);
+- if (!io || !size) {
++ if (io) {
++ info.flags = VFIO_REGION_INFO_FLAG_READ;
++ pci_unmap_rom(pdev, io);
++ } else {
+ info.size = 0;
+- break;
+ }
+- pci_unmap_rom(pdev, io);
+
+- info.flags = VFIO_REGION_INFO_FLAG_READ;
++ pci_write_config_word(pdev, PCI_COMMAND, orig_cmd);
+ break;
+ }
+ case VFIO_PCI_VGA_REGION_INDEX:
+diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
+index a9be2d8e98df..55090d9f9de0 100644
+--- a/drivers/vhost/test.c
++++ b/drivers/vhost/test.c
+@@ -162,6 +162,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
+
+ vhost_test_stop(n, &private);
+ vhost_test_flush(n);
++ vhost_dev_stop(&n->dev);
+ vhost_dev_cleanup(&n->dev);
+ /* We do an extra flush before freeing memory,
+ * since jobs can re-queue themselves. */
+@@ -238,6 +239,7 @@ static long vhost_test_reset_owner(struct vhost_test *n)
+ }
+ vhost_test_stop(n, &priv);
+ vhost_test_flush(n);
++ vhost_dev_stop(&n->dev);
+ vhost_dev_reset_owner(&n->dev, umem);
+ done:
+ mutex_unlock(&n->dev.mutex);
+diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c
+index 2030a6b77a09..ef2553f452ca 100644
+--- a/drivers/video/backlight/lm3630a_bl.c
++++ b/drivers/video/backlight/lm3630a_bl.c
+@@ -201,7 +201,7 @@ static int lm3630a_bank_a_update_status(struct backlight_device *bl)
+ LM3630A_LEDA_ENABLE, LM3630A_LEDA_ENABLE);
+ if (ret < 0)
+ goto out_i2c_err;
+- return bl->props.brightness;
++ return 0;
+
+ out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access\n");
+@@ -278,7 +278,7 @@ static int lm3630a_bank_b_update_status(struct backlight_device *bl)
+ LM3630A_LEDB_ENABLE, LM3630A_LEDB_ENABLE);
+ if (ret < 0)
+ goto out_i2c_err;
+- return bl->props.brightness;
++ return 0;
+
+ out_i2c_err:
+ dev_err(pchip->dev, "i2c failed to access REG_CTRL\n");
+diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
+index 7ddc0930e98c..3a3098d4873b 100644
+--- a/drivers/video/backlight/pwm_bl.c
++++ b/drivers/video/backlight/pwm_bl.c
+@@ -199,29 +199,17 @@ int pwm_backlight_brightness_default(struct device *dev,
+ struct platform_pwm_backlight_data *data,
+ unsigned int period)
+ {
+- unsigned int counter = 0;
+- unsigned int i, n;
++ unsigned int i;
+ u64 retval;
+
+ /*
+- * Count the number of bits needed to represent the period number. The
+- * number of bits is used to calculate the number of levels used for the
+- * brightness-levels table, the purpose of this calculation is have a
+- * pre-computed table with enough levels to get linear brightness
+- * perception. The period is divided by the number of bits so for a
+- * 8-bit PWM we have 255 / 8 = 32 brightness levels or for a 16-bit PWM
+- * we have 65535 / 16 = 4096 brightness levels.
+- *
+- * Note that this method is based on empirical testing on different
+- * devices with PWM of 8 and 16 bits of resolution.
++ * Once we have 4096 levels there's little point going much higher...
++ * neither interactive sliders nor animation benefits from having
++ * more values in the table.
+ */
+- n = period;
+- while (n) {
+- counter += n % 2;
+- n >>= 1;
+- }
++ data->max_brightness =
++ min((int)DIV_ROUND_UP(period, fls(period)), 4096);
+
+- data->max_brightness = DIV_ROUND_UP(period, counter);
+ data->levels = devm_kcalloc(dev, data->max_brightness,
+ sizeof(*data->levels), GFP_KERNEL);
+ if (!data->levels)
+diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c
+index f103665cad43..f9b366d17587 100644
+--- a/drivers/video/fbdev/chipsfb.c
++++ b/drivers/video/fbdev/chipsfb.c
+@@ -350,7 +350,7 @@ static void init_chips(struct fb_info *p, unsigned long addr)
+ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+ {
+ struct fb_info *p;
+- unsigned long addr, size;
++ unsigned long addr;
+ unsigned short cmd;
+ int rc = -ENODEV;
+
+@@ -362,7 +362,6 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent)
+ if ((dp->resource[0].flags & IORESOURCE_MEM) == 0)
+ goto err_disable;
+ addr = pci_resource_start(dp, 0);
+- size = pci_resource_len(dp, 0);
+ if (addr == 0)
+ goto err_disable;
+
+diff --git a/drivers/watchdog/rtd119x_wdt.c b/drivers/watchdog/rtd119x_wdt.c
+index d001c17ddfde..99caec6882d2 100644
+--- a/drivers/watchdog/rtd119x_wdt.c
++++ b/drivers/watchdog/rtd119x_wdt.c
+@@ -135,7 +135,7 @@ static int rtd119x_wdt_probe(struct platform_device *pdev)
+ rtd119x_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
+ rtd119x_wdt_stop(&data->wdt_dev);
+
+- ret = devm_watchdog_register_device(&pdev->dev, &data->wdt_dev);
++ ret = watchdog_register_device(&data->wdt_dev);
+ if (ret) {
+ clk_disable_unprepare(data->clk);
+ clk_put(data->clk);
+diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
+index ff9397d9638a..b6c65afd3677 100644
+--- a/drivers/watchdog/sprd_wdt.c
++++ b/drivers/watchdog/sprd_wdt.c
+@@ -342,10 +342,9 @@ static int sprd_wdt_probe(struct platform_device *pdev)
+
+ static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
+ {
+- struct watchdog_device *wdd = dev_get_drvdata(dev);
+ struct sprd_wdt *wdt = dev_get_drvdata(dev);
+
+- if (watchdog_active(wdd))
++ if (watchdog_active(&wdt->wdd))
+ sprd_wdt_stop(&wdt->wdd);
+ sprd_wdt_disable(wdt);
+
+@@ -354,7 +353,6 @@ static int __maybe_unused sprd_wdt_pm_suspend(struct device *dev)
+
+ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
+ {
+- struct watchdog_device *wdd = dev_get_drvdata(dev);
+ struct sprd_wdt *wdt = dev_get_drvdata(dev);
+ int ret;
+
+@@ -362,7 +360,7 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
+ if (ret)
+ return ret;
+
+- if (watchdog_active(wdd)) {
++ if (watchdog_active(&wdt->wdd)) {
+ ret = sprd_wdt_start(&wdt->wdd);
+ if (ret) {
+ sprd_wdt_disable(wdt);
+diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
+index b1357aa4bc55..f192b6f42da9 100644
+--- a/drivers/xen/cpu_hotplug.c
++++ b/drivers/xen/cpu_hotplug.c
+@@ -54,7 +54,7 @@ static int vcpu_online(unsigned int cpu)
+ }
+ static void vcpu_hotplug(unsigned int cpu)
+ {
+- if (!cpu_possible(cpu))
++ if (cpu >= nr_cpu_ids || !cpu_possible(cpu))
+ return;
+
+ switch (vcpu_online(cpu)) {
+diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
+index d4ea33581ac2..b3fbfed28682 100644
+--- a/drivers/xen/pvcalls-back.c
++++ b/drivers/xen/pvcalls-back.c
+@@ -784,7 +784,7 @@ static int pvcalls_back_poll(struct xenbus_device *dev,
+ mappass->reqcopy = *req;
+ icsk = inet_csk(mappass->sock->sk);
+ queue = &icsk->icsk_accept_queue;
+- data = queue->rskq_accept_head != NULL;
++ data = READ_ONCE(queue->rskq_accept_head) != NULL;
+ if (data) {
+ mappass->reqcopy.cmd = 0;
+ ret = 0;
+diff --git a/fs/affs/super.c b/fs/affs/super.c
+index d1ad11a8a4a5..b6ce0c36029b 100644
+--- a/fs/affs/super.c
++++ b/fs/affs/super.c
+@@ -561,14 +561,9 @@ affs_remount(struct super_block *sb, int *flags, char *data)
+ int root_block;
+ unsigned long mount_flags;
+ int res = 0;
+- char *new_opts;
+ char volume[32];
+ char *prefix = NULL;
+
+- new_opts = kstrdup(data, GFP_KERNEL);
+- if (data && !new_opts)
+- return -ENOMEM;
+-
+ pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
+
+ sync_filesystem(sb);
+@@ -579,7 +574,6 @@ affs_remount(struct super_block *sb, int *flags, char *data)
+ &blocksize, &prefix, volume,
+ &mount_flags)) {
+ kfree(prefix);
+- kfree(new_opts);
+ return -EINVAL;
+ }
+
+diff --git a/fs/afs/callback.c b/fs/afs/callback.c
+index 4ad701250299..97283b04fa6f 100644
+--- a/fs/afs/callback.c
++++ b/fs/afs/callback.c
+@@ -221,14 +221,8 @@ void afs_break_callback(struct afs_vnode *vnode)
+ vnode->cb_break++;
+ afs_clear_permits(vnode);
+
+- spin_lock(&vnode->lock);
+-
+- _debug("break callback");
+-
+- if (list_empty(&vnode->granted_locks) &&
+- !list_empty(&vnode->pending_locks))
++ if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
+ afs_lock_may_be_available(vnode);
+- spin_unlock(&vnode->lock);
+ }
+
+ write_sequnlock(&vnode->cb_lock);
+diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
+index 8b400f5aead5..0e7162527db8 100644
+--- a/fs/afs/dir_edit.c
++++ b/fs/afs/dir_edit.c
+@@ -72,13 +72,11 @@ static int afs_find_contig_bits(union afs_xdr_dir_block *block, unsigned int nr_
+ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
+ int bit, unsigned int nr_slots)
+ {
+- u64 mask, before, after;
++ u64 mask;
+
+ mask = (1 << nr_slots) - 1;
+ mask <<= bit;
+
+- before = *(u64 *)block->hdr.bitmap;
+-
+ block->hdr.bitmap[0] |= (u8)(mask >> 0 * 8);
+ block->hdr.bitmap[1] |= (u8)(mask >> 1 * 8);
+ block->hdr.bitmap[2] |= (u8)(mask >> 2 * 8);
+@@ -87,8 +85,6 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
+ block->hdr.bitmap[5] |= (u8)(mask >> 5 * 8);
+ block->hdr.bitmap[6] |= (u8)(mask >> 6 * 8);
+ block->hdr.bitmap[7] |= (u8)(mask >> 7 * 8);
+-
+- after = *(u64 *)block->hdr.bitmap;
+ }
+
+ /*
+@@ -97,13 +93,11 @@ static void afs_set_contig_bits(union afs_xdr_dir_block *block,
+ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
+ int bit, unsigned int nr_slots)
+ {
+- u64 mask, before, after;
++ u64 mask;
+
+ mask = (1 << nr_slots) - 1;
+ mask <<= bit;
+
+- before = *(u64 *)block->hdr.bitmap;
+-
+ block->hdr.bitmap[0] &= ~(u8)(mask >> 0 * 8);
+ block->hdr.bitmap[1] &= ~(u8)(mask >> 1 * 8);
+ block->hdr.bitmap[2] &= ~(u8)(mask >> 2 * 8);
+@@ -112,8 +106,6 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
+ block->hdr.bitmap[5] &= ~(u8)(mask >> 5 * 8);
+ block->hdr.bitmap[6] &= ~(u8)(mask >> 6 * 8);
+ block->hdr.bitmap[7] &= ~(u8)(mask >> 7 * 8);
+-
+- after = *(u64 *)block->hdr.bitmap;
+ }
+
+ /*
+diff --git a/fs/afs/file.c b/fs/afs/file.c
+index 843d3b970b84..0bd78df6a64e 100644
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -169,11 +169,12 @@ int afs_release(struct inode *inode, struct file *file)
+ {
+ struct afs_vnode *vnode = AFS_FS_I(inode);
+ struct afs_file *af = file->private_data;
++ int ret = 0;
+
+ _enter("{%x:%u},", vnode->fid.vid, vnode->fid.vnode);
+
+ if ((file->f_mode & FMODE_WRITE))
+- return vfs_fsync(file, 0);
++ ret = vfs_fsync(file, 0);
+
+ file->private_data = NULL;
+ if (af->wb)
+@@ -181,8 +182,8 @@ int afs_release(struct inode *inode, struct file *file)
+ key_put(af->key);
+ kfree(af);
+ afs_prune_wb_keys(vnode);
+- _leave(" = 0");
+- return 0;
++ _leave(" = %d", ret);
++ return ret;
+ }
+
+ /*
+diff --git a/fs/afs/flock.c b/fs/afs/flock.c
+index 1bb300ef362b..fbf4986b1224 100644
+--- a/fs/afs/flock.c
++++ b/fs/afs/flock.c
+@@ -13,9 +13,11 @@
+
+ #define AFS_LOCK_GRANTED 0
+ #define AFS_LOCK_PENDING 1
++#define AFS_LOCK_YOUR_TRY 2
+
+ struct workqueue_struct *afs_lock_manager;
+
++static void afs_next_locker(struct afs_vnode *vnode, int error);
+ static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl);
+ static void afs_fl_release_private(struct file_lock *fl);
+
+@@ -24,6 +26,12 @@ static const struct file_lock_operations afs_lock_ops = {
+ .fl_release_private = afs_fl_release_private,
+ };
+
++static inline void afs_set_lock_state(struct afs_vnode *vnode, enum afs_lock_state state)
++{
++ _debug("STATE %u -> %u", vnode->lock_state, state);
++ vnode->lock_state = state;
++}
++
+ /*
+ * if the callback is broken on this vnode, then the lock may now be available
+ */
+@@ -31,14 +39,17 @@ void afs_lock_may_be_available(struct afs_vnode *vnode)
+ {
+ _enter("{%x:%u}", vnode->fid.vid, vnode->fid.vnode);
+
+- queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
++ spin_lock(&vnode->lock);
++ if (vnode->lock_state == AFS_VNODE_LOCK_WAITING_FOR_CB)
++ afs_next_locker(vnode, 0);
++ spin_unlock(&vnode->lock);
+ }
+
+ /*
+ * the lock will time out in 5 minutes unless we extend it, so schedule
+ * extension in a bit less than that time
+ */
+-static void afs_schedule_lock_extension(struct afs_vnode *vnode)
++static void __maybe_unused afs_schedule_lock_extension(struct afs_vnode *vnode)
+ {
+ queue_delayed_work(afs_lock_manager, &vnode->lock_work,
+ AFS_LOCKWAIT * HZ / 2);
+@@ -49,22 +60,65 @@ static void afs_schedule_lock_extension(struct afs_vnode *vnode)
+ * first lock in the queue is itself a readlock)
+ * - the caller must hold the vnode lock
+ */
+-static void afs_grant_locks(struct afs_vnode *vnode, struct file_lock *fl)
++static void afs_grant_locks(struct afs_vnode *vnode)
+ {
+ struct file_lock *p, *_p;
++ bool exclusive = (vnode->lock_type == AFS_LOCK_WRITE);
+
+- list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+- if (fl->fl_type == F_RDLCK) {
+- list_for_each_entry_safe(p, _p, &vnode->pending_locks,
+- fl_u.afs.link) {
+- if (p->fl_type == F_RDLCK) {
+- p->fl_u.afs.state = AFS_LOCK_GRANTED;
+- list_move_tail(&p->fl_u.afs.link,
+- &vnode->granted_locks);
+- wake_up(&p->fl_wait);
+- }
++ list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
++ if (!exclusive && p->fl_type == F_WRLCK)
++ continue;
++
++ list_move_tail(&p->fl_u.afs.link, &vnode->granted_locks);
++ p->fl_u.afs.state = AFS_LOCK_GRANTED;
++ wake_up(&p->fl_wait);
++ }
++}
++
++/*
++ * If an error is specified, reject every pending lock that matches the
++ * authentication and type of the lock we failed to get. If there are any
++ * remaining lockers, try to wake up one of them to have a go.
++ */
++static void afs_next_locker(struct afs_vnode *vnode, int error)
++{
++ struct file_lock *p, *_p, *next = NULL;
++ struct key *key = vnode->lock_key;
++ unsigned int fl_type = F_RDLCK;
++
++ _enter("");
++
++ if (vnode->lock_type == AFS_LOCK_WRITE)
++ fl_type = F_WRLCK;
++
++ list_for_each_entry_safe(p, _p, &vnode->pending_locks, fl_u.afs.link) {
++ if (error &&
++ p->fl_type == fl_type &&
++ afs_file_key(p->fl_file) == key) {
++ list_del_init(&p->fl_u.afs.link);
++ p->fl_u.afs.state = error;
++ wake_up(&p->fl_wait);
+ }
++
++ /* Select the next locker to hand off to. */
++ if (next &&
++ (next->fl_type == F_WRLCK || p->fl_type == F_RDLCK))
++ continue;
++ next = p;
++ }
++
++ vnode->lock_key = NULL;
++ key_put(key);
++
++ if (next) {
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
++ next->fl_u.afs.state = AFS_LOCK_YOUR_TRY;
++ wake_up(&next->fl_wait);
++ } else {
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_NONE);
+ }
++
++ _leave("");
+ }
+
+ /*
+@@ -170,8 +224,6 @@ void afs_lock_work(struct work_struct *work)
+ {
+ struct afs_vnode *vnode =
+ container_of(work, struct afs_vnode, lock_work.work);
+- struct file_lock *fl, *next;
+- afs_lock_type_t type;
+ struct key *key;
+ int ret;
+
+@@ -184,7 +236,7 @@ again:
+ switch (vnode->lock_state) {
+ case AFS_VNODE_LOCK_NEED_UNLOCK:
+ _debug("unlock");
+- vnode->lock_state = AFS_VNODE_LOCK_UNLOCKING;
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_UNLOCKING);
+ spin_unlock(&vnode->lock);
+
+ /* attempt to release the server lock; if it fails, we just
+@@ -196,22 +248,9 @@ again:
+ vnode->fid.vid, vnode->fid.vnode, ret);
+
+ spin_lock(&vnode->lock);
+- key_put(vnode->lock_key);
+- vnode->lock_key = NULL;
+- vnode->lock_state = AFS_VNODE_LOCK_NONE;
+-
+- if (list_empty(&vnode->pending_locks)) {
+- spin_unlock(&vnode->lock);
+- return;
+- }
+-
+- /* The new front of the queue now owns the state variables. */
+- next = list_entry(vnode->pending_locks.next,
+- struct file_lock, fl_u.afs.link);
+- vnode->lock_key = key_get(afs_file_key(next->fl_file));
+- vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+- vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+- goto again;
++ afs_next_locker(vnode, 0);
++ spin_unlock(&vnode->lock);
++ return;
+
+ /* If we've already got a lock, then it must be time to extend that
+ * lock as AFS locks time out after 5 minutes.
+@@ -222,7 +261,7 @@ again:
+ ASSERT(!list_empty(&vnode->granted_locks));
+
+ key = key_get(vnode->lock_key);
+- vnode->lock_state = AFS_VNODE_LOCK_EXTENDING;
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_EXTENDING);
+ spin_unlock(&vnode->lock);
+
+ ret = afs_extend_lock(vnode, key); /* RPC */
+@@ -236,72 +275,26 @@ again:
+
+ if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
+ goto again;
+- vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
+
+- if (ret == 0)
+- afs_schedule_lock_extension(vnode);
+- else
++ if (ret != 0)
+ queue_delayed_work(afs_lock_manager, &vnode->lock_work,
+ HZ * 10);
+ spin_unlock(&vnode->lock);
+ _leave(" [ext]");
+ return;
+
+- /* If we don't have a granted lock, then we must've been called
+- * back by the server, and so if might be possible to get a
+- * lock we're currently waiting for.
+- */
++ /* If we're waiting for a callback to indicate lock release, we can't
++ * actually rely on this, so need to recheck at regular intervals. The
++ * problem is that the server might not notify us if the lock just
++ * expires (say because a client died) rather than being explicitly
++ * released.
++ */
+ case AFS_VNODE_LOCK_WAITING_FOR_CB:
+- _debug("get");
+-
+- key = key_get(vnode->lock_key);
+- type = vnode->lock_type;
+- vnode->lock_state = AFS_VNODE_LOCK_SETTING;
++ _debug("retry");
++ afs_next_locker(vnode, 0);
+ spin_unlock(&vnode->lock);
+-
+- ret = afs_set_lock(vnode, key, type); /* RPC */
+- key_put(key);
+-
+- spin_lock(&vnode->lock);
+- switch (ret) {
+- case -EWOULDBLOCK:
+- _debug("blocked");
+- break;
+- case 0:
+- _debug("acquired");
+- vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+- /* Fall through */
+- default:
+- /* Pass the lock or the error onto the first locker in
+- * the list - if they're looking for this type of lock.
+- * If they're not, we assume that whoever asked for it
+- * took a signal.
+- */
+- if (list_empty(&vnode->pending_locks)) {
+- _debug("withdrawn");
+- vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+- goto again;
+- }
+-
+- fl = list_entry(vnode->pending_locks.next,
+- struct file_lock, fl_u.afs.link);
+- type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+- if (vnode->lock_type != type) {
+- _debug("changed");
+- vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+- goto again;
+- }
+-
+- fl->fl_u.afs.state = ret;
+- if (ret == 0)
+- afs_grant_locks(vnode, fl);
+- else
+- list_del_init(&fl->fl_u.afs.link);
+- wake_up(&fl->fl_wait);
+- spin_unlock(&vnode->lock);
+- _leave(" [granted]");
+- return;
+- }
++ return;
+
+ default:
+ /* Looks like a lock request was withdrawn. */
+@@ -319,14 +312,15 @@ again:
+ */
+ static void afs_defer_unlock(struct afs_vnode *vnode)
+ {
+- _enter("");
++ _enter("%u", vnode->lock_state);
+
+- if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
+- vnode->lock_state == AFS_VNODE_LOCK_EXTENDING) {
++ if (list_empty(&vnode->granted_locks) &&
++ (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
++ vnode->lock_state == AFS_VNODE_LOCK_EXTENDING)) {
+ cancel_delayed_work(&vnode->lock_work);
+
+- vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+- afs_lock_may_be_available(vnode);
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_NEED_UNLOCK);
++ queue_delayed_work(afs_lock_manager, &vnode->lock_work, 0);
+ }
+ }
+
+@@ -375,50 +369,6 @@ static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
+ return 0;
+ }
+
+-/*
+- * Remove the front runner from the pending queue.
+- * - The caller must hold vnode->lock.
+- */
+-static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
+-{
+- struct file_lock *next;
+-
+- _enter("");
+-
+- /* ->lock_type, ->lock_key and ->lock_state only belong to this
+- * file_lock if we're at the front of the pending queue or if we have
+- * the lock granted or if the lock_state is NEED_UNLOCK or UNLOCKING.
+- */
+- if (vnode->granted_locks.next == &fl->fl_u.afs.link &&
+- vnode->granted_locks.prev == &fl->fl_u.afs.link) {
+- list_del_init(&fl->fl_u.afs.link);
+- afs_defer_unlock(vnode);
+- return;
+- }
+-
+- if (!list_empty(&vnode->granted_locks) ||
+- vnode->pending_locks.next != &fl->fl_u.afs.link) {
+- list_del_init(&fl->fl_u.afs.link);
+- return;
+- }
+-
+- list_del_init(&fl->fl_u.afs.link);
+- key_put(vnode->lock_key);
+- vnode->lock_key = NULL;
+- vnode->lock_state = AFS_VNODE_LOCK_NONE;
+-
+- if (list_empty(&vnode->pending_locks))
+- return;
+-
+- /* The new front of the queue now owns the state variables. */
+- next = list_entry(vnode->pending_locks.next,
+- struct file_lock, fl_u.afs.link);
+- vnode->lock_key = key_get(afs_file_key(next->fl_file));
+- vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+- vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+- afs_lock_may_be_available(vnode);
+-}
+-
+ /*
+ * request a lock on a file on the server
+ */
+@@ -432,10 +382,6 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
+
+ _enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
+
+- /* only whole-file locks are supported */
+- if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
+- return -EINVAL;
+-
+ fl->fl_ops = &afs_lock_ops;
+ INIT_LIST_HEAD(&fl->fl_u.afs.link);
+ fl->fl_u.afs.state = AFS_LOCK_PENDING;
+@@ -447,44 +393,66 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
+ return ret;
+
+ spin_lock(&vnode->lock);
++ list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
+
+- /* If we've already got a readlock on the server then we instantly
+- * grant another readlock, irrespective of whether there are any
+- * pending writelocks.
++ /* If we've already got a lock on the server then try to move to having
++ * the VFS grant the requested lock. Note that this means that other
++ * clients may get starved out.
+ */
+- if (type == AFS_LOCK_READ &&
+- vnode->lock_state == AFS_VNODE_LOCK_GRANTED &&
+- vnode->lock_type == AFS_LOCK_READ) {
+- _debug("instant readlock");
+- ASSERT(!list_empty(&vnode->granted_locks));
+- goto share_existing_lock;
+- }
++ _debug("try %u", vnode->lock_state);
++ if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED) {
++ if (type == AFS_LOCK_READ) {
++ _debug("instant readlock");
++ list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
++ fl->fl_u.afs.state = AFS_LOCK_GRANTED;
++ goto vnode_is_locked_u;
++ }
+
+- list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
++ if (vnode->lock_type == AFS_LOCK_WRITE) {
++ _debug("instant writelock");
++ list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
++ fl->fl_u.afs.state = AFS_LOCK_GRANTED;
++ goto vnode_is_locked_u;
++ }
++ }
+
+ if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
+ goto need_to_wait;
+
++try_to_lock:
+ /* We don't have a lock on this vnode and we aren't currently waiting
+ * for one either, so ask the server for a lock.
+ *
+ * Note that we need to be careful if we get interrupted by a signal
+ * after dispatching the request as we may still get the lock, even
+ * though we don't wait for the reply (it's not too bad a problem - the
+- * lock will expire in 10 mins anyway).
++ * lock will expire in 5 mins anyway).
+ */
+ _debug("not locked");
+ vnode->lock_key = key_get(key);
+ vnode->lock_type = type;
+- vnode->lock_state = AFS_VNODE_LOCK_SETTING;
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
+ spin_unlock(&vnode->lock);
+
+ ret = afs_set_lock(vnode, key, type); /* RPC */
+
+ spin_lock(&vnode->lock);
+ switch (ret) {
++ case -EKEYREJECTED:
++ case -EKEYEXPIRED:
++ case -EKEYREVOKED:
++ case -EPERM:
++ case -EACCES:
++ fl->fl_u.afs.state = ret;
++ list_del_init(&fl->fl_u.afs.link);
++ afs_next_locker(vnode, ret);
++ goto error_unlock;
++
+ default:
+- goto abort_attempt;
++ fl->fl_u.afs.state = ret;
++ list_del_init(&fl->fl_u.afs.link);
++ afs_next_locker(vnode, 0);
++ goto error_unlock;
+
+ case -EWOULDBLOCK:
+ /* The server doesn't have a lock-waiting queue, so the client
+@@ -494,29 +462,23 @@ static int afs_do_setlk(struct file *file, struct file_lock *fl)
+ _debug("would block");
+ ASSERT(list_empty(&vnode->granted_locks));
+ ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
+- vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+- goto need_to_wait;
++ goto lock_is_contended;
+
+ case 0:
+ _debug("acquired");
+- break;
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_GRANTED);
++ afs_grant_locks(vnode);
++ goto vnode_is_locked_u;
+ }
+
+- /* we've acquired a server lock, but it needs to be renewed after 5
+- * mins */
+- vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+- afs_schedule_lock_extension(vnode);
+-
+-share_existing_lock:
+- /* the lock has been granted as far as we're concerned... */
+- fl->fl_u.afs.state = AFS_LOCK_GRANTED;
+- list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+-
+-given_lock:
+- /* ... but we do still need to get the VFS's blessing */
++vnode_is_locked_u:
+ spin_unlock(&vnode->lock);
++vnode_is_locked:
++ /* the lock has been granted by the server... */
++ ASSERTCMP(fl->fl_u.afs.state, ==, AFS_LOCK_GRANTED);
+
+- ret = posix_lock_file(file, fl, NULL);
++ /* ... but the VFS still needs to distribute access on this client. */
++ ret = locks_lock_file_wait(file, fl);
+ if (ret < 0)
+ goto vfs_rejected_lock;
+
+@@ -528,38 +490,61 @@ given_lock:
+ _leave(" = 0");
+ return 0;
+
++lock_is_contended:
++ if (!(fl->fl_flags & FL_SLEEP)) {
++ list_del_init(&fl->fl_u.afs.link);
++ afs_next_locker(vnode, 0);
++ ret = -EAGAIN;
++ goto error_unlock;
++ }
++
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_WAITING_FOR_CB);
++ queue_delayed_work(afs_lock_manager, &vnode->lock_work, HZ * 5);
++
+ need_to_wait:
+ /* We're going to have to wait. Either this client doesn't have a lock
+ * on the server yet and we need to wait for a callback to occur, or
+- * the client does have a lock on the server, but it belongs to some
+- * other process(es) and is incompatible with the lock we want.
++ * the client does have a lock on the server, but it's shared and we
++ * need an exclusive lock.
+ */
+- ret = -EAGAIN;
+- if (fl->fl_flags & FL_SLEEP) {
+- spin_unlock(&vnode->lock);
++ spin_unlock(&vnode->lock);
+
+- _debug("sleep");
+- ret = wait_event_interruptible(fl->fl_wait,
+- fl->fl_u.afs.state != AFS_LOCK_PENDING);
++ _debug("sleep");
++ ret = wait_event_interruptible(fl->fl_wait,
++ fl->fl_u.afs.state != AFS_LOCK_PENDING);
++ _debug("wait = %d", ret);
+
++ if (fl->fl_u.afs.state >= 0 && fl->fl_u.afs.state != AFS_LOCK_GRANTED) {
+ spin_lock(&vnode->lock);
+- }
+
+- if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
+- goto given_lock;
+- if (fl->fl_u.afs.state < 0)
+- ret = fl->fl_u.afs.state;
++ switch (fl->fl_u.afs.state) {
++ case AFS_LOCK_YOUR_TRY:
++ fl->fl_u.afs.state = AFS_LOCK_PENDING;
++ goto try_to_lock;
++ case AFS_LOCK_PENDING:
++ if (ret > 0) {
++ /* We need to retry the lock. We may not be
++ * notified by the server if it just expired
++ * rather than being released.
++ */
++ ASSERTCMP(vnode->lock_state, ==, AFS_VNODE_LOCK_WAITING_FOR_CB);
++ afs_set_lock_state(vnode, AFS_VNODE_LOCK_SETTING);
++ fl->fl_u.afs.state = AFS_LOCK_PENDING;
++ goto try_to_lock;
++ }
++ goto error_unlock;
++ case AFS_LOCK_GRANTED:
++ default:
++ break;
++ }
+
+-abort_attempt:
+- /* we aren't going to get the lock, either because we're unwilling to
+- * wait, or because some signal happened */
+- _debug("abort");
+- afs_dequeue_lock(vnode, fl);
++ spin_unlock(&vnode->lock);
++ }
+
+-error_unlock:
+- spin_unlock(&vnode->lock);
+- _leave(" = %d", ret);
+- return ret;
++ if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
++ goto vnode_is_locked;
++ ret = fl->fl_u.afs.state;
++ goto error;
+
+ vfs_rejected_lock:
+ /* The VFS rejected the lock we just obtained, so we have to discard
+@@ -569,9 +554,13 @@ vfs_rejected_lock:
+ _debug("vfs refused %d", ret);
+ spin_lock(&vnode->lock);
+ list_del_init(&fl->fl_u.afs.link);
+- if (list_empty(&vnode->granted_locks))
+- afs_defer_unlock(vnode);
+- goto error_unlock;
++ afs_defer_unlock(vnode);
++
++error_unlock:
++ spin_unlock(&vnode->lock);
++error:
++ _leave(" = %d", ret);
++ return ret;
+ }
+
+ /*
+@@ -587,11 +576,7 @@ static int afs_do_unlk(struct file *file, struct file_lock *fl)
+ /* Flush all pending writes before doing anything with locks. */
+ vfs_fsync(file, 0);
+
+- /* only whole-file unlocks are supported */
+- if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
+- return -EINVAL;
+-
+- ret = posix_lock_file(file, fl, NULL);
++ ret = locks_lock_file_wait(file, fl);
+ _leave(" = %d [%u]", ret, vnode->lock_state);
+ return ret;
+ }
+@@ -618,12 +603,15 @@ static int afs_do_getlk(struct file *file, struct file_lock *fl)
+ goto error;
+
+ lock_count = READ_ONCE(vnode->status.lock_count);
+- if (lock_count > 0)
+- fl->fl_type = F_RDLCK;
+- else
+- fl->fl_type = F_WRLCK;
+- fl->fl_start = 0;
+- fl->fl_end = OFFSET_MAX;
++ if (lock_count != 0) {
++ if (lock_count > 0)
++ fl->fl_type = F_RDLCK;
++ else
++ fl->fl_type = F_WRLCK;
++ fl->fl_start = 0;
++ fl->fl_end = OFFSET_MAX;
++ fl->fl_pid = 0;
++ }
+ }
+
+ ret = 0;
+@@ -710,7 +698,11 @@ static void afs_fl_release_private(struct file_lock *fl)
+ _enter("");
+
+ spin_lock(&vnode->lock);
+- afs_dequeue_lock(vnode, fl);
++
++ list_del_init(&fl->fl_u.afs.link);
++ if (list_empty(&vnode->granted_locks))
++ afs_defer_unlock(vnode);
++
+ _debug("state %u for %p", vnode->lock_state, vnode);
+ spin_unlock(&vnode->lock);
+ }
+diff --git a/fs/afs/inode.c b/fs/afs/inode.c
+index 0726e40db0f8..e6f11da5461b 100644
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -398,12 +398,9 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
+ vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
+ vnode->cb_v_break = vnode->volume->cb_v_break;
+ valid = false;
+- } else if (vnode->status.type == AFS_FTYPE_DIR &&
+- (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags) ||
+- vnode->cb_expires_at - 10 <= now)) {
++ } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
+ valid = false;
+- } else if (test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags) ||
+- vnode->cb_expires_at - 10 <= now) {
++ } else if (vnode->cb_expires_at - 10 <= now) {
+ valid = false;
+ } else {
+ valid = true;
+@@ -541,6 +538,7 @@ void afs_evict_inode(struct inode *inode)
+ }
+ #endif
+
++ afs_prune_wb_keys(vnode);
+ afs_put_permits(rcu_access_pointer(vnode->permit_cache));
+ key_put(vnode->lock_key);
+ vnode->lock_key = NULL;
+diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
+index 2543f24d23f8..560dd5ff5a15 100644
+--- a/fs/afs/rxrpc.c
++++ b/fs/afs/rxrpc.c
+@@ -573,6 +573,7 @@ static long afs_wait_for_call_to_complete(struct afs_call *call,
+ call->need_attention = false;
+ __set_current_state(TASK_RUNNING);
+ afs_deliver_to_call(call);
++ timeout = rtt2;
+ continue;
+ }
+
+diff --git a/fs/afs/security.c b/fs/afs/security.c
+index 81dfedb7879f..66042b432baa 100644
+--- a/fs/afs/security.c
++++ b/fs/afs/security.c
+@@ -87,11 +87,9 @@ void afs_clear_permits(struct afs_vnode *vnode)
+ permits = rcu_dereference_protected(vnode->permit_cache,
+ lockdep_is_held(&vnode->lock));
+ RCU_INIT_POINTER(vnode->permit_cache, NULL);
+- vnode->cb_break++;
+ spin_unlock(&vnode->lock);
+
+- if (permits)
+- afs_put_permits(permits);
++ afs_put_permits(permits);
+ }
+
+ /*
+diff --git a/fs/afs/super.c b/fs/afs/super.c
+index 18b9b7ca20c9..4961d32ccd1e 100644
+--- a/fs/afs/super.c
++++ b/fs/afs/super.c
+@@ -393,6 +393,7 @@ static int afs_fill_super(struct super_block *sb,
+ /* fill in the superblock */
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
++ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_magic = AFS_FS_MAGIC;
+ sb->s_op = &afs_super_ops;
+ if (!as->dyn_root)
+diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
+index cfcc674e64a5..411f67c79f09 100644
+--- a/fs/afs/xattr.c
++++ b/fs/afs/xattr.c
+@@ -50,7 +50,7 @@ static int afs_xattr_get_cell(const struct xattr_handler *handler,
+ return namelen;
+ if (namelen > size)
+ return -ERANGE;
+- memcpy(buffer, cell->name, size);
++ memcpy(buffer, cell->name, namelen);
+ return namelen;
+ }
+
+@@ -104,7 +104,7 @@ static int afs_xattr_get_volume(const struct xattr_handler *handler,
+ return namelen;
+ if (namelen > size)
+ return -ERANGE;
+- memcpy(buffer, volname, size);
++ memcpy(buffer, volname, namelen);
+ return namelen;
+ }
+
+diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
+index 78490e544c91..c2c93fe9d7fd 100644
+--- a/fs/btrfs/file.c
++++ b/fs/btrfs/file.c
+@@ -1895,7 +1895,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+ bool sync = (file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host);
+ ssize_t err;
+ loff_t pos;
+- size_t count = iov_iter_count(from);
++ size_t count;
+ loff_t oldsize;
+ int clean_page = 0;
+
+@@ -1917,6 +1917,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
+ }
+
+ pos = iocb->ki_pos;
++ count = iov_iter_count(from);
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ /*
+ * We will allocate space in case nodatacow is not set,
+diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
+index e1b50c62ba65..c7dcb7c52108 100644
+--- a/fs/btrfs/inode-map.c
++++ b/fs/btrfs/inode-map.c
+@@ -12,6 +12,19 @@
+ #include "inode-map.h"
+ #include "transaction.h"
+
++static void fail_caching_thread(struct btrfs_root *root)
++{
++ struct btrfs_fs_info *fs_info = root->fs_info;
++
++ btrfs_warn(fs_info, "failed to start inode caching task");
++ btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
++ "disabling inode map caching");
++ spin_lock(&root->ino_cache_lock);
++ root->ino_cache_state = BTRFS_CACHE_ERROR;
++ spin_unlock(&root->ino_cache_lock);
++ wake_up(&root->ino_cache_wait);
++}
++
+ static int caching_kthread(void *data)
+ {
+ struct btrfs_root *root = data;
+@@ -28,8 +41,10 @@ static int caching_kthread(void *data)
+ return 0;
+
+ path = btrfs_alloc_path();
+- if (!path)
++ if (!path) {
++ fail_caching_thread(root);
+ return -ENOMEM;
++ }
+
+ /* Since the commit root is read-only, we can safely skip locking. */
+ path->skip_locking = 1;
+@@ -145,6 +160,7 @@ static void start_caching(struct btrfs_root *root)
+ spin_lock(&root->ino_cache_lock);
+ root->ino_cache_state = BTRFS_CACHE_FINISHED;
+ spin_unlock(&root->ino_cache_lock);
++ wake_up(&root->ino_cache_wait);
+ return;
+ }
+
+@@ -163,11 +179,8 @@ static void start_caching(struct btrfs_root *root)
+
+ tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu",
+ root->root_key.objectid);
+- if (IS_ERR(tsk)) {
+- btrfs_warn(fs_info, "failed to start inode caching task");
+- btrfs_clear_pending_and_info(fs_info, INODE_MAP_CACHE,
+- "disabling inode map caching");
+- }
++ if (IS_ERR(tsk))
++ fail_caching_thread(root);
+ }
+
+ int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
+@@ -185,11 +198,14 @@ again:
+
+ wait_event(root->ino_cache_wait,
+ root->ino_cache_state == BTRFS_CACHE_FINISHED ||
++ root->ino_cache_state == BTRFS_CACHE_ERROR ||
+ root->free_ino_ctl->free_space > 0);
+
+ if (root->ino_cache_state == BTRFS_CACHE_FINISHED &&
+ root->free_ino_ctl->free_space == 0)
+ return -ENOSPC;
++ else if (root->ino_cache_state == BTRFS_CACHE_ERROR)
++ return btrfs_find_free_objectid(root, objectid);
+ else
+ goto again;
+ }
+diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
+index 5e4f3f833e85..a09ce27ab220 100644
+--- a/fs/ceph/xattr.c
++++ b/fs/ceph/xattr.c
+@@ -221,7 +221,7 @@ static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
+ static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
+ size_t size)
+ {
+- return snprintf(val, size, "%lld.09%ld", ci->i_rctime.tv_sec,
++ return snprintf(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
+ ci->i_rctime.tv_nsec);
+ }
+
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 7e85070d010f..576cf71576da 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -970,6 +970,7 @@ cifs_demultiplex_thread(void *p)
+ mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
+
+ set_freezable();
++ allow_kernel_signal(SIGKILL);
+ while (server->tcpStatus != CifsExiting) {
+ if (try_to_freeze())
+ continue;
+@@ -2454,7 +2455,7 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
+
+ task = xchg(&server->tsk, NULL);
+ if (task)
+- force_sig(SIGKILL, task);
++ send_sig(SIGKILL, task, 1);
+ }
+
+ static struct TCP_Server_Info *
+diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
+index 808cae6d5f50..ae3248326c44 100644
+--- a/fs/exportfs/expfs.c
++++ b/fs/exportfs/expfs.c
+@@ -147,6 +147,7 @@ static struct dentry *reconnect_one(struct vfsmount *mnt,
+ tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf));
+ if (IS_ERR(tmp)) {
+ dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp));
++ err = PTR_ERR(tmp);
+ goto out_err;
+ }
+ if (tmp != dentry) {
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index 56f6e1782d5f..4572cb057951 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1419,7 +1419,7 @@ int htree_inlinedir_to_tree(struct file *dir_file,
+ err = ext4_htree_store_dirent(dir_file, hinfo->hash,
+ hinfo->minor_hash, de, &tmp_str);
+ if (err) {
+- count = err;
++ ret = err;
+ goto out;
+ }
+ count++;
+diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
+index 0d3d848d186b..ebe19894884b 100644
+--- a/fs/f2fs/dir.c
++++ b/fs/f2fs/dir.c
+@@ -572,6 +572,11 @@ add_dentry:
+
+ if (inode) {
+ f2fs_i_pino_write(inode, dir->i_ino);
++
++ /* synchronize inode page's data from inode cache */
++ if (is_inode_flag_set(inode, FI_NEW_INODE))
++ f2fs_update_inode(inode, page);
++
+ f2fs_put_page(page, 1);
+ }
+
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index 72d154e71bb5..6b5b685af599 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1701,7 +1701,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+ if (time_to_inject(sbi, FAULT_BLOCK)) {
+ f2fs_show_injection_info(FAULT_BLOCK);
+ release = *count;
+- goto enospc;
++ goto release_quota;
+ }
+
+ /*
+@@ -1741,6 +1741,7 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
+
+ enospc:
+ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
++release_quota:
+ dquot_release_reservation_block(inode, release);
+ return -ENOSPC;
+ }
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 6bbb5f6801e2..c1ba29d10789 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -133,6 +133,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
+
+ err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+ if (err) {
++ f2fs_truncate_data_blocks_range(dn, 1);
+ f2fs_put_dnode(dn);
+ return err;
+ }
+@@ -577,6 +578,11 @@ int f2fs_add_inline_entry(struct inode *dir, const struct qstr *new_name,
+ /* we don't need to mark_inode_dirty now */
+ if (inode) {
+ f2fs_i_pino_write(inode, dir->i_ino);
++
++ /* synchronize inode page's data from inode cache */
++ if (is_inode_flag_set(inode, FI_NEW_INODE))
++ f2fs_update_inode(inode, page);
++
+ f2fs_put_page(page, 1);
+ }
+
+diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
+index a5663cb621d8..78789c5ed36b 100644
+--- a/fs/jfs/jfs_txnmgr.c
++++ b/fs/jfs/jfs_txnmgr.c
+@@ -1928,8 +1928,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd,
+ * header ?
+ */
+ if (tlck->type & tlckTRUNCATE) {
+- /* This odd declaration suppresses a bogus gcc warning */
+- pxd_t pxd = pxd; /* truncated extent of xad */
++ pxd_t pxd; /* truncated extent of xad */
+ int twm;
+
+ /*
+diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
+index 74ff459b75ef..b0c0c2fc2fba 100644
+--- a/fs/nfs/delegation.c
++++ b/fs/nfs/delegation.c
+@@ -240,6 +240,8 @@ static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation
+ spin_lock(&delegation->lock);
+ if (delegation->inode != NULL)
+ inode = igrab(delegation->inode);
++ if (!inode)
++ set_bit(NFS_DELEGATION_INODE_FREEING, &delegation->flags);
+ spin_unlock(&delegation->lock);
+ return inode;
+ }
+@@ -955,10 +957,11 @@ restart:
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ list_for_each_entry_rcu(delegation, &server->delegations,
+ super_list) {
+- if (test_bit(NFS_DELEGATION_RETURNING,
+- &delegation->flags))
+- continue;
+- if (test_bit(NFS_DELEGATION_NEED_RECLAIM,
++ if (test_bit(NFS_DELEGATION_INODE_FREEING,
++ &delegation->flags) ||
++ test_bit(NFS_DELEGATION_RETURNING,
++ &delegation->flags) ||
++ test_bit(NFS_DELEGATION_NEED_RECLAIM,
+ &delegation->flags) == 0)
+ continue;
+ if (!nfs_sb_active(server->super))
+@@ -1064,10 +1067,11 @@ restart:
+ list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
+ list_for_each_entry_rcu(delegation, &server->delegations,
+ super_list) {
+- if (test_bit(NFS_DELEGATION_RETURNING,
+- &delegation->flags))
+- continue;
+- if (test_bit(NFS_DELEGATION_TEST_EXPIRED,
++ if (test_bit(NFS_DELEGATION_INODE_FREEING,
++ &delegation->flags) ||
++ test_bit(NFS_DELEGATION_RETURNING,
++ &delegation->flags) ||
++ test_bit(NFS_DELEGATION_TEST_EXPIRED,
+ &delegation->flags) == 0)
+ continue;
+ if (!nfs_sb_active(server->super))
+diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
+index dd0f3eed3890..f09b153ac82f 100644
+--- a/fs/nfs/delegation.h
++++ b/fs/nfs/delegation.h
+@@ -34,6 +34,7 @@ enum {
+ NFS_DELEGATION_RETURNING,
+ NFS_DELEGATION_REVOKED,
+ NFS_DELEGATION_TEST_EXPIRED,
++ NFS_DELEGATION_INODE_FREEING,
+ };
+
+ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred,
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.h b/fs/nfs/flexfilelayout/flexfilelayout.h
+index de50a342d5a5..2ac99124474c 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.h
++++ b/fs/nfs/flexfilelayout/flexfilelayout.h
+@@ -132,16 +132,6 @@ FF_LAYOUT_LSEG(struct pnfs_layout_segment *lseg)
+ generic_hdr);
+ }
+
+-static inline struct nfs4_deviceid_node *
+-FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)
+-{
+- if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt ||
+- FF_LAYOUT_LSEG(lseg)->mirror_array[idx] == NULL ||
+- FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds == NULL)
+- return NULL;
+- return &FF_LAYOUT_LSEG(lseg)->mirror_array[idx]->mirror_ds->id_node;
+-}
+-
+ static inline struct nfs4_ff_layout_ds *
+ FF_LAYOUT_MIRROR_DS(struct nfs4_deviceid_node *node)
+ {
+@@ -151,9 +141,25 @@ FF_LAYOUT_MIRROR_DS(struct nfs4_deviceid_node *node)
+ static inline struct nfs4_ff_layout_mirror *
+ FF_LAYOUT_COMP(struct pnfs_layout_segment *lseg, u32 idx)
+ {
+- if (idx >= FF_LAYOUT_LSEG(lseg)->mirror_array_cnt)
+- return NULL;
+- return FF_LAYOUT_LSEG(lseg)->mirror_array[idx];
++ struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
++
++ if (idx < fls->mirror_array_cnt)
++ return fls->mirror_array[idx];
++ return NULL;
++}
++
++static inline struct nfs4_deviceid_node *
++FF_LAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg, u32 idx)
++{
++ struct nfs4_ff_layout_mirror *mirror = FF_LAYOUT_COMP(lseg, idx);
++
++ if (mirror != NULL) {
++ struct nfs4_ff_layout_ds *mirror_ds = mirror->mirror_ds;
++
++ if (!IS_ERR_OR_NULL(mirror_ds))
++ return &mirror_ds->id_node;
++ }
++ return NULL;
+ }
+
+ static inline u32
+diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c
+index 69f72ed2bf87..ec9803088f6b 100644
+--- a/fs/nfs/nfs42xdr.c
++++ b/fs/nfs/nfs42xdr.c
+@@ -59,43 +59,53 @@
+ #define decode_clone_maxsz (op_decode_hdr_maxsz)
+
+ #define NFS4_enc_allocate_sz (compound_encode_hdr_maxsz + \
++ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_allocate_maxsz + \
+ encode_getattr_maxsz)
+ #define NFS4_dec_allocate_sz (compound_decode_hdr_maxsz + \
++ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_allocate_maxsz + \
+ decode_getattr_maxsz)
+ #define NFS4_enc_copy_sz (compound_encode_hdr_maxsz + \
++ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_savefh_maxsz + \
+ encode_putfh_maxsz + \
+ encode_copy_maxsz + \
+ encode_commit_maxsz)
+ #define NFS4_dec_copy_sz (compound_decode_hdr_maxsz + \
++ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_savefh_maxsz + \
+ decode_putfh_maxsz + \
+ decode_copy_maxsz + \
+ decode_commit_maxsz)
+ #define NFS4_enc_offload_cancel_sz (compound_encode_hdr_maxsz + \
++ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_offload_cancel_maxsz)
+ #define NFS4_dec_offload_cancel_sz (compound_decode_hdr_maxsz + \
++ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_offload_cancel_maxsz)
+ #define NFS4_enc_deallocate_sz (compound_encode_hdr_maxsz + \
++ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_deallocate_maxsz + \
+ encode_getattr_maxsz)
+ #define NFS4_dec_deallocate_sz (compound_decode_hdr_maxsz + \
++ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_deallocate_maxsz + \
+ decode_getattr_maxsz)
+ #define NFS4_enc_seek_sz (compound_encode_hdr_maxsz + \
++ encode_sequence_maxsz + \
+ encode_putfh_maxsz + \
+ encode_seek_maxsz)
+ #define NFS4_dec_seek_sz (compound_decode_hdr_maxsz + \
++ decode_sequence_maxsz + \
+ decode_putfh_maxsz + \
+ decode_seek_maxsz)
+ #define NFS4_enc_layoutstats_sz (compound_encode_hdr_maxsz + \
+diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
+index c818f9886f61..66f699e18755 100644
+--- a/fs/nfs/pnfs.c
++++ b/fs/nfs/pnfs.c
+@@ -758,22 +758,35 @@ static int
+ pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
+ struct nfs_server *server,
+ struct list_head *layout_list)
++ __must_hold(&clp->cl_lock)
++ __must_hold(RCU)
+ {
+ struct pnfs_layout_hdr *lo, *next;
+ struct inode *inode;
+
+ list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
+- if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
++ if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) ||
++ test_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags) ||
++ !list_empty(&lo->plh_bulk_destroy))
+ continue;
++ /* If the sb is being destroyed, just bail */
++ if (!nfs_sb_active(server->super))
++ break;
+ inode = igrab(lo->plh_inode);
+- if (inode == NULL)
+- continue;
+- list_del_init(&lo->plh_layouts);
+- if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
+- continue;
+- rcu_read_unlock();
+- spin_unlock(&clp->cl_lock);
+- iput(inode);
++ if (inode != NULL) {
++ list_del_init(&lo->plh_layouts);
++ if (pnfs_layout_add_bulk_destroy_list(inode,
++ layout_list))
++ continue;
++ rcu_read_unlock();
++ spin_unlock(&clp->cl_lock);
++ iput(inode);
++ } else {
++ rcu_read_unlock();
++ spin_unlock(&clp->cl_lock);
++ set_bit(NFS_LAYOUT_INODE_FREEING, &lo->plh_flags);
++ }
++ nfs_sb_deactive(server->super);
+ spin_lock(&clp->cl_lock);
+ rcu_read_lock();
+ return -EAGAIN;
+@@ -811,7 +824,7 @@ pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
+ /* Free all lsegs that are attached to commit buckets */
+ nfs_commit_inode(inode, 0);
+ pnfs_put_layout_hdr(lo);
+- iput(inode);
++ nfs_iput_and_deactive(inode);
+ }
+ return ret;
+ }
+diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
+index ece367ebde69..3ba44819a88a 100644
+--- a/fs/nfs/pnfs.h
++++ b/fs/nfs/pnfs.h
+@@ -104,6 +104,7 @@ enum {
+ NFS_LAYOUT_RETURN_REQUESTED, /* Return this layout ASAP */
+ NFS_LAYOUT_INVALID_STID, /* layout stateid id is invalid */
+ NFS_LAYOUT_FIRST_LAYOUTGET, /* Serialize first layoutget */
++ NFS_LAYOUT_INODE_FREEING, /* The inode is being freed */
+ };
+
+ enum layoutdriver_policy_flags {
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index d90efdea9fbd..5db7aceb4190 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1930,7 +1930,7 @@ static int nfs_parse_devname(const char *dev_name,
+ /* kill possible hostname list: not supported */
+ comma = strchr(dev_name, ',');
+ if (comma != NULL && comma < end)
+- *comma = 0;
++ len = comma - dev_name;
+ }
+
+ if (len > maxnamlen)
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 117ffd90419e..e27637fa0f79 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -646,7 +646,7 @@ out:
+ return ret;
+ out_launder:
+ nfs_write_error_remove_page(req);
+- return ret;
++ return 0;
+ }
+
+ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
+diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
+index a7c0c657dfaf..13ca7c16bfc7 100644
+--- a/fs/xfs/xfs_quotaops.c
++++ b/fs/xfs/xfs_quotaops.c
+@@ -202,6 +202,9 @@ xfs_fs_rm_xquota(
+ if (XFS_IS_QUOTA_ON(mp))
+ return -EINVAL;
+
++ if (uflags & ~(FS_USER_QUOTA | FS_GROUP_QUOTA | FS_PROJ_QUOTA))
++ return -EINVAL;
++
+ if (uflags & FS_USER_QUOTA)
+ flags |= XFS_DQ_USER;
+ if (uflags & FS_GROUP_QUOTA)
+diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
+index 777814755fa6..675aa1e876ce 100644
+--- a/include/drm/drm_panel.h
++++ b/include/drm/drm_panel.h
+@@ -24,6 +24,7 @@
+ #ifndef __DRM_PANEL_H__
+ #define __DRM_PANEL_H__
+
++#include <linux/err.h>
+ #include <linux/errno.h>
+ #include <linux/list.h>
+
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index df1252e22dcf..cd412817654f 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -917,26 +917,18 @@ int acpi_subsys_prepare(struct device *dev);
+ void acpi_subsys_complete(struct device *dev);
+ int acpi_subsys_suspend_late(struct device *dev);
+ int acpi_subsys_suspend_noirq(struct device *dev);
+-int acpi_subsys_resume_noirq(struct device *dev);
+-int acpi_subsys_resume_early(struct device *dev);
+ int acpi_subsys_suspend(struct device *dev);
+ int acpi_subsys_freeze(struct device *dev);
+-int acpi_subsys_freeze_late(struct device *dev);
+-int acpi_subsys_freeze_noirq(struct device *dev);
+-int acpi_subsys_thaw_noirq(struct device *dev);
++int acpi_subsys_poweroff(struct device *dev);
+ #else
+ static inline int acpi_dev_resume_early(struct device *dev) { return 0; }
+ static inline int acpi_subsys_prepare(struct device *dev) { return 0; }
+ static inline void acpi_subsys_complete(struct device *dev) {}
+ static inline int acpi_subsys_suspend_late(struct device *dev) { return 0; }
+ static inline int acpi_subsys_suspend_noirq(struct device *dev) { return 0; }
+-static inline int acpi_subsys_resume_noirq(struct device *dev) { return 0; }
+-static inline int acpi_subsys_resume_early(struct device *dev) { return 0; }
+ static inline int acpi_subsys_suspend(struct device *dev) { return 0; }
+ static inline int acpi_subsys_freeze(struct device *dev) { return 0; }
+-static inline int acpi_subsys_freeze_late(struct device *dev) { return 0; }
+-static inline int acpi_subsys_freeze_noirq(struct device *dev) { return 0; }
+-static inline int acpi_subsys_thaw_noirq(struct device *dev) { return 0; }
++static inline int acpi_subsys_poweroff(struct device *dev) { return 0; }
+ #endif
+
+ #ifdef CONFIG_ACPI
+diff --git a/include/linux/device.h b/include/linux/device.h
+index 19dd8852602c..c74ce473589a 100644
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -701,7 +701,8 @@ extern unsigned long devm_get_free_pages(struct device *dev,
+ gfp_t gfp_mask, unsigned int order);
+ extern void devm_free_pages(struct device *dev, unsigned long addr);
+
+-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
++void __iomem *devm_ioremap_resource(struct device *dev,
++ const struct resource *res);
+
+ void __iomem *devm_of_iomap(struct device *dev,
+ struct device_node *node, int index,
+@@ -849,11 +850,12 @@ struct device_link {
+ struct list_head c_node;
+ enum device_link_state status;
+ u32 flags;
+- bool rpm_active;
++ refcount_t rpm_active;
+ struct kref kref;
+ #ifdef CONFIG_SRCU
+ struct rcu_head rcu_head;
+ #endif
++ bool supplier_preactivated; /* Owned by consumer probe. */
+ };
+
+ /**
+diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
+index 3188c0bef3e7..1d21e98d6854 100644
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -165,7 +165,7 @@
+ #define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB)
+ #define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC)
+ #define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
+-#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt)
++#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)
+ #define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt)
+ #define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb)
+ #define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt)
+@@ -192,7 +192,7 @@
+ #define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB)
+ #define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC)
+ #define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
+-#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt)
++#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb)
+ #define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt)
+ #define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb)
+ #define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt)
+@@ -251,7 +251,7 @@
+ #define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB)
+ #define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC)
+ #define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
+-#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt)
++#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb)
+ #define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt)
+ #define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb)
+ #define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
+@@ -277,7 +277,7 @@
+ #define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
+ #define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
+ #define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
+-#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
++#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb)
+ #define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt)
+ #define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb)
+ #define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt)
+@@ -351,7 +351,7 @@
+ #define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB)
+ #define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC)
+ #define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
+-#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt)
++#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb)
+ #define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt)
+ #define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb)
+ #define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt)
+@@ -375,7 +375,7 @@
+ #define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB)
+ #define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC)
+ #define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
+-#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt)
++#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)
+ #define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt)
+ #define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb)
+ #define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt)
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index 177f11c96187..76b76b6aa83d 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -9053,8 +9053,6 @@ struct mlx5_ifc_query_lag_out_bits {
+
+ u8 syndrome[0x20];
+
+- u8 reserved_at_40[0x40];
+-
+ struct mlx5_ifc_lagc_bits ctx;
+ };
+
+diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
+index 4224902a8e22..358d6be357ed 100644
+--- a/include/linux/mmc/sdio_ids.h
++++ b/include/linux/mmc/sdio_ids.h
+@@ -68,6 +68,8 @@
+
+ #define SDIO_VENDOR_ID_TI 0x0097
+ #define SDIO_DEVICE_ID_TI_WL1271 0x4076
++#define SDIO_VENDOR_ID_TI_WL1251 0x104c
++#define SDIO_DEVICE_ID_TI_WL1251 0x9066
+
+ #define SDIO_VENDOR_ID_STE 0x0020
+ #define SDIO_DEVICE_ID_STE_CW1200 0x2280
+diff --git a/include/linux/of.h b/include/linux/of.h
+index d5a863c1ee39..d4f14b0302b6 100644
+--- a/include/linux/of.h
++++ b/include/linux/of.h
+@@ -1001,7 +1001,7 @@ static inline struct device_node *of_find_matching_node(
+
+ static inline const char *of_node_get_device_type(const struct device_node *np)
+ {
+- return of_get_property(np, "type", NULL);
++ return of_get_property(np, "device_type", NULL);
+ }
+
+ static inline bool of_node_is_type(const struct device_node *np, const char *type)
+@@ -1425,7 +1425,8 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
+
+ #else
+
+-static inline int of_overlay_fdt_apply(void *overlay_fdt, int *ovcs_id)
++static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size,
++ int *ovcs_id)
+ {
+ return -ENOTSUPP;
+ }
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index b22bc81f3669..d8b4d31acd18 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -494,6 +494,11 @@ struct perf_addr_filters_head {
+ unsigned int nr_file_filters;
+ };
+
++struct perf_addr_filter_range {
++ unsigned long start;
++ unsigned long size;
++};
++
+ /**
+ * enum perf_event_state - the states of an event:
+ */
+@@ -670,7 +675,7 @@ struct perf_event {
+ /* address range filters */
+ struct perf_addr_filters_head addr_filters;
+ /* vma address array for file-based filders */
+- unsigned long *addr_filters_offs;
++ struct perf_addr_filter_range *addr_filter_ranges;
+ unsigned long addr_filters_gen;
+
+ void (*destroy)(struct perf_event *);
+diff --git a/include/linux/platform_data/dma-imx-sdma.h b/include/linux/platform_data/dma-imx-sdma.h
+index 6eaa53cef0bd..30e676b36b24 100644
+--- a/include/linux/platform_data/dma-imx-sdma.h
++++ b/include/linux/platform_data/dma-imx-sdma.h
+@@ -51,7 +51,10 @@ struct sdma_script_start_addrs {
+ /* End of v2 array */
+ s32 zcanfd_2_mcu_addr;
+ s32 zqspi_2_mcu_addr;
++ s32 mcu_2_ecspi_addr;
+ /* End of v3 array */
++ s32 mcu_2_zqspi_addr;
++ /* End of v4 array */
+ };
+
+ /**
+diff --git a/include/linux/rtc.h b/include/linux/rtc.h
+index 6aedc30003e7..5a34f59941fb 100644
+--- a/include/linux/rtc.h
++++ b/include/linux/rtc.h
+@@ -163,7 +163,7 @@ struct rtc_device {
+ #define to_rtc_device(d) container_of(d, struct rtc_device, dev)
+
+ /* useful timestamps */
+-#define RTC_TIMESTAMP_BEGIN_1900 -2208989361LL /* 1900-01-01 00:00:00 */
++#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */
+ #define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
+ #define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */
+
+diff --git a/include/linux/signal.h b/include/linux/signal.h
+index e4d01469ed60..0be5ce2375cb 100644
+--- a/include/linux/signal.h
++++ b/include/linux/signal.h
+@@ -272,6 +272,9 @@ extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
+ extern void exit_signals(struct task_struct *tsk);
+ extern void kernel_sigaction(int, __sighandler_t);
+
++#define SIG_KTHREAD ((__force __sighandler_t)2)
++#define SIG_KTHREAD_KERNEL ((__force __sighandler_t)3)
++
+ static inline void allow_signal(int sig)
+ {
+ /*
+@@ -279,7 +282,17 @@ static inline void allow_signal(int sig)
+ * know it'll be handled, so that they don't get converted to
+ * SIGKILL or just silently dropped.
+ */
+- kernel_sigaction(sig, (__force __sighandler_t)2);
++ kernel_sigaction(sig, SIG_KTHREAD);
++}
++
++static inline void allow_kernel_signal(int sig)
++{
++ /*
++ * Kernel threads handle their own signals. Let the signal code
++ * know signals sent by the kernel will be handled, so that they
++ * don't get silently dropped.
++ */
++ kernel_sigaction(sig, SIG_KTHREAD_KERNEL);
+ }
+
+ static inline void disallow_signal(int sig)
+diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
+index ab400af6f0ce..623719c91706 100644
+--- a/include/linux/switchtec.h
++++ b/include/linux/switchtec.h
+@@ -244,8 +244,8 @@ struct ntb_ctrl_regs {
+ u64 xlate_addr;
+ } bar_entry[6];
+ u32 reserved2[216];
+- u32 req_id_table[256];
+- u32 reserved3[512];
++ u32 req_id_table[512];
++ u32 reserved3[256];
+ u64 lut_entry[512];
+ } __packed;
+
+diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
+index 50c74a77db55..8cb93aff69f1 100644
+--- a/include/linux/usb/tcpm.h
++++ b/include/linux/usb/tcpm.h
+@@ -121,10 +121,10 @@ struct tcpc_config {
+ * with partner.
+ * @set_pd_rx: Called to enable or disable reception of PD messages
+ * @set_roles: Called to set power and data roles
+- * @start_drp_toggling:
+- * Optional; if supported by hardware, called to start DRP
+- * toggling. DRP toggling is stopped automatically if
+- * a connection is established.
++ * @start_toggling:
++ * Optional; if supported by hardware, called to start dual-role
++ * toggling or single-role connection detection. Toggling stops
++ * automatically if a connection is established.
+ * @try_role: Optional; called to set a preferred role
+ * @pd_transmit:Called to transmit PD message
+ * @mux: Pointer to multiplexer data
+@@ -147,8 +147,9 @@ struct tcpc_dev {
+ int (*set_pd_rx)(struct tcpc_dev *dev, bool on);
+ int (*set_roles)(struct tcpc_dev *dev, bool attached,
+ enum typec_role role, enum typec_data_role data);
+- int (*start_drp_toggling)(struct tcpc_dev *dev,
+- enum typec_cc_status cc);
++ int (*start_toggling)(struct tcpc_dev *dev,
++ enum typec_port_type port_type,
++ enum typec_cc_status cc);
+ int (*try_role)(struct tcpc_dev *dev, int role);
+ int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
+ const struct pd_message *msg);
+diff --git a/include/media/davinci/vpbe.h b/include/media/davinci/vpbe.h
+index 79a566d7defd..180a05e91497 100644
+--- a/include/media/davinci/vpbe.h
++++ b/include/media/davinci/vpbe.h
+@@ -92,7 +92,7 @@ struct vpbe_config {
+ struct encoder_config_info *ext_encoders;
+ /* amplifier information goes here */
+ struct amp_config_info *amp;
+- int num_outputs;
++ unsigned int num_outputs;
+ /* Order is venc outputs followed by LCD and then external encoders */
+ struct vpbe_output *outputs;
+ };
+diff --git a/include/net/request_sock.h b/include/net/request_sock.h
+index 347015515a7d..1653435f18f5 100644
+--- a/include/net/request_sock.h
++++ b/include/net/request_sock.h
+@@ -183,7 +183,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
+
+ static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
+ {
+- return queue->rskq_accept_head == NULL;
++ return READ_ONCE(queue->rskq_accept_head) == NULL;
+ }
+
+ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
+@@ -195,7 +195,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
+ req = queue->rskq_accept_head;
+ if (req) {
+ sk_acceptq_removed(parent);
+- queue->rskq_accept_head = req->dl_next;
++ WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
+ if (queue->rskq_accept_head == NULL)
+ queue->rskq_accept_tail = NULL;
+ }
+diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
+index 2abbc15824af..2c6570e6fcfe 100644
+--- a/include/net/sctp/sctp.h
++++ b/include/net/sctp/sctp.h
+@@ -625,4 +625,9 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
+ return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
+ }
+
++static inline bool sctp_newsk_ready(const struct sock *sk)
++{
++ return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
++}
++
+ #endif /* __net_sctp_h__ */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index ac4ffe8013d8..918bfd0d7d1f 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -261,7 +261,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk)
+ mem_cgroup_under_socket_pressure(sk->sk_memcg))
+ return true;
+
+- return tcp_memory_pressure;
++ return READ_ONCE(tcp_memory_pressure);
+ }
+ /*
+ * The next routines deal with comparing 32 bit unsigned ints
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index fb9b19a3b749..48dc1ce2170d 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -1054,7 +1054,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
+ void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+
+ struct xfrm_if_parms {
+- char name[IFNAMSIZ]; /* name of XFRM device */
+ int link; /* ifindex of underlying L2 interface */
+ u32 if_id; /* interface identifyer */
+ };
+diff --git a/include/sound/soc.h b/include/sound/soc.h
+index 41cec42fb456..88aa48e5485f 100644
+--- a/include/sound/soc.h
++++ b/include/sound/soc.h
+@@ -548,12 +548,12 @@ static inline void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
+ }
+ #endif
+
+-#ifdef CONFIG_SND_SOC_AC97_BUS
+ struct snd_ac97 *snd_soc_alloc_ac97_component(struct snd_soc_component *component);
+ struct snd_ac97 *snd_soc_new_ac97_component(struct snd_soc_component *component,
+ unsigned int id, unsigned int id_mask);
+ void snd_soc_free_ac97_component(struct snd_ac97 *ac97);
+
++#ifdef CONFIG_SND_SOC_AC97_BUS
+ int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
+ int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
+ struct platform_device *pdev);
+diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
+index a08916eb7615..0924119bcfa4 100644
+--- a/include/trace/events/rxrpc.h
++++ b/include/trace/events/rxrpc.h
+@@ -554,10 +554,10 @@ TRACE_EVENT(rxrpc_peer,
+ );
+
+ TRACE_EVENT(rxrpc_conn,
+- TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
++ TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op,
+ int usage, const void *where),
+
+- TP_ARGS(conn, op, usage, where),
++ TP_ARGS(conn_debug_id, op, usage, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn )
+@@ -567,7 +567,7 @@ TRACE_EVENT(rxrpc_conn,
+ ),
+
+ TP_fast_assign(
+- __entry->conn = conn->debug_id;
++ __entry->conn = conn_debug_id;
+ __entry->op = op;
+ __entry->usage = usage;
+ __entry->where = where;
+diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
+index 972265f32871..1e2662ff0529 100644
+--- a/include/uapi/linux/btf.h
++++ b/include/uapi/linux/btf.h
+@@ -22,9 +22,9 @@ struct btf_header {
+ };
+
+ /* Max # of type identifier */
+-#define BTF_MAX_TYPE 0x0000ffff
++#define BTF_MAX_TYPE 0x000fffff
+ /* Max offset into the string section */
+-#define BTF_MAX_NAME_OFFSET 0x0000ffff
++#define BTF_MAX_NAME_OFFSET 0x00ffffff
+ /* Max # of struct/union/enum members or func args */
+ #define BTF_MAX_VLEN 0xffff
+
+diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
+index 325ec6ef0a76..5eac62e1b68d 100644
+--- a/include/uapi/linux/netfilter/nf_tables.h
++++ b/include/uapi/linux/netfilter/nf_tables.h
+@@ -1128,7 +1128,7 @@ enum nft_log_level {
+ NFT_LOGLEVEL_AUDIT,
+ __NFT_LOGLEVEL_MAX
+ };
+-#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX + 1)
++#define NFT_LOGLEVEL_MAX (__NFT_LOGLEVEL_MAX - 1)
+
+ /**
+ * enum nft_queue_attributes - nf_tables queue expression netlink attributes
+diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
+index 177a52436394..86477f3894e5 100644
+--- a/kernel/bpf/offload.c
++++ b/kernel/bpf/offload.c
+@@ -645,8 +645,10 @@ struct bpf_offload_dev *bpf_offload_dev_create(void)
+ down_write(&bpf_devs_lock);
+ if (!offdevs_inited) {
+ err = rhashtable_init(&offdevs, &offdevs_params);
+- if (err)
++ if (err) {
++ up_write(&bpf_devs_lock);
+ return ERR_PTR(err);
++ }
+ offdevs_inited = true;
+ }
+ up_write(&bpf_devs_lock);
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 9bbfb1ff4ac9..e85636fb81b9 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1253,7 +1253,7 @@ static int check_stack_access(struct bpf_verifier_env *env,
+ char tn_buf[48];
+
+ tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
+- verbose(env, "variable stack access var_off=%s off=%d size=%d",
++ verbose(env, "variable stack access var_off=%s off=%d size=%d\n",
+ tn_buf, off, size);
+ return -EACCES;
+ }
+diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
+index f338d23b112b..dc6bf35e7884 100644
+--- a/kernel/debug/kdb/kdb_main.c
++++ b/kernel/debug/kdb/kdb_main.c
+@@ -2604,7 +2604,7 @@ static int kdb_per_cpu(int argc, const char **argv)
+ diag = kdbgetularg(argv[3], &whichcpu);
+ if (diag)
+ return diag;
+- if (!cpu_online(whichcpu)) {
++ if (whichcpu >= nr_cpu_ids || !cpu_online(whichcpu)) {
+ kdb_printf("cpu %ld is not online\n", whichcpu);
+ return KDB_BADCPUNUM;
+ }
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 460d5fd3ec4e..16af86ab24c4 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1254,6 +1254,7 @@ static void put_ctx(struct perf_event_context *ctx)
+ * perf_event_context::lock
+ * perf_event::mmap_mutex
+ * mmap_sem
++ * perf_addr_filters_head::lock
+ *
+ * cpu_hotplug_lock
+ * pmus_lock
+@@ -2803,7 +2804,7 @@ static int perf_event_stop(struct perf_event *event, int restart)
+ *
+ * (p1) when userspace mappings change as a result of (1) or (2) or (3) below,
+ * we update the addresses of corresponding vmas in
+- * event::addr_filters_offs array and bump the event::addr_filters_gen;
++ * event::addr_filter_ranges array and bump the event::addr_filters_gen;
+ * (p2) when an event is scheduled in (pmu::add), it calls
+ * perf_event_addr_filters_sync() which calls pmu::addr_filters_sync()
+ * if the generation has changed since the previous call.
+@@ -4447,7 +4448,7 @@ static void _free_event(struct perf_event *event)
+
+ perf_event_free_bpf_prog(event);
+ perf_addr_filters_splice(event, NULL);
+- kfree(event->addr_filters_offs);
++ kfree(event->addr_filter_ranges);
+
+ if (event->destroy)
+ event->destroy(event);
+@@ -5011,6 +5012,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
+ if (perf_event_check_period(event, value))
+ return -EINVAL;
+
++ if (!event->attr.freq && (value & (1ULL << 63)))
++ return -EINVAL;
++
+ event_function_call(event, __perf_event_period, &value);
+
+ return 0;
+@@ -6742,7 +6746,8 @@ static void perf_event_addr_filters_exec(struct perf_event *event, void *data)
+ raw_spin_lock_irqsave(&ifh->lock, flags);
+ list_for_each_entry(filter, &ifh->list, entry) {
+ if (filter->path.dentry) {
+- event->addr_filters_offs[count] = 0;
++ event->addr_filter_ranges[count].start = 0;
++ event->addr_filter_ranges[count].size = 0;
+ restart++;
+ }
+
+@@ -7424,28 +7429,47 @@ static bool perf_addr_filter_match(struct perf_addr_filter *filter,
+ return true;
+ }
+
++static bool perf_addr_filter_vma_adjust(struct perf_addr_filter *filter,
++ struct vm_area_struct *vma,
++ struct perf_addr_filter_range *fr)
++{
++ unsigned long vma_size = vma->vm_end - vma->vm_start;
++ unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
++ struct file *file = vma->vm_file;
++
++ if (!perf_addr_filter_match(filter, file, off, vma_size))
++ return false;
++
++ if (filter->offset < off) {
++ fr->start = vma->vm_start;
++ fr->size = min(vma_size, filter->size - (off - filter->offset));
++ } else {
++ fr->start = vma->vm_start + filter->offset - off;
++ fr->size = min(vma->vm_end - fr->start, filter->size);
++ }
++
++ return true;
++}
++
+ static void __perf_addr_filters_adjust(struct perf_event *event, void *data)
+ {
+ struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
+ struct vm_area_struct *vma = data;
+- unsigned long off = vma->vm_pgoff << PAGE_SHIFT, flags;
+- struct file *file = vma->vm_file;
+ struct perf_addr_filter *filter;
+ unsigned int restart = 0, count = 0;
++ unsigned long flags;
+
+ if (!has_addr_filter(event))
+ return;
+
+- if (!file)
++ if (!vma->vm_file)
+ return;
+
+ raw_spin_lock_irqsave(&ifh->lock, flags);
+ list_for_each_entry(filter, &ifh->list, entry) {
+- if (perf_addr_filter_match(filter, file, off,
+- vma->vm_end - vma->vm_start)) {
+- event->addr_filters_offs[count] = vma->vm_start;
++ if (perf_addr_filter_vma_adjust(filter, vma,
++ &event->addr_filter_ranges[count]))
+ restart++;
+- }
+
+ count++;
+ }
+@@ -8805,26 +8829,19 @@ static void perf_addr_filters_splice(struct perf_event *event,
+ * @filter; if so, adjust filter's address range.
+ * Called with mm::mmap_sem down for reading.
+ */
+-static unsigned long perf_addr_filter_apply(struct perf_addr_filter *filter,
+- struct mm_struct *mm)
++static void perf_addr_filter_apply(struct perf_addr_filter *filter,
++ struct mm_struct *mm,
++ struct perf_addr_filter_range *fr)
+ {
+ struct vm_area_struct *vma;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+- struct file *file = vma->vm_file;
+- unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+- unsigned long vma_size = vma->vm_end - vma->vm_start;
+-
+- if (!file)
+- continue;
+-
+- if (!perf_addr_filter_match(filter, file, off, vma_size))
++ if (!vma->vm_file)
+ continue;
+
+- return vma->vm_start;
++ if (perf_addr_filter_vma_adjust(filter, vma, fr))
++ return;
+ }
+-
+- return 0;
+ }
+
+ /*
+@@ -8847,26 +8864,29 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
+ if (task == TASK_TOMBSTONE)
+ return;
+
+- if (!ifh->nr_file_filters)
+- return;
+-
+- mm = get_task_mm(event->ctx->task);
+- if (!mm)
+- goto restart;
++ if (ifh->nr_file_filters) {
++ mm = get_task_mm(event->ctx->task);
++ if (!mm)
++ goto restart;
+
+- down_read(&mm->mmap_sem);
++ down_read(&mm->mmap_sem);
++ }
+
+ raw_spin_lock_irqsave(&ifh->lock, flags);
+ list_for_each_entry(filter, &ifh->list, entry) {
+- event->addr_filters_offs[count] = 0;
++ if (filter->path.dentry) {
++ /*
++ * Adjust base offset if the filter is associated to a
++ * binary that needs to be mapped:
++ */
++ event->addr_filter_ranges[count].start = 0;
++ event->addr_filter_ranges[count].size = 0;
+
+- /*
+- * Adjust base offset if the filter is associated to a binary
+- * that needs to be mapped:
+- */
+- if (filter->path.dentry)
+- event->addr_filters_offs[count] =
+- perf_addr_filter_apply(filter, mm);
++ perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]);
++ } else {
++ event->addr_filter_ranges[count].start = filter->offset;
++ event->addr_filter_ranges[count].size = filter->size;
++ }
+
+ count++;
+ }
+@@ -8874,9 +8894,11 @@ static void perf_event_addr_filters_apply(struct perf_event *event)
+ event->addr_filters_gen++;
+ raw_spin_unlock_irqrestore(&ifh->lock, flags);
+
+- up_read(&mm->mmap_sem);
++ if (ifh->nr_file_filters) {
++ up_read(&mm->mmap_sem);
+
+- mmput(mm);
++ mmput(mm);
++ }
+
+ restart:
+ perf_event_stop(event, 1);
+@@ -10128,14 +10150,28 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ goto err_pmu;
+
+ if (has_addr_filter(event)) {
+- event->addr_filters_offs = kcalloc(pmu->nr_addr_filters,
+- sizeof(unsigned long),
+- GFP_KERNEL);
+- if (!event->addr_filters_offs) {
++ event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters,
++ sizeof(struct perf_addr_filter_range),
++ GFP_KERNEL);
++ if (!event->addr_filter_ranges) {
+ err = -ENOMEM;
+ goto err_per_task;
+ }
+
++ /*
++ * Clone the parent's vma offsets: they are valid until exec()
++ * even if the mm is not shared with the parent.
++ */
++ if (event->parent) {
++ struct perf_addr_filters_head *ifh = perf_event_addr_filters(event);
++
++ raw_spin_lock_irq(&ifh->lock);
++ memcpy(event->addr_filter_ranges,
++ event->parent->addr_filter_ranges,
++ pmu->nr_addr_filters * sizeof(struct perf_addr_filter_range));
++ raw_spin_unlock_irq(&ifh->lock);
++ }
++
+ /* force hw sync on the address filters */
+ event->addr_filters_gen = 1;
+ }
+@@ -10154,7 +10190,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
+ return event;
+
+ err_addr_filters:
+- kfree(event->addr_filters_offs);
++ kfree(event->addr_filter_ranges);
+
+ err_per_task:
+ exclusive_event_destroy(event);
+diff --git a/kernel/fork.c b/kernel/fork.c
+index 8cb5cd7c97e1..1a2d18e98bf9 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -216,6 +216,7 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
+ memset(s->addr, 0, THREAD_SIZE);
+
+ tsk->stack_vm_area = s;
++ tsk->stack = s->addr;
+ return s->addr;
+ }
+
+@@ -230,14 +231,20 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node)
+ * free_thread_stack() can be called in interrupt context,
+ * so cache the vm_struct.
+ */
+- if (stack)
++ if (stack) {
+ tsk->stack_vm_area = find_vm_area(stack);
++ tsk->stack = stack;
++ }
+ return stack;
+ #else
+ struct page *page = alloc_pages_node(node, THREADINFO_GFP,
+ THREAD_SIZE_ORDER);
+
+- return page ? page_address(page) : NULL;
++ if (likely(page)) {
++ tsk->stack = page_address(page);
++ return tsk->stack;
++ }
++ return NULL;
+ #endif
+ }
+
+@@ -268,7 +275,10 @@ static struct kmem_cache *thread_stack_cache;
+ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
+ int node)
+ {
+- return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
++ unsigned long *stack;
++ stack = kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
++ tsk->stack = stack;
++ return stack;
+ }
+
+ static void free_thread_stack(struct task_struct *tsk)
+diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
+index 5d9fc01b60a6..6e8520a81dd8 100644
+--- a/kernel/irq/irqdomain.c
++++ b/kernel/irq/irqdomain.c
+@@ -148,6 +148,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+ switch (fwid->type) {
+ case IRQCHIP_FWNODE_NAMED:
+ case IRQCHIP_FWNODE_NAMED_ID:
++ domain->fwnode = fwnode;
+ domain->name = kstrdup(fwid->name, GFP_KERNEL);
+ if (!domain->name) {
+ kfree(domain);
+@@ -183,7 +184,7 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
+ * unhappy about. Replace them with ':', which does
+ * the trick and is not as offensive as '\'...
+ */
+- name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
++ name = kasprintf(GFP_KERNEL, "%pOF", of_node);
+ if (!name) {
+ kfree(domain);
+ return NULL;
+diff --git a/kernel/signal.c b/kernel/signal.c
+index 7278302e3485..08911bb6fe9a 100644
+--- a/kernel/signal.c
++++ b/kernel/signal.c
+@@ -86,6 +86,11 @@ static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
+ handler == SIG_DFL && !(force && sig_kernel_only(sig)))
+ return true;
+
++ /* Only allow kernel generated signals to this kthread */
++ if (unlikely((t->flags & PF_KTHREAD) &&
++ (handler == SIG_KTHREAD_KERNEL) && !force))
++ return true;
++
+ return sig_handler_ignored(handler, sig);
+ }
+
+diff --git a/lib/devres.c b/lib/devres.c
+index faccf1a037d0..aa0f5308ac6b 100644
+--- a/lib/devres.c
++++ b/lib/devres.c
+@@ -131,7 +131,8 @@ EXPORT_SYMBOL(devm_iounmap);
+ * if (IS_ERR(base))
+ * return PTR_ERR(base);
+ */
+-void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res)
++void __iomem *devm_ioremap_resource(struct device *dev,
++ const struct resource *res)
+ {
+ resource_size_t size;
+ const char *name;
+diff --git a/lib/kfifo.c b/lib/kfifo.c
+index 015656aa8182..6320ab91e343 100644
+--- a/lib/kfifo.c
++++ b/lib/kfifo.c
+@@ -82,7 +82,8 @@ int __kfifo_init(struct __kfifo *fifo, void *buffer,
+ {
+ size /= esize;
+
+- size = roundup_pow_of_two(size);
++ if (!is_power_of_2(size))
++ size = rounddown_pow_of_two(size);
+
+ fifo->in = 0;
+ fifo->out = 0;
+diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
+index 4fa2fdda174d..9e56fb98f33c 100644
+--- a/net/6lowpan/nhc.c
++++ b/net/6lowpan/nhc.c
+@@ -18,7 +18,7 @@
+ #include "nhc.h"
+
+ static struct rb_root rb_root = RB_ROOT;
+-static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX];
++static struct lowpan_nhc *lowpan_nexthdr_nhcs[NEXTHDR_MAX + 1];
+ static DEFINE_SPINLOCK(lowpan_nhc_lock);
+
+ static int lowpan_nhc_insert(struct lowpan_nhc *nhc)
+diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
+index 94e88f510c5b..450b257afa84 100644
+--- a/net/bpfilter/bpfilter_kern.c
++++ b/net/bpfilter/bpfilter_kern.c
+@@ -25,7 +25,7 @@ static void shutdown_umh(struct umh_info *info)
+ return;
+ tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID);
+ if (tsk) {
+- force_sig(SIGKILL, tsk);
++ send_sig(SIGKILL, tsk, 1);
+ put_task_struct(tsk);
+ }
+ fput(info->pipe_to_umh);
+diff --git a/net/bridge/br_arp_nd_proxy.c b/net/bridge/br_arp_nd_proxy.c
+index 2cf7716254be..d42e3904b498 100644
+--- a/net/bridge/br_arp_nd_proxy.c
++++ b/net/bridge/br_arp_nd_proxy.c
+@@ -311,7 +311,7 @@ static void br_nd_send(struct net_bridge *br, struct net_bridge_port *p,
+ /* Neighbor Advertisement */
+ memset(na, 0, sizeof(*na) + na_olen);
+ na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+- na->icmph.icmp6_router = 0; /* XXX: should be 1 ? */
++ na->icmph.icmp6_router = (n->flags & NTF_ROUTER) ? 1 : 0;
+ na->icmph.icmp6_override = 1;
+ na->icmph.icmp6_solicited = 1;
+ na->target = ns->target;
+diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
+index 785e19afd6aa..f59230e4fc29 100644
+--- a/net/bridge/netfilter/ebtables.c
++++ b/net/bridge/netfilter/ebtables.c
+@@ -2165,7 +2165,9 @@ static int compat_copy_entries(unsigned char *data, unsigned int size_user,
+ if (ret < 0)
+ return ret;
+
+- WARN_ON(size_remaining);
++ if (size_remaining)
++ return -EINVAL;
++
+ return state->buf_kern_offset;
+ }
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index a26d87073f71..73ebacabfde8 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4349,12 +4349,17 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
+
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
+
++ /* check if bpf_xdp_adjust_head was used */
+ off = xdp->data - orig_data;
+- if (off > 0)
+- __skb_pull(skb, off);
+- else if (off < 0)
+- __skb_push(skb, -off);
+- skb->mac_header += off;
++ if (off) {
++ if (off > 0)
++ __skb_pull(skb, off);
++ else if (off < 0)
++ __skb_push(skb, -off);
++
++ skb->mac_header += off;
++ skb_reset_network_header(skb);
++ }
+
+ /* check if bpf_xdp_adjust_tail was used. it can only "shrink"
+ * pckt.
+@@ -4465,23 +4470,6 @@ static int netif_rx_internal(struct sk_buff *skb)
+
+ trace_netif_rx(skb);
+
+- if (static_branch_unlikely(&generic_xdp_needed_key)) {
+- int ret;
+-
+- preempt_disable();
+- rcu_read_lock();
+- ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+- rcu_read_unlock();
+- preempt_enable();
+-
+- /* Consider XDP consuming the packet a success from
+- * the netdev point of view we do not want to count
+- * this as an error.
+- */
+- if (ret != XDP_PASS)
+- return NET_RX_SUCCESS;
+- }
+-
+ #ifdef CONFIG_RPS
+ if (static_key_false(&rps_needed)) {
+ struct rps_dev_flow voidflow, *rflow = &voidflow;
+@@ -4815,6 +4803,18 @@ another_round:
+
+ __this_cpu_inc(softnet_data.processed);
+
++ if (static_branch_unlikely(&generic_xdp_needed_key)) {
++ int ret2;
++
++ preempt_disable();
++ ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
++ preempt_enable();
++
++ if (ret2 != XDP_PASS)
++ return NET_RX_DROP;
++ skb_reset_mac_len(skb);
++ }
++
+ if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
+ skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
+ skb = skb_vlan_untag(skb);
+@@ -5133,19 +5133,6 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
+ if (skb_defer_rx_timestamp(skb))
+ return NET_RX_SUCCESS;
+
+- if (static_branch_unlikely(&generic_xdp_needed_key)) {
+- int ret;
+-
+- preempt_disable();
+- rcu_read_lock();
+- ret = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
+- rcu_read_unlock();
+- preempt_enable();
+-
+- if (ret != XDP_PASS)
+- return NET_RX_DROP;
+- }
+-
+ rcu_read_lock();
+ #ifdef CONFIG_RPS
+ if (static_key_false(&rps_needed)) {
+@@ -5166,7 +5153,6 @@ static int netif_receive_skb_internal(struct sk_buff *skb)
+
+ static void netif_receive_skb_list_internal(struct list_head *head)
+ {
+- struct bpf_prog *xdp_prog = NULL;
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+
+@@ -5179,21 +5165,6 @@ static void netif_receive_skb_list_internal(struct list_head *head)
+ }
+ list_splice_init(&sublist, head);
+
+- if (static_branch_unlikely(&generic_xdp_needed_key)) {
+- preempt_disable();
+- rcu_read_lock();
+- list_for_each_entry_safe(skb, next, head, list) {
+- xdp_prog = rcu_dereference(skb->dev->xdp_prog);
+- skb_list_del_init(skb);
+- if (do_xdp_generic(xdp_prog, skb) == XDP_PASS)
+- list_add_tail(&skb->list, &sublist);
+- }
+- rcu_read_unlock();
+- preempt_enable();
+- /* Put passed packets back on main list */
+- list_splice_init(&sublist, head);
+- }
+-
+ rcu_read_lock();
+ #ifdef CONFIG_RPS
+ if (static_key_false(&rps_needed)) {
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 91b950261975..9daf1a4118b5 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4367,7 +4367,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
+ return -ENODEV;
+
+ idev = __in6_dev_get_safely(dev);
+- if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
++ if (unlikely(!idev || !idev->cnf.forwarding))
+ return BPF_FIB_LKUP_RET_FWD_DISABLED;
+
+ if (flags & BPF_FIB_LOOKUP_OUTPUT) {
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index e260d44ebdca..bf738ec68cb5 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -1885,8 +1885,8 @@ static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ goto nla_put_failure;
+ {
+ unsigned long now = jiffies;
+- unsigned int flush_delta = now - tbl->last_flush;
+- unsigned int rand_delta = now - tbl->last_rand;
++ long flush_delta = now - tbl->last_flush;
++ long rand_delta = now - tbl->last_rand;
+ struct neigh_hash_table *nht;
+ struct ndt_config ndc = {
+ .ndtc_key_len = tbl->key_len,
+diff --git a/net/core/sock.c b/net/core/sock.c
+index bbde5f6a7dc9..b9ec14f2c729 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -2179,8 +2179,8 @@ static void sk_leave_memory_pressure(struct sock *sk)
+ } else {
+ unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
+
+- if (memory_pressure && *memory_pressure)
+- *memory_pressure = 0;
++ if (memory_pressure && READ_ONCE(*memory_pressure))
++ WRITE_ONCE(*memory_pressure, 0);
+ }
+ }
+
+diff --git a/net/dsa/port.c b/net/dsa/port.c
+index ed0595459df1..ea7efc86b9d7 100644
+--- a/net/dsa/port.c
++++ b/net/dsa/port.c
+@@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
+
+ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+ {
+- u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING;
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+ int err;
+@@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
+ return err;
+ }
+
+- dsa_port_set_state_now(dp, stp_state);
++ if (!dp->bridge_dev)
++ dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
+
+ return 0;
+ }
+@@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy)
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
+
+- dsa_port_set_state_now(dp, BR_STATE_DISABLED);
++ if (!dp->bridge_dev)
++ dsa_port_set_state_now(dp, BR_STATE_DISABLED);
+
+ if (ds->ops->port_disable)
+ ds->ops->port_disable(ds, port, phy);
+diff --git a/net/dsa/slave.c b/net/dsa/slave.c
+index b39720d0995d..8ee28b6016d8 100644
+--- a/net/dsa/slave.c
++++ b/net/dsa/slave.c
+@@ -1219,9 +1219,9 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
+ phy_flags = ds->ops->get_phy_flags(ds, dp->index);
+
+ ret = phylink_of_phy_connect(dp->pl, port_dn, phy_flags);
+- if (ret == -ENODEV) {
+- /* We could not connect to a designated PHY or SFP, so use the
+- * switch internal MDIO bus instead
++ if (ret == -ENODEV && ds->slave_mii_bus) {
++ /* We could not connect to a designated PHY or SFP, so try to
++ * use the switch internal MDIO bus instead
+ */
+ ret = dsa_slave_phy_connect(slave_dev, dp->index);
+ if (ret) {
+@@ -1233,7 +1233,7 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
+ }
+ }
+
+- return 0;
++ return ret;
+ }
+
+ static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
+diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
+index e7857a8ac86d..f3074249c6fc 100644
+--- a/net/ieee802154/6lowpan/reassembly.c
++++ b/net/ieee802154/6lowpan/reassembly.c
+@@ -629,7 +629,7 @@ err_sysctl:
+
+ void lowpan_net_frag_exit(void)
+ {
+- inet_frags_fini(&lowpan_frags);
+ lowpan_frags_sysctl_unregister();
+ unregister_pernet_subsys(&lowpan_frags_ops);
++ inet_frags_fini(&lowpan_frags);
+ }
+diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
+index 1fbe2f815474..bbf3b3daa999 100644
+--- a/net/ipv4/af_inet.c
++++ b/net/ipv4/af_inet.c
+@@ -424,8 +424,8 @@ int inet_release(struct socket *sock)
+ if (sock_flag(sk, SOCK_LINGER) &&
+ !(current->flags & PF_EXITING))
+ timeout = sk->sk_lingertime;
+- sock->sk = NULL;
+ sk->sk_prot->close(sk, timeout);
++ sock->sk = NULL;
+ }
+ return 0;
+ }
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 636a11c56cf5..79320858e719 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -937,7 +937,7 @@ struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+ req->sk = child;
+ req->dl_next = NULL;
+ if (queue->rskq_accept_head == NULL)
+- queue->rskq_accept_head = req;
++ WRITE_ONCE(queue->rskq_accept_head, req);
+ else
+ queue->rskq_accept_tail->dl_next = req;
+ queue->rskq_accept_tail = req;
+diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
+index d63091812342..fbf30122e8bf 100644
+--- a/net/ipv4/ip_output.c
++++ b/net/ipv4/ip_output.c
+@@ -940,7 +940,7 @@ static int __ip_append_data(struct sock *sk,
+ unsigned int fraglen;
+ unsigned int fraggap;
+ unsigned int alloclen;
+- unsigned int pagedlen = 0;
++ unsigned int pagedlen;
+ struct sk_buff *skb_prev;
+ alloc_new_skb:
+ skb_prev = skb;
+@@ -957,6 +957,7 @@ alloc_new_skb:
+ if (datalen > mtu - fragheaderlen)
+ datalen = maxfraglen - fragheaderlen;
+ fraglen = datalen + fragheaderlen;
++ pagedlen = 0;
+
+ if ((flags & MSG_MORE) &&
+ !(rt->dst.dev->features&NETIF_F_SG))
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 420e891ac59d..f03a1b68e70f 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -574,8 +574,9 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, u8 proto)
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
+ }
+- ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src, 0,
+- RT_TOS(tos), tunnel->parms.link, tunnel->fwmark);
++ ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
++ tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
++ 0, skb->mark);
+ if (tunnel->encap.type != TUNNEL_ENCAP_NONE)
+ goto tx_error;
+ rt = ip_route_output_key(tunnel->net, &fl4);
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index cd8a92e7a39e..af9361eba64a 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -325,7 +325,7 @@ void tcp_enter_memory_pressure(struct sock *sk)
+ {
+ unsigned long val;
+
+- if (tcp_memory_pressure)
++ if (READ_ONCE(tcp_memory_pressure))
+ return;
+ val = jiffies;
+
+@@ -340,7 +340,7 @@ void tcp_leave_memory_pressure(struct sock *sk)
+ {
+ unsigned long val;
+
+- if (!tcp_memory_pressure)
++ if (!READ_ONCE(tcp_memory_pressure))
+ return;
+ val = xchg(&tcp_memory_pressure, 0);
+ if (val)
+diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
+index 0c0522b79b43..aa343654abfc 100644
+--- a/net/ipv4/udp_offload.c
++++ b/net/ipv4/udp_offload.c
+@@ -227,6 +227,11 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
+ seg = segs;
+ uh = udp_hdr(seg);
+
++ /* preserve TX timestamp flags and TS key for first segment */
++ skb_shinfo(seg)->tskey = skb_shinfo(gso_skb)->tskey;
++ skb_shinfo(seg)->tx_flags |=
++ (skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP);
++
+ /* compute checksum adjustment based on old length versus new */
+ newlen = htons(sizeof(*uh) + mss);
+ check = csum16_add(csum16_sub(uh->check, uh->len), newlen);
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index bbb5ffb3397d..7091568b9f63 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -1529,7 +1529,8 @@ static struct fib6_node *fib6_locate_1(struct fib6_node *root,
+ if (plen == fn->fn_bit)
+ return fn;
+
+- prev = fn;
++ if (fn->fn_flags & RTN_RTINFO)
++ prev = fn;
+
+ next:
+ /*
+diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
+index b3515a4f1303..1f2d0022ba6f 100644
+--- a/net/ipv6/ip6_gre.c
++++ b/net/ipv6/ip6_gre.c
+@@ -2218,6 +2218,7 @@ static void ip6erspan_tap_setup(struct net_device *dev)
+ {
+ ether_setup(dev);
+
++ dev->max_mtu = 0;
+ dev->netdev_ops = &ip6erspan_netdev_ops;
+ dev->needs_free_netdev = true;
+ dev->priv_destructor = ip6gre_dev_free;
+diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
+index eed9231c90ad..9886a84c2511 100644
+--- a/net/ipv6/ip6_output.c
++++ b/net/ipv6/ip6_output.c
+@@ -1357,7 +1357,7 @@ emsgsize:
+ unsigned int fraglen;
+ unsigned int fraggap;
+ unsigned int alloclen;
+- unsigned int pagedlen = 0;
++ unsigned int pagedlen;
+ alloc_new_skb:
+ /* There's no room in the current skb */
+ if (skb)
+@@ -1381,6 +1381,7 @@ alloc_new_skb:
+ if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
+ datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
+ fraglen = datalen + fragheaderlen;
++ pagedlen = 0;
+
+ if ((flags & MSG_MORE) &&
+ !(rt->dst.dev->features&NETIF_F_SG))
+diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
+index 4856d9320b28..a41156a00dd4 100644
+--- a/net/ipv6/raw.c
++++ b/net/ipv6/raw.c
+@@ -660,6 +660,8 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
+
+ skb->ip_summed = CHECKSUM_NONE;
+
++ sock_tx_timestamp(sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
++
+ if (flags & MSG_CONFIRM)
+ skb_set_dst_pending_confirm(skb, 1);
+
+diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
+index 095825f964e2..c6132e39ab16 100644
+--- a/net/ipv6/reassembly.c
++++ b/net/ipv6/reassembly.c
+@@ -593,8 +593,8 @@ err_protocol:
+
+ void ipv6_frag_exit(void)
+ {
+- inet_frags_fini(&ip6_frags);
+ ip6_frags_sysctl_unregister();
+ unregister_pernet_subsys(&ip6_frags_ops);
+ inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
++ inet_frags_fini(&ip6_frags);
+ }
+diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
+index f024914da1b2..23a1002ed86d 100644
+--- a/net/iucv/af_iucv.c
++++ b/net/iucv/af_iucv.c
+@@ -13,6 +13,7 @@
+ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+ #include <linux/module.h>
++#include <linux/netdevice.h>
+ #include <linux/types.h>
+ #include <linux/list.h>
+ #include <linux/errno.h>
+@@ -355,6 +356,9 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ err = -ENODEV;
+ goto err_free;
+ }
++
++ dev_hard_header(skb, skb->dev, ETH_P_AF_IUCV, NULL, NULL, skb->len);
++
+ if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
+ err = -ENETDOWN;
+ goto err_free;
+@@ -367,6 +371,8 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
+ skb_trim(skb, skb->dev->mtu);
+ }
+ skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
++
++ __skb_header_release(skb);
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb) {
+ err = -ENOMEM;
+@@ -466,12 +472,14 @@ static void iucv_sever_path(struct sock *sk, int with_user_data)
+ /* Send controlling flags through an IUCV socket for HIPER transport */
+ static int iucv_send_ctrl(struct sock *sk, u8 flags)
+ {
++ struct iucv_sock *iucv = iucv_sk(sk);
+ int err = 0;
+ int blen;
+ struct sk_buff *skb;
+ u8 shutdown = 0;
+
+- blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
++ blen = sizeof(struct af_iucv_trans_hdr) +
++ LL_RESERVED_SPACE(iucv->hs_dev);
+ if (sk->sk_shutdown & SEND_SHUTDOWN) {
+ /* controlling flags should be sent anyway */
+ shutdown = sk->sk_shutdown;
+@@ -1131,7 +1139,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
+ * segmented records using the MSG_EOR flag), but
+ * for SOCK_STREAM we might want to improve it in future */
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+- headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
++ headroom = sizeof(struct af_iucv_trans_hdr) +
++ LL_RESERVED_SPACE(iucv->hs_dev);
+ linear = len;
+ } else {
+ if (len < PAGE_SIZE) {
+@@ -2456,6 +2465,13 @@ out:
+ return err;
+ }
+
++static void afiucv_iucv_exit(void)
++{
++ device_unregister(af_iucv_dev);
++ driver_unregister(&af_iucv_driver);
++ pr_iucv->iucv_unregister(&af_iucv_handler, 0);
++}
++
+ static int __init afiucv_init(void)
+ {
+ int err;
+@@ -2489,11 +2505,18 @@ static int __init afiucv_init(void)
+ err = afiucv_iucv_init();
+ if (err)
+ goto out_sock;
+- } else
+- register_netdevice_notifier(&afiucv_netdev_notifier);
++ }
++
++ err = register_netdevice_notifier(&afiucv_netdev_notifier);
++ if (err)
++ goto out_notifier;
++
+ dev_add_pack(&iucv_packet_type);
+ return 0;
+
++out_notifier:
++ if (pr_iucv)
++ afiucv_iucv_exit();
+ out_sock:
+ sock_unregister(PF_IUCV);
+ out_proto:
+@@ -2507,12 +2530,11 @@ out:
+ static void __exit afiucv_exit(void)
+ {
+ if (pr_iucv) {
+- device_unregister(af_iucv_dev);
+- driver_unregister(&af_iucv_driver);
+- pr_iucv->iucv_unregister(&af_iucv_handler, 0);
++ afiucv_iucv_exit();
+ symbol_put(iucv_if);
+- } else
+- unregister_netdevice_notifier(&afiucv_netdev_notifier);
++ }
++
++ unregister_netdevice_notifier(&afiucv_netdev_notifier);
+ dev_remove_pack(&iucv_packet_type);
+ sock_unregister(PF_IUCV);
+ proto_unregister(&iucv_proto);
+diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
+index 52b5a2797c0c..e4dec03a19fe 100644
+--- a/net/l2tp/l2tp_core.c
++++ b/net/l2tp/l2tp_core.c
+@@ -1735,7 +1735,8 @@ static __net_exit void l2tp_exit_net(struct net *net)
+ }
+ rcu_read_unlock_bh();
+
+- flush_workqueue(l2tp_wq);
++ if (l2tp_wq)
++ flush_workqueue(l2tp_wq);
+ rcu_barrier();
+
+ for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index b99e73a7e7e0..ce841d59bc72 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -113,22 +113,26 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
+ *
+ * Send data via reliable llc2 connection.
+ * Returns 0 upon success, non-zero if action did not succeed.
++ *
++ * This function always consumes a reference to the skb.
+ */
+ static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
+ {
+ struct llc_sock* llc = llc_sk(sk);
+- int rc = 0;
+
+ if (unlikely(llc_data_accept_state(llc->state) ||
+ llc->remote_busy_flag ||
+ llc->p_flag)) {
+ long timeout = sock_sndtimeo(sk, noblock);
++ int rc;
+
+ rc = llc_ui_wait_for_busy_core(sk, timeout);
++ if (rc) {
++ kfree_skb(skb);
++ return rc;
++ }
+ }
+- if (unlikely(!rc))
+- rc = llc_build_and_send_pkt(sk, skb);
+- return rc;
++ return llc_build_and_send_pkt(sk, skb);
+ }
+
+ static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
+@@ -900,7 +904,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
+ int flags = msg->msg_flags;
+ int noblock = flags & MSG_DONTWAIT;
+- struct sk_buff *skb;
++ struct sk_buff *skb = NULL;
+ size_t size = 0;
+ int rc = -EINVAL, copied = 0, hdrlen;
+
+@@ -909,10 +913,10 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ lock_sock(sk);
+ if (addr) {
+ if (msg->msg_namelen < sizeof(*addr))
+- goto release;
++ goto out;
+ } else {
+ if (llc_ui_addr_null(&llc->addr))
+- goto release;
++ goto out;
+ addr = &llc->addr;
+ }
+ /* must bind connection to sap if user hasn't done it. */
+@@ -920,7 +924,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ /* bind to sap with null dev, exclusive. */
+ rc = llc_ui_autobind(sock, addr);
+ if (rc)
+- goto release;
++ goto out;
+ }
+ hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
+ size = hdrlen + len;
+@@ -929,12 +933,12 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ copied = size - hdrlen;
+ rc = -EINVAL;
+ if (copied < 0)
+- goto release;
++ goto out;
+ release_sock(sk);
+ skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+ lock_sock(sk);
+ if (!skb)
+- goto release;
++ goto out;
+ skb->dev = llc->dev;
+ skb->protocol = llc_proto_type(addr->sllc_arphrd);
+ skb_reserve(skb, hdrlen);
+@@ -944,29 +948,31 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
+ llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac,
+ addr->sllc_sap);
++ skb = NULL;
+ goto out;
+ }
+ if (addr->sllc_test) {
+ llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac,
+ addr->sllc_sap);
++ skb = NULL;
+ goto out;
+ }
+ if (addr->sllc_xid) {
+ llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac,
+ addr->sllc_sap);
++ skb = NULL;
+ goto out;
+ }
+ rc = -ENOPROTOOPT;
+ if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
+ goto out;
+ rc = llc_ui_send_data(sk, skb, noblock);
++ skb = NULL;
+ out:
+- if (rc) {
+- kfree_skb(skb);
+-release:
++ kfree_skb(skb);
++ if (rc)
+ dprintk("%s: failed sending from %02X to %02X: %d\n",
+ __func__, llc->laddr.lsap, llc->daddr.lsap, rc);
+- }
+ release_sock(sk);
+ return rc ? : copied;
+ }
+diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
+index ed2aca12460c..a79b739eb223 100644
+--- a/net/llc/llc_conn.c
++++ b/net/llc/llc_conn.c
+@@ -55,6 +55,8 @@ int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ;
+ * (executing it's actions and changing state), upper layer will be
+ * indicated or confirmed, if needed. Returns 0 for success, 1 for
+ * failure. The socket lock has to be held before calling this function.
++ *
++ * This function always consumes a reference to the skb.
+ */
+ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
+ {
+@@ -62,12 +64,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
+ struct llc_sock *llc = llc_sk(skb->sk);
+ struct llc_conn_state_ev *ev = llc_conn_ev(skb);
+
+- /*
+- * We have to hold the skb, because llc_conn_service will kfree it in
+- * the sending path and we need to look at the skb->cb, where we encode
+- * llc_conn_state_ev.
+- */
+- skb_get(skb);
+ ev->ind_prim = ev->cfm_prim = 0;
+ /*
+ * Send event to state machine
+@@ -75,21 +71,12 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
+ rc = llc_conn_service(skb->sk, skb);
+ if (unlikely(rc != 0)) {
+ printk(KERN_ERR "%s: llc_conn_service failed\n", __func__);
+- goto out_kfree_skb;
+- }
+-
+- if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
+- /* indicate or confirm not required */
+- if (!skb->next)
+- goto out_kfree_skb;
+ goto out_skb_put;
+ }
+
+- if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */
+- skb_get(skb);
+-
+ switch (ev->ind_prim) {
+ case LLC_DATA_PRIM:
++ skb_get(skb);
+ llc_save_primitive(sk, skb, LLC_DATA_PRIM);
+ if (unlikely(sock_queue_rcv_skb(sk, skb))) {
+ /*
+@@ -106,6 +93,7 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
+ * skb->sk pointing to the newly created struct sock in
+ * llc_conn_handler. -acme
+ */
++ skb_get(skb);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ sk->sk_state_change(sk);
+ break;
+@@ -121,7 +109,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
+ sk->sk_state_change(sk);
+ }
+ }
+- kfree_skb(skb);
+ sock_put(sk);
+ break;
+ case LLC_RESET_PRIM:
+@@ -130,14 +117,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
+ * RESET is not being notified to upper layers for now
+ */
+ printk(KERN_INFO "%s: received a reset ind!\n", __func__);
+- kfree_skb(skb);
+ break;
+ default:
+- if (ev->ind_prim) {
++ if (ev->ind_prim)
+ printk(KERN_INFO "%s: received unknown %d prim!\n",
+ __func__, ev->ind_prim);
+- kfree_skb(skb);
+- }
+ /* No indication */
+ break;
+ }
+@@ -179,15 +163,12 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
+ printk(KERN_INFO "%s: received a reset conf!\n", __func__);
+ break;
+ default:
+- if (ev->cfm_prim) {
++ if (ev->cfm_prim)
+ printk(KERN_INFO "%s: received unknown %d prim!\n",
+ __func__, ev->cfm_prim);
+- break;
+- }
+- goto out_skb_put; /* No confirmation */
++ /* No confirmation */
++ break;
+ }
+-out_kfree_skb:
+- kfree_skb(skb);
+ out_skb_put:
+ kfree_skb(skb);
+ return rc;
+diff --git a/net/llc/llc_if.c b/net/llc/llc_if.c
+index 8db03c2d5440..ad6547736c21 100644
+--- a/net/llc/llc_if.c
++++ b/net/llc/llc_if.c
+@@ -38,6 +38,8 @@
+ * closed and -EBUSY when sending data is not permitted in this state or
+ * LLC has send an I pdu with p bit set to 1 and is waiting for it's
+ * response.
++ *
++ * This function always consumes a reference to the skb.
+ */
+ int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
+ {
+@@ -46,20 +48,22 @@ int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
+ struct llc_sock *llc = llc_sk(sk);
+
+ if (unlikely(llc->state == LLC_CONN_STATE_ADM))
+- goto out;
++ goto out_free;
+ rc = -EBUSY;
+ if (unlikely(llc_data_accept_state(llc->state) || /* data_conn_refuse */
+ llc->p_flag)) {
+ llc->failed_data_req = 1;
+- goto out;
++ goto out_free;
+ }
+ ev = llc_conn_ev(skb);
+ ev->type = LLC_CONN_EV_TYPE_PRIM;
+ ev->prim = LLC_DATA_PRIM;
+ ev->prim_type = LLC_PRIM_TYPE_REQ;
+ skb->dev = llc->dev;
+- rc = llc_conn_state_process(sk, skb);
+-out:
++ return llc_conn_state_process(sk, skb);
++
++out_free:
++ kfree_skb(skb);
+ return rc;
+ }
+
+diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
+index 3d5520776655..0b60e330c115 100644
+--- a/net/mac80211/rc80211_minstrel_ht.c
++++ b/net/mac80211/rc80211_minstrel_ht.c
+@@ -529,7 +529,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
+
+ /* (re)Initialize group rate indexes */
+ for(j = 0; j < MAX_THR_RATES; j++)
+- tmp_group_tp_rate[j] = group;
++ tmp_group_tp_rate[j] = MCS_GROUP_RATES * group;
+
+ for (i = 0; i < MCS_GROUP_RATES; i++) {
+ if (!(mi->supported[group] & BIT(i)))
+diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
+index b12f23c996f4..02d0b22d0114 100644
+--- a/net/mac80211/rx.c
++++ b/net/mac80211/rx.c
+@@ -3391,9 +3391,18 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ /* process for all: mesh, mlme, ibss */
+ break;
++ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
++ if (is_multicast_ether_addr(mgmt->da) &&
++ !is_broadcast_ether_addr(mgmt->da))
++ return RX_DROP_MONITOR;
++
++ /* process only for station/IBSS */
++ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
++ sdata->vif.type != NL80211_IFTYPE_ADHOC)
++ return RX_DROP_MONITOR;
++ break;
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ if (is_multicast_ether_addr(mgmt->da) &&
+ !is_broadcast_ether_addr(mgmt->da))
+diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
+index 94f53a9b7d1a..faf6ef1b6a45 100644
+--- a/net/mpls/mpls_iptunnel.c
++++ b/net/mpls/mpls_iptunnel.c
+@@ -28,7 +28,7 @@
+ #include "internal.h"
+
+ static const struct nla_policy mpls_iptunnel_policy[MPLS_IPTUNNEL_MAX + 1] = {
+- [MPLS_IPTUNNEL_DST] = { .type = NLA_U32 },
++ [MPLS_IPTUNNEL_DST] = { .len = sizeof(u32) },
+ [MPLS_IPTUNNEL_TTL] = { .type = NLA_U8 },
+ };
+
+diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
+index 7ba9ea55816a..31fa94064a62 100644
+--- a/net/netfilter/nf_conntrack_netlink.c
++++ b/net/netfilter/nf_conntrack_netlink.c
+@@ -555,10 +555,8 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
+ goto nla_put_failure;
+
+ if (ctnetlink_dump_status(skb, ct) < 0 ||
+- ctnetlink_dump_timeout(skb, ct) < 0 ||
+ ctnetlink_dump_acct(skb, ct, type) < 0 ||
+ ctnetlink_dump_timestamp(skb, ct) < 0 ||
+- ctnetlink_dump_protoinfo(skb, ct) < 0 ||
+ ctnetlink_dump_helpinfo(skb, ct) < 0 ||
+ ctnetlink_dump_mark(skb, ct) < 0 ||
+ ctnetlink_dump_secctx(skb, ct) < 0 ||
+@@ -570,6 +568,11 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
+ ctnetlink_dump_ct_synproxy(skb, ct) < 0)
+ goto nla_put_failure;
+
++ if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
++ (ctnetlink_dump_timeout(skb, ct) < 0 ||
++ ctnetlink_dump_protoinfo(skb, ct) < 0))
++ goto nla_put_failure;
++
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 70bd730ca059..890799c16aa4 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -491,14 +491,17 @@ EXPORT_SYMBOL_GPL(nf_flow_table_init);
+ static void nf_flow_table_do_cleanup(struct flow_offload *flow, void *data)
+ {
+ struct net_device *dev = data;
++ struct flow_offload_entry *e;
++
++ e = container_of(flow, struct flow_offload_entry, flow);
+
+ if (!dev) {
+ flow_offload_teardown(flow);
+ return;
+ }
+-
+- if (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
+- flow->tuplehash[1].tuple.iifidx == dev->ifindex)
++ if (net_eq(nf_ct_net(e->ct), dev_net(dev)) &&
++ (flow->tuplehash[0].tuple.iifidx == dev->ifindex ||
++ flow->tuplehash[1].tuple.iifidx == dev->ifindex))
+ flow_offload_dead(flow);
+ }
+
+diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
+index 1ef8cb789c41..166edea0e452 100644
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -103,8 +103,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
+ ct->status & IPS_SEQ_ADJUST)
+ goto out;
+
+- if (ctinfo == IP_CT_NEW ||
+- ctinfo == IP_CT_RELATED)
++ if (!nf_ct_is_confirmed(ct))
+ goto out;
+
+ if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
+diff --git a/net/netfilter/nft_osf.c b/net/netfilter/nft_osf.c
+index a35fb59ace73..df4e3e0412ed 100644
+--- a/net/netfilter/nft_osf.c
++++ b/net/netfilter/nft_osf.c
+@@ -69,6 +69,15 @@ nla_put_failure:
+ return -1;
+ }
+
++static int nft_osf_validate(const struct nft_ctx *ctx,
++ const struct nft_expr *expr,
++ const struct nft_data **data)
++{
++ return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
++ (1 << NF_INET_PRE_ROUTING) |
++ (1 << NF_INET_FORWARD));
++}
++
+ static struct nft_expr_type nft_osf_type;
+ static const struct nft_expr_ops nft_osf_op = {
+ .eval = nft_osf_eval,
+@@ -76,6 +85,7 @@ static const struct nft_expr_ops nft_osf_op = {
+ .init = nft_osf_init,
+ .dump = nft_osf_dump,
+ .type = &nft_osf_type,
++ .validate = nft_osf_validate,
+ };
+
+ static struct nft_expr_type nft_osf_type __read_mostly = {
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 015124e649cb..05118e03c3e4 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -488,6 +488,23 @@ static bool nft_hash_lookup_fast(const struct net *net,
+ return false;
+ }
+
++static u32 nft_jhash(const struct nft_set *set, const struct nft_hash *priv,
++ const struct nft_set_ext *ext)
++{
++ const struct nft_data *key = nft_set_ext_key(ext);
++ u32 hash, k1;
++
++ if (set->klen == 4) {
++ k1 = *(u32 *)key;
++ hash = jhash_1word(k1, priv->seed);
++ } else {
++ hash = jhash(key, set->klen, priv->seed);
++ }
++ hash = reciprocal_scale(hash, priv->buckets);
++
++ return hash;
++}
++
+ static int nft_hash_insert(const struct net *net, const struct nft_set *set,
+ const struct nft_set_elem *elem,
+ struct nft_set_ext **ext)
+@@ -497,8 +514,7 @@ static int nft_hash_insert(const struct net *net, const struct nft_set *set,
+ u8 genmask = nft_genmask_next(net);
+ u32 hash;
+
+- hash = jhash(nft_set_ext_key(&this->ext), set->klen, priv->seed);
+- hash = reciprocal_scale(hash, priv->buckets);
++ hash = nft_jhash(set, priv, &this->ext);
+ hlist_for_each_entry(he, &priv->table[hash], node) {
+ if (!memcmp(nft_set_ext_key(&this->ext),
+ nft_set_ext_key(&he->ext), set->klen) &&
+@@ -537,10 +553,9 @@ static void *nft_hash_deactivate(const struct net *net,
+ u8 genmask = nft_genmask_next(net);
+ u32 hash;
+
+- hash = jhash(nft_set_ext_key(&this->ext), set->klen, priv->seed);
+- hash = reciprocal_scale(hash, priv->buckets);
++ hash = nft_jhash(set, priv, &this->ext);
+ hlist_for_each_entry(he, &priv->table[hash], node) {
+- if (!memcmp(nft_set_ext_key(&this->ext), &elem->key.val,
++ if (!memcmp(nft_set_ext_key(&he->ext), &elem->key.val,
+ set->klen) &&
+ nft_set_elem_active(&he->ext, genmask)) {
+ nft_set_elem_change_active(net, set, &he->ext);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index ac65e66d1d72..ddf90e6fac51 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -1297,15 +1297,21 @@ static void packet_sock_destruct(struct sock *sk)
+
+ static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
+ {
+- u32 rxhash;
++ u32 *history = po->rollover->history;
++ u32 victim, rxhash;
+ int i, count = 0;
+
+ rxhash = skb_get_hash(skb);
+ for (i = 0; i < ROLLOVER_HLEN; i++)
+- if (po->rollover->history[i] == rxhash)
++ if (READ_ONCE(history[i]) == rxhash)
+ count++;
+
+- po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
++ victim = prandom_u32() % ROLLOVER_HLEN;
++
++ /* Avoid dirtying the cache line if possible */
++ if (READ_ONCE(history[victim]) != rxhash)
++ WRITE_ONCE(history[victim], rxhash);
++
+ return count > (ROLLOVER_HLEN >> 1);
+ }
+
+@@ -3371,20 +3377,29 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ sock_recv_ts_and_drops(msg, sk, skb);
+
+ if (msg->msg_name) {
++ int copy_len;
++
+ /* If the address length field is there to be filled
+ * in, we fill it in now.
+ */
+ if (sock->type == SOCK_PACKET) {
+ __sockaddr_check_size(sizeof(struct sockaddr_pkt));
+ msg->msg_namelen = sizeof(struct sockaddr_pkt);
++ copy_len = msg->msg_namelen;
+ } else {
+ struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
+
+ msg->msg_namelen = sll->sll_halen +
+ offsetof(struct sockaddr_ll, sll_addr);
++ copy_len = msg->msg_namelen;
++ if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
++ memset(msg->msg_name +
++ offsetof(struct sockaddr_ll, sll_addr),
++ 0, sizeof(sll->sll_addr));
++ msg->msg_namelen = sizeof(struct sockaddr_ll);
++ }
+ }
+- memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
+- msg->msg_namelen);
++ memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
+ }
+
+ if (pkt_sk(sk)->auxdata) {
+diff --git a/net/rds/ib_stats.c b/net/rds/ib_stats.c
+index 9252ad126335..ac46d8961b61 100644
+--- a/net/rds/ib_stats.c
++++ b/net/rds/ib_stats.c
+@@ -42,7 +42,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rds_ib_statistics, rds_ib_stats);
+ static const char *const rds_ib_stat_names[] = {
+ "ib_connect_raced",
+ "ib_listen_closed_stale",
+- "s_ib_evt_handler_call",
++ "ib_evt_handler_call",
+ "ib_tasklet_call",
+ "ib_tx_cq_event",
+ "ib_tx_ring_full",
+diff --git a/net/rds/stats.c b/net/rds/stats.c
+index 73be187d389e..6bbab4d74c4f 100644
+--- a/net/rds/stats.c
++++ b/net/rds/stats.c
+@@ -76,6 +76,8 @@ static const char *const rds_stat_names[] = {
+ "cong_update_received",
+ "cong_send_error",
+ "cong_send_blocked",
++ "recv_bytes_added_to_sock",
++ "recv_bytes_freed_fromsock",
+ };
+
+ void rds_stats_info_copy(struct rds_info_iterator *iter,
+diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
+index 7319d3ca30e9..a74edb10cbfc 100644
+--- a/net/rxrpc/af_rxrpc.c
++++ b/net/rxrpc/af_rxrpc.c
+@@ -869,7 +869,6 @@ static void rxrpc_sock_destructor(struct sock *sk)
+ static int rxrpc_release_sock(struct sock *sk)
+ {
+ struct rxrpc_sock *rx = rxrpc_sk(sk);
+- struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));
+
+ _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
+
+@@ -905,8 +904,6 @@ static int rxrpc_release_sock(struct sock *sk)
+ rxrpc_release_calls_on_socket(rx);
+ flush_workqueue(rxrpc_workqueue);
+ rxrpc_purge_queue(&sk->sk_receive_queue);
+- rxrpc_queue_work(&rxnet->service_conn_reaper);
+- rxrpc_queue_work(&rxnet->client_conn_reaper);
+
+ rxrpc_unuse_local(rx->local);
+ rx->local = NULL;
+diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
+index dfd9eab77cc8..ccef6e40e002 100644
+--- a/net/rxrpc/ar-internal.h
++++ b/net/rxrpc/ar-internal.h
+@@ -644,6 +644,7 @@ struct rxrpc_call {
+ u8 ackr_reason; /* reason to ACK */
+ u16 ackr_skew; /* skew on packet being ACK'd */
+ rxrpc_serial_t ackr_serial; /* serial of packet being ACK'd */
++ rxrpc_serial_t ackr_first_seq; /* first sequence number received */
+ rxrpc_seq_t ackr_prev_seq; /* previous sequence number received */
+ rxrpc_seq_t ackr_consumed; /* Highest packet shown consumed */
+ rxrpc_seq_t ackr_seen; /* Highest packet shown seen */
+@@ -902,6 +903,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *);
+ void rxrpc_put_client_conn(struct rxrpc_connection *);
+ void rxrpc_discard_expired_client_conns(struct work_struct *);
+ void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
++void rxrpc_clean_up_local_conns(struct rxrpc_local *);
+
+ /*
+ * conn_event.c
+diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
+index 8079aacaecac..c5566bc4aaca 100644
+--- a/net/rxrpc/call_accept.c
++++ b/net/rxrpc/call_accept.c
+@@ -88,7 +88,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
+ smp_store_release(&b->conn_backlog_head,
+ (head + 1) & (size - 1));
+
+- trace_rxrpc_conn(conn, rxrpc_conn_new_service,
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
+ atomic_read(&conn->usage), here);
+ }
+
+diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
+index c979a56faaef..38d548532024 100644
+--- a/net/rxrpc/conn_client.c
++++ b/net/rxrpc/conn_client.c
+@@ -217,7 +217,8 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
+ rxrpc_get_local(conn->params.local);
+ key_get(conn->params.key);
+
+- trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
++ atomic_read(&conn->usage),
+ __builtin_return_address(0));
+ trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
+ _leave(" = %p", conn);
+@@ -989,11 +990,12 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
+ void rxrpc_put_client_conn(struct rxrpc_connection *conn)
+ {
+ const void *here = __builtin_return_address(0);
++ unsigned int debug_id = conn->debug_id;
+ int n;
+
+ do {
+ n = atomic_dec_return(&conn->usage);
+- trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
++ trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
+ if (n > 0)
+ return;
+ ASSERTCMP(n, >=, 0);
+@@ -1166,3 +1168,47 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
+
+ _leave("");
+ }
++
++/*
++ * Clean up the client connections on a local endpoint.
++ */
++void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
++{
++ struct rxrpc_connection *conn, *tmp;
++ struct rxrpc_net *rxnet = local->rxnet;
++ unsigned int nr_active;
++ LIST_HEAD(graveyard);
++
++ _enter("");
++
++ spin_lock(&rxnet->client_conn_cache_lock);
++ nr_active = rxnet->nr_active_client_conns;
++
++ list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
++ cache_link) {
++ if (conn->params.local == local) {
++ ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);
++
++ trace_rxrpc_client(conn, -1, rxrpc_client_discard);
++ if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
++ BUG();
++ conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
++ list_move(&conn->cache_link, &graveyard);
++ nr_active--;
++ }
++ }
++
++ rxnet->nr_active_client_conns = nr_active;
++ spin_unlock(&rxnet->client_conn_cache_lock);
++ ASSERTCMP(nr_active, >=, 0);
++
++ while (!list_empty(&graveyard)) {
++ conn = list_entry(graveyard.next,
++ struct rxrpc_connection, cache_link);
++ list_del_init(&conn->cache_link);
++
++ rxrpc_put_connection(conn);
++ }
++
++ _leave(" [culled]");
++}
+diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
+index 885dae829f4a..f338efd2880a 100644
+--- a/net/rxrpc/conn_object.c
++++ b/net/rxrpc/conn_object.c
+@@ -272,7 +272,7 @@ bool rxrpc_queue_conn(struct rxrpc_connection *conn)
+ if (n == 0)
+ return false;
+ if (rxrpc_queue_work(&conn->processor))
+- trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here);
+ else
+ rxrpc_put_connection(conn);
+ return true;
+@@ -287,7 +287,7 @@ void rxrpc_see_connection(struct rxrpc_connection *conn)
+ if (conn) {
+ int n = atomic_read(&conn->usage);
+
+- trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here);
+ }
+ }
+
+@@ -299,7 +299,7 @@ void rxrpc_get_connection(struct rxrpc_connection *conn)
+ const void *here = __builtin_return_address(0);
+ int n = atomic_inc_return(&conn->usage);
+
+- trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here);
+ }
+
+ /*
+@@ -313,7 +313,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
+ if (conn) {
+ int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
+ if (n > 0)
+- trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here);
+ else
+ conn = NULL;
+ }
+@@ -336,10 +336,11 @@ static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
+ void rxrpc_put_service_conn(struct rxrpc_connection *conn)
+ {
+ const void *here = __builtin_return_address(0);
++ unsigned int debug_id = conn->debug_id;
+ int n;
+
+ n = atomic_dec_return(&conn->usage);
+- trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
++ trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here);
+ ASSERTCMP(n, >=, 0);
+ if (n == 1)
+ rxrpc_set_service_reap_timer(conn->params.local->rxnet,
+@@ -401,7 +402,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
+ if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
+ continue;
+
+- if (rxnet->live) {
++ if (rxnet->live && !conn->params.local->dead) {
+ idle_timestamp = READ_ONCE(conn->idle_timestamp);
+ expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
+ if (conn->params.local->service_closed)
+@@ -423,7 +424,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
+ */
+ if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
+ continue;
+- trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);
+
+ if (rxrpc_conn_is_client(conn))
+ BUG();
+diff --git a/net/rxrpc/conn_service.c b/net/rxrpc/conn_service.c
+index 80773a50c755..6da7c4bf15e8 100644
+--- a/net/rxrpc/conn_service.c
++++ b/net/rxrpc/conn_service.c
+@@ -138,7 +138,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
+ list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
+ write_unlock(&rxnet->conn_lock);
+
+- trace_rxrpc_conn(conn, rxrpc_conn_new_service,
++ trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
+ atomic_read(&conn->usage),
+ __builtin_return_address(0));
+ }
+diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
+index 7965600ee5de..2f91ab909191 100644
+--- a/net/rxrpc/input.c
++++ b/net/rxrpc/input.c
+@@ -837,7 +837,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
+ u8 acks[RXRPC_MAXACKS];
+ } buf;
+ rxrpc_serial_t acked_serial;
+- rxrpc_seq_t first_soft_ack, hard_ack;
++ rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
+ int nr_acks, offset, ioffset;
+
+ _enter("");
+@@ -851,13 +851,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
+
+ acked_serial = ntohl(buf.ack.serial);
+ first_soft_ack = ntohl(buf.ack.firstPacket);
++ prev_pkt = ntohl(buf.ack.previousPacket);
+ hard_ack = first_soft_ack - 1;
+ nr_acks = buf.ack.nAcks;
+ summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
+ buf.ack.reason : RXRPC_ACK__INVALID);
+
+ trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
+- first_soft_ack, ntohl(buf.ack.previousPacket),
++ first_soft_ack, prev_pkt,
+ summary.ack_reason, nr_acks);
+
+ if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
+@@ -878,8 +879,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
+ rxrpc_propose_ack_respond_to_ack);
+ }
+
+- /* Discard any out-of-order or duplicate ACKs. */
+- if (before_eq(sp->hdr.serial, call->acks_latest))
++ /* Discard any out-of-order or duplicate ACKs (outside lock). */
++ if (before(first_soft_ack, call->ackr_first_seq) ||
++ before(prev_pkt, call->ackr_prev_seq))
+ return;
+
+ buf.info.rxMTU = 0;
+@@ -890,12 +892,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
+
+ spin_lock(&call->input_lock);
+
+- /* Discard any out-of-order or duplicate ACKs. */
+- if (before_eq(sp->hdr.serial, call->acks_latest))
++ /* Discard any out-of-order or duplicate ACKs (inside lock). */
++ if (before(first_soft_ack, call->ackr_first_seq) ||
++ before(prev_pkt, call->ackr_prev_seq))
+ goto out;
+ call->acks_latest_ts = skb->tstamp;
+ call->acks_latest = sp->hdr.serial;
+
++ call->ackr_first_seq = first_soft_ack;
++ call->ackr_prev_seq = prev_pkt;
++
+ /* Parse rwind and mtu sizes if provided. */
+ if (buf.info.rxMTU)
+ rxrpc_input_ackinfo(call, skb, &buf.info);
+diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
+index c752ad487067..04f0976841a4 100644
+--- a/net/rxrpc/local_object.c
++++ b/net/rxrpc/local_object.c
+@@ -430,11 +430,14 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
+
+ _enter("%d", local->debug_id);
+
++ local->dead = true;
++
+ mutex_lock(&rxnet->local_mutex);
+ list_del_init(&local->link);
+ mutex_unlock(&rxnet->local_mutex);
+
+- ASSERT(RB_EMPTY_ROOT(&local->client_conns));
++ rxrpc_clean_up_local_conns(local);
++ rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
+ ASSERT(!local->service);
+
+ if (socket) {
+diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
+index 345dc1c5fe72..31e47cfb3e68 100644
+--- a/net/rxrpc/output.c
++++ b/net/rxrpc/output.c
+@@ -524,6 +524,9 @@ send_fragmentable:
+ }
+ break;
+ #endif
++
++ default:
++ BUG();
+ }
+
+ if (ret < 0)
+diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
+index 1e269441065a..9ecbf8edcf39 100644
+--- a/net/sched/act_csum.c
++++ b/net/sched/act_csum.c
+@@ -560,8 +560,11 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+ {
+ struct tcf_csum *p = to_tcf_csum(a);
++ bool orig_vlan_tag_present = false;
++ unsigned int vlan_hdr_count = 0;
+ struct tcf_csum_params *params;
+ u32 update_flags;
++ __be16 protocol;
+ int action;
+
+ params = rcu_dereference_bh(p->params);
+@@ -574,7 +577,9 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
+ goto drop;
+
+ update_flags = params->update_flags;
+- switch (tc_skb_protocol(skb)) {
++ protocol = tc_skb_protocol(skb);
++again:
++ switch (protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ if (!tcf_csum_ipv4(skb, update_flags))
+ goto drop;
+@@ -583,13 +588,35 @@ static int tcf_csum_act(struct sk_buff *skb, const struct tc_action *a,
+ if (!tcf_csum_ipv6(skb, update_flags))
+ goto drop;
+ break;
++ case cpu_to_be16(ETH_P_8021AD): /* fall through */
++ case cpu_to_be16(ETH_P_8021Q):
++ if (skb_vlan_tag_present(skb) && !orig_vlan_tag_present) {
++ protocol = skb->protocol;
++ orig_vlan_tag_present = true;
++ } else {
++ struct vlan_hdr *vlan = (struct vlan_hdr *)skb->data;
++
++ protocol = vlan->h_vlan_encapsulated_proto;
++ skb_pull(skb, VLAN_HLEN);
++ skb_reset_network_header(skb);
++ vlan_hdr_count++;
++ }
++ goto again;
++ }
++
++out:
++ /* Restore the skb for the pulled VLAN tags */
++ while (vlan_hdr_count--) {
++ skb_push(skb, VLAN_HLEN);
++ skb_reset_network_header(skb);
+ }
+
+ return action;
+
+ drop:
+ qstats_drop_inc(this_cpu_ptr(p->common.cpu_qstats));
+- return TC_ACT_SHOT;
++ action = TC_ACT_SHOT;
++ goto out;
+ }
+
+ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
+index 399e3beae6cf..a30c17a28281 100644
+--- a/net/sched/act_mirred.c
++++ b/net/sched/act_mirred.c
+@@ -445,7 +445,11 @@ static int __init mirred_init_module(void)
+ return err;
+
+ pr_info("Mirror/redirect action on\n");
+- return tcf_register_action(&act_mirred_ops, &mirred_net_ops);
++ err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
++ if (err)
++ unregister_netdevice_notifier(&mirred_device_notifier);
++
++ return err;
+ }
+
+ static void __exit mirred_cleanup_module(void)
+diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
+index e26a24017faa..940e72d6db18 100644
+--- a/net/sched/sch_cbs.c
++++ b/net/sched/sch_cbs.c
+@@ -61,16 +61,20 @@
+ #include <linux/string.h>
+ #include <linux/errno.h>
+ #include <linux/skbuff.h>
++#include <net/netevent.h>
+ #include <net/netlink.h>
+ #include <net/sch_generic.h>
+ #include <net/pkt_sched.h>
+
++static LIST_HEAD(cbs_list);
++static DEFINE_SPINLOCK(cbs_list_lock);
++
+ #define BYTES_PER_KBIT (1000LL / 8)
+
+ struct cbs_sched_data {
+ bool offload;
+ int queue;
+- s64 port_rate; /* in bytes/s */
++ atomic64_t port_rate; /* in bytes/s */
+ s64 last; /* timestamp in ns */
+ s64 credits; /* in bytes */
+ s32 locredit; /* in bytes */
+@@ -82,6 +86,7 @@ struct cbs_sched_data {
+ struct sk_buff **to_free);
+ struct sk_buff *(*dequeue)(struct Qdisc *sch);
+ struct Qdisc *qdisc;
++ struct list_head cbs_list;
+ };
+
+ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+@@ -206,7 +211,8 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
+ /* As sendslope is a negative number, this will decrease the
+ * amount of q->credits.
+ */
+- credits = credits_from_len(len, q->sendslope, q->port_rate);
++ credits = credits_from_len(len, q->sendslope,
++ atomic64_read(&q->port_rate));
+ credits += q->credits;
+
+ q->credits = max_t(s64, credits, q->locredit);
+@@ -293,6 +299,58 @@ static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
+ return 0;
+ }
+
++static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
++{
++ struct ethtool_link_ksettings ecmd;
++ int speed = SPEED_10;
++ int port_rate = -1;
++ int err;
++
++ err = __ethtool_get_link_ksettings(dev, &ecmd);
++ if (err < 0)
++ goto skip;
++
++ if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
++ speed = ecmd.base.speed;
++
++skip:
++ port_rate = speed * 1000 * BYTES_PER_KBIT;
++
++ atomic64_set(&q->port_rate, port_rate);
++ netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
++ dev->name, (long long)atomic64_read(&q->port_rate),
++ ecmd.base.speed);
++}
++
++static int cbs_dev_notifier(struct notifier_block *nb, unsigned long event,
++ void *ptr)
++{
++ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
++ struct cbs_sched_data *q;
++ struct net_device *qdev;
++ bool found = false;
++
++ ASSERT_RTNL();
++
++ if (event != NETDEV_UP && event != NETDEV_CHANGE)
++ return NOTIFY_DONE;
++
++ spin_lock(&cbs_list_lock);
++ list_for_each_entry(q, &cbs_list, cbs_list) {
++ qdev = qdisc_dev(q->qdisc);
++ if (qdev == dev) {
++ found = true;
++ break;
++ }
++ }
++ spin_unlock(&cbs_list_lock);
++
++ if (found)
++ cbs_set_port_rate(dev, q);
++
++ return NOTIFY_DONE;
++}
++
+ static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
+ {
+@@ -314,16 +372,7 @@ static int cbs_change(struct Qdisc *sch, struct nlattr *opt,
+ qopt = nla_data(tb[TCA_CBS_PARMS]);
+
+ if (!qopt->offload) {
+- struct ethtool_link_ksettings ecmd;
+- s64 link_speed;
+-
+- if (!__ethtool_get_link_ksettings(dev, &ecmd))
+- link_speed = ecmd.base.speed;
+- else
+- link_speed = SPEED_1000;
+-
+- q->port_rate = link_speed * 1000 * BYTES_PER_KBIT;
+-
++ cbs_set_port_rate(dev, q);
+ cbs_disable_offload(dev, q);
+ } else {
+ err = cbs_enable_offload(dev, q, qopt, extack);
+@@ -346,6 +395,7 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+ {
+ struct cbs_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
++ int err;
+
+ if (!opt) {
+ NL_SET_ERR_MSG(extack, "Missing CBS qdisc options which are mandatory");
+@@ -366,7 +416,17 @@ static int cbs_init(struct Qdisc *sch, struct nlattr *opt,
+
+ qdisc_watchdog_init(&q->watchdog, sch);
+
+- return cbs_change(sch, opt, extack);
++ err = cbs_change(sch, opt, extack);
++ if (err)
++ return err;
++
++ if (!q->offload) {
++ spin_lock(&cbs_list_lock);
++ list_add(&q->cbs_list, &cbs_list);
++ spin_unlock(&cbs_list_lock);
++ }
++
++ return 0;
+ }
+
+ static void cbs_destroy(struct Qdisc *sch)
+@@ -374,8 +434,11 @@ static void cbs_destroy(struct Qdisc *sch)
+ struct cbs_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+
+- qdisc_watchdog_cancel(&q->watchdog);
++ spin_lock(&cbs_list_lock);
++ list_del(&q->cbs_list);
++ spin_unlock(&cbs_list_lock);
+
++ qdisc_watchdog_cancel(&q->watchdog);
+ cbs_disable_offload(dev, q);
+
+ if (q->qdisc)
+@@ -486,14 +549,29 @@ static struct Qdisc_ops cbs_qdisc_ops __read_mostly = {
+ .owner = THIS_MODULE,
+ };
+
++static struct notifier_block cbs_device_notifier = {
++ .notifier_call = cbs_dev_notifier,
++};
++
+ static int __init cbs_module_init(void)
+ {
+- return register_qdisc(&cbs_qdisc_ops);
++ int err;
++
++ err = register_netdevice_notifier(&cbs_device_notifier);
++ if (err)
++ return err;
++
++ err = register_qdisc(&cbs_qdisc_ops);
++ if (err)
++ unregister_netdevice_notifier(&cbs_device_notifier);
++
++ return err;
+ }
+
+ static void __exit cbs_module_exit(void)
+ {
+ unregister_qdisc(&cbs_qdisc_ops);
++ unregister_netdevice_notifier(&cbs_device_notifier);
+ }
+ module_init(cbs_module_init)
+ module_exit(cbs_module_exit)
+diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
+index 15f8f24c190d..014a28d8dd4f 100644
+--- a/net/sched/sch_netem.c
++++ b/net/sched/sch_netem.c
+@@ -436,8 +436,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ struct netem_skb_cb *cb;
+ struct sk_buff *skb2;
+ struct sk_buff *segs = NULL;
+- unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
+- int nb = 0;
++ unsigned int prev_len = qdisc_pkt_len(skb);
+ int count = 1;
+ int rc = NET_XMIT_SUCCESS;
+ int rc_drop = NET_XMIT_DROP;
+@@ -494,6 +493,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ segs = netem_segment(skb, sch, to_free);
+ if (!segs)
+ return rc_drop;
++ qdisc_skb_cb(segs)->pkt_len = segs->len;
+ } else {
+ segs = skb;
+ }
+@@ -509,6 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+ if (skb->ip_summed == CHECKSUM_PARTIAL &&
+ skb_checksum_help(skb)) {
+ qdisc_drop(skb, sch, to_free);
++ skb = NULL;
+ goto finish_segs;
+ }
+
+@@ -583,6 +584,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+
+ finish_segs:
+ if (segs) {
++ unsigned int len, last_len;
++ int nb;
++
++ len = skb ? skb->len : 0;
++ nb = skb ? 1 : 0;
++
+ while (segs) {
+ skb2 = segs->next;
+ segs->next = NULL;
+@@ -598,9 +605,10 @@ finish_segs:
+ }
+ segs = skb2;
+ }
+- sch->q.qlen += nb;
+- if (nb > 1)
+- qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
++ /* Parent qdiscs accounted for 1 skb of size @prev_len */
++ qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
++ } else if (!skb) {
++ return NET_XMIT_DROP;
+ }
+ return NET_XMIT_SUCCESS;
+ }
+diff --git a/net/sctp/input.c b/net/sctp/input.c
+index bfe29158afcc..f64d882c8698 100644
+--- a/net/sctp/input.c
++++ b/net/sctp/input.c
+@@ -255,7 +255,7 @@ int sctp_rcv(struct sk_buff *skb)
+ bh_lock_sock(sk);
+ }
+
+- if (sock_owned_by_user(sk)) {
++ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+ if (sctp_add_backlog(sk, skb)) {
+ bh_unlock_sock(sk);
+ sctp_chunk_free(chunk);
+@@ -333,7 +333,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+ local_bh_disable();
+ bh_lock_sock(sk);
+
+- if (sock_owned_by_user(sk)) {
++ if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+ if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+ sctp_chunk_free(chunk);
+ else
+@@ -348,7 +348,13 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+ if (backloged)
+ return 0;
+ } else {
+- sctp_inq_push(inqueue, chunk);
++ if (!sctp_newsk_ready(sk)) {
++ if (!sk_add_backlog(sk, skb, sk->sk_rcvbuf))
++ return 0;
++ sctp_chunk_free(chunk);
++ } else {
++ sctp_inq_push(inqueue, chunk);
++ }
+ }
+
+ done:
+diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
+index dbf64a93d68a..371b4cf31fcd 100644
+--- a/net/smc/smc_diag.c
++++ b/net/smc/smc_diag.c
+@@ -38,6 +38,7 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
+ {
+ struct smc_sock *smc = smc_sk(sk);
+
++ r->diag_family = sk->sk_family;
+ if (!smc->clcsock)
+ return;
+ r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
+@@ -45,14 +46,12 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
+ r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
+ sock_diag_save_cookie(sk, r->id.idiag_cookie);
+ if (sk->sk_protocol == SMCPROTO_SMC) {
+- r->diag_family = PF_INET;
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+ r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
+ r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
+ #if IS_ENABLED(CONFIG_IPV6)
+ } else if (sk->sk_protocol == SMCPROTO_SMC6) {
+- r->diag_family = PF_INET6;
+ memcpy(&r->id.idiag_src, &smc->clcsock->sk->sk_v6_rcv_saddr,
+ sizeof(smc->clcsock->sk->sk_v6_rcv_saddr));
+ memcpy(&r->id.idiag_dst, &smc->clcsock->sk->sk_v6_daddr,
+diff --git a/net/smc/smc_rx.c b/net/smc/smc_rx.c
+index bbcf0fe4ae10..36340912df48 100644
+--- a/net/smc/smc_rx.c
++++ b/net/smc/smc_rx.c
+@@ -212,8 +212,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
+ rc = sk_wait_event(sk, timeo,
+ sk->sk_err ||
+ sk->sk_shutdown & RCV_SHUTDOWN ||
+- fcrit(conn) ||
+- smc_cdc_rxed_any_close_or_senddone(conn),
++ fcrit(conn),
+ &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
+ sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+@@ -263,6 +262,18 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
+ return -EAGAIN;
+ }
+
++static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
++{
++ struct smc_connection *conn = &smc->conn;
++
++ if (smc_rx_data_available(conn))
++ return true;
++ else if (conn->urg_state == SMC_URG_VALID)
++ /* we received a single urgent Byte - skip */
++ smc_rx_update_cons(smc, 0);
++ return false;
++}
++
+ /* smc_rx_recvmsg - receive data from RMBE
+ * @msg: copy data to receive buffer
+ * @pipe: copy data to pipe if set - indicates splice() call
+@@ -304,16 +315,18 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
+ if (read_done >= target || (pipe && read_done))
+ break;
+
+- if (atomic_read(&conn->bytes_to_rcv))
++ if (smc_rx_recvmsg_data_available(smc))
+ goto copy;
+- else if (conn->urg_state == SMC_URG_VALID)
+- /* we received a single urgent Byte - skip */
+- smc_rx_update_cons(smc, 0);
+
+ if (sk->sk_shutdown & RCV_SHUTDOWN ||
+- smc_cdc_rxed_any_close_or_senddone(conn) ||
+- conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
++ conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) {
++ /* smc_cdc_msg_recv_action() could have run after
++ * above smc_rx_recvmsg_data_available()
++ */
++ if (smc_rx_recvmsg_data_available(smc))
++ goto copy;
+ break;
++ }
+
+ if (read_done) {
+ if (sk->sk_err ||
+diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
+index 1a65f88d021a..24c7a1e2bd34 100644
+--- a/net/sunrpc/auth_gss/svcauth_gss.c
++++ b/net/sunrpc/auth_gss/svcauth_gss.c
+@@ -1054,24 +1054,32 @@ gss_read_verf(struct rpc_gss_wire_cred *gc,
+ return 0;
+ }
+
+-/* Ok this is really heavily depending on a set of semantics in
+- * how rqstp is set up by svc_recv and pages laid down by the
+- * server when reading a request. We are basically guaranteed that
+- * the token lays all down linearly across a set of pages, starting
+- * at iov_base in rq_arg.head[0] which happens to be the first of a
+- * set of pages stored in rq_pages[].
+- * rq_arg.head[0].iov_base will provide us the page_base to pass
+- * to the upcall.
+- */
+-static inline int
+-gss_read_proxy_verf(struct svc_rqst *rqstp,
+- struct rpc_gss_wire_cred *gc, __be32 *authp,
+- struct xdr_netobj *in_handle,
+- struct gssp_in_token *in_token)
++static void gss_free_in_token_pages(struct gssp_in_token *in_token)
+ {
+- struct kvec *argv = &rqstp->rq_arg.head[0];
+ u32 inlen;
+- int res;
++ int i;
++
++ i = 0;
++ inlen = in_token->page_len;
++ while (inlen) {
++ if (in_token->pages[i])
++ put_page(in_token->pages[i]);
++ inlen -= inlen > PAGE_SIZE ? PAGE_SIZE : inlen;
++ }
++
++ kfree(in_token->pages);
++ in_token->pages = NULL;
++}
++
++static int gss_read_proxy_verf(struct svc_rqst *rqstp,
++ struct rpc_gss_wire_cred *gc, __be32 *authp,
++ struct xdr_netobj *in_handle,
++ struct gssp_in_token *in_token)
++{
++ struct kvec *argv = &rqstp->rq_arg.head[0];
++ unsigned int page_base, length;
++ int pages, i, res;
++ size_t inlen;
+
+ res = gss_read_common_verf(gc, argv, authp, in_handle);
+ if (res)
+@@ -1081,10 +1089,36 @@ gss_read_proxy_verf(struct svc_rqst *rqstp,
+ if (inlen > (argv->iov_len + rqstp->rq_arg.page_len))
+ return SVC_DENIED;
+
+- in_token->pages = rqstp->rq_pages;
+- in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK;
++ pages = DIV_ROUND_UP(inlen, PAGE_SIZE);
++ in_token->pages = kcalloc(pages, sizeof(struct page *), GFP_KERNEL);
++ if (!in_token->pages)
++ return SVC_DENIED;
++ in_token->page_base = 0;
+ in_token->page_len = inlen;
++ for (i = 0; i < pages; i++) {
++ in_token->pages[i] = alloc_page(GFP_KERNEL);
++ if (!in_token->pages[i]) {
++ gss_free_in_token_pages(in_token);
++ return SVC_DENIED;
++ }
++ }
+
++ length = min_t(unsigned int, inlen, argv->iov_len);
++ memcpy(page_address(in_token->pages[0]), argv->iov_base, length);
++ inlen -= length;
++
++ i = 1;
++ page_base = rqstp->rq_arg.page_base;
++ while (inlen) {
++ length = min_t(unsigned int, inlen, PAGE_SIZE);
++ memcpy(page_address(in_token->pages[i]),
++ page_address(rqstp->rq_arg.pages[i]) + page_base,
++ length);
++
++ inlen -= length;
++ page_base = 0;
++ i++;
++ }
+ return 0;
+ }
+
+@@ -1259,8 +1293,11 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
+ break;
+ case GSS_S_COMPLETE:
+ status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle);
+- if (status)
++ if (status) {
++ pr_info("%s: gss_proxy_save_rsc failed (%d)\n",
++ __func__, status);
+ goto out;
++ }
+ cli_handle.data = (u8 *)&handle;
+ cli_handle.len = sizeof(handle);
+ break;
+@@ -1271,15 +1308,20 @@ static int svcauth_gss_proxy_init(struct svc_rqst *rqstp,
+
+ /* Got an answer to the upcall; use it: */
+ if (gss_write_init_verf(sn->rsc_cache, rqstp,
+- &cli_handle, &ud.major_status))
++ &cli_handle, &ud.major_status)) {
++ pr_info("%s: gss_write_init_verf failed\n", __func__);
+ goto out;
++ }
+ if (gss_write_resv(resv, PAGE_SIZE,
+ &cli_handle, &ud.out_token,
+- ud.major_status, ud.minor_status))
++ ud.major_status, ud.minor_status)) {
++ pr_info("%s: gss_write_resv failed\n", __func__);
+ goto out;
++ }
+
+ ret = SVC_COMPLETE;
+ out:
++ gss_free_in_token_pages(&ud.in_token);
+ gssp_free_upcall_data(&ud);
+ return ret;
+ }
+diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
+index 90ec322dbbc0..ef1f3d076af9 100644
+--- a/net/sunrpc/xprtrdma/verbs.c
++++ b/net/sunrpc/xprtrdma/verbs.c
+@@ -1558,10 +1558,11 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
+ rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
+ (const struct ib_recv_wr **)&bad_wr);
+ if (rc) {
+- for (wr = bad_wr; wr; wr = wr->next) {
++ for (wr = bad_wr; wr;) {
+ struct rpcrdma_rep *rep;
+
+ rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
++ wr = wr->next;
+ rpcrdma_recv_buffer_put(rep);
+ --count;
+ }
+diff --git a/net/tipc/link.c b/net/tipc/link.c
+index 0fbf8ea18ce0..cc9a0485536b 100644
+--- a/net/tipc/link.c
++++ b/net/tipc/link.c
+@@ -830,18 +830,31 @@ static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
+ */
+ static void link_prepare_wakeup(struct tipc_link *l)
+ {
++ struct sk_buff_head *wakeupq = &l->wakeupq;
++ struct sk_buff_head *inputq = l->inputq;
+ struct sk_buff *skb, *tmp;
+- int imp, i = 0;
++ struct sk_buff_head tmpq;
++ int avail[5] = {0,};
++ int imp = 0;
++
++ __skb_queue_head_init(&tmpq);
+
+- skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
++ for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
++ avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
++
++ skb_queue_walk_safe(wakeupq, skb, tmp) {
+ imp = TIPC_SKB_CB(skb)->chain_imp;
+- if (l->backlog[imp].len < l->backlog[imp].limit) {
+- skb_unlink(skb, &l->wakeupq);
+- skb_queue_tail(l->inputq, skb);
+- } else if (i++ > 10) {
+- break;
+- }
++ if (avail[imp] <= 0)
++ continue;
++ avail[imp]--;
++ __skb_unlink(skb, wakeupq);
++ __skb_queue_tail(&tmpq, skb);
+ }
++
++ spin_lock_bh(&inputq->lock);
++ skb_queue_splice_tail(&tmpq, inputq);
++ spin_unlock_bh(&inputq->lock);
++
+ }
+
+ void tipc_link_reset(struct tipc_link *l)
+diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c
+index 67f69389ec17..23706ee16607 100644
+--- a/net/tipc/monitor.c
++++ b/net/tipc/monitor.c
+@@ -665,6 +665,21 @@ void tipc_mon_delete(struct net *net, int bearer_id)
+ kfree(mon);
+ }
+
++void tipc_mon_reinit_self(struct net *net)
++{
++ struct tipc_monitor *mon;
++ int bearer_id;
++
++ for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
++ mon = tipc_monitor(net, bearer_id);
++ if (!mon)
++ continue;
++ write_lock_bh(&mon->lock);
++ mon->self->addr = tipc_own_addr(net);
++ write_unlock_bh(&mon->lock);
++ }
++}
++
+ int tipc_nl_monitor_set_threshold(struct net *net, u32 cluster_size)
+ {
+ struct tipc_net *tn = tipc_net(net);
+diff --git a/net/tipc/monitor.h b/net/tipc/monitor.h
+index 2a21b93e0d04..ed63d2e650b0 100644
+--- a/net/tipc/monitor.h
++++ b/net/tipc/monitor.h
+@@ -77,6 +77,7 @@ int __tipc_nl_add_monitor(struct net *net, struct tipc_nl_msg *msg,
+ u32 bearer_id);
+ int tipc_nl_add_monitor_peer(struct net *net, struct tipc_nl_msg *msg,
+ u32 bearer_id, u32 *prev_node);
++void tipc_mon_reinit_self(struct net *net);
+
+ extern const int tipc_max_domain_size;
+ #endif
+diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
+index e0a3dd424d8c..836e629e8f4a 100644
+--- a/net/tipc/name_distr.c
++++ b/net/tipc/name_distr.c
+@@ -94,8 +94,9 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
+ list_add_tail_rcu(&publ->binding_node, &nt->node_scope);
+ return NULL;
+ }
+- list_add_tail_rcu(&publ->binding_node, &nt->cluster_scope);
+-
++ write_lock_bh(&nt->cluster_scope_lock);
++ list_add_tail(&publ->binding_node, &nt->cluster_scope);
++ write_unlock_bh(&nt->cluster_scope_lock);
+ skb = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
+ if (!skb) {
+ pr_warn("Publication distribution failure\n");
+@@ -112,11 +113,13 @@ struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
+ */
+ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
+ {
++ struct name_table *nt = tipc_name_table(net);
+ struct sk_buff *buf;
+ struct distr_item *item;
+
+- list_del_rcu(&publ->binding_node);
+-
++ write_lock_bh(&nt->cluster_scope_lock);
++ list_del(&publ->binding_node);
++ write_unlock_bh(&nt->cluster_scope_lock);
+ if (publ->scope == TIPC_NODE_SCOPE)
+ return NULL;
+
+@@ -147,7 +150,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list,
+ ITEM_SIZE) * ITEM_SIZE;
+ u32 msg_rem = msg_dsz;
+
+- list_for_each_entry_rcu(publ, pls, binding_node) {
++ list_for_each_entry(publ, pls, binding_node) {
+ /* Prepare next buffer: */
+ if (!skb) {
+ skb = named_prepare_buf(net, PUBLICATION, msg_rem,
+@@ -189,11 +192,10 @@ void tipc_named_node_up(struct net *net, u32 dnode)
+
+ __skb_queue_head_init(&head);
+
+- rcu_read_lock();
++ read_lock_bh(&nt->cluster_scope_lock);
+ named_distribute(net, &head, dnode, &nt->cluster_scope);
+- rcu_read_unlock();
+-
+ tipc_node_xmit(net, &head, dnode, 0);
++ read_unlock_bh(&nt->cluster_scope_lock);
+ }
+
+ /**
+diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
+index d72985ca1d55..89993afe0fbd 100644
+--- a/net/tipc/name_table.c
++++ b/net/tipc/name_table.c
+@@ -744,6 +744,7 @@ int tipc_nametbl_init(struct net *net)
+
+ INIT_LIST_HEAD(&nt->node_scope);
+ INIT_LIST_HEAD(&nt->cluster_scope);
++ rwlock_init(&nt->cluster_scope_lock);
+ tn->nametbl = nt;
+ spin_lock_init(&tn->nametbl_lock);
+ return 0;
+diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
+index 892bd750b85f..f79066334cc8 100644
+--- a/net/tipc/name_table.h
++++ b/net/tipc/name_table.h
+@@ -100,6 +100,7 @@ struct name_table {
+ struct hlist_head services[TIPC_NAMETBL_SIZE];
+ struct list_head node_scope;
+ struct list_head cluster_scope;
++ rwlock_t cluster_scope_lock;
+ u32 local_publ_count;
+ };
+
+diff --git a/net/tipc/net.c b/net/tipc/net.c
+index 7ce1e86b024f..2e2e938fe4b7 100644
+--- a/net/tipc/net.c
++++ b/net/tipc/net.c
+@@ -42,6 +42,7 @@
+ #include "node.h"
+ #include "bcast.h"
+ #include "netlink.h"
++#include "monitor.h"
+
+ /*
+ * The TIPC locking policy is designed to ensure a very fine locking
+@@ -136,6 +137,7 @@ static void tipc_net_finalize(struct net *net, u32 addr)
+ tipc_set_node_addr(net, addr);
+ tipc_named_reinit(net);
+ tipc_sk_reinit(net);
++ tipc_mon_reinit_self(net);
+ tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+ TIPC_CLUSTER_SCOPE, 0, addr);
+ }
+diff --git a/net/tipc/node.c b/net/tipc/node.c
+index 32556f480a60..e67ffd194927 100644
+--- a/net/tipc/node.c
++++ b/net/tipc/node.c
+@@ -810,10 +810,10 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
+ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
+ {
+ struct tipc_link_entry *le = &n->links[bearer_id];
++ struct tipc_media_addr *maddr = NULL;
+ struct tipc_link *l = le->link;
+- struct tipc_media_addr *maddr;
+- struct sk_buff_head xmitq;
+ int old_bearer_id = bearer_id;
++ struct sk_buff_head xmitq;
+
+ if (!l)
+ return;
+@@ -835,7 +835,8 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
+ tipc_node_write_unlock(n);
+ if (delete)
+ tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
+- tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
++ if (!skb_queue_empty(&xmitq))
++ tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+ tipc_sk_rcv(n->net, &le->inputq);
+ }
+
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index 5841d62ff580..40947ad90222 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -501,7 +501,7 @@ static void __tipc_shutdown(struct socket *sock, int error)
+ struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
+ struct net *net = sock_net(sk);
+- long timeout = CONN_TIMEOUT_DEFAULT;
++ long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
+ u32 dnode = tsk_peer_node(tsk);
+ struct sk_buff *skb;
+
+diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
+index 1a779b1e8510..40f6d82083d7 100644
+--- a/net/tipc/sysctl.c
++++ b/net/tipc/sysctl.c
+@@ -37,6 +37,8 @@
+
+ #include <linux/sysctl.h>
+
++static int zero;
++static int one = 1;
+ static struct ctl_table_header *tipc_ctl_hdr;
+
+ static struct ctl_table tipc_table[] = {
+@@ -45,14 +47,16 @@ static struct ctl_table tipc_table[] = {
+ .data = &sysctl_tipc_rmem,
+ .maxlen = sizeof(sysctl_tipc_rmem),
+ .mode = 0644,
+- .proc_handler = proc_dointvec,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &one,
+ },
+ {
+ .procname = "named_timeout",
+ .data = &sysctl_tipc_named_timeout,
+ .maxlen = sizeof(sysctl_tipc_named_timeout),
+ .mode = 0644,
+- .proc_handler = proc_dointvec,
++ .proc_handler = proc_dointvec_minmax,
++ .extra1 = &zero,
+ },
+ {}
+ };
+diff --git a/net/tls/tls_device_fallback.c b/net/tls/tls_device_fallback.c
+index 426dd97725e4..6cf832891b53 100644
+--- a/net/tls/tls_device_fallback.c
++++ b/net/tls/tls_device_fallback.c
+@@ -208,6 +208,10 @@ static void complete_skb(struct sk_buff *nskb, struct sk_buff *skb, int headln)
+
+ update_chksum(nskb, headln);
+
++ /* sock_efree means skb must gone through skb_orphan_partial() */
++ if (nskb->destructor == sock_efree)
++ return;
++
+ delta = nskb->truesize - skb->truesize;
+ if (likely(delta < 0))
+ WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
+diff --git a/net/wireless/reg.c b/net/wireless/reg.c
+index 64841238df85..5643bdee7198 100644
+--- a/net/wireless/reg.c
++++ b/net/wireless/reg.c
+@@ -3870,6 +3870,15 @@ static int __init regulatory_init_db(void)
+ {
+ int err;
+
++ /*
++ * It's possible that - due to other bugs/issues - cfg80211
++ * never called regulatory_init() below, or that it failed;
++ * in that case, don't try to do any further work here as
++ * it's doomed to lead to crashes.
++ */
++ if (IS_ERR_OR_NULL(reg_pdev))
++ return -EINVAL;
++
+ err = load_builtin_regdb_keys();
+ if (err)
+ return err;
+diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
+index d9117ab035f7..556a649512b6 100644
+--- a/net/xdp/xdp_umem.c
++++ b/net/xdp/xdp_umem.c
+@@ -23,6 +23,9 @@ void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
+ {
+ unsigned long flags;
+
++ if (!xs->tx)
++ return;
++
+ spin_lock_irqsave(&umem->xsk_list_lock, flags);
+ list_add_rcu(&xs->list, &umem->xsk_list);
+ spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
+@@ -32,6 +35,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
+ {
+ unsigned long flags;
+
++ if (!xs->tx)
++ return;
++
+ spin_lock_irqsave(&umem->xsk_list_lock, flags);
+ list_del_rcu(&xs->list);
+ spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index ff15207036dc..72caa4fb13f4 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -218,6 +218,9 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
+
+ mutex_lock(&xs->mutex);
+
++ if (xs->queue_id >= xs->dev->real_num_tx_queues)
++ goto out;
++
+ while (xskq_peek_desc(xs->tx, &desc)) {
+ char *buffer;
+ u64 addr;
+@@ -228,12 +231,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
+ goto out;
+ }
+
+- if (xskq_reserve_addr(xs->umem->cq))
+- goto out;
+-
+- if (xs->queue_id >= xs->dev->real_num_tx_queues)
+- goto out;
+-
+ len = desc.len;
+ skb = sock_alloc_send_skb(sk, len, 1, &err);
+ if (unlikely(!skb)) {
+@@ -245,7 +242,7 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
+ addr = desc.addr;
+ buffer = xdp_umem_get_data(xs->umem, addr);
+ err = skb_store_bits(skb, 0, buffer, len);
+- if (unlikely(err)) {
++ if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
+ kfree_skb(skb);
+ goto out;
+ }
+@@ -323,7 +320,7 @@ static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
+
+ /* Make sure queue is ready before it can be seen by others */
+ smp_wmb();
+- *queue = q;
++ WRITE_ONCE(*queue, q);
+ return 0;
+ }
+
+@@ -457,7 +454,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
+ }
+
+ xdp_get_umem(umem_xs->umem);
+- xs->umem = umem_xs->umem;
++ WRITE_ONCE(xs->umem, umem_xs->umem);
+ sockfd_put(sock);
+ } else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
+ err = -EINVAL;
+@@ -537,7 +534,7 @@ static int xsk_setsockopt(struct socket *sock, int level, int optname,
+
+ /* Make sure umem is ready before it can be seen by others */
+ smp_wmb();
+- xs->umem = umem;
++ WRITE_ONCE(xs->umem, umem);
+ mutex_unlock(&xs->mutex);
+ return 0;
+ }
+@@ -661,6 +658,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
+ if (!umem)
+ return -EINVAL;
+
++ /* Matches the smp_wmb() in XDP_UMEM_REG */
++ smp_rmb();
+ if (offset == XDP_UMEM_PGOFF_FILL_RING)
+ q = READ_ONCE(umem->fq);
+ else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
+@@ -670,6 +669,8 @@ static int xsk_mmap(struct file *file, struct socket *sock,
+ if (!q)
+ return -EINVAL;
+
++ /* Matches the smp_wmb() in xsk_init_queue */
++ smp_rmb();
+ qpg = virt_to_head_page(q->ring);
+ if (size > (PAGE_SIZE << compound_order(qpg)))
+ return -EINVAL;
+diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
+index d6a3cdf7885c..4ee512622e93 100644
+--- a/net/xfrm/xfrm_interface.c
++++ b/net/xfrm/xfrm_interface.c
+@@ -145,8 +145,6 @@ static int xfrmi_create(struct net_device *dev)
+ if (err < 0)
+ goto out;
+
+- strcpy(xi->p.name, dev->name);
+-
+ dev_hold(dev);
+ xfrmi_link(xfrmn, xi);
+
+@@ -293,7 +291,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ if (tdev == dev) {
+ stats->collisions++;
+ net_warn_ratelimited("%s: Local routing loop detected!\n",
+- xi->p.name);
++ dev->name);
+ goto tx_err_dst_release;
+ }
+
+@@ -648,12 +646,6 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
+ int err;
+
+ xfrmi_netlink_parms(data, &p);
+-
+- if (!tb[IFLA_IFNAME])
+- return -EINVAL;
+-
+- nla_strlcpy(p.name, tb[IFLA_IFNAME], IFNAMSIZ);
+-
+ xi = xfrmi_locate(net, &p);
+ if (xi)
+ return -EEXIST;
+diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
+index ef26f882f92f..a55c81301c1a 100644
+--- a/samples/bpf/xdp_rxq_info_user.c
++++ b/samples/bpf/xdp_rxq_info_user.c
+@@ -472,9 +472,9 @@ int main(int argc, char **argv)
+ if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
+ return EXIT_FAIL;
+
+- map = bpf_map__next(NULL, obj);
+- stats_global_map = bpf_map__next(map, obj);
+- rx_queue_index_map = bpf_map__next(stats_global_map, obj);
++ map = bpf_object__find_map_by_name(obj, "config_map");
++ stats_global_map = bpf_object__find_map_by_name(obj, "stats_global_map");
++ rx_queue_index_map = bpf_object__find_map_by_name(obj, "rx_queue_index_map");
+ if (!map || !stats_global_map || !rx_queue_index_map) {
+ printf("finding a map in obj file failed\n");
+ return EXIT_FAIL;
+diff --git a/security/apparmor/include/cred.h b/security/apparmor/include/cred.h
+index e287b7d0d4be..265ae6641a06 100644
+--- a/security/apparmor/include/cred.h
++++ b/security/apparmor/include/cred.h
+@@ -151,6 +151,8 @@ static inline struct aa_label *begin_current_label_crit_section(void)
+ {
+ struct aa_label *label = aa_current_raw_label();
+
++ might_sleep();
++
+ if (label_is_stale(label)) {
+ label = aa_get_newest_label(label);
+ if (aa_replace_current_label(label) == 0)
+diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
+index 590ca7d8fae5..730de4638b4e 100644
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -114,13 +114,13 @@ static int apparmor_ptrace_access_check(struct task_struct *child,
+ struct aa_label *tracer, *tracee;
+ int error;
+
+- tracer = begin_current_label_crit_section();
++ tracer = __begin_current_label_crit_section();
+ tracee = aa_get_task_label(child);
+ error = aa_may_ptrace(tracer, tracee,
+ (mode & PTRACE_MODE_READ) ? AA_PTRACE_READ
+ : AA_PTRACE_TRACE);
+ aa_put_label(tracee);
+- end_current_label_crit_section(tracer);
++ __end_current_label_crit_section(tracer);
+
+ return error;
+ }
+diff --git a/security/apparmor/net.c b/security/apparmor/net.c
+index bb24cfa0a164..d5d72dd1ca1f 100644
+--- a/security/apparmor/net.c
++++ b/security/apparmor/net.c
+@@ -146,17 +146,20 @@ int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family,
+ static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request,
+ struct sock *sk)
+ {
+- struct aa_profile *profile;
+- DEFINE_AUDIT_SK(sa, op, sk);
++ int error = 0;
+
+ AA_BUG(!label);
+ AA_BUG(!sk);
+
+- if (unconfined(label))
+- return 0;
++ if (!unconfined(label)) {
++ struct aa_profile *profile;
++ DEFINE_AUDIT_SK(sa, op, sk);
+
+- return fn_for_each_confined(label, profile,
+- aa_profile_af_sk_perm(profile, &sa, request, sk));
++ error = fn_for_each_confined(label, profile,
++ aa_profile_af_sk_perm(profile, &sa, request, sk));
++ }
++
++ return error;
+ }
+
+ int aa_sk_perm(const char *op, u32 request, struct sock *sk)
+diff --git a/security/keys/key.c b/security/keys/key.c
+index 249a6da4d277..749a5cf27a19 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+ key->gid = gid;
+ key->perm = perm;
+ key->restrict_link = restrict_link;
++ key->last_used_at = ktime_get_real_seconds();
+
+ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
+ key->flags |= 1 << KEY_FLAG_IN_QUOTA;
+diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
+index d2d96ca082b7..6224fd3bbf7c 100644
+--- a/sound/aoa/codecs/onyx.c
++++ b/sound/aoa/codecs/onyx.c
+@@ -74,8 +74,10 @@ static int onyx_read_register(struct onyx *onyx, u8 reg, u8 *value)
+ return 0;
+ }
+ v = i2c_smbus_read_byte_data(onyx->i2c, reg);
+- if (v < 0)
++ if (v < 0) {
++ *value = 0;
+ return -1;
++ }
+ *value = (u8)v;
+ onyx->cache[ONYX_REG_CONTROL-FIRSTREGISTER] = *value;
+ return 0;
+diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
+index 8a9dd4767b1e..63cc10604afc 100644
+--- a/sound/pci/hda/hda_controller.h
++++ b/sound/pci/hda/hda_controller.h
+@@ -176,11 +176,10 @@ struct azx {
+ #define azx_bus(chip) (&(chip)->bus.core)
+ #define bus_to_azx(_bus) container_of(_bus, struct azx, bus.core)
+
+-#ifdef CONFIG_X86
+-#define azx_snoop(chip) ((chip)->snoop)
+-#else
+-#define azx_snoop(chip) true
+-#endif
++static inline bool azx_snoop(struct azx *chip)
++{
++ return !IS_ENABLED(CONFIG_X86) || chip->snoop;
++}
+
+ /*
+ * macros for easy use
+diff --git a/sound/sh/aica.c b/sound/sh/aica.c
+index 2b26311405a4..ad3f71358486 100644
+--- a/sound/sh/aica.c
++++ b/sound/sh/aica.c
+@@ -303,7 +303,7 @@ static void aica_period_elapsed(struct timer_list *t)
+ {
+ struct snd_card_aica *dreamcastcard = from_timer(dreamcastcard,
+ t, timer);
+- struct snd_pcm_substream *substream = dreamcastcard->timer_substream;
++ struct snd_pcm_substream *substream = dreamcastcard->substream;
+ /*timer function - so cannot sleep */
+ int play_period;
+ struct snd_pcm_runtime *runtime;
+@@ -335,13 +335,6 @@ static void spu_begin_dma(struct snd_pcm_substream *substream)
+ dreamcastcard = substream->pcm->private_data;
+ /*get the queue to do the work */
+ schedule_work(&(dreamcastcard->spu_dma_work));
+- /* Timer may already be running */
+- if (unlikely(dreamcastcard->timer_substream)) {
+- mod_timer(&dreamcastcard->timer, jiffies + 4);
+- return;
+- }
+- timer_setup(&dreamcastcard->timer, aica_period_elapsed, 0);
+- dreamcastcard->timer_substream = substream;
+ mod_timer(&dreamcastcard->timer, jiffies + 4);
+ }
+
+@@ -379,8 +372,8 @@ static int snd_aicapcm_pcm_close(struct snd_pcm_substream
+ {
+ struct snd_card_aica *dreamcastcard = substream->pcm->private_data;
+ flush_work(&(dreamcastcard->spu_dma_work));
+- if (dreamcastcard->timer_substream)
+- del_timer(&dreamcastcard->timer);
++ del_timer(&dreamcastcard->timer);
++ dreamcastcard->substream = NULL;
+ kfree(dreamcastcard->channel);
+ spu_disable();
+ return 0;
+@@ -615,6 +608,7 @@ static int snd_aica_probe(struct platform_device *devptr)
+ "Yamaha AICA Super Intelligent Sound Processor for SEGA Dreamcast");
+ /* Prepare to use the queue */
+ INIT_WORK(&(dreamcastcard->spu_dma_work), run_spu_dma);
++ timer_setup(&dreamcastcard->timer, aica_period_elapsed, 0);
+ /* Load the PCM 'chip' */
+ err = snd_aicapcmchip(dreamcastcard, 0);
+ if (unlikely(err < 0))
+diff --git a/sound/soc/codecs/cs4349.c b/sound/soc/codecs/cs4349.c
+index bee0e343723f..f08d7a296c0c 100644
+--- a/sound/soc/codecs/cs4349.c
++++ b/sound/soc/codecs/cs4349.c
+@@ -381,6 +381,7 @@ static struct i2c_driver cs4349_i2c_driver = {
+ .driver = {
+ .name = "cs4349",
+ .of_match_table = cs4349_of_match,
++ .pm = &cs4349_runtime_pm,
+ },
+ .id_table = cs4349_i2c_id,
+ .probe = cs4349_i2c_probe,
+diff --git a/sound/soc/codecs/es8328.c b/sound/soc/codecs/es8328.c
+index e9fc2fd97d2f..3afa163f7652 100644
+--- a/sound/soc/codecs/es8328.c
++++ b/sound/soc/codecs/es8328.c
+@@ -231,7 +231,7 @@ static const struct soc_enum es8328_rline_enum =
+ ARRAY_SIZE(es8328_line_texts),
+ es8328_line_texts);
+ static const struct snd_kcontrol_new es8328_right_line_controls =
+- SOC_DAPM_ENUM("Route", es8328_lline_enum);
++ SOC_DAPM_ENUM("Route", es8328_rline_enum);
+
+ /* Left Mixer */
+ static const struct snd_kcontrol_new es8328_left_mixer_controls[] = {
+diff --git a/sound/soc/codecs/wm8737.c b/sound/soc/codecs/wm8737.c
+index e9ae821e7609..568b77692f5f 100644
+--- a/sound/soc/codecs/wm8737.c
++++ b/sound/soc/codecs/wm8737.c
+@@ -170,7 +170,7 @@ SOC_DOUBLE("Polarity Invert Switch", WM8737_ADC_CONTROL, 5, 6, 1, 0),
+ SOC_SINGLE("3D Switch", WM8737_3D_ENHANCE, 0, 1, 0),
+ SOC_SINGLE("3D Depth", WM8737_3D_ENHANCE, 1, 15, 0),
+ SOC_ENUM("3D Low Cut-off", low_3d),
+-SOC_ENUM("3D High Cut-off", low_3d),
++SOC_ENUM("3D High Cut-off", high_3d),
+ SOC_SINGLE_TLV("3D ADC Volume", WM8737_3D_ENHANCE, 7, 1, 1, adc_tlv),
+
+ SOC_SINGLE("Noise Gate Switch", WM8737_NOISE_GATE, 0, 1, 0),
+diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
+index ccdf088461b7..54c306707c02 100644
+--- a/sound/soc/codecs/wm9705.c
++++ b/sound/soc/codecs/wm9705.c
+@@ -325,8 +325,7 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
+ if (wm9705->mfd_pdata) {
+ wm9705->ac97 = wm9705->mfd_pdata->ac97;
+ regmap = wm9705->mfd_pdata->regmap;
+- } else {
+-#ifdef CONFIG_SND_SOC_AC97_BUS
++ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
+ wm9705->ac97 = snd_soc_new_ac97_component(component, WM9705_VENDOR_ID,
+ WM9705_VENDOR_ID_MASK);
+ if (IS_ERR(wm9705->ac97)) {
+@@ -339,7 +338,8 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
+ snd_soc_free_ac97_component(wm9705->ac97);
+ return PTR_ERR(regmap);
+ }
+-#endif
++ } else {
++ return -ENXIO;
+ }
+
+ snd_soc_component_set_drvdata(component, wm9705->ac97);
+@@ -350,14 +350,12 @@ static int wm9705_soc_probe(struct snd_soc_component *component)
+
+ static void wm9705_soc_remove(struct snd_soc_component *component)
+ {
+-#ifdef CONFIG_SND_SOC_AC97_BUS
+ struct wm9705_priv *wm9705 = snd_soc_component_get_drvdata(component);
+
+- if (!wm9705->mfd_pdata) {
++ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9705->mfd_pdata) {
+ snd_soc_component_exit_regmap(component);
+ snd_soc_free_ac97_component(wm9705->ac97);
+ }
+-#endif
+ }
+
+ static const struct snd_soc_component_driver soc_component_dev_wm9705 = {
+diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
+index ade34c26ad2f..01949eaba4fd 100644
+--- a/sound/soc/codecs/wm9712.c
++++ b/sound/soc/codecs/wm9712.c
+@@ -638,13 +638,13 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
+ {
+ struct wm9712_priv *wm9712 = snd_soc_component_get_drvdata(component);
+ struct regmap *regmap;
+- int ret;
+
+ if (wm9712->mfd_pdata) {
+ wm9712->ac97 = wm9712->mfd_pdata->ac97;
+ regmap = wm9712->mfd_pdata->regmap;
+- } else {
+-#ifdef CONFIG_SND_SOC_AC97_BUS
++ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
++ int ret;
++
+ wm9712->ac97 = snd_soc_new_ac97_component(component, WM9712_VENDOR_ID,
+ WM9712_VENDOR_ID_MASK);
+ if (IS_ERR(wm9712->ac97)) {
+@@ -659,7 +659,8 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
+ snd_soc_free_ac97_component(wm9712->ac97);
+ return PTR_ERR(regmap);
+ }
+-#endif
++ } else {
++ return -ENXIO;
+ }
+
+ snd_soc_component_init_regmap(component, regmap);
+@@ -672,14 +673,12 @@ static int wm9712_soc_probe(struct snd_soc_component *component)
+
+ static void wm9712_soc_remove(struct snd_soc_component *component)
+ {
+-#ifdef CONFIG_SND_SOC_AC97_BUS
+ struct wm9712_priv *wm9712 = snd_soc_component_get_drvdata(component);
+
+- if (!wm9712->mfd_pdata) {
++ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9712->mfd_pdata) {
+ snd_soc_component_exit_regmap(component);
+ snd_soc_free_ac97_component(wm9712->ac97);
+ }
+-#endif
+ }
+
+ static const struct snd_soc_component_driver soc_component_dev_wm9712 = {
+diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
+index 643863bb32e0..5a2fdf4f69bf 100644
+--- a/sound/soc/codecs/wm9713.c
++++ b/sound/soc/codecs/wm9713.c
+@@ -1214,8 +1214,7 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
+ if (wm9713->mfd_pdata) {
+ wm9713->ac97 = wm9713->mfd_pdata->ac97;
+ regmap = wm9713->mfd_pdata->regmap;
+- } else {
+-#ifdef CONFIG_SND_SOC_AC97_BUS
++ } else if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS)) {
+ wm9713->ac97 = snd_soc_new_ac97_component(component, WM9713_VENDOR_ID,
+ WM9713_VENDOR_ID_MASK);
+ if (IS_ERR(wm9713->ac97))
+@@ -1225,7 +1224,8 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
+ snd_soc_free_ac97_component(wm9713->ac97);
+ return PTR_ERR(regmap);
+ }
+-#endif
++ } else {
++ return -ENXIO;
+ }
+
+ snd_soc_component_init_regmap(component, regmap);
+@@ -1238,14 +1238,12 @@ static int wm9713_soc_probe(struct snd_soc_component *component)
+
+ static void wm9713_soc_remove(struct snd_soc_component *component)
+ {
+-#ifdef CONFIG_SND_SOC_AC97_BUS
+ struct wm9713_priv *wm9713 = snd_soc_component_get_drvdata(component);
+
+- if (!wm9713->mfd_pdata) {
++ if (IS_ENABLED(CONFIG_SND_SOC_AC97_BUS) && !wm9713->mfd_pdata) {
+ snd_soc_component_exit_regmap(component);
+ snd_soc_free_ac97_component(wm9713->ac97);
+ }
+-#endif
+ }
+
+ static const struct snd_soc_component_driver soc_component_dev_wm9713 = {
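
The three wm97xx hunks above replace #ifdef CONFIG_SND_SOC_AC97_BUS blocks with IS_ENABLED() checks, so both branches are always compiled and type-checked while the disabled one is optimized away, and configurations without AC97 support now fail the probe with -ENXIO instead of silently falling through. A minimal sketch of the same pattern, with hypothetical names (my_priv, my_codec_probe, CONFIG_EXAMPLE_BUS), might look like:

/*
 * Sketch of the #ifdef -> IS_ENABLED() conversion used in the wm97xx
 * probe/remove hunks above.  Names here are illustrative, not from the tree.
 */
#include <linux/kconfig.h>	/* IS_ENABLED() */
#include <linux/errno.h>

struct my_priv {
	void *pdata;	/* set when an MFD parent supplied the resources */
};

static int my_codec_probe(struct my_priv *priv)
{
	if (priv->pdata) {
		/* resources handed in by the parent MFD driver */
		return 0;
	} else if (IS_ENABLED(CONFIG_EXAMPLE_BUS)) {
		/*
		 * Always compiled, so breakage in the bus-specific path is
		 * caught even in configs with the option off; the optimizer
		 * drops this branch when the option is disabled.
		 */
		return 0;
	} else {
		/* neither an MFD parent nor bus support: refuse to probe */
		return -ENXIO;
	}
}
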
+diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
+index 6a8c279a4b20..14ab16e1369f 100644
+--- a/sound/soc/davinci/davinci-mcasp.c
++++ b/sound/soc/davinci/davinci-mcasp.c
+@@ -874,14 +874,13 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
+ active_slots = hweight32(mcasp->tdm_mask[stream]);
+ active_serializers = (channels + active_slots - 1) /
+ active_slots;
+- if (active_serializers == 1) {
++ if (active_serializers == 1)
+ active_slots = channels;
+- for (i = 0; i < total_slots; i++) {
+- if ((1 << i) & mcasp->tdm_mask[stream]) {
+- mask |= (1 << i);
+- if (--active_slots <= 0)
+- break;
+- }
++ for (i = 0; i < total_slots; i++) {
++ if ((1 << i) & mcasp->tdm_mask[stream]) {
++ mask |= (1 << i);
++ if (--active_slots <= 0)
++ break;
+ }
+ }
+ } else {
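
The davinci-mcasp hunk above un-nests the mask-building loop from the active_serializers == 1 branch, so the lowest active_slots set bits of tdm_mask are always collected. A standalone sketch of that loop (the helper name build_slot_mask is hypothetical):

/*
 * Sketch of the slot-mask construction from the davinci-mcasp hunk above:
 * keep only the lowest 'active_slots' set bits of 'tdm_mask'.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t build_slot_mask(uint32_t tdm_mask, int total_slots,
				int active_slots)
{
	uint32_t mask = 0;
	int i;

	for (i = 0; i < total_slots; i++) {
		if ((1u << i) & tdm_mask) {
			mask |= (1u << i);
			if (--active_slots <= 0)
				break;
		}
	}
	return mask;
}

int main(void)
{
	/* slots 1, 3 and 4 enabled; keep only the first two active ones */
	printf("0x%x\n", build_slot_mask(0x1a, 32, 2));	/* prints 0xa */
	return 0;
}
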
+diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
+index 9b9a7ec52905..4bd8da3a5f5b 100644
+--- a/sound/soc/fsl/imx-sgtl5000.c
++++ b/sound/soc/fsl/imx-sgtl5000.c
+@@ -112,7 +112,8 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
+ codec_dev = of_find_i2c_device_by_node(codec_np);
+ if (!codec_dev) {
+ dev_err(&pdev->dev, "failed to find codec platform device\n");
+- return -EPROBE_DEFER;
++ ret = -EPROBE_DEFER;
++ goto fail;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+diff --git a/sound/soc/meson/axg-tdmin.c b/sound/soc/meson/axg-tdmin.c
+index bbac44c81688..37207bbebb2a 100644
+--- a/sound/soc/meson/axg-tdmin.c
++++ b/sound/soc/meson/axg-tdmin.c
+@@ -119,7 +119,6 @@ static int axg_tdmin_prepare(struct regmap *map, struct axg_tdm_stream *ts)
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+- case SND_SOC_DAIFMT_RIGHT_J:
+ case SND_SOC_DAIFMT_DSP_B:
+ val = TDMIN_CTRL_IN_BIT_SKEW(2);
+ break;
+diff --git a/sound/soc/meson/axg-tdmout.c b/sound/soc/meson/axg-tdmout.c
+index f73368ee1088..d11acb3cc696 100644
+--- a/sound/soc/meson/axg-tdmout.c
++++ b/sound/soc/meson/axg-tdmout.c
+@@ -136,7 +136,6 @@ static int axg_tdmout_prepare(struct regmap *map, struct axg_tdm_stream *ts)
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+- case SND_SOC_DAIFMT_RIGHT_J:
+ case SND_SOC_DAIFMT_DSP_B:
+ val |= TDMOUT_CTRL0_INIT_BITNUM(2);
+ break;
+diff --git a/sound/soc/qcom/apq8016_sbc.c b/sound/soc/qcom/apq8016_sbc.c
+index 1dd23bba1bed..4b559932adc3 100644
+--- a/sound/soc/qcom/apq8016_sbc.c
++++ b/sound/soc/qcom/apq8016_sbc.c
+@@ -164,41 +164,52 @@ static struct apq8016_sbc_data *apq8016_sbc_parse_of(struct snd_soc_card *card)
+
+ if (!cpu || !codec) {
+ dev_err(dev, "Can't find cpu/codec DT node\n");
+- return ERR_PTR(-EINVAL);
++ ret = -EINVAL;
++ goto error;
+ }
+
+ link->cpu_of_node = of_parse_phandle(cpu, "sound-dai", 0);
+ if (!link->cpu_of_node) {
+ dev_err(card->dev, "error getting cpu phandle\n");
+- return ERR_PTR(-EINVAL);
++ ret = -EINVAL;
++ goto error;
+ }
+
+ ret = snd_soc_of_get_dai_name(cpu, &link->cpu_dai_name);
+ if (ret) {
+ dev_err(card->dev, "error getting cpu dai name\n");
+- return ERR_PTR(ret);
++ goto error;
+ }
+
+ ret = snd_soc_of_get_dai_link_codecs(dev, codec, link);
+
+ if (ret < 0) {
+ dev_err(card->dev, "error getting codec dai name\n");
+- return ERR_PTR(ret);
++ goto error;
+ }
+
+ link->platform_of_node = link->cpu_of_node;
+ ret = of_property_read_string(np, "link-name", &link->name);
+ if (ret) {
+ dev_err(card->dev, "error getting codec dai_link name\n");
+- return ERR_PTR(ret);
++ goto error;
+ }
+
+ link->stream_name = link->name;
+ link->init = apq8016_sbc_dai_init;
+ link++;
++
++ of_node_put(cpu);
++ of_node_put(codec);
+ }
+
+ return data;
++
++ error:
++ of_node_put(np);
++ of_node_put(cpu);
++ of_node_put(codec);
++ return ERR_PTR(ret);
+ }
+
+ static const struct snd_soc_dapm_widget apq8016_sbc_dapm_widgets[] = {
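
The apq8016_sbc hunk above funnels every failure through a shared error label and adds matching of_node_put() calls on the success path, so the device_node references taken in the loop are always released. A condensed sketch of that structure (parse_one_link and the child-node names are illustrative, not taken from the driver):

/*
 * Condensed sketch of the error-path structure from the apq8016_sbc hunk:
 * every exit, successful or not, drops the device_node references.
 */
#include <linux/of.h>
#include <linux/errno.h>

static int parse_one_link(struct device_node *np)
{
	struct device_node *cpu, *codec;
	int ret;

	cpu = of_get_child_by_name(np, "cpu");
	codec = of_get_child_by_name(np, "codec");
	if (!cpu || !codec) {
		ret = -EINVAL;
		goto error;
	}

	/* ... use cpu/codec here; any failure sets ret and jumps to error ... */

	of_node_put(cpu);
	of_node_put(codec);
	return 0;

error:
	of_node_put(cpu);	/* of_node_put(NULL) is a safe no-op */
	of_node_put(codec);
	return ret;
}
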
+diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
+index 551bfc581fc1..53fefa7c982f 100644
+--- a/sound/soc/soc-pcm.c
++++ b/sound/soc/soc-pcm.c
+@@ -42,8 +42,8 @@ static bool snd_soc_dai_stream_valid(struct snd_soc_dai *dai, int stream)
+ else
+ codec_stream = &dai->driver->capture;
+
+- /* If the codec specifies any rate at all, it supports the stream. */
+- return codec_stream->rates;
++ /* If the codec specifies any channels at all, it supports the stream */
++ return codec_stream->channels_min;
+ }
+
+ /**
+diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
+index 18cf8404d27c..f248e563986c 100644
+--- a/sound/soc/sunxi/sun4i-i2s.c
++++ b/sound/soc/sunxi/sun4i-i2s.c
+@@ -80,8 +80,8 @@
+ #define SUN4I_I2S_CLK_DIV_MCLK_MASK GENMASK(3, 0)
+ #define SUN4I_I2S_CLK_DIV_MCLK(mclk) ((mclk) << 0)
+
+-#define SUN4I_I2S_RX_CNT_REG 0x28
+-#define SUN4I_I2S_TX_CNT_REG 0x2c
++#define SUN4I_I2S_TX_CNT_REG 0x28
++#define SUN4I_I2S_RX_CNT_REG 0x2c
+
+ #define SUN4I_I2S_TX_CHAN_SEL_REG 0x30
+ #define SUN4I_I2S_CHAN_SEL(num_chan) (((num_chan) - 1) << 0)
+diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
+index bf615fa16dc8..a3db6a68dfe6 100644
+--- a/sound/soc/sunxi/sun8i-codec.c
++++ b/sound/soc/sunxi/sun8i-codec.c
+@@ -465,7 +465,11 @@ static const struct snd_soc_dapm_route sun8i_codec_dapm_routes[] = {
+ { "Right Digital DAC Mixer", "AIF1 Slot 0 Digital DAC Playback Switch",
+ "AIF1 Slot 0 Right"},
+
+- /* ADC routes */
++ /* ADC Routes */
++ { "AIF1 Slot 0 Right ADC", NULL, "ADC" },
++ { "AIF1 Slot 0 Left ADC", NULL, "ADC" },
++
++ /* ADC Mixer Routes */
+ { "Left Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
+ "AIF1 Slot 0 Left ADC" },
+ { "Right Digital ADC Mixer", "AIF1 Data Digital ADC Capture Switch",
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index d7778f2bcbf8..6ac6a0980124 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -3480,7 +3480,9 @@ int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
+ if (err < 0)
+ goto _error;
+
+- snd_usb_mixer_apply_create_quirk(mixer);
++ err = snd_usb_mixer_apply_create_quirk(mixer);
++ if (err < 0)
++ goto _error;
+
+ err = snd_device_new(chip->card, SNDRV_DEV_CODEC, mixer, &dev_ops);
+ if (err < 0)
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index 65f9c4ba62ee..90d4f61cc230 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -3349,19 +3349,14 @@ AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_MIXER,
+ },
+- /* Capture */
+- {
+- .ifnum = 1,
+- .type = QUIRK_IGNORE_INTERFACE,
+- },
+ /* Playback */
+ {
+- .ifnum = 2,
++ .ifnum = 1,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 2,
+- .iface = 2,
++ .iface = 1,
+ .altsetting = 1,
+ .altset_idx = 1,
+ .attributes = UAC_EP_CS_ATTR_FILL_MAX |
+diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c
+index e4e6e2b3fd84..ff0cc3c17141 100644
+--- a/tools/bpf/bpftool/btf_dumper.c
++++ b/tools/bpf/bpftool/btf_dumper.c
+@@ -26,9 +26,9 @@ static void btf_dumper_ptr(const void *data, json_writer_t *jw,
+ bool is_plain_text)
+ {
+ if (is_plain_text)
+- jsonw_printf(jw, "%p", *(unsigned long *)data);
++ jsonw_printf(jw, "%p", data);
+ else
+- jsonw_printf(jw, "%u", *(unsigned long *)data);
++ jsonw_printf(jw, "%lu", *(unsigned long *)data);
+ }
+
+ static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
+@@ -129,7 +129,7 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
+ switch (BTF_INT_ENCODING(*int_type)) {
+ case 0:
+ if (BTF_INT_BITS(*int_type) == 64)
+- jsonw_printf(jw, "%lu", *(__u64 *)data);
++ jsonw_printf(jw, "%llu", *(__u64 *)data);
+ else if (BTF_INT_BITS(*int_type) == 32)
+ jsonw_printf(jw, "%u", *(__u32 *)data);
+ else if (BTF_INT_BITS(*int_type) == 16)
+@@ -142,7 +142,7 @@ static int btf_dumper_int(const struct btf_type *t, __u8 bit_offset,
+ break;
+ case BTF_INT_SIGNED:
+ if (BTF_INT_BITS(*int_type) == 64)
+- jsonw_printf(jw, "%ld", *(long long *)data);
++ jsonw_printf(jw, "%lld", *(long long *)data);
+ else if (BTF_INT_BITS(*int_type) == 32)
+ jsonw_printf(jw, "%d", *(int *)data);
+ else if (BTF_INT_BITS(*int_type) == 16)
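
The btf_dumper hunk above prints __u64 values with %llu/%lld, since long is only 32 bits wide on 32-bit targets and %lu/%ld would mismatch the 64-bit operand. A small userspace sketch of the portable options (the ll length modifier with a matching cast, or the inttypes.h PRI macros):

/*
 * Sketch of printing 64-bit values portably, as in the btf_dumper hunk
 * above: "%llu"/"%lld" with (unsigned) long long operands, or PRIu64/PRId64.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint64_t u = 18446744073709551615ULL;	/* UINT64_MAX */
	int64_t  s = -42;

	printf("%llu\n", (unsigned long long)u);
	printf("%lld\n", (long long)s);
	printf("%" PRIu64 " %" PRId64 "\n", u, s);
	return 0;
}
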
+diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c
+index ee7a9765c6b3..adbcd84818f7 100644
+--- a/tools/bpf/bpftool/cgroup.c
++++ b/tools/bpf/bpftool/cgroup.c
+@@ -164,7 +164,7 @@ static int do_show(int argc, char **argv)
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+- p_err("can't open cgroup %s", argv[1]);
++ p_err("can't open cgroup %s", argv[0]);
+ goto exit;
+ }
+
+@@ -345,7 +345,7 @@ static int do_attach(int argc, char **argv)
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+- p_err("can't open cgroup %s", argv[1]);
++ p_err("can't open cgroup %s", argv[0]);
+ goto exit;
+ }
+
+@@ -403,7 +403,7 @@ static int do_detach(int argc, char **argv)
+
+ cgroup_fd = open(argv[0], O_RDONLY);
+ if (cgroup_fd < 0) {
+- p_err("can't open cgroup %s", argv[1]);
++ p_err("can't open cgroup %s", argv[0]);
+ goto exit;
+ }
+
+diff --git a/tools/bpf/bpftool/map_perf_ring.c b/tools/bpf/bpftool/map_perf_ring.c
+index 6d41323be291..8ec0148d7426 100644
+--- a/tools/bpf/bpftool/map_perf_ring.c
++++ b/tools/bpf/bpftool/map_perf_ring.c
+@@ -205,7 +205,7 @@ int do_event_pipe(int argc, char **argv)
+ NEXT_ARG();
+ cpu = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+- p_err("can't parse %s as CPU ID", **argv);
++ p_err("can't parse %s as CPU ID", *argv);
+ goto err_close_map;
+ }
+
+@@ -216,7 +216,7 @@ int do_event_pipe(int argc, char **argv)
+ NEXT_ARG();
+ index = strtoul(*argv, &endptr, 0);
+ if (*endptr) {
+- p_err("can't parse %s as index", **argv);
++ p_err("can't parse %s as index", *argv);
+ goto err_close_map;
+ }
+
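
The bpftool hunks above fix the error messages to print *argv (the argument string) rather than argv[1] or **argv (a single character); the strtoul()-plus-endptr validation itself is unchanged. A minimal sketch of that parsing idiom:

/*
 * Sketch of the strtoul()+endptr validation used in the bpftool hunks above;
 * on bad input the offending string itself is reported.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	unsigned long cpu;
	char *endptr;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <cpu>\n", argv[0]);
		return 1;
	}

	cpu = strtoul(argv[1], &endptr, 0);
	if (*endptr) {
		fprintf(stderr, "can't parse %s as CPU ID\n", argv[1]);
		return 1;
	}

	printf("cpu = %lu\n", cpu);
	return 0;
}
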
+diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
+index 21f867a543e0..ab208400ea14 100644
+--- a/tools/perf/util/machine.c
++++ b/tools/perf/util/machine.c
+@@ -681,24 +681,6 @@ int machine__process_switch_event(struct machine *machine __maybe_unused,
+ return 0;
+ }
+
+-static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
+-{
+- const char *dup_filename;
+-
+- if (!filename || !dso || !dso->long_name)
+- return;
+- if (dso->long_name[0] != '[')
+- return;
+- if (!strchr(filename, '/'))
+- return;
+-
+- dup_filename = strdup(filename);
+- if (!dup_filename)
+- return;
+-
+- dso__set_long_name(dso, dup_filename, true);
+-}
+-
+ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
+ const char *filename)
+ {
+@@ -710,15 +692,8 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start,
+ return NULL;
+
+ map = map_groups__find_by_name(&machine->kmaps, m.name);
+- if (map) {
+- /*
+- * If the map's dso is an offline module, give dso__load()
+- * a chance to find the file path of that module by fixing
+- * long_name.
+- */
+- dso__adjust_kmod_long_name(map->dso, filename);
++ if (map)
+ goto out;
+- }
+
+ dso = machine__findnew_module_dso(machine, &m, filename);
+ if (dso == NULL)
+diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
+index dac927e82336..4c156aeab6b8 100644
+--- a/tools/testing/selftests/ipc/msgque.c
++++ b/tools/testing/selftests/ipc/msgque.c
+@@ -1,9 +1,10 @@
+ // SPDX-License-Identifier: GPL-2.0
++#define _GNU_SOURCE
+ #include <stdlib.h>
+ #include <stdio.h>
+ #include <string.h>
+ #include <errno.h>
+-#include <linux/msg.h>
++#include <sys/msg.h>
+ #include <fcntl.h>
+
+ #include "../kselftest.h"
+@@ -73,7 +74,7 @@ int restore_queue(struct msgque_data *msgque)
+ return 0;
+
+ destroy:
+- if (msgctl(id, IPC_RMID, 0))
++ if (msgctl(id, IPC_RMID, NULL))
+ printf("Failed to destroy queue: %d\n", -errno);
+ return ret;
+ }
+@@ -120,7 +121,7 @@ int check_and_destroy_queue(struct msgque_data *msgque)
+
+ ret = 0;
+ err:
+- if (msgctl(msgque->msq_id, IPC_RMID, 0)) {
++ if (msgctl(msgque->msq_id, IPC_RMID, NULL)) {
+ printf("Failed to destroy queue: %d\n", -errno);
+ return -errno;
+ }
+@@ -129,7 +130,7 @@ err:
+
+ int dump_queue(struct msgque_data *msgque)
+ {
+- struct msqid64_ds ds;
++ struct msqid_ds ds;
+ int kern_id;
+ int i, ret;
+
+@@ -245,7 +246,7 @@ int main(int argc, char **argv)
+ return ksft_exit_pass();
+
+ err_destroy:
+- if (msgctl(msgque.msq_id, IPC_RMID, 0)) {
++ if (msgctl(msgque.msq_id, IPC_RMID, NULL)) {
+ printf("Failed to destroy queue: %d\n", -errno);
+ return ksft_exit_fail();
+ }
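
The msgque.c hunk above moves the selftest onto the libc SysV IPC interface: <sys/msg.h> instead of <linux/msg.h>, struct msqid_ds instead of the kernel-internal msqid64_ds, and NULL rather than 0 for the unused msgctl() buffer argument. A small standalone sketch of those calls:

/*
 * Sketch of the libc SysV message-queue calls the msgque.c hunk above
 * converts to: <sys/msg.h>, struct msqid_ds, and NULL for the unused
 * msgctl() buffer argument.
 */
#include <stdio.h>
#include <errno.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int main(void)
{
	struct msqid_ds ds;
	int id;

	id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
	if (id < 0) {
		perror("msgget");
		return 1;
	}

	if (msgctl(id, IPC_STAT, &ds) < 0)
		perror("msgctl(IPC_STAT)");
	else
		printf("queue %d: %lu messages\n", id,
		       (unsigned long)ds.msg_qnum);

	if (msgctl(id, IPC_RMID, NULL) < 0)	/* buffer is unused here */
		perror("msgctl(IPC_RMID)");
	return 0;
}
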