author     Alice Ferrazzi <alicef@gentoo.org>  2021-03-04 21:06:21 +0900
committer  Alice Ferrazzi <alicef@gentoo.org>  2021-03-04 21:06:32 +0900
commit     3deea72d2b30ab70beb05aeea4a79a496cabf190 (patch)
tree       ad6540d787e83122afad36f83fd55fa4e0f2be1f
parent     Remove old wireguard patch (diff)
download   linux-patches-3deea72d2b30ab70beb05aeea4a79a496cabf190.tar.gz
           linux-patches-3deea72d2b30ab70beb05aeea4a79a496cabf190.tar.bz2
           linux-patches-3deea72d2b30ab70beb05aeea4a79a496cabf190.zip
Linux patch 5.4.102
Signed-off-by: Alice Ferrazzi <alicef@gentoo.org>
-rw-r--r--  0000_README                   4
-rw-r--r--  1101_linux-5.4.102.patch  11908
2 files changed, 11912 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index da56deed..4e6bf3e9 100644
--- a/0000_README
+++ b/0000_README
@@ -447,6 +447,10 @@ Patch: 1100_linux-5.4.101.patch
From: http://www.kernel.org
Desc: Linux 5.4.101
+Patch: 1101_linux-5.4.102.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.102
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1101_linux-5.4.102.patch b/1101_linux-5.4.102.patch
new file mode 100644
index 00000000..cb9e2715
--- /dev/null
+++ b/1101_linux-5.4.102.patch
@@ -0,0 +1,11908 @@
+diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
+index d412b236a9d6f..7cf7143921a1f 100644
+--- a/Documentation/filesystems/seq_file.txt
++++ b/Documentation/filesystems/seq_file.txt
+@@ -192,6 +192,12 @@ between the calls to start() and stop(), so holding a lock during that time
+ is a reasonable thing to do. The seq_file code will also avoid taking any
+ other locks while the iterator is active.
+
++The iterator value returned by start() or next() is guaranteed to be
++passed to a subsequent next() or stop() call. This allows resources
++such as locks that were taken to be reliably released. There is *no*
++guarantee that the iterator will be passed to show(), though in practice
++it often will be.
++
+
+ Formatted output
+
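The documentation hunk above pins down the seq_file iterator contract: whatever start() or next() returns is guaranteed to reach a later next() or stop(), while show() may be skipped, so per-entry resources must be released in next() or stop() and never in show(). (The arch/x86/mm/pat.c hunk later in this patch is exactly that fix.) A minimal sketch of an iterator that leans on the guarantee; struct demo_entry and demo_get_entry() are illustrative stand-ins, not from the patch:

    #include <linux/seq_file.h>
    #include <linux/slab.h>

    struct demo_entry { char name[16]; };
    /* returns a kmalloc'd entry, or NULL at end of sequence */
    static struct demo_entry *demo_get_entry(loff_t pos);

    static void *demo_start(struct seq_file *m, loff_t *pos)
    {
            return demo_get_entry(*pos);
    }

    static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
    {
            kfree(v);               /* the previous entry reliably reaches us */
            ++*pos;
            return demo_get_entry(*pos);
    }

    static void demo_stop(struct seq_file *m, void *v)
    {
            kfree(v);               /* the last value always reaches stop() */
    }

    static int demo_show(struct seq_file *m, void *v)
    {
            /* show() is not guaranteed to run for every entry: print only */
            seq_printf(m, "%s\n", ((struct demo_entry *)v)->name);
            return 0;
    }

    static const struct seq_operations demo_seq_ops = {
            .start = demo_start,
            .next  = demo_next,
            .stop  = demo_stop,
            .show  = demo_show,
    };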
+diff --git a/Makefile b/Makefile
+index f56442751d2c3..80ff67e5f73a6 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 101
++SUBLEVEL = 102
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
+index 93dffed0ac6e0..cbe126297f549 100644
+--- a/arch/arm/boot/compressed/head.S
++++ b/arch/arm/boot/compressed/head.S
+@@ -1142,9 +1142,9 @@ __armv4_mmu_cache_off:
+ __armv7_mmu_cache_off:
+ mrc p15, 0, r0, c1, c0
+ #ifdef CONFIG_MMU
+- bic r0, r0, #0x000d
++ bic r0, r0, #0x0005
+ #else
+- bic r0, r0, #0x000c
++ bic r0, r0, #0x0004
+ #endif
+ mcr p15, 0, r0, c1, c0 @ turn MMU and cache off
+ mov r12, lr
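For context on the mask change above: in the ARM system control register, bit 0 (M) enables the MMU and bit 2 (C) the data cache, so 0x0005 clears exactly those two. The old 0x000d mask also cleared bit 3, which ARMv7 treats as reserved and which, to the best of my reading, newer AArch32 cores redefine (nTLSMD); leaving it alone is the point of this hunk. A hedged C restatement of the two masks:

    #include <stdint.h>

    #define SCTLR_M    (1u << 0)   /* MMU enable */
    #define SCTLR_C    (1u << 2)   /* data cache enable */
    #define SCTLR_BIT3 (1u << 3)   /* reserved on ARMv7; repurposed on newer cores */

    /* What the CONFIG_MMU branch of __armv7_mmu_cache_off now computes. */
    static inline uint32_t sctlr_cache_off(uint32_t sctlr)
    {
            /* old mask 0x000d also cleared SCTLR_BIT3; new mask 0x0005 does not */
            return sctlr & ~(SCTLR_M | SCTLR_C);
    }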
+diff --git a/arch/arm/boot/dts/armada-388-helios4.dts b/arch/arm/boot/dts/armada-388-helios4.dts
+index 705adfa8c680f..a94758090fb0d 100644
+--- a/arch/arm/boot/dts/armada-388-helios4.dts
++++ b/arch/arm/boot/dts/armada-388-helios4.dts
+@@ -70,6 +70,9 @@
+
+ system-leds {
+ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&helios_system_led_pins>;
++
+ status-led {
+ label = "helios4:green:status";
+ gpios = <&gpio0 24 GPIO_ACTIVE_LOW>;
+@@ -86,6 +89,9 @@
+
+ io-leds {
+ compatible = "gpio-leds";
++ pinctrl-names = "default";
++ pinctrl-0 = <&helios_io_led_pins>;
++
+ sata1-led {
+ label = "helios4:green:ata1";
+ gpios = <&gpio1 17 GPIO_ACTIVE_LOW>;
+@@ -121,11 +127,15 @@
+ fan1: j10-pwm {
+ compatible = "pwm-fan";
+ pwms = <&gpio1 9 40000>; /* Target freq:25 kHz */
++ pinctrl-names = "default";
++ pinctrl-0 = <&helios_fan1_pins>;
+ };
+
+ fan2: j17-pwm {
+ compatible = "pwm-fan";
+ pwms = <&gpio1 23 40000>; /* Target freq:25 kHz */
++ pinctrl-names = "default";
++ pinctrl-0 = <&helios_fan2_pins>;
+ };
+
+ usb2_phy: usb2-phy {
+@@ -291,16 +301,22 @@
+ "mpp39", "mpp40";
+ marvell,function = "sd0";
+ };
+- helios_led_pins: helios-led-pins {
+- marvell,pins = "mpp24", "mpp25",
+- "mpp49", "mpp50",
++ helios_system_led_pins: helios-system-led-pins {
++ marvell,pins = "mpp24", "mpp25";
++ marvell,function = "gpio";
++ };
++ helios_io_led_pins: helios-io-led-pins {
++ marvell,pins = "mpp49", "mpp50",
+ "mpp52", "mpp53",
+ "mpp54";
+ marvell,function = "gpio";
+ };
+- helios_fan_pins: helios-fan-pins {
+- marvell,pins = "mpp41", "mpp43",
+- "mpp48", "mpp55";
++ helios_fan1_pins: helios_fan1_pins {
++ marvell,pins = "mpp41", "mpp43";
++ marvell,function = "gpio";
++ };
++ helios_fan2_pins: helios_fan2_pins {
++ marvell,pins = "mpp48", "mpp55";
+ marvell,function = "gpio";
+ };
+ microsom_spi1_cs_pins: spi1-cs-pins {
+diff --git a/arch/arm/boot/dts/aspeed-g4.dtsi b/arch/arm/boot/dts/aspeed-g4.dtsi
+index dffb595d30e40..679d04d585a4a 100644
+--- a/arch/arm/boot/dts/aspeed-g4.dtsi
++++ b/arch/arm/boot/dts/aspeed-g4.dtsi
+@@ -371,6 +371,7 @@
+ compatible = "aspeed,ast2400-ibt-bmc";
+ reg = <0xc0 0x18>;
+ interrupts = <8>;
++ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+ };
+diff --git a/arch/arm/boot/dts/aspeed-g5.dtsi b/arch/arm/boot/dts/aspeed-g5.dtsi
+index e8feb8b66a2f7..412c96b3c3ac0 100644
+--- a/arch/arm/boot/dts/aspeed-g5.dtsi
++++ b/arch/arm/boot/dts/aspeed-g5.dtsi
+@@ -464,6 +464,7 @@
+ compatible = "aspeed,ast2500-ibt-bmc";
+ reg = <0xc0 0x18>;
+ interrupts = <8>;
++ clocks = <&syscon ASPEED_CLK_GATE_LCLK>;
+ status = "disabled";
+ };
+ };
+diff --git a/arch/arm/boot/dts/exynos3250-artik5.dtsi b/arch/arm/boot/dts/exynos3250-artik5.dtsi
+index dee35e3a5c4ba..69d134db6e94e 100644
+--- a/arch/arm/boot/dts/exynos3250-artik5.dtsi
++++ b/arch/arm/boot/dts/exynos3250-artik5.dtsi
+@@ -75,7 +75,7 @@
+ s2mps14_pmic@66 {
+ compatible = "samsung,s2mps14-pmic";
+ interrupt-parent = <&gpx3>;
+- interrupts = <5 IRQ_TYPE_NONE>;
++ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&s2mps14_irq>;
+ reg = <0x66>;
+diff --git a/arch/arm/boot/dts/exynos3250-monk.dts b/arch/arm/boot/dts/exynos3250-monk.dts
+index 248bd372fe705..a23a8749c94e4 100644
+--- a/arch/arm/boot/dts/exynos3250-monk.dts
++++ b/arch/arm/boot/dts/exynos3250-monk.dts
+@@ -195,7 +195,7 @@
+ s2mps14_pmic@66 {
+ compatible = "samsung,s2mps14-pmic";
+ interrupt-parent = <&gpx0>;
+- interrupts = <7 IRQ_TYPE_NONE>;
++ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x66>;
+ wakeup-source;
+
+diff --git a/arch/arm/boot/dts/exynos3250-rinato.dts b/arch/arm/boot/dts/exynos3250-rinato.dts
+index 86c26a4edfd72..468932f452895 100644
+--- a/arch/arm/boot/dts/exynos3250-rinato.dts
++++ b/arch/arm/boot/dts/exynos3250-rinato.dts
+@@ -260,7 +260,7 @@
+ s2mps14_pmic@66 {
+ compatible = "samsung,s2mps14-pmic";
+ interrupt-parent = <&gpx0>;
+- interrupts = <7 IRQ_TYPE_NONE>;
++ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x66>;
+ wakeup-source;
+
+diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
+index 3d501926c2278..2355c53164840 100644
+--- a/arch/arm/boot/dts/exynos5250-spring.dts
++++ b/arch/arm/boot/dts/exynos5250-spring.dts
+@@ -108,7 +108,7 @@
+ compatible = "samsung,s5m8767-pmic";
+ reg = <0x66>;
+ interrupt-parent = <&gpx3>;
+- interrupts = <2 IRQ_TYPE_NONE>;
++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&s5m8767_irq &s5m8767_dvs &s5m8767_ds>;
+ wakeup-source;
+diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+index 592d7b45ecc87..53bf988855e0d 100644
+--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
++++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+@@ -349,7 +349,7 @@
+ reg = <0x66>;
+
+ interrupt-parent = <&gpx3>;
+- interrupts = <2 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&s2mps11_irq>;
+
+diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+index 829147e320e08..9e64a4ab94940 100644
+--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
++++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+@@ -141,7 +141,7 @@
+ samsung,s2mps11-acokb-ground;
+
+ interrupt-parent = <&gpx0>;
+- interrupts = <4 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&s2mps11_irq>;
+
+diff --git a/arch/arm/boot/dts/omap443x.dtsi b/arch/arm/boot/dts/omap443x.dtsi
+index 86b9caf461dfa..6e320efd9fc1d 100644
+--- a/arch/arm/boot/dts/omap443x.dtsi
++++ b/arch/arm/boot/dts/omap443x.dtsi
+@@ -33,10 +33,12 @@
+ };
+
+ ocp {
++ /* 4430 has only gpio_86 tshut and no talert interrupt */
+ bandgap: bandgap@4a002260 {
+ reg = <0x4a002260 0x4
+ 0x4a00232C 0x4>;
+ compatible = "ti,omap4430-bandgap";
++ gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+
+ #thermal-sensor-cells = <0>;
+ };
+diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
+index f7211b57b1e78..165c184801e19 100644
+--- a/arch/arm/mach-ixp4xx/Kconfig
++++ b/arch/arm/mach-ixp4xx/Kconfig
+@@ -13,7 +13,6 @@ config MACH_IXP4XX_OF
+ select I2C
+ select I2C_IOP3XX
+ select PCI
+- select TIMER_OF
+ select USE_OF
+ help
+ Say 'Y' here to support Device Tree-based IXP4xx platforms.
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index a0bc9bbb92f34..9c8ea59398658 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -489,7 +489,7 @@ config ARM64_ERRATUM_1024718
+ help
+ This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
+
+- Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
++ Affected Cortex-A55 cores (all revisions) could cause incorrect
+ update of the hardware dirty bit when the DBM/AP bits are updated
+ without a break-before-make. The workaround is to disable the usage
+ of hardware DBM locally on the affected cores. CPUs not affected by
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+index 78c82a665c84a..bb1de8217b86d 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
+@@ -103,8 +103,6 @@
+ };
+
+ &ehci0 {
+- phys = <&usbphy 0>;
+- phy-names = "usb";
+ status = "okay";
+ };
+
+@@ -142,6 +140,7 @@
+ pinctrl-0 = <&mmc2_pins>, <&mmc2_ds_pin>;
+ vmmc-supply = <&reg_dcdc1>;
+ vqmmc-supply = <&reg_eldo1>;
++ max-frequency = <200000000>;
+ bus-width = <8>;
+ non-removable;
+ cap-mmc-hw-reset;
+@@ -150,8 +149,6 @@
+ };
+
+ &ohci0 {
+- phys = <&usbphy 0>;
+- phy-names = "usb";
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+index 9d20e13f0c02b..d935e3028fcb6 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-sopine.dtsi
+@@ -55,7 +55,6 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
+ vmmc-supply = <&reg_dcdc1>;
+- non-removable;
+ disable-wp;
+ bus-width = <4>;
+ cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+index 367699c8c9028..cf9e3234afaf8 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
+@@ -476,7 +476,7 @@
+ resets = <&ccu RST_BUS_MMC2>;
+ reset-names = "ahb";
+ interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
+- max-frequency = <200000000>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -530,6 +530,8 @@
+ <&ccu CLK_USB_OHCI0>;
+ resets = <&ccu RST_BUS_OHCI0>,
+ <&ccu RST_BUS_EHCI0>;
++ phys = <&usbphy 0>;
++ phy-names = "usb";
+ status = "disabled";
+ };
+
+@@ -540,6 +542,8 @@
+ clocks = <&ccu CLK_BUS_OHCI0>,
+ <&ccu CLK_USB_OHCI0>;
+ resets = <&ccu RST_BUS_OHCI0>;
++ phys = <&usbphy 0>;
++ phy-names = "usb";
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+index ab081efd59718..1583cd5915214 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
+@@ -332,6 +332,7 @@
+ interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc0_pins>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -348,6 +349,7 @@
+ interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -364,6 +366,7 @@
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc2_pins>;
++ max-frequency = <150000000>;
+ status = "disabled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+@@ -533,6 +536,8 @@
+ <&ccu CLK_USB_OHCI0>;
+ resets = <&ccu RST_BUS_OHCI0>,
+ <&ccu RST_BUS_EHCI0>;
++ phys = <&usb2phy 0>;
++ phy-names = "usb";
+ status = "disabled";
+ };
+
+@@ -543,6 +548,8 @@
+ clocks = <&ccu CLK_BUS_OHCI0>,
+ <&ccu CLK_USB_OHCI0>;
+ resets = <&ccu RST_BUS_OHCI0>;
++ phys = <&usb2phy 0>;
++ phy-names = "usb";
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+index 6f90b0e62cba6..148bdca8d9c96 100644
+--- a/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
++++ b/arch/arm64/boot/dts/exynos/exynos5433-tm2-common.dtsi
+@@ -389,7 +389,7 @@
+ s2mps13-pmic@66 {
+ compatible = "samsung,s2mps13-pmic";
+ interrupt-parent = <&gpa0>;
+- interrupts = <7 IRQ_TYPE_NONE>;
++ interrupts = <7 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x66>;
+ samsung,s2mps11-wrstbi-ground;
+
+diff --git a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+index 61ee7b6a31594..09aead2be000c 100644
+--- a/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
++++ b/arch/arm64/boot/dts/exynos/exynos7-espresso.dts
+@@ -90,7 +90,7 @@
+ s2mps15_pmic@66 {
+ compatible = "samsung,s2mps15-pmic";
+ reg = <0x66>;
+- interrupts = <2 IRQ_TYPE_NONE>;
++ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&gpa0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pmic_irq>;
+diff --git a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+index aa52927e2e9c2..fad70c2df7bc0 100644
+--- a/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
++++ b/arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
+@@ -202,7 +202,7 @@
+ };
+
+ partition@20000 {
+- label = "u-boot";
++ label = "a53-firmware";
+ reg = <0x20000 0x160000>;
+ };
+
+diff --git a/arch/arm64/boot/dts/mediatek/mt7622.dtsi b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+index 7cd8c3f52b471..e7e002d8b1089 100644
+--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi
++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi
+@@ -698,6 +698,8 @@
+ clocks = <&pericfg CLK_PERI_MSDC30_1_PD>,
+ <&topckgen CLK_TOP_AXI_SEL>;
+ clock-names = "source", "hclk";
++ resets = <&pericfg MT7622_PERI_MSDC1_SW_RST>;
++ reset-names = "hrst";
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index d95273af9f1e4..449843f2184d8 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -53,7 +53,7 @@
+ no-map;
+ };
+
+- reserved@8668000 {
++ reserved@86680000 {
+ reg = <0x0 0x86680000 0x0 0x80000>;
+ no-map;
+ };
+@@ -66,7 +66,7 @@
+ qcom,client-id = <1>;
+ };
+
+- rfsa@867e00000 {
++ rfsa@867e0000 {
+ reg = <0x0 0x867e0000 0x0 0x20000>;
+ no-map;
+ };
+diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
+index aa57dc639f77f..aa13344a3a5e8 100644
+--- a/arch/arm64/crypto/aes-glue.c
++++ b/arch/arm64/crypto/aes-glue.c
+@@ -55,7 +55,7 @@ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 Crypto Extensions");
+ #define aes_mac_update neon_aes_mac_update
+ MODULE_DESCRIPTION("AES-ECB/CBC/CTR/XTS using ARMv8 NEON");
+ #endif
+-#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
++#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
+ MODULE_ALIAS_CRYPTO("ecb(aes)");
+ MODULE_ALIAS_CRYPTO("cbc(aes)");
+ MODULE_ALIAS_CRYPTO("ctr(aes)");
+@@ -668,7 +668,7 @@ static int __maybe_unused xts_decrypt(struct skcipher_request *req)
+ }
+
+ static struct skcipher_alg aes_algs[] = { {
+-#if defined(USE_V8_CRYPTO_EXTENSIONS) || !defined(CONFIG_CRYPTO_AES_ARM64_BS)
++#if defined(USE_V8_CRYPTO_EXTENSIONS) || !IS_ENABLED(CONFIG_CRYPTO_AES_ARM64_BS)
+ .base = {
+ .cra_name = "__ecb(aes)",
+ .cra_driver_name = "__ecb-aes-" MODE,
+diff --git a/arch/arm64/crypto/sha1-ce-glue.c b/arch/arm64/crypto/sha1-ce-glue.c
+index bdc1b6d7aff79..05cdad31b0225 100644
+--- a/arch/arm64/crypto/sha1-ce-glue.c
++++ b/arch/arm64/crypto/sha1-ce-glue.c
+@@ -19,6 +19,7 @@
+ MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha1");
+
+ struct sha1_ce_state {
+ struct sha1_state sst;
+diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c
+index 604a01a4ede6f..1de80293ac312 100644
+--- a/arch/arm64/crypto/sha2-ce-glue.c
++++ b/arch/arm64/crypto/sha2-ce-glue.c
+@@ -19,6 +19,8 @@
+ MODULE_DESCRIPTION("SHA-224/SHA-256 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha224");
++MODULE_ALIAS_CRYPTO("sha256");
+
+ struct sha256_ce_state {
+ struct sha256_state sst;
+diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
+index 9a4bbfc45f407..ddf7aca9ff459 100644
+--- a/arch/arm64/crypto/sha3-ce-glue.c
++++ b/arch/arm64/crypto/sha3-ce-glue.c
+@@ -23,6 +23,10 @@
+ MODULE_DESCRIPTION("SHA3 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha3-224");
++MODULE_ALIAS_CRYPTO("sha3-256");
++MODULE_ALIAS_CRYPTO("sha3-384");
++MODULE_ALIAS_CRYPTO("sha3-512");
+
+ asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
+ int md_len);
+diff --git a/arch/arm64/crypto/sha512-ce-glue.c b/arch/arm64/crypto/sha512-ce-glue.c
+index 2369540040aa9..6dfcb4f3e7768 100644
+--- a/arch/arm64/crypto/sha512-ce-glue.c
++++ b/arch/arm64/crypto/sha512-ce-glue.c
+@@ -23,6 +23,8 @@
+ MODULE_DESCRIPTION("SHA-384/SHA-512 secure hash using ARMv8 Crypto Extensions");
+ MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+ MODULE_LICENSE("GPL v2");
++MODULE_ALIAS_CRYPTO("sha384");
++MODULE_ALIAS_CRYPTO("sha512");
+
+ asmlinkage void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ int blocks);
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index f2ec845404149..79caab15ccbf7 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1092,7 +1092,7 @@ static bool cpu_has_broken_dbm(void)
+ /* List of CPUs which have broken DBM support. */
+ static const struct midr_range cpus[] = {
+ #ifdef CONFIG_ARM64_ERRATUM_1024718
+- MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 -r1p0
++ MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+ #endif
+ {},
+ };
+diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
+index bdb5ec3419006..438de2301cfe3 100644
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -970,6 +970,7 @@ __primary_switch:
+
+ tlbi vmalle1 // Remove any stale TLB entries
+ dsb nsh
++ isb
+
+ msr sctlr_el1, x19 // re-enable the MMU
+ isb
+diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c
+index 7b08bf9499b6b..d2a62dd17d79d 100644
+--- a/arch/arm64/kernel/machine_kexec_file.c
++++ b/arch/arm64/kernel/machine_kexec_file.c
+@@ -150,8 +150,10 @@ static int create_dtb(struct kimage *image,
+
+ /* duplicate a device tree blob */
+ ret = fdt_open_into(initial_boot_params, buf, buf_size);
+- if (ret)
++ if (ret) {
++ vfree(buf);
+ return -EINVAL;
++ }
+
+ ret = setup_dtb(image, initrd_load_addr, initrd_len,
+ cmdline, buf);
+diff --git a/arch/arm64/kernel/probes/uprobes.c b/arch/arm64/kernel/probes/uprobes.c
+index a412d8edbcd24..2c247634552b1 100644
+--- a/arch/arm64/kernel/probes/uprobes.c
++++ b/arch/arm64/kernel/probes/uprobes.c
+@@ -38,7 +38,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
+
+ /* TODO: Currently we do not support AARCH32 instruction probing */
+ if (mm->context.flags & MMCF_AARCH32)
+- return -ENOTSUPP;
++ return -EOPNOTSUPP;
+ else if (!IS_ALIGNED(addr, AARCH64_INSN_SIZE))
+ return -EINVAL;
+
+diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
+index c23527ba65d09..64bffc1f75e0c 100644
+--- a/arch/mips/include/asm/asm.h
++++ b/arch/mips/include/asm/asm.h
+@@ -20,10 +20,27 @@
+ #include <asm/sgidefs.h>
+ #include <asm/asm-eva.h>
+
++#ifndef __VDSO__
++/*
++ * Emit CFI data in .debug_frame sections, not .eh_frame sections.
++ * We don't do DWARF unwinding at runtime, so only the offline DWARF
++ * information is useful to anyone. Note we should change this if we
++ * ever decide to enable DWARF unwinding at runtime.
++ */
++#define CFI_SECTIONS .cfi_sections .debug_frame
++#else
++ /*
++ * For the vDSO, emit both runtime unwind information and debug
++ * symbols for the .dbg file.
++ */
++#define CFI_SECTIONS
++#endif
++
+ /*
+ * LEAF - declare leaf routine
+ */
+ #define LEAF(symbol) \
++ CFI_SECTIONS; \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+@@ -36,6 +53,7 @@ symbol: .frame sp, 0, ra; \
+ * NESTED - declare nested routine entry point
+ */
+ #define NESTED(symbol, framesize, rpc) \
++ CFI_SECTIONS; \
+ .globl symbol; \
+ .align 2; \
+ .type symbol, @function; \
+diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
+index eb9d7af938365..faf98f209b3f4 100644
+--- a/arch/mips/kernel/vmlinux.lds.S
++++ b/arch/mips/kernel/vmlinux.lds.S
+@@ -93,6 +93,7 @@ SECTIONS
+
+ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
++ PAGE_ALIGNED_DATA(PAGE_SIZE)
+ CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ DATA_DATA
+@@ -225,6 +226,5 @@ SECTIONS
+ *(.options)
+ *(.pdr)
+ *(.reginfo)
+- *(.eh_frame)
+ }
+ }
+diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
+index 115b417dfb8e3..9fcc118312cb9 100644
+--- a/arch/mips/lantiq/irq.c
++++ b/arch/mips/lantiq/irq.c
+@@ -302,7 +302,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
+ generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
+
+ /* if this is a EBU irq, we need to ack it or get a deadlock */
+- if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
++ if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
+ ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
+ LTQ_EBU_PCC_ISTAT);
+ }
+diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
+index 504fd61592405..3375bbe63284e 100644
+--- a/arch/mips/mm/c-r4k.c
++++ b/arch/mips/mm/c-r4k.c
+@@ -1560,7 +1560,7 @@ static int probe_scache(void)
+ return 1;
+ }
+
+-static void __init loongson2_sc_init(void)
++static void loongson2_sc_init(void)
+ {
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
+index cb285e474c880..c4cbb65e742f4 100644
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -723,7 +723,7 @@ config PPC_64K_PAGES
+
+ config PPC_256K_PAGES
+ bool "256k page size"
+- depends on 44x && !STDBINUTILS
++ depends on 44x && !STDBINUTILS && !PPC_47x
+ help
+ Make the page size 256k.
+
+diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
+index f29bb176381f1..c72894ff9d614 100644
+--- a/arch/powerpc/kernel/entry_32.S
++++ b/arch/powerpc/kernel/entry_32.S
+@@ -336,6 +336,9 @@ trace_syscall_entry_irq_off:
+
+ .globl transfer_to_syscall
+ transfer_to_syscall:
++#ifdef CONFIG_PPC_BOOK3S_32
++ kuep_lock r11, r12
++#endif
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ andi. r12,r9,MSR_EE
+ beq- trace_syscall_entry_irq_off
+diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
+index f6428b90a6c77..6f3e417f55a35 100644
+--- a/arch/powerpc/kernel/head_8xx.S
++++ b/arch/powerpc/kernel/head_8xx.S
+@@ -191,7 +191,7 @@ SystemCall:
+ /* On the MPC8xx, this is a software emulation interrupt. It occurs
+ * for all unimplemented and illegal instructions.
+ */
+- EXCEPTION(0x1000, SoftEmu, program_check_exception, EXC_XFER_STD)
++ EXCEPTION(0x1000, SoftEmu, emulation_assist_interrupt, EXC_XFER_STD)
+
+ /* Called from DataStoreTLBMiss when perf TLB misses events are activated */
+ #ifdef CONFIG_PERF_EVENTS
+diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
+index 689664cd4e79b..1b65fb7c0bdaa 100644
+--- a/arch/powerpc/kernel/prom_init.c
++++ b/arch/powerpc/kernel/prom_init.c
+@@ -1305,14 +1305,10 @@ static void __init prom_check_platform_support(void)
+ if (prop_len > sizeof(vec))
+ prom_printf("WARNING: ibm,arch-vec-5-platform-support longer than expected (len: %d)\n",
+ prop_len);
+- prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support",
+- &vec, sizeof(vec));
+- for (i = 0; i < sizeof(vec); i += 2) {
+- prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2
+- , vec[i]
+- , vec[i + 1]);
+- prom_parse_platform_support(vec[i], vec[i + 1],
+- &supported);
++ prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", &vec, sizeof(vec));
++ for (i = 0; i < prop_len; i += 2) {
++ prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2, vec[i], vec[i + 1]);
++ prom_parse_platform_support(vec[i], vec[i + 1], &supported);
+ }
+ }
+
+diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
+index 3a77bb6434521..e03c064716789 100644
+--- a/arch/powerpc/kvm/powerpc.c
++++ b/arch/powerpc/kvm/powerpc.c
+@@ -1513,7 +1513,7 @@ int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+ return emulated;
+ }
+
+-int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+@@ -1531,7 +1531,7 @@ int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ return result;
+ }
+
+-int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+@@ -1549,7 +1549,7 @@ int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
+ return result;
+ }
+
+-int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+@@ -1567,7 +1567,7 @@ int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
+ return result;
+ }
+
+-int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
++static int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
+ {
+ union kvmppc_one_reg reg;
+ int vmx_offset = 0;
+diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
+index 16e86ba8aa209..f6b7749d6ada7 100644
+--- a/arch/powerpc/platforms/pseries/dlpar.c
++++ b/arch/powerpc/platforms/pseries/dlpar.c
+@@ -127,7 +127,6 @@ void dlpar_free_cc_nodes(struct device_node *dn)
+ #define NEXT_PROPERTY 3
+ #define PREV_PARENT 4
+ #define MORE_MEMORY 5
+-#define CALL_AGAIN -2
+ #define ERR_CFG_USE -9003
+
+ struct device_node *dlpar_configure_connector(__be32 drc_index,
+@@ -168,6 +167,9 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
+
+ spin_unlock(&rtas_data_buf_lock);
+
++ if (rtas_busy_delay(rc))
++ continue;
++
+ switch (rc) {
+ case COMPLETE:
+ break;
+@@ -216,9 +218,6 @@ struct device_node *dlpar_configure_connector(__be32 drc_index,
+ last_dn = last_dn->parent;
+ break;
+
+- case CALL_AGAIN:
+- break;
+-
+ case MORE_MEMORY:
+ case ERR_CFG_USE:
+ default:
+diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
+index c475ca49cfc6b..3e72f955bff7f 100644
+--- a/arch/s390/kernel/vtime.c
++++ b/arch/s390/kernel/vtime.c
+@@ -136,7 +136,8 @@ static int do_account_vtime(struct task_struct *tsk)
+ " stck %1" /* Store current tod clock value */
+ #endif
+ : "=Q" (S390_lowcore.last_update_timer),
+- "=Q" (S390_lowcore.last_update_clock));
++ "=Q" (S390_lowcore.last_update_clock)
++ : : "cc");
+ clock = S390_lowcore.last_update_clock - clock;
+ timer -= S390_lowcore.last_update_timer;
+
+diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
+index 18e9fb6fcf1bf..349e27771ceaf 100644
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -524,7 +524,7 @@ config COMPAT
+ bool
+ depends on SPARC64
+ default y
+- select COMPAT_BINFMT_ELF
++ select COMPAT_BINFMT_ELF if BINFMT_ELF
+ select HAVE_UID16
+ select ARCH_WANT_OLD_COMPAT_IPC
+ select COMPAT_OLD_SIGACTION
+diff --git a/arch/sparc/lib/memset.S b/arch/sparc/lib/memset.S
+index b89d42b29e344..f427f34b8b79b 100644
+--- a/arch/sparc/lib/memset.S
++++ b/arch/sparc/lib/memset.S
+@@ -142,6 +142,7 @@ __bzero:
+ ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
+ ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
+ 13:
++ EXT(12b, 13b, 21f)
+ be 8f
+ andcc %o1, 4, %g0
+
+diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
+index b7eaf655635cd..11499136720d8 100644
+--- a/arch/um/kernel/tlb.c
++++ b/arch/um/kernel/tlb.c
+@@ -126,6 +126,9 @@ static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
+ struct host_vm_op *last;
+ int fd = -1, ret = 0;
+
++ if (virt + len > STUB_START && virt < STUB_END)
++ return -EINVAL;
++
+ if (hvc->userspace)
+ fd = phys_mapping(phys, &offset);
+ else
+@@ -163,7 +166,7 @@ static int add_munmap(unsigned long addr, unsigned long len,
+ struct host_vm_op *last;
+ int ret = 0;
+
+- if ((addr >= STUB_START) && (addr < STUB_END))
++ if (addr + len > STUB_START && addr < STUB_END)
+ return -EINVAL;
+
+ if (hvc->index != 0) {
+@@ -193,6 +196,9 @@ static int add_mprotect(unsigned long addr, unsigned long len,
+ struct host_vm_op *last;
+ int ret = 0;
+
++ if (addr + len > STUB_START && addr < STUB_END)
++ return -EINVAL;
++
+ if (hvc->index != 0) {
+ last = &hvc->ops[hvc->index - 1];
+ if ((last->type == MPROTECT) &&
+@@ -433,6 +439,10 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
+ struct mm_id *mm_id;
+
+ address &= PAGE_MASK;
++
++ if (address >= STUB_START && address < STUB_END)
++ goto kill;
++
+ pgd = pgd_offset(mm, address);
+ if (!pgd_present(*pgd))
+ goto kill;
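The three hunks above replace point checks with range-overlap checks: an mmap, munmap, or mprotect of [addr, addr + len) must be rejected if it intersects the stub area [STUB_START, STUB_END) at all, not only when its start address lands inside it. The condition is the standard half-open-interval overlap test:

    #include <stdbool.h>

    /* Two half-open ranges [a0, a1) and [b0, b1) overlap iff each one
     * starts before the other ends. With a1 = addr + len, b0 = STUB_START
     * and b1 = STUB_END this is exactly the new condition in the patch. */
    static bool ranges_overlap(unsigned long a0, unsigned long a1,
                               unsigned long b0, unsigned long b1)
    {
            return a1 > b0 && a0 < b1;
    }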
+diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
+index 3e707e81afdb4..88ad272aa2b46 100644
+--- a/arch/x86/crypto/aesni-intel_glue.c
++++ b/arch/x86/crypto/aesni-intel_glue.c
+@@ -707,7 +707,8 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ unsigned long auth_tag_len = crypto_aead_authsize(tfm);
+ const struct aesni_gcm_tfm_s *gcm_tfm = aesni_gcm_tfm;
+- struct gcm_context_data data AESNI_ALIGN_ATTR;
++ u8 databuf[sizeof(struct gcm_context_data) + (AESNI_ALIGN - 8)] __aligned(8);
++ struct gcm_context_data *data = PTR_ALIGN((void *)databuf, AESNI_ALIGN);
+ struct scatter_walk dst_sg_walk = {};
+ unsigned long left = req->cryptlen;
+ unsigned long len, srclen, dstlen;
+@@ -760,8 +761,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ }
+
+ kernel_fpu_begin();
+- gcm_tfm->init(aes_ctx, &data, iv,
+- hash_subkey, assoc, assoclen);
++ gcm_tfm->init(aes_ctx, data, iv, hash_subkey, assoc, assoclen);
+ if (req->src != req->dst) {
+ while (left) {
+ src = scatterwalk_map(&src_sg_walk);
+@@ -771,10 +771,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ len = min(srclen, dstlen);
+ if (len) {
+ if (enc)
+- gcm_tfm->enc_update(aes_ctx, &data,
++ gcm_tfm->enc_update(aes_ctx, data,
+ dst, src, len);
+ else
+- gcm_tfm->dec_update(aes_ctx, &data,
++ gcm_tfm->dec_update(aes_ctx, data,
+ dst, src, len);
+ }
+ left -= len;
+@@ -792,10 +792,10 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ len = scatterwalk_clamp(&src_sg_walk, left);
+ if (len) {
+ if (enc)
+- gcm_tfm->enc_update(aes_ctx, &data,
++ gcm_tfm->enc_update(aes_ctx, data,
+ src, src, len);
+ else
+- gcm_tfm->dec_update(aes_ctx, &data,
++ gcm_tfm->dec_update(aes_ctx, data,
+ src, src, len);
+ }
+ left -= len;
+@@ -804,7 +804,7 @@ static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
+ scatterwalk_done(&src_sg_walk, 1, left);
+ }
+ }
+- gcm_tfm->finalize(aes_ctx, &data, authTag, auth_tag_len);
++ gcm_tfm->finalize(aes_ctx, data, authTag, auth_tag_len);
+ kernel_fpu_end();
+
+ if (!assocmem)
+@@ -853,7 +853,8 @@ static int helper_rfc4106_encrypt(struct aead_request *req)
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ void *aes_ctx = &(ctx->aes_key_expanded);
+- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ unsigned int i;
+ __be32 counter = cpu_to_be32(1);
+
+@@ -880,7 +881,8 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ void *aes_ctx = &(ctx->aes_key_expanded);
+- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ unsigned int i;
+
+ if (unlikely(req->assoclen != 16 && req->assoclen != 20))
+@@ -1010,7 +1012,8 @@ static int generic_gcmaes_encrypt(struct aead_request *req)
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
+ void *aes_ctx = &(ctx->aes_key_expanded);
+- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+ __be32 counter = cpu_to_be32(1);
+
+ memcpy(iv, req->iv, 12);
+@@ -1026,7 +1029,8 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
+ struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
+ void *aes_ctx = &(ctx->aes_key_expanded);
+- u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
++ u8 ivbuf[16 + (AESNI_ALIGN - 8)] __aligned(8);
++ u8 *iv = PTR_ALIGN(&ivbuf[0], AESNI_ALIGN);
+
+ memcpy(iv, req->iv, 12);
+ *((__be32 *)(iv+12)) = counter;
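The pattern repeated throughout this file works around the compiler not guaranteeing a 16-byte __aligned() request on stack variables under ABIs that only promise 8-byte stack alignment. Instead, each buffer is over-allocated by AESNI_ALIGN - 8 bytes and a pointer into it is aligned by hand (PTR_ALIGN is the kernel helper; the sketch below spells the arithmetic out):

    #include <stdint.h>
    #include <string.h>

    #define AESNI_ALIGN 16

    int demo(const uint8_t *src12)
    {
            /* 16 bytes of payload plus slack, so a 16-byte-aligned window
             * always fits even when the buffer is only 8-byte aligned. */
            uint8_t ivbuf[16 + (AESNI_ALIGN - 8)] __attribute__((aligned(8)));
            uint8_t *iv = (uint8_t *)(((uintptr_t)ivbuf + (AESNI_ALIGN - 1))
                                      & ~(uintptr_t)(AESNI_ALIGN - 1));

            memcpy(iv, src12, 12);  /* iv is now 16-byte aligned */
            return 0;
    }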
+diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
+index 9aad0e0876fba..fda3e7747c223 100644
+--- a/arch/x86/include/asm/virtext.h
++++ b/arch/x86/include/asm/virtext.h
+@@ -30,15 +30,22 @@ static inline int cpu_has_vmx(void)
+ }
+
+
+-/** Disable VMX on the current CPU
++/**
++ * cpu_vmxoff() - Disable VMX on the current CPU
+ *
+- * vmxoff causes a undefined-opcode exception if vmxon was not run
+- * on the CPU previously. Only call this function if you know VMX
+- * is enabled.
++ * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
++ *
++ * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
++ * atomically track post-VMXON state, e.g. this may be called in NMI context.
++ * Eat all faults as all other faults on VMXOFF faults are mode related, i.e.
++ * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
++ * magically in RM, VM86, compat mode, or at CPL>0.
+ */
+ static inline void cpu_vmxoff(void)
+ {
+- asm volatile ("vmxoff");
++ asm_volatile_goto("1: vmxoff\n\t"
++ _ASM_EXTABLE(1b, %l[fault]) :::: fault);
++fault:
+ cr4_clear_bits(X86_CR4_VMXE);
+ }
+
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 762f5c1465a6f..835b6fc0c1bbf 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -538,29 +538,20 @@ static void emergency_vmx_disable_all(void)
+ local_irq_disable();
+
+ /*
+- * We need to disable VMX on all CPUs before rebooting, otherwise
+- * we risk hanging up the machine, because the CPU ignore INIT
+- * signals when VMX is enabled.
++ * Disable VMX on all CPUs before rebooting, otherwise we risk hanging
++ * the machine, because the CPU blocks INIT when it's in VMX root.
+ *
+- * We can't take any locks and we may be on an inconsistent
+- * state, so we use NMIs as IPIs to tell the other CPUs to disable
+- * VMX and halt.
++ * We can't take any locks and we may be on an inconsistent state, so
++ * use NMIs as IPIs to tell the other CPUs to exit VMX root and halt.
+ *
+- * For safety, we will avoid running the nmi_shootdown_cpus()
+- * stuff unnecessarily, but we don't have a way to check
+- * if other CPUs have VMX enabled. So we will call it only if the
+- * CPU we are running on has VMX enabled.
+- *
+- * We will miss cases where VMX is not enabled on all CPUs. This
+- * shouldn't do much harm because KVM always enable VMX on all
+- * CPUs anyway. But we can miss it on the small window where KVM
+- * is still enabling VMX.
++ * Do the NMI shootdown even if VMX if off on _this_ CPU, as that
++ * doesn't prevent a different CPU from being in VMX root operation.
+ */
+- if (cpu_has_vmx() && cpu_vmx_enabled()) {
+- /* Disable VMX on this CPU. */
+- cpu_vmxoff();
++ if (cpu_has_vmx()) {
++ /* Safely force _this_ CPU out of VMX root operation. */
++ __cpu_emergency_vmxoff();
+
+- /* Halt and disable VMX on the other CPUs */
++ /* Halt and exit VMX root operation on the other CPUs. */
+ nmi_shootdown_cpus(vmxoff_nmi);
+
+ }
+diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
+index 1da558f28aa57..b9d14fdbd2d81 100644
+--- a/arch/x86/kvm/svm.c
++++ b/arch/x86/kvm/svm.c
+@@ -4327,7 +4327,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
+ !guest_has_spec_ctrl_msr(vcpu))
+ return 1;
+
+- if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
++ if (kvm_spec_ctrl_test_value(data))
+ return 1;
+
+ svm->spec_ctrl = data;
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index e7fd2f00edc11..e177848a36313 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -1974,7 +1974,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ !guest_has_spec_ctrl_msr(vcpu))
+ return 1;
+
+- if (data & ~kvm_spec_ctrl_valid_bits(vcpu))
++ if (kvm_spec_ctrl_test_value(data))
+ return 1;
+
+ vmx->spec_ctrl = data;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 73095d7213993..153659e8f4039 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -10374,28 +10374,32 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
+ }
+ EXPORT_SYMBOL_GPL(kvm_arch_no_poll);
+
+-u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu)
++
++int kvm_spec_ctrl_test_value(u64 value)
+ {
+- uint64_t bits = SPEC_CTRL_IBRS | SPEC_CTRL_STIBP | SPEC_CTRL_SSBD;
++ /*
++ * test that setting IA32_SPEC_CTRL to given value
++ * is allowed by the host processor
++ */
++
++ u64 saved_value;
++ unsigned long flags;
++ int ret = 0;
+
+- /* The STIBP bit doesn't fault even if it's not advertised */
+- if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL) &&
+- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_IBRS))
+- bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
+- if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL) &&
+- !boot_cpu_has(X86_FEATURE_AMD_IBRS))
+- bits &= ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP);
++ local_irq_save(flags);
+
+- if (!guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL_SSBD) &&
+- !guest_cpuid_has(vcpu, X86_FEATURE_AMD_SSBD))
+- bits &= ~SPEC_CTRL_SSBD;
+- if (!boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+- !boot_cpu_has(X86_FEATURE_AMD_SSBD))
+- bits &= ~SPEC_CTRL_SSBD;
++ if (rdmsrl_safe(MSR_IA32_SPEC_CTRL, &saved_value))
++ ret = 1;
++ else if (wrmsrl_safe(MSR_IA32_SPEC_CTRL, value))
++ ret = 1;
++ else
++ wrmsrl(MSR_IA32_SPEC_CTRL, saved_value);
+
+- return bits;
++ local_irq_restore(flags);
++
++ return ret;
+ }
+-EXPORT_SYMBOL_GPL(kvm_spec_ctrl_valid_bits);
++EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
+
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_fast_mmio);
+diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
+index 301286d924320..c520d373790a2 100644
+--- a/arch/x86/kvm/x86.h
++++ b/arch/x86/kvm/x86.h
+@@ -368,6 +368,6 @@ static inline bool kvm_pat_valid(u64 data)
+
+ void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu);
+ void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu);
+-u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
++int kvm_spec_ctrl_test_value(u64 value);
+
+ #endif
+diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
+index d9fbd4f699202..35b2e35c22035 100644
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -1132,12 +1132,14 @@ static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
+
+ static void *memtype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ {
++ kfree(v);
+ ++*pos;
+ return memtype_get_idx(*pos);
+ }
+
+ static void memtype_seq_stop(struct seq_file *seq, void *v)
+ {
++ kfree(v);
+ }
+
+ static int memtype_seq_show(struct seq_file *seq, void *v)
+@@ -1146,7 +1148,6 @@ static int memtype_seq_show(struct seq_file *seq, void *v)
+
+ seq_printf(seq, "%s @ 0x%Lx-0x%Lx\n", cattr_name(print_entry->type),
+ print_entry->start, print_entry->end);
+- kfree(print_entry);
+
+ return 0;
+ }
+diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
+index ba32adaeefdd0..c19006d59b791 100644
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2937,6 +2937,7 @@ static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
+ }
+
+ bfqd->in_service_queue = bfqq;
++ bfqd->in_serv_last_pos = 0;
+ }
+
+ /*
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index be1dca0103a45..13be635300a85 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -473,6 +473,14 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
+ }
+ EXPORT_SYMBOL(blk_queue_stack_limits);
+
++static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
++{
++ sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
++ if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
++ sectors = PAGE_SIZE >> SECTOR_SHIFT;
++ return sectors;
++}
++
+ /**
+ * blk_stack_limits - adjust queue_limits for stacked devices
+ * @t: the stacking driver limits (top device)
+@@ -586,6 +594,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ ret = -1;
+ }
+
++ t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
++ t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
++ t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);
++
+ /* Discard alignment and granularity */
+ if (b->discard_granularity) {
+ alignment = queue_limit_discard_alignment(b, start);
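The new blk_round_down_sectors() helper keeps each stacked sector limit a whole multiple of the logical block size (converted from bytes to 512-byte sectors via SECTOR_SHIFT = 9), with a one-page floor so the limit can never collapse to zero. Worked numbers for a 4096-byte logical block size (illustrative values):

    /* lbs = 4096 bytes -> lbs >> 9 = 8 sectors per logical block */
    unsigned int lbs_sectors = 4096 >> 9;              /* 8 */

    /* max_sectors = 255: round_down(255, 8) = 248, no partial block.
     * max_sectors = 7:   round_down(7, 8)   = 0, then raised to the
     *                    floor PAGE_SIZE >> 9 = 8 on 4 KiB-page systems. */
    unsigned int rounded = 255 - (255 % lbs_sectors);  /* 248 */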
+diff --git a/block/bsg.c b/block/bsg.c
+index 833c44b3d458e..0d012efef5274 100644
+--- a/block/bsg.c
++++ b/block/bsg.c
+@@ -157,8 +157,10 @@ static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
+ return PTR_ERR(rq);
+
+ ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
+- if (ret)
++ if (ret) {
++ blk_put_request(rq);
+ return ret;
++ }
+
+ rq->timeout = msecs_to_jiffies(hdr.timeout);
+ if (!rq->timeout)
+diff --git a/certs/blacklist.c b/certs/blacklist.c
+index ec00bf337eb67..025a41de28fda 100644
+--- a/certs/blacklist.c
++++ b/certs/blacklist.c
+@@ -153,7 +153,7 @@ static int __init blacklist_init(void)
+ KEY_USR_VIEW | KEY_USR_READ |
+ KEY_USR_SEARCH,
+ KEY_ALLOC_NOT_IN_QUOTA |
+- KEY_FLAG_KEEP,
++ KEY_ALLOC_SET_KEEP,
+ NULL, NULL);
+ if (IS_ERR(blacklist_keyring))
+ panic("Can't allocate system blacklist keyring\n");
+diff --git a/crypto/ecdh_helper.c b/crypto/ecdh_helper.c
+index 66fcb2ea81544..fca63b559f655 100644
+--- a/crypto/ecdh_helper.c
++++ b/crypto/ecdh_helper.c
+@@ -67,6 +67,9 @@ int crypto_ecdh_decode_key(const char *buf, unsigned int len,
+ if (secret.type != CRYPTO_KPP_SECRET_TYPE_ECDH)
+ return -EINVAL;
+
++ if (unlikely(len < secret.len))
++ return -EINVAL;
++
+ ptr = ecdh_unpack_data(&params->curve_id, ptr, sizeof(params->curve_id));
+ ptr = ecdh_unpack_data(&params->key_size, ptr, sizeof(params->key_size));
+ if (secret.len != crypto_ecdh_key_len(params))
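The added check guards the unpacking that follows: without it, a caller-supplied len smaller than the encoded secret.len would let the ecdh_unpack_data() calls read past the end of buf. The general shape of the guard when decoding a length-prefixed blob (a sketch; the header layout mirrors struct kpp_secret as I understand it):

    struct kpp_secret_hdr { unsigned short type; unsigned short len; };

    static int decode_blob(const char *buf, unsigned int len,
                           const struct kpp_secret_hdr *secret)
    {
            if (len < secret->len)   /* the check this hunk adds */
                    return -1;       /* -EINVAL in the kernel */
            /* ... now safe to unpack secret->len bytes from buf ... */
            return 0;
    }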
+diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
+index 01738d8e888e3..06c756651425e 100644
+--- a/drivers/acpi/acpi_configfs.c
++++ b/drivers/acpi/acpi_configfs.c
+@@ -267,7 +267,12 @@ static int __init acpi_configfs_init(void)
+
+ acpi_table_group = configfs_register_default_group(root, "table",
+ &acpi_tables_type);
+- return PTR_ERR_OR_ZERO(acpi_table_group);
++ if (IS_ERR(acpi_table_group)) {
++ configfs_unregister_subsystem(&acpi_configfs);
++ return PTR_ERR(acpi_table_group);
++ }
++
++ return 0;
+ }
+ module_init(acpi_configfs_init);
+
+diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
+index 3eacf474e1e39..a08e3eb2a6f9f 100644
+--- a/drivers/acpi/property.c
++++ b/drivers/acpi/property.c
+@@ -794,9 +794,6 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
+ const union acpi_object *obj;
+ int ret;
+
+- if (!val)
+- return -EINVAL;
+-
+ if (proptype >= DEV_PROP_U8 && proptype <= DEV_PROP_U64) {
+ ret = acpi_data_get_property(data, propname, ACPI_TYPE_INTEGER, &obj);
+ if (ret)
+@@ -806,28 +803,43 @@ static int acpi_data_prop_read_single(const struct acpi_device_data *data,
+ case DEV_PROP_U8:
+ if (obj->integer.value > U8_MAX)
+ return -EOVERFLOW;
+- *(u8 *)val = obj->integer.value;
++
++ if (val)
++ *(u8 *)val = obj->integer.value;
++
+ break;
+ case DEV_PROP_U16:
+ if (obj->integer.value > U16_MAX)
+ return -EOVERFLOW;
+- *(u16 *)val = obj->integer.value;
++
++ if (val)
++ *(u16 *)val = obj->integer.value;
++
+ break;
+ case DEV_PROP_U32:
+ if (obj->integer.value > U32_MAX)
+ return -EOVERFLOW;
+- *(u32 *)val = obj->integer.value;
++
++ if (val)
++ *(u32 *)val = obj->integer.value;
++
+ break;
+ default:
+- *(u64 *)val = obj->integer.value;
++ if (val)
++ *(u64 *)val = obj->integer.value;
++
+ break;
+ }
++
++ if (!val)
++ return 1;
+ } else if (proptype == DEV_PROP_STRING) {
+ ret = acpi_data_get_property(data, propname, ACPI_TYPE_STRING, &obj);
+ if (ret)
+ return ret;
+
+- *(char **)val = obj->string.pointer;
++ if (val)
++ *(char **)val = obj->string.pointer;
+
+ return 1;
+ } else {
+@@ -841,7 +853,7 @@ int acpi_dev_prop_read_single(struct acpi_device *adev, const char *propname,
+ {
+ int ret;
+
+- if (!adev)
++ if (!adev || !val)
+ return -EINVAL;
+
+ ret = acpi_data_prop_read_single(&adev->data, propname, proptype, val);
+@@ -935,10 +947,20 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
+ const union acpi_object *items;
+ int ret;
+
+- if (val && nval == 1) {
++ if (nval == 1 || !val) {
+ ret = acpi_data_prop_read_single(data, propname, proptype, val);
+- if (ret >= 0)
++ /*
++ * The overflow error means that the property is there and it is
++ * single-value, but its type does not match, so return.
++ */
++ if (ret >= 0 || ret == -EOVERFLOW)
+ return ret;
++
++ /*
++ * Reading this property as a single-value one failed, but its
++ * value may still be represented as one-element array, so
++ * continue.
++ */
+ }
+
+ ret = acpi_data_get_property_array(data, propname, ACPI_TYPE_ANY, &obj);
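The net effect of this hunk is that the single-value read path now tolerates val == NULL, returning 1 (the value count) without storing anything, which matches the generic device-property convention where a NULL buffer asks how many values a property carries. A hedged usage sketch ("channels" is an illustrative property name):

    /* After this fix, ACPI-backed devices answer the count query for
     * scalar properties too: NULL buffer with nval == 0 returns the
     * number of values, here 1. */
    int n = device_property_read_u32_array(dev, "channels", NULL, 0);
    if (n > 0) {
            u32 *vals = kcalloc(n, sizeof(*vals), GFP_KERNEL);
            if (vals)
                    device_property_read_u32_array(dev, "channels", vals, n);
    }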
+diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
+index fe1523664816a..af58768a03937 100644
+--- a/drivers/amba/bus.c
++++ b/drivers/amba/bus.c
+@@ -299,10 +299,11 @@ static int amba_remove(struct device *dev)
+ {
+ struct amba_device *pcdev = to_amba_device(dev);
+ struct amba_driver *drv = to_amba_driver(dev->driver);
+- int ret;
++ int ret = 0;
+
+ pm_runtime_get_sync(dev);
+- ret = drv->remove(pcdev);
++ if (drv->remove)
++ ret = drv->remove(pcdev);
+ pm_runtime_put_noidle(dev);
+
+ /* Undo the runtime PM settings in amba_probe() */
+@@ -319,7 +320,9 @@ static int amba_remove(struct device *dev)
+ static void amba_shutdown(struct device *dev)
+ {
+ struct amba_driver *drv = to_amba_driver(dev->driver);
+- drv->shutdown(to_amba_device(dev));
++
++ if (drv->shutdown)
++ drv->shutdown(to_amba_device(dev));
+ }
+
+ /**
+@@ -332,12 +335,13 @@ static void amba_shutdown(struct device *dev)
+ */
+ int amba_driver_register(struct amba_driver *drv)
+ {
+- drv->drv.bus = &amba_bustype;
++ if (!drv->probe)
++ return -EINVAL;
+
+-#define SETFN(fn) if (drv->fn) drv->drv.fn = amba_##fn
+- SETFN(probe);
+- SETFN(remove);
+- SETFN(shutdown);
++ drv->drv.bus = &amba_bustype;
++ drv->drv.probe = amba_probe;
++ drv->drv.remove = amba_remove;
++ drv->drv.shutdown = amba_shutdown;
+
+ return driver_register(&drv->drv);
+ }
+diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
+index 66a570d0da837..067b55cc157ef 100644
+--- a/drivers/ata/ahci_brcm.c
++++ b/drivers/ata/ahci_brcm.c
+@@ -361,6 +361,10 @@ static int brcm_ahci_resume(struct device *dev)
+ if (ret)
+ return ret;
+
++ ret = ahci_platform_enable_regulators(hpriv);
++ if (ret)
++ goto out_disable_clks;
++
+ brcm_sata_init(priv);
+ brcm_sata_phys_enable(priv);
+ brcm_sata_alpm_init(hpriv);
+@@ -390,6 +394,8 @@ out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+ out_disable_phys:
+ brcm_sata_phys_disable(priv);
++ ahci_platform_disable_regulators(hpriv);
++out_disable_clks:
+ ahci_platform_disable_clks(hpriv);
+ return ret;
+ }
+@@ -463,6 +469,10 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ if (ret)
+ goto out_reset;
+
++ ret = ahci_platform_enable_regulators(hpriv);
++ if (ret)
++ goto out_disable_clks;
++
+ /* Must be first so as to configure endianness including that
+ * of the standard AHCI register space.
+ */
+@@ -472,7 +482,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
+ priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
+ if (!priv->port_mask) {
+ ret = -ENODEV;
+- goto out_disable_clks;
++ goto out_disable_regulators;
+ }
+
+ /* Must be done before ahci_platform_enable_phys() */
+@@ -497,6 +507,8 @@ out_disable_platform_phys:
+ ahci_platform_disable_phys(hpriv);
+ out_disable_phys:
+ brcm_sata_phys_disable(priv);
++out_disable_regulators:
++ ahci_platform_disable_regulators(hpriv);
+ out_disable_clks:
+ ahci_platform_disable_clks(hpriv);
+ out_reset:
+diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c
+index a2fcde582e2a1..33b887b389061 100644
+--- a/drivers/auxdisplay/ht16k33.c
++++ b/drivers/auxdisplay/ht16k33.c
+@@ -117,8 +117,7 @@ static void ht16k33_fb_queue(struct ht16k33_priv *priv)
+ {
+ struct ht16k33_fbdev *fbdev = &priv->fbdev;
+
+- schedule_delayed_work(&fbdev->work,
+- msecs_to_jiffies(HZ / fbdev->refresh_rate));
++ schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate);
+ }
+
+ /*
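The one-liner above fixes a unit error: HZ / refresh_rate is already a jiffy count (one second's worth of ticks divided by the refresh rate), but it was being passed through msecs_to_jiffies() as if it were milliseconds, making the refresh run far too fast. Worked numbers, assuming HZ = 250 and a 20 Hz refresh rate:

    /* HZ = 250, refresh_rate = 20 (illustrative values)
     *
     * intended delay:         1/20 s = 50 ms
     * HZ / refresh_rate     = 12 jiffies ~ 48 ms   (correct)
     * msecs_to_jiffies(12)  = 3 jiffies  = 12 ms   (4x too fast)
     */
    schedule_delayed_work(&fbdev->work, HZ / fbdev->refresh_rate);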
+diff --git a/drivers/base/regmap/regmap-sdw.c b/drivers/base/regmap/regmap-sdw.c
+index 50a66382d87d0..e75168b941d0c 100644
+--- a/drivers/base/regmap/regmap-sdw.c
++++ b/drivers/base/regmap/regmap-sdw.c
+@@ -12,7 +12,7 @@ static int regmap_sdw_write(void *context, unsigned int reg, unsigned int val)
+ struct device *dev = context;
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+
+- return sdw_write(slave, reg, val);
++ return sdw_write_no_pm(slave, reg, val);
+ }
+
+ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+@@ -21,7 +21,7 @@ static int regmap_sdw_read(void *context, unsigned int reg, unsigned int *val)
+ struct sdw_slave *slave = dev_to_sdw_dev(dev);
+ int read;
+
+- read = sdw_read(slave, reg);
++ read = sdw_read_no_pm(slave, reg);
+ if (read < 0)
+ return read;
+
+diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
+index 77cc138d138cd..7d5236eafe845 100644
+--- a/drivers/base/swnode.c
++++ b/drivers/base/swnode.c
+@@ -534,14 +534,18 @@ software_node_get_next_child(const struct fwnode_handle *fwnode,
+ struct swnode *c = to_swnode(child);
+
+ if (!p || list_empty(&p->children) ||
+- (c && list_is_last(&c->entry, &p->children)))
++ (c && list_is_last(&c->entry, &p->children))) {
++ fwnode_handle_put(child);
+ return NULL;
++ }
+
+ if (c)
+ c = list_next_entry(c, entry);
+ else
+ c = list_first_entry(&p->children, struct swnode, entry);
+- return &c->fwnode;
++
++ fwnode_handle_put(child);
++ return fwnode_handle_get(&c->fwnode);
+ }
+
+ static struct fwnode_handle *
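This hunk makes the software-node child iterator follow fwnode reference-counting rules: it drops the reference on the child it was handed, including on the terminating paths, and returns a counted reference to the next one. That is what lets the canonical iteration loop work without leaking:

    /* Canonical fwnode child walk: each call consumes the reference held
     * on `child` and returns a new one, so only the final reference needs
     * handling. use_child() is an illustrative stand-in. */
    struct fwnode_handle *child = NULL;

    while ((child = fwnode_get_next_child_node(parent, child))) {
            if (use_child(child))
                    break;          /* caller still owns `child` here */
    }
    fwnode_handle_put(child);       /* put of NULL is a no-op */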
+diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
+index ac97a1e2e5ddc..40ea1a425c431 100644
+--- a/drivers/block/floppy.c
++++ b/drivers/block/floppy.c
+@@ -4063,21 +4063,22 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
+ if (UFDCS->rawcmd == 1)
+ UFDCS->rawcmd = 2;
+
+- if (!(mode & FMODE_NDELAY)) {
+- if (mode & (FMODE_READ|FMODE_WRITE)) {
+- UDRS->last_checked = 0;
+- clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+- check_disk_change(bdev);
+- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+- goto out;
+- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+- goto out;
+- }
+- res = -EROFS;
+- if ((mode & FMODE_WRITE) &&
+- !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
++ if (mode & (FMODE_READ|FMODE_WRITE)) {
++ UDRS->last_checked = 0;
++ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
++ check_disk_change(bdev);
++ if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
++ goto out;
++ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
+ }
++
++ res = -EROFS;
++
++ if ((mode & FMODE_WRITE) &&
++ !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
++ goto out;
++
+ mutex_unlock(&open_lock);
+ mutex_unlock(&floppy_mutex);
+ return 0;
+diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
+index 98d53764871f5..2acb719e596f5 100644
+--- a/drivers/bluetooth/btqcomsmd.c
++++ b/drivers/bluetooth/btqcomsmd.c
+@@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
+
+ btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
+ btqcomsmd_cmd_callback, btq);
+- if (IS_ERR(btq->cmd_channel))
+- return PTR_ERR(btq->cmd_channel);
++ if (IS_ERR(btq->cmd_channel)) {
++ ret = PTR_ERR(btq->cmd_channel);
++ goto destroy_acl_channel;
++ }
+
+ hdev = hci_alloc_dev();
+- if (!hdev)
+- return -ENOMEM;
++ if (!hdev) {
++ ret = -ENOMEM;
++ goto destroy_cmd_channel;
++ }
+
+ hci_set_drvdata(hdev, btq);
+ btq->hdev = hdev;
+@@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
+ hdev->set_bdaddr = qca_set_bdaddr_rome;
+
+ ret = hci_register_dev(hdev);
+- if (ret < 0) {
+- hci_free_dev(hdev);
+- return ret;
+- }
++ if (ret < 0)
++ goto hci_free_dev;
+
+ platform_set_drvdata(pdev, btq);
+
+ return 0;
++
++hci_free_dev:
++ hci_free_dev(hdev);
++destroy_cmd_channel:
++ rpmsg_destroy_ept(btq->cmd_channel);
++destroy_acl_channel:
++ rpmsg_destroy_ept(btq->acl_channel);
++
++ return ret;
+ }
+
+ static int btqcomsmd_remove(struct platform_device *pdev)
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index b92bd97b1c399..b467fd05c5e82 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -2568,7 +2568,7 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
+ if (!skb) {
+ hdev->stat.err_rx++;
+- goto err_out;
++ return;
+ }
+
+ hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
+@@ -2586,13 +2586,18 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ */
+ if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
+ data->evt_skb = skb_clone(skb, GFP_ATOMIC);
+- if (!data->evt_skb)
+- goto err_out;
++ if (!data->evt_skb) {
++ kfree_skb(skb);
++ return;
++ }
+ }
+
+ err = hci_recv_frame(hdev, skb);
+- if (err < 0)
+- goto err_free_skb;
++ if (err < 0) {
++ kfree_skb(data->evt_skb);
++ data->evt_skb = NULL;
++ return;
++ }
+
+ if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT,
+ &data->flags)) {
+@@ -2601,11 +2606,6 @@ static void btusb_mtk_wmt_recv(struct urb *urb)
+ wake_up_bit(&data->flags,
+ BTUSB_TX_WAIT_VND_EVT);
+ }
+-err_out:
+- return;
+-err_free_skb:
+- kfree_skb(data->evt_skb);
+- data->evt_skb = NULL;
+ return;
+ } else if (urb->status == -ENOENT) {
+ /* Avoid suspend failed when usb_kill_urb */
+diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
+index f83d67eafc9f0..8be4d807d1370 100644
+--- a/drivers/bluetooth/hci_ldisc.c
++++ b/drivers/bluetooth/hci_ldisc.c
+@@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
+ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
+ goto no_schedule;
+
+- if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
+- set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
++ set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
++ if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
+ goto no_schedule;
+- }
+
+ BT_DBG("");
+
+@@ -174,10 +173,10 @@ restart:
+ kfree_skb(skb);
+ }
+
++ clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
+ goto restart;
+
+- clear_bit(HCI_UART_SENDING, &hu->tx_state);
+ wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
+ }
+
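
[ Note: this hunk and the hci_serdev.c hunk just below close the same
  lost-wakeup race from both ends. The requester now publishes
  HCI_UART_TX_WAKEUP before probing HCI_UART_SENDING, and the writer
  clears HCI_UART_SENDING before its final HCI_UART_TX_WAKEUP check, so
  a request racing with a finishing writer is either seen by the writer
  (which loops again) or wins the SENDING bit (and schedules). A
  condensed sketch of the resulting protocol; only the two flag bits
  mirror the driver, the helpers are hypothetical:

    #include <linux/bitops.h>

    #define SENDING_BIT 0    /* stands in for HCI_UART_SENDING   */
    #define WAKEUP_BIT  1    /* stands in for HCI_UART_TX_WAKEUP */

    void schedule_tx_work(void);             /* hypothetical */
    void send_pending_frames(void);          /* hypothetical */

    static unsigned long tx_state;

    static void tx_wakeup(void)
    {
        set_bit(WAKEUP_BIT, &tx_state);      /* publish work first */
        if (test_and_set_bit(SENDING_BIT, &tx_state))
            return;        /* a writer is active and will see the flag */
        schedule_tx_work();
    }

    static void tx_work(void)
    {
        do {
            clear_bit(WAKEUP_BIT, &tx_state);
            send_pending_frames();
            clear_bit(SENDING_BIT, &tx_state);
        } while (test_bit(WAKEUP_BIT, &tx_state));
    }

  Previously the writer cleared SENDING only after the WAKEUP test, so a
  wakeup landing in between set WAKEUP, declined to schedule (SENDING
  was still set), and was then never looked at again. ]
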
+diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
+index 5b9aa73ff2b7f..1b4ad231e6ed3 100644
+--- a/drivers/bluetooth/hci_serdev.c
++++ b/drivers/bluetooth/hci_serdev.c
+@@ -85,9 +85,9 @@ static void hci_uart_write_work(struct work_struct *work)
+ hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
+ kfree_skb(skb);
+ }
+- } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+
+- clear_bit(HCI_UART_SENDING, &hu->tx_state);
++ clear_bit(HCI_UART_SENDING, &hu->tx_state);
++ } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
+ }
+
+ /* ------- Interface to HCI layer ------ */
+diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
+index e262445fed5f5..f35f0f31f52ad 100644
+--- a/drivers/char/hw_random/timeriomem-rng.c
++++ b/drivers/char/hw_random/timeriomem-rng.c
+@@ -69,7 +69,7 @@ static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
+ */
+ if (retval > 0)
+ usleep_range(period_us,
+- period_us + min(1, period_us / 100));
++ period_us + max(1, period_us / 100));
+
+ *(u32 *)data = readl(priv->io_base);
+ retval += sizeof(u32);
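
[ Note: the min()->max() change is easiest to see with numbers. For
  period_us = 50, min(1, 50 / 100) = min(1, 0) = 0, so the old code
  called usleep_range(50, 50), a zero-width window that defeats the
  timer coalescing usleep_range() exists for; for period_us = 5000 it
  allowed only 1 us of slack. With max() the window is at least 1 us
  and scales with the period (5000..5050 in the second case). ]
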
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 2c29f83ae3d5a..ffd61aadb7614 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -2149,7 +2149,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+ return -EPERM;
+ if (crng_init < 2)
+ return -ENODATA;
+- crng_reseed(&primary_crng, NULL);
++ crng_reseed(&primary_crng, &input_pool);
+ crng_global_init_time = jiffies - 1;
+ return 0;
+ default:
+diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
+index 63f6bed78d893..46d1fac247db7 100644
+--- a/drivers/char/tpm/tpm_tis_core.c
++++ b/drivers/char/tpm/tpm_tis_core.c
+@@ -125,7 +125,8 @@ static bool check_locality(struct tpm_chip *chip, int l)
+ if (rc < 0)
+ return false;
+
+- if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) ==
++ if ((access & (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID
++ | TPM_ACCESS_REQUEST_USE)) ==
+ (TPM_ACCESS_ACTIVE_LOCALITY | TPM_ACCESS_VALID)) {
+ priv->locality = l;
+ return true;
+@@ -134,58 +135,13 @@ static bool check_locality(struct tpm_chip *chip, int l)
+ return false;
+ }
+
+-static bool locality_inactive(struct tpm_chip *chip, int l)
+-{
+- struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+- int rc;
+- u8 access;
+-
+- rc = tpm_tis_read8(priv, TPM_ACCESS(l), &access);
+- if (rc < 0)
+- return false;
+-
+- if ((access & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY))
+- == TPM_ACCESS_VALID)
+- return true;
+-
+- return false;
+-}
+-
+ static int release_locality(struct tpm_chip *chip, int l)
+ {
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
+- unsigned long stop, timeout;
+- long rc;
+
+ tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
+
+- stop = jiffies + chip->timeout_a;
+-
+- if (chip->flags & TPM_CHIP_FLAG_IRQ) {
+-again:
+- timeout = stop - jiffies;
+- if ((long)timeout <= 0)
+- return -1;
+-
+- rc = wait_event_interruptible_timeout(priv->int_queue,
+- (locality_inactive(chip, l)),
+- timeout);
+-
+- if (rc > 0)
+- return 0;
+-
+- if (rc == -ERESTARTSYS && freezing(current)) {
+- clear_thread_flag(TIF_SIGPENDING);
+- goto again;
+- }
+- } else {
+- do {
+- if (locality_inactive(chip, l))
+- return 0;
+- tpm_msleep(TPM_TIMEOUT);
+- } while (time_before(jiffies, stop));
+- }
+- return -1;
++ return 0;
+ }
+
+ static int request_locality(struct tpm_chip *chip, int l)
+diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
+index 7015974f24b43..84ca38450d021 100644
+--- a/drivers/clk/clk-ast2600.c
++++ b/drivers/clk/clk-ast2600.c
+@@ -17,7 +17,8 @@
+
+ #define ASPEED_G6_NUM_CLKS 67
+
+-#define ASPEED_G6_SILICON_REV 0x004
++#define ASPEED_G6_SILICON_REV 0x014
++#define CHIP_REVISION_ID GENMASK(23, 16)
+
+ #define ASPEED_G6_RESET_CTRL 0x040
+ #define ASPEED_G6_RESET_CTRL2 0x050
+@@ -189,18 +190,34 @@ static struct clk_hw *ast2600_calc_pll(const char *name, u32 val)
+ static struct clk_hw *ast2600_calc_apll(const char *name, u32 val)
+ {
+ unsigned int mult, div;
++ u32 chip_id = readl(scu_g6_base + ASPEED_G6_SILICON_REV);
+
+- if (val & BIT(20)) {
+- /* Pass through mode */
+- mult = div = 1;
++ if (((chip_id & CHIP_REVISION_ID) >> 16) >= 2) {
++ if (val & BIT(24)) {
++ /* Pass through mode */
++ mult = div = 1;
++ } else {
++ /* F = 25Mhz * [(m + 1) / (n + 1)] / (p + 1) */
++ u32 m = val & 0x1fff;
++ u32 n = (val >> 13) & 0x3f;
++ u32 p = (val >> 19) & 0xf;
++
++ mult = (m + 1);
++ div = (n + 1) * (p + 1);
++ }
+ } else {
+- /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
+- u32 m = (val >> 5) & 0x3f;
+- u32 od = (val >> 4) & 0x1;
+- u32 n = val & 0xf;
++ if (val & BIT(20)) {
++ /* Pass through mode */
++ mult = div = 1;
++ } else {
++ /* F = 25Mhz * (2-od) * [(m + 2) / (n + 1)] */
++ u32 m = (val >> 5) & 0x3f;
++ u32 od = (val >> 4) & 0x1;
++ u32 n = val & 0xf;
+
+- mult = (2 - od) * (m + 2);
+- div = n + 1;
++ mult = (2 - od) * (m + 2);
++ div = n + 1;
++ }
+ }
+ return clk_hw_register_fixed_factor(NULL, name, "clkin", 0,
+ mult, div);
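
[ Note on the hunk above: the APLL decode now depends on the silicon
  revision read from SCU014 (CHIP_REVISION_ID, bits 23:16). Under the
  newer formula F = 25 MHz * (m + 1) / ((n + 1) * (p + 1)), a register
  value with m = 49, n = 0, p = 1 yields mult = 50, div = 2, i.e.
  625 MHz. A standalone sketch of that decode path, with the bitfield
  layout taken from the hunk:

    #include <stdint.h>

    /* Sketch of the >= A2 branch above, not the driver itself. */
    static void ast2600_apll_decode(uint32_t val,
                                    unsigned int *mult, unsigned int *div)
    {
        uint32_t m, n, p;

        if (val & (1u << 24)) {          /* pass-through mode */
            *mult = *div = 1;
            return;
        }

        m = val & 0x1fff;                /* bits 12:0  */
        n = (val >> 13) & 0x3f;          /* bits 18:13 */
        p = (val >> 19) & 0xf;           /* bits 22:19 */

        *mult = m + 1;                   /* F = 25 MHz * mult / div */
        *div  = (n + 1) * (p + 1);
    }
]
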
+diff --git a/drivers/clk/meson/clk-pll.c b/drivers/clk/meson/clk-pll.c
+index 3a5853ca98c6c..e8df254f8085b 100644
+--- a/drivers/clk/meson/clk-pll.c
++++ b/drivers/clk/meson/clk-pll.c
+@@ -363,13 +363,14 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ {
+ struct clk_regmap *clk = to_clk_regmap(hw);
+ struct meson_clk_pll_data *pll = meson_clk_pll_data(clk);
+- unsigned int enabled, m, n, frac = 0, ret;
++ unsigned int enabled, m, n, frac = 0;
+ unsigned long old_rate;
++ int ret;
+
+ if (parent_rate == 0 || rate == 0)
+ return -EINVAL;
+
+- old_rate = rate;
++ old_rate = clk_hw_get_rate(hw);
+
+ ret = meson_clk_get_pll_settings(rate, parent_rate, &m, &n, pll);
+ if (ret)
+@@ -391,7 +392,8 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ if (!enabled)
+ return 0;
+
+- if (meson_clk_pll_enable(hw)) {
++ ret = meson_clk_pll_enable(hw);
++ if (ret) {
+ pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
+ __func__, old_rate);
+ /*
+@@ -403,7 +405,7 @@ static int meson_clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ meson_clk_pll_set_rate(hw, old_rate, parent_rate);
+ }
+
+- return 0;
++ return ret;
+ }
+
+ /*
+diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
+index 091acd59c1d64..752f267b2881a 100644
+--- a/drivers/clk/qcom/gcc-msm8998.c
++++ b/drivers/clk/qcom/gcc-msm8998.c
+@@ -135,7 +135,7 @@ static struct pll_vco fabia_vco[] = {
+
+ static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -145,58 +145,58 @@ static struct clk_alpha_pll gpll0 = {
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll0_out_main = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll0_out_odd = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_odd",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll0_out_test = {
+ .offset = 0x0,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_test",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -206,58 +206,58 @@ static struct clk_alpha_pll gpll1 = {
+ .name = "gpll1",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll1_out_even = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_even",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll1_out_main = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_main",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll1_out_odd = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_odd",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll1_out_test = {
+ .offset = 0x1000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll1_out_test",
+ .parent_names = (const char *[]){ "gpll1" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll2 = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -267,58 +267,58 @@ static struct clk_alpha_pll gpll2 = {
+ .name = "gpll2",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll2_out_even = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_even",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll2_out_main = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_main",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll2_out_odd = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_odd",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll2_out_test = {
+ .offset = 0x2000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2_out_test",
+ .parent_names = (const char *[]){ "gpll2" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -328,58 +328,58 @@ static struct clk_alpha_pll gpll3 = {
+ .name = "gpll3",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll3_out_even = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_even",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll3_out_main = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_main",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll3_out_odd = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_odd",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll3_out_test = {
+ .offset = 0x3000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_test",
+ .parent_names = (const char *[]){ "gpll3" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll gpll4 = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .clkr = {
+@@ -389,52 +389,52 @@ static struct clk_alpha_pll gpll4 = {
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "xo" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_ops,
++ .ops = &clk_alpha_pll_fixed_fabia_ops,
+ }
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll4_out_even = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_even",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll4_out_main = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_main",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll4_out_odd = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_odd",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+ static struct clk_alpha_pll_postdiv gpll4_out_test = {
+ .offset = 0x77000,
+- .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
++ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_test",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+- .ops = &clk_alpha_pll_postdiv_ops,
++ .ops = &clk_alpha_pll_postdiv_fabia_ops,
+ },
+ };
+
+diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+index d89353a3cdec7..2f00f1b7b9c00 100644
+--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
++++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+@@ -228,7 +228,7 @@ static const char * const psi_ahb1_ahb2_parents[] = { "osc24M", "osc32k",
+ static SUNXI_CCU_MP_WITH_MUX(psi_ahb1_ahb2_clk, "psi-ahb1-ahb2",
+ psi_ahb1_ahb2_parents,
+ 0x510,
+- 0, 5, /* M */
++ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+@@ -237,19 +237,19 @@ static const char * const ahb3_apb1_apb2_parents[] = { "osc24M", "osc32k",
+ "psi-ahb1-ahb2",
+ "pll-periph0" };
+ static SUNXI_CCU_MP_WITH_MUX(ahb3_clk, "ahb3", ahb3_apb1_apb2_parents, 0x51c,
+- 0, 5, /* M */
++ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+ static SUNXI_CCU_MP_WITH_MUX(apb1_clk, "apb1", ahb3_apb1_apb2_parents, 0x520,
+- 0, 5, /* M */
++ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+ static SUNXI_CCU_MP_WITH_MUX(apb2_clk, "apb2", ahb3_apb1_apb2_parents, 0x524,
+- 0, 5, /* M */
++ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+@@ -673,7 +673,7 @@ static struct ccu_mux hdmi_cec_clk = {
+
+ .common = {
+ .reg = 0xb10,
+- .features = CCU_FEATURE_VARIABLE_PREDIV,
++ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS("hdmi-cec",
+ hdmi_cec_parents,
+ &ccu_mux_ops,
+diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
+index f35a53ce8988a..3bb5625504e2f 100644
+--- a/drivers/clocksource/Kconfig
++++ b/drivers/clocksource/Kconfig
+@@ -79,6 +79,7 @@ config IXP4XX_TIMER
+ bool "Intel XScale IXP4xx timer driver" if COMPILE_TEST
+ depends on HAS_IOMEM
+ select CLKSRC_MMIO
++ select TIMER_OF if OF
+ help
+ Enables support for the Intel XScale IXP4xx SoC timer.
+
+diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c
+index f6ddae30933f7..dae8c0c2e606f 100644
+--- a/drivers/clocksource/mxs_timer.c
++++ b/drivers/clocksource/mxs_timer.c
+@@ -138,10 +138,7 @@ static void mxs_irq_clear(char *state)
+
+ /* Clear pending interrupt */
+ timrot_irq_acknowledge();
+-
+-#ifdef DEBUG
+- pr_info("%s: changing mode to %s\n", __func__, state)
+-#endif /* DEBUG */
++ pr_debug("%s: changing mode to %s\n", __func__, state);
+ }
+
+ static int mxs_shutdown(struct clock_event_device *evt)
+diff --git a/drivers/cpufreq/brcmstb-avs-cpufreq.c b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+index 77b0e5d0fb134..a3c82f530d608 100644
+--- a/drivers/cpufreq/brcmstb-avs-cpufreq.c
++++ b/drivers/cpufreq/brcmstb-avs-cpufreq.c
+@@ -566,6 +566,16 @@ unmap_base:
+ return ret;
+ }
+
++static void brcm_avs_prepare_uninit(struct platform_device *pdev)
++{
++ struct private_data *priv;
++
++ priv = platform_get_drvdata(pdev);
++
++ iounmap(priv->avs_intr_base);
++ iounmap(priv->base);
++}
++
+ static int brcm_avs_cpufreq_init(struct cpufreq_policy *policy)
+ {
+ struct cpufreq_frequency_table *freq_table;
+@@ -701,21 +711,21 @@ static int brcm_avs_cpufreq_probe(struct platform_device *pdev)
+
+ brcm_avs_driver.driver_data = pdev;
+
+- return cpufreq_register_driver(&brcm_avs_driver);
++ ret = cpufreq_register_driver(&brcm_avs_driver);
++ if (ret)
++ brcm_avs_prepare_uninit(pdev);
++
++ return ret;
+ }
+
+ static int brcm_avs_cpufreq_remove(struct platform_device *pdev)
+ {
+- struct private_data *priv;
+ int ret;
+
+ ret = cpufreq_unregister_driver(&brcm_avs_driver);
+- if (ret)
+- return ret;
++ WARN_ON(ret);
+
+- priv = platform_get_drvdata(pdev);
+- iounmap(priv->base);
+- iounmap(priv->avs_intr_base);
++ brcm_avs_prepare_uninit(pdev);
+
+ return 0;
+ }
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index b9ca89dc75c7d..88fe803a044d5 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1566,11 +1566,9 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)
+ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ {
+ cpu->pstate.min_pstate = pstate_funcs.get_min();
+- cpu->pstate.max_pstate = pstate_funcs.get_max();
+ cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+ cpu->pstate.scaling = pstate_funcs.get_scaling();
+- cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+
+ if (hwp_active && !hwp_mode_bdw) {
+ unsigned int phy_max, current_max;
+@@ -1578,9 +1576,12 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+ cpu->pstate.turbo_pstate = phy_max;
++ cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(READ_ONCE(cpu->hwp_cap_cached));
+ } else {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
++ cpu->pstate.max_pstate = pstate_funcs.get_max();
+ }
++ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+
+ if (pstate_funcs.get_aperf_mperf_shift)
+ cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
+index ec4b5033013eb..98b8483577ce2 100644
+--- a/drivers/crypto/bcm/cipher.c
++++ b/drivers/crypto/bcm/cipher.c
+@@ -41,7 +41,7 @@
+
+ /* ================= Device Structure ================== */
+
+-struct device_private iproc_priv;
++struct bcm_device_private iproc_priv;
+
+ /* ==================== Parameters ===================== */
+
+diff --git a/drivers/crypto/bcm/cipher.h b/drivers/crypto/bcm/cipher.h
+index 766452b24d0ab..01feed268a0d4 100644
+--- a/drivers/crypto/bcm/cipher.h
++++ b/drivers/crypto/bcm/cipher.h
+@@ -418,7 +418,7 @@ struct spu_hw {
+ u32 num_chan;
+ };
+
+-struct device_private {
++struct bcm_device_private {
+ struct platform_device *pdev;
+
+ struct spu_hw spu;
+@@ -465,6 +465,6 @@ struct device_private {
+ struct mbox_chan **mbox;
+ };
+
+-extern struct device_private iproc_priv;
++extern struct bcm_device_private iproc_priv;
+
+ #endif
+diff --git a/drivers/crypto/bcm/util.c b/drivers/crypto/bcm/util.c
+index cd7504101acde..7227dbf8f46c7 100644
+--- a/drivers/crypto/bcm/util.c
++++ b/drivers/crypto/bcm/util.c
+@@ -348,7 +348,7 @@ char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
+ static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *offp)
+ {
+- struct device_private *ipriv;
++ struct bcm_device_private *ipriv;
+ char *buf;
+ ssize_t ret, out_offset, out_count;
+ int i;
+diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.h b/drivers/crypto/chelsio/chtls/chtls_cm.h
+index 3fac0c74a41fa..df4451b306495 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_cm.h
++++ b/drivers/crypto/chelsio/chtls/chtls_cm.h
+@@ -50,9 +50,6 @@
+ #define MIN_RCV_WND (24 * 1024U)
+ #define LOOPBACK(x) (((x) & htonl(0xff000000)) == htonl(0x7f000000))
+
+-/* ulp_mem_io + ulptx_idata + payload + padding */
+-#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
+-
+ /* for TX: a skb must have a headroom of at least TX_HEADER_LEN bytes */
+ #define TX_HEADER_LEN \
+ (sizeof(struct fw_ofld_tx_data_wr) + sizeof(struct sge_opaque_hdr))
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+index 7e5e092a23b3c..dce3a6f96c97e 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
++++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+@@ -30,6 +30,8 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ unsigned int ileft = areq->cryptlen;
+ unsigned int oleft = areq->cryptlen;
+ unsigned int todo;
++ unsigned long pi = 0, po = 0; /* progress for in and out */
++ bool miter_err;
+ struct sg_mapping_iter mi, mo;
+ unsigned int oi, oo; /* offset for in and out */
+ unsigned long flags;
+@@ -44,50 +46,62 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+
+ spin_lock_irqsave(&ss->slock, flags);
+
+- for (i = 0; i < op->keylen; i += 4)
+- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
++ for (i = 0; i < op->keylen / 4; i++)
++ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
+
+ if (areq->iv) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = *(u32 *)(areq->iv + i * 4);
+- writel(v, ss->base + SS_IV0 + i * 4);
++ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
+ }
+ }
+ writel(mode, ss->base + SS_CTL);
+
+- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+- SG_MITER_TO_SG | SG_MITER_ATOMIC);
+- sg_miter_next(&mi);
+- sg_miter_next(&mo);
+- if (!mi.addr || !mo.addr) {
+- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+- err = -EINVAL;
+- goto release_ss;
+- }
+
+ ileft = areq->cryptlen / 4;
+ oleft = areq->cryptlen / 4;
+ oi = 0;
+ oo = 0;
+ do {
+- todo = min(rx_cnt, ileft);
+- todo = min_t(size_t, todo, (mi.length - oi) / 4);
+- if (todo) {
+- ileft -= todo;
+- writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
+- oi += todo * 4;
+- }
+- if (oi == mi.length) {
+- sg_miter_next(&mi);
+- oi = 0;
++ if (ileft) {
++ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
++ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
++ if (pi)
++ sg_miter_skip(&mi, pi);
++ miter_err = sg_miter_next(&mi);
++ if (!miter_err || !mi.addr) {
++ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++ err = -EINVAL;
++ goto release_ss;
++ }
++ todo = min(rx_cnt, ileft);
++ todo = min_t(size_t, todo, (mi.length - oi) / 4);
++ if (todo) {
++ ileft -= todo;
++ writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
++ oi += todo * 4;
++ }
++ if (oi == mi.length) {
++ pi += mi.length;
++ oi = 0;
++ }
++ sg_miter_stop(&mi);
+ }
+
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+
++ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
++ SG_MITER_TO_SG | SG_MITER_ATOMIC);
++ if (po)
++ sg_miter_skip(&mo, po);
++ miter_err = sg_miter_next(&mo);
++ if (!miter_err || !mo.addr) {
++ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++ err = -EINVAL;
++ goto release_ss;
++ }
+ todo = min(tx_cnt, oleft);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
+ if (todo) {
+@@ -96,9 +110,10 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ oo += todo * 4;
+ }
+ if (oo == mo.length) {
+- sg_miter_next(&mo);
+ oo = 0;
++ po += mo.length;
+ }
++ sg_miter_stop(&mo);
+ } while (oleft);
+
+ if (areq->iv) {
+@@ -109,8 +124,6 @@ static int noinline_for_stack sun4i_ss_opti_poll(struct skcipher_request *areq)
+ }
+
+ release_ss:
+- sg_miter_stop(&mi);
+- sg_miter_stop(&mo);
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_irqrestore(&ss->slock, flags);
+ return err;
+@@ -164,12 +177,14 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ unsigned int oleft = areq->cryptlen;
+ unsigned int todo;
+ struct sg_mapping_iter mi, mo;
++ unsigned long pi = 0, po = 0; /* progress for in and out */
++ bool miter_err;
+ unsigned int oi, oo; /* offset for in and out */
+ unsigned int ob = 0; /* offset in buf */
+ unsigned int obo = 0; /* offset in bufo*/
+ unsigned int obl = 0; /* length of data in bufo */
+ unsigned long flags;
+- bool need_fallback;
++ bool need_fallback = false;
+
+ if (!areq->cryptlen)
+ return 0;
+@@ -188,12 +203,12 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ * we can use the SS optimized function
+ */
+ while (in_sg && no_chunk == 1) {
+- if (in_sg->length % 4)
++ if ((in_sg->length | in_sg->offset) & 3u)
+ no_chunk = 0;
+ in_sg = sg_next(in_sg);
+ }
+ while (out_sg && no_chunk == 1) {
+- if (out_sg->length % 4)
++ if ((out_sg->length | out_sg->offset) & 3u)
+ no_chunk = 0;
+ out_sg = sg_next(out_sg);
+ }
+@@ -206,28 +221,17 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+
+ spin_lock_irqsave(&ss->slock, flags);
+
+- for (i = 0; i < op->keylen; i += 4)
+- writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);
++ for (i = 0; i < op->keylen / 4; i++)
++ writesl(ss->base + SS_KEY0 + i * 4, &op->key[i], 1);
+
+ if (areq->iv) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+ v = *(u32 *)(areq->iv + i * 4);
+- writel(v, ss->base + SS_IV0 + i * 4);
++ writesl(ss->base + SS_IV0 + i * 4, &v, 1);
+ }
+ }
+ writel(mode, ss->base + SS_CTL);
+
+- sg_miter_start(&mi, areq->src, sg_nents(areq->src),
+- SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+- sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
+- SG_MITER_TO_SG | SG_MITER_ATOMIC);
+- sg_miter_next(&mi);
+- sg_miter_next(&mo);
+- if (!mi.addr || !mo.addr) {
+- dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
+- err = -EINVAL;
+- goto release_ss;
+- }
+ ileft = areq->cryptlen;
+ oleft = areq->cryptlen;
+ oi = 0;
+@@ -235,8 +239,16 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+
+ while (oleft) {
+ if (ileft) {
+- char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
+-
++ sg_miter_start(&mi, areq->src, sg_nents(areq->src),
++ SG_MITER_FROM_SG | SG_MITER_ATOMIC);
++ if (pi)
++ sg_miter_skip(&mi, pi);
++ miter_err = sg_miter_next(&mi);
++ if (!miter_err || !mi.addr) {
++ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++ err = -EINVAL;
++ goto release_ss;
++ }
+ /*
+ * todo is the number of consecutive 4byte word that we
+ * can read from current SG
+@@ -258,52 +270,57 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ */
+ todo = min(rx_cnt * 4 - ob, ileft);
+ todo = min_t(size_t, todo, mi.length - oi);
+- memcpy(buf + ob, mi.addr + oi, todo);
++ memcpy(ss->buf + ob, mi.addr + oi, todo);
+ ileft -= todo;
+ oi += todo;
+ ob += todo;
+ if (!(ob % 4)) {
+- writesl(ss->base + SS_RXFIFO, buf,
++ writesl(ss->base + SS_RXFIFO, ss->buf,
+ ob / 4);
+ ob = 0;
+ }
+ }
+ if (oi == mi.length) {
+- sg_miter_next(&mi);
++ pi += mi.length;
+ oi = 0;
+ }
++ sg_miter_stop(&mi);
+ }
+
+ spaces = readl(ss->base + SS_FCSR);
+ rx_cnt = SS_RXFIFO_SPACES(spaces);
+ tx_cnt = SS_TXFIFO_SPACES(spaces);
+- dev_dbg(ss->dev,
+- "%x %u/%zu %u/%u cnt=%u %u/%zu %u/%u cnt=%u %u\n",
+- mode,
+- oi, mi.length, ileft, areq->cryptlen, rx_cnt,
+- oo, mo.length, oleft, areq->cryptlen, tx_cnt, ob);
+
+ if (!tx_cnt)
+ continue;
++ sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
++ SG_MITER_TO_SG | SG_MITER_ATOMIC);
++ if (po)
++ sg_miter_skip(&mo, po);
++ miter_err = sg_miter_next(&mo);
++ if (!miter_err || !mo.addr) {
++ dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
++ err = -EINVAL;
++ goto release_ss;
++ }
+ /* todo in 4bytes word */
+ todo = min(tx_cnt, oleft / 4);
+ todo = min_t(size_t, todo, (mo.length - oo) / 4);
++
+ if (todo) {
+ readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
+ oleft -= todo * 4;
+ oo += todo * 4;
+ if (oo == mo.length) {
+- sg_miter_next(&mo);
++ po += mo.length;
+ oo = 0;
+ }
+ } else {
+- char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
+-
+ /*
+ * read obl bytes in bufo, we read at maximum for
+ * emptying the device
+ */
+- readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
++ readsl(ss->base + SS_TXFIFO, ss->bufo, tx_cnt);
+ obl = tx_cnt * 4;
+ obo = 0;
+ do {
+@@ -315,17 +332,19 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ */
+ todo = min_t(size_t,
+ mo.length - oo, obl - obo);
+- memcpy(mo.addr + oo, bufo + obo, todo);
++ memcpy(mo.addr + oo, ss->bufo + obo, todo);
+ oleft -= todo;
+ obo += todo;
+ oo += todo;
+ if (oo == mo.length) {
++ po += mo.length;
+ sg_miter_next(&mo);
+ oo = 0;
+ }
+ } while (obo < obl);
+ /* bufo must be fully used here */
+ }
++ sg_miter_stop(&mo);
+ }
+ if (areq->iv) {
+ for (i = 0; i < 4 && i < ivsize / 4; i++) {
+@@ -335,8 +354,6 @@ static int sun4i_ss_cipher_poll(struct skcipher_request *areq)
+ }
+
+ release_ss:
+- sg_miter_stop(&mi);
+- sg_miter_stop(&mo);
+ writel(0, ss->base + SS_CTL);
+ spin_unlock_irqrestore(&ss->slock, flags);
+
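
[ Note: both sun4i-ss poll loops now reopen the scatterlist iterator on
  every pass (start, skip what was already consumed, map one segment,
  stop) instead of holding one SG_MITER_ATOMIC mapping across the whole
  transfer. That keeps the atomic-mapping window short and lets the
  loop resume at an arbitrary byte offset, which is what the new pi/po
  progress counters track. The shape of the pattern, reduced to its
  essentials (consume_segment() is hypothetical):

    #include <linux/errno.h>
    #include <linux/scatterlist.h>

    void consume_segment(void *addr, size_t len);    /* hypothetical */

    /* Sketch: re-walk an sg list from '*progress' bytes on each pass. */
    static int copy_one_pass(struct scatterlist *sgl, unsigned int nents,
                             unsigned long *progress)
    {
        struct sg_mapping_iter miter;

        sg_miter_start(&miter, sgl, nents,
                       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
        if (*progress)
            sg_miter_skip(&miter, *progress);
        if (!sg_miter_next(&miter) || !miter.addr) {
            sg_miter_stop(&miter);
            return -EINVAL;
        }

        consume_segment(miter.addr, miter.length);
        *progress += miter.length;

        sg_miter_stop(&miter);    /* drop the mapping before looping */
        return 0;
    }
]
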
+diff --git a/drivers/crypto/sunxi-ss/sun4i-ss.h b/drivers/crypto/sunxi-ss/sun4i-ss.h
+index 35a27a7145f84..9a2adc130d9aa 100644
+--- a/drivers/crypto/sunxi-ss/sun4i-ss.h
++++ b/drivers/crypto/sunxi-ss/sun4i-ss.h
+@@ -138,6 +138,8 @@ struct sun4i_ss_ctx {
+ struct reset_control *reset;
+ struct device *dev;
+ struct resource *res;
++ char buf[4 * SS_RX_MAX];/* buffer for linearize SG src */
++ char bufo[4 * SS_TX_MAX]; /* buffer for linearize SG dst */
+ spinlock_t slock; /* control the use of the device */
+ #ifdef CONFIG_CRYPTO_DEV_SUN4I_SS_PRNG
+ u32 seed[SS_SEED_LEN / BITS_PER_LONG];
+diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
+index b7c66fc0ae0c2..8ef6e93e43f3c 100644
+--- a/drivers/crypto/talitos.c
++++ b/drivers/crypto/talitos.c
+@@ -1097,11 +1097,12 @@ static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
+ */
+ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ unsigned int offset, int datalen, int elen,
+- struct talitos_ptr *link_tbl_ptr)
++ struct talitos_ptr *link_tbl_ptr, int align)
+ {
+ int n_sg = elen ? sg_count + 1 : sg_count;
+ int count = 0;
+ int cryptlen = datalen + elen;
++ int padding = ALIGN(cryptlen, align) - cryptlen;
+
+ while (cryptlen && sg && n_sg--) {
+ unsigned int len = sg_dma_len(sg);
+@@ -1125,7 +1126,7 @@ static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
+ offset += datalen;
+ }
+ to_talitos_ptr(link_tbl_ptr + count,
+- sg_dma_address(sg) + offset, len, 0);
++ sg_dma_address(sg) + offset, sg_next(sg) ? len : len + padding, 0);
+ to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
+ count++;
+ cryptlen -= len;
+@@ -1148,10 +1149,11 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ unsigned int len, struct talitos_edesc *edesc,
+ struct talitos_ptr *ptr, int sg_count,
+ unsigned int offset, int tbl_off, int elen,
+- bool force)
++ bool force, int align)
+ {
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
++ int aligned_len = ALIGN(len, align);
+
+ if (!src) {
+ to_talitos_ptr(ptr, 0, 0, is_sec1);
+@@ -1159,22 +1161,22 @@ static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
+ }
+ to_talitos_ptr_ext_set(ptr, elen, is_sec1);
+ if (sg_count == 1 && !force) {
+- to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
++ to_talitos_ptr(ptr, sg_dma_address(src) + offset, aligned_len, is_sec1);
+ return sg_count;
+ }
+ if (is_sec1) {
+- to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
++ to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, aligned_len, is_sec1);
+ return sg_count;
+ }
+ sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
+- &edesc->link_tbl[tbl_off]);
++ &edesc->link_tbl[tbl_off], align);
+ if (sg_count == 1 && !force) {
+ /* Only one segment now, so no link tbl needed*/
+ copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
+ return sg_count;
+ }
+ to_talitos_ptr(ptr, edesc->dma_link_tbl +
+- tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
++ tbl_off * sizeof(struct talitos_ptr), aligned_len, is_sec1);
+ to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);
+
+ return sg_count;
+@@ -1186,7 +1188,7 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
+ unsigned int offset, int tbl_off)
+ {
+ return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
+- tbl_off, 0, false);
++ tbl_off, 0, false, 1);
+ }
+
+ /*
+@@ -1255,7 +1257,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+
+ ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
+ sg_count, areq->assoclen, tbl_off, elen,
+- false);
++ false, 1);
+
+ if (ret > 1) {
+ tbl_off += ret;
+@@ -1275,7 +1277,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
+ elen = 0;
+ ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
+ sg_count, areq->assoclen, tbl_off, elen,
+- is_ipsec_esp && !encrypt);
++ is_ipsec_esp && !encrypt, 1);
+ tbl_off += ret;
+
+ if (!encrypt && is_ipsec_esp) {
+@@ -1583,6 +1585,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
+ bool sync_needed = false;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ bool is_sec1 = has_ftr_sec1(priv);
++ bool is_ctr = (desc->hdr & DESC_HDR_SEL0_MASK) == DESC_HDR_SEL0_AESU &&
++ (desc->hdr & DESC_HDR_MODE0_AESU_MASK) == DESC_HDR_MODE0_AESU_CTR;
+
+ /* first DWORD empty */
+
+@@ -1603,8 +1607,8 @@ static int common_nonsnoop(struct talitos_edesc *edesc,
+ /*
+ * cipher in
+ */
+- sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
+- &desc->ptr[3], sg_count, 0, 0);
++ sg_count = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[3],
++ sg_count, 0, 0, 0, false, is_ctr ? 16 : 1);
+ if (sg_count > 1)
+ sync_needed = true;
+
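
[ Note: the new 'align' parameter pads the lengths reported to the SEC
  engine without copying any data: padding = ALIGN(cryptlen, align) -
  cryptlen, so for cryptlen = 20 and align = 16 the total grows by 12
  bytes to 32. sg_to_link_tbl_offset() adds the padding only to the
  final segment (sg_next(sg) ? len : len + padding), and
  common_nonsnoop() requests 16-byte alignment only when MODE0 selects
  the AESU in CTR mode, per the new DESC_HDR_MODE0_AESU_MASK test. ]
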
+diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
+index 1469b956948ab..32825119e8805 100644
+--- a/drivers/crypto/talitos.h
++++ b/drivers/crypto/talitos.h
+@@ -344,6 +344,7 @@ static inline bool has_ftr_sec1(struct talitos_private *priv)
+
+ /* primary execution unit mode (MODE0) and derivatives */
+ #define DESC_HDR_MODE0_ENCRYPT cpu_to_be32(0x00100000)
++#define DESC_HDR_MODE0_AESU_MASK cpu_to_be32(0x00600000)
+ #define DESC_HDR_MODE0_AESU_CBC cpu_to_be32(0x00200000)
+ #define DESC_HDR_MODE0_AESU_CTR cpu_to_be32(0x00600000)
+ #define DESC_HDR_MODE0_DEU_CBC cpu_to_be32(0x00400000)
+diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
+index ad72b3f42ffa0..eae385a312b88 100644
+--- a/drivers/dma/fsldma.c
++++ b/drivers/dma/fsldma.c
+@@ -1214,6 +1214,7 @@ static int fsldma_of_probe(struct platform_device *op)
+ {
+ struct fsldma_device *fdev;
+ struct device_node *child;
++ unsigned int i;
+ int err;
+
+ fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
+@@ -1292,6 +1293,10 @@ static int fsldma_of_probe(struct platform_device *op)
+ return 0;
+
+ out_free_fdev:
++ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
++ if (fdev->chan[i])
++ fsl_dma_chan_remove(fdev->chan[i]);
++ }
+ irq_dispose_mapping(fdev->irq);
+ iounmap(fdev->regs);
+ out_free:
+@@ -1314,6 +1319,7 @@ static int fsldma_of_remove(struct platform_device *op)
+ if (fdev->chan[i])
+ fsl_dma_chan_remove(fdev->chan[i]);
+ }
++ irq_dispose_mapping(fdev->irq);
+
+ iounmap(fdev->regs);
+ kfree(fdev);
+diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c
+index 07cc7320a614f..9045a6f7f5893 100644
+--- a/drivers/dma/hsu/pci.c
++++ b/drivers/dma/hsu/pci.c
+@@ -26,22 +26,12 @@
+ static irqreturn_t hsu_pci_irq(int irq, void *dev)
+ {
+ struct hsu_dma_chip *chip = dev;
+- struct pci_dev *pdev = to_pci_dev(chip->dev);
+ u32 dmaisr;
+ u32 status;
+ unsigned short i;
+ int ret = 0;
+ int err;
+
+- /*
+- * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
+- * to have different numbers, is shared between HSU DMA and UART IPs.
+- * Thus on such SoCs we are expecting that IRQ handler is called in
+- * UART driver only.
+- */
+- if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
+- return IRQ_HANDLED;
+-
+ dmaisr = readl(chip->regs + HSU_PCI_DMAISR);
+ for (i = 0; i < chip->hsu->nr_channels; i++) {
+ if (dmaisr & 0x1) {
+@@ -105,6 +95,17 @@ static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ if (ret)
+ goto err_register_irq;
+
++ /*
++ * On Intel Tangier B0 and Anniedale the interrupt line, disregarding
++ * to have different numbers, is shared between HSU DMA and UART IPs.
++ * Thus on such SoCs we are expecting that IRQ handler is called in
++ * UART driver only. Instead of handling the spurious interrupt
++ * from HSU DMA here and waste CPU time and delay HSU UART interrupt
++ * handling, disable the interrupt entirely.
++ */
++ if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA)
++ disable_irq_nosync(chip->irq);
++
+ pci_set_drvdata(pdev, chip);
+
+ return 0;
+diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c
+index af20e9a790a2a..bb9c361e224bc 100644
+--- a/drivers/dma/owl-dma.c
++++ b/drivers/dma/owl-dma.c
+@@ -1201,6 +1201,7 @@ static int owl_dma_remove(struct platform_device *pdev)
+ owl_dma_free(od);
+
+ clk_disable_unprepare(od->clk);
++ dma_pool_destroy(od->lli_pool);
+
+ return 0;
+ }
+diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
+index 14fb8f6a1ad29..abcc8c0136c4e 100644
+--- a/drivers/gpio/gpio-pcf857x.c
++++ b/drivers/gpio/gpio-pcf857x.c
+@@ -332,7 +332,7 @@ static int pcf857x_probe(struct i2c_client *client,
+ * reset state. Otherwise it flags pins to be driven low.
+ */
+ gpio->out = ~n_latch;
+- gpio->status = gpio->out;
++ gpio->status = gpio->read(gpio->client);
+
+ status = devm_gpiochip_add_data(&client->dev, &gpio->chip, gpio);
+ if (status < 0)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+index 3f744e72912f1..bcb7ab5c602d1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+@@ -870,7 +870,7 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
+ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+ {
+ int ret;
+- long level;
++ unsigned long level;
+ char *sub_str = NULL;
+ char *tmp;
+ char buf_cpy[AMDGPU_MASK_BUF_MAX + 1];
+@@ -886,8 +886,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
+ while (tmp[0]) {
+ sub_str = strsep(&tmp, delimiter);
+ if (strlen(sub_str)) {
+- ret = kstrtol(sub_str, 0, &level);
+- if (ret)
++ ret = kstrtoul(sub_str, 0, &level);
++ if (ret || level > 31)
+ return -EINVAL;
+ *mask |= 1 << level;
+ } else
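
[ Note: the kstrtol()->kstrtoul() switch plus the 'level > 31' bound
  closes two holes at once: a negative level no longer parses, and
  '1 << level' on the 32-bit mask can no longer be evaluated with a
  shift count of 32 or more, which is undefined behaviour in C (an
  input of "32" previously reached 1 << 32). ]
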
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+index 91899d28fa722..e8132210c244c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+@@ -21,7 +21,7 @@
+ *
+ */
+
+-#if !defined(_AMDGPU_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
++#if !defined(_AMDGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+ #define _AMDGPU_TRACE_H_
+
+ #include <linux/stringify.h>
+diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
+index 317aa257c06bb..41631271d64ca 100644
+--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
++++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
+@@ -276,6 +276,8 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
+ {
+ u32 reference_clock = adev->clock.spll.reference_freq;
+
++ if (adev->asic_type == CHIP_RENOIR)
++ return 10000;
+ if (adev->asic_type == CHIP_RAVEN)
+ return reference_clock / 4;
+
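
[ Note: 10000 here is expressed in the 10 kHz units this helper trades
  in, i.e. Renoir reports a fixed 100 MHz reference clock instead of
  deriving it from the SPLL reference. ]
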
+diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+index 5815983caaf80..0d2e13627c647 100644
+--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
++++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+@@ -245,6 +245,23 @@ static enum bp_result encoder_control_digx_v3(
+ cntl->enable_dp_audio);
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
++ switch (cntl->color_depth) {
++ case COLOR_DEPTH_888:
++ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_101010:
++ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_121212:
++ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_161616:
++ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
++ break;
++ default:
++ break;
++ }
++
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+@@ -274,6 +291,23 @@ static enum bp_result encoder_control_digx_v4(
+ cntl->enable_dp_audio));
+ params.ucLaneNum = (uint8_t)(cntl->lanes_number);
+
++ switch (cntl->color_depth) {
++ case COLOR_DEPTH_888:
++ params.ucBitPerColor = PANEL_8BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_101010:
++ params.ucBitPerColor = PANEL_10BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_121212:
++ params.ucBitPerColor = PANEL_12BIT_PER_COLOR;
++ break;
++ case COLOR_DEPTH_161616:
++ params.ucBitPerColor = PANEL_16BIT_PER_COLOR;
++ break;
++ default:
++ break;
++ }
++
+ if (EXEC_BIOS_CMD_TABLE(DIGxEncoderControl, params))
+ result = BP_RESULT_OK;
+
+@@ -1057,6 +1091,19 @@ static enum bp_result set_pixel_clock_v5(
+ * driver choose program it itself, i.e. here we program it
+ * to 888 by default.
+ */
++ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
++ switch (bp_params->color_depth) {
++ case TRANSMITTER_COLOR_DEPTH_30:
++ /* yes this is correct, the atom define is wrong */
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_32BPP;
++ break;
++ case TRANSMITTER_COLOR_DEPTH_36:
++ /* yes this is correct, the atom define is wrong */
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V5_MISC_HDMI_30BPP;
++ break;
++ default:
++ break;
++ }
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+@@ -1135,6 +1182,20 @@ static enum bp_result set_pixel_clock_v6(
+ * driver choose program it itself, i.e. here we pass required
+ * target rate that includes deep color.
+ */
++ if (bp_params->signal_type == SIGNAL_TYPE_HDMI_TYPE_A)
++ switch (bp_params->color_depth) {
++ case TRANSMITTER_COLOR_DEPTH_30:
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_30BPP_V6;
++ break;
++ case TRANSMITTER_COLOR_DEPTH_36:
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_36BPP_V6;
++ break;
++ case TRANSMITTER_COLOR_DEPTH_48:
++ clk.sPCLKInput.ucMiscInfo |= PIXEL_CLOCK_V6_MISC_HDMI_48BPP;
++ break;
++ default:
++ break;
++ }
+
+ if (EXEC_BIOS_CMD_TABLE(SetPixelClock, clk))
+ result = BP_RESULT_OK;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+index f787a6b947812..eca67d5d5b10d 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+@@ -871,6 +871,20 @@ static bool dce110_program_pix_clk(
+ bp_pc_params.flags.SET_EXTERNAL_REF_DIV_SRC =
+ pll_settings->use_external_clk;
+
++ switch (pix_clk_params->color_depth) {
++ case COLOR_DEPTH_101010:
++ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_30;
++ break;
++ case COLOR_DEPTH_121212:
++ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_36;
++ break;
++ case COLOR_DEPTH_161616:
++ bp_pc_params.color_depth = TRANSMITTER_COLOR_DEPTH_48;
++ break;
++ default:
++ break;
++ }
++
+ if (clk_src->bios->funcs->set_pixel_clock(
+ clk_src->bios, &bp_pc_params) != BP_RESULT_OK)
+ return false;
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+index 6ed922a3c1cd5..c25840a774f94 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+@@ -563,6 +563,7 @@ static void dce110_stream_encoder_hdmi_set_stream_attribute(
+ cntl.enable_dp_audio = enable_audio;
+ cntl.pixel_clock = actual_pix_clk_khz;
+ cntl.lanes_number = LANE_COUNT_FOUR;
++ cntl.color_depth = crtc_timing->display_color_depth;
+
+ if (enc110->base.bp->funcs->encoder_control(
+ enc110->base.bp, &cntl) != BP_RESULT_OK)
+diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+index ab63d0d0304cb..6fd57cfb112f5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
++++ b/drivers/gpu/drm/amd/display/dc/dce/dce_transform.c
+@@ -429,12 +429,12 @@ static void set_clamp(
+ clamp_max = 0x3FC0;
+ break;
+ case COLOR_DEPTH_101010:
+- /* 10bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
+- clamp_max = 0x3FFC;
++ /* 10bit MSB aligned on 14 bit bus '11 1111 1111 0000' */
++ clamp_max = 0x3FF0;
+ break;
+ case COLOR_DEPTH_121212:
+- /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1111' */
+- clamp_max = 0x3FFF;
++ /* 12bit MSB aligned on 14 bit bus '11 1111 1111 1100' */
++ clamp_max = 0x3FFC;
+ break;
+ default:
+ clamp_max = 0x3FC0;
+diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+index cbe7818529bbf..623455cd75203 100644
+--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
++++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+@@ -168,6 +168,11 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .ack = NULL
+ };
+
++static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
++ .set = NULL,
++ .ack = NULL
++};
++
+ #undef BASE_INNER
+ #define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+@@ -230,6 +235,17 @@ static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .funcs = &vblank_irq_info_funcs\
+ }
+
++/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match semantic
++ * of DCE's DC_IRQ_SOURCE_VUPDATEx.
++ */
++#define vupdate_no_lock_int_entry(reg_num)\
++ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
++ IRQ_REG_ENTRY(OTG, reg_num,\
++ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
++ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
++ .funcs = &vupdate_no_lock_irq_info_funcs\
++ }
++
+ #define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+@@ -338,6 +354,12 @@ irq_source_info_dcn21[DAL_IRQ_SOURCES_NUMBER] = {
+ vupdate_int_entry(3),
+ vupdate_int_entry(4),
+ vupdate_int_entry(5),
++ vupdate_no_lock_int_entry(0),
++ vupdate_no_lock_int_entry(1),
++ vupdate_no_lock_int_entry(2),
++ vupdate_no_lock_int_entry(3),
++ vupdate_no_lock_int_entry(4),
++ vupdate_no_lock_int_entry(5),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ vblank_int_entry(2),
+diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
+index 6b8502bcf0fd3..02ffde5fd7226 100644
+--- a/drivers/gpu/drm/drm_fb_helper.c
++++ b/drivers/gpu/drm/drm_fb_helper.c
+@@ -965,11 +965,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
+ drm_modeset_lock_all(fb_helper->dev);
+ drm_client_for_each_modeset(modeset, &fb_helper->client) {
+ crtc = modeset->crtc;
+- if (!crtc->funcs->gamma_set || !crtc->gamma_size)
+- return -EINVAL;
++ if (!crtc->funcs->gamma_set || !crtc->gamma_size) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+- if (cmap->start + cmap->len > crtc->gamma_size)
+- return -EINVAL;
++ if (cmap->start + cmap->len > crtc->gamma_size) {
++ ret = -EINVAL;
++ goto out;
++ }
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+@@ -982,8 +986,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info)
+ ret = crtc->funcs->gamma_set(crtc, r, g, b,
+ crtc->gamma_size, NULL);
+ if (ret)
+- return ret;
++ goto out;
+ }
++out:
+ drm_modeset_unlock_all(fb_helper->dev);
+
+ return ret;
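
[ Note: same lock-balance idea as elsewhere in this series. The two
  validation failures used to return while still holding the locks
  taken by drm_modeset_lock_all(), wedging every later modeset; routing
  them through the new 'out' label guarantees drm_modeset_unlock_all()
  runs on every path. ]
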
+diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+index e281070611480..fc9a34ed58bd1 100644
+--- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
++++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
+@@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+ hdmi_dev = pci_get_drvdata(dev);
+
+ i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL);
+- if (i2c_dev == NULL) {
+- DRM_ERROR("Can't allocate interface\n");
+- ret = -ENOMEM;
+- goto exit;
+- }
++ if (!i2c_dev)
++ return -ENOMEM;
+
+ i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
+ i2c_dev->status = I2C_STAT_INIT;
+@@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
+ oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
+ if (ret) {
+ DRM_ERROR("Failed to request IRQ for I2C controller\n");
+- goto err;
++ goto free_dev;
+ }
+
+ /* Adapter registration */
+ ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
+- return ret;
++ if (ret) {
++ DRM_ERROR("Failed to add I2C adapter\n");
++ goto free_irq;
++ }
+
+-err:
++ return 0;
++
++free_irq:
++ free_irq(dev->irq, hdmi_dev);
++free_dev:
+ kfree(i2c_dev);
+-exit:
++
+ return ret;
+ }
+
+diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
+index 7005f8f69c683..d414525eccf6d 100644
+--- a/drivers/gpu/drm/gma500/psb_drv.c
++++ b/drivers/gpu/drm/gma500/psb_drv.c
+@@ -313,6 +313,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
+ if (ret)
+ goto out_err;
+
++ ret = -ENOMEM;
++
+ dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0);
+ if (!dev_priv->mmu)
+ goto out_err;
+diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
+index b030f7ae33029..4cf95e8031e38 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
+@@ -2129,7 +2129,11 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
+ if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi))
+ return MODE_CLOCK_HIGH;
+
+- /* BXT DPLL can't generate 223-240 MHz */
++ /* GLK DPLL can't generate 446-480 MHz */
++ if (IS_GEMINILAKE(dev_priv) && clock > 446666 && clock < 480000)
++ return MODE_CLOCK_RANGE;
++
++ /* BXT/GLK DPLL can't generate 223-240 MHz */
+ if (IS_GEN9_LP(dev_priv) && clock > 223333 && clock < 240000)
+ return MODE_CLOCK_RANGE;
+
+diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+index 03c6d6157e4d0..395146884a222 100644
+--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
++++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+@@ -1099,7 +1099,7 @@ static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
+ struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc,
+ pp_done);
+
+- complete(&mdp5_crtc->pp_completion);
++ complete_all(&mdp5_crtc->pp_completion);
+ }
+
+ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
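
[ Note: complete() bumps the completion's done count by one and wakes a
  single waiter; complete_all() saturates the count so every current
  (and future) wait returns immediately until reinit_completion() is
  called. A sketch of the pairing, with hypothetical frame helpers:

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    static DECLARE_COMPLETION(pp_done);

    void kickoff_frame(void);               /* hypothetical */

    /* IRQ side: release every waiter, not just the first one. */
    static void pp_done_irq(void)
    {
        complete_all(&pp_done);
    }

    /* Submit side: re-arm the completion before starting the frame. */
    static void wait_for_pp(void)
    {
        reinit_completion(&pp_done);
        kickoff_frame();
        wait_for_completion_timeout(&pp_done, msecs_to_jiffies(50));
    }
]
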
+diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+index 1afb7c579dbbb..eca86bf448f74 100644
+--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
++++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+@@ -139,7 +139,7 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
+ .disable = dsi_20nm_phy_disable,
+ .init = msm_dsi_phy_init_common,
+ },
+- .io_start = { 0xfd998300, 0xfd9a0300 },
++ .io_start = { 0xfd998500, 0xfd9a0500 },
+ .num_dsi_phy = 2,
+ };
+
+diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+index f5f59261ea819..d1beaad0c82b6 100644
+--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
++++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/conn.h
+@@ -14,6 +14,7 @@ enum dcb_connector_type {
+ DCB_CONNECTOR_LVDS_SPWG = 0x41,
+ DCB_CONNECTOR_DP = 0x46,
+ DCB_CONNECTOR_eDP = 0x47,
++ DCB_CONNECTOR_mDP = 0x48,
+ DCB_CONNECTOR_HDMI_0 = 0x60,
+ DCB_CONNECTOR_HDMI_1 = 0x61,
+ DCB_CONNECTOR_HDMI_C = 0x63,
+diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
+index 282fd90b65e13..9ce7b0d4b8764 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
++++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
+@@ -497,6 +497,7 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
+ if (ret) {
+ NV_PRINTK(err, cli, "channel failed to initialise, %d\n", ret);
+ nouveau_channel_del(pchan);
++ goto done;
+ }
+
+ ret = nouveau_svmm_join((*pchan)->vmm->svmm, (*pchan)->inst);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 0994aee7671ad..496e7dcd6b7dc 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1240,6 +1240,7 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
+ case DCB_CONNECTOR_DMS59_DP0:
+ case DCB_CONNECTOR_DMS59_DP1:
+ case DCB_CONNECTOR_DP :
++ case DCB_CONNECTOR_mDP :
+ case DCB_CONNECTOR_USB_C : return DRM_MODE_CONNECTOR_DisplayPort;
+ case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
+ case DCB_CONNECTOR_HDMI_0 :
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 134e9106ebac1..37679507f9432 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -851,6 +851,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
+ if (sched->thread)
+ kthread_stop(sched->thread);
+
++ /* Confirm no work left behind accessing device structures */
++ cancel_delayed_work_sync(&sched->work_tdr);
++
+ sched->ready = false;
+ }
+ EXPORT_SYMBOL(drm_sched_fini);
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+index 6bf1425e8b0ca..eb3b2350687fb 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
+@@ -545,30 +545,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
+ if (info->bus_flags & DRM_BUS_FLAG_DE_LOW)
+ val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE;
+
+- /*
+- * On A20 and similar SoCs, the only way to achieve Positive Edge
+- * (Rising Edge), is setting dclk clock phase to 2/3(240°).
+- * By default TCON works in Negative Edge(Falling Edge),
+- * this is why phase is set to 0 in that case.
+- * Unfortunately there's no way to logically invert dclk through
+- * IO_POL register.
+- * The only acceptable way to work, triple checked with scope,
+- * is using clock phase set to 0° for Negative Edge and set to 240°
+- * for Positive Edge.
+- * On A33 and similar SoCs there would be a 90° phase option,
+- * but it divides also dclk by 2.
+- * Following code is a way to avoid quirks all around TCON
+- * and DOTCLOCK drivers.
+- */
+- if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE)
+- clk_set_phase(tcon->dclk, 240);
+-
+ if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+- clk_set_phase(tcon->dclk, 0);
++ val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE;
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
+ SUN4I_TCON0_IO_POL_HSYNC_POSITIVE |
+ SUN4I_TCON0_IO_POL_VSYNC_POSITIVE |
++ SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE |
+ SUN4I_TCON0_IO_POL_DE_NEGATIVE,
+ val);
+
+diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
+index 5bdbaf0847824..ce500c8dd4c72 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
++++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
+@@ -113,6 +113,7 @@
+ #define SUN4I_TCON0_IO_POL_REG 0x88
+ #define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28)
+ #define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27)
++#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE BIT(26)
+ #define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25)
+ #define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24)
+
+diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
+index 263eca119ff0f..8d202011b2db5 100644
+--- a/drivers/hid/hid-core.c
++++ b/drivers/hid/hid-core.c
+@@ -1300,6 +1300,9 @@ EXPORT_SYMBOL_GPL(hid_open_report);
+
+ static s32 snto32(__u32 value, unsigned n)
+ {
++ if (!value || !n)
++ return 0;
++
+ switch (n) {
+ case 8: return ((__s8)value);
+ case 16: return ((__s16)value);
+diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
+index e5550a5bf49d0..86001cfbdb6f1 100644
+--- a/drivers/hid/hid-logitech-dj.c
++++ b/drivers/hid/hid-logitech-dj.c
+@@ -980,6 +980,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
+ case 0x07:
+ device_type = "eQUAD step 4 Gaming";
+ logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
++ workitem.reports_supported |= STD_KEYBOARD;
+ break;
+ case 0x08:
+ device_type = "eQUAD step 4 for gamepads";
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index b74acbd5997b5..f1928c1ac139c 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -2600,7 +2600,12 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
+ wacom_wac->is_invalid_bt_frame = !value;
+ return;
+ case HID_DG_CONTACTMAX:
+- features->touch_max = value;
++ if (!features->touch_max) {
++ features->touch_max = value;
++ } else {
++ hid_warn(hdev, "%s: ignoring attempt to overwrite non-zero touch_max "
++ "%d -> %d\n", __func__, features->touch_max, value);
++ }
+ return;
+ }
+
+diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
+index 2be9c01e175ca..f36036be7f032 100644
+--- a/drivers/hsi/controllers/omap_ssi_core.c
++++ b/drivers/hsi/controllers/omap_ssi_core.c
+@@ -424,7 +424,7 @@ static int ssi_hw_init(struct hsi_controller *ssi)
+ struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
+ int err;
+
+- err = pm_runtime_get_sync(ssi->device.parent);
++ err = pm_runtime_resume_and_get(ssi->device.parent);
+ if (err < 0) {
+ dev_err(&ssi->device, "runtime PM failed %d\n", err);
+ return err;
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 452307c79e4b9..0b55bc146b292 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -1101,8 +1101,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ vmbus_device_unregister(channel->device_obj);
+ put_device(dev);
+ }
+- }
+- if (channel->primary_channel != NULL) {
++ } else if (channel->primary_channel != NULL) {
+ /*
+ * Sub-channel is being rescinded. Following is the channel
+ * close sequence when initiated from the driver (refer to
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index dd9661c11782a..70cd9fc7fb869 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -157,6 +157,11 @@
+
+ #define IE_S_ALL_INTERRUPT_SHIFT 21
+ #define IE_S_ALL_INTERRUPT_MASK 0x3f
++/*
++ * It takes ~18us to read 10 bytes of data, so to keep the tasklet
++ * running for less time, the max slave read per tasklet is set to 10 bytes.
++ */
++#define MAX_SLAVE_RX_PER_INT 10
+
+ enum i2c_slave_read_status {
+ I2C_SLAVE_RX_FIFO_EMPTY = 0,
+@@ -203,8 +208,18 @@ struct bcm_iproc_i2c_dev {
+ /* bytes that have been read */
+ unsigned int rx_bytes;
+ unsigned int thld_bytes;
++
++ bool slave_rx_only;
++ bool rx_start_rcvd;
++ bool slave_read_complete;
++ u32 tx_underrun;
++ u32 slave_int_mask;
++ struct tasklet_struct slave_rx_tasklet;
+ };
+
++/* tasklet to process slave rx data */
++static void slave_rx_tasklet_fn(unsigned long);
++
+ /*
+ * Can be expanded in the future if more interrupt status bits are utilized
+ */
+@@ -213,7 +228,8 @@ struct bcm_iproc_i2c_dev {
+
+ #define ISR_MASK_SLAVE (BIT(IS_S_START_BUSY_SHIFT)\
+ | BIT(IS_S_RX_EVENT_SHIFT) | BIT(IS_S_RD_EVENT_SHIFT)\
+- | BIT(IS_S_TX_UNDERRUN_SHIFT))
++ | BIT(IS_S_TX_UNDERRUN_SHIFT) | BIT(IS_S_RX_FIFO_FULL_SHIFT)\
++ | BIT(IS_S_RX_THLD_SHIFT))
+
+ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave);
+ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave);
+@@ -257,6 +273,7 @@ static void bcm_iproc_i2c_slave_init(
+ {
+ u32 val;
+
++ iproc_i2c->tx_underrun = 0;
+ if (need_reset) {
+ /* put controller in reset */
+ val = iproc_i2c_rd_reg(iproc_i2c, CFG_OFFSET);
+@@ -293,8 +310,11 @@ static void bcm_iproc_i2c_slave_init(
+
+ /* Enable interrupt register to indicate a valid byte in receive fifo */
+ val = BIT(IE_S_RX_EVENT_SHIFT);
++ /* Enable interrupt register to indicate a Master read transaction */
++ val |= BIT(IE_S_RD_EVENT_SHIFT);
+ /* Enable interrupt register for the Slave BUSY command */
+ val |= BIT(IE_S_START_BUSY_SHIFT);
++ iproc_i2c->slave_int_mask = val;
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+ }
+
+@@ -319,76 +339,176 @@ static void bcm_iproc_i2c_check_slave_status(
+ }
+ }
+
+-static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
+- u32 status)
++static void bcm_iproc_i2c_slave_read(struct bcm_iproc_i2c_dev *iproc_i2c)
+ {
++ u8 rx_data, rx_status;
++ u32 rx_bytes = 0;
+ u32 val;
+- u8 value, rx_status;
+
+- /* Slave RX byte receive */
+- if (status & BIT(IS_S_RX_EVENT_SHIFT)) {
++ while (rx_bytes < MAX_SLAVE_RX_PER_INT) {
+ val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+ rx_status = (val >> S_RX_STATUS_SHIFT) & S_RX_STATUS_MASK;
++ rx_data = ((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
++
+ if (rx_status == I2C_SLAVE_RX_START) {
+- /* Start of SMBUS for Master write */
++ /* Start of SMBUS Master write */
+ i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_WRITE_REQUESTED, &value);
+-
+- val = iproc_i2c_rd_reg(iproc_i2c, S_RX_OFFSET);
+- value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
++ I2C_SLAVE_WRITE_REQUESTED, &rx_data);
++ iproc_i2c->rx_start_rcvd = true;
++ iproc_i2c->slave_read_complete = false;
++ } else if (rx_status == I2C_SLAVE_RX_DATA &&
++ iproc_i2c->rx_start_rcvd) {
++ /* Middle of SMBUS Master write */
+ i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_WRITE_RECEIVED, &value);
+- } else if (status & BIT(IS_S_RD_EVENT_SHIFT)) {
+- /* Start of SMBUS for Master Read */
+- i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_READ_REQUESTED, &value);
+- iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
++ I2C_SLAVE_WRITE_RECEIVED, &rx_data);
++ } else if (rx_status == I2C_SLAVE_RX_END &&
++ iproc_i2c->rx_start_rcvd) {
++ /* End of SMBUS Master write */
++ if (iproc_i2c->slave_rx_only)
++ i2c_slave_event(iproc_i2c->slave,
++ I2C_SLAVE_WRITE_RECEIVED,
++ &rx_data);
++
++ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP,
++ &rx_data);
++ } else if (rx_status == I2C_SLAVE_RX_FIFO_EMPTY) {
++ iproc_i2c->rx_start_rcvd = false;
++ iproc_i2c->slave_read_complete = true;
++ break;
++ }
+
+- val = BIT(S_CMD_START_BUSY_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
++ rx_bytes++;
++ }
++}
+
+- /*
+- * Enable interrupt for TX FIFO becomes empty and
+- * less than PKT_LENGTH bytes were output on the SMBUS
+- */
+- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+- val |= BIT(IE_S_TX_UNDERRUN_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
+- } else {
+- /* Master write other than start */
+- value = (u8)((val >> S_RX_DATA_SHIFT) & S_RX_DATA_MASK);
++static void slave_rx_tasklet_fn(unsigned long data)
++{
++ struct bcm_iproc_i2c_dev *iproc_i2c = (struct bcm_iproc_i2c_dev *)data;
++ u32 int_clr;
++
++ bcm_iproc_i2c_slave_read(iproc_i2c);
++
++ /* clear pending IS_S_RX_EVENT_SHIFT interrupt */
++ int_clr = BIT(IS_S_RX_EVENT_SHIFT);
++
++ if (!iproc_i2c->slave_rx_only && iproc_i2c->slave_read_complete) {
++ /*
++ * In case of a single-byte master-read request, the
++ * IS_S_TX_UNDERRUN_SHIFT event is generated before the
++ * IS_S_START_BUSY_SHIFT event. Hence, start the slave data send
++ * from the first IS_S_TX_UNDERRUN_SHIFT event.
++ *
++ * This means no data must be sent from the slave when the
++ * IS_S_RD_EVENT_SHIFT event is generated, else the eeprom or other
++ * backend slave driver read pointer will be incremented twice.
++ */
++ iproc_i2c->tx_underrun = 0;
++ iproc_i2c->slave_int_mask |= BIT(IE_S_TX_UNDERRUN_SHIFT);
++
++ /* clear IS_S_RD_EVENT_SHIFT interrupt */
++ int_clr |= BIT(IS_S_RD_EVENT_SHIFT);
++ }
++
++ /* clear slave interrupt */
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, int_clr);
++ /* enable slave interrupts */
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, iproc_i2c->slave_int_mask);
++}
++
++static bool bcm_iproc_i2c_slave_isr(struct bcm_iproc_i2c_dev *iproc_i2c,
++ u32 status)
++{
++ u32 val;
++ u8 value;
++
++ /*
++ * Slave events in case of master-write, master-write-read, and
++ * master-read
++ *
++ * Master-write : only IS_S_RX_EVENT_SHIFT event
++ * Master-write-read: both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events
++ * Master-read : both IS_S_RX_EVENT_SHIFT and IS_S_RD_EVENT_SHIFT
++ * events or only IS_S_RD_EVENT_SHIFT
++ */
++ if (status & BIT(IS_S_RX_EVENT_SHIFT) ||
++ status & BIT(IS_S_RD_EVENT_SHIFT)) {
++ /* disable slave interrupts */
++ val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
++ val &= ~iproc_i2c->slave_int_mask;
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++
++ if (status & BIT(IS_S_RD_EVENT_SHIFT))
++ /* Master-write-read request */
++ iproc_i2c->slave_rx_only = false;
++ else
++ /* Master-write request only */
++ iproc_i2c->slave_rx_only = true;
++
++ /* schedule tasklet to read data later */
++ tasklet_schedule(&iproc_i2c->slave_rx_tasklet);
++
++ /* clear only IS_S_RX_EVENT_SHIFT interrupt */
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
++ BIT(IS_S_RX_EVENT_SHIFT));
++ }
++
++ if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
++ iproc_i2c->tx_underrun++;
++ if (iproc_i2c->tx_underrun == 1)
++ /* Start of SMBUS for Master Read */
+ i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_WRITE_RECEIVED, &value);
+- if (rx_status == I2C_SLAVE_RX_END)
+- i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_STOP, &value);
+- }
+- } else if (status & BIT(IS_S_TX_UNDERRUN_SHIFT)) {
+- /* Master read other than start */
+- i2c_slave_event(iproc_i2c->slave,
+- I2C_SLAVE_READ_PROCESSED, &value);
++ I2C_SLAVE_READ_REQUESTED,
++ &value);
++ else
++ /* Master read other than start */
++ i2c_slave_event(iproc_i2c->slave,
++ I2C_SLAVE_READ_PROCESSED,
++ &value);
+
+ iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, value);
++ /* start transfer */
+ val = BIT(S_CMD_START_BUSY_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
++
++ /* clear interrupt */
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
++ BIT(IS_S_TX_UNDERRUN_SHIFT));
+ }
+
+- /* Stop */
++ /* Stop received from master in case of master read transaction */
+ if (status & BIT(IS_S_START_BUSY_SHIFT)) {
+- i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
+ /*
+ * Enable interrupt for TX FIFO becomes empty and
+ * less than PKT_LENGTH bytes were output on the SMBUS
+ */
+- val = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET);
+- val &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
+- iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, val);
++ iproc_i2c->slave_int_mask &= ~BIT(IE_S_TX_UNDERRUN_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET,
++ iproc_i2c->slave_int_mask);
++
++ /* End of SMBUS for Master Read */
++ val = BIT(S_TX_WR_STATUS_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, S_TX_OFFSET, val);
++
++ val = BIT(S_CMD_START_BUSY_SHIFT);
++ iproc_i2c_wr_reg(iproc_i2c, S_CMD_OFFSET, val);
++
++ /* flush TX FIFOs */
++ val = iproc_i2c_rd_reg(iproc_i2c, S_FIFO_CTRL_OFFSET);
++ val |= (BIT(S_FIFO_TX_FLUSH_SHIFT));
++ iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, val);
++
++ i2c_slave_event(iproc_i2c->slave, I2C_SLAVE_STOP, &value);
++
++ /* clear interrupt */
++ iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET,
++ BIT(IS_S_START_BUSY_SHIFT));
+ }
+
+- /* clear interrupt status */
+- iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, status);
++ /* check slave transmit status only if slave is transmitting */
++ if (!iproc_i2c->slave_rx_only)
++ bcm_iproc_i2c_check_slave_status(iproc_i2c);
+
+- bcm_iproc_i2c_check_slave_status(iproc_i2c);
+ return true;
+ }
+
+@@ -503,12 +623,17 @@ static void bcm_iproc_i2c_process_m_event(struct bcm_iproc_i2c_dev *iproc_i2c,
+ static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+ {
+ struct bcm_iproc_i2c_dev *iproc_i2c = data;
+- u32 status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
++ u32 slave_status;
++ u32 status;
+ bool ret;
+- u32 sl_status = status & ISR_MASK_SLAVE;
+
+- if (sl_status) {
+- ret = bcm_iproc_i2c_slave_isr(iproc_i2c, sl_status);
++ status = iproc_i2c_rd_reg(iproc_i2c, IS_OFFSET);
++ /* process only slave interrupts which are enabled */
++ slave_status = status & iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET) &
++ ISR_MASK_SLAVE;
++
++ if (slave_status) {
++ ret = bcm_iproc_i2c_slave_isr(iproc_i2c, slave_status);
+ if (ret)
+ return IRQ_HANDLED;
+ else
+@@ -1025,6 +1150,10 @@ static int bcm_iproc_i2c_reg_slave(struct i2c_client *slave)
+ return -EAFNOSUPPORT;
+
+ iproc_i2c->slave = slave;
++
++ tasklet_init(&iproc_i2c->slave_rx_tasklet, slave_rx_tasklet_fn,
++ (unsigned long)iproc_i2c);
++
+ bcm_iproc_i2c_slave_init(iproc_i2c, false);
+ return 0;
+ }
+@@ -1045,6 +1174,8 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave)
+ IE_S_ALL_INTERRUPT_SHIFT);
+ iproc_i2c_wr_reg(iproc_i2c, IE_OFFSET, tmp);
+
++ tasklet_kill(&iproc_i2c->slave_rx_tasklet);
++
+ /* Erase the slave address programmed */
+ tmp = iproc_i2c_rd_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET);
+ tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT);
+diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
+index 506991596b68d..5e89cd6b690ce 100644
+--- a/drivers/i2c/busses/i2c-brcmstb.c
++++ b/drivers/i2c/busses/i2c-brcmstb.c
+@@ -316,7 +316,7 @@ static int brcmstb_send_i2c_cmd(struct brcmstb_i2c_dev *dev,
+ goto cmd_out;
+ }
+
+- if ((CMD_RD || CMD_WR) &&
++ if ((cmd == CMD_RD || cmd == CMD_WR) &&
+ bsc_readl(dev, iic_enable) & BSC_IIC_EN_NOACK_MASK) {
+ rc = -EREMOTEIO;
+ dev_dbg(dev->device, "controller received NOACK intr for %s\n",
+diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
+index 17abf60c94aeb..b56a427fb928f 100644
+--- a/drivers/i2c/busses/i2c-qcom-geni.c
++++ b/drivers/i2c/busses/i2c-qcom-geni.c
+@@ -87,6 +87,9 @@ struct geni_i2c_dev {
+ u32 clk_freq_out;
+ const struct geni_i2c_clk_fld *clk_fld;
+ int suspended;
++ void *dma_buf;
++ size_t xfer_len;
++ dma_addr_t dma_addr;
+ };
+
+ struct geni_i2c_err_log {
+@@ -350,14 +353,39 @@ static void geni_i2c_tx_fsm_rst(struct geni_i2c_dev *gi2c)
+ dev_err(gi2c->se.dev, "Timeout resetting TX_FSM\n");
+ }
+
++static void geni_i2c_rx_msg_cleanup(struct geni_i2c_dev *gi2c,
++ struct i2c_msg *cur)
++{
++ gi2c->cur_rd = 0;
++ if (gi2c->dma_buf) {
++ if (gi2c->err)
++ geni_i2c_rx_fsm_rst(gi2c);
++ geni_se_rx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
++ i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
++ }
++}
++
++static void geni_i2c_tx_msg_cleanup(struct geni_i2c_dev *gi2c,
++ struct i2c_msg *cur)
++{
++ gi2c->cur_wr = 0;
++ if (gi2c->dma_buf) {
++ if (gi2c->err)
++ geni_i2c_tx_fsm_rst(gi2c);
++ geni_se_tx_dma_unprep(&gi2c->se, gi2c->dma_addr, gi2c->xfer_len);
++ i2c_put_dma_safe_msg_buf(gi2c->dma_buf, cur, !gi2c->err);
++ }
++}
++
+ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ u32 m_param)
+ {
+- dma_addr_t rx_dma;
++ dma_addr_t rx_dma = 0;
+ unsigned long time_left;
+ void *dma_buf = NULL;
+ struct geni_se *se = &gi2c->se;
+ size_t len = msg->len;
++ struct i2c_msg *cur;
+
+ if (!of_machine_is_compatible("lenovo,yoga-c630"))
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+@@ -374,19 +402,18 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ geni_se_select_mode(se, GENI_SE_FIFO);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+ dma_buf = NULL;
++ } else {
++ gi2c->xfer_len = len;
++ gi2c->dma_addr = rx_dma;
++ gi2c->dma_buf = dma_buf;
+ }
+
++ cur = gi2c->cur;
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left)
+ geni_i2c_abort_xfer(gi2c);
+
+- gi2c->cur_rd = 0;
+- if (dma_buf) {
+- if (gi2c->err)
+- geni_i2c_rx_fsm_rst(gi2c);
+- geni_se_rx_dma_unprep(se, rx_dma, len);
+- i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
+- }
++ geni_i2c_rx_msg_cleanup(gi2c, cur);
+
+ return gi2c->err;
+ }
+@@ -394,11 +421,12 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ u32 m_param)
+ {
+- dma_addr_t tx_dma;
++ dma_addr_t tx_dma = 0;
+ unsigned long time_left;
+ void *dma_buf = NULL;
+ struct geni_se *se = &gi2c->se;
+ size_t len = msg->len;
++ struct i2c_msg *cur;
+
+ if (!of_machine_is_compatible("lenovo,yoga-c630"))
+ dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+@@ -415,22 +443,21 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
+ geni_se_select_mode(se, GENI_SE_FIFO);
+ i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
+ dma_buf = NULL;
++ } else {
++ gi2c->xfer_len = len;
++ gi2c->dma_addr = tx_dma;
++ gi2c->dma_buf = dma_buf;
+ }
+
+ if (!dma_buf) /* Get FIFO IRQ */
+ writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
+
++ cur = gi2c->cur;
+ time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
+ if (!time_left)
+ geni_i2c_abort_xfer(gi2c);
+
+- gi2c->cur_wr = 0;
+- if (dma_buf) {
+- if (gi2c->err)
+- geni_i2c_tx_fsm_rst(gi2c);
+- geni_se_tx_dma_unprep(se, tx_dma, len);
+- i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
+- }
++ geni_i2c_tx_msg_cleanup(gi2c, cur);
+
+ return gi2c->err;
+ }
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index fd7c84721b0de..c933c1c7ddd8e 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -4336,7 +4336,7 @@ static void cm_add_one(struct ib_device *ib_device)
+ unsigned long flags;
+ int ret;
+ int count = 0;
+- u8 i;
++ unsigned int i;
+
+ cm_dev = kzalloc(struct_size(cm_dev, port, ib_device->phys_port_cnt),
+ GFP_KERNEL);
+@@ -4348,7 +4348,7 @@ static void cm_add_one(struct ib_device *ib_device)
+ cm_dev->going_down = 0;
+
+ set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
+- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
++ rdma_for_each_port (ib_device, i) {
+ if (!rdma_cap_ib_cm(ib_device, i))
+ continue;
+
+@@ -4427,7 +4427,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+ .clr_port_cap_mask = IB_PORT_CM_SUP
+ };
+ unsigned long flags;
+- int i;
++ unsigned int i;
+
+ if (!cm_dev)
+ return;
+@@ -4440,7 +4440,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
+ cm_dev->going_down = 1;
+ spin_unlock_irq(&cm.lock);
+
+- for (i = 1; i <= ib_device->phys_port_cnt; i++) {
++ rdma_for_each_port (ib_device, i) {
+ if (!rdma_cap_ib_cm(ib_device, i))
+ continue;
+
+diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
+index da229eab59032..ad3a092b8b5c3 100644
+--- a/drivers/infiniband/core/user_mad.c
++++ b/drivers/infiniband/core/user_mad.c
+@@ -379,6 +379,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+
+ mutex_lock(&file->mutex);
+
++ if (file->agents_dead) {
++ mutex_unlock(&file->mutex);
++ return -EIO;
++ }
++
+ while (list_empty(&file->recv_list)) {
+ mutex_unlock(&file->mutex);
+
+@@ -392,6 +397,11 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
+ mutex_lock(&file->mutex);
+ }
+
++ if (file->agents_dead) {
++ mutex_unlock(&file->mutex);
++ return -EIO;
++ }
++
+ packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
+ list_del(&packet->list);
+
+@@ -524,7 +534,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
+
+ agent = __get_agent(file, packet->mad.hdr.id);
+ if (!agent) {
+- ret = -EINVAL;
++ ret = -EIO;
+ goto err_up;
+ }
+
+@@ -653,10 +663,14 @@ static __poll_t ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
+ /* we will always be able to post a MAD send */
+ __poll_t mask = EPOLLOUT | EPOLLWRNORM;
+
++ mutex_lock(&file->mutex);
+ poll_wait(filp, &file->recv_wait, wait);
+
+ if (!list_empty(&file->recv_list))
+ mask |= EPOLLIN | EPOLLRDNORM;
++ if (file->agents_dead)
++ mask = EPOLLERR;
++ mutex_unlock(&file->mutex);
+
+ return mask;
+ }
+@@ -1336,6 +1350,7 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
+ list_for_each_entry(file, &port->file_list, port_list) {
+ mutex_lock(&file->mutex);
+ file->agents_dead = 1;
++ wake_up_interruptible(&file->recv_wait);
+ mutex_unlock(&file->mutex);
+
+ for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index e36d315690819..3e68ba9dab45d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -657,7 +657,7 @@ struct hns_roce_qp {
+ u8 rdb_en;
+ u8 sdb_en;
+ u32 doorbell_qpn;
+- u32 sq_signal_bits;
++ enum ib_sig_type sq_signal_bits;
+ struct hns_roce_wq sq;
+
+ struct ib_umem *umem;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index e8933daab4995..d01e3222c00cf 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -1009,7 +1009,7 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ u32 timeout = 0;
+ int handle = 0;
+ u16 desc_ret;
+- int ret = 0;
++ int ret;
+ int ntc;
+
+ spin_lock_bh(&csq->lock);
+@@ -1054,15 +1054,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ if (hns_roce_cmq_csq_done(hr_dev)) {
+ complete = true;
+ handle = 0;
++ ret = 0;
+ while (handle < num) {
+ /* get the result of hardware write back */
+ desc_to_use = &csq->desc[ntc];
+ desc[handle] = *desc_to_use;
+ dev_dbg(hr_dev->dev, "Get cmq desc:\n");
+ desc_ret = le16_to_cpu(desc[handle].retval);
+- if (desc_ret == CMD_EXEC_SUCCESS)
+- ret = 0;
+- else
++ if (unlikely(desc_ret != CMD_EXEC_SUCCESS))
+ ret = -EIO;
+ priv->cmq.last_status = desc_ret;
+ ntc++;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
+index b5d196c119eec..f23a341400c06 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_main.c
++++ b/drivers/infiniband/hw/hns/hns_roce_main.c
+@@ -848,8 +848,7 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
+ return 0;
+
+ err_qp_table_free:
+- if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
+- hns_roce_cleanup_qp_table(hr_dev);
++ hns_roce_cleanup_qp_table(hr_dev);
+
+ err_cq_table_free:
+ hns_roce_cleanup_cq_table(hr_dev);
+diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
+index fd75a9043bf15..4d6f25fdcc0ef 100644
+--- a/drivers/infiniband/hw/mlx5/devx.c
++++ b/drivers/infiniband/hw/mlx5/devx.c
+@@ -1118,7 +1118,9 @@ static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ break;
+ case MLX5_CMD_OP_CREATE_TIR:
+- MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
++ *obj_id = MLX5_GET(create_tir_out, out, tirn);
++ MLX5_SET(destroy_tir_in, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
++ MLX5_SET(destroy_tir_in, din, tirn, *obj_id);
+ break;
+ case MLX5_CMD_OP_CREATE_TIS:
+ MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index c9e583c05ef27..e2656b68ec222 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -6213,8 +6213,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+
+ err_mp:
+ mlx5_ib_cleanup_multiport_master(dev);
+-
+- return -ENOMEM;
++ return err;
+ }
+
+ static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
+diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
+index 312c2fc961c00..d411356828911 100644
+--- a/drivers/infiniband/sw/rxe/rxe_net.c
++++ b/drivers/infiniband/sw/rxe/rxe_net.c
+@@ -453,6 +453,11 @@ int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
+
+ void rxe_loopback(struct sk_buff *skb)
+ {
++ if (skb->protocol == htons(ETH_P_IP))
++ skb_pull(skb, sizeof(struct iphdr));
++ else
++ skb_pull(skb, sizeof(struct ipv6hdr));
++
+ rxe_rcv(skb);
+ }
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_recv.c b/drivers/infiniband/sw/rxe/rxe_recv.c
+index 9bfb98056fc2a..369ba76f1605e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_recv.c
++++ b/drivers/infiniband/sw/rxe/rxe_recv.c
+@@ -36,21 +36,26 @@
+ #include "rxe.h"
+ #include "rxe_loc.h"
+
++/* check that QP matches packet opcode type and is in a valid state */
+ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ struct rxe_qp *qp)
+ {
++ unsigned int pkt_type;
++
+ if (unlikely(!qp->valid))
+ goto err1;
+
++ pkt_type = pkt->opcode & 0xe0;
++
+ switch (qp_type(qp)) {
+ case IB_QPT_RC:
+- if (unlikely((pkt->opcode & IB_OPCODE_RC) != 0)) {
++ if (unlikely(pkt_type != IB_OPCODE_RC)) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+ break;
+ case IB_QPT_UC:
+- if (unlikely(!(pkt->opcode & IB_OPCODE_UC))) {
++ if (unlikely(pkt_type != IB_OPCODE_UC)) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+@@ -58,7 +63,7 @@ static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
+ case IB_QPT_UD:
+ case IB_QPT_SMI:
+ case IB_QPT_GSI:
+- if (unlikely(!(pkt->opcode & IB_OPCODE_UD))) {
++ if (unlikely(pkt_type != IB_OPCODE_UD)) {
+ pr_warn_ratelimited("bad qp type\n");
+ goto err1;
+ }
+@@ -300,7 +305,6 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+
+ list_for_each_entry(mce, &mcg->qp_list, qp_list) {
+ qp = mce->qp;
+- pkt = SKB_TO_PKT(skb);
+
+ /* validate qp for incoming packet */
+ err = check_type_state(rxe, pkt, qp);
+@@ -312,12 +316,18 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+ continue;
+
+ /* for all but the last qp create a new clone of the
+- * skb and pass to the qp.
++ * skb and pass to the qp. If an error occurs in the
++ * checks for the last qp in the list we need to
++ * free the skb since it hasn't been passed on to
++ * rxe_rcv_pkt() which would free it later.
+ */
+- if (mce->qp_list.next != &mcg->qp_list)
++ if (mce->qp_list.next != &mcg->qp_list) {
+ per_qp_skb = skb_clone(skb, GFP_ATOMIC);
+- else
++ } else {
+ per_qp_skb = skb;
++ /* show we have consumed the skb */
++ skb = NULL;
++ }
+
+ if (unlikely(!per_qp_skb))
+ continue;
+@@ -332,9 +342,8 @@ static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
+
+ rxe_drop_ref(mcg); /* drop ref from rxe_pool_get_key. */
+
+- return;
+-
+ err1:
++ /* free skb if not consumed */
+ kfree_skb(skb);
+ }
+
+diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
+index dba4535494abd..4d8bc995b4503 100644
+--- a/drivers/infiniband/sw/siw/siw.h
++++ b/drivers/infiniband/sw/siw/siw.h
+@@ -667,7 +667,7 @@ static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
+ {
+ struct siw_sqe *orq_e = orq_get_tail(qp);
+
+- if (orq_e && READ_ONCE(orq_e->flags) == 0)
++ if (READ_ONCE(orq_e->flags) == 0)
+ return orq_e;
+
+ return NULL;
+diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
+index fb66d67572787..dbbf8c6c16d38 100644
+--- a/drivers/infiniband/sw/siw/siw_main.c
++++ b/drivers/infiniband/sw/siw/siw_main.c
+@@ -134,7 +134,7 @@ static struct {
+
+ static int siw_init_cpulist(void)
+ {
+- int i, num_nodes = num_possible_nodes();
++ int i, num_nodes = nr_node_ids;
+
+ memset(siw_tx_thread, 0, sizeof(siw_tx_thread));
+
+diff --git a/drivers/infiniband/sw/siw/siw_qp.c b/drivers/infiniband/sw/siw/siw_qp.c
+index b4317480cee74..5927ac5923dd8 100644
+--- a/drivers/infiniband/sw/siw/siw_qp.c
++++ b/drivers/infiniband/sw/siw/siw_qp.c
+@@ -199,26 +199,26 @@ void siw_qp_llp_write_space(struct sock *sk)
+
+ static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
+ {
+- irq_size = roundup_pow_of_two(irq_size);
+- orq_size = roundup_pow_of_two(orq_size);
+-
+- qp->attrs.irq_size = irq_size;
+- qp->attrs.orq_size = orq_size;
+-
+- qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
+- if (!qp->irq) {
+- siw_dbg_qp(qp, "irq malloc for %d failed\n", irq_size);
+- qp->attrs.irq_size = 0;
+- return -ENOMEM;
++ if (irq_size) {
++ irq_size = roundup_pow_of_two(irq_size);
++ qp->irq = vzalloc(irq_size * sizeof(struct siw_sqe));
++ if (!qp->irq) {
++ qp->attrs.irq_size = 0;
++ return -ENOMEM;
++ }
+ }
+- qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
+- if (!qp->orq) {
+- siw_dbg_qp(qp, "orq malloc for %d failed\n", orq_size);
+- qp->attrs.orq_size = 0;
+- qp->attrs.irq_size = 0;
+- vfree(qp->irq);
+- return -ENOMEM;
++ if (orq_size) {
++ orq_size = roundup_pow_of_two(orq_size);
++ qp->orq = vzalloc(orq_size * sizeof(struct siw_sqe));
++ if (!qp->orq) {
++ qp->attrs.orq_size = 0;
++ qp->attrs.irq_size = 0;
++ vfree(qp->irq);
++ return -ENOMEM;
++ }
+ }
++ qp->attrs.irq_size = irq_size;
++ qp->attrs.orq_size = orq_size;
+ siw_dbg_qp(qp, "ORD %d, IRD %d\n", orq_size, irq_size);
+ return 0;
+ }
+@@ -288,13 +288,14 @@ int siw_qp_mpa_rts(struct siw_qp *qp, enum mpa_v2_ctrl ctrl)
+ if (ctrl & MPA_V2_RDMA_WRITE_RTR)
+ wqe->sqe.opcode = SIW_OP_WRITE;
+ else if (ctrl & MPA_V2_RDMA_READ_RTR) {
+- struct siw_sqe *rreq;
++ struct siw_sqe *rreq = NULL;
+
+ wqe->sqe.opcode = SIW_OP_READ;
+
+ spin_lock(&qp->orq_lock);
+
+- rreq = orq_get_free(qp);
++ if (qp->attrs.orq_size)
++ rreq = orq_get_free(qp);
+ if (rreq) {
+ siw_read_to_orq(rreq, &wqe->sqe);
+ qp->orq_put++;
+@@ -877,135 +878,88 @@ void siw_read_to_orq(struct siw_sqe *rreq, struct siw_sqe *sqe)
+ rreq->num_sge = 1;
+ }
+
+-/*
+- * Must be called with SQ locked.
+- * To avoid complete SQ starvation by constant inbound READ requests,
+- * the active IRQ will not be served after qp->irq_burst, if the
+- * SQ has pending work.
+- */
+-int siw_activate_tx(struct siw_qp *qp)
++static int siw_activate_tx_from_sq(struct siw_qp *qp)
+ {
+- struct siw_sqe *irqe, *sqe;
++ struct siw_sqe *sqe;
+ struct siw_wqe *wqe = tx_wqe(qp);
+ int rv = 1;
+
+- irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
+-
+- if (irqe->flags & SIW_WQE_VALID) {
+- sqe = sq_get_next(qp);
+-
+- /*
+- * Avoid local WQE processing starvation in case
+- * of constant inbound READ request stream
+- */
+- if (sqe && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
+- qp->irq_burst = 0;
+- goto skip_irq;
+- }
+- memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+- wqe->wr_status = SIW_WR_QUEUED;
+-
+- /* start READ RESPONSE */
+- wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
+- wqe->sqe.flags = 0;
+- if (irqe->num_sge) {
+- wqe->sqe.num_sge = 1;
+- wqe->sqe.sge[0].length = irqe->sge[0].length;
+- wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
+- wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
+- } else {
+- wqe->sqe.num_sge = 0;
+- }
+-
+- /* Retain original RREQ's message sequence number for
+- * potential error reporting cases.
+- */
+- wqe->sqe.sge[1].length = irqe->sge[1].length;
+-
+- wqe->sqe.rkey = irqe->rkey;
+- wqe->sqe.raddr = irqe->raddr;
++ sqe = sq_get_next(qp);
++ if (!sqe)
++ return 0;
+
+- wqe->processed = 0;
+- qp->irq_get++;
++ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
++ wqe->wr_status = SIW_WR_QUEUED;
+
+- /* mark current IRQ entry free */
+- smp_store_mb(irqe->flags, 0);
++ /* First copy SQE to kernel private memory */
++ memcpy(&wqe->sqe, sqe, sizeof(*sqe));
+
++ if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
++ rv = -EINVAL;
+ goto out;
+ }
+- sqe = sq_get_next(qp);
+- if (sqe) {
+-skip_irq:
+- memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
+- wqe->wr_status = SIW_WR_QUEUED;
+-
+- /* First copy SQE to kernel private memory */
+- memcpy(&wqe->sqe, sqe, sizeof(*sqe));
+-
+- if (wqe->sqe.opcode >= SIW_NUM_OPCODES) {
++ if (wqe->sqe.flags & SIW_WQE_INLINE) {
++ if (wqe->sqe.opcode != SIW_OP_SEND &&
++ wqe->sqe.opcode != SIW_OP_WRITE) {
+ rv = -EINVAL;
+ goto out;
+ }
+- if (wqe->sqe.flags & SIW_WQE_INLINE) {
+- if (wqe->sqe.opcode != SIW_OP_SEND &&
+- wqe->sqe.opcode != SIW_OP_WRITE) {
+- rv = -EINVAL;
+- goto out;
+- }
+- if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
+- rv = -EINVAL;
+- goto out;
+- }
+- wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
+- wqe->sqe.sge[0].lkey = 0;
+- wqe->sqe.num_sge = 1;
++ if (wqe->sqe.sge[0].length > SIW_MAX_INLINE) {
++ rv = -EINVAL;
++ goto out;
+ }
+- if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
+- /* A READ cannot be fenced */
+- if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
+- wqe->sqe.opcode ==
+- SIW_OP_READ_LOCAL_INV)) {
+- siw_dbg_qp(qp, "cannot fence read\n");
+- rv = -EINVAL;
+- goto out;
+- }
+- spin_lock(&qp->orq_lock);
++ wqe->sqe.sge[0].laddr = (uintptr_t)&wqe->sqe.sge[1];
++ wqe->sqe.sge[0].lkey = 0;
++ wqe->sqe.num_sge = 1;
++ }
++ if (wqe->sqe.flags & SIW_WQE_READ_FENCE) {
++ /* A READ cannot be fenced */
++ if (unlikely(wqe->sqe.opcode == SIW_OP_READ ||
++ wqe->sqe.opcode ==
++ SIW_OP_READ_LOCAL_INV)) {
++ siw_dbg_qp(qp, "cannot fence read\n");
++ rv = -EINVAL;
++ goto out;
++ }
++ spin_lock(&qp->orq_lock);
+
+- if (!siw_orq_empty(qp)) {
+- qp->tx_ctx.orq_fence = 1;
+- rv = 0;
+- }
+- spin_unlock(&qp->orq_lock);
++ if (qp->attrs.orq_size && !siw_orq_empty(qp)) {
++ qp->tx_ctx.orq_fence = 1;
++ rv = 0;
++ }
++ spin_unlock(&qp->orq_lock);
+
+- } else if (wqe->sqe.opcode == SIW_OP_READ ||
+- wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
+- struct siw_sqe *rreq;
++ } else if (wqe->sqe.opcode == SIW_OP_READ ||
++ wqe->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
++ struct siw_sqe *rreq;
+
+- wqe->sqe.num_sge = 1;
++ if (unlikely(!qp->attrs.orq_size)) {
++ /* We negotiated not to send READ req's */
++ rv = -EINVAL;
++ goto out;
++ }
++ wqe->sqe.num_sge = 1;
+
+- spin_lock(&qp->orq_lock);
++ spin_lock(&qp->orq_lock);
+
+- rreq = orq_get_free(qp);
+- if (rreq) {
+- /*
+- * Make an immediate copy in ORQ to be ready
+- * to process loopback READ reply
+- */
+- siw_read_to_orq(rreq, &wqe->sqe);
+- qp->orq_put++;
+- } else {
+- qp->tx_ctx.orq_fence = 1;
+- rv = 0;
+- }
+- spin_unlock(&qp->orq_lock);
++ rreq = orq_get_free(qp);
++ if (rreq) {
++ /*
++ * Make an immediate copy in ORQ to be ready
++ * to process loopback READ reply
++ */
++ siw_read_to_orq(rreq, &wqe->sqe);
++ qp->orq_put++;
++ } else {
++ qp->tx_ctx.orq_fence = 1;
++ rv = 0;
+ }
+-
+- /* Clear SQE, can be re-used by application */
+- smp_store_mb(sqe->flags, 0);
+- qp->sq_get++;
+- } else {
+- rv = 0;
++ spin_unlock(&qp->orq_lock);
+ }
++
++ /* Clear SQE, can be re-used by application */
++ smp_store_mb(sqe->flags, 0);
++ qp->sq_get++;
+ out:
+ if (unlikely(rv < 0)) {
+ siw_dbg_qp(qp, "error %d\n", rv);
+@@ -1014,6 +968,65 @@ out:
+ return rv;
+ }
+
++/*
++ * Must be called with SQ locked.
++ * To avoid complete SQ starvation by constant inbound READ requests,
++ * the active IRQ will not be served after qp->irq_burst, if the
++ * SQ has pending work.
++ */
++int siw_activate_tx(struct siw_qp *qp)
++{
++ struct siw_sqe *irqe;
++ struct siw_wqe *wqe = tx_wqe(qp);
++
++ if (!qp->attrs.irq_size)
++ return siw_activate_tx_from_sq(qp);
++
++ irqe = &qp->irq[qp->irq_get % qp->attrs.irq_size];
++
++ if (!(irqe->flags & SIW_WQE_VALID))
++ return siw_activate_tx_from_sq(qp);
++
++ /*
++ * Avoid local WQE processing starvation in case
++ * of constant inbound READ request stream
++ */
++ if (sq_get_next(qp) && ++qp->irq_burst >= SIW_IRQ_MAXBURST_SQ_ACTIVE) {
++ qp->irq_burst = 0;
++ return siw_activate_tx_from_sq(qp);
++ }
++ memset(wqe->mem, 0, sizeof(*wqe->mem) * SIW_MAX_SGE);
++ wqe->wr_status = SIW_WR_QUEUED;
++
++ /* start READ RESPONSE */
++ wqe->sqe.opcode = SIW_OP_READ_RESPONSE;
++ wqe->sqe.flags = 0;
++ if (irqe->num_sge) {
++ wqe->sqe.num_sge = 1;
++ wqe->sqe.sge[0].length = irqe->sge[0].length;
++ wqe->sqe.sge[0].laddr = irqe->sge[0].laddr;
++ wqe->sqe.sge[0].lkey = irqe->sge[0].lkey;
++ } else {
++ wqe->sqe.num_sge = 0;
++ }
++
++ /* Retain original RREQ's message sequence number for
++ * potential error reporting cases.
++ */
++ wqe->sqe.sge[1].length = irqe->sge[1].length;
++
++ wqe->sqe.rkey = irqe->rkey;
++ wqe->sqe.raddr = irqe->raddr;
++
++ wqe->processed = 0;
++ qp->irq_get++;
++
++ /* mark current IRQ entry free */
++ smp_store_mb(irqe->flags, 0);
++
++ return 1;
++}
++
+ /*
+ * Check if current CQ state qualifies for calling CQ completion
+ * handler. Must be called with CQ lock held.
+diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
+index 0520e70084f97..c7c38f7fd29d6 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
+@@ -680,6 +680,10 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
+ }
+ spin_lock_irqsave(&qp->sq_lock, flags);
+
++ if (unlikely(!qp->attrs.irq_size)) {
++ run_sq = 0;
++ goto error_irq;
++ }
+ if (tx_work->wr_status == SIW_WR_IDLE) {
+ /*
+ * immediately schedule READ response w/o
+@@ -712,8 +716,9 @@ static int siw_init_rresp(struct siw_qp *qp, struct siw_rx_stream *srx)
+ /* RRESP now valid as current TX wqe or placed into IRQ */
+ smp_store_mb(resp->flags, SIW_WQE_VALID);
+ } else {
+- pr_warn("siw: [QP %u]: irq %d exceeded %d\n", qp_id(qp),
+- qp->irq_put % qp->attrs.irq_size, qp->attrs.irq_size);
++error_irq:
++ pr_warn("siw: [QP %u]: IRQ exceeded or null, size %d\n",
++ qp_id(qp), qp->attrs.irq_size);
+
+ siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
+ RDMAP_ETYPE_REMOTE_OPERATION,
+@@ -740,6 +745,9 @@ static int siw_orqe_start_rx(struct siw_qp *qp)
+ struct siw_sqe *orqe;
+ struct siw_wqe *wqe = NULL;
+
++ if (unlikely(!qp->attrs.orq_size))
++ return -EPROTO;
++
+ /* make sure ORQ indices are current */
+ smp_mb();
+
+@@ -796,8 +804,8 @@ int siw_proc_rresp(struct siw_qp *qp)
+ */
+ rv = siw_orqe_start_rx(qp);
+ if (rv) {
+- pr_warn("siw: [QP %u]: ORQ empty at idx %d\n",
+- qp_id(qp), qp->orq_get % qp->attrs.orq_size);
++ pr_warn("siw: [QP %u]: ORQ empty, size %d\n",
++ qp_id(qp), qp->attrs.orq_size);
+ goto error_term;
+ }
+ rv = siw_rresp_check_ntoh(srx, frx);
+@@ -1290,11 +1298,13 @@ static int siw_rdmap_complete(struct siw_qp *qp, int error)
+ wc_status);
+ siw_wqe_put_mem(wqe, SIW_OP_READ);
+
+- if (!error)
++ if (!error) {
+ rv = siw_check_tx_fence(qp);
+- else
+- /* Disable current ORQ eleement */
+- WRITE_ONCE(orq_get_current(qp)->flags, 0);
++ } else {
++ /* Disable current ORQ element */
++ if (qp->attrs.orq_size)
++ WRITE_ONCE(orq_get_current(qp)->flags, 0);
++ }
+ break;
+
+ case RDMAP_RDMA_READ_REQ:
+diff --git a/drivers/infiniband/sw/siw/siw_qp_tx.c b/drivers/infiniband/sw/siw/siw_qp_tx.c
+index e7cd04eda04ac..424918eb1cd4a 100644
+--- a/drivers/infiniband/sw/siw/siw_qp_tx.c
++++ b/drivers/infiniband/sw/siw/siw_qp_tx.c
+@@ -1107,8 +1107,8 @@ next_wqe:
+ /*
+ * RREQ may have already been completed by inbound RRESP!
+ */
+- if (tx_type == SIW_OP_READ ||
+- tx_type == SIW_OP_READ_LOCAL_INV) {
++ if ((tx_type == SIW_OP_READ ||
++ tx_type == SIW_OP_READ_LOCAL_INV) && qp->attrs.orq_size) {
+ /* Cleanup pending entry in ORQ */
+ qp->orq_put--;
+ qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
+diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
+index 1b1a40db529c6..2c3704f0f10fa 100644
+--- a/drivers/infiniband/sw/siw/siw_verbs.c
++++ b/drivers/infiniband/sw/siw/siw_verbs.c
+@@ -387,13 +387,23 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ if (rv)
+ goto err_out;
+
++ num_sqe = attrs->cap.max_send_wr;
++ num_rqe = attrs->cap.max_recv_wr;
++
+ /* All queue indices are derived from modulo operations
+ * on a free running 'get' (consumer) and 'put' (producer)
+ * unsigned counter. Having queue sizes at power of two
+ * avoids handling counter wrap around.
+ */
+- num_sqe = roundup_pow_of_two(attrs->cap.max_send_wr);
+- num_rqe = roundup_pow_of_two(attrs->cap.max_recv_wr);
++ if (num_sqe)
++ num_sqe = roundup_pow_of_two(num_sqe);
++ else {
++ /* Zero sized SQ is not supported */
++ rv = -EINVAL;
++ goto err_out;
++ }
++ if (num_rqe)
++ num_rqe = roundup_pow_of_two(num_rqe);
+
+ if (qp->kernel_verbs)
+ qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
+@@ -401,7 +411,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
+
+ if (qp->sendq == NULL) {
+- siw_dbg(base_dev, "SQ size %d alloc failed\n", num_sqe);
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
+@@ -434,7 +443,6 @@ struct ib_qp *siw_create_qp(struct ib_pd *pd,
+ vmalloc_user(num_rqe * sizeof(struct siw_rqe));
+
+ if (qp->recvq == NULL) {
+- siw_dbg(base_dev, "RQ size %d alloc failed\n", num_rqe);
+ rv = -ENOMEM;
+ goto err_out_xa;
+ }
+@@ -982,9 +990,9 @@ int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
+ unsigned long flags;
+ int rv = 0;
+
+- if (qp->srq) {
++ if (qp->srq || qp->attrs.rq_size == 0) {
+ *bad_wr = wr;
+- return -EOPNOTSUPP; /* what else from errno.h? */
++ return -EINVAL;
+ }
+ if (!qp->kernel_verbs) {
+ siw_dbg_qp(qp, "no kernel post_recv for user mapped sq\n");
+diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
+index a2b5fbba2d3b3..430dc69750048 100644
+--- a/drivers/input/joydev.c
++++ b/drivers/input/joydev.c
+@@ -456,7 +456,7 @@ static int joydev_handle_JSIOCSAXMAP(struct joydev *joydev,
+ if (IS_ERR(abspam))
+ return PTR_ERR(abspam);
+
+- for (i = 0; i < joydev->nabs; i++) {
++ for (i = 0; i < len && i < joydev->nabs; i++) {
+ if (abspam[i] > ABS_MAX) {
+ retval = -EINVAL;
+ goto out;
+@@ -480,6 +480,9 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
+ int i;
+ int retval = 0;
+
++ if (len % sizeof(*keypam))
++ return -EINVAL;
++
+ len = min(len, sizeof(joydev->keypam));
+
+ /* Validate the map. */
+@@ -487,7 +490,7 @@ static int joydev_handle_JSIOCSBTNMAP(struct joydev *joydev,
+ if (IS_ERR(keypam))
+ return PTR_ERR(keypam);
+
+- for (i = 0; i < joydev->nkey; i++) {
++ for (i = 0; i < (len / 2) && i < joydev->nkey; i++) {
+ if (keypam[i] > KEY_MAX || keypam[i] < BTN_MISC) {
+ retval = -EINVAL;
+ goto out;
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index 3d004ca76b6ed..e5f1e3cf9179f 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -305,6 +305,7 @@ static const struct xpad_device {
+ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 },
+ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 },
+ { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE },
++ { 0x20d6, 0x2009, "PowerA Enhanced Wired Controller for Xbox Series X|S", 0, XTYPE_XBOXONE },
+ { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 },
+ { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE },
+ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index b7dbcbac3a1a5..e7346c5f4738a 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -588,6 +588,11 @@ static const struct dmi_system_id i8042_dmi_noselftest_table[] = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_CHASSIS_TYPE, "10"), /* Notebook */
+ },
++ }, {
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
++ DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /* Convertible Notebook */
++ },
+ },
+ { }
+ };
+diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c
+index d6772a2c2d096..e396857cb4c1b 100644
+--- a/drivers/input/touchscreen/elo.c
++++ b/drivers/input/touchscreen/elo.c
+@@ -341,8 +341,10 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv)
+ switch (elo->id) {
+
+ case 0: /* 10-byte protocol */
+- if (elo_setup_10(elo))
++ if (elo_setup_10(elo)) {
++ err = -EIO;
+ goto fail3;
++ }
+
+ break;
+
+diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
+index fe245439adee0..2c67f8eacc7c5 100644
+--- a/drivers/input/touchscreen/raydium_i2c_ts.c
++++ b/drivers/input/touchscreen/raydium_i2c_ts.c
+@@ -410,6 +410,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
+ enum raydium_bl_ack state)
+ {
+ int error;
++ static const u8 cmd[] = { 0xFF, 0x39 };
+
+ error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len);
+ if (error) {
+@@ -418,7 +419,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
+ return error;
+ }
+
+- error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0);
++ error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd));
+ if (error) {
+ dev_err(&client->dev, "Ack obj command failed: %d\n", error);
+ return error;
+diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
+index 2e2ea5719c90e..902522df03592 100644
+--- a/drivers/input/touchscreen/sur40.c
++++ b/drivers/input/touchscreen/sur40.c
+@@ -774,6 +774,7 @@ static int sur40_probe(struct usb_interface *interface,
+ dev_err(&interface->dev,
+ "Unable to register video controls.");
+ v4l2_ctrl_handler_free(&sur40->hdl);
++ error = sur40->hdl.error;
+ goto err_unreg_v4l2;
+ }
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 859567ad3db4e..36de6f7ddf221 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -986,6 +986,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
+
+ extern struct workqueue_struct *bcache_wq;
+ extern struct workqueue_struct *bch_journal_wq;
++extern struct workqueue_struct *bch_flush_wq;
+ extern struct mutex bch_register_lock;
+ extern struct list_head bch_cache_sets;
+
+@@ -1027,5 +1028,7 @@ void bch_debug_exit(void);
+ void bch_debug_init(void);
+ void bch_request_exit(void);
+ int bch_request_init(void);
++void bch_btree_exit(void);
++int bch_btree_init(void);
+
+ #endif /* _BCACHE_H */
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index 8d06105fc9ff5..5a33910aea788 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -99,6 +99,8 @@
+ #define PTR_HASH(c, k) \
+ (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
+
++static struct workqueue_struct *btree_io_wq;
++
+ #define insert_lock(s, b) ((b)->level <= (s)->lock)
+
+ /*
+@@ -366,7 +368,7 @@ static void __btree_node_write_done(struct closure *cl)
+ btree_complete_write(b, w);
+
+ if (btree_node_dirty(b))
+- schedule_delayed_work(&b->work, 30 * HZ);
++ queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+
+ closure_return_with_destructor(cl, btree_node_write_unlock);
+ }
+@@ -539,7 +541,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
+ BUG_ON(!i->keys);
+
+ if (!btree_node_dirty(b))
+- schedule_delayed_work(&b->work, 30 * HZ);
++ queue_delayed_work(btree_io_wq, &b->work, 30 * HZ);
+
+ set_btree_node_dirty(b);
+
+@@ -2659,3 +2661,18 @@ void bch_keybuf_init(struct keybuf *buf)
+ spin_lock_init(&buf->lock);
+ array_allocator_init(&buf->freelist);
+ }
++
++void bch_btree_exit(void)
++{
++ if (btree_io_wq)
++ destroy_workqueue(btree_io_wq);
++}
++
++int __init bch_btree_init(void)
++{
++ btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0);
++ if (!btree_io_wq)
++ return -ENOMEM;
++
++ return 0;
++}
+diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
+index 8250d2d1d780c..b4fd923e0d401 100644
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -958,8 +958,8 @@ atomic_t *bch_journal(struct cache_set *c,
+ journal_try_write(c);
+ } else if (!w->dirty) {
+ w->dirty = true;
+- schedule_delayed_work(&c->journal.work,
+- msecs_to_jiffies(c->journal_delay_ms));
++ queue_delayed_work(bch_flush_wq, &c->journal.work,
++ msecs_to_jiffies(c->journal_delay_ms));
+ spin_unlock(&c->journal.lock);
+ } else {
+ spin_unlock(&c->journal.lock);
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 63f5ce18311bb..b0d569032dd4e 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -48,6 +48,7 @@ static int bcache_major;
+ static DEFINE_IDA(bcache_device_idx);
+ static wait_queue_head_t unregister_wait;
+ struct workqueue_struct *bcache_wq;
++struct workqueue_struct *bch_flush_wq;
+ struct workqueue_struct *bch_journal_wq;
+
+
+@@ -2652,6 +2653,9 @@ static void bcache_exit(void)
+ destroy_workqueue(bcache_wq);
+ if (bch_journal_wq)
+ destroy_workqueue(bch_journal_wq);
++ if (bch_flush_wq)
++ destroy_workqueue(bch_flush_wq);
++ bch_btree_exit();
+
+ if (bcache_major)
+ unregister_blkdev(bcache_major, "bcache");
+@@ -2707,10 +2711,26 @@ static int __init bcache_init(void)
+ return bcache_major;
+ }
+
++ if (bch_btree_init())
++ goto err;
++
+ bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
+ if (!bcache_wq)
+ goto err;
+
++ /*
++ * Let's not make this `WQ_MEM_RECLAIM` for the following reasons:
++ *
++ * 1. It used `system_wq` before, which also does no memory reclaim.
++ * 2. With `WQ_MEM_RECLAIM`, desktop stalls, increased boot times, and
++ * reduced throughput can be observed.
++ *
++ * We still want to use our own queue to not congest the `system_wq`.
++ */
++ bch_flush_wq = alloc_workqueue("bch_flush", 0, 0);
++ if (!bch_flush_wq)
++ goto err;
++
+ bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+ if (!bch_journal_wq)
+ goto err;
+diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
+index c4ef1fceead6e..3fea121fcbcf9 100644
+--- a/drivers/md/dm-core.h
++++ b/drivers/md/dm-core.h
+@@ -106,6 +106,10 @@ struct mapped_device {
+
+ struct block_device *bdev;
+
++ int swap_bios;
++ struct semaphore swap_bios_semaphore;
++ struct mutex swap_bios_lock;
++
+ struct dm_stats stats;
+
+ /* for blk-mq request-based DM support */
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index 1af82fbbac0c4..d85648b9c247a 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -2737,6 +2737,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+ wake_up_process(cc->write_thread);
+
+ ti->num_flush_bios = 1;
++ ti->limit_swap_bios = true;
+
+ return 0;
+
+diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
+index bdb84b8e71621..6b0b3a13ab4a2 100644
+--- a/drivers/md/dm-era-target.c
++++ b/drivers/md/dm-era-target.c
+@@ -47,6 +47,7 @@ struct writeset {
+ static void writeset_free(struct writeset *ws)
+ {
+ vfree(ws->bits);
++ ws->bits = NULL;
+ }
+
+ static int setup_on_disk_bitset(struct dm_disk_bitset *info,
+@@ -71,8 +72,6 @@ static size_t bitset_size(unsigned nr_bits)
+ */
+ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
+ {
+- ws->md.nr_bits = nr_blocks;
+- ws->md.root = INVALID_WRITESET_ROOT;
+ ws->bits = vzalloc(bitset_size(nr_blocks));
+ if (!ws->bits) {
+ DMERR("%s: couldn't allocate in memory bitset", __func__);
+@@ -85,12 +84,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
+ /*
+ * Wipes the in-core bitset, and creates a new on disk bitset.
+ */
+-static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws)
++static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
++ dm_block_t nr_blocks)
+ {
+ int r;
+
+- memset(ws->bits, 0, bitset_size(ws->md.nr_bits));
++ memset(ws->bits, 0, bitset_size(nr_blocks));
+
++ ws->md.nr_bits = nr_blocks;
+ r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
+ if (r) {
+ DMERR("%s: setup_on_disk_bitset failed", __func__);
+@@ -134,7 +135,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info,
+ {
+ int r;
+
+- if (!test_and_set_bit(block, ws->bits)) {
++ if (!test_bit(block, ws->bits)) {
+ r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
+ if (r) {
+ /* FIXME: fail mode */
+@@ -388,7 +389,7 @@ static void ws_dec(void *context, const void *value)
+
+ static int ws_eq(void *context, const void *value1, const void *value2)
+ {
+- return !memcmp(value1, value2, sizeof(struct writeset_metadata));
++ return !memcmp(value1, value2, sizeof(struct writeset_disk));
+ }
+
+ /*----------------------------------------------------------------*/
+@@ -564,6 +565,15 @@ static int open_metadata(struct era_metadata *md)
+ }
+
+ disk = dm_block_data(sblock);
++
++ /* Verify the data block size hasn't changed */
++ if (le32_to_cpu(disk->data_block_size) != md->block_size) {
++ DMERR("changing the data block size (from %u to %llu) is not supported",
++ le32_to_cpu(disk->data_block_size), md->block_size);
++ r = -EINVAL;
++ goto bad;
++ }
++
+ r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
+ disk->metadata_space_map_root,
+ sizeof(disk->metadata_space_map_root),
+@@ -575,10 +585,10 @@ static int open_metadata(struct era_metadata *md)
+
+ setup_infos(md);
+
+- md->block_size = le32_to_cpu(disk->data_block_size);
+ md->nr_blocks = le32_to_cpu(disk->nr_blocks);
+ md->current_era = le32_to_cpu(disk->current_era);
+
++ ws_unpack(&disk->current_writeset, &md->current_writeset->md);
+ md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
+ md->era_array_root = le64_to_cpu(disk->era_array_root);
+ md->metadata_snap = le64_to_cpu(disk->metadata_snap);
+@@ -746,6 +756,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md,
+ ws_unpack(&disk, &d->writeset);
+ d->value = cpu_to_le32(key);
+
++ /*
++ * We initialise another bitset info to avoid any caching side effects
++ * with the previous one.
++ */
++ dm_disk_bitset_init(md->tm, &d->info);
++
+ d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
+ d->current_bit = 0;
+ d->step = metadata_digest_transcribe_writeset;
+@@ -759,12 +775,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d)
+ return 0;
+
+ memset(d, 0, sizeof(*d));
+-
+- /*
+- * We initialise another bitset info to avoid any caching side
+- * effects with the previous one.
+- */
+- dm_disk_bitset_init(md->tm, &d->info);
+ d->step = metadata_digest_lookup_writeset;
+
+ return 0;
+@@ -802,6 +812,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev,
+
+ static void metadata_close(struct era_metadata *md)
+ {
++ writeset_free(&md->writesets[0]);
++ writeset_free(&md->writesets[1]);
+ destroy_persistent_data_objects(md);
+ kfree(md);
+ }
+@@ -839,6 +851,7 @@ static int metadata_resize(struct era_metadata *md, void *arg)
+ r = writeset_alloc(&md->writesets[1], *new_size);
+ if (r) {
+ DMERR("%s: writeset_alloc failed for writeset 1", __func__);
++ writeset_free(&md->writesets[0]);
+ return r;
+ }
+
+@@ -849,6 +862,8 @@ static int metadata_resize(struct era_metadata *md, void *arg)
+ &value, &md->era_array_root);
+ if (r) {
+ DMERR("%s: dm_array_resize failed", __func__);
++ writeset_free(&md->writesets[0]);
++ writeset_free(&md->writesets[1]);
+ return r;
+ }
+
+@@ -870,7 +885,6 @@ static int metadata_era_archive(struct era_metadata *md)
+ }
+
+ ws_pack(&md->current_writeset->md, &value);
+- md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+
+ keys[0] = md->current_era;
+ __dm_bless_for_disk(&value);
+@@ -882,6 +896,7 @@ static int metadata_era_archive(struct era_metadata *md)
+ return r;
+ }
+
++ md->current_writeset->md.root = INVALID_WRITESET_ROOT;
+ md->archived_writesets = true;
+
+ return 0;
+@@ -898,7 +913,7 @@ static int metadata_new_era(struct era_metadata *md)
+ int r;
+ struct writeset *new_writeset = next_writeset(md);
+
+- r = writeset_init(&md->bitset_info, new_writeset);
++ r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
+ if (r) {
+ DMERR("%s: writeset_init failed", __func__);
+ return r;
+@@ -951,7 +966,7 @@ static int metadata_commit(struct era_metadata *md)
+ int r;
+ struct dm_block *sblock;
+
+- if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) {
++ if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
+ r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
+ &md->current_writeset->md.root);
+ if (r) {
+@@ -1226,8 +1241,10 @@ static void process_deferred_bios(struct era *era)
+ int r;
+ struct bio_list deferred_bios, marked_bios;
+ struct bio *bio;
++ struct blk_plug plug;
+ bool commit_needed = false;
+ bool failed = false;
++ struct writeset *ws = era->md->current_writeset;
+
+ bio_list_init(&deferred_bios);
+ bio_list_init(&marked_bios);
+@@ -1237,9 +1254,11 @@ static void process_deferred_bios(struct era *era)
+ bio_list_init(&era->deferred_bios);
+ spin_unlock(&era->deferred_lock);
+
++ if (bio_list_empty(&deferred_bios))
++ return;
++
+ while ((bio = bio_list_pop(&deferred_bios))) {
+- r = writeset_test_and_set(&era->md->bitset_info,
+- era->md->current_writeset,
++ r = writeset_test_and_set(&era->md->bitset_info, ws,
+ get_block(era, bio));
+ if (r < 0) {
+ /*
+@@ -1247,7 +1266,6 @@ static void process_deferred_bios(struct era *era)
+ * FIXME: finish.
+ */
+ failed = true;
+-
+ } else if (r == 0)
+ commit_needed = true;
+
+@@ -1263,9 +1281,19 @@ static void process_deferred_bios(struct era *era)
+ if (failed)
+ while ((bio = bio_list_pop(&marked_bios)))
+ bio_io_error(bio);
+- else
+- while ((bio = bio_list_pop(&marked_bios)))
++ else {
++ blk_start_plug(&plug);
++ while ((bio = bio_list_pop(&marked_bios))) {
++ /*
++ * Only update the in-core writeset if the on-disk one
++ * was updated too.
++ */
++ if (commit_needed)
++ set_bit(get_block(era, bio), ws->bits);
+ generic_make_request(bio);
++ }
++ blk_finish_plug(&plug);
++ }
+ }
+
+ static void process_rpc_calls(struct era *era)
+@@ -1486,15 +1514,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
+ }
+ era->md = md;
+
+- era->nr_blocks = calc_nr_blocks(era);
+-
+- r = metadata_resize(era->md, &era->nr_blocks);
+- if (r) {
+- ti->error = "couldn't resize metadata";
+- era_destroy(era);
+- return -ENOMEM;
+- }
+-
+ era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
+ if (!era->wq) {
+ ti->error = "could not create workqueue for metadata object";
+@@ -1571,16 +1590,24 @@ static int era_preresume(struct dm_target *ti)
+ dm_block_t new_size = calc_nr_blocks(era);
+
+ if (era->nr_blocks != new_size) {
+- r = in_worker1(era, metadata_resize, &new_size);
+- if (r)
++ r = metadata_resize(era->md, &new_size);
++ if (r) {
++ DMERR("%s: metadata_resize failed", __func__);
++ return r;
++ }
++
++ r = metadata_commit(era->md);
++ if (r) {
++ DMERR("%s: metadata_commit failed", __func__);
+ return r;
++ }
+
+ era->nr_blocks = new_size;
+ }
+
+ start_worker(era);
+
+- r = in_worker0(era, metadata_new_era);
++ r = in_worker0(era, metadata_era_rollover);
+ if (r) {
+ DMERR("%s: metadata_era_rollover failed", __func__);
+ return r;
+diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
+index 08ae59a6e8734..4c2971835d330 100644
+--- a/drivers/md/dm-writecache.c
++++ b/drivers/md/dm-writecache.c
+@@ -142,6 +142,7 @@ struct dm_writecache {
+ size_t metadata_sectors;
+ size_t n_blocks;
+ uint64_t seq_count;
++ sector_t data_device_sectors;
+ void *block_start;
+ struct wc_entry *entries;
+ unsigned block_size;
+@@ -918,6 +919,8 @@ static void writecache_resume(struct dm_target *ti)
+
+ wc_lock(wc);
+
++ wc->data_device_sectors = i_size_read(wc->dev->bdev->bd_inode) >> SECTOR_SHIFT;
++
+ if (WC_MODE_PMEM(wc)) {
+ persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+ } else {
+@@ -1488,6 +1491,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t
+ void *address = memory_data(wc, e);
+
+ persistent_memory_flush_cache(address, block_size);
++
++ if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors))
++ return true;
++
+ return bio_add_page(&wb->bio, persistent_memory_page(address),
+ block_size, persistent_memory_page_offset(address)) != 0;
+ }
+@@ -1559,6 +1566,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
+ if (writecache_has_error(wc)) {
+ bio->bi_status = BLK_STS_IOERR;
+ bio_endio(bio);
++ } else if (unlikely(!bio_sectors(bio))) {
++ bio->bi_status = BLK_STS_OK;
++ bio_endio(bio);
+ } else {
+ submit_bio(bio);
+ }
+@@ -1602,6 +1612,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac
+ e = f;
+ }
+
++ if (unlikely(to.sector + to.count > wc->data_device_sectors)) {
++ if (to.sector >= wc->data_device_sectors) {
++ writecache_copy_endio(0, 0, c);
++ continue;
++ }
++ from.count = to.count = wc->data_device_sectors - to.sector;
++ }
++
+ dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
+
+ __writeback_throttle(wc, wbl);
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index c6ce42daff27b..de32f8553735f 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -146,6 +146,16 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
+ #define DM_NUMA_NODE NUMA_NO_NODE
+ static int dm_numa_node = DM_NUMA_NODE;
+
++#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
++static int swap_bios = DEFAULT_SWAP_BIOS;
++static int get_swap_bios(void)
++{
++ int latch = READ_ONCE(swap_bios);
++ if (unlikely(latch <= 0))
++ latch = DEFAULT_SWAP_BIOS;
++ return latch;
++}
++
+ /*
+ * For mempools pre-allocation at the table loading time.
+ */
+@@ -972,6 +982,11 @@ void disable_write_zeroes(struct mapped_device *md)
+ limits->max_write_zeroes_sectors = 0;
+ }
+
++static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
++{
++ return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
++}
++
+ static void clone_endio(struct bio *bio)
+ {
+ blk_status_t error = bio->bi_status;
+@@ -1009,6 +1024,11 @@ static void clone_endio(struct bio *bio)
+ }
+ }
+
++ if (unlikely(swap_bios_limit(tio->ti, bio))) {
++ struct mapped_device *md = io->md;
++ up(&md->swap_bios_semaphore);
++ }
++
+ free_tio(tio);
+ dec_pending(io, error);
+ }
+@@ -1263,6 +1283,22 @@ void dm_remap_zone_report(struct dm_target *ti, sector_t start,
+ }
+ EXPORT_SYMBOL_GPL(dm_remap_zone_report);
+
++static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
++{
++ mutex_lock(&md->swap_bios_lock);
++ while (latch < md->swap_bios) {
++ cond_resched();
++ down(&md->swap_bios_semaphore);
++ md->swap_bios--;
++ }
++ while (latch > md->swap_bios) {
++ cond_resched();
++ up(&md->swap_bios_semaphore);
++ md->swap_bios++;
++ }
++ mutex_unlock(&md->swap_bios_lock);
++}
++
+ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ {
+ int r;
+@@ -1283,6 +1319,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ atomic_inc(&io->io_count);
+ sector = clone->bi_iter.bi_sector;
+
++ if (unlikely(swap_bios_limit(ti, clone))) {
++ struct mapped_device *md = io->md;
++ int latch = get_swap_bios();
++ if (unlikely(latch != md->swap_bios))
++ __set_swap_bios_limit(md, latch);
++ down(&md->swap_bios_semaphore);
++ }
++
+ r = ti->type->map(ti, clone);
+ switch (r) {
+ case DM_MAPIO_SUBMITTED:
+@@ -1297,10 +1341,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
+ ret = generic_make_request(clone);
+ break;
+ case DM_MAPIO_KILL:
++ if (unlikely(swap_bios_limit(ti, clone))) {
++ struct mapped_device *md = io->md;
++ up(&md->swap_bios_semaphore);
++ }
+ free_tio(tio);
+ dec_pending(io, BLK_STS_IOERR);
+ break;
+ case DM_MAPIO_REQUEUE:
++ if (unlikely(swap_bios_limit(ti, clone))) {
++ struct mapped_device *md = io->md;
++ up(&md->swap_bios_semaphore);
++ }
+ free_tio(tio);
+ dec_pending(io, BLK_STS_DM_REQUEUE);
+ break;
+@@ -1894,6 +1946,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
+ mutex_destroy(&md->suspend_lock);
+ mutex_destroy(&md->type_lock);
+ mutex_destroy(&md->table_devices_lock);
++ mutex_destroy(&md->swap_bios_lock);
+
+ dm_mq_cleanup_mapped_device(md);
+ }
+@@ -1963,6 +2016,10 @@ static struct mapped_device *alloc_dev(int minor)
+ init_waitqueue_head(&md->eventq);
+ init_completion(&md->kobj_holder.completion);
+
++ md->swap_bios = get_swap_bios();
++ sema_init(&md->swap_bios_semaphore, md->swap_bios);
++ mutex_init(&md->swap_bios_lock);
++
+ md->disk->major = _major;
+ md->disk->first_minor = minor;
+ md->disk->fops = &dm_blk_dops;
+@@ -3245,6 +3302,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
+ module_param(dm_numa_node, int, S_IRUGO | S_IWUSR);
+ MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
+
++module_param(swap_bios, int, S_IRUGO | S_IWUSR);
++MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
++
+ MODULE_DESCRIPTION(DM_NAME " driver");
+ MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
+ MODULE_LICENSE("GPL");
+diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
+index 041fcbb4eebdf..79e608dba4b6d 100644
+--- a/drivers/media/i2c/ov5670.c
++++ b/drivers/media/i2c/ov5670.c
+@@ -2081,7 +2081,8 @@ static int ov5670_init_controls(struct ov5670 *ov5670)
+
+ /* By default, V4L2_CID_PIXEL_RATE is read only */
+ ov5670->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov5670_ctrl_ops,
+- V4L2_CID_PIXEL_RATE, 0,
++ V4L2_CID_PIXEL_RATE,
++ link_freq_configs[0].pixel_rate,
+ link_freq_configs[0].pixel_rate,
+ 1,
+ link_freq_configs[0].pixel_rate);
+diff --git a/drivers/media/pci/cx25821/cx25821-core.c b/drivers/media/pci/cx25821/cx25821-core.c
+index 41be22ce66f3e..44839a6461e88 100644
+--- a/drivers/media/pci/cx25821/cx25821-core.c
++++ b/drivers/media/pci/cx25821/cx25821-core.c
+@@ -976,8 +976,10 @@ int cx25821_riscmem_alloc(struct pci_dev *pci,
+ __le32 *cpu;
+ dma_addr_t dma = 0;
+
+- if (NULL != risc->cpu && risc->size < size)
++ if (risc->cpu && risc->size < size) {
+ pci_free_consistent(pci, risc->size, risc->cpu, risc->dma);
++ risc->cpu = NULL;
++ }
+ if (NULL == risc->cpu) {
+ cpu = pci_zalloc_consistent(pci, size, &dma);
+ if (NULL == cpu)
+diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+index 253f05aef3b1f..7808ec1052bf6 100644
+--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c
++++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c
+@@ -1288,7 +1288,7 @@ static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
+ fmt->format.code = formats[0].mbus_code;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+- if (formats[i].mbus_code == fmt->format.code) {
++ if (formats[i].mbus_code == mbus_code) {
+ fmt->format.code = mbus_code;
+ break;
+ }
+diff --git a/drivers/media/pci/saa7134/saa7134-empress.c b/drivers/media/pci/saa7134/saa7134-empress.c
+index cb65d345fd3e9..e2666d1c68964 100644
+--- a/drivers/media/pci/saa7134/saa7134-empress.c
++++ b/drivers/media/pci/saa7134/saa7134-empress.c
+@@ -282,8 +282,11 @@ static int empress_init(struct saa7134_dev *dev)
+ q->lock = &dev->lock;
+ q->dev = &dev->pci->dev;
+ err = vb2_queue_init(q);
+- if (err)
++ if (err) {
++ video_device_release(dev->empress_dev);
++ dev->empress_dev = NULL;
+ return err;
++ }
+ dev->empress_dev->queue = q;
+ dev->empress_dev->device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING |
+ V4L2_CAP_VIDEO_CAPTURE;
+diff --git a/drivers/media/pci/smipcie/smipcie-ir.c b/drivers/media/pci/smipcie/smipcie-ir.c
+index 9445d792bfc98..731aa702e2b79 100644
+--- a/drivers/media/pci/smipcie/smipcie-ir.c
++++ b/drivers/media/pci/smipcie/smipcie-ir.c
+@@ -60,39 +60,45 @@ static void smi_ir_decode(struct smi_rc *ir)
+ {
+ struct smi_dev *dev = ir->dev;
+ struct rc_dev *rc_dev = ir->rc_dev;
+- u32 dwIRControl, dwIRData;
+- u8 index, ucIRCount, readLoop;
++ u32 control, data;
++ u8 index, ir_count, read_loop;
+
+- dwIRControl = smi_read(IR_Init_Reg);
++ control = smi_read(IR_Init_Reg);
+
+- if (dwIRControl & rbIRVld) {
+- ucIRCount = (u8) smi_read(IR_Data_Cnt);
++ dev_dbg(&rc_dev->dev, "ircontrol: 0x%08x\n", control);
+
+- readLoop = ucIRCount/4;
+- if (ucIRCount % 4)
+- readLoop += 1;
+- for (index = 0; index < readLoop; index++) {
+- dwIRData = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
++ if (control & rbIRVld) {
++ ir_count = (u8)smi_read(IR_Data_Cnt);
+
+- ir->irData[index*4 + 0] = (u8)(dwIRData);
+- ir->irData[index*4 + 1] = (u8)(dwIRData >> 8);
+- ir->irData[index*4 + 2] = (u8)(dwIRData >> 16);
+- ir->irData[index*4 + 3] = (u8)(dwIRData >> 24);
++ dev_dbg(&rc_dev->dev, "ircount %d\n", ir_count);
++
++ read_loop = ir_count / 4;
++ if (ir_count % 4)
++ read_loop += 1;
++ for (index = 0; index < read_loop; index++) {
++ data = smi_read(IR_DATA_BUFFER_BASE + (index * 4));
++ dev_dbg(&rc_dev->dev, "IRData 0x%08x\n", data);
++
++ ir->irData[index * 4 + 0] = (u8)(data);
++ ir->irData[index * 4 + 1] = (u8)(data >> 8);
++ ir->irData[index * 4 + 2] = (u8)(data >> 16);
++ ir->irData[index * 4 + 3] = (u8)(data >> 24);
+ }
+- smi_raw_process(rc_dev, ir->irData, ucIRCount);
+- smi_set(IR_Init_Reg, rbIRVld);
++ smi_raw_process(rc_dev, ir->irData, ir_count);
+ }
+
+- if (dwIRControl & rbIRhighidle) {
++ if (control & rbIRhighidle) {
+ struct ir_raw_event rawir = {};
+
++ dev_dbg(&rc_dev->dev, "high idle\n");
++
+ rawir.pulse = 0;
+ rawir.duration = US_TO_NS(SMI_SAMPLE_PERIOD *
+ SMI_SAMPLE_IDLEMIN);
+ ir_raw_event_store_with_filter(rc_dev, &rawir);
+- smi_set(IR_Init_Reg, rbIRhighidle);
+ }
+
++ smi_set(IR_Init_Reg, rbIRVld);
+ ir_raw_event_handle(rc_dev);
+ }
+
+@@ -151,7 +157,7 @@ int smi_ir_init(struct smi_dev *dev)
+ rc_dev->dev.parent = &dev->pci_dev->dev;
+
+ rc_dev->map_name = dev->info->rc_map;
+- rc_dev->timeout = MS_TO_NS(100);
++ rc_dev->timeout = US_TO_NS(SMI_SAMPLE_PERIOD * SMI_SAMPLE_IDLEMIN);
+ rc_dev->rx_resolution = US_TO_NS(SMI_SAMPLE_PERIOD);
+
+ ir->rc_dev = rc_dev;
+@@ -174,7 +180,7 @@ void smi_ir_exit(struct smi_dev *dev)
+ struct smi_rc *ir = &dev->ir;
+ struct rc_dev *rc_dev = ir->rc_dev;
+
+- smi_ir_stop(ir);
+ rc_unregister_device(rc_dev);
++ smi_ir_stop(ir);
+ ir->rc_dev = NULL;
+ }
+diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
+index 4eaaf39b9223c..e0299a7899231 100644
+--- a/drivers/media/platform/aspeed-video.c
++++ b/drivers/media/platform/aspeed-video.c
+@@ -1529,12 +1529,12 @@ static int aspeed_video_setup_video(struct aspeed_video *video)
+ V4L2_JPEG_CHROMA_SUBSAMPLING_420, mask,
+ V4L2_JPEG_CHROMA_SUBSAMPLING_444);
+
+- if (video->ctrl_handler.error) {
++ rc = video->ctrl_handler.error;
++ if (rc) {
+ v4l2_ctrl_handler_free(&video->ctrl_handler);
+ v4l2_device_unregister(v4l2_dev);
+
+- dev_err(video->dev, "Failed to init controls: %d\n",
+- video->ctrl_handler.error);
++ dev_err(video->dev, "Failed to init controls: %d\n", rc);
+ return rc;
+ }
+
+diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
+index 8d47ea0c33f84..6e04e3ec61bac 100644
+--- a/drivers/media/platform/pxa_camera.c
++++ b/drivers/media/platform/pxa_camera.c
+@@ -1447,6 +1447,9 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb)
+ struct pxa_camera_dev *pcdev = vb2_get_drv_priv(vb->vb2_queue);
+ struct pxa_buffer *buf = vb2_to_pxa_buffer(vb);
+ int ret = 0;
++#ifdef DEBUG
++ int i;
++#endif
+
+ switch (pcdev->channels) {
+ case 1:
+diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c
+index 1d50dfbbb762e..4c2675b437181 100644
+--- a/drivers/media/platform/qcom/camss/camss-video.c
++++ b/drivers/media/platform/qcom/camss/camss-video.c
+@@ -901,6 +901,7 @@ int msm_video_register(struct camss_video *video, struct v4l2_device *v4l2_dev,
+ video->nformats = ARRAY_SIZE(formats_rdi_8x96);
+ }
+ } else {
++ ret = -EINVAL;
+ goto error_video_register;
+ }
+
+diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
+index dc62533cf32ce..aa66e4f5f3f34 100644
+--- a/drivers/media/platform/vsp1/vsp1_drv.c
++++ b/drivers/media/platform/vsp1/vsp1_drv.c
+@@ -882,8 +882,10 @@ static int vsp1_probe(struct platform_device *pdev)
+ }
+
+ done:
+- if (ret)
++ if (ret) {
+ pm_runtime_disable(&pdev->dev);
++ rcar_fcp_put(vsp1->fcp);
++ }
+
+ return ret;
+ }
+diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
+index f9616158bcf44..867f5fb6fbe11 100644
+--- a/drivers/media/rc/mceusb.c
++++ b/drivers/media/rc/mceusb.c
+@@ -1169,7 +1169,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, u8 *buf_in)
+ switch (subcmd) {
+ /* the one and only 5-byte return value command */
+ case MCE_RSP_GETPORTSTATUS:
+- if (buf_in[5] == 0)
++ if (buf_in[5] == 0 && *hi < 8)
+ ir->txports_cabled |= 1 << *hi;
+ break;
+
+diff --git a/drivers/media/tuners/qm1d1c0042.c b/drivers/media/tuners/qm1d1c0042.c
+index 83ca5dc047ea2..baa9950783b66 100644
+--- a/drivers/media/tuners/qm1d1c0042.c
++++ b/drivers/media/tuners/qm1d1c0042.c
+@@ -343,8 +343,10 @@ static int qm1d1c0042_init(struct dvb_frontend *fe)
+ if (val == reg_initval[reg_index][0x00])
+ break;
+ }
+- if (reg_index >= QM1D1C0042_NUM_REG_ROWS)
++ if (reg_index >= QM1D1C0042_NUM_REG_ROWS) {
++ ret = -EINVAL;
+ goto failed;
++ }
+ memcpy(state->regs, reg_initval[reg_index], QM1D1C0042_NUM_REGS);
+ usleep_range(2000, 3000);
+
+diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+index 62d3566bf7eeb..5ac1a6af87826 100644
+--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
++++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
+@@ -391,7 +391,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
+ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
+
+ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+- lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
++ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa);
+
+ usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
+ info("INT Interrupt Service Started");
+diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
+index e6088b5d1b805..3daa64bb1e1d9 100644
+--- a/drivers/media/usb/em28xx/em28xx-core.c
++++ b/drivers/media/usb/em28xx/em28xx-core.c
+@@ -956,14 +956,10 @@ int em28xx_alloc_urbs(struct em28xx *dev, enum em28xx_mode mode, int xfer_bulk,
+
+ usb_bufs->buf[i] = kzalloc(sb_size, GFP_KERNEL);
+ if (!usb_bufs->buf[i]) {
+- em28xx_uninit_usb_xfer(dev, mode);
+-
+ for (i--; i >= 0; i--)
+ kfree(usb_bufs->buf[i]);
+
+- kfree(usb_bufs->buf);
+- usb_bufs->buf = NULL;
+-
++ em28xx_uninit_usb_xfer(dev, mode);
+ return -ENOMEM;
+ }
+
+diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
+index 19c90fa9e443d..293a460f4616c 100644
+--- a/drivers/media/usb/tm6000/tm6000-dvb.c
++++ b/drivers/media/usb/tm6000/tm6000-dvb.c
+@@ -141,6 +141,10 @@ static int tm6000_start_stream(struct tm6000_core *dev)
+ if (ret < 0) {
+ printk(KERN_ERR "tm6000: error %i in %s during pipe reset\n",
+ ret, __func__);
++
++ kfree(dvb->bulk_urb->transfer_buffer);
++ usb_free_urb(dvb->bulk_urb);
++ dvb->bulk_urb = NULL;
+ return ret;
+ } else
+ printk(KERN_ERR "tm6000: pipe reset\n");
+diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
+index 5e6f3153b5ff8..7d60dd3b0bd85 100644
+--- a/drivers/media/usb/uvc/uvc_v4l2.c
++++ b/drivers/media/usb/uvc/uvc_v4l2.c
+@@ -248,7 +248,9 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ goto done;
+
+ /* After the probe, update fmt with the values returned from
+- * negotiation with the device.
++ * negotiation with the device. Some devices return invalid bFormatIndex
++ * and bFrameIndex values, in which case we can only assume they have
++ * accepted the requested format as-is.
+ */
+ for (i = 0; i < stream->nformats; ++i) {
+ if (probe->bFormatIndex == stream->format[i].index) {
+@@ -257,11 +259,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ }
+ }
+
+- if (i == stream->nformats) {
+- uvc_trace(UVC_TRACE_FORMAT, "Unknown bFormatIndex %u\n",
++ if (i == stream->nformats)
++ uvc_trace(UVC_TRACE_FORMAT,
++ "Unknown bFormatIndex %u, using default\n",
+ probe->bFormatIndex);
+- return -EINVAL;
+- }
+
+ for (i = 0; i < format->nframes; ++i) {
+ if (probe->bFrameIndex == format->frame[i].bFrameIndex) {
+@@ -270,11 +271,10 @@ static int uvc_v4l2_try_format(struct uvc_streaming *stream,
+ }
+ }
+
+- if (i == format->nframes) {
+- uvc_trace(UVC_TRACE_FORMAT, "Unknown bFrameIndex %u\n",
++ if (i == format->nframes)
++ uvc_trace(UVC_TRACE_FORMAT,
++ "Unknown bFrameIndex %u, using default\n",
+ probe->bFrameIndex);
+- return -EINVAL;
+- }
+
+ fmt->fmt.pix.width = frame->wWidth;
+ fmt->fmt.pix.height = frame->wHeight;
+diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
+index a113e811faabe..da1ce7fd4cf5c 100644
+--- a/drivers/memory/mtk-smi.c
++++ b/drivers/memory/mtk-smi.c
+@@ -127,7 +127,7 @@ static void mtk_smi_clk_disable(const struct mtk_smi *smi)
+
+ int mtk_smi_larb_get(struct device *larbdev)
+ {
+- int ret = pm_runtime_get_sync(larbdev);
++ int ret = pm_runtime_resume_and_get(larbdev);
+
+ return (ret < 0) ? ret : 0;
+ }
+@@ -336,7 +336,7 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
+ int ret;
+
+ /* Power on smi-common. */
+- ret = pm_runtime_get_sync(larb->smi_common_dev);
++ ret = pm_runtime_resume_and_get(larb->smi_common_dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to pm get for smi-common(%d).\n", ret);
+ return ret;
+diff --git a/drivers/memory/ti-aemif.c b/drivers/memory/ti-aemif.c
+index db526dbf71eed..94219d2a2773d 100644
+--- a/drivers/memory/ti-aemif.c
++++ b/drivers/memory/ti-aemif.c
+@@ -378,8 +378,10 @@ static int aemif_probe(struct platform_device *pdev)
+ */
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_aemif_parse_abus_config(pdev, child_np);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(child_np);
+ goto error;
++ }
+ }
+ } else if (pdata && pdata->num_abus_data > 0) {
+ for (i = 0; i < pdata->num_abus_data; i++, aemif->num_cs++) {
+@@ -405,8 +407,10 @@ static int aemif_probe(struct platform_device *pdev)
+ for_each_available_child_of_node(np, child_np) {
+ ret = of_platform_populate(child_np, NULL,
+ dev_lookup, dev);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(child_np);
+ goto error;
++ }
+ }
+ } else if (pdata) {
+ for (i = 0; i < pdata->num_sub_devices; i++) {
+diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
+index fab3cdc27ed64..19d57a45134c6 100644
+--- a/drivers/mfd/bd9571mwv.c
++++ b/drivers/mfd/bd9571mwv.c
+@@ -185,9 +185,9 @@ static int bd9571mwv_probe(struct i2c_client *client,
+ return ret;
+ }
+
+- ret = mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO, bd9571mwv_cells,
+- ARRAY_SIZE(bd9571mwv_cells), NULL, 0,
+- regmap_irq_get_domain(bd->irq_data));
++ ret = devm_mfd_add_devices(bd->dev, PLATFORM_DEVID_AUTO,
++ bd9571mwv_cells, ARRAY_SIZE(bd9571mwv_cells),
++ NULL, 0, regmap_irq_get_domain(bd->irq_data));
+ if (ret) {
+ regmap_del_irq_chip(bd->irq, bd->irq_data);
+ return ret;
+diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
+index 8a7cc0f86958b..65b98f3fbd929 100644
+--- a/drivers/mfd/wm831x-auxadc.c
++++ b/drivers/mfd/wm831x-auxadc.c
+@@ -93,11 +93,10 @@ static int wm831x_auxadc_read_irq(struct wm831x *wm831x,
+ wait_for_completion_timeout(&req->done, msecs_to_jiffies(500));
+
+ mutex_lock(&wm831x->auxadc_lock);
+-
+- list_del(&req->list);
+ ret = req->val;
+
+ out:
++ list_del(&req->list);
+ mutex_unlock(&wm831x->auxadc_lock);
+
+ kfree(req);
+diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
+index 3a9467aaa4356..c3e3907a0c2f1 100644
+--- a/drivers/misc/cardreader/rts5227.c
++++ b/drivers/misc/cardreader/rts5227.c
+@@ -338,6 +338,11 @@ static int rts522a_extra_init_hw(struct rtsx_pcr *pcr)
+ {
+ rts5227_extra_init_hw(pcr);
+
++ /* Power down OCP to reduce power consumption */
++ if (!pcr->card_exist)
++ rtsx_pci_write_register(pcr, FPDCTL, OC_POWER_DOWN,
++ OC_POWER_DOWN);
++
+ rtsx_pci_write_register(pcr, FUNC_FORCE_CTL, FUNC_FORCE_UPME_XMT_DBG,
+ FUNC_FORCE_UPME_XMT_DBG);
+ rtsx_pci_write_register(pcr, PCLK_CTL, 0x04, 0x04);
+diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
+index 94cfb675fe4ed..414dcbd3c3c25 100644
+--- a/drivers/misc/eeprom/eeprom_93xx46.c
++++ b/drivers/misc/eeprom/eeprom_93xx46.c
+@@ -511,3 +511,4 @@ MODULE_LICENSE("GPL");
+ MODULE_DESCRIPTION("Driver for 93xx46 EEPROMs");
+ MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
+ MODULE_ALIAS("spi:93xx46");
++MODULE_ALIAS("spi:eeprom-93xx46");
+diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
+index a44094cdbc36c..d20b2b99c6f24 100644
+--- a/drivers/misc/mei/hbm.c
++++ b/drivers/misc/mei/hbm.c
+@@ -1300,7 +1300,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
+ return -EPROTO;
+ }
+
+- dev->dev_state = MEI_DEV_POWER_DOWN;
++ mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
+ dev_info(dev->dev, "hbm: stop response: resetting.\n");
+ /* force the reset */
+ return -EPROTO;
+diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+index c49065887e8f5..c2338750313c4 100644
+--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
++++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
+@@ -537,6 +537,9 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
+
+ queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
+
++ if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)
++ return NULL;
++
+ queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
+ if (queue) {
+ queue->q_header = NULL;
+@@ -630,7 +633,7 @@ static void qp_release_pages(struct page **pages,
+
+ for (i = 0; i < num_pages; i++) {
+ if (dirty)
+- set_page_dirty(pages[i]);
++ set_page_dirty_lock(pages[i]);
+
+ put_page(pages[i]);
+ pages[i] = NULL;
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+index cb89f0578d425..f54d0427e9c00 100644
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -186,8 +186,8 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
+ mmc_get_dma_dir(data)))
+ goto force_pio;
+
+- /* This DMAC cannot handle if buffer is not 8-bytes alignment */
+- if (!IS_ALIGNED(sg_dma_address(sg), 8))
++ /* This DMAC cannot handle buffers that are not 128-byte aligned */
++ if (!IS_ALIGNED(sg_dma_address(sg), 128))
+ goto force_pio_with_unmap;
+
+ if (data->flags & MMC_DATA_READ) {
+diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
+index b03d652226225..771676209005b 100644
+--- a/drivers/mmc/host/sdhci-esdhc-imx.c
++++ b/drivers/mmc/host/sdhci-esdhc-imx.c
+@@ -1589,9 +1589,10 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
+- int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
++ int dead;
+
+ pm_runtime_get_sync(&pdev->dev);
++ dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
+index d07b9793380f0..10705e5fa90ee 100644
+--- a/drivers/mmc/host/sdhci-sprd.c
++++ b/drivers/mmc/host/sdhci-sprd.c
+@@ -665,14 +665,14 @@ static int sdhci_sprd_remove(struct platform_device *pdev)
+ {
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+ struct sdhci_sprd_host *sprd_host = TO_SPRD_HOST(host);
+- struct mmc_host *mmc = host->mmc;
+
+- mmc_remove_host(mmc);
++ sdhci_remove_host(host, 0);
++
+ clk_disable_unprepare(sprd_host->clk_sdio);
+ clk_disable_unprepare(sprd_host->clk_enable);
+ clk_disable_unprepare(sprd_host->clk_2x_enable);
+
+- mmc_free_host(mmc);
++ sdhci_pltfm_free(pdev);
+
+ return 0;
+ }
+diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
+index b11ac2314328d..6eba2441c7efd 100644
+--- a/drivers/mmc/host/usdhi6rol0.c
++++ b/drivers/mmc/host/usdhi6rol0.c
+@@ -1860,10 +1860,12 @@ static int usdhi6_probe(struct platform_device *pdev)
+
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+- goto e_clk_off;
++ goto e_release_dma;
+
+ return 0;
+
++e_release_dma:
++ usdhi6_dma_release(host);
+ e_clk_off:
+ clk_disable_unprepare(host->clk);
+ e_free_mmc:
+diff --git a/drivers/mtd/parsers/afs.c b/drivers/mtd/parsers/afs.c
+index 752b6cf005f71..8fd61767af831 100644
+--- a/drivers/mtd/parsers/afs.c
++++ b/drivers/mtd/parsers/afs.c
+@@ -370,10 +370,8 @@ static int parse_afs_partitions(struct mtd_info *mtd,
+ return i;
+
+ out_free_parts:
+- while (i >= 0) {
++ while (--i >= 0)
+ kfree(parts[i].name);
+- i--;
+- }
+ kfree(parts);
+ *pparts = NULL;
+ return ret;
+diff --git a/drivers/mtd/parsers/parser_imagetag.c b/drivers/mtd/parsers/parser_imagetag.c
+index d69607b482272..fab0949aabba1 100644
+--- a/drivers/mtd/parsers/parser_imagetag.c
++++ b/drivers/mtd/parsers/parser_imagetag.c
+@@ -83,6 +83,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ pr_err("invalid rootfs address: %*ph\n",
+ (int)sizeof(buf->flash_image_start),
+ buf->flash_image_start);
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -92,6 +93,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ pr_err("invalid kernel address: %*ph\n",
+ (int)sizeof(buf->kernel_address),
+ buf->kernel_address);
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -100,6 +102,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ pr_err("invalid kernel length: %*ph\n",
+ (int)sizeof(buf->kernel_length),
+ buf->kernel_length);
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -108,6 +111,7 @@ static int bcm963xx_parse_imagetag_partitions(struct mtd_info *master,
+ pr_err("invalid total length: %*ph\n",
+ (int)sizeof(buf->total_length),
+ buf->total_length);
++ ret = -EINVAL;
+ goto out;
+ }
+
+diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
+index 7bef63947b29f..97a5e1eaeefdf 100644
+--- a/drivers/mtd/spi-nor/cadence-quadspi.c
++++ b/drivers/mtd/spi-nor/cadence-quadspi.c
+@@ -475,7 +475,7 @@ static int cqspi_read_setup(struct spi_nor *nor)
+ /* Setup dummy clock cycles */
+ dummy_clk = nor->read_dummy;
+ if (dummy_clk > CQSPI_DUMMY_CLKS_MAX)
+- dummy_clk = CQSPI_DUMMY_CLKS_MAX;
++ return -EOPNOTSUPP;
+
+ if (dummy_clk / 8) {
+ reg |= (1 << CQSPI_REG_RD_INSTR_MODE_EN_LSB);
+diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c
+index 6dac9dd8bf42d..8fcc48056a8bc 100644
+--- a/drivers/mtd/spi-nor/hisi-sfc.c
++++ b/drivers/mtd/spi-nor/hisi-sfc.c
+@@ -396,8 +396,10 @@ static int hisi_spi_nor_register_all(struct hifmc_host *host)
+
+ for_each_available_child_of_node(dev->of_node, np) {
+ ret = hisi_spi_nor_register(np, host);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ goto fail;
++ }
+
+ if (host->num_chip == HIFMC_MAX_CHIP_NUM) {
+ dev_warn(dev, "Flash device number exceeds the maximum chipselect number\n");
+diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
+index 40586ad17f522..dd6963e4af2c7 100644
+--- a/drivers/mtd/spi-nor/spi-nor.c
++++ b/drivers/mtd/spi-nor/spi-nor.c
+@@ -1011,14 +1011,15 @@ spi_nor_find_best_erase_type(const struct spi_nor_erase_map *map,
+
+ erase = &map->erase_type[i];
+
++ /* Alignment is not mandatory for overlaid regions */
++ if (region->offset & SNOR_OVERLAID_REGION &&
++ region->size <= len)
++ return erase;
++
+ /* Don't erase more than what the user has asked for. */
+ if (erase->size > len)
+ continue;
+
+- /* Alignment is not mandatory for overlaid regions */
+- if (region->offset & SNOR_OVERLAID_REGION)
+- return erase;
+-
+ spi_nor_div_by_erase_size(erase, addr, &rem);
+ if (rem)
+ continue;
+@@ -1152,6 +1153,7 @@ static int spi_nor_init_erase_cmd_list(struct spi_nor *nor,
+ goto destroy_erase_cmd_list;
+
+ if (prev_erase != erase ||
++ erase->size != cmd->size ||
+ region->offset & SNOR_OVERLAID_REGION) {
+ cmd = spi_nor_init_erase_cmd(region, erase);
+ if (IS_ERR(cmd)) {
+@@ -3700,7 +3702,7 @@ spi_nor_region_check_overlay(struct spi_nor_erase_region *region,
+ int i;
+
+ for (i = 0; i < SNOR_ERASE_TYPE_MAX; i++) {
+- if (!(erase_type & BIT(i)))
++ if (!(erase[i].size && erase_type & BIT(erase[i].idx)))
+ continue;
+ if (region->size & erase[i].size_mask) {
+ spi_nor_region_mark_overlay(region);
+@@ -3770,6 +3772,7 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ offset = (region[i].offset & ~SNOR_ERASE_FLAGS_MASK) +
+ region[i].size;
+ }
++ spi_nor_region_mark_end(&region[i - 1]);
+
+ save_uniform_erase_type = map->uniform_erase_type;
+ map->uniform_erase_type = spi_nor_sort_erase_mask(map,
+@@ -3793,8 +3796,6 @@ spi_nor_init_non_uniform_erase_map(struct spi_nor *nor,
+ if (!(regions_erase_type & BIT(erase[i].idx)))
+ spi_nor_set_erase_type(&erase[i], 0, 0xFF);
+
+- spi_nor_region_mark_end(&region[i - 1]);
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index b40d4377cc71d..b2cd3bdba9f89 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -1279,10 +1279,18 @@
+ #define MDIO_PMA_10GBR_FECCTRL 0x00ab
+ #endif
+
++#ifndef MDIO_PMA_RX_CTRL1
++#define MDIO_PMA_RX_CTRL1 0x8051
++#endif
++
+ #ifndef MDIO_PCS_DIG_CTRL
+ #define MDIO_PCS_DIG_CTRL 0x8000
+ #endif
+
++#ifndef MDIO_PCS_DIGITAL_STAT
++#define MDIO_PCS_DIGITAL_STAT 0x8010
++#endif
++
+ #ifndef MDIO_AN_XNP
+ #define MDIO_AN_XNP 0x0016
+ #endif
+@@ -1358,6 +1366,8 @@
+ #define XGBE_KR_TRAINING_ENABLE BIT(1)
+
+ #define XGBE_PCS_CL37_BP BIT(12)
++#define XGBE_PCS_PSEQ_STATE_MASK 0x1c
++#define XGBE_PCS_PSEQ_STATE_POWER_GOOD 0x10
+
+ #define XGBE_AN_CL37_INT_CMPLT BIT(0)
+ #define XGBE_AN_CL37_INT_MASK 0x01
+@@ -1375,6 +1385,10 @@
+ #define XGBE_PMA_CDR_TRACK_EN_OFF 0x00
+ #define XGBE_PMA_CDR_TRACK_EN_ON 0x01
+
++#define XGBE_PMA_RX_RST_0_MASK BIT(4)
++#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
++#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
++
+ /* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 3bd20f7651207..da8c2c4aca7ef 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1443,6 +1443,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
+ return;
+
+ netif_tx_stop_all_queues(netdev);
++ netif_carrier_off(pdata->netdev);
+
+ xgbe_stop_timers(pdata);
+ flush_workqueue(pdata->dev_workqueue);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+index 8a3a60bb26888..156a0bc8ab01d 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
+@@ -1345,7 +1345,7 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata)
+ &an_restart);
+ if (an_restart) {
+ xgbe_phy_config_aneg(pdata);
+- return;
++ goto adjust_link;
+ }
+
+ if (pdata->phy.link) {
+@@ -1396,7 +1396,6 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
+ pdata->phy_if.phy_impl.stop(pdata);
+
+ pdata->phy.link = 0;
+- netif_carrier_off(pdata->netdev);
+
+ xgbe_phy_adjust_link(pdata);
+ }
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+index 128cd648ba99c..d6f6afb67bcc6 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+@@ -921,6 +921,9 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata)
+ if ((phy_id & 0xfffffff0) != 0x03625d10)
+ return false;
+
++ /* Reset PHY - wait for self-clearing reset bit to clear */
++ genphy_soft_reset(phy_data->phydev);
++
+ /* Disable RGMII mode */
+ phy_write(phy_data->phydev, 0x18, 0x7007);
+ reg = phy_read(phy_data->phydev, 0x18);
+@@ -1948,6 +1951,27 @@ static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata)
+ xgbe_phy_put_comm_ownership(pdata);
+ }
+
++static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
++{
++ int reg;
++
++ reg = XMDIO_READ_BITS(pdata, MDIO_MMD_PCS, MDIO_PCS_DIGITAL_STAT,
++ XGBE_PCS_PSEQ_STATE_MASK);
++ if (reg == XGBE_PCS_PSEQ_STATE_POWER_GOOD) {
++ /* Mailbox command timed out, reset of RX block is required.
++ * This can be done by asserting the reset bit and waiting for
++ * its completion.
++ */
++ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
++ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_ON);
++ ndelay(20);
++ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_RX_CTRL1,
++ XGBE_PMA_RX_RST_0_MASK, XGBE_PMA_RX_RST_0_RESET_OFF);
++ usleep_range(40, 50);
++ netif_err(pdata, link, pdata->netdev, "firmware mailbox reset performed\n");
++ }
++}
++
+ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ unsigned int cmd, unsigned int sub_cmd)
+ {
+@@ -1955,9 +1979,11 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+ unsigned int wait;
+
+ /* Log if a previous command did not complete */
+- if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
++ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox not ready for command\n");
++ xgbe_phy_rx_reset(pdata);
++ }
+
+ /* Construct the command */
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd);
+@@ -1979,6 +2005,9 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
+
+ netif_dbg(pdata, link, pdata->netdev,
+ "firmware mailbox command did not complete\n");
++
++ /* Reset on error */
++ xgbe_phy_rx_reset(pdata);
+ }
+
+ static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
+@@ -2575,6 +2604,14 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart)
+ if (reg & MDIO_STAT1_LSTATUS)
+ return 1;
+
++ if (pdata->phy.autoneg == AUTONEG_ENABLE &&
++ phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) {
++ if (!test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
++ netif_carrier_off(pdata->netdev);
++ *an_restart = 1;
++ }
++ }
++
+ /* No link, attempt a receiver reset cycle */
+ if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) {
+ phy_data->rrc_count = 0;
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 7c8187d386756..4ae49d92c1eed 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -8347,9 +8347,10 @@ void bnxt_tx_disable(struct bnxt *bp)
+ txr->dev_state = BNXT_DEV_STATE_CLOSING;
+ }
+ }
++ /* Drop carrier first to prevent TX timeout */
++ netif_carrier_off(bp->dev);
+ /* Stop all TX queues */
+ netif_tx_disable(bp->dev);
+- netif_carrier_off(bp->dev);
+ }
+
+ void bnxt_tx_enable(struct bnxt *bp)
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+index cee582e361341..6b71ec33bf14d 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+@@ -44,6 +44,9 @@
+
+ #define MAX_ULD_QSETS 16
+
++/* ulp_mem_io + ulptx_idata + payload + padding */
++#define MAX_IMM_ULPTX_WR_LEN (32 + 8 + 256 + 8)
++
+ /* CPL message priority levels */
+ enum {
+ CPL_PRIORITY_DATA = 0, /* data messages */
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+index 049f1bbe27ab3..57bf10b4d80c8 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
+@@ -2158,17 +2158,22 @@ int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+ * @skb: the packet
+ *
+ * Returns true if a packet can be sent as an offload WR with immediate
+- * data. We currently use the same limit as for Ethernet packets.
++ * data.
++ * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field.
++ * However, FW_ULPTX_WR commands have a 256 byte immediate only
++ * payload limit.
+ */
+ static inline int is_ofld_imm(const struct sk_buff *skb)
+ {
+ struct work_request_hdr *req = (struct work_request_hdr *)skb->data;
+ unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi));
+
+- if (opcode == FW_CRYPTO_LOOKASIDE_WR)
++ if (unlikely(opcode == FW_ULPTX_WR))
++ return skb->len <= MAX_IMM_ULPTX_WR_LEN;
++ else if (opcode == FW_CRYPTO_LOOKASIDE_WR)
+ return skb->len <= SGE_MAX_WR_LEN;
+ else
+- return skb->len <= MAX_IMM_TX_PKT_LEN;
++ return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 0f35eec967ae8..309cdc5ebc1ff 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -202,8 +202,13 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+ if (!ltb->buff)
+ return;
+
++ /* VIOS automatically unmaps the long term buffer at the remote
++ * end for the following resets:
++ * FAILOVER, MOBILITY, TIMEOUT.
++ */
+ if (adapter->reset_reason != VNIC_RESET_FAILOVER &&
+- adapter->reset_reason != VNIC_RESET_MOBILITY)
++ adapter->reset_reason != VNIC_RESET_MOBILITY &&
++ adapter->reset_reason != VNIC_RESET_TIMEOUT)
+ send_request_unmap(adapter, ltb->map_id);
+ dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+ }
+@@ -1254,10 +1259,8 @@ static int __ibmvnic_close(struct net_device *netdev)
+
+ adapter->state = VNIC_CLOSING;
+ rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
+- if (rc)
+- return rc;
+ adapter->state = VNIC_CLOSED;
+- return 0;
++ return rc;
+ }
+
+ static int ibmvnic_close(struct net_device *netdev)
+@@ -1520,6 +1523,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
+ skb_copy_from_linear_data(skb, dst, skb->len);
+ }
+
++ /* post changes to long_term_buff *dst before VIOS accesses it */
++ dma_wmb();
++
+ tx_pool->consumer_index =
+ (tx_pool->consumer_index + 1) % tx_pool->num_buffers;
+
+@@ -2342,6 +2348,8 @@ restart_poll:
+ offset = be16_to_cpu(next->rx_comp.off_frame_data);
+ flags = next->rx_comp.flags;
+ skb = rx_buff->skb;
++ /* load long_term_buff before copying to skb */
++ dma_rmb();
+ skb_copy_to_linear_data(skb, rx_buff->data + offset,
+ length);
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index b577e6adf3bff..82c62e4678705 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -4874,7 +4874,7 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ enum i40e_admin_queue_err adq_err;
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+- bool is_reset_needed;
++ u32 reset_needed = 0;
+ i40e_status status;
+ u32 i, j;
+
+@@ -4919,9 +4919,11 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
+ flags_complete:
+ changed_flags = orig_flags ^ new_flags;
+
+- is_reset_needed = !!(changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
+- I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED |
+- I40E_FLAG_DISABLE_FW_LLDP));
++ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP)
++ reset_needed = I40E_PF_RESET_AND_REBUILD_FLAG;
++ if (changed_flags & (I40E_FLAG_VEB_STATS_ENABLED |
++ I40E_FLAG_LEGACY_RX | I40E_FLAG_SOURCE_PRUNING_DISABLED))
++ reset_needed = BIT(__I40E_PF_RESET_REQUESTED);
+
+ /* Before we finalize any flag changes, we need to perform some
+ * checks to ensure that the changes are supported and safe.
+@@ -5038,7 +5040,7 @@ flags_complete:
+ case I40E_AQ_RC_EEXIST:
+ dev_warn(&pf->pdev->dev,
+ "FW LLDP agent is already running\n");
+- is_reset_needed = false;
++ reset_needed = 0;
+ break;
+ case I40E_AQ_RC_EPERM:
+ dev_warn(&pf->pdev->dev,
+@@ -5067,8 +5069,8 @@ flags_complete:
+ /* Issue reset to cause things to take effect, as additional bits
+ * are added we will need to create a mask of bits requiring reset
+ */
+- if (is_reset_needed)
+- i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED), true);
++ if (reset_needed)
++ i40e_do_reset(pf, reset_needed, true);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index c19b45a90fcd2..0604b5aaad86f 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -2603,7 +2603,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ return;
+ if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
+ return;
+- if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
++ if (test_bit(__I40E_VF_DISABLE, pf->state)) {
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ return;
+ }
+@@ -2621,7 +2621,6 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
+ }
+ }
+ }
+- clear_bit(__I40E_VF_DISABLE, pf->state);
+ }
+
+ /**
+@@ -7612,6 +7611,8 @@ int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ if (filter->flags >= ARRAY_SIZE(flag_table))
+ return I40E_ERR_CONFIG;
+
++ memset(&cld_filter, 0, sizeof(cld_filter));
++
+ /* copy element needed to add cloud filter from filter */
+ i40e_set_cld_element(filter, &cld_filter);
+
+@@ -7675,10 +7676,13 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ return -EOPNOTSUPP;
+
+ /* adding filter using src_port/src_ip is not supported at this stage */
+- if (filter->src_port || filter->src_ipv4 ||
++ if (filter->src_port ||
++ (filter->src_ipv4 && filter->n_proto != ETH_P_IPV6) ||
+ !ipv6_addr_any(&filter->ip.v6.src_ip6))
+ return -EOPNOTSUPP;
+
++ memset(&cld_filter, 0, sizeof(cld_filter));
++
+ /* copy element needed to add cloud filter from filter */
+ i40e_set_cld_element(filter, &cld_filter.element);
+
+@@ -7702,7 +7706,7 @@ int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT);
+ }
+
+- } else if (filter->dst_ipv4 ||
++ } else if ((filter->dst_ipv4 && filter->n_proto != ETH_P_IPV6) ||
+ !ipv6_addr_any(&filter->ip.v6.dst_ip6)) {
+ cld_filter.element.flags =
+ cpu_to_le16(I40E_AQC_ADD_CLOUD_FILTER_IP_PORT);
+@@ -8481,11 +8485,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
+ dev_dbg(&pf->pdev->dev, "PFR requested\n");
+ i40e_handle_reset_warning(pf, lock_acquired);
+
+- dev_info(&pf->pdev->dev,
+- pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
+- "FW LLDP is disabled\n" :
+- "FW LLDP is enabled\n");
+-
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+@@ -8493,6 +8492,10 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
++ dev_info(&pf->pdev->dev,
++ pf->flags & I40E_FLAG_DISABLE_FW_LLDP ?
++ "FW LLDP is disabled\n" :
++ "FW LLDP is enabled\n");
+
+ } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
+ int v;
+@@ -9955,7 +9958,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state);
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_hw *hw = &pf->hw;
+- u8 set_fc_aq_fail = 0;
+ i40e_status ret;
+ u32 val;
+ int v;
+@@ -10081,13 +10083,6 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
+- /* make sure our flow control settings are restored */
+- ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+- if (ret)
+- dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
+- i40e_stat_str(&pf->hw, ret),
+- i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+-
+ /* Rebuild the VSIs and VEBs that existed before reset.
+ * They are still in our local switch element arrays, so only
+ * need to rebuild the switch model in the HW.
+@@ -11770,6 +11765,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf)
+ struct i40e_aqc_configure_partition_bw_data bw_data;
+ i40e_status status;
+
++ memset(&bw_data, 0, sizeof(bw_data));
++
+ /* Set the valid bit for this PF */
+ bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
+ bw_data.max_bw[pf->hw.pf_id] = pf->max_bw & I40E_ALT_BW_VALUE_MASK;
+@@ -14768,7 +14765,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ int err;
+ u32 val;
+ u32 i;
+- u8 set_fc_aq_fail;
+
+ err = pci_enable_device_mem(pdev);
+ if (err)
+@@ -15090,24 +15086,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ }
+ INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
+
+- /* Make sure flow control is set according to current settings */
+- err = i40e_set_fc(hw, &set_fc_aq_fail, true);
+- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
+- dev_dbg(&pf->pdev->dev,
+- "Set fc with err %s aq_err %s on get_phy_cap\n",
+- i40e_stat_str(hw, err),
+- i40e_aq_str(hw, hw->aq.asq_last_status));
+- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
+- dev_dbg(&pf->pdev->dev,
+- "Set fc with err %s aq_err %s on set_phy_config\n",
+- i40e_stat_str(hw, err),
+- i40e_aq_str(hw, hw->aq.asq_last_status));
+- if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
+- dev_dbg(&pf->pdev->dev,
+- "Set fc with err %s aq_err %s on get_link_info\n",
+- i40e_stat_str(hw, err),
+- i40e_aq_str(hw, hw->aq.asq_last_status));
+-
+ /* if FDIR VSI was set up, start it now */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index f47841f3a69d5..218aada8949d9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -3093,13 +3093,16 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
+
+ l4_proto = ip.v4->protocol;
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
++ int ret;
++
+ tunnel |= I40E_TX_CTX_EXT_IP_IPV6;
+
+ exthdr = ip.hdr + sizeof(*ip.v6);
+ l4_proto = ip.v6->nexthdr;
+- if (l4.hdr != exthdr)
+- ipv6_skip_exthdr(skb, exthdr - skb->data,
+- &l4_proto, &frag_off);
++ ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
++ &l4_proto, &frag_off);
++ if (ret < 0)
++ return -1;
+ }
+
+ /* define outer transport */
+diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
+index 94e3f8b869be4..7b0543056b101 100644
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -3027,7 +3027,9 @@ static int mvneta_txq_sw_init(struct mvneta_port *pp,
+ }
+
+ /* Setup XPS mapping */
+- if (txq_number > 1)
++ if (pp->neta_armada3700)
++ cpu = 0;
++ else if (txq_number > 1)
+ cpu = txq->id % num_present_cpus();
+ else
+ cpu = pp->rxq_def % num_present_cpus();
+@@ -3764,6 +3766,11 @@ static int mvneta_cpu_online(unsigned int cpu, struct hlist_node *node)
+ node_online);
+ struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
+
++ /* Armada 3700's per-cpu interrupt for mvneta is broken; all interrupts
++ * are routed to CPU 0, so we don't need all the cpu-hotplug support
++ */
++ if (pp->neta_armada3700)
++ return 0;
+
+ spin_lock(&pp->lock);
+ /*
+diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+index 1187ef1375e29..cb341372d5a35 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
++++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+@@ -4986,6 +4986,7 @@ static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule
+
+ if (!fs_rule->mirr_mbox) {
+ mlx4_err(dev, "rule mirroring mailbox is null\n");
++ mlx4_free_cmd_mailbox(dev, mailbox);
+ return -EINVAL;
+ }
+ memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 1e8244ec5b332..131be1fa770cb 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4077,7 +4077,7 @@ static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
+
+ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
+ {
+- RTL_W8(tp, MaxTxPacketSize, 0x3f);
++ RTL_W8(tp, MaxTxPacketSize, 0x24);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_512B);
+@@ -4085,7 +4085,7 @@ static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
+
+ static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
+ {
+- RTL_W8(tp, MaxTxPacketSize, 0x0c);
++ RTL_W8(tp, MaxTxPacketSize, 0x3f);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
+ rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
+diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
+index 8b94d9ad9e2ba..f87e135a8aef9 100644
+--- a/drivers/net/ethernet/sun/sunvnet_common.c
++++ b/drivers/net/ethernet/sun/sunvnet_common.c
+@@ -1353,27 +1353,12 @@ sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
+ if (vio_version_after_eq(&port->vio, 1, 3))
+ localmtu -= VLAN_HLEN;
+
+- if (skb->protocol == htons(ETH_P_IP)) {
+- struct flowi4 fl4;
+- struct rtable *rt = NULL;
+-
+- memset(&fl4, 0, sizeof(fl4));
+- fl4.flowi4_oif = dev->ifindex;
+- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
+- fl4.daddr = ip_hdr(skb)->daddr;
+- fl4.saddr = ip_hdr(skb)->saddr;
+-
+- rt = ip_route_output_key(dev_net(dev), &fl4);
+- if (!IS_ERR(rt)) {
+- skb_dst_set(skb, &rt->dst);
+- icmp_send(skb, ICMP_DEST_UNREACH,
+- ICMP_FRAG_NEEDED,
+- htonl(localmtu));
+- }
+- }
++ if (skb->protocol == htons(ETH_P_IP))
++ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
++ htonl(localmtu));
+ #if IS_ENABLED(CONFIG_IPV6)
+ else if (skb->protocol == htons(ETH_P_IPV6))
+- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
++ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
+ #endif
+ goto out_dropped;
+ }
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index bb6e52f3bdf9b..f98318d93ce72 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1689,6 +1689,18 @@ static int axienet_probe(struct platform_device *pdev)
+ lp->options = XAE_OPTION_DEFAULTS;
+ lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+ lp->tx_bd_num = TX_BD_NUM_DEFAULT;
++
++ lp->clk = devm_clk_get_optional(&pdev->dev, NULL);
++ if (IS_ERR(lp->clk)) {
++ ret = PTR_ERR(lp->clk);
++ goto free_netdev;
++ }
++ ret = clk_prepare_enable(lp->clk);
++ if (ret) {
++ dev_err(&pdev->dev, "Unable to enable clock: %d\n", ret);
++ goto free_netdev;
++ }
++
+ /* Map device registers */
+ ethres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ lp->regs = devm_ioremap_resource(&pdev->dev, ethres);
+@@ -1836,20 +1848,6 @@ static int axienet_probe(struct platform_device *pdev)
+
+ lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
+ if (lp->phy_node) {
+- lp->clk = devm_clk_get(&pdev->dev, NULL);
+- if (IS_ERR(lp->clk)) {
+- dev_warn(&pdev->dev, "Failed to get clock: %ld\n",
+- PTR_ERR(lp->clk));
+- lp->clk = NULL;
+- } else {
+- ret = clk_prepare_enable(lp->clk);
+- if (ret) {
+- dev_err(&pdev->dev, "Unable to enable clock: %d\n",
+- ret);
+- goto free_netdev;
+- }
+- }
+-
+ ret = axienet_mdio_setup(lp);
+ if (ret)
+ dev_warn(&pdev->dev,
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 4e19c3149848b..d0653babab923 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -545,9 +545,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+ if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
+ mtu < ntohs(iph->tot_len)) {
+ netdev_dbg(dev, "packet too big, fragmentation needed\n");
+- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+- htonl(mtu));
++ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
++ htonl(mtu));
+ goto err_rt;
+ }
+
+diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
+index 5502e145aa17b..8443df79fabc7 100644
+--- a/drivers/net/vxlan.c
++++ b/drivers/net/vxlan.c
+@@ -4424,7 +4424,6 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_dev *vxlan, *next;
+ struct net_device *dev, *aux;
+- unsigned int h;
+
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &vxlan_link_ops)
+@@ -4438,14 +4437,13 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
+ unregister_netdevice_queue(vxlan->dev, head);
+ }
+
+- for (h = 0; h < PORT_HASH_SIZE; ++h)
+- WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
+ }
+
+ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+ {
+ struct net *net;
+ LIST_HEAD(list);
++ unsigned int h;
+
+ rtnl_lock();
+ list_for_each_entry(net, net_list, exit_list)
+@@ -4453,6 +4451,13 @@ static void __net_exit vxlan_exit_batch_net(struct list_head *net_list)
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
++
++ list_for_each_entry(net, net_list, exit_list) {
++ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
++
++ for (h = 0; h < PORT_HASH_SIZE; ++h)
++ WARN_ON_ONCE(!hlist_empty(&vn->sock_list[h]));
++ }
+ }
+
+ static struct pernet_operations vxlan_net_ops = {
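The vxlan change is purely an ordering fix: the per-namespace sock_list buckets can only be asserted empty after unregister_netdevice_many() has actually torn the devices down, so the WARN_ON_ONCE() moves from vxlan_destroy_tunnels() (which merely queues devices) into the batched exit handler. Schematically:

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		vxlan_destroy_tunnels(net, &list);	/* queue for teardown */
	unregister_netdevice_many(&list);		/* sockets released here */
	rtnl_unlock();
	/* only now may the per-net socket hash be checked */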
+diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
+index 63607c3b8e818..d4589b2ab3b6d 100644
+--- a/drivers/net/wireless/ath/ath10k/snoc.c
++++ b/drivers/net/wireless/ath/ath10k/snoc.c
+@@ -1039,12 +1039,13 @@ static int ath10k_snoc_hif_power_up(struct ath10k *ar,
+ ret = ath10k_snoc_init_pipes(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to initialize CE: %d\n", ret);
+- goto err_wlan_enable;
++ goto err_free_rri;
+ }
+
+ return 0;
+
+-err_wlan_enable:
++err_free_rri:
++ ath10k_ce_free_rri(ar);
+ ath10k_snoc_wlan_disable(ar);
+
+ return ret;
+diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
+index 26ea51a721564..859a865c59950 100644
+--- a/drivers/net/wireless/ath/ath9k/debug.c
++++ b/drivers/net/wireless/ath/ath9k/debug.c
+@@ -1223,8 +1223,11 @@ static ssize_t write_file_nf_override(struct file *file,
+
+ ah->nf_override = val;
+
+- if (ah->curchan)
++ if (ah->curchan) {
++ ath9k_ps_wakeup(sc);
+ ath9k_hw_loadnf(ah, ah->curchan);
++ ath9k_ps_restore(sc);
++ }
+
+ return count;
+ }
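write_file_nf_override() is a debugfs handler, so the chip may be in full sleep when it runs, while ath9k_hw_loadnf() touches baseband registers directly. Any such handler has to hold a power-save reference across the register access:

	ath9k_ps_wakeup(sc);		/* take a PS reference, wake the chip */
	ath9k_hw_loadnf(ah, ah->curchan);
	ath9k_ps_restore(sc);		/* drop it; the chip may sleep again */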
+diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c
+index d3c001fa8eb46..32ce1b42ce08b 100644
+--- a/drivers/net/wireless/broadcom/b43/phy_n.c
++++ b/drivers/net/wireless/broadcom/b43/phy_n.c
+@@ -5308,7 +5308,7 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
+
+ for (i = 0; i < 4; i++) {
+ if (dev->phy.rev >= 3)
+- table[i] = coef[i];
++ coef[i] = table[i];
+ else
+ coef[i] = 0;
+ }
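The b43 fix reverses an assignment that ran the wrong way: when restoring calibration, the saved table is the source and the live coefficients are the destination. As a comment-level reminder of the intent:

	/* Restore path: copy from the saved calibration table.
	 * The old code did table[i] = coef[i], overwriting the
	 * saved values with whatever happened to be in coef[].
	 */
	coef[i] = table[i];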
+diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
+index e889488b84a03..8090895873011 100644
+--- a/drivers/net/xen-netback/interface.c
++++ b/drivers/net/xen-netback/interface.c
+@@ -162,13 +162,15 @@ irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+ {
+ struct xenvif_queue *queue = dev_id;
+ int old;
++ bool has_rx, has_tx;
+
+ old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
+ WARN(old, "Interrupt while EOI pending\n");
+
+- /* Use bitwise or as we need to call both functions. */
+- if ((!xenvif_handle_tx_interrupt(queue) |
+- !xenvif_handle_rx_interrupt(queue))) {
++ has_tx = xenvif_handle_tx_interrupt(queue);
++ has_rx = xenvif_handle_rx_interrupt(queue);
++
++ if (!has_rx && !has_tx) {
+ atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
+ xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
+ }
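The original xen-netback code used a bitwise | deliberately, because a logical || would short-circuit: as soon as the TX handler reported no work, the RX handler would never run at all. The rewrite keeps both calls unconditional but names the results, which reads unambiguously:

	has_tx = xenvif_handle_tx_interrupt(queue);	/* always evaluated */
	has_rx = xenvif_handle_rx_interrupt(queue);	/* always evaluated */

	if (!has_rx && !has_tx) {			/* spurious interrupt */
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}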
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 84f4078216a36..3ba68baeed1db 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -314,7 +314,9 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+
+ for_each_child_of_node(parent, child) {
+ addr = of_get_property(child, "reg", &len);
+- if (!addr || (len < 2 * sizeof(u32))) {
++ if (!addr)
++ continue;
++ if (len < 2 * sizeof(u32)) {
+ dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
+ return -EINVAL;
+ }
+@@ -345,6 +347,7 @@ static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
+ cell->name, nvmem->stride);
+ /* Cells already added will be freed later. */
+ kfree_const(cell->name);
++ of_node_put(cell->np);
+ kfree(cell);
+ return -EINVAL;
+ }
+diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
+index 223d617ecfe17..fc24102e25ce7 100644
+--- a/drivers/of/fdt.c
++++ b/drivers/of/fdt.c
+@@ -1153,8 +1153,16 @@ int __init __weak early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size)
+ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
+ phys_addr_t size, bool nomap)
+ {
+- if (nomap)
+- return memblock_remove(base, size);
++ if (nomap) {
++ /*
++ * If the memory is already reserved (by another region), we
++ * should not allow it to be marked nomap.
++ */
++ if (memblock_is_region_reserved(base, size))
++ return -EBUSY;
++
++ return memblock_mark_nomap(base, size);
++ }
+ return memblock_reserve(base, size);
+ }
+
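Dropping a nomap range with memblock_remove() makes the kernel forget the memory entirely; memblock_mark_nomap() instead keeps the range in the memory map while excluding it from the linear mapping and from allocation, which is what a "reserved, no-map" DT region actually asks for. The -EBUSY guard refuses to downgrade a range that some other region has already reserved. The resulting decision, as in the hunk:

	if (nomap) {
		if (memblock_is_region_reserved(base, size))
			return -EBUSY;			/* already claimed */
		return memblock_mark_nomap(base, size);	/* known but unmapped */
	}
	return memblock_reserve(base, size);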
+diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
+index 14196c0287a24..a8eab4e67af10 100644
+--- a/drivers/pci/controller/dwc/pcie-qcom.c
++++ b/drivers/pci/controller/dwc/pcie-qcom.c
+@@ -402,7 +402,9 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
+
+ /* enable external reference clock */
+ val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
+- val &= ~PHY_REFCLK_USE_PAD;
++ /* USE_PAD is required only for ipq806x */
++ if (!of_device_is_compatible(node, "qcom,pcie-apq8064"))
++ val &= ~PHY_REFCLK_USE_PAD;
+ val |= PHY_REFCLK_SSP_EN;
+ writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
+
+diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
+index d21fa04fa44d2..da5023e27951a 100644
+--- a/drivers/pci/setup-res.c
++++ b/drivers/pci/setup-res.c
+@@ -409,10 +409,16 @@ EXPORT_SYMBOL(pci_release_resource);
+ int pci_resize_resource(struct pci_dev *dev, int resno, int size)
+ {
+ struct resource *res = dev->resource + resno;
++ struct pci_host_bridge *host;
+ int old, ret;
+ u32 sizes;
+ u16 cmd;
+
++ /* Check if we must preserve the firmware's resource assignment */
++ host = pci_find_host_bridge(dev->bus);
++ if (host->preserve_config)
++ return -ENOTSUPP;
++
+ /* Make sure the resource isn't assigned before resizing it. */
+ if (!(res->flags & IORESOURCE_UNSET))
+ return -EBUSY;
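When the host bridge carries preserve_config (firmware asked the OS, e.g. via the ACPI host-bridge _DSM, to leave its resource assignment alone), moving or resizing a BAR would break that contract, so pci_resize_resource() now refuses up front:

	struct pci_host_bridge *host = pci_find_host_bridge(dev->bus);

	if (host->preserve_config)	/* firmware owns the assignment */
		return -ENOTSUPP;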
+diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
+index 31e39558d49d8..8b003c890b87b 100644
+--- a/drivers/pci/syscall.c
++++ b/drivers/pci/syscall.c
+@@ -20,7 +20,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
+ u16 word;
+ u32 dword;
+ long err;
+- long cfg_ret;
++ int cfg_ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+@@ -46,7 +46,7 @@ SYSCALL_DEFINE5(pciconfig_read, unsigned long, bus, unsigned long, dfn,
+ }
+
+ err = -EIO;
+- if (cfg_ret != PCIBIOS_SUCCESSFUL)
++ if (cfg_ret)
+ goto error;
+
+ switch (len) {
+@@ -105,7 +105,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ if (err)
+ break;
+ err = pci_user_write_config_byte(dev, off, byte);
+- if (err != PCIBIOS_SUCCESSFUL)
++ if (err)
+ err = -EIO;
+ break;
+
+@@ -114,7 +114,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ if (err)
+ break;
+ err = pci_user_write_config_word(dev, off, word);
+- if (err != PCIBIOS_SUCCESSFUL)
++ if (err)
+ err = -EIO;
+ break;
+
+@@ -123,7 +123,7 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
+ if (err)
+ break;
+ err = pci_user_write_config_dword(dev, off, dword);
+- if (err != PCIBIOS_SUCCESSFUL)
++ if (err)
+ err = -EIO;
+ break;
+
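The pci_user_*_config_*() helpers return 0 or a negative errno (the raw PCIBIOS_* code is converted inside them), so testing against PCIBIOS_SUCCESSFUL suggested a comparison that no longer exists; a plain truth test says what is meant, and cfg_ret becomes an int to match the helpers' return type:

	int cfg_ret;	/* was long: the config accessors return int */

	cfg_ret = pci_user_read_config_word(dev, off, &word);
	if (cfg_ret)	/* negative errno on failure */
		goto error;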
+diff --git a/drivers/phy/rockchip/phy-rockchip-emmc.c b/drivers/phy/rockchip/phy-rockchip-emmc.c
+index 2dc19ddd120f5..a005fc58bbf02 100644
+--- a/drivers/phy/rockchip/phy-rockchip-emmc.c
++++ b/drivers/phy/rockchip/phy-rockchip-emmc.c
+@@ -240,15 +240,17 @@ static int rockchip_emmc_phy_init(struct phy *phy)
+ * - SDHCI driver to get the PHY
+ * - SDHCI driver to init the PHY
+ *
+- * The clock is optional, so upon any error we just set to NULL.
++ * The clock is optional: clk_get_optional() returns NULL when no
++ * clock is described, and an error pointer only on a real failure.
+ *
+ * NOTE: we don't do anything special for EPROBE_DEFER here. Given the
+ * above expected use case, EPROBE_DEFER isn't sensible to expect, so
+ * it's just like any other error.
+ */
+- rk_phy->emmcclk = clk_get(&phy->dev, "emmcclk");
++ rk_phy->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
+ if (IS_ERR(rk_phy->emmcclk)) {
+- dev_dbg(&phy->dev, "Error getting emmcclk: %d\n", ret);
++ ret = PTR_ERR(rk_phy->emmcclk);
++ dev_err(&phy->dev, "Error getting emmcclk: %d\n", ret);
+ rk_phy->emmcclk = NULL;
+ }
+
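clk_get_optional() folds "no such clock" into a NULL return, so an error pointer now always means a genuine failure worth logging at error level; the driver above still chooses to continue with emmcclk = NULL in that case. One common shape of the stricter variant, sketched for comparison:

	rk_phy->emmcclk = clk_get_optional(&phy->dev, "emmcclk");
	if (IS_ERR(rk_phy->emmcclk))
		return PTR_ERR(rk_phy->emmcclk);	/* real failure */
	/* NULL simply means the DT describes no "emmcclk" */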
+diff --git a/drivers/power/reset/at91-sama5d2_shdwc.c b/drivers/power/reset/at91-sama5d2_shdwc.c
+index e341cc5c0ea6f..c84df27cd5482 100644
+--- a/drivers/power/reset/at91-sama5d2_shdwc.c
++++ b/drivers/power/reset/at91-sama5d2_shdwc.c
+@@ -37,7 +37,7 @@
+
+ #define AT91_SHDW_MR 0x04 /* Shut Down Mode Register */
+ #define AT91_SHDW_WKUPDBC_SHIFT 24
+-#define AT91_SHDW_WKUPDBC_MASK GENMASK(31, 16)
++#define AT91_SHDW_WKUPDBC_MASK GENMASK(26, 24)
+ #define AT91_SHDW_WKUPDBC(x) (((x) << AT91_SHDW_WKUPDBC_SHIFT) \
+ & AT91_SHDW_WKUPDBC_MASK)
+
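WKUPDBC is a three-bit debounce field at bits 26:24, so the mask belongs at GENMASK(26, 24); the old GENMASK(31, 16) let oversized values spill into the neighbouring bits of the mode register. A worked example with x = 0x9, one bit too wide for the field:

	/* (0x9 << 24) & GENMASK(31, 16) == 0x09000000  - bit 27 leaks out
	 * (0x9 << 24) & GENMASK(26, 24) == 0x01000000  - clamped to 26:24
	 */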
+diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
+index 73352e6fbccbf..6ad6aad215cf1 100644
+--- a/drivers/pwm/pwm-rockchip.c
++++ b/drivers/pwm/pwm-rockchip.c
+@@ -361,7 +361,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0) {
+- clk_unprepare(pc->clk);
+ dev_err(&pdev->dev, "pwmchip_add() failed: %d\n", ret);
+ goto err_pclk;
+ }
+diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
+index aefc351bfed59..86a3c2dd05848 100644
+--- a/drivers/regulator/axp20x-regulator.c
++++ b/drivers/regulator/axp20x-regulator.c
+@@ -1072,7 +1072,7 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq)
+ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
+ {
+ struct device_node *np, *regulators;
+- int ret;
++ int ret = 0;
+ u32 dcdcfreq = 0;
+
+ np = of_node_get(pdev->dev.parent->of_node);
+@@ -1087,13 +1087,12 @@ static int axp20x_regulator_parse_dt(struct platform_device *pdev)
+ ret = axp20x_set_dcdc_freq(pdev, dcdcfreq);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Error setting dcdc frequency: %d\n", ret);
+- return ret;
+ }
+-
+ of_node_put(regulators);
+ }
+
+- return 0;
++ of_node_put(np);
++ return ret;
+ }
+
+ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 workmode)
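axp20x_regulator_parse_dt() takes a reference via of_node_get() on entry, so every exit has to drop it; the rewrite funnels the dcdcfreq error through the common tail instead of returning early, and adds the missing of_node_put(np). A compact sketch of that shape (names as in the driver, body abridged):

	np = of_node_get(pdev->dev.parent->of_node);
	if (!np)
		return 0;

	regulators = of_get_child_by_name(np, "regulators");
	if (regulators) {
		/* parse children; record failures in ret, don't return */
		of_node_put(regulators);
	}

	of_node_put(np);	/* balances the initial get on every path */
	return ret;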
+diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
+index 5b9d570df85cc..a31b6ae92a84e 100644
+--- a/drivers/regulator/core.c
++++ b/drivers/regulator/core.c
+@@ -1576,7 +1576,7 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ const char *supply_name)
+ {
+ struct regulator *regulator;
+- int err;
++ int err = 0;
+
+ if (dev) {
+ char buf[REG_STR_SIZE];
+@@ -1622,8 +1622,8 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
+ }
+ }
+
+- regulator->debugfs = debugfs_create_dir(supply_name,
+- rdev->debugfs);
++ if (err != -EEXIST)
++ regulator->debugfs = debugfs_create_dir(supply_name, rdev->debugfs);
+ if (!regulator->debugfs) {
+ rdev_dbg(rdev, "Failed to create debugfs directory\n");
+ } else {
+diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
+index a47f87b8373df..68d22acdb037a 100644
+--- a/drivers/regulator/qcom-rpmh-regulator.c
++++ b/drivers/regulator/qcom-rpmh-regulator.c
+@@ -874,7 +874,7 @@ static const struct rpmh_vreg_init_data pm8009_vreg_data[] = {
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, "vdd-l4"),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, "vdd-l5-l6"),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, "vdd-l5-l6"),
+- RPMH_VREG("ldo7", "ldo%s6", &pmic5_pldo_lv, "vdd-l7"),
++ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo_lv, "vdd-l7"),
+ {},
+ };
+
+diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
+index 6ca27e9d5ef7d..5276f8442f3c6 100644
+--- a/drivers/regulator/s5m8767.c
++++ b/drivers/regulator/s5m8767.c
+@@ -544,14 +544,18 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+ rdata = devm_kcalloc(&pdev->dev,
+ pdata->num_regulators, sizeof(*rdata),
+ GFP_KERNEL);
+- if (!rdata)
++ if (!rdata) {
++ of_node_put(regulators_np);
+ return -ENOMEM;
++ }
+
+ rmode = devm_kcalloc(&pdev->dev,
+ pdata->num_regulators, sizeof(*rmode),
+ GFP_KERNEL);
+- if (!rmode)
++ if (!rmode) {
++ of_node_put(regulators_np);
+ return -ENOMEM;
++ }
+
+ pdata->regulators = rdata;
+ pdata->opmode = rmode;
+@@ -574,10 +578,13 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
+ 0,
+ GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+ "s5m8767");
+- if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT)
++ if (PTR_ERR(rdata->ext_control_gpiod) == -ENOENT) {
+ rdata->ext_control_gpiod = NULL;
+- else if (IS_ERR(rdata->ext_control_gpiod))
++ } else if (IS_ERR(rdata->ext_control_gpiod)) {
++ of_node_put(reg_np);
++ of_node_put(regulators_np);
+ return PTR_ERR(rdata->ext_control_gpiod);
++ }
+
+ rdata->id = i;
+ rdata->initdata = of_get_regulator_init_data(
+diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
+index c5b9804140860..9ae7ce3f50696 100644
+--- a/drivers/rtc/Kconfig
++++ b/drivers/rtc/Kconfig
+@@ -683,6 +683,7 @@ config RTC_DRV_S5M
+ tristate "Samsung S2M/S5M series"
+ depends on MFD_SEC_CORE || COMPILE_TEST
+ select REGMAP_IRQ
++ select REGMAP_I2C
+ help
+ If you say yes here you will get support for the
+ RTC of Samsung S2MPS14 and S5M PMIC series.
+diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
+index 957889a42d2ea..f6f03a349c3f0 100644
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -117,7 +117,7 @@ struct virtio_rev_info {
+ };
+
+ /* the highest virtio-ccw revision we support */
+-#define VIRTIO_CCW_REV_MAX 1
++#define VIRTIO_CCW_REV_MAX 2
+
+ struct virtio_ccw_vq_info {
+ struct virtqueue *vq;
+@@ -952,7 +952,7 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
+ u8 old_status = vcdev->dma_area->status;
+ struct ccw1 *ccw;
+
+- if (vcdev->revision < 1)
++ if (vcdev->revision < 2)
+ return vcdev->dma_area->status;
+
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
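Revision 2 of the virtio-ccw transport is what defines the READ_STATUS channel command, so the CCW-based status readout in virtio_ccw_get_status() is only valid from revision 2 onwards; the old "< 1" gate let revision-1 devices fall through into a command they do not implement. With REV_MAX raised to 2, the guard reads:

	if (vcdev->revision < 2)		/* no READ_STATUS yet */
		return vcdev->dma_area->status;	/* shared-memory fallback */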
+diff --git a/drivers/scsi/bnx2fc/Kconfig b/drivers/scsi/bnx2fc/Kconfig
+index e0ccb48ec9617..40e9c9dc04bd8 100644
+--- a/drivers/scsi/bnx2fc/Kconfig
++++ b/drivers/scsi/bnx2fc/Kconfig
+@@ -5,6 +5,7 @@ config SCSI_BNX2X_FCOE
+ depends on (IPV6 || IPV6=n)
+ depends on LIBFC
+ depends on LIBFCOE
++ depends on MMU
+ select NETDEVICES
+ select ETHERNET
+ select NET_VENDOR_BROADCOM
+diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+index f3d8d53ab84de..dbe5325a324d5 100644
+--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
++++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
+@@ -11,6 +11,7 @@
+ */
+
+ #include <linux/bitops.h>
++#include <linux/clk.h>
+ #include <linux/interrupt.h>
+ #include <linux/fs.h>
+ #include <linux/kfifo.h>
+@@ -67,6 +68,7 @@ struct aspeed_lpc_snoop_channel {
+ struct aspeed_lpc_snoop {
+ struct regmap *regmap;
+ int irq;
++ struct clk *clk;
+ struct aspeed_lpc_snoop_channel chan[NUM_SNOOP_CHANNELS];
+ };
+
+@@ -282,22 +284,42 @@ static int aspeed_lpc_snoop_probe(struct platform_device *pdev)
+ return -ENODEV;
+ }
+
++ lpc_snoop->clk = devm_clk_get(dev, NULL);
++ if (IS_ERR(lpc_snoop->clk)) {
++ rc = PTR_ERR(lpc_snoop->clk);
++ if (rc != -EPROBE_DEFER)
++ dev_err(dev, "couldn't get clock\n");
++ return rc;
++ }
++ rc = clk_prepare_enable(lpc_snoop->clk);
++ if (rc) {
++ dev_err(dev, "couldn't enable clock\n");
++ return rc;
++ }
++
+ rc = aspeed_lpc_snoop_config_irq(lpc_snoop, pdev);
+ if (rc)
+- return rc;
++ goto err;
+
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 0, port);
+ if (rc)
+- return rc;
++ goto err;
+
+ /* Configuration of 2nd snoop channel port is optional */
+ if (of_property_read_u32_index(dev->of_node, "snoop-ports",
+ 1, &port) == 0) {
+ rc = aspeed_lpc_enable_snoop(lpc_snoop, dev, 1, port);
+- if (rc)
++ if (rc) {
+ aspeed_lpc_disable_snoop(lpc_snoop, 0);
++ goto err;
++ }
+ }
+
++ return 0;
++
++err:
++ clk_disable_unprepare(lpc_snoop->clk);
++
+ return rc;
+ }
+
+@@ -309,6 +331,8 @@ static int aspeed_lpc_snoop_remove(struct platform_device *pdev)
+ aspeed_lpc_disable_snoop(lpc_snoop, 0);
+ aspeed_lpc_disable_snoop(lpc_snoop, 1);
+
++ clk_disable_unprepare(lpc_snoop->clk);
++
+ return 0;
+ }
+
+diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
+index e3d06330d1258..f7d0f63921dc2 100644
+--- a/drivers/soundwire/cadence_master.c
++++ b/drivers/soundwire/cadence_master.c
+@@ -368,10 +368,10 @@ cdns_fill_msg_resp(struct sdw_cdns *cdns,
+ if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK)) {
+ no_ack = 1;
+ dev_dbg_ratelimited(cdns->dev, "Msg Ack not received\n");
+- if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
+- nack = 1;
+- dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
+- }
++ }
++ if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK) {
++ nack = 1;
++ dev_err_ratelimited(cdns->dev, "Msg NACK received\n");
+ }
+ }
+
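A NACK is a failure indication in its own right, so it must be tested independently of the ACK bit rather than only inside the missing-ACK branch, where a NACK arriving alongside a set ACK bit was silently ignored:

	if (!(cdns->response_buf[i] & CDNS_MCP_RESP_ACK))
		no_ack = 1;				/* no acknowledgement */
	if (cdns->response_buf[i] & CDNS_MCP_RESP_NACK)
		nack = 1;				/* explicit failure */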
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index abbc1582f457e..d9711ea5b01d3 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1569,7 +1569,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
+ if (ret == 0) {
+ as->use_dma = true;
+ } else if (ret == -EPROBE_DEFER) {
+- return ret;
++ goto out_unmap_regs;
+ }
+ } else if (as->caps.has_pdc_support) {
+ as->use_pdc = true;
+diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
+index f236e3034cf85..aafac128bb5f1 100644
+--- a/drivers/spi/spi-pxa2xx-pci.c
++++ b/drivers/spi/spi-pxa2xx-pci.c
+@@ -21,7 +21,8 @@ enum {
+ PORT_BSW1,
+ PORT_BSW2,
+ PORT_CE4100,
+- PORT_LPT,
++ PORT_LPT0,
++ PORT_LPT1,
+ };
+
+ struct pxa_spi_info {
+@@ -57,8 +58,10 @@ static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
+ static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
+ static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
+
+-static struct dw_dma_slave lpt_tx_param = { .dst_id = 0 };
+-static struct dw_dma_slave lpt_rx_param = { .src_id = 1 };
++static struct dw_dma_slave lpt1_tx_param = { .dst_id = 0 };
++static struct dw_dma_slave lpt1_rx_param = { .src_id = 1 };
++static struct dw_dma_slave lpt0_tx_param = { .dst_id = 2 };
++static struct dw_dma_slave lpt0_rx_param = { .src_id = 3 };
+
+ static bool lpss_dma_filter(struct dma_chan *chan, void *param)
+ {
+@@ -185,12 +188,19 @@ static struct pxa_spi_info spi_info_configs[] = {
+ .num_chipselect = 1,
+ .max_clk_rate = 50000000,
+ },
+- [PORT_LPT] = {
++ [PORT_LPT0] = {
+ .type = LPSS_LPT_SSP,
+ .port_id = 0,
+ .setup = lpss_spi_setup,
+- .tx_param = &lpt_tx_param,
+- .rx_param = &lpt_rx_param,
++ .tx_param = &lpt0_tx_param,
++ .rx_param = &lpt0_rx_param,
++ },
++ [PORT_LPT1] = {
++ .type = LPSS_LPT_SSP,
++ .port_id = 1,
++ .setup = lpss_spi_setup,
++ .tx_param = &lpt1_tx_param,
++ .rx_param = &lpt1_rx_param,
+ },
+ };
+
+@@ -285,8 +295,9 @@ static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
+ { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
+ { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
+ { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
+- { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT },
+- { },
++ { PCI_VDEVICE(INTEL, 0x9ce5), PORT_LPT0 },
++ { PCI_VDEVICE(INTEL, 0x9ce6), PORT_LPT1 },
++ { }
+ };
+ MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
+
+diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S
+index e95d6282109e7..68ea12bead227 100644
+--- a/drivers/spi/spi-s3c24xx-fiq.S
++++ b/drivers/spi/spi-s3c24xx-fiq.S
+@@ -33,7 +33,6 @@
+ @ and an offset to the irq acknowledgment word
+
+ ENTRY(s3c24xx_spi_fiq_rx)
+-s3c24xx_spi_fix_rx:
+ .word fiq_rx_end - fiq_rx_start
+ .word fiq_rx_irq_ack - fiq_rx_start
+ fiq_rx_start:
+@@ -47,7 +46,7 @@ fiq_rx_start:
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+- subnes pc, lr, #4 @@ return, still have work to do
++ subsne pc, lr, #4 @@ return, still have work to do
+
+ @@ set IRQ controller so that next op will trigger IRQ
+ mov fiq_rtmp, #0
+@@ -59,7 +58,6 @@ fiq_rx_irq_ack:
+ fiq_rx_end:
+
+ ENTRY(s3c24xx_spi_fiq_txrx)
+-s3c24xx_spi_fiq_txrx:
+ .word fiq_txrx_end - fiq_txrx_start
+ .word fiq_txrx_irq_ack - fiq_txrx_start
+ fiq_txrx_start:
+@@ -74,7 +72,7 @@ fiq_txrx_start:
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+- subnes pc, lr, #4 @@ return, still have work to do
++ subsne pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+@@ -86,7 +84,6 @@ fiq_txrx_irq_ack:
+ fiq_txrx_end:
+
+ ENTRY(s3c24xx_spi_fiq_tx)
+-s3c24xx_spi_fix_tx:
+ .word fiq_tx_end - fiq_tx_start
+ .word fiq_tx_irq_ack - fiq_tx_start
+ fiq_tx_start:
+@@ -99,7 +96,7 @@ fiq_tx_start:
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+- subnes pc, lr, #4 @@ return, still have work to do
++ subsne pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
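Two things happen in the spi-s3c24xx assembler: duplicate (and partly misspelled, "fix" for "fiq") local labels next to the ENTRY() markers are dropped, and the pre-unified-syntax mnemonic ordering is corrected, since modern gas in unified syntax only accepts the UAL form with the flag-setting S before the condition:

	/* pre-UAL:  subnes pc, lr, #4   - condition, then S suffix
	 * UAL:      subsne pc, lr, #4   - S suffix, then condition
	 */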
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 77ddf23b65d65..8622cf9d3f640 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -1668,6 +1668,10 @@ static int stm32_spi_transfer_one(struct spi_master *master,
+ struct stm32_spi *spi = spi_master_get_devdata(master);
+ int ret;
+
++ /* Don't do anything on 0 bytes transfers */
++ if (transfer->len == 0)
++ return 0;
++
+ spi->tx_buf = transfer->tx_buf;
+ spi->rx_buf = transfer->rx_buf;
+ spi->tx_len = spi->tx_buf ? transfer->len : 0;
+diff --git a/drivers/spi/spi-synquacer.c b/drivers/spi/spi-synquacer.c
+index 5ab5119e2f1b0..785e7c4451233 100644
+--- a/drivers/spi/spi-synquacer.c
++++ b/drivers/spi/spi-synquacer.c
+@@ -490,6 +490,10 @@ static void synquacer_spi_set_cs(struct spi_device *spi, bool enable)
+ val &= ~(SYNQUACER_HSSPI_DMPSEL_CS_MASK <<
+ SYNQUACER_HSSPI_DMPSEL_CS_SHIFT);
+ val |= spi->chip_select << SYNQUACER_HSSPI_DMPSEL_CS_SHIFT;
++
++ if (!enable)
++ val |= SYNQUACER_HSSPI_DMSTOP_STOP;
++
+ writel(val, sspi->regs + SYNQUACER_HSSPI_REG_DMSTART);
+ }
+
+diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
+index de844b4121107..bbbd311eda030 100644
+--- a/drivers/spmi/spmi-pmic-arb.c
++++ b/drivers/spmi/spmi-pmic-arb.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+- * Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2012-2015, 2017, 2021, The Linux Foundation. All rights reserved.
+ */
+ #include <linux/bitmap.h>
+ #include <linux/delay.h>
+@@ -505,8 +505,7 @@ static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
+ static void periph_interrupt(struct spmi_pmic_arb *pmic_arb, u16 apid)
+ {
+ unsigned int irq;
+- u32 status;
+- int id;
++ u32 status, id;
+ u8 sid = (pmic_arb->apid_data[apid].ppid >> 8) & 0xF;
+ u8 per = pmic_arb->apid_data[apid].ppid & 0xFF;
+
+diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
+index dc4da66c3695b..54bdb64f52e88 100644
+--- a/drivers/staging/gdm724x/gdm_usb.c
++++ b/drivers/staging/gdm724x/gdm_usb.c
+@@ -56,20 +56,24 @@ static int gdm_usb_recv(void *priv_dev,
+
+ static int request_mac_address(struct lte_udev *udev)
+ {
+- u8 buf[16] = {0,};
+- struct hci_packet *hci = (struct hci_packet *)buf;
++ struct hci_packet *hci;
+ struct usb_device *usbdev = udev->usbdev;
+ int actual;
+ int ret = -1;
+
++ hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);
++ if (!hci)
++ return -ENOMEM;
++
+ hci->cmd_evt = gdm_cpu_to_dev16(udev->gdm_ed, LTE_GET_INFORMATION);
+ hci->len = gdm_cpu_to_dev16(udev->gdm_ed, 1);
+ hci->data[0] = MAC_ADDRESS;
+
+- ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
++ ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
+ &actual, 1000);
+
+ udev->request_mac_addr = 1;
++ kfree(hci);
+
+ return ret;
+ }
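usb_bulk_msg() hands its buffer to the host controller for DMA, and on-stack memory is not DMA-safe (with VMAP_STACK it cannot be mapped at all), so the 16-byte stack array becomes a heap allocation, sized with struct_size() for the one-byte data[] payload and freed after the transfer:

	struct hci_packet *hci = kmalloc(struct_size(hci, data, 1), GFP_KERNEL);

	if (!hci)
		return -ENOMEM;
	/* fill cmd_evt / len / data[0], then: */
	ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), hci, 5,
			   &actual, 1000);
	kfree(hci);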
+diff --git a/drivers/staging/media/imx/imx-media-csc-scaler.c b/drivers/staging/media/imx/imx-media-csc-scaler.c
+index 2b635ebf62d6a..a15d970adb983 100644
+--- a/drivers/staging/media/imx/imx-media-csc-scaler.c
++++ b/drivers/staging/media/imx/imx-media-csc-scaler.c
+@@ -866,11 +866,7 @@ void imx_media_csc_scaler_device_unregister(struct imx_media_video_dev *vdev)
+ struct ipu_csc_scaler_priv *priv = vdev_to_priv(vdev);
+ struct video_device *vfd = priv->vdev.vfd;
+
+- mutex_lock(&priv->mutex);
+-
+ video_unregister_device(vfd);
+-
+- mutex_unlock(&priv->mutex);
+ }
+
+ struct imx_media_video_dev *
+diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
+index 2c3c2adca6832..e16408af92d9c 100644
+--- a/drivers/staging/media/imx/imx-media-dev.c
++++ b/drivers/staging/media/imx/imx-media-dev.c
+@@ -53,6 +53,7 @@ static int imx6_media_probe_complete(struct v4l2_async_notifier *notifier)
+ imxmd->m2m_vdev = imx_media_csc_scaler_device_init(imxmd);
+ if (IS_ERR(imxmd->m2m_vdev)) {
+ ret = PTR_ERR(imxmd->m2m_vdev);
++ imxmd->m2m_vdev = NULL;
+ goto unlock;
+ }
+
+@@ -107,10 +108,14 @@ static int imx_media_remove(struct platform_device *pdev)
+
+ v4l2_info(&imxmd->v4l2_dev, "Removing imx-media\n");
+
++ if (imxmd->m2m_vdev) {
++ imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
++ imxmd->m2m_vdev = NULL;
++ }
++
+ v4l2_async_notifier_unregister(&imxmd->notifier);
+ imx_media_unregister_ipu_internal_subdevs(imxmd);
+ v4l2_async_notifier_cleanup(&imxmd->notifier);
+- imx_media_csc_scaler_device_unregister(imxmd->m2m_vdev);
+ media_device_unregister(&imxmd->md);
+ v4l2_device_unregister(&imxmd->v4l2_dev);
+ media_device_cleanup(&imxmd->md);
+diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
+index 66da1bf10c32e..23256d1286f3e 100644
+--- a/drivers/staging/mt7621-dma/Makefile
++++ b/drivers/staging/mt7621-dma/Makefile
+@@ -1,4 +1,4 @@
+ # SPDX-License-Identifier: GPL-2.0
+-obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
++obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o
+
+ ccflags-y += -I$(srctree)/drivers/dma
+diff --git a/drivers/staging/mt7621-dma/hsdma-mt7621.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c
+new file mode 100644
+index 0000000000000..803b66d8ee6b5
+--- /dev/null
++++ b/drivers/staging/mt7621-dma/hsdma-mt7621.c
+@@ -0,0 +1,762 @@
++// SPDX-License-Identifier: GPL-2.0+
++/*
++ * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
++ * MTK HSDMA support
++ */
++
++#include <linux/dmaengine.h>
++#include <linux/dma-mapping.h>
++#include <linux/err.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/module.h>
++#include <linux/platform_device.h>
++#include <linux/slab.h>
++#include <linux/spinlock.h>
++#include <linux/irq.h>
++#include <linux/of_dma.h>
++#include <linux/reset.h>
++#include <linux/of_device.h>
++
++#include "virt-dma.h"
++
++#define HSDMA_BASE_OFFSET 0x800
++
++#define HSDMA_REG_TX_BASE 0x00
++#define HSDMA_REG_TX_CNT 0x04
++#define HSDMA_REG_TX_CTX 0x08
++#define HSDMA_REG_TX_DTX 0x0c
++#define HSDMA_REG_RX_BASE 0x100
++#define HSDMA_REG_RX_CNT 0x104
++#define HSDMA_REG_RX_CRX 0x108
++#define HSDMA_REG_RX_DRX 0x10c
++#define HSDMA_REG_INFO 0x200
++#define HSDMA_REG_GLO_CFG 0x204
++#define HSDMA_REG_RST_CFG 0x208
++#define HSDMA_REG_DELAY_INT 0x20c
++#define HSDMA_REG_FREEQ_THRES 0x210
++#define HSDMA_REG_INT_STATUS 0x220
++#define HSDMA_REG_INT_MASK 0x228
++#define HSDMA_REG_SCH_Q01 0x280
++#define HSDMA_REG_SCH_Q23 0x284
++
++#define HSDMA_DESCS_MAX 0xfff
++#define HSDMA_DESCS_NUM 8
++#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
++#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
++
++/* HSDMA_REG_INFO */
++#define HSDMA_INFO_INDEX_MASK 0xf
++#define HSDMA_INFO_INDEX_SHIFT 24
++#define HSDMA_INFO_BASE_MASK 0xff
++#define HSDMA_INFO_BASE_SHIFT 16
++#define HSDMA_INFO_RX_MASK 0xff
++#define HSDMA_INFO_RX_SHIFT 8
++#define HSDMA_INFO_TX_MASK 0xff
++#define HSDMA_INFO_TX_SHIFT 0
++
++/* HSDMA_REG_GLO_CFG */
++#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
++#define HSDMA_GLO_CLK_GATE BIT(30)
++#define HSDMA_GLO_BYTE_SWAP BIT(29)
++#define HSDMA_GLO_MULTI_DMA BIT(10)
++#define HSDMA_GLO_TWO_BUF BIT(9)
++#define HSDMA_GLO_32B_DESC BIT(8)
++#define HSDMA_GLO_BIG_ENDIAN BIT(7)
++#define HSDMA_GLO_TX_DONE BIT(6)
++#define HSDMA_GLO_BT_MASK 0x3
++#define HSDMA_GLO_BT_SHIFT 4
++#define HSDMA_GLO_RX_BUSY BIT(3)
++#define HSDMA_GLO_RX_DMA BIT(2)
++#define HSDMA_GLO_TX_BUSY BIT(1)
++#define HSDMA_GLO_TX_DMA BIT(0)
++
++#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
++#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
++
++#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
++ HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
++
++/* HSDMA_REG_RST_CFG */
++#define HSDMA_RST_RX_SHIFT 16
++#define HSDMA_RST_TX_SHIFT 0
++
++/* HSDMA_REG_DELAY_INT */
++#define HSDMA_DELAY_INT_EN BIT(15)
++#define HSDMA_DELAY_PEND_OFFSET 8
++#define HSDMA_DELAY_TIME_OFFSET 0
++#define HSDMA_DELAY_TX_OFFSET 16
++#define HSDMA_DELAY_RX_OFFSET 0
++
++#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
++ ((x) << HSDMA_DELAY_PEND_OFFSET))
++#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
++ HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
++
++/* HSDMA_REG_INT_STATUS */
++#define HSDMA_INT_DELAY_RX_COH BIT(31)
++#define HSDMA_INT_DELAY_RX_INT BIT(30)
++#define HSDMA_INT_DELAY_TX_COH BIT(29)
++#define HSDMA_INT_DELAY_TX_INT BIT(28)
++#define HSDMA_INT_RX_MASK 0x3
++#define HSDMA_INT_RX_SHIFT 16
++#define HSDMA_INT_RX_Q0 BIT(16)
++#define HSDMA_INT_TX_MASK 0xf
++#define HSDMA_INT_TX_SHIFT 0
++#define HSDMA_INT_TX_Q0 BIT(0)
++
++/* tx/rx dma desc flags */
++#define HSDMA_PLEN_MASK 0x3fff
++#define HSDMA_DESC_DONE BIT(31)
++#define HSDMA_DESC_LS0 BIT(30)
++#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
++#define HSDMA_DESC_TAG BIT(15)
++#define HSDMA_DESC_LS1 BIT(14)
++#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
++
++/* align 4 bytes */
++#define HSDMA_ALIGN_SIZE 3
++/* align size 128bytes */
++#define HSDMA_MAX_PLEN 0x3f80
++
++struct hsdma_desc {
++ u32 addr0;
++ u32 flags;
++ u32 addr1;
++ u32 unused;
++};
++
++struct mtk_hsdma_sg {
++ dma_addr_t src_addr;
++ dma_addr_t dst_addr;
++ u32 len;
++};
++
++struct mtk_hsdma_desc {
++ struct virt_dma_desc vdesc;
++ unsigned int num_sgs;
++ struct mtk_hsdma_sg sg[1];
++};
++
++struct mtk_hsdma_chan {
++ struct virt_dma_chan vchan;
++ unsigned int id;
++ dma_addr_t desc_addr;
++ int tx_idx;
++ int rx_idx;
++ struct hsdma_desc *tx_ring;
++ struct hsdma_desc *rx_ring;
++ struct mtk_hsdma_desc *desc;
++ unsigned int next_sg;
++};
++
++struct mtk_hsdam_engine {
++ struct dma_device ddev;
++ struct device_dma_parameters dma_parms;
++ void __iomem *base;
++ struct tasklet_struct task;
++ volatile unsigned long chan_issued;
++
++ struct mtk_hsdma_chan chan[1];
++};
++
++static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
++ struct mtk_hsdma_chan *chan)
++{
++ return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
++ ddev);
++}
++
++static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
++{
++ return container_of(c, struct mtk_hsdma_chan, vchan.chan);
++}
++
++static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
++ struct virt_dma_desc *vdesc)
++{
++ return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
++}
++
++static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
++{
++ return readl(hsdma->base + reg);
++}
++
++static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
++ unsigned int reg, u32 val)
++{
++ writel(val, hsdma->base + reg);
++}
++
++static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ chan->tx_idx = 0;
++ chan->rx_idx = HSDMA_DESCS_NUM - 1;
++
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
++
++ mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
++ 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
++ mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
++ 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
++}
++
++static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
++{
++ dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
++ "tctx %08x, tdtx: %08x, rbase %08x, " \
++ "rcnt %08x, rctx %08x, rdtx %08x\n",
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
++ mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
++ mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
++
++ dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
++ "intr_stat %08x, intr_mask %08x\n",
++ mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
++ mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
++ mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
++ mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
++ mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
++}
++
++static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ struct hsdma_desc *tx_desc;
++ struct hsdma_desc *rx_desc;
++ int i;
++
++ dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
++ chan->tx_idx, chan->rx_idx);
++
++ for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++ tx_desc = &chan->tx_ring[i];
++ rx_desc = &chan->rx_ring[i];
++
++ dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
++ "tx addr1: %08x, rx addr0 %08x, flags %08x\n",
++ i, tx_desc->addr0, tx_desc->flags, \
++ tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
++ }
++}
++
++static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ int i;
++
++ /* disable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
++
++ /* disable intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
++
++ /* init desc value */
++ for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++ chan->tx_ring[i].addr0 = 0;
++ chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
++ }
++ for (i = 0; i < HSDMA_DESCS_NUM; i++) {
++ chan->rx_ring[i].addr0 = 0;
++ chan->rx_ring[i].flags = 0;
++ }
++
++ /* reset */
++ mtk_hsdma_reset_chan(hsdma, chan);
++
++ /* enable intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
++
++ /* enable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
++}
++
++static int mtk_hsdma_terminate_all(struct dma_chan *c)
++{
++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++ struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
++ unsigned long timeout;
++ LIST_HEAD(head);
++
++ spin_lock_bh(&chan->vchan.lock);
++ chan->desc = NULL;
++ clear_bit(chan->id, &hsdma->chan_issued);
++ vchan_get_all_descriptors(&chan->vchan, &head);
++ spin_unlock_bh(&chan->vchan.lock);
++
++ vchan_dma_desc_free_list(&chan->vchan, &head);
++
++ /* wait dma transfer complete */
++ timeout = jiffies + msecs_to_jiffies(2000);
++ while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
++ (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
++ if (time_after_eq(jiffies, timeout)) {
++ hsdma_dump_desc(hsdma, chan);
++ mtk_hsdma_reset(hsdma, chan);
++ dev_err(hsdma->ddev.dev, "timeout, reset it\n");
++ break;
++ }
++ cpu_relax();
++ }
++
++ return 0;
++}
++
++static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ dma_addr_t src, dst;
++ size_t len, tlen;
++ struct hsdma_desc *tx_desc, *rx_desc;
++ struct mtk_hsdma_sg *sg;
++ unsigned int i;
++ int rx_idx;
++
++ sg = &chan->desc->sg[0];
++ len = sg->len;
++ chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
++
++ /* tx desc */
++ src = sg->src_addr;
++ for (i = 0; i < chan->desc->num_sgs; i++) {
++ tx_desc = &chan->tx_ring[chan->tx_idx];
++
++ if (len > HSDMA_MAX_PLEN)
++ tlen = HSDMA_MAX_PLEN;
++ else
++ tlen = len;
++
++ if (i & 0x1) {
++ tx_desc->addr1 = src;
++ tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
++ } else {
++ tx_desc->addr0 = src;
++ tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
++
++ /* update index */
++ chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
++ }
++
++ src += tlen;
++ len -= tlen;
++ }
++ if (i & 0x1)
++ tx_desc->flags |= HSDMA_DESC_LS0;
++ else
++ tx_desc->flags |= HSDMA_DESC_LS1;
++
++ /* rx desc */
++ rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
++ len = sg->len;
++ dst = sg->dst_addr;
++ for (i = 0; i < chan->desc->num_sgs; i++) {
++ rx_desc = &chan->rx_ring[rx_idx];
++ if (len > HSDMA_MAX_PLEN)
++ tlen = HSDMA_MAX_PLEN;
++ else
++ tlen = len;
++
++ rx_desc->addr0 = dst;
++ rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
++
++ dst += tlen;
++ len -= tlen;
++
++ /* update index */
++ rx_idx = HSDMA_NEXT_DESC(rx_idx);
++ }
++
++ /* make sure desc and index all up to date */
++ wmb();
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
++
++ return 0;
++}
++
++static int gdma_next_desc(struct mtk_hsdma_chan *chan)
++{
++ struct virt_dma_desc *vdesc;
++
++ vdesc = vchan_next_desc(&chan->vchan);
++ if (!vdesc) {
++ chan->desc = NULL;
++ return 0;
++ }
++ chan->desc = to_mtk_hsdma_desc(vdesc);
++ chan->next_sg = 0;
++
++ return 1;
++}
++
++static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ struct mtk_hsdma_desc *desc;
++ int chan_issued;
++
++ chan_issued = 0;
++ spin_lock_bh(&chan->vchan.lock);
++ desc = chan->desc;
++ if (likely(desc)) {
++ if (chan->next_sg == desc->num_sgs) {
++ list_del(&desc->vdesc.node);
++ vchan_cookie_complete(&desc->vdesc);
++ chan_issued = gdma_next_desc(chan);
++ }
++ } else {
++ dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
++ }
++
++ if (chan_issued)
++ set_bit(chan->id, &hsdma->chan_issued);
++ spin_unlock_bh(&chan->vchan.lock);
++}
++
++static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
++{
++ struct mtk_hsdam_engine *hsdma = devid;
++ u32 status;
++
++ status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
++ if (unlikely(!status))
++ return IRQ_NONE;
++
++ if (likely(status & HSDMA_INT_RX_Q0))
++ tasklet_schedule(&hsdma->task);
++ else
++ dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
++ /* clean intr bits */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
++
++ return IRQ_HANDLED;
++}
++
++static void mtk_hsdma_issue_pending(struct dma_chan *c)
++{
++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++ struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
++
++ spin_lock_bh(&chan->vchan.lock);
++ if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
++ if (gdma_next_desc(chan)) {
++ set_bit(chan->id, &hsdma->chan_issued);
++ tasklet_schedule(&hsdma->task);
++ } else {
++ dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
++ }
++ }
++ spin_unlock_bh(&chan->vchan.lock);
++}
++
++static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
++ struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
++ size_t len, unsigned long flags)
++{
++ struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
++ struct mtk_hsdma_desc *desc;
++
++ if (len <= 0)
++ return NULL;
++
++ desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
++ if (!desc) {
++ dev_err(c->device->dev, "alloc memcpy decs error\n");
++ return NULL;
++ }
++
++ desc->sg[0].src_addr = src;
++ desc->sg[0].dst_addr = dest;
++ desc->sg[0].len = len;
++
++ return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
++}
++
++static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
++ dma_cookie_t cookie,
++ struct dma_tx_state *state)
++{
++ return dma_cookie_status(c, cookie, state);
++}
++
++static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
++{
++ vchan_free_chan_resources(to_virt_chan(c));
++}
++
++static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
++{
++ kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
++}
++
++static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++
++ if (test_and_clear_bit(0, &hsdma->chan_issued)) {
++ chan = &hsdma->chan[0];
++ if (chan->desc)
++ mtk_hsdma_start_transfer(hsdma, chan);
++ else
++ dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
++ }
++}
++
++static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++ int next_idx, drx_idx, cnt;
++
++ chan = &hsdma->chan[0];
++ next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
++ drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
++
++ cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
++ if (!cnt)
++ return;
++
++ chan->next_sg += cnt;
++ chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
++
++ /* update rx crx */
++ wmb();
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
++
++ mtk_hsdma_chan_done(hsdma, chan);
++}
++
++static void mtk_hsdma_tasklet(unsigned long arg)
++{
++ struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;
++
++ mtk_hsdma_rx(hsdma);
++ mtk_hsdma_tx(hsdma);
++}
++
++static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ int i;
++
++ chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
++ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
++ &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
++ if (!chan->tx_ring)
++ goto no_mem;
++
++ chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
++
++ /* init tx ring value */
++ for (i = 0; i < HSDMA_DESCS_NUM; i++)
++ chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
++
++ return 0;
++no_mem:
++ return -ENOMEM;
++}
++
++static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
++ struct mtk_hsdma_chan *chan)
++{
++ if (chan->tx_ring) {
++ dma_free_coherent(hsdma->ddev.dev,
++ 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
++ chan->tx_ring, chan->desc_addr);
++ chan->tx_ring = NULL;
++ chan->rx_ring = NULL;
++ }
++}
++
++static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++ int ret;
++ u32 reg;
++
++ /* init desc */
++ chan = &hsdma->chan[0];
++ ret = mtk_hsdam_alloc_desc(hsdma, chan);
++ if (ret)
++ return ret;
++
++ /* tx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
++ /* rx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
++ (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
++ /* reset */
++ mtk_hsdma_reset_chan(hsdma, chan);
++
++ /* enable rx intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
++
++ /* enable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
++
++ /* hardware info */
++ reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
++ dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
++ (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
++ (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
++
++ hsdma_dump_reg(hsdma);
++
++ return ret;
++}
++
++static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
++{
++ struct mtk_hsdma_chan *chan;
++
++ /* disable dma */
++ mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
++
++ /* disable intr */
++ mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
++
++ /* free desc */
++ chan = &hsdma->chan[0];
++ mtk_hsdam_free_desc(hsdma, chan);
++
++ /* tx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
++ mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
++ /* rx */
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
++ mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
++ /* reset */
++ mtk_hsdma_reset_chan(hsdma, chan);
++}
++
++static const struct of_device_id mtk_hsdma_of_match[] = {
++ { .compatible = "mediatek,mt7621-hsdma" },
++ { },
++};
++
++static int mtk_hsdma_probe(struct platform_device *pdev)
++{
++ const struct of_device_id *match;
++ struct mtk_hsdma_chan *chan;
++ struct mtk_hsdam_engine *hsdma;
++ struct dma_device *dd;
++ struct resource *res;
++ int ret;
++ int irq;
++ void __iomem *base;
++
++ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
++ if (ret)
++ return ret;
++
++ match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
++ if (!match)
++ return -EINVAL;
++
++ hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
++ if (!hsdma)
++ return -EINVAL;
++
++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
++ base = devm_ioremap_resource(&pdev->dev, res);
++ if (IS_ERR(base))
++ return PTR_ERR(base);
++ hsdma->base = base + HSDMA_BASE_OFFSET;
++ tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
++
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
++ return -EINVAL;
++ ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
++ 0, dev_name(&pdev->dev), hsdma);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to request irq\n");
++ return ret;
++ }
++
++ device_reset(&pdev->dev);
++
++ dd = &hsdma->ddev;
++ dma_cap_set(DMA_MEMCPY, dd->cap_mask);
++ dd->copy_align = HSDMA_ALIGN_SIZE;
++ dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
++ dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
++ dd->device_terminate_all = mtk_hsdma_terminate_all;
++ dd->device_tx_status = mtk_hsdma_tx_status;
++ dd->device_issue_pending = mtk_hsdma_issue_pending;
++ dd->dev = &pdev->dev;
++ dd->dev->dma_parms = &hsdma->dma_parms;
++ dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
++ INIT_LIST_HEAD(&dd->channels);
++
++ chan = &hsdma->chan[0];
++ chan->id = 0;
++ chan->vchan.desc_free = mtk_hsdma_desc_free;
++ vchan_init(&chan->vchan, dd);
++
++ /* init hardware */
++ ret = mtk_hsdma_init(hsdma);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to alloc ring descs\n");
++ return ret;
++ }
++
++ ret = dma_async_device_register(dd);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register dma device\n");
++ goto err_uninit_hsdma;
++ }
++
++ ret = of_dma_controller_register(pdev->dev.of_node,
++ of_dma_xlate_by_chan_id, hsdma);
++ if (ret) {
++ dev_err(&pdev->dev, "failed to register of dma controller\n");
++ goto err_unregister;
++ }
++
++ platform_set_drvdata(pdev, hsdma);
++
++ return 0;
++
++err_unregister:
++ dma_async_device_unregister(dd);
++err_uninit_hsdma:
++ mtk_hsdma_uninit(hsdma);
++ return ret;
++}
++
++static int mtk_hsdma_remove(struct platform_device *pdev)
++{
++ struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
++
++ mtk_hsdma_uninit(hsdma);
++
++ of_dma_controller_free(pdev->dev.of_node);
++ dma_async_device_unregister(&hsdma->ddev);
++
++ return 0;
++}
++
++static struct platform_driver mtk_hsdma_driver = {
++ .probe = mtk_hsdma_probe,
++ .remove = mtk_hsdma_remove,
++ .driver = {
++ .name = KBUILD_MODNAME,
++ .of_match_table = mtk_hsdma_of_match,
++ },
++};
++module_platform_driver(mtk_hsdma_driver);
++
++MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
++MODULE_DESCRIPTION("MTK HSDMA driver");
++MODULE_LICENSE("GPL v2");
+diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
+deleted file mode 100644
+index bf2772af1045f..0000000000000
+--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
++++ /dev/null
+@@ -1,762 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0+
+-/*
+- * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
+- * MTK HSDMA support
+- */
+-
+-#include <linux/dmaengine.h>
+-#include <linux/dma-mapping.h>
+-#include <linux/err.h>
+-#include <linux/init.h>
+-#include <linux/list.h>
+-#include <linux/module.h>
+-#include <linux/platform_device.h>
+-#include <linux/slab.h>
+-#include <linux/spinlock.h>
+-#include <linux/irq.h>
+-#include <linux/of_dma.h>
+-#include <linux/reset.h>
+-#include <linux/of_device.h>
+-
+-#include "virt-dma.h"
+-
+-#define HSDMA_BASE_OFFSET 0x800
+-
+-#define HSDMA_REG_TX_BASE 0x00
+-#define HSDMA_REG_TX_CNT 0x04
+-#define HSDMA_REG_TX_CTX 0x08
+-#define HSDMA_REG_TX_DTX 0x0c
+-#define HSDMA_REG_RX_BASE 0x100
+-#define HSDMA_REG_RX_CNT 0x104
+-#define HSDMA_REG_RX_CRX 0x108
+-#define HSDMA_REG_RX_DRX 0x10c
+-#define HSDMA_REG_INFO 0x200
+-#define HSDMA_REG_GLO_CFG 0x204
+-#define HSDMA_REG_RST_CFG 0x208
+-#define HSDMA_REG_DELAY_INT 0x20c
+-#define HSDMA_REG_FREEQ_THRES 0x210
+-#define HSDMA_REG_INT_STATUS 0x220
+-#define HSDMA_REG_INT_MASK 0x228
+-#define HSDMA_REG_SCH_Q01 0x280
+-#define HSDMA_REG_SCH_Q23 0x284
+-
+-#define HSDMA_DESCS_MAX 0xfff
+-#define HSDMA_DESCS_NUM 8
+-#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
+-#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
+-
+-/* HSDMA_REG_INFO */
+-#define HSDMA_INFO_INDEX_MASK 0xf
+-#define HSDMA_INFO_INDEX_SHIFT 24
+-#define HSDMA_INFO_BASE_MASK 0xff
+-#define HSDMA_INFO_BASE_SHIFT 16
+-#define HSDMA_INFO_RX_MASK 0xff
+-#define HSDMA_INFO_RX_SHIFT 8
+-#define HSDMA_INFO_TX_MASK 0xff
+-#define HSDMA_INFO_TX_SHIFT 0
+-
+-/* HSDMA_REG_GLO_CFG */
+-#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
+-#define HSDMA_GLO_CLK_GATE BIT(30)
+-#define HSDMA_GLO_BYTE_SWAP BIT(29)
+-#define HSDMA_GLO_MULTI_DMA BIT(10)
+-#define HSDMA_GLO_TWO_BUF BIT(9)
+-#define HSDMA_GLO_32B_DESC BIT(8)
+-#define HSDMA_GLO_BIG_ENDIAN BIT(7)
+-#define HSDMA_GLO_TX_DONE BIT(6)
+-#define HSDMA_GLO_BT_MASK 0x3
+-#define HSDMA_GLO_BT_SHIFT 4
+-#define HSDMA_GLO_RX_BUSY BIT(3)
+-#define HSDMA_GLO_RX_DMA BIT(2)
+-#define HSDMA_GLO_TX_BUSY BIT(1)
+-#define HSDMA_GLO_TX_DMA BIT(0)
+-
+-#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
+-#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
+-#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
+-#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
+-
+-#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
+- HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
+-
+-/* HSDMA_REG_RST_CFG */
+-#define HSDMA_RST_RX_SHIFT 16
+-#define HSDMA_RST_TX_SHIFT 0
+-
+-/* HSDMA_REG_DELAY_INT */
+-#define HSDMA_DELAY_INT_EN BIT(15)
+-#define HSDMA_DELAY_PEND_OFFSET 8
+-#define HSDMA_DELAY_TIME_OFFSET 0
+-#define HSDMA_DELAY_TX_OFFSET 16
+-#define HSDMA_DELAY_RX_OFFSET 0
+-
+-#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
+- ((x) << HSDMA_DELAY_PEND_OFFSET))
+-#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
+- HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
+-
+-/* HSDMA_REG_INT_STATUS */
+-#define HSDMA_INT_DELAY_RX_COH BIT(31)
+-#define HSDMA_INT_DELAY_RX_INT BIT(30)
+-#define HSDMA_INT_DELAY_TX_COH BIT(29)
+-#define HSDMA_INT_DELAY_TX_INT BIT(28)
+-#define HSDMA_INT_RX_MASK 0x3
+-#define HSDMA_INT_RX_SHIFT 16
+-#define HSDMA_INT_RX_Q0 BIT(16)
+-#define HSDMA_INT_TX_MASK 0xf
+-#define HSDMA_INT_TX_SHIFT 0
+-#define HSDMA_INT_TX_Q0 BIT(0)
+-
+-/* tx/rx dma desc flags */
+-#define HSDMA_PLEN_MASK 0x3fff
+-#define HSDMA_DESC_DONE BIT(31)
+-#define HSDMA_DESC_LS0 BIT(30)
+-#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
+-#define HSDMA_DESC_TAG BIT(15)
+-#define HSDMA_DESC_LS1 BIT(14)
+-#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
+-
+-/* align 4 bytes */
+-#define HSDMA_ALIGN_SIZE 3
+-/* align size 128bytes */
+-#define HSDMA_MAX_PLEN 0x3f80
+-
+-struct hsdma_desc {
+- u32 addr0;
+- u32 flags;
+- u32 addr1;
+- u32 unused;
+-};
+-
+-struct mtk_hsdma_sg {
+- dma_addr_t src_addr;
+- dma_addr_t dst_addr;
+- u32 len;
+-};
+-
+-struct mtk_hsdma_desc {
+- struct virt_dma_desc vdesc;
+- unsigned int num_sgs;
+- struct mtk_hsdma_sg sg[1];
+-};
+-
+-struct mtk_hsdma_chan {
+- struct virt_dma_chan vchan;
+- unsigned int id;
+- dma_addr_t desc_addr;
+- int tx_idx;
+- int rx_idx;
+- struct hsdma_desc *tx_ring;
+- struct hsdma_desc *rx_ring;
+- struct mtk_hsdma_desc *desc;
+- unsigned int next_sg;
+-};
+-
+-struct mtk_hsdam_engine {
+- struct dma_device ddev;
+- struct device_dma_parameters dma_parms;
+- void __iomem *base;
+- struct tasklet_struct task;
+- volatile unsigned long chan_issued;
+-
+- struct mtk_hsdma_chan chan[1];
+-};
+-
+-static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
+- struct mtk_hsdma_chan *chan)
+-{
+- return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
+- ddev);
+-}
+-
+-static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
+-{
+- return container_of(c, struct mtk_hsdma_chan, vchan.chan);
+-}
+-
+-static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
+- struct virt_dma_desc *vdesc)
+-{
+- return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
+-}
+-
+-static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
+-{
+- return readl(hsdma->base + reg);
+-}
+-
+-static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
+- unsigned int reg, u32 val)
+-{
+- writel(val, hsdma->base + reg);
+-}
+-
+-static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- chan->tx_idx = 0;
+- chan->rx_idx = HSDMA_DESCS_NUM - 1;
+-
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
+-
+- mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
+- 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
+- mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
+- 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
+-}
+-
+-static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
+-{
+- dev_dbg(hsdma->ddev.dev, "tbase %08x, tcnt %08x, " \
+- "tctx %08x, tdtx: %08x, rbase %08x, " \
+- "rcnt %08x, rctx %08x, rdtx %08x\n",
+- mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
+- mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
+- mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
+- mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
+- mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
+- mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
+- mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
+- mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
+-
+- dev_dbg(hsdma->ddev.dev, "info %08x, glo %08x, delay %08x, " \
+- "intr_stat %08x, intr_mask %08x\n",
+- mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
+- mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
+- mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
+- mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
+- mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
+-}
+-
+-static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- struct hsdma_desc *tx_desc;
+- struct hsdma_desc *rx_desc;
+- int i;
+-
+- dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
+- chan->tx_idx, chan->rx_idx);
+-
+- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+- tx_desc = &chan->tx_ring[i];
+- rx_desc = &chan->rx_ring[i];
+-
+- dev_dbg(hsdma->ddev.dev, "%d tx addr0: %08x, flags %08x, " \
+- "tx addr1: %08x, rx addr0 %08x, flags %08x\n",
+- i, tx_desc->addr0, tx_desc->flags, \
+- tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
+- }
+-}
+-
+-static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- int i;
+-
+- /* disable dma */
+- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
+-
+- /* disable intr */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
+-
+- /* init desc value */
+- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+- chan->tx_ring[i].addr0 = 0;
+- chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
+- }
+- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
+- chan->rx_ring[i].addr0 = 0;
+- chan->rx_ring[i].flags = 0;
+- }
+-
+- /* reset */
+- mtk_hsdma_reset_chan(hsdma, chan);
+-
+- /* enable intr */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
+-
+- /* enable dma */
+- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
+-}
+-
+-static int mtk_hsdma_terminate_all(struct dma_chan *c)
+-{
+- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+- struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
+- unsigned long timeout;
+- LIST_HEAD(head);
+-
+- spin_lock_bh(&chan->vchan.lock);
+- chan->desc = NULL;
+- clear_bit(chan->id, &hsdma->chan_issued);
+- vchan_get_all_descriptors(&chan->vchan, &head);
+- spin_unlock_bh(&chan->vchan.lock);
+-
+- vchan_dma_desc_free_list(&chan->vchan, &head);
+-
+- /* wait dma transfer complete */
+- timeout = jiffies + msecs_to_jiffies(2000);
+- while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
+- (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
+- if (time_after_eq(jiffies, timeout)) {
+- hsdma_dump_desc(hsdma, chan);
+- mtk_hsdma_reset(hsdma, chan);
+- dev_err(hsdma->ddev.dev, "timeout, reset it\n");
+- break;
+- }
+- cpu_relax();
+- }
+-
+- return 0;
+-}
+-
+-static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- dma_addr_t src, dst;
+- size_t len, tlen;
+- struct hsdma_desc *tx_desc, *rx_desc;
+- struct mtk_hsdma_sg *sg;
+- unsigned int i;
+- int rx_idx;
+-
+- sg = &chan->desc->sg[0];
+- len = sg->len;
+- chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
+-
+- /* tx desc */
+- src = sg->src_addr;
+- for (i = 0; i < chan->desc->num_sgs; i++) {
+- tx_desc = &chan->tx_ring[chan->tx_idx];
+-
+- if (len > HSDMA_MAX_PLEN)
+- tlen = HSDMA_MAX_PLEN;
+- else
+- tlen = len;
+-
+- if (i & 0x1) {
+- tx_desc->addr1 = src;
+- tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
+- } else {
+- tx_desc->addr0 = src;
+- tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
+-
+- /* update index */
+- chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
+- }
+-
+- src += tlen;
+- len -= tlen;
+- }
+- if (i & 0x1)
+- tx_desc->flags |= HSDMA_DESC_LS0;
+- else
+- tx_desc->flags |= HSDMA_DESC_LS1;
+-
+- /* rx desc */
+- rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
+- len = sg->len;
+- dst = sg->dst_addr;
+- for (i = 0; i < chan->desc->num_sgs; i++) {
+- rx_desc = &chan->rx_ring[rx_idx];
+- if (len > HSDMA_MAX_PLEN)
+- tlen = HSDMA_MAX_PLEN;
+- else
+- tlen = len;
+-
+- rx_desc->addr0 = dst;
+- rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
+-
+- dst += tlen;
+- len -= tlen;
+-
+- /* update index */
+- rx_idx = HSDMA_NEXT_DESC(rx_idx);
+- }
+-
+- /* make sure desc and index all up to date */
+- wmb();
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
+-
+- return 0;
+-}
+-
+-static int gdma_next_desc(struct mtk_hsdma_chan *chan)
+-{
+- struct virt_dma_desc *vdesc;
+-
+- vdesc = vchan_next_desc(&chan->vchan);
+- if (!vdesc) {
+- chan->desc = NULL;
+- return 0;
+- }
+- chan->desc = to_mtk_hsdma_desc(vdesc);
+- chan->next_sg = 0;
+-
+- return 1;
+-}
+-
+-static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- struct mtk_hsdma_desc *desc;
+- int chan_issued;
+-
+- chan_issued = 0;
+- spin_lock_bh(&chan->vchan.lock);
+- desc = chan->desc;
+- if (likely(desc)) {
+- if (chan->next_sg == desc->num_sgs) {
+- list_del(&desc->vdesc.node);
+- vchan_cookie_complete(&desc->vdesc);
+- chan_issued = gdma_next_desc(chan);
+- }
+- } else {
+- dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
+- }
+-
+- if (chan_issued)
+- set_bit(chan->id, &hsdma->chan_issued);
+- spin_unlock_bh(&chan->vchan.lock);
+-}
+-
+-static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
+-{
+- struct mtk_hsdam_engine *hsdma = devid;
+- u32 status;
+-
+- status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
+- if (unlikely(!status))
+- return IRQ_NONE;
+-
+- if (likely(status & HSDMA_INT_RX_Q0))
+- tasklet_schedule(&hsdma->task);
+- else
+- dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
+- /* clean intr bits */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
+-
+- return IRQ_HANDLED;
+-}
+-
+-static void mtk_hsdma_issue_pending(struct dma_chan *c)
+-{
+- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+- struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
+-
+- spin_lock_bh(&chan->vchan.lock);
+- if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
+- if (gdma_next_desc(chan)) {
+- set_bit(chan->id, &hsdma->chan_issued);
+- tasklet_schedule(&hsdma->task);
+- } else {
+- dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
+- }
+- }
+- spin_unlock_bh(&chan->vchan.lock);
+-}
+-
+-static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
+- struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
+- size_t len, unsigned long flags)
+-{
+- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
+- struct mtk_hsdma_desc *desc;
+-
+- if (len <= 0)
+- return NULL;
+-
+- desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
+- if (!desc) {
+- dev_err(c->device->dev, "alloc memcpy decs error\n");
+- return NULL;
+- }
+-
+- desc->sg[0].src_addr = src;
+- desc->sg[0].dst_addr = dest;
+- desc->sg[0].len = len;
+-
+- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
+-}
+-
+-static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
+- dma_cookie_t cookie,
+- struct dma_tx_state *state)
+-{
+- return dma_cookie_status(c, cookie, state);
+-}
+-
+-static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
+-{
+- vchan_free_chan_resources(to_virt_chan(c));
+-}
+-
+-static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
+-{
+- kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
+-}
+-
+-static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
+-{
+- struct mtk_hsdma_chan *chan;
+-
+- if (test_and_clear_bit(0, &hsdma->chan_issued)) {
+- chan = &hsdma->chan[0];
+- if (chan->desc)
+- mtk_hsdma_start_transfer(hsdma, chan);
+- else
+- dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
+- }
+-}
+-
+-static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
+-{
+- struct mtk_hsdma_chan *chan;
+- int next_idx, drx_idx, cnt;
+-
+- chan = &hsdma->chan[0];
+- next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
+- drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
+-
+- cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
+- if (!cnt)
+- return;
+-
+- chan->next_sg += cnt;
+- chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
+-
+- /* update rx crx */
+- wmb();
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
+-
+- mtk_hsdma_chan_done(hsdma, chan);
+-}
+-
+-static void mtk_hsdma_tasklet(unsigned long arg)
+-{
+- struct mtk_hsdam_engine *hsdma = (struct mtk_hsdam_engine *)arg;
+-
+- mtk_hsdma_rx(hsdma);
+- mtk_hsdma_tx(hsdma);
+-}
+-
+-static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- int i;
+-
+- chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
+- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+- &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
+- if (!chan->tx_ring)
+- goto no_mem;
+-
+- chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
+-
+- /* init tx ring value */
+- for (i = 0; i < HSDMA_DESCS_NUM; i++)
+- chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
+-
+- return 0;
+-no_mem:
+- return -ENOMEM;
+-}
+-
+-static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
+- struct mtk_hsdma_chan *chan)
+-{
+- if (chan->tx_ring) {
+- dma_free_coherent(hsdma->ddev.dev,
+- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
+- chan->tx_ring, chan->desc_addr);
+- chan->tx_ring = NULL;
+- chan->rx_ring = NULL;
+- }
+-}
+-
+-static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
+-{
+- struct mtk_hsdma_chan *chan;
+- int ret;
+- u32 reg;
+-
+- /* init desc */
+- chan = &hsdma->chan[0];
+- ret = mtk_hsdam_alloc_desc(hsdma, chan);
+- if (ret)
+- return ret;
+-
+- /* tx */
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
+- /* rx */
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
+- (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
+- /* reset */
+- mtk_hsdma_reset_chan(hsdma, chan);
+-
+- /* enable rx intr */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
+-
+- /* enable dma */
+- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
+-
+- /* hardware info */
+- reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
+- dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
+- (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
+- (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
+-
+- hsdma_dump_reg(hsdma);
+-
+- return ret;
+-}
+-
+-static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
+-{
+- struct mtk_hsdma_chan *chan;
+-
+- /* disable dma */
+- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
+-
+- /* disable intr */
+- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
+-
+- /* free desc */
+- chan = &hsdma->chan[0];
+- mtk_hsdam_free_desc(hsdma, chan);
+-
+- /* tx */
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
+- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
+- /* rx */
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
+- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
+- /* reset */
+- mtk_hsdma_reset_chan(hsdma, chan);
+-}
+-
+-static const struct of_device_id mtk_hsdma_of_match[] = {
+- { .compatible = "mediatek,mt7621-hsdma" },
+- { },
+-};
+-
+-static int mtk_hsdma_probe(struct platform_device *pdev)
+-{
+- const struct of_device_id *match;
+- struct mtk_hsdma_chan *chan;
+- struct mtk_hsdam_engine *hsdma;
+- struct dma_device *dd;
+- struct resource *res;
+- int ret;
+- int irq;
+- void __iomem *base;
+-
+- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+- if (ret)
+- return ret;
+-
+- match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
+- if (!match)
+- return -EINVAL;
+-
+- hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
+- if (!hsdma)
+- return -EINVAL;
+-
+- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- base = devm_ioremap_resource(&pdev->dev, res);
+- if (IS_ERR(base))
+- return PTR_ERR(base);
+- hsdma->base = base + HSDMA_BASE_OFFSET;
+- tasklet_init(&hsdma->task, mtk_hsdma_tasklet, (unsigned long)hsdma);
+-
+- irq = platform_get_irq(pdev, 0);
+- if (irq < 0)
+- return -EINVAL;
+- ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
+- 0, dev_name(&pdev->dev), hsdma);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to request irq\n");
+- return ret;
+- }
+-
+- device_reset(&pdev->dev);
+-
+- dd = &hsdma->ddev;
+- dma_cap_set(DMA_MEMCPY, dd->cap_mask);
+- dd->copy_align = HSDMA_ALIGN_SIZE;
+- dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
+- dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
+- dd->device_terminate_all = mtk_hsdma_terminate_all;
+- dd->device_tx_status = mtk_hsdma_tx_status;
+- dd->device_issue_pending = mtk_hsdma_issue_pending;
+- dd->dev = &pdev->dev;
+- dd->dev->dma_parms = &hsdma->dma_parms;
+- dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
+- INIT_LIST_HEAD(&dd->channels);
+-
+- chan = &hsdma->chan[0];
+- chan->id = 0;
+- chan->vchan.desc_free = mtk_hsdma_desc_free;
+- vchan_init(&chan->vchan, dd);
+-
+- /* init hardware */
+- ret = mtk_hsdma_init(hsdma);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to alloc ring descs\n");
+- return ret;
+- }
+-
+- ret = dma_async_device_register(dd);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to register dma device\n");
+- goto err_uninit_hsdma;
+- }
+-
+- ret = of_dma_controller_register(pdev->dev.of_node,
+- of_dma_xlate_by_chan_id, hsdma);
+- if (ret) {
+- dev_err(&pdev->dev, "failed to register of dma controller\n");
+- goto err_unregister;
+- }
+-
+- platform_set_drvdata(pdev, hsdma);
+-
+- return 0;
+-
+-err_unregister:
+- dma_async_device_unregister(dd);
+-err_uninit_hsdma:
+- mtk_hsdma_uninit(hsdma);
+- return ret;
+-}
+-
+-static int mtk_hsdma_remove(struct platform_device *pdev)
+-{
+- struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
+-
+- mtk_hsdma_uninit(hsdma);
+-
+- of_dma_controller_free(pdev->dev.of_node);
+- dma_async_device_unregister(&hsdma->ddev);
+-
+- return 0;
+-}
+-
+-static struct platform_driver mtk_hsdma_driver = {
+- .probe = mtk_hsdma_probe,
+- .remove = mtk_hsdma_remove,
+- .driver = {
+- .name = "hsdma-mt7621",
+- .of_match_table = mtk_hsdma_of_match,
+- },
+-};
+-module_platform_driver(mtk_hsdma_driver);
+-
+-MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
+-MODULE_DESCRIPTION("MTK HSDMA driver");
+-MODULE_LICENSE("GPL v2");
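
The driver deleted above managed its single DMA channel with power-of-two descriptor rings: HSDMA_NEXT_DESC() advances an index modulo the ring size, and mtk_hsdma_rx() counts completed descriptors as the masked difference between the hardware DRX pointer and the software index. A standalone sketch of that ring arithmetic, assuming HSDMA_DESCS_NUM is a power of two and HSDMA_DESCS_MASK equals HSDMA_DESCS_NUM - 1 (the real definitions live outside this hunk):

    #include <stdio.h>

    #define HSDMA_DESCS_NUM    8u                  /* assumed power of two */
    #define HSDMA_DESCS_MASK   (HSDMA_DESCS_NUM - 1)
    #define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)

    int main(void)
    {
        unsigned int rx_idx = 6;  /* last descriptor reaped by software */
        unsigned int drx_idx = 2; /* hardware write pointer, already wrapped */
        unsigned int next = HSDMA_NEXT_DESC(rx_idx);
        unsigned int cnt = (drx_idx - next) & HSDMA_DESCS_MASK;

        /* slots 7, 0 and 1 completed: cnt == 3 despite the wrap */
        printf("completed descriptors: %u\n", cnt);
        return 0;
    }

The unsigned subtraction plus mask is what lets both indices run freely with no explicit wrap test.
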
+diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+index f7f09c0d273f5..5b103e829ee7f 100644
+--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
++++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+@@ -41,6 +41,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
+ {USB_DEVICE(0x2357, 0x0111)}, /* TP-Link TL-WN727N v5.21 */
+ {USB_DEVICE(0x2C4E, 0x0102)}, /* MERCUSYS MW150US v2 */
+ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
++ {USB_DEVICE(0x7392, 0xb811)}, /* Edimax EW-7811UN V2 */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
+ {} /* Terminating entry */
+ };
+diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+index 578b9f734231e..65592bf84f380 100644
+--- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
++++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c
+@@ -34,7 +34,7 @@
+ NL80211_RRF_PASSIVE_SCAN)
+
+ static const struct ieee80211_regdomain rtw_regdom_rd = {
+- .n_reg_rules = 3,
++ .n_reg_rules = 2,
+ .alpha2 = "99",
+ .reg_rules = {
+ RTW_2GHZ_CH01_11,
+diff --git a/drivers/target/iscsi/cxgbit/cxgbit_target.c b/drivers/target/iscsi/cxgbit/cxgbit_target.c
+index fcdc4211e3c27..45a1bfa2f7351 100644
+--- a/drivers/target/iscsi/cxgbit/cxgbit_target.c
++++ b/drivers/target/iscsi/cxgbit/cxgbit_target.c
+@@ -86,8 +86,7 @@ static int cxgbit_is_ofld_imm(const struct sk_buff *skb)
+ if (likely(cxgbit_skcb_flags(skb) & SKCBF_TX_ISO))
+ length += sizeof(struct cpl_tx_data_iso);
+
+-#define MAX_IMM_TX_PKT_LEN 256
+- return length <= MAX_IMM_TX_PKT_LEN;
++ return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN;
+ }
+
+ /*
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 81afe553aa666..a91f2aa24118a 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -1313,19 +1313,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
+ if (num_packets > max_hc_pkt_count) {
+ num_packets = max_hc_pkt_count;
+ chan->xfer_len = num_packets * chan->max_packet;
++ } else if (chan->ep_is_in) {
++ /*
++ * Always program an integral # of max packets
++ * for IN transfers.
++ * Note: This assumes that the input buffer is
++ * aligned and sized accordingly.
++ */
++ chan->xfer_len = num_packets * chan->max_packet;
+ }
+ } else {
+ /* Need 1 packet for transfer length of 0 */
+ num_packets = 1;
+ }
+
+- if (chan->ep_is_in)
+- /*
+- * Always program an integral # of max packets for IN
+- * transfers
+- */
+- chan->xfer_len = num_packets * chan->max_packet;
+-
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ /*
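
The hunk above moves the IN-transfer rounding inside the non-zero branch: previously the unconditional rounding after the else clause also inflated a zero-length IN (num_packets = 1) to one full max packet. A hedged sketch of the corrected sizing rule, with illustrative parameters rather than the driver's channel struct:

    #include <stdio.h>

    static unsigned int in_xfer_len(unsigned int xfer_len,
                                    unsigned int max_packet,
                                    unsigned int max_hc_pkt_count)
    {
        unsigned int num_packets;

        if (xfer_len > 0) {
            num_packets = (xfer_len + max_packet - 1) / max_packet;
            if (num_packets > max_hc_pkt_count)
                num_packets = max_hc_pkt_count;
            /* IN transfers are programmed as whole max packets */
            return num_packets * max_packet;
        }
        return 0; /* zero-length packet: length stays 0 */
    }

    int main(void)
    {
        printf("%u\n", in_xfer_len(100, 64, 256)); /* 128: rounded up */
        printf("%u\n", in_xfer_len(0, 64, 256));   /* 0: ZLP no longer inflated */
        return 0;
    }
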
+diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
+index a052d39b4375e..d5f4ec1b73b15 100644
+--- a/drivers/usb/dwc2/hcd_intr.c
++++ b/drivers/usb/dwc2/hcd_intr.c
+@@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
+ &short_read);
+
+ if (urb->actual_length + xfer_length > urb->length) {
+- dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
++ dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+ xfer_length = urb->length - urb->actual_length;
+ }
+
+@@ -1977,6 +1977,18 @@ error:
+ qtd->error_count++;
+ dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
+ qtd, DWC2_HC_XFER_XACT_ERR);
++ /*
++ * We can get here after a completed transaction
++ * (urb->actual_length >= urb->length) which was not reported
++ * as completed. If that is the case, and we do not abort
++ * the transfer, a transfer of size 0 will be enqueued
++ * subsequently. If urb->actual_length is not DMA-aligned,
++ * the buffer will then point to an unaligned address, and
++ * the resulting behavior is undefined. Bail out in that
++ * situation.
++ */
++ if (qtd->urb->actual_length >= qtd->urb->length)
++ qtd->error_count = 3;
+ dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
+ dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
+ }
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 9269cda4c1831..904b0043011cf 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -593,8 +593,23 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action)
+ params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+ if (desc->bInterval) {
+- params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
+- dep->interval = 1 << (desc->bInterval - 1);
++ u8 bInterval_m1;
++
++ /*
++ * Valid range for DEPCFG.bInterval_m1 is from 0 to 13, and it
++ * must be set to 0 when the controller operates in full-speed.
++ */
++ bInterval_m1 = min_t(u8, desc->bInterval - 1, 13);
++ if (dwc->gadget.speed == USB_SPEED_FULL)
++ bInterval_m1 = 0;
++
++ if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT &&
++ dwc->gadget.speed == USB_SPEED_FULL)
++ dep->interval = desc->bInterval;
++ else
++ dep->interval = 1 << (desc->bInterval - 1);
++
++ params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(bInterval_m1);
+ }
+
+ return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
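
A compact sketch of the two rules the dwc3 hunk introduces: DEPCFG.bInterval_m1 is clamped to its architected 0..13 range and forced to 0 at full speed, and a full-speed interrupt endpoint keeps bInterval as a frame count instead of 2^(bInterval-1). Names here are illustrative, not the driver's:

    #include <stdio.h>

    enum speed { FULL_SPEED, HIGH_SPEED };

    static unsigned char binterval_m1(unsigned char bInterval, enum speed spd)
    {
        unsigned char m1 = bInterval - 1;

        if (m1 > 13)            /* valid DEPCFG.bInterval_m1 range is 0..13 */
            m1 = 13;
        if (spd == FULL_SPEED)  /* must be 0 in full-speed operation */
            m1 = 0;
        return m1;
    }

    static unsigned int ep_interval(unsigned char bInterval, enum speed spd,
                                    int is_interrupt)
    {
        if (is_interrupt && spd == FULL_SPEED)
            return bInterval;          /* frames */
        return 1u << (bInterval - 1);  /* the usual exponential encoding */
    }

    int main(void)
    {
        printf("%u %u\n", binterval_m1(16, HIGH_SPEED),
               ep_interval(16, HIGH_SPEED, 0)); /* 13 32768 */
        printf("%u %u\n", binterval_m1(4, FULL_SPEED),
               ep_interval(4, FULL_SPEED, 1));  /* 0 4 */
        return 0;
    }
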
+diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
+index 56906d15fb551..223029fa84459 100644
+--- a/drivers/usb/gadget/function/u_audio.c
++++ b/drivers/usb/gadget/function/u_audio.c
+@@ -89,7 +89,12 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
+ struct snd_uac_chip *uac = prm->uac;
+
+ /* i/f shutting down */
+- if (!prm->ep_enabled || req->status == -ESHUTDOWN)
++ if (!prm->ep_enabled) {
++ usb_ep_free_request(ep, req);
++ return;
++ }
++
++ if (req->status == -ESHUTDOWN)
+ return;
+
+ /*
+@@ -351,8 +356,14 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
+
+ for (i = 0; i < params->req_number; i++) {
+ if (prm->ureq[i].req) {
+- usb_ep_dequeue(ep, prm->ureq[i].req);
+- usb_ep_free_request(ep, prm->ureq[i].req);
++ if (usb_ep_dequeue(ep, prm->ureq[i].req))
++ usb_ep_free_request(ep, prm->ureq[i].req);
++ /*
++ * If usb_ep_dequeue() cannot successfully dequeue the
++ * request, the request will be freed by the completion
++ * callback.
++ */
++
+ prm->ureq[i].req = NULL;
+ }
+ }
+diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
+index 0fbf9adef34be..9fcff4e94484e 100644
+--- a/drivers/usb/musb/musb_core.c
++++ b/drivers/usb/musb/musb_core.c
+@@ -2102,32 +2102,35 @@ int musb_queue_resume_work(struct musb *musb,
+ {
+ struct musb_pending_work *w;
+ unsigned long flags;
++ bool is_suspended;
+ int error;
+
+ if (WARN_ON(!callback))
+ return -EINVAL;
+
+- if (pm_runtime_active(musb->controller))
+- return callback(musb, data);
++ spin_lock_irqsave(&musb->list_lock, flags);
++ is_suspended = musb->is_runtime_suspended;
++
++ if (is_suspended) {
++ w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
++ if (!w) {
++ error = -ENOMEM;
++ goto out_unlock;
++ }
+
+- w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+- if (!w)
+- return -ENOMEM;
++ w->callback = callback;
++ w->data = data;
+
+- w->callback = callback;
+- w->data = data;
+- spin_lock_irqsave(&musb->list_lock, flags);
+- if (musb->is_runtime_suspended) {
+ list_add_tail(&w->node, &musb->pending_list);
+ error = 0;
+- } else {
+- dev_err(musb->controller, "could not add resume work %p\n",
+- callback);
+- devm_kfree(musb->controller, w);
+- error = -EINPROGRESS;
+ }
++
++out_unlock:
+ spin_unlock_irqrestore(&musb->list_lock, flags);
+
++ if (!is_suspended)
++ error = callback(musb, data);
++
+ return error;
+ }
+ EXPORT_SYMBOL_GPL(musb_queue_resume_work);
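
The musb rework closes a race by sampling the suspend state and queuing the pending work under list_lock, then running the callback only after the lock is dropped. A userspace sketch of that shape, with pthreads standing in for the spinlock and illustrative names throughout:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
    static int is_runtime_suspended;

    static int queue_or_run(int (*callback)(void *), void *data)
    {
        int suspended, error = 0;

        pthread_mutex_lock(&list_lock);
        suspended = is_runtime_suspended; /* decided once, under the lock */
        if (suspended) {
            /* ... allocate and list_add_tail() the pending work here ... */
        }
        pthread_mutex_unlock(&list_lock);

        if (!suspended)
            error = callback(data);       /* run without holding the lock */
        return error;
    }

    static int cb(void *data) { printf("resume: %s\n", (char *)data); return 0; }

    int main(void)
    {
        return queue_or_run(cb, "now");
    }

Invoking the callback outside the critical section matches the fix above, which drops list_lock before calling it.
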
+diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
+index 01a98d071c7c7..c00e4177651a8 100644
+--- a/drivers/usb/serial/ftdi_sio.c
++++ b/drivers/usb/serial/ftdi_sio.c
+@@ -1386,8 +1386,9 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
+ index_value = get_ftdi_divisor(tty, port);
+ value = (u16)index_value;
+ index = (u16)(index_value >> 16);
+- if ((priv->chip_type == FT2232C) || (priv->chip_type == FT2232H) ||
+- (priv->chip_type == FT4232H) || (priv->chip_type == FT232H)) {
++ if (priv->chip_type == FT2232C || priv->chip_type == FT2232H ||
++ priv->chip_type == FT4232H || priv->chip_type == FT232H ||
++ priv->chip_type == FTX) {
+ /* Probably the BM type needs the MSB of the encoded fractional
+ * divider also moved like for the chips above. Any infos? */
+ index = (u16)((index << 8) | priv->interface);
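
For the multi-channel FTDI types (and, with this fix, FTX) the divisor's high bits move into the top byte of wIndex so the low byte can carry the interface number. A sketch of the packing, assuming index_value is the 32-bit result of get_ftdi_divisor():

    #include <stdio.h>

    static unsigned short pack_index(unsigned int index_value,
                                     unsigned char interface)
    {
        unsigned short index = (unsigned short)(index_value >> 16);

        return (unsigned short)((index << 8) | interface);
    }

    int main(void)
    {
        /* divisor high word 0x0002 on interface 1 -> wIndex 0x0201 */
        printf("0x%04x\n", pack_index(0x00020000u, 1));
        return 0;
    }
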
+diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
+index 55b2879f27bdc..aefc1b58d9563 100644
+--- a/drivers/usb/serial/mos7720.c
++++ b/drivers/usb/serial/mos7720.c
+@@ -1250,8 +1250,10 @@ static int mos7720_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (urb->transfer_buffer == NULL) {
+ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ GFP_ATOMIC);
+- if (!urb->transfer_buffer)
++ if (!urb->transfer_buffer) {
++ bytes_sent = -ENOMEM;
+ goto exit;
++ }
+ }
+ transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
+
+diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
+index ab4bf8d6d7df0..2b8a0d4b66fce 100644
+--- a/drivers/usb/serial/mos7840.c
++++ b/drivers/usb/serial/mos7840.c
+@@ -1330,8 +1330,10 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
+ if (urb->transfer_buffer == NULL) {
+ urb->transfer_buffer = kmalloc(URB_TRANSFER_BUFFER_SIZE,
+ GFP_ATOMIC);
+- if (!urb->transfer_buffer)
++ if (!urb->transfer_buffer) {
++ bytes_sent = -ENOMEM;
+ goto exit;
++ }
+ }
+ transfer_size = min(count, URB_TRANSFER_BUFFER_SIZE);
+
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index f49eae18500cc..5c167bc089a08 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -1569,7 +1569,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) },
+- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) },
++ { USB_DEVICE(ZTE_VENDOR_ID, 0x1275), /* ZTE P685M */
++ .driver_info = RSVD(3) | RSVD(4) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) },
+diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
+index bc6ba41686fa3..6b1e8cba17984 100644
+--- a/drivers/vfio/vfio_iommu_type1.c
++++ b/drivers/vfio/vfio_iommu_type1.c
+@@ -24,6 +24,7 @@
+ #include <linux/compat.h>
+ #include <linux/device.h>
+ #include <linux/fs.h>
++#include <linux/highmem.h>
+ #include <linux/iommu.h>
+ #include <linux/module.h>
+ #include <linux/mm.h>
+@@ -339,9 +340,11 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+ unsigned long vaddr, unsigned long *pfn,
+ bool write_fault)
+ {
++ pte_t *ptep;
++ spinlock_t *ptl;
+ int ret;
+
+- ret = follow_pfn(vma, vaddr, pfn);
++ ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
+ if (ret) {
+ bool unlocked = false;
+
+@@ -355,9 +358,17 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm,
+ if (ret)
+ return ret;
+
+- ret = follow_pfn(vma, vaddr, pfn);
++ ret = follow_pte(vma->vm_mm, vaddr, &ptep, &ptl);
++ if (ret)
++ return ret;
+ }
+
++ if (write_fault && !pte_write(*ptep))
++ ret = -EFAULT;
++ else
++ *pfn = pte_pfn(*ptep);
++
++ pte_unmap_unlock(ptep, ptl);
+ return ret;
+ }
+
+@@ -866,6 +877,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
+
+ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
+ {
++ WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list));
+ vfio_unmap_unpin(iommu, dma, true);
+ vfio_unlink_dma(iommu, dma);
+ put_task_struct(dma->task);
+@@ -1974,23 +1986,6 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
+ }
+ }
+
+-static void vfio_sanity_check_pfn_list(struct vfio_iommu *iommu)
+-{
+- struct rb_node *n;
+-
+- n = rb_first(&iommu->dma_list);
+- for (; n; n = rb_next(n)) {
+- struct vfio_dma *dma;
+-
+- dma = rb_entry(n, struct vfio_dma, node);
+-
+- if (WARN_ON(!RB_EMPTY_ROOT(&dma->pfn_list)))
+- break;
+- }
+- /* mdev vendor driver must unregister notifier */
+- WARN_ON(iommu->notifier.head);
+-}
+-
+ /*
+ * Called when a domain is removed in detach. It is possible that
+ * the removed domain decided the iova aperture window. Modify the
+@@ -2088,10 +2083,10 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
+ kfree(group);
+
+ if (list_empty(&iommu->external_domain->group_list)) {
+- vfio_sanity_check_pfn_list(iommu);
+-
+- if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
++ if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
++ WARN_ON(iommu->notifier.head);
+ vfio_iommu_unmap_unpin_all(iommu);
++ }
+
+ kfree(iommu->external_domain);
+ iommu->external_domain = NULL;
+@@ -2124,10 +2119,12 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
+ */
+ if (list_empty(&domain->group_list)) {
+ if (list_is_singular(&iommu->domain_list)) {
+- if (!iommu->external_domain)
++ if (!iommu->external_domain) {
++ WARN_ON(iommu->notifier.head);
+ vfio_iommu_unmap_unpin_all(iommu);
+- else
++ } else {
+ vfio_iommu_unmap_unpin_reaccount(iommu);
++ }
+ }
+ iommu_domain_free(domain->domain);
+ list_del(&domain->next);
+@@ -2201,7 +2198,6 @@ static void vfio_iommu_type1_release(void *iommu_data)
+
+ if (iommu->external_domain) {
+ vfio_release_domain(iommu->external_domain, true);
+- vfio_sanity_check_pfn_list(iommu);
+ kfree(iommu->external_domain);
+ }
+
+diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
+index 1e70e838530ee..a7e5f12687b70 100644
+--- a/drivers/video/fbdev/Kconfig
++++ b/drivers/video/fbdev/Kconfig
+@@ -1269,6 +1269,7 @@ config FB_ATY
+ select FB_CFB_IMAGEBLIT
+ select FB_BACKLIGHT if FB_ATY_BACKLIGHT
+ select FB_MACMODES if PPC
++ select FB_ATY_CT if SPARC64 && PCI
+ help
+ This driver supports graphics boards with the ATI Mach64 chips.
+ Say Y if you have such a graphics board.
+@@ -1279,7 +1280,6 @@ config FB_ATY
+ config FB_ATY_CT
+ bool "Mach64 CT/VT/GT/LT (incl. 3D RAGE) support"
+ depends on PCI && FB_ATY
+- default y if SPARC64 && PCI
+ help
+ Say Y here to support use of ATI's 64-bit Rage boards (or other
+ boards based on the Mach64 CT, VT, GT, and LT chipsets) as a
+diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
+index 43c391626a000..bf2945c25ca8f 100644
+--- a/drivers/virt/vboxguest/vboxguest_utils.c
++++ b/drivers/virt/vboxguest/vboxguest_utils.c
+@@ -466,7 +466,7 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call)
+ * Cancellation fun.
+ */
+ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
+- u32 timeout_ms, bool *leak_it)
++ u32 timeout_ms, bool interruptible, bool *leak_it)
+ {
+ int rc, cancel_rc, ret;
+ long timeout;
+@@ -493,10 +493,15 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call,
+ else
+ timeout = msecs_to_jiffies(timeout_ms);
+
+- timeout = wait_event_interruptible_timeout(
+- gdev->hgcm_wq,
+- hgcm_req_done(gdev, &call->header),
+- timeout);
++ if (interruptible) {
++ timeout = wait_event_interruptible_timeout(gdev->hgcm_wq,
++ hgcm_req_done(gdev, &call->header),
++ timeout);
++ } else {
++ timeout = wait_event_timeout(gdev->hgcm_wq,
++ hgcm_req_done(gdev, &call->header),
++ timeout);
++ }
+
+ /* timeout > 0 means hgcm_req_done has returned true, so success */
+ if (timeout > 0)
+@@ -629,7 +634,8 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+ hgcm_call_init_call(call, client_id, function, parms, parm_count,
+ bounce_bufs);
+
+- ret = vbg_hgcm_do_call(gdev, call, timeout_ms, &leak_it);
++ ret = vbg_hgcm_do_call(gdev, call, timeout_ms,
++ requestor & VMMDEV_REQUESTOR_USERMODE, &leak_it);
+ if (ret == 0) {
+ *vbox_status = call->header.result;
+ ret = hgcm_call_copy_back_result(call, parms, parm_count,
+diff --git a/drivers/watchdog/mei_wdt.c b/drivers/watchdog/mei_wdt.c
+index 5391bf3e6b11d..c5967d8b4256a 100644
+--- a/drivers/watchdog/mei_wdt.c
++++ b/drivers/watchdog/mei_wdt.c
+@@ -382,6 +382,7 @@ static int mei_wdt_register(struct mei_wdt *wdt)
+
+ watchdog_set_drvdata(&wdt->wdd, wdt);
+ watchdog_stop_on_reboot(&wdt->wdd);
++ watchdog_stop_on_unregister(&wdt->wdd);
+
+ ret = watchdog_register_device(&wdt->wdd);
+ if (ret)
+diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
+index ea8a6abd64ecb..094f096aee0f2 100644
+--- a/drivers/watchdog/qcom-wdt.c
++++ b/drivers/watchdog/qcom-wdt.c
+@@ -22,7 +22,6 @@ enum wdt_reg {
+ };
+
+ #define QCOM_WDT_ENABLE BIT(0)
+-#define QCOM_WDT_ENABLE_IRQ BIT(1)
+
+ static const u32 reg_offset_data_apcs_tmr[] = {
+ [WDT_RST] = 0x38,
+@@ -58,16 +57,6 @@ struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
+ return container_of(wdd, struct qcom_wdt, wdd);
+ }
+
+-static inline int qcom_get_enable(struct watchdog_device *wdd)
+-{
+- int enable = QCOM_WDT_ENABLE;
+-
+- if (wdd->pretimeout)
+- enable |= QCOM_WDT_ENABLE_IRQ;
+-
+- return enable;
+-}
+-
+ static irqreturn_t qcom_wdt_isr(int irq, void *arg)
+ {
+ struct watchdog_device *wdd = arg;
+@@ -86,7 +75,7 @@ static int qcom_wdt_start(struct watchdog_device *wdd)
+ writel(1, wdt_addr(wdt, WDT_RST));
+ writel(bark * wdt->rate, wdt_addr(wdt, WDT_BARK_TIME));
+ writel(wdd->timeout * wdt->rate, wdt_addr(wdt, WDT_BITE_TIME));
+- writel(qcom_get_enable(wdd), wdt_addr(wdt, WDT_EN));
++ writel(QCOM_WDT_ENABLE, wdt_addr(wdt, WDT_EN));
+ return 0;
+ }
+
+diff --git a/fs/affs/namei.c b/fs/affs/namei.c
+index 41c5749f4db78..5400a876d73fb 100644
+--- a/fs/affs/namei.c
++++ b/fs/affs/namei.c
+@@ -460,8 +460,10 @@ affs_xrename(struct inode *old_dir, struct dentry *old_dentry,
+ return -EIO;
+
+ bh_new = affs_bread(sb, d_inode(new_dentry)->i_ino);
+- if (!bh_new)
++ if (!bh_new) {
++ affs_brelse(bh_old);
+ return -EIO;
++ }
+
+ /* Remove old header from its parent directory. */
+ affs_lock_dir(old_dir);
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index a32f23981f60f..08ca9441270d2 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -2391,8 +2391,10 @@ again:
+
+ if (!path) {
+ path = btrfs_alloc_path();
+- if (!path)
+- return -ENOMEM;
++ if (!path) {
++ ret = -ENOMEM;
++ goto out;
++ }
+ }
+
+ /*
+@@ -2487,16 +2489,14 @@ again:
+ btrfs_put_block_group(cache);
+ if (drop_reserve)
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
+-
+- if (ret)
+- break;
+-
+ /*
+ * Avoid blocking other tasks for too long. It might even save
+ * us from writing caches for block groups that are going to be
+ * removed.
+ */
+ mutex_unlock(&trans->transaction->cache_write_mutex);
++ if (ret)
++ goto out;
+ mutex_lock(&trans->transaction->cache_write_mutex);
+ }
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+@@ -2520,7 +2520,12 @@ again:
+ goto again;
+ }
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+- } else if (ret < 0) {
++ }
++out:
++ if (ret < 0) {
++ spin_lock(&cur_trans->dirty_bgs_lock);
++ list_splice_init(&dirty, &cur_trans->dirty_bgs);
++ spin_unlock(&cur_trans->dirty_bgs_lock);
+ btrfs_cleanup_dirty_bgs(cur_trans, fs_info);
+ }
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index e25133a9e9dfe..1af73367087df 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -260,9 +260,12 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ ret = btrfs_inc_ref(trans, root, cow, 1);
+ else
+ ret = btrfs_inc_ref(trans, root, cow, 0);
+-
+- if (ret)
++ if (ret) {
++ btrfs_tree_unlock(cow);
++ free_extent_buffer(cow);
++ btrfs_abort_transaction(trans, ret);
+ return ret;
++ }
+
+ btrfs_mark_buffer_dirty(cow);
+ *cow_ret = cow;
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 6e6be922b937d..23f59d463e24e 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -744,8 +744,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ while (num_entries) {
+ e = kmem_cache_zalloc(btrfs_free_space_cachep,
+ GFP_NOFS);
+- if (!e)
++ if (!e) {
++ ret = -ENOMEM;
+ goto free_cache;
++ }
+
+ ret = io_ctl_read_entry(&io_ctl, e, &type);
+ if (ret) {
+@@ -754,6 +756,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ }
+
+ if (!e->bytes) {
++ ret = -1;
+ kmem_cache_free(btrfs_free_space_cachep, e);
+ goto free_cache;
+ }
+@@ -774,6 +777,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
+ e->bitmap = kmem_cache_zalloc(
+ btrfs_free_space_bitmap_cachep, GFP_NOFS);
+ if (!e->bitmap) {
++ ret = -ENOMEM;
+ kmem_cache_free(
+ btrfs_free_space_cachep, e);
+ goto free_cache;
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index 001f13cf9ab8f..05b3e27b21d44 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1336,9 +1336,7 @@ static void __del_reloc_root(struct btrfs_root *root)
+ RB_CLEAR_NODE(&node->rb_node);
+ }
+ spin_unlock(&rc->reloc_root_tree.lock);
+- if (!node)
+- return;
+- BUG_ON((struct btrfs_root *)node->data != root);
++ ASSERT(!node || (struct btrfs_root *)node->data == root);
+ }
+
+ spin_lock(&fs_info->trans_lock);
+diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
+index 7b975dbb2bb49..1c74a7cbf5b19 100644
+--- a/fs/debugfs/inode.c
++++ b/fs/debugfs/inode.c
+@@ -293,7 +293,7 @@ struct dentry *debugfs_lookup(const char *name, struct dentry *parent)
+ {
+ struct dentry *dentry;
+
+- if (IS_ERR(parent))
++ if (!debugfs_initialized() || IS_ERR_OR_NULL(name) || IS_ERR(parent))
+ return NULL;
+
+ if (!parent)
+@@ -315,6 +315,9 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
+ struct dentry *dentry;
+ int error;
+
++ if (!debugfs_initialized())
++ return ERR_PTR(-ENOENT);
++
+ pr_debug("creating file '%s'\n", name);
+
+ if (IS_ERR(parent))
+diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
+index 503bea20cde26..f78496776e764 100644
+--- a/fs/erofs/xattr.c
++++ b/fs/erofs/xattr.c
+@@ -48,8 +48,14 @@ static int init_inode_xattrs(struct inode *inode)
+ int ret = 0;
+
+ /* the most case is that xattrs of this inode are initialized. */
+- if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
++ if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
++ /*
++ * paired with smp_mb() at the end of the function to ensure
++ * fields will only be observed after the bit is set.
++ */
++ smp_mb();
+ return 0;
++ }
+
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+@@ -137,6 +143,8 @@ static int init_inode_xattrs(struct inode *inode)
+ }
+ xattr_iter_end(&it, atomic_map);
+
++ /* paired with smp_mb() at the beginning of the function. */
++ smp_mb();
+ set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
+
+ out_unlock:
+diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
+index 6a26c293ae2d9..fff5741007214 100644
+--- a/fs/erofs/zmap.c
++++ b/fs/erofs/zmap.c
+@@ -36,8 +36,14 @@ static int fill_inode_lazy(struct inode *inode)
+ void *kaddr;
+ struct z_erofs_map_header *h;
+
+- if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
++ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
++ /*
++ * paired with smp_mb() at the end of the function to ensure
++ * fields will only be observed after the bit is set.
++ */
++ smp_mb();
+ return 0;
++ }
+
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+@@ -83,6 +89,8 @@ static int fill_inode_lazy(struct inode *inode)
+
+ vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
+ ((h->h_clusterbits >> 5) & 7);
++ /* paired with smp_mb() at the beginning of the function */
++ smp_mb();
+ set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+ unmap_done:
+ kunmap_atomic(kaddr);
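
Both erofs hunks add the same publish/observe discipline: initialize the structure fields, smp_mb(), then set the INITED bit; a reader that sees the bit issues its own smp_mb() before touching the fields. A userspace sketch of the equivalent guarantee, expressed with C11 release/acquire ordering rather than the kernel's full barriers:

    #include <stdatomic.h>
    #include <stdio.h>

    static int payload;       /* stands in for the lazily-filled fields */
    static atomic_int inited; /* stands in for the EROFS_I_*_INITED bit */

    static void publisher(void)
    {
        payload = 42;
        atomic_store_explicit(&inited, 1, memory_order_release);
    }

    static int reader(void)
    {
        if (atomic_load_explicit(&inited, memory_order_acquire))
            return payload;   /* guaranteed to see the stored fields */
        return -1;
    }

    int main(void)
    {
        publisher();
        printf("%d\n", reader());
        return 0;
    }
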
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index f05ec9bfbf4fd..7f22487d502b5 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2405,11 +2405,10 @@ again:
+ (frame - 1)->bh);
+ if (err)
+ goto journal_error;
+- if (restart) {
+- err = ext4_handle_dirty_dx_node(handle, dir,
+- frame->bh);
++ err = ext4_handle_dirty_dx_node(handle, dir,
++ frame->bh);
++ if (err)
+ goto journal_error;
+- }
+ } else {
+ struct dx_root *dxroot;
+ memcpy((char *) entries2, (char *) entries,
+diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
+index 68be334afc286..64ee2a064e339 100644
+--- a/fs/f2fs/data.c
++++ b/fs/f2fs/data.c
+@@ -318,7 +318,7 @@ static inline void __submit_bio(struct f2fs_sb_info *sbi,
+ if (test_opt(sbi, LFS) && current->plug)
+ blk_finish_plug(current->plug);
+
+- if (F2FS_IO_ALIGNED(sbi))
++ if (!F2FS_IO_ALIGNED(sbi))
+ goto submit_io;
+
+ start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 5d94abe467a4f..6e58b2e62b189 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -686,6 +686,10 @@ int f2fs_truncate(struct inode *inode)
+ return -EIO;
+ }
+
++ err = dquot_initialize(inode);
++ if (err)
++ return err;
++
+ /* we should check inline_data size */
+ if (!f2fs_may_inline_data(inode)) {
+ err = f2fs_convert_inline_inode(inode);
+@@ -761,7 +765,8 @@ static void __setattr_copy(struct inode *inode, const struct iattr *attr)
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = attr->ia_mode;
+
+- if (!in_group_p(inode->i_gid) && !capable(CAP_FSETID))
++ if (!in_group_p(inode->i_gid) &&
++ !capable_wrt_inode_uidgid(inode, CAP_FSETID))
+ mode &= ~S_ISGID;
+ set_acl_inode(inode, mode);
+ }
+diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
+index 183388393c6a8..cbd17e4ff920c 100644
+--- a/fs/f2fs/inline.c
++++ b/fs/f2fs/inline.c
+@@ -189,6 +189,10 @@ int f2fs_convert_inline_inode(struct inode *inode)
+ if (!f2fs_has_inline_data(inode))
+ return 0;
+
++ err = dquot_initialize(inode);
++ if (err)
++ return err;
++
+ page = f2fs_grab_cache_page(inode->i_mapping, 0, false);
+ if (!page)
+ return -ENOMEM;
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 50fa3e08c02f3..aaec3c5b02028 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -1228,6 +1228,9 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+
+ gfs2_inplace_release(ip);
+
++ if (ip->i_qadata && ip->i_qadata->qa_qd_num)
++ gfs2_quota_unlock(ip);
++
+ if (length != written && (iomap->flags & IOMAP_F_NEW)) {
+ /* Deallocate blocks that were just allocated. */
+ loff_t blockmask = i_blocksize(inode) - 1;
+@@ -1240,9 +1243,6 @@ static int gfs2_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ }
+ }
+
+- if (ip->i_qadata && ip->i_qadata->qa_qd_num)
+- gfs2_quota_unlock(ip);
+-
+ if (unlikely(!written))
+ goto out_unlock;
+
+diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
+index 7c7197343ee2b..72dec177b3494 100644
+--- a/fs/gfs2/lock_dlm.c
++++ b/fs/gfs2/lock_dlm.c
+@@ -280,7 +280,6 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ {
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+- int lvb_needs_unlock = 0;
+ int error;
+
+ if (gl->gl_lksb.sb_lkid == 0) {
+@@ -293,13 +292,10 @@ static void gdlm_put_lock(struct gfs2_glock *gl)
+ gfs2_sbstats_inc(gl, GFS2_LKS_DCOUNT);
+ gfs2_update_request_times(gl);
+
+- /* don't want to skip dlm_unlock writing the lvb when lock is ex */
+-
+- if (gl->gl_lksb.sb_lvbptr && (gl->gl_state == LM_ST_EXCLUSIVE))
+- lvb_needs_unlock = 1;
++ /* don't want to skip dlm_unlock writing the lvb when lock has one */
+
+ if (test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags) &&
+- !lvb_needs_unlock) {
++ !gl->gl_lksb.sb_lvbptr) {
+ gfs2_glock_free(gl);
+ return;
+ }
+diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c
+index f0fe641893a5e..b9e6a7ec78be4 100644
+--- a/fs/isofs/dir.c
++++ b/fs/isofs/dir.c
+@@ -152,6 +152,7 @@ static int do_isofs_readdir(struct inode *inode, struct file *file,
+ printk(KERN_NOTICE "iso9660: Corrupted directory entry"
+ " in block %lu of inode %lu\n", block,
+ inode->i_ino);
++ brelse(bh);
+ return -EIO;
+ }
+
+diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
+index cac468f04820e..558e7c51ce0d4 100644
+--- a/fs/isofs/namei.c
++++ b/fs/isofs/namei.c
+@@ -102,6 +102,7 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
+ printk(KERN_NOTICE "iso9660: Corrupted directory entry"
+ " in block %lu of inode %lu\n", block,
+ dir->i_ino);
++ brelse(bh);
+ return 0;
+ }
+
+diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
+index be7c8a6a57480..4fe64519870f1 100644
+--- a/fs/jffs2/summary.c
++++ b/fs/jffs2/summary.c
+@@ -783,6 +783,8 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
+ dbg_summary("Writing unknown RWCOMPAT_COPY node type %x\n",
+ je16_to_cpu(temp->u.nodetype));
+ jffs2_sum_disable_collecting(c->summary);
++ /* The above call removes the list, nothing more to do */
++ goto bail_rwcompat;
+ } else {
+ BUG(); /* unknown node in summary information */
+ }
+@@ -794,6 +796,7 @@ static int jffs2_sum_write_data(struct jffs2_sb_info *c, struct jffs2_eraseblock
+
+ c->summary->sum_num--;
+ }
++ bail_rwcompat:
+
+ jffs2_sum_reset_collected(c->summary);
+
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index caade185e568d..6fe82ce8663ef 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -1656,7 +1656,7 @@ s64 dbDiscardAG(struct inode *ip, int agno, s64 minlen)
+ } else if (rc == -ENOSPC) {
+ /* search for next smaller log2 block */
+ l2nb = BLKSTOL2(nblocks) - 1;
+- nblocks = 1 << l2nb;
++ nblocks = 1LL << l2nb;
+ } else {
+ /* Trim any already allocated blocks */
+ jfs_error(bmp->db_ipbmap->i_sb, "-EIO\n");
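
The jfs change is a shift-widening fix: dbDiscardAG() keeps block counts in s64, but "1 << l2nb" is evaluated as a 32-bit int shift and overflows once l2nb reaches 31, long before the assignment to nblocks can help. A minimal demonstration of the corrected arithmetic:

    #include <stdio.h>

    int main(void)
    {
        int l2nb = 40; /* plausible on a large volume */

        /* widening the constant keeps the whole shift in 64 bits */
        long long nblocks = 1LL << l2nb;

        printf("%lld\n", nblocks); /* 1099511627776 */
        return 0;
    }
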
+diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
+index be418fccc9d86..7f39d6091dfa0 100644
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1523,12 +1523,9 @@ static int __init init_nfsd(void)
+ int retval;
+ printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@monad.swb.de).\n");
+
+- retval = register_pernet_subsys(&nfsd_net_ops);
+- if (retval < 0)
+- return retval;
+ retval = register_cld_notifier();
+ if (retval)
+- goto out_unregister_pernet;
++ return retval;
+ retval = nfsd4_init_slabs();
+ if (retval)
+ goto out_unregister_notifier;
+@@ -1546,9 +1543,14 @@ static int __init init_nfsd(void)
+ goto out_free_lockd;
+ retval = register_filesystem(&nfsd_fs_type);
+ if (retval)
++ goto out_free_exports;
++ retval = register_pernet_subsys(&nfsd_net_ops);
++ if (retval < 0)
+ goto out_free_all;
+ return 0;
+ out_free_all:
++ unregister_pernet_subsys(&nfsd_net_ops);
++out_free_exports:
+ remove_proc_entry("fs/nfs/exports", NULL);
+ remove_proc_entry("fs/nfs", NULL);
+ out_free_lockd:
+@@ -1562,13 +1564,12 @@ out_free_slabs:
+ nfsd4_free_slabs();
+ out_unregister_notifier:
+ unregister_cld_notifier();
+-out_unregister_pernet:
+- unregister_pernet_subsys(&nfsd_net_ops);
+ return retval;
+ }
+
+ static void __exit exit_nfsd(void)
+ {
++ unregister_pernet_subsys(&nfsd_net_ops);
+ nfsd_drc_slab_free();
+ remove_proc_entry("fs/nfs/exports", NULL);
+ remove_proc_entry("fs/nfs", NULL);
+@@ -1579,7 +1580,6 @@ static void __exit exit_nfsd(void)
+ nfsd_fault_inject_cleanup();
+ unregister_filesystem(&nfsd_fs_type);
+ unregister_cld_notifier();
+- unregister_pernet_subsys(&nfsd_net_ops);
+ }
+
+ MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
+diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
+index a368350d4c279..c843bc318382b 100644
+--- a/fs/ocfs2/cluster/heartbeat.c
++++ b/fs/ocfs2/cluster/heartbeat.c
+@@ -2052,7 +2052,7 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
+ o2hb_nego_timeout_handler,
+ reg, NULL, &reg->hr_handler_list);
+ if (ret)
+- goto free;
++ goto remove_item;
+
+ ret = o2net_register_handler(O2HB_NEGO_APPROVE_MSG, reg->hr_key,
+ sizeof(struct o2hb_nego_msg),
+@@ -2067,6 +2067,12 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
+
+ unregister_handler:
+ o2net_unregister_handler_list(&reg->hr_handler_list);
++remove_item:
++ spin_lock(&o2hb_live_lock);
++ list_del(&reg->hr_all_item);
++ if (o2hb_global_heartbeat_active())
++ clear_bit(reg->hr_region_num, o2hb_region_bitmap);
++ spin_unlock(&o2hb_live_lock);
+ free:
+ kfree(reg);
+ return ERR_PTR(ret);
+diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
+index 74a60bae2b237..705b79bb9b241 100644
+--- a/fs/pstore/platform.c
++++ b/fs/pstore/platform.c
+@@ -275,7 +275,7 @@ static int pstore_compress(const void *in, void *out,
+ {
+ int ret;
+
+- if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION))
++ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS))
+ return -EINVAL;
+
+ ret = crypto_comp_compress(tfm, in, inlen, out, &outlen);
+@@ -664,7 +664,7 @@ static void decompress_record(struct pstore_record *record)
+ int unzipped_len;
+ char *unzipped, *workspace;
+
+- if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed)
++ if (!IS_ENABLED(CONFIG_PSTORE_COMPRESS) || !record->compressed)
+ return;
+
+ /* Only PSTORE_TYPE_DMESG support compression. */
+diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
+index 36dce17b01016..56aedf4ba8864 100644
+--- a/fs/quota/quota_v2.c
++++ b/fs/quota/quota_v2.c
+@@ -166,19 +166,24 @@ static int v2_read_file_info(struct super_block *sb, int type)
+ quota_error(sb, "Number of blocks too big for quota file size (%llu > %llu).",
+ (loff_t)qinfo->dqi_blocks << qinfo->dqi_blocksize_bits,
+ i_size_read(sb_dqopt(sb)->files[type]));
+- goto out;
++ goto out_free;
+ }
+ if (qinfo->dqi_free_blk >= qinfo->dqi_blocks) {
+ quota_error(sb, "Free block number too big (%u >= %u).",
+ qinfo->dqi_free_blk, qinfo->dqi_blocks);
+- goto out;
++ goto out_free;
+ }
+ if (qinfo->dqi_free_entry >= qinfo->dqi_blocks) {
+ quota_error(sb, "Block with free entry too big (%u >= %u).",
+ qinfo->dqi_free_entry, qinfo->dqi_blocks);
+- goto out;
++ goto out_free;
+ }
+ ret = 0;
++out_free:
++ if (ret) {
++ kfree(info->dqi_priv);
++ info->dqi_priv = NULL;
++ }
+ out:
+ up_read(&dqopt->dqio_sem);
+ return ret;
+diff --git a/fs/ubifs/auth.c b/fs/ubifs/auth.c
+index b10418b5fb719..8be17a7731961 100644
+--- a/fs/ubifs/auth.c
++++ b/fs/ubifs/auth.c
+@@ -342,7 +342,7 @@ int ubifs_init_authentication(struct ubifs_info *c)
+ ubifs_err(c, "hmac %s is bigger than maximum allowed hmac size (%d > %d)",
+ hmac_name, c->hmac_desc_len, UBIFS_HMAC_ARR_SZ);
+ err = -EINVAL;
+- goto out_free_hash;
++ goto out_free_hmac;
+ }
+
+ err = crypto_shash_setkey(c->hmac_tfm, ukp->data, ukp->datalen);
+diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
+index e49bd69dfc1c8..701f15ba61352 100644
+--- a/fs/ubifs/super.c
++++ b/fs/ubifs/super.c
+@@ -820,8 +820,10 @@ static int alloc_wbufs(struct ubifs_info *c)
+ c->jheads[i].wbuf.jhead = i;
+ c->jheads[i].grouped = 1;
+ c->jheads[i].log_hash = ubifs_hash_get_desc(c);
+- if (IS_ERR(c->jheads[i].log_hash))
++ if (IS_ERR(c->jheads[i].log_hash)) {
++ err = PTR_ERR(c->jheads[i].log_hash);
+ goto out;
++ }
+ }
+
+ /*
+diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
+index 233a72f169bb7..e1de0fe281644 100644
+--- a/include/acpi/acexcep.h
++++ b/include/acpi/acexcep.h
+@@ -59,11 +59,11 @@ struct acpi_exception_info {
+
+ #define AE_OK (acpi_status) 0x0000
+
+-#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL)
+-#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML)
+-#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER)
+-#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES)
+-#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL)
++#define ACPI_ENV_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ENVIRONMENTAL)
++#define ACPI_AML_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_AML)
++#define ACPI_PROG_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_PROGRAMMER)
++#define ACPI_TABLE_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_ACPI_TABLES)
++#define ACPI_CNTL_EXCEPTION(status) (((status) & AE_CODE_MASK) == AE_CODE_CONTROL)
+
+ /*
+ * Environmental exceptions
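
The old acexcep.h macros tested "status & AE_CODE_xxx", which is non-zero for any status whose class shares a bit with the one being tested, and always zero for AE_CODE_ENVIRONMENTAL, whose class code is 0x0000. The fix masks out the class field and compares for equality. A self-contained demonstration, with assumed-representative class values since the real definitions sit elsewhere in the header:

    #include <stdio.h>

    typedef unsigned int acpi_status;

    #define AE_CODE_PROGRAMMER 0x1000 /* assumed value, for illustration */
    #define AE_CODE_AML        0x3000 /* assumed value, for illustration */
    #define AE_CODE_MASK       0xF000

    #define OLD_AML_EXCEPTION(s) ((s) & AE_CODE_AML)
    #define NEW_AML_EXCEPTION(s) (((s) & AE_CODE_MASK) == AE_CODE_AML)

    int main(void)
    {
        acpi_status prog = AE_CODE_PROGRAMMER | 0x0001;

        /* a programmer-class status wrongly matched the old AML test */
        printf("old: %d, new: %d\n",
               !!OLD_AML_EXCEPTION(prog), !!NEW_AML_EXCEPTION(prog));
        return 0;
    }
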
+diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
+index 2267b7c763c64..8fe1912e3eeb9 100644
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -756,8 +756,13 @@
+ /* DWARF 4 */ \
+ .debug_types 0 : { *(.debug_types) } \
+ /* DWARF 5 */ \
++ .debug_addr 0 : { *(.debug_addr) } \
++ .debug_line_str 0 : { *(.debug_line_str) } \
++ .debug_loclists 0 : { *(.debug_loclists) } \
+ .debug_macro 0 : { *(.debug_macro) } \
+- .debug_addr 0 : { *(.debug_addr) }
++ .debug_names 0 : { *(.debug_names) } \
++ .debug_rnglists 0 : { *(.debug_rnglists) } \
++ .debug_str_offsets 0 : { *(.debug_str_offsets) }
+
+ /* Stabs debugging sections. */
+ #define STABS_DEBUG \
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 7aa0d8b5aaf0c..007147f643908 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -711,7 +711,10 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
+ /* verify correctness of eBPF program */
+ int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
+ union bpf_attr __user *uattr);
++
++#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
++#endif
+
+ /* Map specifics */
+ struct xdp_buff;
+diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
+index e4e1f5c1f4929..a53d7d2c2d95c 100644
+--- a/include/linux/device-mapper.h
++++ b/include/linux/device-mapper.h
+@@ -316,6 +316,11 @@ struct dm_target {
+ * whether or not its underlying devices have support.
+ */
+ bool discards_supported:1;
++
++ /*
++ * Set if we need to limit the number of in-flight bios when swapping.
++ */
++ bool limit_swap_bios:1;
+ };
+
+ /* Each target can link one of these into the table */
+diff --git a/include/linux/filter.h b/include/linux/filter.h
+index 79830bc9e45cf..c53e2fe3c8f7f 100644
+--- a/include/linux/filter.h
++++ b/include/linux/filter.h
+@@ -846,7 +846,7 @@ void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
+ u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+ #define __bpf_call_base_args \
+ ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
+- __bpf_call_base)
++ (void *)__bpf_call_base)
+
+ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
+ void bpf_jit_compile(struct bpf_prog *prog);
+diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
+index a8f8889761378..0be0d68fbb009 100644
+--- a/include/linux/icmpv6.h
++++ b/include/linux/icmpv6.h
+@@ -3,6 +3,7 @@
+ #define _LINUX_ICMPV6_H
+
+ #include <linux/skbuff.h>
++#include <linux/ipv6.h>
+ #include <uapi/linux/icmpv6.h>
+
+ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+@@ -13,21 +14,64 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb)
+ #include <linux/netdevice.h>
+
+ #if IS_ENABLED(CONFIG_IPV6)
+-extern void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info);
+
+ typedef void ip6_icmp_send_t(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+- const struct in6_addr *force_saddr);
++ const struct in6_addr *force_saddr,
++ const struct inet6_skb_parm *parm);
++void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++ const struct in6_addr *force_saddr,
++ const struct inet6_skb_parm *parm);
++#if IS_BUILTIN(CONFIG_IPV6)
++static inline void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++ const struct inet6_skb_parm *parm)
++{
++ icmp6_send(skb, type, code, info, NULL, parm);
++}
++static inline int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
++{
++ BUILD_BUG_ON(fn != icmp6_send);
++ return 0;
++}
++static inline int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
++{
++ BUILD_BUG_ON(fn != icmp6_send);
++ return 0;
++}
++#else
++extern void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++ const struct inet6_skb_parm *parm);
+ extern int inet6_register_icmp_sender(ip6_icmp_send_t *fn);
+ extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
++#endif
++
++static inline void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
++{
++ __icmpv6_send(skb, type, code, info, IP6CB(skb));
++}
++
+ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
+ unsigned int data_len);
+
++#if IS_ENABLED(CONFIG_NF_NAT)
++void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
++#else
++static inline void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
++{
++ struct inet6_skb_parm parm = { 0 };
++ __icmpv6_send(skb_in, type, code, info, &parm);
++}
++#endif
++
+ #else
+
+ static inline void icmpv6_send(struct sk_buff *skb,
+ u8 type, u8 code, __u32 info)
+ {
++}
+
++static inline void icmpv6_ndo_send(struct sk_buff *skb,
++ u8 type, u8 code, __u32 info)
++{
+ }
+ #endif
+
+diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
+index ea7c7906591eb..bbe297bbbca52 100644
+--- a/include/linux/ipv6.h
++++ b/include/linux/ipv6.h
+@@ -83,7 +83,6 @@ struct ipv6_params {
+ __s32 autoconf;
+ };
+ extern struct ipv6_params ipv6_defaults;
+-#include <linux/icmpv6.h>
+ #include <linux/tcp.h>
+ #include <linux/udp.h>
+
+diff --git a/include/linux/kexec.h b/include/linux/kexec.h
+index 1776eb2e43a44..a1cffce3de8cd 100644
+--- a/include/linux/kexec.h
++++ b/include/linux/kexec.h
+@@ -293,6 +293,11 @@ struct kimage {
+ /* Information for loading purgatory */
+ struct purgatory_info purgatory_info;
+ #endif
++
++#ifdef CONFIG_IMA_KEXEC
++ /* Virtual address of IMA measurement buffer for kexec syscall */
++ void *ima_buffer;
++#endif
+ };
+
+ /* kexec interface functions */
+diff --git a/include/linux/key.h b/include/linux/key.h
+index 6cf8e71cf8b7c..9c26cc9b802a0 100644
+--- a/include/linux/key.h
++++ b/include/linux/key.h
+@@ -269,6 +269,7 @@ extern struct key *key_alloc(struct key_type *type,
+ #define KEY_ALLOC_BUILT_IN 0x0004 /* Key is built into kernel */
+ #define KEY_ALLOC_BYPASS_RESTRICTION 0x0008 /* Override the check on restricted keyrings */
+ #define KEY_ALLOC_UID_KEYRING 0x0010 /* allocating a user or user session keyring */
++#define KEY_ALLOC_SET_KEEP 0x0020 /* Set the KEEP flag on the key/keyring */
+
+ extern void key_revoke(struct key *key);
+ extern void key_invalidate(struct key *key);
+diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
+index 75a2eded7aa2c..09e6ac4b669b2 100644
+--- a/include/linux/rcupdate.h
++++ b/include/linux/rcupdate.h
+@@ -96,8 +96,10 @@ static inline void rcu_user_exit(void) { }
+
+ #ifdef CONFIG_RCU_NOCB_CPU
+ void rcu_init_nohz(void);
++void rcu_nocb_flush_deferred_wakeup(void);
+ #else /* #ifdef CONFIG_RCU_NOCB_CPU */
+ static inline void rcu_init_nohz(void) { }
++static inline void rcu_nocb_flush_deferred_wakeup(void) { }
+ #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+
+ /**
+diff --git a/include/linux/rmap.h b/include/linux/rmap.h
+index 988d176472df7..d7d6d4eb17949 100644
+--- a/include/linux/rmap.h
++++ b/include/linux/rmap.h
+@@ -214,7 +214,8 @@ struct page_vma_mapped_walk {
+
+ static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw)
+ {
+- if (pvmw->pte)
++ /* HugeTLB pte is set to the relevant page table entry without pte_mapped. */
++ if (pvmw->pte && !PageHuge(pvmw->page))
+ pte_unmap(pvmw->pte);
+ if (pvmw->ptl)
+ spin_unlock(pvmw->ptl);
+diff --git a/include/net/act_api.h b/include/net/act_api.h
+index 05b568b92e59d..4dabe4730f00f 100644
+--- a/include/net/act_api.h
++++ b/include/net/act_api.h
+@@ -156,6 +156,7 @@ int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
+ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
+ struct tc_action **a, const struct tc_action_ops *ops,
+ int bind, bool cpustats);
++void tcf_idr_insert_many(struct tc_action *actions[]);
+ void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+ int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ struct tc_action **a, int bind);
+diff --git a/include/net/icmp.h b/include/net/icmp.h
+index 5d4bfdba9adf0..fd84adc479633 100644
+--- a/include/net/icmp.h
++++ b/include/net/icmp.h
+@@ -43,6 +43,16 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
+ __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+ }
+
++#if IS_ENABLED(CONFIG_NF_NAT)
++void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
++#else
++static inline void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++{
++ struct ip_options opts = { 0 };
++ __icmp_send(skb_in, type, code, info, &opts);
++}
++#endif
++
+ int icmp_rcv(struct sk_buff *skb);
+ int icmp_err(struct sk_buff *skb, u32 info);
+ int icmp_init(void);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 37b51456784f8..b914959cd2c67 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1409,8 +1409,13 @@ static inline int tcp_full_space(const struct sock *sk)
+ */
+ static inline bool tcp_rmem_pressure(const struct sock *sk)
+ {
+- int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+- int threshold = rcvbuf - (rcvbuf >> 3);
++ int rcvbuf, threshold;
++
++ if (tcp_under_memory_pressure(sk))
++ return true;
++
++ rcvbuf = READ_ONCE(sk->sk_rcvbuf);
++ threshold = rcvbuf - (rcvbuf >> 3);
+
+ return atomic_read(&sk->sk_rmem_alloc) > threshold;
+ }
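
With the memory-pressure short-circuit added above, the arithmetic fallback is unchanged: pressure is signalled once allocated receive memory exceeds 7/8 of sk_rcvbuf. The threshold computation, in isolation:

    #include <stdio.h>

    int main(void)
    {
        int rcvbuf = 131072;
        int threshold = rcvbuf - (rcvbuf >> 3); /* 7/8 of rcvbuf */

        printf("%d\n", threshold); /* 114688 */
        return 0;
    }
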
+diff --git a/kernel/bpf/bpf_lru_list.c b/kernel/bpf/bpf_lru_list.c
+index 1b6b9349cb857..d99e89f113c43 100644
+--- a/kernel/bpf/bpf_lru_list.c
++++ b/kernel/bpf/bpf_lru_list.c
+@@ -502,13 +502,14 @@ struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
+ static void bpf_common_lru_push_free(struct bpf_lru *lru,
+ struct bpf_lru_node *node)
+ {
++ u8 node_type = READ_ONCE(node->type);
+ unsigned long flags;
+
+- if (WARN_ON_ONCE(node->type == BPF_LRU_LIST_T_FREE) ||
+- WARN_ON_ONCE(node->type == BPF_LRU_LOCAL_LIST_T_FREE))
++ if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
++ WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
+ return;
+
+- if (node->type == BPF_LRU_LOCAL_LIST_T_PENDING) {
++ if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
+ struct bpf_lru_locallist *loc_l;
+
+ loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);
+diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
+index 2118d8258b7c9..ad53b19734e9b 100644
+--- a/kernel/debug/kdb/kdb_private.h
++++ b/kernel/debug/kdb/kdb_private.h
+@@ -233,7 +233,7 @@ extern struct task_struct *kdb_curr_task(int);
+ #define kdb_do_each_thread(g, p) do_each_thread(g, p)
+ #define kdb_while_each_thread(g, p) while_each_thread(g, p)
+
+-#define GFP_KDB (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)
++#define GFP_KDB (in_dbg_master() ? GFP_ATOMIC : GFP_KERNEL)
+
+ extern void *debug_kmalloc(size_t size, gfp_t flags);
+ extern void debug_kfree(void *);
+diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c
+index 79f252af7dee3..4e74db89bd23f 100644
+--- a/kernel/kexec_file.c
++++ b/kernel/kexec_file.c
+@@ -165,6 +165,11 @@ void kimage_file_post_load_cleanup(struct kimage *image)
+ vfree(pi->sechdrs);
+ pi->sechdrs = NULL;
+
++#ifdef CONFIG_IMA_KEXEC
++ vfree(image->ima_buffer);
++ image->ima_buffer = NULL;
++#endif /* CONFIG_IMA_KEXEC */
++
+ /* See if architecture has anything to cleanup post load */
+ arch_kimage_file_post_load_cleanup(image);
+
+diff --git a/kernel/module.c b/kernel/module.c
+index 9e9af40698ffe..ab1f97cfe18dc 100644
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -2310,6 +2310,21 @@ static int verify_exported_symbols(struct module *mod)
+ return 0;
+ }
+
++static bool ignore_undef_symbol(Elf_Half emachine, const char *name)
++{
++ /*
++ * On x86, PIC code and Clang non-PIC code may have call foo@PLT. GNU as
++ * before 2.37 produces an unreferenced _GLOBAL_OFFSET_TABLE_ on x86-64.
++ * i386 has a similar problem but may not deserve a fix.
++ *
++ * If we ever have to ignore many symbols, consider refactoring the code to
++ * only warn if referenced by a relocation.
++ */
++ if (emachine == EM_386 || emachine == EM_X86_64)
++ return !strcmp(name, "_GLOBAL_OFFSET_TABLE_");
++ return false;
++}
++
+ /* Change all symbols so that st_value encodes the pointer directly. */
+ static int simplify_symbols(struct module *mod, const struct load_info *info)
+ {
+@@ -2355,8 +2370,10 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
+ break;
+ }
+
+- /* Ok if weak. */
+- if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
++ /* Ok if weak or ignored. */
++ if (!ksym &&
++ (ELF_ST_BIND(sym[i].st_info) == STB_WEAK ||
++ ignore_undef_symbol(info->hdr->e_machine, name)))
+ break;
+
+ ret = PTR_ERR(ksym) ?: -ENOENT;
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index d9a659a686f31..6cfc5a00c67d6 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -43,6 +43,8 @@ struct printk_safe_seq_buf {
+ static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
+ static DEFINE_PER_CPU(int, printk_context);
+
++static DEFINE_RAW_SPINLOCK(safe_read_lock);
++
+ #ifdef CONFIG_PRINTK_NMI
+ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
+ #endif
+@@ -178,8 +180,6 @@ static void report_message_lost(struct printk_safe_seq_buf *s)
+ */
+ static void __printk_safe_flush(struct irq_work *work)
+ {
+- static raw_spinlock_t read_lock =
+- __RAW_SPIN_LOCK_INITIALIZER(read_lock);
+ struct printk_safe_seq_buf *s =
+ container_of(work, struct printk_safe_seq_buf, work);
+ unsigned long flags;
+@@ -193,7 +193,7 @@ static void __printk_safe_flush(struct irq_work *work)
+ * different CPUs. This is especially important when printing
+ * a backtrace.
+ */
+- raw_spin_lock_irqsave(&read_lock, flags);
++ raw_spin_lock_irqsave(&safe_read_lock, flags);
+
+ i = 0;
+ more:
+@@ -230,7 +230,7 @@ more:
+
+ out:
+ report_message_lost(s);
+- raw_spin_unlock_irqrestore(&read_lock, flags);
++ raw_spin_unlock_irqrestore(&safe_read_lock, flags);
+ }
+
+ /**
+@@ -276,6 +276,14 @@ void printk_safe_flush_on_panic(void)
+ raw_spin_lock_init(&logbuf_lock);
+ }
+
++ if (raw_spin_is_locked(&safe_read_lock)) {
++ if (num_online_cpus() > 1)
++ return;
++
++ debug_locks_off();
++ raw_spin_lock_init(&safe_read_lock);
++ }
++
+ printk_safe_flush();
+ }
+
+diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
+index 1b1d2b09efa9b..4dfa9dd47223a 100644
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -579,7 +579,6 @@ static void rcu_eqs_enter(bool user)
+ trace_rcu_dyntick(TPS("Start"), rdp->dynticks_nesting, 0, atomic_read(&rdp->dynticks));
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
+ rdp = this_cpu_ptr(&rcu_data);
+- do_nocb_deferred_wakeup(rdp);
+ rcu_prepare_for_idle();
+ rcu_preempt_deferred_qs(current);
+ WRITE_ONCE(rdp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
+@@ -618,7 +617,14 @@ void rcu_idle_enter(void)
+ */
+ void rcu_user_enter(void)
+ {
++ struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
++
+ lockdep_assert_irqs_disabled();
++
++ instrumentation_begin();
++ do_nocb_deferred_wakeup(rdp);
++ instrumentation_end();
++
+ rcu_eqs_enter(true);
+ }
+ #endif /* CONFIG_NO_HZ_FULL */
+diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
+index f7118842a2b88..a71a4a272515d 100644
+--- a/kernel/rcu/tree_plugin.h
++++ b/kernel/rcu/tree_plugin.h
+@@ -2190,6 +2190,11 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
+ do_nocb_deferred_wakeup_common(rdp);
+ }
+
++void rcu_nocb_flush_deferred_wakeup(void)
++{
++ do_nocb_deferred_wakeup(this_cpu_ptr(&rcu_data));
++}
++
+ void __init rcu_init_nohz(void)
+ {
+ int cpu;
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 3dd7c10d6a582..611adca1e6d0c 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3814,7 +3814,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ if (!static_branch_unlikely(&sched_asym_cpucapacity))
+ return;
+
+- if (!p) {
++ if (!p || p->nr_cpus_allowed == 1) {
+ rq->misfit_task_load = 0;
+ return;
+ }
+diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
+index 131e7c86cf06e..3f8c7867c14c1 100644
+--- a/kernel/sched/idle.c
++++ b/kernel/sched/idle.c
+@@ -249,6 +249,7 @@ static void do_idle(void)
+ }
+
+ arch_cpu_idle_enter();
++ rcu_nocb_flush_deferred_wakeup();
+
+ /*
+ * In poll mode we reenable interrupts and spin. Also if we
+diff --git a/kernel/seccomp.c b/kernel/seccomp.c
+index 4221a4383cfc5..1d62fa2b6b918 100644
+--- a/kernel/seccomp.c
++++ b/kernel/seccomp.c
+@@ -921,6 +921,8 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
+ const bool recheck_after_trace)
+ {
+ BUG();
++
++ return -1;
+ }
+ #endif
+
+diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
+index 73956eaff8a9c..be51df4508cbe 100644
+--- a/kernel/tracepoint.c
++++ b/kernel/tracepoint.c
+@@ -53,6 +53,12 @@ struct tp_probes {
+ struct tracepoint_func probes[0];
+ };
+
++/* Stub used in place of a removed func when allocating a new tp_funcs fails */
++static void tp_stub_func(void)
++{
++ return;
++}
++
+ static inline void *allocate_probes(int count)
+ {
+ struct tp_probes *p = kmalloc(struct_size(p, probes, count),
+@@ -131,6 +137,7 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
+ {
+ struct tracepoint_func *old, *new;
+ int nr_probes = 0;
++ int stub_funcs = 0;
+ int pos = -1;
+
+ if (WARN_ON(!tp_func->func))
+@@ -147,14 +154,34 @@ func_add(struct tracepoint_func **funcs, struct tracepoint_func *tp_func,
+ if (old[nr_probes].func == tp_func->func &&
+ old[nr_probes].data == tp_func->data)
+ return ERR_PTR(-EEXIST);
++ if (old[nr_probes].func == tp_stub_func)
++ stub_funcs++;
+ }
+ }
+- /* + 2 : one for new probe, one for NULL func */
+- new = allocate_probes(nr_probes + 2);
++ /* + 2 : one for new probe, one for NULL func - stub functions */
++ new = allocate_probes(nr_probes + 2 - stub_funcs);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (old) {
+- if (pos < 0) {
++ if (stub_funcs) {
++ /* Need to copy one at a time to remove stubs */
++ int probes = 0;
++
++ pos = -1;
++ for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
++ if (old[nr_probes].func == tp_stub_func)
++ continue;
++ if (pos < 0 && old[nr_probes].prio < prio)
++ pos = probes++;
++ new[probes++] = old[nr_probes];
++ }
++ nr_probes = probes;
++ if (pos < 0)
++ pos = probes;
++ else
++ nr_probes--; /* Account for insertion */
++
++ } else if (pos < 0) {
+ pos = nr_probes;
+ memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
+ } else {
+@@ -188,8 +215,9 @@ static void *func_remove(struct tracepoint_func **funcs,
+ /* (N -> M), (N > 1, M >= 0) probes */
+ if (tp_func->func) {
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+- if (old[nr_probes].func == tp_func->func &&
+- old[nr_probes].data == tp_func->data)
++ if ((old[nr_probes].func == tp_func->func &&
++ old[nr_probes].data == tp_func->data) ||
++ old[nr_probes].func == tp_stub_func)
+ nr_del++;
+ }
+ }
+@@ -208,14 +236,32 @@ static void *func_remove(struct tracepoint_func **funcs,
+ /* N -> M, (N > 1, M > 0) */
+ /* + 1 for NULL */
+ new = allocate_probes(nr_probes - nr_del + 1);
+- if (new == NULL)
+- return ERR_PTR(-ENOMEM);
+- for (i = 0; old[i].func; i++)
+- if (old[i].func != tp_func->func
+- || old[i].data != tp_func->data)
+- new[j++] = old[i];
+- new[nr_probes - nr_del].func = NULL;
+- *funcs = new;
++ if (new) {
++ for (i = 0; old[i].func; i++)
++ if ((old[i].func != tp_func->func
++ || old[i].data != tp_func->data)
++ && old[i].func != tp_stub_func)
++ new[j++] = old[i];
++ new[nr_probes - nr_del].func = NULL;
++ *funcs = new;
++ } else {
++ /*
++			 * Allocation failed, so replace the old function
++ * with calls to tp_stub_func.
++ */
++ for (i = 0; old[i].func; i++)
++ if (old[i].func == tp_func->func &&
++ old[i].data == tp_func->data) {
++ old[i].func = tp_stub_func;
++ /* Set the prio to the next event. */
++ if (old[i + 1].func)
++ old[i].prio =
++ old[i + 1].prio;
++ else
++ old[i].prio = -1;
++ }
++ *funcs = old;
++ }
+ }
+ debug_print_probes(*funcs);
+ return old;
+@@ -271,10 +317,12 @@ static int tracepoint_remove_func(struct tracepoint *tp,
+ tp_funcs = rcu_dereference_protected(tp->funcs,
+ lockdep_is_held(&tracepoints_mutex));
+ old = func_remove(&tp_funcs, func);
+- if (IS_ERR(old)) {
+- WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
++ if (WARN_ON_ONCE(IS_ERR(old)))
+ return PTR_ERR(old);
+- }
++
++ if (tp_funcs == old)
++ /* Failed allocating new tp_funcs, replaced func with stub */
++ return 0;
+
+ if (!tp_funcs) {
+ /* Removed last function */
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 88c3f6bad1aba..d686887856fee 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -1630,6 +1630,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ unsigned long pfn = cc->migrate_pfn;
+ unsigned long high_pfn;
+ int order;
++ bool found_block = false;
+
+ /* Skip hints are relied on to avoid repeats on the fast search */
+ if (cc->ignore_skip_hint)
+@@ -1672,7 +1673,7 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);
+
+ for (order = cc->order - 1;
+- order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
++ order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit;
+ order--) {
+ struct free_area *area = &cc->zone->free_area[order];
+ struct list_head *freelist;
+@@ -1687,7 +1688,11 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ list_for_each_entry(freepage, freelist, lru) {
+ unsigned long free_pfn;
+
+- nr_scanned++;
++ if (nr_scanned++ >= limit) {
++ move_freelist_tail(freelist, freepage);
++ break;
++ }
++
+ free_pfn = page_to_pfn(freepage);
+ if (free_pfn < high_pfn) {
+ /*
+@@ -1696,12 +1701,8 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ * the list assumes an entry is deleted, not
+ * reordered.
+ */
+- if (get_pageblock_skip(freepage)) {
+- if (list_is_last(freelist, &freepage->lru))
+- break;
+-
++ if (get_pageblock_skip(freepage))
+ continue;
+- }
+
+ 			/* Reorder so a future search skips recent pages */
+ move_freelist_tail(freelist, freepage);
+@@ -1709,15 +1710,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ update_fast_start_pfn(cc, free_pfn);
+ pfn = pageblock_start_pfn(free_pfn);
+ cc->fast_search_fail = 0;
++ found_block = true;
+ set_pageblock_skip(freepage);
+ break;
+ }
+-
+- if (nr_scanned >= limit) {
+- cc->fast_search_fail++;
+- move_freelist_tail(freelist, freepage);
+- break;
+- }
+ }
+ spin_unlock_irqrestore(&cc->zone->lock, flags);
+ }
+@@ -1728,9 +1724,10 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc)
+ * If fast scanning failed then use a cached entry for a page block
+ * that had free pages as the basis for starting a linear scan.
+ */
+- if (pfn == cc->migrate_pfn)
++ if (!found_block) {
++ cc->fast_search_fail++;
+ pfn = reinit_migrate_pfn(cc);
+-
++ }
+ return pfn;
+ }
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index d5b03b9262d4f..2cd4c7f43dcd9 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1192,14 +1192,16 @@ static inline void destroy_compound_gigantic_page(struct page *page,
+ static void update_and_free_page(struct hstate *h, struct page *page)
+ {
+ int i;
++ struct page *subpage = page;
+
+ if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ return;
+
+ h->nr_huge_pages--;
+ h->nr_huge_pages_node[page_to_nid(page)]--;
+- for (i = 0; i < pages_per_huge_page(h); i++) {
+- page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
++ for (i = 0; i < pages_per_huge_page(h);
++ i++, subpage = mem_map_next(subpage, page, i)) {
++ subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
+ 1 << PG_referenced | 1 << PG_dirty |
+ 1 << PG_active | 1 << PG_private |
+ 1 << PG_writeback);
+@@ -2812,8 +2814,10 @@ static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
+ return -ENOMEM;
+
+ retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
+- if (retval)
++ if (retval) {
+ kobject_put(hstate_kobjs[hi]);
++ hstate_kobjs[hi] = NULL;
++ }
+
+ return retval;
+ }
+diff --git a/mm/memory.c b/mm/memory.c
+index b23831132933a..c432e7c764451 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -1804,11 +1804,11 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
+ unsigned long addr, unsigned long end,
+ unsigned long pfn, pgprot_t prot)
+ {
+- pte_t *pte;
++ pte_t *pte, *mapped_pte;
+ spinlock_t *ptl;
+ int err = 0;
+
+- pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
++ mapped_pte = pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+ if (!pte)
+ return -ENOMEM;
+ arch_enter_lazy_mmu_mode();
+@@ -1822,7 +1822,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
+ pfn++;
+ } while (pte++, addr += PAGE_SIZE, addr != end);
+ arch_leave_lazy_mmu_mode();
+- pte_unmap_unlock(pte - 1, ptl);
++ pte_unmap_unlock(mapped_pte, ptl);
+ return err;
+ }
+
+@@ -4718,17 +4718,19 @@ long copy_huge_page_from_user(struct page *dst_page,
+ void *page_kaddr;
+ unsigned long i, rc = 0;
+ unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
++ struct page *subpage = dst_page;
+
+- for (i = 0; i < pages_per_huge_page; i++) {
++ for (i = 0; i < pages_per_huge_page;
++ i++, subpage = mem_map_next(subpage, dst_page, i)) {
+ if (allow_pagefault)
+- page_kaddr = kmap(dst_page + i);
++ page_kaddr = kmap(subpage);
+ else
+- page_kaddr = kmap_atomic(dst_page + i);
++ page_kaddr = kmap_atomic(subpage);
+ rc = copy_from_user(page_kaddr,
+ (const void __user *)(src + i * PAGE_SIZE),
+ PAGE_SIZE);
+ if (allow_pagefault)
+- kunmap(dst_page + i);
++ kunmap(subpage);
+ else
+ kunmap_atomic(page_kaddr);
+
+diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c
+index da7fd7c8c2dc0..463bad58478b2 100644
+--- a/net/bluetooth/a2mp.c
++++ b/net/bluetooth/a2mp.c
+@@ -381,9 +381,9 @@ static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ hdev = hci_dev_get(req->id);
+ if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) {
+ struct a2mp_amp_assoc_rsp rsp;
+- rsp.id = req->id;
+
+ memset(&rsp, 0, sizeof(rsp));
++ rsp.id = req->id;
+
+ if (tmp) {
+ rsp.status = A2MP_STATUS_COLLISION_OCCURED;
+@@ -512,6 +512,7 @@ static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb,
+ assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL);
+ if (!assoc) {
+ amp_ctrl_put(ctrl);
++ hci_dev_put(hdev);
+ return -ENOMEM;
+ }
+
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 9e19d5a3aac87..83b324419ad3d 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -1317,8 +1317,10 @@ int hci_inquiry(void __user *arg)
+ * cleared). If it is interrupted by a signal, return -EINTR.
+ */
+ if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
+- TASK_INTERRUPTIBLE))
+- return -EINTR;
++ TASK_INTERRUPTIBLE)) {
++ err = -EINTR;
++ goto done;
++ }
+ }
+
+ /* for unlimited number of responses we will use buffer with
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 2fa10fdcf6b1d..524f3364f8a05 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4880,6 +4880,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
+ {
+ struct net *net = dev_net(skb->dev);
+ int rc = -EAFNOSUPPORT;
++ bool check_mtu = false;
+
+ if (plen < sizeof(*params))
+ return -EINVAL;
+@@ -4887,22 +4888,28 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
+ if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
+ return -EINVAL;
+
++ if (params->tot_len)
++ check_mtu = true;
++
+ switch (params->family) {
+ #if IS_ENABLED(CONFIG_INET)
+ case AF_INET:
+- rc = bpf_ipv4_fib_lookup(net, params, flags, false);
++ rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu);
+ break;
+ #endif
+ #if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+- rc = bpf_ipv6_fib_lookup(net, params, flags, false);
++ rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu);
+ break;
+ #endif
+ }
+
+- if (!rc) {
++ if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) {
+ struct net_device *dev;
+
++		/* When tot_len isn't provided by the user, check the skb
++		 * against the MTU of the net_device found by the FIB lookup
++ */
+ dev = dev_get_by_index_rcu(net, params->ifindex);
+ if (!is_skb_forwardable(dev, skb))
+ rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
+diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
+index d00533aea1f05..dd8fae89be723 100644
+--- a/net/ipv4/icmp.c
++++ b/net/ipv4/icmp.c
+@@ -750,6 +750,40 @@ out:;
+ }
+ EXPORT_SYMBOL(__icmp_send);
+
++#if IS_ENABLED(CONFIG_NF_NAT)
++#include <net/netfilter/nf_conntrack.h>
++void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
++{
++ struct sk_buff *cloned_skb = NULL;
++ struct ip_options opts = { 0 };
++ enum ip_conntrack_info ctinfo;
++ struct nf_conn *ct;
++ __be32 orig_ip;
++
++ ct = nf_ct_get(skb_in, &ctinfo);
++ if (!ct || !(ct->status & IPS_SRC_NAT)) {
++ __icmp_send(skb_in, type, code, info, &opts);
++ return;
++ }
++
++ if (skb_shared(skb_in))
++ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
++
++ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
++ (skb_network_header(skb_in) + sizeof(struct iphdr)) >
++ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
++ skb_network_offset(skb_in) + sizeof(struct iphdr))))
++ goto out;
++
++ orig_ip = ip_hdr(skb_in)->saddr;
++ ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
++ __icmp_send(skb_in, type, code, info, &opts);
++ ip_hdr(skb_in)->saddr = orig_ip;
++out:
++ consume_skb(cloned_skb);
++}
++EXPORT_SYMBOL(icmp_ndo_send);
++#endif
+
+ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
+ {
+diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
+index e9bb89131e02a..3db10cae7b178 100644
+--- a/net/ipv6/icmp.c
++++ b/net/ipv6/icmp.c
+@@ -312,10 +312,9 @@ static int icmpv6_getfrag(void *from, char *to, int offset, int len, int odd, st
+ }
+
+ #if IS_ENABLED(CONFIG_IPV6_MIP6)
+-static void mip6_addr_swap(struct sk_buff *skb)
++static void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt)
+ {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+- struct inet6_skb_parm *opt = IP6CB(skb);
+ struct ipv6_destopt_hao *hao;
+ struct in6_addr tmp;
+ int off;
+@@ -332,7 +331,7 @@ static void mip6_addr_swap(struct sk_buff *skb)
+ }
+ }
+ #else
+-static inline void mip6_addr_swap(struct sk_buff *skb) {}
++static inline void mip6_addr_swap(struct sk_buff *skb, const struct inet6_skb_parm *opt) {}
+ #endif
+
+ static struct dst_entry *icmpv6_route_lookup(struct net *net,
+@@ -426,8 +425,9 @@ static int icmp6_iif(const struct sk_buff *skb)
+ /*
+ * Send an ICMP message in response to a packet in error
+ */
+-static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+- const struct in6_addr *force_saddr)
++void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++ const struct in6_addr *force_saddr,
++ const struct inet6_skb_parm *parm)
+ {
+ struct inet6_dev *idev = NULL;
+ struct ipv6hdr *hdr = ipv6_hdr(skb);
+@@ -520,7 +520,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
+ if (!(skb->dev->flags & IFF_LOOPBACK) && !icmpv6_global_allow(net, type))
+ goto out_bh_enable;
+
+- mip6_addr_swap(skb);
++ mip6_addr_swap(skb, parm);
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_ICMPV6;
+@@ -600,12 +600,13 @@ out:
+ out_bh_enable:
+ local_bh_enable();
+ }
++EXPORT_SYMBOL(icmp6_send);
+
+ /* Slightly more convenient version of icmp6_send.
+ */
+ void icmpv6_param_prob(struct sk_buff *skb, u8 code, int pos)
+ {
+- icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL);
++ icmp6_send(skb, ICMPV6_PARAMPROB, code, pos, NULL, IP6CB(skb));
+ kfree_skb(skb);
+ }
+
+@@ -662,10 +663,10 @@ int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
+ }
+ if (type == ICMP_TIME_EXCEEDED)
+ icmp6_send(skb2, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
+- info, &temp_saddr);
++ info, &temp_saddr, IP6CB(skb2));
+ else
+ icmp6_send(skb2, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH,
+- info, &temp_saddr);
++ info, &temp_saddr, IP6CB(skb2));
+ if (rt)
+ ip6_rt_put(rt);
+
+diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c
+index 02045494c24cc..9e3574880cb03 100644
+--- a/net/ipv6/ip6_icmp.c
++++ b/net/ipv6/ip6_icmp.c
+@@ -9,6 +9,8 @@
+
+ #if IS_ENABLED(CONFIG_IPV6)
+
++#if !IS_BUILTIN(CONFIG_IPV6)
++
+ static ip6_icmp_send_t __rcu *ip6_icmp_send;
+
+ int inet6_register_icmp_sender(ip6_icmp_send_t *fn)
+@@ -31,18 +33,52 @@ int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn)
+ }
+ EXPORT_SYMBOL(inet6_unregister_icmp_sender);
+
+-void icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
++void __icmpv6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info,
++ const struct inet6_skb_parm *parm)
+ {
+ ip6_icmp_send_t *send;
+
+ rcu_read_lock();
+ send = rcu_dereference(ip6_icmp_send);
++ if (send)
++ send(skb, type, code, info, NULL, parm);
++ rcu_read_unlock();
++}
++EXPORT_SYMBOL(__icmpv6_send);
++#endif
++
++#if IS_ENABLED(CONFIG_NF_NAT)
++#include <net/netfilter/nf_conntrack.h>
++void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
++{
++ struct inet6_skb_parm parm = { 0 };
++ struct sk_buff *cloned_skb = NULL;
++ enum ip_conntrack_info ctinfo;
++ struct in6_addr orig_ip;
++ struct nf_conn *ct;
+
+- if (!send)
++ ct = nf_ct_get(skb_in, &ctinfo);
++ if (!ct || !(ct->status & IPS_SRC_NAT)) {
++ __icmpv6_send(skb_in, type, code, info, &parm);
++ return;
++ }
++
++ if (skb_shared(skb_in))
++ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
++
++ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
++ (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) >
++ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
++ skb_network_offset(skb_in) + sizeof(struct ipv6hdr))))
+ goto out;
+- send(skb, type, code, info, NULL);
++
++ orig_ip = ipv6_hdr(skb_in)->saddr;
++ ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6;
++ __icmpv6_send(skb_in, type, code, info, &parm);
++ ipv6_hdr(skb_in)->saddr = orig_ip;
+ out:
+- rcu_read_unlock();
++ consume_skb(cloned_skb);
+ }
+-EXPORT_SYMBOL(icmpv6_send);
++EXPORT_SYMBOL(icmpv6_ndo_send);
++#endif
+ #endif
+diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
+index aa5150929996d..b5b728a71ab53 100644
+--- a/net/mac80211/mesh_hwmp.c
++++ b/net/mac80211/mesh_hwmp.c
+@@ -356,7 +356,7 @@ u32 airtime_link_metric_get(struct ieee80211_local *local,
+ */
+ tx_time = (device_constant + 10 * test_frame_len / rate);
+ estimated_retx = ((1 << (2 * ARITH_SHIFT)) / (s_unit - err));
+- result = (tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
++ result = ((u64)tx_time * estimated_retx) >> (2 * ARITH_SHIFT);
+ return (u32)result;
+ }
+
+diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c
+index 997af345ce374..cb425e216d461 100644
+--- a/net/qrtr/tun.c
++++ b/net/qrtr/tun.c
+@@ -31,6 +31,7 @@ static int qrtr_tun_send(struct qrtr_endpoint *ep, struct sk_buff *skb)
+ static int qrtr_tun_open(struct inode *inode, struct file *filp)
+ {
+ struct qrtr_tun *tun;
++ int ret;
+
+ tun = kzalloc(sizeof(*tun), GFP_KERNEL);
+ if (!tun)
+@@ -43,7 +44,16 @@ static int qrtr_tun_open(struct inode *inode, struct file *filp)
+
+ filp->private_data = tun;
+
+- return qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
++ ret = qrtr_endpoint_register(&tun->ep, QRTR_EP_NID_AUTO);
++ if (ret)
++ goto out;
++
++ return 0;
++
++out:
++ filp->private_data = NULL;
++ kfree(tun);
++ return ret;
+ }
+
+ static ssize_t qrtr_tun_read_iter(struct kiocb *iocb, struct iov_iter *to)
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 1dc642b11443c..43c10a85e8813 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -823,7 +823,7 @@ static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
+ [TCA_ACT_OPTIONS] = { .type = NLA_NESTED },
+ };
+
+-static void tcf_idr_insert_many(struct tc_action *actions[])
++void tcf_idr_insert_many(struct tc_action *actions[])
+ {
+ int i;
+
+diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
+index d7604417367d3..83e5a8aa2fb11 100644
+--- a/net/sched/cls_api.c
++++ b/net/sched/cls_api.c
+@@ -3026,6 +3026,7 @@ int tcf_exts_validate(struct net *net, struct tcf_proto *tp, struct nlattr **tb,
+ act->type = exts->type = TCA_OLD_COMPAT;
+ exts->actions[0] = act;
+ exts->nr_actions = 1;
++ tcf_idr_insert_many(exts->actions);
+ } else if (exts->action && tb[exts->action]) {
+ int err;
+
+diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
+index 00af31d3e7744..01c65f96d2832 100644
+--- a/net/xfrm/xfrm_interface.c
++++ b/net/xfrm/xfrm_interface.c
+@@ -300,10 +300,10 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
+ if (mtu < IPV6_MIN_MTU)
+ mtu = IPV6_MIN_MTU;
+
+- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
++ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ } else {
+- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+- htonl(mtu));
++ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
++ htonl(mtu));
+ }
+
+ dst_release(dst);
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 28a6939bcc4e5..ed89a6dd4f83d 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -500,7 +500,8 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
+ __u32 magic, nsmagic;
+ struct inode *inode = d_backing_inode(dentry);
+ struct user_namespace *task_ns = current_user_ns(),
+- *fs_ns = inode->i_sb->s_user_ns;
++ *fs_ns = inode->i_sb->s_user_ns,
++ *ancestor;
+ kuid_t rootid;
+ size_t newsize;
+
+@@ -523,6 +524,15 @@ int cap_convert_nscap(struct dentry *dentry, void **ivalue, size_t size)
+ if (nsrootid == -1)
+ return -EINVAL;
+
++ /*
++	 * Do not allow adding a v3 filesystem capability xattr
++ * if the rootid field is ambiguous.
++ */
++ for (ancestor = task_ns->parent; ancestor; ancestor = ancestor->parent) {
++ if (from_kuid(ancestor, rootid) == 0)
++ return -EINVAL;
++ }
++
+ newsize = sizeof(struct vfs_ns_cap_data);
+ nscap = kmalloc(newsize, GFP_ATOMIC);
+ if (!nscap)
+diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
+index ee6bd945f3d6a..25dac691491b1 100644
+--- a/security/integrity/evm/evm_crypto.c
++++ b/security/integrity/evm/evm_crypto.c
+@@ -75,7 +75,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo)
+ {
+ long rc;
+ const char *algo;
+- struct crypto_shash **tfm, *tmp_tfm;
++ struct crypto_shash **tfm, *tmp_tfm = NULL;
+ struct shash_desc *desc;
+
+ if (type == EVM_XATTR_HMAC) {
+@@ -120,13 +120,16 @@ unlock:
+ alloc:
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
+ GFP_KERNEL);
+- if (!desc)
++ if (!desc) {
++ crypto_free_shash(tmp_tfm);
+ return ERR_PTR(-ENOMEM);
++ }
+
+ desc->tfm = *tfm;
+
+ rc = crypto_shash_init(desc);
+ if (rc) {
++ crypto_free_shash(tmp_tfm);
+ kfree(desc);
+ return ERR_PTR(rc);
+ }
+diff --git a/security/integrity/ima/ima_kexec.c b/security/integrity/ima/ima_kexec.c
+index 9e94eca48b898..955e4b4d09e21 100644
+--- a/security/integrity/ima/ima_kexec.c
++++ b/security/integrity/ima/ima_kexec.c
+@@ -120,6 +120,7 @@ void ima_add_kexec_buffer(struct kimage *image)
+ ret = kexec_add_buffer(&kbuf);
+ if (ret) {
+ pr_err("Error passing over kexec measurement buffer.\n");
++ vfree(kexec_buffer);
+ return;
+ }
+
+@@ -129,6 +130,8 @@ void ima_add_kexec_buffer(struct kimage *image)
+ return;
+ }
+
++ image->ima_buffer = kexec_buffer;
++
+ pr_debug("kexec measurement buffer for the loaded kernel at 0x%lx.\n",
+ kbuf.mem);
+ }
+diff --git a/security/integrity/ima/ima_mok.c b/security/integrity/ima/ima_mok.c
+index 36cadadbfba47..1e5c019161738 100644
+--- a/security/integrity/ima/ima_mok.c
++++ b/security/integrity/ima/ima_mok.c
+@@ -38,13 +38,12 @@ __init int ima_mok_init(void)
+ (KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ |
+ KEY_USR_WRITE | KEY_USR_SEARCH,
+- KEY_ALLOC_NOT_IN_QUOTA,
++ KEY_ALLOC_NOT_IN_QUOTA |
++ KEY_ALLOC_SET_KEEP,
+ restriction, NULL);
+
+ if (IS_ERR(ima_blacklist_keyring))
+ panic("Can't allocate IMA blacklist keyring.");
+-
+- set_bit(KEY_FLAG_KEEP, &ima_blacklist_keyring->flags);
+ return 0;
+ }
+ device_initcall(ima_mok_init);
+diff --git a/security/keys/key.c b/security/keys/key.c
+index e9845d0d8d349..623fcb4094dd4 100644
+--- a/security/keys/key.c
++++ b/security/keys/key.c
+@@ -302,6 +302,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
+ key->flags |= 1 << KEY_FLAG_BUILTIN;
+ if (flags & KEY_ALLOC_UID_KEYRING)
+ key->flags |= 1 << KEY_FLAG_UID_KEYRING;
++ if (flags & KEY_ALLOC_SET_KEEP)
++ key->flags |= 1 << KEY_FLAG_KEEP;
+
+ #ifdef KEY_DEBUGGING
+ key->magic = KEY_DEBUG_MAGIC;
+diff --git a/security/keys/trusted.c b/security/keys/trusted.c
+index 36afc29aecc3b..92a14ab82f72f 100644
+--- a/security/keys/trusted.c
++++ b/security/keys/trusted.c
+@@ -805,7 +805,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
+ case Opt_migratable:
+ if (*args[0].from == '0')
+ pay->migratable = 0;
+- else
++ else if (*args[0].from != '1')
+ return -EINVAL;
+ break;
+ case Opt_pcrlock:
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index b3667a5efdc1f..7f9f6bbca5489 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -2447,6 +2447,8 @@ static const struct pci_device_id azx_ids[] = {
+ /* CometLake-H */
+ { PCI_DEVICE(0x8086, 0x06C8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++ { PCI_DEVICE(0x8086, 0xf1c8),
++ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* CometLake-S */
+ { PCI_DEVICE(0x8086, 0xa3f0),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f548bd48bf729..a132fe4537a55 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1895,6 +1895,7 @@ enum {
+ ALC889_FIXUP_FRONT_HP_NO_PRESENCE,
+ ALC889_FIXUP_VAIO_TT,
+ ALC888_FIXUP_EEE1601,
++ ALC886_FIXUP_EAPD,
+ ALC882_FIXUP_EAPD,
+ ALC883_FIXUP_EAPD,
+ ALC883_FIXUP_ACER_EAPD,
+@@ -2228,6 +2229,15 @@ static const struct hda_fixup alc882_fixups[] = {
+ { }
+ }
+ },
++ [ALC886_FIXUP_EAPD] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ /* change to EAPD mode */
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0068 },
++ { }
++ }
++ },
+ [ALC882_FIXUP_EAPD] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+@@ -2500,6 +2510,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
+
+ SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
++ SND_PCI_QUIRK(0x13fe, 0x1009, "Advantech MIT-W101", ALC886_FIXUP_EAPD),
+ SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
+diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c
+index d7f05b384f1fb..1902689c5ea2c 100644
+--- a/sound/soc/codecs/cpcap.c
++++ b/sound/soc/codecs/cpcap.c
+@@ -1263,12 +1263,12 @@ static int cpcap_voice_hw_params(struct snd_pcm_substream *substream,
+
+ if (direction == SNDRV_PCM_STREAM_CAPTURE) {
+ mask = 0x0000;
+- mask |= CPCAP_BIT_MIC1_RX_TIMESLOT0;
+- mask |= CPCAP_BIT_MIC1_RX_TIMESLOT1;
+- mask |= CPCAP_BIT_MIC1_RX_TIMESLOT2;
+- mask |= CPCAP_BIT_MIC2_TIMESLOT0;
+- mask |= CPCAP_BIT_MIC2_TIMESLOT1;
+- mask |= CPCAP_BIT_MIC2_TIMESLOT2;
++ mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0);
++ mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT1);
++ mask |= BIT(CPCAP_BIT_MIC1_RX_TIMESLOT2);
++ mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT0);
++ mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT1);
++ mask |= BIT(CPCAP_BIT_MIC2_TIMESLOT2);
+ val = 0x0000;
+ if (channels >= 2)
+ val = BIT(CPCAP_BIT_MIC1_RX_TIMESLOT0);
+diff --git a/sound/soc/codecs/cs42l56.c b/sound/soc/codecs/cs42l56.c
+index ac569ab3d30f4..51d7a87ab4c3b 100644
+--- a/sound/soc/codecs/cs42l56.c
++++ b/sound/soc/codecs/cs42l56.c
+@@ -1248,6 +1248,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
+ dev_err(&i2c_client->dev,
+ "CS42L56 Device ID (%X). Expected %X\n",
+ devid, CS42L56_DEVID);
++ ret = -EINVAL;
+ goto err_enable;
+ }
+ alpha_rev = reg & CS42L56_AREV_MASK;
+@@ -1305,7 +1306,7 @@ static int cs42l56_i2c_probe(struct i2c_client *i2c_client,
+ ret = devm_snd_soc_register_component(&i2c_client->dev,
+ &soc_component_dev_cs42l56, &cs42l56_dai, 1);
+ if (ret < 0)
+- return ret;
++ goto err_enable;
+
+ return 0;
+
+diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
+index 9b794775df537..edad6721251f4 100644
+--- a/sound/soc/generic/simple-card-utils.c
++++ b/sound/soc/generic/simple-card-utils.c
+@@ -172,16 +172,15 @@ int asoc_simple_parse_clk(struct device *dev,
+ * or device's module clock.
+ */
+ clk = devm_get_clk_from_child(dev, node, NULL);
+- if (!IS_ERR(clk)) {
+- simple_dai->sysclk = clk_get_rate(clk);
++ if (IS_ERR(clk))
++ clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
+
++ if (!IS_ERR(clk)) {
+ simple_dai->clk = clk;
+- } else if (!of_property_read_u32(node, "system-clock-frequency", &val)) {
++ simple_dai->sysclk = clk_get_rate(clk);
++ } else if (!of_property_read_u32(node, "system-clock-frequency",
++ &val)) {
+ simple_dai->sysclk = val;
+- } else {
+- clk = devm_get_clk_from_child(dev, dlc->of_node, NULL);
+- if (!IS_ERR(clk))
+- simple_dai->sysclk = clk_get_rate(clk);
+ }
+
+ if (of_property_read_bool(node, "system-clock-direction-out"))
+diff --git a/sound/soc/sof/debug.c b/sound/soc/sof/debug.c
+index 5529e8eeca462..08726034ff090 100644
+--- a/sound/soc/sof/debug.c
++++ b/sound/soc/sof/debug.c
+@@ -135,7 +135,7 @@ static ssize_t sof_dfsentry_write(struct file *file, const char __user *buffer,
+ char *string;
+ int ret;
+
+- string = kzalloc(count, GFP_KERNEL);
++ string = kzalloc(count+1, GFP_KERNEL);
+ if (!string)
+ return -ENOMEM;
+
+diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
+index 1a5e555002b2b..75218539fb107 100644
+--- a/sound/usb/pcm.c
++++ b/sound/usb/pcm.c
+@@ -1885,7 +1885,7 @@ void snd_usb_preallocate_buffer(struct snd_usb_substream *subs)
+ {
+ struct snd_pcm *pcm = subs->stream->pcm;
+ struct snd_pcm_substream *s = pcm->streams[subs->direction].substream;
+- struct device *dev = subs->dev->bus->controller;
++ struct device *dev = subs->dev->bus->sysdev;
+
+ if (!snd_usb_use_vmalloc)
+ snd_pcm_lib_preallocate_pages(s, SNDRV_DMA_TYPE_DEV_SG,
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 1b7e748170e54..06aaf04e629c2 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -626,8 +626,8 @@ static int add_jump_destinations(struct objtool_file *file)
+ * case where the parent function's only reference to a
+ * subfunction is through a jump table.
+ */
+- if (!strstr(insn->func->name, ".cold.") &&
+- strstr(insn->jump_dest->func->name, ".cold.")) {
++ if (!strstr(insn->func->name, ".cold") &&
++ strstr(insn->jump_dest->func->name, ".cold")) {
+ insn->func->cfunc = insn->jump_dest->func;
+ insn->jump_dest->func->pfunc = insn->func;
+
+@@ -2192,15 +2192,19 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
+ break;
+
+ case INSN_STD:
+- if (state.df)
++ if (state.df) {
+ WARN_FUNC("recursive STD", sec, insn->offset);
++ return 1;
++ }
+
+ state.df = true;
+ break;
+
+ case INSN_CLD:
+- if (!state.df && func)
++ if (!state.df && func) {
+ WARN_FUNC("redundant CLD", sec, insn->offset);
++ return 1;
++ }
+
+ state.df = false;
+ break;
+diff --git a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
+index df9201434cb6a..b0a10a219b50d 100644
+--- a/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
++++ b/tools/perf/pmu-events/arch/arm64/ampere/emag/cache.json
+@@ -114,7 +114,7 @@
+         "PublicDescription": "Level 2 access to instruction TLB that caused a page table walk. This event counts on any instruction access which causes L2I_TLB_REFILL to count",
+ "EventCode": "0x35",
+ "EventName": "L2I_TLB_ACCESS",
+- "BriefDescription": "L2D TLB access"
++ "BriefDescription": "L2I TLB access"
+ },
+ {
+ "PublicDescription": "Branch target buffer misprediction",
+diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
+index 3a02426db9a63..2f76d4a9de860 100644
+--- a/tools/perf/tests/sample-parsing.c
++++ b/tools/perf/tests/sample-parsing.c
+@@ -180,7 +180,7 @@ static int do_test(u64 sample_type, u64 sample_regs, u64 read_format)
+ .data = {1, 211, 212, 213},
+ };
+ u64 regs[64];
+- const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
++ const u32 raw_data[] = {0x12345678, 0x0a0b0c0d, 0x11020304, 0x05060708, 0 };
+ const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
+ struct perf_sample sample = {
+ .ip = 101,
+diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
+index fc1e5a991008d..bfaa9afdb8b4c 100644
+--- a/tools/perf/util/event.c
++++ b/tools/perf/util/event.c
+@@ -597,6 +597,8 @@ int machine__resolve(struct machine *machine, struct addr_location *al,
+ }
+
+ al->sym = map__find_symbol(al->map, al->addr);
++ } else if (symbol_conf.dso_list) {
++ al->filtered |= (1 << HIST_FILTER__DSO);
+ }
+
+ if (symbol_conf.sym_list &&
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index 7ffcbd6fcd1ae..7f53b63088b2c 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -1745,6 +1745,9 @@ static int intel_pt_walk_psbend(struct intel_pt_decoder *decoder)
+ break;
+
+ case INTEL_PT_CYC:
++ intel_pt_calc_cyc_timestamp(decoder);
++ break;
++
+ case INTEL_PT_VMCS:
+ case INTEL_PT_MNT:
+ case INTEL_PT_PAD:
+@@ -2634,9 +2637,18 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
+ }
+ if (intel_pt_sample_time(decoder->pkt_state)) {
+ intel_pt_update_sample_time(decoder);
+- if (decoder->sample_cyc)
++ if (decoder->sample_cyc) {
+ decoder->sample_tot_cyc_cnt = decoder->tot_cyc_cnt;
++ decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
++ decoder->sample_cyc = false;
++ }
+ }
++ /*
++ * When using only TSC/MTC to compute cycles, IPC can be
++ * sampled as soon as the cycle count changes.
++ */
++ if (!decoder->have_cyc)
++ decoder->state.flags |= INTEL_PT_SAMPLE_IPC;
+ }
+
+ decoder->state.timestamp = decoder->sample_timestamp;
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+index e289e463d635e..7396da0fa3a7c 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+@@ -17,6 +17,7 @@
+ #define INTEL_PT_ABORT_TX (1 << 1)
+ #define INTEL_PT_ASYNC (1 << 2)
+ #define INTEL_PT_FUP_IP (1 << 3)
++#define INTEL_PT_SAMPLE_IPC (1 << 4)
+
+ enum intel_pt_sample_type {
+ INTEL_PT_BRANCH = 1 << 0,
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 8aeaeba48a41f..d0e0ce11faf58 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -1304,7 +1304,8 @@ static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
+ sample.branch_stack = (struct branch_stack *)&dummy_bs;
+ }
+
+- sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
++ if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
++ sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
+ if (sample.cyc_cnt) {
+ sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
+ ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
+@@ -1366,7 +1367,8 @@ static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
+ sample.stream_id = ptq->pt->instructions_id;
+ sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;
+
+- sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
++ if (ptq->state->flags & INTEL_PT_SAMPLE_IPC)
++ sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
+ if (sample.cyc_cnt) {
+ sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
+ ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
+@@ -1901,14 +1903,8 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
+
+ ptq->have_sample = false;
+
+- if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
+- /*
+- * Cycle count and instruction count only go together to create
+- * a valid IPC ratio when the cycle count changes.
+- */
+- ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
+- ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
+- }
++ ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
++ ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
+
+ /*
+ * Do PEBS first to allow for the possibility that the PEBS timestamp
+diff --git a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+index cf001a2c69420..7c2cb04569dab 100755
+--- a/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
++++ b/tools/testing/selftests/powerpc/eeh/eeh-basic.sh
+@@ -81,5 +81,5 @@ echo "$failed devices failed to recover ($dev_count tested)"
+ lspci | diff -u $pre_lspci -
+ rm -f $pre_lspci
+
+-test "$failed" == 0
++test "$failed" -eq 0
+ exit $?