summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Pagano <mpagano@gentoo.org>2023-08-23 14:08:43 -0400
committerMike Pagano <mpagano@gentoo.org>2023-08-23 14:08:43 -0400
commitebb4944c0658d818673b757eea7c32d00ad5523e (patch)
treeb081b19fc1415675c8e2357cec7fa268c83c46ef
parentRemove redundant patch (diff)
downloadlinux-patches-ebb4944c0658d818673b757eea7c32d00ad5523e.tar.gz
linux-patches-ebb4944c0658d818673b757eea7c32d00ad5523e.tar.bz2
linux-patches-ebb4944c0658d818673b757eea7c32d00ad5523e.zip
Linux patch 6.1.47 (tag: 6.1-53)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- 0000_README              |    4
-rw-r--r-- 1046_linux-6.1.47.patch | 8047
2 files changed, 8051 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index c1bcb313..0ad2cca2 100644
--- a/0000_README
+++ b/0000_README
@@ -227,6 +227,10 @@ Patch: 1045_linux-6.1.46.patch
From: https://www.kernel.org
Desc: Linux 6.1.46
+Patch: 1046_linux-6.1.47.patch
+From: https://www.kernel.org
+Desc: Linux 6.1.47
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1046_linux-6.1.47.patch b/1046_linux-6.1.47.patch
new file mode 100644
index 00000000..ab31fe98
--- /dev/null
+++ b/1046_linux-6.1.47.patch
@@ -0,0 +1,8047 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index 286be425f3bfa..882b6198dd0d1 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -323,6 +323,7 @@
+ option with care.
+ pgtbl_v1 - Use v1 page table for DMA-API (Default).
+ pgtbl_v2 - Use v2 page table for DMA-API.
++ irtcachedis - Disable Interrupt Remapping Table (IRT) caching.
+
+ amd_iommu_dump= [HW,X86-64]
+ Enable AMD IOMMU driver option to dump the ACPI table
+diff --git a/Makefile b/Makefile
+index bdb965177db52..375efcfb91f8f 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 1
+-SUBLEVEL = 46
++SUBLEVEL = 47
+ EXTRAVERSION =
+ NAME = Curry Ramen
+
+diff --git a/arch/arm/boot/dts/imx50-kobo-aura.dts b/arch/arm/boot/dts/imx50-kobo-aura.dts
+index 51bf6117fb124..467db6b4ed7f8 100644
+--- a/arch/arm/boot/dts/imx50-kobo-aura.dts
++++ b/arch/arm/boot/dts/imx50-kobo-aura.dts
+@@ -26,7 +26,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
+
+- on {
++ led-on {
+ label = "kobo_aura:orange:on";
+ gpios = <&gpio6 24 GPIO_ACTIVE_LOW>;
+ panic-indicator;
+diff --git a/arch/arm/boot/dts/imx53-cx9020.dts b/arch/arm/boot/dts/imx53-cx9020.dts
+index cfb18849a92b4..055d23a9aee7c 100644
+--- a/arch/arm/boot/dts/imx53-cx9020.dts
++++ b/arch/arm/boot/dts/imx53-cx9020.dts
+@@ -86,27 +86,27 @@
+ leds {
+ compatible = "gpio-leds";
+
+- pwr-r {
++ led-pwr-r {
+ gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+- pwr-g {
++ led-pwr-g {
+ gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+
+- pwr-b {
++ led-pwr-b {
+ gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+- sd1-b {
++ led-sd1-b {
+ linux,default-trigger = "mmc0";
+ gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ };
+
+- sd2-b {
++ led-sd2-b {
+ linux,default-trigger = "mmc1";
+ gpios = <&gpio3 17 GPIO_ACTIVE_HIGH>;
+ };
+diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
+index a1a6228d1aa66..2bd2432d317ff 100644
+--- a/arch/arm/boot/dts/imx53-m53evk.dts
++++ b/arch/arm/boot/dts/imx53-m53evk.dts
+@@ -52,13 +52,13 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&led_pin_gpio>;
+
+- user1 {
++ led-user1 {
+ label = "user1";
+ gpios = <&gpio2 8 0>;
+ linux,default-trigger = "heartbeat";
+ };
+
+- user2 {
++ led-user2 {
+ label = "user2";
+ gpios = <&gpio2 9 0>;
+ linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx53-m53menlo.dts b/arch/arm/boot/dts/imx53-m53menlo.dts
+index d5c68d1ea707c..4d77b6077fc1b 100644
+--- a/arch/arm/boot/dts/imx53-m53menlo.dts
++++ b/arch/arm/boot/dts/imx53-m53menlo.dts
+@@ -34,19 +34,19 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+- user1 {
++ led-user1 {
+ label = "TestLed601";
+ gpios = <&gpio6 1 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+ };
+
+- user2 {
++ led-user2 {
+ label = "TestLed602";
+ gpios = <&gpio6 2 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+
+- eth {
++ led-eth {
+ label = "EthLedYe";
+ gpios = <&gpio2 11 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "netdev";
+diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
+index 892dd1a4bac35..a439a47fb65ac 100644
+--- a/arch/arm/boot/dts/imx53-tx53.dtsi
++++ b/arch/arm/boot/dts/imx53-tx53.dtsi
+@@ -94,7 +94,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_stk5led>;
+
+- user {
++ led-user {
+ label = "Heartbeat";
+ gpios = <&gpio2 20 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx53-usbarmory.dts b/arch/arm/boot/dts/imx53-usbarmory.dts
+index f34993a490ee8..acc44010d5106 100644
+--- a/arch/arm/boot/dts/imx53-usbarmory.dts
++++ b/arch/arm/boot/dts/imx53-usbarmory.dts
+@@ -67,7 +67,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+- user {
++ led-user {
+ label = "LED";
+ gpios = <&gpio4 27 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi b/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi
+index 337db29b0010a..37697fac9dea9 100644
+--- a/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi
++++ b/arch/arm/boot/dts/imx6dl-b1x5pv2.dtsi
+@@ -211,17 +211,17 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_q7_gpio1 &pinctrl_q7_gpio3 &pinctrl_q7_gpio5>;
+
+- alarm1 {
++ led-alarm1 {
+ label = "alarm:red";
+ gpios = <&gpio1 8 GPIO_ACTIVE_HIGH>;
+ };
+
+- alarm2 {
++ led-alarm2 {
+ label = "alarm:yellow";
+ gpios = <&gpio4 27 GPIO_ACTIVE_HIGH>;
+ };
+
+- alarm3 {
++ led-alarm3 {
+ label = "alarm:blue";
+ gpios = <&gpio4 15 GPIO_ACTIVE_HIGH>;
+ };
+diff --git a/arch/arm/boot/dts/imx6dl-prtrvt.dts b/arch/arm/boot/dts/imx6dl-prtrvt.dts
+index 56bb1ca56a2df..36b031236e475 100644
+--- a/arch/arm/boot/dts/imx6dl-prtrvt.dts
++++ b/arch/arm/boot/dts/imx6dl-prtrvt.dts
+@@ -124,6 +124,10 @@
+ status = "disabled";
+ };
+
++&usbotg {
++ disable-over-current;
++};
++
+ &vpu {
+ status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
+index e7be05f205d32..24c7f535f63bd 100644
+--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
++++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
+@@ -25,14 +25,14 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio5 2 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio3 28 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+index 52162e8c7274b..aacbf317feea6 100644
+--- a/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
++++ b/arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
+@@ -274,7 +274,7 @@
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+- chan@0 {
++ led@0 {
+ chan-name = "R";
+ led-cur = /bits/ 8 <0x20>;
+ max-cur = /bits/ 8 <0x60>;
+@@ -282,7 +282,7 @@
+ color = <LED_COLOR_ID_RED>;
+ };
+
+- chan@1 {
++ led@1 {
+ chan-name = "G";
+ led-cur = /bits/ 8 <0x20>;
+ max-cur = /bits/ 8 <0x60>;
+@@ -290,7 +290,7 @@
+ color = <LED_COLOR_ID_GREEN>;
+ };
+
+- chan@2 {
++ led@2 {
+ chan-name = "B";
+ led-cur = /bits/ 8 <0x20>;
+ max-cur = /bits/ 8 <0x60>;
+diff --git a/arch/arm/boot/dts/imx6q-gw5400-a.dts b/arch/arm/boot/dts/imx6q-gw5400-a.dts
+index e894faba571f9..522a51042965a 100644
+--- a/arch/arm/boot/dts/imx6q-gw5400-a.dts
++++ b/arch/arm/boot/dts/imx6q-gw5400-a.dts
+@@ -34,20 +34,20 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* 102 -> MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 10 GPIO_ACTIVE_HIGH>; /* 106 -> MX6_PANLEDR */
+ default-state = "off";
+ };
+
+- led2: user3 {
++ led2: led-user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* 111 -> MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6q-h100.dts b/arch/arm/boot/dts/imx6q-h100.dts
+index b8feadbff967d..6406ade14f57b 100644
+--- a/arch/arm/boot/dts/imx6q-h100.dts
++++ b/arch/arm/boot/dts/imx6q-h100.dts
+@@ -76,19 +76,19 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_h100_leds>;
+
+- led0: power {
++ led0: led-power {
+ label = "power";
+ gpios = <&gpio3 0 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+ };
+
+- led1: stream {
++ led1: led-stream {
+ label = "stream";
+ gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+- led2: rec {
++ led2: led-rec {
+ label = "rec";
+ gpios = <&gpio2 28 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6q-kp.dtsi b/arch/arm/boot/dts/imx6q-kp.dtsi
+index 1ade0bff681d6..5e0ed55600405 100644
+--- a/arch/arm/boot/dts/imx6q-kp.dtsi
++++ b/arch/arm/boot/dts/imx6q-kp.dtsi
+@@ -66,14 +66,14 @@
+ leds {
+ compatible = "gpio-leds";
+
+- green {
++ led-green {
+ label = "led1";
+ gpios = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "gpio";
+ default-state = "off";
+ };
+
+- red {
++ led-red {
+ label = "led0";
+ gpios = <&gpio3 23 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "gpio";
+diff --git a/arch/arm/boot/dts/imx6q-marsboard.dts b/arch/arm/boot/dts/imx6q-marsboard.dts
+index cc18010023942..2c9961333b0a8 100644
+--- a/arch/arm/boot/dts/imx6q-marsboard.dts
++++ b/arch/arm/boot/dts/imx6q-marsboard.dts
+@@ -73,14 +73,14 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+- user1 {
++ led-user1 {
+ label = "imx6:green:user1";
+ gpios = <&gpio5 2 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ linux,default-trigger = "heartbeat";
+ };
+
+- user2 {
++ led-user2 {
+ label = "imx6:green:user2";
+ gpios = <&gpio3 28 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
+index 8daef65d5bb35..2f576e2ce73f2 100644
+--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
++++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
+@@ -49,7 +49,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- blue {
++ led-blue {
+ label = "blue_status_led";
+ gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+ default-state = "keep";
+diff --git a/arch/arm/boot/dts/imx6qdl-emcon.dtsi b/arch/arm/boot/dts/imx6qdl-emcon.dtsi
+index 7228b894a763f..ee2dd75cead6d 100644
+--- a/arch/arm/boot/dts/imx6qdl-emcon.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-emcon.dtsi
+@@ -46,14 +46,14 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_som_leds>;
+
+- green {
++ led-green {
+ label = "som:green";
+ gpios = <&gpio3 0 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ default-state = "on";
+ };
+
+- red {
++ led-red {
+ label = "som:red";
+ gpios = <&gpio3 1 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+index 069c27fab432c..e75e1a5364b85 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+@@ -71,14 +71,14 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+index 728810b9d677d..47d9a8d08197d 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+@@ -80,20 +80,20 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+
+- led2: user3 {
++ led2: led-user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+index 6c0c109046d80..fb1d29abe0991 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+@@ -80,20 +80,20 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+
+- led2: user3 {
++ led2: led-user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+index a9b04f9f1c2bc..4e20cb97058eb 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+@@ -81,20 +81,20 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+
+- led2: user3 {
++ led2: led-user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
+index 435dec6338fe6..0fa4b8eeddee7 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw551x.dtsi
+@@ -115,7 +115,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+ default-state = "on";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
+index 2e61102ae6946..77ae611b817a4 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw552x.dtsi
+@@ -72,20 +72,20 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+
+- led2: user3 {
++ led2: led-user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw553x.dtsi b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
+index 4662408b225a5..7f16c602cc075 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw553x.dtsi
+@@ -113,14 +113,14 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 10 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+index 4b81a975c979d..46cf4080fec38 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw560x.dtsi
+@@ -139,20 +139,20 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+
+- led2: user3 {
++ led2: led-user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
+index 1fdb7ba630f1b..a74cde0501589 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5903.dtsi
+@@ -123,7 +123,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio6 14 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
+index 612b6e068e282..9fc79af2bc9aa 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5904.dtsi
+@@ -120,20 +120,20 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+
+- led2: user3 {
++ led2: led-user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5907.dtsi b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
+index fcd3bdfd61827..955a51226eda7 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5907.dtsi
+@@ -71,14 +71,14 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
+index 6bb4855d13ce5..218d6e667ed24 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5910.dtsi
+@@ -74,20 +74,20 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+
+- led2: user3 {
++ led2: led-user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5912.dtsi b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
+index 0415bcb416400..40e235e315cc4 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5912.dtsi
+@@ -72,20 +72,20 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+ };
+
+- led2: user3 {
++ led2: led-user3 {
+ label = "user3";
+ gpios = <&gpio4 15 GPIO_ACTIVE_LOW>; /* MX6_LOCLED# */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
+index 696427b487f01..82f47c295b085 100644
+--- a/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-gw5913.dtsi
+@@ -71,14 +71,14 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- led0: user1 {
++ led0: led-user1 {
+ label = "user1";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDG */
+ default-state = "on";
+ linux,default-trigger = "heartbeat";
+ };
+
+- led1: user2 {
++ led1: led-user2 {
+ label = "user2";
+ gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; /* MX6_PANLEDR */
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
+index a53a5d0766a51..6d4eab1942b94 100644
+--- a/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nit6xlite.dtsi
+@@ -85,31 +85,31 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_leds>;
+
+- j14-pin1 {
++ led-j14-pin1 {
+ gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+ retain-state-suspended;
+ default-state = "off";
+ };
+
+- j14-pin3 {
++ led-j14-pin3 {
+ gpios = <&gpio1 3 GPIO_ACTIVE_LOW>;
+ retain-state-suspended;
+ default-state = "off";
+ };
+
+- j14-pins8-9 {
++ led-j14-pins8-9 {
+ gpios = <&gpio3 29 GPIO_ACTIVE_LOW>;
+ retain-state-suspended;
+ default-state = "off";
+ };
+
+- j46-pin2 {
++ led-j46-pin2 {
+ gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+ retain-state-suspended;
+ default-state = "off";
+ };
+
+- j46-pin3 {
++ led-j46-pin3 {
+ gpios = <&gpio1 8 GPIO_ACTIVE_LOW>;
+ retain-state-suspended;
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+index 57c21a01f126d..81a9a302aec1b 100644
+--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+@@ -181,13 +181,13 @@
+ leds {
+ compatible = "gpio-leds";
+
+- speaker-enable {
++ led-speaker-enable {
+ gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
+ retain-state-suspended;
+ default-state = "off";
+ };
+
+- ttymxc4-rs232 {
++ led-ttymxc4-rs232 {
+ gpios = <&gpio6 10 GPIO_ACTIVE_HIGH>;
+ retain-state-suspended;
+ default-state = "on";
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi
+index 120d6e997a4c5..1ca4d219609f6 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-mira.dtsi
+@@ -25,17 +25,17 @@
+ pinctrl-0 = <&pinctrl_gpioleds>;
+ status = "disabled";
+
+- red {
++ led-red {
+ label = "phyboard-mira:red";
+ gpios = <&gpio5 22 GPIO_ACTIVE_HIGH>;
+ };
+
+- green {
++ led-green {
+ label = "phyboard-mira:green";
+ gpios = <&gpio5 23 GPIO_ACTIVE_HIGH>;
+ };
+
+- blue {
++ led-blue {
+ label = "phyboard-mira:blue";
+ gpios = <&gpio5 24 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "mmc0";
+@@ -182,7 +182,7 @@
+ pinctrl-0 = <&pinctrl_rtc_int>;
+ reg = <0x68>;
+ interrupt-parent = <&gpio7>;
+- interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
++ interrupts = <8 IRQ_TYPE_LEVEL_LOW>;
+ status = "disabled";
+ };
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+index 768bc0e3a2b38..80adb2a02cc94 100644
+--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+@@ -47,12 +47,12 @@
+ pinctrl-0 = <&pinctrl_leds>;
+ compatible = "gpio-leds";
+
+- led_green: green {
++ led_green: led-green {
+ label = "phyflex:green";
+ gpios = <&gpio1 30 0>;
+ };
+
+- led_red: red {
++ led_red: led-red {
+ label = "phyflex:red";
+ gpios = <&gpio2 31 0>;
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
+index f0db0d4471f40..36f84f4da6b0d 100644
+--- a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi
+@@ -69,6 +69,7 @@
+ vbus-supply = <&reg_usb_h1_vbus>;
+ phy_type = "utmi";
+ dr_mode = "host";
++ disable-over-current;
+ status = "okay";
+ };
+
+@@ -78,10 +79,18 @@
+ pinctrl-0 = <&pinctrl_usbotg>;
+ phy_type = "utmi";
+ dr_mode = "host";
+- disable-over-current;
++ over-current-active-low;
+ status = "okay";
+ };
+
++&usbphynop1 {
++ status = "disabled";
++};
++
++&usbphynop2 {
++ status = "disabled";
++};
++
+ &usdhc1 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usdhc1>;
+diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
+index de514eb5aa99d..f804ff95a6ad6 100644
+--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
+@@ -55,7 +55,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+- led0: usr {
++ led0: led-usr {
+ label = "usr";
+ gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+index 3dbb460ef102e..10886a1461bfb 100644
+--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+@@ -21,7 +21,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- user {
++ led-user {
+ label = "debug";
+ gpios = <&gpio5 15 GPIO_ACTIVE_HIGH>;
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+index 37482a9023fce..bcb83d52e26ed 100644
+--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+@@ -130,7 +130,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_gpio_leds>;
+
+- red {
++ led-red {
+ gpios = <&gpio1 2 0>;
+ default-state = "on";
+ };
+diff --git a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi
+index c096d25a6f5b5..1e0a041e9f60a 100644
+--- a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi
+@@ -73,13 +73,13 @@
+ default-state = "off";
+ };
+
+- en-usb-5v {
++ en-usb-5v-led {
+ label = "en-usb-5v";
+ gpios = <&gpio2 22 GPIO_ACTIVE_HIGH>;
+ default-state = "on";
+ };
+
+- sel_dc_usb {
++ sel-dc-usb-led {
+ label = "sel_dc_usb";
+ gpios = <&gpio5 17 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+index f41f86a76ea95..a197bac95cbac 100644
+--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+@@ -92,7 +92,7 @@
+ leds {
+ compatible = "gpio-leds";
+
+- user_led: user {
++ user_led: led-user {
+ label = "Heartbeat";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_user_led>;
+diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
+index f16c830f1e918..dc5d596c18db4 100644
+--- a/arch/arm/boot/dts/imx6sl-evk.dts
++++ b/arch/arm/boot/dts/imx6sl-evk.dts
+@@ -33,7 +33,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+- user {
++ led-user {
+ label = "debug";
+ gpios = <&gpio3 20 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts
+index 32b3d82fec53c..269092ac881c5 100644
+--- a/arch/arm/boot/dts/imx6sll-evk.dts
++++ b/arch/arm/boot/dts/imx6sll-evk.dts
+@@ -37,7 +37,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+- user {
++ led-user {
+ label = "debug";
+ gpios = <&gpio2 4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi
+index 2873369a57c02..3659fd5ecfa62 100644
+--- a/arch/arm/boot/dts/imx6sll.dtsi
++++ b/arch/arm/boot/dts/imx6sll.dtsi
+@@ -552,7 +552,7 @@
+ reg = <0x020ca000 0x1000>;
+ interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&clks IMX6SLL_CLK_USBPHY2>;
+- phy-reg_3p0-supply = <&reg_3p0>;
++ phy-3p0-supply = <&reg_3p0>;
+ fsl,anatop = <&anatop>;
+ };
+
+diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+index 83ee97252ff11..b0c27b9b02446 100644
+--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
++++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
+@@ -20,7 +20,7 @@
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+
+- user {
++ led-user {
+ label = "debug";
+ gpios = <&gpio1 24 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+index c84ea1fac5e98..725d0b5cb55f6 100644
+--- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
++++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi
+@@ -15,14 +15,14 @@
+ leds {
+ compatible = "gpio-leds";
+
+- red {
++ led-red {
+ label = "udoo-neo:red:mmc";
+ gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ linux,default-trigger = "mmc0";
+ };
+
+- orange {
++ led-orange {
+ label = "udoo-neo:orange:user";
+ gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
+ default-state = "keep";
+diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
+index abc3572d699e6..1f1053a898fbf 100644
+--- a/arch/arm/boot/dts/imx6sx.dtsi
++++ b/arch/arm/boot/dts/imx6sx.dtsi
+@@ -981,6 +981,8 @@
+ <&clks IMX6SX_CLK_USDHC1>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-start-tap = <20>;
++ fsl,tuning-step= <2>;
+ status = "disabled";
+ };
+
+@@ -993,6 +995,8 @@
+ <&clks IMX6SX_CLK_USDHC2>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-start-tap = <20>;
++ fsl,tuning-step= <2>;
+ status = "disabled";
+ };
+
+@@ -1005,6 +1009,8 @@
+ <&clks IMX6SX_CLK_USDHC3>;
+ clock-names = "ipg", "ahb", "per";
+ bus-width = <4>;
++ fsl,tuning-start-tap = <20>;
++ fsl,tuning-step= <2>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/imx6ul-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6ul-phytec-phycore-som.dtsi
+index 3cddc68917a08..e4d2652a75c0b 100644
+--- a/arch/arm/boot/dts/imx6ul-phytec-phycore-som.dtsi
++++ b/arch/arm/boot/dts/imx6ul-phytec-phycore-som.dtsi
+@@ -30,7 +30,7 @@
+ pinctrl-0 = <&pinctrl_gpioleds_som>;
+ compatible = "gpio-leds";
+
+- phycore-green {
++ led-phycore-green {
+ gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
+ linux,default-trigger = "heartbeat";
+ };
+diff --git a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
+index 15ee0275feaff..70cef5e817bd1 100644
+--- a/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
++++ b/arch/arm/boot/dts/imx6ul-tx6ul.dtsi
+@@ -131,7 +131,7 @@
+ leds {
+ compatible = "gpio-leds";
+
+- user_led: user {
++ user_led: led-user {
+ label = "Heartbeat";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_led>;
+diff --git a/arch/arm64/boot/dts/freescale/imx8mm.dtsi b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+index 420ba0d6f1343..12c82bb1bb7aa 100644
+--- a/arch/arm64/boot/dts/freescale/imx8mm.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx8mm.dtsi
+@@ -1145,10 +1145,9 @@
+ compatible = "fsl,imx8mm-mipi-csi2";
+ reg = <0x32e30000 0x1000>;
+ interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
+- assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>,
+- <&clk IMX8MM_CLK_CSI1_PHY_REF>;
+- assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>,
+- <&clk IMX8MM_SYS_PLL2_1000M>;
++ assigned-clocks = <&clk IMX8MM_CLK_CSI1_CORE>;
++ assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_1000M>;
++
+ clock-frequency = <333000000>;
+ clocks = <&clk IMX8MM_CLK_DISP_APB_ROOT>,
+ <&clk IMX8MM_CLK_CSI1_ROOT>,
+diff --git a/arch/arm64/boot/dts/freescale/imx93.dtsi b/arch/arm64/boot/dts/freescale/imx93.dtsi
+index 8ab9f8194702e..c2f60d41d6fd1 100644
+--- a/arch/arm64/boot/dts/freescale/imx93.dtsi
++++ b/arch/arm64/boot/dts/freescale/imx93.dtsi
+@@ -254,7 +254,7 @@
+
+ anatop: anatop@44480000 {
+ compatible = "fsl,imx93-anatop", "syscon";
+- reg = <0x44480000 0x10000>;
++ reg = <0x44480000 0x2000>;
+ };
+ };
+
+diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+index bf8077a1cf9a7..9731a7c63d53b 100644
+--- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
++++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts
+@@ -121,7 +121,7 @@
+ };
+ };
+
+- pm8150l-thermal {
++ pm8150l-pcb-thermal {
+ polling-delay-passive = <0>;
+ polling-delay = <0>;
+ thermal-sensors = <&pm8150l_adc_tm 1>;
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
+index f9884902f8745..c3f53aa1ea4ac 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-4c-plus.dts
+@@ -548,9 +548,8 @@
+ &sdhci {
+ max-frequency = <150000000>;
+ bus-width = <8>;
+- mmc-hs400-1_8v;
++ mmc-hs200-1_8v;
+ non-removable;
+- mmc-hs400-enhanced-strobe;
+ status = "okay";
+ };
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+index 1f76d3501bda3..9bdc0b93001f4 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
++++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi
+@@ -45,7 +45,7 @@
+ sdio_pwrseq: sdio-pwrseq {
+ compatible = "mmc-pwrseq-simple";
+ clocks = <&rk808 1>;
+- clock-names = "ext_clock";
++ clock-names = "lpo";
+ pinctrl-names = "default";
+ pinctrl-0 = <&wifi_enable_h>;
+ reset-gpios = <&gpio0 RK_PB2 GPIO_ACTIVE_LOW>;
+@@ -645,9 +645,9 @@
+ };
+
+ &sdhci {
++ max-frequency = <150000000>;
+ bus-width = <8>;
+- mmc-hs400-1_8v;
+- mmc-hs400-enhanced-strobe;
++ mmc-hs200-1_8v;
+ non-removable;
+ status = "okay";
+ };
+diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
+index 6f86b7ab6c28f..d720b6f7e5f9c 100644
+--- a/arch/arm64/include/asm/fpsimd.h
++++ b/arch/arm64/include/asm/fpsimd.h
+@@ -339,7 +339,7 @@ static inline int sme_max_virtualisable_vl(void)
+ return vec_max_virtualisable_vl(ARM64_VEC_SME);
+ }
+
+-extern void sme_alloc(struct task_struct *task);
++extern void sme_alloc(struct task_struct *task, bool flush);
+ extern unsigned int sme_get_vl(void);
+ extern int sme_set_current_vl(unsigned long arg);
+ extern int sme_get_current_vl(void);
+@@ -365,7 +365,7 @@ static inline void sme_smstart_sm(void) { }
+ static inline void sme_smstop_sm(void) { }
+ static inline void sme_smstop(void) { }
+
+-static inline void sme_alloc(struct task_struct *task) { }
++static inline void sme_alloc(struct task_struct *task, bool flush) { }
+ static inline void sme_setup(void) { }
+ static inline unsigned int sme_get_vl(void) { return 0; }
+ static inline int sme_max_vl(void) { return 0; }
+diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
+index b5a8e8b3c691c..577cf444c1135 100644
+--- a/arch/arm64/include/asm/kvm_host.h
++++ b/arch/arm64/include/asm/kvm_host.h
+@@ -559,6 +559,8 @@ struct kvm_vcpu_arch {
+ #define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4))
+ /* Software step state is Active-pending */
+ #define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5))
++/* WFI instruction trapped */
++#define IN_WFI __vcpu_single_flag(sflags, BIT(7))
+
+
+ /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
+diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
+index 356036babd093..8cd59d387b90b 100644
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -1239,9 +1239,9 @@ void fpsimd_release_task(struct task_struct *dead_task)
+ * the interest of testability and predictability, the architecture
+ * guarantees that when ZA is enabled it will be zeroed.
+ */
+-void sme_alloc(struct task_struct *task)
++void sme_alloc(struct task_struct *task, bool flush)
+ {
+- if (task->thread.za_state) {
++ if (task->thread.za_state && flush) {
+ memset(task->thread.za_state, 0, za_state_size(task));
+ return;
+ }
+@@ -1460,7 +1460,7 @@ void do_sme_acc(unsigned long esr, struct pt_regs *regs)
+ }
+
+ sve_alloc(current, false);
+- sme_alloc(current);
++ sme_alloc(current, true);
+ if (!current->thread.sve_state || !current->thread.za_state) {
+ force_sig(SIGKILL);
+ return;
+diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
+index f19f020ccff96..f606c942f514e 100644
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -886,6 +886,13 @@ static int sve_set_common(struct task_struct *target,
+ break;
+ case ARM64_VEC_SME:
+ target->thread.svcr |= SVCR_SM_MASK;
++
++ /*
++ * Disable traps and ensure there is SME storage but
++ * preserve any currently set values in ZA/ZT.
++ */
++ sme_alloc(target, false);
++ set_tsk_thread_flag(target, TIF_SME);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+@@ -1107,7 +1114,7 @@ static int za_set(struct task_struct *target,
+ }
+
+ /* Allocate/reinit ZA storage */
+- sme_alloc(target);
++ sme_alloc(target, true);
+ if (!target->thread.za_state) {
+ ret = -ENOMEM;
+ goto out;
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index 43adbfa5ead78..82f4572c8ddfc 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -430,7 +430,7 @@ static int restore_za_context(struct user_ctxs *user)
+ fpsimd_flush_task_state(current);
+ /* From now, fpsimd_thread_switch() won't touch thread.sve_state */
+
+- sme_alloc(current);
++ sme_alloc(current, true);
+ if (!current->thread.za_state) {
+ current->thread.svcr &= ~SVCR_ZA_MASK;
+ clear_thread_flag(TIF_SME);
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 35481d51aada8..6cc380a15eb76 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -692,13 +692,15 @@ void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
+ */
+ preempt_disable();
+ kvm_vgic_vmcr_sync(vcpu);
+- vgic_v4_put(vcpu, true);
++ vcpu_set_flag(vcpu, IN_WFI);
++ vgic_v4_put(vcpu);
+ preempt_enable();
+
+ kvm_vcpu_halt(vcpu);
+ vcpu_clear_flag(vcpu, IN_WFIT);
+
+ preempt_disable();
++ vcpu_clear_flag(vcpu, IN_WFI);
+ vgic_v4_load(vcpu);
+ preempt_enable();
+ }
+@@ -766,7 +768,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
+ if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
+ /* The distributor enable bits were changed */
+ preempt_disable();
+- vgic_v4_put(vcpu, false);
++ vgic_v4_put(vcpu);
+ vgic_v4_load(vcpu);
+ preempt_enable();
+ }
+diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
+index f86c3007a319c..1f8eea53e982f 100644
+--- a/arch/arm64/kvm/vgic/vgic-v3.c
++++ b/arch/arm64/kvm/vgic/vgic-v3.c
+@@ -742,7 +742,7 @@ void vgic_v3_put(struct kvm_vcpu *vcpu)
+ {
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+
+- WARN_ON(vgic_v4_put(vcpu, false));
++ WARN_ON(vgic_v4_put(vcpu));
+
+ vgic_v3_vmcr_sync(vcpu);
+
+diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
+index c1c28fe680ba3..339a55194b2c6 100644
+--- a/arch/arm64/kvm/vgic/vgic-v4.c
++++ b/arch/arm64/kvm/vgic/vgic-v4.c
+@@ -336,14 +336,14 @@ void vgic_v4_teardown(struct kvm *kvm)
+ its_vm->vpes = NULL;
+ }
+
+-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
++int vgic_v4_put(struct kvm_vcpu *vcpu)
+ {
+ struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+ if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
+ return 0;
+
+- return its_make_vpe_non_resident(vpe, need_db);
++ return its_make_vpe_non_resident(vpe, !!vcpu_get_flag(vcpu, IN_WFI));
+ }
+
+ int vgic_v4_load(struct kvm_vcpu *vcpu)
+@@ -354,6 +354,9 @@ int vgic_v4_load(struct kvm_vcpu *vcpu)
+ if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
+ return 0;
+
++ if (vcpu_get_flag(vcpu, IN_WFI))
++ return 0;
++
+ /*
+ * Before making the VPE resident, make sure the redistributor
+ * corresponding to our current CPU expects us here. See the
+diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
+index bc817a5619d64..43c635ddba709 100644
+--- a/arch/powerpc/kernel/rtas_flash.c
++++ b/arch/powerpc/kernel/rtas_flash.c
+@@ -710,9 +710,9 @@ static int __init rtas_flash_init(void)
+ if (!rtas_validate_flash_data.buf)
+ return -ENOMEM;
+
+- flash_block_cache = kmem_cache_create("rtas_flash_cache",
+- RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0,
+- NULL);
++ flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache",
++ RTAS_BLK_SIZE, RTAS_BLK_SIZE,
++ 0, 0, RTAS_BLK_SIZE, NULL);
+ if (!flash_block_cache) {
+ printk(KERN_ERR "%s: failed to create block cache\n",
+ __func__);
+diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
+index 699eeffd9f551..f9522fd70b2f3 100644
+--- a/arch/powerpc/mm/kasan/Makefile
++++ b/arch/powerpc/mm/kasan/Makefile
+@@ -1,6 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+
+ KASAN_SANITIZE := n
++KCOV_INSTRUMENT := n
+
+ obj-$(CONFIG_PPC32) += init_32.o
+ obj-$(CONFIG_PPC_8xx) += 8xx.o
+diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
+index ec486e5369d9b..09b47ebacf2e8 100644
+--- a/arch/riscv/lib/uaccess.S
++++ b/arch/riscv/lib/uaccess.S
+@@ -17,8 +17,11 @@ ENTRY(__asm_copy_from_user)
+ li t6, SR_SUM
+ csrs CSR_STATUS, t6
+
+- /* Save for return value */
+- mv t5, a2
++ /*
++ * Save the terminal address which will be used to compute the number
++ * of bytes copied in case of a fixup exception.
++ */
++ add t5, a0, a2
+
+ /*
+ * Register allocation for code below:
+@@ -176,7 +179,7 @@ ENTRY(__asm_copy_from_user)
+ 10:
+ /* Disable access to user memory */
+ csrc CSR_STATUS, t6
+- mv a0, t5
++ sub a0, t5, a0
+ ret
+ ENDPROC(__asm_copy_to_user)
+ ENDPROC(__asm_copy_from_user)
+@@ -228,7 +231,7 @@ ENTRY(__clear_user)
+ 11:
+ /* Disable access to user memory */
+ csrc CSR_STATUS, t6
+- mv a0, a1
++ sub a0, a3, a0
+ ret
+ ENDPROC(__clear_user)
+ EXPORT_SYMBOL(__clear_user)
+diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
+index ad9844c5b40cb..e6468eab2681e 100644
+--- a/block/blk-crypto-fallback.c
++++ b/block/blk-crypto-fallback.c
+@@ -78,7 +78,7 @@ static struct blk_crypto_fallback_keyslot {
+ struct crypto_skcipher *tfms[BLK_ENCRYPTION_MODE_MAX];
+ } *blk_crypto_keyslots;
+
+-static struct blk_crypto_profile blk_crypto_fallback_profile;
++static struct blk_crypto_profile *blk_crypto_fallback_profile;
+ static struct workqueue_struct *blk_crypto_wq;
+ static mempool_t *blk_crypto_bounce_page_pool;
+ static struct bio_set crypto_bio_split;
+@@ -292,7 +292,7 @@ static bool blk_crypto_fallback_encrypt_bio(struct bio **bio_ptr)
+ * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
+ * this bio's algorithm and key.
+ */
+- blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
++ blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
+ bc->bc_key, &slot);
+ if (blk_st != BLK_STS_OK) {
+ src_bio->bi_status = blk_st;
+@@ -395,7 +395,7 @@ static void blk_crypto_fallback_decrypt_bio(struct work_struct *work)
+ * Get a blk-crypto-fallback keyslot that contains a crypto_skcipher for
+ * this bio's algorithm and key.
+ */
+- blk_st = blk_crypto_get_keyslot(&blk_crypto_fallback_profile,
++ blk_st = blk_crypto_get_keyslot(blk_crypto_fallback_profile,
+ bc->bc_key, &slot);
+ if (blk_st != BLK_STS_OK) {
+ bio->bi_status = blk_st;
+@@ -499,7 +499,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+ return false;
+ }
+
+- if (!__blk_crypto_cfg_supported(&blk_crypto_fallback_profile,
++ if (!__blk_crypto_cfg_supported(blk_crypto_fallback_profile,
+ &bc->bc_key->crypto_cfg)) {
+ bio->bi_status = BLK_STS_NOTSUPP;
+ return false;
+@@ -526,7 +526,7 @@ bool blk_crypto_fallback_bio_prep(struct bio **bio_ptr)
+
+ int blk_crypto_fallback_evict_key(const struct blk_crypto_key *key)
+ {
+- return __blk_crypto_evict_key(&blk_crypto_fallback_profile, key);
++ return __blk_crypto_evict_key(blk_crypto_fallback_profile, key);
+ }
+
+ static bool blk_crypto_fallback_inited;
+@@ -534,7 +534,6 @@ static int blk_crypto_fallback_init(void)
+ {
+ int i;
+ int err;
+- struct blk_crypto_profile *profile = &blk_crypto_fallback_profile;
+
+ if (blk_crypto_fallback_inited)
+ return 0;
+@@ -545,18 +544,27 @@ static int blk_crypto_fallback_init(void)
+ if (err)
+ goto out;
+
+- err = blk_crypto_profile_init(profile, blk_crypto_num_keyslots);
+- if (err)
++ /* Dynamic allocation is needed because of lockdep_register_key(). */
++ blk_crypto_fallback_profile =
++ kzalloc(sizeof(*blk_crypto_fallback_profile), GFP_KERNEL);
++ if (!blk_crypto_fallback_profile) {
++ err = -ENOMEM;
+ goto fail_free_bioset;
++ }
++
++ err = blk_crypto_profile_init(blk_crypto_fallback_profile,
++ blk_crypto_num_keyslots);
++ if (err)
++ goto fail_free_profile;
+ err = -ENOMEM;
+
+- profile->ll_ops = blk_crypto_fallback_ll_ops;
+- profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
++ blk_crypto_fallback_profile->ll_ops = blk_crypto_fallback_ll_ops;
++ blk_crypto_fallback_profile->max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+
+ /* All blk-crypto modes have a crypto API fallback. */
+ for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
+- profile->modes_supported[i] = 0xFFFFFFFF;
+- profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
++ blk_crypto_fallback_profile->modes_supported[i] = 0xFFFFFFFF;
++ blk_crypto_fallback_profile->modes_supported[BLK_ENCRYPTION_MODE_INVALID] = 0;
+
+ blk_crypto_wq = alloc_workqueue("blk_crypto_wq",
+ WQ_UNBOUND | WQ_HIGHPRI |
+@@ -597,7 +605,9 @@ fail_free_keyslots:
+ fail_free_wq:
+ destroy_workqueue(blk_crypto_wq);
+ fail_destroy_profile:
+- blk_crypto_profile_destroy(profile);
++ blk_crypto_profile_destroy(blk_crypto_fallback_profile);
++fail_free_profile:
++ kfree(blk_crypto_fallback_profile);
+ fail_free_bioset:
+ bioset_exit(&crypto_bio_split);
+ out:
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index faad19b396d50..d6f405763c56f 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -600,6 +600,9 @@ static const struct usb_device_id blacklist_table[] = {
+ { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
+index cae078bffc715..9b7268bae66ab 100644
+--- a/drivers/bus/ti-sysc.c
++++ b/drivers/bus/ti-sysc.c
+@@ -2159,6 +2159,8 @@ static int sysc_reset(struct sysc *ddata)
+ sysc_val = sysc_read_sysconfig(ddata);
+ sysc_val |= sysc_mask;
+ sysc_write(ddata, sysc_offset, sysc_val);
++ /* Flush posted write */
++ sysc_val = sysc_read_sysconfig(ddata);
+ }
+
+ if (ddata->cfg.srst_udelay)
+diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c
+index fe06644725203..f5d4359555d77 100644
+--- a/drivers/cpuidle/cpuidle-psci-domain.c
++++ b/drivers/cpuidle/cpuidle-psci-domain.c
+@@ -117,20 +117,6 @@ static void psci_pd_remove(void)
+ }
+ }
+
+-static bool psci_pd_try_set_osi_mode(void)
+-{
+- int ret;
+-
+- if (!psci_has_osi_support())
+- return false;
+-
+- ret = psci_set_osi_mode(true);
+- if (ret)
+- return false;
+-
+- return true;
+-}
+-
+ static void psci_cpuidle_domain_sync_state(struct device *dev)
+ {
+ /*
+@@ -149,15 +135,12 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
+ {
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *node;
+- bool use_osi;
++ bool use_osi = psci_has_osi_support();
+ int ret = 0, pd_count = 0;
+
+ if (!np)
+ return -ENODEV;
+
+- /* If OSI mode is supported, let's try to enable it. */
+- use_osi = psci_pd_try_set_osi_mode();
+-
+ /*
+ * Parse child nodes for the "#power-domain-cells" property and
+ * initialize a genpd/genpd-of-provider pair when it's found.
+@@ -167,32 +150,37 @@ static int psci_cpuidle_domain_probe(struct platform_device *pdev)
+ continue;
+
+ ret = psci_pd_init(node, use_osi);
+- if (ret)
+- goto put_node;
++ if (ret) {
++ of_node_put(node);
++ goto exit;
++ }
+
+ pd_count++;
+ }
+
+ /* Bail out if not using the hierarchical CPU topology. */
+ if (!pd_count)
+- goto no_pd;
++ return 0;
+
+ /* Link genpd masters/subdomains to model the CPU topology. */
+ ret = dt_idle_pd_init_topology(np);
+ if (ret)
+ goto remove_pd;
+
+- pr_info("Initialized CPU PM domain topology\n");
++ /* let's try to enable OSI. */
++ ret = psci_set_osi_mode(use_osi);
++ if (ret)
++ goto remove_pd;
++
++ pr_info("Initialized CPU PM domain topology using %s mode\n",
++ use_osi ? "OSI" : "PC");
+ return 0;
+
+-put_node:
+- of_node_put(node);
+ remove_pd:
++ dt_idle_pd_remove_topology(np);
+ psci_pd_remove();
++exit:
+ pr_err("failed to create CPU PM domains ret=%d\n", ret);
+-no_pd:
+- if (use_osi)
+- psci_set_osi_mode(false);
+ return ret;
+ }
+
+diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
+index af22be84034bb..a53eacebca339 100644
+--- a/drivers/firewire/net.c
++++ b/drivers/firewire/net.c
+@@ -479,7 +479,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
+ struct sk_buff *skb, u16 source_node_id,
+ bool is_broadcast, u16 ether_type)
+ {
+- int status;
++ int status, len;
+
+ switch (ether_type) {
+ case ETH_P_ARP:
+@@ -533,13 +533,15 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
+ }
+ skb->protocol = protocol;
+ }
++
++ len = skb->len;
+ status = netif_rx(skb);
+ if (status == NET_RX_DROP) {
+ net->stats.rx_errors++;
+ net->stats.rx_dropped++;
+ } else {
+ net->stats.rx_packets++;
+- net->stats.rx_bytes += skb->len;
++ net->stats.rx_bytes += len;
+ }
+
+ return 0;
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+index fdb53d4394f30..02a112d00d413 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+@@ -185,7 +185,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+ uint64_t *chunk_array_user;
+ uint64_t *chunk_array;
+ uint32_t uf_offset = 0;
+- unsigned int size;
++ size_t size;
+ int ret;
+ int i;
+
+@@ -1607,15 +1607,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
+ continue;
+
+ r = dma_fence_wait_timeout(fence, true, timeout);
++ if (r > 0 && fence->error)
++ r = fence->error;
++
+ dma_fence_put(fence);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+-
+- if (fence->error)
+- return fence->error;
+ }
+
+ memset(wait, 0, sizeof(*wait));
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 773383e660e8c..e6427a00cf6d6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -4232,6 +4232,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
+ drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
+
+ cancel_delayed_work_sync(&adev->delayed_init_work);
++ flush_delayed_work(&adev->gfx.gfx_off_delay_work);
+
+ amdgpu_ras_suspend(adev);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+index ed6878d5b3ce3..418e4c77ceb80 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+@@ -501,6 +501,41 @@ int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
+ return 0;
+ }
+
++/**
++ * amdgpu_fence_need_ring_interrupt_restore - helper function to check whether
++ * fence driver interrupts need to be restored.
++ *
++ * @ring: ring that to be checked
++ *
++ * Interrupts for rings that belong to GFX IP don't need to be restored
++ * when the target power state is s0ix.
++ *
++ * Return true if need to restore interrupts, false otherwise.
++ */
++static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
++{
++ struct amdgpu_device *adev = ring->adev;
++ bool is_gfx_power_domain = false;
++
++ switch (ring->funcs->type) {
++ case AMDGPU_RING_TYPE_SDMA:
++ /* SDMA 5.x+ is part of GFX power domain so it's covered by GFXOFF */
++ if (adev->ip_versions[SDMA0_HWIP][0] >= IP_VERSION(5, 0, 0))
++ is_gfx_power_domain = true;
++ break;
++ case AMDGPU_RING_TYPE_GFX:
++ case AMDGPU_RING_TYPE_COMPUTE:
++ case AMDGPU_RING_TYPE_KIQ:
++ case AMDGPU_RING_TYPE_MES:
++ is_gfx_power_domain = true;
++ break;
++ default:
++ break;
++ }
++
++ return !(adev->in_s0ix && is_gfx_power_domain);
++}
++
+ /**
+ * amdgpu_fence_driver_hw_fini - tear down the fence driver
+ * for all possible rings.
+@@ -529,7 +564,8 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
+ amdgpu_fence_driver_force_completion(ring);
+
+ if (!drm_dev_is_unplugged(adev_to_drm(adev)) &&
+- ring->fence_drv.irq_src)
++ ring->fence_drv.irq_src &&
++ amdgpu_fence_need_ring_interrupt_restore(ring))
+ amdgpu_irq_put(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+
+@@ -604,7 +640,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
+ continue;
+
+ /* enable the interrupt */
+- if (ring->fence_drv.irq_src)
++ if (ring->fence_drv.irq_src &&
++ amdgpu_fence_need_ring_interrupt_restore(ring))
+ amdgpu_irq_get(adev, ring->fence_drv.irq_src,
+ ring->fence_drv.irq_type);
+ }
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+index b803e785d3aff..23f0067f92e4e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+@@ -585,15 +585,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
+
+ if (adev->gfx.gfx_off_req_count == 0 &&
+ !adev->gfx.gfx_off_state) {
+- /* If going to s2idle, no need to wait */
+- if (adev->in_s0ix) {
+- if (!amdgpu_dpm_set_powergating_by_smu(adev,
+- AMD_IP_BLOCK_TYPE_GFX, true))
+- adev->gfx.gfx_off_state = true;
+- } else {
+- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
++ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ delay);
+- }
+ }
+ } else {
+ if (adev->gfx.gfx_off_req_count == 0) {
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+index ca5dc51600fac..9efbc0f7c6bdf 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+@@ -160,7 +160,6 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
+ continue;
+
+ for (k = 0; k < src->num_types; ++k) {
+- atomic_set(&src->enabled_types[k], 0);
+ r = src->funcs->set(adev, src, k,
+ AMDGPU_IRQ_STATE_DISABLE);
+ if (r)
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+index eecbd8eeb1f5a..8764ff7ed97e0 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+@@ -514,6 +514,8 @@ static int psp_sw_fini(void *handle)
+ kfree(cmd);
+ cmd = NULL;
+
++ psp_free_shared_bufs(psp);
++
+ if (psp->km_ring.ring_mem)
+ amdgpu_bo_free_kernel(&adev->firmware.rbuf,
+ &psp->km_ring.ring_mem_mc_addr,
+@@ -2673,8 +2675,6 @@ static int psp_hw_fini(void *handle)
+
+ psp_ring_destroy(psp, PSP_RING_TYPE__KM);
+
+- psp_free_shared_bufs(psp);
+-
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+index d3558c34d406c..296b2d5976af7 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+@@ -361,6 +361,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
+ amdgpu_bo_free_kernel(&ring->ring_obj,
+ &ring->gpu_addr,
+ (void **)&ring->ring);
++ } else {
++ kfree(ring->fence_drv.fences);
+ }
+
+ dma_fence_put(ring->vmid_wait);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+index ec938a1a50621..4c661e024e13d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+@@ -1352,6 +1352,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+ amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
+
+ bo_va->ref_count = 1;
++ bo_va->last_pt_update = dma_fence_get_stub();
+ INIT_LIST_HEAD(&bo_va->valids);
+ INIT_LIST_HEAD(&bo_va->invalids);
+
+@@ -2073,7 +2074,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ vm->update_funcs = &amdgpu_vm_cpu_funcs;
+ else
+ vm->update_funcs = &amdgpu_vm_sdma_funcs;
+- vm->last_update = NULL;
++
++ vm->last_update = dma_fence_get_stub();
+ vm->last_unlocked = dma_fence_get_stub();
+ vm->last_tlb_flush = dma_fence_get_stub();
+
+@@ -2198,7 +2200,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+ goto unreserve_bo;
+
+ dma_fence_put(vm->last_update);
+- vm->last_update = NULL;
++ vm->last_update = dma_fence_get_stub();
+ vm->is_compute_context = true;
+
+ /* Free the shadow bo for compute VM */
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+index 9f718b98da1f7..249b269e2cc53 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+@@ -7397,27 +7397,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state,
+ }
+
+ #ifdef CONFIG_DRM_AMD_DC_HDCP
+-static bool is_content_protection_different(struct drm_connector_state *state,
+- const struct drm_connector_state *old_state,
+- const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
++static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state,
++ struct drm_crtc_state *old_crtc_state,
++ struct drm_connector_state *new_conn_state,
++ struct drm_connector_state *old_conn_state,
++ const struct drm_connector *connector,
++ struct hdcp_workqueue *hdcp_w)
+ {
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+
+- /* Handle: Type0/1 change */
+- if (old_state->hdcp_content_type != state->hdcp_content_type &&
+- state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
++ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
++ connector->index, connector->status, connector->dpms);
++ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
++ old_conn_state->content_protection, new_conn_state->content_protection);
++
++ if (old_crtc_state)
++ pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
++ old_crtc_state->enable,
++ old_crtc_state->active,
++ old_crtc_state->mode_changed,
++ old_crtc_state->active_changed,
++ old_crtc_state->connectors_changed);
++
++ if (new_crtc_state)
++ pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
++ new_crtc_state->enable,
++ new_crtc_state->active,
++ new_crtc_state->mode_changed,
++ new_crtc_state->active_changed,
++ new_crtc_state->connectors_changed);
++
++ /* hdcp content type change */
++ if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type &&
++ new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
++ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
++ pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__);
+ return true;
+ }
+
+- /* CP is being re enabled, ignore this
+- *
+- * Handles: ENABLED -> DESIRED
+- */
+- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+- state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+- state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
++ /* CP is being re enabled, ignore this */
++ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
++ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
++ if (new_crtc_state && new_crtc_state->mode_changed) {
++ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
++ pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__);
++ return true;
++ };
++ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
++ pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__);
+ return false;
+ }
+
+@@ -7425,9 +7453,9 @@ static bool is_content_protection_different(struct drm_connector_state *state,
+ *
+ * Handles: UNDESIRED -> ENABLED
+ */
+- if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
+- state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+- state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
++ if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
++ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
++ new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+
+ /* Stream removed and re-enabled
+ *
+@@ -7437,10 +7465,12 @@ static bool is_content_protection_different(struct drm_connector_state *state,
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+- if (!(old_state->crtc && old_state->crtc->enabled) &&
+- state->crtc && state->crtc->enabled &&
++ if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) &&
++ new_conn_state->crtc && new_conn_state->crtc->enabled &&
+ connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ dm_con_state->update_hdcp = false;
++ pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n",
++ __func__);
+ return true;
+ }
+
+@@ -7452,35 +7482,42 @@ static bool is_content_protection_different(struct drm_connector_state *state,
+ *
+ * Handles: DESIRED -> DESIRED (Special case)
+ */
+- if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+- connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
++ if (dm_con_state->update_hdcp &&
++ new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
++ connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
+ dm_con_state->update_hdcp = false;
++ pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n",
++ __func__);
+ return true;
+ }
+
+- /*
+- * Handles: UNDESIRED -> UNDESIRED
+- * DESIRED -> DESIRED
+- * ENABLED -> ENABLED
+- */
+- if (old_state->content_protection == state->content_protection)
++ if (old_conn_state->content_protection == new_conn_state->content_protection) {
++ if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) {
++ if (new_crtc_state && new_crtc_state->mode_changed) {
++ pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n",
++ __func__);
++ return true;
++ };
++ pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n",
++ __func__);
++ return false;
++ };
++
++ pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__);
+ return false;
++ }
+
+- /*
+- * Handles: UNDESIRED -> DESIRED
+- * DESIRED -> UNDESIRED
+- * ENABLED -> UNDESIRED
+- */
+- if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
++ if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) {
++ pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n",
++ __func__);
+ return true;
++ }
+
+- /*
+- * Handles: DESIRED -> ENABLED
+- */
++ pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__);
+ return false;
+ }
+-
+ #endif
++
+ static void remove_stream(struct amdgpu_device *adev,
+ struct amdgpu_crtc *acrtc,
+ struct dc_stream_state *stream)
+@@ -8335,10 +8372,67 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
++ if (!adev->dm.hdcp_workqueue)
++ continue;
++
++ pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i);
++
++ if (!connector)
++ continue;
++
++ pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n",
++ connector->index, connector->status, connector->dpms);
++ pr_debug("[HDCP_DM] state protection old: %x new: %x\n",
++ old_con_state->content_protection, new_con_state->content_protection);
++
++ if (aconnector->dc_sink) {
++ if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
++ aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) {
++ pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n",
++ aconnector->dc_sink->edid_caps.display_name);
++ }
++ }
++
+ new_crtc_state = NULL;
++ old_crtc_state = NULL;
+
+- if (acrtc)
++ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
++ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
++ }
++
++ if (old_crtc_state)
++ pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
++ old_crtc_state->enable,
++ old_crtc_state->active,
++ old_crtc_state->mode_changed,
++ old_crtc_state->active_changed,
++ old_crtc_state->connectors_changed);
++
++ if (new_crtc_state)
++ pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n",
++ new_crtc_state->enable,
++ new_crtc_state->active,
++ new_crtc_state->mode_changed,
++ new_crtc_state->active_changed,
++ new_crtc_state->connectors_changed);
++ }
++
++ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
++ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
++ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
++ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
++
++ if (!adev->dm.hdcp_workqueue)
++ continue;
++
++ new_crtc_state = NULL;
++ old_crtc_state = NULL;
++
++ if (acrtc) {
++ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
++ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
++ }
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+@@ -8350,11 +8444,44 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
+ continue;
+ }
+
+- if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
++ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
++ old_con_state, connector, adev->dm.hdcp_workqueue)) {
++ /* when display is unplugged from mst hub, connctor will
++ * be destroyed within dm_dp_mst_connector_destroy. connector
++ * hdcp perperties, like type, undesired, desired, enabled,
++ * will be lost. So, save hdcp properties into hdcp_work within
++ * amdgpu_dm_atomic_commit_tail. if the same display is
++ * plugged back with same display index, its hdcp properties
++ * will be retrieved from hdcp_work within dm_dp_mst_get_modes
++ */
++
++ bool enable_encryption = false;
++
++ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
++ enable_encryption = true;
++
++ if (aconnector->dc_link && aconnector->dc_sink &&
++ aconnector->dc_link->type == dc_connection_mst_branch) {
++ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
++ struct hdcp_workqueue *hdcp_w =
++ &hdcp_work[aconnector->dc_link->link_index];
++
++ hdcp_w->hdcp_content_type[connector->index] =
++ new_con_state->hdcp_content_type;
++ hdcp_w->content_protection[connector->index] =
++ new_con_state->content_protection;
++ }
++
++ if (new_crtc_state && new_crtc_state->mode_changed &&
++ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
++ enable_encryption = true;
++
++ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
++
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+- new_con_state->hdcp_content_type,
+- new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
++ new_con_state->hdcp_content_type, enable_encryption);
++ }
+ }
+ #endif
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+index 09294ff122fea..bbbf7d0eff82f 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+@@ -52,6 +52,20 @@ struct hdcp_workqueue {
+ struct mod_hdcp_link link;
+
+ enum mod_hdcp_encryption_status encryption_status;
++
++ /* when display is unplugged from mst hub, connctor will be
++ * destroyed within dm_dp_mst_connector_destroy. connector
++ * hdcp perperties, like type, undesired, desired, enabled,
++ * will be lost. So, save hdcp properties into hdcp_work within
++ * amdgpu_dm_atomic_commit_tail. if the same display is
++ * plugged back with same display index, its hdcp properties
++ * will be retrieved from hdcp_work within dm_dp_mst_get_modes
++ */
++ /* un-desired, desired, enabled */
++ unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX];
++ /* hdcp1.x, hdcp2.x */
++ unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX];
++
+ uint8_t max_link;
+
+ uint8_t *srm;
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index d07e1053b36b3..a9ddff774a978 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -32,6 +32,10 @@
+ #include "amdgpu_dm.h"
+ #include "amdgpu_dm_mst_types.h"
+
++#ifdef CONFIG_DRM_AMD_DC_HDCP
++#include "amdgpu_dm_hdcp.h"
++#endif
++
+ #include "dc.h"
+ #include "dm_helpers.h"
+
+@@ -363,6 +367,32 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
+ /* dc_link_add_remote_sink returns a new reference */
+ aconnector->dc_sink = dc_sink;
+
++ /* when display is unplugged from mst hub, connctor will be
++ * destroyed within dm_dp_mst_connector_destroy. connector
++ * hdcp perperties, like type, undesired, desired, enabled,
++ * will be lost. So, save hdcp properties into hdcp_work within
++ * amdgpu_dm_atomic_commit_tail. if the same display is
++ * plugged back with same display index, its hdcp properties
++ * will be retrieved from hdcp_work within dm_dp_mst_get_modes
++ */
++#ifdef CONFIG_DRM_AMD_DC_HDCP
++ if (aconnector->dc_sink && connector->state) {
++ struct drm_device *dev = connector->dev;
++ struct amdgpu_device *adev = drm_to_adev(dev);
++
++ if (adev->dm.hdcp_workqueue) {
++ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
++ struct hdcp_workqueue *hdcp_w =
++ &hdcp_work[aconnector->dc_link->link_index];
++
++ connector->state->hdcp_content_type =
++ hdcp_w->hdcp_content_type[connector->index];
++ connector->state->content_protection =
++ hdcp_w->content_protection[connector->index];
++ }
++ }
++#endif
++
+ if (aconnector->dc_sink) {
+ amdgpu_dm_update_freesync_caps(
+ connector, aconnector->edid);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+index 915a20461c77c..893c0809cd4e0 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+@@ -230,7 +230,8 @@
+ type DTBCLK_P2_SRC_SEL;\
+ type DTBCLK_P2_EN;\
+ type DTBCLK_P3_SRC_SEL;\
+- type DTBCLK_P3_EN;
++ type DTBCLK_P3_EN;\
++ type DENTIST_DISPCLK_CHG_DONE;
+
+ struct dccg_shift {
+ DCCG_REG_FIELD_LIST(uint8_t)
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+index 7d2b982506fd7..cef32a1f91cdc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+@@ -47,6 +47,14 @@ void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+ {
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
++ if (dccg->dpp_clock_gated[dpp_inst]) {
++ /*
++ * Do not update the DPPCLK DTO if the clock is stopped.
++ * It is treated the same as if the pipe itself were in PG.
++ */
++ return;
++ }
++
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+index 85ea3334355c2..b74705c1c8dcc 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dccg.c
+@@ -296,6 +296,9 @@ static void dccg314_dpp_root_clock_control(
+ {
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
++ if (dccg->dpp_clock_gated[dpp_inst] != clock_on)
++ return;
++
+ if (clock_on) {
+ /* turn off the DTO and leave phase/modulo at max */
+ REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 0);
+@@ -309,6 +312,8 @@ static void dccg314_dpp_root_clock_control(
+ DPPCLK0_DTO_PHASE, 0,
+ DPPCLK0_DTO_MODULO, 1);
+ }
++
++ dccg->dpp_clock_gated[dpp_inst] = !clock_on;
+ }
+
+ static const struct dccg_funcs dccg314_funcs = {
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+index b7782433ce6ba..503ab45b4ace3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+@@ -920,6 +920,22 @@ static const struct dc_debug_options debug_defaults_drv = {
+ .afmt = true,
+ }
+ },
++
++ .root_clock_optimization = {
++ .bits = {
++ .dpp = true,
++ .dsc = false,
++ .hdmistream = false,
++ .hdmichar = false,
++ .dpstream = false,
++ .symclk32_se = false,
++ .symclk32_le = false,
++ .symclk_fe = false,
++ .physymclk = false,
++ .dpiasymclk = false,
++ }
++ },
++
+ .seamless_boot_odm_combine = true
+ };
+
+@@ -1917,6 +1933,10 @@ static bool dcn314_resource_construct(
+ dc->debug = debug_defaults_drv;
+ else
+ dc->debug = debug_defaults_diags;
++
++ /* Disable root clock optimization */
++ dc->debug.root_clock_optimization.u32All = 0;
++
+ // Init the vm_helper
+ if (dc->vm_helper)
+ vm_helper_init(dc->vm_helper, 16);
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+index 3fb4bcc343531..ffbb739d85b69 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.c
+@@ -42,6 +42,20 @@
+ #define DC_LOGGER \
+ dccg->ctx->logger
+
++/* This function is a workaround for writing to OTG_PIXEL_RATE_DIV
++ * without the probability of causing a DIG FIFO error.
++ */
++static void dccg32_wait_for_dentist_change_done(
++ struct dccg *dccg)
++{
++ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
++
++ uint32_t dentist_dispclk_value = REG_READ(DENTIST_DISPCLK_CNTL);
++
++ REG_WRITE(DENTIST_DISPCLK_CNTL, dentist_dispclk_value);
++ REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 50, 2000);
++}
++
+ static void dccg32_get_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+@@ -110,21 +124,29 @@ static void dccg32_set_pixel_rate_div(
+ REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ OTG0_PIXEL_RATE_DIVK1, k1,
+ OTG0_PIXEL_RATE_DIVK2, k2);
++
++ dccg32_wait_for_dentist_change_done(dccg);
+ break;
+ case 1:
+ REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ OTG1_PIXEL_RATE_DIVK1, k1,
+ OTG1_PIXEL_RATE_DIVK2, k2);
++
++ dccg32_wait_for_dentist_change_done(dccg);
+ break;
+ case 2:
+ REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ OTG2_PIXEL_RATE_DIVK1, k1,
+ OTG2_PIXEL_RATE_DIVK2, k2);
++
++ dccg32_wait_for_dentist_change_done(dccg);
+ break;
+ case 3:
+ REG_UPDATE_2(OTG_PIXEL_RATE_DIV,
+ OTG3_PIXEL_RATE_DIVK1, k1,
+ OTG3_PIXEL_RATE_DIVK2, k2);
++
++ dccg32_wait_for_dentist_change_done(dccg);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
+index 1c46fad0977bf..fc3c9c650d43c 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dccg.h
+@@ -147,7 +147,8 @@
+ DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_SRC_SEL, mask_sh),\
+ DCCG_SF(DTBCLK_P_CNTL, DTBCLK_P3_EN, mask_sh),\
+ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
+- DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh)
++ DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
++ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, mask_sh)
+
+
+ struct dccg *dccg32_create(
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index f5fa7abd97fc7..d477dcc9149fa 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -1177,7 +1177,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign
+ *k2_div = PIXEL_RATE_DIV_BY_2;
+ else
+ *k2_div = PIXEL_RATE_DIV_BY_4;
+- } else if (dc_is_dp_signal(stream->signal)) {
++ } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) {
+ if (two_pix_per_container) {
+ *k1_div = PIXEL_RATE_DIV_BY_1;
+ *k2_div = PIXEL_RATE_DIV_BY_2;
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+index 026cf13d203fc..03cdfb5577888 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+@@ -1272,7 +1272,8 @@ unsigned int dcn32_calc_num_avail_chans_for_mall(struct dc *dc, int num_chans);
+ DCCG_SRII(PHASE, DTBCLK_DTO, 0), DCCG_SRII(PHASE, DTBCLK_DTO, 1), \
+ DCCG_SRII(PHASE, DTBCLK_DTO, 2), DCCG_SRII(PHASE, DTBCLK_DTO, 3), \
+ SR(DCCG_AUDIO_DTBCLK_DTO_MODULO), SR(DCCG_AUDIO_DTBCLK_DTO_PHASE), \
+- SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), SR(DCCG_AUDIO_DTO_SOURCE) \
++ SR(OTG_PIXEL_RATE_DIV), SR(DTBCLK_P_CNTL), \
++ SR(DCCG_AUDIO_DTO_SOURCE), SR(DENTIST_DISPCLK_CNTL) \
+ )
+
+ /* VMID */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+index 2bb768413c92a..19f55657272e4 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+@@ -808,7 +808,7 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
+ v->SwathHeightC[k],
+ TWait,
+ (v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
+- v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= MIN_DCFCLK_FREQ_MHZ) ?
++ v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ /* Output */
+ &v->DSTXAfterScaler[k],
+@@ -3289,7 +3289,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
+ v->swath_width_chroma_ub_this_state[k],
+ v->SwathHeightYThisState[k],
+ v->SwathHeightCThisState[k], v->TWait,
+- (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= MIN_DCFCLK_FREQ_MHZ) ?
++ (v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+
+ /* Output */
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+index e92eee2c664d0..a475775bc3894 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h
+@@ -52,7 +52,7 @@
+ #define BPP_BLENDED_PIPE 0xffffffff
+
+ #define MEM_STROBE_FREQ_MHZ 1600
+-#define MIN_DCFCLK_FREQ_MHZ 200
++#define DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ 300
+ #define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0
+
+ struct display_mode_lib;
+diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+index ad6acd1b34e1d..9651cccb084a3 100644
+--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
++++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+@@ -68,6 +68,7 @@ struct dccg {
+ const struct dccg_funcs *funcs;
+ int pipe_dppclk_khz[MAX_PIPES];
+ int ref_dppclk;
++ bool dpp_clock_gated[MAX_PIPES];
+ //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */
+ //int audio_dtbclk_khz;/* TODO needs to be removed */
+ //int ref_dtbclk_khz;/* TODO needs to be removed */
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+index d191ff52d4f06..a664a0a284784 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+@@ -1562,9 +1562,9 @@ static int smu_disable_dpms(struct smu_context *smu)
+
+ /*
+ * For SMU 13.0.4/11, PMFW will handle the features disablement properly
+- * for gpu reset case. Driver involvement is unnecessary.
++ * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
+ */
+- if (amdgpu_in_reset(adev)) {
++ if (amdgpu_in_reset(adev) || adev->in_s0ix) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ case IP_VERSION(13, 0, 11):
+diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+index 31835d96deef9..839a812e0da32 100644
+--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
++++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+@@ -588,7 +588,9 @@ err0_out:
+ return -ENOMEM;
+ }
+
+-static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu)
++static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *smu,
++ bool use_metrics_v3,
++ bool use_metrics_v2)
+ {
+ struct smu_table_context *smu_table= &smu->smu_table;
+ SmuMetricsExternal_t *metrics_ext =
+@@ -596,13 +598,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
+ uint32_t throttler_status = 0;
+ int i;
+
+- if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+- (smu->smc_fw_version >= 0x3A4900)) {
++ if (use_metrics_v3) {
+ for (i = 0; i < THROTTLER_COUNT; i++)
+ throttler_status |=
+ (metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
+- } else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+- (smu->smc_fw_version >= 0x3A4300)) {
++ } else if (use_metrics_v2) {
+ for (i = 0; i < THROTTLER_COUNT; i++)
+ throttler_status |=
+ (metrics_ext->SmuMetrics_V2.ThrottlingPercentage[i] ? 1U << i : 0);
+@@ -864,7 +864,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
+ metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_THROTTLER_STATUS:
+- *value = sienna_cichlid_get_throttler_status_locked(smu);
++ *value = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
+ break;
+ case METRICS_CURR_FANSPEED:
+ *value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
+@@ -4017,7 +4017,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
+ gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
+
+- gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
++ gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu, use_metrics_v3, use_metrics_v2);
+ gpu_metrics->indep_throttle_status =
+ smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
+ sienna_cichlid_throttler_map);
+diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
+index a15e09b551708..2c2e0f041f869 100644
+--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
++++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
+@@ -2727,7 +2727,7 @@ static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
+ __drm_atomic_helper_connector_reset(&sdvo_connector->base.base,
+ &conn_state->base.base);
+
+- INIT_LIST_HEAD(&sdvo_connector->base.panel.fixed_modes);
++ intel_panel_init_alloc(&sdvo_connector->base);
+
+ return sdvo_connector;
+ }
+diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
+index 49c5451cdfb16..d6dd79541f6a9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
++++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
+@@ -1407,8 +1407,7 @@ nouveau_connector_create(struct drm_device *dev,
+ ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
+ &nv_connector->conn);
+ if (ret) {
+- kfree(nv_connector);
+- return ERR_PTR(ret);
++ goto drm_conn_err;
+ }
+ }
+
+@@ -1470,4 +1469,9 @@ nouveau_connector_create(struct drm_device *dev,
+
+ drm_connector_register(connector);
+ return connector;
++
++drm_conn_err:
++ drm_connector_cleanup(connector);
++ kfree(nv_connector);
++ return ERR_PTR(ret);
+ }
+diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
+index f851aaf2c5917..5e067ba7e5fba 100644
+--- a/drivers/gpu/drm/panel/panel-simple.c
++++ b/drivers/gpu/drm/panel/panel-simple.c
+@@ -969,21 +969,21 @@ static const struct panel_desc auo_g104sn02 = {
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+ };
+
+-static const struct drm_display_mode auo_g121ean01_mode = {
+- .clock = 66700,
+- .hdisplay = 1280,
+- .hsync_start = 1280 + 58,
+- .hsync_end = 1280 + 58 + 8,
+- .htotal = 1280 + 58 + 8 + 70,
+- .vdisplay = 800,
+- .vsync_start = 800 + 6,
+- .vsync_end = 800 + 6 + 4,
+- .vtotal = 800 + 6 + 4 + 10,
++static const struct display_timing auo_g121ean01_timing = {
++ .pixelclock = { 60000000, 74400000, 90000000 },
++ .hactive = { 1280, 1280, 1280 },
++ .hfront_porch = { 20, 50, 100 },
++ .hback_porch = { 20, 50, 100 },
++ .hsync_len = { 30, 100, 200 },
++ .vactive = { 800, 800, 800 },
++ .vfront_porch = { 2, 10, 25 },
++ .vback_porch = { 2, 10, 25 },
++ .vsync_len = { 4, 18, 50 },
+ };
+
+ static const struct panel_desc auo_g121ean01 = {
+- .modes = &auo_g121ean01_mode,
+- .num_modes = 1,
++ .timings = &auo_g121ean01_timing,
++ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 261,
+diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
+index 432758ad39a35..94753e017ea8d 100644
+--- a/drivers/gpu/drm/qxl/qxl_drv.h
++++ b/drivers/gpu/drm/qxl/qxl_drv.h
+@@ -312,7 +312,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+ u32 domain,
+ size_t size,
+ struct qxl_surface *surf,
+- struct qxl_bo **qobj,
++ struct drm_gem_object **gobj,
+ uint32_t *handle);
+ void qxl_gem_object_free(struct drm_gem_object *gobj);
+ int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
+index d636ba6854513..17df5c7ccf691 100644
+--- a/drivers/gpu/drm/qxl/qxl_dumb.c
++++ b/drivers/gpu/drm/qxl/qxl_dumb.c
+@@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
+ {
+ struct qxl_device *qdev = to_qxl(dev);
+ struct qxl_bo *qobj;
++ struct drm_gem_object *gobj;
+ uint32_t handle;
+ int r;
+ struct qxl_surface surf;
+@@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
+
+ r = qxl_gem_object_create_with_handle(qdev, file_priv,
+ QXL_GEM_DOMAIN_CPU,
+- args->size, &surf, &qobj,
++ args->size, &surf, &gobj,
+ &handle);
+ if (r)
+ return r;
++ qobj = gem_to_qxl_bo(gobj);
+ qobj->is_dumb = true;
++ drm_gem_object_put(gobj);
+ args->pitch = pitch;
+ args->handle = handle;
+ return 0;
+diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
+index a08da0bd9098b..fc5e3763c3595 100644
+--- a/drivers/gpu/drm/qxl/qxl_gem.c
++++ b/drivers/gpu/drm/qxl/qxl_gem.c
+@@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
+ return 0;
+ }
+
++/*
++ * If the caller passed a valid gobj pointer, it is responsible to call
++ * drm_gem_object_put() when it no longer needs to acess the object.
++ *
++ * If gobj is NULL, it is handled internally.
++ */
+ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+ struct drm_file *file_priv,
+ u32 domain,
+ size_t size,
+ struct qxl_surface *surf,
+- struct qxl_bo **qobj,
++ struct drm_gem_object **gobj,
+ uint32_t *handle)
+ {
+- struct drm_gem_object *gobj;
+ int r;
++ struct drm_gem_object *local_gobj;
+
+- BUG_ON(!qobj);
+ BUG_ON(!handle);
+
+ r = qxl_gem_object_create(qdev, size, 0,
+ domain,
+ false, false, surf,
+- &gobj);
++ &local_gobj);
+ if (r)
+ return -ENOMEM;
+- r = drm_gem_handle_create(file_priv, gobj, handle);
++ r = drm_gem_handle_create(file_priv, local_gobj, handle);
+ if (r)
+ return r;
+- /* drop reference from allocate - handle holds it now */
+- *qobj = gem_to_qxl_bo(gobj);
+- drm_gem_object_put(gobj);
++
++ if (gobj)
++ *gobj = local_gobj;
++ else
++ /* drop reference from allocate - handle holds it now */
++ drm_gem_object_put(local_gobj);
++
+ return 0;
+ }
+
+diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
+index 30f58b21372aa..dd0f834d881ce 100644
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -38,7 +38,6 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
+ struct qxl_device *qdev = to_qxl(dev);
+ struct drm_qxl_alloc *qxl_alloc = data;
+ int ret;
+- struct qxl_bo *qobj;
+ uint32_t handle;
+ u32 domain = QXL_GEM_DOMAIN_VRAM;
+
+@@ -50,7 +49,7 @@ int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
+ domain,
+ qxl_alloc->size,
+ NULL,
+- &qobj, &handle);
++ NULL, &handle);
+ if (ret) {
+ DRM_ERROR("%s: failed to create gem ret=%d\n",
+ __func__, ret);
+@@ -386,7 +385,6 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
+ {
+ struct qxl_device *qdev = to_qxl(dev);
+ struct drm_qxl_alloc_surf *param = data;
+- struct qxl_bo *qobj;
+ int handle;
+ int ret;
+ int size, actual_stride;
+@@ -406,7 +404,7 @@ int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
+ QXL_GEM_DOMAIN_SURFACE,
+ size,
+ &surf,
+- &qobj, &handle);
++ NULL, &handle);
+ if (ret) {
+ DRM_ERROR("%s: failed to create gem ret=%d\n",
+ __func__, ret);
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+index b7dd59fe119e6..9edb5edb2bad9 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+@@ -223,20 +223,6 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ * DU channels that have a display PLL can't use the internal
+ * system clock, and have no internal clock divider.
+ */
+-
+- /*
+- * The H3 ES1.x exhibits dot clock duty cycle stability issues.
+- * We can work around them by configuring the DPLL to twice the
+- * desired frequency, coupled with a /2 post-divider. Restrict
+- * the workaround to H3 ES1.x as ES2.0 and all other SoCs have
+- * no post-divider when a display PLL is present (as shown by
+- * the workaround breaking HDMI output on M3-W during testing).
+- */
+- if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY) {
+- target *= 2;
+- div = 1;
+- }
+-
+ extclk = clk_get_rate(rcrtc->extclock);
+ rcar_du_dpll_divider(rcrtc, &dpll, extclk, target);
+
+@@ -245,30 +231,13 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
+ | DPLLCR_N(dpll.n) | DPLLCR_M(dpll.m)
+ | DPLLCR_STBY;
+
+- if (rcrtc->index == 1) {
++ if (rcrtc->index == 1)
+ dpllcr |= DPLLCR_PLCS1
+ | DPLLCR_INCS_DOTCLKIN1;
+- } else {
+- dpllcr |= DPLLCR_PLCS0_PLL
++ else
++ dpllcr |= DPLLCR_PLCS0
+ | DPLLCR_INCS_DOTCLKIN0;
+
+- /*
+- * On ES2.x we have a single mux controlled via bit 21,
+- * which selects between DCLKIN source (bit 21 = 0) and
+- * a PLL source (bit 21 = 1), where the PLL is always
+- * PLL1.
+- *
+- * On ES1.x we have an additional mux, controlled
+- * via bit 20, for choosing between PLL0 (bit 20 = 0)
+- * and PLL1 (bit 20 = 1). We always want to use PLL1,
+- * so on ES1.x, in addition to setting bit 21, we need
+- * to set the bit 20.
+- */
+-
+- if (rcdu->info->quirks & RCAR_DU_QUIRK_H3_ES1_PLL)
+- dpllcr |= DPLLCR_PLCS0_H3ES1X_PLL1;
+- }
+-
+ rcar_du_group_write(rcrtc->group, DPLLCR, dpllcr);
+
+ escr = ESCR_DCLKSEL_DCLKIN | div;
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+index 6381578c4db58..bd7003d6e0753 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+@@ -16,7 +16,6 @@
+ #include <linux/platform_device.h>
+ #include <linux/pm.h>
+ #include <linux/slab.h>
+-#include <linux/sys_soc.h>
+ #include <linux/wait.h>
+
+ #include <drm/drm_atomic_helper.h>
+@@ -387,43 +386,6 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
+ .dpll_mask = BIT(2) | BIT(1),
+ };
+
+-static const struct rcar_du_device_info rcar_du_r8a7795_es1_info = {
+- .gen = 3,
+- .features = RCAR_DU_FEATURE_CRTC_IRQ
+- | RCAR_DU_FEATURE_CRTC_CLOCK
+- | RCAR_DU_FEATURE_VSP1_SOURCE
+- | RCAR_DU_FEATURE_INTERLACED
+- | RCAR_DU_FEATURE_TVM_SYNC,
+- .quirks = RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY
+- | RCAR_DU_QUIRK_H3_ES1_PLL,
+- .channels_mask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+- .routes = {
+- /*
+- * R8A7795 has one RGB output, two HDMI outputs and one
+- * LVDS output.
+- */
+- [RCAR_DU_OUTPUT_DPAD0] = {
+- .possible_crtcs = BIT(3),
+- .port = 0,
+- },
+- [RCAR_DU_OUTPUT_HDMI0] = {
+- .possible_crtcs = BIT(1),
+- .port = 1,
+- },
+- [RCAR_DU_OUTPUT_HDMI1] = {
+- .possible_crtcs = BIT(2),
+- .port = 2,
+- },
+- [RCAR_DU_OUTPUT_LVDS0] = {
+- .possible_crtcs = BIT(0),
+- .port = 3,
+- },
+- },
+- .num_lvds = 1,
+- .num_rpf = 5,
+- .dpll_mask = BIT(2) | BIT(1),
+-};
+-
+ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+@@ -592,11 +554,6 @@ static const struct of_device_id rcar_du_of_table[] = {
+
+ MODULE_DEVICE_TABLE(of, rcar_du_of_table);
+
+-static const struct soc_device_attribute rcar_du_soc_table[] = {
+- { .soc_id = "r8a7795", .revision = "ES1.*", .data = &rcar_du_r8a7795_es1_info },
+- { /* sentinel */ }
+-};
+-
+ const char *rcar_du_output_name(enum rcar_du_output output)
+ {
+ static const char * const names[] = {
+@@ -688,7 +645,6 @@ static void rcar_du_shutdown(struct platform_device *pdev)
+
+ static int rcar_du_probe(struct platform_device *pdev)
+ {
+- const struct soc_device_attribute *soc_attr;
+ struct rcar_du_device *rcdu;
+ unsigned int mask;
+ int ret;
+@@ -706,10 +662,6 @@ static int rcar_du_probe(struct platform_device *pdev)
+
+ rcdu->info = of_device_get_match_data(rcdu->dev);
+
+- soc_attr = soc_device_match(rcar_du_soc_table);
+- if (soc_attr)
+- rcdu->info = soc_attr->data;
+-
+ platform_set_drvdata(pdev, rcdu);
+
+ /* I/O resources */
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+index acc3673fefe18..5cfa2bb7ad93d 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+@@ -34,8 +34,6 @@ struct rcar_du_device;
+ #define RCAR_DU_FEATURE_NO_BLENDING BIT(5) /* PnMR.SPIM does not have ALP nor EOR bits */
+
+ #define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
+-#define RCAR_DU_QUIRK_H3_ES1_PCLK_STABILITY BIT(1) /* H3 ES1 has pclk stability issue */
+-#define RCAR_DU_QUIRK_H3_ES1_PLL BIT(2) /* H3 ES1 PLL setup differs from non-ES1 */
+
+ enum rcar_du_output {
+ RCAR_DU_OUTPUT_DPAD0,
+diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+index 789ae9285108e..288eff12b2b1a 100644
+--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
++++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+@@ -283,8 +283,7 @@
+ #define DPLLCR 0x20044
+ #define DPLLCR_CODE (0x95 << 24)
+ #define DPLLCR_PLCS1 (1 << 23)
+-#define DPLLCR_PLCS0_PLL (1 << 21)
+-#define DPLLCR_PLCS0_H3ES1X_PLL1 (1 << 20)
++#define DPLLCR_PLCS0 (1 << 21)
+ #define DPLLCR_CLKE (1 << 18)
+ #define DPLLCR_FDPLL(n) ((n) << 12)
+ #define DPLLCR_N(n) ((n) << 5)
+diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
+index 03c6becda795c..b8be4c1db4235 100644
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -1145,7 +1145,7 @@ static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
+
+ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
+ {
+- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
++ struct ltdc_device *ldev;
+ int ret;
+
+ DRM_DEBUG_DRIVER("\n");
+@@ -1153,6 +1153,8 @@ static int ltdc_crtc_set_crc_source(struct drm_crtc *crtc, const char *source)
+ if (!crtc)
+ return -ENODEV;
+
++ ldev = crtc_to_ltdc(crtc);
++
+ if (source && strcmp(source, "auto") == 0) {
+ ldev->crc_active = true;
+ ret = regmap_set_bits(ldev->regmap, LTDC_GCR, GCR_CRCEN);
+diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
+index 0b4204b9a253c..97eefb77f6014 100644
+--- a/drivers/hid/hid-logitech-hidpp.c
++++ b/drivers/hid/hid-logitech-hidpp.c
+@@ -4403,6 +4403,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) },
+ { /* Logitech G903 Hero Gaming Mouse over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) },
++ { /* Logitech G915 TKL Keyboard over USB */
++ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC343) },
+ { /* Logitech G920 Wheel over USB */
+ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL),
+ .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS},
+@@ -4418,6 +4420,8 @@ static const struct hid_device_id hidpp_devices[] = {
+ { /* MX5500 keyboard over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
+ .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
++ { /* Logitech G915 TKL keyboard over Bluetooth */
++ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb35f) },
+ { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) },
+ { /* MX Master mouse over Bluetooth */
+diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+index fc108f19a64c3..e99f3a3c65e15 100644
+--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
++++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+@@ -33,6 +33,7 @@
+ #define ADL_N_DEVICE_ID 0x54FC
+ #define RPL_S_DEVICE_ID 0x7A78
+ #define MTL_P_DEVICE_ID 0x7E45
++#define ARL_H_DEVICE_ID 0x7745
+
+ #define REVISION_ID_CHT_A0 0x6
+ #define REVISION_ID_CHT_Ax_SI 0x0
+diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+index 7120b30ac51d0..55cb25038e632 100644
+--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
++++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+@@ -44,6 +44,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ADL_N_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, RPL_S_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MTL_P_DEVICE_ID)},
++ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, ARL_H_DEVICE_ID)},
+ {0, }
+ };
+ MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
+diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
+index 85d8a6b048856..30a2a3200bed9 100644
+--- a/drivers/i2c/busses/i2c-bcm-iproc.c
++++ b/drivers/i2c/busses/i2c-bcm-iproc.c
+@@ -233,13 +233,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset)
+ {
+ u32 val;
++ unsigned long flags;
+
+ if (iproc_i2c->idm_base) {
+- spin_lock(&iproc_i2c->idm_lock);
++ spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ val = readl(iproc_i2c->base + offset);
+- spin_unlock(&iproc_i2c->idm_lock);
++ spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
+ } else {
+ val = readl(iproc_i2c->base + offset);
+ }
+@@ -250,12 +251,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c,
+ u32 offset, u32 val)
+ {
++ unsigned long flags;
++
+ if (iproc_i2c->idm_base) {
+- spin_lock(&iproc_i2c->idm_lock);
++ spin_lock_irqsave(&iproc_i2c->idm_lock, flags);
+ writel(iproc_i2c->ape_addr_mask,
+ iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET);
+ writel(val, iproc_i2c->base + offset);
+- spin_unlock(&iproc_i2c->idm_lock);
++ spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags);
+ } else {
+ writel(val, iproc_i2c->base + offset);
+ }
+diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
+index dc3c5a15a95b9..004ccb2d9f369 100644
+--- a/drivers/i2c/busses/i2c-designware-master.c
++++ b/drivers/i2c/busses/i2c-designware-master.c
+@@ -525,9 +525,21 @@ i2c_dw_read(struct dw_i2c_dev *dev)
+ u32 flags = msgs[dev->msg_read_idx].flags;
+
+ regmap_read(dev->map, DW_IC_DATA_CMD, &tmp);
++ tmp &= DW_IC_DATA_CMD_DAT;
+ /* Ensure length byte is a valid value */
+- if (flags & I2C_M_RECV_LEN &&
+- (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) {
++ if (flags & I2C_M_RECV_LEN) {
++ /*
++ * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be
++ * detected from the registers, the controller can be
++ * disabled if the STOP bit is set. But it is only set
++ * after receiving block data response length in
++ * I2C_FUNC_SMBUS_BLOCK_DATA case. That needs to read
++ * another byte with STOP bit set when the block data
++ * response length is invalid to complete the transaction.
++ */
++ if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX)
++ tmp = 1;
++
+ len = i2c_dw_recv_len(dev, tmp);
+ }
+ *buf++ = tmp;
+diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
+index 8a61bee745a16..14ec9ee8f6f35 100644
+--- a/drivers/i2c/busses/i2c-hisi.c
++++ b/drivers/i2c/busses/i2c-hisi.c
+@@ -328,6 +328,14 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context)
+ struct hisi_i2c_controller *ctlr = context;
+ u32 int_stat;
+
++ /*
++ * Don't handle the interrupt if cltr->completion is NULL. We may
++ * reach here because the interrupt is spurious or the transfer is
++ * started by another port (e.g. firmware) rather than us.
++ */
++ if (!ctlr->completion)
++ return IRQ_NONE;
++
+ int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT);
+ hisi_i2c_clear_int(ctlr, int_stat);
+ if (!(int_stat & HISI_I2C_INT_ALL))
+diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
+index 2bc40f957e509..aa469b33ee2ee 100644
+--- a/drivers/i2c/busses/i2c-tegra.c
++++ b/drivers/i2c/busses/i2c-tegra.c
+@@ -449,7 +449,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev)
+ if (i2c_dev->is_vi)
+ return 0;
+
+- if (!i2c_dev->hw->has_apb_dma) {
++ if (i2c_dev->hw->has_apb_dma) {
+ if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) {
+ dev_dbg(i2c_dev->dev, "APB DMA support not enabled\n");
+ return 0;
+diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
+index 542e4c63a8de6..d4e7864c56f18 100644
+--- a/drivers/infiniband/hw/mlx5/qpc.c
++++ b/drivers/infiniband/hw/mlx5/qpc.c
+@@ -297,8 +297,7 @@ int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
+ MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+ MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
+ MLX5_SET(destroy_qp_in, in, uid, qp->uid);
+- mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
+- return 0;
++ return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
+ }
+
+ int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
+@@ -548,14 +547,14 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
+ return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
+ }
+
+-static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
++static int destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
+ {
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+ MLX5_SET(destroy_rq_in, in, uid, uid);
+- mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
++ return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
+ }
+
+ int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
+@@ -586,8 +585,7 @@ int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
+ struct mlx5_core_qp *rq)
+ {
+ destroy_resource_common(dev, rq);
+- destroy_rq_tracked(dev, rq->qpn, rq->uid);
+- return 0;
++ return destroy_rq_tracked(dev, rq->qpn, rq->uid);
+ }
+
+ static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
+diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
+index 5ecc17240eff5..f5e9377b55212 100644
+--- a/drivers/iommu/amd/amd_iommu_types.h
++++ b/drivers/iommu/amd/amd_iommu_types.h
+@@ -172,6 +172,7 @@
+ #define CONTROL_GAINT_EN 29
+ #define CONTROL_XT_EN 50
+ #define CONTROL_INTCAPXT_EN 51
++#define CONTROL_IRTCACHEDIS 59
+ #define CONTROL_SNPAVIC_EN 61
+
+ #define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
+@@ -708,6 +709,9 @@ struct amd_iommu {
+ /* if one, we need to send a completion wait command */
+ bool need_sync;
+
++ /* true if disable irte caching */
++ bool irtcachedis_enabled;
++
+ /* Handle for IOMMU core code */
+ struct iommu_device iommu;
+
+diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
+index b0af8b5967e0d..f6e64c9858021 100644
+--- a/drivers/iommu/amd/init.c
++++ b/drivers/iommu/amd/init.c
+@@ -160,6 +160,7 @@ static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;
+ static bool amd_iommu_detected;
+ static bool amd_iommu_disabled __initdata;
+ static bool amd_iommu_force_enable __initdata;
++static bool amd_iommu_irtcachedis;
+ static int amd_iommu_target_ivhd_type;
+
+ /* Global EFR and EFR2 registers */
+@@ -477,6 +478,9 @@ static void iommu_disable(struct amd_iommu *iommu)
+
+ /* Disable IOMMU hardware itself */
+ iommu_feature_disable(iommu, CONTROL_IOMMU_EN);
++
++ /* Clear IRTE cache disabling bit */
++ iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
+ }
+
+ /*
+@@ -2700,6 +2704,33 @@ static void iommu_enable_ga(struct amd_iommu *iommu)
+ #endif
+ }
+
++static void iommu_disable_irtcachedis(struct amd_iommu *iommu)
++{
++ iommu_feature_disable(iommu, CONTROL_IRTCACHEDIS);
++}
++
++static void iommu_enable_irtcachedis(struct amd_iommu *iommu)
++{
++ u64 ctrl;
++
++ if (!amd_iommu_irtcachedis)
++ return;
++
++ /*
++ * Note:
++ * The support for IRTCacheDis feature is dertermined by
++ * checking if the bit is writable.
++ */
++ iommu_feature_enable(iommu, CONTROL_IRTCACHEDIS);
++ ctrl = readq(iommu->mmio_base + MMIO_CONTROL_OFFSET);
++ ctrl &= (1ULL << CONTROL_IRTCACHEDIS);
++ if (ctrl)
++ iommu->irtcachedis_enabled = true;
++ pr_info("iommu%d (%#06x) : IRT cache is %s\n",
++ iommu->index, iommu->devid,
++ iommu->irtcachedis_enabled ? "disabled" : "enabled");
++}
++
+ static void early_enable_iommu(struct amd_iommu *iommu)
+ {
+ iommu_disable(iommu);
+@@ -2710,6 +2741,7 @@ static void early_enable_iommu(struct amd_iommu *iommu)
+ iommu_set_exclusion_range(iommu);
+ iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
++ iommu_enable_irtcachedis(iommu);
+ iommu_enable(iommu);
+ iommu_flush_all_caches(iommu);
+ }
+@@ -2760,10 +2792,12 @@ static void early_enable_iommus(void)
+ for_each_iommu(iommu) {
+ iommu_disable_command_buffer(iommu);
+ iommu_disable_event_buffer(iommu);
++ iommu_disable_irtcachedis(iommu);
+ iommu_enable_command_buffer(iommu);
+ iommu_enable_event_buffer(iommu);
+ iommu_enable_ga(iommu);
+ iommu_enable_xt(iommu);
++ iommu_enable_irtcachedis(iommu);
+ iommu_set_device_table(iommu);
+ iommu_flush_all_caches(iommu);
+ }
+@@ -3411,6 +3445,8 @@ static int __init parse_amd_iommu_options(char *str)
+ amd_iommu_pgtable = AMD_IOMMU_V1;
+ } else if (strncmp(str, "pgtbl_v2", 8) == 0) {
+ amd_iommu_pgtable = AMD_IOMMU_V2;
++ } else if (strncmp(str, "irtcachedis", 11) == 0) {
++ amd_iommu_irtcachedis = true;
+ } else {
+ pr_notice("Unknown option - '%s'\n", str);
+ }
+diff --git a/drivers/leds/rgb/leds-qcom-lpg.c b/drivers/leds/rgb/leds-qcom-lpg.c
+index f1c2419334e6f..f85a5d65d1314 100644
+--- a/drivers/leds/rgb/leds-qcom-lpg.c
++++ b/drivers/leds/rgb/leds-qcom-lpg.c
+@@ -1112,8 +1112,10 @@ static int lpg_add_led(struct lpg *lpg, struct device_node *np)
+ i = 0;
+ for_each_available_child_of_node(np, child) {
+ ret = lpg_parse_channel(lpg, child, &led->channels[i]);
+- if (ret < 0)
++ if (ret < 0) {
++ of_node_put(child);
+ return ret;
++ }
+
+ info[i].color_index = led->channels[i]->color;
+ info[i].intensity = 0;
+@@ -1291,8 +1293,10 @@ static int lpg_probe(struct platform_device *pdev)
+
+ for_each_available_child_of_node(pdev->dev.of_node, np) {
+ ret = lpg_add_led(lpg, np);
+- if (ret)
++ if (ret) {
++ of_node_put(np);
+ return ret;
++ }
+ }
+
+ for (i = 0; i < lpg->num_channels; i++)
+diff --git a/drivers/media/platform/mediatek/vpu/mtk_vpu.c b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+index 47b684b92f817..6beab9e86a22a 100644
+--- a/drivers/media/platform/mediatek/vpu/mtk_vpu.c
++++ b/drivers/media/platform/mediatek/vpu/mtk_vpu.c
+@@ -562,15 +562,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu,
+ int vpu_load_firmware(struct platform_device *pdev)
+ {
+ struct mtk_vpu *vpu;
+- struct device *dev = &pdev->dev;
++ struct device *dev;
+ struct vpu_run *run;
+ int ret;
+
+ if (!pdev) {
+- dev_err(dev, "VPU platform device is invalid\n");
++ pr_err("VPU platform device is invalid\n");
+ return -EINVAL;
+ }
+
++ dev = &pdev->dev;
++
+ vpu = platform_get_drvdata(pdev);
+ run = &vpu->run;
+
+diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
+index a26e4a5d87b6b..d8cd9b09c20de 100644
+--- a/drivers/media/platform/qcom/camss/camss-vfe.c
++++ b/drivers/media/platform/qcom/camss/camss-vfe.c
+@@ -1540,7 +1540,11 @@ int msm_vfe_register_entities(struct vfe_device *vfe,
+ }
+
+ video_out->ops = &vfe->video_ops;
+- video_out->bpl_alignment = 8;
++ if (vfe->camss->version == CAMSS_845 ||
++ vfe->camss->version == CAMSS_8250)
++ video_out->bpl_alignment = 16;
++ else
++ video_out->bpl_alignment = 8;
+ video_out->line_based = 0;
+ if (i == VFE_LINE_PIX) {
+ video_out->bpl_alignment = 16;
+diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
+index e0dca445abf14..9ee1b6abd8a05 100644
+--- a/drivers/misc/habanalabs/common/device.c
++++ b/drivers/misc/habanalabs/common/device.c
+@@ -870,6 +870,18 @@ static void device_early_fini(struct hl_device *hdev)
+ hdev->asic_funcs->early_fini(hdev);
+ }
+
++static bool is_pci_link_healthy(struct hl_device *hdev)
++{
++ u16 vendor_id;
++
++ if (!hdev->pdev)
++ return false;
++
++ pci_read_config_word(hdev->pdev, PCI_VENDOR_ID, &vendor_id);
++
++ return (vendor_id == PCI_VENDOR_ID_HABANALABS);
++}
++
+ static void hl_device_heartbeat(struct work_struct *work)
+ {
+ struct hl_device *hdev = container_of(work, struct hl_device,
+@@ -882,7 +894,8 @@ static void hl_device_heartbeat(struct work_struct *work)
+ goto reschedule;
+
+ if (hl_device_operational(hdev, NULL))
+- dev_err(hdev->dev, "Device heartbeat failed!\n");
++ dev_err(hdev->dev, "Device heartbeat failed! PCI link is %s\n",
++ is_pci_link_healthy(hdev) ? "healthy" : "broken");
+
+ hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
+
+diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
+index 58c95b13be69a..257b94cec6248 100644
+--- a/drivers/misc/habanalabs/common/habanalabs.h
++++ b/drivers/misc/habanalabs/common/habanalabs.h
+@@ -34,6 +34,8 @@
+ struct hl_device;
+ struct hl_fpriv;
+
++#define PCI_VENDOR_ID_HABANALABS 0x1da3
++
+ /* Use upper bits of mmap offset to store habana driver specific information.
+ * bits[63:59] - Encode mmap type
+ * bits[45:0] - mmap offset value
+diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
+index 112632afe7d53..ae3cab3f4aa55 100644
+--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
++++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
+@@ -54,8 +54,6 @@ module_param(boot_error_status_mask, ulong, 0444);
+ MODULE_PARM_DESC(boot_error_status_mask,
+ "Mask of the error status during device CPU boot (If bitX is cleared then error X is masked. Default all 1's)");
+
+-#define PCI_VENDOR_ID_HABANALABS 0x1da3
+-
+ #define PCI_IDS_GOYA 0x0001
+ #define PCI_IDS_GAUDI 0x1000
+ #define PCI_IDS_GAUDI_SEC 0x1010
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 498333b769fdb..cdd7f126d4aea 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -2097,14 +2097,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq,
+ mmc_blk_urgent_bkops(mq, mqrq);
+ }
+
+-static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
++static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type)
+ {
+ unsigned long flags;
+ bool put_card;
+
+ spin_lock_irqsave(&mq->lock, flags);
+
+- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
++ mq->in_flight[issue_type] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+@@ -2117,6 +2117,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
+ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ bool can_sleep)
+ {
++ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct mmc_host *host = mq->card->host;
+@@ -2136,7 +2137,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ blk_mq_complete_request(req);
+ }
+
+- mmc_blk_mq_dec_in_flight(mq, req);
++ mmc_blk_mq_dec_in_flight(mq, issue_type);
+ }
+
+ void mmc_blk_mq_recovery(struct mmc_queue *mq)
+diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c
+index 6c4f43e112826..7ede74bf37230 100644
+--- a/drivers/mmc/host/sdhci_f_sdh30.c
++++ b/drivers/mmc/host/sdhci_f_sdh30.c
+@@ -26,9 +26,16 @@ struct f_sdhost_priv {
+ bool enable_cmd_dat_delay;
+ };
+
++static void *sdhci_f_sdhost_priv(struct sdhci_host *host)
++{
++ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
++
++ return sdhci_pltfm_priv(pltfm_host);
++}
++
+ static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host)
+ {
+- struct f_sdhost_priv *priv = sdhci_priv(host);
++ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
+ u32 ctrl = 0;
+
+ usleep_range(2500, 3000);
+@@ -61,7 +68,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host)
+
+ static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask)
+ {
+- struct f_sdhost_priv *priv = sdhci_priv(host);
++ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
+ u32 ctl;
+
+ if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0)
+@@ -85,30 +92,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = {
+ .set_uhs_signaling = sdhci_set_uhs_signaling,
+ };
+
++static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = {
++ .ops = &sdhci_f_sdh30_ops,
++ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC
++ | SDHCI_QUIRK_INVERTED_WRITE_PROTECT,
++ .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE
++ | SDHCI_QUIRK2_TUNING_WORK_AROUND,
++};
++
+ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+ {
+ struct sdhci_host *host;
+ struct device *dev = &pdev->dev;
+- int irq, ctrl = 0, ret = 0;
++ int ctrl = 0, ret = 0;
+ struct f_sdhost_priv *priv;
++ struct sdhci_pltfm_host *pltfm_host;
+ u32 reg = 0;
+
+- irq = platform_get_irq(pdev, 0);
+- if (irq < 0)
+- return irq;
+-
+- host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv));
++ host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data,
++ sizeof(struct f_sdhost_priv));
+ if (IS_ERR(host))
+ return PTR_ERR(host);
+
+- priv = sdhci_priv(host);
++ pltfm_host = sdhci_priv(host);
++ priv = sdhci_pltfm_priv(pltfm_host);
+ priv->dev = dev;
+
+- host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
+- SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+- host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE |
+- SDHCI_QUIRK2_TUNING_WORK_AROUND;
+-
+ priv->enable_cmd_dat_delay = device_property_read_bool(dev,
+ "fujitsu,cmd-dat-delay-select");
+
+@@ -116,18 +125,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev)
+ if (ret)
+ goto err;
+
+- platform_set_drvdata(pdev, host);
+-
+- host->hw_name = "f_sdh30";
+- host->ops = &sdhci_f_sdh30_ops;
+- host->irq = irq;
+-
+- host->ioaddr = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(host->ioaddr)) {
+- ret = PTR_ERR(host->ioaddr);
+- goto err;
+- }
+-
+ if (dev_of_node(dev)) {
+ sdhci_get_of_property(pdev);
+
+@@ -182,23 +179,22 @@ err_add_host:
+ err_clk:
+ clk_disable_unprepare(priv->clk_iface);
+ err:
+- sdhci_free_host(host);
++ sdhci_pltfm_free(pdev);
++
+ return ret;
+ }
+
+ static int sdhci_f_sdh30_remove(struct platform_device *pdev)
+ {
+ struct sdhci_host *host = platform_get_drvdata(pdev);
+- struct f_sdhost_priv *priv = sdhci_priv(host);
+-
+- sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) ==
+- 0xffffffff);
++ struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host);
++ struct clk *clk_iface = priv->clk_iface;
++ struct clk *clk = priv->clk;
+
+- clk_disable_unprepare(priv->clk_iface);
+- clk_disable_unprepare(priv->clk);
++ sdhci_pltfm_unregister(pdev);
+
+- sdhci_free_host(host);
+- platform_set_drvdata(pdev, NULL);
++ clk_disable_unprepare(clk_iface);
++ clk_disable_unprepare(clk);
+
+ return 0;
+ }
+diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c
+index 7c7ec8d10232b..b5b1a42ca25e1 100644
+--- a/drivers/mmc/host/wbsd.c
++++ b/drivers/mmc/host/wbsd.c
+@@ -1705,8 +1705,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma,
+
+ wbsd_release_resources(host);
+ wbsd_free_mmc(dev);
+-
+- mmc_free_host(mmc);
+ return ret;
+ }
+
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index b69bd44ada1f2..a73008b9e0b3c 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -3006,6 +3006,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
+
+ /* If there is a GPIO connected to the reset pin, toggle it */
+ if (gpiod) {
++ /* If the switch has just been reset and not yet completed
++ * loading EEPROM, the reset may interrupt the I2C transaction
++ * mid-byte, causing the first EEPROM read after the reset
++ * from the wrong location resulting in the switch booting
++ * to wrong mode and inoperable.
++ */
++ mv88e6xxx_g1_wait_eeprom_done(chip);
++
+ gpiod_set_value_cansleep(gpiod, 1);
+ usleep_range(10000, 20000);
+ gpiod_set_value_cansleep(gpiod, 0);
+diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
+index abd6cc0cd641f..5fb991835078a 100644
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -5070,6 +5070,9 @@ static int __maybe_unused macb_suspend(struct device *dev)
+ unsigned int q;
+ int err;
+
++ if (!device_may_wakeup(&bp->dev->dev))
++ phy_exit(bp->sgmii_phy);
++
+ if (!netif_running(netdev))
+ return 0;
+
+@@ -5130,7 +5133,6 @@ static int __maybe_unused macb_suspend(struct device *dev)
+ if (!(bp->wol & MACB_WOL_ENABLED)) {
+ rtnl_lock();
+ phylink_stop(bp->phylink);
+- phy_exit(bp->sgmii_phy);
+ rtnl_unlock();
+ spin_lock_irqsave(&bp->lock, flags);
+ macb_reset_hw(bp);
+@@ -5160,6 +5162,9 @@ static int __maybe_unused macb_resume(struct device *dev)
+ unsigned int q;
+ int err;
+
++ if (!device_may_wakeup(&bp->dev->dev))
++ phy_init(bp->sgmii_phy);
++
+ if (!netif_running(netdev))
+ return 0;
+
+@@ -5220,8 +5225,6 @@ static int __maybe_unused macb_resume(struct device *dev)
+ macb_set_rx_mode(netdev);
+ macb_restore_features(bp);
+ rtnl_lock();
+- if (!device_may_wakeup(&bp->dev->dev))
+- phy_init(bp->sgmii_phy);
+
+ phylink_start(bp->phylink);
+ rtnl_unlock();
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+index 17e3f26eee4a4..779ba907009a5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+@@ -210,11 +210,11 @@ read_nvm_exit:
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+- * @words: number of words to write
+- * @data: buffer with words to write to the Shadow RAM
++ * @words: number of words to read
++ * @data: buffer with words to read to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+- * Writes a 16 bit words buffer to the Shadow RAM using the admin command.
++ * Reads a 16 bit words buffer to the Shadow RAM using the admin command.
+ **/
+ static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ u8 module_pointer, u32 offset,
+@@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw,
+ */
+ if ((offset + words) > hw->nvm.sr_size)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write error: offset %d beyond Shadow RAM limit %d\n",
++ "NVM read error: offset %d beyond Shadow RAM limit %d\n",
+ (offset + words), hw->nvm.sr_size);
+ else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+- /* We can write only up to 4KB (one sector), in one AQ write */
++ /* We can read only up to 4KB (one sector), in one AQ write */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write fail error: tried to write %d words, limit is %d.\n",
++ "NVM read fail error: tried to read %d words, limit is %d.\n",
+ words, I40E_SR_SECTOR_SIZE_IN_WORDS);
+ else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+ != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+- /* A single write cannot spread over two sectors */
++ /* A single read cannot spread over two sectors */
+ i40e_debug(hw, I40E_DEBUG_NVM,
+- "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n",
++ "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n",
+ offset, words);
+ else
+ ret_code = i40e_aq_read_nvm(hw, module_pointer,
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+index f544d2b0abdbd..fe912b1c468ef 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+@@ -1289,6 +1289,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos;
++ fltr->ip_ver = 4;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+@@ -1300,6 +1301,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst;
+ fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos;
++ fltr->ip_ver = 4;
+ break;
+ case IPV4_USER_FLOW:
+ fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src;
+@@ -1312,6 +1314,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes;
+ fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto;
++ fltr->ip_ver = 4;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+@@ -1330,6 +1333,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass;
++ fltr->ip_ver = 6;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+@@ -1345,6 +1349,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ sizeof(struct in6_addr));
+ fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi;
+ fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass;
++ fltr->ip_ver = 6;
+ break;
+ case IPV6_USER_FLOW:
+ memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+@@ -1361,6 +1366,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes;
+ fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass;
+ fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto;
++ fltr->ip_ver = 6;
+ break;
+ case ETHER_FLOW:
+ fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto;
+@@ -1371,6 +1377,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe
+ return -EINVAL;
+ }
+
++ err = iavf_validate_fdir_fltr_masks(adapter, fltr);
++ if (err)
++ return err;
++
+ if (iavf_fdir_is_dup_fltr(adapter, fltr))
+ return -EEXIST;
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+index 505e82ebafe47..03e774bd2a5b4 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+@@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = {
+ }
+ };
+
++static const struct in6_addr ipv6_addr_zero_mask = {
++ .in6_u = {
++ .u6_addr8 = {
++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++ }
++ }
++};
++
++/**
++ * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks
++ * @adapter: pointer to the VF adapter structure
++ * @fltr: Flow Director filter data structure
++ *
++ * Returns 0 if all masks of packet fields are either full or empty. Returns
++ * error on at least one partial mask.
++ */
++int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
++ struct iavf_fdir_fltr *fltr)
++{
++ if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_ver == 4) {
++ if (fltr->ip_mask.v4_addrs.src_ip &&
++ fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.v4_addrs.dst_ip &&
++ fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX)
++ goto partial_mask;
++ } else if (fltr->ip_ver == 6) {
++ if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask,
++ sizeof(struct in6_addr)) &&
++ memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask,
++ sizeof(struct in6_addr)))
++ goto partial_mask;
++
++ if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask,
++ sizeof(struct in6_addr)) &&
++ memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask,
++ sizeof(struct in6_addr)))
++ goto partial_mask;
++
++ if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX)
++ goto partial_mask;
++ }
++
++ if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX)
++ goto partial_mask;
++
++ if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX))
++ goto partial_mask;
++
++ if (fltr->ip_mask.l4_header &&
++ fltr->ip_mask.l4_header != htonl(U32_MAX))
++ goto partial_mask;
++
++ return 0;
++
++partial_mask:
++ dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n");
++ return -EOPNOTSUPP;
++}
++
+ /**
+ * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload
+ * @fltr: Flow Director filter data structure
+@@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr,
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+ }
+
+- fltr->ip_ver = 4;
+-
+ return 0;
+ }
+
+@@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr,
+ VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+ }
+
+- fltr->ip_ver = 6;
+-
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+index 33c55c366315b..9eb9f73f6adf3 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h
++++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h
+@@ -110,6 +110,8 @@ struct iavf_fdir_fltr {
+ struct virtchnl_fdir_add vc_add_msg;
+ };
+
++int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter,
++ struct iavf_fdir_fltr *fltr);
+ int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr);
+diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+index f9f15acae90a0..2ffe5708a045b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
++++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
+@@ -562,6 +562,12 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ {
++ if (ice_is_adq_active(pf)) {
++ dev_err(ice_pf_to_dev(pf), "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
++ NL_SET_ERR_MSG_MOD(extack, "Couldn't change eswitch mode to switchdev - ADQ is active. Delete ADQ configs and try again, e.g. tc qdisc del dev $PF root");
++ return -EOPNOTSUPP;
++ }
++
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index a771e597795d3..7a00d297be3a9 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -8787,6 +8787,11 @@ ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ ice_setup_tc_block_cb,
+ np, np, true);
+ case TC_SETUP_QDISC_MQPRIO:
++ if (ice_is_eswitch_mode_switchdev(pf)) {
++ netdev_err(netdev, "TC MQPRIO offload not supported, switchdev is enabled\n");
++ return -EOPNOTSUPP;
++ }
++
+ if (pf->adev) {
+ mutex_lock(&pf->adev_mutex);
+ device_lock(&pf->adev->dev);
+diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h
+index ce530f5fd7bda..52849f5e8048d 100644
+--- a/drivers/net/ethernet/intel/igc/igc_base.h
++++ b/drivers/net/ethernet/intel/igc/igc_base.h
+@@ -85,8 +85,13 @@ union igc_adv_rx_desc {
+ #define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */
+
+ /* SRRCTL bit definitions */
+-#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */
+-#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */
+-#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
++#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0)
++#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \
++ (x) / 1024) /* in 1 KB resolution */
++#define IGC_SRRCTL_BSIZEHDR_MASK GENMASK(13, 8)
++#define IGC_SRRCTL_BSIZEHDR(x) FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \
++ (x) / 64) /* in 64 bytes resolution */
++#define IGC_SRRCTL_DESCTYPE_MASK GENMASK(27, 25)
++#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1)
+
+ #endif /* _IGC_BASE_H */
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index d877dc0f87f71..2f3947cf513bd 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -675,8 +675,11 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter,
+ else
+ buf_size = IGC_RXBUFFER_2048;
+
+- srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT;
+- srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT;
++ srrctl = rd32(IGC_SRRCTL(reg_idx));
++ srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK |
++ IGC_SRRCTL_DESCTYPE_MASK);
++ srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN);
++ srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size);
+ srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ wr32(IGC_SRRCTL(reg_idx), srrctl);
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 8979dd05e873f..d4ec46d1c8cfb 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -1121,12 +1121,12 @@ static void octep_remove(struct pci_dev *pdev)
+ if (!oct)
+ return;
+
+- cancel_work_sync(&oct->tx_timeout_task);
+ cancel_work_sync(&oct->ctrl_mbox_task);
+ netdev = oct->netdev;
+ if (netdev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(netdev);
+
++ cancel_work_sync(&oct->tx_timeout_task);
+ octep_device_cleanup(oct);
+ pci_release_mem_regions(pdev);
+ free_netdev(netdev);
+diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
+index e8d427c7d1cff..dc43e74147fbf 100644
+--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
++++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
+@@ -177,6 +177,15 @@ static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+ }
+ #endif
+
++static int __maybe_unused qede_suspend(struct device *dev)
++{
++ dev_info(dev, "Device does not support suspend operation\n");
++
++ return -EOPNOTSUPP;
++}
++
++static DEFINE_SIMPLE_DEV_PM_OPS(qede_pm_ops, qede_suspend, NULL);
++
+ static const struct pci_error_handlers qede_err_handler = {
+ .error_detected = qede_io_error_detected,
+ };
+@@ -191,6 +200,7 @@ static struct pci_driver qede_pci_driver = {
+ .sriov_configure = qede_sriov_configure,
+ #endif
+ .err_handler = &qede_err_handler,
++ .driver.pm = &qede_pm_ops,
+ };
+
+ static struct qed_eth_cb_ops qede_ll_ops = {
+diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
+index 3478860d40232..d312147cd2dd7 100644
+--- a/drivers/net/ethernet/sfc/tc.c
++++ b/drivers/net/ethernet/sfc/tc.c
+@@ -603,10 +603,10 @@ int efx_init_tc(struct efx_nic *efx)
+ rc = efx_tc_configure_rep_mport(efx);
+ if (rc)
+ return rc;
+- efx->tc->up = true;
+ rc = flow_indr_dev_register(efx_tc_indr_setup_cb, efx);
+ if (rc)
+ return rc;
++ efx->tc->up = true;
+ return 0;
+ }
+
+diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
+index c1424119e8212..847ab37f13671 100644
+--- a/drivers/net/pcs/pcs-rzn1-miic.c
++++ b/drivers/net/pcs/pcs-rzn1-miic.c
+@@ -317,15 +317,21 @@ struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
+
+ pdev = of_find_device_by_node(pcs_np);
+ of_node_put(pcs_np);
+- if (!pdev || !platform_get_drvdata(pdev))
++ if (!pdev || !platform_get_drvdata(pdev)) {
++ if (pdev)
++ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
++ }
+
+ miic_port = kzalloc(sizeof(*miic_port), GFP_KERNEL);
+- if (!miic_port)
++ if (!miic_port) {
++ put_device(&pdev->dev);
+ return ERR_PTR(-ENOMEM);
++ }
+
+ miic = platform_get_drvdata(pdev);
+ device_link_add(dev, miic->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
++ put_device(&pdev->dev);
+
+ miic_port->miic = miic;
+ miic_port->port = port - 1;
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index 61824a463df85..edd4b1e58d965 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -305,7 +305,6 @@ struct at803x_priv {
+ bool is_1000basex;
+ struct regulator_dev *vddio_rdev;
+ struct regulator_dev *vddh_rdev;
+- struct regulator *vddio;
+ u64 stats[ARRAY_SIZE(at803x_hw_stats)];
+ };
+
+@@ -461,21 +460,27 @@ static int at803x_set_wol(struct phy_device *phydev,
+ phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
+ mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
+
+- /* Enable WOL function */
+- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
+- 0, AT803X_WOL_EN);
+- if (ret)
+- return ret;
++ /* Enable WOL function for 1588 */
++ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
++ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++ AT803X_PHY_MMD3_WOL_CTRL,
++ 0, AT803X_WOL_EN);
++ if (ret)
++ return ret;
++ }
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
+ if (ret)
+ return ret;
+ } else {
+- /* Disable WoL function */
+- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
+- AT803X_WOL_EN, 0);
+- if (ret)
+- return ret;
++ /* Disable WoL function for 1588 */
++ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
++ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++ AT803X_PHY_MMD3_WOL_CTRL,
++ AT803X_WOL_EN, 0);
++ if (ret)
++ return ret;
++ }
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
+ if (ret)
+@@ -510,11 +515,11 @@ static void at803x_get_wol(struct phy_device *phydev,
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+- value = phy_read_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL);
++ value = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (value < 0)
+ return;
+
+- if (value & AT803X_WOL_EN)
++ if (value & AT803X_INTR_ENABLE_WOL)
+ wol->wolopts |= WAKE_MAGIC;
+ }
+
+@@ -825,11 +830,11 @@ static int at803x_parse_dt(struct phy_device *phydev)
+ if (ret < 0)
+ return ret;
+
+- priv->vddio = devm_regulator_get_optional(&phydev->mdio.dev,
+- "vddio");
+- if (IS_ERR(priv->vddio)) {
++ ret = devm_regulator_get_enable_optional(&phydev->mdio.dev,
++ "vddio");
++ if (ret) {
+ phydev_err(phydev, "failed to get VDDIO regulator\n");
+- return PTR_ERR(priv->vddio);
++ return ret;
+ }
+
+ /* Only AR8031/8033 support 1000Base-X for SFP modules */
+@@ -857,23 +862,12 @@ static int at803x_probe(struct phy_device *phydev)
+ if (ret)
+ return ret;
+
+- if (priv->vddio) {
+- ret = regulator_enable(priv->vddio);
+- if (ret < 0)
+- return ret;
+- }
+-
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+ int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
+ int mode_cfg;
+- struct ethtool_wolinfo wol = {
+- .wolopts = 0,
+- };
+
+- if (ccr < 0) {
+- ret = ccr;
+- goto err;
+- }
++ if (ccr < 0)
++ return ccr;
+ mode_cfg = ccr & AT803X_MODE_CFG_MASK;
+
+ switch (mode_cfg) {
+@@ -887,29 +881,17 @@ static int at803x_probe(struct phy_device *phydev)
+ break;
+ }
+
+- /* Disable WOL by default */
+- ret = at803x_set_wol(phydev, &wol);
+- if (ret < 0) {
+- phydev_err(phydev, "failed to disable WOL on probe: %d\n", ret);
+- goto err;
+- }
++ /* Disable WoL in 1588 register which is enabled
++ * by default
++ */
++ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
++ AT803X_PHY_MMD3_WOL_CTRL,
++ AT803X_WOL_EN, 0);
++ if (ret)
++ return ret;
+ }
+
+ return 0;
+-
+-err:
+- if (priv->vddio)
+- regulator_disable(priv->vddio);
+-
+- return ret;
+-}
+-
+-static void at803x_remove(struct phy_device *phydev)
+-{
+- struct at803x_priv *priv = phydev->priv;
+-
+- if (priv->vddio)
+- regulator_disable(priv->vddio);
+ }
+
+ static int at803x_get_features(struct phy_device *phydev)
+@@ -2022,7 +2004,6 @@ static struct phy_driver at803x_driver[] = {
+ .name = "Qualcomm Atheros AR8035",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .config_aneg = at803x_config_aneg,
+ .config_init = at803x_config_init,
+ .soft_reset = genphy_soft_reset,
+@@ -2044,7 +2025,6 @@ static struct phy_driver at803x_driver[] = {
+ .name = "Qualcomm Atheros AR8030",
+ .phy_id_mask = AT8030_PHY_ID_MASK,
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+ .set_wol = at803x_set_wol,
+@@ -2060,7 +2040,6 @@ static struct phy_driver at803x_driver[] = {
+ .name = "Qualcomm Atheros AR8031/AR8033",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .config_init = at803x_config_init,
+ .config_aneg = at803x_config_aneg,
+ .soft_reset = genphy_soft_reset,
+@@ -2083,7 +2062,6 @@ static struct phy_driver at803x_driver[] = {
+ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID),
+ .name = "Qualcomm Atheros AR8032",
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .flags = PHY_POLL_CABLE_TEST,
+ .config_init = at803x_config_init,
+ .link_change_notify = at803x_link_change_notify,
+@@ -2099,7 +2077,6 @@ static struct phy_driver at803x_driver[] = {
+ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID),
+ .name = "Qualcomm Atheros AR9331 built-in PHY",
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+@@ -2116,7 +2093,6 @@ static struct phy_driver at803x_driver[] = {
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+@@ -2182,7 +2158,6 @@ static struct phy_driver at803x_driver[] = {
+ .name = "Qualcomm QCA8081",
+ .flags = PHY_POLL_CABLE_TEST,
+ .probe = at803x_probe,
+- .remove = at803x_remove,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
+index ad71c88c87e78..f9ad8902100f3 100644
+--- a/drivers/net/phy/broadcom.c
++++ b/drivers/net/phy/broadcom.c
+@@ -486,6 +486,17 @@ static int bcm54xx_resume(struct phy_device *phydev)
+ return bcm54xx_config_init(phydev);
+ }
+
++static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
++{
++ return -EOPNOTSUPP;
++}
++
++static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
++ u16 val)
++{
++ return -EOPNOTSUPP;
++}
++
+ static int bcm54811_config_init(struct phy_device *phydev)
+ {
+ int err, reg;
+@@ -981,6 +992,8 @@ static struct phy_driver broadcom_drivers[] = {
+ .get_strings = bcm_phy_get_strings,
+ .get_stats = bcm54xx_get_stats,
+ .probe = bcm54xx_phy_probe,
++ .read_mmd = bcm54810_read_mmd,
++ .write_mmd = bcm54810_write_mmd,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm5481_config_aneg,
+ .config_intr = bcm_phy_config_intr,
+diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
+index 82f74f96eba29..944f76e6fc8eb 100644
+--- a/drivers/net/phy/phy_device.c
++++ b/drivers/net/phy/phy_device.c
+@@ -3050,6 +3050,8 @@ static int phy_probe(struct device *dev)
+ goto out;
+ }
+
++ phy_disable_interrupts(phydev);
++
+ /* Start out supporting everything. Eventually,
+ * a controller will attach, and may modify one
+ * or both of these values
+@@ -3137,16 +3139,6 @@ static int phy_remove(struct device *dev)
+ return 0;
+ }
+
+-static void phy_shutdown(struct device *dev)
+-{
+- struct phy_device *phydev = to_phy_device(dev);
+-
+- if (phydev->state == PHY_READY || !phydev->attached_dev)
+- return;
+-
+- phy_disable_interrupts(phydev);
+-}
+-
+ /**
+ * phy_driver_register - register a phy_driver with the PHY layer
+ * @new_driver: new phy_driver to register
+@@ -3180,7 +3172,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
+ new_driver->mdiodrv.driver.bus = &mdio_bus_type;
+ new_driver->mdiodrv.driver.probe = phy_probe;
+ new_driver->mdiodrv.driver.remove = phy_remove;
+- new_driver->mdiodrv.driver.shutdown = phy_shutdown;
+ new_driver->mdiodrv.driver.owner = owner;
+ new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS;
+
+diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
+index 509ba706781ed..921ca59822b0f 100644
+--- a/drivers/net/team/team.c
++++ b/drivers/net/team/team.c
+@@ -2200,7 +2200,9 @@ static void team_setup(struct net_device *dev)
+
+ dev->hw_features = TEAM_VLAN_FEATURES |
+ NETIF_F_HW_VLAN_CTAG_RX |
+- NETIF_F_HW_VLAN_CTAG_FILTER;
++ NETIF_F_HW_VLAN_CTAG_FILTER |
++ NETIF_F_HW_VLAN_STAG_RX |
++ NETIF_F_HW_VLAN_STAG_FILTER;
+
+ dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
+ dev->features |= dev->hw_features;
+diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
+index 075d5d42f5eb6..21d3461fb5d1c 100644
+--- a/drivers/net/virtio_net.c
++++ b/drivers/net/virtio_net.c
+@@ -2504,7 +2504,7 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
+ vi->ctrl->rss.indirection_table[i] = indir_val;
+ }
+
+- vi->ctrl->rss.max_tx_vq = vi->curr_queue_pairs;
++ vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+ vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+
+ netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+@@ -3825,6 +3825,8 @@ static int virtnet_probe(struct virtio_device *vdev)
+ eth_hw_addr_set(dev, addr);
+ } else {
+ eth_hw_addr_random(dev);
++ dev_info(&vdev->dev, "Assigned random MAC address %pM\n",
++ dev->dev_addr);
+ }
+
+ /* Set up our device-specific information */
+@@ -3940,8 +3942,6 @@ static int virtnet_probe(struct virtio_device *vdev)
+ if (vi->has_rss || vi->has_rss_hash_report)
+ virtnet_init_default_rss(vi);
+
+- _virtnet_set_queues(vi, vi->curr_queue_pairs);
+-
+ /* serialize netdev register + virtio_device_ready() with ndo_open() */
+ rtnl_lock();
+
+@@ -3954,6 +3954,26 @@ static int virtnet_probe(struct virtio_device *vdev)
+
+ virtio_device_ready(vdev);
+
++ _virtnet_set_queues(vi, vi->curr_queue_pairs);
++
++ /* a random MAC address has been assigned, notify the device.
++ * We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
++ * because many devices work fine without getting MAC explicitly
++ */
++ if (!virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
++ virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
++ struct scatterlist sg;
++
++ sg_init_one(&sg, dev->dev_addr, dev->addr_len);
++ if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
++ VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
++ pr_debug("virtio_net: setting MAC address failed\n");
++ rtnl_unlock();
++ err = -EINVAL;
++ goto free_unregister_netdev;
++ }
++ }
++
+ rtnl_unlock();
+
+ err = virtnet_cpu_notif_add(vi);
+diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
+index 1b6b437823d22..528e73ccfa43e 100644
+--- a/drivers/pci/controller/dwc/pcie-tegra194.c
++++ b/drivers/pci/controller/dwc/pcie-tegra194.c
+@@ -224,6 +224,7 @@
+ #define EP_STATE_ENABLED 1
+
+ static const unsigned int pcie_gen_freq[] = {
++ GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */
+ GEN1_CORE_CLK_FREQ,
+ GEN2_CORE_CLK_FREQ,
+ GEN3_CORE_CLK_FREQ,
+@@ -455,7 +456,11 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
+
+ speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ PCI_EXP_LNKSTA_CLS;
+- clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
++
++ if (speed >= ARRAY_SIZE(pcie_gen_freq))
++ speed = 0;
++
++ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+
+ if (pcie->of_data->has_ltr_req_fix)
+ return IRQ_HANDLED;
+@@ -1016,7 +1021,11 @@ retry_link:
+
+ speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
+ PCI_EXP_LNKSTA_CLS;
+- clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
++
++ if (speed >= ARRAY_SIZE(pcie_gen_freq))
++ speed = 0;
++
++ clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
+
+ tegra_pcie_enable_interrupts(pp);
+
+diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
+index ad1141fddb4cc..8bda75990bce5 100644
+--- a/drivers/pcmcia/rsrc_nonstatic.c
++++ b/drivers/pcmcia/rsrc_nonstatic.c
+@@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s)
+ q = p->next;
+ kfree(p);
+ }
++
++ kfree(data);
+ }
+
+
+diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
+index 1ca140356a084..3f759121dc00a 100644
+--- a/drivers/soc/aspeed/aspeed-socinfo.c
++++ b/drivers/soc/aspeed/aspeed-socinfo.c
+@@ -137,6 +137,7 @@ static int __init aspeed_socinfo_init(void)
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev)) {
++ kfree(attrs->machine);
+ kfree(attrs->soc_id);
+ kfree(attrs->serial_number);
+ kfree(attrs);
+diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
+index ef8b24fd18518..59123e1f27acb 100644
+--- a/drivers/soc/aspeed/aspeed-uart-routing.c
++++ b/drivers/soc/aspeed/aspeed-uart-routing.c
+@@ -524,7 +524,7 @@ static ssize_t aspeed_uart_routing_store(struct device *dev,
+ struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
+ int val;
+
+- val = match_string(sel->options, -1, buf);
++ val = __sysfs_match_string(sel->options, -1, buf);
+ if (val < 0) {
+ dev_err(dev, "invalid value \"%s\"\n", buf);
+ return -EINVAL;
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 4a6a3802d7e51..288aaa05d0071 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -1479,6 +1479,8 @@ static struct pci_device_id nhi_ids[] = {
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
+
+ /* Any USB4 compliant host */
+ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },
+diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
+index b0718020c6f59..0f029ce758825 100644
+--- a/drivers/thunderbolt/nhi.h
++++ b/drivers/thunderbolt/nhi.h
+@@ -75,6 +75,10 @@ extern const struct tb_nhi_ops icl_nhi_ops;
+ #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef
+ #define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e
+ #define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI 0x5781
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI 0x5784
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE 0x5786
++#define PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE 0x57a4
+ #define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2
+ #define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2
+ #define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3
+diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
+index 1157b8869bcca..8c2ee431fcde8 100644
+--- a/drivers/thunderbolt/quirks.c
++++ b/drivers/thunderbolt/quirks.c
+@@ -74,6 +74,14 @@ static const struct tb_quirk tb_quirks[] = {
+ quirk_usb3_maximum_bandwidth },
+ { 0x8087, PCI_DEVICE_ID_INTEL_MTL_P_NHI1, 0x0000, 0x0000,
+ quirk_usb3_maximum_bandwidth },
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
++ quirk_usb3_maximum_bandwidth },
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
++ quirk_usb3_maximum_bandwidth },
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_80G_BRIDGE, 0x0000, 0x0000,
++ quirk_usb3_maximum_bandwidth },
++ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
++ quirk_usb3_maximum_bandwidth },
+ /*
+ * CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
+ */
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index 9cc28197dbc45..edbd92435b41a 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -187,6 +187,21 @@ static ssize_t nvm_authenticate_show(struct device *dev,
+ return ret;
+ }
+
++static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
++{
++ int i;
++
++ tb_port_dbg(port, "reading NVM authentication status of retimers\n");
++
++ /*
++ * Before doing anything else, read the authentication status.
++ * If the retimer has it set, store it for the new retimer
++ * device instance.
++ */
++ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
++ usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
++}
++
+ static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
+ {
+ int i;
+@@ -455,18 +470,16 @@ int tb_retimer_scan(struct tb_port *port, bool add)
+ return ret;
+
+ /*
+- * Enable sideband channel for each retimer. We can do this
+- * regardless whether there is device connected or not.
++ * Immediately after sending enumerate retimers read the
++ * authentication status of each retimer.
+ */
+- tb_retimer_set_inbound_sbtx(port);
++ tb_retimer_nvm_authenticate_status(port, status);
+
+ /*
+- * Before doing anything else, read the authentication status.
+- * If the retimer has it set, store it for the new retimer
+- * device instance.
++ * Enable sideband channel for each retimer. We can do this
++ * regardless whether there is device connected or not.
+ */
+- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+- usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
++ tb_retimer_set_inbound_sbtx(port);
+
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
+ /*
+diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
+index 59a559366b614..c1fa20a4e3420 100644
+--- a/drivers/tty/n_gsm.c
++++ b/drivers/tty/n_gsm.c
+@@ -2481,12 +2481,13 @@ static void gsm_error(struct gsm_mux *gsm)
+ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc)
+ {
+ int i;
+- struct gsm_dlci *dlci = gsm->dlci[0];
++ struct gsm_dlci *dlci;
+ struct gsm_msg *txq, *ntxq;
+
+ gsm->dead = true;
+ mutex_lock(&gsm->mutex);
+
++ dlci = gsm->dlci[0];
+ if (dlci) {
+ if (disc && dlci->state != DLCI_CLOSED) {
+ gsm_dlci_begin_close(dlci);
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index acf578aa9930b..38760bd6e0c29 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -3281,6 +3281,7 @@ void serial8250_init_port(struct uart_8250_port *up)
+ struct uart_port *port = &up->port;
+
+ spin_lock_init(&port->lock);
++ port->pm = NULL;
+ port->ops = &serial8250_pops;
+ port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index f6d0ea2c6be4b..c5a9b89c4d313 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -1125,8 +1125,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport)
+ unsigned long sr = lpuart32_read(&sport->port, UARTSTAT);
+
+ if (sr & (UARTSTAT_PE | UARTSTAT_FE)) {
+- /* Read DR to clear the error flags */
+- lpuart32_read(&sport->port, UARTDATA);
++ /* Clear the error flags */
++ lpuart32_write(&sport->port, sr, UARTSTAT);
+
+ if (sr & UARTSTAT_PE)
+ sport->port.icount.parity++;
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 28edbaf7bb329..2a9c4058824a8 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -1753,13 +1753,10 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+- int err;
+ u32 cr3;
+
+ pm_runtime_get_sync(&pdev->dev);
+- err = uart_remove_one_port(&stm32_usart_driver, port);
+- if (err)
+- return(err);
++ uart_remove_one_port(&stm32_usart_driver, port);
+
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index 9ffcecd3058c1..60b4de0a4f76d 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = {
+ CI_HDRC_PMQOS,
+ };
+
++static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = {
++ .flags = CI_HDRC_SUPPORTS_RUNTIME_PM,
++};
++
+ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data},
+ { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data},
+@@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
+ { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data},
+ { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data},
+ { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data},
++ { .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data},
+ { /* sentinel */ }
+ };
+ MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
+diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
+index bac0f5458cab9..2318c7906acdb 100644
+--- a/drivers/usb/chipidea/usbmisc_imx.c
++++ b/drivers/usb/chipidea/usbmisc_imx.c
+@@ -135,7 +135,7 @@
+ #define TXVREFTUNE0_MASK (0xf << 20)
+
+ #define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \
+- MX6_BM_ID_WAKEUP)
++ MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN)
+
+ struct usbmisc_ops {
+ /* It's called once when probe a usb device */
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index ea2c5b6cde8cd..3c51355ccc94d 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -915,8 +915,11 @@ static void __gs_console_push(struct gs_console *cons)
+ }
+
+ req->length = size;
++
++ spin_unlock_irq(&cons->lock);
+ if (usb_ep_queue(ep, req, GFP_ATOMIC))
+ req->length = 0;
++ spin_lock_irq(&cons->lock);
+ }
+
+ static void gs_console_work(struct work_struct *work)
+diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
+index dd1c6b2ca7c6f..e81865978299c 100644
+--- a/drivers/usb/gadget/function/uvc_video.c
++++ b/drivers/usb/gadget/function/uvc_video.c
+@@ -386,6 +386,9 @@ static void uvcg_video_pump(struct work_struct *work)
+ struct uvc_buffer *buf;
+ unsigned long flags;
+ int ret;
++ bool buf_int;
++ /* video->max_payload_size is only set when using bulk transfer */
++ bool is_bulk = video->max_payload_size;
+
+ while (video->ep->enabled) {
+ /*
+@@ -408,20 +411,35 @@ static void uvcg_video_pump(struct work_struct *work)
+ */
+ spin_lock_irqsave(&queue->irqlock, flags);
+ buf = uvcg_queue_head(queue);
+- if (buf == NULL) {
++
++ if (buf != NULL) {
++ video->encode(req, video, buf);
++ /* Always interrupt for the last request of a video buffer */
++ buf_int = buf->state == UVC_BUF_STATE_DONE;
++ } else if (!(queue->flags & UVC_QUEUE_DISCONNECTED) && !is_bulk) {
++ /*
++ * No video buffer available; the queue is still connected and
++ * we're traferring over ISOC. Queue a 0 length request to
++ * prevent missed ISOC transfers.
++ */
++ req->length = 0;
++ buf_int = false;
++ } else {
++ /*
++ * Either queue has been disconnected or no video buffer
++ * available to bulk transfer. Either way, stop processing
++ * further.
++ */
+ spin_unlock_irqrestore(&queue->irqlock, flags);
+ break;
+ }
+
+- video->encode(req, video, buf);
+-
+ /*
+ * With usb3 we have more requests. This will decrease the
+ * interrupt load to a quarter but also catches the corner
+ * cases, which needs to be handled.
+ */
+- if (list_empty(&video->req_free) ||
+- buf->state == UVC_BUF_STATE_DONE ||
++ if (list_empty(&video->req_free) || buf_int ||
+ !(video->req_int_count %
+ DIV_ROUND_UP(video->uvc_num_requests, 4))) {
+ video->req_int_count = 0;
+@@ -441,8 +459,7 @@ static void uvcg_video_pump(struct work_struct *work)
+
+ /* Endpoint now owns the request */
+ req = NULL;
+- if (buf->state != UVC_BUF_STATE_DONE)
+- video->req_int_count++;
++ video->req_int_count++;
+ }
+
+ if (!req)
+@@ -527,4 +544,3 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
+ V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
+ return 0;
+ }
+-
+diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+index 25fc4120b618d..b53420e874acb 100644
+--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
++++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+@@ -31,6 +31,7 @@ struct mlx5_vdpa_mr {
+ struct list_head head;
+ unsigned long num_directs;
+ unsigned long num_klms;
++ /* state of dvq mr */
+ bool initialized;
+
+ /* serialize mkey creation and destruction */
+@@ -121,6 +122,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
+ int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+ unsigned int asid);
+ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid);
+
+ #define mlx5_vdpa_warn(__dev, format, ...) \
+ dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \
+diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
+index a4d7ee2339fa5..113aac0446de5 100644
+--- a/drivers/vdpa/mlx5/core/mr.c
++++ b/drivers/vdpa/mlx5/core/mr.c
+@@ -491,15 +491,24 @@ static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr
+ }
+ }
+
+-void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++static void _mlx5_vdpa_destroy_cvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++ return;
++
++ prune_iotlb(mvdev);
++}
++
++static void _mlx5_vdpa_destroy_dvq_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+
+- mutex_lock(&mr->mkey_mtx);
++ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
++ return;
++
+ if (!mr->initialized)
+- goto out;
++ return;
+
+- prune_iotlb(mvdev);
+ if (mr->user_mr)
+ destroy_user_mr(mvdev, mr);
+ else
+@@ -507,45 +516,79 @@ void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
+
+ memset(mr, 0, sizeof(*mr));
+ mr->initialized = false;
+-out:
++}
++
++void mlx5_vdpa_destroy_mr_asid(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
++{
++ struct mlx5_vdpa_mr *mr = &mvdev->mr;
++
++ mutex_lock(&mr->mkey_mtx);
++
++ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
++ _mlx5_vdpa_destroy_cvq_mr(mvdev, asid);
++
+ mutex_unlock(&mr->mkey_mtx);
+ }
+
+-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+- struct vhost_iotlb *iotlb, unsigned int asid)
++void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev)
++{
++ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_CVQ_GROUP]);
++ mlx5_vdpa_destroy_mr_asid(mvdev, mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]);
++}
++
++static int _mlx5_vdpa_create_cvq_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb,
++ unsigned int asid)
++{
++ if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
++ return 0;
++
++ return dup_iotlb(mvdev, iotlb);
++}
++
++static int _mlx5_vdpa_create_dvq_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb,
++ unsigned int asid)
+ {
+ struct mlx5_vdpa_mr *mr = &mvdev->mr;
+ int err;
+
+- if (mr->initialized)
++ if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] != asid)
+ return 0;
+
+- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+- if (iotlb)
+- err = create_user_mr(mvdev, iotlb);
+- else
+- err = create_dma_mr(mvdev, mr);
++ if (mr->initialized)
++ return 0;
+
+- if (err)
+- return err;
+- }
++ if (iotlb)
++ err = create_user_mr(mvdev, iotlb);
++ else
++ err = create_dma_mr(mvdev, mr);
+
+- if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
+- err = dup_iotlb(mvdev, iotlb);
+- if (err)
+- goto out_err;
+- }
++ if (err)
++ return err;
+
+ mr->initialized = true;
++
++ return 0;
++}
++
++static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
++ struct vhost_iotlb *iotlb, unsigned int asid)
++{
++ int err;
++
++ err = _mlx5_vdpa_create_dvq_mr(mvdev, iotlb, asid);
++ if (err)
++ return err;
++
++ err = _mlx5_vdpa_create_cvq_mr(mvdev, iotlb, asid);
++ if (err)
++ goto out_err;
++
+ return 0;
+
+ out_err:
+- if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+- if (iotlb)
+- destroy_user_mr(mvdev, mr);
+- else
+- destroy_dma_mr(mvdev, mr);
+- }
++ _mlx5_vdpa_destroy_dvq_mr(mvdev, asid);
+
+ return err;
+ }
+diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+index daac3ab314785..bf99654371b35 100644
+--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
++++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
+@@ -2406,7 +2406,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ goto err_mr;
+
+ teardown_driver(ndev);
+- mlx5_vdpa_destroy_mr(mvdev);
++ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
+ if (err)
+ goto err_mr;
+@@ -2422,7 +2422,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+ return 0;
+
+ err_setup:
+- mlx5_vdpa_destroy_mr(mvdev);
++ mlx5_vdpa_destroy_mr_asid(mvdev, asid);
+ err_mr:
+ return err;
+ }
+diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
+index febdc99b51a7b..908b3f98ecbee 100644
+--- a/drivers/vdpa/vdpa.c
++++ b/drivers/vdpa/vdpa.c
+@@ -1172,44 +1172,41 @@ static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
+ [VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
+ [VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
+ [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
++ [VDPA_ATTR_DEV_NET_CFG_MAX_VQP] = { .type = NLA_U16 },
+ /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
+ [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
++ [VDPA_ATTR_DEV_QUEUE_INDEX] = { .type = NLA_U32 },
++ [VDPA_ATTR_DEV_FEATURES] = { .type = NLA_U64 },
+ };
+
+ static const struct genl_ops vdpa_nl_ops[] = {
+ {
+ .cmd = VDPA_CMD_MGMTDEV_GET,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_mgmtdev_get_doit,
+ .dumpit = vdpa_nl_cmd_mgmtdev_get_dumpit,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_NEW,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_add_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_DEL,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_del_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_GET,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_get_doit,
+ .dumpit = vdpa_nl_cmd_dev_get_dumpit,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_CONFIG_GET,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_config_get_doit,
+ .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
+ },
+ {
+ .cmd = VDPA_CMD_DEV_VSTATS_GET,
+- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_stats_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
+index 72f924ec4658d..edcd74cc4c0f7 100644
+--- a/drivers/vdpa/vdpa_user/vduse_dev.c
++++ b/drivers/vdpa/vdpa_user/vduse_dev.c
+@@ -899,10 +899,10 @@ static void vduse_dev_irq_inject(struct work_struct *work)
+ {
+ struct vduse_dev *dev = container_of(work, struct vduse_dev, inject);
+
+- spin_lock_irq(&dev->irq_lock);
++ spin_lock_bh(&dev->irq_lock);
+ if (dev->config_cb.callback)
+ dev->config_cb.callback(dev->config_cb.private);
+- spin_unlock_irq(&dev->irq_lock);
++ spin_unlock_bh(&dev->irq_lock);
+ }
+
+ static void vduse_vq_irq_inject(struct work_struct *work)
+@@ -910,10 +910,10 @@ static void vduse_vq_irq_inject(struct work_struct *work)
+ struct vduse_virtqueue *vq = container_of(work,
+ struct vduse_virtqueue, inject);
+
+- spin_lock_irq(&vq->irq_lock);
++ spin_lock_bh(&vq->irq_lock);
+ if (vq->ready && vq->cb.callback)
+ vq->cb.callback(vq->cb.private);
+- spin_unlock_irq(&vq->irq_lock);
++ spin_unlock_bh(&vq->irq_lock);
+ }
+
+ static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
+diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c
+index 41e77de1ea82c..5c94abdb1ad6d 100644
+--- a/drivers/video/aperture.c
++++ b/drivers/video/aperture.c
+@@ -332,15 +332,16 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *na
+ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+ #endif
+
++ if (primary)
++ sysfb_disable();
++
+ for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ continue;
+
+ base = pci_resource_start(pdev, bar);
+ size = pci_resource_len(pdev, bar);
+- ret = aperture_remove_conflicting_devices(base, size, primary, name);
+- if (ret)
+- return ret;
++ aperture_detach_devices(base, size);
+ }
+
+ /*
+diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
+index 4ff25dfc865d9..d3d643cf7506c 100644
+--- a/drivers/video/fbdev/hyperv_fb.c
++++ b/drivers/video/fbdev/hyperv_fb.c
+@@ -995,13 +995,10 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ struct pci_dev *pdev = NULL;
+ void __iomem *fb_virt;
+ int gen2vm = efi_enabled(EFI_BOOT);
++ resource_size_t base, size;
+ phys_addr_t paddr;
+ int ret;
+
+- info->apertures = alloc_apertures(1);
+- if (!info->apertures)
+- return -ENOMEM;
+-
+ if (!gen2vm) {
+ pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
+ PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
+@@ -1010,8 +1007,8 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ return -ENODEV;
+ }
+
+- info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
+- info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
++ base = pci_resource_start(pdev, 0);
++ size = pci_resource_len(pdev, 0);
+
+ /*
+ * For Gen 1 VM, we can directly use the contiguous memory
+@@ -1034,8 +1031,8 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ }
+ pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
+ } else {
+- info->apertures->ranges[0].base = screen_info.lfb_base;
+- info->apertures->ranges[0].size = screen_info.lfb_size;
++ base = screen_info.lfb_base;
++ size = screen_info.lfb_size;
+ }
+
+ /*
+@@ -1077,9 +1074,7 @@ static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
+ info->screen_size = dio_fb_size;
+
+ getmem_done:
+- aperture_remove_conflicting_devices(info->apertures->ranges[0].base,
+- info->apertures->ranges[0].size,
+- false, KBUILD_MODNAME);
++ aperture_remove_conflicting_devices(base, size, false, KBUILD_MODNAME);
+
+ if (gen2vm) {
+ /* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
+diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+index 51fbf02a03430..76b50b6c98ad9 100644
+--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
++++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+@@ -519,7 +519,9 @@ static int mmphw_probe(struct platform_device *pdev)
+ "unable to get clk %s\n", mi->clk_name);
+ goto failed;
+ }
+- clk_prepare_enable(ctrl->clk);
++ ret = clk_prepare_enable(ctrl->clk);
++ if (ret)
++ goto failed;
+
+ /* init global regs */
+ ctrl_set_default(ctrl);
+diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
+index 3ff746e3f24aa..dec3cba884586 100644
+--- a/drivers/virtio/virtio_mmio.c
++++ b/drivers/virtio/virtio_mmio.c
+@@ -590,9 +590,8 @@ static void virtio_mmio_release_dev(struct device *_d)
+ struct virtio_device *vdev =
+ container_of(_d, struct virtio_device, dev);
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+- struct platform_device *pdev = vm_dev->pdev;
+
+- devm_kfree(&pdev->dev, vm_dev);
++ kfree(vm_dev);
+ }
+
+ /* Platform device */
+@@ -603,7 +602,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
+ unsigned long magic;
+ int rc;
+
+- vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
++ vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
+ if (!vm_dev)
+ return -ENOMEM;
+
+diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c
+index 14f8d8d90920f..2bd3dc25cb030 100644
+--- a/drivers/watchdog/sp5100_tco.c
++++ b/drivers/watchdog/sp5100_tco.c
+@@ -96,7 +96,7 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev)
+ sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+ sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) {
+ return efch_mmio;
+- } else if (dev->vendor == PCI_VENDOR_ID_AMD &&
++ } else if ((dev->vendor == PCI_VENDOR_ID_AMD || dev->vendor == PCI_VENDOR_ID_HYGON) &&
+ ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS &&
+ dev->revision >= 0x41) ||
+ (dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS &&
+@@ -579,6 +579,8 @@ static const struct pci_device_id sp5100_tco_pci_tbl[] = {
+ PCI_ANY_ID, },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID,
+ PCI_ANY_ID, },
++ { PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID,
++ PCI_ANY_ID, },
+ { 0, }, /* End of list */
+ };
+ MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl);
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 3495bc775afa3..08017b180a10d 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1533,6 +1533,10 @@ void btrfs_mark_bg_unused(struct btrfs_block_group *bg)
+ btrfs_get_block_group(bg);
+ trace_btrfs_add_unused_block_group(bg);
+ list_add_tail(&bg->bg_list, &fs_info->unused_bgs);
++ } else if (!test_bit(BLOCK_GROUP_FLAG_NEW, &bg->runtime_flags)) {
++ /* Pull out the block group from the reclaim_bgs list. */
++ trace_btrfs_add_unused_block_group(bg);
++ list_move_tail(&bg->bg_list, &fs_info->unused_bgs);
+ }
+ spin_unlock(&fs_info->unused_bgs_lock);
+ }
+@@ -2493,6 +2497,7 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans)
+ next:
+ btrfs_delayed_refs_rsv_release(fs_info, 1);
+ list_del_init(&block_group->bg_list);
++ clear_bit(BLOCK_GROUP_FLAG_NEW, &block_group->runtime_flags);
+ }
+ btrfs_trans_release_chunk_metadata(trans);
+ }
+@@ -2532,6 +2537,13 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
+ if (!cache)
+ return ERR_PTR(-ENOMEM);
+
++ /*
++ * Mark it as new before adding it to the rbtree of block groups or any
++ * list, so that no other task finds it and calls btrfs_mark_bg_unused()
++ * before the new flag is set.
++ */
++ set_bit(BLOCK_GROUP_FLAG_NEW, &cache->runtime_flags);
++
+ cache->length = size;
+ set_free_space_tree_thresholds(cache);
+ cache->used = bytes_used;
+@@ -2540,7 +2552,7 @@ struct btrfs_block_group *btrfs_make_block_group(struct btrfs_trans_handle *tran
+ cache->global_root_id = calculate_global_root_id(fs_info, cache->start);
+
+ if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
+- cache->needs_free_space = 1;
++ set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
+
+ ret = btrfs_load_block_group_zone_info(cache, true);
+ if (ret) {
+diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
+index debd42aeae0f1..47a2dcbfee255 100644
+--- a/fs/btrfs/block-group.h
++++ b/fs/btrfs/block-group.h
+@@ -55,6 +55,15 @@ enum btrfs_block_group_flags {
+ BLOCK_GROUP_FLAG_CHUNK_ITEM_INSERTED,
+ BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE,
+ BLOCK_GROUP_FLAG_ZONED_DATA_RELOC,
++ /* Does the block group need to be added to the free space tree? */
++ BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE,
++ /* Indicate that the block group is placed on a sequential zone */
++ BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE,
++ /*
++ * Indicate that block group is in the list of new block groups of a
++ * transaction.
++ */
++ BLOCK_GROUP_FLAG_NEW,
+ };
+
+ enum btrfs_caching_type {
+@@ -204,15 +213,6 @@ struct btrfs_block_group {
+ /* Lock for free space tree operations. */
+ struct mutex free_space_lock;
+
+- /*
+- * Does the block group need to be added to the free space tree?
+- * Protected by free_space_lock.
+- */
+- int needs_free_space;
+-
+- /* Flag indicating this block group is placed on a sequential zone */
+- bool seq_zone;
+-
+ /*
+ * Number of extents in this block group used for swap files.
+ * All accesses protected by the spinlock 'lock'.
+diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
+index 4abbe4b352533..56d7580fdc3c4 100644
+--- a/fs/btrfs/extent_map.c
++++ b/fs/btrfs/extent_map.c
+@@ -784,8 +784,6 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+
+ if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
+ start = em_end;
+- if (end != (u64)-1)
+- len = start + len - em_end;
+ goto next;
+ }
+
+@@ -853,8 +851,8 @@ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end,
+ if (!split)
+ goto remove_em;
+ }
+- split->start = start + len;
+- split->len = em_end - (start + len);
++ split->start = end;
++ split->len = em_end - end;
+ split->block_start = em->block_start;
+ split->flags = flags;
+ split->compress_type = em->compress_type;
+diff --git a/fs/btrfs/free-space-tree.c b/fs/btrfs/free-space-tree.c
+index a207db9322264..6a44733a95e1c 100644
+--- a/fs/btrfs/free-space-tree.c
++++ b/fs/btrfs/free-space-tree.c
+@@ -803,7 +803,7 @@ int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
+ u32 flags;
+ int ret;
+
+- if (block_group->needs_free_space) {
++ if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) {
+ ret = __add_block_group_free_space(trans, block_group, path);
+ if (ret)
+ return ret;
+@@ -996,7 +996,7 @@ int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
+ u32 flags;
+ int ret;
+
+- if (block_group->needs_free_space) {
++ if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) {
+ ret = __add_block_group_free_space(trans, block_group, path);
+ if (ret)
+ return ret;
+@@ -1350,7 +1350,7 @@ static int __add_block_group_free_space(struct btrfs_trans_handle *trans,
+ {
+ int ret;
+
+- block_group->needs_free_space = 0;
++ clear_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags);
+
+ ret = add_new_free_space_info(trans, block_group, path);
+ if (ret)
+@@ -1372,7 +1372,7 @@ int add_block_group_free_space(struct btrfs_trans_handle *trans,
+ return 0;
+
+ mutex_lock(&block_group->free_space_lock);
+- if (!block_group->needs_free_space)
++ if (!test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags))
+ goto out;
+
+ path = btrfs_alloc_path();
+@@ -1405,7 +1405,7 @@ int remove_block_group_free_space(struct btrfs_trans_handle *trans,
+ if (!btrfs_fs_compat_ro(trans->fs_info, FREE_SPACE_TREE))
+ return 0;
+
+- if (block_group->needs_free_space) {
++ if (test_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &block_group->runtime_flags)) {
+ /* We never added this block group to the free space tree. */
+ return 0;
+ }
+diff --git a/fs/btrfs/tests/free-space-tree-tests.c b/fs/btrfs/tests/free-space-tree-tests.c
+index 13734ed43bfcb..766117a76d742 100644
+--- a/fs/btrfs/tests/free-space-tree-tests.c
++++ b/fs/btrfs/tests/free-space-tree-tests.c
+@@ -470,7 +470,7 @@ static int run_test(test_func_t test_func, int bitmaps, u32 sectorsize,
+ }
+ cache->bitmap_low_thresh = 0;
+ cache->bitmap_high_thresh = (u32)-1;
+- cache->needs_free_space = 1;
++ set_bit(BLOCK_GROUP_FLAG_NEEDS_FREE_SPACE, &cache->runtime_flags);
+ cache->fs_info = root->fs_info;
+
+ btrfs_init_dummy_trans(&trans, root->fs_info);
+diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
+index 2e0832d70406c..567c5c010f931 100644
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -4652,8 +4652,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
+ }
+ }
+
+- BUG_ON(fs_info->balance_ctl ||
+- test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
++ ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
+ atomic_dec(&fs_info->balance_cancel_req);
+ mutex_unlock(&fs_info->balance_mutex);
+ return 0;
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 836babd23db52..9bc7ac06c5177 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1436,7 +1436,7 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new)
+ }
+
+ if (num_sequential > 0)
+- cache->seq_zone = true;
++ set_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
+
+ if (num_conventional > 0) {
+ /* Zone capacity is always zone size in emulation */
+@@ -1658,7 +1658,7 @@ bool btrfs_use_zone_append(struct btrfs_inode *inode, u64 start)
+ if (!cache)
+ return false;
+
+- ret = cache->seq_zone;
++ ret = !!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &cache->runtime_flags);
+ btrfs_put_block_group(cache);
+
+ return ret;
+@@ -2177,7 +2177,8 @@ static void btrfs_zone_finish_endio_workfn(struct work_struct *work)
+ void btrfs_schedule_zone_finish_bg(struct btrfs_block_group *bg,
+ struct extent_buffer *eb)
+ {
+- if (!bg->seq_zone || eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
++ if (!test_bit(BLOCK_GROUP_FLAG_SEQUENTIAL_ZONE, &bg->runtime_flags) ||
++ eb->start + eb->len * 2 <= bg->start + bg->zone_capacity)
+ return;
+
+ if (WARN_ON(bg->zone_finish_work.func == btrfs_zone_finish_endio_workfn)) {
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index dcabe2783edfe..5399a9ea5b4f1 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -645,6 +645,7 @@ bad:
+ err = -EIO;
+ out_bad:
+ pr_err("mds parse_reply err %d\n", err);
++ ceph_msg_dump(msg);
+ return err;
+ }
+
+@@ -3534,6 +3535,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
+
+ bad:
+ pr_err("mdsc_handle_forward decode error err=%d\n", err);
++ ceph_msg_dump(msg);
+ }
+
+ static int __decode_session_metadata(void **p, void *end,
+@@ -5254,6 +5256,7 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
+ bad:
+ pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
+ ceph_umount_begin(mdsc->fsc->sb);
++ ceph_msg_dump(msg);
+ err_out:
+ mutex_lock(&mdsc->mutex);
+ mdsc->mdsmap_err = err;
+@@ -5322,6 +5325,7 @@ bad_unlock:
+ bad:
+ pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
+ ceph_umount_begin(mdsc->fsc->sb);
++ ceph_msg_dump(msg);
+ return;
+ }
+
+diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
+index 9d27aa8bd2bc6..44c564f0bc622 100644
+--- a/fs/gfs2/super.c
++++ b/fs/gfs2/super.c
+@@ -981,7 +981,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ {
+ struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
+ struct gfs2_args *args = &sdp->sd_args;
+- int val;
++ unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;
++
++ spin_lock(&sdp->sd_tune.gt_spin);
++ logd_secs = sdp->sd_tune.gt_logd_secs;
++ quota_quantum = sdp->sd_tune.gt_quota_quantum;
++ statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
++ statfs_slow = sdp->sd_tune.gt_statfs_slow;
++ spin_unlock(&sdp->sd_tune.gt_spin);
+
+ if (is_ancestor(root, sdp->sd_master_dir))
+ seq_puts(s, ",meta");
+@@ -1036,17 +1043,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root)
+ }
+ if (args->ar_discard)
+ seq_puts(s, ",discard");
+- val = sdp->sd_tune.gt_logd_secs;
+- if (val != 30)
+- seq_printf(s, ",commit=%d", val);
+- val = sdp->sd_tune.gt_statfs_quantum;
+- if (val != 30)
+- seq_printf(s, ",statfs_quantum=%d", val);
+- else if (sdp->sd_tune.gt_statfs_slow)
++ if (logd_secs != 30)
++ seq_printf(s, ",commit=%d", logd_secs);
++ if (statfs_quantum != 30)
++ seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
++ else if (statfs_slow)
+ seq_puts(s, ",statfs_quantum=0");
+- val = sdp->sd_tune.gt_quota_quantum;
+- if (val != 60)
+- seq_printf(s, ",quota_quantum=%d", val);
++ if (quota_quantum != 60)
++ seq_printf(s, ",quota_quantum=%d", quota_quantum);
+ if (args->ar_statfs_percent)
+ seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
+ if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index b1b476fb7229b..dda13e1f1b330 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -874,6 +874,7 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ if (err)
+ goto out1;
+
++ err = -EINVAL;
+ /* Call mi_remove_attr() in reverse order to keep pointers 'arr_move' valid. */
+ while (to_free > 0) {
+ struct ATTRIB *b = arr_move[--nb];
+@@ -882,7 +883,8 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+
+ attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off),
+ b->name_len, asize, name_off);
+- WARN_ON(!attr);
++ if (!attr)
++ goto out1;
+
+ mi_get_ref(mi, &le_b[nb]->ref);
+ le_b[nb]->id = attr->id;
+@@ -892,17 +894,20 @@ int ni_create_attr_list(struct ntfs_inode *ni)
+ attr->id = le_b[nb]->id;
+
+ /* Remove from primary record. */
+- WARN_ON(!mi_remove_attr(NULL, &ni->mi, b));
++ if (!mi_remove_attr(NULL, &ni->mi, b))
++ goto out1;
+
+ if (to_free <= asize)
+ break;
+ to_free -= asize;
+- WARN_ON(!nb);
++ if (!nb)
++ goto out1;
+ }
+
+ attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0,
+ lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT);
+- WARN_ON(!attr);
++ if (!attr)
++ goto out1;
+
+ attr->non_res = 0;
+ attr->flags = 0;
+@@ -922,9 +927,10 @@ out1:
+ kfree(ni->attr_list.le);
+ ni->attr_list.le = NULL;
+ ni->attr_list.size = 0;
++ return err;
+
+ out:
+- return err;
++ return 0;
+ }
+
+ /*
+diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c
+index b6e22bcb929ba..829b62d3bb889 100644
+--- a/fs/ntfs3/fsntfs.c
++++ b/fs/ntfs3/fsntfs.c
+@@ -154,7 +154,7 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes,
+ /* Check errors. */
+ if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- ||
+ fn * SECTOR_SIZE > bytes) {
+- return -EINVAL; /* Native chkntfs returns ok! */
++ return -E_NTFS_CORRUPT;
+ }
+
+ /* Get fixup pointer. */
+diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c
+index 9e9a9ffd92958..495cfb37962fa 100644
+--- a/fs/ntfs3/index.c
++++ b/fs/ntfs3/index.c
+@@ -1103,6 +1103,12 @@ ok:
+ *node = in;
+
+ out:
++ if (err == -E_NTFS_CORRUPT) {
++ ntfs_inode_err(&ni->vfs_inode, "directory corrupted");
++ ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR);
++ err = -EINVAL;
++ }
++
+ if (ib != in->index)
+ kfree(ib);
+
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index 24227b2e1b2b0..8c9abaf139e67 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -53,6 +53,8 @@ enum utf16_endian;
+ #define E_NTFS_NONRESIDENT 556
+ /* NTFS specific error code about punch hole. */
+ #define E_NTFS_NOTALIGNED 557
++/* NTFS specific error code when on-disk struct is corrupted. */
++#define E_NTFS_CORRUPT 558
+
+
+ /* sbi->flags */
+diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c
+index af1e4b364ea8e..ba336c7280b85 100644
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -124,7 +124,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
+ struct rw_semaphore *rw_lock = NULL;
+
+ if (is_mounted(sbi)) {
+- if (!is_mft) {
++ if (!is_mft && mft_ni) {
+ rw_lock = &mft_ni->file.run_lock;
+ down_read(rw_lock);
+ }
+@@ -148,7 +148,7 @@ int mi_read(struct mft_inode *mi, bool is_mft)
+ ni_lock(mft_ni);
+ down_write(rw_lock);
+ }
+- err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run,
++ err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run,
+ vbo >> sbi->cluster_bits);
+ if (rw_lock) {
+ up_write(rw_lock);
+@@ -180,6 +180,12 @@ ok:
+ return 0;
+
+ out:
++ if (err == -E_NTFS_CORRUPT) {
++ ntfs_err(sbi->sb, "mft corrupted");
++ ntfs_set_state(sbi, NTFS_DIRTY_ERROR);
++ err = -EINVAL;
++ }
++
+ return err;
+ }
+
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 078df1e2dd18a..18d66497c42d1 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -883,11 +883,11 @@ struct dentry *
+ cifs_smb3_do_mount(struct file_system_type *fs_type,
+ int flags, struct smb3_fs_context *old_ctx)
+ {
+- int rc;
+- struct super_block *sb = NULL;
+- struct cifs_sb_info *cifs_sb = NULL;
+ struct cifs_mnt_data mnt_data;
++ struct cifs_sb_info *cifs_sb;
++ struct super_block *sb;
+ struct dentry *root;
++ int rc;
+
+ /*
+ * Prints in Kernel / CIFS log the attempted mount operation
+@@ -898,11 +898,9 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
+ else
+ cifs_info("Attempting to mount %s\n", old_ctx->UNC);
+
+- cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL);
+- if (cifs_sb == NULL) {
+- root = ERR_PTR(-ENOMEM);
+- goto out;
+- }
++ cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
++ if (!cifs_sb)
++ return ERR_PTR(-ENOMEM);
+
+ cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
+ if (!cifs_sb->ctx) {
+@@ -945,10 +943,8 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
+
+ sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
+ if (IS_ERR(sb)) {
+- root = ERR_CAST(sb);
+ cifs_umount(cifs_sb);
+- cifs_sb = NULL;
+- goto out;
++ return ERR_CAST(sb);
+ }
+
+ if (sb->s_root) {
+@@ -979,13 +975,9 @@ out_super:
+ deactivate_locked_super(sb);
+ return root;
+ out:
+- if (cifs_sb) {
+- if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */
+- kfree(cifs_sb->prepath);
+- smb3_cleanup_fs_context(cifs_sb->ctx);
+- kfree(cifs_sb);
+- }
+- }
++ kfree(cifs_sb->prepath);
++ smb3_cleanup_fs_context(cifs_sb->ctx);
++ kfree(cifs_sb);
+ return root;
+ }
+
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 27c6d14e369f1..0f3405e0f2e48 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -4885,9 +4885,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
+
+ io_error:
+ kunmap(page);
+- unlock_page(page);
+
+ read_complete:
++ unlock_page(page);
+ return rc;
+ }
+
+@@ -5082,9 +5082,11 @@ void cifs_oplock_break(struct work_struct *work)
+ struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+ oplock_break);
+ struct inode *inode = d_inode(cfile->dentry);
++ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
+- struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+- struct TCP_Server_Info *server = tcon->ses->server;
++ struct cifs_tcon *tcon;
++ struct TCP_Server_Info *server;
++ struct tcon_link *tlink;
+ int rc = 0;
+ bool purge_cache = false, oplock_break_cancelled;
+ __u64 persistent_fid, volatile_fid;
+@@ -5093,6 +5095,12 @@ void cifs_oplock_break(struct work_struct *work)
+ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ TASK_UNINTERRUPTIBLE);
+
++ tlink = cifs_sb_tlink(cifs_sb);
++ if (IS_ERR(tlink))
++ goto out;
++ tcon = tlink_tcon(tlink);
++ server = tcon->ses->server;
++
+ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
+ cfile->oplock_epoch, &purge_cache);
+
+@@ -5142,18 +5150,19 @@ oplock_break_ack:
+ /*
+ * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require
+ * an acknowledgment to be sent when the file has already been closed.
+- * check for server null, since can race with kill_sb calling tree disconnect.
+ */
+ spin_lock(&cinode->open_file_lock);
+- if (tcon->ses && tcon->ses->server && !oplock_break_cancelled &&
+- !list_empty(&cinode->openFileList)) {
++ /* check list empty since can race with kill_sb calling tree disconnect */
++ if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) {
+ spin_unlock(&cinode->open_file_lock);
+- rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid,
+- volatile_fid, net_fid, cinode);
++ rc = server->ops->oplock_response(tcon, persistent_fid,
++ volatile_fid, net_fid, cinode);
+ cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
+ } else
+ spin_unlock(&cinode->open_file_lock);
+
++ cifs_put_tlink(tlink);
++out:
+ cifs_done_oplock_break(cinode);
+ }
+
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 3ca593cdda76e..ba46156e32680 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3841,6 +3841,12 @@ void smb2_reconnect_server(struct work_struct *work)
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
++ spin_lock(&ses->ses_lock);
++ if (ses->ses_status == SES_EXITING) {
++ spin_unlock(&ses->ses_lock);
++ continue;
++ }
++ spin_unlock(&ses->ses_lock);
+
+ tcon_selected = false;
+
+diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
+index 4df9e73a8bb5f..1d7d4cffaefc6 100644
+--- a/include/kvm/arm_vgic.h
++++ b/include/kvm/arm_vgic.h
+@@ -429,6 +429,6 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
+
+ int vgic_v4_load(struct kvm_vcpu *vcpu);
+ void vgic_v4_commit(struct kvm_vcpu *vcpu);
+-int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
++int vgic_v4_put(struct kvm_vcpu *vcpu);
+
+ #endif /* __KVM_ARM_VGIC_H */
+diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
+index 2c8860e406bd8..0417360a6db9b 100644
+--- a/include/linux/iopoll.h
++++ b/include/linux/iopoll.h
+@@ -53,6 +53,7 @@
+ } \
+ if (__sleep_us) \
+ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \
++ cpu_relax(); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+ })
+@@ -95,6 +96,7 @@
+ } \
+ if (__delay_us) \
+ udelay(__delay_us); \
++ cpu_relax(); \
+ } \
+ (cond) ? 0 : -ETIMEDOUT; \
+ })
+diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
+index a960de68ac69e..6047058d67037 100644
+--- a/include/linux/virtio_net.h
++++ b/include/linux/virtio_net.h
+@@ -148,6 +148,10 @@ retry:
+ if (gso_type & SKB_GSO_UDP)
+ nh_off -= thlen;
+
++ /* Kernel has a special handling for GSO_BY_FRAGS. */
++ if (gso_size == GSO_BY_FRAGS)
++ return -EINVAL;
++
+ /* Too small packets are not really GSO ones. */
+ if (skb->len - nh_off > gso_size) {
+ shinfo->gso_size = gso_size;
+diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
+index bb9de6a899e07..d6c8eb2b52019 100644
+--- a/include/media/v4l2-mem2mem.h
++++ b/include/media/v4l2-mem2mem.h
+@@ -593,7 +593,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx,
+ static inline
+ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+- return m2m_ctx->out_q_ctx.num_rdy;
++ unsigned int num_buf_rdy;
++ unsigned long flags;
++
++ spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++ num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy;
++ spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
++
++ return num_buf_rdy;
+ }
+
+ /**
+@@ -605,7 +612,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ static inline
+ unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx)
+ {
+- return m2m_ctx->cap_q_ctx.num_rdy;
++ unsigned int num_buf_rdy;
++ unsigned long flags;
++
++ spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++ num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy;
++ spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
++
++ return num_buf_rdy;
+ }
+
+ /**
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 1bbdddcf61542..699408944952c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -1445,6 +1445,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk)
+ return sk->sk_prot->memory_pressure != NULL;
+ }
+
++static inline bool sk_under_global_memory_pressure(const struct sock *sk)
++{
++ return sk->sk_prot->memory_pressure &&
++ !!*sk->sk_prot->memory_pressure;
++}
++
+ static inline bool sk_under_memory_pressure(const struct sock *sk)
+ {
+ if (!sk->sk_prot->memory_pressure)
+diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c
+index b4526668072e7..27596f3b4aef3 100644
+--- a/kernel/dma/remap.c
++++ b/kernel/dma/remap.c
+@@ -43,13 +43,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
+ void *vaddr;
+ int i;
+
+- pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
++ pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+ for (i = 0; i < count; i++)
+ pages[i] = nth_page(page, i);
+ vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
+- kfree(pages);
++ kvfree(pages);
+
+ return vaddr;
+ }
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 5e5aea2360a87..612873ec2197f 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -4464,17 +4464,9 @@ static inline int util_fits_cpu(unsigned long util,
+ *
+ * For uclamp_max, we can tolerate a drop in performance level as the
+ * goal is to cap the task. So it's okay if it's getting less.
+- *
+- * In case of capacity inversion we should honour the inverted capacity
+- * for both uclamp_min and uclamp_max all the time.
+ */
+- capacity_orig = cpu_in_capacity_inversion(cpu);
+- if (capacity_orig) {
+- capacity_orig_thermal = capacity_orig;
+- } else {
+- capacity_orig = capacity_orig_of(cpu);
+- capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
+- }
++ capacity_orig = capacity_orig_of(cpu);
++ capacity_orig_thermal = capacity_orig - arch_scale_thermal_pressure(cpu);
+
+ /*
+ * We want to force a task to fit a cpu as implied by uclamp_max.
+@@ -4549,8 +4541,8 @@ static inline int util_fits_cpu(unsigned long util,
+ * handle the case uclamp_min > uclamp_max.
+ */
+ uclamp_min = min(uclamp_min, uclamp_max);
+- if (util < uclamp_min && capacity_orig != SCHED_CAPACITY_SCALE)
+- fits = fits && (uclamp_min <= capacity_orig_thermal);
++ if (fits && (util < uclamp_min) && (uclamp_min > capacity_orig_thermal))
++ return -1;
+
+ return fits;
+ }
+@@ -4560,7 +4552,11 @@ static inline int task_fits_cpu(struct task_struct *p, int cpu)
+ unsigned long uclamp_min = uclamp_eff_value(p, UCLAMP_MIN);
+ unsigned long uclamp_max = uclamp_eff_value(p, UCLAMP_MAX);
+ unsigned long util = task_util_est(p);
+- return util_fits_cpu(util, uclamp_min, uclamp_max, cpu);
++ /*
++ * Return true only if the cpu fully fits the task requirements, which
++ * include the utilization but also the performance hints.
++ */
++ return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
+ }
+
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+@@ -6043,6 +6039,7 @@ static inline bool cpu_overutilized(int cpu)
+ unsigned long rq_util_min = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MIN);
+ unsigned long rq_util_max = uclamp_rq_get(cpu_rq(cpu), UCLAMP_MAX);
+
++ /* Return true only if the utilization doesn't fit CPU's capacity */
+ return !util_fits_cpu(cpu_util_cfs(cpu), rq_util_min, rq_util_max, cpu);
+ }
+
+@@ -6836,6 +6833,7 @@ static int
+ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+ {
+ unsigned long task_util, util_min, util_max, best_cap = 0;
++ int fits, best_fits = 0;
+ int cpu, best_cpu = -1;
+ struct cpumask *cpus;
+
+@@ -6851,12 +6849,28 @@ select_idle_capacity(struct task_struct *p, struct sched_domain *sd, int target)
+
+ if (!available_idle_cpu(cpu) && !sched_idle_cpu(cpu))
+ continue;
+- if (util_fits_cpu(task_util, util_min, util_max, cpu))
++
++ fits = util_fits_cpu(task_util, util_min, util_max, cpu);
++
++ /* This CPU fits with all requirements */
++ if (fits > 0)
+ return cpu;
++ /*
++ * Only the min performance hint (i.e. uclamp_min) doesn't fit.
++ * Look for the CPU with best capacity.
++ */
++ else if (fits < 0)
++ cpu_cap = capacity_orig_of(cpu) - thermal_load_avg(cpu_rq(cpu));
+
+- if (cpu_cap > best_cap) {
++ /*
++ * First, select CPU which fits better (-1 being better than 0).
++ * Then, select the one with best capacity at same level.
++ */
++ if ((fits < best_fits) ||
++ ((fits == best_fits) && (cpu_cap > best_cap))) {
+ best_cap = cpu_cap;
+ best_cpu = cpu;
++ best_fits = fits;
+ }
+ }
+
+@@ -6869,7 +6883,11 @@ static inline bool asym_fits_cpu(unsigned long util,
+ int cpu)
+ {
+ if (sched_asym_cpucap_active())
+- return util_fits_cpu(util, util_min, util_max, cpu);
++ /*
++ * Return true only if the cpu fully fits the task requirements
++ * which include the utilization and the performance hints.
++ */
++ return (util_fits_cpu(util, util_min, util_max, cpu) > 0);
+
+ return true;
+ }
+@@ -7236,6 +7254,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ unsigned long p_util_max = uclamp_is_used() ? uclamp_eff_value(p, UCLAMP_MAX) : 1024;
+ struct root_domain *rd = this_rq()->rd;
+ int cpu, best_energy_cpu, target = -1;
++ int prev_fits = -1, best_fits = -1;
++ unsigned long best_thermal_cap = 0;
++ unsigned long prev_thermal_cap = 0;
+ struct sched_domain *sd;
+ struct perf_domain *pd;
+ struct energy_env eenv;
+@@ -7271,6 +7292,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ unsigned long prev_spare_cap = 0;
+ int max_spare_cap_cpu = -1;
+ unsigned long base_energy;
++ int fits, max_fits = -1;
+
+ cpumask_and(cpus, perf_domain_span(pd), cpu_online_mask);
+
+@@ -7320,7 +7342,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ util_min = max(rq_util_min, p_util_min);
+ util_max = max(rq_util_max, p_util_max);
+ }
+- if (!util_fits_cpu(util, util_min, util_max, cpu))
++
++ fits = util_fits_cpu(util, util_min, util_max, cpu);
++ if (!fits)
+ continue;
+
+ lsub_positive(&cpu_cap, util);
+@@ -7328,7 +7352,9 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ if (cpu == prev_cpu) {
+ /* Always use prev_cpu as a candidate. */
+ prev_spare_cap = cpu_cap;
+- } else if (cpu_cap > max_spare_cap) {
++ prev_fits = fits;
++ } else if ((fits > max_fits) ||
++ ((fits == max_fits) && (cpu_cap > max_spare_cap))) {
+ /*
+ * Find the CPU with the maximum spare capacity
+ * among the remaining CPUs in the performance
+@@ -7336,6 +7362,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ */
+ max_spare_cap = cpu_cap;
+ max_spare_cap_cpu = cpu;
++ max_fits = fits;
+ }
+ }
+
+@@ -7354,26 +7381,50 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+ if (prev_delta < base_energy)
+ goto unlock;
+ prev_delta -= base_energy;
++ prev_thermal_cap = cpu_thermal_cap;
+ best_delta = min(best_delta, prev_delta);
+ }
+
+ /* Evaluate the energy impact of using max_spare_cap_cpu. */
+ if (max_spare_cap_cpu >= 0 && max_spare_cap > prev_spare_cap) {
++ /* Current best energy cpu fits better */
++ if (max_fits < best_fits)
++ continue;
++
++ /*
++ * Both don't fit performance hint (i.e. uclamp_min)
++ * but best energy cpu has better capacity.
++ */
++ if ((max_fits < 0) &&
++ (cpu_thermal_cap <= best_thermal_cap))
++ continue;
++
+ cur_delta = compute_energy(&eenv, pd, cpus, p,
+ max_spare_cap_cpu);
+ /* CPU utilization has changed */
+ if (cur_delta < base_energy)
+ goto unlock;
+ cur_delta -= base_energy;
+- if (cur_delta < best_delta) {
+- best_delta = cur_delta;
+- best_energy_cpu = max_spare_cap_cpu;
+- }
++
++ /*
++ * Both fit for the task but best energy cpu has lower
++ * energy impact.
++ */
++ if ((max_fits > 0) && (best_fits > 0) &&
++ (cur_delta >= best_delta))
++ continue;
++
++ best_delta = cur_delta;
++ best_energy_cpu = max_spare_cap_cpu;
++ best_fits = max_fits;
++ best_thermal_cap = cpu_thermal_cap;
+ }
+ }
+ rcu_read_unlock();
+
+- if (best_delta < prev_delta)
++ if ((best_fits > prev_fits) ||
++ ((best_fits > 0) && (best_delta < prev_delta)) ||
++ ((best_fits < 0) && (best_thermal_cap > prev_thermal_cap)))
+ target = best_energy_cpu;
+
+ return target;
+@@ -8870,82 +8921,16 @@ static unsigned long scale_rt_capacity(int cpu)
+
+ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
+ {
+- unsigned long capacity_orig = arch_scale_cpu_capacity(cpu);
+ unsigned long capacity = scale_rt_capacity(cpu);
+ struct sched_group *sdg = sd->groups;
+- struct rq *rq = cpu_rq(cpu);
+
+- rq->cpu_capacity_orig = capacity_orig;
++ cpu_rq(cpu)->cpu_capacity_orig = arch_scale_cpu_capacity(cpu);
+
+ if (!capacity)
+ capacity = 1;
+
+- rq->cpu_capacity = capacity;
+-
+- /*
+- * Detect if the performance domain is in capacity inversion state.
+- *
+- * Capacity inversion happens when another perf domain with equal or
+- * lower capacity_orig_of() ends up having higher capacity than this
+- * domain after subtracting thermal pressure.
+- *
+- * We only take into account thermal pressure in this detection as it's
+- * the only metric that actually results in *real* reduction of
+- * capacity due to performance points (OPPs) being dropped/become
+- * unreachable due to thermal throttling.
+- *
+- * We assume:
+- * * That all cpus in a perf domain have the same capacity_orig
+- * (same uArch).
+- * * Thermal pressure will impact all cpus in this perf domain
+- * equally.
+- */
+- if (sched_energy_enabled()) {
+- unsigned long inv_cap = capacity_orig - thermal_load_avg(rq);
+- struct perf_domain *pd;
+-
+- rcu_read_lock();
+-
+- pd = rcu_dereference(rq->rd->pd);
+- rq->cpu_capacity_inverted = 0;
+-
+- for (; pd; pd = pd->next) {
+- struct cpumask *pd_span = perf_domain_span(pd);
+- unsigned long pd_cap_orig, pd_cap;
+-
+- /* We can't be inverted against our own pd */
+- if (cpumask_test_cpu(cpu_of(rq), pd_span))
+- continue;
+-
+- cpu = cpumask_any(pd_span);
+- pd_cap_orig = arch_scale_cpu_capacity(cpu);
+-
+- if (capacity_orig < pd_cap_orig)
+- continue;
+-
+- /*
+- * handle the case of multiple perf domains have the
+- * same capacity_orig but one of them is under higher
+- * thermal pressure. We record it as capacity
+- * inversion.
+- */
+- if (capacity_orig == pd_cap_orig) {
+- pd_cap = pd_cap_orig - thermal_load_avg(cpu_rq(cpu));
+-
+- if (pd_cap > inv_cap) {
+- rq->cpu_capacity_inverted = inv_cap;
+- break;
+- }
+- } else if (pd_cap_orig > inv_cap) {
+- rq->cpu_capacity_inverted = inv_cap;
+- break;
+- }
+- }
+-
+- rcu_read_unlock();
+- }
+-
+- trace_sched_cpu_capacity_tp(rq);
++ cpu_rq(cpu)->cpu_capacity = capacity;
++ trace_sched_cpu_capacity_tp(cpu_rq(cpu));
+
+ sdg->sgc->capacity = capacity;
+ sdg->sgc->min_capacity = capacity;
+@@ -10183,24 +10168,23 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+ */
+ update_sd_lb_stats(env, &sds);
+
+- if (sched_energy_enabled()) {
+- struct root_domain *rd = env->dst_rq->rd;
+-
+- if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
+- goto out_balanced;
+- }
+-
+- local = &sds.local_stat;
+- busiest = &sds.busiest_stat;
+-
+ /* There is no busy sibling group to pull tasks from */
+ if (!sds.busiest)
+ goto out_balanced;
+
++ busiest = &sds.busiest_stat;
++
+ /* Misfit tasks should be dealt with regardless of the avg load */
+ if (busiest->group_type == group_misfit_task)
+ goto force_balance;
+
++ if (sched_energy_enabled()) {
++ struct root_domain *rd = env->dst_rq->rd;
++
++ if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
++ goto out_balanced;
++ }
++
+ /* ASYM feature bypasses nice load balance check */
+ if (busiest->group_type == group_asym_packing)
+ goto force_balance;
+@@ -10213,6 +10197,7 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+ if (busiest->group_type == group_imbalanced)
+ goto force_balance;
+
++ local = &sds.local_stat;
+ /*
+ * If the local group is busier than the selected busiest group
+ * don't try and pull any tasks.
+diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
+index 5f18460f62f0f..d6d488e8eb554 100644
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1041,7 +1041,6 @@ struct rq {
+
+ unsigned long cpu_capacity;
+ unsigned long cpu_capacity_orig;
+- unsigned long cpu_capacity_inverted;
+
+ struct balance_callback *balance_callback;
+
+@@ -2879,24 +2878,6 @@ static inline unsigned long capacity_orig_of(int cpu)
+ return cpu_rq(cpu)->cpu_capacity_orig;
+ }
+
+-/*
+- * Returns inverted capacity if the CPU is in capacity inversion state.
+- * 0 otherwise.
+- *
+- * Capacity inversion detection only considers thermal impact where actual
+- * performance points (OPPs) gets dropped.
+- *
+- * Capacity inversion state happens when another performance domain that has
+- * equal or lower capacity_orig_of() becomes effectively larger than the perf
+- * domain this CPU belongs to due to thermal pressure throttling it hard.
+- *
+- * See comment in update_cpu_capacity().
+- */
+-static inline unsigned long cpu_in_capacity_inversion(int cpu)
+-{
+- return cpu_rq(cpu)->cpu_capacity_inverted;
+-}
+-
+ /**
+ * enum cpu_util_type - CPU utilization type
+ * @FREQUENCY_UTIL: Utilization used to select frequency
+diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
+index c49ed619a64dd..de55107aef5d5 100644
+--- a/kernel/trace/ring_buffer.c
++++ b/kernel/trace/ring_buffer.c
+@@ -544,6 +544,7 @@ struct trace_buffer {
+ unsigned flags;
+ int cpus;
+ atomic_t record_disabled;
++ atomic_t resizing;
+ cpumask_var_t cpumask;
+
+ struct lock_class_key *reader_lock_key;
+@@ -2173,7 +2174,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+
+ /* prevent another thread from changing buffer sizes */
+ mutex_lock(&buffer->mutex);
+-
++ atomic_inc(&buffer->resizing);
+
+ if (cpu_id == RING_BUFFER_ALL_CPUS) {
+ /*
+@@ -2312,6 +2313,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ atomic_dec(&buffer->record_disabled);
+ }
+
++ atomic_dec(&buffer->resizing);
+ mutex_unlock(&buffer->mutex);
+ return 0;
+
+@@ -2332,6 +2334,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
+ }
+ }
+ out_err_unlock:
++ atomic_dec(&buffer->resizing);
+ mutex_unlock(&buffer->mutex);
+ return err;
+ }
+@@ -5539,6 +5542,15 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ if (local_read(&cpu_buffer_b->committing))
+ goto out_dec;
+
++ /*
++ * When resize is in progress, we cannot swap it because
++ * it will mess the state of the cpu buffer.
++ */
++ if (atomic_read(&buffer_a->resizing))
++ goto out_dec;
++ if (atomic_read(&buffer_b->resizing))
++ goto out_dec;
++
+ buffer_a->buffers[cpu] = cpu_buffer_b;
+ buffer_b->buffers[cpu] = cpu_buffer_a;
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 709af9631be45..af33c5a4166d4 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1885,9 +1885,10 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+ * place on this CPU. We fail to record, but we reset
+ * the max trace buffer (no one writes directly to it)
+ * and flag that it failed.
++ * Another reason is resize is in progress.
+ */
+ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
+- "Failed to swap buffers due to commit in progress\n");
++ "Failed to swap buffers due to commit or resize in progress\n");
+ }
+
+ WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index d3ffa0fd49e57..c38ec6efec0f7 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -1581,9 +1581,37 @@ static inline void destroy_compound_gigantic_page(struct page *page,
+ unsigned int order) { }
+ #endif
+
++static inline void __clear_hugetlb_destructor(struct hstate *h,
++ struct page *page)
++{
++ lockdep_assert_held(&hugetlb_lock);
++
++ /*
++ * Very subtle
++ *
++ * For non-gigantic pages set the destructor to the normal compound
++ * page dtor. This is needed in case someone takes an additional
++ * temporary ref to the page, and freeing is delayed until they drop
++ * their reference.
++ *
++ * For gigantic pages set the destructor to the null dtor. This
++ * destructor will never be called. Before freeing the gigantic
++ * page destroy_compound_gigantic_folio will turn the folio into a
++ * simple group of pages. After this the destructor does not
++ * apply.
++ *
++ */
++ if (hstate_is_gigantic(h))
++ set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
++ else
++ set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
++}
++
+ /*
+- * Remove hugetlb page from lists, and update dtor so that page appears
+- * as just a compound page.
++ * Remove hugetlb page from lists.
++ * If vmemmap exists for the page, update dtor so that the page appears
++ * as just a compound page. Otherwise, wait until after allocating vmemmap
++ * to update dtor.
+ *
+ * A reference is held on the page, except in the case of demote.
+ *
+@@ -1614,31 +1642,19 @@ static void __remove_hugetlb_page(struct hstate *h, struct page *page,
+ }
+
+ /*
+- * Very subtle
+- *
+- * For non-gigantic pages set the destructor to the normal compound
+- * page dtor. This is needed in case someone takes an additional
+- * temporary ref to the page, and freeing is delayed until they drop
+- * their reference.
+- *
+- * For gigantic pages set the destructor to the null dtor. This
+- * destructor will never be called. Before freeing the gigantic
+- * page destroy_compound_gigantic_page will turn the compound page
+- * into a simple group of pages. After this the destructor does not
+- * apply.
+- *
+- * This handles the case where more than one ref is held when and
+- * after update_and_free_page is called.
+- *
+- * In the case of demote we do not ref count the page as it will soon
+- * be turned into a page of smaller size.
++ * We can only clear the hugetlb destructor after allocating vmemmap
++ * pages. Otherwise, someone (memory error handling) may try to write
++ * to tail struct pages.
++ */
++ if (!HPageVmemmapOptimized(page))
++ __clear_hugetlb_destructor(h, page);
++
++ /*
++ * In the case of demote we do not ref count the page as it will soon
++ * be turned into a page of smaller size.
+ */
+ if (!demote)
+ set_page_refcounted(page);
+- if (hstate_is_gigantic(h))
+- set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
+- else
+- set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
+
+ h->nr_huge_pages--;
+ h->nr_huge_pages_node[nid]--;
+@@ -1706,6 +1722,7 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
+ {
+ int i;
+ struct page *subpage;
++ bool clear_dtor = HPageVmemmapOptimized(page);
+
+ if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+ return;
+@@ -1736,6 +1753,16 @@ static void __update_and_free_page(struct hstate *h, struct page *page)
+ if (unlikely(PageHWPoison(page)))
+ hugetlb_clear_page_hwpoison(page);
+
++ /*
++ * If vmemmap pages were allocated above, then we need to clear the
++ * hugetlb destructor under the hugetlb lock.
++ */
++ if (clear_dtor) {
++ spin_lock_irq(&hugetlb_lock);
++ __clear_hugetlb_destructor(h, page);
++ spin_unlock_irq(&hugetlb_lock);
++ }
++
+ for (i = 0; i < pages_per_huge_page(h); i++) {
+ subpage = nth_page(page, i);
+ subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
+diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
+index d03941cace2c4..37f755c9a1b70 100644
+--- a/mm/zsmalloc.c
++++ b/mm/zsmalloc.c
+@@ -33,8 +33,7 @@
+ /*
+ * lock ordering:
+ * page_lock
+- * pool->migrate_lock
+- * class->lock
++ * pool->lock
+ * zspage->lock
+ */
+
+@@ -192,7 +191,6 @@ static const int fullness_threshold_frac = 4;
+ static size_t huge_class_size;
+
+ struct size_class {
+- spinlock_t lock;
+ struct list_head fullness_list[NR_ZS_FULLNESS];
+ /*
+ * Size of objects stored in this class. Must be multiple
+@@ -247,8 +245,8 @@ struct zs_pool {
+ #ifdef CONFIG_COMPACTION
+ struct work_struct free_work;
+ #endif
+- /* protect page/zspage migration */
+- rwlock_t migrate_lock;
++ spinlock_t lock;
++ atomic_t compaction_in_progress;
+ };
+
+ struct zspage {
+@@ -355,7 +353,7 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+ kmem_cache_free(pool->zspage_cachep, zspage);
+ }
+
+-/* class->lock(which owns the handle) synchronizes races */
++/* pool->lock(which owns the handle) synchronizes races */
+ static void record_obj(unsigned long handle, unsigned long obj)
+ {
+ *(unsigned long *)handle = obj;
+@@ -452,7 +450,7 @@ static __maybe_unused int is_first_page(struct page *page)
+ return PagePrivate(page);
+ }
+
+-/* Protected by class->lock */
++/* Protected by pool->lock */
+ static inline int get_zspage_inuse(struct zspage *zspage)
+ {
+ return zspage->inuse;
+@@ -597,13 +595,13 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
+ if (class->index != i)
+ continue;
+
+- spin_lock(&class->lock);
++ spin_lock(&pool->lock);
+ class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
+ class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
+ obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+ obj_used = zs_stat_get(class, OBJ_USED);
+ freeable = zs_can_compact(class);
+- spin_unlock(&class->lock);
++ spin_unlock(&pool->lock);
+
+ objs_per_zspage = class->objs_per_zspage;
+ pages_used = obj_allocated / objs_per_zspage *
+@@ -916,7 +914,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,
+
+ get_zspage_mapping(zspage, &class_idx, &fg);
+
+- assert_spin_locked(&class->lock);
++ assert_spin_locked(&pool->lock);
+
+ VM_BUG_ON(get_zspage_inuse(zspage));
+ VM_BUG_ON(fg != ZS_EMPTY);
+@@ -1247,19 +1245,19 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+ BUG_ON(in_interrupt());
+
+ /* It guarantees it can get zspage from handle safely */
+- read_lock(&pool->migrate_lock);
++ spin_lock(&pool->lock);
+ obj = handle_to_obj(handle);
+ obj_to_location(obj, &page, &obj_idx);
+ zspage = get_zspage(page);
+
+ /*
+- * migration cannot move any zpages in this zspage. Here, class->lock
++ * migration cannot move any zpages in this zspage. Here, pool->lock
+ * is too heavy since callers would take some time until they calls
+ * zs_unmap_object API so delegate the locking from class to zspage
+ * which is smaller granularity.
+ */
+ migrate_read_lock(zspage);
+- read_unlock(&pool->migrate_lock);
++ spin_unlock(&pool->lock);
+
+ class = zspage_class(pool, zspage);
+ off = (class->size * obj_idx) & ~PAGE_MASK;
+@@ -1412,8 +1410,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
+ size += ZS_HANDLE_SIZE;
+ class = pool->size_class[get_size_class_index(size)];
+
+- /* class->lock effectively protects the zpage migration */
+- spin_lock(&class->lock);
++ /* pool->lock effectively protects the zpage migration */
++ spin_lock(&pool->lock);
+ zspage = find_get_zspage(class);
+ if (likely(zspage)) {
+ obj = obj_malloc(pool, zspage, handle);
+@@ -1421,12 +1419,12 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
+ fix_fullness_group(class, zspage);
+ record_obj(handle, obj);
+ class_stat_inc(class, OBJ_USED, 1);
+- spin_unlock(&class->lock);
++ spin_unlock(&pool->lock);
+
+ return handle;
+ }
+
+- spin_unlock(&class->lock);
++ spin_unlock(&pool->lock);
+
+ zspage = alloc_zspage(pool, class, gfp);
+ if (!zspage) {
+@@ -1434,7 +1432,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
+ return (unsigned long)ERR_PTR(-ENOMEM);
+ }
+
+- spin_lock(&class->lock);
++ spin_lock(&pool->lock);
+ obj = obj_malloc(pool, zspage, handle);
+ newfg = get_fullness_group(class, zspage);
+ insert_zspage(class, zspage, newfg);
+@@ -1447,7 +1445,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
+
+ /* We completely set up zspage so mark them as movable */
+ SetZsPageMovable(pool, zspage);
+- spin_unlock(&class->lock);
++ spin_unlock(&pool->lock);
+
+ return handle;
+ }
+@@ -1491,16 +1489,14 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
+ return;
+
+ /*
+- * The pool->migrate_lock protects the race with zpage's migration
++ * The pool->lock protects the race with zpage's migration
+ * so it's safe to get the page from handle.
+ */
+- read_lock(&pool->migrate_lock);
++ spin_lock(&pool->lock);
+ obj = handle_to_obj(handle);
+ obj_to_page(obj, &f_page);
+ zspage = get_zspage(f_page);
+ class = zspage_class(pool, zspage);
+- spin_lock(&class->lock);
+- read_unlock(&pool->migrate_lock);
+
+ obj_free(class->size, obj);
+ class_stat_dec(class, OBJ_USED, 1);
+@@ -1510,7 +1506,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
+
+ free_zspage(pool, class, zspage);
+ out:
+- spin_unlock(&class->lock);
++ spin_unlock(&pool->lock);
+ cache_free_handle(pool, handle);
+ }
+ EXPORT_SYMBOL_GPL(zs_free);
+@@ -1821,6 +1817,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
+
+ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
+ {
++ struct zs_pool *pool;
+ struct zspage *zspage;
+
+ /*
+@@ -1831,9 +1828,10 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
+ VM_BUG_ON_PAGE(PageIsolated(page), page);
+
+ zspage = get_zspage(page);
+- migrate_write_lock(zspage);
++ pool = zspage->pool;
++ spin_lock(&pool->lock);
+ inc_zspage_isolation(zspage);
+- migrate_write_unlock(zspage);
++ spin_unlock(&pool->lock);
+
+ return true;
+ }
+@@ -1867,16 +1865,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
+ pool = zspage->pool;
+
+ /*
+- * The pool migrate_lock protects the race between zpage migration
++ * The pool's lock protects the race between zpage migration
+ * and zs_free.
+ */
+- write_lock(&pool->migrate_lock);
++ spin_lock(&pool->lock);
+ class = zspage_class(pool, zspage);
+
+- /*
+- * the class lock protects zpage alloc/free in the zspage.
+- */
+- spin_lock(&class->lock);
+ /* the migrate_write_lock protects zpage access via zs_map_object */
+ migrate_write_lock(zspage);
+
+@@ -1904,13 +1898,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
+ kunmap_atomic(s_addr);
+
+ replace_sub_page(class, zspage, newpage, page);
++ dec_zspage_isolation(zspage);
+ /*
+ * Since we complete the data copy and set up new zspage structure,
+- * it's okay to release migration_lock.
++ * it's okay to release the pool's lock.
+ */
+- write_unlock(&pool->migrate_lock);
+- spin_unlock(&class->lock);
+- dec_zspage_isolation(zspage);
++ spin_unlock(&pool->lock);
+ migrate_write_unlock(zspage);
+
+ get_page(newpage);
+@@ -1927,15 +1920,17 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
+
+ static void zs_page_putback(struct page *page)
+ {
++ struct zs_pool *pool;
+ struct zspage *zspage;
+
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+ zspage = get_zspage(page);
+- migrate_write_lock(zspage);
++ pool = zspage->pool;
++ spin_lock(&pool->lock);
+ dec_zspage_isolation(zspage);
+- migrate_write_unlock(zspage);
++ spin_unlock(&pool->lock);
+ }
+
+ static const struct movable_operations zsmalloc_mops = {
+@@ -1964,9 +1959,9 @@ static void async_free_zspage(struct work_struct *work)
+ if (class->index != i)
+ continue;
+
+- spin_lock(&class->lock);
++ spin_lock(&pool->lock);
+ list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
+- spin_unlock(&class->lock);
++ spin_unlock(&pool->lock);
+ }
+
+ list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
+@@ -1976,9 +1971,9 @@ static void async_free_zspage(struct work_struct *work)
+ get_zspage_mapping(zspage, &class_idx, &fullness);
+ VM_BUG_ON(fullness != ZS_EMPTY);
+ class = pool->size_class[class_idx];
+- spin_lock(&class->lock);
++ spin_lock(&pool->lock);
+ __free_zspage(pool, class, zspage);
+- spin_unlock(&class->lock);
++ spin_unlock(&pool->lock);
+ }
+ };
+
+@@ -2039,10 +2034,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
+ struct zspage *dst_zspage = NULL;
+ unsigned long pages_freed = 0;
+
+- /* protect the race between zpage migration and zs_free */
+- write_lock(&pool->migrate_lock);
+- /* protect zpage allocation/free */
+- spin_lock(&class->lock);
++ /*
++ * protect the race between zpage migration and zs_free
++ * as well as zpage allocation/free
++ */
++ spin_lock(&pool->lock);
+ while ((src_zspage = isolate_zspage(class, true))) {
+ /* protect someone accessing the zspage(i.e., zs_map_object) */
+ migrate_write_lock(src_zspage);
+@@ -2067,7 +2063,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
+ putback_zspage(class, dst_zspage);
+ migrate_write_unlock(dst_zspage);
+ dst_zspage = NULL;
+- if (rwlock_is_contended(&pool->migrate_lock))
++ if (spin_is_contended(&pool->lock))
+ break;
+ }
+
+@@ -2084,11 +2080,9 @@ static unsigned long __zs_compact(struct zs_pool *pool,
+ pages_freed += class->pages_per_zspage;
+ } else
+ migrate_write_unlock(src_zspage);
+- spin_unlock(&class->lock);
+- write_unlock(&pool->migrate_lock);
++ spin_unlock(&pool->lock);
+ cond_resched();
+- write_lock(&pool->migrate_lock);
+- spin_lock(&class->lock);
++ spin_lock(&pool->lock);
+ }
+
+ if (src_zspage) {
+@@ -2096,8 +2090,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
+ migrate_write_unlock(src_zspage);
+ }
+
+- spin_unlock(&class->lock);
+- write_unlock(&pool->migrate_lock);
++ spin_unlock(&pool->lock);
+
+ return pages_freed;
+ }
+@@ -2108,6 +2101,15 @@ unsigned long zs_compact(struct zs_pool *pool)
+ struct size_class *class;
+ unsigned long pages_freed = 0;
+
++ /*
++ * Pool compaction is performed under pool->lock so it is basically
++ * single-threaded. Having more than one thread in __zs_compact()
++ * will increase pool->lock contention, which will impact other
++ * zsmalloc operations that need pool->lock.
++ */
++ if (atomic_xchg(&pool->compaction_in_progress, 1))
++ return 0;
++
+ for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
+ class = pool->size_class[i];
+ if (class->index != i)
+@@ -2115,6 +2117,7 @@ unsigned long zs_compact(struct zs_pool *pool)
+ pages_freed += __zs_compact(pool, class);
+ }
+ atomic_long_add(pages_freed, &pool->stats.pages_compacted);
++ atomic_set(&pool->compaction_in_progress, 0);
+
+ return pages_freed;
+ }
+@@ -2200,7 +2203,8 @@ struct zs_pool *zs_create_pool(const char *name)
+ return NULL;
+
+ init_deferred_free(pool);
+- rwlock_init(&pool->migrate_lock);
++ spin_lock_init(&pool->lock);
++ atomic_set(&pool->compaction_in_progress, 0);
+
+ pool->name = kstrdup(name, GFP_KERNEL);
+ if (!pool->name)
+@@ -2271,7 +2275,6 @@ struct zs_pool *zs_create_pool(const char *name)
+ class->index = i;
+ class->pages_per_zspage = pages_per_zspage;
+ class->objs_per_zspage = objs_per_zspage;
+- spin_lock_init(&class->lock);
+ pool->size_class[i] = class;
+ for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
+ fullness++)
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 02fc9961464cf..a7899857aee5d 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -6375,9 +6375,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
+ if (!chan)
+ goto done;
+
++ chan = l2cap_chan_hold_unless_zero(chan);
++ if (!chan)
++ goto done;
++
+ l2cap_chan_lock(chan);
+ l2cap_chan_del(chan, ECONNREFUSED);
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ done:
+ mutex_unlock(&conn->chan_lock);
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 89c94f3e96bc3..d2e8565d0b33f 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -7277,7 +7277,7 @@ static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+- memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));
++ memcpy(&rp.addr, &cp->addr, sizeof(rp.addr));
+
+ status = mgmt_status(err);
+ if (status == MGMT_STATUS_SUCCESS) {
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 3b5304f084ef3..509773919d302 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -3099,7 +3099,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
+ if (mem_cgroup_sockets_enabled && sk->sk_memcg)
+ mem_cgroup_uncharge_skmem(sk->sk_memcg, amount);
+
+- if (sk_under_memory_pressure(sk) &&
++ if (sk_under_global_memory_pressure(sk) &&
+ (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)))
+ sk_leave_memory_pressure(sk);
+ }
+diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
+index 8c2bd1d9ddce3..615c1dcf3a28e 100644
+--- a/net/ipv4/ip_vti.c
++++ b/net/ipv4/ip_vti.c
+@@ -287,12 +287,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ break;
+ case htons(ETH_P_IPV6):
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ break;
+ default:
+ goto tx_err;
+diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
+index 0b5d0a2867a8c..cf354c29ec123 100644
+--- a/net/ipv4/tcp_timer.c
++++ b/net/ipv4/tcp_timer.c
+@@ -586,7 +586,9 @@ out_reset_timer:
+ tcp_stream_is_thin(tp) &&
+ icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
+ icsk->icsk_backoff = 0;
+- icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
++ icsk->icsk_rto = clamp(__tcp_set_rto(tp),
++ tcp_rto_min(sk),
++ TCP_RTO_MAX);
+ } else {
+ /* Use normal (exponential) backoff */
+ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
+diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
+index 151337d7f67b4..cb71463bbbabd 100644
+--- a/net/ipv6/ip6_vti.c
++++ b/net/ipv6/ip6_vti.c
+@@ -570,12 +570,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ vti6_addr_conflict(t, ipv6_hdr(skb)))
+ goto tx_err;
+
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ break;
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ break;
+ default:
+ goto tx_err;
+diff --git a/net/key/af_key.c b/net/key/af_key.c
+index 8c21de50eadf8..8a8f2429d5d99 100644
+--- a/net/key/af_key.c
++++ b/net/key/af_key.c
+@@ -1848,9 +1848,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
+ if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+ struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+
+- if ((xfilter->sadb_x_filter_splen >=
++ if ((xfilter->sadb_x_filter_splen >
+ (sizeof(xfrm_address_t) << 3)) ||
+- (xfilter->sadb_x_filter_dplen >=
++ (xfilter->sadb_x_filter_dplen >
+ (sizeof(xfrm_address_t) << 3))) {
+ mutex_unlock(&pfk->dump_lock);
+ return -EINVAL;
+diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
+index 03af6a2ffd567..17a1b731a76b1 100644
+--- a/net/netfilter/ipvs/ip_vs_ctl.c
++++ b/net/netfilter/ipvs/ip_vs_ctl.c
+@@ -1798,6 +1798,7 @@ static int
+ proc_do_sync_threshold(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+ {
++ struct netns_ipvs *ipvs = table->extra2;
+ int *valp = table->data;
+ int val[2];
+ int rc;
+@@ -1807,6 +1808,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
+ .mode = table->mode,
+ };
+
++ mutex_lock(&ipvs->sync_mutex);
+ memcpy(val, valp, sizeof(val));
+ rc = proc_dointvec(&tmp, write, buffer, lenp, ppos);
+ if (write) {
+@@ -1816,6 +1818,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
+ else
+ memcpy(valp, val, sizeof(val));
+ }
++ mutex_unlock(&ipvs->sync_mutex);
+ return rc;
+ }
+
+@@ -4080,6 +4083,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs)
+ ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD;
+ ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD;
+ tbl[idx].data = &ipvs->sysctl_sync_threshold;
++ tbl[idx].extra2 = ipvs;
+ tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold);
+ ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD;
+ tbl[idx++].data = &ipvs->sysctl_sync_refresh_period;
+diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
+index 895e0ca542994..7247af51bdfc4 100644
+--- a/net/netfilter/nf_conntrack_proto_sctp.c
++++ b/net/netfilter/nf_conntrack_proto_sctp.c
+@@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = {
+ [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS,
+ [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS,
+ [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS,
+- [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000,
+- [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000,
++ [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS,
++ [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS,
+ [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS,
+ [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS,
+ };
+@@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = {
+ {
+ /* ORIGINAL */
+ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */
+-/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW},
++/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW},
+ /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL},
+ /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+ /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL},
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index f6e6273838859..4c2df7af73f76 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -6874,6 +6874,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
+ ret = __nft_set_catchall_flush(ctx, set, &elem);
+ if (ret < 0)
+ break;
++ nft_set_elem_change_active(ctx->net, set, ext);
+ }
+
+ return ret;
+diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
+index e65a83328b554..cf9a1ae87d9b1 100644
+--- a/net/netfilter/nft_dynset.c
++++ b/net/netfilter/nft_dynset.c
+@@ -191,6 +191,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx,
+ if (IS_ERR(set))
+ return PTR_ERR(set);
+
++ if (set->flags & NFT_SET_OBJECT)
++ return -EOPNOTSUPP;
++
+ if (set->ops->update == NULL)
+ return -EOPNOTSUPP;
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index a81829c10feab..32cfd0a84b0e2 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1665,6 +1665,17 @@ static void nft_pipapo_commit(const struct nft_set *set)
+ priv->clone = new_clone;
+ }
+
++static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set)
++{
++#ifdef CONFIG_PROVE_LOCKING
++ const struct net *net = read_pnet(&set->net);
++
++ return lockdep_is_held(&nft_pernet(net)->commit_mutex);
++#else
++ return true;
++#endif
++}
++
+ static void nft_pipapo_abort(const struct nft_set *set)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+@@ -1673,7 +1684,7 @@ static void nft_pipapo_abort(const struct nft_set *set)
+ if (!priv->dirty)
+ return;
+
+- m = rcu_dereference(priv->match);
++ m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set));
+
+ new_clone = pipapo_clone(m);
+ if (IS_ERR(new_clone))
+diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
+index 5920fdca12875..3c7b245354096 100644
+--- a/net/openvswitch/datapath.c
++++ b/net/openvswitch/datapath.c
+@@ -1806,7 +1806,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
+ parms.port_no = OVSP_LOCAL;
+ parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
+ parms.desired_ifindex = a[OVS_DP_ATTR_IFINDEX]
+- ? nla_get_u32(a[OVS_DP_ATTR_IFINDEX]) : 0;
++ ? nla_get_s32(a[OVS_DP_ATTR_IFINDEX]) : 0;
+
+ /* So far only local changes have been made, now need the lock. */
+ ovs_lock();
+@@ -2026,7 +2026,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
+ [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
+ [OVS_DP_ATTR_MASKS_CACHE_SIZE] = NLA_POLICY_RANGE(NLA_U32, 0,
+ PCPU_MIN_UNIT_SIZE / sizeof(struct mask_cache_entry)),
+- [OVS_DP_ATTR_IFINDEX] = {.type = NLA_U32 },
++ [OVS_DP_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
+ };
+
+ static const struct genl_small_ops dp_datapath_genl_ops[] = {
+@@ -2276,7 +2276,7 @@ restart:
+ parms.port_no = port_no;
+ parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
+ parms.desired_ifindex = a[OVS_VPORT_ATTR_IFINDEX]
+- ? nla_get_u32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
++ ? nla_get_s32(a[OVS_VPORT_ATTR_IFINDEX]) : 0;
+
+ vport = new_vport(&parms);
+ err = PTR_ERR(vport);
+@@ -2513,7 +2513,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
+ [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
+ [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
+ [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
+- [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
++ [OVS_VPORT_ATTR_IFINDEX] = NLA_POLICY_MIN(NLA_S32, 0),
+ [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
+ };
+
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 5712a5297bd01..84219c5121bc2 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -379,8 +379,8 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
+ sk->sk_state = SMC_INIT;
+ sk->sk_destruct = smc_destruct;
+ sk->sk_protocol = protocol;
+- WRITE_ONCE(sk->sk_sndbuf, READ_ONCE(net->smc.sysctl_wmem));
+- WRITE_ONCE(sk->sk_rcvbuf, READ_ONCE(net->smc.sysctl_rmem));
++ WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));
++ WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem));
+ smc = smc_sk(sk);
+ INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+ INIT_WORK(&smc->connect_work, smc_connect_work);
+diff --git a/net/smc/smc.h b/net/smc/smc.h
+index 5ed765ea0c731..1d36720fc019c 100644
+--- a/net/smc/smc.h
++++ b/net/smc/smc.h
+@@ -161,7 +161,7 @@ struct smc_connection {
+
+ struct smc_buf_desc *sndbuf_desc; /* send buffer descriptor */
+ struct smc_buf_desc *rmb_desc; /* RMBE descriptor */
+- int rmbe_size_short;/* compressed notation */
++ int rmbe_size_comp; /* compressed notation */
+ int rmbe_update_limit;
+ /* lower limit for consumer
+ * cursor update
+diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
+index dfb9797f7bc63..9b8999e2afca5 100644
+--- a/net/smc/smc_clc.c
++++ b/net/smc/smc_clc.c
+@@ -1002,7 +1002,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
+ clc->hdr.typev1 = SMC_TYPE_D;
+ clc->d0.gid = conn->lgr->smcd->local_gid;
+ clc->d0.token = conn->rmb_desc->token;
+- clc->d0.dmbe_size = conn->rmbe_size_short;
++ clc->d0.dmbe_size = conn->rmbe_size_comp;
+ clc->d0.dmbe_idx = 0;
+ memcpy(&clc->d0.linkid, conn->lgr->id, SMC_LGR_ID_SIZE);
+ if (version == SMC_V1) {
+@@ -1045,7 +1045,7 @@ static int smc_clc_send_confirm_accept(struct smc_sock *smc,
+ clc->r0.qp_mtu = min(link->path_mtu, link->peer_mtu);
+ break;
+ }
+- clc->r0.rmbe_size = conn->rmbe_size_short;
++ clc->r0.rmbe_size = conn->rmbe_size_comp;
+ clc->r0.rmb_dma_addr = conn->rmb_desc->is_vm ?
+ cpu_to_be64((uintptr_t)conn->rmb_desc->cpu_addr) :
+ cpu_to_be64((u64)sg_dma_address
+diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
+index f82f43573a159..c676d92af7b7d 100644
+--- a/net/smc/smc_core.c
++++ b/net/smc/smc_core.c
+@@ -852,8 +852,8 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
+ lgr->freeing = 0;
+ lgr->vlan_id = ini->vlan_id;
+ refcount_set(&lgr->refcnt, 1); /* set lgr refcnt to 1 */
+- mutex_init(&lgr->sndbufs_lock);
+- mutex_init(&lgr->rmbs_lock);
++ init_rwsem(&lgr->sndbufs_lock);
++ init_rwsem(&lgr->rmbs_lock);
+ rwlock_init(&lgr->conns_lock);
+ for (i = 0; i < SMC_RMBE_SIZES; i++) {
+ INIT_LIST_HEAD(&lgr->sndbufs[i]);
+@@ -1095,7 +1095,7 @@ err_out:
+ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
+ struct smc_link_group *lgr)
+ {
+- struct mutex *lock; /* lock buffer list */
++ struct rw_semaphore *lock; /* lock buffer list */
+ int rc;
+
+ if (is_rmb && buf_desc->is_conf_rkey && !list_empty(&lgr->list)) {
+@@ -1115,9 +1115,9 @@ static void smcr_buf_unuse(struct smc_buf_desc *buf_desc, bool is_rmb,
+ /* buf registration failed, reuse not possible */
+ lock = is_rmb ? &lgr->rmbs_lock :
+ &lgr->sndbufs_lock;
+- mutex_lock(lock);
++ down_write(lock);
+ list_del(&buf_desc->list);
+- mutex_unlock(lock);
++ up_write(lock);
+
+ smc_buf_free(lgr, is_rmb, buf_desc);
+ } else {
+@@ -1220,15 +1220,16 @@ static void smcr_buf_unmap_lgr(struct smc_link *lnk)
+ int i;
+
+ for (i = 0; i < SMC_RMBE_SIZES; i++) {
+- mutex_lock(&lgr->rmbs_lock);
++ down_write(&lgr->rmbs_lock);
+ list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list)
+ smcr_buf_unmap_link(buf_desc, true, lnk);
+- mutex_unlock(&lgr->rmbs_lock);
+- mutex_lock(&lgr->sndbufs_lock);
++ up_write(&lgr->rmbs_lock);
++
++ down_write(&lgr->sndbufs_lock);
+ list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i],
+ list)
+ smcr_buf_unmap_link(buf_desc, false, lnk);
+- mutex_unlock(&lgr->sndbufs_lock);
++ up_write(&lgr->sndbufs_lock);
+ }
+ }
+
+@@ -1986,19 +1987,19 @@ int smc_uncompress_bufsize(u8 compressed)
+ * buffer size; if not available, return NULL
+ */
+ static struct smc_buf_desc *smc_buf_get_slot(int compressed_bufsize,
+- struct mutex *lock,
++ struct rw_semaphore *lock,
+ struct list_head *buf_list)
+ {
+ struct smc_buf_desc *buf_slot;
+
+- mutex_lock(lock);
++ down_read(lock);
+ list_for_each_entry(buf_slot, buf_list, list) {
+ if (cmpxchg(&buf_slot->used, 0, 1) == 0) {
+- mutex_unlock(lock);
++ up_read(lock);
+ return buf_slot;
+ }
+ }
+- mutex_unlock(lock);
++ up_read(lock);
+ return NULL;
+ }
+
+@@ -2107,13 +2108,13 @@ int smcr_link_reg_buf(struct smc_link *link, struct smc_buf_desc *buf_desc)
+ return 0;
+ }
+
+-static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
++static int _smcr_buf_map_lgr(struct smc_link *lnk, struct rw_semaphore *lock,
+ struct list_head *lst, bool is_rmb)
+ {
+ struct smc_buf_desc *buf_desc, *bf;
+ int rc = 0;
+
+- mutex_lock(lock);
++ down_write(lock);
+ list_for_each_entry_safe(buf_desc, bf, lst, list) {
+ if (!buf_desc->used)
+ continue;
+@@ -2122,7 +2123,7 @@ static int _smcr_buf_map_lgr(struct smc_link *lnk, struct mutex *lock,
+ goto out;
+ }
+ out:
+- mutex_unlock(lock);
++ up_write(lock);
+ return rc;
+ }
+
+@@ -2155,37 +2156,37 @@ int smcr_buf_reg_lgr(struct smc_link *lnk)
+ int i, rc = 0;
+
+ /* reg all RMBs for a new link */
+- mutex_lock(&lgr->rmbs_lock);
++ down_write(&lgr->rmbs_lock);
+ for (i = 0; i < SMC_RMBE_SIZES; i++) {
+ list_for_each_entry_safe(buf_desc, bf, &lgr->rmbs[i], list) {
+ if (!buf_desc->used)
+ continue;
+ rc = smcr_link_reg_buf(lnk, buf_desc);
+ if (rc) {
+- mutex_unlock(&lgr->rmbs_lock);
++ up_write(&lgr->rmbs_lock);
+ return rc;
+ }
+ }
+ }
+- mutex_unlock(&lgr->rmbs_lock);
++ up_write(&lgr->rmbs_lock);
+
+ if (lgr->buf_type == SMCR_PHYS_CONT_BUFS)
+ return rc;
+
+ /* reg all vzalloced sndbufs for a new link */
+- mutex_lock(&lgr->sndbufs_lock);
++ down_write(&lgr->sndbufs_lock);
+ for (i = 0; i < SMC_RMBE_SIZES; i++) {
+ list_for_each_entry_safe(buf_desc, bf, &lgr->sndbufs[i], list) {
+ if (!buf_desc->used || !buf_desc->is_vm)
+ continue;
+ rc = smcr_link_reg_buf(lnk, buf_desc);
+ if (rc) {
+- mutex_unlock(&lgr->sndbufs_lock);
++ up_write(&lgr->sndbufs_lock);
+ return rc;
+ }
+ }
+ }
+- mutex_unlock(&lgr->sndbufs_lock);
++ up_write(&lgr->sndbufs_lock);
+ return rc;
+ }
+
+@@ -2304,31 +2305,30 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
+ struct smc_connection *conn = &smc->conn;
+ struct smc_link_group *lgr = conn->lgr;
+ struct list_head *buf_list;
+- int bufsize, bufsize_short;
++ int bufsize, bufsize_comp;
++ struct rw_semaphore *lock; /* lock buffer list */
+ bool is_dgraded = false;
+- struct mutex *lock; /* lock buffer list */
+- int sk_buf_size;
+
+ if (is_rmb)
+ /* use socket recv buffer size (w/o overhead) as start value */
+- sk_buf_size = smc->sk.sk_rcvbuf;
++ bufsize = smc->sk.sk_rcvbuf / 2;
+ else
+ /* use socket send buffer size (w/o overhead) as start value */
+- sk_buf_size = smc->sk.sk_sndbuf;
++ bufsize = smc->sk.sk_sndbuf / 2;
+
+- for (bufsize_short = smc_compress_bufsize(sk_buf_size, is_smcd, is_rmb);
+- bufsize_short >= 0; bufsize_short--) {
++ for (bufsize_comp = smc_compress_bufsize(bufsize, is_smcd, is_rmb);
++ bufsize_comp >= 0; bufsize_comp--) {
+ if (is_rmb) {
+ lock = &lgr->rmbs_lock;
+- buf_list = &lgr->rmbs[bufsize_short];
++ buf_list = &lgr->rmbs[bufsize_comp];
+ } else {
+ lock = &lgr->sndbufs_lock;
+- buf_list = &lgr->sndbufs[bufsize_short];
++ buf_list = &lgr->sndbufs[bufsize_comp];
+ }
+- bufsize = smc_uncompress_bufsize(bufsize_short);
++ bufsize = smc_uncompress_bufsize(bufsize_comp);
+
+ /* check for reusable slot in the link group */
+- buf_desc = smc_buf_get_slot(bufsize_short, lock, buf_list);
++ buf_desc = smc_buf_get_slot(bufsize_comp, lock, buf_list);
+ if (buf_desc) {
+ buf_desc->is_dma_need_sync = 0;
+ SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
+@@ -2354,9 +2354,9 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
+ SMC_STAT_RMB_ALLOC(smc, is_smcd, is_rmb);
+ SMC_STAT_RMB_SIZE(smc, is_smcd, is_rmb, bufsize);
+ buf_desc->used = 1;
+- mutex_lock(lock);
++ down_write(lock);
+ list_add(&buf_desc->list, buf_list);
+- mutex_unlock(lock);
++ up_write(lock);
+ break; /* found */
+ }
+
+@@ -2372,8 +2372,8 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
+
+ if (is_rmb) {
+ conn->rmb_desc = buf_desc;
+- conn->rmbe_size_short = bufsize_short;
+- smc->sk.sk_rcvbuf = bufsize;
++ conn->rmbe_size_comp = bufsize_comp;
++ smc->sk.sk_rcvbuf = bufsize * 2;
+ atomic_set(&conn->bytes_to_rcv, 0);
+ conn->rmbe_update_limit =
+ smc_rmb_wnd_update_limit(buf_desc->len);
+@@ -2381,7 +2381,7 @@ static int __smc_buf_create(struct smc_sock *smc, bool is_smcd, bool is_rmb)
+ smc_ism_set_conn(conn); /* map RMB/smcd_dev to conn */
+ } else {
+ conn->sndbuf_desc = buf_desc;
+- smc->sk.sk_sndbuf = bufsize;
++ smc->sk.sk_sndbuf = bufsize * 2;
+ atomic_set(&conn->sndbuf_space, bufsize);
+ }
+ return 0;
+@@ -2430,9 +2430,9 @@ int smc_buf_create(struct smc_sock *smc, bool is_smcd)
+ /* create rmb */
+ rc = __smc_buf_create(smc, is_smcd, true);
+ if (rc) {
+- mutex_lock(&smc->conn.lgr->sndbufs_lock);
++ down_write(&smc->conn.lgr->sndbufs_lock);
+ list_del(&smc->conn.sndbuf_desc->list);
+- mutex_unlock(&smc->conn.lgr->sndbufs_lock);
++ up_write(&smc->conn.lgr->sndbufs_lock);
+ smc_buf_free(smc->conn.lgr, false, smc->conn.sndbuf_desc);
+ smc->conn.sndbuf_desc = NULL;
+ }
+diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
+index 285f9bd8e232e..6051d92270130 100644
+--- a/net/smc/smc_core.h
++++ b/net/smc/smc_core.h
+@@ -252,9 +252,9 @@ struct smc_link_group {
+ unsigned short vlan_id; /* vlan id of link group */
+
+ struct list_head sndbufs[SMC_RMBE_SIZES];/* tx buffers */
+- struct mutex sndbufs_lock; /* protects tx buffers */
++ struct rw_semaphore sndbufs_lock; /* protects tx buffers */
+ struct list_head rmbs[SMC_RMBE_SIZES]; /* rx buffers */
+- struct mutex rmbs_lock; /* protects rx buffers */
++ struct rw_semaphore rmbs_lock; /* protects rx buffers */
+
+ u8 id[SMC_LGR_ID_SIZE]; /* unique lgr id */
+ struct delayed_work free_work; /* delayed freeing of an lgr */
+diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c
+index 760f8bbff822e..fcb24a0ccf761 100644
+--- a/net/smc/smc_llc.c
++++ b/net/smc/smc_llc.c
+@@ -611,7 +611,7 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
+
+ prim_lnk_idx = link->link_idx;
+ lnk_idx = link_new->link_idx;
+- mutex_lock(&lgr->rmbs_lock);
++ down_write(&lgr->rmbs_lock);
+ ext->num_rkeys = lgr->conns_num;
+ if (!ext->num_rkeys)
+ goto out;
+@@ -631,7 +631,7 @@ static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
+ }
+ len += i * sizeof(ext->rt[0]);
+ out:
+- mutex_unlock(&lgr->rmbs_lock);
++ up_write(&lgr->rmbs_lock);
+ return len;
+ }
+
+@@ -892,7 +892,7 @@ static int smc_llc_cli_rkey_exchange(struct smc_link *link,
+ int rc = 0;
+ int i;
+
+- mutex_lock(&lgr->rmbs_lock);
++ down_write(&lgr->rmbs_lock);
+ num_rkeys_send = lgr->conns_num;
+ buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
+ do {
+@@ -919,7 +919,7 @@ static int smc_llc_cli_rkey_exchange(struct smc_link *link,
+ break;
+ } while (num_rkeys_send || num_rkeys_recv);
+
+- mutex_unlock(&lgr->rmbs_lock);
++ up_write(&lgr->rmbs_lock);
+ return rc;
+ }
+
+@@ -1002,14 +1002,14 @@ static void smc_llc_save_add_link_rkeys(struct smc_link *link,
+ ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
+ SMC_WR_TX_SIZE);
+ max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
+- mutex_lock(&lgr->rmbs_lock);
++ down_write(&lgr->rmbs_lock);
+ for (i = 0; i < max; i++) {
+ smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
+ ext->rt[i].rmb_key,
+ ext->rt[i].rmb_vaddr_new,
+ ext->rt[i].rmb_key_new);
+ }
+- mutex_unlock(&lgr->rmbs_lock);
++ up_write(&lgr->rmbs_lock);
+ }
+
+ static void smc_llc_save_add_link_info(struct smc_link *link,
+@@ -1316,7 +1316,7 @@ static int smc_llc_srv_rkey_exchange(struct smc_link *link,
+ int rc = 0;
+ int i;
+
+- mutex_lock(&lgr->rmbs_lock);
++ down_write(&lgr->rmbs_lock);
+ num_rkeys_send = lgr->conns_num;
+ buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
+ do {
+@@ -1341,7 +1341,7 @@ static int smc_llc_srv_rkey_exchange(struct smc_link *link,
+ smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
+ } while (num_rkeys_send || num_rkeys_recv);
+ out:
+- mutex_unlock(&lgr->rmbs_lock);
++ up_write(&lgr->rmbs_lock);
+ return rc;
+ }
+
+diff --git a/net/smc/smc_sysctl.c b/net/smc/smc_sysctl.c
+index b6f79fabb9d3f..0b2a957ca5f5f 100644
+--- a/net/smc/smc_sysctl.c
++++ b/net/smc/smc_sysctl.c
+@@ -21,6 +21,10 @@
+
+ static int min_sndbuf = SMC_BUF_MIN_SIZE;
+ static int min_rcvbuf = SMC_BUF_MIN_SIZE;
++static int max_sndbuf = INT_MAX / 2;
++static int max_rcvbuf = INT_MAX / 2;
++static const int net_smc_wmem_init = (64 * 1024);
++static const int net_smc_rmem_init = (64 * 1024);
+
+ static struct ctl_table smc_table[] = {
+ {
+@@ -53,6 +57,7 @@ static struct ctl_table smc_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_sndbuf,
++ .extra2 = &max_sndbuf,
+ },
+ {
+ .procname = "rmem",
+@@ -61,6 +66,7 @@ static struct ctl_table smc_table[] = {
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &min_rcvbuf,
++ .extra2 = &max_rcvbuf,
+ },
+ { }
+ };
+@@ -88,8 +94,8 @@ int __net_init smc_sysctl_net_init(struct net *net)
+ net->smc.sysctl_autocorking_size = SMC_AUTOCORKING_DEFAULT_SIZE;
+ net->smc.sysctl_smcr_buf_type = SMCR_PHYS_CONT_BUFS;
+ net->smc.sysctl_smcr_testlink_time = SMC_LLC_TESTLINK_DEFAULT_TIME;
+- WRITE_ONCE(net->smc.sysctl_wmem, READ_ONCE(net->ipv4.sysctl_tcp_wmem[1]));
+- WRITE_ONCE(net->smc.sysctl_rmem, READ_ONCE(net->ipv4.sysctl_tcp_rmem[1]));
++ WRITE_ONCE(net->smc.sysctl_wmem, net_smc_wmem_init);
++ WRITE_ONCE(net->smc.sysctl_rmem, net_smc_rmem_init);
+
+ return 0;
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 78fa620a63981..ca31847a6c70c 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -2290,6 +2290,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
+
+ if (false) {
+ alloc_skb:
++ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->iolock);
+ newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
+@@ -2329,6 +2330,7 @@ alloc_skb:
+ init_scm = false;
+ }
+
++ spin_lock(&other->sk_receive_queue.lock);
+ skb = skb_peek_tail(&other->sk_receive_queue);
+ if (tail && tail == skb) {
+ skb = newskb;
+@@ -2359,14 +2361,11 @@ alloc_skb:
+ refcount_add(size, &sk->sk_wmem_alloc);
+
+ if (newskb) {
+- err = unix_scm_to_skb(&scm, skb, false);
+- if (err)
+- goto err_state_unlock;
+- spin_lock(&other->sk_receive_queue.lock);
++ unix_scm_to_skb(&scm, skb, false);
+ __skb_queue_tail(&other->sk_receive_queue, newskb);
+- spin_unlock(&other->sk_receive_queue.lock);
+ }
+
++ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+ mutex_unlock(&unix_sk(other)->iolock);
+
+diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c
+index 8cbf45a8bcdc2..655fe4ff86212 100644
+--- a/net/xfrm/xfrm_compat.c
++++ b/net/xfrm/xfrm_compat.c
+@@ -108,7 +108,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = {
+ [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
+ [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
+ [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
+- [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
++ [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
+ [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
+ [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
+ [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
+diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c
+index 94a3609548b11..d71dbe822096a 100644
+--- a/net/xfrm/xfrm_interface_core.c
++++ b/net/xfrm/xfrm_interface_core.c
+@@ -528,8 +528,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IPV6):
+- xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET6);
+ if (!dst) {
+ fl.u.ip6.flowi6_oif = dev->ifindex;
+ fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
+@@ -543,8 +543,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+ break;
+ case htons(ETH_P_IP):
+- xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
++ xfrm_decode_session(skb, &fl, AF_INET);
+ if (!dst) {
+ struct rtable *rt;
+
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 2d68a173b2273..d042ca01211fa 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -615,7 +615,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
+ struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
+ struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];
+
+- if (re) {
++ if (re && x->replay_esn && x->preplay_esn) {
+ struct xfrm_replay_state_esn *replay_esn;
+ replay_esn = nla_data(re);
+ memcpy(x->replay_esn, replay_esn,
+@@ -1250,6 +1250,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
+ sizeof(*filter), GFP_KERNEL);
+ if (filter == NULL)
+ return -ENOMEM;
++
++ /* see addr_match(), (prefix length >> 5) << 2
++ * will be used to compare xfrm_address_t
++ */
++ if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
++ filter->dplen > (sizeof(xfrm_address_t) << 3)) {
++ kfree(filter);
++ return -EINVAL;
++ }
+ }
+
+ if (attrs[XFRMA_PROTO])
+@@ -2960,7 +2969,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) },
+ [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) },
+ [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) },
+- [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) },
++ [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) },
+ [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) },
+ [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) },
+ [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 },
+@@ -2980,6 +2989,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
+ [XFRMA_SET_MARK] = { .type = NLA_U32 },
+ [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 },
+ [XFRMA_IF_ID] = { .type = NLA_U32 },
++ [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 },
+ };
+ EXPORT_SYMBOL_GPL(xfrma_policy);
+
+diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
+index fe3587547cfec..39610a15bcc98 100644
+--- a/sound/hda/hdac_regmap.c
++++ b/sound/hda/hdac_regmap.c
+@@ -597,10 +597,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once);
+ */
+ void snd_hdac_regmap_sync(struct hdac_device *codec)
+ {
+- if (codec->regmap) {
+- mutex_lock(&codec->regmap_lock);
++ mutex_lock(&codec->regmap_lock);
++ if (codec->regmap)
+ regcache_sync(codec->regmap);
+- mutex_unlock(&codec->regmap_lock);
+- }
++ mutex_unlock(&codec->regmap_lock);
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index f93b68a2a8393..aa475154c582f 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7081,6 +7081,8 @@ enum {
+ ALC285_FIXUP_SPEAKER2_TO_DAC1,
+ ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1,
+ ALC285_FIXUP_ASUS_HEADSET_MIC,
++ ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1,
++ ALC285_FIXUP_ASUS_I2C_HEADSET_MIC,
+ ALC280_FIXUP_HP_HEADSET_MIC,
+ ALC221_FIXUP_HP_FRONT_MIC,
+ ALC292_FIXUP_TPT460,
+@@ -7136,6 +7138,10 @@ enum {
+ ALC294_FIXUP_ASUS_DUAL_SPK,
+ ALC285_FIXUP_THINKPAD_X1_GEN7,
+ ALC285_FIXUP_THINKPAD_HEADSET_JACK,
++ ALC294_FIXUP_ASUS_ALLY,
++ ALC294_FIXUP_ASUS_ALLY_PINS,
++ ALC294_FIXUP_ASUS_ALLY_VERBS,
++ ALC294_FIXUP_ASUS_ALLY_SPEAKER,
+ ALC294_FIXUP_ASUS_HPE,
+ ALC294_FIXUP_ASUS_COEF_1B,
+ ALC294_FIXUP_ASUS_GX502_HP,
+@@ -8069,6 +8075,22 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1
+ },
++ [ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc285_fixup_speaker2_to_dac1,
++ .chained = true,
++ .chain_id = ALC287_FIXUP_CS35L41_I2C_2
++ },
++ [ALC285_FIXUP_ASUS_I2C_HEADSET_MIC] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x03a11050 },
++ { 0x1b, 0x03a11c30 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1
++ },
+ [ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -8450,6 +8472,47 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC294_FIXUP_SPK2_TO_DAC1
+ },
++ [ALC294_FIXUP_ASUS_ALLY] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = cs35l41_fixup_i2c_two,
++ .chained = true,
++ .chain_id = ALC294_FIXUP_ASUS_ALLY_PINS
++ },
++ [ALC294_FIXUP_ASUS_ALLY_PINS] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x19, 0x03a11050 },
++ { 0x1a, 0x03a11c30 },
++ { 0x21, 0x03211420 },
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC294_FIXUP_ASUS_ALLY_VERBS
++ },
++ [ALC294_FIXUP_ASUS_ALLY_VERBS] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x45 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x5089 },
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x46 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0004 },
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x47 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0xa47a },
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x49 },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x0049},
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x4a },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x201b },
++ { 0x20, AC_VERB_SET_COEF_INDEX, 0x6b },
++ { 0x20, AC_VERB_SET_PROC_COEF, 0x4278},
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC294_FIXUP_ASUS_ALLY_SPEAKER
++ },
++ [ALC294_FIXUP_ASUS_ALLY_SPEAKER] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc285_fixup_speaker2_to_dac1,
++ },
+ [ALC285_FIXUP_THINKPAD_X1_GEN7] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc285_fixup_thinkpad_x1_gen7,
+@@ -9533,7 +9596,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x8b96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8b97, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+ SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
+- SND_PCI_QUIRK(0x103c, 0x8c26, "HP HP EliteBook 800G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c46, "HP EliteBook 830 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c47, "HP EliteBook 840 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c48, "HP EliteBook 860 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c49, "HP Elite x360 830 2-in-1 G11", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c70, "HP EliteBook 835 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c71, "HP EliteBook 845 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
++ SND_PCI_QUIRK(0x103c, 0x8c72, "HP EliteBook 865 G11", ALC287_FIXUP_CS35L41_I2C_2_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
+ SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+@@ -9553,15 +9622,19 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1313, "Asus K42JZ", ALC269VB_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
++ SND_PCI_QUIRK(0x1043, 0x1433, "ASUS GX650P", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x1463, "Asus GA402X", ALC285_FIXUP_ASUS_I2C_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1473, "ASUS GU604V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1483, "ASUS GU603V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
++ SND_PCI_QUIRK(0x1043, 0x1573, "ASUS GZ301V", ALC285_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1662, "ASUS GV301QH", ALC294_FIXUP_ASUS_DUAL_SPK),
+ SND_PCI_QUIRK(0x1043, 0x1683, "ASUS UM3402YAR", ALC287_FIXUP_CS35L41_I2C_2),
+ SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x1740, "ASUS UX430UA", ALC295_FIXUP_ASUS_DACS),
+ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
++ SND_PCI_QUIRK(0x1043, 0x17f3, "ROG Ally RC71L_RC71L", ALC294_FIXUP_ASUS_ALLY),
+ SND_PCI_QUIRK(0x1043, 0x1881, "ASUS Zephyrus S/M", ALC294_FIXUP_ASUS_GX502_PINS),
+ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x18f1, "Asus FX505DT", ALC256_FIXUP_ASUS_HEADSET_MIC),
+@@ -10547,6 +10620,7 @@ static int patch_alc269(struct hda_codec *codec)
+ spec = codec->spec;
+ spec->gen.shared_mic_vref_pin = 0x18;
+ codec->power_save_node = 0;
++ spec->en_3kpull_low = true;
+
+ #ifdef CONFIG_PM
+ codec->patch_ops.suspend = alc269_suspend;
+@@ -10629,14 +10703,16 @@ static int patch_alc269(struct hda_codec *codec)
+ spec->shutup = alc256_shutup;
+ spec->init_hook = alc256_init;
+ spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
+- if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD)
+- spec->en_3kpull_low = true;
++ if (codec->core.vendor_id == 0x10ec0236 &&
++ codec->bus->pci->vendor != PCI_VENDOR_ID_AMD)
++ spec->en_3kpull_low = false;
+ break;
+ case 0x10ec0257:
+ spec->codec_variant = ALC269_TYPE_ALC257;
+ spec->shutup = alc256_shutup;
+ spec->init_hook = alc256_init;
+ spec->gen.mixer_nid = 0;
++ spec->en_3kpull_low = false;
+ break;
+ case 0x10ec0215:
+ case 0x10ec0245:
+@@ -11268,6 +11344,7 @@ enum {
+ ALC897_FIXUP_HP_HSMIC_VERB,
+ ALC897_FIXUP_LENOVO_HEADSET_MODE,
+ ALC897_FIXUP_HEADSET_MIC_PIN2,
++ ALC897_FIXUP_UNIS_H3C_X500S,
+ };
+
+ static const struct hda_fixup alc662_fixups[] = {
+@@ -11707,6 +11784,13 @@ static const struct hda_fixup alc662_fixups[] = {
+ .chained = true,
+ .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE
+ },
++ [ALC897_FIXUP_UNIS_H3C_X500S] = {
++ .type = HDA_FIXUP_VERBS,
++ .v.verbs = (const struct hda_verb[]) {
++ { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 },
++ {}
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+@@ -11868,6 +11952,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
+ {.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"},
+ {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"},
+ {.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"},
++ {.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"},
+ {}
+ };
+
+diff --git a/sound/soc/amd/Kconfig b/sound/soc/amd/Kconfig
+index c88ebd84bdd50..3968c478c9381 100644
+--- a/sound/soc/amd/Kconfig
++++ b/sound/soc/amd/Kconfig
+@@ -81,6 +81,7 @@ config SND_SOC_AMD_VANGOGH_MACH
+ tristate "AMD Vangogh support for NAU8821 CS35L41"
+ select SND_SOC_NAU8821
+ select SND_SOC_CS35L41_SPI
++ select SND_AMD_ACP_CONFIG
+ depends on SND_SOC_AMD_ACP5x && I2C && SPI_MASTER
+ help
+ This option enables machine driver for Vangogh platform
+diff --git a/sound/soc/amd/vangogh/acp5x.h b/sound/soc/amd/vangogh/acp5x.h
+index bd9f1c5684d17..ac1936a8c43ff 100644
+--- a/sound/soc/amd/vangogh/acp5x.h
++++ b/sound/soc/amd/vangogh/acp5x.h
+@@ -147,6 +147,8 @@ static inline void acp_writel(u32 val, void __iomem *base_addr)
+ writel(val, base_addr - ACP5x_PHY_BASE_ADDRESS);
+ }
+
++int snd_amd_acp_find_config(struct pci_dev *pci);
++
+ static inline u64 acp_get_byte_count(struct i2s_stream_instance *rtd,
+ int direction)
+ {
+diff --git a/sound/soc/amd/vangogh/pci-acp5x.c b/sound/soc/amd/vangogh/pci-acp5x.c
+index e0df17c88e8e0..c4634a8a17cdc 100644
+--- a/sound/soc/amd/vangogh/pci-acp5x.c
++++ b/sound/soc/amd/vangogh/pci-acp5x.c
+@@ -125,10 +125,15 @@ static int snd_acp5x_probe(struct pci_dev *pci,
+ {
+ struct acp5x_dev_data *adata;
+ struct platform_device_info pdevinfo[ACP5x_DEVS];
+- unsigned int irqflags;
++ unsigned int irqflags, flag;
+ int ret, i;
+ u32 addr, val;
+
++ /* Return if acp config flag is defined */
++ flag = snd_amd_acp_find_config(pci);
++ if (flag)
++ return -ENODEV;
++
+ irqflags = IRQF_SHARED;
+ if (pci->revision != 0x50)
+ return -ENODEV;
+diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c
+index 6e66cc218fa8d..76ff097518c88 100644
+--- a/sound/soc/codecs/rt5665.c
++++ b/sound/soc/codecs/rt5665.c
+@@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component)
+ struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component);
+
+ regmap_write(rt5665->regmap, RT5665_RESET, 0);
++
++ regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies);
+ }
+
+ #ifdef CONFIG_PM
+diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c
+index a37c85d301471..064b6feb76167 100644
+--- a/sound/soc/intel/boards/sof_sdw.c
++++ b/sound/soc/intel/boards/sof_sdw.c
+@@ -374,6 +374,31 @@ static const struct dmi_system_id sof_sdw_quirk_table[] = {
+ },
+ .driver_data = (void *)(RT711_JD1),
+ },
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Meteor Lake Client Platform"),
++ },
++ .driver_data = (void *)(RT711_JD2_100K),
++ },
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Rex"),
++ },
++ .driver_data = (void *)(SOF_SDW_PCH_DMIC),
++ },
++ /* LunarLake devices */
++ {
++ .callback = sof_sdw_quirk_cb,
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Lunar Lake Client Platform"),
++ },
++ .driver_data = (void *)(RT711_JD2_100K),
++ },
+ {}
+ };
+
+diff --git a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
+index 7f16304d025be..cf8b9793fe0e5 100644
+--- a/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
++++ b/sound/soc/intel/boards/sof_sdw_rt711_sdca.c
+@@ -143,6 +143,9 @@ int sof_sdw_rt711_sdca_exit(struct snd_soc_card *card, struct snd_soc_dai_link *
+ if (!ctx->headset_codec_dev)
+ return 0;
+
++ if (!SOF_RT711_JDSRC(sof_sdw_quirk))
++ return 0;
++
+ device_remove_software_node(ctx->headset_codec_dev);
+ put_device(ctx->headset_codec_dev);
+
+diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c
+index 9883dc777f630..63333a2b0a9c3 100644
+--- a/sound/soc/meson/axg-tdm-formatter.c
++++ b/sound/soc/meson/axg-tdm-formatter.c
+@@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ struct axg_tdm_stream *ts,
+ unsigned int offset)
+ {
+- unsigned int val, ch = ts->channels;
+- unsigned long mask;
+- int i, j;
++ unsigned int ch = ts->channels;
++ u32 val[AXG_TDM_NUM_LANES];
++ int i, j, k;
++
++ /*
++ * We need to mimick the slot distribution used by the HW to keep the
++ * channel placement consistent regardless of the number of channel
++ * in the stream. This is why the odd algorithm below is used.
++ */
++ memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES);
+
+ /*
+ * Distribute the channels of the stream over the available slots
+- * of each TDM lane
++ * of each TDM lane. We need to go over the 32 slots ...
+ */
+- for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
+- val = 0;
+- mask = ts->mask[i];
+-
+- for (j = find_first_bit(&mask, 32);
+- (j < 32) && ch;
+- j = find_next_bit(&mask, 32, j + 1)) {
+- val |= 1 << j;
+- ch -= 1;
++ for (i = 0; (i < 32) && ch; i += 2) {
++ /* ... of all the lanes ... */
++ for (j = 0; j < AXG_TDM_NUM_LANES; j++) {
++ /* ... then distribute the channels in pairs */
++ for (k = 0; k < 2; k++) {
++ if ((BIT(i + k) & ts->mask[j]) && ch) {
++ val[j] |= BIT(i + k);
++ ch -= 1;
++ }
++ }
+ }
+-
+- regmap_write(map, offset, val);
+- offset += regmap_get_reg_stride(map);
+ }
+
+ /*
+@@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map,
+ return -EINVAL;
+ }
+
++ for (i = 0; i < AXG_TDM_NUM_LANES; i++) {
++ regmap_write(map, offset, val[i]);
++ offset += regmap_get_reg_stride(map);
++ }
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks);
+diff --git a/sound/soc/sof/amd/acp.h b/sound/soc/sof/amd/acp.h
+index dd3c072d01721..14148c311f504 100644
+--- a/sound/soc/sof/amd/acp.h
++++ b/sound/soc/sof/amd/acp.h
+@@ -54,6 +54,9 @@
+
+ #define ACP_DSP_TO_HOST_IRQ 0x04
+
++#define ACP_RN_PCI_ID 0x01
++#define ACP_RMB_PCI_ID 0x6F
++
+ #define HOST_BRIDGE_CZN 0x1630
+ #define HOST_BRIDGE_RMB 0x14B5
+ #define ACP_SHA_STAT 0x8000
+diff --git a/sound/soc/sof/amd/pci-rmb.c b/sound/soc/sof/amd/pci-rmb.c
+index 4e1de462b431b..5698d910b26f3 100644
+--- a/sound/soc/sof/amd/pci-rmb.c
++++ b/sound/soc/sof/amd/pci-rmb.c
+@@ -90,6 +90,9 @@ static int acp_pci_rmb_probe(struct pci_dev *pci, const struct pci_device_id *pc
+ unsigned int flag, i, addr;
+ int ret;
+
++ if (pci->revision != ACP_RMB_PCI_ID)
++ return -ENODEV;
++
+ flag = snd_amd_acp_find_config(pci);
+ if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
+ return -ENODEV;
+diff --git a/sound/soc/sof/amd/pci-rn.c b/sound/soc/sof/amd/pci-rn.c
+index fca40b261671b..9189f63632789 100644
+--- a/sound/soc/sof/amd/pci-rn.c
++++ b/sound/soc/sof/amd/pci-rn.c
+@@ -90,6 +90,9 @@ static int acp_pci_rn_probe(struct pci_dev *pci, const struct pci_device_id *pci
+ unsigned int flag, i, addr;
+ int ret;
+
++ if (pci->revision != ACP_RN_PCI_ID)
++ return -ENODEV;
++
+ flag = snd_amd_acp_find_config(pci);
+ if (flag != FLAG_AMD_SOF && flag != FLAG_AMD_SOF_ONLY_DMIC)
+ return -ENODEV;
+diff --git a/sound/soc/sof/core.c b/sound/soc/sof/core.c
+index 625977a29d8a8..75a1e2c6539f2 100644
+--- a/sound/soc/sof/core.c
++++ b/sound/soc/sof/core.c
+@@ -479,8 +479,10 @@ int snd_sof_device_shutdown(struct device *dev)
+ if (IS_ENABLED(CONFIG_SND_SOC_SOF_PROBE_WORK_QUEUE))
+ cancel_work_sync(&sdev->probe_work);
+
+- if (sdev->fw_state == SOF_FW_BOOT_COMPLETE)
++ if (sdev->fw_state == SOF_FW_BOOT_COMPLETE) {
++ sof_fw_trace_free(sdev);
+ return snd_sof_shutdown(sdev);
++ }
+
+ return 0;
+ }
+diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
+index 1188ec51816bd..63764afdcf617 100644
+--- a/sound/soc/sof/intel/hda.c
++++ b/sound/soc/sof/intel/hda.c
+@@ -1309,12 +1309,22 @@ static void hda_generic_machine_select(struct snd_sof_dev *sdev,
+ hda_mach->mach_params.dmic_num = dmic_num;
+ pdata->tplg_filename = tplg_filename;
+
+- if (codec_num == 2) {
++ if (codec_num == 2 ||
++ (codec_num == 1 && !HDA_IDISP_CODEC(bus->codec_mask))) {
+ /*
+ * Prevent SoundWire links from starting when an external
+ * HDaudio codec is used
+ */
+ hda_mach->mach_params.link_mask = 0;
++ } else {
++ /*
++ * Allow SoundWire links to start when no external HDaudio codec
++ * was detected. This will not create a SoundWire card but
++ * will help detect if any SoundWire codec reports as ATTACHED.
++ */
++ struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata;
++
++ hda_mach->mach_params.link_mask = hdev->info.link_mask;
+ }
+
+ *mach = hda_mach;
+diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
+index efb4a3311cc59..5d72dc8441cbb 100644
+--- a/sound/usb/quirks-table.h
++++ b/sound/usb/quirks-table.h
+@@ -4507,6 +4507,35 @@ YAMAHA_DEVICE(0x7010, "UB99"),
+ }
+ }
+ },
++{
++ /* Advanced modes of the Mythware XA001AU.
++ * For the standard mode, Mythware XA001AU has ID ffad:a001
++ */
++ USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001),
++ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
++ .vendor_name = "Mythware",
++ .product_name = "XA001AU",
++ .ifnum = QUIRK_ANY_INTERFACE,
++ .type = QUIRK_COMPOSITE,
++ .data = (const struct snd_usb_audio_quirk[]) {
++ {
++ .ifnum = 0,
++ .type = QUIRK_IGNORE_INTERFACE,
++ },
++ {
++ .ifnum = 1,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
++ },
++ {
++ .ifnum = 2,
++ .type = QUIRK_AUDIO_STANDARD_INTERFACE,
++ },
++ {
++ .ifnum = -1
++ }
++ }
++ }
++},
+
+ #undef USB_DEVICE_VENDOR_SPEC
+ #undef USB_AUDIO_DEVICE
+diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+index aff88f78e3391..5ea9d63915f77 100755
+--- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
++++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh
+@@ -72,7 +72,8 @@ test_span_gre_ttl()
+
+ RET=0
+
+- mirror_install $swp1 ingress $tundev "matchall $tcflags"
++ mirror_install $swp1 ingress $tundev \
++ "prot ip flower $tcflags ip_prot icmp"
+ tc filter add dev $h3 ingress pref 77 prot $prot \
+ flower skip_hw ip_ttl 50 action pass
+
+diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
+index 919c0dd9fe4bc..b0f5e55d2d0b2 100755
+--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
++++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
+@@ -9,6 +9,8 @@ NUM_NETIFS=4
+ source tc_common.sh
+ source lib.sh
+
++require_command ncat
++
+ tcflags="skip_hw"
+
+ h1_create()
+@@ -201,10 +203,10 @@ mirred_egress_to_ingress_test()
+
+ mirred_egress_to_ingress_tcp_test()
+ {
+- local tmpfile=$(mktemp) tmpfile1=$(mktemp)
++ mirred_e2i_tf1=$(mktemp) mirred_e2i_tf2=$(mktemp)
+
+ RET=0
+- dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$tmpfile
++ dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$mirred_e2i_tf1
+ tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \
+ $tcflags ip_proto tcp src_ip 192.0.2.1 dst_ip 192.0.2.2 \
+ action ct commit nat src addr 192.0.2.2 pipe \
+@@ -220,11 +222,11 @@ mirred_egress_to_ingress_tcp_test()
+ ip_proto icmp \
+ action drop
+
+- ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $tmpfile1 &
++ ip vrf exec v$h1 ncat --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 &
+ local rpid=$!
+- ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$tmpfile
++ ip vrf exec v$h1 ncat -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1
+ wait -n $rpid
+- cmp -s $tmpfile $tmpfile1
++ cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2
+ check_err $? "server output check failed"
+
+ $MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \
+@@ -241,7 +243,7 @@ mirred_egress_to_ingress_tcp_test()
+ tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
+ tc filter del dev $h1 ingress protocol ip pref 102 handle 102 flower
+
+- rm -f $tmpfile $tmpfile1
++ rm -f $mirred_e2i_tf1 $mirred_e2i_tf2
+ log_test "mirred_egress_to_ingress_tcp ($tcflags)"
+ }
+
+@@ -270,6 +272,8 @@ setup_prepare()
+
+ cleanup()
+ {
++ local tf
++
+ pre_cleanup
+
+ switch_destroy
+@@ -280,6 +284,8 @@ cleanup()
+
+ ip link set $swp2 address $swp2origmac
+ ip link set $swp1 address $swp1origmac
++
++ for tf in $mirred_e2i_tf1 $mirred_e2i_tf2; do rm -f $tf; done
+ }
+
+ mirred_egress_redirect_test()