author    | Mike Pagano <mpagano@gentoo.org> | 2019-09-21 12:22:36 -0400
committer | Mike Pagano <mpagano@gentoo.org> | 2019-09-21 12:22:36 -0400
commit    | b7a9c69a82eb6aea9059b1ee493d8349aa02eb15 (patch)
tree      | 193951a9c484d49852037da2e09adc843904b49a
parent    | Add FILE_LOCKING to GENTOO_LINUX config. See bug #694688. (diff)
Linux patch 5.2.17
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README             |    4 +
-rw-r--r-- | 1016_linux-5.2.17.patch | 4122 +
2 files changed, 4126 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README index c046e8a8..200ad404 100644 --- a/0000_README +++ b/0000_README @@ -107,6 +107,10 @@ Patch: 1015_linux-5.2.16.patch From: https://www.kernel.org Desc: Linux 5.2.16 +Patch: 1016_linux-5.2.17.patch +From: https://www.kernel.org +Desc: Linux 5.2.17 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1016_linux-5.2.17.patch b/1016_linux-5.2.17.patch new file mode 100644 index 00000000..8e36dc23 --- /dev/null +++ b/1016_linux-5.2.17.patch @@ -0,0 +1,4122 @@ +diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt +index 1da2f1668f08..845d689e0fd7 100644 +--- a/Documentation/filesystems/overlayfs.txt ++++ b/Documentation/filesystems/overlayfs.txt +@@ -302,7 +302,7 @@ beneath or above the path of another overlay lower layer path. + + Using an upper layer path and/or a workdir path that are already used by + another overlay mount is not allowed and may fail with EBUSY. Using +-partially overlapping paths is not allowed but will not fail with EBUSY. ++partially overlapping paths is not allowed and may fail with EBUSY. + If files are accessed from two overlayfs mounts which share or overlap the + upper layer and/or workdir path the behavior of the overlay is undefined, + though it will not result in a crash or deadlock. +diff --git a/Makefile b/Makefile +index 3cec03e93b40..32226d81fbb5 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 2 +-SUBLEVEL = 16 ++SUBLEVEL = 17 + EXTRAVERSION = + NAME = Bobtail Squid + +diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi +index ced1a19d5f89..46849d6ecb3e 100644 +--- a/arch/arm/boot/dts/am33xx-l4.dtsi ++++ b/arch/arm/boot/dts/am33xx-l4.dtsi +@@ -185,7 +185,7 @@ + uart0: serial@0 { + compatible = "ti,am3352-uart", "ti,omap3-uart"; + clock-frequency = <48000000>; +- reg = <0x0 0x2000>; ++ reg = <0x0 0x1000>; + interrupts = <72>; + status = "disabled"; + dmas = <&edma 26 0>, <&edma 27 0>; +@@ -934,7 +934,7 @@ + uart1: serial@0 { + compatible = "ti,am3352-uart", "ti,omap3-uart"; + clock-frequency = <48000000>; +- reg = <0x0 0x2000>; ++ reg = <0x0 0x1000>; + interrupts = <73>; + status = "disabled"; + dmas = <&edma 28 0>, <&edma 29 0>; +@@ -966,7 +966,7 @@ + uart2: serial@0 { + compatible = "ti,am3352-uart", "ti,omap3-uart"; + clock-frequency = <48000000>; +- reg = <0x0 0x2000>; ++ reg = <0x0 0x1000>; + interrupts = <74>; + status = "disabled"; + dmas = <&edma 30 0>, <&edma 31 0>; +@@ -1614,7 +1614,7 @@ + uart3: serial@0 { + compatible = "ti,am3352-uart", "ti,omap3-uart"; + clock-frequency = <48000000>; +- reg = <0x0 0x2000>; ++ reg = <0x0 0x1000>; + interrupts = <44>; + status = "disabled"; + }; +@@ -1644,7 +1644,7 @@ + uart4: serial@0 { + compatible = "ti,am3352-uart", "ti,omap3-uart"; + clock-frequency = <48000000>; +- reg = <0x0 0x2000>; ++ reg = <0x0 0x1000>; + interrupts = <45>; + status = "disabled"; + }; +@@ -1674,7 +1674,7 @@ + uart5: serial@0 { + compatible = "ti,am3352-uart", "ti,omap3-uart"; + clock-frequency = <48000000>; +- reg = <0x0 0x2000>; ++ reg = <0x0 0x1000>; + interrupts = <46>; + status = "disabled"; + }; +@@ -1758,6 +1758,8 @@ + + target-module@cc000 { /* 0x481cc000, ap 60 46.0 */ + compatible = "ti,sysc-omap4", "ti,sysc"; ++ reg = <0xcc020 0x4>; ++ reg-names = "rev"; + ti,hwmods = "d_can0"; + /* Domains (P, C): per_pwrdm, l4ls_clkdm */ + clocks = <&l4ls_clkctrl 
AM3_L4LS_D_CAN0_CLKCTRL 0>, +@@ -1780,6 +1782,8 @@ + + target-module@d0000 { /* 0x481d0000, ap 62 42.0 */ + compatible = "ti,sysc-omap4", "ti,sysc"; ++ reg = <0xd0020 0x4>; ++ reg-names = "rev"; + ti,hwmods = "d_can1"; + /* Domains (P, C): per_pwrdm, l4ls_clkdm */ + clocks = <&l4ls_clkctrl AM3_L4LS_D_CAN1_CLKCTRL 0>, +diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi +index e5c2f71a7c77..fb6b8aa12cc5 100644 +--- a/arch/arm/boot/dts/am33xx.dtsi ++++ b/arch/arm/boot/dts/am33xx.dtsi +@@ -234,13 +234,33 @@ + interrupt-names = "edma3_tcerrint"; + }; + +- mmc3: mmc@47810000 { +- compatible = "ti,omap4-hsmmc"; ++ target-module@47810000 { ++ compatible = "ti,sysc-omap2", "ti,sysc"; + ti,hwmods = "mmc3"; +- ti,needs-special-reset; +- interrupts = <29>; +- reg = <0x47810000 0x1000>; +- status = "disabled"; ++ reg = <0x478102fc 0x4>, ++ <0x47810110 0x4>, ++ <0x47810114 0x4>; ++ reg-names = "rev", "sysc", "syss"; ++ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY | ++ SYSC_OMAP2_ENAWAKEUP | ++ SYSC_OMAP2_SOFTRESET | ++ SYSC_OMAP2_AUTOIDLE)>; ++ ti,sysc-sidle = <SYSC_IDLE_FORCE>, ++ <SYSC_IDLE_NO>, ++ <SYSC_IDLE_SMART>; ++ ti,syss-mask = <1>; ++ clocks = <&l3s_clkctrl AM3_L3S_MMC3_CLKCTRL 0>; ++ clock-names = "fck"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x47810000 0x1000>; ++ ++ mmc3: mmc@0 { ++ compatible = "ti,omap4-hsmmc"; ++ ti,needs-special-reset; ++ interrupts = <29>; ++ reg = <0x0 0x1000>; ++ }; + }; + + usb: usb@47400000 { +diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi +index 55aff4db9c7c..848e2a8884e2 100644 +--- a/arch/arm/boot/dts/am4372.dtsi ++++ b/arch/arm/boot/dts/am4372.dtsi +@@ -228,13 +228,33 @@ + interrupt-names = "edma3_tcerrint"; + }; + +- mmc3: mmc@47810000 { +- compatible = "ti,omap4-hsmmc"; +- reg = <0x47810000 0x1000>; ++ target-module@47810000 { ++ compatible = "ti,sysc-omap2", "ti,sysc"; + ti,hwmods = "mmc3"; +- ti,needs-special-reset; +- interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; +- status = "disabled"; ++ reg = <0x478102fc 0x4>, ++ <0x47810110 0x4>, ++ <0x47810114 0x4>; ++ reg-names = "rev", "sysc", "syss"; ++ ti,sysc-mask = <(SYSC_OMAP2_CLOCKACTIVITY | ++ SYSC_OMAP2_ENAWAKEUP | ++ SYSC_OMAP2_SOFTRESET | ++ SYSC_OMAP2_AUTOIDLE)>; ++ ti,sysc-sidle = <SYSC_IDLE_FORCE>, ++ <SYSC_IDLE_NO>, ++ <SYSC_IDLE_SMART>; ++ ti,syss-mask = <1>; ++ clocks = <&l3s_clkctrl AM4_L3S_MMC3_CLKCTRL 0>; ++ clock-names = "fck"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges = <0x0 0x47810000 0x1000>; ++ ++ mmc3: mmc@0 { ++ compatible = "ti,omap4-hsmmc"; ++ ti,needs-special-reset; ++ interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; ++ reg = <0x0 0x1000>; ++ }; + }; + + sham: sham@53100000 { +diff --git a/arch/arm/boot/dts/am437x-l4.dtsi b/arch/arm/boot/dts/am437x-l4.dtsi +index 989cb60b9029..04bee4ff9dcb 100644 +--- a/arch/arm/boot/dts/am437x-l4.dtsi ++++ b/arch/arm/boot/dts/am437x-l4.dtsi +@@ -1574,6 +1574,8 @@ + + target-module@cc000 { /* 0x481cc000, ap 50 46.0 */ + compatible = "ti,sysc-omap4", "ti,sysc"; ++ reg = <0xcc020 0x4>; ++ reg-names = "rev"; + ti,hwmods = "d_can0"; + /* Domains (P, C): per_pwrdm, l4ls_clkdm */ + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN0_CLKCTRL 0>; +@@ -1593,6 +1595,8 @@ + + target-module@d0000 { /* 0x481d0000, ap 52 3a.0 */ + compatible = "ti,sysc-omap4", "ti,sysc"; ++ reg = <0xd0020 0x4>; ++ reg-names = "rev"; + ti,hwmods = "d_can1"; + /* Domains (P, C): per_pwrdm, l4ls_clkdm */ + clocks = <&l4ls_clkctrl AM4_L4LS_D_CAN1_CLKCTRL 0>; +diff --git a/arch/arm/boot/dts/am571x-idk.dts 
b/arch/arm/boot/dts/am571x-idk.dts +index 1d5e99964bbf..0aaacea1d887 100644 +--- a/arch/arm/boot/dts/am571x-idk.dts ++++ b/arch/arm/boot/dts/am571x-idk.dts +@@ -175,14 +175,9 @@ + }; + + &mmc1 { +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; ++ pinctrl-names = "default", "hs"; + pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; + pinctrl-1 = <&mmc1_pins_hs>; +- pinctrl-2 = <&mmc1_pins_sdr12>; +- pinctrl-3 = <&mmc1_pins_sdr25>; +- pinctrl-4 = <&mmc1_pins_sdr50>; +- pinctrl-5 = <&mmc1_pins_ddr50_rev20 &mmc1_iodelay_ddr50_conf>; +- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; + }; + + &mmc2 { +diff --git a/arch/arm/boot/dts/am572x-idk.dts b/arch/arm/boot/dts/am572x-idk.dts +index c65d7f6d3b5a..ea1c119feaa5 100644 +--- a/arch/arm/boot/dts/am572x-idk.dts ++++ b/arch/arm/boot/dts/am572x-idk.dts +@@ -16,14 +16,9 @@ + }; + + &mmc1 { +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; ++ pinctrl-names = "default", "hs"; + pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; + pinctrl-1 = <&mmc1_pins_hs>; +- pinctrl-2 = <&mmc1_pins_sdr12>; +- pinctrl-3 = <&mmc1_pins_sdr25>; +- pinctrl-4 = <&mmc1_pins_sdr50>; +- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>; +- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; + }; + + &mmc2 { +diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts +index dc5141c35610..7935d70874ce 100644 +--- a/arch/arm/boot/dts/am574x-idk.dts ++++ b/arch/arm/boot/dts/am574x-idk.dts +@@ -24,14 +24,9 @@ + }; + + &mmc1 { +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; ++ pinctrl-names = "default", "hs"; + pinctrl-0 = <&mmc1_pins_default_no_clk_pu>; + pinctrl-1 = <&mmc1_pins_hs>; +- pinctrl-2 = <&mmc1_pins_default>; +- pinctrl-3 = <&mmc1_pins_hs>; +- pinctrl-4 = <&mmc1_pins_sdr50>; +- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_conf>; +- pinctrl-6 = <&mmc1_pins_ddr50 &mmc1_iodelay_sdr104_conf>; + }; + + &mmc2 { +diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi +index d02f5fa61e5f..bc76f1705c0f 100644 +--- a/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi ++++ b/arch/arm/boot/dts/am57xx-beagle-x15-common.dtsi +@@ -379,7 +379,7 @@ + }; + }; + +-&gpio7 { ++&gpio7_target { + ti,no-reset-on-init; + ti,no-idle-on-init; + }; +@@ -430,6 +430,7 @@ + + bus-width = <4>; + cd-gpios = <&gpio6 27 GPIO_ACTIVE_LOW>; /* gpio 219 */ ++ no-1-8-v; + }; + + &mmc2 { +diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts +index a374b5cd6db0..7b113b52c3fb 100644 +--- a/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts ++++ b/arch/arm/boot/dts/am57xx-beagle-x15-revb1.dts +@@ -16,14 +16,9 @@ + }; + + &mmc1 { +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; ++ pinctrl-names = "default", "hs"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; +- pinctrl-2 = <&mmc1_pins_sdr12>; +- pinctrl-3 = <&mmc1_pins_sdr25>; +- pinctrl-4 = <&mmc1_pins_sdr50>; +- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev11_conf>; +- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev11_conf>; + vmmc-supply = <&vdd_3v3>; + vqmmc-supply = <&ldo1_reg>; + }; +diff --git a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts +index 4badd2144db9..30c500b15b21 100644 +--- a/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts ++++ b/arch/arm/boot/dts/am57xx-beagle-x15-revc.dts +@@ -16,14 
+16,9 @@ + }; + + &mmc1 { +- pinctrl-names = "default", "hs", "sdr12", "sdr25", "sdr50", "ddr50", "sdr104"; ++ pinctrl-names = "default", "hs"; + pinctrl-0 = <&mmc1_pins_default>; + pinctrl-1 = <&mmc1_pins_hs>; +- pinctrl-2 = <&mmc1_pins_sdr12>; +- pinctrl-3 = <&mmc1_pins_sdr25>; +- pinctrl-4 = <&mmc1_pins_sdr50>; +- pinctrl-5 = <&mmc1_pins_ddr50 &mmc1_iodelay_ddr_rev20_conf>; +- pinctrl-6 = <&mmc1_pins_sdr104 &mmc1_iodelay_sdr104_rev20_conf>; + vmmc-supply = <&vdd_3v3>; + vqmmc-supply = <&ldo1_reg>; + }; +diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts +index 714e971b912a..de7f85efaa51 100644 +--- a/arch/arm/boot/dts/dra7-evm.dts ++++ b/arch/arm/boot/dts/dra7-evm.dts +@@ -498,7 +498,7 @@ + phy-supply = <&ldousb_reg>; + }; + +-&gpio7 { ++&gpio7_target { + ti,no-reset-on-init; + ti,no-idle-on-init; + }; +diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi +index 23faedec08ab..21e5914fdd62 100644 +--- a/arch/arm/boot/dts/dra7-l4.dtsi ++++ b/arch/arm/boot/dts/dra7-l4.dtsi +@@ -1261,7 +1261,7 @@ + }; + }; + +- target-module@51000 { /* 0x48051000, ap 45 2e.0 */ ++ gpio7_target: target-module@51000 { /* 0x48051000, ap 45 2e.0 */ + compatible = "ti,sysc-omap2", "ti,sysc"; + ti,hwmods = "gpio7"; + reg = <0x51000 0x4>, +@@ -3025,7 +3025,7 @@ + + target-module@80000 { /* 0x48480000, ap 31 16.0 */ + compatible = "ti,sysc-omap4", "ti,sysc"; +- reg = <0x80000 0x4>; ++ reg = <0x80020 0x4>; + reg-names = "rev"; + clocks = <&l4per2_clkctrl DRA7_L4PER2_DCAN2_CLKCTRL 0>; + clock-names = "fck"; +@@ -4577,7 +4577,7 @@ + + target-module@c000 { /* 0x4ae3c000, ap 30 04.0 */ + compatible = "ti,sysc-omap4", "ti,sysc"; +- reg = <0xc000 0x4>; ++ reg = <0xc020 0x4>; + reg-names = "rev"; + clocks = <&wkupaon_clkctrl DRA7_WKUPAON_DCAN1_CLKCTRL 0>; + clock-names = "fck"; +diff --git a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi +index 28ebb4eb884a..214b9e6de2c3 100644 +--- a/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi ++++ b/arch/arm/boot/dts/dra74x-mmc-iodelay.dtsi +@@ -32,7 +32,7 @@ + * + * Datamanual Revisions: + * +- * AM572x Silicon Revision 2.0: SPRS953B, Revised November 2016 ++ * AM572x Silicon Revision 2.0: SPRS953F, Revised May 2019 + * AM572x Silicon Revision 1.1: SPRS915R, Revised November 2016 + * + */ +@@ -229,45 +229,45 @@ + + mmc3_pins_default: mmc3_pins_default { + pinctrl-single,pins = < +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + >; + }; + + 
mmc3_pins_hs: mmc3_pins_hs { + pinctrl-single,pins = < +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + >; + }; + + mmc3_pins_sdr12: mmc3_pins_sdr12 { + pinctrl-single,pins = < +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ + >; + }; + + mmc3_pins_sdr25: mmc3_pins_sdr25 { + pinctrl-single,pins = < +- DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ +- DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ +- DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ +- DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ +- DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ +- DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MUX_MODE0)) /* mmc3_dat3.mmc3_dat3 */ ++ DRA7XX_CORE_IOPAD(0x377c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_clk.mmc3_clk */ ++ DRA7XX_CORE_IOPAD(0x3780, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_cmd.mmc3_cmd */ ++ DRA7XX_CORE_IOPAD(0x3784, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat0.mmc3_dat0 */ ++ DRA7XX_CORE_IOPAD(0x3788, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat1.mmc3_dat1 */ ++ DRA7XX_CORE_IOPAD(0x378c, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* mmc3_dat2.mmc3_dat2 */ ++ DRA7XX_CORE_IOPAD(0x3790, (PIN_INPUT_PULLUP | MODE_SELECT | MUX_MODE0)) /* 
mmc3_dat3.mmc3_dat3 */ + >; + }; + +diff --git a/arch/arm/mach-omap1/ams-delta-fiq-handler.S b/arch/arm/mach-omap1/ams-delta-fiq-handler.S +index 81159af44862..14a6c3eb3298 100644 +--- a/arch/arm/mach-omap1/ams-delta-fiq-handler.S ++++ b/arch/arm/mach-omap1/ams-delta-fiq-handler.S +@@ -126,6 +126,8 @@ restart: + orr r11, r11, r13 @ mask all requested interrupts + str r11, [r12, #OMAP1510_GPIO_INT_MASK] + ++ str r13, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack all requested interrupts ++ + ands r10, r13, #KEYBRD_CLK_MASK @ extract keyboard status - set? + beq hksw @ no - try next source + +@@ -133,7 +135,6 @@ restart: + @@@@@@@@@@@@@@@@@@@@@@ + @ Keyboard clock FIQ mode interrupt handler + @ r10 now contains KEYBRD_CLK_MASK, use it +- str r10, [r12, #OMAP1510_GPIO_INT_STATUS] @ ack the interrupt + bic r11, r11, r10 @ unmask it + str r11, [r12, #OMAP1510_GPIO_INT_MASK] + +diff --git a/arch/arm/mach-omap1/ams-delta-fiq.c b/arch/arm/mach-omap1/ams-delta-fiq.c +index 0af2bf6f9933..fd87382a3f18 100644 +--- a/arch/arm/mach-omap1/ams-delta-fiq.c ++++ b/arch/arm/mach-omap1/ams-delta-fiq.c +@@ -69,9 +69,7 @@ static irqreturn_t deferred_fiq(int irq, void *dev_id) + * interrupts default to since commit 80ac93c27441 + * requires interrupt already acked and unmasked. + */ +- if (irq_chip->irq_ack) +- irq_chip->irq_ack(d); +- if (irq_chip->irq_unmask) ++ if (!WARN_ON_ONCE(!irq_chip->irq_unmask)) + irq_chip->irq_unmask(d); + } + for (; irq_counter[gpio] < fiq_count; irq_counter[gpio]++) +diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c +index f9c02f9f1c92..5c3845730dbf 100644 +--- a/arch/arm/mach-omap2/omap4-common.c ++++ b/arch/arm/mach-omap2/omap4-common.c +@@ -127,6 +127,9 @@ static int __init omap4_sram_init(void) + struct device_node *np; + struct gen_pool *sram_pool; + ++ if (!soc_is_omap44xx() && !soc_is_omap54xx()) ++ return 0; ++ + np = of_find_compatible_node(NULL, NULL, "ti,omap4-mpu"); + if (!np) + pr_warn("%s:Unable to allocate sram needed to handle errata I688\n", +diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +index 4a5b4aee6615..1ec21e9ba1e9 100644 +--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c ++++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +@@ -379,7 +379,8 @@ static struct omap_hwmod dra7xx_dcan2_hwmod = { + static struct omap_hwmod_class_sysconfig dra7xx_epwmss_sysc = { + .rev_offs = 0x0, + .sysc_offs = 0x4, +- .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET, ++ .sysc_flags = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | ++ SYSC_HAS_RESET_STATUS, + .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), + .sysc_fields = &omap_hwmod_sysc_type2, + }; +diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c +index 749a5a6f6143..98e17388a563 100644 +--- a/arch/arm/mm/init.c ++++ b/arch/arm/mm/init.c +@@ -174,6 +174,11 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max_low, + #ifdef CONFIG_HAVE_ARCH_PFN_VALID + int pfn_valid(unsigned long pfn) + { ++ phys_addr_t addr = __pfn_to_phys(pfn); ++ ++ if (__phys_to_pfn(addr) != pfn) ++ return 0; ++ + return memblock_is_map_memory(__pfn_to_phys(pfn)); + } + EXPORT_SYMBOL(pfn_valid); +@@ -613,7 +618,8 @@ static void update_sections_early(struct section_perm perms[], int n) + if (t->flags & PF_KTHREAD) + continue; + for_each_thread(t, s) +- set_section_perms(perms, n, true, s->mm); ++ if (s->mm) ++ set_section_perms(perms, n, true, s->mm); + } + set_section_perms(perms, n, true, current->active_mm); + set_section_perms(perms, n, true, &init_mm); 
+diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi +index 9f72396ba710..4c92c197aeb8 100644 +--- a/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi ++++ b/arch/arm64/boot/dts/amlogic/meson-g12a.dtsi +@@ -591,6 +591,7 @@ + clocks = <&clkc CLKID_USB1_DDR_BRIDGE>; + clock-names = "ddr"; + phys = <&usb2_phy1>; ++ phy-names = "usb2-phy"; + dr_mode = "peripheral"; + g-rx-fifo-size = <192>; + g-np-tx-fifo-size = <128>; +diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts +index a7dc11e36fd9..071f66d8719e 100644 +--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts ++++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts +@@ -97,7 +97,7 @@ + reg = <0x0 0x48000000 0x0 0x18000000>; + }; + +- reg_1p8v: regulator0 { ++ reg_1p8v: regulator-1p8v { + compatible = "regulator-fixed"; + regulator-name = "fixed-1.8V"; + regulator-min-microvolt = <1800000>; +@@ -106,7 +106,7 @@ + regulator-always-on; + }; + +- reg_3p3v: regulator1 { ++ reg_3p3v: regulator-3p3v { + compatible = "regulator-fixed"; + regulator-name = "fixed-3.3V"; + regulator-min-microvolt = <3300000>; +@@ -115,7 +115,7 @@ + regulator-always-on; + }; + +- reg_12p0v: regulator1 { ++ reg_12p0v: regulator-12p0v { + compatible = "regulator-fixed"; + regulator-name = "D12.0V"; + regulator-min-microvolt = <12000000>; +diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h +index b9574d850f14..4e07aa514f60 100644 +--- a/arch/arm64/include/asm/pgtable.h ++++ b/arch/arm64/include/asm/pgtable.h +@@ -214,8 +214,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte) + * Only if the new pte is valid and kernel, otherwise TLB maintenance + * or update_mmu_cache() have the necessary barriers. 
+ */ +- if (pte_valid_not_user(pte)) ++ if (pte_valid_not_user(pte)) { + dsb(ishst); ++ isb(); ++ } + } + + extern void __sync_icache_dcache(pte_t pteval); +@@ -453,8 +455,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) + + WRITE_ONCE(*pmdp, pmd); + +- if (pmd_valid(pmd)) ++ if (pmd_valid(pmd)) { + dsb(ishst); ++ isb(); ++ } + } + + static inline void pmd_clear(pmd_t *pmdp) +@@ -512,8 +516,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud) + + WRITE_ONCE(*pudp, pud); + +- if (pud_valid(pud)) ++ if (pud_valid(pud)) { + dsb(ishst); ++ isb(); ++ } + } + + static inline void pud_clear(pud_t *pudp) +diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c +index 273ae66a9a45..8deb432c2975 100644 +--- a/arch/powerpc/mm/book3s64/radix_pgtable.c ++++ b/arch/powerpc/mm/book3s64/radix_pgtable.c +@@ -515,14 +515,6 @@ void __init radix__early_init_devtree(void) + mmu_psize_defs[MMU_PAGE_64K].shift = 16; + mmu_psize_defs[MMU_PAGE_64K].ap = 0x5; + found: +-#ifdef CONFIG_SPARSEMEM_VMEMMAP +- if (mmu_psize_defs[MMU_PAGE_2M].shift) { +- /* +- * map vmemmap using 2M if available +- */ +- mmu_vmemmap_psize = MMU_PAGE_2M; +- } +-#endif /* CONFIG_SPARSEMEM_VMEMMAP */ + return; + } + +@@ -587,7 +579,13 @@ void __init radix__early_init_mmu(void) + + #ifdef CONFIG_SPARSEMEM_VMEMMAP + /* vmemmap mapping */ +- mmu_vmemmap_psize = mmu_virtual_psize; ++ if (mmu_psize_defs[MMU_PAGE_2M].shift) { ++ /* ++ * map vmemmap using 2M if available ++ */ ++ mmu_vmemmap_psize = MMU_PAGE_2M; ++ } else ++ mmu_vmemmap_psize = mmu_virtual_psize; + #endif + /* + * initialize page table size +diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h +index c207f6634b91..15b3edaabc28 100644 +--- a/arch/riscv/include/asm/fixmap.h ++++ b/arch/riscv/include/asm/fixmap.h +@@ -25,10 +25,6 @@ enum fixed_addresses { + __end_of_fixed_addresses + }; + +-#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) +-#define FIXADDR_TOP (VMALLOC_START) +-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) +- + #define FIXMAP_PAGE_IO PAGE_KERNEL + + #define __early_set_fixmap __set_fixmap +diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h +index f7c3f7de15f2..e6faa469c133 100644 +--- a/arch/riscv/include/asm/pgtable.h ++++ b/arch/riscv/include/asm/pgtable.h +@@ -408,14 +408,22 @@ static inline void pgtable_cache_init(void) + #define VMALLOC_END (PAGE_OFFSET - 1) + #define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) + ++#define FIXADDR_TOP VMALLOC_START ++#ifdef CONFIG_64BIT ++#define FIXADDR_SIZE PMD_SIZE ++#else ++#define FIXADDR_SIZE PGDIR_SIZE ++#endif ++#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) ++ + /* +- * Task size is 0x40000000000 for RV64 or 0xb800000 for RV32. ++ * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32. + * Note that PGDIR_SIZE must evenly divide TASK_SIZE. 
+ */ + #ifdef CONFIG_64BIT + #define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2) + #else +-#define TASK_SIZE VMALLOC_START ++#define TASK_SIZE FIXADDR_START + #endif + + #include <asm-generic/pgtable.h> +diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c +index 5e7c63033159..fd9844f947f7 100644 +--- a/arch/s390/net/bpf_jit_comp.c ++++ b/arch/s390/net/bpf_jit_comp.c +@@ -853,7 +853,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i + break; + case BPF_ALU64 | BPF_NEG: /* dst = -dst */ + /* lcgr %dst,%dst */ +- EMIT4(0xb9130000, dst_reg, dst_reg); ++ EMIT4(0xb9030000, dst_reg, dst_reg); + break; + /* + * BPF_FROM_BE/LE +@@ -1027,8 +1027,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i + /* llgf %w1,map.max_entries(%b2) */ + EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2, + offsetof(struct bpf_array, map.max_entries)); +- /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */ +- EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3, ++ /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */ ++ EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3, + REG_W1, 0, 0xa); + + /* +@@ -1054,8 +1054,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i + * goto out; + */ + +- /* sllg %r1,%b3,3: %r1 = index * 8 */ +- EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3); ++ /* llgfr %r1,%b3: %r1 = (u32) index */ ++ EMIT4(0xb9160000, REG_1, BPF_REG_3); ++ /* sllg %r1,%r1,3: %r1 *= 8 */ ++ EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3); + /* lg %r1,prog(%b2,%r1) */ + EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2, + REG_1, offsetof(struct bpf_array, ptrs)); +diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c +index 62f317c9113a..5b35b7ea5d72 100644 +--- a/arch/x86/events/amd/ibs.c ++++ b/arch/x86/events/amd/ibs.c +@@ -661,10 +661,17 @@ fail: + + throttle = perf_event_overflow(event, &data, ®s); + out: +- if (throttle) ++ if (throttle) { + perf_ibs_stop(event, 0); +- else +- perf_ibs_enable_event(perf_ibs, hwc, period >> 4); ++ } else { ++ period >>= 4; ++ ++ if ((ibs_caps & IBS_CAPS_RDWROPCNT) && ++ (*config & IBS_OP_CNT_CTL)) ++ period |= *config & IBS_OP_CUR_CNT_RAND; ++ ++ perf_ibs_enable_event(perf_ibs, hwc, period); ++ } + + perf_event_update_userpage(event); + +diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c +index 6179be624f35..2369ea1a1db7 100644 +--- a/arch/x86/events/intel/core.c ++++ b/arch/x86/events/intel/core.c +@@ -3572,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left) + return left; + } + ++static u64 nhm_limit_period(struct perf_event *event, u64 left) ++{ ++ return max(left, 32ULL); ++} ++ + PMU_FORMAT_ATTR(event, "config:0-7" ); + PMU_FORMAT_ATTR(umask, "config:8-15" ); + PMU_FORMAT_ATTR(edge, "config:18" ); +@@ -4550,6 +4555,7 @@ __init int intel_pmu_init(void) + x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints; + x86_pmu.enable_all = intel_pmu_nhm_enable_all; + x86_pmu.extra_regs = intel_nehalem_extra_regs; ++ x86_pmu.limit_period = nhm_limit_period; + + mem_attr = nhm_mem_events_attrs; + +diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c +index e65d7fe6489f..5208ba49c89a 100644 +--- a/arch/x86/hyperv/mmu.c ++++ b/arch/x86/hyperv/mmu.c +@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset, + * Lower 12 bits encode the number of additional + * pages to flush (in addition to the 'cur' page). 
+ */ +- if (diff >= HV_TLB_FLUSH_UNIT) ++ if (diff >= HV_TLB_FLUSH_UNIT) { + gva_list[gva_n] |= ~PAGE_MASK; +- else if (diff) ++ cur += HV_TLB_FLUSH_UNIT; ++ } else if (diff) { + gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT; ++ cur = end; ++ } + +- cur += HV_TLB_FLUSH_UNIT; + gva_n++; + + } while (cur < end); +diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h +index 1392d5e6e8d6..ee26e9215f18 100644 +--- a/arch/x86/include/asm/perf_event.h ++++ b/arch/x86/include/asm/perf_event.h +@@ -252,16 +252,20 @@ struct pebs_lbr { + #define IBSCTL_LVT_OFFSET_VALID (1ULL<<8) + #define IBSCTL_LVT_OFFSET_MASK 0x0F + +-/* ibs fetch bits/masks */ ++/* IBS fetch bits/masks */ + #define IBS_FETCH_RAND_EN (1ULL<<57) + #define IBS_FETCH_VAL (1ULL<<49) + #define IBS_FETCH_ENABLE (1ULL<<48) + #define IBS_FETCH_CNT 0xFFFF0000ULL + #define IBS_FETCH_MAX_CNT 0x0000FFFFULL + +-/* ibs op bits/masks */ +-/* lower 4 bits of the current count are ignored: */ +-#define IBS_OP_CUR_CNT (0xFFFF0ULL<<32) ++/* ++ * IBS op bits/masks ++ * The lower 7 bits of the current count are random bits ++ * preloaded by hardware and ignored in software ++ */ ++#define IBS_OP_CUR_CNT (0xFFF80ULL<<32) ++#define IBS_OP_CUR_CNT_RAND (0x0007FULL<<32) + #define IBS_OP_CNT_CTL (1ULL<<19) + #define IBS_OP_VAL (1ULL<<18) + #define IBS_OP_ENABLE (1ULL<<17) +diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h +index c82abd6e4ca3..869794bd0fd9 100644 +--- a/arch/x86/include/asm/uaccess.h ++++ b/arch/x86/include/asm/uaccess.h +@@ -442,8 +442,10 @@ __pu_label: \ + ({ \ + int __gu_err; \ + __inttype(*(ptr)) __gu_val; \ ++ __typeof__(ptr) __gu_ptr = (ptr); \ ++ __typeof__(size) __gu_size = (size); \ + __uaccess_begin_nospec(); \ +- __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ ++ __get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \ + __uaccess_end(); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __builtin_expect(__gu_err, 0); \ +diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c +index c9fec0657eea..e8c6466ef65e 100644 +--- a/arch/x86/kernel/apic/io_apic.c ++++ b/arch/x86/kernel/apic/io_apic.c +@@ -2434,7 +2434,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from) + * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use + * gsi_top if ioapic_dynirq_base hasn't been initialized yet. + */ +- return ioapic_initialized ? ioapic_dynirq_base : gsi_top; ++ if (!ioapic_initialized) ++ return gsi_top; ++ /* ++ * For DT enabled machines ioapic_dynirq_base is irrelevant and not ++ * updated. So simply return @from if ioapic_dynirq_base == 0. ++ */ ++ return ioapic_dynirq_base ? : from; + } + + #ifdef CONFIG_X86_32 +diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig +index 2e2efa577437..8c37294f1d1e 100644 +--- a/drivers/atm/Kconfig ++++ b/drivers/atm/Kconfig +@@ -200,7 +200,7 @@ config ATM_NICSTAR_USE_SUNI + make the card work). + + config ATM_NICSTAR_USE_IDT77105 +- bool "Use IDT77015 PHY driver (25Mbps)" ++ bool "Use IDT77105 PHY driver (25Mbps)" + depends on ATM_NICSTAR + help + Support for the PHYsical layer chip in ForeRunner LE25 cards. 
In +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c +index fee57f7f3821..81ac7805397d 100644 +--- a/drivers/block/floppy.c ++++ b/drivers/block/floppy.c +@@ -3780,7 +3780,7 @@ static int compat_getdrvprm(int drive, + v.native_format = UDP->native_format; + mutex_unlock(&floppy_mutex); + +- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params))) ++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params))) + return -EFAULT; + return 0; + } +@@ -3816,7 +3816,7 @@ static int compat_getdrvstat(int drive, bool poll, + v.bufblocks = UDRS->bufblocks; + mutex_unlock(&floppy_mutex); + +- if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) ++ if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) + return -EFAULT; + return 0; + Eintr: +diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c +index b72741668c92..0d122440d111 100644 +--- a/drivers/bus/ti-sysc.c ++++ b/drivers/bus/ti-sysc.c +@@ -853,7 +853,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode) + *best_mode = SYSC_IDLE_SMART_WKUP; + else if (idlemodes & BIT(SYSC_IDLE_SMART)) + *best_mode = SYSC_IDLE_SMART; +- else if (idlemodes & SYSC_IDLE_FORCE) ++ else if (idlemodes & BIT(SYSC_IDLE_FORCE)) + *best_mode = SYSC_IDLE_FORCE; + else + return -EINVAL; +@@ -1127,7 +1127,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { + SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0), + SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902, + 0xffff00f0, 0), +- SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0), ++ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0), ++ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0), + SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0), + SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0), + SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0), +@@ -1388,10 +1389,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata) + if (error) + return 0; + +- if (val) +- ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; +- else +- ddata->cfg.sysc_val = ddata->cap->sysc_mask; ++ ddata->cfg.sysc_val = val & ddata->cap->sysc_mask; + + return 0; + } +@@ -2081,27 +2079,27 @@ static int sysc_probe(struct platform_device *pdev) + + error = sysc_init_dts_quirks(ddata); + if (error) +- goto unprepare; ++ return error; + + error = sysc_map_and_check_registers(ddata); + if (error) +- goto unprepare; ++ return error; + + error = sysc_init_sysc_mask(ddata); + if (error) +- goto unprepare; ++ return error; + + error = sysc_init_idlemodes(ddata); + if (error) +- goto unprepare; ++ return error; + + error = sysc_init_syss_mask(ddata); + if (error) +- goto unprepare; ++ return error; + + error = sysc_init_pdata(ddata); + if (error) +- goto unprepare; ++ return error; + + sysc_init_early_quirks(ddata); + +@@ -2111,7 +2109,7 @@ static int sysc_probe(struct platform_device *pdev) + + error = sysc_init_resets(ddata); + if (error) +- return error; ++ goto unprepare; + + error = sysc_init_module(ddata); + if (error) +diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c +index 54de669c38b8..f1d89bdebdda 100644 +--- a/drivers/dma/sh/rcar-dmac.c ++++ b/drivers/dma/sh/rcar-dmac.c +@@ -192,6 +192,7 @@ struct rcar_dmac_chan { + * @iomem: remapped I/O memory base + * @n_channels: number of available channels + * @channels: array of DMAC channels ++ * @channels_mask: bitfield of which DMA channels are managed by this driver + * @modules: bitmask of 
client modules in use + */ + struct rcar_dmac { +@@ -202,6 +203,7 @@ struct rcar_dmac { + + unsigned int n_channels; + struct rcar_dmac_chan *channels; ++ unsigned int channels_mask; + + DECLARE_BITMAP(modules, 256); + }; +@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac) + u16 dmaor; + + /* Clear all channels and enable the DMAC globally. */ +- rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0)); ++ rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask); + rcar_dmac_write(dmac, RCAR_DMAOR, + RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); + +@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac) + for (i = 0; i < dmac->n_channels; ++i) { + struct rcar_dmac_chan *chan = &dmac->channels[i]; + ++ if (!(dmac->channels_mask & BIT(i))) ++ continue; ++ + /* Stop and reinitialize the channel. */ + spin_lock_irq(&chan->lock); + rcar_dmac_chan_halt(chan); +@@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, + return 0; + } + ++#define RCAR_DMAC_MAX_CHANNELS 32 ++ + static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) + { + struct device_node *np = dev->of_node; +@@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) + return ret; + } + +- if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { ++ /* The hardware and driver don't support more than 32 bits in CHCLR */ ++ if (dmac->n_channels <= 0 || ++ dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) { + dev_err(dev, "invalid number of channels %u\n", + dmac->n_channels); + return -EINVAL; + } + ++ dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0); ++ + return 0; + } + +@@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev) + DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | + DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | + DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; +- unsigned int channels_offset = 0; + struct dma_device *engine; + struct rcar_dmac *dmac; + struct resource *mem; +@@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev) + * level we can't disable it selectively, so ignore channel 0 for now if + * the device is part of an IOMMU group. 
+ */ +- if (device_iommu_mapped(&pdev->dev)) { +- dmac->n_channels--; +- channels_offset = 1; +- } ++ if (device_iommu_mapped(&pdev->dev)) ++ dmac->channels_mask &= ~BIT(0); + + dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, + sizeof(*dmac->channels), GFP_KERNEL); +@@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev) + INIT_LIST_HEAD(&engine->channels); + + for (i = 0; i < dmac->n_channels; ++i) { +- ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], +- i + channels_offset); ++ if (!(dmac->channels_mask & BIT(i))) ++ continue; ++ ++ ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i); + if (ret < 0) + goto error; + } +diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c +index baac476c8622..525dc7338fe3 100644 +--- a/drivers/dma/sprd-dma.c ++++ b/drivers/dma/sprd-dma.c +@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + struct sprd_dma_chn *schan = to_sprd_dma_chan(chan); + struct dma_slave_config *slave_cfg = &schan->slave_cfg; + dma_addr_t src = 0, dst = 0; ++ dma_addr_t start_src = 0, start_dst = 0; + struct sprd_dma_desc *sdesc; + struct scatterlist *sg; + u32 len = 0; +@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + dst = sg_dma_address(sg); + } + ++ if (!i) { ++ start_src = src; ++ start_dst = dst; ++ } ++ + /* + * The link-list mode needs at least 2 link-list + * configurations. If there is only one sg, it doesn't +@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + } + } + +- ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len, +- dir, flags, slave_cfg); ++ ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src, ++ start_dst, len, dir, flags, slave_cfg); + if (ret) { + kfree(sdesc); + return NULL; +diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c +index ad2f0a4cd6a4..f255056696ee 100644 +--- a/drivers/dma/ti/dma-crossbar.c ++++ b/drivers/dma/ti/dma-crossbar.c +@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev) + + ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events, + nelm * 2); +- if (ret) ++ if (ret) { ++ kfree(rsv_events); + return ret; ++ } + + for (i = 0; i < nelm; i++) { + ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1], +diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c +index ba27802efcd0..d07c0d5de7a2 100644 +--- a/drivers/dma/ti/omap-dma.c ++++ b/drivers/dma/ti/omap-dma.c +@@ -1540,8 +1540,10 @@ static int omap_dma_probe(struct platform_device *pdev) + + rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq, + IRQF_SHARED, "omap-dma-engine", od); +- if (rc) ++ if (rc) { ++ omap_dma_free(od); + return rc; ++ } + } + + if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123) +diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c +index fd5212c395c0..34d48618f3fc 100644 +--- a/drivers/firmware/google/vpd.c ++++ b/drivers/firmware/google/vpd.c +@@ -92,8 +92,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len) + return VPD_OK; + } + +-static int vpd_section_attrib_add(const u8 *key, s32 key_len, +- const u8 *value, s32 value_len, ++static int vpd_section_attrib_add(const u8 *key, u32 key_len, ++ const u8 *value, u32 value_len, + void *arg) + { + int ret; +diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c +index c62fa7063a7c..584d0d56491f 100644 +--- a/drivers/firmware/google/vpd_decode.c ++++ 
b/drivers/firmware/google/vpd_decode.c +@@ -11,8 +11,8 @@ + + #include "vpd_decode.h" + +-static int vpd_decode_len(const s32 max_len, const u8 *in, +- s32 *length, s32 *decoded_len) ++static int vpd_decode_len(const u32 max_len, const u8 *in, ++ u32 *length, u32 *decoded_len) + { + u8 more; + int i = 0; +@@ -32,18 +32,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in, + } while (more); + + *decoded_len = i; ++ return VPD_OK; ++} ++ ++static int vpd_decode_entry(const u32 max_len, const u8 *input_buf, ++ u32 *_consumed, const u8 **entry, u32 *entry_len) ++{ ++ u32 decoded_len; ++ u32 consumed = *_consumed; ++ ++ if (vpd_decode_len(max_len - consumed, &input_buf[consumed], ++ entry_len, &decoded_len) != VPD_OK) ++ return VPD_FAIL; ++ if (max_len - consumed < decoded_len) ++ return VPD_FAIL; ++ ++ consumed += decoded_len; ++ *entry = input_buf + consumed; ++ ++ /* entry_len is untrusted data and must be checked again. */ ++ if (max_len - consumed < *entry_len) ++ return VPD_FAIL; + ++ consumed += decoded_len; ++ *_consumed = consumed; + return VPD_OK; + } + +-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, ++int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed, + vpd_decode_callback callback, void *callback_arg) + { + int type; +- int res; +- s32 key_len; +- s32 value_len; +- s32 decoded_len; ++ u32 key_len; ++ u32 value_len; + const u8 *key; + const u8 *value; + +@@ -58,26 +79,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, + case VPD_TYPE_STRING: + (*consumed)++; + +- /* key */ +- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed], +- &key_len, &decoded_len); +- if (res != VPD_OK || *consumed + decoded_len >= max_len) ++ if (vpd_decode_entry(max_len, input_buf, consumed, &key, ++ &key_len) != VPD_OK) + return VPD_FAIL; + +- *consumed += decoded_len; +- key = &input_buf[*consumed]; +- *consumed += key_len; +- +- /* value */ +- res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed], +- &value_len, &decoded_len); +- if (res != VPD_OK || *consumed + decoded_len > max_len) ++ if (vpd_decode_entry(max_len, input_buf, consumed, &value, ++ &value_len) != VPD_OK) + return VPD_FAIL; + +- *consumed += decoded_len; +- value = &input_buf[*consumed]; +- *consumed += value_len; +- + if (type == VPD_TYPE_STRING) + return callback(key, key_len, value, value_len, + callback_arg); +diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h +index cf8c2ace155a..8dbe41cac599 100644 +--- a/drivers/firmware/google/vpd_decode.h ++++ b/drivers/firmware/google/vpd_decode.h +@@ -25,8 +25,8 @@ enum { + }; + + /* Callback for vpd_decode_string to invoke. */ +-typedef int vpd_decode_callback(const u8 *key, s32 key_len, +- const u8 *value, s32 value_len, ++typedef int vpd_decode_callback(const u8 *key, u32 key_len, ++ const u8 *value, u32 value_len, + void *arg); + + /* +@@ -44,7 +44,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len, + * If one entry is successfully decoded, sends it to callback and returns the + * result. 
+ */ +-int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, ++int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed, + vpd_decode_callback callback, void *callback_arg); + + #endif /* __VPD_DECODE_H */ +diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c +index a13f224303c6..0221dee8dd4c 100644 +--- a/drivers/fpga/altera-ps-spi.c ++++ b/drivers/fpga/altera-ps-spi.c +@@ -210,7 +210,7 @@ static int altera_ps_write_complete(struct fpga_manager *mgr, + return -EIO; + } + +- if (!IS_ERR(conf->confd)) { ++ if (conf->confd) { + if (!gpiod_get_raw_value_cansleep(conf->confd)) { + dev_err(&mgr->dev, "CONF_DONE is inactive!\n"); + return -EIO; +@@ -289,10 +289,13 @@ static int altera_ps_probe(struct spi_device *spi) + return PTR_ERR(conf->status); + } + +- conf->confd = devm_gpiod_get(&spi->dev, "confd", GPIOD_IN); ++ conf->confd = devm_gpiod_get_optional(&spi->dev, "confd", GPIOD_IN); + if (IS_ERR(conf->confd)) { +- dev_warn(&spi->dev, "Not using confd gpio: %ld\n", +- PTR_ERR(conf->confd)); ++ dev_err(&spi->dev, "Failed to get confd gpio: %ld\n", ++ PTR_ERR(conf->confd)); ++ return PTR_ERR(conf->confd); ++ } else if (!conf->confd) { ++ dev_warn(&spi->dev, "Not using confd gpio"); + } + + /* Register manager with unique name */ +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +index a28a3d722ba2..62298ae5c81c 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +@@ -535,21 +535,24 @@ int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, + struct drm_sched_entity *entity) + { + struct amdgpu_ctx_entity *centity = to_amdgpu_ctx_entity(entity); +- unsigned idx = centity->sequence & (amdgpu_sched_jobs - 1); +- struct dma_fence *other = centity->fences[idx]; ++ struct dma_fence *other; ++ unsigned idx; ++ long r; + +- if (other) { +- signed long r; +- r = dma_fence_wait(other, true); +- if (r < 0) { +- if (r != -ERESTARTSYS) +- DRM_ERROR("Error (%ld) waiting for fence!\n", r); ++ spin_lock(&ctx->ring_lock); ++ idx = centity->sequence & (amdgpu_sched_jobs - 1); ++ other = dma_fence_get(centity->fences[idx]); ++ spin_unlock(&ctx->ring_lock); + +- return r; +- } +- } ++ if (!other) ++ return 0; + +- return 0; ++ r = dma_fence_wait(other, true); ++ if (r < 0 && r != -ERESTARTSYS) ++ DRM_ERROR("Error (%ld) waiting for fence!\n", r); ++ ++ dma_fence_put(other); ++ return r; + } + + void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr) +diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +index 9b9f87b84910..d98fe481cd36 100644 +--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c ++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +@@ -2288,12 +2288,16 @@ static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr) + data->dpm_table.soc_table.dpm_state.soft_max_level = + data->dpm_table.soc_table.dpm_levels[soft_level].value; + +- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); ++ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | ++ FEATURE_DPM_UCLK_MASK | ++ FEATURE_DPM_SOCCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to highest!", + return ret); + +- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); ++ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | ++ FEATURE_DPM_UCLK_MASK | ++ FEATURE_DPM_SOCCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload dpm max level to highest!", + return ret); +@@ -2326,12 
+2330,16 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) + data->dpm_table.soc_table.dpm_state.soft_max_level = + data->dpm_table.soc_table.dpm_levels[soft_level].value; + +- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); ++ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | ++ FEATURE_DPM_UCLK_MASK | ++ FEATURE_DPM_SOCCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload boot level to highest!", + return ret); + +- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); ++ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | ++ FEATURE_DPM_UCLK_MASK | ++ FEATURE_DPM_SOCCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload dpm max level to highest!", + return ret); +@@ -2342,14 +2350,54 @@ static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr) + + static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr) + { ++ struct vega20_hwmgr *data = ++ (struct vega20_hwmgr *)(hwmgr->backend); ++ uint32_t soft_min_level, soft_max_level; + int ret = 0; + +- ret = vega20_upload_dpm_min_level(hwmgr, 0xFFFFFFFF); ++ /* gfxclk soft min/max settings */ ++ soft_min_level = ++ vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table)); ++ soft_max_level = ++ vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table)); ++ ++ data->dpm_table.gfx_table.dpm_state.soft_min_level = ++ data->dpm_table.gfx_table.dpm_levels[soft_min_level].value; ++ data->dpm_table.gfx_table.dpm_state.soft_max_level = ++ data->dpm_table.gfx_table.dpm_levels[soft_max_level].value; ++ ++ /* uclk soft min/max settings */ ++ soft_min_level = ++ vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table)); ++ soft_max_level = ++ vega20_find_highest_dpm_level(&(data->dpm_table.mem_table)); ++ ++ data->dpm_table.mem_table.dpm_state.soft_min_level = ++ data->dpm_table.mem_table.dpm_levels[soft_min_level].value; ++ data->dpm_table.mem_table.dpm_state.soft_max_level = ++ data->dpm_table.mem_table.dpm_levels[soft_max_level].value; ++ ++ /* socclk soft min/max settings */ ++ soft_min_level = ++ vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table)); ++ soft_max_level = ++ vega20_find_highest_dpm_level(&(data->dpm_table.soc_table)); ++ ++ data->dpm_table.soc_table.dpm_state.soft_min_level = ++ data->dpm_table.soc_table.dpm_levels[soft_min_level].value; ++ data->dpm_table.soc_table.dpm_state.soft_max_level = ++ data->dpm_table.soc_table.dpm_levels[soft_max_level].value; ++ ++ ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | ++ FEATURE_DPM_UCLK_MASK | ++ FEATURE_DPM_SOCCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload DPM Bootup Levels!", + return ret); + +- ret = vega20_upload_dpm_max_level(hwmgr, 0xFFFFFFFF); ++ ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK | ++ FEATURE_DPM_UCLK_MASK | ++ FEATURE_DPM_SOCCLK_MASK); + PP_ASSERT_WITH_CODE(!ret, + "Failed to upload DPM Max Levels!", + return ret); +diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c +index de0f882f0f7b..14b41de44ebc 100644 +--- a/drivers/gpu/drm/omapdrm/dss/output.c ++++ b/drivers/gpu/drm/omapdrm/dss/output.c +@@ -4,6 +4,7 @@ + * Author: Archit Taneja <archit@ti.com> + */ + ++#include <linux/bitops.h> + #include <linux/kernel.h> + #include <linux/module.h> + #include <linux/platform_device.h> +@@ -20,7 +21,8 @@ int omapdss_device_init_output(struct omap_dss_device *out) + { + struct device_node *remote_node; + +- remote_node = of_graph_get_remote_node(out->dev->of_node, 0, 0); ++ remote_node = of_graph_get_remote_node(out->dev->of_node, 
++ ffs(out->of_ports) - 1, 0); + if (!remote_node) { + dev_dbg(out->dev, "failed to find video sink\n"); + return 0; +diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c +index b2da31310d24..09b526518f5a 100644 +--- a/drivers/gpu/drm/virtio/virtgpu_object.c ++++ b/drivers/gpu/drm/virtio/virtgpu_object.c +@@ -204,6 +204,7 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, + .interruptible = false, + .no_wait_gpu = false + }; ++ size_t max_segment; + + /* wtf swapping */ + if (bo->pages) +@@ -215,8 +216,13 @@ int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, + if (!bo->pages) + goto out; + +- ret = sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, +- nr_pages << PAGE_SHIFT, GFP_KERNEL); ++ max_segment = virtio_max_dma_size(qdev->vdev); ++ max_segment &= PAGE_MASK; ++ if (max_segment > SCATTERLIST_MAX_SEGMENT) ++ max_segment = SCATTERLIST_MAX_SEGMENT; ++ ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, ++ nr_pages << PAGE_SHIFT, ++ max_segment, GFP_KERNEL); + if (ret) + goto out; + return 0; +diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c +index 3299b1474d1b..53bddb50aeba 100644 +--- a/drivers/hid/wacom_sys.c ++++ b/drivers/hid/wacom_sys.c +@@ -311,14 +311,16 @@ static void wacom_feature_mapping(struct hid_device *hdev, + /* leave touch_max as is if predefined */ + if (!features->touch_max) { + /* read manually */ +- data = kzalloc(2, GFP_KERNEL); ++ n = hid_report_len(field->report); ++ data = hid_alloc_report_buf(field->report, GFP_KERNEL); + if (!data) + break; + data[0] = field->report->id; + ret = wacom_get_report(hdev, HID_FEATURE_REPORT, +- data, 2, WAC_CMD_RETRIES); +- if (ret == 2) { +- features->touch_max = data[1]; ++ data, n, WAC_CMD_RETRIES); ++ if (ret == n) { ++ ret = hid_report_raw_event(hdev, ++ HID_FEATURE_REPORT, data, n, 0); + } else { + features->touch_max = 16; + hid_warn(hdev, "wacom_feature_mapping: " +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index 53ed51adb8ac..58719461850d 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -2510,6 +2510,7 @@ static void wacom_wac_finger_event(struct hid_device *hdev, + struct wacom *wacom = hid_get_drvdata(hdev); + struct wacom_wac *wacom_wac = &wacom->wacom_wac; + unsigned equivalent_usage = wacom_equivalent_usage(usage->hid); ++ struct wacom_features *features = &wacom->wacom_wac.features; + + switch (equivalent_usage) { + case HID_GD_X: +@@ -2530,6 +2531,9 @@ static void wacom_wac_finger_event(struct hid_device *hdev, + case HID_DG_TIPSWITCH: + wacom_wac->hid_data.tipswitch = value; + break; ++ case HID_DG_CONTACTMAX: ++ features->touch_max = value; ++ return; + } + + +diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c +index ad1681872e39..b99322d83f48 100644 +--- a/drivers/i2c/busses/i2c-bcm-iproc.c ++++ b/drivers/i2c/busses/i2c-bcm-iproc.c +@@ -801,7 +801,10 @@ static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter, + + static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) + { +- u32 val = I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; ++ u32 val; ++ ++ /* We do not support the SMBUS Quick command */ ++ val = I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); + + if (adap->algo->reg_slave) + val |= I2C_FUNC_SLAVE; +diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c +index e7f9305b2dd9..f5f001738df5 100644 +--- a/drivers/i2c/busses/i2c-designware-slave.c ++++ 
b/drivers/i2c/busses/i2c-designware-slave.c +@@ -94,6 +94,7 @@ static int i2c_dw_unreg_slave(struct i2c_client *slave) + + dev->disable_int(dev); + dev->disable(dev); ++ synchronize_irq(dev->irq); + dev->slave = NULL; + pm_runtime_put(dev->dev); + +diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c +index 252edb433fdf..29eae1bf4f86 100644 +--- a/drivers/i2c/busses/i2c-mt65xx.c ++++ b/drivers/i2c/busses/i2c-mt65xx.c +@@ -234,6 +234,10 @@ static const struct i2c_adapter_quirks mt7622_i2c_quirks = { + .max_num_msgs = 255, + }; + ++static const struct i2c_adapter_quirks mt8183_i2c_quirks = { ++ .flags = I2C_AQ_NO_ZERO_LEN, ++}; ++ + static const struct mtk_i2c_compatible mt2712_compat = { + .regs = mt_i2c_regs_v1, + .pmic_i2c = 0, +@@ -298,6 +302,7 @@ static const struct mtk_i2c_compatible mt8173_compat = { + }; + + static const struct mtk_i2c_compatible mt8183_compat = { ++ .quirks = &mt8183_i2c_quirks, + .regs = mt_i2c_regs_v2, + .pmic_i2c = 0, + .dcm = 0, +@@ -870,7 +875,11 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id) + + static u32 mtk_i2c_functionality(struct i2c_adapter *adap) + { +- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; ++ if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN) ++ return I2C_FUNC_I2C | ++ (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); ++ else ++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; + } + + static const struct i2c_algorithm mtk_i2c_algorithm = { +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 420efaab3860..e78c20d7df41 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1357,7 +1357,7 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0618", 0 }, + { "ELAN0619", 0 }, + { "ELAN061A", 0 }, +- { "ELAN061B", 0 }, ++/* { "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */ + { "ELAN061C", 0 }, + { "ELAN061D", 0 }, + { "ELAN061E", 0 }, +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index dce1d8d2e8a4..3e687f18b203 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu) + iommu_completion_wait(iommu); + } + ++static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id) ++{ ++ struct iommu_cmd cmd; ++ ++ build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, ++ dom_id, 1); ++ iommu_queue_command(iommu, &cmd); ++ ++ iommu_completion_wait(iommu); ++} ++ + static void amd_iommu_flush_all(struct amd_iommu *iommu) + { + struct iommu_cmd cmd; +@@ -1414,18 +1425,21 @@ static void free_pagetable(struct protection_domain *domain) + * another level increases the size of the address space by 9 bits to a size up + * to 64 bits. 
+ */ +-static bool increase_address_space(struct protection_domain *domain, ++static void increase_address_space(struct protection_domain *domain, + gfp_t gfp) + { ++ unsigned long flags; + u64 *pte; + +- if (domain->mode == PAGE_MODE_6_LEVEL) ++ spin_lock_irqsave(&domain->lock, flags); ++ ++ if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL)) + /* address space already 64 bit large */ +- return false; ++ goto out; + + pte = (void *)get_zeroed_page(gfp); + if (!pte) +- return false; ++ goto out; + + *pte = PM_LEVEL_PDE(domain->mode, + iommu_virt_to_phys(domain->pt_root)); +@@ -1433,7 +1447,10 @@ static bool increase_address_space(struct protection_domain *domain, + domain->mode += 1; + domain->updated = true; + +- return true; ++out: ++ spin_unlock_irqrestore(&domain->lock, flags); ++ ++ return; + } + + static u64 *alloc_pte(struct protection_domain *domain, +@@ -1863,6 +1880,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, + { + u64 pte_root = 0; + u64 flags = 0; ++ u32 old_domid; + + if (domain->mode != PAGE_MODE_NONE) + pte_root = iommu_virt_to_phys(domain->pt_root); +@@ -1912,8 +1930,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, + flags &= ~DEV_DOMID_MASK; + flags |= domain->id; + ++ old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK; + amd_iommu_dev_table[devid].data[1] = flags; + amd_iommu_dev_table[devid].data[0] = pte_root; ++ ++ /* ++ * A kdump kernel might be replacing a domain ID that was copied from ++ * the previous kernel--if so, it needs to flush the translation cache ++ * entries for the old domain ID that is being overwritten ++ */ ++ if (old_domid) { ++ struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; ++ ++ amd_iommu_flush_tlb_domid(iommu, old_domid); ++ } + } + + static void clear_dte_entry(u16 devid) +diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c +index eceaa7e968ae..641dc223c97b 100644 +--- a/drivers/iommu/intel-svm.c ++++ b/drivers/iommu/intel-svm.c +@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu) + } + + static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev, +- unsigned long address, unsigned long pages, int ih, int gl) ++ unsigned long address, unsigned long pages, int ih) + { + struct qi_desc desc; + +- if (pages == -1) { +- /* For global kernel pages we have to flush them in *all* PASIDs +- * because that's the only option the hardware gives us. Despite +- * the fact that they are actually only accessible through one. */ +- if (gl) +- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | +- QI_EIOTLB_DID(sdev->did) | +- QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) | +- QI_EIOTLB_TYPE; +- else +- desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | +- QI_EIOTLB_DID(sdev->did) | +- QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | +- QI_EIOTLB_TYPE; ++ /* ++ * Do PASID granu IOTLB invalidation if page selective capability is ++ * not available. 
++ */ ++ if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) { ++ desc.qw0 = QI_EIOTLB_PASID(svm->pasid) | ++ QI_EIOTLB_DID(sdev->did) | ++ QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) | ++ QI_EIOTLB_TYPE; + desc.qw1 = 0; + } else { + int mask = ilog2(__roundup_pow_of_two(pages)); +@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d + QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) | + QI_EIOTLB_TYPE; + desc.qw1 = QI_EIOTLB_ADDR(address) | +- QI_EIOTLB_GL(gl) | + QI_EIOTLB_IH(ih) | + QI_EIOTLB_AM(mask); + } +@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d + } + + static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address, +- unsigned long pages, int ih, int gl) ++ unsigned long pages, int ih) + { + struct intel_svm_dev *sdev; + + rcu_read_lock(); + list_for_each_entry_rcu(sdev, &svm->devs, list) +- intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl); ++ intel_flush_svm_range_dev(svm, sdev, address, pages, ih); + rcu_read_unlock(); + } + +@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn, + struct intel_svm *svm = container_of(mn, struct intel_svm, notifier); + + intel_flush_svm_range(svm, start, +- (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0); ++ (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0); + } + + static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) +@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm) + rcu_read_lock(); + list_for_each_entry_rcu(sdev, &svm->devs, list) { + intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid); +- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); ++ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0); + } + rcu_read_unlock(); + +@@ -410,7 +404,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) + * large and has to be physically contiguous. So it's + * hard to be as defensive as we might like. */ + intel_pasid_tear_down_entry(iommu, dev, svm->pasid); +- intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm); ++ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0); + kfree_rcu(sdev, rcu); + + if (list_empty(&svm->devs)) { +diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c +index b9dad0accd1b..d855e9c09c08 100644 +--- a/drivers/media/platform/stm32/stm32-dcmi.c ++++ b/drivers/media/platform/stm32/stm32-dcmi.c +@@ -1702,7 +1702,7 @@ static int dcmi_probe(struct platform_device *pdev) + if (irq <= 0) { + if (irq != -EPROBE_DEFER) + dev_err(&pdev->dev, "Could not get irq\n"); +- return irq; ++ return irq ? 
irq : -ENXIO; + } + + dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c +index c659e18b358b..676d233d46d5 100644 +--- a/drivers/media/usb/dvb-usb/technisat-usb2.c ++++ b/drivers/media/usb/dvb-usb/technisat-usb2.c +@@ -608,10 +608,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a) + static int technisat_usb2_get_ir(struct dvb_usb_device *d) + { + struct technisat_usb2_state *state = d->priv; +- u8 *buf = state->buf; +- u8 *b; +- int ret; + struct ir_raw_event ev; ++ u8 *buf = state->buf; ++ int i, ret; + + buf[0] = GET_IR_DATA_VENDOR_REQUEST; + buf[1] = 0x08; +@@ -647,26 +646,25 @@ unlock: + return 0; /* no key pressed */ + + /* decoding */ +- b = buf+1; + + #if 0 + deb_rc("RC: %d ", ret); +- debug_dump(b, ret, deb_rc); ++ debug_dump(buf + 1, ret, deb_rc); + #endif + + ev.pulse = 0; +- while (1) { +- ev.pulse = !ev.pulse; +- ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000; +- ir_raw_event_store(d->rc_dev, &ev); +- +- b++; +- if (*b == 0xff) { ++ for (i = 1; i < ARRAY_SIZE(state->buf); i++) { ++ if (buf[i] == 0xff) { + ev.pulse = 0; + ev.duration = 888888*2; + ir_raw_event_store(d->rc_dev, &ev); + break; + } ++ ++ ev.pulse = !ev.pulse; ++ ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR * ++ FIRMWARE_CLOCK_TICK) / 1000; ++ ir_raw_event_store(d->rc_dev, &ev); + } + + ir_raw_event_handle(d->rc_dev); +diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c +index e4d2dcd5cc0f..19c90fa9e443 100644 +--- a/drivers/media/usb/tm6000/tm6000-dvb.c ++++ b/drivers/media/usb/tm6000/tm6000-dvb.c +@@ -97,6 +97,7 @@ static void tm6000_urb_received(struct urb *urb) + printk(KERN_ERR "tm6000: error %s\n", __func__); + kfree(urb->transfer_buffer); + usb_free_urb(urb); ++ dev->dvb->bulk_urb = NULL; + } + } + } +@@ -127,6 +128,7 @@ static int tm6000_start_stream(struct tm6000_core *dev) + dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL); + if (!dvb->bulk_urb->transfer_buffer) { + usb_free_urb(dvb->bulk_urb); ++ dvb->bulk_urb = NULL; + return -ENOMEM; + } + +@@ -153,6 +155,7 @@ static int tm6000_start_stream(struct tm6000_core *dev) + + kfree(dvb->bulk_urb->transfer_buffer); + usb_free_urb(dvb->bulk_urb); ++ dvb->bulk_urb = NULL; + return ret; + } + +diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c +index 75178624d3f5..fb15f255a1db 100644 +--- a/drivers/net/dsa/microchip/ksz9477_spi.c ++++ b/drivers/net/dsa/microchip/ksz9477_spi.c +@@ -157,6 +157,7 @@ static const struct of_device_id ksz9477_dt_ids[] = { + { .compatible = "microchip,ksz9897" }, + { .compatible = "microchip,ksz9893" }, + { .compatible = "microchip,ksz9563" }, ++ { .compatible = "microchip,ksz8563" }, + {}, + }; + MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c +index b41f23679a08..7ce9c69e9c44 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c +@@ -469,13 +469,19 @@ static int __init xgbe_mod_init(void) + + ret = xgbe_platform_init(); + if (ret) +- return ret; ++ goto err_platform_init; + + ret = xgbe_pci_init(); + if (ret) +- return ret; ++ goto err_pci_init; + + return 0; ++ ++err_pci_init: ++ xgbe_platform_exit(); ++err_platform_init: ++ unregister_netdevice_notifier(&xgbe_netdev_notifier); ++ return ret; + } + + static void __exit xgbe_mod_exit(void) 
+diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c +index 1fff462a4175..3dbf3ff1c450 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c +@@ -431,7 +431,8 @@ int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id) + if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) + break; + } +- if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { ++ if (rule && rule->type == aq_rx_filter_vlan && ++ be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { + struct ethtool_rxnfc cmd; + + cmd.fs.location = rule->aq_fsp.location; +@@ -843,7 +844,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic) + return err; + + if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { +- if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) { ++ if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) { + err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, + !(aq_nic->packet_filter & IFF_PROMISC)); + aq_nic->aq_nic_cfg.is_vlan_force_promisc = false; +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c +index 5315df5ff6f8..4ebf083c51c5 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c +@@ -61,6 +61,10 @@ static int aq_ndev_open(struct net_device *ndev) + if (err < 0) + goto err_exit; + ++ err = aq_filters_vlans_update(aq_nic); ++ if (err < 0) ++ goto err_exit; ++ + err = aq_nic_start(aq_nic); + if (err < 0) + goto err_exit; +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +index 41172fbebddd..1a2b09065293 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +@@ -390,7 +390,7 @@ int aq_nic_start(struct aq_nic_s *self) + self->aq_nic_cfg.link_irq_vec); + err = request_threaded_irq(irqvec, NULL, + aq_linkstate_threaded_isr, +- IRQF_SHARED, ++ IRQF_SHARED | IRQF_ONESHOT, + self->ndev->name, self); + if (err < 0) + goto err_exit; +diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +index 715685aa48c3..28892b8acd0e 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +@@ -86,6 +86,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) + } + } + ++err_exit: + if (!was_tx_cleaned) + work_done = budget; + +@@ -95,7 +96,7 @@ static int aq_vec_poll(struct napi_struct *napi, int budget) + 1U << self->aq_ring_param.vec_idx); + } + } +-err_exit: ++ + return work_done; + } + +diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +index 8c1497e7d9c5..aa31948eac64 100644 +--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c ++++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c +@@ -79,7 +79,7 @@ static int enetc_ptp_probe(struct pci_dev *pdev, + n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); + if (n != 1) { + err = -EPERM; +- goto err_irq; ++ goto err_irq_vectors; + } + + ptp_qoriq->irq = pci_irq_vector(pdev, 0); +@@ -103,6 +103,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev, + err_no_clock: + free_irq(ptp_qoriq->irq, ptp_qoriq); + err_irq: ++ pci_free_irq_vectors(pdev); ++err_irq_vectors: + iounmap(base); + err_ioremap: + kfree(ptp_qoriq); +@@ -120,6 +122,7 @@ static void enetc_ptp_remove(struct pci_dev *pdev) + struct 
ptp_qoriq *ptp_qoriq = pci_get_drvdata(pdev); + + ptp_qoriq_free(ptp_qoriq); ++ pci_free_irq_vectors(pdev); + kfree(ptp_qoriq); + + pci_release_mem_regions(pdev); +diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c +index fe879c07ae3c..fc5ea87bd387 100644 +--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c +@@ -11,6 +11,7 @@ + #include <linux/io.h> + #include <linux/ip.h> + #include <linux/ipv6.h> ++#include <linux/marvell_phy.h> + #include <linux/module.h> + #include <linux/phy.h> + #include <linux/platform_device.h> +@@ -1149,6 +1150,13 @@ static void hns_nic_adjust_link(struct net_device *ndev) + } + } + ++static int hns_phy_marvell_fixup(struct phy_device *phydev) ++{ ++ phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE; ++ ++ return 0; ++} ++ + /** + *hns_nic_init_phy - init phy + *@ndev: net device +@@ -1174,6 +1182,16 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) + if (h->phy_if != PHY_INTERFACE_MODE_XGMII) { + phy_dev->dev_flags = 0; + ++ /* register the PHY fixup (for Marvell 88E1510) */ ++ ret = phy_register_fixup_for_uid(MARVELL_PHY_ID_88E1510, ++ MARVELL_PHY_ID_MASK, ++ hns_phy_marvell_fixup); ++ /* we can live without it, so just issue a warning */ ++ if (ret) ++ netdev_warn(ndev, ++ "Cannot register PHY fixup, ret=%d\n", ++ ret); ++ + ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link, + h->phy_if); + } else { +@@ -2429,8 +2447,11 @@ static int hns_nic_dev_remove(struct platform_device *pdev) + hns_nic_uninit_ring_data(priv); + priv->ring_data = NULL; + +- if (ndev->phydev) ++ if (ndev->phydev) { ++ phy_unregister_fixup_for_uid(MARVELL_PHY_ID_88E1510, ++ MARVELL_PHY_ID_MASK); + phy_disconnect(ndev->phydev); ++ } + + if (!IS_ERR_OR_NULL(priv->ae_handle)) + hnae_put_handle(priv->ae_handle); +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +index 66b691b7221f..f1e0c16263a4 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +@@ -3896,6 +3896,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) + + hns3_client_stop(handle); + ++ hns3_uninit_phy(netdev); ++ + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + goto out_netdev_free; +@@ -3905,8 +3907,6 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) + + hns3_clear_all_ring(handle, true); + +- hns3_uninit_phy(netdev); +- + hns3_nic_uninit_vector_data(priv); + + ret = hns3_nic_dealloc_vector_data(priv); +diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c +index cebd20f3128d..fa4bb940665c 100644 +--- a/drivers/net/ethernet/ibm/ibmvnic.c ++++ b/drivers/net/ethernet/ibm/ibmvnic.c +@@ -1983,6 +1983,10 @@ static void __ibmvnic_reset(struct work_struct *work) + + rwi = get_next_rwi(adapter); + while (rwi) { ++ if (adapter->state == VNIC_REMOVING || ++ adapter->state == VNIC_REMOVED) ++ goto out; ++ + if (adapter->force_reset_recovery) { + adapter->force_reset_recovery = false; + rc = do_hard_reset(adapter, rwi, reset_state); +@@ -2007,7 +2011,7 @@ static void __ibmvnic_reset(struct work_struct *work) + netdev_dbg(adapter->netdev, "Reset failed\n"); + free_all_rwi(adapter); + } +- ++out: + adapter->resetting = false; + if (we_lock_rtnl) + rtnl_unlock(); +diff --git a/drivers/net/ethernet/marvell/sky2.c 
b/drivers/net/ethernet/marvell/sky2.c +index c93a6f9b735b..7e88446ac97a 100644 +--- a/drivers/net/ethernet/marvell/sky2.c ++++ b/drivers/net/ethernet/marvell/sky2.c +@@ -4924,6 +4924,13 @@ static const struct dmi_system_id msi_blacklist[] = { + DMI_MATCH(DMI_BOARD_NAME, "P6T"), + }, + }, ++ { ++ .ident = "ASUS P6X", ++ .matches = { ++ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), ++ DMI_MATCH(DMI_BOARD_NAME, "P6X"), ++ }, ++ }, + {} + }; + +diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c +index 6de23b56b294..c875a2fa7596 100644 +--- a/drivers/net/ethernet/qlogic/qed/qed_main.c ++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c +@@ -1215,7 +1215,7 @@ static int qed_slowpath_start(struct qed_dev *cdev, + &drv_version); + if (rc) { + DP_NOTICE(cdev, "Failed sending drv version command\n"); +- return rc; ++ goto err4; + } + } + +@@ -1223,6 +1223,8 @@ static int qed_slowpath_start(struct qed_dev *cdev, + + return 0; + ++err4: ++ qed_ll2_dealloc_if(cdev); + err3: + qed_hw_stop(cdev); + err2: +diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c +index 7a5e6c5abb57..276c7cae7cee 100644 +--- a/drivers/net/ethernet/seeq/sgiseeq.c ++++ b/drivers/net/ethernet/seeq/sgiseeq.c +@@ -794,15 +794,16 @@ static int sgiseeq_probe(struct platform_device *pdev) + printk(KERN_ERR "Sgiseeq: Cannot register net device, " + "aborting.\n"); + err = -ENODEV; +- goto err_out_free_page; ++ goto err_out_free_attrs; + } + + printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); + + return 0; + +-err_out_free_page: +- free_page((unsigned long) sp->srings); ++err_out_free_attrs: ++ dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings, ++ sp->srings_dma, DMA_ATTR_NON_CONSISTENT); + err_out_free_dev: + free_netdev(dev); + +diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c +index b41696e16bdc..c20e7ef18bc9 100644 +--- a/drivers/net/ieee802154/mac802154_hwsim.c ++++ b/drivers/net/ieee802154/mac802154_hwsim.c +@@ -802,7 +802,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, + err = hwsim_subscribe_all_others(phy); + if (err < 0) { + mutex_unlock(&hwsim_phys_lock); +- goto err_reg; ++ goto err_subscribe; + } + } + list_add_tail(&phy->list, &hwsim_phys); +@@ -812,6 +812,8 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, + + return idx; + ++err_subscribe: ++ ieee802154_unregister_hw(phy->hw); + err_reg: + kfree(pib); + err_pib: +@@ -901,9 +903,9 @@ static __init int hwsim_init_module(void) + return 0; + + platform_drv: +- genl_unregister_family(&hwsim_genl_family); +-platform_dev: + platform_device_unregister(mac802154hwsim_dev); ++platform_dev: ++ genl_unregister_family(&hwsim_genl_family); + return rc; + } + +diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c +index 1a7b7bd412f9..f2553dff5b17 100644 +--- a/drivers/net/usb/r8152.c ++++ b/drivers/net/usb/r8152.c +@@ -787,8 +787,11 @@ int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data) + ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0), + RTL8152_REQ_GET_REGS, RTL8152_REQT_READ, + value, index, tmp, size, 500); ++ if (ret < 0) ++ memset(data, 0xff, size); ++ else ++ memcpy(data, tmp, size); + +- memcpy(data, tmp, size); + kfree(tmp); + + return ret; +diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c +index 653d347a9a19..580387f9f12a 100644 +--- 
a/drivers/net/wireless/marvell/mwifiex/ie.c ++++ b/drivers/net/wireless/marvell/mwifiex/ie.c +@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len, + } + + vs_ie = (struct ieee_types_header *)vendor_ie; ++ if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 > ++ IEEE_MAX_IE_SIZE) ++ return -EINVAL; + memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length), + vs_ie, vs_ie->len + 2); + le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2); +diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +index 18f7d9bf30b2..0939a8c8f3ab 100644 +--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c ++++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c +@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, + + rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len); + if (rate_ie) { ++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES) ++ return; + memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len); + rate_len = rate_ie->len; + } +@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg, + rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, + params->beacon.tail, + params->beacon.tail_len); +- if (rate_ie) ++ if (rate_ie) { ++ if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len) ++ return; + memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len); ++ } + + return; + } +@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv, + params->beacon.tail_len); + if (vendor_ie) { + wmm_ie = vendor_ie; ++ if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info)) ++ return; + memcpy(&bss_cfg->wmm_info, wmm_ie + + sizeof(struct ieee_types_header), *(wmm_ie + 1)); + priv->wmm_enabled = 1; +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index 8d33970a2950..5f5722bf6762 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, + __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); + } + if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { +- queue->rx.rsp_cons = ++cons; ++ queue->rx.rsp_cons = ++cons + skb_queue_len(list); + kfree_skb(nskb); + return ~0U; + } +diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +index 8ffba67568ec..b7f6b1324395 100644 +--- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c ++++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c +@@ -61,6 +61,7 @@ + USB2_OBINT_IDDIGCHG) + + /* VBCTRL */ ++#define USB2_VBCTRL_OCCLREN BIT(16) + #define USB2_VBCTRL_DRVVBUSSEL BIT(8) + + /* LINECTRL1 */ +@@ -374,6 +375,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) + writel(val, usb2_base + USB2_LINECTRL1); + + val = readl(usb2_base + USB2_VBCTRL); ++ val &= ~USB2_VBCTRL_OCCLREN; + writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL); + val = readl(usb2_base + USB2_ADPCTRL); + writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); +diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c +index 0b4f36905321..8e667967928a 100644 +--- a/drivers/tty/serial/atmel_serial.c ++++ b/drivers/tty/serial/atmel_serial.c +@@ -1400,7 +1400,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending) + + atmel_port->hd_start_rx = false; + atmel_start_rx(port); +- return; + } + + atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); +diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c +index 
73d71a4e6c0c..f49b7d6fbc88 100644 +--- a/drivers/tty/serial/sprd_serial.c ++++ b/drivers/tty/serial/sprd_serial.c +@@ -609,7 +609,7 @@ static inline void sprd_rx(struct uart_port *port) + + if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE | + SPRD_LSR_FE | SPRD_LSR_OE)) +- if (handle_lsr_errors(port, &lsr, &flag)) ++ if (handle_lsr_errors(port, &flag, &lsr)) + continue; + if (uart_handle_sysrq_char(port, ch)) + continue; +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index 9d6cb709ca7b..151a74a54386 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -921,7 +921,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) + struct usb_bos_descriptor *bos; + struct usb_dev_cap_header *cap; + struct usb_ssp_cap_descriptor *ssp_cap; +- unsigned char *buffer; ++ unsigned char *buffer, *buffer0; + int length, total_len, num, i, ssac; + __u8 cap_type; + int ret; +@@ -966,10 +966,12 @@ int usb_get_bos_descriptor(struct usb_device *dev) + ret = -ENOMSG; + goto err; + } ++ ++ buffer0 = buffer; + total_len -= length; ++ buffer += length; + + for (i = 0; i < num; i++) { +- buffer += length; + cap = (struct usb_dev_cap_header *)buffer; + + if (total_len < sizeof(*cap) || total_len < cap->bLength) { +@@ -983,8 +985,6 @@ int usb_get_bos_descriptor(struct usb_device *dev) + break; + } + +- total_len -= length; +- + if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { + dev_warn(ddev, "descriptor type invalid, skip\n"); + continue; +@@ -1019,7 +1019,11 @@ int usb_get_bos_descriptor(struct usb_device *dev) + default: + break; + } ++ ++ total_len -= length; ++ buffer += length; + } ++ dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0); + + return 0; + +diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c +index 294158113d62..77142f9bf26a 100644 +--- a/drivers/usb/host/xhci-tegra.c ++++ b/drivers/usb/host/xhci-tegra.c +@@ -1217,6 +1217,16 @@ static int tegra_xusb_probe(struct platform_device *pdev) + + tegra_xusb_config(tegra, regs); + ++ /* ++ * The XUSB Falcon microcontroller can only address 40 bits, so set ++ * the DMA mask accordingly. ++ */ ++ err = dma_set_mask_and_coherent(tegra->dev, DMA_BIT_MASK(40)); ++ if (err < 0) { ++ dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); ++ goto put_rpm; ++ } ++ + err = tegra_xusb_load_firmware(tegra); + if (err < 0) { + dev_err(&pdev->dev, "failed to load firmware: %d\n", err); +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 18c7c6b2fe08..85b2107e8a3d 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -2961,6 +2961,7 @@ static int + cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) + { + int rc = 0; ++ int is_domain = 0; + const char *delim, *payload; + char *desc; + ssize_t len; +@@ -3008,6 +3009,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) + rc = PTR_ERR(key); + goto out_err; + } ++ is_domain = 1; + } + + down_read(&key->sem); +@@ -3065,6 +3067,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) + goto out_key_put; + } + ++ /* ++ * If we have a domain key then we must set the domainName in the ++ * for the request. 
++ */ ++ if (is_domain && ses->domainName) { ++ vol->domainname = kstrndup(ses->domainName, ++ strlen(ses->domainName), ++ GFP_KERNEL); ++ if (!vol->domainname) { ++ cifs_dbg(FYI, "Unable to allocate %zd bytes for " ++ "domain\n", len); ++ rc = -ENOMEM; ++ kfree(vol->username); ++ vol->username = NULL; ++ kzfree(vol->password); ++ vol->password = NULL; ++ goto out_key_put; ++ } ++ } ++ + out_key_put: + up_read(&key->sem); + key_put(key); +diff --git a/fs/fs_parser.c b/fs/fs_parser.c +index 0d388faa25d1..460ea4206fa2 100644 +--- a/fs/fs_parser.c ++++ b/fs/fs_parser.c +@@ -264,6 +264,7 @@ int fs_lookup_param(struct fs_context *fc, + return invalf(fc, "%s: not usable as path", param->key); + } + ++ f->refcnt++; /* filename_lookup() drops our ref. */ + ret = filename_lookup(param->dirfd, f, flags, _path, NULL); + if (ret < 0) { + errorf(fc, "%s: Lookup failure for '%s'", param->key, f->name); +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index 9f44ddc34c7b..3321cc7a7ead 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -1483,7 +1483,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx, + if (S_ISREG(file->f_path.dentry->d_inode->i_mode)) + nfs_file_set_open_context(file, ctx); + else +- err = -ESTALE; ++ err = -EOPENSTALE; + out: + return err; + } +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index c67cdbb36ce7..38d915814221 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -8,6 +8,7 @@ + */ + + #include <linux/nfs_fs.h> ++#include <linux/nfs_mount.h> + #include <linux/nfs_page.h> + #include <linux/module.h> + #include <linux/sched/mm.h> +@@ -928,7 +929,9 @@ retry: + pgm = &pgio->pg_mirrors[0]; + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; + +- pgio->pg_maxretrans = io_maxretrans; ++ if (NFS_SERVER(pgio->pg_inode)->flags & ++ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) ++ pgio->pg_maxretrans = io_maxretrans; + return; + out_nolseg: + if (pgio->pg_error < 0) +@@ -936,6 +939,7 @@ out_nolseg: + out_mds: + pnfs_put_lseg(pgio->pg_lseg); + pgio->pg_lseg = NULL; ++ pgio->pg_maxretrans = 0; + nfs_pageio_reset_read_mds(pgio); + } + +@@ -996,12 +1000,15 @@ retry: + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize; + } + +- pgio->pg_maxretrans = io_maxretrans; ++ if (NFS_SERVER(pgio->pg_inode)->flags & ++ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) ++ pgio->pg_maxretrans = io_maxretrans; + return; + + out_mds: + pnfs_put_lseg(pgio->pg_lseg); + pgio->pg_lseg = NULL; ++ pgio->pg_maxretrans = 0; + nfs_pageio_reset_write_mds(pgio); + } + +diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h +index 81e2fdff227e..9ab9427405f3 100644 +--- a/fs/nfs/internal.h ++++ b/fs/nfs/internal.h +@@ -773,3 +773,13 @@ static inline bool nfs_error_is_fatal(int err) + } + } + ++static inline bool nfs_error_is_fatal_on_server(int err) ++{ ++ switch (err) { ++ case 0: ++ case -ERESTARTSYS: ++ case -EINTR: ++ return false; ++ } ++ return nfs_error_is_fatal(err); ++} +diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c +index 3a507c42c1ca..336643b82188 100644 +--- a/fs/nfs/nfs4file.c ++++ b/fs/nfs/nfs4file.c +@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp) + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + switch (err) { +- case -EPERM: +- case -EACCES: +- case -EDQUOT: +- case -ENOSPC: +- case -EROFS: +- goto out_put_ctx; + default: ++ goto out_put_ctx; ++ case -ENOENT: ++ case -ESTALE: ++ case -EISDIR: ++ case -ENOTDIR: ++ case -ELOOP: + goto out_drop; + } + } +@@ -187,7 +187,11 @@ static loff_t 
nfs42_remap_file_range(struct file *src_file, loff_t src_off, + bool same_inode = false; + int ret; + +- if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) ++ /* NFS does not support deduplication. */ ++ if (remap_flags & REMAP_FILE_DEDUP) ++ return -EOPNOTSUPP; ++ ++ if (remap_flags & ~REMAP_FILE_ADVISORY) + return -EINVAL; + + /* check alignment w.r.t. clone_blksize */ +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 8b6211753228..eae584dbfa08 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, + } + + hdr->res.fattr = &hdr->fattr; +- hdr->res.count = count; ++ hdr->res.count = 0; + hdr->res.eof = 0; + hdr->res.verf = &hdr->verf; + nfs_fattr_init(&hdr->fattr); +diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c +index 5552fa8b6e12..0f7288b94633 100644 +--- a/fs/nfs/proc.c ++++ b/fs/nfs/proc.c +@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr) + /* Emulate the eof flag, which isn't normally needed in NFSv2 + * as it is guaranteed to always return the file attributes + */ +- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) ++ if ((hdr->res.count == 0 && hdr->args.count > 0) || ++ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size) + hdr->res.eof = 1; + } + return 0; +@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task, + + static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr) + { +- if (task->tk_status >= 0) ++ if (task->tk_status >= 0) { ++ hdr->res.count = hdr->args.count; + nfs_writeback_update_inode(hdr); ++ } + return 0; + } + +diff --git a/fs/nfs/read.c b/fs/nfs/read.c +index c19841c82b6a..cfe0b586eadd 100644 +--- a/fs/nfs/read.c ++++ b/fs/nfs/read.c +@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio) + } + EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); + +-static void nfs_readpage_release(struct nfs_page *req) ++static void nfs_readpage_release(struct nfs_page *req, int error) + { + struct inode *inode = d_inode(nfs_req_openctx(req)->dentry); ++ struct page *page = req->wb_page; + + dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id, + (unsigned long long)NFS_FILEID(inode), req->wb_bytes, + (long long)req_offset(req)); + ++ if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT) ++ SetPageError(page); + if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { +- if (PageUptodate(req->wb_page)) +- nfs_readpage_to_fscache(inode, req->wb_page, 0); ++ struct address_space *mapping = page_file_mapping(page); + +- unlock_page(req->wb_page); ++ if (PageUptodate(page)) ++ nfs_readpage_to_fscache(inode, page, 0); ++ else if (!PageError(page) && !PagePrivate(page)) ++ generic_error_remove_page(mapping, page); ++ unlock_page(page); + } + nfs_release_request(req); + } +@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, + &nfs_async_read_completion_ops); + if (!nfs_pageio_add_request(&pgio, new)) { + nfs_list_remove_request(new); +- nfs_readpage_release(new); ++ nfs_readpage_release(new, pgio.pg_error); + } + nfs_pageio_complete(&pgio); + +@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req) + static void nfs_read_completion(struct nfs_pgio_header *hdr) + { + unsigned long bytes = 0; ++ int error; + + if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) + goto out; +@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr) + 
zero_user_segment(page, start, end); + } + } ++ error = 0; + bytes += req->wb_bytes; + if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) { + if (bytes <= hdr->good_bytes) + nfs_page_group_set_uptodate(req); ++ else { ++ error = hdr->error; ++ xchg(&nfs_req_openctx(req)->error, error); ++ } + } else + nfs_page_group_set_uptodate(req); + nfs_list_remove_request(req); +- nfs_readpage_release(req); ++ nfs_readpage_release(req, error); + } + out: + hdr->release(hdr); +@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error) + while (!list_empty(head)) { + req = nfs_list_entry(head->next); + nfs_list_remove_request(req); +- nfs_readpage_release(req); ++ nfs_readpage_release(req, error); + } + } + +@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page) + goto out; + } + ++ xchg(&ctx->error, 0); + error = nfs_readpage_async(ctx, inode, page); +- ++ if (!error) { ++ error = wait_on_page_locked_killable(page); ++ if (!PageUptodate(page) && !error) ++ error = xchg(&ctx->error, 0); ++ } + out: + put_nfs_open_context(ctx); + return error; +@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page) + zero_user_segment(page, len, PAGE_SIZE); + if (!nfs_pageio_add_request(desc->pgio, new)) { + nfs_list_remove_request(new); +- nfs_readpage_release(new); + error = desc->pgio->pg_error; ++ nfs_readpage_release(new, error); + goto out; + } + return 0; +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index 059a7c38bc4f..ee6932c9819e 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops; + static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; + static const struct nfs_commit_completion_ops nfs_commit_completion_ops; + static const struct nfs_rw_ops nfs_rw_write_ops; ++static void nfs_inode_remove_request(struct nfs_page *req); + static void nfs_clear_request_commit(struct nfs_page *req); + static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, + struct inode *inode); +@@ -591,23 +592,13 @@ release_request: + + static void nfs_write_error(struct nfs_page *req, int error) + { ++ nfs_set_pageerror(page_file_mapping(req->wb_page)); + nfs_mapping_set_error(req->wb_page, error); ++ nfs_inode_remove_request(req); + nfs_end_page_writeback(req); + nfs_release_request(req); + } + +-static bool +-nfs_error_is_fatal_on_server(int err) +-{ +- switch (err) { +- case 0: +- case -ERESTARTSYS: +- case -EINTR: +- return false; +- } +- return nfs_error_is_fatal(err); +-} +- + /* + * Find an associated nfs write request, and prepare to flush it out + * May return an error if the user signalled nfs_wait_on_request(). 
+@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err) + static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, + struct page *page) + { +- struct address_space *mapping; + struct nfs_page *req; + int ret = 0; + +@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, + WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); + + /* If there is a fatal error that covers this write, just exit */ +- ret = 0; +- mapping = page_file_mapping(page); +- if (test_bit(AS_ENOSPC, &mapping->flags) || +- test_bit(AS_EIO, &mapping->flags)) ++ ret = pgio->pg_error; ++ if (nfs_error_is_fatal_on_server(ret)) + goto out_launder; + ++ ret = 0; + if (!nfs_pageio_add_request(pgio, req)) { + ret = pgio->pg_error; + /* +@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, + } else + ret = -EAGAIN; + nfs_redirty_request(req); ++ pgio->pg_error = 0; + } else + nfs_add_stats(page_file_mapping(page)->host, + NFSIOS_WRITEPAGES, 1); +@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, + ret = nfs_page_async_flush(pgio, page); + if (ret == -EAGAIN) { + redirty_page_for_writepage(wbc, page); +- ret = 0; ++ ret = AOP_WRITEPAGE_ACTIVATE; + } + return ret; + } +@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page, + nfs_pageio_init_write(&pgio, inode, 0, + false, &nfs_async_write_completion_ops); + err = nfs_do_writepage(page, wbc, &pgio); ++ pgio.pg_error = 0; + nfs_pageio_complete(&pgio); + if (err < 0) + return err; +- if (pgio.pg_error < 0) ++ if (nfs_error_is_fatal(pgio.pg_error)) + return pgio.pg_error; + return 0; + } +@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc) + int ret; + + ret = nfs_writepage_locked(page, wbc); +- unlock_page(page); ++ if (ret != AOP_WRITEPAGE_ACTIVATE) ++ unlock_page(page); + return ret; + } + +@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control * + int ret; + + ret = nfs_do_writepage(page, wbc, data); +- unlock_page(page); ++ if (ret != AOP_WRITEPAGE_ACTIVATE) ++ unlock_page(page); + return ret; + } + +@@ -734,6 +727,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) + &nfs_async_write_completion_ops); + pgio.pg_io_completion = ioc; + err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); ++ pgio.pg_error = 0; + nfs_pageio_complete(&pgio); + nfs_io_completion_put(ioc); + +@@ -742,7 +736,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) + if (err < 0) + goto out_err; + err = pgio.pg_error; +- if (err < 0) ++ if (nfs_error_is_fatal(err)) + goto out_err; + return 0; + out_err: +diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h +index 28a2d12a1029..a8279280e88d 100644 +--- a/fs/overlayfs/ovl_entry.h ++++ b/fs/overlayfs/ovl_entry.h +@@ -66,6 +66,7 @@ struct ovl_fs { + bool workdir_locked; + /* Traps in ovl inode cache */ + struct inode *upperdir_trap; ++ struct inode *workbasedir_trap; + struct inode *workdir_trap; + struct inode *indexdir_trap; + /* Inode numbers in all layers do not use the high xino_bits */ +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c +index b368e2e102fa..afbcb116a7f1 100644 +--- a/fs/overlayfs/super.c ++++ b/fs/overlayfs/super.c +@@ -212,6 +212,7 @@ static void ovl_free_fs(struct ovl_fs *ofs) + { + unsigned i; + ++ iput(ofs->workbasedir_trap); + iput(ofs->indexdir_trap); + iput(ofs->workdir_trap); + iput(ofs->upperdir_trap); +@@ 
-1003,6 +1004,25 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, + return 0; + } + ++/* ++ * Determine how we treat concurrent use of upperdir/workdir based on the ++ * index feature. This is papering over mount leaks of container runtimes, ++ * for example, an old overlay mount is leaked and now its upperdir is ++ * attempted to be used as a lower layer in a new overlay mount. ++ */ ++static int ovl_report_in_use(struct ovl_fs *ofs, const char *name) ++{ ++ if (ofs->config.index) { ++ pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n", ++ name); ++ return -EBUSY; ++ } else { ++ pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n", ++ name); ++ return 0; ++ } ++} ++ + static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, + struct path *upperpath) + { +@@ -1040,14 +1060,12 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, + upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); + ofs->upper_mnt = upper_mnt; + +- err = -EBUSY; + if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) { + ofs->upperdir_locked = true; +- } else if (ofs->config.index) { +- pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n"); +- goto out; + } else { +- pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); ++ err = ovl_report_in_use(ofs, "upperdir"); ++ if (err) ++ goto out; + } + + err = 0; +@@ -1157,16 +1175,19 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs, + + ofs->workbasedir = dget(workpath.dentry); + +- err = -EBUSY; + if (ovl_inuse_trylock(ofs->workbasedir)) { + ofs->workdir_locked = true; +- } else if (ofs->config.index) { +- pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n"); +- goto out; + } else { +- pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); ++ err = ovl_report_in_use(ofs, "workdir"); ++ if (err) ++ goto out; + } + ++ err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap, ++ "workdir"); ++ if (err) ++ goto out; ++ + err = ovl_make_workdir(sb, ofs, &workpath); + + out: +@@ -1313,16 +1334,16 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs, + if (err < 0) + goto out; + +- err = -EBUSY; +- if (ovl_is_inuse(stack[i].dentry)) { +- pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n"); +- goto out; +- } +- + err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir"); + if (err) + goto out; + ++ if (ovl_is_inuse(stack[i].dentry)) { ++ err = ovl_report_in_use(ofs, "lowerdir"); ++ if (err) ++ goto out; ++ } ++ + mnt = clone_private_mount(&stack[i]); + err = PTR_ERR(mnt); + if (IS_ERR(mnt)) { +@@ -1469,8 +1490,8 @@ out_err: + * - another layer of this overlayfs instance + * - upper/work dir of any overlayfs instance + */ +-static int ovl_check_layer(struct super_block *sb, struct dentry *dentry, +- const char *name) ++static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs, ++ struct dentry *dentry, const char *name) + { + struct dentry *next = dentry, *parent; + int err = 0; +@@ -1482,13 +1503,11 @@ static int ovl_check_layer(struct super_block *sb, struct dentry 
*dentry, + + /* Walk back ancestors to root (inclusive) looking for traps */ + while (!err && parent != next) { +- if (ovl_is_inuse(parent)) { +- err = -EBUSY; +- pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n", +- name); +- } else if (ovl_lookup_trap_inode(sb, parent)) { ++ if (ovl_lookup_trap_inode(sb, parent)) { + err = -ELOOP; + pr_err("overlayfs: overlapping %s path\n", name); ++ } else if (ovl_is_inuse(parent)) { ++ err = ovl_report_in_use(ofs, name); + } + next = parent; + parent = dget_parent(next); +@@ -1509,7 +1528,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb, + int i, err; + + if (ofs->upper_mnt) { +- err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir"); ++ err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root, ++ "upperdir"); + if (err) + return err; + +@@ -1520,13 +1540,14 @@ static int ovl_check_overlapping_layers(struct super_block *sb, + * workbasedir. In that case, we already have their traps in + * inode cache and we will catch that case on lookup. + */ +- err = ovl_check_layer(sb, ofs->workbasedir, "workdir"); ++ err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir"); + if (err) + return err; + } + + for (i = 0; i < ofs->numlower; i++) { +- err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root, ++ err = ovl_check_layer(sb, ofs, ++ ofs->lower_layers[i].mnt->mnt_root, + "lowerdir"); + if (err) + return err; +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index 6a8dd4af0147..ba8dc520cc79 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -346,7 +346,6 @@ enum { + #define QI_PC_PASID_SEL (QI_PC_TYPE | QI_PC_GRAN(1)) + + #define QI_EIOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) +-#define QI_EIOTLB_GL(gl) (((u64)gl) << 7) + #define QI_EIOTLB_IH(ih) (((u64)ih) << 6) + #define QI_EIOTLB_AM(am) (((u64)am)) + #define QI_EIOTLB_PASID(pasid) (((u64)pasid) << 32) +@@ -378,8 +377,6 @@ enum { + #define QI_RESP_INVALID 0x1 + #define QI_RESP_FAILURE 0xf + +-#define QI_GRAN_ALL_ALL 0 +-#define QI_GRAN_NONG_ALL 1 + #define QI_GRAN_NONG_PASID 2 + #define QI_GRAN_PSI_PASID 3 + +diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h +index a16fbe9a2a67..aa99c73c3fbd 100644 +--- a/include/net/pkt_sched.h ++++ b/include/net/pkt_sched.h +@@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q); + static inline void qdisc_run(struct Qdisc *q) + { + if (qdisc_run_begin(q)) { +- __qdisc_run(q); ++ /* NOLOCK qdisc must check 'state' under the qdisc seqlock ++ * to avoid racing with dev_qdisc_reset() ++ */ ++ if (!(q->flags & TCQ_F_NOLOCK) || ++ likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) ++ __qdisc_run(q); + qdisc_run_end(q); + } + } +diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h +index 8a5f70c7cdf2..5e69fba181bc 100644 +--- a/include/net/sock_reuseport.h ++++ b/include/net/sock_reuseport.h +@@ -21,7 +21,8 @@ struct sock_reuseport { + unsigned int synq_overflow_ts; + /* ID stays the same even after the size of socks[] grows. 
*/ + unsigned int reuseport_id; +- bool bind_inany; ++ unsigned int bind_inany:1; ++ unsigned int has_conns:1; + struct bpf_prog __rcu *prog; /* optional BPF sock selector */ + struct sock *socks[0]; /* array of sock pointers */ + }; +@@ -35,6 +36,24 @@ extern struct sock *reuseport_select_sock(struct sock *sk, + struct sk_buff *skb, + int hdr_len); + extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); ++ ++static inline bool reuseport_has_conns(struct sock *sk, bool set) ++{ ++ struct sock_reuseport *reuse; ++ bool ret = false; ++ ++ rcu_read_lock(); ++ reuse = rcu_dereference(sk->sk_reuseport_cb); ++ if (reuse) { ++ if (set) ++ reuse->has_conns = 1; ++ ret = reuse->has_conns; ++ } ++ rcu_read_unlock(); ++ ++ return ret; ++} ++ + int reuseport_get_id(struct sock_reuseport *reuse); + + #endif /* _SOCK_REUSEPORT_H */ +diff --git a/include/uapi/linux/netfilter/xt_nfacct.h b/include/uapi/linux/netfilter/xt_nfacct.h +index 5c8a4d760ee3..b5123ab8d54a 100644 +--- a/include/uapi/linux/netfilter/xt_nfacct.h ++++ b/include/uapi/linux/netfilter/xt_nfacct.h +@@ -11,4 +11,9 @@ struct xt_nfacct_match_info { + struct nf_acct *nfacct; + }; + ++struct xt_nfacct_match_info_v1 { ++ char name[NFACCT_NAME_MAX]; ++ struct nf_acct *nfacct __attribute__((aligned(8))); ++}; ++ + #endif /* _XT_NFACCT_MATCH_H */ +diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c +index 95a260f9214b..136ce049c4ad 100644 +--- a/kernel/kallsyms.c ++++ b/kernel/kallsyms.c +@@ -263,8 +263,10 @@ int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize, + { + char namebuf[KSYM_NAME_LEN]; + +- if (is_ksym_addr(addr)) +- return !!get_symbol_pos(addr, symbolsize, offset); ++ if (is_ksym_addr(addr)) { ++ get_symbol_pos(addr, symbolsize, offset); ++ return 1; ++ } + return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) || + !!__bpf_address_lookup(addr, symbolsize, offset, namebuf); + } +diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c +index fad95ef64e01..bc06e3cdfa84 100644 +--- a/net/batman-adv/bat_v_ogm.c ++++ b/net/batman-adv/bat_v_ogm.c +@@ -631,17 +631,23 @@ batadv_v_ogm_process_per_outif(struct batadv_priv *bat_priv, + * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated + * @buff_pos: current position in the skb + * @packet_len: total length of the skb +- * @tvlv_len: tvlv length of the previously considered OGM ++ * @ogm2_packet: potential OGM2 in buffer + * + * Return: true if there is enough space for another OGM, false otherwise. 
+ */ +-static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, +- __be16 tvlv_len) ++static bool ++batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, ++ const struct batadv_ogm2_packet *ogm2_packet) + { + int next_buff_pos = 0; + +- next_buff_pos += buff_pos + BATADV_OGM2_HLEN; +- next_buff_pos += ntohs(tvlv_len); ++ /* check if there is enough space for the header */ ++ next_buff_pos += buff_pos + sizeof(*ogm2_packet); ++ if (next_buff_pos > packet_len) ++ return false; ++ ++ /* check if there is enough space for the optional TVLV */ ++ next_buff_pos += ntohs(ogm2_packet->tvlv_len); + + return (next_buff_pos <= packet_len) && + (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); +@@ -818,7 +824,7 @@ int batadv_v_ogm_packet_recv(struct sk_buff *skb, + ogm_packet = (struct batadv_ogm2_packet *)skb->data; + + while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb), +- ogm_packet->tvlv_len)) { ++ ogm_packet)) { + batadv_v_ogm_process(skb, ogm_offset, if_incoming); + + ogm_offset += BATADV_OGM2_HLEN; +diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c +index c8177a89f52c..4096d8a74a2b 100644 +--- a/net/bridge/netfilter/ebtables.c ++++ b/net/bridge/netfilter/ebtables.c +@@ -221,7 +221,7 @@ unsigned int ebt_do_table(struct sk_buff *skb, + return NF_DROP; + } + +- ADD_COUNTER(*(counter_base + i), 1, skb->len); ++ ADD_COUNTER(*(counter_base + i), skb->len, 1); + + /* these should only watch: not modify, nor tell us + * what to do with the packet +@@ -959,8 +959,8 @@ static void get_counters(const struct ebt_counter *oldcounters, + continue; + counter_base = COUNTER_BASE(oldcounters, nentries, cpu); + for (i = 0; i < nentries; i++) +- ADD_COUNTER(counters[i], counter_base[i].pcnt, +- counter_base[i].bcnt); ++ ADD_COUNTER(counters[i], counter_base[i].bcnt, ++ counter_base[i].pcnt); + } + } + +@@ -1280,7 +1280,7 @@ static int do_update_counters(struct net *net, const char *name, + + /* we add to the counters of the first cpu */ + for (i = 0; i < num_counters; i++) +- ADD_COUNTER(t->private->counters[i], tmp[i].pcnt, tmp[i].bcnt); ++ ADD_COUNTER(t->private->counters[i], tmp[i].bcnt, tmp[i].pcnt); + + write_unlock_bh(&t->lock); + ret = 0; +diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c +index 5d6724cee38f..4f75df40fb12 100644 +--- a/net/ceph/crypto.c ++++ b/net/ceph/crypto.c +@@ -136,8 +136,10 @@ void ceph_crypto_key_destroy(struct ceph_crypto_key *key) + if (key) { + kfree(key->key); + key->key = NULL; +- crypto_free_sync_skcipher(key->tfm); +- key->tfm = NULL; ++ if (key->tfm) { ++ crypto_free_sync_skcipher(key->tfm); ++ key->tfm = NULL; ++ } + } + } + +diff --git a/net/core/dev.c b/net/core/dev.c +index 2ff556906b5d..828ecca03c07 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3475,18 +3475,22 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, + qdisc_calculate_pkt_len(skb, q); + + if (q->flags & TCQ_F_NOLOCK) { +- if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { +- __qdisc_drop(skb, &to_free); +- rc = NET_XMIT_DROP; +- } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty && +- qdisc_run_begin(q)) { ++ if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty && ++ qdisc_run_begin(q)) { ++ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, ++ &q->state))) { ++ __qdisc_drop(skb, &to_free); ++ rc = NET_XMIT_DROP; ++ goto end_run; ++ } + qdisc_bstats_cpu_update(q, skb); + ++ rc = NET_XMIT_SUCCESS; + if (sch_direct_xmit(skb, q, dev, txq, NULL, true)) + __qdisc_run(q); + ++end_run: + qdisc_run_end(q); +- rc = 
NET_XMIT_SUCCESS; + } else { + rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; + qdisc_run(q); +diff --git a/net/core/filter.c b/net/core/filter.c +index 534c310bb089..7aee6f368754 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -8553,13 +8553,13 @@ sk_reuseport_is_valid_access(int off, int size, + return size == size_default; + + /* Fields that allow narrowing */ +- case offsetof(struct sk_reuseport_md, eth_protocol): ++ case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): + if (size < FIELD_SIZEOF(struct sk_buff, protocol)) + return false; + /* fall through */ +- case offsetof(struct sk_reuseport_md, ip_protocol): +- case offsetof(struct sk_reuseport_md, bind_inany): +- case offsetof(struct sk_reuseport_md, len): ++ case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): ++ case bpf_ctx_range(struct sk_reuseport_md, bind_inany): ++ case bpf_ctx_range(struct sk_reuseport_md, len): + bpf_ctx_record_field_size(info, size_default); + return bpf_ctx_narrow_access_ok(off, size, size_default); + +diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c +index edd622956083..b15c0c0f6e55 100644 +--- a/net/core/flow_dissector.c ++++ b/net/core/flow_dissector.c +@@ -138,8 +138,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) + mutex_unlock(&flow_dissector_mutex); + return -ENOENT; + } +- bpf_prog_put(attached); + RCU_INIT_POINTER(net->flow_dissector_prog, NULL); ++ bpf_prog_put(attached); + mutex_unlock(&flow_dissector_mutex); + return 0; + } +diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c +index dc4aefdf2a08..2f89777763ad 100644 +--- a/net/core/sock_reuseport.c ++++ b/net/core/sock_reuseport.c +@@ -295,8 +295,19 @@ struct sock *reuseport_select_sock(struct sock *sk, + + select_by_hash: + /* no bpf or invalid bpf result: fall back to hash usage */ +- if (!sk2) +- sk2 = reuse->socks[reciprocal_scale(hash, socks)]; ++ if (!sk2) { ++ int i, j; ++ ++ i = j = reciprocal_scale(hash, socks); ++ while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) { ++ i++; ++ if (i >= reuse->num_socks) ++ i = 0; ++ if (i == j) ++ goto out; ++ } ++ sk2 = reuse->socks[i]; ++ } + } + + out: +diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c +index 820dd8da57fc..1739b98a8f4b 100644 +--- a/net/dsa/dsa2.c ++++ b/net/dsa/dsa2.c +@@ -577,6 +577,8 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master) + tag_protocol = ds->ops->get_tag_protocol(ds, dp->index); + tag_ops = dsa_tag_driver_get(tag_protocol); + if (IS_ERR(tag_ops)) { ++ if (PTR_ERR(tag_ops) == -ENOPROTOOPT) ++ return -EPROBE_DEFER; + dev_warn(ds->dev, "No tagger for this switch\n"); + return PTR_ERR(tag_ops); + } +diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c +index 7bd29e694603..9a0fe0c2fa02 100644 +--- a/net/ipv4/datagram.c ++++ b/net/ipv4/datagram.c +@@ -15,6 +15,7 @@ + #include <net/sock.h> + #include <net/route.h> + #include <net/tcp_states.h> ++#include <net/sock_reuseport.h> + + int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) + { +@@ -69,6 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len + } + inet->inet_daddr = fl4->daddr; + inet->inet_dport = usin->sin_port; ++ reuseport_has_conns(sk, true); + sk->sk_state = TCP_ESTABLISHED; + sk_set_txhash(sk); + inet->inet_id = jiffies; +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index eed59c847722..acab7738f733 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -434,12 +434,13 @@ static struct sock *udp4_lib_lookup2(struct net *net, + score 
= compute_score(sk, net, saddr, sport, + daddr, hnum, dif, sdif, exact_dif); + if (score > badness) { +- if (sk->sk_reuseport) { ++ if (sk->sk_reuseport && ++ sk->sk_state != TCP_ESTABLISHED) { + hash = udp_ehashfn(net, daddr, hnum, + saddr, sport); + result = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); +- if (result) ++ if (result && !reuseport_has_conns(sk, false)) + return result; + } + badness = score; +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c +index 9d78c907b918..694168e2302e 100644 +--- a/net/ipv6/datagram.c ++++ b/net/ipv6/datagram.c +@@ -27,6 +27,7 @@ + #include <net/ip6_route.h> + #include <net/tcp_states.h> + #include <net/dsfield.h> ++#include <net/sock_reuseport.h> + + #include <linux/errqueue.h> + #include <linux/uaccess.h> +@@ -254,6 +255,7 @@ ipv4_connected: + goto out; + } + ++ reuseport_has_conns(sk, true); + sk->sk_state = TCP_ESTABLISHED; + sk_set_txhash(sk); + out: +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index dd2d0b963260..d5779d6a6065 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, + if (unlikely(!tun_info || + !(tun_info->mode & IP_TUNNEL_INFO_TX) || + ip_tunnel_info_af(tun_info) != AF_INET6)) +- return -EINVAL; ++ goto tx_err; + + key = &tun_info->key; + memset(&fl6, 0, sizeof(fl6)); +diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c +index 70b01bd95022..1258be19e186 100644 +--- a/net/ipv6/udp.c ++++ b/net/ipv6/udp.c +@@ -168,13 +168,14 @@ static struct sock *udp6_lib_lookup2(struct net *net, + score = compute_score(sk, net, saddr, sport, + daddr, hnum, dif, sdif, exact_dif); + if (score > badness) { +- if (sk->sk_reuseport) { ++ if (sk->sk_reuseport && ++ sk->sk_state != TCP_ESTABLISHED) { + hash = udp6_ehashfn(net, daddr, hnum, + saddr, sport); + + result = reuseport_select_sock(sk, hash, skb, + sizeof(struct udphdr)); +- if (result) ++ if (result && !reuseport_has_conns(sk, false)) + return result; + } + result = sk; +diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c +index 8c6c11bab5b6..b5df6c4c159c 100644 +--- a/net/netfilter/nf_conntrack_ftp.c ++++ b/net/netfilter/nf_conntrack_ftp.c +@@ -322,7 +322,7 @@ static int find_pattern(const char *data, size_t dlen, + i++; + } + +- pr_debug("Skipped up to `%c'!\n", skip); ++ pr_debug("Skipped up to 0x%hhx delimiter!\n", skip); + + *numoff = i; + *numlen = getnum(data + i, dlen - i, cmd, term, numoff); +diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c +index e0d392cb3075..0006503d2da9 100644 +--- a/net/netfilter/nf_conntrack_standalone.c ++++ b/net/netfilter/nf_conntrack_standalone.c +@@ -1037,8 +1037,13 @@ static int nf_conntrack_standalone_init_sysctl(struct net *net) + table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; + table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; + table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; ++ table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct; ++ table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper; + #ifdef CONFIG_NF_CONNTRACK_EVENTS + table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; ++#endif ++#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP ++ table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp; + #endif + table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; + table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout; +diff --git a/net/netfilter/nf_flow_table_core.c 
b/net/netfilter/nf_flow_table_core.c +index 49248fe5847a..55106bebf2b5 100644 +--- a/net/netfilter/nf_flow_table_core.c ++++ b/net/netfilter/nf_flow_table_core.c +@@ -218,7 +218,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow) + return err; + } + +- flow->timeout = (u32)jiffies; ++ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; + return 0; + } + EXPORT_SYMBOL_GPL(flow_offload_add); +diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c +index d68c801dd614..b9e7dd6e60ce 100644 +--- a/net/netfilter/nf_flow_table_ip.c ++++ b/net/netfilter/nf_flow_table_ip.c +@@ -228,7 +228,6 @@ static unsigned int nf_flow_xmit_xfrm(struct sk_buff *skb, + { + skb_orphan(skb); + skb_dst_set_noref(skb, dst); +- skb->tstamp = 0; + dst_output(state->net, state->sk, skb); + return NF_STOLEN; + } +@@ -284,6 +283,7 @@ nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, + flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; + iph = ip_hdr(skb); + ip_decrease_ttl(iph); ++ skb->tstamp = 0; + + if (unlikely(dst_xfrm(&rt->dst))) { + memset(skb->cb, 0, sizeof(struct inet_skb_parm)); +@@ -512,6 +512,7 @@ nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, + flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; + ip6h = ipv6_hdr(skb); + ip6h->hop_limit--; ++ skb->tstamp = 0; + + if (unlikely(dst_xfrm(&rt->dst))) { + memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); +diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c +index 060a4ed46d5e..01705ad74a9a 100644 +--- a/net/netfilter/nft_flow_offload.c ++++ b/net/netfilter/nft_flow_offload.c +@@ -149,6 +149,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx, + return nft_chain_validate_hooks(ctx->chain, hook_mask); + } + ++static const struct nla_policy nft_flow_offload_policy[NFTA_FLOW_MAX + 1] = { ++ [NFTA_FLOW_TABLE_NAME] = { .type = NLA_STRING, ++ .len = NFT_NAME_MAXLEN - 1 }, ++}; ++ + static int nft_flow_offload_init(const struct nft_ctx *ctx, + const struct nft_expr *expr, + const struct nlattr * const tb[]) +@@ -207,6 +212,7 @@ static const struct nft_expr_ops nft_flow_offload_ops = { + static struct nft_expr_type nft_flow_offload_type __read_mostly = { + .name = "flow_offload", + .ops = &nft_flow_offload_ops, ++ .policy = nft_flow_offload_policy, + .maxattr = NFTA_FLOW_MAX, + .owner = THIS_MODULE, + }; +diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c +index d0ab1adf5bff..5aab6df74e0f 100644 +--- a/net/netfilter/xt_nfacct.c ++++ b/net/netfilter/xt_nfacct.c +@@ -54,25 +54,39 @@ nfacct_mt_destroy(const struct xt_mtdtor_param *par) + nfnl_acct_put(info->nfacct); + } + +-static struct xt_match nfacct_mt_reg __read_mostly = { +- .name = "nfacct", +- .family = NFPROTO_UNSPEC, +- .checkentry = nfacct_mt_checkentry, +- .match = nfacct_mt, +- .destroy = nfacct_mt_destroy, +- .matchsize = sizeof(struct xt_nfacct_match_info), +- .usersize = offsetof(struct xt_nfacct_match_info, nfacct), +- .me = THIS_MODULE, ++static struct xt_match nfacct_mt_reg[] __read_mostly = { ++ { ++ .name = "nfacct", ++ .revision = 0, ++ .family = NFPROTO_UNSPEC, ++ .checkentry = nfacct_mt_checkentry, ++ .match = nfacct_mt, ++ .destroy = nfacct_mt_destroy, ++ .matchsize = sizeof(struct xt_nfacct_match_info), ++ .usersize = offsetof(struct xt_nfacct_match_info, nfacct), ++ .me = THIS_MODULE, ++ }, ++ { ++ .name = "nfacct", ++ .revision = 1, ++ .family = NFPROTO_UNSPEC, ++ .checkentry = nfacct_mt_checkentry, ++ .match = nfacct_mt, ++ .destroy = nfacct_mt_destroy, ++ .matchsize = 
sizeof(struct xt_nfacct_match_info_v1), ++ .usersize = offsetof(struct xt_nfacct_match_info_v1, nfacct), ++ .me = THIS_MODULE, ++ }, + }; + + static int __init nfacct_mt_init(void) + { +- return xt_register_match(&nfacct_mt_reg); ++ return xt_register_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg)); + } + + static void __exit nfacct_mt_exit(void) + { +- xt_unregister_match(&nfacct_mt_reg); ++ xt_unregister_matches(nfacct_mt_reg, ARRAY_SIZE(nfacct_mt_reg)); + } + + module_init(nfacct_mt_init); +diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c +index ead7c6022208..b92b22ce8abd 100644 +--- a/net/netfilter/xt_physdev.c ++++ b/net/netfilter/xt_physdev.c +@@ -101,11 +101,9 @@ static int physdev_mt_check(const struct xt_mtchk_param *par) + if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) && + (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || + info->invert & XT_PHYSDEV_OP_BRIDGED) && +- par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | +- (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { ++ par->hook_mask & (1 << NF_INET_LOCAL_OUT)) { + pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n"); +- if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) +- return -EINVAL; ++ return -EINVAL; + } + + if (!brnf_probed) { +diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c +index ac28f6a5d70e..17bd8f539bc7 100644 +--- a/net/sched/sch_generic.c ++++ b/net/sched/sch_generic.c +@@ -985,6 +985,9 @@ static void qdisc_destroy(struct Qdisc *qdisc) + + void qdisc_put(struct Qdisc *qdisc) + { ++ if (!qdisc) ++ return; ++ + if (qdisc->flags & TCQ_F_BUILTIN || + !refcount_dec_and_test(&qdisc->refcnt)) + return; +diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c +index a680d28c231e..fbb85ea24ea0 100644 +--- a/net/sunrpc/clnt.c ++++ b/net/sunrpc/clnt.c +@@ -2301,7 +2301,7 @@ call_status(struct rpc_task *task) + case -ECONNABORTED: + case -ENOTCONN: + rpc_force_rebind(clnt); +- /* fall through */ ++ break; + case -EADDRINUSE: + rpc_delay(task, 3*HZ); + /* fall through */ +diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c +index 88a1de9def11..b28aaddbe08e 100644 +--- a/net/wireless/nl80211.c ++++ b/net/wireless/nl80211.c +@@ -10640,9 +10640,11 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, + hyst = wdev->cqm_config->rssi_hyst; + n = wdev->cqm_config->n_rssi_thresholds; + +- for (i = 0; i < n; i++) ++ for (i = 0; i < n; i++) { ++ i = array_index_nospec(i, n); + if (last < wdev->cqm_config->rssi_thresholds[i]) + break; ++ } + + low_index = i - 1; + if (low_index >= 0) { +diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c +index 9c6de4f114f8..9bd7b96027c1 100644 +--- a/net/xdp/xdp_umem.c ++++ b/net/xdp/xdp_umem.c +@@ -368,7 +368,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL); + if (!umem->pages) { + err = -ENOMEM; +- goto out_account; ++ goto out_pin; + } + + for (i = 0; i < umem->npgs; i++) +@@ -376,6 +376,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) + + return 0; + ++out_pin: ++ xdp_umem_unpin_pages(umem); + out_account: + xdp_umem_unaccount_pages(umem); + return err; +diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh +index a7a36209a193..6c2c05a75b54 100755 +--- a/scripts/decode_stacktrace.sh ++++ b/scripts/decode_stacktrace.sh +@@ -85,7 +85,7 @@ parse_symbol() { + fi + + # Strip out the base of the path +- 
code=${code//^$basepath/""} ++ code=${code#$basepath/} + + # In the case of inlines, move everything to same line + code=${code//$'\n'/' '} +diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c +index e45b5cf3b97f..8491becb5727 100644 +--- a/security/keys/request_key_auth.c ++++ b/security/keys/request_key_auth.c +@@ -66,6 +66,9 @@ static void request_key_auth_describe(const struct key *key, + { + struct request_key_auth *rka = get_request_key_auth(key); + ++ if (!rka) ++ return; ++ + seq_puts(m, "key:"); + seq_puts(m, key->description); + if (key_is_positive(key)) +@@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key, + size_t datalen; + long ret; + ++ if (!rka) ++ return -EKEYREVOKED; ++ + datalen = rka->callout_len; + ret = datalen; + +diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c +index 7a4e21a31523..d41651afe5f6 100644 +--- a/tools/bpf/bpftool/prog.c ++++ b/tools/bpf/bpftool/prog.c +@@ -362,7 +362,9 @@ static int do_show(int argc, char **argv) + if (fd < 0) + return -1; + +- return show_prog(fd); ++ err = show_prog(fd); ++ close(fd); ++ return err; + } + + if (argc) +diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c +index 1cd28ebf8443..5c0154cf190c 100644 +--- a/tools/power/x86/turbostat/turbostat.c ++++ b/tools/power/x86/turbostat/turbostat.c +@@ -506,6 +506,7 @@ unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAU + unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC; + + #define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME) ++#define DO_BIC_READ(COUNTER_NAME) (bic_present & COUNTER_NAME) + #define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME) + #define BIC_PRESENT(COUNTER_BIT) (bic_present |= COUNTER_BIT) + #define BIC_NOT_PRESENT(COUNTER_BIT) (bic_present &= ~COUNTER_BIT) +@@ -1287,6 +1288,14 @@ delta_core(struct core_data *new, struct core_data *old) + } + } + ++int soft_c1_residency_display(int bic) ++{ ++ if (!DO_BIC(BIC_CPU_c1) || use_c1_residency_msr) ++ return 0; ++ ++ return DO_BIC_READ(bic); ++} ++ + /* + * old = new - old + */ +@@ -1322,7 +1331,8 @@ delta_thread(struct thread_data *new, struct thread_data *old, + + old->c1 = new->c1 - old->c1; + +- if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { ++ if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || ++ soft_c1_residency_display(BIC_Avg_MHz)) { + if ((new->aperf > old->aperf) && (new->mperf > old->mperf)) { + old->aperf = new->aperf - old->aperf; + old->mperf = new->mperf - old->mperf; +@@ -1774,7 +1784,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p) + retry: + t->tsc = rdtsc(); /* we are running on local CPU of interest */ + +- if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz)) { ++ if (DO_BIC(BIC_Avg_MHz) || DO_BIC(BIC_Busy) || DO_BIC(BIC_Bzy_MHz) || ++ soft_c1_residency_display(BIC_Avg_MHz)) { + unsigned long long tsc_before, tsc_between, tsc_after, aperf_time, mperf_time; + + /* +@@ -1851,20 +1862,20 @@ retry: + if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE)) + goto done; + +- if (DO_BIC(BIC_CPU_c3)) { ++ if (DO_BIC(BIC_CPU_c3) || soft_c1_residency_display(BIC_CPU_c3)) { + if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3)) + return -6; + } + +- if (DO_BIC(BIC_CPU_c6) && !do_knl_cstates) { ++ if ((DO_BIC(BIC_CPU_c6) || soft_c1_residency_display(BIC_CPU_c6)) && !do_knl_cstates) { + if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6)) + 
return -7; +- } else if (do_knl_cstates) { ++ } else if (do_knl_cstates || soft_c1_residency_display(BIC_CPU_c6)) { + if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6)) + return -7; + } + +- if (DO_BIC(BIC_CPU_c7)) ++ if (DO_BIC(BIC_CPU_c7) || soft_c1_residency_display(BIC_CPU_c7)) + if (get_msr(cpu, MSR_CORE_C7_RESIDENCY, &c->c7)) + return -8; + +@@ -2912,6 +2923,7 @@ int snapshot_cpu_lpi_us(void) + if (retval != 1) { + fprintf(stderr, "Disabling Low Power Idle CPU output\n"); + BIC_NOT_PRESENT(BIC_CPU_LPI); ++ fclose(fp); + return -1; + } + +@@ -2938,6 +2950,7 @@ int snapshot_sys_lpi_us(void) + if (retval != 1) { + fprintf(stderr, "Disabling Low Power Idle System output\n"); + BIC_NOT_PRESENT(BIC_SYS_LPI); ++ fclose(fp); + return -1; + } + fclose(fp); +@@ -3209,6 +3222,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model) + break; + case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL_X: /* HSX */ ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */ + case INTEL_FAM6_HASWELL_GT3E: /* HSW */ + case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ +@@ -3405,6 +3419,7 @@ int has_config_tdp(unsigned int family, unsigned int model) + case INTEL_FAM6_IVYBRIDGE: /* IVB */ + case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL_X: /* HSX */ ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */ + case INTEL_FAM6_HASWELL_GT3E: /* HSW */ + case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ +@@ -3841,6 +3856,7 @@ void rapl_probe_intel(unsigned int family, unsigned int model) + case INTEL_FAM6_SANDYBRIDGE: + case INTEL_FAM6_IVYBRIDGE: + case INTEL_FAM6_HASWELL_CORE: /* HSW */ ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */ + case INTEL_FAM6_HASWELL_GT3E: /* HSW */ + case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ +@@ -4032,6 +4048,7 @@ void perf_limit_reasons_probe(unsigned int family, unsigned int model) + + switch (model) { + case INTEL_FAM6_HASWELL_CORE: /* HSW */ ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */ + case INTEL_FAM6_HASWELL_GT3E: /* HSW */ + do_gfx_perf_limit_reasons = 1; + case INTEL_FAM6_HASWELL_X: /* HSX */ +@@ -4251,6 +4268,7 @@ int has_snb_msrs(unsigned int family, unsigned int model) + case INTEL_FAM6_IVYBRIDGE_X: /* IVB Xeon */ + case INTEL_FAM6_HASWELL_CORE: /* HSW */ + case INTEL_FAM6_HASWELL_X: /* HSW */ ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */ + case INTEL_FAM6_HASWELL_GT3E: /* HSW */ + case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_BROADWELL_GT3E: /* BDW */ +@@ -4284,7 +4302,7 @@ int has_hsw_msrs(unsigned int family, unsigned int model) + return 0; + + switch (model) { +- case INTEL_FAM6_HASWELL_CORE: ++ case INTEL_FAM6_HASWELL_ULT: /* HSW */ + case INTEL_FAM6_BROADWELL_CORE: /* BDW */ + case INTEL_FAM6_SKYLAKE_MOBILE: /* SKL */ + case INTEL_FAM6_CANNONLAKE_MOBILE: /* CNL */ +@@ -4568,9 +4586,6 @@ unsigned int intel_model_duplicates(unsigned int model) + case INTEL_FAM6_XEON_PHI_KNM: + return INTEL_FAM6_XEON_PHI_KNL; + +- case INTEL_FAM6_HASWELL_ULT: +- return INTEL_FAM6_HASWELL_CORE; +- + case INTEL_FAM6_BROADWELL_X: + case INTEL_FAM6_BROADWELL_XEON_D: /* BDX-DE */ + return INTEL_FAM6_BROADWELL_X; +@@ -4582,6 +4597,7 @@ unsigned int intel_model_duplicates(unsigned int model) + return INTEL_FAM6_SKYLAKE_MOBILE; + + case INTEL_FAM6_ICELAKE_MOBILE: ++ case INTEL_FAM6_ICELAKE_NNPI: + return INTEL_FAM6_CANNONLAKE_MOBILE; + } + return model; +@@ -5123,7 +5139,7 @@ int initialize_counters(int cpu_id) + + void allocate_output_buffer() + { +- output_buffer = 
calloc(1, (1 + topo.num_cpus) * 1024); ++ output_buffer = calloc(1, (1 + topo.num_cpus) * 2048); + outp = output_buffer; + if (outp == NULL) + err(-1, "calloc output buffer"); +diff --git a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c +index 34a796b303fe..3fe1eed900d4 100644 +--- a/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c ++++ b/tools/power/x86/x86_energy_perf_policy/x86_energy_perf_policy.c +@@ -545,7 +545,7 @@ void cmdline(int argc, char **argv) + + progname = argv[0]; + +- while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw", ++ while ((opt = getopt_long_only(argc, argv, "+a:c:dD:E:e:f:m:M:rt:u:vw:", + long_options, &option_index)) != -1) { + switch (opt) { + case 'a': +@@ -1259,6 +1259,15 @@ void probe_dev_msr(void) + if (system("/sbin/modprobe msr > /dev/null 2>&1")) + err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" "); + } ++ ++static void get_cpuid_or_exit(unsigned int leaf, ++ unsigned int *eax, unsigned int *ebx, ++ unsigned int *ecx, unsigned int *edx) ++{ ++ if (!__get_cpuid(leaf, eax, ebx, ecx, edx)) ++ errx(1, "Processor not supported\n"); ++} ++ + /* + * early_cpuid() + * initialize turbo_is_enabled, has_hwp, has_epb +@@ -1266,15 +1275,10 @@ void probe_dev_msr(void) + */ + void early_cpuid(void) + { +- unsigned int eax, ebx, ecx, edx, max_level; ++ unsigned int eax, ebx, ecx, edx; + unsigned int fms, family, model; + +- __get_cpuid(0, &max_level, &ebx, &ecx, &edx); +- +- if (max_level < 6) +- errx(1, "Processor not supported\n"); +- +- __get_cpuid(1, &fms, &ebx, &ecx, &edx); ++ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx); + family = (fms >> 8) & 0xf; + model = (fms >> 4) & 0xf; + if (family == 6 || family == 0xf) +@@ -1288,7 +1292,7 @@ void early_cpuid(void) + bdx_highest_ratio = msr & 0xFF; + } + +- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); ++ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx); + turbo_is_enabled = (eax >> 1) & 1; + has_hwp = (eax >> 7) & 1; + has_epb = (ecx >> 3) & 1; +@@ -1306,7 +1310,7 @@ void parse_cpuid(void) + + eax = ebx = ecx = edx = 0; + +- __get_cpuid(0, &max_level, &ebx, &ecx, &edx); ++ get_cpuid_or_exit(0, &max_level, &ebx, &ecx, &edx); + + if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e) + genuine_intel = 1; +@@ -1315,7 +1319,7 @@ void parse_cpuid(void) + fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ", + (char *)&ebx, (char *)&edx, (char *)&ecx); + +- __get_cpuid(1, &fms, &ebx, &ecx, &edx); ++ get_cpuid_or_exit(1, &fms, &ebx, &ecx, &edx); + family = (fms >> 8) & 0xf; + model = (fms >> 4) & 0xf; + stepping = fms & 0xf; +@@ -1340,7 +1344,7 @@ void parse_cpuid(void) + errx(1, "CPUID: no MSR"); + + +- __get_cpuid(0x6, &eax, &ebx, &ecx, &edx); ++ get_cpuid_or_exit(0x6, &eax, &ebx, &ecx, &edx); + /* turbo_is_enabled already set */ + /* has_hwp already set */ + has_hwp_notify = eax & (1 << 8); +diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config +index f7a0744db31e..5dc109f4c097 100644 +--- a/tools/testing/selftests/bpf/config ++++ b/tools/testing/selftests/bpf/config +@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m + CONFIG_MPLS_ROUTING=m + CONFIG_MPLS_IPTUNNEL=m + CONFIG_IPV6_SIT=m ++CONFIG_BPF_JIT=y +diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c +index 2fc4625c1a15..655729004391 100644 +--- a/tools/testing/selftests/bpf/test_cgroup_storage.c ++++ b/tools/testing/selftests/bpf/test_cgroup_storage.c +@@ -20,9 +20,9 @@ int 
main(int argc, char **argv) + BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, + BPF_FUNC_get_local_storage), +- BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), ++ BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1), +- BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0), ++ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), + + BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */ + BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */ +@@ -30,7 +30,7 @@ int main(int argc, char **argv) + BPF_FUNC_get_local_storage), + BPF_MOV64_IMM(BPF_REG_1, 1), + BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0), +- BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0), ++ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), + BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1), + BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), +diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c +index fb679ac3d4b0..0e6652733462 100644 +--- a/tools/testing/selftests/bpf/test_sock.c ++++ b/tools/testing/selftests/bpf/test_sock.c +@@ -13,6 +13,7 @@ + #include <bpf/bpf.h> + + #include "cgroup_helpers.h" ++#include "bpf_endian.h" + #include "bpf_rlimit.h" + #include "bpf_util.h" + +@@ -232,7 +233,8 @@ static struct sock_test tests[] = { + /* if (ip == expected && port == expected) */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_ip6[3])), +- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4), ++ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ++ __bpf_constant_ntohl(0x00000001), 4), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_port)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2), +@@ -261,7 +263,8 @@ static struct sock_test tests[] = { + /* if (ip == expected && port == expected) */ + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_ip4)), +- BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4), ++ BPF_JMP_IMM(BPF_JNE, BPF_REG_7, ++ __bpf_constant_ntohl(0x7F000001), 4), + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6, + offsetof(struct bpf_sock, src_port)), + BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2), +diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c +index 5294abb3f178..8ffd07e2a160 100644 +--- a/virt/kvm/coalesced_mmio.c ++++ b/virt/kvm/coalesced_mmio.c +@@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, + return 1; + } + +-static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev) ++static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last) + { + struct kvm_coalesced_mmio_ring *ring; + unsigned avail; +@@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev) + * there is always one unused entry in the buffer + */ + ring = dev->kvm->coalesced_mmio_ring; +- avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX; ++ avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX; + if (avail == 0) { + /* full */ + return 0; +@@ -67,25 +67,28 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu, + { + struct kvm_coalesced_mmio_dev *dev = to_mmio(this); + struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; ++ __u32 insert; + + if (!coalesced_mmio_in_range(dev, addr, len)) + return -EOPNOTSUPP; + + spin_lock(&dev->kvm->ring_lock); + +- if (!coalesced_mmio_has_room(dev)) { ++ insert = READ_ONCE(ring->last); ++ if (!coalesced_mmio_has_room(dev, insert) || ++ insert >= KVM_COALESCED_MMIO_MAX) { + spin_unlock(&dev->kvm->ring_lock); + return -EOPNOTSUPP; + } + + /* copy 
data in first free entry of the ring */
+
+-	ring->coalesced_mmio[ring->last].phys_addr = addr;
+-	ring->coalesced_mmio[ring->last].len = len;
+-	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
+-	ring->coalesced_mmio[ring->last].pio = dev->zone.pio;
++	ring->coalesced_mmio[insert].phys_addr = addr;
++	ring->coalesced_mmio[insert].len = len;
++	memcpy(ring->coalesced_mmio[insert].data, val, len);
++	ring->coalesced_mmio[insert].pio = dev->zone.pio;
+ 	smp_wmb();
+-	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
++	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
+ 	spin_unlock(&dev->kvm->ring_lock);
+ 	return 0;
+ }