Diffstat (limited to '1066_linux-4.14.67.patch')
-rw-r--r--  1066_linux-4.14.67.patch  6716
1 file changed, 6716 insertions, 0 deletions
diff --git a/1066_linux-4.14.67.patch b/1066_linux-4.14.67.patch
new file mode 100644
index 00000000..caaaf722
--- /dev/null
+++ b/1066_linux-4.14.67.patch
@@ -0,0 +1,6716 @@
+diff --git a/Makefile b/Makefile
+index e69d0d091742..4dad2d1c24ba 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 66
++SUBLEVEL = 67
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+@@ -357,9 +357,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
+ else if [ -x /bin/bash ]; then echo /bin/bash; \
+ else echo sh; fi ; fi)
+
+-HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
+-HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
+-HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
++HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
++HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
++HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
+
+ HOSTCC = gcc
+ HOSTCXX = g++
+diff --git a/arch/arc/Makefile b/arch/arc/Makefile
+index d37f49d6a27f..6c1b20dd76ad 100644
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -16,7 +16,7 @@ endif
+
+ KBUILD_DEFCONFIG := nsim_700_defconfig
+
+-cflags-y += -fno-common -pipe -fno-builtin -D__linux__
++cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
+ cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
+ cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
+
+@@ -140,16 +140,3 @@ dtbs: scripts
+
+ archclean:
+ $(Q)$(MAKE) $(clean)=$(boot)
+-
+-# Hacks to enable final link due to absence of link-time branch relexation
+-# and gcc choosing optimal(shorter) branches at -O3
+-#
+-# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
+-# However lib/decompress_inflate.o (.init.text) calls
+-# zlib_inflate_workspacesize (.text) causing relocation errors.
+-# Thus forcing all exten calls in this file to be long calls
+-export CFLAGS_decompress_inflate.o = -mmedium-calls
+-export CFLAGS_initramfs.o = -mmedium-calls
+-ifdef CONFIG_SMP
+-export CFLAGS_core.o = -mmedium-calls
+-endif
+diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
+index c28e6c347b49..871f3cb16af9 100644
+--- a/arch/arc/include/asm/mach_desc.h
++++ b/arch/arc/include/asm/mach_desc.h
+@@ -34,9 +34,7 @@ struct machine_desc {
+ const char *name;
+ const char **dt_compat;
+ void (*init_early)(void);
+-#ifdef CONFIG_SMP
+ void (*init_per_cpu)(unsigned int);
+-#endif
+ void (*init_machine)(void);
+ void (*init_late)(void);
+
+diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
+index 538b36afe89e..62b185057c04 100644
+--- a/arch/arc/kernel/irq.c
++++ b/arch/arc/kernel/irq.c
+@@ -31,10 +31,10 @@ void __init init_IRQ(void)
+ /* a SMP H/w block could do IPI IRQ request here */
+ if (plat_smp_ops.init_per_cpu)
+ plat_smp_ops.init_per_cpu(smp_processor_id());
++#endif
+
+ if (machine_desc->init_per_cpu)
+ machine_desc->init_per_cpu(smp_processor_id());
+-#endif
+ }
+
+ /*
+diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
+index 5ac3b547453f..4674541eba3f 100644
+--- a/arch/arc/kernel/process.c
++++ b/arch/arc/kernel/process.c
+@@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls)
+ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
+ {
+ struct pt_regs *regs = current_pt_regs();
+- int uval = -EFAULT;
++ u32 uval;
++ int ret;
+
+ /*
+ * This is only for old cores lacking LLOCK/SCOND, which by definition
+@@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
+ /* Z indicates to userspace if operation succeeded */
+ regs->status32 &= ~STATUS_Z_MASK;
+
+- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+- return -EFAULT;
++ ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr));
++ if (!ret)
++ goto fail;
+
++again:
+ preempt_disable();
+
+- if (__get_user(uval, uaddr))
+- goto done;
++ ret = __get_user(uval, uaddr);
++ if (ret)
++ goto fault;
+
+- if (uval == expected) {
+- if (!__put_user(new, uaddr))
+- regs->status32 |= STATUS_Z_MASK;
+- }
++ if (uval != expected)
++ goto out;
+
+-done:
+- preempt_enable();
++ ret = __put_user(new, uaddr);
++ if (ret)
++ goto fault;
++
++ regs->status32 |= STATUS_Z_MASK;
+
++out:
++ preempt_enable();
+ return uval;
++
++fault:
++ preempt_enable();
++
++ if (unlikely(ret != -EFAULT))
++ goto fail;
++
++ down_read(&current->mm->mmap_sem);
++ ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
++ FAULT_FLAG_WRITE, NULL);
++ up_read(&current->mm->mmap_sem);
++
++ if (likely(!ret))
++ goto again;
++
++fail:
++ force_sig(SIGSEGV, current);
++ return ret;
+ }
+
+ #ifdef CONFIG_ISA_ARCV2
+diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
+index 00da3f2c4072..4b57094a0356 100644
+--- a/arch/arm/boot/dts/am3517.dtsi
++++ b/arch/arm/boot/dts/am3517.dtsi
+@@ -87,6 +87,11 @@
+ };
+ };
+
+/* Table 5-79 of the TRM shows 480ab000 is reserved */
++&usb_otg_hs {
++ status = "disabled";
++};
++
+ &iva {
+ status = "disabled";
+ };
+diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
+index 2c6bf0684f50..094fd0ea91a0 100644
+--- a/arch/arm/boot/dts/am437x-sk-evm.dts
++++ b/arch/arm/boot/dts/am437x-sk-evm.dts
+@@ -535,6 +535,8 @@
+
+ touchscreen-size-x = <480>;
+ touchscreen-size-y = <272>;
++
++ wakeup-source;
+ };
+
+ tlv320aic3106: tlv320aic3106@1b {
+diff --git a/arch/arm/boot/dts/armada-385-synology-ds116.dts b/arch/arm/boot/dts/armada-385-synology-ds116.dts
+index 31510eb56f10..874189b4d218 100644
+--- a/arch/arm/boot/dts/armada-385-synology-ds116.dts
++++ b/arch/arm/boot/dts/armada-385-synology-ds116.dts
+@@ -170,7 +170,7 @@
+ 3700 5
+ 3900 6
+ 4000 7>;
+- cooling-cells = <2>;
++ #cooling-cells = <2>;
+ };
+
+ gpio-leds {
+diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
+index 9a9902974b1b..8b2c65cd61a2 100644
+--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
++++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
+@@ -216,7 +216,7 @@
+ reg = <0x18008000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+- interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+@@ -245,7 +245,7 @@
+ reg = <0x1800b000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+- interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+@@ -256,7 +256,7 @@
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
++ interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
+
+ linux,pci-domain = <0>;
+
+@@ -278,10 +278,10 @@
+ compatible = "brcm,iproc-msi";
+ msi-controller;
+ interrupt-parent = <&gic>;
+- interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
+- <GIC_SPI 97 IRQ_TYPE_NONE>,
+- <GIC_SPI 98 IRQ_TYPE_NONE>,
+- <GIC_SPI 99 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+@@ -291,7 +291,7 @@
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
++ interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
+
+ linux,pci-domain = <1>;
+
+@@ -313,10 +313,10 @@
+ compatible = "brcm,iproc-msi";
+ msi-controller;
+ interrupt-parent = <&gic>;
+- interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
+- <GIC_SPI 103 IRQ_TYPE_NONE>,
+- <GIC_SPI 104 IRQ_TYPE_NONE>,
+- <GIC_SPI 105 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
+index d5f5e92e7488..1792192001a2 100644
+--- a/arch/arm/boot/dts/bcm-nsp.dtsi
++++ b/arch/arm/boot/dts/bcm-nsp.dtsi
+@@ -391,7 +391,7 @@
+ reg = <0x38000 0x50>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+- interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <100000>;
+ dma-coherent;
+ status = "disabled";
+@@ -496,7 +496,7 @@
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
++ interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
+
+ linux,pci-domain = <0>;
+
+@@ -519,10 +519,10 @@
+ compatible = "brcm,iproc-msi";
+ msi-controller;
+ interrupt-parent = <&gic>;
+- interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
+- <GIC_SPI 128 IRQ_TYPE_NONE>,
+- <GIC_SPI 129 IRQ_TYPE_NONE>,
+- <GIC_SPI 130 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
+ brcm,pcie-msi-inten;
+ };
+ };
+@@ -533,7 +533,7 @@
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
++ interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
+
+ linux,pci-domain = <1>;
+
+@@ -556,10 +556,10 @@
+ compatible = "brcm,iproc-msi";
+ msi-controller;
+ interrupt-parent = <&gic>;
+- interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
+- <GIC_SPI 134 IRQ_TYPE_NONE>,
+- <GIC_SPI 135 IRQ_TYPE_NONE>,
+- <GIC_SPI 136 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
+ brcm,pcie-msi-inten;
+ };
+ };
+@@ -570,7 +570,7 @@
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
++ interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+
+ linux,pci-domain = <2>;
+
+@@ -593,10 +593,10 @@
+ compatible = "brcm,iproc-msi";
+ msi-controller;
+ interrupt-parent = <&gic>;
+- interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
+- <GIC_SPI 140 IRQ_TYPE_NONE>,
+- <GIC_SPI 141 IRQ_TYPE_NONE>,
+- <GIC_SPI 142 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
++ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
+ brcm,pcie-msi-inten;
+ };
+ };
+diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
+index 045b9bb857f9..501877e87a5b 100644
+--- a/arch/arm/boot/dts/bcm5301x.dtsi
++++ b/arch/arm/boot/dts/bcm5301x.dtsi
+@@ -365,7 +365,7 @@
+ i2c0: i2c@18009000 {
+ compatible = "brcm,iproc-i2c";
+ reg = <0x18009000 0x50>;
+- interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
+index 8a15f7193c82..77dd62e260db 100644
+--- a/arch/arm/boot/dts/da850.dtsi
++++ b/arch/arm/boot/dts/da850.dtsi
+@@ -518,11 +518,7 @@
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x226000 0x1000>;
+- interrupts = <42 IRQ_TYPE_EDGE_BOTH
+- 43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
+- 45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
+- 47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
+- 49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
++ interrupts = <42 43 44 45 46 47 48 49 50>;
+ ti,ngpio = <144>;
+ ti,davinci-gpio-unbanked = <0>;
+ status = "disabled";
+diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+index eeb7679fd348..849eb3443cde 100644
+--- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
++++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+@@ -644,7 +644,7 @@
+ dsa,member = <0 0>;
+ eeprom-length = <512>;
+ interrupt-parent = <&gpio6>;
+- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
++ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
+index ca0f13cafe38..6e5a3d9c23e1 100644
+--- a/arch/arm/configs/imx_v4_v5_defconfig
++++ b/arch/arm/configs/imx_v4_v5_defconfig
+@@ -144,9 +144,11 @@ CONFIG_USB_STORAGE=y
+ CONFIG_USB_CHIPIDEA=y
+ CONFIG_USB_CHIPIDEA_UDC=y
+ CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_CHIPIDEA_ULPI=y
+ CONFIG_NOP_USB_XCEIV=y
+ CONFIG_USB_GADGET=y
+ CONFIG_USB_ETH=m
++CONFIG_USB_ULPI_BUS=y
+ CONFIG_MMC=y
+ CONFIG_MMC_SDHCI=y
+ CONFIG_MMC_SDHCI_PLTFM=y
+diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
+index 32acac9ab81a..21ac9f02407e 100644
+--- a/arch/arm/configs/imx_v6_v7_defconfig
++++ b/arch/arm/configs/imx_v6_v7_defconfig
+@@ -289,6 +289,7 @@ CONFIG_USB_STORAGE=y
+ CONFIG_USB_CHIPIDEA=y
+ CONFIG_USB_CHIPIDEA_UDC=y
+ CONFIG_USB_CHIPIDEA_HOST=y
++CONFIG_USB_CHIPIDEA_ULPI=y
+ CONFIG_USB_SERIAL=m
+ CONFIG_USB_SERIAL_GENERIC=y
+ CONFIG_USB_SERIAL_FTDI_SIO=m
+@@ -325,6 +326,7 @@ CONFIG_USB_GADGETFS=m
+ CONFIG_USB_FUNCTIONFS=m
+ CONFIG_USB_MASS_STORAGE=m
+ CONFIG_USB_G_SERIAL=m
++CONFIG_USB_ULPI_BUS=y
+ CONFIG_MMC=y
+ CONFIG_MMC_SDHCI=y
+ CONFIG_MMC_SDHCI_PLTFM=y
+diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
+index 25f12118c364..2f6ac1afa804 100644
+--- a/arch/arm/mach-davinci/board-da850-evm.c
++++ b/arch/arm/mach-davinci/board-da850-evm.c
+@@ -773,7 +773,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
+ GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
+ GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
+- GPIO_ACTIVE_LOW),
++ GPIO_ACTIVE_HIGH),
+ },
+ };
+
+diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
+index 69df3620eca5..1c73694c871a 100644
+--- a/arch/arm/mach-omap2/omap-smp.c
++++ b/arch/arm/mach-omap2/omap-smp.c
+@@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
+ static inline void omap5_erratum_workaround_801819(void) { }
+ #endif
+
++#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
++/*
++ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
++ * ICIALLU) to activate the workaround for secondary Core.
++ * NOTE: it is assumed that the primary core's configuration is done
++ * by the boot loader (kernel will detect a misconfiguration and complain
++ * if this is not done).
++ *
++ * In General Purpose(GP) devices, ACR bit settings can only be done
++ * by ROM code in "secure world" using the smc call and there is no
++ * option to update the "firmware" on such devices. This also works for
++ * High security(HS) devices, as a backup option in case the
++ * "update" is not done in the "security firmware".
++ */
++static void omap5_secondary_harden_predictor(void)
++{
++ u32 acr, acr_mask;
++
++ asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
++
++ /*
++ * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
++ */
++ acr_mask = BIT(0);
++
++ /* Do we already have it done.. if yes, skip expensive smc */
++ if ((acr & acr_mask) == acr_mask)
++ return;
++
++ acr |= acr_mask;
++ omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
++
++ pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
++ __func__, smp_processor_id());
++}
++#else
++static inline void omap5_secondary_harden_predictor(void) { }
++#endif
++
+ static void omap4_secondary_init(unsigned int cpu)
+ {
+ /*
+@@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
+ set_cntfreq();
+ /* Configure ACR to disable streaming WA for 801819 */
+ omap5_erratum_workaround_801819();
++ /* Enable ACR to allow for ICUALLU workaround */
++ omap5_secondary_harden_predictor();
+ }
+
+ /*
+diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
+index 9c10248fadcc..4e8c2116808e 100644
+--- a/arch/arm/mach-pxa/irq.c
++++ b/arch/arm/mach-pxa/irq.c
+@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
+ {
+ int i;
+
+- for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
++ for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
+ void __iomem *base = irq_base(i);
+
+ saved_icmr[i] = __raw_readl(base + ICMR);
+@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
+ {
+ int i;
+
+- for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
++ for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
+ void __iomem *base = irq_base(i);
+
+ __raw_writel(saved_icmr[i], base + ICMR);
+diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
+index 0f6d1537f330..defb7fc26428 100644
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -745,19 +745,28 @@ static int __mark_rodata_ro(void *unused)
+ return 0;
+ }
+
++static int kernel_set_to_readonly __read_mostly;
++
+ void mark_rodata_ro(void)
+ {
++ kernel_set_to_readonly = 1;
+ stop_machine(__mark_rodata_ro, NULL, NULL);
+ }
+
+ void set_kernel_text_rw(void)
+ {
++ if (!kernel_set_to_readonly)
++ return;
++
+ set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+ current->active_mm);
+ }
+
+ void set_kernel_text_ro(void)
+ {
++ if (!kernel_set_to_readonly)
++ return;
++
+ set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+ current->active_mm);
+ }
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
+index f06cc234693b..379abc3d82fe 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
+@@ -7,7 +7,7 @@
+
+ &apb {
+ mali: gpu@c0000 {
+- compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
++ compatible = "amlogic,meson-gxl-mali", "arm,mali-450";
+ reg = <0x0 0xc0000 0x0 0x40000>;
+ interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
+diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+index 35c8457e3d1f..0b72094bcf5a 100644
+--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
++++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+@@ -118,7 +118,7 @@
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>;
++ interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>;
+
+ linux,pci-domain = <0>;
+
+@@ -149,7 +149,7 @@
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>;
++ interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
+
+ linux,pci-domain = <4>;
+
+@@ -566,7 +566,7 @@
+ reg = <0x66080000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+- interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+@@ -594,7 +594,7 @@
+ reg = <0x660b0000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+- interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
+index eb6f08cdbd79..77efa28c4dd5 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
++++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
+@@ -43,6 +43,10 @@
+ enet-phy-lane-swap;
+ };
+
++&sdio0 {
++ mmc-ddr-1_8v;
++};
++
+ &uart2 {
+ status = "okay";
+ };
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
+index 5084b037320f..55ba495ef56e 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
++++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
+@@ -42,3 +42,7 @@
+ &gphy0 {
+ enet-phy-lane-swap;
+ };
++
++&sdio0 {
++ mmc-ddr-1_8v;
++};
+diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+index e6f75c633623..2b76293b51c8 100644
+--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
++++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+@@ -409,7 +409,7 @@
+ reg = <0x000b0000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+- interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+@@ -453,7 +453,7 @@
+ reg = <0x000e0000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+- interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>;
++ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
+ clock-frequency = <100000>;
+ status = "disabled";
+ };
+diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+index 61da6e65900b..3cc449425a03 100644
+--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
++++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
+@@ -1132,14 +1132,14 @@
+
+ port@0 {
+ reg = <0>;
+- etf_out: endpoint {
++ etf_in: endpoint {
+ slave-mode;
+ remote-endpoint = <&funnel0_out>;
+ };
+ };
+ port@1 {
+ reg = <0>;
+- etf_in: endpoint {
++ etf_out: endpoint {
+ remote-endpoint = <&replicator_in>;
+ };
+ };
+diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
+index 9f7195a5773e..b7ad41d7b6ee 100644
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -214,7 +214,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
+ * This is the secondary CPU boot entry. We're using this CPUs
+ * idle thread stack, but a set of temporary page tables.
+ */
+-asmlinkage void secondary_start_kernel(void)
++asmlinkage notrace void secondary_start_kernel(void)
+ {
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu;
+diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
+index 614af886b7ef..58470b151bc3 100644
+--- a/arch/arm64/mm/dma-mapping.c
++++ b/arch/arm64/mm/dma-mapping.c
+@@ -629,13 +629,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
+ size >> PAGE_SHIFT);
+ return NULL;
+ }
+- if (!coherent)
+- __dma_flush_area(page_to_virt(page), iosize);
+-
+ addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+ prot,
+ __builtin_return_address(0));
+- if (!addr) {
++ if (addr) {
++ memset(addr, 0, size);
++ if (!coherent)
++ __dma_flush_area(page_to_virt(page), iosize);
++ } else {
+ iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+ dma_release_from_contiguous(dev, page,
+ size >> PAGE_SHIFT);
+diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
+index 8b707c249026..12fe700632f4 100644
+--- a/arch/m68k/include/asm/mcf_pgalloc.h
++++ b/arch/m68k/include/asm/mcf_pgalloc.h
+@@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
+ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
+ unsigned long address)
+ {
++ pgtable_page_dtor(page);
+ __free_page(page);
+ }
+
+@@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
+ return page;
+ }
+
+-extern inline void pte_free(struct mm_struct *mm, struct page *page)
++static inline void pte_free(struct mm_struct *mm, struct page *page)
+ {
++ pgtable_page_dtor(page);
+ __free_page(page);
+ }
+
+diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
+index 1b7160c79646..b16e95a4e875 100644
+--- a/arch/openrisc/kernel/entry.S
++++ b/arch/openrisc/kernel/entry.S
+@@ -221,12 +221,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
+ l.addi r3,r1,0 // pt_regs
+ /* r4 set be EXCEPTION_HANDLE */ // effective address of fault
+
+- /*
+- * __PHX__: TODO
+- *
+- * all this can be written much simpler. look at
+- * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
+- */
+ #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
+ l.lwz r6,PT_PC(r3) // address of an offending insn
+ l.lwz r6,0(r6) // instruction that caused pf
+@@ -258,7 +252,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
+
+ #else
+
+- l.lwz r6,PT_SR(r3) // SR
++ l.mfspr r6,r0,SPR_SR // SR
+ l.andi r6,r6,SPR_SR_DSX // check for delay slot exception
+ l.sfne r6,r0 // exception happened in delay slot
+ l.bnf 7f
+diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
+index 1e87913576e3..90979acdf165 100644
+--- a/arch/openrisc/kernel/head.S
++++ b/arch/openrisc/kernel/head.S
+@@ -141,8 +141,7 @@
+ * r4 - EEAR exception EA
+ * r10 - current pointing to current_thread_info struct
+ * r12 - syscall 0, since we didn't come from syscall
+- * r13 - temp it actually contains new SR, not needed anymore
+- * r31 - handler address of the handler we'll jump to
++ * r30 - handler address of the handler we'll jump to
+ *
+ * handler has to save remaining registers to the exception
+ * ksp frame *before* tainting them!
+@@ -178,6 +177,7 @@
+ /* r1 is KSP, r30 is __pa(KSP) */ ;\
+ tophys (r30,r1) ;\
+ l.sw PT_GPR12(r30),r12 ;\
++ /* r4 use for tmp before EA */ ;\
+ l.mfspr r12,r0,SPR_EPCR_BASE ;\
+ l.sw PT_PC(r30),r12 ;\
+ l.mfspr r12,r0,SPR_ESR_BASE ;\
+@@ -197,7 +197,10 @@
+ /* r12 == 1 if we come from syscall */ ;\
+ CLEAR_GPR(r12) ;\
+ /* ----- turn on MMU ----- */ ;\
+- l.ori r30,r0,(EXCEPTION_SR) ;\
++ /* Carry DSX into exception SR */ ;\
++ l.mfspr r30,r0,SPR_SR ;\
++ l.andi r30,r30,SPR_SR_DSX ;\
++ l.ori r30,r30,(EXCEPTION_SR) ;\
+ l.mtspr r0,r30,SPR_ESR_BASE ;\
+ /* r30: EA address of handler */ ;\
+ LOAD_SYMBOL_2_GPR(r30,handler) ;\
+diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
+index 8d8437169b5e..0d44e8007ad6 100644
+--- a/arch/openrisc/kernel/traps.c
++++ b/arch/openrisc/kernel/traps.c
+@@ -358,7 +358,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
+ return 0;
+ }
+ #else
+- return regs->sr & SPR_SR_DSX;
++ return mfspr(SPR_SR) & SPR_SR_DSX;
+ #endif
+ }
+
+diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
+index af03359e6ac5..a82776592c8e 100644
+--- a/arch/parisc/include/asm/spinlock.h
++++ b/arch/parisc/include/asm/spinlock.h
+@@ -20,7 +20,6 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
+ {
+ volatile unsigned int *a;
+
+- mb();
+ a = __ldcw_align(x);
+ while (__ldcw(a) == 0)
+ while (*a == 0)
+@@ -30,16 +29,15 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *x,
+ local_irq_disable();
+ } else
+ cpu_relax();
+- mb();
+ }
+
+ static inline void arch_spin_unlock(arch_spinlock_t *x)
+ {
+ volatile unsigned int *a;
+- mb();
++
+ a = __ldcw_align(x);
+- *a = 1;
+ mb();
++ *a = 1;
+ }
+
+ static inline int arch_spin_trylock(arch_spinlock_t *x)
+@@ -47,10 +45,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
+ volatile unsigned int *a;
+ int ret;
+
+- mb();
+ a = __ldcw_align(x);
+ ret = __ldcw(a) != 0;
+- mb();
+
+ return ret;
+ }
+diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
+index 4886a6db42e9..5f7e57fcaeef 100644
+--- a/arch/parisc/kernel/syscall.S
++++ b/arch/parisc/kernel/syscall.S
+@@ -629,12 +629,12 @@ cas_action:
+ stw %r1, 4(%sr2,%r20)
+ #endif
+ /* The load and store could fail */
+-1: ldw,ma 0(%r26), %r28
++1: ldw 0(%r26), %r28
+ sub,<> %r28, %r25, %r0
+-2: stw,ma %r24, 0(%r26)
++2: stw %r24, 0(%r26)
+ /* Free lock */
+ sync
+- stw,ma %r20, 0(%sr2,%r20)
++ stw %r20, 0(%sr2,%r20)
+ #if ENABLE_LWS_DEBUG
+ /* Clear thread register indicator */
+ stw %r0, 4(%sr2,%r20)
+@@ -798,30 +798,30 @@ cas2_action:
+ ldo 1(%r0),%r28
+
+ /* 8bit CAS */
+-13: ldb,ma 0(%r26), %r29
++13: ldb 0(%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+-14: stb,ma %r24, 0(%r26)
++14: stb %r24, 0(%r26)
+ b cas2_end
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 16bit CAS */
+-15: ldh,ma 0(%r26), %r29
++15: ldh 0(%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+-16: sth,ma %r24, 0(%r26)
++16: sth %r24, 0(%r26)
+ b cas2_end
+ copy %r0, %r28
+ nop
+ nop
+
+ /* 32bit CAS */
+-17: ldw,ma 0(%r26), %r29
++17: ldw 0(%r26), %r29
+ sub,= %r29, %r25, %r0
+ b,n cas2_end
+-18: stw,ma %r24, 0(%r26)
++18: stw %r24, 0(%r26)
+ b cas2_end
+ copy %r0, %r28
+ nop
+@@ -829,10 +829,10 @@ cas2_action:
+
+ /* 64bit CAS */
+ #ifdef CONFIG_64BIT
+-19: ldd,ma 0(%r26), %r29
++19: ldd 0(%r26), %r29
+ sub,*= %r29, %r25, %r0
+ b,n cas2_end
+-20: std,ma %r24, 0(%r26)
++20: std %r24, 0(%r26)
+ copy %r0, %r28
+ #else
+ /* Compare first word */
+@@ -851,7 +851,7 @@ cas2_action:
+ cas2_end:
+ /* Free lock */
+ sync
+- stw,ma %r20, 0(%sr2,%r20)
++ stw %r20, 0(%sr2,%r20)
+ /* Enable interrupts */
+ ssm PSW_SM_I, %r0
+ /* Return to userspace, set no error */
+diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
+index 11cd151733d4..45f1ea117128 100644
+--- a/arch/s390/net/bpf_jit_comp.c
++++ b/arch/s390/net/bpf_jit_comp.c
+@@ -1403,6 +1403,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
+ goto free_addrs;
+ }
+ if (bpf_jit_prog(&jit, fp)) {
++ bpf_jit_binary_free(header);
+ fp = orig_fp;
+ goto free_addrs;
+ }
+diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
+index 1c2cfa0644aa..97ccf4c3b45b 100644
+--- a/arch/x86/kernel/cpu/microcode/intel.c
++++ b/arch/x86/kernel/cpu/microcode/intel.c
+@@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size)
+ p = memdup_patch(data, size);
+ if (!p)
+ pr_err("Error allocating buffer %p\n", data);
+- else
++ else {
+ list_replace(&iter->plist, &p->plist);
++ kfree(iter->data);
++ kfree(iter);
++ }
+ }
+ }
+
+diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
+index 5b609e28ce3f..48703d430a2f 100644
+--- a/arch/x86/kernel/kvmclock.c
++++ b/arch/x86/kernel/kvmclock.c
+@@ -143,6 +143,7 @@ static unsigned long kvm_get_tsc_khz(void)
+ src = &hv_clock[cpu].pvti;
+ tsc_khz = pvclock_tsc_khz(src);
+ put_cpu();
++ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
+ return tsc_khz;
+ }
+
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 5ebb0dbcf4f7..30447d210f37 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -223,6 +223,11 @@ static void notrace start_secondary(void *unused)
+ #ifdef CONFIG_X86_32
+ /* switch away from the initial page table */
+ load_cr3(swapper_pg_dir);
++ /*
++ * Initialize the CR4 shadow before doing anything that could
++ * try to read it.
++ */
++ cr4_init_shadow();
+ __flush_tlb_all();
+ #endif
+ load_current_idt();
+diff --git a/block/sed-opal.c b/block/sed-opal.c
+index 9ed51d0c6b1d..4f5e70d4abc3 100644
+--- a/block/sed-opal.c
++++ b/block/sed-opal.c
+@@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
+ return 0;
+ }
+
+- if (n > resp->num) {
++ if (n >= resp->num) {
+ pr_debug("Response has %d tokens. Can't access %d\n",
+ resp->num, n);
+ return 0;
+@@ -899,7 +899,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n)
+ return 0;
+ }
+
+- if (n > resp->num) {
++ if (n >= resp->num) {
+ pr_debug("Response has %d tokens. Can't access %d\n",
+ resp->num, n);
+ return 0;
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 58bc28aff3aa..3d624c72c6c2 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -2029,6 +2029,17 @@ static inline void acpi_ec_query_exit(void)
+ }
+ }
+
++static const struct dmi_system_id acpi_ec_no_wakeup[] = {
++ {
++ .ident = "Thinkpad X1 Carbon 6th",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
++ DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
++ },
++ },
++ { },
++};
++
+ int __init acpi_ec_init(void)
+ {
+ int result;
+@@ -2039,6 +2050,15 @@ int __init acpi_ec_init(void)
+ if (result)
+ return result;
+
++ /*
++ * Disable EC wakeup on following systems to prevent periodic
++ * wakeup from EC GPE.
++ */
++ if (dmi_check_system(acpi_ec_no_wakeup)) {
++ ec_no_wakeup = true;
++ pr_debug("Disabling EC wakeup on suspend-to-idle\n");
++ }
++
+ /* Drivers must be started after acpi_ec_query_init() */
+ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+ /*
+diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
+index d56822f58ab1..8260b90eb64b 100644
+--- a/drivers/acpi/nfit/core.c
++++ b/drivers/acpi/nfit/core.c
+@@ -224,6 +224,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ const guid_t *guid;
+ int rc, i;
+
++ if (cmd_rc)
++ *cmd_rc = -EINVAL;
+ func = cmd;
+ if (cmd == ND_CMD_CALL) {
+ call_pkg = buf;
+@@ -314,6 +316,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
+ * If we return an error (like elsewhere) then caller wouldn't
+ * be able to rely upon data returned to make calculation.
+ */
++ if (cmd_rc)
++ *cmd_rc = 0;
+ return 0;
+ }
+
+diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
+index 5ae268b8514e..bc562fd2b0a0 100644
+--- a/drivers/ata/libahci.c
++++ b/drivers/ata/libahci.c
+@@ -35,6 +35,7 @@
+ #include <linux/kernel.h>
+ #include <linux/gfp.h>
+ #include <linux/module.h>
++#include <linux/nospec.h>
+ #include <linux/blkdev.h>
+ #include <linux/delay.h>
+ #include <linux/interrupt.h>
+@@ -1135,10 +1136,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
+
+ /* get the slot number from the message */
+ pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+- if (pmp < EM_MAX_SLOTS)
++ if (pmp < EM_MAX_SLOTS) {
++ pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
+ emp = &pp->em_priv[pmp];
+- else
++ } else {
+ return -EINVAL;
++ }
+
+ /* mask off the activity bits if we are in sw_activity
+ * mode, user should turn off sw_activity before setting
+diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
+index de8566e55334..c72071c300bb 100644
+--- a/drivers/block/drbd/drbd_req.c
++++ b/drivers/block/drbd/drbd_req.c
+@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
+ _drbd_start_io_acct(device, req);
+
+ /* process discards always from our submitter thread */
+- if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) ||
+- (bio_op(bio) & REQ_OP_DISCARD))
++ if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
++ bio_op(bio) == REQ_OP_DISCARD)
+ goto queue_for_submitter_thread;
+
+ if (rw == WRITE && req->private_bio && req->i.size
+diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
+index 6fb64e73bc96..5feba04ab940 100644
+--- a/drivers/block/nbd.c
++++ b/drivers/block/nbd.c
+@@ -76,6 +76,7 @@ struct link_dead_args {
+ #define NBD_HAS_CONFIG_REF 4
+ #define NBD_BOUND 5
+ #define NBD_DESTROY_ON_DISCONNECT 6
++#define NBD_DISCONNECT_ON_CLOSE 7
+
+ struct nbd_config {
+ u32 flags;
+@@ -138,6 +139,7 @@ static void nbd_config_put(struct nbd_device *nbd);
+ static void nbd_connect_reply(struct genl_info *info, int index);
+ static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
+ static void nbd_dead_link_work(struct work_struct *work);
++static void nbd_disconnect_and_put(struct nbd_device *nbd);
+
+ static inline struct device *nbd_to_dev(struct nbd_device *nbd)
+ {
+@@ -1291,6 +1293,12 @@ out:
+ static void nbd_release(struct gendisk *disk, fmode_t mode)
+ {
+ struct nbd_device *nbd = disk->private_data;
++ struct block_device *bdev = bdget_disk(disk, 0);
++
++ if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
++ bdev->bd_openers == 0)
++ nbd_disconnect_and_put(nbd);
++
+ nbd_config_put(nbd);
+ nbd_put(nbd);
+ }
+@@ -1690,6 +1698,10 @@ again:
+ &config->runtime_flags);
+ put_dev = true;
+ }
++ if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
++ set_bit(NBD_DISCONNECT_ON_CLOSE,
++ &config->runtime_flags);
++ }
+ }
+
+ if (info->attrs[NBD_ATTR_SOCKETS]) {
+@@ -1734,6 +1746,16 @@ out:
+ return ret;
+ }
+
++static void nbd_disconnect_and_put(struct nbd_device *nbd)
++{
++ mutex_lock(&nbd->config_lock);
++ nbd_disconnect(nbd);
++ mutex_unlock(&nbd->config_lock);
++ if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
++ &nbd->config->runtime_flags))
++ nbd_config_put(nbd);
++}
++
+ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
+ {
+ struct nbd_device *nbd;
+@@ -1766,12 +1788,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
+ nbd_put(nbd);
+ return 0;
+ }
+- mutex_lock(&nbd->config_lock);
+- nbd_disconnect(nbd);
+- mutex_unlock(&nbd->config_lock);
+- if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+- &nbd->config->runtime_flags))
+- nbd_config_put(nbd);
++ nbd_disconnect_and_put(nbd);
+ nbd_config_put(nbd);
+ nbd_put(nbd);
+ return 0;
+@@ -1782,7 +1799,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
+ struct nbd_device *nbd = NULL;
+ struct nbd_config *config;
+ int index;
+- int ret = -EINVAL;
++ int ret = 0;
+ bool put_dev = false;
+
+ if (!netlink_capable(skb, CAP_SYS_ADMIN))
+@@ -1822,6 +1839,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
+ !nbd->task_recv) {
+ dev_err(nbd_to_dev(nbd),
+ "not configured, cannot reconfigure\n");
++ ret = -EINVAL;
+ goto out;
+ }
+
+@@ -1846,6 +1864,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
+ &config->runtime_flags))
+ refcount_inc(&nbd->refs);
+ }
++
++ if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
++ set_bit(NBD_DISCONNECT_ON_CLOSE,
++ &config->runtime_flags);
++ } else {
++ clear_bit(NBD_DISCONNECT_ON_CLOSE,
++ &config->runtime_flags);
++ }
+ }
+
+ if (info->attrs[NBD_ATTR_SOCKETS]) {
+diff --git a/drivers/dax/device.c b/drivers/dax/device.c
+index 7b0bf825c4e7..050e299129ac 100644
+--- a/drivers/dax/device.c
++++ b/drivers/dax/device.c
+@@ -188,14 +188,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
+
+ /* prevent private mappings from being established */
+ if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
+- dev_info(dev, "%s: %s: fail, attempted private mapping\n",
++ dev_info_ratelimited(dev,
++ "%s: %s: fail, attempted private mapping\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ mask = dax_region->align - 1;
+ if (vma->vm_start & mask || vma->vm_end & mask) {
+- dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
++ dev_info_ratelimited(dev,
++ "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+ current->comm, func, vma->vm_start, vma->vm_end,
+ mask);
+ return -EINVAL;
+@@ -203,13 +205,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
+
+ if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
+ && (vma->vm_flags & VM_DONTCOPY) == 0) {
+- dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
++ dev_info_ratelimited(dev,
++ "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+
+ if (!vma_is_dax(vma)) {
+- dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
++ dev_info_ratelimited(dev,
++ "%s: %s: fail, vma is not DAX capable\n",
+ current->comm, func);
+ return -EINVAL;
+ }
+diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
+index 01d2a750a621..219ae3b545db 100644
+--- a/drivers/dma/k3dma.c
++++ b/drivers/dma/k3dma.c
+@@ -787,7 +787,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
+ struct k3_dma_dev *d = ofdma->of_dma_data;
+ unsigned int request = dma_spec->args[0];
+
+- if (request > d->dma_requests)
++ if (request >= d->dma_requests)
+ return NULL;
+
+ return dma_get_slave_channel(&(d->chans[request].vc.chan));
+diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
+index 7432c8894e32..d19862f4dc9a 100644
+--- a/drivers/dma/pl330.c
++++ b/drivers/dma/pl330.c
+@@ -2923,7 +2923,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
+ pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
+ pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
+ pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+- pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
++ pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
+ 1 : PL330_MAX_BURST);
+
+diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
+index 480072139b7a..80801c616395 100644
+--- a/drivers/edac/edac_mc.c
++++ b/drivers/edac/edac_mc.c
+@@ -215,6 +215,7 @@ const char * const edac_mem_types[] = {
+ [MEM_LRDDR3] = "Load-Reduced DDR3 RAM",
+ [MEM_DDR4] = "Unbuffered DDR4 RAM",
+ [MEM_RDDR4] = "Registered DDR4 RAM",
++ [MEM_LRDDR4] = "Load-Reduced-DDR4-RAM",
+ };
+ EXPORT_SYMBOL_GPL(edac_mem_types);
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+index 5183b46563f6..242dfb1433d2 100644
+--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+@@ -899,7 +899,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
+ .emit_frame_size =
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
+- .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
++ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+ .emit_ib = amdgpu_vce_ring_emit_ib,
+ .emit_fence = amdgpu_vce_ring_emit_fence,
+ .test_ring = amdgpu_vce_ring_test_ring,
+@@ -923,7 +923,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
+ 6 + /* vce_v3_0_emit_vm_flush */
+ 4 + /* vce_v3_0_emit_pipeline_sync */
+ 6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
+- .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
++ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
+ .emit_ib = vce_v3_0_ring_emit_ib,
+ .emit_vm_flush = vce_v3_0_emit_vm_flush,
+ .emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
+diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
+index 17bca99e8ac8..7e2c341dfe5f 100644
+--- a/drivers/gpu/drm/arm/malidp_hw.c
++++ b/drivers/gpu/drm/arm/malidp_hw.c
+@@ -634,7 +634,8 @@ const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+ .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
+ },
+ .se_irq_map = {
+- .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
++ .irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
++ MALIDP500_SE_IRQ_GLOBAL,
+ .vsync_irq = 0,
+ },
+ .dc_irq_map = {
+diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
+index 94e7e3fa3408..16b8b310ae5c 100644
+--- a/drivers/gpu/drm/arm/malidp_planes.c
++++ b/drivers/gpu/drm/arm/malidp_planes.c
+@@ -23,6 +23,7 @@
+
+ /* Layer specific register offsets */
+ #define MALIDP_LAYER_FORMAT 0x000
++#define LAYER_FORMAT_MASK 0x3f
+ #define MALIDP_LAYER_CONTROL 0x004
+ #define LAYER_ENABLE (1 << 0)
+ #define LAYER_FLOWCFG_MASK 7
+@@ -278,7 +279,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
+ dest_w = plane->state->crtc_w;
+ dest_h = plane->state->crtc_h;
+
+- malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
++ val = malidp_hw_read(mp->hwdev, mp->layer->base);
++ val = (val & ~LAYER_FORMAT_MASK) | ms->format;
++ malidp_hw_write(mp->hwdev, val, mp->layer->base);
+
+ for (i = 0; i < ms->n_planes; i++) {
+ /* calculate the offset for the layer's plane registers */
+diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
+index 79ce877bf45f..3039936f8f3f 100644
+--- a/drivers/gpu/drm/armada/armada_crtc.c
++++ b/drivers/gpu/drm/armada/armada_crtc.c
+@@ -483,8 +483,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
+ u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ /*
+- * This is rediculous - rather than writing bits to clear, we
+- * have to set the actual status register value. This is racy.
++ * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
++ * is set. Writing has some other effect to acknowledge the IRQ -
++ * without this, we only get a single IRQ.
+ */
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+@@ -1104,16 +1105,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc,
+ static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
+ {
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
++ unsigned long flags;
+
++ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
++ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+ return 0;
+ }
+
+ static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
+ {
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
++ unsigned long flags;
+
++ spin_lock_irqsave(&dcrtc->irq_lock, flags);
+ armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
++ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+ }
+
+ static const struct drm_crtc_funcs armada_crtc_funcs = {
+@@ -1221,6 +1228,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
+ CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
+ writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
+ writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
++ readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
+ writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
+
+ ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
+diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
+index 27319a8335e2..345dc4d0851e 100644
+--- a/drivers/gpu/drm/armada/armada_hw.h
++++ b/drivers/gpu/drm/armada/armada_hw.h
+@@ -160,6 +160,7 @@ enum {
+ CFG_ALPHAM_GRA = 0x1 << 16,
+ CFG_ALPHAM_CFG = 0x2 << 16,
+ CFG_ALPHA_MASK = 0xff << 8,
++#define CFG_ALPHA(x) ((x) << 8)
+ CFG_PIXCMD_MASK = 0xff,
+ };
+
+diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
+index edc44910d79f..2076346b09ee 100644
+--- a/drivers/gpu/drm/armada/armada_overlay.c
++++ b/drivers/gpu/drm/armada/armada_overlay.c
+@@ -28,6 +28,7 @@ struct armada_ovl_plane_properties {
+ uint16_t contrast;
+ uint16_t saturation;
+ uint32_t colorkey_mode;
++ uint32_t colorkey_enable;
+ };
+
+ struct armada_ovl_plane {
+@@ -59,11 +60,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
+ writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
+
+ spin_lock_irq(&dcrtc->irq_lock);
+- armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
+- CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+- dcrtc->base + LCD_SPU_DMA_CTRL1);
+-
+- armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
++ armada_updatel(prop->colorkey_mode,
++ CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
++ dcrtc->base + LCD_SPU_DMA_CTRL1);
++ if (dcrtc->variant->has_spu_adv_reg)
++ armada_updatel(prop->colorkey_enable,
++ ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
++ dcrtc->base + LCD_SPU_ADV_REG);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ }
+
+@@ -339,8 +342,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
+ dplane->prop.colorkey_vb |= K2B(val);
+ update_attr = true;
+ } else if (property == priv->colorkey_mode_prop) {
+- dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
+- dplane->prop.colorkey_mode |= CFG_CKMODE(val);
++ if (val == CKMODE_DISABLE) {
++ dplane->prop.colorkey_mode =
++ CFG_CKMODE(CKMODE_DISABLE) |
++ CFG_ALPHAM_CFG | CFG_ALPHA(255);
++ dplane->prop.colorkey_enable = 0;
++ } else {
++ dplane->prop.colorkey_mode =
++ CFG_CKMODE(val) |
++ CFG_ALPHAM_GRA | CFG_ALPHA(0);
++ dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
++ }
+ update_attr = true;
+ } else if (property == priv->brightness_prop) {
+ dplane->prop.brightness = val - 256;
+@@ -470,7 +482,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
+ dplane->prop.colorkey_yr = 0xfefefe00;
+ dplane->prop.colorkey_ug = 0x01010100;
+ dplane->prop.colorkey_vb = 0x01010100;
+- dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
++ dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
++ CFG_ALPHAM_GRA | CFG_ALPHA(0);
++ dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+ dplane->prop.brightness = 0;
+ dplane->prop.contrast = 0x4000;
+ dplane->prop.saturation = 0x4000;
+diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
+index 5131bfb94f06..0cb69ee94ac1 100644
+--- a/drivers/gpu/drm/bridge/sil-sii8620.c
++++ b/drivers/gpu/drm/bridge/sil-sii8620.c
+@@ -788,6 +788,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx)
+ static void sii8620_fetch_edid(struct sii8620 *ctx)
+ {
+ u8 lm_ddc, ddc_cmd, int3, cbus;
++ unsigned long timeout;
+ int fetched, i;
+ int edid_len = EDID_LENGTH;
+ u8 *edid;
+@@ -837,23 +838,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
+ REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
+ );
+
+- do {
+- int3 = sii8620_readb(ctx, REG_INTR3);
++ int3 = 0;
++ timeout = jiffies + msecs_to_jiffies(200);
++ for (;;) {
+ cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
+-
+- if (int3 & BIT_DDC_CMD_DONE)
+- break;
+-
+- if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
++ if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
++ kfree(edid);
++ edid = NULL;
++ goto end;
++ }
++ if (int3 & BIT_DDC_CMD_DONE) {
++ if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
++ >= FETCH_SIZE)
++ break;
++ } else {
++ int3 = sii8620_readb(ctx, REG_INTR3);
++ }
++ if (time_is_before_jiffies(timeout)) {
++ ctx->error = -ETIMEDOUT;
++ dev_err(ctx->dev, "timeout during EDID read\n");
+ kfree(edid);
+ edid = NULL;
+ goto end;
+ }
+- } while (1);
+-
+- sii8620_readb(ctx, REG_DDC_STATUS);
+- while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+ usleep_range(10, 20);
++ }
+
+ sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
+ if (fetched + FETCH_SIZE == EDID_LENGTH) {
+@@ -1036,23 +1045,23 @@ static void sii8620_set_format(struct sii8620 *ctx)
+ BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
+ ctx->use_packed_pixel ? ~0 : 0);
+ } else {
+- if (ctx->use_packed_pixel)
++ if (ctx->use_packed_pixel) {
+ sii8620_write_seq_static(ctx,
+ REG_VID_MODE, BIT_VID_MODE_M1080P,
+ REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
+ REG_MHLTX_CTL6, 0x60
+ );
+- else
++ } else {
+ sii8620_write_seq_static(ctx,
+ REG_VID_MODE, 0,
+ REG_MHL_TOP_CTL, 1,
+ REG_MHLTX_CTL6, 0xa0
+ );
++ }
+ }
+
+ if (ctx->use_packed_pixel)
+- out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
+- BIT_TPI_OUTPUT_CSCMODE709;
++ out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
+ else
+ out_fmt = VAL_TPI_FORMAT(RGB, FULL);
+
+@@ -1187,7 +1196,7 @@ static void sii8620_start_hdmi(struct sii8620 *ctx)
+ int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+ int i;
+
+- for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
++ for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
+ if (clk < clk_spec[i].max_clk)
+ break;
+
+diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+index 6be5b53c3b27..f905c214fdd0 100644
+--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
++++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+@@ -261,7 +261,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
+ unsigned long val;
+
+ val = readl(ctx->addr + DECON_WINCONx(win));
+- val &= ~WINCONx_BPPMODE_MASK;
++ val &= WINCONx_ENWIN_F;
+
+ switch (fb->format->format) {
+ case DRM_FORMAT_XRGB1555:
+@@ -352,8 +352,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
+ writel(val, ctx->addr + DECON_VIDOSDxB(win));
+ }
+
+- val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
+- VIDOSD_Wx_ALPHA_B_F(0x0);
++ val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
++ VIDOSD_Wx_ALPHA_B_F(0xff);
+ writel(val, ctx->addr + DECON_VIDOSDxC(win));
+
+ val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
+diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+index 0506b2b17ac1..48f913d8208c 100644
+--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
++++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+@@ -532,21 +532,25 @@ static int gsc_src_set_fmt(struct device *dev, u32 fmt)
+ GSC_IN_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
++ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
++ break;
+ case DRM_FORMAT_NV61:
+- cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
+- GSC_IN_YUV420_2P);
++ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
+ break;
+ case DRM_FORMAT_YUV422:
+ cfg |= GSC_IN_YUV422_3P;
+ break;
+ case DRM_FORMAT_YUV420:
++ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
++ break;
+ case DRM_FORMAT_YVU420:
+- cfg |= GSC_IN_YUV420_3P;
++ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
+ break;
+ case DRM_FORMAT_NV12:
++ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
++ break;
+ case DRM_FORMAT_NV16:
+- cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
+- GSC_IN_YUV420_2P);
++ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+@@ -806,18 +810,25 @@ static int gsc_dst_set_fmt(struct device *dev, u32 fmt)
+ GSC_OUT_CHROMA_ORDER_CRCB);
+ break;
+ case DRM_FORMAT_NV21:
+- case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
+ break;
++ case DRM_FORMAT_NV61:
++ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
++ break;
+ case DRM_FORMAT_YUV422:
++ cfg |= GSC_OUT_YUV422_3P;
++ break;
+ case DRM_FORMAT_YUV420:
++ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
++ break;
+ case DRM_FORMAT_YVU420:
+- cfg |= GSC_OUT_YUV420_3P;
++ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
+ break;
+ case DRM_FORMAT_NV12:
++ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
++ break;
+ case DRM_FORMAT_NV16:
+- cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
+- GSC_OUT_YUV420_2P);
++ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
+ break;
+ default:
+ dev_err(ippdrv->dev, "invalid target yuv order 0x%x.\n", fmt);
+diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
+index 4704a993cbb7..16b39734115c 100644
+--- a/drivers/gpu/drm/exynos/regs-gsc.h
++++ b/drivers/gpu/drm/exynos/regs-gsc.h
+@@ -138,6 +138,7 @@
+ #define GSC_OUT_YUV420_3P (3 << 4)
+ #define GSC_OUT_YUV422_1P (4 << 4)
+ #define GSC_OUT_YUV422_2P (5 << 4)
++#define GSC_OUT_YUV422_3P (6 << 4)
+ #define GSC_OUT_YUV444 (7 << 4)
+ #define GSC_OUT_TILE_TYPE_MASK (1 << 2)
+ #define GSC_OUT_TILE_C_16x8 (0 << 2)
+diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
+index 9bf4045cd679..73c672fc17c4 100644
+--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
++++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
+@@ -42,6 +42,8 @@
+ #include <linux/vfio.h>
+ #include <linux/mdev.h>
+
++#include <linux/nospec.h>
++
+ #include "i915_drv.h"
+ #include "gvt.h"
+
+@@ -953,7 +955,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+ } else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
+ struct vfio_region_info info;
+ struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
+- int i, ret;
++ unsigned int i;
++ int ret;
+ struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
+ size_t size;
+ int nr_areas = 1;
+@@ -1030,6 +1033,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
+ if (info.index >= VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions)
+ return -EINVAL;
++ info.index =
++ array_index_nospec(info.index,
++ VFIO_PCI_NUM_REGIONS +
++ vgpu->vdev.num_regions);
+
+ i = info.index - VFIO_PCI_NUM_REGIONS;
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 2170534101ca..60ffb70bb908 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -599,7 +599,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
+ struct nouveau_bo *nvbo;
+ uint32_t data;
+
+- if (unlikely(r->bo_index > req->nr_buffers)) {
++ if (unlikely(r->bo_index >= req->nr_buffers)) {
+ NV_PRINTK(err, cli, "reloc bo index invalid\n");
+ ret = -EINVAL;
+ break;
+@@ -609,7 +609,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
+ if (b->presumed.valid)
+ continue;
+
+- if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
++ if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
+ NV_PRINTK(err, cli, "reloc container bo index invalid\n");
+ ret = -EINVAL;
+ break;
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index 0598b4c18c25..75b1c8c03ce9 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -470,7 +470,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
+ * unaligned offset is malformed and cause commands stream
+ * corruption on the buffer address relocation.
+ */
+- if (offset & 3 || offset >= obj->gem.size) {
++ if (offset & 3 || offset > obj->gem.size) {
+ err = -EINVAL;
+ goto fail;
+ }
+diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
+index db509ab8874e..acd99783bbca 100644
+--- a/drivers/gpu/host1x/job.c
++++ b/drivers/gpu/host1x/job.c
+@@ -686,7 +686,8 @@ void host1x_job_unpin(struct host1x_job *job)
+ for (i = 0; i < job->num_unpins; i++) {
+ struct host1x_job_unpin_data *unpin = &job->unpins[i];
+
+- if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
++ if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
++ unpin->size && host->domain) {
+ iommu_unmap(host->domain, job->addr_phys[i],
+ unpin->size);
+ free_iova(&host->iova,
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index c401b5b63f4c..4c72e68637c2 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -3212,8 +3212,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
+ if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+
+- features->x_max = 4096;
+- features->y_max = 4096;
++ if (features->type == INTUOSHT2) {
++ features->x_max = features->x_max / 10;
++ features->y_max = features->y_max / 10;
++ }
++ else {
++ features->x_max = 4096;
++ features->y_max = 4096;
++ }
+ }
+ else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
+ features->device_type |= WACOM_DEVICETYPE_PAD;
+diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
+index f5f3f8cf57ea..5f87764d7015 100644
+--- a/drivers/hwmon/nct6775.c
++++ b/drivers/hwmon/nct6775.c
+@@ -4107,7 +4107,7 @@ static int nct6775_probe(struct platform_device *pdev)
+ * The temperature is already monitored if the respective bit in <mask>
+ * is set.
+ */
+- for (i = 0; i < 32; i++) {
++ for (i = 0; i < 31; i++) {
+ if (!(data->temp_mask & BIT(i + 1)))
+ continue;
+ if (!reg_temp_alternate[i])
+diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
+index 75c6b98585ba..b73dd837fb53 100644
+--- a/drivers/i2c/busses/i2c-imx.c
++++ b/drivers/i2c/busses/i2c-imx.c
+@@ -665,9 +665,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
+ struct imx_i2c_dma *dma = i2c_imx->dma;
+ struct device *dev = &i2c_imx->adapter.dev;
+
+- temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+- temp |= I2CR_DMAEN;
+- imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+
+ dma->chan_using = dma->chan_rx;
+ dma->dma_transfer_dir = DMA_DEV_TO_MEM;
+@@ -780,6 +777,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
+ int i, result;
+ unsigned int temp;
+ int block_data = msgs->flags & I2C_M_RECV_LEN;
++ int use_dma = i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data;
+
+ dev_dbg(&i2c_imx->adapter.dev,
+ "<%s> write slave address: addr=0x%x\n",
+@@ -806,12 +804,14 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
+ */
+ if ((msgs->len - 1) || block_data)
+ temp &= ~I2CR_TXAK;
++ if (use_dma)
++ temp |= I2CR_DMAEN;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
+
+ dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
+
+- if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data)
++ if (use_dma)
+ return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg);
+
+ /* read data */
+diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
+index a9126b3cda61..847d9bf6744c 100644
+--- a/drivers/i2c/i2c-core-acpi.c
++++ b/drivers/i2c/i2c-core-acpi.c
+@@ -475,11 +475,16 @@ static int acpi_gsb_i2c_write_bytes(struct i2c_client *client,
+ msgs[0].buf = buffer;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+- if (ret < 0)
+- dev_err(&client->adapter->dev, "i2c write failed\n");
+
+ kfree(buffer);
+- return ret;
++
++ if (ret < 0) {
++ dev_err(&client->adapter->dev, "i2c write failed: %d\n", ret);
++ return ret;
++ }
++
++ /* 1 transfer must have completed successfully */
++ return (ret == 1) ? 0 : -EIO;
+ }
+
+ static acpi_status
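Annotation: the acpi_gsb_i2c_write_bytes() change above relies on i2c_transfer() returning the number of messages completed on success, so a non-negative value that is not the full message count still means the write failed. A minimal sketch of that "map partial success to -EIO" idiom, with a hypothetical fake_transfer() standing in for i2c_transfer():

    #include <stdio.h>
    #include <errno.h>

    /* Hypothetical stand-in: returns how many of the requested messages
     * were actually transferred, or a negative errno. */
    static int fake_transfer(int requested)
    {
        return requested - 1;       /* simulate a partial transfer */
    }

    static int write_bytes(int nmsgs)
    {
        int ret = fake_transfer(nmsgs);

        if (ret < 0)
            return ret;             /* transport error, propagate it */

        /* every message must have completed for the write to count */
        return (ret == nmsgs) ? 0 : -EIO;
    }

    int main(void)
    {
        printf("status: %d\n", write_bytes(1)); /* prints -5 (-EIO) on Linux */
        return 0;
    }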
+diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
+index 8f26428804a2..5f625ffa2a88 100644
+--- a/drivers/iio/pressure/bmp280-core.c
++++ b/drivers/iio/pressure/bmp280-core.c
+@@ -362,10 +362,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
+ }
+ comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
+
+- *val = comp_humidity;
+- *val2 = 1024;
++ *val = comp_humidity * 1000 / 1024;
+
+- return IIO_VAL_FRACTIONAL;
++ return IIO_VAL_INT;
+ }
+
+ static int bmp280_read_raw(struct iio_dev *indio_dev,
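Annotation: the bmp280 hunk reports humidity as a plain integer: the compensated value, which is in units of 1/1024 %RH, is scaled to milli-percent inside the driver instead of being returned as a numerator/denominator pair. A short sketch of that scaling (the sample value is invented):

    #include <stdio.h>

    /* The compensation routine yields relative humidity in 1/1024 %RH.
     * Scaling by 1000/1024 turns it into an integer milli-%RH reading. */
    static int humidity_milli_percent(unsigned int comp_humidity)
    {
        return (int)(comp_humidity * 1000 / 1024);
    }

    int main(void)
    {
        unsigned int raw = 47923;   /* roughly 46.8 %RH before scaling */

        printf("%d m%%RH\n", humidity_milli_percent(raw)); /* 46799 */
        return 0;
    }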
+diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
+index 1587cedee13e..761d3ce6a63a 100644
+--- a/drivers/infiniband/hw/mlx4/mr.c
++++ b/drivers/infiniband/hw/mlx4/mr.c
+@@ -247,8 +247,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
+ }
+
+ if (flags & IB_MR_REREG_ACCESS) {
+- if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
+- return -EPERM;
++ if (ib_access_writable(mr_access_flags) &&
++ !mmr->umem->writable) {
++ err = -EPERM;
++ goto release_mpt_entry;
++ }
+
+ err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
+ convert_access(mr_access_flags));
+diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
+index 3c7522d025f2..93d67d97c279 100644
+--- a/drivers/infiniband/hw/mlx5/srq.c
++++ b/drivers/infiniband/hw/mlx5/srq.c
+@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
+
+ desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
+ srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
+- if (desc_size == 0 || srq->msrq.max_gs > desc_size)
+- return ERR_PTR(-EINVAL);
++ if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
++ err = -EINVAL;
++ goto err_srq;
++ }
+ desc_size = roundup_pow_of_two(desc_size);
+ desc_size = max_t(size_t, 32, desc_size);
+- if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
+- return ERR_PTR(-EINVAL);
++ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
++ err = -EINVAL;
++ goto err_srq;
++ }
+ srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
+ sizeof(struct mlx5_wqe_data_seg);
+ srq->msrq.wqe_shift = ilog2(desc_size);
+ buf_size = srq->msrq.max * desc_size;
+- if (buf_size < desc_size)
+- return ERR_PTR(-EINVAL);
++ if (buf_size < desc_size) {
++ err = -EINVAL;
++ goto err_srq;
++ }
+ in.type = init_attr->srq_type;
+
+ if (pd->uobject)
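Annotation: the mlx5 SRQ hunk converts early "return ERR_PTR(-EINVAL)" exits into "goto err_srq" so the srq object allocated earlier in the function is released on every failure path. A standalone sketch of that single-exit cleanup idiom, with plain malloc()/free() standing in for the real resource:

    #include <stdlib.h>
    #include <errno.h>

    static int create_object(size_t desc_size, size_t max_entries)
    {
        int err;
        void *obj = malloc(64);     /* resource acquired before the checks */

        if (!obj)
            return -ENOMEM;

        if (desc_size == 0) {       /* validation failures must not leak obj */
            err = -EINVAL;
            goto err_free;
        }
        if (max_entries * desc_size < desc_size) {  /* wrap-around check */
            err = -EINVAL;
            goto err_free;
        }

        /* ... normal initialisation would continue here ... */
        free(obj);
        return 0;

    err_free:
        free(obj);
        return err;
    }

    int main(void)
    {
        return create_object(0, 16) == -EINVAL ? 0 : 1;
    }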
+diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
+index 54cc9cb1e3b7..de853bcc2384 100644
+--- a/drivers/infiniband/sw/rxe/rxe_req.c
++++ b/drivers/infiniband/sw/rxe/rxe_req.c
+@@ -645,6 +645,9 @@ next_wqe:
+ } else {
+ goto exit;
+ }
++ if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
++ qp->sq_sig_type == IB_SIGNAL_ALL_WR)
++ rxe_run_task(&qp->comp.task, 1);
+ qp->req.wqe_index = next_index(qp->sq.queue,
+ qp->req.wqe_index);
+ goto next_wqe;
+diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
+index 8bb866c7b985..8eeffa066022 100644
+--- a/drivers/input/rmi4/rmi_2d_sensor.c
++++ b/drivers/input/rmi4/rmi_2d_sensor.c
+@@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
+ if (obj->type == RMI_2D_OBJECT_NONE)
+ return;
+
+- if (axis_align->swap_axes)
+- swap(obj->x, obj->y);
+-
+ if (axis_align->flip_x)
+ obj->x = sensor->max_x - obj->x;
+
+ if (axis_align->flip_y)
+ obj->y = sensor->max_y - obj->y;
+
++ if (axis_align->swap_axes)
++ swap(obj->x, obj->y);
++
+ /*
+ * Here checking if X offset or y offset are specified is
+ * redundant. We just add the offsets or clip the values.
+@@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
+ x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
+ y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
+
+- if (axis_align->swap_axes)
+- swap(x, y);
+-
+ if (axis_align->flip_x)
+ x = min(RMI_2D_REL_POS_MAX, -x);
+
+ if (axis_align->flip_y)
+ y = min(RMI_2D_REL_POS_MAX, -y);
+
++ if (axis_align->swap_axes)
++ swap(x, y);
++
+ if (x || y) {
+ input_report_rel(sensor->input, REL_X, x);
+ input_report_rel(sensor->input, REL_Y, y);
+@@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
+ struct input_dev *input = sensor->input;
+ int res_x;
+ int res_y;
++ int max_x, max_y;
+ int input_flags = 0;
+
+ if (sensor->report_abs) {
+- if (sensor->axis_align.swap_axes) {
+- swap(sensor->max_x, sensor->max_y);
+- swap(sensor->axis_align.clip_x_low,
+- sensor->axis_align.clip_y_low);
+- swap(sensor->axis_align.clip_x_high,
+- sensor->axis_align.clip_y_high);
+- }
+-
+ sensor->min_x = sensor->axis_align.clip_x_low;
+ if (sensor->axis_align.clip_x_high)
+ sensor->max_x = min(sensor->max_x,
+@@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
+ sensor->axis_align.clip_y_high);
+
+ set_bit(EV_ABS, input->evbit);
+- input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
+- 0, 0);
+- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
+- 0, 0);
++
++ max_x = sensor->max_x;
++ max_y = sensor->max_y;
++ if (sensor->axis_align.swap_axes)
++ swap(max_x, max_y);
++ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
++ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
+
+ if (sensor->x_mm && sensor->y_mm) {
+ res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
+ res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
++ if (sensor->axis_align.swap_axes)
++ swap(res_x, res_y);
+
+ input_abs_set_res(input, ABS_X, res_x);
+ input_abs_set_res(input, ABS_Y, res_y);
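Annotation: the RMI4 changes apply the axis swap only after flipping, since each coordinate must be mirrored against the maximum of the axis it still belongs to. A small example with invented maxima showing how the two orderings diverge:

    #include <stdio.h>

    #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)

    int main(void)
    {
        int max_x = 1000, max_y = 2000;
        int x = 100, y = 300;

        /* flip first, against the axis the value still belongs to ... */
        x = max_x - x;              /* 900  */
        y = max_y - y;              /* 1700 */
        /* ... then swap, as the patched driver does */
        SWAP(x, y);
        printf("flip then swap: x=%d y=%d\n", x, y);   /* x=1700 y=900 */

        /* swapping first flips each value against the wrong maximum */
        x = 100; y = 300;
        SWAP(x, y);
        x = max_x - x;              /* 700, but 300 was a Y coordinate */
        y = max_y - y;              /* 1900 */
        printf("swap then flip: x=%d y=%d\n", x, y);
        return 0;
    }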
+diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
+index b20c23f970f4..262a0f0f8fd5 100644
+--- a/drivers/md/raid10.c
++++ b/drivers/md/raid10.c
+@@ -3754,6 +3754,13 @@ static int raid10_run(struct mddev *mddev)
+ disk->rdev->saved_raid_disk < 0)
+ conf->fullsync = 1;
+ }
++
++ if (disk->replacement &&
++ !test_bit(In_sync, &disk->replacement->flags) &&
++ disk->replacement->saved_raid_disk < 0) {
++ conf->fullsync = 1;
++ }
++
+ disk->recovery_disabled = mddev->recovery_disabled - 1;
+ }
+
+diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
+index 5dc8bd042cc5..504e34f29518 100644
+--- a/drivers/mtd/devices/mtd_dataflash.c
++++ b/drivers/mtd/devices/mtd_dataflash.c
+@@ -737,8 +737,8 @@ static struct flash_info dataflash_data[] = {
+ { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
+ { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
+
+- { "AT45DB641E", 0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+- { "at45db641e", 0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
++ { "AT45DB641E", 0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
++ { "at45db641e", 0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
+ };
+
+ static struct flash_info *jedec_lookup(struct spi_device *spi,
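Annotation: the dataflash hunk adds ULL suffixes because the extended JEDEC IDs need 40 bits; an explicit unsigned long long literal keeps the constant's type independent of the platform's long width, and older compilers targeting 32-bit otherwise warn that the constant is too large for long. The sketch below only shows that such an ID does not survive a 32-bit field; the truncation is hypothetical, not something the driver does.

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>

    int main(void)
    {
        uint64_t ext_id = 0x1f28000100ULL;  /* manufacturer + device + extended ID */
        uint32_t legacy = (uint32_t)ext_id; /* a 32-bit field drops the 0x1f byte */

        printf("64-bit id: 0x%" PRIx64 "\n", ext_id);
        printf("truncated: 0x%" PRIx32 "\n", legacy);
        return 0;
    }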
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+index 352beff796ae..828e2e56b75e 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+@@ -1529,6 +1529,7 @@ struct bnx2x {
+ struct link_vars link_vars;
+ u32 link_cnt;
+ struct bnx2x_link_report_data last_reported_link;
++ bool force_link_down;
+
+ struct mdio_if_info mdio;
+
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 6465414dad74..8498a357d389 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -1260,6 +1260,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
+ {
+ struct bnx2x_link_report_data cur_data;
+
++ if (bp->force_link_down) {
++ bp->link_vars.link_up = 0;
++ return;
++ }
++
+ /* reread mf_cfg */
+ if (IS_PF(bp) && !CHIP_IS_E1(bp))
+ bnx2x_read_mf_cfg(bp);
+@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+ bp->pending_max = 0;
+ }
+
++ bp->force_link_down = false;
+ if (bp->port.pmf) {
+ rc = bnx2x_initial_phy_init(bp, load_mode);
+ if (rc)
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+index e855a271db48..bd3e3f080ebf 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
+ bp->sp_rtnl_state = 0;
+ smp_mb();
+
++ /* Immediately indicate link as down */
++ bp->link_vars.link_up = 0;
++ bp->force_link_down = true;
++ netif_carrier_off(bp->dev);
++ BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
++
+ bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
+ bnx2x_nic_load(bp, LOAD_NORMAL);
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 94931318587c..937db8019289 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -6348,7 +6348,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+ rc = bnxt_request_irq(bp);
+ if (rc) {
+ netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
+- goto open_err;
++ goto open_err_irq;
+ }
+ }
+
+@@ -6386,6 +6386,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
+
+ open_err:
+ bnxt_disable_napi(bp);
++
++open_err_irq:
+ bnxt_del_napi(bp);
+
+ open_err_free_mem:
+@@ -7866,11 +7868,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
+ int rx, tx, cp;
+
+ _bnxt_get_max_rings(bp, &rx, &tx, &cp);
++ *max_rx = rx;
++ *max_tx = tx;
+ if (!rx || !tx || !cp)
+ return -ENOMEM;
+
+- *max_rx = rx;
+- *max_tx = tx;
+ return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
+ }
+
+@@ -7884,8 +7886,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
+ /* Not enough rings, try disabling agg rings. */
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
+- if (rc)
++ if (rc) {
++ /* set BNXT_FLAG_AGG_RINGS back for consistency */
++ bp->flags |= BNXT_FLAG_AGG_RINGS;
+ return rc;
++ }
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+index 2887bcaf6af5..45c51277e0cf 100644
+--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
++++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+@@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
+ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
+ {
+ struct octeon_mgmt *p = netdev_priv(netdev);
+- int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
++ int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ netdev->mtu = new_mtu;
+
+- cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
++ /* HW lifts the limit if the frame is VLAN tagged
++ * (+4 bytes per each tag, up to two tags)
++ */
++ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
++ /* Set the hardware to truncate packets larger than the MTU. The jabber
++ * register must be set to a multiple of 8 bytes, so round up. JABBER is
++ * an unconditional limit, so we need to account for two possible VLAN
++ * tags.
++ */
+ cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
+- (size_without_fcs + 7) & 0xfff8);
++ (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
+
+ return 0;
+ }
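Annotation: the octeon_mgmt hunk recomputes the frame limit from the MTU plus Ethernet header and FCS, then rounds the jabber limit up to a multiple of 8 while leaving room for two VLAN tags. A hedged sketch of the same arithmetic with the usual 802.1Q sizes:

    #include <stdio.h>

    #define ETH_HLEN    14  /* destination + source MAC + EtherType */
    #define ETH_FCS_LEN  4  /* trailing CRC */
    #define VLAN_HLEN    4  /* one 802.1Q tag */

    int main(void)
    {
        int new_mtu = 1500;
        int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;      /* 1518 */

        /* jabber limit: allow two VLAN tags and round up to 8 bytes */
        int jabber = (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8; /* 1528 */

        printf("frame max %d, jabber %d\n", max_packet, jabber);
        return 0;
    }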
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 44a0d04dd8a0..74a42f12064b 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -253,7 +253,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
+ "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
+ enable ? "set" : "unset", pi->port_id, i, -err);
+ else
+- txq->dcb_prio = value;
++ txq->dcb_prio = enable ? value : 0;
+ }
+ }
+
+diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
+index 8dc21c9f9716..df613a87ccff 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
++++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
+@@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
+ enic->rfs_h.max = enic->config.num_arfs;
+ enic->rfs_h.free = enic->rfs_h.max;
+ enic->rfs_h.toclean = 0;
+- enic_rfs_timer_start(enic);
+ }
+
+ void enic_rfs_flw_tbl_free(struct enic *enic)
+@@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
+
+ enic_rfs_timer_stop(enic);
+ spin_lock_bh(&enic->rfs_h.lock);
+- enic->rfs_h.free = 0;
+ for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
+ struct hlist_head *hhead;
+ struct hlist_node *tmp;
+@@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
+ enic_delfltr(enic, n->fltr_id);
+ hlist_del(&n->node);
+ kfree(n);
++ enic->rfs_h.free++;
+ }
+ }
+ spin_unlock_bh(&enic->rfs_h.lock);
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index a03a32a4ffca..800edfbd36c1 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1930,7 +1930,7 @@ static int enic_open(struct net_device *netdev)
+ vnic_intr_unmask(&enic->intr[i]);
+
+ enic_notify_timer_start(enic);
+- enic_rfs_flw_tbl_init(enic);
++ enic_rfs_timer_start(enic);
+
+ return 0;
+
+@@ -2854,6 +2854,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ enic->notify_timer.function = enic_notify_timer;
+ enic->notify_timer.data = (unsigned long)enic;
+
++ enic_rfs_flw_tbl_init(enic);
+ enic_set_rx_coal_setting(enic);
+ INIT_WORK(&enic->reset, enic_reset);
+ INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
+diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+index 519a021c0a25..a202c50d6fc7 100644
+--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
++++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
+ /* Default alignment for start of data in an Rx FD */
+ #define DPAA_FD_DATA_ALIGNMENT 16
+
++/* The DPAA requires 256 bytes reserved and mapped for the SGT */
++#define DPAA_SGT_SIZE 256
++
+ /* Values for the L3R field of the FM Parse Results
+ */
+ /* L3 Type field: First IP Present IPv4 */
+@@ -1622,8 +1625,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
+
+ if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
+ nr_frags = skb_shinfo(skb)->nr_frags;
+- dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
+- sizeof(struct qm_sg_entry) * (1 + nr_frags),
++ dma_unmap_single(dev, addr,
++ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+ dma_dir);
+
+ /* The sgt buffer has been allocated with netdev_alloc_frag(),
+@@ -1907,8 +1910,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
+ void *sgt_buf;
+
+ /* get a page frag to store the SGTable */
+- sz = SKB_DATA_ALIGN(priv->tx_headroom +
+- sizeof(struct qm_sg_entry) * (1 + nr_frags));
++ sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
+ sgt_buf = netdev_alloc_frag(sz);
+ if (unlikely(!sgt_buf)) {
+ netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
+@@ -1976,9 +1978,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
+ skbh = (struct sk_buff **)buffer_start;
+ *skbh = skb;
+
+- addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
+- sizeof(struct qm_sg_entry) * (1 + nr_frags),
+- dma_dir);
++ addr = dma_map_single(dev, buffer_start,
++ priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+ if (unlikely(dma_mapping_error(dev, addr))) {
+ dev_err(dev, "DMA mapping failed");
+ err = -EINVAL;
+diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
+index 1789b206be58..495190764155 100644
+--- a/drivers/net/ethernet/freescale/fman/fman_port.c
++++ b/drivers/net/ethernet/freescale/fman/fman_port.c
+@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
+ #define HWP_HXS_PHE_REPORT 0x00000800
+ #define HWP_HXS_PCAC_PSTAT 0x00000100
+ #define HWP_HXS_PCAC_PSTOP 0x00000001
++#define HWP_HXS_TCP_OFFSET 0xA
++#define HWP_HXS_UDP_OFFSET 0xB
++#define HWP_HXS_SH_PAD_REM 0x80000000
++
+ struct fman_port_hwp_regs {
+ struct {
+ u32 ssa; /* Soft Sequence Attachment */
+@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
+ iowrite32be(0xffffffff, &regs->pmda[i].lcv);
+ }
+
++ /* Short packet padding removal from checksum calculation */
++ iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
++ iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
++
+ start_port_hwp(port);
+ }
+
+diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
+index 98493be7b4af..046af22a37cb 100644
+--- a/drivers/net/ethernet/ibm/ibmvnic.c
++++ b/drivers/net/ethernet/ibm/ibmvnic.c
+@@ -1463,8 +1463,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
+
+ rc = ibmvnic_login(netdev);
+ if (rc) {
+- adapter->state = VNIC_PROBED;
+- return 0;
++ adapter->state = reset_state;
++ return rc;
+ }
+
+ rc = reset_tx_pools(adapter);
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+index 64429a14c630..815284fe9324 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+@@ -1895,7 +1895,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
+ if (enable_addr != 0)
+ rar_high |= IXGBE_RAH_AV;
+
++ /* Record lower 32 bits of MAC address and then make
++ * sure that write is flushed to hardware before writing
++ * the upper 16 bits and setting the valid bit.
++ */
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
++ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+
+ return 0;
+@@ -1927,8 +1932,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
+ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
+ rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
+
+- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
++ /* Clear the address valid bit and upper 16 bits of the address
++ * before clearing the lower bits. This way we aren't updating
++ * a live filter.
++ */
+ IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
++ IXGBE_WRITE_FLUSH(hw);
++ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+
+ /* clear VMDq pool/queue selection for this RAR */
+ hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
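Annotation: both ixgbe hunks are about never letting the hardware observe a half-updated receive-address filter: the low dword is written and flushed before the high dword sets the valid bit, and clearing runs in the opposite order. A sketch of that "order plus flush" discipline against a fake register pair (the flush is only a comment here; in the driver it is a posted-write read-back):

    #include <stdio.h>

    static unsigned int fake_ral, fake_rah;  /* stand-ins for RAL/RAH */
    #define RAH_AV 0x80000000u               /* "address valid" bit */

    static void write_flush(unsigned int *reg, unsigned int val)
    {
        *reg = val;
        /* The driver issues IXGBE_WRITE_FLUSH() (a register read-back)
         * here so the posted write reaches the device before the next
         * write is started; omitted in this sketch. */
    }

    static void set_filter(unsigned int low, unsigned int high16)
    {
        write_flush(&fake_ral, low);         /* 1. low half first, flushed */
        fake_rah = high16 | RAH_AV;          /* 2. then mark the entry valid */
    }

    static void clear_filter(void)
    {
        write_flush(&fake_rah, 0);           /* 1. drop the valid bit first */
        fake_ral = 0;                        /* 2. then clear the low half */
    }

    int main(void)
    {
        set_filter(0xddccbbaau, 0xffeeu);
        printf("RAL=%08x RAH=%08x\n", fake_ral, fake_rah);
        clear_filter();
        printf("RAL=%08x RAH=%08x\n", fake_ral, fake_rah);
        return 0;
    }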
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index f697084937c3..de72b66df3e5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1525,17 +1525,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
+ }
+
+ /* Public E-Switch API */
+-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
++#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
++
+
+ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
+ {
+ int err;
+ int i, enabled_events;
+
+- if (!ESW_ALLOWED(esw))
+- return 0;
+-
+- if (!MLX5_ESWITCH_MANAGER(esw->dev) ||
++ if (!ESW_ALLOWED(esw) ||
+ !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
+ esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
+ return -EOPNOTSUPP;
+@@ -1728,7 +1726,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
+ u64 node_guid;
+ int err = 0;
+
+- if (!ESW_ALLOWED(esw))
++ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
+ return -EINVAL;
+@@ -1805,7 +1803,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
+ {
+ struct mlx5_vport *evport;
+
+- if (!ESW_ALLOWED(esw))
++ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+index 71153c0f1605..aa9a88e84e3b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EACCES;
+- if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+- return -EOPNOTSUPP;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+index cd34097b79f1..37a6d7822a38 100644
+--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
++++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
+ err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
+ nfp_resource_address(state->res),
+ fwinf, sizeof(*fwinf));
+- if (err < sizeof(*fwinf))
++ if (err < (int)sizeof(*fwinf))
+ goto err_release;
+
+ if (!nffw_res_flg_init_get(fwinf))
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+index b306961b02fd..d62dccb85539 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+@@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
+ *type = DCBX_PROTOCOL_ROCE_V2;
+ } else {
+ *type = DCBX_MAX_PROTOCOL_TYPE;
+- DP_ERR(p_hwfn,
+- "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
+- id, app_prio_bitmap);
++ DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
++ app_prio_bitmap);
+ return false;
+ }
+
+@@ -1472,8 +1471,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+- *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
+- DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
++ *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
++ DCB_CAP_DCBX_STATIC);
+ break;
+ default:
+ *cap = false;
+@@ -1541,8 +1540,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
+ if (!dcbx_info)
+ return 0;
+
+- if (dcbx_info->operational.enabled)
+- mode |= DCB_CAP_DCBX_LLD_MANAGED;
+ if (dcbx_info->operational.ieee)
+ mode |= DCB_CAP_DCBX_VER_IEEE;
+ if (dcbx_info->operational.cee)
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+index c06ad4f0758e..5f52f14761a3 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+@@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
+
+ skb = build_skb(buffer->data, 0);
+ if (!skb) {
+- rc = -ENOMEM;
+- goto out_post;
++ DP_INFO(cdev, "Failed to build SKB\n");
++ kfree(buffer->data);
++ goto out_post1;
+ }
+
+ data->u.placement_offset += NET_SKB_PAD;
+@@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
+ cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
+ data->opaque_data_0,
+ data->opaque_data_1);
++ } else {
++ DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
++ QED_MSG_LL2 | QED_MSG_STORAGE),
++ "Dropping the packet\n");
++ kfree(buffer->data);
+ }
+
++out_post1:
+ /* Update Buffer information and update FW producer */
+ buffer->data = new_data;
+ buffer->phys_addr = new_phys_addr;
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
+index 2c958921dfb3..954f7ce4cf28 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
+@@ -565,8 +565,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
+ /* Fastpath interrupts */
+ for (j = 0; j < 64; j++) {
+ if ((0x2ULL << j) & status) {
+- hwfn->simd_proto_handler[j].func(
+- hwfn->simd_proto_handler[j].token);
++ struct qed_simd_fp_handler *p_handler =
++ &hwfn->simd_proto_handler[j];
++
++ if (p_handler->func)
++ p_handler->func(p_handler->token);
++ else
++ DP_NOTICE(hwfn,
++ "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
++ j, status);
++
+ status &= ~(0x2ULL << j);
+ rc = IRQ_HANDLED;
+ }
+diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+index 287d89dd086f..9c94240bb05a 100644
+--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
++++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ ret = kstrtoul(buf, 16, &data);
++ if (ret)
++ return ret;
+
+ switch (data) {
+ case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
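Annotation: the qlcnic sysfs hunk simply stops ignoring the kstrtoul() return value, so malformed input is rejected instead of being acted on as garbage. The same pattern in plain C, with strtoul() standing in for the kernel helper:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* Userspace approximation of "parse or reject" for a sysfs-style handler. */
    static int parse_hex(const char *buf, unsigned long *out)
    {
        char *end;

        errno = 0;
        *out = strtoul(buf, &end, 16);
        if (errno || end == buf || *end != '\0')
            return -EINVAL;          /* the patched handler returns this early */
        return 0;
    }

    int main(void)
    {
        unsigned long data;

        printf("%d\n", parse_hex("0x3d", &data));  /* 0, data == 0x3d */
        printf("%d\n", parse_hex("bogus", &data)); /* rejected: -22 (-EINVAL) */
        return 0;
    }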
+diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
+index 9c236298fe21..b1f5f0b8e546 100644
+--- a/drivers/net/ethernet/qualcomm/qca_spi.c
++++ b/drivers/net/ethernet/qualcomm/qca_spi.c
+@@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev)
+ return ret;
+ }
+
+- netif_start_queue(qca->net_dev);
++ /* SPI thread takes care of TX queue */
+
+ return 0;
+ }
+@@ -761,6 +761,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
+ qca->net_dev->stats.tx_errors++;
+ /* Trigger tx queue flush and QCA7000 reset */
+ qca->sync = QCASPI_SYNC_UNKNOWN;
++
++ if (qca->spi_thread)
++ wake_up_process(qca->spi_thread);
+ }
+
+ static int
+@@ -879,22 +882,22 @@ qca_spi_probe(struct spi_device *spi)
+
+ if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
+ (qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
+- dev_info(&spi->dev, "Invalid clkspeed: %d\n",
+- qcaspi_clkspeed);
++ dev_err(&spi->dev, "Invalid clkspeed: %d\n",
++ qcaspi_clkspeed);
+ return -EINVAL;
+ }
+
+ if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
+ (qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
+- dev_info(&spi->dev, "Invalid burst len: %d\n",
+- qcaspi_burst_len);
++ dev_err(&spi->dev, "Invalid burst len: %d\n",
++ qcaspi_burst_len);
+ return -EINVAL;
+ }
+
+ if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
+ (qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
+- dev_info(&spi->dev, "Invalid pluggable: %d\n",
+- qcaspi_pluggable);
++ dev_err(&spi->dev, "Invalid pluggable: %d\n",
++ qcaspi_pluggable);
+ return -EINVAL;
+ }
+
+@@ -956,8 +959,8 @@ qca_spi_probe(struct spi_device *spi)
+ }
+
+ if (register_netdev(qcaspi_devs)) {
+- dev_info(&spi->dev, "Unable to register net device %s\n",
+- qcaspi_devs->name);
++ dev_err(&spi->dev, "Unable to register net device %s\n",
++ qcaspi_devs->name);
+ free_netdev(qcaspi_devs);
+ return -EFAULT;
+ }
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index fdf30bfa403b..e87a779bfcfe 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -959,6 +959,13 @@ static void ravb_adjust_link(struct net_device *ndev)
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool new_state = false;
++ unsigned long flags;
++
++ spin_lock_irqsave(&priv->lock, flags);
++
++ /* Disable TX and RX right over here, if E-MAC change is ignored */
++ if (priv->no_avb_link)
++ ravb_rcv_snd_disable(ndev);
+
+ if (phydev->link) {
+ if (phydev->duplex != priv->duplex) {
+@@ -976,18 +983,21 @@ static void ravb_adjust_link(struct net_device *ndev)
+ ravb_modify(ndev, ECMR, ECMR_TXF, 0);
+ new_state = true;
+ priv->link = phydev->link;
+- if (priv->no_avb_link)
+- ravb_rcv_snd_enable(ndev);
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = -1;
+- if (priv->no_avb_link)
+- ravb_rcv_snd_disable(ndev);
+ }
+
++ /* Enable TX and RX right over here, if E-MAC change is ignored */
++ if (priv->no_avb_link && phydev->link)
++ ravb_rcv_snd_enable(ndev);
++
++ mmiowb();
++ spin_unlock_irqrestore(&priv->lock, flags);
++
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+ }
+@@ -1094,52 +1104,18 @@ static int ravb_get_link_ksettings(struct net_device *ndev,
+ static int ravb_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+ {
+- struct ravb_private *priv = netdev_priv(ndev);
+- unsigned long flags;
+- int error;
+-
+ if (!ndev->phydev)
+ return -ENODEV;
+
+- spin_lock_irqsave(&priv->lock, flags);
+-
+- /* Disable TX and RX */
+- ravb_rcv_snd_disable(ndev);
+-
+- error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
+- if (error)
+- goto error_exit;
+-
+- if (cmd->base.duplex == DUPLEX_FULL)
+- priv->duplex = 1;
+- else
+- priv->duplex = 0;
+-
+- ravb_set_duplex(ndev);
+-
+-error_exit:
+- mdelay(1);
+-
+- /* Enable TX and RX */
+- ravb_rcv_snd_enable(ndev);
+-
+- mmiowb();
+- spin_unlock_irqrestore(&priv->lock, flags);
+-
+- return error;
++ return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+ }
+
+ static int ravb_nway_reset(struct net_device *ndev)
+ {
+- struct ravb_private *priv = netdev_priv(ndev);
+ int error = -ENODEV;
+- unsigned long flags;
+
+- if (ndev->phydev) {
+- spin_lock_irqsave(&priv->lock, flags);
++ if (ndev->phydev)
+ error = phy_start_aneg(ndev->phydev);
+- spin_unlock_irqrestore(&priv->lock, flags);
+- }
+
+ return error;
+ }
+diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
+index 38080e95a82d..abfb9faadbc4 100644
+--- a/drivers/net/ethernet/renesas/sh_eth.c
++++ b/drivers/net/ethernet/renesas/sh_eth.c
+@@ -1821,8 +1821,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
+ {
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
++ unsigned long flags;
+ int new_state = 0;
+
++ spin_lock_irqsave(&mdp->lock, flags);
++
++ /* Disable TX and RX right over here, if E-MAC change is ignored */
++ if (mdp->cd->no_psr || mdp->no_ether_link)
++ sh_eth_rcv_snd_disable(ndev);
++
+ if (phydev->link) {
+ if (phydev->duplex != mdp->duplex) {
+ new_state = 1;
+@@ -1841,18 +1848,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
+ sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
+ new_state = 1;
+ mdp->link = phydev->link;
+- if (mdp->cd->no_psr || mdp->no_ether_link)
+- sh_eth_rcv_snd_enable(ndev);
+ }
+ } else if (mdp->link) {
+ new_state = 1;
+ mdp->link = 0;
+ mdp->speed = 0;
+ mdp->duplex = -1;
+- if (mdp->cd->no_psr || mdp->no_ether_link)
+- sh_eth_rcv_snd_disable(ndev);
+ }
+
++ /* Enable TX and RX right over here, if E-MAC change is ignored */
++ if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
++ sh_eth_rcv_snd_enable(ndev);
++
++ mmiowb();
++ spin_unlock_irqrestore(&mdp->lock, flags);
++
+ if (new_state && netif_msg_link(mdp))
+ phy_print_status(phydev);
+ }
+@@ -1933,39 +1943,10 @@ static int sh_eth_get_link_ksettings(struct net_device *ndev,
+ static int sh_eth_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+ {
+- struct sh_eth_private *mdp = netdev_priv(ndev);
+- unsigned long flags;
+- int ret;
+-
+ if (!ndev->phydev)
+ return -ENODEV;
+
+- spin_lock_irqsave(&mdp->lock, flags);
+-
+- /* disable tx and rx */
+- sh_eth_rcv_snd_disable(ndev);
+-
+- ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
+- if (ret)
+- goto error_exit;
+-
+- if (cmd->base.duplex == DUPLEX_FULL)
+- mdp->duplex = 1;
+- else
+- mdp->duplex = 0;
+-
+- if (mdp->cd->set_duplex)
+- mdp->cd->set_duplex(ndev);
+-
+-error_exit:
+- mdelay(1);
+-
+- /* enable tx and rx */
+- sh_eth_rcv_snd_enable(ndev);
+-
+- spin_unlock_irqrestore(&mdp->lock, flags);
+-
+- return ret;
++ return phy_ethtool_ksettings_set(ndev->phydev, cmd);
+ }
+
+ /* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
+@@ -2156,18 +2137,10 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
+
+ static int sh_eth_nway_reset(struct net_device *ndev)
+ {
+- struct sh_eth_private *mdp = netdev_priv(ndev);
+- unsigned long flags;
+- int ret;
+-
+ if (!ndev->phydev)
+ return -ENODEV;
+
+- spin_lock_irqsave(&mdp->lock, flags);
+- ret = phy_start_aneg(ndev->phydev);
+- spin_unlock_irqrestore(&mdp->lock, flags);
+-
+- return ret;
++ return phy_start_aneg(ndev->phydev);
+ }
+
+ static u32 sh_eth_get_msglevel(struct net_device *ndev)
+diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+index 97035766c291..5790cd61436d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
++++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
+@@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP
+ config DWMAC_SOCFPGA
+ tristate "SOCFPGA dwmac support"
+ default ARCH_SOCFPGA
+- depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
++ depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for ethernet controller on Altera SOCFPGA
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+index 6e359572b9f0..5b3b06a0a3bf 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+@@ -55,6 +55,7 @@ struct socfpga_dwmac {
+ struct device *dev;
+ struct regmap *sys_mgr_base_addr;
+ struct reset_control *stmmac_rst;
++ struct reset_control *stmmac_ocp_rst;
+ void __iomem *splitter_base;
+ bool f2h_ptp_ref_clk;
+ struct tse_pcs pcs;
+@@ -262,8 +263,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+
+ /* Assert reset to the enet controller before changing the phy mode */
+- if (dwmac->stmmac_rst)
+- reset_control_assert(dwmac->stmmac_rst);
++ reset_control_assert(dwmac->stmmac_ocp_rst);
++ reset_control_assert(dwmac->stmmac_rst);
+
+ regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
+@@ -288,8 +289,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
+ /* Deassert reset for the phy configuration to be sampled by
+ * the enet controller, and operation to start in requested mode
+ */
+- if (dwmac->stmmac_rst)
+- reset_control_deassert(dwmac->stmmac_rst);
++ reset_control_deassert(dwmac->stmmac_ocp_rst);
++ reset_control_deassert(dwmac->stmmac_rst);
+ if (phymode == PHY_INTERFACE_MODE_SGMII) {
+ if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
+ dev_err(dwmac->dev, "Unable to initialize TSE PCS");
+@@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
+ goto err_remove_config_dt;
+ }
+
++ dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
++ if (IS_ERR(dwmac->stmmac_ocp_rst)) {
++ ret = PTR_ERR(dwmac->stmmac_ocp_rst);
++ dev_err(dev, "error getting reset control of ocp %d\n", ret);
++ goto err_remove_config_dt;
++ }
++
++ reset_control_deassert(dwmac->stmmac_ocp_rst);
++
+ ret = socfpga_dwmac_parse_data(dwmac, dev);
+ if (ret) {
+ dev_err(dev, "Unable to parse OF data\n");
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index 4bb561856af5..47a096134043 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1387,6 +1387,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
+
+ static int match_first_device(struct device *dev, void *data)
+ {
++ if (dev->parent && dev->parent->of_node)
++ return of_device_is_compatible(dev->parent->of_node,
++ "ti,davinci_mdio");
++
+ return !strncmp(dev_name(dev), "davinci_mdio", 12);
+ }
+
+diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
+index 78a6414c5fd9..7d94a7842557 100644
+--- a/drivers/net/hamradio/bpqether.c
++++ b/drivers/net/hamradio/bpqether.c
+@@ -89,10 +89,6 @@
+ static const char banner[] __initconst = KERN_INFO \
+ "AX.25: bpqether driver version 004\n";
+
+-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
+-
+-static char bpq_eth_addr[6];
+-
+ static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
+ static int bpq_device_event(struct notifier_block *, unsigned long, void *);
+
+@@ -515,8 +511,8 @@ static int bpq_new_device(struct net_device *edev)
+ bpq->ethdev = edev;
+ bpq->axdev = ndev;
+
+- memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
+- memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
++ eth_broadcast_addr(bpq->dest_addr);
++ eth_broadcast_addr(bpq->acpt_addr);
+
+ err = register_netdevice(ndev);
+ if (err)
+diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
+index cb03a6ea076a..328c37e9096d 100644
+--- a/drivers/net/hyperv/rndis_filter.c
++++ b/drivers/net/hyperv/rndis_filter.c
+@@ -1299,6 +1299,7 @@ out:
+ /* setting up multiple channels failed */
+ net_device->max_chn = 1;
+ net_device->num_chn = 1;
++ return 0;
+
+ err_dev_remv:
+ rndis_filter_device_remove(dev, net_device);
+diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
+index 548d9d026a85..5c48bdb6f678 100644
+--- a/drivers/net/ieee802154/at86rf230.c
++++ b/drivers/net/ieee802154/at86rf230.c
+@@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+ static int
+ at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
+ {
+- BUG_ON(!level);
++ WARN_ON(!level);
+ *level = 0xbe;
+ return 0;
+ }
+@@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
+ u16 addr = le16_to_cpu(filt->short_addr);
+
+- dev_vdbg(&lp->spi->dev,
+- "at86rf230_set_hw_addr_filt called for saddr\n");
++ dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
+ __at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
+ __at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
+ }
+@@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
+ u16 pan = le16_to_cpu(filt->pan_id);
+
+- dev_vdbg(&lp->spi->dev,
+- "at86rf230_set_hw_addr_filt called for pan id\n");
++ dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
+ __at86rf230_write(lp, RG_PAN_ID_0, pan);
+ __at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
+ }
+@@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
+ u8 i, addr[8];
+
+ memcpy(addr, &filt->ieee_addr, 8);
+- dev_vdbg(&lp->spi->dev,
+- "at86rf230_set_hw_addr_filt called for IEEE addr\n");
++ dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
+ for (i = 0; i < 8; i++)
+ __at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
+ }
+
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
+- dev_vdbg(&lp->spi->dev,
+- "at86rf230_set_hw_addr_filt called for panc change\n");
++ dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
+ if (filt->pan_coord)
+ at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
+ else
+@@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
+ return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
+ }
+
+-
+ static int
+ at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
+ {
+diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
+index 0d673f7682ee..176395e4b7bb 100644
+--- a/drivers/net/ieee802154/fakelb.c
++++ b/drivers/net/ieee802154/fakelb.c
+@@ -49,7 +49,7 @@ struct fakelb_phy {
+
+ static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
+ {
+- BUG_ON(!level);
++ WARN_ON(!level);
+ *level = 0xbe;
+
+ return 0;
+diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
+index e7f7a1a002ee..58133c9f701b 100644
+--- a/drivers/net/ipvlan/ipvlan_main.c
++++ b/drivers/net/ipvlan/ipvlan_main.c
+@@ -73,10 +73,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
+ {
+ struct ipvl_dev *ipvlan;
+ struct net_device *mdev = port->dev;
+- int err = 0;
++ unsigned int flags;
++ int err;
+
+ ASSERT_RTNL();
+ if (port->mode != nval) {
++ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
++ flags = ipvlan->dev->flags;
++ if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
++ err = dev_change_flags(ipvlan->dev,
++ flags | IFF_NOARP);
++ } else {
++ err = dev_change_flags(ipvlan->dev,
++ flags & ~IFF_NOARP);
++ }
++ if (unlikely(err))
++ goto fail;
++ }
+ if (nval == IPVLAN_MODE_L3S) {
+ /* New mode is L3S */
+ err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
+@@ -84,21 +97,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
+ mdev->l3mdev_ops = &ipvl_l3mdev_ops;
+ mdev->priv_flags |= IFF_L3MDEV_MASTER;
+ } else
+- return err;
++ goto fail;
+ } else if (port->mode == IPVLAN_MODE_L3S) {
+ /* Old mode was L3S */
+ mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
+ ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
+ mdev->l3mdev_ops = NULL;
+ }
+- list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+- if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
+- ipvlan->dev->flags |= IFF_NOARP;
+- else
+- ipvlan->dev->flags &= ~IFF_NOARP;
+- }
+ port->mode = nval;
+ }
++ return 0;
++
++fail:
++ /* Undo the flags changes that have been done so far. */
++ list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
++ flags = ipvlan->dev->flags;
++ if (port->mode == IPVLAN_MODE_L3 ||
++ port->mode == IPVLAN_MODE_L3S)
++ dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
++ else
++ dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
++ }
++
+ return err;
+ }
+
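Annotation: the ipvlan hunk switches the NOARP updates to dev_change_flags() and, on failure, walks the list back with list_for_each_entry_continue_reverse() so every device already touched is restored. A standalone sketch of that "apply forward, undo backward" pattern over an array (list helpers replaced by plain loops for brevity; the flag value is invented):

    #include <stdio.h>

    #define N 4

    static int apply(int *flags, int i, int fail_at)
    {
        if (i == fail_at)
            return -1;               /* simulate dev_change_flags() failing */
        flags[i] |= 0x40;            /* pretend this is IFF_NOARP */
        return 0;
    }

    static int set_mode(int *flags, int fail_at)
    {
        int i, err;

        for (i = 0; i < N; i++) {
            err = apply(flags, i, fail_at);
            if (err)
                goto fail;
        }
        return 0;

    fail:
        /* undo only the entries that were already changed, last to first */
        while (--i >= 0)
            flags[i] &= ~0x40;
        return err;
    }

    int main(void)
    {
        int flags[N] = { 0, 0, 0, 0 };
        int err = set_mode(flags, 2);

        printf("err=%d flags=%d%d%d%d\n", err,
               flags[0], flags[1], flags[2], flags[3]);
        return 0;
    }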
+diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
+index 5f565bd574da..48ba80a8ca5c 100644
+--- a/drivers/net/usb/rtl8150.c
++++ b/drivers/net/usb/rtl8150.c
+@@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
+ (netdev->flags & IFF_ALLMULTI)) {
+ rx_creg &= 0xfffe;
+ rx_creg |= 0x0002;
+- dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
++ dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
+ } else {
+ /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
+ rx_creg &= 0x00fc;
+diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
+index 7a6a1fe79309..05553d252446 100644
+--- a/drivers/net/usb/smsc75xx.c
++++ b/drivers/net/usb/smsc75xx.c
+@@ -82,6 +82,9 @@ static bool turbo_mode = true;
+ module_param(turbo_mode, bool, 0644);
+ MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
+
++static int smsc75xx_link_ok_nopm(struct usbnet *dev);
++static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
++
+ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
+ u32 *data, int in_pm)
+ {
+@@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
+ return -EIO;
+ }
+
++ /* phy workaround for gig link */
++ smsc75xx_phy_gig_workaround(dev);
++
+ smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
+ ADVERTISE_PAUSE_ASYM);
+@@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
+ return -EIO;
+ }
+
++static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
++{
++ struct mii_if_info *mii = &dev->mii;
++ int ret = 0, timeout = 0;
++ u32 buf, link_up = 0;
++
++ /* Set the phy in Gig loopback */
++ smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
++
++ /* Wait for the link up */
++ do {
++ link_up = smsc75xx_link_ok_nopm(dev);
++ usleep_range(10000, 20000);
++ timeout++;
++ } while ((!link_up) && (timeout < 1000));
++
++ if (timeout >= 1000) {
++ netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
++ return -EIO;
++ }
++
++ /* phy reset */
++ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
++ if (ret < 0) {
++ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
++ return ret;
++ }
++
++ buf |= PMT_CTL_PHY_RST;
++
++ ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
++ if (ret < 0) {
++ netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
++ return ret;
++ }
++
++ timeout = 0;
++ do {
++ usleep_range(10000, 20000);
++ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
++ if (ret < 0) {
++ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
++ ret);
++ return ret;
++ }
++ timeout++;
++ } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
++
++ if (timeout >= 100) {
++ netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
++ return -EIO;
++ }
++
++ return 0;
++}
++
+ static int smsc75xx_reset(struct usbnet *dev)
+ {
+ struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index df11bb449988..52ebed1f55a1 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -5923,8 +5923,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+ ath10k_mac_max_vht_nss(vht_mcs_mask)));
+
+ if (changed & IEEE80211_RC_BW_CHANGED) {
+- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
+- sta->addr, bw);
++ enum wmi_phy_mode mode;
++
++ mode = chan_to_phymode(&def);
++ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
++ sta->addr, bw, mode);
++
++ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
++ WMI_PEER_PHYMODE, mode);
++ if (err) {
++ ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
++ sta->addr, mode, err);
++ goto exit;
++ }
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_CHAN_WIDTH, bw);
+@@ -5965,6 +5976,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
+ sta->addr);
+ }
+
++exit:
+ mutex_unlock(&ar->conf_mutex);
+ }
+
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
+index bab876cf25fe..d0e05aa437e3 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.h
++++ b/drivers/net/wireless/ath/ath10k/wmi.h
+@@ -6002,6 +6002,7 @@ enum wmi_peer_param {
+ WMI_PEER_NSS = 0x5,
+ WMI_PEER_USE_4ADDR = 0x6,
+ WMI_PEER_DEBUG = 0xa,
++ WMI_PEER_PHYMODE = 0xd,
+ WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
+ };
+
+diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+index eccd25febfe6..4c28b04ea605 100644
+--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+@@ -4245,6 +4245,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus) {
++ /* Stop watchdog task */
++ if (bus->watchdog_tsk) {
++ send_sig(SIGTERM, bus->watchdog_tsk, 1);
++ kthread_stop(bus->watchdog_tsk);
++ bus->watchdog_tsk = NULL;
++ }
++
+ /* De-register interrupt handler */
+ brcmf_sdiod_intr_unregister(bus->sdiodev);
+
+diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
+index d5553c47014f..5d823e965883 100644
+--- a/drivers/nfc/pn533/usb.c
++++ b/drivers/nfc/pn533/usb.c
+@@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb)
+ struct sk_buff *skb = NULL;
+
+ if (!urb->status) {
+- skb = alloc_skb(urb->actual_length, GFP_KERNEL);
++ skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
+ if (!skb) {
+ nfc_err(&phy->udev->dev, "failed to alloc memory\n");
+ } else {
+@@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
+
+ if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
+ /* request for response for sent packet directly */
+- rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
++ rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
+ if (rc)
+ goto error;
+ } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 38c128f230e7..3a63d58d2ca9 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1016,7 +1016,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+
+ status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+ (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
+- (void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
++ (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
+ 0, &cmd.result, timeout);
+ if (status >= 0) {
+ if (put_user(cmd.result, &ucmd->result))
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 240b0d628222..5fa7856f6b34 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -598,6 +598,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+ }
+
+ ctrl->csts = NVME_CSTS_RDY;
++
++ /*
++ * Controllers that are not yet enabled should not really enforce the
++ * keep alive timeout, but we still want to track a timeout and cleanup
++ * in case a host died before it enabled the controller. Hence, simply
++ * reset the keep alive timer when the controller is enabled.
++ */
++ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ }
+
+ static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
+diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
+index 2afafd5d8915..635886e4835c 100644
+--- a/drivers/nvmem/core.c
++++ b/drivers/nvmem/core.c
+@@ -865,6 +865,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
+ return cell;
+ }
+
++ /* NULL cell_id only allowed for device tree; invalid otherwise */
++ if (!cell_id)
++ return ERR_PTR(-EINVAL);
++
+ return nvmem_cell_get_from_list(cell_id);
+ }
+ EXPORT_SYMBOL_GPL(nvmem_cell_get);
+diff --git a/drivers/pci/host/pci-ftpci100.c b/drivers/pci/host/pci-ftpci100.c
+index 4ea7d2ebcc5c..4e6b21931514 100644
+--- a/drivers/pci/host/pci-ftpci100.c
++++ b/drivers/pci/host/pci-ftpci100.c
+@@ -353,11 +353,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ dev_err(p->dev, "failed to get parent IRQ\n");
++ of_node_put(intc);
+ return irq ?: -EINVAL;
+ }
+
+ p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
+ &faraday_pci_irqdomain_ops, p);
++ of_node_put(intc);
+ if (!p->irqdomain) {
+ dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
+ return -EINVAL;
+diff --git a/drivers/pci/host/pci-host-common.c b/drivers/pci/host/pci-host-common.c
+index 44a47d4f0b8f..148896f73c06 100644
+--- a/drivers/pci/host/pci-host-common.c
++++ b/drivers/pci/host/pci-host-common.c
+@@ -45,7 +45,7 @@ static int gen_pci_parse_request_of_pci_ranges(struct device *dev,
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+- err = pci_remap_iospace(res, iobase);
++ err = devm_pci_remap_iospace(dev, res, iobase);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, res);
+diff --git a/drivers/pci/host/pci-versatile.c b/drivers/pci/host/pci-versatile.c
+index d417acab0ecf..aff4cfb555fb 100644
+--- a/drivers/pci/host/pci-versatile.c
++++ b/drivers/pci/host/pci-versatile.c
+@@ -89,7 +89,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
+- err = pci_remap_iospace(res, iobase);
++ err = devm_pci_remap_iospace(dev, res, iobase);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, res);
+diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c
+index 8f44a7d14bff..41edce16a07c 100644
+--- a/drivers/pci/host/pcie-rcar.c
++++ b/drivers/pci/host/pcie-rcar.c
+@@ -1105,7 +1105,7 @@ static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
+ struct resource *res = win->res;
+
+ if (resource_type(res) == IORESOURCE_IO) {
+- err = pci_remap_iospace(res, iobase);
++ err = devm_pci_remap_iospace(dev, res, iobase);
+ if (err) {
+ dev_warn(dev, "error %d: failed to map resource %pR\n",
+ err, res);
+diff --git a/drivers/pci/host/pcie-xilinx-nwl.c b/drivers/pci/host/pcie-xilinx-nwl.c
+index 65dea98b2643..dd527ea558d7 100644
+--- a/drivers/pci/host/pcie-xilinx-nwl.c
++++ b/drivers/pci/host/pcie-xilinx-nwl.c
+@@ -561,7 +561,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
+ PCI_NUM_INTX,
+ &legacy_domain_ops,
+ pcie);
+-
++ of_node_put(legacy_intc_node);
+ if (!pcie->legacy_irq_domain) {
+ dev_err(dev, "failed to create IRQ domain\n");
+ return -ENOMEM;
+diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
+index 94e13cb8608f..29f024f0ed7f 100644
+--- a/drivers/pci/host/pcie-xilinx.c
++++ b/drivers/pci/host/pcie-xilinx.c
+@@ -511,6 +511,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+ port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ &intx_domain_ops,
+ port);
++ of_node_put(pcie_intc_node);
+ if (!port->leg_domain) {
+ dev_err(dev, "Failed to get a INTx IRQ domain\n");
+ return -ENODEV;
+diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
+index 7b0e97be9063..591f2e05ab1c 100644
+--- a/drivers/pci/hotplug/pci_hotplug_core.c
++++ b/drivers/pci/hotplug/pci_hotplug_core.c
+@@ -452,8 +452,17 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
+ list_add(&slot->slot_list, &pci_hotplug_slot_list);
+
+ result = fs_add_slot(pci_slot);
++ if (result)
++ goto err_list_del;
++
+ kobject_uevent(&pci_slot->kobj, KOBJ_ADD);
+ dbg("Added slot %s to the list\n", name);
++ goto out;
++
++err_list_del:
++ list_del(&slot->slot_list);
++ pci_slot->hotplug = NULL;
++ pci_destroy_slot(pci_slot);
+ out:
+ mutex_unlock(&pci_hp_mutex);
+ return result;
+diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
+index e7d6cfaf3865..9fc4357c3db9 100644
+--- a/drivers/pci/hotplug/pciehp.h
++++ b/drivers/pci/hotplug/pciehp.h
+@@ -132,6 +132,7 @@ int pciehp_unconfigure_device(struct slot *p_slot);
+ void pciehp_queue_pushbutton_work(struct work_struct *work);
+ struct controller *pcie_init(struct pcie_device *dev);
+ int pcie_init_notification(struct controller *ctrl);
++void pcie_shutdown_notification(struct controller *ctrl);
+ int pciehp_enable_slot(struct slot *p_slot);
+ int pciehp_disable_slot(struct slot *p_slot);
+ void pcie_reenable_notification(struct controller *ctrl);
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 1288289cc85d..c38e392d63e4 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -76,6 +76,12 @@ static int reset_slot(struct hotplug_slot *slot, int probe);
+ */
+ static void release_slot(struct hotplug_slot *hotplug_slot)
+ {
++ struct slot *slot = hotplug_slot->private;
++
++ /* queued work needs hotplug_slot name */
++ cancel_delayed_work(&slot->work);
++ drain_workqueue(slot->wq);
++
+ kfree(hotplug_slot->ops);
+ kfree(hotplug_slot->info);
+ kfree(hotplug_slot);
+@@ -278,6 +284,7 @@ static void pciehp_remove(struct pcie_device *dev)
+ {
+ struct controller *ctrl = get_service_data(dev);
+
++ pcie_shutdown_notification(ctrl);
+ cleanup_slot(ctrl);
+ pciehp_release_ctrl(ctrl);
+ }
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 46c2ee2caf28..2fa830727362 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -562,8 +562,6 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ {
+ struct controller *ctrl = (struct controller *)dev_id;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+- struct pci_bus *subordinate = pdev->subordinate;
+- struct pci_dev *dev;
+ struct slot *slot = ctrl->slot;
+ u16 status, events;
+ u8 present;
+@@ -611,14 +609,9 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ wake_up(&ctrl->queue);
+ }
+
+- if (subordinate) {
+- list_for_each_entry(dev, &subordinate->devices, bus_list) {
+- if (dev->ignore_hotplug) {
+- ctrl_dbg(ctrl, "ignoring hotplug event %#06x (%s requested no hotplug)\n",
+- events, pci_name(dev));
+- return IRQ_HANDLED;
+- }
+- }
++ if (pdev->ignore_hotplug) {
++ ctrl_dbg(ctrl, "ignoring hotplug event %#06x\n", events);
++ return IRQ_HANDLED;
+ }
+
+ /* Check Attention Button Pressed */
+@@ -789,7 +782,7 @@ int pcie_init_notification(struct controller *ctrl)
+ return 0;
+ }
+
+-static void pcie_shutdown_notification(struct controller *ctrl)
++void pcie_shutdown_notification(struct controller *ctrl)
+ {
+ if (ctrl->notification_enabled) {
+ pcie_disable_notification(ctrl);
+@@ -824,7 +817,7 @@ abort:
+ static void pcie_cleanup_slot(struct controller *ctrl)
+ {
+ struct slot *slot = ctrl->slot;
+- cancel_delayed_work(&slot->work);
++
+ destroy_workqueue(slot->wq);
+ kfree(slot);
+ }
+@@ -912,7 +905,6 @@ abort:
+
+ void pciehp_release_ctrl(struct controller *ctrl)
+ {
+- pcie_shutdown_notification(ctrl);
+ pcie_cleanup_slot(ctrl);
+ kfree(ctrl);
+ }
+diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
+index 62a0677b32f1..22924629e64a 100644
+--- a/drivers/pci/pci.c
++++ b/drivers/pci/pci.c
+@@ -3446,6 +3446,44 @@ void pci_unmap_iospace(struct resource *res)
+ }
+ EXPORT_SYMBOL(pci_unmap_iospace);
+
++static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
++{
++ struct resource **res = ptr;
++
++ pci_unmap_iospace(*res);
++}
++
++/**
++ * devm_pci_remap_iospace - Managed pci_remap_iospace()
++ * @dev: Generic device to remap IO address for
++ * @res: Resource describing the I/O space
++ * @phys_addr: physical address of range to be mapped
++ *
++ * Managed pci_remap_iospace(). Map is automatically unmapped on driver
++ * detach.
++ */
++int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
++ phys_addr_t phys_addr)
++{
++ const struct resource **ptr;
++ int error;
++
++ ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
++ if (!ptr)
++ return -ENOMEM;
++
++ error = pci_remap_iospace(res, phys_addr);
++ if (error) {
++ devres_free(ptr);
++ } else {
++ *ptr = res;
++ devres_add(dev, ptr);
++ }
++
++ return error;
++}
++EXPORT_SYMBOL(devm_pci_remap_iospace);
++
+ /**
+ * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
+ * @dev: Generic device to remap IO address for
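The pci-versatile and pcie-rcar hunks above show the intended call site for the new helper. As a rough sketch only (the function and variable names below are invented, not taken from the patch), a host-bridge driver's range-parsing loop now maps its I/O window like this and relies on devres to unmap it when the driver detaches:

#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/pci.h>

static int example_map_io_window(struct device *dev, struct resource *res,
				 phys_addr_t iobase)
{
	int err;

	if (resource_type(res) != IORESOURCE_IO)
		return 0;

	/* The devres action registered above unmaps this on driver detach. */
	err = devm_pci_remap_iospace(dev, res, iobase);
	if (err)
		dev_warn(dev, "error %d: failed to map resource %pR\n",
			 err, res);

	return err;
}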
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index 4bccaf688aad..e23bfd9845b1 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -1560,6 +1560,10 @@ static void pci_configure_mps(struct pci_dev *dev)
+ if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
+ return;
+
++ /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
++ if (dev->is_virtfn)
++ return;
++
+ mps = pcie_get_mps(dev);
+ p_mps = pcie_get_mps(bridge);
+
+diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
+index eb23311bc70c..8b79c2f7931f 100644
+--- a/drivers/perf/xgene_pmu.c
++++ b/drivers/perf/xgene_pmu.c
+@@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
+ case PMU_TYPE_IOB:
+ return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
+ case PMU_TYPE_IOB_SLOW:
+- return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id);
++ return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
+ case PMU_TYPE_MCB:
+ return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
+ case PMU_TYPE_MC:
+diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+index 35c17653c694..87618a4e90e4 100644
+--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
++++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+@@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev,
+ const struct nsp_pin_function *func;
+ const struct nsp_pin_group *grp;
+
+- if (grp_select > pinctrl->num_groups ||
+- func_select > pinctrl->num_functions)
++ if (grp_select >= pinctrl->num_groups ||
++ func_select >= pinctrl->num_functions)
+ return -EINVAL;
+
+ func = &pinctrl->functions[func_select];
+@@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
+ return PTR_ERR(pinctrl->base0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
++ if (!res)
++ return -EINVAL;
+ pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
+ resource_size(res));
+ if (!pinctrl->base1) {
+diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
+index d84761822243..103aaab41357 100644
+--- a/drivers/pinctrl/pinctrl-ingenic.c
++++ b/drivers/pinctrl/pinctrl-ingenic.c
+@@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
+ ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
+ } else {
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
+- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input);
++ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
+ }
+
+diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
+index 82f05c4b8c52..ae7a49ade414 100644
+--- a/drivers/s390/cio/vfio_ccw_drv.c
++++ b/drivers/s390/cio/vfio_ccw_drv.c
+@@ -176,6 +176,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
+ {
+ struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
+ unsigned long flags;
++ int rc = -EAGAIN;
+
+ spin_lock_irqsave(sch->lock, flags);
+ if (!device_is_registered(&sch->dev))
+@@ -186,6 +187,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
+
+ if (cio_update_schib(sch)) {
+ vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
++ rc = 0;
+ goto out_unlock;
+ }
+
+@@ -194,11 +196,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
+ private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
+ VFIO_CCW_STATE_STANDBY;
+ }
++ rc = 0;
+
+ out_unlock:
+ spin_unlock_irqrestore(sch->lock, flags);
+
+- return 0;
++ return rc;
+ }
+
+ static struct css_device_id vfio_ccw_sch_ids[] = {
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
+index 382edb79a0de..56bcdd412d26 100644
+--- a/drivers/scsi/qedf/qedf_main.c
++++ b/drivers/scsi/qedf/qedf_main.c
+@@ -3240,6 +3240,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
+
+ init_completion(&qedf->flogi_compl);
+
++ status = qed_ops->common->update_drv_state(qedf->cdev, true);
++ if (status)
++ QEDF_ERR(&(qedf->dbg_ctx),
++ "Failed to send drv state to MFW.\n");
++
+ memset(&link_params, 0, sizeof(struct qed_link_params));
+ link_params.link_up = true;
+ status = qed_ops->common->set_link(qedf->cdev, &link_params);
+@@ -3288,6 +3293,7 @@ static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ static void __qedf_remove(struct pci_dev *pdev, int mode)
+ {
+ struct qedf_ctx *qedf;
++ int rc;
+
+ if (!pdev) {
+ QEDF_ERR(NULL, "pdev is NULL.\n");
+@@ -3382,6 +3388,12 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
+ qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
+ pci_set_drvdata(pdev, NULL);
+ }
++
++ rc = qed_ops->common->update_drv_state(qedf->cdev, false);
++ if (rc)
++ QEDF_ERR(&(qedf->dbg_ctx),
++ "Failed to send drv state to MFW.\n");
++
+ qed_ops->common->slowpath_stop(qedf->cdev);
+ qed_ops->common->remove(qedf->cdev);
+
+diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
+index 1573749fe615..e7daadc089fc 100644
+--- a/drivers/scsi/qedi/qedi_main.c
++++ b/drivers/scsi/qedi/qedi_main.c
+@@ -2087,6 +2087,7 @@ kset_free:
+ static void __qedi_remove(struct pci_dev *pdev, int mode)
+ {
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
++ int rval;
+
+ if (qedi->tmf_thread) {
+ flush_workqueue(qedi->tmf_thread);
+@@ -2116,6 +2117,10 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
+ if (mode == QEDI_MODE_NORMAL)
+ qedi_free_iscsi_pf_param(qedi);
+
++ rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
++ if (rval)
++ QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n");
++
+ if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
+ qedi_ops->common->slowpath_stop(qedi->cdev);
+ qedi_ops->common->remove(qedi->cdev);
+@@ -2390,6 +2395,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
+ if (qedi_setup_boot_info(qedi))
+ QEDI_ERR(&qedi->dbg_ctx,
+ "No iSCSI boot target configured\n");
++
++ rc = qedi_ops->common->update_drv_state(qedi->cdev, true);
++ if (rc)
++ QEDI_ERR(&qedi->dbg_ctx,
++ "Failed to send drv state to MFW\n");
++
+ }
+
+ return 0;
+diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
+index 36f59a1be7e9..61389bdc7926 100644
+--- a/drivers/scsi/xen-scsifront.c
++++ b/drivers/scsi/xen-scsifront.c
+@@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
+ static int scsifront_sdev_configure(struct scsi_device *sdev)
+ {
+ struct vscsifrnt_info *info = shost_priv(sdev->host);
++ int err;
+
+- if (info && current == info->curr)
+- xenbus_printf(XBT_NIL, info->dev->nodename,
++ if (info && current == info->curr) {
++ err = xenbus_printf(XBT_NIL, info->dev->nodename,
+ info->dev_state_path, "%d", XenbusStateConnected);
++ if (err) {
++ xenbus_dev_error(info->dev, err,
++ "%s: writing dev_state_path", __func__);
++ return err;
++ }
++ }
+
+ return 0;
+ }
+@@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
+ static void scsifront_sdev_destroy(struct scsi_device *sdev)
+ {
+ struct vscsifrnt_info *info = shost_priv(sdev->host);
++ int err;
+
+- if (info && current == info->curr)
+- xenbus_printf(XBT_NIL, info->dev->nodename,
++ if (info && current == info->curr) {
++ err = xenbus_printf(XBT_NIL, info->dev->nodename,
+ info->dev_state_path, "%d", XenbusStateClosed);
++ if (err)
++ xenbus_dev_error(info->dev, err,
++ "%s: writing dev_state_path", __func__);
++ }
+ }
+
+ static struct scsi_host_template scsifront_sht = {
+@@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
+
+ if (scsi_add_device(info->host, chn, tgt, lun)) {
+ dev_err(&dev->dev, "scsi_add_device\n");
+- xenbus_printf(XBT_NIL, dev->nodename,
++ err = xenbus_printf(XBT_NIL, dev->nodename,
+ info->dev_state_path,
+ "%d", XenbusStateClosed);
++ if (err)
++ xenbus_dev_error(dev, err,
++ "%s: writing dev_state_path", __func__);
+ }
+ break;
+ case VSCSIFRONT_OP_DEL_LUN:
+@@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
+ }
+ break;
+ case VSCSIFRONT_OP_READD_LUN:
+- if (device_state == XenbusStateConnected)
+- xenbus_printf(XBT_NIL, dev->nodename,
++ if (device_state == XenbusStateConnected) {
++ err = xenbus_printf(XBT_NIL, dev->nodename,
+ info->dev_state_path,
+ "%d", XenbusStateConnected);
++ if (err)
++ xenbus_dev_error(dev, err,
++ "%s: writing dev_state_path", __func__);
++ }
+ break;
+ default:
+ break;
+diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
+index 1613ccf0c059..c54d229f8da4 100644
+--- a/drivers/soc/imx/gpc.c
++++ b/drivers/soc/imx/gpc.c
+@@ -27,9 +27,16 @@
+ #define GPC_PGC_SW2ISO_SHIFT 0x8
+ #define GPC_PGC_SW_SHIFT 0x0
+
++#define GPC_PGC_PCI_PDN 0x200
++#define GPC_PGC_PCI_SR 0x20c
++
+ #define GPC_PGC_GPU_PDN 0x260
+ #define GPC_PGC_GPU_PUPSCR 0x264
+ #define GPC_PGC_GPU_PDNSCR 0x268
++#define GPC_PGC_GPU_SR 0x26c
++
++#define GPC_PGC_DISP_PDN 0x240
++#define GPC_PGC_DISP_SR 0x24c
+
+ #define GPU_VPU_PUP_REQ BIT(1)
+ #define GPU_VPU_PDN_REQ BIT(0)
+@@ -303,10 +310,24 @@ static const struct of_device_id imx_gpc_dt_ids[] = {
+ { }
+ };
+
++static const struct regmap_range yes_ranges[] = {
++ regmap_reg_range(GPC_CNTR, GPC_CNTR),
++ regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR),
++ regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR),
++ regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR),
++};
++
++static const struct regmap_access_table access_table = {
++ .yes_ranges = yes_ranges,
++ .n_yes_ranges = ARRAY_SIZE(yes_ranges),
++};
++
+ static const struct regmap_config imx_gpc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
++ .rd_table = &access_table,
++ .wr_table = &access_table,
+ .max_register = 0x2ac,
+ };
+
+diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
+index f4e3bd40c72e..6ef18cf8f243 100644
+--- a/drivers/soc/imx/gpcv2.c
++++ b/drivers/soc/imx/gpcv2.c
+@@ -39,10 +39,15 @@
+
+ #define GPC_M4_PU_PDN_FLG 0x1bc
+
+-
+-#define PGC_MIPI 4
+-#define PGC_PCIE 5
+-#define PGC_USB_HSIC 8
++/*
++ * The PGC offset values in Reference Manual
++ * (Rev. 1, 01/2018 and the older ones) GPC chapter's
++ * GPC_PGC memory map are incorrect, below offset
++ * values are from design RTL.
++ */
++#define PGC_MIPI 16
++#define PGC_PCIE 17
++#define PGC_USB_HSIC 20
+ #define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
+ #define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
+
+diff --git a/drivers/staging/typec/tcpm.c b/drivers/staging/typec/tcpm.c
+index 8af62e74d54c..f237e31926f4 100644
+--- a/drivers/staging/typec/tcpm.c
++++ b/drivers/staging/typec/tcpm.c
+@@ -2479,7 +2479,8 @@ static void run_state_machine(struct tcpm_port *port)
+ tcpm_port_is_sink(port) &&
+ time_is_after_jiffies(port->delayed_runtime)) {
+ tcpm_set_state(port, SNK_DISCOVERY,
+- port->delayed_runtime - jiffies);
++ jiffies_to_msecs(port->delayed_runtime -
++ jiffies));
+ break;
+ }
+ tcpm_set_state(port, unattached_state(port), 0);
+diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
+index 899e8fe5e00f..9e26c530d2dd 100644
+--- a/drivers/tty/pty.c
++++ b/drivers/tty/pty.c
+@@ -625,7 +625,7 @@ int ptm_open_peer(struct file *master, struct tty_struct *tty, int flags)
+ if (tty->driver != ptm_driver)
+ return -EIO;
+
+- fd = get_unused_fd_flags(0);
++ fd = get_unused_fd_flags(flags);
+ if (fd < 0) {
+ retval = fd;
+ goto err;
+diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
+index ec965ac5f1f5..3c0d386dc62f 100644
+--- a/drivers/usb/dwc2/core.h
++++ b/drivers/usb/dwc2/core.h
+@@ -872,6 +872,7 @@ struct dwc2_hregs_backup {
+ * @frame_list_sz: Frame list size
+ * @desc_gen_cache: Kmem cache for generic descriptors
+ * @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors
++ * @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buf
+ *
+ * These are for peripheral mode:
+ *
+@@ -1004,6 +1005,8 @@ struct dwc2_hsotg {
+ u32 frame_list_sz;
+ struct kmem_cache *desc_gen_cache;
+ struct kmem_cache *desc_hsisoc_cache;
++ struct kmem_cache *unaligned_cache;
++#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
+
+ #ifdef DEBUG
+ u32 frrem_samples;
+diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
+index 6ef001a83fe2..e164439b2154 100644
+--- a/drivers/usb/dwc2/gadget.c
++++ b/drivers/usb/dwc2/gadget.c
+@@ -848,6 +848,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
+ u32 index;
+ u32 maxsize = 0;
+ u32 mask = 0;
++ u8 pid = 0;
+
+ maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
+ if (len > maxsize) {
+@@ -893,7 +894,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
+ ((len << DEV_DMA_NBYTES_SHIFT) & mask));
+
+ if (hs_ep->dir_in) {
+- desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
++ if (len)
++ pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
++ else
++ pid = 1;
++ desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
+ DEV_DMA_ISOC_PID_MASK) |
+ ((len % hs_ep->ep.maxpacket) ?
+ DEV_DMA_SHORT : 0) |
+@@ -932,6 +937,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
+ u32 ctrl;
+
+ if (list_empty(&hs_ep->queue)) {
++ hs_ep->target_frame = TARGET_FRAME_INITIAL;
+ dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
+ return;
+ }
+@@ -4716,9 +4722,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
+ }
+
+ ret = usb_add_gadget_udc(dev, &hsotg->gadget);
+- if (ret)
++ if (ret) {
++ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
++ hsotg->ctrl_req);
+ return ret;
+-
++ }
+ dwc2_hsotg_dump(hsotg);
+
+ return 0;
+@@ -4731,6 +4739,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg, int irq)
+ int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
+ {
+ usb_del_gadget_udc(&hsotg->gadget);
++ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
+
+ return 0;
+ }
+diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
+index 46d3b0fc00c5..fa20ec43a187 100644
+--- a/drivers/usb/dwc2/hcd.c
++++ b/drivers/usb/dwc2/hcd.c
+@@ -1544,11 +1544,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
+ }
+
+ if (hsotg->params.host_dma) {
+- dwc2_writel((u32)chan->xfer_dma,
+- hsotg->regs + HCDMA(chan->hc_num));
++ dma_addr_t dma_addr;
++
++ if (chan->align_buf) {
++ if (dbg_hc(chan))
++ dev_vdbg(hsotg->dev, "align_buf\n");
++ dma_addr = chan->align_buf;
++ } else {
++ dma_addr = chan->xfer_dma;
++ }
++ dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
++
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
+- (unsigned long)chan->xfer_dma, chan->hc_num);
++ (unsigned long)dma_addr, chan->hc_num);
+ }
+
+ /* Start the split */
+@@ -2604,6 +2613,35 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
+ }
+ }
+
++static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
++ struct dwc2_qh *qh,
++ struct dwc2_host_chan *chan)
++{
++ if (!hsotg->unaligned_cache ||
++ chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
++ return -ENOMEM;
++
++ if (!qh->dw_align_buf) {
++ qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
++ GFP_ATOMIC | GFP_DMA);
++ if (!qh->dw_align_buf)
++ return -ENOMEM;
++ }
++
++ qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
++ DWC2_KMEM_UNALIGNED_BUF_SIZE,
++ DMA_FROM_DEVICE);
++
++ if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
++ dev_err(hsotg->dev, "can't map align_buf\n");
++ chan->align_buf = 0;
++ return -EINVAL;
++ }
++
++ chan->align_buf = qh->dw_align_buf_dma;
++ return 0;
++}
++
+ #define DWC2_USB_DMA_ALIGN 4
+
+ static void dwc2_free_dma_aligned_buffer(struct urb *urb)
+@@ -2783,6 +2821,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+ /* Set the transfer attributes */
+ dwc2_hc_init_xfer(hsotg, chan, qtd);
+
++ /* For non-dword aligned buffers */
++ if (hsotg->params.host_dma && qh->do_split &&
++ chan->ep_is_in && (chan->xfer_dma & 0x3)) {
++ dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
++ if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
++ dev_err(hsotg->dev,
++ "Failed to allocate memory to handle non-aligned buffer\n");
++ /* Add channel back to free list */
++ chan->align_buf = 0;
++ chan->multi_count = 0;
++ list_add_tail(&chan->hc_list_entry,
++ &hsotg->free_hc_list);
++ qtd->in_process = 0;
++ qh->channel = NULL;
++ return -ENOMEM;
++ }
++ } else {
++ /*
++ * We assume that DMA is always aligned in non-split
++ * case or split out case. Warn if not.
++ */
++ WARN_ON_ONCE(hsotg->params.host_dma &&
++ (chan->xfer_dma & 0x3));
++ chan->align_buf = 0;
++ }
++
+ if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
+ chan->ep_type == USB_ENDPOINT_XFER_ISOC)
+ /*
+@@ -5277,6 +5341,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
+ }
+ }
+
++ if (hsotg->params.host_dma) {
++ /*
++ * Create kmem caches to handle non-aligned buffer
++ * in Buffer DMA mode.
++ */
++ hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
++ DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
++ SLAB_CACHE_DMA, NULL);
++ if (!hsotg->unaligned_cache)
++ dev_err(hsotg->dev,
++ "unable to create dwc2 unaligned cache\n");
++ }
++
+ hsotg->otg_port = 1;
+ hsotg->frame_list = NULL;
+ hsotg->frame_list_dma = 0;
+@@ -5311,8 +5388,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
+ return 0;
+
+ error4:
+- kmem_cache_destroy(hsotg->desc_gen_cache);
++ kmem_cache_destroy(hsotg->unaligned_cache);
+ kmem_cache_destroy(hsotg->desc_hsisoc_cache);
++ kmem_cache_destroy(hsotg->desc_gen_cache);
+ error3:
+ dwc2_hcd_release(hsotg);
+ error2:
+@@ -5353,8 +5431,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
+ usb_remove_hcd(hcd);
+ hsotg->priv = NULL;
+
+- kmem_cache_destroy(hsotg->desc_gen_cache);
++ kmem_cache_destroy(hsotg->unaligned_cache);
+ kmem_cache_destroy(hsotg->desc_hsisoc_cache);
++ kmem_cache_destroy(hsotg->desc_gen_cache);
+
+ dwc2_hcd_release(hsotg);
+ usb_put_hcd(hcd);
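Taken together, the hcd.c, hcd_intr.c and hcd_queue.c hunks add a bounce-buffer scheme for split-IN transfers whose URB buffer is not DWORD aligned. The sketch below only illustrates that allocate/map/copy-back/free lifecycle with invented names; the real code hangs the buffer off the QH and host-channel structures as shown in the hunks. The backing cache is the one created with SLAB_CACHE_DMA in the dwc2_hcd_init() hunk, so allocations come from DMA-addressable memory.

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string.h>

#define EXAMPLE_BOUNCE_SIZE 1024	/* mirrors DWC2_KMEM_UNALIGNED_BUF_SIZE */

/* Allocate a DMA-able bounce buffer and map it for device-to-memory DMA. */
static void *example_bounce_map(struct device *dev, struct kmem_cache *cache,
				dma_addr_t *dma)
{
	void *buf = kmem_cache_alloc(cache, GFP_ATOMIC | GFP_DMA);

	if (!buf)
		return NULL;

	*dma = dma_map_single(dev, buf, EXAMPLE_BOUNCE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kmem_cache_free(cache, buf);
		return NULL;
	}

	/* Program the controller with *dma instead of the unaligned URB address. */
	return buf;
}

/* On completion: unmap, copy the received bytes back, and recycle the buffer. */
static void example_bounce_finish(struct device *dev, struct kmem_cache *cache,
				  void *buf, dma_addr_t dma,
				  void *urb_buf, size_t actual_len)
{
	dma_unmap_single(dev, dma, EXAMPLE_BOUNCE_SIZE, DMA_FROM_DEVICE);
	memcpy(urb_buf, buf, actual_len);
	kmem_cache_free(cache, buf);
}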
+diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
+index 11c3c145b793..461bdc67df6f 100644
+--- a/drivers/usb/dwc2/hcd.h
++++ b/drivers/usb/dwc2/hcd.h
+@@ -75,6 +75,8 @@ struct dwc2_qh;
+ * (micro)frame
+ * @xfer_buf: Pointer to current transfer buffer position
+ * @xfer_dma: DMA address of xfer_buf
++ * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not
++ * DWORD aligned
+ * @xfer_len: Total number of bytes to transfer
+ * @xfer_count: Number of bytes transferred so far
+ * @start_pkt_count: Packet count at start of transfer
+@@ -132,6 +134,7 @@ struct dwc2_host_chan {
+
+ u8 *xfer_buf;
+ dma_addr_t xfer_dma;
++ dma_addr_t align_buf;
+ u32 xfer_len;
+ u32 xfer_count;
+ u16 start_pkt_count;
+@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
+ * is tightly packed.
+ * @ls_duration_us: Duration on the low speed bus schedule.
+ * @ntd: Actual number of transfer descriptors in a list
++ * @dw_align_buf: Used instead of original buffer if its physical address
++ * is not dword-aligned
++ * @dw_align_buf_dma: DMA address for dw_align_buf
+ * @qtd_list: List of QTDs for this QH
+ * @channel: Host channel currently processing transfers for this QH
+ * @qh_list_entry: Entry for QH in either the periodic or non-periodic
+@@ -345,6 +351,8 @@ struct dwc2_qh {
+ struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
+ u32 ls_start_schedule_slice;
+ u16 ntd;
++ u8 *dw_align_buf;
++ dma_addr_t dw_align_buf_dma;
+ struct list_head qtd_list;
+ struct dwc2_host_chan *channel;
+ struct list_head qh_list_entry;
+diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
+index 28a8210710b1..17905ba1139c 100644
+--- a/drivers/usb/dwc2/hcd_intr.c
++++ b/drivers/usb/dwc2/hcd_intr.c
+@@ -923,14 +923,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
+ frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
+ len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
+ DWC2_HC_XFER_COMPLETE, NULL);
+- if (!len) {
++ if (!len && !qtd->isoc_split_offset) {
+ qtd->complete_split = 0;
+- qtd->isoc_split_offset = 0;
+ return 0;
+ }
+
+ frame_desc->actual_length += len;
+
++ if (chan->align_buf) {
++ dev_vdbg(hsotg->dev, "non-aligned buffer\n");
++ dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
++ DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
++ memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
++ chan->qh->dw_align_buf, len);
++ }
++
+ qtd->isoc_split_offset += len;
+
+ hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
+diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
+index 7f51a77bc5cc..56e61220efc6 100644
+--- a/drivers/usb/dwc2/hcd_queue.c
++++ b/drivers/usb/dwc2/hcd_queue.c
+@@ -1632,6 +1632,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
+
+ if (qh->desc_list)
+ dwc2_hcd_qh_free_ddma(hsotg, qh);
++ else if (hsotg->unaligned_cache && qh->dw_align_buf)
++ kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
++
+ kfree(qh);
+ }
+
+diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
+index fbfc09ebd2ec..acf41ba3638d 100644
+--- a/drivers/usb/dwc3/dwc3-of-simple.c
++++ b/drivers/usb/dwc3/dwc3-of-simple.c
+@@ -132,8 +132,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
+
+ of_platform_depopulate(dev);
+
+- pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
++ pm_runtime_put_noidle(dev);
++ pm_runtime_set_suspended(dev);
+
+ return 0;
+ }
+diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
+index bc5e91d4fac8..09c0454833ad 100644
+--- a/drivers/usb/dwc3/dwc3-pci.c
++++ b/drivers/usb/dwc3/dwc3-pci.c
+@@ -41,6 +41,7 @@
+ #define PCI_DEVICE_ID_INTEL_GLK 0x31aa
+ #define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
+ #define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
++#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
+
+ #define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
+ #define PCI_INTEL_BXT_FUNC_PMU_PWR 4
+@@ -273,6 +274,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
+ { } /* Terminating Entry */
+ };
+diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
+index 940de04ed72a..b805962f5154 100644
+--- a/drivers/usb/gadget/composite.c
++++ b/drivers/usb/gadget/composite.c
+@@ -1720,6 +1720,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ */
+ if (w_value && !f->get_alt)
+ break;
++
++ spin_lock(&cdev->lock);
+ value = f->set_alt(f, w_index, w_value);
+ if (value == USB_GADGET_DELAYED_STATUS) {
+ DBG(cdev,
+@@ -1729,6 +1731,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
+ DBG(cdev, "delayed_status count %d\n",
+ cdev->delayed_status);
+ }
++ spin_unlock(&cdev->lock);
+ break;
+ case USB_REQ_GET_INTERFACE:
+ if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
+diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
+index 52e6897fa35a..17467545391b 100644
+--- a/drivers/usb/gadget/function/f_fs.c
++++ b/drivers/usb/gadget/function/f_fs.c
+@@ -219,6 +219,7 @@ struct ffs_io_data {
+
+ struct mm_struct *mm;
+ struct work_struct work;
++ struct work_struct cancellation_work;
+
+ struct usb_ep *ep;
+ struct usb_request *req;
+@@ -1073,22 +1074,31 @@ ffs_epfile_open(struct inode *inode, struct file *file)
+ return 0;
+ }
+
++static void ffs_aio_cancel_worker(struct work_struct *work)
++{
++ struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
++ cancellation_work);
++
++ ENTER();
++
++ usb_ep_dequeue(io_data->ep, io_data->req);
++}
++
+ static int ffs_aio_cancel(struct kiocb *kiocb)
+ {
+ struct ffs_io_data *io_data = kiocb->private;
+- struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
++ struct ffs_data *ffs = io_data->ffs;
+ int value;
+
+ ENTER();
+
+- spin_lock_irq(&epfile->ffs->eps_lock);
+-
+- if (likely(io_data && io_data->ep && io_data->req))
+- value = usb_ep_dequeue(io_data->ep, io_data->req);
+- else
++ if (likely(io_data && io_data->ep && io_data->req)) {
++ INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
++ queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
++ value = -EINPROGRESS;
++ } else {
+ value = -EINVAL;
+-
+- spin_unlock_irq(&epfile->ffs->eps_lock);
++ }
+
+ return value;
+ }
+diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
+index 74436f8ca538..32ddafe7af87 100644
+--- a/drivers/usb/host/xhci-tegra.c
++++ b/drivers/usb/host/xhci-tegra.c
+@@ -482,7 +482,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
+ unsigned long mask;
+ unsigned int port;
+ bool idle, enable;
+- int err;
++ int err = 0;
+
+ memset(&rsp, 0, sizeof(rsp));
+
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index fe84b36627ec..6b11fd9d8efe 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -1024,8 +1024,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+ command = readl(&xhci->op_regs->command);
+ command |= CMD_CRS;
+ writel(command, &xhci->op_regs->command);
++ /*
++ * Some controllers take up to 55+ ms to complete the controller
++ * restore so setting the timeout to 100ms. Xhci specification
++ * doesn't mention any timeout value.
++ */
+ if (xhci_handshake(&xhci->op_regs->status,
+- STS_RESTORE, 0, 10 * 1000)) {
++ STS_RESTORE, 0, 100 * 1000)) {
+ xhci_warn(xhci, "WARN: xHC restore state timeout\n");
+ spin_unlock_irq(&xhci->lock);
+ return -ETIMEDOUT;
+diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
+index c425d03d37d2..587d12829925 100644
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -292,8 +292,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
+ return;
+ }
+
+- if (sysrq_key != '\0')
+- xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
++ if (sysrq_key != '\0') {
++ err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
++ if (err) {
++ pr_err("%s: Error %d writing sysrq in control/sysrq\n",
++ __func__, err);
++ xenbus_transaction_end(xbt, 1);
++ return;
++ }
++ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err == -EAGAIN)
+@@ -345,7 +352,12 @@ static int setup_shutdown_watcher(void)
+ continue;
+ snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
+ shutdown_handlers[idx].command);
+- xenbus_printf(XBT_NIL, "control", node, "%u", 1);
++ err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
++ if (err) {
++ pr_err("%s: Error %d writing %s\n", __func__,
++ err, node);
++ return err;
++ }
+ }
+
+ return 0;
+diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
+index 7bc88fd43cfc..e2f3e8b0fba9 100644
+--- a/drivers/xen/xen-scsiback.c
++++ b/drivers/xen/xen-scsiback.c
+@@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
+ {
+ struct v2p_entry *entry;
+ unsigned long flags;
++ int err;
+
+ if (try) {
+ spin_lock_irqsave(&info->v2p_lock, flags);
+@@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
+ scsiback_del_translation_entry(info, vir);
+ }
+ } else if (!try) {
+- xenbus_printf(XBT_NIL, info->dev->nodename, state,
++ err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
+ "%d", XenbusStateClosed);
++ if (err)
++ xenbus_dev_error(info->dev, err,
++ "%s: writing %s", __func__, state);
+ }
+ }
+
+@@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
+ snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
+ val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
+ if (IS_ERR(val)) {
+- xenbus_printf(XBT_NIL, dev->nodename, state,
++ err = xenbus_printf(XBT_NIL, dev->nodename, state,
+ "%d", XenbusStateClosed);
++ if (err)
++ xenbus_dev_error(info->dev, err,
++ "%s: writing %s", __func__, state);
+ return;
+ }
+ strlcpy(phy, val, VSCSI_NAMELEN);
+@@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
+ err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
+ &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
+ if (XENBUS_EXIST_ERR(err)) {
+- xenbus_printf(XBT_NIL, dev->nodename, state,
++ err = xenbus_printf(XBT_NIL, dev->nodename, state,
+ "%d", XenbusStateClosed);
++ if (err)
++ xenbus_dev_error(info->dev, err,
++ "%s: writing %s", __func__, state);
+ return;
+ }
+
+diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
+index 936d58ca2b49..61192c536e6c 100644
+--- a/fs/btrfs/scrub.c
++++ b/fs/btrfs/scrub.c
+@@ -1166,11 +1166,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
+ return ret;
+ }
+
+- if (sctx->is_dev_replace && !is_metadata && !have_csum) {
+- sblocks_for_recheck = NULL;
+- goto nodatasum_case;
+- }
+-
+ /*
+ * read all mirrors one after the other. This includes to
+ * re-read the extent or metadata block that failed (that was
+@@ -1283,13 +1278,19 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
+ goto out;
+ }
+
+- if (!is_metadata && !have_csum) {
++ /*
++ * NOTE: Even for nodatasum case, it's still possible that it's a
++ * compressed data extent, thus scrub_fixup_nodatasum(), which write
++ * inode page cache onto disk, could cause serious data corruption.
++ *
++ * So here we could only read from disk, and hope our recovery could
++ * reach disk before the newer write.
++ */
++ if (0 && !is_metadata && !have_csum) {
+ struct scrub_fixup_nodatasum *fixup_nodatasum;
+
+ WARN_ON(sctx->is_dev_replace);
+
+-nodatasum_case:
+-
+ /*
+ * !is_metadata and !have_csum, this means that the data
+ * might not be COWed, that it might be modified
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index f2550a076edc..d5124ed35154 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -1087,6 +1087,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
+ if (IS_ERR(realdn)) {
+ pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
+ PTR_ERR(realdn), dn, in, ceph_vinop(in));
++ dput(dn);
+ dn = realdn; /* note realdn contains the error */
+ goto out;
+ } else if (realdn) {
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 048c586d9a8b..1792999eec91 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -26,6 +26,7 @@
+ #include <linux/log2.h>
+ #include <linux/module.h>
+ #include <linux/slab.h>
++#include <linux/nospec.h>
+ #include <linux/backing-dev.h>
+ #include <trace/events/ext4.h>
+
+@@ -2152,7 +2153,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+ * This should tell if fe_len is exactly power of 2
+ */
+ if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
+- ac->ac_2order = i - 1;
++ ac->ac_2order = array_index_nospec(i - 1,
++ sb->s_blocksize_bits + 2);
+ }
+
+ /* if stream allocation is enabled, use global goal */
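The mballoc.c change is an instance of the generic Spectre-v1 hardening pattern from <linux/nospec.h>: after the architectural bounds check, the index is clamped again so that a mispredicted branch cannot be used to read out of bounds under speculation. A minimal, hypothetical example of the same pattern:

#include <linux/errno.h>
#include <linux/nospec.h>

static int example_lookup(const int *table, unsigned int nr, unsigned int idx)
{
	if (idx >= nr)
		return -EINVAL;

	/* No-op architecturally; bounds the index on speculative paths. */
	idx = array_index_nospec(idx, nr);

	return table[idx];
}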
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 7c05bd4222b2..3c7bbbae0afa 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -3240,7 +3240,7 @@ static int build_curseg(struct f2fs_sb_info *sbi)
+ return restore_curseg_summaries(sbi);
+ }
+
+-static void build_sit_entries(struct f2fs_sb_info *sbi)
++static int build_sit_entries(struct f2fs_sb_info *sbi)
+ {
+ struct sit_info *sit_i = SIT_I(sbi);
+ struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_COLD_DATA);
+@@ -3250,6 +3250,8 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
+ int sit_blk_cnt = SIT_BLK_CNT(sbi);
+ unsigned int i, start, end;
+ unsigned int readed, start_blk = 0;
++ int err = 0;
++ block_t total_node_blocks = 0;
+
+ do {
+ readed = ra_meta_pages(sbi, start_blk, BIO_MAX_PAGES,
+@@ -3268,8 +3270,12 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
+ sit = sit_blk->entries[SIT_ENTRY_OFFSET(sit_i, start)];
+ f2fs_put_page(page, 1);
+
+- check_block_count(sbi, start, &sit);
++ err = check_block_count(sbi, start, &sit);
++ if (err)
++ return err;
+ seg_info_from_raw_sit(se, &sit);
++ if (IS_NODESEG(se->type))
++ total_node_blocks += se->valid_blocks;
+
+ /* build discard map only one time */
+ if (f2fs_discard_en(sbi)) {
+@@ -3302,9 +3308,15 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
+ sit = sit_in_journal(journal, i);
+
+ old_valid_blocks = se->valid_blocks;
++ if (IS_NODESEG(se->type))
++ total_node_blocks -= old_valid_blocks;
+
+- check_block_count(sbi, start, &sit);
++ err = check_block_count(sbi, start, &sit);
++ if (err)
++ break;
+ seg_info_from_raw_sit(se, &sit);
++ if (IS_NODESEG(se->type))
++ total_node_blocks += se->valid_blocks;
+
+ if (f2fs_discard_en(sbi)) {
+ if (is_set_ckpt_flags(sbi, CP_TRIMMED_FLAG)) {
+@@ -3323,6 +3335,16 @@ static void build_sit_entries(struct f2fs_sb_info *sbi)
+ se->valid_blocks - old_valid_blocks;
+ }
+ up_read(&curseg->journal_rwsem);
++
++ if (!err && total_node_blocks != valid_node_count(sbi)) {
++ f2fs_msg(sbi->sb, KERN_ERR,
++ "SIT is corrupted node# %u vs %u",
++ total_node_blocks, valid_node_count(sbi));
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
++ err = -EINVAL;
++ }
++
++ return err;
+ }
+
+ static void init_free_segmap(struct f2fs_sb_info *sbi)
+@@ -3492,7 +3514,9 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
+ return err;
+
+ /* reinit free segmap based on SIT */
+- build_sit_entries(sbi);
++ err = build_sit_entries(sbi);
++ if (err)
++ return err;
+
+ init_free_segmap(sbi);
+ err = build_dirty_segmap(sbi);
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index e0a6cc23ace3..39ada30889b6 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -625,7 +625,7 @@ static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr)
+ /*
+ * Summary block is always treated as an invalid block
+ */
+-static inline void check_block_count(struct f2fs_sb_info *sbi,
++static inline int check_block_count(struct f2fs_sb_info *sbi,
+ int segno, struct f2fs_sit_entry *raw_sit)
+ {
+ #ifdef CONFIG_F2FS_CHECK_FS
+@@ -647,11 +647,25 @@ static inline void check_block_count(struct f2fs_sb_info *sbi,
+ cur_pos = next_pos;
+ is_valid = !is_valid;
+ } while (cur_pos < sbi->blocks_per_seg);
+- BUG_ON(GET_SIT_VBLOCKS(raw_sit) != valid_blocks);
++
++ if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
++ f2fs_msg(sbi->sb, KERN_ERR,
++ "Mismatch valid blocks %d vs. %d",
++ GET_SIT_VBLOCKS(raw_sit), valid_blocks);
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
++ return -EINVAL;
++ }
+ #endif
+ /* check segment usage, and check boundary of a given segment number */
+- f2fs_bug_on(sbi, GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
+- || segno > TOTAL_SEGS(sbi) - 1);
++ if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
++ || segno > TOTAL_SEGS(sbi) - 1)) {
++ f2fs_msg(sbi->sb, KERN_ERR,
++ "Wrong valid blocks %d or segno %u",
++ GET_SIT_VBLOCKS(raw_sit), segno);
++ set_sbi_flag(sbi, SBI_NEED_FSCK);
++ return -EINVAL;
++ }
++ return 0;
+ }
+
+ static inline pgoff_t current_sit_addr(struct f2fs_sb_info *sbi,
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 43fbf4495090..51deff8e1f86 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -8429,6 +8429,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
+
+ dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
+
++ nfs4_sequence_free_slot(&lgp->res.seq_res);
++
+ switch (nfs4err) {
+ case 0:
+ goto out;
+@@ -8493,7 +8495,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
+ goto out;
+ }
+
+- nfs4_sequence_free_slot(&lgp->res.seq_res);
+ err = nfs4_handle_exception(server, nfs4err, exception);
+ if (!status) {
+ if (exception->retry)
+@@ -8619,20 +8620,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
+ if (IS_ERR(task))
+ return ERR_CAST(task);
+ status = rpc_wait_for_completion_task(task);
+- if (status == 0) {
++ if (status != 0)
++ goto out;
++
++ /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
++ if (task->tk_status < 0 || lgp->res.layoutp->len == 0) {
+ status = nfs4_layoutget_handle_exception(task, lgp, &exception);
+ *timeout = exception.timeout;
+- }
+-
++ } else
++ lseg = pnfs_layout_process(lgp);
++out:
+ trace_nfs4_layoutget(lgp->args.ctx,
+ &lgp->args.range,
+ &lgp->res.range,
+ &lgp->res.stateid,
+ status);
+
+- /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
+- if (status == 0 && lgp->res.layoutp->len)
+- lseg = pnfs_layout_process(lgp);
+ rpc_put_task(task);
+ dprintk("<-- %s status=%d\n", __func__, status);
+ if (status)
+diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
+index 46492fb37a4c..505f87a8c724 100644
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -792,8 +792,10 @@ static int listxattr_filler(struct dir_context *ctx, const char *name,
+ return 0;
+ size = namelen + 1;
+ if (b->buf) {
+- if (size > b->size)
++ if (b->pos + size > b->size) {
++ b->pos = -ERANGE;
+ return -ERANGE;
++ }
+ memcpy(b->buf + b->pos, name, namelen);
+ b->buf[b->pos + namelen] = 0;
+ }
+diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
+index 3efa3b861d44..941b11811f85 100644
+--- a/include/linux/fsl/guts.h
++++ b/include/linux/fsl/guts.h
+@@ -16,6 +16,7 @@
+ #define __FSL_GUTS_H__
+
+ #include <linux/types.h>
++#include <linux/io.h>
+
+ /**
+ * Global Utility Registers.
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 727e309baa5e..9d6fae809c09 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1235,6 +1235,8 @@ int pci_register_io_range(phys_addr_t addr, resource_size_t size);
+ unsigned long pci_address_to_pio(phys_addr_t addr);
+ phys_addr_t pci_pio_to_address(unsigned long pio);
+ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
++int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
++ phys_addr_t phys_addr);
+ void pci_unmap_iospace(struct resource *res);
+ void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+diff --git a/include/net/ipv6.h b/include/net/ipv6.h
+index e59f385da38e..f280c61e019a 100644
+--- a/include/net/ipv6.h
++++ b/include/net/ipv6.h
+@@ -313,14 +313,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
+ struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
+ struct ipv6_txoptions *opt,
+ int newtype,
+- struct ipv6_opt_hdr __user *newopt,
+- int newoptlen);
+-struct ipv6_txoptions *
+-ipv6_renew_options_kern(struct sock *sk,
+- struct ipv6_txoptions *opt,
+- int newtype,
+- struct ipv6_opt_hdr *newopt,
+- int newoptlen);
++ struct ipv6_opt_hdr *newopt);
+ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
+ struct ipv6_txoptions *opt);
+
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index 049008493faf..f4bf75fac349 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -120,6 +120,7 @@ struct net {
+ #endif
+ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ struct netns_nf_frag nf_frag;
++ struct ctl_table_header *nf_frag_frags_hdr;
+ #endif
+ struct sock *nfnl;
+ struct sock *nfnl_stash;
+diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
+index dc825a5ddd7f..c004d051c2d3 100644
+--- a/include/net/netns/ipv6.h
++++ b/include/net/netns/ipv6.h
+@@ -94,7 +94,6 @@ struct netns_ipv6 {
+
+ #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
+ struct netns_nf_frag {
+- struct netns_sysctl_ipv6 sysctl;
+ struct netns_frags frags;
+ };
+ #endif
+diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
+index efef0b4b1b2b..46b8c7f1c8d5 100644
+--- a/include/net/tc_act/tc_tunnel_key.h
++++ b/include/net/tc_act/tc_tunnel_key.h
+@@ -18,7 +18,6 @@
+ struct tcf_tunnel_key_params {
+ struct rcu_head rcu;
+ int tcft_action;
+- int action;
+ struct metadata_dst *tcft_enc_metadata;
+ };
+
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 686e33ea76e7..eca8d65cad1e 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -938,8 +938,6 @@ enum tcp_ca_event {
+ CA_EVENT_LOSS, /* loss timeout */
+ CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
+ CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
+- CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
+- CA_EVENT_NON_DELAYED_ACK,
+ };
+
+ /* Information about inbound ACK, passed to cong_ops->in_ack_event() */
+diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
+index 85a3fb65e40a..20d6cc91435d 100644
+--- a/include/uapi/linux/nbd.h
++++ b/include/uapi/linux/nbd.h
+@@ -53,6 +53,9 @@ enum {
+ /* These are client behavior specific flags. */
+ #define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
+ disconnect. */
++#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
++ * close by last opener.
++ */
+
+ /* userspace doesn't need the nbd_device structure */
+
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 6533f08d1238..3d0ecc273cc6 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -730,13 +730,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+ * old element will be freed immediately.
+ * Otherwise return an error
+ */
+- atomic_dec(&htab->count);
+- return ERR_PTR(-E2BIG);
++ l_new = ERR_PTR(-E2BIG);
++ goto dec_count;
+ }
+ l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
+ htab->map.numa_node);
+- if (!l_new)
+- return ERR_PTR(-ENOMEM);
++ if (!l_new) {
++ l_new = ERR_PTR(-ENOMEM);
++ goto dec_count;
++ }
+ }
+
+ memcpy(l_new->key, key, key_size);
+@@ -749,7 +751,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (!pptr) {
+ kfree(l_new);
+- return ERR_PTR(-ENOMEM);
++ l_new = ERR_PTR(-ENOMEM);
++ goto dec_count;
+ }
+ }
+
+@@ -763,6 +766,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
+
+ l_new->hash = hash;
+ return l_new;
++dec_count:
++ atomic_dec(&htab->count);
++ return l_new;
+ }
+
+ static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
+diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
+index 2f0f5720b123..d7c155048ea9 100644
+--- a/kernel/locking/lockdep.c
++++ b/kernel/locking/lockdep.c
+@@ -1296,11 +1296,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
+ this.parent = NULL;
+ this.class = class;
+
+- local_irq_save(flags);
++ raw_local_irq_save(flags);
+ arch_spin_lock(&lockdep_lock);
+ ret = __lockdep_count_forward_deps(&this);
+ arch_spin_unlock(&lockdep_lock);
+- local_irq_restore(flags);
++ raw_local_irq_restore(flags);
+
+ return ret;
+ }
+@@ -1323,11 +1323,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
+ this.parent = NULL;
+ this.class = class;
+
+- local_irq_save(flags);
++ raw_local_irq_save(flags);
+ arch_spin_lock(&lockdep_lock);
+ ret = __lockdep_count_backward_deps(&this);
+ arch_spin_unlock(&lockdep_lock);
+- local_irq_restore(flags);
++ raw_local_irq_restore(flags);
+
+ return ret;
+ }
+@@ -4478,7 +4478,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
+ if (unlikely(!debug_locks))
+ return;
+
+- local_irq_save(flags);
++ raw_local_irq_save(flags);
+ for (i = 0; i < curr->lockdep_depth; i++) {
+ hlock = curr->held_locks + i;
+
+@@ -4489,7 +4489,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
+ print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
+ break;
+ }
+- local_irq_restore(flags);
++ raw_local_irq_restore(flags);
+ }
+ EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 20919489883f..fbc75c84076e 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2958,6 +2958,7 @@ out_nobuffer:
+ }
+ EXPORT_SYMBOL_GPL(trace_vbprintk);
+
++__printf(3, 0)
+ static int
+ __trace_array_vprintk(struct ring_buffer *buffer,
+ unsigned long ip, const char *fmt, va_list args)
+@@ -3012,12 +3013,14 @@ out_nobuffer:
+ return len;
+ }
+
++__printf(3, 0)
+ int trace_array_vprintk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, va_list args)
+ {
+ return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
+ }
+
++__printf(3, 0)
+ int trace_array_printk(struct trace_array *tr,
+ unsigned long ip, const char *fmt, ...)
+ {
+@@ -3033,6 +3036,7 @@ int trace_array_printk(struct trace_array *tr,
+ return ret;
+ }
+
++__printf(3, 4)
+ int trace_array_printk_buf(struct ring_buffer *buffer,
+ unsigned long ip, const char *fmt, ...)
+ {
+@@ -3048,6 +3052,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
+ return ret;
+ }
+
++__printf(2, 0)
+ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
+ {
+ return trace_array_vprintk(&global_trace, ip, fmt, args);
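For context, __printf(fmt_idx, args_idx) expands to gcc's format(printf, ...) attribute, with args_idx 0 for the va_list variants annotated here, so mismatched format strings become -Wformat warnings at build time. A hypothetical example (names invented):

#include <linux/kernel.h>

__printf(1, 2)
static int example_log(const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = vprintk(fmt, args);
	va_end(args);

	return len;
}

/* example_log("copied %d bytes", "oops") now warns about the bad specifier. */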
+diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
+index d90f29a166d8..71a4319256b6 100644
+--- a/mm/kasan/kasan.c
++++ b/mm/kasan/kasan.c
+@@ -613,12 +613,13 @@ void kasan_kfree_large(const void *ptr)
+ int kasan_module_alloc(void *addr, size_t size)
+ {
+ void *ret;
++ size_t scaled_size;
+ size_t shadow_size;
+ unsigned long shadow_start;
+
+ shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
+- shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
+- PAGE_SIZE);
++ scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
++ shadow_size = round_up(scaled_size, PAGE_SIZE);
+
+ if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
+ return -EINVAL;
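To make the rounding fix concrete (assuming the usual KASAN_SHADOW_SCALE_SHIFT of 3, i.e. one shadow byte per 8 bytes of module space, and 4 KiB pages): for a module allocation of 32769 bytes, the old code computed 32769 >> 3 = 4096 shadow bytes and mapped a single shadow page, leaving the final byte of the allocation without shadow; the fixed code computes (32769 + 7) >> 3 = 4097 and rounds up to two pages, so the tail granule is covered as well.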
+diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
+index 71d8809fbe94..5bd9b389f8c9 100644
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -2718,7 +2718,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ {
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_neigh_node *router;
+- struct batadv_gw_node *curr_gw;
++ struct batadv_gw_node *curr_gw = NULL;
+ int ret = 0;
+ void *hdr;
+
+@@ -2766,6 +2766,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ ret = 0;
+
+ out:
++ if (curr_gw)
++ batadv_gw_node_put(curr_gw);
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ if (router)
+diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
+index a8f4c3902cf5..371a1f1651b4 100644
+--- a/net/batman-adv/bat_v.c
++++ b/net/batman-adv/bat_v.c
+@@ -929,7 +929,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ {
+ struct batadv_neigh_ifinfo *router_ifinfo = NULL;
+ struct batadv_neigh_node *router;
+- struct batadv_gw_node *curr_gw;
++ struct batadv_gw_node *curr_gw = NULL;
+ int ret = 0;
+ void *hdr;
+
+@@ -997,6 +997,8 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
+ ret = 0;
+
+ out:
++ if (curr_gw)
++ batadv_gw_node_put(curr_gw);
+ if (router_ifinfo)
+ batadv_neigh_ifinfo_put(router_ifinfo);
+ if (router)
+diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
+index 8a3ce79b1307..0f4d4eece3e4 100644
+--- a/net/batman-adv/translation-table.c
++++ b/net/batman-adv/translation-table.c
+@@ -1679,7 +1679,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+ ether_addr_copy(common->addr, tt_addr);
+ common->vid = vid;
+
+- common->flags = flags;
++ if (!is_multicast_ether_addr(common->addr))
++ common->flags = flags & (~BATADV_TT_SYNC_MASK);
++
+ tt_global_entry->roam_at = 0;
+ /* node must store current time in case of roaming. This is
+ * needed to purge this entry out on timeout (if nobody claims
+@@ -1742,7 +1744,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
+ * TT_CLIENT_TEMP, therefore they have to be copied in the
+ * client entry
+ */
+- common->flags |= flags & (~BATADV_TT_SYNC_MASK);
++ if (!is_multicast_ether_addr(common->addr))
++ common->flags |= flags & (~BATADV_TT_SYNC_MASK);
+
+ /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
+ * one originator left in the list and we previously received a
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 6ca771f2f25b..85f4a1047707 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8297,7 +8297,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
+ /* We get here if we can't use the current device name */
+ if (!pat)
+ goto out;
+- if (dev_get_valid_name(net, dev, pat) < 0)
++ err = dev_get_valid_name(net, dev, pat);
++ if (err < 0)
+ goto out;
+ }
+
+@@ -8309,7 +8310,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
+ dev_close(dev);
+
+ /* And unlink it from device chain */
+- err = -ENODEV;
+ unlist_netdevice(dev);
+
+ synchronize_net();
+diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
+index e9f0489e4229..22db273f15ea 100644
+--- a/net/ieee802154/6lowpan/core.c
++++ b/net/ieee802154/6lowpan/core.c
+@@ -90,12 +90,18 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
+ return 0;
+ }
+
++static int lowpan_get_iflink(const struct net_device *dev)
++{
++ return lowpan_802154_dev(dev)->wdev->ifindex;
++}
++
+ static const struct net_device_ops lowpan_netdev_ops = {
+ .ndo_init = lowpan_dev_init,
+ .ndo_start_xmit = lowpan_xmit,
+ .ndo_open = lowpan_open,
+ .ndo_stop = lowpan_stop,
+ .ndo_neigh_construct = lowpan_neigh_construct,
++ .ndo_get_iflink = lowpan_get_iflink,
+ };
+
+ static void lowpan_setup(struct net_device *ldev)
+diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
+index 114d4bef1bec..0d1a2cda1bfb 100644
+--- a/net/ipv4/netfilter/ip_tables.c
++++ b/net/ipv4/netfilter/ip_tables.c
+@@ -1894,6 +1894,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = {
+ .checkentry = icmp_checkentry,
+ .proto = IPPROTO_ICMP,
+ .family = NFPROTO_IPV4,
++ .me = THIS_MODULE,
+ },
+ };
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index e81ff9d545a4..7462ec7587ce 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1837,7 +1837,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ * shouldn't happen.
+ */
+ if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
+- "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
++ "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
+ *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+ flags))
+ break;
+@@ -1852,7 +1852,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+ goto found_fin_ok;
+ WARN(!(flags & MSG_PEEK),
+- "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
++ "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
+ *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
+ }
+
+diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
+index 1a9b88c8cf72..8b637f9f23a2 100644
+--- a/net/ipv4/tcp_dctcp.c
++++ b/net/ipv4/tcp_dctcp.c
+@@ -55,7 +55,6 @@ struct dctcp {
+ u32 dctcp_alpha;
+ u32 next_seq;
+ u32 ce_state;
+- u32 delayed_ack_reserved;
+ u32 loss_cwnd;
+ };
+
+@@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk)
+
+ ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
+
+- ca->delayed_ack_reserved = 0;
+ ca->loss_cwnd = 0;
+ ca->ce_state = 0;
+
+@@ -230,25 +228,6 @@ static void dctcp_state(struct sock *sk, u8 new_state)
+ }
+ }
+
+-static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
+-{
+- struct dctcp *ca = inet_csk_ca(sk);
+-
+- switch (ev) {
+- case CA_EVENT_DELAYED_ACK:
+- if (!ca->delayed_ack_reserved)
+- ca->delayed_ack_reserved = 1;
+- break;
+- case CA_EVENT_NON_DELAYED_ACK:
+- if (ca->delayed_ack_reserved)
+- ca->delayed_ack_reserved = 0;
+- break;
+- default:
+- /* Don't care for the rest. */
+- break;
+- }
+-}
+-
+ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+ {
+ switch (ev) {
+@@ -258,10 +237,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
+ case CA_EVENT_ECN_NO_CE:
+ dctcp_ce_state_1_to_0(sk);
+ break;
+- case CA_EVENT_DELAYED_ACK:
+- case CA_EVENT_NON_DELAYED_ACK:
+- dctcp_update_ack_reserved(sk, ev);
+- break;
+ default:
+ /* Don't care for the rest. */
+ break;
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index 3d8f6f342cb1..b2ead31afcba 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -3513,8 +3513,6 @@ void tcp_send_delayed_ack(struct sock *sk)
+ int ato = icsk->icsk_ack.ato;
+ unsigned long timeout;
+
+- tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
+-
+ if (ato > TCP_DELACK_MIN) {
+ const struct tcp_sock *tp = tcp_sk(sk);
+ int max_ato = HZ / 2;
+@@ -3571,8 +3569,6 @@ void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
+ if (sk->sk_state == TCP_CLOSE)
+ return;
+
+- tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
+-
+ /* We are not putting this on the write queue, so
+ * tcp_transmit_skb() will set the ownership to this
+ * sock.
+diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
+index 1323b9679cf7..1c0bb9fb76e6 100644
+--- a/net/ipv6/calipso.c
++++ b/net/ipv6/calipso.c
+@@ -799,8 +799,7 @@ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop)
+ {
+ struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts;
+
+- txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS,
+- hop, hop ? ipv6_optlen(hop) : 0);
++ txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop);
+ txopt_put(old);
+ if (IS_ERR(txopts))
+ return PTR_ERR(txopts);
+@@ -1222,8 +1221,7 @@ static int calipso_req_setattr(struct request_sock *req,
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+- txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
+- new, new ? ipv6_optlen(new) : 0);
++ txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
+
+ kfree(new);
+
+@@ -1260,8 +1258,7 @@ static void calipso_req_delattr(struct request_sock *req)
+ if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new))
+ return; /* Nothing to do */
+
+- txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
+- new, new ? ipv6_optlen(new) : 0);
++ txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
+
+ if (!IS_ERR(txopts)) {
+ txopts = xchg(&req_inet->ipv6_opt, txopts);
+diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
+index d6189c2a35e4..47a5f8f88c70 100644
+--- a/net/ipv6/exthdrs.c
++++ b/net/ipv6/exthdrs.c
+@@ -987,29 +987,21 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
+ }
+ EXPORT_SYMBOL_GPL(ipv6_dup_options);
+
+-static int ipv6_renew_option(void *ohdr,
+- struct ipv6_opt_hdr __user *newopt, int newoptlen,
+- int inherit,
+- struct ipv6_opt_hdr **hdr,
+- char **p)
++static void ipv6_renew_option(int renewtype,
++ struct ipv6_opt_hdr **dest,
++ struct ipv6_opt_hdr *old,
++ struct ipv6_opt_hdr *new,
++ int newtype, char **p)
+ {
+- if (inherit) {
+- if (ohdr) {
+- memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
+- *hdr = (struct ipv6_opt_hdr *)*p;
+- *p += CMSG_ALIGN(ipv6_optlen(*hdr));
+- }
+- } else {
+- if (newopt) {
+- if (copy_from_user(*p, newopt, newoptlen))
+- return -EFAULT;
+- *hdr = (struct ipv6_opt_hdr *)*p;
+- if (ipv6_optlen(*hdr) > newoptlen)
+- return -EINVAL;
+- *p += CMSG_ALIGN(newoptlen);
+- }
+- }
+- return 0;
++ struct ipv6_opt_hdr *src;
++
++ src = (renewtype == newtype ? new : old);
++ if (!src)
++ return;
++
++ memcpy(*p, src, ipv6_optlen(src));
++ *dest = (struct ipv6_opt_hdr *)*p;
++ *p += CMSG_ALIGN(ipv6_optlen(*dest));
+ }
+
+ /**
+@@ -1035,13 +1027,11 @@ static int ipv6_renew_option(void *ohdr,
+ */
+ struct ipv6_txoptions *
+ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
+- int newtype,
+- struct ipv6_opt_hdr __user *newopt, int newoptlen)
++ int newtype, struct ipv6_opt_hdr *newopt)
+ {
+ int tot_len = 0;
+ char *p;
+ struct ipv6_txoptions *opt2;
+- int err;
+
+ if (opt) {
+ if (newtype != IPV6_HOPOPTS && opt->hopopt)
+@@ -1054,8 +1044,8 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
+ tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
+ }
+
+- if (newopt && newoptlen)
+- tot_len += CMSG_ALIGN(newoptlen);
++ if (newopt)
++ tot_len += CMSG_ALIGN(ipv6_optlen(newopt));
+
+ if (!tot_len)
+ return NULL;
+@@ -1070,29 +1060,19 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
+ opt2->tot_len = tot_len;
+ p = (char *)(opt2 + 1);
+
+- err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
+- newtype != IPV6_HOPOPTS,
+- &opt2->hopopt, &p);
+- if (err)
+- goto out;
+-
+- err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
+- newtype != IPV6_RTHDRDSTOPTS,
+- &opt2->dst0opt, &p);
+- if (err)
+- goto out;
+-
+- err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
+- newtype != IPV6_RTHDR,
+- (struct ipv6_opt_hdr **)&opt2->srcrt, &p);
+- if (err)
+- goto out;
+-
+- err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
+- newtype != IPV6_DSTOPTS,
+- &opt2->dst1opt, &p);
+- if (err)
+- goto out;
++ ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
++ (opt ? opt->hopopt : NULL),
++ newopt, newtype, &p);
++ ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
++ (opt ? opt->dst0opt : NULL),
++ newopt, newtype, &p);
++ ipv6_renew_option(IPV6_RTHDR,
++ (struct ipv6_opt_hdr **)&opt2->srcrt,
++ (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
++ newopt, newtype, &p);
++ ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
++ (opt ? opt->dst1opt : NULL),
++ newopt, newtype, &p);
+
+ opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
+ (opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
+@@ -1100,37 +1080,6 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
+ opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
+
+ return opt2;
+-out:
+- sock_kfree_s(sk, opt2, opt2->tot_len);
+- return ERR_PTR(err);
+-}
+-
+-/**
+- * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
+- *
+- * @sk: sock from which to allocate memory
+- * @opt: original options
+- * @newtype: option type to replace in @opt
+- * @newopt: new option of type @newtype to replace (kernel-mem)
+- * @newoptlen: length of @newopt
+- *
+- * See ipv6_renew_options(). The difference is that @newopt is
+- * kernel memory, rather than user memory.
+- */
+-struct ipv6_txoptions *
+-ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
+- int newtype, struct ipv6_opt_hdr *newopt,
+- int newoptlen)
+-{
+- struct ipv6_txoptions *ret_val;
+- const mm_segment_t old_fs = get_fs();
+-
+- set_fs(KERNEL_DS);
+- ret_val = ipv6_renew_options(sk, opt, newtype,
+- (struct ipv6_opt_hdr __user *)newopt,
+- newoptlen);
+- set_fs(old_fs);
+- return ret_val;
+ }
+
+ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
+diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
+index 1276d5bd5675..5c91b05c8d8f 100644
+--- a/net/ipv6/ipv6_sockglue.c
++++ b/net/ipv6/ipv6_sockglue.c
+@@ -390,6 +390,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ case IPV6_DSTOPTS:
+ {
+ struct ipv6_txoptions *opt;
++ struct ipv6_opt_hdr *new = NULL;
++
++ /* hop-by-hop / destination options are privileged option */
++ retv = -EPERM;
++ if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
++ break;
+
+ /* remove any sticky options header with a zero option
+ * length, per RFC3542.
+@@ -401,17 +407,22 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
+ else if (optlen < sizeof(struct ipv6_opt_hdr) ||
+ optlen & 0x7 || optlen > 8 * 255)
+ goto e_inval;
+-
+- /* hop-by-hop / destination options are privileged option */
+- retv = -EPERM;
+- if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+- break;
++ else {
++ new = memdup_user(optval, optlen);
++ if (IS_ERR(new)) {
++ retv = PTR_ERR(new);
++ break;
++ }
++ if (unlikely(ipv6_optlen(new) > optlen)) {
++ kfree(new);
++ goto e_inval;
++ }
++ }
+
+ opt = rcu_dereference_protected(np->opt,
+ lockdep_sock_is_held(sk));
+- opt = ipv6_renew_options(sk, opt, optname,
+- (struct ipv6_opt_hdr __user *)optval,
+- optlen);
++ opt = ipv6_renew_options(sk, opt, optname, new);
++ kfree(new);
+ if (IS_ERR(opt)) {
+ retv = PTR_ERR(opt);
+ break;
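
The ipv6_sockglue change above copies the user-supplied option header into kernel memory with memdup_user(), rejects it when the length encoded in the header exceeds what was actually copied, and frees the copy on every exit path, replacing the removed set_fs(KERNEL_DS) helper in exthdrs.c. A minimal userspace sketch of that copy-then-validate pattern; opt_hdr, OPTLEN() and dup_buf() are stand-ins invented for this illustration and are not kernel APIs:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* opt_hdr mimics the shape of an IPv6 option header: a next-header byte
     * and a length counted in 8-byte units minus one. */
    struct opt_hdr {
            uint8_t nexthdr;
            uint8_t hdrlen;
    };
    #define OPTLEN(h)       (((h)->hdrlen + 1) << 3)

    /* dup_buf() plays the role of memdup_user(): copy first, validate later. */
    static void *dup_buf(const void *src, size_t len)
    {
            void *dst = malloc(len);

            if (dst)
                    memcpy(dst, src, len);
            return dst;
    }

    static int set_opt(const void *optval, size_t optlen)
    {
            struct opt_hdr *opt = dup_buf(optval, optlen);

            if (!opt)
                    return -1;
            if (OPTLEN(opt) > optlen) {     /* header claims more than we copied */
                    free(opt);
                    return -1;
            }
            /* ... install the option here ... */
            free(opt);
            return 0;
    }

    int main(void)
    {
            uint8_t buf[8] = { 0, 0 };      /* hdrlen 0 => header claims 8 bytes */

            printf("full buffer: %d\n", set_opt(buf, sizeof(buf)));  /* 0  */
            printf("short copy:  %d\n", set_opt(buf, 4));            /* -1 */
            return 0;
    }
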
+diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
+index 6fd913d63835..d112762b4cb8 100644
+--- a/net/ipv6/mcast.c
++++ b/net/ipv6/mcast.c
+@@ -2083,7 +2083,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
+ mld_send_initial_cr(idev);
+ idev->mc_dad_count--;
+ if (idev->mc_dad_count)
+- mld_dad_start_timer(idev, idev->mc_maxdelay);
++ mld_dad_start_timer(idev,
++ unsolicited_report_interval(idev));
+ }
+ }
+
+@@ -2095,7 +2096,8 @@ static void mld_dad_timer_expire(unsigned long data)
+ if (idev->mc_dad_count) {
+ idev->mc_dad_count--;
+ if (idev->mc_dad_count)
+- mld_dad_start_timer(idev, idev->mc_maxdelay);
++ mld_dad_start_timer(idev,
++ unsolicited_report_interval(idev));
+ }
+ in6_dev_put(idev);
+ }
+@@ -2453,7 +2455,8 @@ static void mld_ifc_timer_expire(unsigned long data)
+ if (idev->mc_ifc_count) {
+ idev->mc_ifc_count--;
+ if (idev->mc_ifc_count)
+- mld_ifc_start_timer(idev, idev->mc_maxdelay);
++ mld_ifc_start_timer(idev,
++ unsolicited_report_interval(idev));
+ }
+ in6_dev_put(idev);
+ }
+diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
+index 2e51e0156903..90f5bf2502a7 100644
+--- a/net/ipv6/netfilter/ip6_tables.c
++++ b/net/ipv6/netfilter/ip6_tables.c
+@@ -1907,6 +1907,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = {
+ .checkentry = icmp6_checkentry,
+ .proto = IPPROTO_ICMPV6,
+ .family = NFPROTO_IPV6,
++ .me = THIS_MODULE,
+ },
+ };
+
+diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
+index 722a9db8c6a7..ee33a6743f3b 100644
+--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
++++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
+@@ -117,7 +117,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
+ if (hdr == NULL)
+ goto err_reg;
+
+- net->nf_frag.sysctl.frags_hdr = hdr;
++ net->nf_frag_frags_hdr = hdr;
+ return 0;
+
+ err_reg:
+@@ -131,8 +131,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
+ {
+ struct ctl_table *table;
+
+- table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
+- unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
++ table = net->nf_frag_frags_hdr->ctl_table_arg;
++ unregister_net_sysctl_table(net->nf_frag_frags_hdr);
+ if (!net_eq(net, &init_net))
+ kfree(table);
+ }
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 01130392b7c0..a268acc48af0 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1949,7 +1949,7 @@ int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp)
+ return -EOPNOTSUPP;
+
+ /* On boot, we can set this without any fancy locking. */
+- if (!nf_conntrack_htable_size)
++ if (!nf_conntrack_hash)
+ return param_set_uint(val, kp);
+
+ rc = kstrtouint(val, 0, &hashsize);
+diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
+index 551a1eddf0fa..a75b11c39312 100644
+--- a/net/netfilter/nf_conntrack_helper.c
++++ b/net/netfilter/nf_conntrack_helper.c
+@@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
+
+ nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
+ nf_ct_iterate_destroy(unhelp, me);
++
++ /* Someone may still hold a reference to a helper found during the unhelp
++ * iteration above, so wait for those RCU read-side sections to finish.
++ */
++ synchronize_rcu();
+ }
+ EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
+
+diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
+index 0f5a4d79f6b8..812de5496b37 100644
+--- a/net/netfilter/nf_conntrack_proto_dccp.c
++++ b/net/netfilter/nf_conntrack_proto_dccp.c
+@@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
+ * We currently ignore Sync packets
+ *
+ * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
+- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
++ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ },
+ [DCCP_PKT_SYNCACK] = {
+ /*
+ * We currently ignore SyncAck packets
+ *
+ * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
+- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
++ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ },
+ },
+ [CT_DCCP_ROLE_SERVER] = {
+@@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
+ * We currently ignore Sync packets
+ *
+ * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
+- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
++ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ },
+ [DCCP_PKT_SYNCACK] = {
+ /*
+ * We currently ignore SyncAck packets
+ *
+ * sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
+- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
++ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ },
+ },
+ };
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index 276324abfa60..cdc744aa5889 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -440,6 +440,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
+ if (write) {
+ struct ctl_table tmp = *table;
+
++ /* proc_dostring() can append to existing strings, so we need to
++ * initialize it as an empty string.
++ */
++ buf[0] = '\0';
+ tmp.data = buf;
+ r = proc_dostring(&tmp, write, buffer, lenp, ppos);
+ if (r)
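
The buf[0] = '\0' added above matters because, as the new comment says, proc_dostring() can append to the string already in the buffer, so an uninitialized stack buffer would contribute stale bytes to the logger name. A hedged userspace analogue, with strncat() standing in for any append-style writer (this is not the kernel code path itself):

    #include <stdio.h>
    #include <string.h>

    /* Append-style writer: it concatenates onto whatever is already in dst. */
    static void append_name(char *dst, size_t size, const char *src)
    {
            strncat(dst, src, size - strlen(dst) - 1);
    }

    int main(void)
    {
            char buf[64];

            buf[0] = '\0';          /* the fix: start from a known-empty string */
            append_name(buf, sizeof(buf), "nf_log_ipv4");
            printf("%s\n", buf);    /* prints exactly "nf_log_ipv4" */
            return 0;
    }
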
+diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
+index 3bd637eadc42..6da1cec1494a 100644
+--- a/net/netfilter/nft_compat.c
++++ b/net/netfilter/nft_compat.c
+@@ -825,10 +825,18 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+ rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
+ family = ctx->afi->family;
+
++ if (strcmp(tg_name, XT_ERROR_TARGET) == 0 ||
++ strcmp(tg_name, XT_STANDARD_TARGET) == 0 ||
++ strcmp(tg_name, "standard") == 0)
++ return ERR_PTR(-EINVAL);
++
+ /* Re-use the existing target if it's already loaded. */
+ list_for_each_entry(nft_target, &nft_target_list, head) {
+ struct xt_target *target = nft_target->ops.data;
+
++ if (!target->target)
++ continue;
++
+ if (nft_target_cmp(target, tg_name, rev, family))
+ return &nft_target->ops;
+ }
+@@ -837,6 +845,11 @@ nft_target_select_ops(const struct nft_ctx *ctx,
+ if (IS_ERR(target))
+ return ERR_PTR(-ENOENT);
+
++ if (!target->target) {
++ err = -EINVAL;
++ goto err;
++ }
++
+ if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) {
+ err = -EINVAL;
+ goto err;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 27dafe36f29c..8833a58ca3ee 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2919,6 +2919,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
+ goto out_free;
+ } else if (reserve) {
+ skb_reserve(skb, -reserve);
++ if (len < reserve)
++ skb_reset_network_header(skb);
+ }
+
+ /* Returns -EFAULT on error */
+@@ -4267,6 +4269,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ }
+
+ if (req->tp_block_nr) {
++ unsigned int min_frame_size;
++
+ /* Sanity tests and some calculations */
+ err = -EBUSY;
+ if (unlikely(rb->pg_vec))
+@@ -4289,12 +4293,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
+ goto out;
+ if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
+ goto out;
++ min_frame_size = po->tp_hdrlen + po->tp_reserve;
+ if (po->tp_version >= TPACKET_V3 &&
+- req->tp_block_size <=
+- BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
++ req->tp_block_size <
++ BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
+ goto out;
+- if (unlikely(req->tp_frame_size < po->tp_hdrlen +
+- po->tp_reserve))
++ if (unlikely(req->tp_frame_size < min_frame_size))
+ goto out;
+ if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
+ goto out;
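
The af_packet hunk above folds the header length and the configured reserve into a single min_frame_size and requires a TPACKET_V3 block to hold at least one minimally sized frame, not just the bare tpacket3_hdr, in addition to the existing per-frame check. A simplified, hedged sketch of that kind of ring-size sanity check; the real code also accounts for the BLK_PLUS_PRIV() alignment, which is omitted here:

    #include <stdint.h>
    #include <stdio.h>

    /* A block must hold its private area plus at least one minimal frame,
     * and every frame must hold the header plus the reserved headroom. */
    static int ring_sizes_ok(uint64_t block_size, uint64_t frame_size,
                             uint64_t priv_size, uint64_t hdrlen, uint64_t reserve)
    {
            uint64_t min_frame_size = hdrlen + reserve;

            if (block_size < priv_size + min_frame_size)
                    return 0;
            if (frame_size < min_frame_size)
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("%d\n", ring_sizes_ok(4096, 2048, 0, 48, 0));     /* 1 */
            printf("%d\n", ring_sizes_ok(4096, 2048, 4080, 48, 0));  /* 0 */
            return 0;
    }
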
+diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
+index 78418f38464a..084adea6a818 100644
+--- a/net/qrtr/qrtr.c
++++ b/net/qrtr/qrtr.c
+@@ -710,6 +710,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ node = NULL;
+ if (addr->sq_node == QRTR_NODE_BCAST) {
+ enqueue_fn = qrtr_bcast_enqueue;
++ if (addr->sq_port != QRTR_PORT_CTRL) {
++ release_sock(sk);
++ return -ENOTCONN;
++ }
+ } else if (addr->sq_node == ipc->us.sq_node) {
+ enqueue_fn = qrtr_local_enqueue;
+ } else {
+diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
+index 7cb63616805d..cd51f2ed55fa 100644
+--- a/net/sched/act_tunnel_key.c
++++ b/net/sched/act_tunnel_key.c
+@@ -36,7 +36,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
+
+ tcf_lastuse_update(&t->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
+- action = params->action;
++ action = READ_ONCE(t->tcf_action);
+
+ switch (params->tcft_action) {
+ case TCA_TUNNEL_KEY_ACT_RELEASE:
+@@ -182,7 +182,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
+
+ params_old = rtnl_dereference(t->params);
+
+- params_new->action = parm->action;
++ t->tcf_action = parm->action;
+ params_new->tcft_action = parm->t_action;
+ params_new->tcft_enc_metadata = metadata;
+
+@@ -254,13 +254,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
+ .index = t->tcf_index,
+ .refcnt = t->tcf_refcnt - ref,
+ .bindcnt = t->tcf_bindcnt - bind,
++ .action = t->tcf_action,
+ };
+ struct tcf_t tm;
+
+ params = rtnl_dereference(t->params);
+
+ opt.t_action = params->tcft_action;
+- opt.action = params->action;
+
+ if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
+index 3afac275ee82..acd9380a56fb 100644
+--- a/net/sctp/chunk.c
++++ b/net/sctp/chunk.c
+@@ -230,7 +230,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
+ /* Account for a different sized first fragment */
+ if (msg_len >= first_len) {
+ msg->can_delay = 0;
+- SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
++ if (msg_len > first_len)
++ SCTP_INC_STATS(sock_net(asoc->base.sk),
++ SCTP_MIB_FRAGUSRMSGS);
+ } else {
+ /* Which may be the only one... */
+ first_len = msg_len;
+diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
+index 654a81238406..43ef7be69428 100644
+--- a/net/smc/af_smc.c
++++ b/net/smc/af_smc.c
+@@ -1180,8 +1180,7 @@ static int smc_shutdown(struct socket *sock, int how)
+ lock_sock(sk);
+
+ rc = -ENOTCONN;
+- if ((sk->sk_state != SMC_LISTEN) &&
+- (sk->sk_state != SMC_ACTIVE) &&
++ if ((sk->sk_state != SMC_ACTIVE) &&
+ (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
+ (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
+ (sk->sk_state != SMC_APPCLOSEWAIT1) &&
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index ea28aa505302..4cd351b74e48 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -5990,7 +5990,7 @@ do { \
+ nl80211_check_s32);
+ /*
+ * Check HT operation mode based on
+- * IEEE 802.11 2012 8.4.2.59 HT Operation element.
++ * IEEE 802.11-2016 9.4.2.57 HT Operation element.
+ */
+ if (tb[NL80211_MESHCONF_HT_OPMODE]) {
+ ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
+@@ -6000,22 +6000,9 @@ do { \
+ IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+ return -EINVAL;
+
+- if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
+- (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+- return -EINVAL;
++ /* NON_HT_STA bit is reserved, but some programs set it */
++ ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
+
+- switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
+- case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
+- case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+- if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
+- return -EINVAL;
+- break;
+- case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
+- case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+- if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
+- return -EINVAL;
+- break;
+- }
+ cfg->ht_opmode = ht_opmode;
+ mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
+ }
+@@ -10542,9 +10529,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
+ rem) {
+ u8 *mask_pat;
+
+- nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+- nl80211_packet_pattern_policy,
+- info->extack);
++ err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
++ nl80211_packet_pattern_policy,
++ info->extack);
++ if (err)
++ goto error;
++
+ err = -EINVAL;
+ if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ !pat_tb[NL80211_PKTPAT_PATTERN])
+@@ -10793,8 +10783,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
+ rem) {
+ u8 *mask_pat;
+
+- nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+- nl80211_packet_pattern_policy, NULL);
++ err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
++ nl80211_packet_pattern_policy, NULL);
++ if (err)
++ return err;
++
+ if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ !pat_tb[NL80211_PKTPAT_PATTERN])
+ return -EINVAL;
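
Both nl80211 hunks above stop ignoring the return value of nla_parse_nested(): when parsing fails, the pat_tb[] table is not valid and must not be dereferenced. A small, generic C illustration of the same rule, with sscanf() standing in for the attribute parser (an analogy only, not the nl80211 code):

    #include <stdio.h>

    /* Parse "key=value"; the outputs are only meaningful if parsing succeeds. */
    static int parse_pair(const char *s, char *key, int *value)
    {
            if (sscanf(s, "%15[^=]=%d", key, value) != 2)
                    return -1;
            return 0;
    }

    int main(void)
    {
            char key[16];
            int value;

            if (parse_pair("mtu=1500", key, &value) == 0)
                    printf("%s -> %d\n", key, value);
            if (parse_pair("garbage", key, &value) != 0)
                    printf("parse failed, outputs left unused\n");
            return 0;
    }
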
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index dbfcfefd6d69..dde40f995ac0 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -1665,9 +1665,11 @@ static inline size_t userpolicy_type_attrsize(void)
+ #ifdef CONFIG_XFRM_SUB_POLICY
+ static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
+ {
+- struct xfrm_userpolicy_type upt = {
+- .type = type,
+- };
++ struct xfrm_userpolicy_type upt;
++
++ /* Sadly there are two holes in struct xfrm_userpolicy_type */
++ memset(&upt, 0, sizeof(upt));
++ upt.type = type;
+
+ return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
+ }
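
The xfrm_user hunk above switches from a designated initializer to memset() because, per the added comment, struct xfrm_userpolicy_type contains padding holes; a designated initializer zeroes the remaining members but is not guaranteed to zero padding bytes, so the old code could copy uninitialized kernel stack bytes to userspace. A hedged userspace sketch of the memset-then-assign pattern (upt_like is a made-up struct chosen only so that it has a hole; it is not the real UAPI layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* On common ABIs, a 4-byte-aligned field after a single byte leaves a hole. */
    struct upt_like {
            uint8_t  type;
            uint32_t reserved;
    };

    int main(void)
    {
            struct upt_like upt;
            const unsigned char *p = (const unsigned char *)&upt;
            size_t i;

            memset(&upt, 0, sizeof(upt));   /* zero members and padding alike */
            upt.type = 1;

            for (i = 0; i < sizeof(upt); i++)
                    printf("%02x ", p[i]);  /* only the first byte is non-zero */
            printf("\n");
            return 0;
    }
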
+diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
+index 95c16324760c..0b6f22feb2c9 100644
+--- a/samples/bpf/parse_varlen.c
++++ b/samples/bpf/parse_varlen.c
+@@ -6,6 +6,7 @@
+ */
+ #define KBUILD_MODNAME "foo"
+ #include <linux/if_ether.h>
++#include <linux/if_vlan.h>
+ #include <linux/ip.h>
+ #include <linux/ipv6.h>
+ #include <linux/in.h>
+@@ -108,11 +109,6 @@ static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)
+ return 0;
+ }
+
+-struct vlan_hdr {
+- uint16_t h_vlan_TCI;
+- uint16_t h_vlan_encapsulated_proto;
+-};
+-
+ SEC("varlen")
+ int handle_ingress(struct __sk_buff *skb)
+ {
+diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
+index d291167fd3c7..7dad9a3168e1 100644
+--- a/samples/bpf/test_overhead_user.c
++++ b/samples/bpf/test_overhead_user.c
+@@ -6,6 +6,7 @@
+ */
+ #define _GNU_SOURCE
+ #include <sched.h>
++#include <errno.h>
+ #include <stdio.h>
+ #include <sys/types.h>
+ #include <asm/unistd.h>
+@@ -44,8 +45,13 @@ static void test_task_rename(int cpu)
+ exit(1);
+ }
+ start_time = time_get_ns();
+- for (i = 0; i < MAX_CNT; i++)
+- write(fd, buf, sizeof(buf));
++ for (i = 0; i < MAX_CNT; i++) {
++ if (write(fd, buf, sizeof(buf)) < 0) {
++ printf("task rename failed: %s\n", strerror(errno));
++ close(fd);
++ return;
++ }
++ }
+ printf("task_rename:%d: %lld events per sec\n",
+ cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+ close(fd);
+@@ -63,8 +69,13 @@ static void test_urandom_read(int cpu)
+ exit(1);
+ }
+ start_time = time_get_ns();
+- for (i = 0; i < MAX_CNT; i++)
+- read(fd, buf, sizeof(buf));
++ for (i = 0; i < MAX_CNT; i++) {
++ if (read(fd, buf, sizeof(buf)) < 0) {
++ printf("failed to read from /dev/urandom: %s\n", strerror(errno));
++ close(fd);
++ return;
++ }
++ }
+ printf("urandom_read:%d: %lld events per sec\n",
+ cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+ close(fd);
+diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
+index 7bd827b84a67..c7d525e5696e 100644
+--- a/samples/bpf/trace_event_user.c
++++ b/samples/bpf/trace_event_user.c
+@@ -121,6 +121,16 @@ static void print_stacks(void)
+ }
+ }
+
++static inline int generate_load(void)
++{
++ if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
++ printf("failed to generate some load with dd: %s\n", strerror(errno));
++ return -1;
++ }
++
++ return 0;
++}
++
+ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
+ {
+ int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
+@@ -138,7 +148,11 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
+ assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
+ }
+- system("dd if=/dev/zero of=/dev/null count=5000k status=none");
++
++ if (generate_load() < 0) {
++ error = 1;
++ goto all_cpu_err;
++ }
+ print_stacks();
+ all_cpu_err:
+ for (i--; i >= 0; i--) {
+@@ -152,7 +166,7 @@ all_cpu_err:
+
+ static void test_perf_event_task(struct perf_event_attr *attr)
+ {
+- int pmu_fd;
++ int pmu_fd, error = 0;
+
+ /* open task bound event */
+ pmu_fd = sys_perf_event_open(attr, 0, -1, -1, 0);
+@@ -162,10 +176,17 @@ static void test_perf_event_task(struct perf_event_attr *attr)
+ }
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
+ assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);
+- system("dd if=/dev/zero of=/dev/null count=5000k status=none");
++
++ if (generate_load() < 0) {
++ error = 1;
++ goto err;
++ }
+ print_stacks();
++err:
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
++ if (error)
++ int_exit(0);
+ }
+
+ static void test_bpf_perf_event(void)
+diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
+index 20d9caa4be99..126e3f2e1ed7 100644
+--- a/scripts/kconfig/zconf.y
++++ b/scripts/kconfig/zconf.y
+@@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE];
+ static struct menu *current_menu, *current_entry;
+
+ %}
+-%expect 32
++%expect 31
+
+ %union
+ {
+@@ -345,7 +345,7 @@ choice_block:
+
+ /* if entry */
+
+-if_entry: T_IF expr nl
++if_entry: T_IF expr T_EOL
+ {
+ printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
+ menu_add_entry(NULL);
+diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
+index 286171a16ed2..fdf01bfd1b07 100644
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -2281,6 +2281,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
+ struct smack_known *skp = smk_of_task_struct(p);
+
+ isp->smk_inode = skp;
++ isp->smk_flags |= SMK_INODE_INSTANT;
+ }
+
+ /*
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index a4c571cb3b87..350c33ec82b3 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -2001,7 +2001,8 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
+ struct snd_seq_client *cptr = NULL;
+
+ /* search for next client */
+- info->client++;
++ if (info->client < INT_MAX)
++ info->client++;
+ if (info->client < 0)
+ info->client = 0;
+ for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
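
The clamp added above keeps the client number from being incremented past INT_MAX: info->client comes from userspace, and incrementing INT_MAX is signed overflow. A minimal sketch of the clamped-increment pattern for an untrusted starting index (a standalone illustration, not the ALSA code):

    #include <limits.h>
    #include <stdio.h>

    /* Advance a caller-supplied search index without signed overflow. */
    static int next_index(int client)
    {
            if (client < INT_MAX)
                    client++;
            if (client < 0)         /* untrusted input may be negative */
                    client = 0;
            return client;
    }

    int main(void)
    {
            printf("%d\n", next_index(5));          /* 6 */
            printf("%d\n", next_index(-7));         /* 0 */
            printf("%d\n", next_index(INT_MAX));    /* stays at INT_MAX */
            return 0;
    }
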
+diff --git a/tools/build/Makefile b/tools/build/Makefile
+index 5eb4b5ad79cb..5edf65e684ab 100644
+--- a/tools/build/Makefile
++++ b/tools/build/Makefile
+@@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE
+ $(Q)$(MAKE) $(build)=fixdep
+
+ $(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
+- $(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $<
++ $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $<
+
+ FORCE:
+
+diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
+index 4e60e105583e..0d1acb704f64 100644
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -302,19 +302,34 @@ static int read_symbols(struct elf *elf)
+ continue;
+ sym->pfunc = sym->cfunc = sym;
+ coldstr = strstr(sym->name, ".cold.");
+- if (coldstr) {
+- coldstr[0] = '\0';
+- pfunc = find_symbol_by_name(elf, sym->name);
+- coldstr[0] = '.';
+-
+- if (!pfunc) {
+- WARN("%s(): can't find parent function",
+- sym->name);
+- goto err;
+- }
+-
+- sym->pfunc = pfunc;
+- pfunc->cfunc = sym;
++ if (!coldstr)
++ continue;
++
++ coldstr[0] = '\0';
++ pfunc = find_symbol_by_name(elf, sym->name);
++ coldstr[0] = '.';
++
++ if (!pfunc) {
++ WARN("%s(): can't find parent function",
++ sym->name);
++ goto err;
++ }
++
++ sym->pfunc = pfunc;
++ pfunc->cfunc = sym;
++
++ /*
++ * Unfortunately, -fno-reorder-functions puts the child
++ * inside the parent. Remove the overlap so we can
++ * have sane assumptions.
++ *
++ * Note that pfunc->len now no longer matches
++ * pfunc->sym.st_size.
++ */
++ if (sym->sec == pfunc->sec &&
++ sym->offset >= pfunc->offset &&
++ sym->offset + sym->len == pfunc->offset + pfunc->len) {
++ pfunc->len -= sym->len;
+ }
+ }
+ }
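
The objtool hunk above finds the parent of a GCC ".cold." subfunction by temporarily truncating the child's name at ".cold.", looking the parent up by the shorter name, and then restoring the dot. A standalone sketch of just that string handling:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char name[] = "tcp_recvmsg.cold.12";
            char *coldstr = strstr(name, ".cold.");

            if (coldstr) {
                    coldstr[0] = '\0';              /* truncate at ".cold." */
                    printf("parent: %s\n", name);   /* "tcp_recvmsg" */
                    coldstr[0] = '.';               /* restore the full name */
                    printf("child:  %s\n", name);   /* "tcp_recvmsg.cold.12" */
            }
            return 0;
    }
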
+diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+index 0c370f81e002..bd630c222e65 100644
+--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
++++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+@@ -243,7 +243,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
+ u64 ip;
+ u64 skip_slot = -1;
+
+- if (chain->nr < 3)
++ if (!chain || chain->nr < 3)
+ return skip_slot;
+
+ ip = chain->ips[2];
+diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
+index 4b2caf6d48e7..fead6b3b4206 100644
+--- a/tools/perf/arch/x86/util/perf_regs.c
++++ b/tools/perf/arch/x86/util/perf_regs.c
+@@ -226,7 +226,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
+ else if (rm[2].rm_so != rm[2].rm_eo)
+ prefix[0] = '+';
+ else
+- strncpy(prefix, "+0", 2);
++ scnprintf(prefix, sizeof(prefix), "+0");
+ }
+
+ /* Rename register */
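
The perf_regs change above replaces strncpy(prefix, "+0", 2) with scnprintf() because strncpy() leaves the destination without a NUL terminator whenever the source fills the given length. A small userspace illustration, with snprintf() standing in for the kernel-style scnprintf():

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[8] = "ZZZZZZZ";        /* pre-filled so stale bytes show up */

            strncpy(buf, "+0", 2);          /* copies '+','0' but no '\0' */
            printf("strncpy:  \"%s\"\n", buf);   /* "+0ZZZZZ" */

            snprintf(buf, sizeof(buf), "+0");    /* always NUL-terminates */
            printf("snprintf: \"%s\"\n", buf);   /* "+0" */
            return 0;
    }
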
+diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
+index 944070e98a2c..0afcc7eccc61 100644
+--- a/tools/perf/bench/numa.c
++++ b/tools/perf/bench/numa.c
+@@ -1098,7 +1098,7 @@ static void *worker_thread(void *__tdata)
+ u8 *global_data;
+ u8 *process_data;
+ u8 *thread_data;
+- u64 bytes_done;
++ u64 bytes_done, secs;
+ long work_done;
+ u32 l;
+ struct rusage rusage;
+@@ -1254,7 +1254,8 @@ static void *worker_thread(void *__tdata)
+ timersub(&stop, &start0, &diff);
+ td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
+ td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
+- td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9;
++ secs = td->runtime_ns / NSEC_PER_SEC;
++ td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
+
+ getrusage(RUSAGE_THREAD, &rusage);
+ td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
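
The bench/numa fix above computes the whole seconds first and only divides when they are non-zero, since a worker that finishes in under a second would otherwise divide by zero. A minimal sketch of the guarded throughput calculation:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC    1000000000ULL

    /* Report GB/s, treating sub-second runtimes as "no measurable rate". */
    static double speed_gbs(uint64_t bytes_done, uint64_t runtime_ns)
    {
            uint64_t secs = runtime_ns / NSEC_PER_SEC;

            return secs ? (double)bytes_done / secs / 1e9 : 0.0;
    }

    int main(void)
    {
            printf("%.2f GB/s\n", speed_gbs(4ULL << 30, 2 * NSEC_PER_SEC));  /* 2.15 */
            printf("%.2f GB/s\n", speed_gbs(4ULL << 30, NSEC_PER_SEC / 2));  /* 0.00 */
            return 0;
    }
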
+diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
+index cf36de7ea255..c1d20d951434 100644
+--- a/tools/perf/jvmti/jvmti_agent.c
++++ b/tools/perf/jvmti/jvmti_agent.c
+@@ -35,6 +35,7 @@
+ #include <sys/mman.h>
+ #include <syscall.h> /* for gettid() */
+ #include <err.h>
++#include <linux/kernel.h>
+
+ #include "jvmti_agent.h"
+ #include "../util/jitdump.h"
+@@ -249,7 +250,7 @@ void *jvmti_open(void)
+ /*
+ * jitdump file name
+ */
+- snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
++ scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+
+ fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
+ if (fd == -1)
+diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
+index 81ede20f49d7..4e9dad8c9763 100644
+--- a/tools/perf/tests/topology.c
++++ b/tools/perf/tests/topology.c
+@@ -43,6 +43,7 @@ static int session_write_header(char *path)
+
+ perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
+ perf_header__set_feat(&session->header, HEADER_NRCPUS);
++ perf_header__set_feat(&session->header, HEADER_ARCH);
+
+ session->header.data_size += DATA_SIZE;
+
+diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
+index bf31ceab33bd..89512504551b 100644
+--- a/tools/perf/util/c++/clang.cpp
++++ b/tools/perf/util/c++/clang.cpp
+@@ -146,8 +146,15 @@ getBPFObjectFromModule(llvm::Module *Module)
+ raw_svector_ostream ostream(*Buffer);
+
+ legacy::PassManager PM;
+- if (TargetMachine->addPassesToEmitFile(PM, ostream,
+- TargetMachine::CGFT_ObjectFile)) {
++ bool NotAdded;
++#if CLANG_VERSION_MAJOR < 7
++ NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream,
++ TargetMachine::CGFT_ObjectFile);
++#else
++ NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, nullptr,
++ TargetMachine::CGFT_ObjectFile);
++#endif
++ if (NotAdded) {
+ llvm::errs() << "TargetMachine can't emit a file of this type\n";
+ return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);;
+ }
+diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
+index ba0cea8fef72..8a678a3d5a2a 100644
+--- a/tools/perf/util/header.c
++++ b/tools/perf/util/header.c
+@@ -1824,6 +1824,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
+ int cpu_nr = ff->ph->env.nr_cpus_avail;
+ u64 size = 0;
+ struct perf_header *ph = ff->ph;
++ bool do_core_id_test = true;
+
+ ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
+ if (!ph->env.cpu)
+@@ -1878,6 +1879,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
+ return 0;
+ }
+
++ /* On s390 the socket_id number is not related to the number of CPUs.
++ * Depending on the configuration, the socket_id can be higher than the
++ * number of CPUs.
++ */
++ if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
++ do_core_id_test = false;
++
+ for (i = 0; i < (u32)cpu_nr; i++) {
+ if (do_read_u32(ff, &nr))
+ goto free_cpu;
+@@ -1887,7 +1895,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
+ if (do_read_u32(ff, &nr))
+ goto free_cpu;
+
+- if (nr != (u32)-1 && nr > (u32)cpu_nr) {
++ if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
+ pr_debug("socket_id number is too big."
+ "You may need to upgrade the perf tool.\n");
+ goto free_cpu;
+diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
+index 4952b429caa7..2bdaac048a0a 100644
+--- a/tools/perf/util/llvm-utils.c
++++ b/tools/perf/util/llvm-utils.c
+@@ -265,16 +265,16 @@ static const char *kinc_fetch_script =
+ "#!/usr/bin/env sh\n"
+ "if ! test -d \"$KBUILD_DIR\"\n"
+ "then\n"
+-" exit -1\n"
++" exit 1\n"
+ "fi\n"
+ "if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n"
+ "then\n"
+-" exit -1\n"
++" exit 1\n"
+ "fi\n"
+ "TMPDIR=`mktemp -d`\n"
+ "if test -z \"$TMPDIR\"\n"
+ "then\n"
+-" exit -1\n"
++" exit 1\n"
+ "fi\n"
+ "cat << EOF > $TMPDIR/Makefile\n"
+ "obj-y := dummy.o\n"
+diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
+index 988310cd3049..b6115cbdf842 100644
+--- a/tools/perf/util/parse-events.y
++++ b/tools/perf/util/parse-events.y
+@@ -226,11 +226,16 @@ event_def: event_pmu |
+ event_pmu:
+ PE_NAME opt_pmu_config
+ {
++ struct parse_events_state *parse_state = _parse_state;
++ struct parse_events_error *error = parse_state->error;
+ struct list_head *list, *orig_terms, *terms;
+
+ if (parse_events_copy_term_list($2, &orig_terms))
+ YYABORT;
+
++ if (error)
++ error->idx = @1.first_column;
++
+ ALLOC_LIST(list);
+ if (parse_events_add_pmu(_parse_state, list, $1, $2)) {
+ struct perf_pmu *pmu = NULL;
+diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
+index c7187f067d31..f03fa7a835a1 100644
+--- a/tools/perf/util/scripting-engines/trace-event-python.c
++++ b/tools/perf/util/scripting-engines/trace-event-python.c
+@@ -643,14 +643,11 @@ static void python_process_tracepoint(struct perf_sample *sample,
+ if (_PyTuple_Resize(&t, n) == -1)
+ Py_FatalError("error resizing Python tuple");
+
+- if (!dict) {
++ if (!dict)
+ call_object(handler, t, handler_name);
+- } else {
++ else
+ call_object(handler, t, default_handler_name);
+- Py_DECREF(dict);
+- }
+
+- Py_XDECREF(all_entries_dict);
+ Py_DECREF(t);
+ }
+
+@@ -970,7 +967,6 @@ static void python_process_general_event(struct perf_sample *sample,
+
+ call_object(handler, t, handler_name);
+
+- Py_DECREF(dict);
+ Py_DECREF(t);
+ }
+
+diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh
+index ed4774d8d6ed..0f7b9aa9c6a5 100755
+--- a/tools/testing/selftests/bpf/test_kmod.sh
++++ b/tools/testing/selftests/bpf/test_kmod.sh
+@@ -1,6 +1,15 @@
+ #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
++msg="skip all tests:"
++if [ "$(id -u)" != "0" ]; then
++ echo $msg please run this as root >&2
++ exit $ksft_skip
++fi
++
+ SRC_TREE=../../../../
+
+ test_run()
+diff --git a/tools/testing/selftests/pstore/pstore_post_reboot_tests b/tools/testing/selftests/pstore/pstore_post_reboot_tests
+index 6ccb154cb4aa..22f8df1ad7d4 100755
+--- a/tools/testing/selftests/pstore/pstore_post_reboot_tests
++++ b/tools/testing/selftests/pstore/pstore_post_reboot_tests
+@@ -7,13 +7,16 @@
+ #
+ # Released under the terms of the GPL v2.
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
+ . ./common_tests
+
+ if [ -e $REBOOT_FLAG ]; then
+ rm $REBOOT_FLAG
+ else
+ prlog "pstore_crash_test has not been executed yet. we skip further tests."
+- exit 0
++ exit $ksft_skip
+ fi
+
+ prlog -n "Mounting pstore filesystem ... "
+diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh
+index 24cff498b31a..fc9f8cde7d42 100755
+--- a/tools/testing/selftests/static_keys/test_static_keys.sh
++++ b/tools/testing/selftests/static_keys/test_static_keys.sh
+@@ -2,6 +2,19 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Runs static keys kernel module tests
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
++if ! /sbin/modprobe -q -n test_static_key_base; then
++ echo "static_key: module test_static_key_base is not found [SKIP]"
++ exit $ksft_skip
++fi
++
++if ! /sbin/modprobe -q -n test_static_keys; then
++ echo "static_key: module test_static_keys is not found [SKIP]"
++ exit $ksft_skip
++fi
++
+ if /sbin/modprobe -q test_static_key_base; then
+ if /sbin/modprobe -q test_static_keys; then
+ echo "static_key: ok"
+diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
+new file mode 100644
+index 000000000000..1ab7e8130db2
+--- /dev/null
++++ b/tools/testing/selftests/sync/config
+@@ -0,0 +1,4 @@
++CONFIG_STAGING=y
++CONFIG_ANDROID=y
++CONFIG_SYNC=y
++CONFIG_SW_SYNC=y
+diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
+index ec232c3cfcaa..584eb8ea780a 100755
+--- a/tools/testing/selftests/sysctl/sysctl.sh
++++ b/tools/testing/selftests/sysctl/sysctl.sh
+@@ -14,6 +14,9 @@
+
+ # This performs a series tests against the proc sysctl interface.
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
+ TEST_NAME="sysctl"
+ TEST_DRIVER="test_${TEST_NAME}"
+ TEST_DIR=$(dirname $0)
+@@ -41,7 +44,7 @@ test_modprobe()
+ echo "$0: $DIR not present" >&2
+ echo "You must have the following enabled in your kernel:" >&2
+ cat $TEST_DIR/config >&2
+- exit 1
++ exit $ksft_skip
+ fi
+ }
+
+@@ -98,28 +101,30 @@ test_reqs()
+ uid=$(id -u)
+ if [ $uid -ne 0 ]; then
+ echo $msg must be run as root >&2
+- exit 0
++ exit $ksft_skip
+ fi
+
+ if ! which perl 2> /dev/null > /dev/null; then
+ echo "$0: You need perl installed"
+- exit 1
++ exit $ksft_skip
+ fi
+ if ! which getconf 2> /dev/null > /dev/null; then
+ echo "$0: You need getconf installed"
+- exit 1
++ exit $ksft_skip
+ fi
+ if ! which diff 2> /dev/null > /dev/null; then
+ echo "$0: You need diff installed"
+- exit 1
++ exit $ksft_skip
+ fi
+ }
+
+ function load_req_mod()
+ {
+- trap "test_modprobe" EXIT
+-
+ if [ ! -d $DIR ]; then
++ if ! modprobe -q -n $TEST_DRIVER; then
++ echo "$0: module $TEST_DRIVER not found [SKIP]"
++ exit $ksft_skip
++ fi
+ modprobe $TEST_DRIVER
+ if [ $? -ne 0 ]; then
+ exit
+@@ -765,6 +770,7 @@ function parse_args()
+ test_reqs
+ allow_user_defaults
+ check_production_sysctl_writes_strict
++test_modprobe
+ load_req_mod
+
+ trap "test_finish" EXIT
+diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh
+index d60506fc77f8..f9b31a57439b 100755
+--- a/tools/testing/selftests/user/test_user_copy.sh
++++ b/tools/testing/selftests/user/test_user_copy.sh
+@@ -2,6 +2,13 @@
+ # SPDX-License-Identifier: GPL-2.0
+ # Runs copy_to/from_user infrastructure using test_user_copy kernel module
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
++if ! /sbin/modprobe -q -n test_user_copy; then
++ echo "user: module test_user_copy is not found [SKIP]"
++ exit $ksft_skip
++fi
+ if /sbin/modprobe -q test_user_copy; then
+ /sbin/modprobe -q -r test_user_copy
+ echo "user_copy: ok"
+diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
+index 1097f04e4d80..bcec71250873 100644
+--- a/tools/testing/selftests/vm/compaction_test.c
++++ b/tools/testing/selftests/vm/compaction_test.c
+@@ -16,6 +16,8 @@
+ #include <unistd.h>
+ #include <string.h>
+
++#include "../kselftest.h"
++
+ #define MAP_SIZE 1048576
+
+ struct map_list {
+@@ -169,7 +171,7 @@ int main(int argc, char **argv)
+ printf("Either the sysctl compact_unevictable_allowed is not\n"
+ "set to 1 or couldn't read the proc file.\n"
+ "Skipping the test\n");
+- return 0;
++ return KSFT_SKIP;
+ }
+
+ lim.rlim_cur = RLIM_INFINITY;
+diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c
+index 4997b9222cfa..637b6d0ac0d0 100644
+--- a/tools/testing/selftests/vm/mlock2-tests.c
++++ b/tools/testing/selftests/vm/mlock2-tests.c
+@@ -9,6 +9,8 @@
+ #include <stdbool.h>
+ #include "mlock2.h"
+
++#include "../kselftest.h"
++
+ struct vm_boundaries {
+ unsigned long start;
+ unsigned long end;
+@@ -303,7 +305,7 @@ static int test_mlock_lock()
+ if (mlock2_(map, 2 * page_size, 0)) {
+ if (errno == ENOSYS) {
+ printf("Cannot call new mlock family, skipping test\n");
+- _exit(0);
++ _exit(KSFT_SKIP);
+ }
+ perror("mlock2(0)");
+ goto unmap;
+@@ -412,7 +414,7 @@ static int test_mlock_onfault()
+ if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
+ if (errno == ENOSYS) {
+ printf("Cannot call new mlock family, skipping test\n");
+- _exit(0);
++ _exit(KSFT_SKIP);
+ }
+ perror("mlock2(MLOCK_ONFAULT)");
+ goto unmap;
+@@ -425,7 +427,7 @@ static int test_mlock_onfault()
+ if (munlock(map, 2 * page_size)) {
+ if (errno == ENOSYS) {
+ printf("Cannot call new mlock family, skipping test\n");
+- _exit(0);
++ _exit(KSFT_SKIP);
+ }
+ perror("munlock()");
+ goto unmap;
+@@ -457,7 +459,7 @@ static int test_lock_onfault_of_present()
+ if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
+ if (errno == ENOSYS) {
+ printf("Cannot call new mlock family, skipping test\n");
+- _exit(0);
++ _exit(KSFT_SKIP);
+ }
+ perror("mlock2(MLOCK_ONFAULT)");
+ goto unmap;
+@@ -583,7 +585,7 @@ static int test_vma_management(bool call_mlock)
+ if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) {
+ if (errno == ENOSYS) {
+ printf("Cannot call new mlock family, skipping test\n");
+- _exit(0);
++ _exit(KSFT_SKIP);
+ }
+ perror("mlock(ONFAULT)\n");
+ goto out;
+diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
+index 45708aa3ce47..57ab6ac0d4a4 100755
+--- a/tools/testing/selftests/vm/run_vmtests
++++ b/tools/testing/selftests/vm/run_vmtests
+@@ -2,6 +2,9 @@
+ # SPDX-License-Identifier: GPL-2.0
+ #please run as root
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
+ mnt=./huge
+ exitcode=0
+
+@@ -36,7 +39,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
+ echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
+ if [ $? -ne 0 ]; then
+ echo "Please run this test as root"
+- exit 1
++ exit $ksft_skip
+ fi
+ while read name size unit; do
+ if [ "$name" = "HugePages_Free:" ]; then
+diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
+index de2f9ec8a87f..7b8171e3128a 100644
+--- a/tools/testing/selftests/vm/userfaultfd.c
++++ b/tools/testing/selftests/vm/userfaultfd.c
+@@ -69,6 +69,8 @@
+ #include <setjmp.h>
+ #include <stdbool.h>
+
++#include "../kselftest.h"
++
+ #ifdef __NR_userfaultfd
+
+ static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
+@@ -1322,7 +1324,7 @@ int main(int argc, char **argv)
+ int main(void)
+ {
+ printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
+- return 0;
++ return KSFT_SKIP;
+ }
+
+ #endif /* __NR_userfaultfd */
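
The selftest hunks above return KSFT_SKIP (4) instead of 0 when a prerequisite is missing, so the kselftest harness records the test as skipped rather than passed. A minimal C sketch of that convention; the root check here is just an example prerequisite:

    #include <stdio.h>
    #include <unistd.h>

    #define KSFT_SKIP 4     /* kselftest framework: exit code for "skipped" */

    int main(void)
    {
            if (geteuid() != 0) {
                    printf("skip: this test requires root\n");
                    return KSFT_SKIP;
            }
            printf("ok: running test body\n");
            return 0;
    }
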
+diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
+index 246145b84a12..4d9dc3f2fd70 100644
+--- a/tools/testing/selftests/x86/sigreturn.c
++++ b/tools/testing/selftests/x86/sigreturn.c
+@@ -610,21 +610,41 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
+ */
+ for (int i = 0; i < NGREG; i++) {
+ greg_t req = requested_regs[i], res = resulting_regs[i];
++
+ if (i == REG_TRAPNO || i == REG_IP)
+ continue; /* don't care */
+- if (i == REG_SP) {
+- printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
+- (unsigned long long)res);
+
++ if (i == REG_SP) {
+ /*
+- * In many circumstances, the high 32 bits of rsp
+- * are zeroed. For example, we could be a real
+- * 32-bit program, or we could hit any of a number
+- * of poorly-documented IRET or segmented ESP
+- * oddities. If this happens, it's okay.
++ * If we were using a 16-bit stack segment, then
++ * the kernel is a bit stuck: IRET only restores
++ * the low 16 bits of ESP/RSP if SS is 16-bit.
++ * The kernel uses a hack to restore bits 31:16,
++ * but that hack doesn't help with bits 63:32.
++ * On Intel CPUs, bits 63:32 end up zeroed, and, on
++ * AMD CPUs, they leak the high bits of the kernel
++ * espfix64 stack pointer. There's very little that
++ * the kernel can do about it.
++ *
++ * Similarly, if we are returning to a 32-bit context,
++ * the CPU will often lose the high 32 bits of RSP.
+ */
+- if (res == (req & 0xFFFFFFFF))
+- continue; /* OK; not expected to work */
++
++ if (res == req)
++ continue;
++
++ if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
++ printf("[NOTE]\tSP: %llx -> %llx\n",
++ (unsigned long long)req,
++ (unsigned long long)res);
++ continue;
++ }
++
++ printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
++ (unsigned long long)requested_regs[i],
++ (unsigned long long)resulting_regs[i]);
++ nerrs++;
++ continue;
+ }
+
+ bool ignore_reg = false;
+@@ -654,25 +674,18 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
+ #endif
+
+ /* Sanity check on the kernel */
+- if (i == REG_CX && requested_regs[i] != resulting_regs[i]) {
++ if (i == REG_CX && req != res) {
+ printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
+- (unsigned long long)requested_regs[i],
+- (unsigned long long)resulting_regs[i]);
++ (unsigned long long)req,
++ (unsigned long long)res);
+ nerrs++;
+ continue;
+ }
+
+- if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
+- /*
+- * SP is particularly interesting here. The
+- * usual cause of failures is that we hit the
+- * nasty IRET case of returning to a 16-bit SS,
+- * in which case bits 16:31 of the *kernel*
+- * stack pointer persist in ESP.
+- */
++ if (req != res && !ignore_reg) {
+ printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
+- i, (unsigned long long)requested_regs[i],
+- (unsigned long long)resulting_regs[i]);
++ i, (unsigned long long)req,
++ (unsigned long long)res);
+ nerrs++;
+ }
+ }
+diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
+index 754de7da426a..232e958ec454 100755
+--- a/tools/testing/selftests/zram/zram.sh
++++ b/tools/testing/selftests/zram/zram.sh
+@@ -2,6 +2,9 @@
+ # SPDX-License-Identifier: GPL-2.0
+ TCID="zram.sh"
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
+ . ./zram_lib.sh
+
+ run_zram () {
+@@ -24,5 +27,5 @@ elif [ -b /dev/zram0 ]; then
+ else
+ echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
+ echo "$TCID : CONFIG_ZRAM is not set"
+- exit 1
++ exit $ksft_skip
+ fi
+diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
+index f6a9c73e7a44..9e73a4fb9b0a 100755
+--- a/tools/testing/selftests/zram/zram_lib.sh
++++ b/tools/testing/selftests/zram/zram_lib.sh
+@@ -18,6 +18,9 @@ MODULE=0
+ dev_makeswap=-1
+ dev_mounted=-1
+
++# Kselftest framework requirement - SKIP code is 4.
++ksft_skip=4
++
+ trap INT
+
+ check_prereqs()
+@@ -27,7 +30,7 @@ check_prereqs()
+
+ if [ $uid -ne 0 ]; then
+ echo $msg must be run as root >&2
+- exit 0
++ exit $ksft_skip
+ fi
+ }
+
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index 6b4fcd52f14c..a37b03c25457 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -492,11 +492,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
+ pr_warn("GICV physical address 0x%llx not page aligned\n",
+ (unsigned long long)info->vcpu.start);
+ kvm_vgic_global_state.vcpu_base = 0;
+- } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
+- pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+- (unsigned long long)resource_size(&info->vcpu),
+- PAGE_SIZE);
+- kvm_vgic_global_state.vcpu_base = 0;
+ } else {
+ kvm_vgic_global_state.vcpu_base = info->vcpu.start;
+ kvm_vgic_global_state.can_emulate_gicv2 = true;
+diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
+index 58a9b31b0dd5..088734a700e9 100644
+--- a/virt/kvm/eventfd.c
++++ b/virt/kvm/eventfd.c
+@@ -405,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+ if (events & POLLIN)
+ schedule_work(&irqfd->inject);
+
+- /*
+- * do not drop the file until the irqfd is fully initialized, otherwise
+- * we might race against the POLLHUP
+- */
+- fdput(f);
+ #ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
+ if (kvm_arch_has_irq_bypass()) {
+ irqfd->consumer.token = (void *)irqfd->eventfd;
+@@ -425,6 +420,12 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
+ #endif
+
+ srcu_read_unlock(&kvm->irq_srcu, idx);
++
++ /*
++ * do not drop the file until the irqfd is fully initialized, otherwise
++ * we might race against the POLLHUP
++ */
++ fdput(f);
+ return 0;
+
+ fail: