-rw-r--r--  0000_README              |    4
-rw-r--r--  1059_linux-4.14.60.patch | 7461
2 files changed, 7465 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 7ca7e18c..685cb5dc 100644 --- a/0000_README +++ b/0000_README @@ -279,6 +279,10 @@ Patch: 1058_linux-4.14.59.patch From: http://www.kernel.org Desc: Linux 4.14.59 +Patch: 1059_linux-4.14.60.patch +From: http://www.kernel.org +Desc: Linux 4.14.60 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1059_linux-4.14.60.patch b/1059_linux-4.14.60.patch new file mode 100644 index 00000000..03968ccd --- /dev/null +++ b/1059_linux-4.14.60.patch @@ -0,0 +1,7461 @@ +diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.txt b/Documentation/devicetree/bindings/net/dsa/qca8k.txt +index 9c67ee4890d7..bbcb255c3150 100644 +--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt ++++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt +@@ -2,7 +2,10 @@ + + Required properties: + +-- compatible: should be "qca,qca8337" ++- compatible: should be one of: ++ "qca,qca8334" ++ "qca,qca8337" ++ + - #size-cells: must be 0 + - #address-cells: must be 1 + +@@ -14,6 +17,20 @@ port and PHY id, each subnode describing a port needs to have a valid phandle + referencing the internal PHY connected to it. The CPU port of this switch is + always port 0. + ++A CPU port node has the following optional node: ++ ++- fixed-link : Fixed-link subnode describing a link to a non-MDIO ++ managed entity. See ++ Documentation/devicetree/bindings/net/fixed-link.txt ++ for details. ++ ++For QCA8K the 'fixed-link' sub-node supports only the following properties: ++ ++- 'speed' (integer, mandatory), to indicate the link speed. Accepted ++ values are 10, 100 and 1000 ++- 'full-duplex' (boolean, optional), to indicate that full duplex is ++ used. When absent, half duplex is assumed. ++ + Example: + + +@@ -53,6 +70,10 @@ Example: + label = "cpu"; + ethernet = <&gmac1>; + phy-mode = "rgmii"; ++ fixed-link { ++ speed = 1000; ++ full-duplex; ++ }; + }; + + port@1 { +diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt +index 354dd9896bb5..910187ebf1ce 100644 +--- a/Documentation/devicetree/bindings/net/meson-dwmac.txt ++++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt +@@ -10,6 +10,7 @@ Required properties on all platforms: + - "amlogic,meson6-dwmac" + - "amlogic,meson8b-dwmac" + - "amlogic,meson-gxbb-dwmac" ++ - "amlogic,meson-axg-dwmac" + Additionally "snps,dwmac" and any applicable more + detailed version number described in net/stmmac.txt + should be used. 
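Editor's note: the meson-dwmac binding text above also asks that "snps,dwmac", plus any applicable more detailed dwmac version compatible, accompany the SoC-specific string. A minimal sketch of an AXG ethernet node is shown below; the node label, unit address, interrupt number and clock phandles are illustrative assumptions, not values taken from this patch.

	ethmac: ethernet@ff3f0000 {		/* unit address is a placeholder */
		compatible = "amlogic,meson-axg-dwmac",
			     "snps,dwmac-3.70a",
			     "snps,dwmac";
		reg = <0x0 0xff3f0000 0x0 0x10000>;	/* assumed MAC register range */
		interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-names = "macirq";
		clocks = <&clkc CLKID_ETH>,
			 <&clkc CLKID_FCLK_DIV2>,
			 <&clkc CLKID_MPLL2>;
		clock-names = "stmmaceth", "clkin0", "clkin1";
		phy-mode = "rgmii";
		phy-handle = <&eth_phy0>;
	};

The clock-names triplet mirrors what the existing meson8b/gxbb entries in this binding already use; only the three-entry compatible list is the point being illustrated.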
+diff --git a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt +index 2392557ede27..df77d394edc0 100644 +--- a/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt ++++ b/Documentation/devicetree/bindings/pinctrl/meson,pinctrl.txt +@@ -3,8 +3,10 @@ + Required properties for the root node: + - compatible: one of "amlogic,meson8-cbus-pinctrl" + "amlogic,meson8b-cbus-pinctrl" ++ "amlogic,meson8m2-cbus-pinctrl" + "amlogic,meson8-aobus-pinctrl" + "amlogic,meson8b-aobus-pinctrl" ++ "amlogic,meson8m2-aobus-pinctrl" + "amlogic,meson-gxbb-periphs-pinctrl" + "amlogic,meson-gxbb-aobus-pinctrl" + "amlogic,meson-gxl-periphs-pinctrl" +diff --git a/Documentation/vfio-mediated-device.txt b/Documentation/vfio-mediated-device.txt +index 1b3950346532..c3f69bcaf96e 100644 +--- a/Documentation/vfio-mediated-device.txt ++++ b/Documentation/vfio-mediated-device.txt +@@ -145,6 +145,11 @@ The functions in the mdev_parent_ops structure are as follows: + * create: allocate basic resources in a driver for a mediated device + * remove: free resources in a driver when a mediated device is destroyed + ++(Note that mdev-core provides no implicit serialization of create/remove ++callbacks per mdev parent device, per mdev type, or any other categorization. ++Vendor drivers are expected to be fully asynchronous in this respect or ++provide their own internal resource protection.) ++ + The callbacks in the mdev_parent_ops structure are as follows: + + * open: open callback of mediated device +diff --git a/Makefile b/Makefile +index 81b0e99dce80..5b48ec630990 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 4 + PATCHLEVEL = 14 +-SUBLEVEL = 59 ++SUBLEVEL = 60 + EXTRAVERSION = + NAME = Petit Gorille + +diff --git a/arch/arm/boot/dts/emev2.dtsi b/arch/arm/boot/dts/emev2.dtsi +index 42ea246e71cb..fec1241b858f 100644 +--- a/arch/arm/boot/dts/emev2.dtsi ++++ b/arch/arm/boot/dts/emev2.dtsi +@@ -31,13 +31,13 @@ + #address-cells = <1>; + #size-cells = <0>; + +- cpu@0 { ++ cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a9"; + reg = <0>; + clock-frequency = <533000000>; + }; +- cpu@1 { ++ cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a9"; + reg = <1>; +@@ -57,6 +57,7 @@ + compatible = "arm,cortex-a9-pmu"; + interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-affinity = <&cpu0>, <&cpu1>; + }; + + clocks@e0110000 { +diff --git a/arch/arm/boot/dts/sh73a0.dtsi b/arch/arm/boot/dts/sh73a0.dtsi +index 4ea5c5a16c57..5fc24d4c2d5d 100644 +--- a/arch/arm/boot/dts/sh73a0.dtsi ++++ b/arch/arm/boot/dts/sh73a0.dtsi +@@ -22,7 +22,7 @@ + #address-cells = <1>; + #size-cells = <0>; + +- cpu@0 { ++ cpu0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a9"; + reg = <0>; +@@ -30,7 +30,7 @@ + power-domains = <&pd_a2sl>; + next-level-cache = <&L2>; + }; +- cpu@1 { ++ cpu1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a9"; + reg = <1>; +@@ -89,6 +89,7 @@ + compatible = "arm,cortex-a9-pmu"; + interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>; ++ interrupt-affinity = <&cpu0>, <&cpu1>; + }; + + cmt1: timer@e6138000 { +diff --git a/arch/arm/boot/dts/stih407-pinctrl.dtsi b/arch/arm/boot/dts/stih407-pinctrl.dtsi +index bd1a82e8fffe..fe501d32d059 100644 +--- a/arch/arm/boot/dts/stih407-pinctrl.dtsi ++++ b/arch/arm/boot/dts/stih407-pinctrl.dtsi +@@ -52,7 +52,7 @@ + st,syscfg = <&syscfg_sbc>; + reg = 
<0x0961f080 0x4>; + reg-names = "irqmux"; +- interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>; ++ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "irqmux"; + ranges = <0 0x09610000 0x6000>; + +@@ -376,7 +376,7 @@ + st,syscfg = <&syscfg_front>; + reg = <0x0920f080 0x4>; + reg-names = "irqmux"; +- interrupts = <GIC_SPI 189 IRQ_TYPE_NONE>; ++ interrupts = <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "irqmux"; + ranges = <0 0x09200000 0x10000>; + +@@ -936,7 +936,7 @@ + st,syscfg = <&syscfg_front>; + reg = <0x0921f080 0x4>; + reg-names = "irqmux"; +- interrupts = <GIC_SPI 190 IRQ_TYPE_NONE>; ++ interrupts = <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "irqmux"; + ranges = <0 0x09210000 0x10000>; + +@@ -969,7 +969,7 @@ + st,syscfg = <&syscfg_rear>; + reg = <0x0922f080 0x4>; + reg-names = "irqmux"; +- interrupts = <GIC_SPI 191 IRQ_TYPE_NONE>; ++ interrupts = <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "irqmux"; + ranges = <0 0x09220000 0x6000>; + +@@ -1164,7 +1164,7 @@ + st,syscfg = <&syscfg_flash>; + reg = <0x0923f080 0x4>; + reg-names = "irqmux"; +- interrupts = <GIC_SPI 192 IRQ_TYPE_NONE>; ++ interrupts = <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "irqmux"; + ranges = <0 0x09230000 0x3000>; + +diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c +index 323a4df59a6c..ece2d1d43724 100644 +--- a/arch/arm/net/bpf_jit_32.c ++++ b/arch/arm/net/bpf_jit_32.c +@@ -718,7 +718,7 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk, + } + + /* dst = dst >> src */ +-static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, ++static inline void emit_a32_rsh_r64(const u8 dst[], const u8 src[], bool dstk, + bool sstk, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; +@@ -734,7 +734,7 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk, + emit(ARM_LDR_I(rm, ARM_SP, STACK_VAR(dst_hi)), ctx); + } + +- /* Do LSH operation */ ++ /* Do RSH operation */ + emit(ARM_RSB_I(ARM_IP, rt, 32), ctx); + emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx); + emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx); +@@ -784,7 +784,7 @@ static inline void emit_a32_lsh_i64(const u8 dst[], bool dstk, + } + + /* dst = dst >> val */ +-static inline void emit_a32_lsr_i64(const u8 dst[], bool dstk, ++static inline void emit_a32_rsh_i64(const u8 dst[], bool dstk, + const u32 val, struct jit_ctx *ctx) { + const u8 *tmp = bpf2a32[TMP_REG_1]; + const u8 *tmp2 = bpf2a32[TMP_REG_2]; +@@ -1340,7 +1340,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) + case BPF_ALU64 | BPF_RSH | BPF_K: + if (unlikely(imm > 63)) + return -EINVAL; +- emit_a32_lsr_i64(dst, dstk, imm, ctx); ++ emit_a32_rsh_i64(dst, dstk, imm, ctx); + break; + /* dst = dst << src */ + case BPF_ALU64 | BPF_LSH | BPF_X: +@@ -1348,7 +1348,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) + break; + /* dst = dst >> src */ + case BPF_ALU64 | BPF_RSH | BPF_X: +- emit_a32_lsr_r64(dst, src, dstk, sstk, ctx); ++ emit_a32_rsh_r64(dst, src, dstk, sstk, ctx); + break; + /* dst = dst >> src (signed) */ + case BPF_ALU64 | BPF_ARSH | BPF_X: +diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi +index 9eb11a8d9eda..26a978616071 100644 +--- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi ++++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi +@@ -93,20 +93,12 @@ + regulator-always-on; + }; + +- 
rsnd_ak4613: sound { +- compatible = "simple-audio-card"; ++ sound_card: sound { ++ compatible = "audio-graph-card"; + +- simple-audio-card,format = "left_j"; +- simple-audio-card,bitclock-master = <&sndcpu>; +- simple-audio-card,frame-master = <&sndcpu>; ++ label = "rcar-sound"; + +- sndcpu: simple-audio-card,cpu { +- sound-dai = <&rcar_sound>; +- }; +- +- sndcodec: simple-audio-card,codec { +- sound-dai = <&ak4613>; +- }; ++ dais = <&rsnd_port0>; + }; + + vbus0_usb2: regulator-vbus0-usb2 { +@@ -320,6 +312,12 @@ + asahi-kasei,out4-single-end; + asahi-kasei,out5-single-end; + asahi-kasei,out6-single-end; ++ ++ port { ++ ak4613_endpoint: endpoint { ++ remote-endpoint = <&rsnd_endpoint0>; ++ }; ++ }; + }; + + cs2000: clk_multiplier@4f { +@@ -538,10 +536,18 @@ + <&audio_clk_c>, + <&cpg CPG_CORE CPG_AUDIO_CLK_I>; + +- rcar_sound,dai { +- dai0 { +- playback = <&ssi0 &src0 &dvc0>; +- capture = <&ssi1 &src1 &dvc1>; ++ ports { ++ rsnd_port0: port@0 { ++ rsnd_endpoint0: endpoint { ++ remote-endpoint = <&ak4613_endpoint>; ++ ++ dai-format = "left_j"; ++ bitclock-master = <&rsnd_endpoint0>; ++ frame-master = <&rsnd_endpoint0>; ++ ++ playback = <&ssi0 &src0 &dvc0>; ++ capture = <&ssi1 &src1 &dvc1>; ++ }; + }; + }; + }; +diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig +index 34480e9af2e7..b05796578e7a 100644 +--- a/arch/arm64/configs/defconfig ++++ b/arch/arm64/configs/defconfig +@@ -302,6 +302,8 @@ CONFIG_GPIO_XGENE_SB=y + CONFIG_GPIO_PCA953X=y + CONFIG_GPIO_PCA953X_IRQ=y + CONFIG_GPIO_MAX77620=y ++CONFIG_POWER_AVS=y ++CONFIG_ROCKCHIP_IODOMAIN=y + CONFIG_POWER_RESET_MSM=y + CONFIG_POWER_RESET_XGENE=y + CONFIG_POWER_RESET_SYSCON=y +diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h +index ae852add053d..0f2e1ab5e166 100644 +--- a/arch/arm64/include/asm/cmpxchg.h ++++ b/arch/arm64/include/asm/cmpxchg.h +@@ -229,7 +229,9 @@ static inline void __cmpwait_case_##name(volatile void *ptr, \ + unsigned long tmp; \ + \ + asm volatile( \ +- " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ ++ " sevl\n" \ ++ " wfe\n" \ ++ " ldxr" #sz "\t%" #w "[tmp], %[v]\n" \ + " eor %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n" \ + " cbnz %" #w "[tmp], 1f\n" \ + " wfe\n" \ +diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c +index 00e7b900ca41..1190d90e01e6 100644 +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -651,11 +651,13 @@ void __init mem_init(void) + BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64); + #endif + ++#ifdef CONFIG_SPARSEMEM_VMEMMAP + /* + * Make sure we chose the upper bound of sizeof(struct page) +- * correctly. ++ * correctly when sizing the VMEMMAP array. 
+ */ + BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT)); ++#endif + + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { + extern int sysctl_overcommit_memory; +diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile +index 47f94cc383b6..7c2f52d4a0e4 100644 +--- a/arch/microblaze/boot/Makefile ++++ b/arch/microblaze/boot/Makefile +@@ -22,17 +22,19 @@ $(obj)/linux.bin.gz: $(obj)/linux.bin FORCE + quiet_cmd_cp = CP $< $@$2 + cmd_cp = cat $< >$@$2 || (rm -f $@ && echo false) + +-quiet_cmd_strip = STRIP $@ ++quiet_cmd_strip = STRIP $< $@$2 + cmd_strip = $(STRIP) -K microblaze_start -K _end -K __log_buf \ +- -K _fdt_start vmlinux -o $@ ++ -K _fdt_start $< -o $@$2 + + UIMAGE_LOADADDR = $(CONFIG_KERNEL_BASE_ADDR) ++UIMAGE_IN = $@ ++UIMAGE_OUT = $@.ub + + $(obj)/simpleImage.%: vmlinux FORCE + $(call if_changed,cp,.unstrip) + $(call if_changed,objcopy) + $(call if_changed,uimage) +- $(call if_changed,strip) +- @echo 'Kernel: $@ is ready' ' (#'`cat .version`')' ++ $(call if_changed,strip,.strip) ++ @echo 'Kernel: $(UIMAGE_OUT) is ready' ' (#'`cat .version`')' + + clean-files += simpleImage.*.unstrip linux.bin.ub dts/*.dtb +diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h +index c7c63959ba91..e582d2c88092 100644 +--- a/arch/powerpc/include/asm/barrier.h ++++ b/arch/powerpc/include/asm/barrier.h +@@ -76,6 +76,21 @@ do { \ + ___p1; \ + }) + ++#ifdef CONFIG_PPC_BOOK3S_64 ++/* ++ * Prevent execution of subsequent instructions until preceding branches have ++ * been fully resolved and are no longer executing speculatively. ++ */ ++#define barrier_nospec_asm ori 31,31,0 ++ ++// This also acts as a compiler barrier due to the memory clobber. ++#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") ++ ++#else /* !CONFIG_PPC_BOOK3S_64 */ ++#define barrier_nospec_asm ++#define barrier_nospec() ++#endif ++ + #include <asm-generic/barrier.h> + + #endif /* _ASM_POWERPC_BARRIER_H */ +diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h +index c1d257aa4c2d..66298461b640 100644 +--- a/arch/powerpc/include/asm/cache.h ++++ b/arch/powerpc/include/asm/cache.h +@@ -9,11 +9,14 @@ + #if defined(CONFIG_PPC_8xx) || defined(CONFIG_403GCX) + #define L1_CACHE_SHIFT 4 + #define MAX_COPY_PREFETCH 1 ++#define IFETCH_ALIGN_SHIFT 2 + #elif defined(CONFIG_PPC_E500MC) + #define L1_CACHE_SHIFT 6 + #define MAX_COPY_PREFETCH 4 ++#define IFETCH_ALIGN_SHIFT 3 + #elif defined(CONFIG_PPC32) + #define MAX_COPY_PREFETCH 4 ++#define IFETCH_ALIGN_SHIFT 3 /* 603 fetches 2 insn at a time */ + #if defined(CONFIG_PPC_47x) + #define L1_CACHE_SHIFT 7 + #else +diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c +index ca2243df9cb2..470284f9e4f6 100644 +--- a/arch/powerpc/kernel/eeh_driver.c ++++ b/arch/powerpc/kernel/eeh_driver.c +@@ -450,9 +450,11 @@ static void *eeh_add_virt_device(void *data, void *userdata) + + driver = eeh_pcid_get(dev); + if (driver) { +- eeh_pcid_put(dev); +- if (driver->err_handler) ++ if (driver->err_handler) { ++ eeh_pcid_put(dev); + return NULL; ++ } ++ eeh_pcid_put(dev); + } + + #ifdef CONFIG_PPC_POWERNV +@@ -489,17 +491,19 @@ static void *eeh_rmv_device(void *data, void *userdata) + if (eeh_dev_removed(edev)) + return NULL; + +- driver = eeh_pcid_get(dev); +- if (driver) { +- eeh_pcid_put(dev); +- if (removed && +- eeh_pe_passed(edev->pe)) +- return NULL; +- if (removed && +- driver->err_handler && +- driver->err_handler->error_detected && +- 
driver->err_handler->slot_reset) ++ if (removed) { ++ if (eeh_pe_passed(edev->pe)) + return NULL; ++ driver = eeh_pcid_get(dev); ++ if (driver) { ++ if (driver->err_handler && ++ driver->err_handler->error_detected && ++ driver->err_handler->slot_reset) { ++ eeh_pcid_put(dev); ++ return NULL; ++ } ++ eeh_pcid_put(dev); ++ } + } + + /* Remove it from PCI subsystem */ +diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S +index 4fee00d414e8..2d0d89e2cb9a 100644 +--- a/arch/powerpc/kernel/head_8xx.S ++++ b/arch/powerpc/kernel/head_8xx.S +@@ -958,7 +958,7 @@ start_here: + tovirt(r6,r6) + lis r5, abatron_pteptrs@h + ori r5, r5, abatron_pteptrs@l +- stw r5, 0xf0(r0) /* Must match your Abatron config file */ ++ stw r5, 0xf0(0) /* Must match your Abatron config file */ + tophys(r5,r5) + stw r6, 0(r5) + +diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c +index 1d817f4d97d9..2094f2b249fd 100644 +--- a/arch/powerpc/kernel/pci_32.c ++++ b/arch/powerpc/kernel/pci_32.c +@@ -11,6 +11,7 @@ + #include <linux/sched.h> + #include <linux/errno.h> + #include <linux/bootmem.h> ++#include <linux/syscalls.h> + #include <linux/irq.h> + #include <linux/list.h> + #include <linux/of.h> +diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c +index 02190e90c7ae..f8782c7ef50f 100644 +--- a/arch/powerpc/kernel/prom_init.c ++++ b/arch/powerpc/kernel/prom_init.c +@@ -334,6 +334,7 @@ static void __init prom_print_dec(unsigned long val) + call_prom("write", 3, 1, prom.stdout, buf+i, size); + } + ++__printf(1, 2) + static void __init prom_printf(const char *format, ...) + { + const char *p, *q, *s; +@@ -1148,7 +1149,7 @@ static void __init prom_send_capabilities(void) + */ + + cores = DIV_ROUND_UP(NR_CPUS, prom_count_smt_threads()); +- prom_printf("Max number of cores passed to firmware: %lu (NR_CPUS = %lu)\n", ++ prom_printf("Max number of cores passed to firmware: %u (NR_CPUS = %d)\n", + cores, NR_CPUS); + + ibm_architecture_vec.vec5.max_cpus = cpu_to_be32(cores); +@@ -1230,7 +1231,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) + + if (align) + base = _ALIGN_UP(base, align); +- prom_debug("alloc_up(%x, %x)\n", size, align); ++ prom_debug("%s(%lx, %lx)\n", __func__, size, align); + if (ram_top == 0) + prom_panic("alloc_up() called with mem not initialized\n"); + +@@ -1241,7 +1242,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) + + for(; (base + size) <= alloc_top; + base = _ALIGN_UP(base + 0x100000, align)) { +- prom_debug(" trying: 0x%x\n\r", base); ++ prom_debug(" trying: 0x%lx\n\r", base); + addr = (unsigned long)prom_claim(base, size, 0); + if (addr != PROM_ERROR && addr != 0) + break; +@@ -1253,12 +1254,12 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align) + return 0; + alloc_bottom = addr + size; + +- prom_debug(" -> %x\n", addr); +- prom_debug(" alloc_bottom : %x\n", alloc_bottom); +- prom_debug(" alloc_top : %x\n", alloc_top); +- prom_debug(" alloc_top_hi : %x\n", alloc_top_high); +- prom_debug(" rmo_top : %x\n", rmo_top); +- prom_debug(" ram_top : %x\n", ram_top); ++ prom_debug(" -> %lx\n", addr); ++ prom_debug(" alloc_bottom : %lx\n", alloc_bottom); ++ prom_debug(" alloc_top : %lx\n", alloc_top); ++ prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); ++ prom_debug(" rmo_top : %lx\n", rmo_top); ++ prom_debug(" ram_top : %lx\n", ram_top); + + return addr; + } +@@ -1273,7 +1274,7 @@ static unsigned long __init alloc_down(unsigned long size, 
unsigned long align, + { + unsigned long base, addr = 0; + +- prom_debug("alloc_down(%x, %x, %s)\n", size, align, ++ prom_debug("%s(%lx, %lx, %s)\n", __func__, size, align, + highmem ? "(high)" : "(low)"); + if (ram_top == 0) + prom_panic("alloc_down() called with mem not initialized\n"); +@@ -1301,7 +1302,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, + base = _ALIGN_DOWN(alloc_top - size, align); + for (; base > alloc_bottom; + base = _ALIGN_DOWN(base - 0x100000, align)) { +- prom_debug(" trying: 0x%x\n\r", base); ++ prom_debug(" trying: 0x%lx\n\r", base); + addr = (unsigned long)prom_claim(base, size, 0); + if (addr != PROM_ERROR && addr != 0) + break; +@@ -1312,12 +1313,12 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align, + alloc_top = addr; + + bail: +- prom_debug(" -> %x\n", addr); +- prom_debug(" alloc_bottom : %x\n", alloc_bottom); +- prom_debug(" alloc_top : %x\n", alloc_top); +- prom_debug(" alloc_top_hi : %x\n", alloc_top_high); +- prom_debug(" rmo_top : %x\n", rmo_top); +- prom_debug(" ram_top : %x\n", ram_top); ++ prom_debug(" -> %lx\n", addr); ++ prom_debug(" alloc_bottom : %lx\n", alloc_bottom); ++ prom_debug(" alloc_top : %lx\n", alloc_top); ++ prom_debug(" alloc_top_hi : %lx\n", alloc_top_high); ++ prom_debug(" rmo_top : %lx\n", rmo_top); ++ prom_debug(" ram_top : %lx\n", ram_top); + + return addr; + } +@@ -1443,7 +1444,7 @@ static void __init prom_init_mem(void) + + if (size == 0) + continue; +- prom_debug(" %x %x\n", base, size); ++ prom_debug(" %lx %lx\n", base, size); + if (base == 0 && (of_platform & PLATFORM_LPAR)) + rmo_top = size; + if ((base + size) > ram_top) +@@ -1463,12 +1464,12 @@ static void __init prom_init_mem(void) + + if (prom_memory_limit) { + if (prom_memory_limit <= alloc_bottom) { +- prom_printf("Ignoring mem=%x <= alloc_bottom.\n", +- prom_memory_limit); ++ prom_printf("Ignoring mem=%lx <= alloc_bottom.\n", ++ prom_memory_limit); + prom_memory_limit = 0; + } else if (prom_memory_limit >= ram_top) { +- prom_printf("Ignoring mem=%x >= ram_top.\n", +- prom_memory_limit); ++ prom_printf("Ignoring mem=%lx >= ram_top.\n", ++ prom_memory_limit); + prom_memory_limit = 0; + } else { + ram_top = prom_memory_limit; +@@ -1500,12 +1501,13 @@ static void __init prom_init_mem(void) + alloc_bottom = PAGE_ALIGN(prom_initrd_end); + + prom_printf("memory layout at init:\n"); +- prom_printf(" memory_limit : %x (16 MB aligned)\n", prom_memory_limit); +- prom_printf(" alloc_bottom : %x\n", alloc_bottom); +- prom_printf(" alloc_top : %x\n", alloc_top); +- prom_printf(" alloc_top_hi : %x\n", alloc_top_high); +- prom_printf(" rmo_top : %x\n", rmo_top); +- prom_printf(" ram_top : %x\n", ram_top); ++ prom_printf(" memory_limit : %lx (16 MB aligned)\n", ++ prom_memory_limit); ++ prom_printf(" alloc_bottom : %lx\n", alloc_bottom); ++ prom_printf(" alloc_top : %lx\n", alloc_top); ++ prom_printf(" alloc_top_hi : %lx\n", alloc_top_high); ++ prom_printf(" rmo_top : %lx\n", rmo_top); ++ prom_printf(" ram_top : %lx\n", ram_top); + } + + static void __init prom_close_stdin(void) +@@ -1566,7 +1568,7 @@ static void __init prom_instantiate_opal(void) + return; + } + +- prom_printf("instantiating opal at 0x%x...", base); ++ prom_printf("instantiating opal at 0x%llx...", base); + + if (call_prom_ret("call-method", 4, 3, rets, + ADDR("load-opal-runtime"), +@@ -1582,10 +1584,10 @@ static void __init prom_instantiate_opal(void) + + reserve_mem(base, size); + +- prom_debug("opal base = 0x%x\n", base); +- prom_debug("opal 
align = 0x%x\n", align); +- prom_debug("opal entry = 0x%x\n", entry); +- prom_debug("opal size = 0x%x\n", (long)size); ++ prom_debug("opal base = 0x%llx\n", base); ++ prom_debug("opal align = 0x%llx\n", align); ++ prom_debug("opal entry = 0x%llx\n", entry); ++ prom_debug("opal size = 0x%llx\n", size); + + prom_setprop(opal_node, "/ibm,opal", "opal-base-address", + &base, sizeof(base)); +@@ -1662,7 +1664,7 @@ static void __init prom_instantiate_rtas(void) + + prom_debug("rtas base = 0x%x\n", base); + prom_debug("rtas entry = 0x%x\n", entry); +- prom_debug("rtas size = 0x%x\n", (long)size); ++ prom_debug("rtas size = 0x%x\n", size); + + prom_debug("prom_instantiate_rtas: end...\n"); + } +@@ -1720,7 +1722,7 @@ static void __init prom_instantiate_sml(void) + if (base == 0) + prom_panic("Could not allocate memory for sml\n"); + +- prom_printf("instantiating sml at 0x%x...", base); ++ prom_printf("instantiating sml at 0x%llx...", base); + + memset((void *)base, 0, size); + +@@ -1739,8 +1741,8 @@ static void __init prom_instantiate_sml(void) + prom_setprop(ibmvtpm_node, "/vdevice/vtpm", "linux,sml-size", + &size, sizeof(size)); + +- prom_debug("sml base = 0x%x\n", base); +- prom_debug("sml size = 0x%x\n", (long)size); ++ prom_debug("sml base = 0x%llx\n", base); ++ prom_debug("sml size = 0x%x\n", size); + + prom_debug("prom_instantiate_sml: end...\n"); + } +@@ -1841,7 +1843,7 @@ static void __init prom_initialize_tce_table(void) + + prom_debug("TCE table: %s\n", path); + prom_debug("\tnode = 0x%x\n", node); +- prom_debug("\tbase = 0x%x\n", base); ++ prom_debug("\tbase = 0x%llx\n", base); + prom_debug("\tsize = 0x%x\n", minsize); + + /* Initialize the table to have a one-to-one mapping +@@ -1928,12 +1930,12 @@ static void __init prom_hold_cpus(void) + } + + prom_debug("prom_hold_cpus: start...\n"); +- prom_debug(" 1) spinloop = 0x%x\n", (unsigned long)spinloop); +- prom_debug(" 1) *spinloop = 0x%x\n", *spinloop); +- prom_debug(" 1) acknowledge = 0x%x\n", ++ prom_debug(" 1) spinloop = 0x%lx\n", (unsigned long)spinloop); ++ prom_debug(" 1) *spinloop = 0x%lx\n", *spinloop); ++ prom_debug(" 1) acknowledge = 0x%lx\n", + (unsigned long)acknowledge); +- prom_debug(" 1) *acknowledge = 0x%x\n", *acknowledge); +- prom_debug(" 1) secondary_hold = 0x%x\n", secondary_hold); ++ prom_debug(" 1) *acknowledge = 0x%lx\n", *acknowledge); ++ prom_debug(" 1) secondary_hold = 0x%lx\n", secondary_hold); + + /* Set the common spinloop variable, so all of the secondary cpus + * will block when they are awakened from their OF spinloop. +@@ -1961,7 +1963,7 @@ static void __init prom_hold_cpus(void) + prom_getprop(node, "reg", ®, sizeof(reg)); + cpu_no = be32_to_cpu(reg); + +- prom_debug("cpu hw idx = %lu\n", cpu_no); ++ prom_debug("cpu hw idx = %u\n", cpu_no); + + /* Init the acknowledge var which will be reset by + * the secondary cpu when it awakens from its OF +@@ -1971,7 +1973,7 @@ static void __init prom_hold_cpus(void) + + if (cpu_no != prom.cpu) { + /* Primary Thread of non-boot cpu or any thread */ +- prom_printf("starting cpu hw idx %lu... ", cpu_no); ++ prom_printf("starting cpu hw idx %u... 
", cpu_no); + call_prom("start-cpu", 3, 0, node, + secondary_hold, cpu_no); + +@@ -1982,11 +1984,11 @@ static void __init prom_hold_cpus(void) + if (*acknowledge == cpu_no) + prom_printf("done\n"); + else +- prom_printf("failed: %x\n", *acknowledge); ++ prom_printf("failed: %lx\n", *acknowledge); + } + #ifdef CONFIG_SMP + else +- prom_printf("boot cpu hw idx %lu\n", cpu_no); ++ prom_printf("boot cpu hw idx %u\n", cpu_no); + #endif /* CONFIG_SMP */ + } + +@@ -2264,7 +2266,7 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end, + while ((*mem_start + needed) > *mem_end) { + unsigned long room, chunk; + +- prom_debug("Chunk exhausted, claiming more at %x...\n", ++ prom_debug("Chunk exhausted, claiming more at %lx...\n", + alloc_bottom); + room = alloc_top - alloc_bottom; + if (room > DEVTREE_CHUNK_SIZE) +@@ -2490,7 +2492,7 @@ static void __init flatten_device_tree(void) + room = alloc_top - alloc_bottom - 0x4000; + if (room > DEVTREE_CHUNK_SIZE) + room = DEVTREE_CHUNK_SIZE; +- prom_debug("starting device tree allocs at %x\n", alloc_bottom); ++ prom_debug("starting device tree allocs at %lx\n", alloc_bottom); + + /* Now try to claim that */ + mem_start = (unsigned long)alloc_up(room, PAGE_SIZE); +@@ -2553,7 +2555,7 @@ static void __init flatten_device_tree(void) + int i; + prom_printf("reserved memory map:\n"); + for (i = 0; i < mem_reserve_cnt; i++) +- prom_printf(" %x - %x\n", ++ prom_printf(" %llx - %llx\n", + be64_to_cpu(mem_reserve_map[i].base), + be64_to_cpu(mem_reserve_map[i].size)); + } +@@ -2563,9 +2565,9 @@ static void __init flatten_device_tree(void) + */ + mem_reserve_cnt = MEM_RESERVE_MAP_SIZE; + +- prom_printf("Device tree strings 0x%x -> 0x%x\n", ++ prom_printf("Device tree strings 0x%lx -> 0x%lx\n", + dt_string_start, dt_string_end); +- prom_printf("Device tree struct 0x%x -> 0x%x\n", ++ prom_printf("Device tree struct 0x%lx -> 0x%lx\n", + dt_struct_start, dt_struct_end); + } + +@@ -2997,7 +2999,7 @@ static void __init prom_find_boot_cpu(void) + prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval)); + prom.cpu = be32_to_cpu(rval); + +- prom_debug("Booting CPU hw index = %lu\n", prom.cpu); ++ prom_debug("Booting CPU hw index = %d\n", prom.cpu); + } + + static void __init prom_check_initrd(unsigned long r3, unsigned long r4) +@@ -3019,8 +3021,8 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4) + reserve_mem(prom_initrd_start, + prom_initrd_end - prom_initrd_start); + +- prom_debug("initrd_start=0x%x\n", prom_initrd_start); +- prom_debug("initrd_end=0x%x\n", prom_initrd_end); ++ prom_debug("initrd_start=0x%lx\n", prom_initrd_start); ++ prom_debug("initrd_end=0x%lx\n", prom_initrd_end); + } + #endif /* CONFIG_BLK_DEV_INITRD */ + } +@@ -3273,7 +3275,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, + /* Don't print anything after quiesce under OPAL, it crashes OFW */ + if (of_platform != PLATFORM_OPAL) { + prom_printf("Booting Linux via __start() @ 0x%lx ...\n", kbase); +- prom_debug("->dt_header_start=0x%x\n", hdr); ++ prom_debug("->dt_header_start=0x%lx\n", hdr); + } + + #ifdef CONFIG_PPC32 +diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S +index a787776822d8..0378def28d41 100644 +--- a/arch/powerpc/lib/string.S ++++ b/arch/powerpc/lib/string.S +@@ -12,6 +12,7 @@ + #include <asm/errno.h> + #include <asm/ppc_asm.h> + #include <asm/export.h> ++#include <asm/cache.h> + + .text + +@@ -23,7 +24,7 @@ _GLOBAL(strncpy) + mtctr r5 + addi r6,r3,-1 + addi r4,r4,-1 +- .balign 16 ++ .balign 
IFETCH_ALIGN_BYTES + 1: lbzu r0,1(r4) + cmpwi 0,r0,0 + stbu r0,1(r6) +@@ -43,7 +44,7 @@ _GLOBAL(strncmp) + mtctr r5 + addi r5,r3,-1 + addi r4,r4,-1 +- .balign 16 ++ .balign IFETCH_ALIGN_BYTES + 1: lbzu r3,1(r5) + cmpwi 1,r3,0 + lbzu r0,1(r4) +@@ -77,7 +78,7 @@ _GLOBAL(memchr) + beq- 2f + mtctr r5 + addi r3,r3,-1 +- .balign 16 ++ .balign IFETCH_ALIGN_BYTES + 1: lbzu r0,1(r3) + cmpw 0,r0,r4 + bdnzf 2,1b +diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c +index 13cfe413b40d..6d9bf014b3e7 100644 +--- a/arch/powerpc/mm/slb.c ++++ b/arch/powerpc/mm/slb.c +@@ -62,14 +62,14 @@ static inline void slb_shadow_update(unsigned long ea, int ssize, + * updating it. No write barriers are needed here, provided + * we only update the current CPU's SLB shadow buffer. + */ +- p->save_area[index].esid = 0; +- p->save_area[index].vsid = cpu_to_be64(mk_vsid_data(ea, ssize, flags)); +- p->save_area[index].esid = cpu_to_be64(mk_esid_data(ea, ssize, index)); ++ WRITE_ONCE(p->save_area[index].esid, 0); ++ WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags))); ++ WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index))); + } + + static inline void slb_shadow_clear(enum slb_index index) + { +- get_slb_shadow()->save_area[index].esid = 0; ++ WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0); + } + + static inline void create_shadowed_slbe(unsigned long ea, int ssize, +diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c +index bd0786c23109..254634fb3fc7 100644 +--- a/arch/powerpc/net/bpf_jit_comp64.c ++++ b/arch/powerpc/net/bpf_jit_comp64.c +@@ -203,25 +203,37 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) + + static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func) + { ++ unsigned int i, ctx_idx = ctx->idx; ++ ++ /* Load function address into r12 */ ++ PPC_LI64(12, func); ++ ++ /* For bpf-to-bpf function calls, the callee's address is unknown ++ * until the last extra pass. As seen above, we use PPC_LI64() to ++ * load the callee's address, but this may optimize the number of ++ * instructions required based on the nature of the address. ++ * ++ * Since we don't want the number of instructions emitted to change, ++ * we pad the optimized PPC_LI64() call with NOPs to guarantee that ++ * we always have a five-instruction sequence, which is the maximum ++ * that PPC_LI64() can emit. ++ */ ++ for (i = ctx->idx - ctx_idx; i < 5; i++) ++ PPC_NOP(); ++ + #ifdef PPC64_ELF_ABI_v1 +- /* func points to the function descriptor */ +- PPC_LI64(b2p[TMP_REG_2], func); +- /* Load actual entry point from function descriptor */ +- PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0); +- /* ... and move it to LR */ +- PPC_MTLR(b2p[TMP_REG_1]); + /* + * Load TOC from function descriptor at offset 8. + * We can clobber r2 since we get called through a + * function pointer (so caller will save/restore r2) + * and since we don't use a TOC ourself. 
+ */ +- PPC_BPF_LL(2, b2p[TMP_REG_2], 8); +-#else +- /* We can clobber r12 */ +- PPC_FUNC_ADDR(12, func); +- PPC_MTLR(12); ++ PPC_BPF_LL(2, 12, 8); ++ /* Load actual entry point from function descriptor */ ++ PPC_BPF_LL(12, 12, 0); + #endif ++ ++ PPC_MTLR(12); + PPC_BLRL(); + } + +diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c +index 03d115aaa191..acde7bbe0716 100644 +--- a/arch/powerpc/platforms/chrp/time.c ++++ b/arch/powerpc/platforms/chrp/time.c +@@ -28,6 +28,8 @@ + #include <asm/sections.h> + #include <asm/time.h> + ++#include <platforms/chrp/chrp.h> ++ + extern spinlock_t rtc_lock; + + #define NVRAM_AS0 0x74 +@@ -63,7 +65,7 @@ long __init chrp_time_init(void) + return 0; + } + +-int chrp_cmos_clock_read(int addr) ++static int chrp_cmos_clock_read(int addr) + { + if (nvram_as1 != 0) + outb(addr>>8, nvram_as1); +@@ -71,7 +73,7 @@ int chrp_cmos_clock_read(int addr) + return (inb(nvram_data)); + } + +-void chrp_cmos_clock_write(unsigned long val, int addr) ++static void chrp_cmos_clock_write(unsigned long val, int addr) + { + if (nvram_as1 != 0) + outb(addr>>8, nvram_as1); +diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +index 89c54de88b7a..bf4a125faec6 100644 +--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c ++++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +@@ -35,6 +35,8 @@ + */ + #define HW_BROADWAY_ICR 0x00 + #define HW_BROADWAY_IMR 0x04 ++#define HW_STARLET_ICR 0x08 ++#define HW_STARLET_IMR 0x0c + + + /* +@@ -74,6 +76,9 @@ static void hlwd_pic_unmask(struct irq_data *d) + void __iomem *io_base = irq_data_get_irq_chip_data(d); + + setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); ++ ++ /* Make sure the ARM (aka. Starlet) doesn't handle this interrupt. */ ++ clrbits32(io_base + HW_STARLET_IMR, 1 << irq); + } + + +diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c +index c3c9bbb3573a..ba0964c17620 100644 +--- a/arch/powerpc/platforms/powermac/bootx_init.c ++++ b/arch/powerpc/platforms/powermac/bootx_init.c +@@ -468,7 +468,7 @@ void __init bootx_init(unsigned long r3, unsigned long r4) + boot_infos_t *bi = (boot_infos_t *) r4; + unsigned long hdr; + unsigned long space; +- unsigned long ptr, x; ++ unsigned long ptr; + char *model; + unsigned long offset = reloc_offset(); + +@@ -562,6 +562,8 @@ void __init bootx_init(unsigned long r3, unsigned long r4) + * MMU switched OFF, so this should not be useful anymore. 
+ */ + if (bi->version < 4) { ++ unsigned long x __maybe_unused; ++ + bootx_printf("Touching pages...\n"); + + /* +diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c +index ab668cb72263..8b2eab1340f4 100644 +--- a/arch/powerpc/platforms/powermac/setup.c ++++ b/arch/powerpc/platforms/powermac/setup.c +@@ -352,6 +352,7 @@ static int pmac_late_init(void) + } + machine_late_initcall(powermac, pmac_late_init); + ++void note_bootable_part(dev_t dev, int part, int goodness); + /* + * This is __ref because we check for "initializing" before + * touching any of the __init sensitive things and "initializing" +diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h +index 05480e4cc5ca..bc764a674594 100644 +--- a/arch/s390/include/asm/cpu_mf.h ++++ b/arch/s390/include/asm/cpu_mf.h +@@ -116,7 +116,7 @@ struct hws_basic_entry { + + struct hws_diag_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ +- unsigned int R:14; /* 16-19 and 20-30 reserved */ ++ unsigned int R:15; /* 16-19 and 20-30 reserved */ + unsigned int I:1; /* 31 entry valid or invalid */ + u8 data[]; /* Machine-dependent sample data */ + } __packed; +@@ -132,7 +132,9 @@ struct hws_trailer_entry { + unsigned int f:1; /* 0 - Block Full Indicator */ + unsigned int a:1; /* 1 - Alert request control */ + unsigned int t:1; /* 2 - Timestamp format */ +- unsigned long long:61; /* 3 - 63: Reserved */ ++ unsigned int :29; /* 3 - 31: Reserved */ ++ unsigned int bsdes:16; /* 32-47: size of basic SDE */ ++ unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ + }; + unsigned long long flags; /* 0 - 63: All indicators */ + }; +diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c +index d45e06346f14..c56cb37b88e3 100644 +--- a/arch/x86/events/intel/uncore.c ++++ b/arch/x86/events/intel/uncore.c +@@ -218,7 +218,7 @@ void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *e + u64 prev_count, new_count, delta; + int shift; + +- if (event->hw.idx >= UNCORE_PMC_IDX_FIXED) ++ if (event->hw.idx == UNCORE_PMC_IDX_FIXED) + shift = 64 - uncore_fixed_ctr_bits(box); + else + shift = 64 - uncore_perf_ctr_bits(box); +diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c +index 93e7a8397cde..173e2674be6e 100644 +--- a/arch/x86/events/intel/uncore_nhmex.c ++++ b/arch/x86/events/intel/uncore_nhmex.c +@@ -246,7 +246,7 @@ static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct p + { + struct hw_perf_event *hwc = &event->hw; + +- if (hwc->idx >= UNCORE_PMC_IDX_FIXED) ++ if (hwc->idx == UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, NHMEX_PMON_CTL_EN_BIT0); + else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0) + wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT22); +diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c +index c8e0cda0f272..4fc0e08a30b9 100644 +--- a/arch/x86/kernel/cpu/microcode/core.c ++++ b/arch/x86/kernel/cpu/microcode/core.c +@@ -70,7 +70,7 @@ static DEFINE_MUTEX(microcode_mutex); + /* + * Serialize late loading so that CPUs get updated one-by-one. 
+ */ +-static DEFINE_SPINLOCK(update_lock); ++static DEFINE_RAW_SPINLOCK(update_lock); + + struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; + +@@ -560,9 +560,9 @@ static int __reload_late(void *info) + if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) + return -1; + +- spin_lock(&update_lock); ++ raw_spin_lock(&update_lock); + apply_microcode_local(&err); +- spin_unlock(&update_lock); ++ raw_spin_unlock(&update_lock); + + /* siblings return UCODE_OK because their engine got updated already */ + if (err > UCODE_NFOUND) { +diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c +index 43bbece92632..2ef2f1fe875b 100644 +--- a/arch/x86/kvm/mmu.c ++++ b/arch/x86/kvm/mmu.c +@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache, + if (cache->nobjs >= min) + return 0; + while (cache->nobjs < ARRAY_SIZE(cache->objects)) { +- page = (void *)__get_free_page(GFP_KERNEL); ++ page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT); + if (!page) + return -ENOMEM; + cache->objects[cache->nobjs++] = page; +diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c +index 56c9cd01fd1d..4a4b7d3c909a 100644 +--- a/block/bfq-iosched.c ++++ b/block/bfq-iosched.c +@@ -1678,7 +1678,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, + + if (!RB_EMPTY_NODE(&rq->rb_node)) + goto end; +- spin_lock_irq(&bfqq->bfqd->lock); + + /* + * If next and rq belong to the same bfq_queue and next is older +@@ -1702,7 +1701,6 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq, + + bfq_remove_request(q, next); + +- spin_unlock_irq(&bfqq->bfqd->lock); + end: + bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags); + } +diff --git a/block/bio.c b/block/bio.c +index 90f19d7df66c..194d28cdc642 100644 +--- a/block/bio.c ++++ b/block/bio.c +@@ -881,16 +881,16 @@ EXPORT_SYMBOL(bio_add_page); + */ + int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) + { +- unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; ++ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx; + struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; + struct page **pages = (struct page **)bv; +- size_t offset, diff; ++ size_t offset; + ssize_t size; + + size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); + if (unlikely(size <= 0)) + return size ? 
size : -EFAULT; +- nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; ++ idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE; + + /* + * Deep magic below: We need to walk the pinned pages backwards +@@ -903,17 +903,15 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) + bio->bi_iter.bi_size += size; + bio->bi_vcnt += nr_pages; + +- diff = (nr_pages * PAGE_SIZE - offset) - size; +- while (nr_pages--) { +- bv[nr_pages].bv_page = pages[nr_pages]; +- bv[nr_pages].bv_len = PAGE_SIZE; +- bv[nr_pages].bv_offset = 0; ++ while (idx--) { ++ bv[idx].bv_page = pages[idx]; ++ bv[idx].bv_len = PAGE_SIZE; ++ bv[idx].bv_offset = 0; + } + + bv[0].bv_offset += offset; + bv[0].bv_len -= offset; +- if (diff) +- bv[bio->bi_vcnt - 1].bv_len -= diff; ++ bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size; + + iov_iter_advance(iter, size); + return 0; +@@ -1891,6 +1889,7 @@ struct bio *bio_split(struct bio *bio, int sectors, + bio_integrity_trim(split); + + bio_advance(bio, split->bi_iter.bi_size); ++ bio->bi_iter.bi_done = 0; + + if (bio_flagged(bio, BIO_TRACE_COMPLETION)) + bio_set_flag(split, BIO_TRACE_COMPLETION); +diff --git a/crypto/authenc.c b/crypto/authenc.c +index 875470b0e026..0db344d5a01a 100644 +--- a/crypto/authenc.c ++++ b/crypto/authenc.c +@@ -108,6 +108,7 @@ static int crypto_authenc_setkey(struct crypto_aead *authenc, const u8 *key, + CRYPTO_TFM_RES_MASK); + + out: ++ memzero_explicit(&keys, sizeof(keys)); + return err; + + badkey: +diff --git a/crypto/authencesn.c b/crypto/authencesn.c +index 0cf5fefdb859..6de852ce4cf8 100644 +--- a/crypto/authencesn.c ++++ b/crypto/authencesn.c +@@ -90,6 +90,7 @@ static int crypto_authenc_esn_setkey(struct crypto_aead *authenc_esn, const u8 * + CRYPTO_TFM_RES_MASK); + + out: ++ memzero_explicit(&keys, sizeof(keys)); + return err; + + badkey: +diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c +index 602ae58ee2d8..75c3cb377b98 100644 +--- a/drivers/acpi/acpi_lpss.c ++++ b/drivers/acpi/acpi_lpss.c +@@ -69,6 +69,10 @@ ACPI_MODULE_NAME("acpi_lpss"); + #define LPSS_SAVE_CTX BIT(4) + #define LPSS_NO_D3_DELAY BIT(5) + ++/* Crystal Cove PMIC shares same ACPI ID between different platforms */ ++#define BYT_CRC_HRV 2 ++#define CHT_CRC_HRV 3 ++ + struct lpss_private_data; + + struct lpss_device_desc { +@@ -162,7 +166,7 @@ static void byt_pwm_setup(struct lpss_private_data *pdata) + if (!adev->pnp.unique_id || strcmp(adev->pnp.unique_id, "1")) + return; + +- if (!acpi_dev_present("INT33FD", NULL, -1)) ++ if (!acpi_dev_present("INT33FD", NULL, BYT_CRC_HRV)) + pwm_add_table(byt_pwm_lookup, ARRAY_SIZE(byt_pwm_lookup)); + } + +diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c +index 6fc204a52493..eb857d6ea1fe 100644 +--- a/drivers/acpi/pci_root.c ++++ b/drivers/acpi/pci_root.c +@@ -472,9 +472,11 @@ static void negotiate_os_control(struct acpi_pci_root *root, int *no_aspm) + } + + control = OSC_PCI_EXPRESS_CAPABILITY_CONTROL +- | OSC_PCI_EXPRESS_NATIVE_HP_CONTROL + | OSC_PCI_EXPRESS_PME_CONTROL; + ++ if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) ++ control |= OSC_PCI_EXPRESS_NATIVE_HP_CONTROL; ++ + if (pci_aer_available()) { + if (aer_acpi_firmware_first()) + dev_info(&device->dev, +diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c +index 711dd91b5e2c..2651c81d1edf 100644 +--- a/drivers/ata/libata-eh.c ++++ b/drivers/ata/libata-eh.c +@@ -2217,12 +2217,16 @@ static void ata_eh_link_autopsy(struct ata_link *link) + if (qc->err_mask & ~AC_ERR_OTHER) + qc->err_mask &= ~AC_ERR_OTHER; + +- /* SENSE_VALID 
trumps dev/unknown error and revalidation */ ++ /* ++ * SENSE_VALID trumps dev/unknown error and revalidation. Upper ++ * layers will determine whether the command is worth retrying ++ * based on the sense data and device class/type. Otherwise, ++ * determine directly if the command is worth retrying using its ++ * error mask and flags. ++ */ + if (qc->flags & ATA_QCFLAG_SENSE_VALID) + qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER); +- +- /* determine whether the command is worth retrying */ +- if (ata_eh_worth_retry(qc)) ++ else if (ata_eh_worth_retry(qc)) + qc->flags |= ATA_QCFLAG_RETRY; + + /* accumulate error info */ +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 86d7975afaeb..819521d5895e 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -279,6 +279,7 @@ static const struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME }, + { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME }, ++ { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME }, + + /* Broadcom BCM2035 */ + { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, +@@ -373,6 +374,9 @@ static const struct usb_device_id blacklist_table[] = { + /* Additional Realtek 8723BU Bluetooth devices */ + { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, + ++ /* Additional Realtek 8723DE Bluetooth devices */ ++ { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, ++ + /* Additional Realtek 8821AE Bluetooth devices */ + { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, + { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c +index 6f4ebd5e54c8..a6173ddfb5a7 100644 +--- a/drivers/bluetooth/hci_qca.c ++++ b/drivers/bluetooth/hci_qca.c +@@ -881,7 +881,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) + */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); +- set_current_state(TASK_INTERRUPTIBLE); ++ set_current_state(TASK_RUNNING); + + return 0; + } +diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c +index 72fd1750134d..942d076cbb0a 100644 +--- a/drivers/bus/arm-ccn.c ++++ b/drivers/bus/arm-ccn.c +@@ -736,7 +736,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) + ccn = pmu_to_arm_ccn(event->pmu); + + if (hw->sample_period) { +- dev_warn(ccn->dev, "Sampling not supported!\n"); ++ dev_dbg(ccn->dev, "Sampling not supported!\n"); + return -EOPNOTSUPP; + } + +@@ -744,12 +744,12 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) + event->attr.exclude_kernel || event->attr.exclude_hv || + event->attr.exclude_idle || event->attr.exclude_host || + event->attr.exclude_guest) { +- dev_warn(ccn->dev, "Can't exclude execution levels!\n"); ++ dev_dbg(ccn->dev, "Can't exclude execution levels!\n"); + return -EINVAL; + } + + if (event->cpu < 0) { +- dev_warn(ccn->dev, "Can't provide per-task data!\n"); ++ dev_dbg(ccn->dev, "Can't provide per-task data!\n"); + return -EOPNOTSUPP; + } + /* +@@ -771,13 +771,13 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) + switch (type) { + case CCN_TYPE_MN: + if (node_xp != ccn->mn_id) { +- dev_warn(ccn->dev, "Invalid MN ID %d!\n", node_xp); ++ dev_dbg(ccn->dev, "Invalid MN ID %d!\n", node_xp); + return -EINVAL; + } + break; + case CCN_TYPE_XP: + if (node_xp >= ccn->num_xps) { +- dev_warn(ccn->dev, 
"Invalid XP ID %d!\n", node_xp); ++ dev_dbg(ccn->dev, "Invalid XP ID %d!\n", node_xp); + return -EINVAL; + } + break; +@@ -785,11 +785,11 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) + break; + default: + if (node_xp >= ccn->num_nodes) { +- dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp); ++ dev_dbg(ccn->dev, "Invalid node ID %d!\n", node_xp); + return -EINVAL; + } + if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) { +- dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n", ++ dev_dbg(ccn->dev, "Invalid type 0x%x for node %d!\n", + type, node_xp); + return -EINVAL; + } +@@ -808,19 +808,19 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) + if (event_id != e->event) + continue; + if (e->num_ports && port >= e->num_ports) { +- dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n", ++ dev_dbg(ccn->dev, "Invalid port %d for node/XP %d!\n", + port, node_xp); + return -EINVAL; + } + if (e->num_vcs && vc >= e->num_vcs) { +- dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n", ++ dev_dbg(ccn->dev, "Invalid vc %d for node/XP %d!\n", + vc, node_xp); + return -EINVAL; + } + valid = 1; + } + if (!valid) { +- dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", ++ dev_dbg(ccn->dev, "Invalid event 0x%x for node/XP %d!\n", + event_id, node_xp); + return -EINVAL; + } +diff --git a/drivers/char/random.c b/drivers/char/random.c +index ddc493d976fd..ea4dbfa30657 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -1897,14 +1897,22 @@ static int + write_pool(struct entropy_store *r, const char __user *buffer, size_t count) + { + size_t bytes; +- __u32 buf[16]; ++ __u32 t, buf[16]; + const char __user *p = buffer; + + while (count > 0) { ++ int b, i = 0; ++ + bytes = min(count, sizeof(buf)); + if (copy_from_user(&buf, p, bytes)) + return -EFAULT; + ++ for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) { ++ if (!arch_get_random_int(&t)) ++ break; ++ buf[i] ^= t; ++ } ++ + count -= bytes; + p += bytes; + +diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c +index 346c4987b284..38983f56ad0d 100644 +--- a/drivers/edac/altera_edac.c ++++ b/drivers/edac/altera_edac.c +@@ -1106,7 +1106,7 @@ static void *ocram_alloc_mem(size_t size, void **other) + + static void ocram_free_mem(void *p, size_t size, void *other) + { +- gen_pool_free((struct gen_pool *)other, (u32)p, size); ++ gen_pool_free((struct gen_pool *)other, (unsigned long)p, size); + } + + static const struct edac_device_prv_data ocramecc_data = { +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +index 1360a24d2ede..f08624f2f209 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +@@ -683,8 +683,12 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, + return -EINVAL; + + /* A shared bo cannot be migrated to VRAM */ +- if (bo->prime_shared_count && (domain == AMDGPU_GEM_DOMAIN_VRAM)) +- return -EINVAL; ++ if (bo->prime_shared_count) { ++ if (domain & AMDGPU_GEM_DOMAIN_GTT) ++ domain = AMDGPU_GEM_DOMAIN_GTT; ++ else ++ return -EINVAL; ++ } + + if (bo->pin_count) { + uint32_t mem_type = bo->tbo.mem.mem_type; +diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c +index 0d8a417e2cd6..bb5cc15fa0b9 100644 +--- a/drivers/gpu/drm/drm_atomic.c ++++ b/drivers/gpu/drm/drm_atomic.c +@@ -1355,7 +1355,9 @@ drm_atomic_set_crtc_for_plane(struct drm_plane_state *plane_state, + { + struct drm_plane *plane = plane_state->plane; + struct drm_crtc_state 
*crtc_state; +- ++ /* Nothing to do for same crtc*/ ++ if (plane_state->crtc == crtc) ++ return 0; + if (plane_state->crtc) { + crtc_state = drm_atomic_get_crtc_state(plane_state->state, + plane_state->crtc); +diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c +index 0028591f3f95..1f08d597b87a 100644 +--- a/drivers/gpu/drm/drm_atomic_helper.c ++++ b/drivers/gpu/drm/drm_atomic_helper.c +@@ -2683,31 +2683,9 @@ commit: + return 0; + } + +-/** +- * drm_atomic_helper_disable_all - disable all currently active outputs +- * @dev: DRM device +- * @ctx: lock acquisition context +- * +- * Loops through all connectors, finding those that aren't turned off and then +- * turns them off by setting their DPMS mode to OFF and deactivating the CRTC +- * that they are connected to. +- * +- * This is used for example in suspend/resume to disable all currently active +- * functions when suspending. If you just want to shut down everything at e.g. +- * driver unload, look at drm_atomic_helper_shutdown(). +- * +- * Note that if callers haven't already acquired all modeset locks this might +- * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). +- * +- * Returns: +- * 0 on success or a negative error code on failure. +- * +- * See also: +- * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and +- * drm_atomic_helper_shutdown(). +- */ +-int drm_atomic_helper_disable_all(struct drm_device *dev, +- struct drm_modeset_acquire_ctx *ctx) ++static int __drm_atomic_helper_disable_all(struct drm_device *dev, ++ struct drm_modeset_acquire_ctx *ctx, ++ bool clean_old_fbs) + { + struct drm_atomic_state *state; + struct drm_connector_state *conn_state; +@@ -2759,8 +2737,11 @@ int drm_atomic_helper_disable_all(struct drm_device *dev, + goto free; + + drm_atomic_set_fb_for_plane(plane_state, NULL); +- plane_mask |= BIT(drm_plane_index(plane)); +- plane->old_fb = plane->fb; ++ ++ if (clean_old_fbs) { ++ plane->old_fb = plane->fb; ++ plane_mask |= BIT(drm_plane_index(plane)); ++ } + } + + ret = drm_atomic_commit(state); +@@ -2771,6 +2752,34 @@ free: + return ret; + } + ++/** ++ * drm_atomic_helper_disable_all - disable all currently active outputs ++ * @dev: DRM device ++ * @ctx: lock acquisition context ++ * ++ * Loops through all connectors, finding those that aren't turned off and then ++ * turns them off by setting their DPMS mode to OFF and deactivating the CRTC ++ * that they are connected to. ++ * ++ * This is used for example in suspend/resume to disable all currently active ++ * functions when suspending. If you just want to shut down everything at e.g. ++ * driver unload, look at drm_atomic_helper_shutdown(). ++ * ++ * Note that if callers haven't already acquired all modeset locks this might ++ * return -EDEADLK, which must be handled by calling drm_modeset_backoff(). ++ * ++ * Returns: ++ * 0 on success or a negative error code on failure. ++ * ++ * See also: ++ * drm_atomic_helper_suspend(), drm_atomic_helper_resume() and ++ * drm_atomic_helper_shutdown(). 
++ */ ++int drm_atomic_helper_disable_all(struct drm_device *dev, ++ struct drm_modeset_acquire_ctx *ctx) ++{ ++ return __drm_atomic_helper_disable_all(dev, ctx, false); ++} + EXPORT_SYMBOL(drm_atomic_helper_disable_all); + + /** +@@ -2793,7 +2802,7 @@ void drm_atomic_helper_shutdown(struct drm_device *dev) + while (1) { + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (!ret) +- ret = drm_atomic_helper_disable_all(dev, &ctx); ++ ret = __drm_atomic_helper_disable_all(dev, &ctx, true); + + if (ret != -EDEADLK) + break; +@@ -2897,16 +2906,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, + struct drm_connector_state *new_conn_state; + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state; +- unsigned plane_mask = 0; +- struct drm_device *dev = state->dev; +- int ret; + + state->acquire_ctx = ctx; + +- for_each_new_plane_in_state(state, plane, new_plane_state, i) { +- plane_mask |= BIT(drm_plane_index(plane)); ++ for_each_new_plane_in_state(state, plane, new_plane_state, i) + state->planes[i].old_state = plane->state; +- } + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) + state->crtcs[i].old_state = crtc->state; +@@ -2914,11 +2918,7 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state, + for_each_new_connector_in_state(state, connector, new_conn_state, i) + state->connectors[i].old_state = connector->state; + +- ret = drm_atomic_commit(state); +- if (plane_mask) +- drm_atomic_clean_old_fb(dev, plane_mask, ret); +- +- return ret; ++ return drm_atomic_commit(state); + } + EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state); + +diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c +index 41b492f99955..c022ab6e84bd 100644 +--- a/drivers/gpu/drm/drm_dp_mst_topology.c ++++ b/drivers/gpu/drm/drm_dp_mst_topology.c +@@ -2862,12 +2862,14 @@ static void drm_dp_mst_dump_mstb(struct seq_file *m, + } + } + ++#define DP_PAYLOAD_TABLE_SIZE 64 ++ + static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, + char *buf) + { + int i; + +- for (i = 0; i < 64; i += 16) { ++ for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) { + if (drm_dp_dpcd_read(mgr->aux, + DP_PAYLOAD_TABLE_UPDATE_STATUS + i, + &buf[i], 16) != 16) +@@ -2936,7 +2938,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, + + mutex_lock(&mgr->lock); + if (mgr->mst_primary) { +- u8 buf[64]; ++ u8 buf[DP_PAYLOAD_TABLE_SIZE]; + int ret; + + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); +@@ -2954,8 +2956,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, + seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n", + buf[0x9] >> 4, buf[0x9] & 0xf, buf[0xa], buf[0xb]); + if (dump_dp_payload_table(mgr, buf)) +- seq_printf(m, "payload table: %*ph\n", 63, buf); +- ++ seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf); + } + + mutex_unlock(&mgr->lock); +diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h +index e8e4ea14b12b..e05e5399af2d 100644 +--- a/drivers/gpu/drm/gma500/psb_intel_drv.h ++++ b/drivers/gpu/drm/gma500/psb_intel_drv.h +@@ -255,7 +255,7 @@ extern int intelfb_remove(struct drm_device *dev, + extern bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode); +-extern int psb_intel_lvds_mode_valid(struct drm_connector *connector, ++extern enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode); 
+ extern int psb_intel_lvds_set_property(struct drm_connector *connector, + struct drm_property *property, +diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c +index be3eefec5152..8baf6325c6e4 100644 +--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c ++++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c +@@ -343,7 +343,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector) + } + } + +-int psb_intel_lvds_mode_valid(struct drm_connector *connector, ++enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_psb_private *dev_priv = connector->dev->dev_private; +diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +index a7e55c422501..0b632dc0cf7d 100644 +--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c ++++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +@@ -155,10 +155,10 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl) + (target << 28)); + nvkm_wr32(device, 0x002274, (runl << 20) | nr); + +- if (wait_event_timeout(fifo->runlist[runl].wait, +- !(nvkm_rd32(device, 0x002284 + (runl * 0x08)) +- & 0x00100000), +- msecs_to_jiffies(2000)) == 0) ++ if (nvkm_msec(device, 2000, ++ if (!(nvkm_rd32(device, 0x002284 + (runl * 0x08)) & 0x00100000)) ++ break; ++ ) < 0) + nvkm_error(subdev, "runlist %d update timeout\n", runl); + unlock: + mutex_unlock(&subdev->mutex); +diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c +index 424cd1b66575..337d3a1c2a40 100644 +--- a/drivers/gpu/drm/radeon/radeon_connectors.c ++++ b/drivers/gpu/drm/radeon/radeon_connectors.c +@@ -853,7 +853,7 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) + return ret; + } + +-static int radeon_lvds_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_lvds_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_encoder *encoder = radeon_best_single_encoder(connector); +@@ -1013,7 +1013,7 @@ static int radeon_vga_get_modes(struct drm_connector *connector) + return ret; + } + +-static int radeon_vga_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_vga_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_device *dev = connector->dev; +@@ -1157,7 +1157,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector) + return 1; + } + +-static int radeon_tv_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_tv_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + if ((mode->hdisplay > 1024) || (mode->vdisplay > 768)) +@@ -1499,7 +1499,7 @@ static void radeon_dvi_force(struct drm_connector *connector) + radeon_connector->use_digital = true; + } + +-static int radeon_dvi_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_dvi_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_device *dev = connector->dev; +@@ -1801,7 +1801,7 @@ out: + return ret; + } + +-static int radeon_dp_mode_valid(struct drm_connector *connector, ++static enum drm_mode_status radeon_dp_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) + { + struct drm_device *dev = connector->dev; +diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c +index febb21ee190e..584b10d3fc3d 100644 +--- 
a/drivers/hid/hid-plantronics.c ++++ b/drivers/hid/hid-plantronics.c +@@ -2,7 +2,7 @@ + * Plantronics USB HID Driver + * + * Copyright (c) 2014 JD Cole <jd.cole@plantronics.com> +- * Copyright (c) 2015 Terry Junge <terry.junge@plantronics.com> ++ * Copyright (c) 2015-2018 Terry Junge <terry.junge@plantronics.com> + */ + + /* +@@ -48,6 +48,10 @@ static int plantronics_input_mapping(struct hid_device *hdev, + unsigned short mapped_key; + unsigned long plt_type = (unsigned long)hid_get_drvdata(hdev); + ++ /* special case for PTT products */ ++ if (field->application == HID_GD_JOYSTICK) ++ goto defaulted; ++ + /* handle volume up/down mapping */ + /* non-standard types or multi-HID interfaces - plt_type is PID */ + if (!(plt_type & HID_USAGE_PAGE)) { +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c +index d92827556389..136a34dc31b8 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.c ++++ b/drivers/hid/i2c-hid/i2c-hid.c +@@ -1036,6 +1036,14 @@ static int i2c_hid_probe(struct i2c_client *client, + pm_runtime_enable(&client->dev); + device_enable_async_suspend(&client->dev); + ++ /* Make sure there is something at this address */ ++ ret = i2c_smbus_read_byte(client); ++ if (ret < 0) { ++ dev_dbg(&client->dev, "nothing at this address: %d\n", ret); ++ ret = -ENXIO; ++ goto err_pm; ++ } ++ + ret = i2c_hid_fetch_hid_descriptor(ihid); + if (ret < 0) + goto err_pm; +diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c +index 56e46581b84b..6f2fe63e8f5a 100644 +--- a/drivers/i2c/i2c-core-base.c ++++ b/drivers/i2c/i2c-core-base.c +@@ -808,8 +808,11 @@ EXPORT_SYMBOL_GPL(i2c_new_device); + */ + void i2c_unregister_device(struct i2c_client *client) + { +- if (client->dev.of_node) ++ if (client->dev.of_node) { + of_node_clear_flag(client->dev.of_node, OF_POPULATED); ++ of_node_put(client->dev.of_node); ++ } ++ + if (ACPI_COMPANION(&client->dev)) + acpi_device_clear_enumerated(ACPI_COMPANION(&client->dev)); + device_unregister(&client->dev); +diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c +index d8efdc191c27..55252079faf6 100644 +--- a/drivers/infiniband/core/mad.c ++++ b/drivers/infiniband/core/mad.c +@@ -1558,7 +1558,8 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, + mad_reg_req->oui, 3)) { + method = &(*vendor_table)->vendor_class[ + vclass]->method_table[i]; +- BUG_ON(!*method); ++ if (!*method) ++ goto error3; + goto check_in_use; + } + } +@@ -1568,10 +1569,12 @@ static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req, + vclass]->oui[i])) { + method = &(*vendor_table)->vendor_class[ + vclass]->method_table[i]; +- BUG_ON(*method); + /* Allocate method table for this OUI */ +- if ((ret = allocate_method_table(method))) +- goto error3; ++ if (!*method) { ++ ret = allocate_method_table(method); ++ if (ret) ++ goto error3; ++ } + memcpy((*vendor_table)->vendor_class[vclass]->oui[i], + mad_reg_req->oui, 3); + goto check_in_use; +diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c +index e47baf0950e3..a22b992cde38 100644 +--- a/drivers/infiniband/core/ucma.c ++++ b/drivers/infiniband/core/ucma.c +@@ -218,7 +218,7 @@ static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) + return NULL; + + mutex_lock(&mut); +- mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL); ++ mc->id = idr_alloc(&multicast_idr, NULL, 0, 0, GFP_KERNEL); + mutex_unlock(&mut); + if (mc->id < 0) + goto error; +@@ -1404,6 +1404,10 @@ static ssize_t ucma_process_join(struct ucma_file *file, + goto err3; + } 
+ ++ mutex_lock(&mut); ++ idr_replace(&multicast_idr, mc, mc->id); ++ mutex_unlock(&mut); ++ + mutex_unlock(&file->mut); + ucma_put_ctx(ctx); + return 0; +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c +index 186dce6bba8f..b8229d7b0ff5 100644 +--- a/drivers/infiniband/core/uverbs_cmd.c ++++ b/drivers/infiniband/core/uverbs_cmd.c +@@ -3376,6 +3376,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file, + goto err_uobj; + } + ++ if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) { ++ err = -EINVAL; ++ goto err_put; ++ } ++ + flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs * + sizeof(union ib_flow_spec), GFP_KERNEL); + if (!flow_attr) { +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c +index 9032f77cc38d..feb80dbb5948 100644 +--- a/drivers/infiniband/core/verbs.c ++++ b/drivers/infiniband/core/verbs.c +@@ -2115,10 +2115,16 @@ static void __ib_drain_sq(struct ib_qp *qp) + struct ib_cq *cq = qp->send_cq; + struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR }; + struct ib_drain_cqe sdrain; +- struct ib_send_wr swr = {}, *bad_swr; ++ struct ib_send_wr *bad_swr; ++ struct ib_rdma_wr swr = { ++ .wr = { ++ .next = NULL, ++ { .wr_cqe = &sdrain.cqe, }, ++ .opcode = IB_WR_RDMA_WRITE, ++ }, ++ }; + int ret; + +- swr.wr_cqe = &sdrain.cqe; + sdrain.cqe.done = ib_drain_qp_done; + init_completion(&sdrain.done); + +@@ -2128,7 +2134,7 @@ static void __ib_drain_sq(struct ib_qp *qp) + return; + } + +- ret = ib_post_send(qp, &swr, &bad_swr); ++ ret = ib_post_send(qp, &swr.wr, &bad_swr); + if (ret) { + WARN_ONCE(ret, "failed to drain send queue: %d\n", ret); + return; +diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c +index ee578fa713c2..97c2225829ea 100644 +--- a/drivers/infiniband/ulp/srpt/ib_srpt.c ++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c +@@ -787,13 +787,17 @@ static int srpt_post_recv(struct srpt_device *sdev, + */ + static int srpt_zerolength_write(struct srpt_rdma_ch *ch) + { +- struct ib_send_wr wr, *bad_wr; ++ struct ib_send_wr *bad_wr; ++ struct ib_rdma_wr wr = { ++ .wr = { ++ .next = NULL, ++ { .wr_cqe = &ch->zw_cqe, }, ++ .opcode = IB_WR_RDMA_WRITE, ++ .send_flags = IB_SEND_SIGNALED, ++ } ++ }; + +- memset(&wr, 0, sizeof(wr)); +- wr.opcode = IB_WR_RDMA_WRITE; +- wr.wr_cqe = &ch->zw_cqe; +- wr.send_flags = IB_SEND_SIGNALED; +- return ib_post_send(ch->qp, &wr, &bad_wr); ++ return ib_post_send(ch->qp, &wr.wr, &bad_wr); + } + + static void srpt_zerolength_write_done(struct ib_cq *cq, struct ib_wc *wc) +diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c +index 7b5fa501bbcf..696e540304fd 100644 +--- a/drivers/input/mouse/elan_i2c_core.c ++++ b/drivers/input/mouse/elan_i2c_core.c +@@ -1262,6 +1262,8 @@ static const struct acpi_device_id elan_acpi_id[] = { + { "ELAN0611", 0 }, + { "ELAN0612", 0 }, + { "ELAN0618", 0 }, ++ { "ELAN061D", 0 }, ++ { "ELAN0622", 0 }, + { "ELAN1000", 0 }, + { } + }; +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index b353d494ad40..136f6e7bf797 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"), + }, + }, ++ { ++ /* Lenovo LaVie Z */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), ++ }, ++ }, + { } + }; 
+ +diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c +index 119f4ef0d421..b7f943f96068 100644 +--- a/drivers/irqchip/irq-ls-scfg-msi.c ++++ b/drivers/irqchip/irq-ls-scfg-msi.c +@@ -21,6 +21,7 @@ + #include <linux/of_pci.h> + #include <linux/of_platform.h> + #include <linux/spinlock.h> ++#include <linux/dma-iommu.h> + + #define MSI_IRQS_PER_MSIR 32 + #define MSI_MSIR_OFFSET 4 +@@ -94,6 +95,8 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) + + if (msi_affinity_flag) + msg->data |= cpumask_first(data->common->affinity); ++ ++ iommu_dma_map_msi_msg(data->irq, msg); + } + + static int ls_scfg_msi_set_affinity(struct irq_data *irq_data, +diff --git a/drivers/lightnvm/pblk-rb.c b/drivers/lightnvm/pblk-rb.c +index 9bc32578a766..c0dd17a82170 100644 +--- a/drivers/lightnvm/pblk-rb.c ++++ b/drivers/lightnvm/pblk-rb.c +@@ -142,10 +142,9 @@ static void clean_wctx(struct pblk_w_ctx *w_ctx) + { + int flags; + +-try: + flags = READ_ONCE(w_ctx->flags); +- if (!(flags & PBLK_SUBMITTED_ENTRY)) +- goto try; ++ WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY), ++ "pblk: overwriting unsubmitted data\n"); + + /* Release flags on context. Protect from writes and reads */ + smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY); +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 11a67eac55b1..5599712d478e 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -6498,6 +6498,9 @@ static int hot_remove_disk(struct mddev *mddev, dev_t dev) + char b[BDEVNAME_SIZE]; + struct md_rdev *rdev; + ++ if (!mddev->pers) ++ return -ENODEV; ++ + rdev = find_rdev(mddev, dev); + if (!rdev) + return -ENXIO; +diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c +index 029ecba60727..78d830763704 100644 +--- a/drivers/md/raid1.c ++++ b/drivers/md/raid1.c +@@ -2462,6 +2462,8 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio) + fix_read_error(conf, r1_bio->read_disk, + r1_bio->sector, r1_bio->sectors); + unfreeze_array(conf); ++ } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) { ++ md_error(mddev, rdev); + } else { + r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED; + } +diff --git a/drivers/media/common/siano/smsendian.c b/drivers/media/common/siano/smsendian.c +index bfe831c10b1c..b95a631f23f9 100644 +--- a/drivers/media/common/siano/smsendian.c ++++ b/drivers/media/common/siano/smsendian.c +@@ -35,7 +35,7 @@ void smsendian_handle_tx_message(void *buffer) + switch (msg->x_msg_header.msg_type) { + case MSG_SMS_DATA_DOWNLOAD_REQ: + { +- msg->msg_data[0] = le32_to_cpu(msg->msg_data[0]); ++ msg->msg_data[0] = le32_to_cpu((__force __le32)(msg->msg_data[0])); + break; + } + +@@ -44,7 +44,7 @@ void smsendian_handle_tx_message(void *buffer) + sizeof(struct sms_msg_hdr))/4; + + for (i = 0; i < msg_words; i++) +- msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); ++ msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]); + + break; + } +@@ -64,7 +64,7 @@ void smsendian_handle_rx_message(void *buffer) + { + struct sms_version_res *ver = + (struct sms_version_res *) msg; +- ver->chip_model = le16_to_cpu(ver->chip_model); ++ ver->chip_model = le16_to_cpu((__force __le16)ver->chip_model); + break; + } + +@@ -81,7 +81,7 @@ void smsendian_handle_rx_message(void *buffer) + sizeof(struct sms_msg_hdr))/4; + + for (i = 0; i < msg_words; i++) +- msg->msg_data[i] = le32_to_cpu(msg->msg_data[i]); ++ msg->msg_data[i] = le32_to_cpu((__force __le32)msg->msg_data[i]); + + break; + } +@@ -95,9 +95,9 @@ void smsendian_handle_message_header(void *msg) + 
#ifdef __BIG_ENDIAN + struct sms_msg_hdr *phdr = (struct sms_msg_hdr *)msg; + +- phdr->msg_type = le16_to_cpu(phdr->msg_type); +- phdr->msg_length = le16_to_cpu(phdr->msg_length); +- phdr->msg_flags = le16_to_cpu(phdr->msg_flags); ++ phdr->msg_type = le16_to_cpu((__force __le16)phdr->msg_type); ++ phdr->msg_length = le16_to_cpu((__force __le16)phdr->msg_length); ++ phdr->msg_flags = le16_to_cpu((__force __le16)phdr->msg_flags); + #endif /* __BIG_ENDIAN */ + } + EXPORT_SYMBOL_GPL(smsendian_handle_message_header); +diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c +index 700f433261d0..e4d7f2febf00 100644 +--- a/drivers/media/i2c/smiapp/smiapp-core.c ++++ b/drivers/media/i2c/smiapp/smiapp-core.c +@@ -1001,7 +1001,7 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor, + if (rval) + goto out; + +- for (i = 0; i < 1000; i++) { ++ for (i = 1000; i > 0; i--) { + rval = smiapp_read( + sensor, + SMIAPP_REG_U8_DATA_TRANSFER_IF_1_STATUS, &s); +@@ -1012,11 +1012,10 @@ static int smiapp_read_nvm(struct smiapp_sensor *sensor, + if (s & SMIAPP_DATA_TRANSFER_IF_1_STATUS_RD_READY) + break; + +- if (--i == 0) { +- rval = -ETIMEDOUT; +- goto out; +- } +- ++ } ++ if (!i) { ++ rval = -ETIMEDOUT; ++ goto out; + } + + for (i = 0; i < SMIAPP_NVM_PAGE_SIZE; i++) { +diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c +index e79f72b8b858..62b2c5d9bdfb 100644 +--- a/drivers/media/media-device.c ++++ b/drivers/media/media-device.c +@@ -54,9 +54,10 @@ static int media_device_close(struct file *filp) + return 0; + } + +-static int media_device_get_info(struct media_device *dev, +- struct media_device_info *info) ++static long media_device_get_info(struct media_device *dev, void *arg) + { ++ struct media_device_info *info = arg; ++ + memset(info, 0, sizeof(*info)); + + if (dev->driver_name[0]) +@@ -93,9 +94,9 @@ static struct media_entity *find_entity(struct media_device *mdev, u32 id) + return NULL; + } + +-static long media_device_enum_entities(struct media_device *mdev, +- struct media_entity_desc *entd) ++static long media_device_enum_entities(struct media_device *mdev, void *arg) + { ++ struct media_entity_desc *entd = arg; + struct media_entity *ent; + + ent = find_entity(mdev, entd->id); +@@ -146,9 +147,9 @@ static void media_device_kpad_to_upad(const struct media_pad *kpad, + upad->flags = kpad->flags; + } + +-static long media_device_enum_links(struct media_device *mdev, +- struct media_links_enum *links) ++static long media_device_enum_links(struct media_device *mdev, void *arg) + { ++ struct media_links_enum *links = arg; + struct media_entity *entity; + + entity = find_entity(mdev, links->entity); +@@ -194,9 +195,9 @@ static long media_device_enum_links(struct media_device *mdev, + return 0; + } + +-static long media_device_setup_link(struct media_device *mdev, +- struct media_link_desc *linkd) ++static long media_device_setup_link(struct media_device *mdev, void *arg) + { ++ struct media_link_desc *linkd = arg; + struct media_link *link = NULL; + struct media_entity *source; + struct media_entity *sink; +@@ -222,9 +223,9 @@ static long media_device_setup_link(struct media_device *mdev, + return __media_entity_setup_link(link, linkd->flags); + } + +-static long media_device_get_topology(struct media_device *mdev, +- struct media_v2_topology *topo) ++static long media_device_get_topology(struct media_device *mdev, void *arg) + { ++ struct media_v2_topology *topo = arg; + struct media_entity *entity; + struct media_interface *intf; + struct 
media_pad *pad; +diff --git a/drivers/media/pci/saa7164/saa7164-fw.c b/drivers/media/pci/saa7164/saa7164-fw.c +index ef4906406ebf..a50461861133 100644 +--- a/drivers/media/pci/saa7164/saa7164-fw.c ++++ b/drivers/media/pci/saa7164/saa7164-fw.c +@@ -426,7 +426,8 @@ int saa7164_downloadfirmware(struct saa7164_dev *dev) + __func__, fw->size); + + if (fw->size != fwlength) { +- printk(KERN_ERR "xc5000: firmware incorrect size\n"); ++ printk(KERN_ERR "saa7164: firmware incorrect size %zu != %u\n", ++ fw->size, fwlength); + ret = -ENOMEM; + goto out; + } +diff --git a/drivers/media/pci/tw686x/tw686x-video.c b/drivers/media/pci/tw686x/tw686x-video.c +index c3fafa97b2d0..0ea8dd44026c 100644 +--- a/drivers/media/pci/tw686x/tw686x-video.c ++++ b/drivers/media/pci/tw686x/tw686x-video.c +@@ -1228,7 +1228,8 @@ int tw686x_video_init(struct tw686x_dev *dev) + vc->vidq.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + vc->vidq.min_buffers_needed = 2; + vc->vidq.lock = &vc->vb_mutex; +- vc->vidq.gfp_flags = GFP_DMA32; ++ vc->vidq.gfp_flags = dev->dma_mode != TW686X_DMA_MODE_MEMCPY ? ++ GFP_DMA32 : 0; + vc->vidq.dev = &dev->pci_dev->dev; + + err = vb2_queue_init(&vc->vidq); +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c +index 1a428fe9f070..9f023bc6e1b7 100644 +--- a/drivers/media/platform/omap3isp/isp.c ++++ b/drivers/media/platform/omap3isp/isp.c +@@ -1945,6 +1945,7 @@ error_csi2: + + static void isp_detach_iommu(struct isp_device *isp) + { ++ arm_iommu_detach_device(isp->dev); + arm_iommu_release_mapping(isp->mapping); + isp->mapping = NULL; + } +@@ -1961,8 +1962,7 @@ static int isp_attach_iommu(struct isp_device *isp) + mapping = arm_iommu_create_mapping(&platform_bus_type, SZ_1G, SZ_2G); + if (IS_ERR(mapping)) { + dev_err(isp->dev, "failed to create ARM IOMMU mapping\n"); +- ret = PTR_ERR(mapping); +- goto error; ++ return PTR_ERR(mapping); + } + + isp->mapping = mapping; +@@ -1977,7 +1977,8 @@ static int isp_attach_iommu(struct isp_device *isp) + return 0; + + error: +- isp_detach_iommu(isp); ++ arm_iommu_release_mapping(isp->mapping); ++ isp->mapping = NULL; + return ret; + } + +diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c +index 070bac36d766..2e2b8c409150 100644 +--- a/drivers/media/platform/rcar_jpu.c ++++ b/drivers/media/platform/rcar_jpu.c +@@ -1280,7 +1280,7 @@ static int jpu_open(struct file *file) + /* ...issue software reset */ + ret = jpu_reset(jpu); + if (ret) +- goto device_prepare_rollback; ++ goto jpu_reset_rollback; + } + + jpu->ref_count++; +@@ -1288,6 +1288,8 @@ static int jpu_open(struct file *file) + mutex_unlock(&jpu->mutex); + return 0; + ++jpu_reset_rollback: ++ clk_disable_unprepare(jpu->clk); + device_prepare_rollback: + mutex_unlock(&jpu->mutex); + v4l_prepare_rollback: +diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c +index b3034f80163f..8ce6f9cff746 100644 +--- a/drivers/media/radio/si470x/radio-si470x-i2c.c ++++ b/drivers/media/radio/si470x/radio-si470x-i2c.c +@@ -92,7 +92,7 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*"); + */ + int si470x_get_register(struct si470x_device *radio, int regnr) + { +- u16 buf[READ_REG_NUM]; ++ __be16 buf[READ_REG_NUM]; + struct i2c_msg msgs[1] = { + { + .addr = radio->client->addr, +@@ -117,7 +117,7 @@ int si470x_get_register(struct si470x_device *radio, int regnr) + int si470x_set_register(struct si470x_device *radio, int regnr) + { + int i; +- u16 buf[WRITE_REG_NUM]; ++ __be16 
buf[WRITE_REG_NUM]; + struct i2c_msg msgs[1] = { + { + .addr = radio->client->addr, +@@ -147,7 +147,7 @@ int si470x_set_register(struct si470x_device *radio, int regnr) + static int si470x_get_all_registers(struct si470x_device *radio) + { + int i; +- u16 buf[READ_REG_NUM]; ++ __be16 buf[READ_REG_NUM]; + struct i2c_msg msgs[1] = { + { + .addr = radio->client->addr, +diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c +index ffbb178c6918..2dbf632c10de 100644 +--- a/drivers/media/v4l2-core/videobuf2-core.c ++++ b/drivers/media/v4l2-core/videobuf2-core.c +@@ -912,9 +912,12 @@ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) + dprintk(4, "done processing on buffer %d, state: %d\n", + vb->index, state); + +- /* sync buffers */ +- for (plane = 0; plane < vb->num_planes; ++plane) +- call_void_memop(vb, finish, vb->planes[plane].mem_priv); ++ if (state != VB2_BUF_STATE_QUEUED && ++ state != VB2_BUF_STATE_REQUEUEING) { ++ /* sync buffers */ ++ for (plane = 0; plane < vb->num_planes; ++plane) ++ call_void_memop(vb, finish, vb->planes[plane].mem_priv); ++ } + + spin_lock_irqsave(&q->done_lock, flags); + if (state == VB2_BUF_STATE_QUEUED || +diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c +index a4803ac192bb..1d49a8dd4a37 100644 +--- a/drivers/memory/tegra/mc.c ++++ b/drivers/memory/tegra/mc.c +@@ -20,14 +20,6 @@ + #include "mc.h" + + #define MC_INTSTATUS 0x000 +-#define MC_INT_DECERR_MTS (1 << 16) +-#define MC_INT_SECERR_SEC (1 << 13) +-#define MC_INT_DECERR_VPR (1 << 12) +-#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) +-#define MC_INT_INVALID_SMMU_PAGE (1 << 10) +-#define MC_INT_ARBITRATION_EMEM (1 << 9) +-#define MC_INT_SECURITY_VIOLATION (1 << 8) +-#define MC_INT_DECERR_EMEM (1 << 6) + + #define MC_INTMASK 0x004 + +@@ -248,12 +240,13 @@ static const char *const error_names[8] = { + static irqreturn_t tegra_mc_irq(int irq, void *data) + { + struct tegra_mc *mc = data; +- unsigned long status, mask; ++ unsigned long status; + unsigned int bit; + + /* mask all interrupts to avoid flooding */ +- status = mc_readl(mc, MC_INTSTATUS); +- mask = mc_readl(mc, MC_INTMASK); ++ status = mc_readl(mc, MC_INTSTATUS) & mc->soc->intmask; ++ if (!status) ++ return IRQ_NONE; + + for_each_set_bit(bit, &status, 32) { + const char *error = status_names[bit] ?: "unknown"; +@@ -346,7 +339,6 @@ static int tegra_mc_probe(struct platform_device *pdev) + const struct of_device_id *match; + struct resource *res; + struct tegra_mc *mc; +- u32 value; + int err; + + match = of_match_node(tegra_mc_of_match, pdev->dev.of_node); +@@ -414,11 +406,7 @@ static int tegra_mc_probe(struct platform_device *pdev) + + WARN(!mc->soc->client_id_mask, "Missing client ID mask for this SoC\n"); + +- value = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | +- MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | +- MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM; +- +- mc_writel(mc, value, MC_INTMASK); ++ mc_writel(mc, mc->soc->intmask, MC_INTMASK); + + return 0; + } +diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h +index ddb16676c3af..24e020b4609b 100644 +--- a/drivers/memory/tegra/mc.h ++++ b/drivers/memory/tegra/mc.h +@@ -14,6 +14,15 @@ + + #include <soc/tegra/mc.h> + ++#define MC_INT_DECERR_MTS (1 << 16) ++#define MC_INT_SECERR_SEC (1 << 13) ++#define MC_INT_DECERR_VPR (1 << 12) ++#define MC_INT_INVALID_APB_ASID_UPDATE (1 << 11) ++#define MC_INT_INVALID_SMMU_PAGE (1 << 10) ++#define MC_INT_ARBITRATION_EMEM (1 << 9) 
++#define MC_INT_SECURITY_VIOLATION (1 << 8) ++#define MC_INT_DECERR_EMEM (1 << 6) ++ + static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset) + { + return readl(mc->regs + offset); +diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c +index ba8fff3d66a6..6d2a5a849d92 100644 +--- a/drivers/memory/tegra/tegra114.c ++++ b/drivers/memory/tegra/tegra114.c +@@ -930,4 +930,6 @@ const struct tegra_mc_soc tegra114_mc_soc = { + .atom_size = 32, + .client_id_mask = 0x7f, + .smmu = &tegra114_smmu_soc, ++ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | ++ MC_INT_DECERR_EMEM, + }; +diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c +index 5a58e440f4a7..9f68a56f2727 100644 +--- a/drivers/memory/tegra/tegra124.c ++++ b/drivers/memory/tegra/tegra124.c +@@ -1020,6 +1020,9 @@ const struct tegra_mc_soc tegra124_mc_soc = { + .smmu = &tegra124_smmu_soc, + .emem_regs = tegra124_mc_emem_regs, + .num_emem_regs = ARRAY_SIZE(tegra124_mc_emem_regs), ++ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | ++ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | ++ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, + }; + #endif /* CONFIG_ARCH_TEGRA_124_SOC */ + +@@ -1042,5 +1045,8 @@ const struct tegra_mc_soc tegra132_mc_soc = { + .atom_size = 32, + .client_id_mask = 0x7f, + .smmu = &tegra132_smmu_soc, ++ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | ++ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | ++ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, + }; + #endif /* CONFIG_ARCH_TEGRA_132_SOC */ +diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c +index 5e144abe4c18..47c78a6d8f00 100644 +--- a/drivers/memory/tegra/tegra210.c ++++ b/drivers/memory/tegra/tegra210.c +@@ -1077,4 +1077,7 @@ const struct tegra_mc_soc tegra210_mc_soc = { + .atom_size = 64, + .client_id_mask = 0xff, + .smmu = &tegra210_smmu_soc, ++ .intmask = MC_INT_DECERR_MTS | MC_INT_SECERR_SEC | MC_INT_DECERR_VPR | ++ MC_INT_INVALID_APB_ASID_UPDATE | MC_INT_INVALID_SMMU_PAGE | ++ MC_INT_SECURITY_VIOLATION | MC_INT_DECERR_EMEM, + }; +diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c +index b44737840e70..d0689428ea1a 100644 +--- a/drivers/memory/tegra/tegra30.c ++++ b/drivers/memory/tegra/tegra30.c +@@ -952,4 +952,6 @@ const struct tegra_mc_soc tegra30_mc_soc = { + .atom_size = 16, + .client_id_mask = 0x7f, + .smmu = &tegra30_smmu_soc, ++ .intmask = MC_INT_INVALID_SMMU_PAGE | MC_INT_SECURITY_VIOLATION | ++ MC_INT_DECERR_EMEM, + }; +diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c +index b0ca5a4c841e..c5528ae982f2 100644 +--- a/drivers/mfd/cros_ec.c ++++ b/drivers/mfd/cros_ec.c +@@ -112,7 +112,11 @@ int cros_ec_register(struct cros_ec_device *ec_dev) + + mutex_init(&ec_dev->lock); + +- cros_ec_query_all(ec_dev); ++ err = cros_ec_query_all(ec_dev); ++ if (err) { ++ dev_err(dev, "Cannot identify the EC: error %d\n", err); ++ return err; ++ } + + if (ec_dev->irq) { + err = request_threaded_irq(ec_dev->irq, NULL, ec_irq_thread, +diff --git a/drivers/mmc/core/pwrseq_simple.c b/drivers/mmc/core/pwrseq_simple.c +index 13ef162cf066..a8b9fee4d62a 100644 +--- a/drivers/mmc/core/pwrseq_simple.c ++++ b/drivers/mmc/core/pwrseq_simple.c +@@ -40,14 +40,18 @@ static void mmc_pwrseq_simple_set_gpios_value(struct mmc_pwrseq_simple *pwrseq, + struct gpio_descs *reset_gpios = pwrseq->reset_gpios; + + if (!IS_ERR(reset_gpios)) { +- int i; +- int 
values[reset_gpios->ndescs]; ++ int i, *values; ++ int nvalues = reset_gpios->ndescs; + +- for (i = 0; i < reset_gpios->ndescs; i++) ++ values = kmalloc_array(nvalues, sizeof(int), GFP_KERNEL); ++ if (!values) ++ return; ++ ++ for (i = 0; i < nvalues; i++) + values[i] = value; + +- gpiod_set_array_value_cansleep( +- reset_gpios->ndescs, reset_gpios->desc, values); ++ gpiod_set_array_value_cansleep(nvalues, reset_gpios->desc, values); ++ kfree(values); + } + } + +diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c +index 6a2cbbba29aa..5252885e5cda 100644 +--- a/drivers/mmc/host/dw_mmc.c ++++ b/drivers/mmc/host/dw_mmc.c +@@ -1255,6 +1255,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) + if (host->state == STATE_WAITING_CMD11_DONE) + sdmmc_cmd_bits |= SDMMC_CMD_VOLT_SWITCH; + ++ slot->mmc->actual_clock = 0; ++ + if (!clock) { + mci_writel(host, CLKENA, 0); + mci_send_cmd(slot, sdmmc_cmd_bits, 0); +@@ -1313,6 +1315,8 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) + + /* keep the last clock value that was requested from core */ + slot->__clk_old = clock; ++ slot->mmc->actual_clock = div ? ((host->bus_hz / div) >> 1) : ++ host->bus_hz; + } + + host->current_speed = clock; +diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c +index 4005b427023c..16deba1a2385 100644 +--- a/drivers/mtd/nand/fsl_ifc_nand.c ++++ b/drivers/mtd/nand/fsl_ifc_nand.c +@@ -342,9 +342,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + + case NAND_CMD_READID: + case NAND_CMD_PARAM: { ++ /* ++ * For READID, read 8 bytes that are currently used. ++ * For PARAM, read all 3 copies of 256-bytes pages. ++ */ ++ int len = 8; + int timing = IFC_FIR_OP_RB; +- if (command == NAND_CMD_PARAM) ++ if (command == NAND_CMD_PARAM) { + timing = IFC_FIR_OP_RBCD; ++ len = 256 * 3; ++ } + + ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | + (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | +@@ -354,12 +361,8 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, + &ifc->ifc_nand.nand_fcr0); + ifc_out32(column, &ifc->ifc_nand.row3); + +- /* +- * although currently it's 8 bytes for READID, we always read +- * the maximum 256 bytes(for PARAM) +- */ +- ifc_out32(256, &ifc->ifc_nand.nand_fbcr); +- ifc_nand_ctrl->read_bytes = 256; ++ ifc_out32(len, &ifc->ifc_nand.nand_fbcr); ++ ifc_nand_ctrl->read_bytes = len; + + set_addr(mtd, 0, 0, 0); + fsl_ifc_run_command(mtd); +diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c +index 5ada7a41449c..9645c8f05c7f 100644 +--- a/drivers/net/dsa/qca8k.c ++++ b/drivers/net/dsa/qca8k.c +@@ -473,7 +473,7 @@ qca8k_set_pad_ctrl(struct qca8k_priv *priv, int port, int mode) + static void + qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable) + { +- u32 mask = QCA8K_PORT_STATUS_TXMAC; ++ u32 mask = QCA8K_PORT_STATUS_TXMAC | QCA8K_PORT_STATUS_RXMAC; + + /* Port 0 and 6 have no internal PHY */ + if ((port > 0) && (port < 6)) +@@ -490,6 +490,7 @@ qca8k_setup(struct dsa_switch *ds) + { + struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv; + int ret, i, phy_mode = -1; ++ u32 mask; + + /* Make sure that port 0 is the cpu port */ + if (!dsa_is_cpu_port(ds, 0)) { +@@ -515,7 +516,10 @@ qca8k_setup(struct dsa_switch *ds) + if (ret < 0) + return ret; + +- /* Enable CPU Port */ ++ /* Enable CPU Port, force it to maximum bandwidth and full-duplex */ ++ mask = QCA8K_PORT_STATUS_SPEED_1000 | QCA8K_PORT_STATUS_TXFLOW | ++ QCA8K_PORT_STATUS_RXFLOW | 
QCA8K_PORT_STATUS_DUPLEX; ++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(QCA8K_CPU_PORT), mask); + qca8k_reg_set(priv, QCA8K_REG_GLOBAL_FW_CTRL0, + QCA8K_GLOBAL_FW_CTRL0_CPU_PORT_EN); + qca8k_port_set_status(priv, QCA8K_CPU_PORT, 1); +@@ -584,6 +588,47 @@ qca8k_setup(struct dsa_switch *ds) + return 0; + } + ++static void ++qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy) ++{ ++ struct qca8k_priv *priv = ds->priv; ++ u32 reg; ++ ++ /* Force fixed-link setting for CPU port, skip others. */ ++ if (!phy_is_pseudo_fixed_link(phy)) ++ return; ++ ++ /* Set port speed */ ++ switch (phy->speed) { ++ case 10: ++ reg = QCA8K_PORT_STATUS_SPEED_10; ++ break; ++ case 100: ++ reg = QCA8K_PORT_STATUS_SPEED_100; ++ break; ++ case 1000: ++ reg = QCA8K_PORT_STATUS_SPEED_1000; ++ break; ++ default: ++ dev_dbg(priv->dev, "port%d link speed %dMbps not supported.\n", ++ port, phy->speed); ++ return; ++ } ++ ++ /* Set duplex mode */ ++ if (phy->duplex == DUPLEX_FULL) ++ reg |= QCA8K_PORT_STATUS_DUPLEX; ++ ++ /* Force flow control */ ++ if (dsa_is_cpu_port(ds, port)) ++ reg |= QCA8K_PORT_STATUS_RXFLOW | QCA8K_PORT_STATUS_TXFLOW; ++ ++ /* Force link down before changing MAC options */ ++ qca8k_port_set_status(priv, port, 0); ++ qca8k_write(priv, QCA8K_REG_PORT_STATUS(port), reg); ++ qca8k_port_set_status(priv, port, 1); ++} ++ + static int + qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum) + { +@@ -832,6 +877,7 @@ qca8k_get_tag_protocol(struct dsa_switch *ds) + static const struct dsa_switch_ops qca8k_switch_ops = { + .get_tag_protocol = qca8k_get_tag_protocol, + .setup = qca8k_setup, ++ .adjust_link = qca8k_adjust_link, + .get_strings = qca8k_get_strings, + .phy_read = qca8k_phy_read, + .phy_write = qca8k_phy_write, +@@ -863,6 +909,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev) + return -ENOMEM; + + priv->bus = mdiodev->bus; ++ priv->dev = &mdiodev->dev; + + /* read the switches ID register */ + id = qca8k_read(priv, QCA8K_REG_MASK_CTRL); +@@ -934,6 +981,7 @@ static SIMPLE_DEV_PM_OPS(qca8k_pm_ops, + qca8k_suspend, qca8k_resume); + + static const struct of_device_id qca8k_of_match[] = { ++ { .compatible = "qca,qca8334" }, + { .compatible = "qca,qca8337" }, + { /* sentinel */ }, + }; +diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h +index 1cf8a920d4ff..613fe5c50236 100644 +--- a/drivers/net/dsa/qca8k.h ++++ b/drivers/net/dsa/qca8k.h +@@ -51,8 +51,10 @@ + #define QCA8K_GOL_MAC_ADDR0 0x60 + #define QCA8K_GOL_MAC_ADDR1 0x64 + #define QCA8K_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) +-#define QCA8K_PORT_STATUS_SPEED GENMASK(2, 0) +-#define QCA8K_PORT_STATUS_SPEED_S 0 ++#define QCA8K_PORT_STATUS_SPEED GENMASK(1, 0) ++#define QCA8K_PORT_STATUS_SPEED_10 0 ++#define QCA8K_PORT_STATUS_SPEED_100 0x1 ++#define QCA8K_PORT_STATUS_SPEED_1000 0x2 + #define QCA8K_PORT_STATUS_TXMAC BIT(2) + #define QCA8K_PORT_STATUS_RXMAC BIT(3) + #define QCA8K_PORT_STATUS_TXFLOW BIT(4) +@@ -165,6 +167,7 @@ struct qca8k_priv { + struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS]; + struct dsa_switch *ds; + struct mutex reg_mutex; ++ struct device *dev; + }; + + struct qca8k_mib_desc { +diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c +index 52beba8c7a39..e3b7a71fcad9 100644 +--- a/drivers/net/ethernet/amazon/ena/ena_com.c ++++ b/drivers/net/ethernet/amazon/ena/ena_com.c +@@ -331,6 +331,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev, + + memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr)); + ++ io_sq->dma_addr_bits = 
ena_dev->dma_addr_bits; + io_sq->desc_entry_size = + (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ? + sizeof(struct ena_eth_io_tx_desc) : +diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +index 1b45cd73a258..119777986ea4 100644 +--- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c ++++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +@@ -1128,14 +1128,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) + + if (pdata->tx_pause != pdata->phy.tx_pause) { + new_state = 1; +- pdata->hw_if.config_tx_flow_control(pdata); + pdata->tx_pause = pdata->phy.tx_pause; ++ pdata->hw_if.config_tx_flow_control(pdata); + } + + if (pdata->rx_pause != pdata->phy.rx_pause) { + new_state = 1; +- pdata->hw_if.config_rx_flow_control(pdata); + pdata->rx_pause = pdata->phy.rx_pause; ++ pdata->hw_if.config_rx_flow_control(pdata); + } + + /* Speed support */ +diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +index bfd2d0382f4c..94931318587c 100644 +--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c ++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c +@@ -5927,6 +5927,9 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state) + } + mutex_unlock(&bp->hwrm_cmd_lock); + ++ if (!BNXT_SINGLE_PF(bp)) ++ return 0; ++ + diff = link_info->support_auto_speeds ^ link_info->advertising; + if ((link_info->support_auto_speeds | diff) != + link_info->support_auto_speeds) { +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +index ff7a70ffafc6..c133491ad9fa 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +@@ -1272,8 +1272,11 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) + /* We need to alloc a vport for main NIC of PF */ + num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1; + +- if (hdev->num_tqps < num_vport) +- num_vport = hdev->num_tqps; ++ if (hdev->num_tqps < num_vport) { ++ dev_err(&hdev->pdev->dev, "tqps(%d) is less than vports(%d)", ++ hdev->num_tqps, num_vport); ++ return -EINVAL; ++ } + + /* Alloc the same number of TQPs for every vport */ + tqp_per_vport = hdev->num_tqps / num_vport; +diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +index d1e4dcec5db2..69726908e72c 100644 +--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c ++++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hns3_enet.c +@@ -1598,6 +1598,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, + hns3_unmap_buffer(ring, &ring->desc_cb[i]); + ring->desc_cb[i] = *res_cb; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); ++ ring->desc[i].rx.bd_base_info = 0; + } + + static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) +@@ -1605,6 +1606,7 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) + ring->desc_cb[i].reuse_flag = 0; + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); ++ ring->desc[i].rx.bd_base_info = 0; + } + + static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes, +@@ -2881,6 +2883,8 @@ static int __init hns3_init_module(void) + + client.ops = &client_ops; + ++ INIT_LIST_HEAD(&client.node); ++ + ret = hnae3_register_client(&client); + if (ret) + return ret; +diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c 
+index 7a226537877b..6265ce8915b6 100644 +--- a/drivers/net/ethernet/intel/e1000e/netdev.c ++++ b/drivers/net/ethernet/intel/e1000e/netdev.c +@@ -3558,15 +3558,12 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) + } + break; + case e1000_pch_spt: +- if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { +- /* Stable 24MHz frequency */ +- incperiod = INCPERIOD_24MHZ; +- incvalue = INCVALUE_24MHZ; +- shift = INCVALUE_SHIFT_24MHZ; +- adapter->cc.shift = shift; +- break; +- } +- return -EINVAL; ++ /* Stable 24MHz frequency */ ++ incperiod = INCPERIOD_24MHZ; ++ incvalue = INCVALUE_24MHZ; ++ shift = INCVALUE_SHIFT_24MHZ; ++ adapter->cc.shift = shift; ++ break; + case e1000_pch_cnp: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { + /* Stable 24MHz frequency */ +diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c +index d8456c381c99..ef242dbae116 100644 +--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c ++++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c +@@ -337,6 +337,8 @@ void i40e_ptp_rx_hang(struct i40e_pf *pf) + **/ + void i40e_ptp_tx_hang(struct i40e_pf *pf) + { ++ struct sk_buff *skb; ++ + if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) + return; + +@@ -349,9 +351,12 @@ void i40e_ptp_tx_hang(struct i40e_pf *pf) + * within a second it is reasonable to assume that we never will. + */ + if (time_is_before_jiffies(pf->ptp_tx_start + HZ)) { +- dev_kfree_skb_any(pf->ptp_tx_skb); ++ skb = pf->ptp_tx_skb; + pf->ptp_tx_skb = NULL; + clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); ++ ++ /* Free the skb after we clear the bitlock */ ++ dev_kfree_skb_any(skb); + pf->tx_hwtstamp_timeouts++; + } + } +diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index 6ca580cdfd84..1c027f9d9af5 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -8376,12 +8376,17 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index) + if (is_valid_ether_addr(addr)) + rar_high |= E1000_RAH_AV; + +- if (hw->mac.type == e1000_82575) ++ switch (hw->mac.type) { ++ case e1000_82575: ++ case e1000_i210: + rar_high |= E1000_RAH_POOL_1 * + adapter->mac_table[index].queue; +- else ++ break; ++ default: + rar_high |= E1000_RAH_POOL_1 << + adapter->mac_table[index].queue; ++ break; ++ } + } + + wr32(E1000_RAL(index), rar_low); +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +index 90ecc4b06462..90be4385bf36 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +@@ -3737,6 +3737,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p) + return -EPERM; + + ether_addr_copy(hw->mac.addr, addr->sa_data); ++ ether_addr_copy(hw->mac.perm_addr, addr->sa_data); + ether_addr_copy(netdev->dev_addr, addr->sa_data); + + return 0; +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +index 42a6afcaae03..7924f241e3ad 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +@@ -912,8 +912,10 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, + int err; + + /* No need to continue if only VLAN flags were changed */ +- if (mlxsw_sp_port_vlan->bridge_port) ++ if (mlxsw_sp_port_vlan->bridge_port) { ++ 
mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); + return 0; ++ } + + err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port); + if (err) +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 27f2e650e27b..1a9a382bf1c4 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -51,7 +51,7 @@ + #include <linux/of_mdio.h> + #include "dwmac1000.h" + +-#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x) ++#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES) + #define TSO_MAX_BUFF_SIZE (SZ_16K - 1) + + /* Module parameters */ +diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c +index 18013645e76c..0c1adad7415d 100644 +--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c ++++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c +@@ -177,12 +177,18 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) + } + + dev = bus_find_device(&platform_bus_type, NULL, node, match); +- of_node_put(node); ++ if (!dev) { ++ dev_err(dev, "unable to find platform device for %pOF\n", node); ++ goto out; ++ } ++ + priv = dev_get_drvdata(dev); + + priv->cpsw_phy_sel(priv, phy_mode, slave); + + put_device(dev); ++out: ++ of_node_put(node); + } + EXPORT_SYMBOL_GPL(cpsw_phy_sel); + +diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h +index cb250cacf721..e33a6c672a0a 100644 +--- a/drivers/net/hyperv/hyperv_net.h ++++ b/drivers/net/hyperv/hyperv_net.h +@@ -724,6 +724,8 @@ struct net_device_context { + struct hv_device *device_ctx; + /* netvsc_device */ + struct netvsc_device __rcu *nvdev; ++ /* list of netvsc net_devices */ ++ struct list_head list; + /* reconfigure work */ + struct delayed_work dwork; + /* last reconfig time */ +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index aeabeb107fed..6a77ef38c549 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -66,6 +66,8 @@ static int debug = -1; + module_param(debug, int, S_IRUGO); + MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + ++static LIST_HEAD(netvsc_dev_list); ++ + static void netvsc_change_rx_flags(struct net_device *net, int change) + { + struct net_device_context *ndev_ctx = netdev_priv(net); +@@ -1749,13 +1751,10 @@ out_unlock: + + static struct net_device *get_netvsc_bymac(const u8 *mac) + { +- struct net_device *dev; +- +- ASSERT_RTNL(); ++ struct net_device_context *ndev_ctx; + +- for_each_netdev(&init_net, dev) { +- if (dev->netdev_ops != &device_ops) +- continue; /* not a netvsc device */ ++ list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) { ++ struct net_device *dev = hv_get_drvdata(ndev_ctx->device_ctx); + + if (ether_addr_equal(mac, dev->perm_addr)) + return dev; +@@ -1766,25 +1765,18 @@ static struct net_device *get_netvsc_bymac(const u8 *mac) + + static struct net_device *get_netvsc_byref(struct net_device *vf_netdev) + { ++ struct net_device_context *net_device_ctx; + struct net_device *dev; + +- ASSERT_RTNL(); +- +- for_each_netdev(&init_net, dev) { +- struct net_device_context *net_device_ctx; ++ dev = netdev_master_upper_dev_get(vf_netdev); ++ if (!dev || dev->netdev_ops != &device_ops) ++ return NULL; /* not a netvsc device */ + +- if (dev->netdev_ops != &device_ops) +- continue; /* not a netvsc device */ ++ net_device_ctx = netdev_priv(dev); ++ if (!rtnl_dereference(net_device_ctx->nvdev)) ++ return NULL; /* device is removed */ + +- net_device_ctx 
= netdev_priv(dev); +- if (!rtnl_dereference(net_device_ctx->nvdev)) +- continue; /* device is removed */ +- +- if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev) +- return dev; /* a match */ +- } +- +- return NULL; ++ return dev; + } + + /* Called when VF is injecting data into network stack. +@@ -2065,15 +2057,19 @@ static int netvsc_probe(struct hv_device *dev, + else + net->max_mtu = ETH_DATA_LEN; + +- ret = register_netdev(net); ++ rtnl_lock(); ++ ret = register_netdevice(net); + if (ret != 0) { + pr_err("Unable to register netdev.\n"); + goto register_failed; + } + +- return ret; ++ list_add(&net_device_ctx->list, &netvsc_dev_list); ++ rtnl_unlock(); ++ return 0; + + register_failed: ++ rtnl_unlock(); + rndis_filter_device_remove(dev, nvdev); + rndis_failed: + free_percpu(net_device_ctx->vf_stats); +@@ -2119,6 +2115,7 @@ static int netvsc_remove(struct hv_device *dev) + rndis_filter_device_remove(dev, nvdev); + + unregister_netdevice(net); ++ list_del(&ndev_ctx->list); + + rtnl_unlock(); + rcu_read_unlock(); +diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c +index 0831b7142df7..0c5b68e7da51 100644 +--- a/drivers/net/phy/mdio-mux-bcm-iproc.c ++++ b/drivers/net/phy/mdio-mux-bcm-iproc.c +@@ -218,7 +218,7 @@ out: + + static int mdio_mux_iproc_remove(struct platform_device *pdev) + { +- struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev); ++ struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev); + + mdio_mux_uninit(md->mux_handle); + mdiobus_unregister(md->mii_bus); +diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c +index 1b2fe74a44ea..e4a6ed88b9cf 100644 +--- a/drivers/net/phy/phylink.c ++++ b/drivers/net/phy/phylink.c +@@ -561,6 +561,8 @@ void phylink_destroy(struct phylink *pl) + { + if (pl->sfp_bus) + sfp_unregister_upstream(pl->sfp_bus); ++ if (!IS_ERR(pl->link_gpio)) ++ gpiod_put(pl->link_gpio); + + cancel_work_sync(&pl->resolve); + kfree(pl); +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 0aa91ab9a0fb..9e3f632e22f1 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -1216,6 +1216,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev) + mod_timer(&dev->stat_monitor, + jiffies + STAT_UPDATE_TIMER); + } ++ ++ tasklet_schedule(&dev->bh); + } + + return ret; +diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h +index 5d80be213fac..869f276cc1d8 100644 +--- a/drivers/net/wireless/ath/regd.h ++++ b/drivers/net/wireless/ath/regd.h +@@ -68,12 +68,14 @@ enum CountryCode { + CTRY_AUSTRALIA = 36, + CTRY_AUSTRIA = 40, + CTRY_AZERBAIJAN = 31, ++ CTRY_BAHAMAS = 44, + CTRY_BAHRAIN = 48, + CTRY_BANGLADESH = 50, + CTRY_BARBADOS = 52, + CTRY_BELARUS = 112, + CTRY_BELGIUM = 56, + CTRY_BELIZE = 84, ++ CTRY_BERMUDA = 60, + CTRY_BOLIVIA = 68, + CTRY_BOSNIA_HERZ = 70, + CTRY_BRAZIL = 76, +@@ -159,6 +161,7 @@ enum CountryCode { + CTRY_ROMANIA = 642, + CTRY_RUSSIA = 643, + CTRY_SAUDI_ARABIA = 682, ++ CTRY_SERBIA = 688, + CTRY_SERBIA_MONTENEGRO = 891, + CTRY_SINGAPORE = 702, + CTRY_SLOVAKIA = 703, +@@ -170,11 +173,13 @@ enum CountryCode { + CTRY_SWITZERLAND = 756, + CTRY_SYRIA = 760, + CTRY_TAIWAN = 158, ++ CTRY_TANZANIA = 834, + CTRY_THAILAND = 764, + CTRY_TRINIDAD_Y_TOBAGO = 780, + CTRY_TUNISIA = 788, + CTRY_TURKEY = 792, + CTRY_UAE = 784, ++ CTRY_UGANDA = 800, + CTRY_UKRAINE = 804, + CTRY_UNITED_KINGDOM = 826, + CTRY_UNITED_STATES = 840, +diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h +index 
bdd2b4d61f2f..15bbd1e0d912 100644 +--- a/drivers/net/wireless/ath/regd_common.h ++++ b/drivers/net/wireless/ath/regd_common.h +@@ -35,6 +35,7 @@ enum EnumRd { + FRANCE_RES = 0x31, + FCC3_FCCA = 0x3A, + FCC3_WORLD = 0x3B, ++ FCC3_ETSIC = 0x3F, + + ETSI1_WORLD = 0x37, + ETSI3_ETSIA = 0x32, +@@ -44,6 +45,7 @@ enum EnumRd { + ETSI4_ETSIC = 0x38, + ETSI5_WORLD = 0x39, + ETSI6_WORLD = 0x34, ++ ETSI8_WORLD = 0x3D, + ETSI_RESERVED = 0x33, + + MKK1_MKKA = 0x40, +@@ -59,6 +61,7 @@ enum EnumRd { + MKK1_MKKA1 = 0x4A, + MKK1_MKKA2 = 0x4B, + MKK1_MKKC = 0x4C, ++ APL2_FCCA = 0x4D, + + APL3_FCCA = 0x50, + APL1_WORLD = 0x52, +@@ -67,6 +70,7 @@ enum EnumRd { + APL1_ETSIC = 0x55, + APL2_ETSIC = 0x56, + APL5_WORLD = 0x58, ++ APL13_WORLD = 0x5A, + APL6_WORLD = 0x5B, + APL7_FCCA = 0x5C, + APL8_WORLD = 0x5D, +@@ -168,6 +172,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { + {FCC2_ETSIC, CTL_FCC, CTL_ETSI}, + {FCC3_FCCA, CTL_FCC, CTL_FCC}, + {FCC3_WORLD, CTL_FCC, CTL_ETSI}, ++ {FCC3_ETSIC, CTL_FCC, CTL_ETSI}, + {FCC4_FCCA, CTL_FCC, CTL_FCC}, + {FCC5_FCCA, CTL_FCC, CTL_FCC}, + {FCC6_FCCA, CTL_FCC, CTL_FCC}, +@@ -179,6 +184,7 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { + {ETSI4_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI5_WORLD, CTL_ETSI, CTL_ETSI}, + {ETSI6_WORLD, CTL_ETSI, CTL_ETSI}, ++ {ETSI8_WORLD, CTL_ETSI, CTL_ETSI}, + + /* XXX: For ETSI3_ETSIA, Was NO_CTL meant for the 2 GHz band ? */ + {ETSI3_ETSIA, CTL_ETSI, CTL_ETSI}, +@@ -188,9 +194,11 @@ static struct reg_dmn_pair_mapping regDomainPairs[] = { + {FCC1_FCCA, CTL_FCC, CTL_FCC}, + {APL1_WORLD, CTL_FCC, CTL_ETSI}, + {APL2_WORLD, CTL_FCC, CTL_ETSI}, ++ {APL2_FCCA, CTL_FCC, CTL_FCC}, + {APL3_WORLD, CTL_FCC, CTL_ETSI}, + {APL4_WORLD, CTL_FCC, CTL_ETSI}, + {APL5_WORLD, CTL_FCC, CTL_ETSI}, ++ {APL13_WORLD, CTL_ETSI, CTL_ETSI}, + {APL6_WORLD, CTL_ETSI, CTL_ETSI}, + {APL8_WORLD, CTL_ETSI, CTL_ETSI}, + {APL9_WORLD, CTL_ETSI, CTL_ETSI}, +@@ -298,6 +306,7 @@ static struct country_code_to_enum_rd allCountries[] = { + {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, + {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, + {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, ++ {CTRY_BAHAMAS, FCC3_WORLD, "BS"}, + {CTRY_BAHRAIN, APL6_WORLD, "BH"}, + {CTRY_BANGLADESH, NULL1_WORLD, "BD"}, + {CTRY_BARBADOS, FCC2_WORLD, "BB"}, +@@ -305,6 +314,7 @@ static struct country_code_to_enum_rd allCountries[] = { + {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, + {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, + {CTRY_BELIZE, APL1_ETSIC, "BZ"}, ++ {CTRY_BERMUDA, FCC3_FCCA, "BM"}, + {CTRY_BOLIVIA, APL1_ETSIC, "BO"}, + {CTRY_BOSNIA_HERZ, ETSI1_WORLD, "BA"}, + {CTRY_BRAZIL, FCC3_WORLD, "BR"}, +@@ -444,6 +454,7 @@ static struct country_code_to_enum_rd allCountries[] = { + {CTRY_ROMANIA, NULL1_WORLD, "RO"}, + {CTRY_RUSSIA, NULL1_WORLD, "RU"}, + {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, ++ {CTRY_SERBIA, ETSI1_WORLD, "RS"}, + {CTRY_SERBIA_MONTENEGRO, ETSI1_WORLD, "CS"}, + {CTRY_SINGAPORE, APL6_WORLD, "SG"}, + {CTRY_SLOVAKIA, ETSI1_WORLD, "SK"}, +@@ -455,10 +466,12 @@ static struct country_code_to_enum_rd allCountries[] = { + {CTRY_SWITZERLAND, ETSI1_WORLD, "CH"}, + {CTRY_SYRIA, NULL1_WORLD, "SY"}, + {CTRY_TAIWAN, APL3_FCCA, "TW"}, ++ {CTRY_TANZANIA, APL1_WORLD, "TZ"}, + {CTRY_THAILAND, FCC3_WORLD, "TH"}, + {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"}, + {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, + {CTRY_TURKEY, ETSI3_WORLD, "TR"}, ++ {CTRY_UGANDA, FCC3_WORLD, "UG"}, + {CTRY_UKRAINE, NULL1_WORLD, "UA"}, + {CTRY_UAE, NULL1_WORLD, "AE"}, + {CTRY_UNITED_KINGDOM, ETSI1_WORLD, "GB"}, +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +index cd587325e286..dd6e27513cc1 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +@@ -1098,6 +1098,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43340), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43341), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43362), ++ BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43364), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4335_4339), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4339), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_43430), +diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +index a06b6612b658..ca99c3cf41c2 100644 +--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c ++++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +@@ -901,6 +901,8 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) + } + def_rxq = trans_pcie->rxq; + ++ cancel_work_sync(&rba->rx_alloc); ++ + spin_lock(&rba->lock); + atomic_set(&rba->req_pending, 0); + atomic_set(&rba->req_ready, 0); +diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c +index f4f2b9b27e32..50890cab8807 100644 +--- a/drivers/net/wireless/marvell/mwifiex/usb.c ++++ b/drivers/net/wireless/marvell/mwifiex/usb.c +@@ -644,6 +644,9 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf) + MWIFIEX_FUNC_SHUTDOWN); + } + ++ if (adapter->workqueue) ++ flush_workqueue(adapter->workqueue); ++ + mwifiex_usb_free(card); + + mwifiex_dbg(adapter, FATAL, +diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c +index 0cd68ffc2c74..51ccf10f4413 100644 +--- a/drivers/net/wireless/marvell/mwifiex/util.c ++++ b/drivers/net/wireless/marvell/mwifiex/util.c +@@ -708,12 +708,14 @@ void mwifiex_hist_data_set(struct mwifiex_private *priv, u8 rx_rate, s8 snr, + s8 nflr) + { + struct mwifiex_histogram_data *phist_data = priv->hist_data; ++ s8 nf = -nflr; ++ s8 rssi = snr - nflr; + + atomic_inc(&phist_data->num_samples); + atomic_inc(&phist_data->rx_rate[rx_rate]); +- atomic_inc(&phist_data->snr[snr]); +- atomic_inc(&phist_data->noise_flr[128 + nflr]); +- atomic_inc(&phist_data->sig_str[nflr - snr]); ++ atomic_inc(&phist_data->snr[snr + 128]); ++ atomic_inc(&phist_data->noise_flr[nf + 128]); ++ atomic_inc(&phist_data->sig_str[rssi + 128]); + } + + /* function to reset histogram data during init/reset */ +diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c +index 070dfd68bb83..120b0ff545c1 100644 +--- a/drivers/net/wireless/rsi/rsi_91x_hal.c ++++ b/drivers/net/wireless/rsi/rsi_91x_hal.c +@@ -557,28 +557,32 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, + u32 content_size) + { + struct rsi_host_intf_ops *hif_ops = adapter->host_intf_ops; +- struct bl_header bl_hdr; ++ struct bl_header *bl_hdr; + u32 write_addr, write_len; + int status; + +- bl_hdr.flags = 0; +- bl_hdr.image_no = cpu_to_le32(adapter->priv->coex_mode); +- bl_hdr.check_sum = cpu_to_le32( +- *(u32 *)&flash_content[CHECK_SUM_OFFSET]); +- bl_hdr.flash_start_address = cpu_to_le32( +- *(u32 *)&flash_content[ADDR_OFFSET]); +- bl_hdr.flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); ++ bl_hdr = kzalloc(sizeof(*bl_hdr), GFP_KERNEL); ++ if (!bl_hdr) ++ return -ENOMEM; ++ ++ bl_hdr->flags = 0; ++ bl_hdr->image_no = 
cpu_to_le32(adapter->priv->coex_mode); ++ bl_hdr->check_sum = ++ cpu_to_le32(*(u32 *)&flash_content[CHECK_SUM_OFFSET]); ++ bl_hdr->flash_start_address = ++ cpu_to_le32(*(u32 *)&flash_content[ADDR_OFFSET]); ++ bl_hdr->flash_len = cpu_to_le32(*(u32 *)&flash_content[LEN_OFFSET]); + write_len = sizeof(struct bl_header); + + if (adapter->rsi_host_intf == RSI_HOST_INTF_USB) { + write_addr = PING_BUFFER_ADDRESS; + status = hif_ops->write_reg_multiple(adapter, write_addr, +- (u8 *)&bl_hdr, write_len); ++ (u8 *)bl_hdr, write_len); + if (status < 0) { + rsi_dbg(ERR_ZONE, + "%s: Failed to load Version/CRC structure\n", + __func__); +- return status; ++ goto fail; + } + } else { + write_addr = PING_BUFFER_ADDRESS >> 16; +@@ -587,20 +591,23 @@ static int bl_write_header(struct rsi_hw *adapter, u8 *flash_content, + rsi_dbg(ERR_ZONE, + "%s: Unable to set ms word to common reg\n", + __func__); +- return status; ++ goto fail; + } + write_addr = RSI_SD_REQUEST_MASTER | + (PING_BUFFER_ADDRESS & 0xFFFF); + status = hif_ops->write_reg_multiple(adapter, write_addr, +- (u8 *)&bl_hdr, write_len); ++ (u8 *)bl_hdr, write_len); + if (status < 0) { + rsi_dbg(ERR_ZONE, + "%s: Failed to load Version/CRC structure\n", + __func__); +- return status; ++ goto fail; + } + } +- return 0; ++ status = 0; ++fail: ++ kfree(bl_hdr); ++ return status; + } + + static u32 read_flash_capacity(struct rsi_hw *adapter) +diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c +index 370161ca2a1c..0362967874aa 100644 +--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c ++++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c +@@ -161,7 +161,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) + int err; + struct mmc_card *card = pfunction->card; + struct mmc_host *host = card->host; +- s32 bit = (fls(host->ocr_avail) - 1); + u8 cmd52_resp; + u32 clock, resp, i; + u16 rca; +@@ -181,7 +180,6 @@ static void rsi_reset_card(struct sdio_func *pfunction) + msleep(20); + + /* Initialize the SDIO card */ +- host->ios.vdd = bit; + host->ios.chip_select = MMC_CS_DONTCARE; + host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; + host->ios.power_mode = MMC_POWER_UP; +@@ -970,17 +968,21 @@ static void ulp_read_write(struct rsi_hw *adapter, u16 addr, u32 data, + /*This function resets and re-initializes the chip.*/ + static void rsi_reset_chip(struct rsi_hw *adapter) + { +- __le32 data; ++ u8 *data; + u8 sdio_interrupt_status = 0; + u8 request = 1; + int ret; + ++ data = kzalloc(sizeof(u32), GFP_KERNEL); ++ if (!data) ++ return; ++ + rsi_dbg(INFO_ZONE, "Writing disable to wakeup register\n"); + ret = rsi_sdio_write_register(adapter, 0, SDIO_WAKEUP_REG, &request); + if (ret < 0) { + rsi_dbg(ERR_ZONE, + "%s: Failed to write SDIO wakeup register\n", __func__); +- return; ++ goto err; + } + msleep(20); + ret = rsi_sdio_read_register(adapter, RSI_FN1_INT_REGISTER, +@@ -988,7 +990,7 @@ static void rsi_reset_chip(struct rsi_hw *adapter) + if (ret < 0) { + rsi_dbg(ERR_ZONE, "%s: Failed to Read Intr Status Register\n", + __func__); +- return; ++ goto err; + } + rsi_dbg(INFO_ZONE, "%s: Intr Status Register value = %d\n", + __func__, sdio_interrupt_status); +@@ -998,17 +1000,17 @@ static void rsi_reset_chip(struct rsi_hw *adapter) + rsi_dbg(ERR_ZONE, + "%s: Unable to set ms word to common reg\n", + __func__); +- return; ++ goto err; + } + +- data = TA_HOLD_THREAD_VALUE; ++ put_unaligned_le32(TA_HOLD_THREAD_VALUE, data); + if (rsi_sdio_write_register_multiple(adapter, TA_HOLD_THREAD_REG | + RSI_SD_REQUEST_MASTER, +- (u8 *)&data, 4)) { ++ data, 
4)) { + rsi_dbg(ERR_ZONE, + "%s: Unable to hold Thread-Arch processor threads\n", + __func__); +- return; ++ goto err; + } + + /* This msleep will ensure Thread-Arch processor to go to hold +@@ -1029,6 +1031,9 @@ static void rsi_reset_chip(struct rsi_hw *adapter) + * read write operations to complete for chip reset. + */ + msleep(500); ++err: ++ kfree(data); ++ return; + } + + /** +diff --git a/drivers/net/wireless/rsi/rsi_sdio.h b/drivers/net/wireless/rsi/rsi_sdio.h +index 903392039200..6788fbbdd166 100644 +--- a/drivers/net/wireless/rsi/rsi_sdio.h ++++ b/drivers/net/wireless/rsi/rsi_sdio.h +@@ -85,7 +85,7 @@ enum sdio_interrupt_type { + #define TA_SOFT_RST_CLR 0 + #define TA_SOFT_RST_SET BIT(0) + #define TA_PC_ZERO 0 +-#define TA_HOLD_THREAD_VALUE cpu_to_le32(0xF) ++#define TA_HOLD_THREAD_VALUE 0xF + #define TA_RELEASE_THREAD_VALUE cpu_to_le32(0xF) + #define TA_BASE_ADDR 0x2200 + #define MISC_CFG_BASE_ADDR 0x4105 +diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c +index f8a1fea64e25..219d1a86b92e 100644 +--- a/drivers/net/wireless/ti/wlcore/sdio.c ++++ b/drivers/net/wireless/ti/wlcore/sdio.c +@@ -406,6 +406,11 @@ static int wl1271_suspend(struct device *dev) + mmc_pm_flag_t sdio_flags; + int ret = 0; + ++ if (!wl) { ++ dev_err(dev, "no wilink module was probed\n"); ++ goto out; ++ } ++ + dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n", + wl->wow_enabled); + +diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c +index f07b9c9bb5ba..dfc076f9ee4b 100644 +--- a/drivers/net/xen-netfront.c ++++ b/drivers/net/xen-netfront.c +@@ -87,6 +87,7 @@ struct netfront_cb { + /* IRQ name is queue name with "-tx" or "-rx" appended */ + #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) + ++static DECLARE_WAIT_QUEUE_HEAD(module_load_q); + static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); + + struct netfront_stats { +@@ -239,7 +240,7 @@ static void rx_refill_timeout(unsigned long data) + static int netfront_tx_slot_available(struct netfront_queue *queue) + { + return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < +- (NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2); ++ (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1); + } + + static void xennet_maybe_wake_tx(struct netfront_queue *queue) +@@ -790,7 +791,7 @@ static int xennet_get_responses(struct netfront_queue *queue, + RING_IDX cons = queue->rx.rsp_cons; + struct sk_buff *skb = xennet_get_rx_skb(queue, cons); + grant_ref_t ref = xennet_get_rx_ref(queue, cons); +- int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD); ++ int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD); + int slots = 1; + int err = 0; + unsigned long ret; +@@ -1330,6 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) + netif_carrier_off(netdev); + + xenbus_switch_state(dev, XenbusStateInitialising); ++ wait_event(module_load_q, ++ xenbus_read_driver_state(dev->otherend) != ++ XenbusStateClosed && ++ xenbus_read_driver_state(dev->otherend) != ++ XenbusStateUnknown); + return netdev; + + exit: +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 4cac4755abef..f5643d107cc6 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -2519,6 +2519,9 @@ static pci_ers_result_t nvme_slot_reset(struct pci_dev *pdev) + + static void nvme_error_resume(struct pci_dev *pdev) + { ++ struct nvme_dev *dev = pci_get_drvdata(pdev); ++ ++ flush_work(&dev->ctrl.reset_work); + pci_cleanup_aer_uncorrect_error_status(pdev); + } + +@@ -2562,6 +2565,8 @@ static const struct pci_device_id 
nvme_id_table[] = { + .driver_data = NVME_QUIRK_LIGHTNVM, }, + { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ + .driver_data = NVME_QUIRK_LIGHTNVM, }, ++ { PCI_DEVICE(0x1d1d, 0x2601), /* CNEX Granby */ ++ .driver_data = NVME_QUIRK_LIGHTNVM, }, + { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, + { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c +index 93a082e0bdd4..48a831d58e7a 100644 +--- a/drivers/nvme/host/rdma.c ++++ b/drivers/nvme/host/rdma.c +@@ -796,7 +796,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, + if (error) { + dev_err(ctrl->ctrl.device, + "prop_get NVME_REG_CAP failed\n"); +- goto out_cleanup_queue; ++ goto out_stop_queue; + } + + ctrl->ctrl.sqsize = +@@ -804,23 +804,25 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, + + error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); + if (error) +- goto out_cleanup_queue; ++ goto out_stop_queue; + + ctrl->ctrl.max_hw_sectors = + (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9); + + error = nvme_init_identify(&ctrl->ctrl); + if (error) +- goto out_cleanup_queue; ++ goto out_stop_queue; + + error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev, + &ctrl->async_event_sqe, sizeof(struct nvme_command), + DMA_TO_DEVICE); + if (error) +- goto out_cleanup_queue; ++ goto out_stop_queue; + + return 0; + ++out_stop_queue: ++ nvme_rdma_stop_queue(&ctrl->queues[0]); + out_cleanup_queue: + if (new) + blk_cleanup_queue(ctrl->ctrl.admin_q); +diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c +index d12e5de78e70..2afafd5d8915 100644 +--- a/drivers/nvmem/core.c ++++ b/drivers/nvmem/core.c +@@ -1049,6 +1049,8 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, + + /* setup the first byte with lsb bits from nvmem */ + rc = nvmem_reg_read(nvmem, cell->offset, &v, 1); ++ if (rc) ++ goto err; + *b++ |= GENMASK(bit_offset - 1, 0) & v; + + /* setup rest of the byte if any */ +@@ -1067,11 +1069,16 @@ static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, + /* setup the last byte with msb bits from nvmem */ + rc = nvmem_reg_read(nvmem, + cell->offset + cell->bytes - 1, &v, 1); ++ if (rc) ++ goto err; + *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v; + + } + + return buf; ++err: ++ kfree(buf); ++ return ERR_PTR(rc); + } + + /** +diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c +index 087645116ecb..c78fd9c2cf8c 100644 +--- a/drivers/pci/host/pci-xgene.c ++++ b/drivers/pci/host/pci-xgene.c +@@ -686,7 +686,6 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) + + bus = bridge->bus; + +- pci_scan_child_bus(bus); + pci_assign_unassigned_bus_resources(bus); + list_for_each_entry(child, &bus->children, node) + pcie_bus_configure_settings(child); +diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c +index 05832b597e53..46c2ee2caf28 100644 +--- a/drivers/pci/hotplug/pciehp_hpc.c ++++ b/drivers/pci/hotplug/pciehp_hpc.c +@@ -863,6 +863,13 @@ struct controller *pcie_init(struct pcie_device *dev) + if (pdev->hotplug_user_indicators) + slot_cap &= ~(PCI_EXP_SLTCAP_AIP | PCI_EXP_SLTCAP_PIP); + ++ /* ++ * We assume no Thunderbolt controllers support Command Complete events, ++ * but some controllers falsely claim they do. 
++ */ ++ if (pdev->is_thunderbolt) ++ slot_cap |= PCI_EXP_SLTCAP_NCCS; ++ + ctrl->slot_cap = slot_cap; + mutex_init(&ctrl->ctrl_lock); + init_waitqueue_head(&ctrl->queue); +diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c +index 00fa4278c1f4..c3f0473d1afa 100644 +--- a/drivers/pci/pci-sysfs.c ++++ b/drivers/pci/pci-sysfs.c +@@ -305,13 +305,16 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr, + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + +- if (!val) { +- if (pci_is_enabled(pdev)) +- pci_disable_device(pdev); +- else +- result = -EIO; +- } else ++ device_lock(dev); ++ if (dev->driver) ++ result = -EBUSY; ++ else if (val) + result = pci_enable_device(pdev); ++ else if (pci_is_enabled(pdev)) ++ pci_disable_device(pdev); ++ else ++ result = -EIO; ++ device_unlock(dev); + + return result < 0 ? result : count; + } +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index f285cd74088e..4bccaf688aad 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -516,12 +516,14 @@ static void devm_pci_release_host_bridge_dev(struct device *dev) + + if (bridge->release_fn) + bridge->release_fn(bridge); ++ ++ pci_free_resource_list(&bridge->windows); + } + + static void pci_release_host_bridge_dev(struct device *dev) + { + devm_pci_release_host_bridge_dev(dev); +- pci_free_host_bridge(to_pci_host_bridge(dev)); ++ kfree(to_pci_host_bridge(dev)); + } + + struct pci_host_bridge *pci_alloc_host_bridge(size_t priv) +diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c +index b1ca838dd80a..e61e2f8c91ce 100644 +--- a/drivers/pinctrl/pinctrl-at91-pio4.c ++++ b/drivers/pinctrl/pinctrl-at91-pio4.c +@@ -576,8 +576,10 @@ static int atmel_pctl_dt_node_to_map(struct pinctrl_dev *pctldev, + for_each_child_of_node(np_config, np) { + ret = atmel_pctl_dt_subnode_to_map(pctldev, np, map, + &reserved_maps, num_maps); +- if (ret < 0) ++ if (ret < 0) { ++ of_node_put(np); + break; ++ } + } + } + +diff --git a/drivers/regulator/cpcap-regulator.c b/drivers/regulator/cpcap-regulator.c +index f541b80f1b54..bd910fe123d9 100644 +--- a/drivers/regulator/cpcap-regulator.c ++++ b/drivers/regulator/cpcap-regulator.c +@@ -222,7 +222,7 @@ static unsigned int cpcap_map_mode(unsigned int mode) + case CPCAP_BIT_AUDIO_LOW_PWR: + return REGULATOR_MODE_STANDBY; + default: +- return -EINVAL; ++ return REGULATOR_MODE_INVALID; + } + } + +diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c +index c9875355905d..a3bf7c993723 100644 +--- a/drivers/regulator/of_regulator.c ++++ b/drivers/regulator/of_regulator.c +@@ -31,6 +31,7 @@ static void of_get_regulation_constraints(struct device_node *np, + struct regulation_constraints *constraints = &(*init_data)->constraints; + struct regulator_state *suspend_state; + struct device_node *suspend_np; ++ unsigned int mode; + int ret, i; + u32 pval; + +@@ -124,11 +125,11 @@ static void of_get_regulation_constraints(struct device_node *np, + + if (!of_property_read_u32(np, "regulator-initial-mode", &pval)) { + if (desc && desc->of_map_mode) { +- ret = desc->of_map_mode(pval); +- if (ret == -EINVAL) ++ mode = desc->of_map_mode(pval); ++ if (mode == REGULATOR_MODE_INVALID) + pr_err("%s: invalid mode %u\n", np->name, pval); + else +- constraints->initial_mode = ret; ++ constraints->initial_mode = mode; + } else { + pr_warn("%s: mapping for mode %d not defined\n", + np->name, pval); +@@ -163,12 +164,12 @@ static void of_get_regulation_constraints(struct device_node *np, + if 
(!of_property_read_u32(suspend_np, "regulator-mode", + &pval)) { + if (desc && desc->of_map_mode) { +- ret = desc->of_map_mode(pval); +- if (ret == -EINVAL) ++ mode = desc->of_map_mode(pval); ++ if (mode == REGULATOR_MODE_INVALID) + pr_err("%s: invalid mode %u\n", + np->name, pval); + else +- suspend_state->mode = ret; ++ suspend_state->mode = mode; + } else { + pr_warn("%s: mapping for mode %d not defined\n", + np->name, pval); +diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c +index 63922a2167e5..659e516455be 100644 +--- a/drivers/regulator/pfuze100-regulator.c ++++ b/drivers/regulator/pfuze100-regulator.c +@@ -158,6 +158,7 @@ static const struct regulator_ops pfuze100_sw_regulator_ops = { + static const struct regulator_ops pfuze100_swb_regulator_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, ++ .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c +index a4456db5849d..884c7505ed91 100644 +--- a/drivers/regulator/twl-regulator.c ++++ b/drivers/regulator/twl-regulator.c +@@ -274,7 +274,7 @@ static inline unsigned int twl4030reg_map_mode(unsigned int mode) + case RES_STATE_SLEEP: + return REGULATOR_MODE_STANDBY; + default: +- return -EINVAL; ++ return REGULATOR_MODE_INVALID; + } + } + +diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c +index 9eb32ead63db..e4f951e968a4 100644 +--- a/drivers/rtc/interface.c ++++ b/drivers/rtc/interface.c +@@ -359,6 +359,11 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) + { + int err; + ++ if (!rtc->ops) ++ return -ENODEV; ++ else if (!rtc->ops->set_alarm) ++ return -EINVAL; ++ + err = rtc_valid_tm(&alarm->time); + if (err != 0) + return err; +diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c +index a3418a8a3796..97fdc99bfeef 100644 +--- a/drivers/rtc/rtc-tps6586x.c ++++ b/drivers/rtc/rtc-tps6586x.c +@@ -276,14 +276,15 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) + device_init_wakeup(&pdev->dev, 1); + + platform_set_drvdata(pdev, rtc); +- rtc->rtc = devm_rtc_device_register(&pdev->dev, dev_name(&pdev->dev), +- &tps6586x_rtc_ops, THIS_MODULE); ++ rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc->rtc)) { + ret = PTR_ERR(rtc->rtc); +- dev_err(&pdev->dev, "RTC device register: ret %d\n", ret); ++ dev_err(&pdev->dev, "RTC allocate device: ret %d\n", ret); + goto fail_rtc_register; + } + ++ rtc->rtc->ops = &tps6586x_rtc_ops; ++ + ret = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, + tps6586x_rtc_irq, + IRQF_ONESHOT, +@@ -294,6 +295,13 @@ static int tps6586x_rtc_probe(struct platform_device *pdev) + goto fail_rtc_register; + } + disable_irq(rtc->irq); ++ ++ ret = rtc_register_device(rtc->rtc); ++ if (ret) { ++ dev_err(&pdev->dev, "RTC device register: ret %d\n", ret); ++ goto fail_rtc_register; ++ } ++ + return 0; + + fail_rtc_register: +diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c +index d0244d7979fc..a56b526db89a 100644 +--- a/drivers/rtc/rtc-tps65910.c ++++ b/drivers/rtc/rtc-tps65910.c +@@ -380,6 +380,10 @@ static int tps65910_rtc_probe(struct platform_device *pdev) + if (!tps_rtc) + return -ENOMEM; + ++ tps_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); ++ if (IS_ERR(tps_rtc->rtc)) ++ return PTR_ERR(tps_rtc->rtc); ++ + /* Clear pending 
interrupts */ + ret = regmap_read(tps65910->regmap, TPS65910_RTC_STATUS, &rtc_reg); + if (ret < 0) +@@ -421,10 +425,10 @@ static int tps65910_rtc_probe(struct platform_device *pdev) + tps_rtc->irq = irq; + device_set_wakeup_capable(&pdev->dev, 1); + +- tps_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, +- &tps65910_rtc_ops, THIS_MODULE); +- if (IS_ERR(tps_rtc->rtc)) { +- ret = PTR_ERR(tps_rtc->rtc); ++ tps_rtc->rtc->ops = &tps65910_rtc_ops; ++ ++ ret = rtc_register_device(tps_rtc->rtc); ++ if (ret) { + dev_err(&pdev->dev, "RTC device register: err %d\n", ret); + return ret; + } +diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c +index 7ce22967fd16..7ed010714f29 100644 +--- a/drivers/rtc/rtc-vr41xx.c ++++ b/drivers/rtc/rtc-vr41xx.c +@@ -292,13 +292,14 @@ static int rtc_probe(struct platform_device *pdev) + goto err_rtc1_iounmap; + } + +- rtc = devm_rtc_device_register(&pdev->dev, rtc_name, &vr41xx_rtc_ops, +- THIS_MODULE); ++ rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc)) { + retval = PTR_ERR(rtc); + goto err_iounmap_all; + } + ++ rtc->ops = &vr41xx_rtc_ops; ++ + rtc->max_user_freq = MAX_PERIODIC_RATE; + + spin_lock_irq(&rtc_lock); +@@ -340,6 +341,10 @@ static int rtc_probe(struct platform_device *pdev) + + dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n"); + ++ retval = rtc_register_device(rtc); ++ if (retval) ++ goto err_iounmap_all; ++ + return 0; + + err_iounmap_all: +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c +index b415ba42ca73..599447032e50 100644 +--- a/drivers/s390/scsi/zfcp_dbf.c ++++ b/drivers/s390/scsi/zfcp_dbf.c +@@ -285,6 +285,8 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, + struct list_head *entry; + unsigned long flags; + ++ lockdep_assert_held(&adapter->erp_lock); ++ + if (unlikely(!debug_level_enabled(dbf->rec, level))) + return; + +diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c +index 00e7968a1d70..a1388842e17e 100644 +--- a/drivers/scsi/3w-9xxx.c ++++ b/drivers/scsi/3w-9xxx.c +@@ -886,6 +886,11 @@ static int twa_chrdev_open(struct inode *inode, struct file *file) + unsigned int minor_number; + int retval = TW_IOCTL_ERROR_OS_ENODEV; + ++ if (!capable(CAP_SYS_ADMIN)) { ++ retval = -EACCES; ++ goto out; ++ } ++ + minor_number = iminor(inode); + if (minor_number >= twa_device_extension_count) + goto out; +diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c +index 33261b690774..f6179e3d6953 100644 +--- a/drivers/scsi/3w-xxxx.c ++++ b/drivers/scsi/3w-xxxx.c +@@ -1033,6 +1033,9 @@ static int tw_chrdev_open(struct inode *inode, struct file *file) + + dprintk(KERN_WARNING "3w-xxxx: tw_ioctl_open()\n"); + ++ if (!capable(CAP_SYS_ADMIN)) ++ return -EACCES; ++ + minor_number = iminor(inode); + if (minor_number >= tw_device_extension_count) + return -ENODEV; +diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c +index 0b6467206f8e..737314cac8d8 100644 +--- a/drivers/scsi/cxlflash/main.c ++++ b/drivers/scsi/cxlflash/main.c +@@ -946,9 +946,9 @@ static void cxlflash_remove(struct pci_dev *pdev) + return; + } + +- /* If a Task Management Function is active, wait for it to complete +- * before continuing with remove. 
+- */ ++ /* Yield to running recovery threads before continuing with remove */ ++ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && ++ cfg->state != STATE_PROBING); + spin_lock_irqsave(&cfg->tmf_slock, lock_flags); + if (cfg->tmf_active) + wait_event_interruptible_lock_irq(cfg->tmf_waitq, +@@ -1303,7 +1303,10 @@ static void afu_err_intr_init(struct afu *afu) + for (i = 0; i < afu->num_hwqs; i++) { + hwq = get_hwq(afu, i); + +- writeq_be(SISL_MSI_SYNC_ERROR, &hwq->host_map->ctx_ctrl); ++ reg = readq_be(&hwq->host_map->ctx_ctrl); ++ WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0); ++ reg |= SISL_MSI_SYNC_ERROR; ++ writeq_be(reg, &hwq->host_map->ctx_ctrl); + writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask); + } + } +diff --git a/drivers/scsi/cxlflash/sislite.h b/drivers/scsi/cxlflash/sislite.h +index 09daa86670fc..0892fb1f0a1e 100644 +--- a/drivers/scsi/cxlflash/sislite.h ++++ b/drivers/scsi/cxlflash/sislite.h +@@ -284,6 +284,7 @@ struct sisl_host_map { + __be64 cmd_room; + __be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */ + #define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */ ++#define SISL_CTX_CTRL_LISN_MASK (0xFFULL) + __be64 mbox_w; /* restricted use */ + __be64 sq_start; /* Submission Queue (R/W): write sequence and */ + __be64 sq_end; /* inclusion semantics are the same as RRQ */ +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +index 2e5fa9717be8..871962b2e2f6 100644 +--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c ++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +@@ -328,10 +328,11 @@ enum { + #define DIR_TO_DEVICE 2 + #define DIR_RESERVED 3 + +-#define CMD_IS_UNCONSTRAINT(cmd) \ +- ((cmd == ATA_CMD_READ_LOG_EXT) || \ +- (cmd == ATA_CMD_READ_LOG_DMA_EXT) || \ +- (cmd == ATA_CMD_DEV_RESET)) ++#define FIS_CMD_IS_UNCONSTRAINED(fis) \ ++ ((fis.command == ATA_CMD_READ_LOG_EXT) || \ ++ (fis.command == ATA_CMD_READ_LOG_DMA_EXT) || \ ++ ((fis.command == ATA_CMD_DEV_RESET) && \ ++ ((fis.control & ATA_SRST) != 0))) + + static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) + { +@@ -1044,7 +1045,7 @@ static int prep_ata_v3_hw(struct hisi_hba *hisi_hba, + << CMD_HDR_FRAME_TYPE_OFF; + dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF; + +- if (CMD_IS_UNCONSTRAINT(task->ata_task.fis.command)) ++ if (FIS_CMD_IS_UNCONSTRAINED(task->ata_task.fis)) + dw1 |= 1 << CMD_HDR_UNCON_CMD_OFF; + + hdr->dw1 = cpu_to_le32(dw1); +diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c +index 7195cff51d4c..9b6f5d024dba 100644 +--- a/drivers/scsi/megaraid.c ++++ b/drivers/scsi/megaraid.c +@@ -4199,6 +4199,9 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) + int irq, i, j; + int error = -ENODEV; + ++ if (hba_count >= MAX_CONTROLLERS) ++ goto out; ++ + if (pci_enable_device(pdev)) + goto out; + pci_set_master(pdev); +diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c +index d8f626567f59..06a2e3d9fc5b 100644 +--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c ++++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c +@@ -2677,6 +2677,9 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, + pRAID_Context->timeout_value = cpu_to_le16(os_timeout_value); + pRAID_Context->virtual_disk_tgt_id = cpu_to_le16(device_id); + } else { ++ if (os_timeout_value) ++ os_timeout_value++; ++ + /* system pd Fast Path */ + io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + timeout_limit = (scmd->device->type == TYPE_DISK) ? 
+diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c +index 7c0064500cc5..382edb79a0de 100644 +--- a/drivers/scsi/qedf/qedf_main.c ++++ b/drivers/scsi/qedf/qedf_main.c +@@ -1649,6 +1649,15 @@ static int qedf_vport_destroy(struct fc_vport *vport) + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port = vport->dd_data; ++ struct qedf_ctx *qedf = lport_priv(vn_port); ++ ++ if (!qedf) { ++ QEDF_ERR(NULL, "qedf is NULL.\n"); ++ goto out; ++ } ++ ++ /* Set unloading bit on vport qedf_ctx to prevent more I/O */ ++ set_bit(QEDF_UNLOADING, &qedf->flags); + + mutex_lock(&n_port->lp_mutex); + list_del(&vn_port->list); +@@ -1675,6 +1684,7 @@ static int qedf_vport_destroy(struct fc_vport *vport) + if (vn_port->host) + scsi_host_put(vn_port->host); + ++out: + return 0; + } + +diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c +index a5e30e9449ef..375cede0c534 100644 +--- a/drivers/scsi/scsi_dh.c ++++ b/drivers/scsi/scsi_dh.c +@@ -58,7 +58,10 @@ static const struct scsi_dh_blist scsi_dh_blist[] = { + {"IBM", "3526", "rdac", }, + {"IBM", "3542", "rdac", }, + {"IBM", "3552", "rdac", }, +- {"SGI", "TP9", "rdac", }, ++ {"SGI", "TP9300", "rdac", }, ++ {"SGI", "TP9400", "rdac", }, ++ {"SGI", "TP9500", "rdac", }, ++ {"SGI", "TP9700", "rdac", }, + {"SGI", "IS", "rdac", }, + {"STK", "OPENstorage", "rdac", }, + {"STK", "FLEXLINE 380", "rdac", }, +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index 3bb1f6cc297a..21c81c1feac5 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ -4947,6 +4947,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) + hba = container_of(work, struct ufs_hba, eeh_work); + + pm_runtime_get_sync(hba->dev); ++ scsi_block_requests(hba->host); + err = ufshcd_get_ee_status(hba, &status); + if (err) { + dev_err(hba->dev, "%s: failed to get exception status %d\n", +@@ -4960,6 +4961,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) + ufshcd_bkops_exception_event_handler(hba); + + out: ++ scsi_unblock_requests(hba->host); + pm_runtime_put_sync(hba->dev); + return; + } +@@ -6761,9 +6763,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, + if (list_empty(head)) + goto out; + +- ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); +- if (ret) +- return ret; ++ /* ++ * vendor specific setup_clocks ops may depend on clocks managed by ++ * this standard driver hence call the vendor specific setup_clocks ++ * before disabling the clocks managed here. ++ */ ++ if (!on) { ++ ret = ufshcd_vops_setup_clocks(hba, on, PRE_CHANGE); ++ if (ret) ++ return ret; ++ } + + list_for_each_entry(clki, head, list) { + if (!IS_ERR_OR_NULL(clki->clk)) { +@@ -6787,9 +6796,16 @@ static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, + } + } + +- ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); +- if (ret) +- return ret; ++ /* ++ * vendor specific setup_clocks ops may depend on clocks managed by ++ * this standard driver hence call the vendor specific setup_clocks ++ * after enabling the clocks managed here. 
++ */ ++ if (on) { ++ ret = ufshcd_vops_setup_clocks(hba, on, POST_CHANGE); ++ if (ret) ++ return ret; ++ } + + out: + if (ret) { +diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c +index afc7ecc3c187..f4e3bd40c72e 100644 +--- a/drivers/soc/imx/gpcv2.c ++++ b/drivers/soc/imx/gpcv2.c +@@ -155,7 +155,7 @@ static int imx7_gpc_pu_pgc_sw_pdn_req(struct generic_pm_domain *genpd) + return imx7_gpc_pu_pgc_sw_pxx_req(genpd, false); + } + +-static struct imx7_pgc_domain imx7_pgc_domains[] = { ++static const struct imx7_pgc_domain imx7_pgc_domains[] = { + [IMX7_POWER_DOMAIN_MIPI_PHY] = { + .genpd = { + .name = "mipi-phy", +@@ -321,11 +321,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev) + continue; + } + +- domain = &imx7_pgc_domains[domain_index]; +- domain->regmap = regmap; +- domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req; +- domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req; +- + pd_pdev = platform_device_alloc("imx7-pgc-domain", + domain_index); + if (!pd_pdev) { +@@ -334,7 +329,20 @@ static int imx_gpcv2_probe(struct platform_device *pdev) + return -ENOMEM; + } + +- pd_pdev->dev.platform_data = domain; ++ ret = platform_device_add_data(pd_pdev, ++ &imx7_pgc_domains[domain_index], ++ sizeof(imx7_pgc_domains[domain_index])); ++ if (ret) { ++ platform_device_put(pd_pdev); ++ of_node_put(np); ++ return ret; ++ } ++ ++ domain = pd_pdev->dev.platform_data; ++ domain->regmap = regmap; ++ domain->genpd.power_on = imx7_gpc_pu_pgc_sw_pup_req; ++ domain->genpd.power_off = imx7_gpc_pu_pgc_sw_pdn_req; ++ + pd_pdev->dev.parent = dev; + pd_pdev->dev.of_node = np; + +diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c +index 7f8429635502..a5b0df7e6131 100644 +--- a/drivers/spi/spi-meson-spicc.c ++++ b/drivers/spi/spi-meson-spicc.c +@@ -574,10 +574,15 @@ static int meson_spicc_probe(struct platform_device *pdev) + master->max_speed_hz = rate >> 2; + + ret = devm_spi_register_master(&pdev->dev, master); +- if (!ret) +- return 0; ++ if (ret) { ++ dev_err(&pdev->dev, "spi master registration failed\n"); ++ goto out_clk; ++ } + +- dev_err(&pdev->dev, "spi master registration failed\n"); ++ return 0; ++ ++out_clk: ++ clk_disable_unprepare(spicc->core); + + out_master: + spi_master_put(master); +diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c +index b392cca8fa4f..1a6ec226d6e4 100644 +--- a/drivers/spi/spi-s3c64xx.c ++++ b/drivers/spi/spi-s3c64xx.c +@@ -1273,8 +1273,6 @@ static int s3c64xx_spi_resume(struct device *dev) + if (ret < 0) + return ret; + +- s3c64xx_spi_hwinit(sdd, sdd->port_id); +- + return spi_master_resume(master); + } + #endif /* CONFIG_PM_SLEEP */ +@@ -1312,6 +1310,8 @@ static int s3c64xx_spi_runtime_resume(struct device *dev) + if (ret != 0) + goto err_disable_src_clk; + ++ s3c64xx_spi_hwinit(sdd, sdd->port_id); ++ + return 0; + + err_disable_src_clk: +diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c +index 52056535f54e..0fea18ab970e 100644 +--- a/drivers/spi/spi-sh-msiof.c ++++ b/drivers/spi/spi-sh-msiof.c +@@ -555,14 +555,16 @@ static int sh_msiof_spi_setup(struct spi_device *spi) + + /* Configure native chip select mode/polarity early */ + clr = MDR1_SYNCMD_MASK; +- set = MDR1_TRMD | TMDR1_PCON | MDR1_SYNCMD_SPI; ++ set = MDR1_SYNCMD_SPI; + if (spi->mode & SPI_CS_HIGH) + clr |= BIT(MDR1_SYNCAC_SHIFT); + else + set |= BIT(MDR1_SYNCAC_SHIFT); + pm_runtime_get_sync(&p->pdev->dev); + tmp = sh_msiof_read(p, TMDR1) & ~clr; +- sh_msiof_write(p, TMDR1, tmp | set); ++ sh_msiof_write(p, TMDR1, tmp | set | 
MDR1_TRMD | TMDR1_PCON); ++ tmp = sh_msiof_read(p, RMDR1) & ~clr; ++ sh_msiof_write(p, RMDR1, tmp | set); + pm_runtime_put(&p->pdev->dev); + p->native_cs_high = spi->mode & SPI_CS_HIGH; + p->native_cs_inited = true; +diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c +index 84dfef4bd6ae..f85d30dc9187 100644 +--- a/drivers/spi/spi.c ++++ b/drivers/spi/spi.c +@@ -1222,6 +1222,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread) + if (!was_busy && ctlr->auto_runtime_pm) { + ret = pm_runtime_get_sync(ctlr->dev.parent); + if (ret < 0) { ++ pm_runtime_put_noidle(ctlr->dev.parent); + dev_err(&ctlr->dev, "Failed to power device: %d\n", + ret); + mutex_unlock(&ctlr->io_mutex); +diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +index 284cdd44a2ee..8b92cf06d063 100644 +--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c ++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c +@@ -1710,7 +1710,7 @@ int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx, + return 0; + } + spin_unlock(&fps->fps_lock); +- rc = -EBUSY; ++ rc = -EAGAIN; + } + + spin_lock(&fps->fps_lock); +diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +index 29e10021b906..4b4a20149894 100644 +--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c ++++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +@@ -47,7 +47,7 @@ static int kiblnd_init_rdma(struct kib_conn *conn, struct kib_tx *tx, int type, + __u64 dstcookie); + static void kiblnd_queue_tx_locked(struct kib_tx *tx, struct kib_conn *conn); + static void kiblnd_queue_tx(struct kib_tx *tx, struct kib_conn *conn); +-static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx); ++static void kiblnd_unmap_tx(struct kib_tx *tx); + static void kiblnd_check_sends_locked(struct kib_conn *conn); + + static void +@@ -65,7 +65,7 @@ kiblnd_tx_done(struct lnet_ni *ni, struct kib_tx *tx) + LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ + LASSERT(tx->tx_pool); + +- kiblnd_unmap_tx(ni, tx); ++ kiblnd_unmap_tx(tx); + + /* tx may have up to 2 lnet msgs to finalise */ + lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL; +@@ -590,13 +590,9 @@ kiblnd_fmr_map_tx(struct kib_net *net, struct kib_tx *tx, struct kib_rdma_desc * + return 0; + } + +-static void kiblnd_unmap_tx(struct lnet_ni *ni, struct kib_tx *tx) ++static void kiblnd_unmap_tx(struct kib_tx *tx) + { +- struct kib_net *net = ni->ni_data; +- +- LASSERT(net); +- +- if (net->ibn_fmr_ps) ++ if (tx->fmr.fmr_pfmr || tx->fmr.fmr_frd) + kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status); + + if (tx->tx_nfrags) { +@@ -1289,11 +1285,6 @@ kiblnd_connect_peer(struct kib_peer *peer) + goto failed2; + } + +- LASSERT(cmid->device); +- CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n", +- libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname, +- &dev->ibd_ifip, cmid->device->name); +- + return; + + failed2: +@@ -2995,8 +2986,19 @@ kiblnd_cm_callback(struct rdma_cm_id *cmid, struct rdma_cm_event *event) + } else { + rc = rdma_resolve_route( + cmid, *kiblnd_tunables.kib_timeout * 1000); +- if (!rc) ++ if (!rc) { ++ struct kib_net *net = peer->ibp_ni->ni_data; ++ struct kib_dev *dev = net->ibn_dev; ++ ++ CDEBUG(D_NET, "%s: connection bound to "\ ++ "%s:%pI4h:%s\n", ++ libcfs_nid2str(peer->ibp_nid), ++ dev->ibd_ifname, ++ &dev->ibd_ifip, cmid->device->name); ++ + return 0; ++ } ++ + /* Can't initiate route resolution */ + 
CERROR("Can't resolve route for %s: %d\n", + libcfs_nid2str(peer->ibp_nid), rc); +diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +index b5d84f3f6071..11e01c48f51a 100644 +--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c ++++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c +@@ -1571,8 +1571,10 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns, + return ERR_CAST(res); + + lock = ldlm_lock_new(res); +- if (!lock) ++ if (!lock) { ++ ldlm_resource_putref(res); + return ERR_PTR(-ENOMEM); ++ } + + lock->l_req_mode = mode; + lock->l_ast_data = data; +@@ -1615,6 +1617,8 @@ out: + return ERR_PTR(rc); + } + ++ ++ + /** + * Enqueue (request) a lock. + * On the client this is called from ldlm_cli_enqueue_fini +diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c +index 0be55623bac4..364d697b2690 100644 +--- a/drivers/staging/lustre/lustre/llite/xattr.c ++++ b/drivers/staging/lustre/lustre/llite/xattr.c +@@ -93,7 +93,11 @@ ll_xattr_set_common(const struct xattr_handler *handler, + __u64 valid; + int rc; + +- if (flags == XATTR_REPLACE) { ++ /* When setxattr() is called with a size of 0 the value is ++ * unconditionally replaced by "". When removexattr() is ++ * called we get a NULL value and XATTR_REPLACE for flags. ++ */ ++ if (!value && flags == XATTR_REPLACE) { + ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1); + valid = OBD_MD_FLXATTRRM; + } else { +diff --git a/drivers/staging/media/atomisp/i2c/ov2680.c b/drivers/staging/media/atomisp/i2c/ov2680.c +index 51b7d61df0f5..179576224319 100644 +--- a/drivers/staging/media/atomisp/i2c/ov2680.c ++++ b/drivers/staging/media/atomisp/i2c/ov2680.c +@@ -396,12 +396,11 @@ static long __ov2680_set_exposure(struct v4l2_subdev *sd, int coarse_itg, + { + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct ov2680_device *dev = to_ov2680_sensor(sd); +- u16 vts,hts; ++ u16 vts; + int ret,exp_val; + + dev_dbg(&client->dev, "+++++++__ov2680_set_exposure coarse_itg %d, gain %d, digitgain %d++\n",coarse_itg, gain, digitgain); + +- hts = ov2680_res[dev->fmt_idx].pixels_per_line; + vts = ov2680_res[dev->fmt_idx].lines_per_frame; + + /* group hold */ +@@ -1190,7 +1189,8 @@ static int ov2680_detect(struct i2c_client *client) + OV2680_SC_CMMN_SUB_ID, &high); + revision = (u8) high & 0x0f; + +- dev_info(&client->dev, "sensor_revision id = 0x%x\n", id); ++ dev_info(&client->dev, "sensor_revision id = 0x%x, rev= %d\n", ++ id, revision); + + return 0; + } +diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c +index 0592ac1f2832..cfe6bb610014 100644 +--- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c ++++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_compat_ioctl32.c +@@ -81,7 +81,7 @@ static int get_v4l2_framebuffer32(struct v4l2_framebuffer *kp, + get_user(kp->flags, &up->flags)) + return -EFAULT; + +- kp->base = compat_ptr(tmp); ++ kp->base = (void __force *)compat_ptr(tmp); + get_v4l2_pix_format((struct v4l2_pix_format *)&kp->fmt, &up->fmt); + return 0; + } +@@ -232,10 +232,10 @@ static int get_atomisp_dvs_6axis_config32(struct atomisp_dvs_6axis_config *kp, + get_user(ycoords_uv, &up->ycoords_uv)) + return -EFAULT; + +- kp->xcoords_y = compat_ptr(xcoords_y); +- kp->ycoords_y = compat_ptr(ycoords_y); +- kp->xcoords_uv = compat_ptr(xcoords_uv); +- kp->ycoords_uv = compat_ptr(ycoords_uv); ++ kp->xcoords_y 
= (void __force *)compat_ptr(xcoords_y); ++ kp->ycoords_y = (void __force *)compat_ptr(ycoords_y); ++ kp->xcoords_uv = (void __force *)compat_ptr(xcoords_uv); ++ kp->ycoords_uv = (void __force *)compat_ptr(ycoords_uv); + return 0; + } + +@@ -296,7 +296,7 @@ static int get_atomisp_metadata_stat32(struct atomisp_metadata *kp, + return -EFAULT; + + kp->data = compat_ptr(data); +- kp->effective_width = compat_ptr(effective_width); ++ kp->effective_width = (void __force *)compat_ptr(effective_width); + return 0; + } + +@@ -360,7 +360,7 @@ static int get_atomisp_metadata_by_type_stat32( + return -EFAULT; + + kp->data = compat_ptr(data); +- kp->effective_width = compat_ptr(effective_width); ++ kp->effective_width = (void __force *)compat_ptr(effective_width); + return 0; + } + +@@ -437,7 +437,7 @@ static int get_atomisp_overlay32(struct atomisp_overlay *kp, + get_user(kp->overlay_start_x, &up->overlay_start_y)) + return -EFAULT; + +- kp->frame = compat_ptr(frame); ++ kp->frame = (void __force *)compat_ptr(frame); + return 0; + } + +@@ -481,7 +481,7 @@ static int get_atomisp_calibration_group32( + get_user(calb_grp_values, &up->calb_grp_values)) + return -EFAULT; + +- kp->calb_grp_values = compat_ptr(calb_grp_values); ++ kp->calb_grp_values = (void __force *)compat_ptr(calb_grp_values); + return 0; + } + +@@ -703,8 +703,8 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, + return -EFAULT; + + while (n >= 0) { +- compat_uptr_t *src = (compat_uptr_t *)up + n; +- uintptr_t *dst = (uintptr_t *)kp + n; ++ compat_uptr_t __user *src = ((compat_uptr_t __user *)up) + n; ++ uintptr_t *dst = ((uintptr_t *)kp) + n; + + if (get_user((*dst), src)) + return -EFAULT; +@@ -751,12 +751,12 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, + #endif + return -EFAULT; + +- kp->shading_table = user_ptr + offset; ++ kp->shading_table = (void __force *)user_ptr + offset; + offset = sizeof(struct atomisp_shading_table); + if (!kp->shading_table) + return -EFAULT; + +- if (copy_to_user(kp->shading_table, ++ if (copy_to_user((void __user *)kp->shading_table, + &karg.shading_table, + sizeof(struct atomisp_shading_table))) + return -EFAULT; +@@ -777,13 +777,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, + #endif + return -EFAULT; + +- kp->morph_table = user_ptr + offset; ++ kp->morph_table = (void __force *)user_ptr + offset; + offset += sizeof(struct atomisp_morph_table); + if (!kp->morph_table) + return -EFAULT; + +- if (copy_to_user(kp->morph_table, &karg.morph_table, +- sizeof(struct atomisp_morph_table))) ++ if (copy_to_user((void __user *)kp->morph_table, ++ &karg.morph_table, ++ sizeof(struct atomisp_morph_table))) + return -EFAULT; + } + +@@ -802,13 +803,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, + #endif + return -EFAULT; + +- kp->dvs2_coefs = user_ptr + offset; ++ kp->dvs2_coefs = (void __force *)user_ptr + offset; + offset += sizeof(struct atomisp_dis_coefficients); + if (!kp->dvs2_coefs) + return -EFAULT; + +- if (copy_to_user(kp->dvs2_coefs, &karg.dvs2_coefs, +- sizeof(struct atomisp_dis_coefficients))) ++ if (copy_to_user((void __user *)kp->dvs2_coefs, ++ &karg.dvs2_coefs, ++ sizeof(struct atomisp_dis_coefficients))) + return -EFAULT; + } + /* handle dvs 6axis configuration */ +@@ -826,13 +828,14 @@ static int get_atomisp_parameters32(struct atomisp_parameters *kp, + #endif + return -EFAULT; + +- kp->dvs_6axis_config = user_ptr + offset; ++ kp->dvs_6axis_config = (void __force *)user_ptr + offset; + offset += 
sizeof(struct atomisp_dvs_6axis_config); + if (!kp->dvs_6axis_config) + return -EFAULT; + +- if (copy_to_user(kp->dvs_6axis_config, &karg.dvs_6axis_config, +- sizeof(struct atomisp_dvs_6axis_config))) ++ if (copy_to_user((void __user *)kp->dvs_6axis_config, ++ &karg.dvs_6axis_config, ++ sizeof(struct atomisp_dvs_6axis_config))) + return -EFAULT; + } + } +@@ -891,7 +894,7 @@ static int get_atomisp_sensor_ae_bracketing_lut( + get_user(lut, &up->lut)) + return -EFAULT; + +- kp->lut = compat_ptr(lut); ++ kp->lut = (void __force *)compat_ptr(lut); + return 0; + } + +diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +index 486be990d7fc..a457034818c3 100644 +--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c ++++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c +@@ -601,6 +601,7 @@ reserve_space(VCHIQ_STATE_T *state, size_t space, int is_blocking) + } + + if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) { ++ up(&state->slot_available_event); + pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos); + return NULL; + } +diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c +index ac83f721db24..d60069b5dc98 100644 +--- a/drivers/thermal/samsung/exynos_tmu.c ++++ b/drivers/thermal/samsung/exynos_tmu.c +@@ -598,6 +598,7 @@ static int exynos5433_tmu_initialize(struct platform_device *pdev) + threshold_code = temp_to_code(data, temp); + + rising_threshold = readl(data->base + rising_reg_offset); ++ rising_threshold &= ~(0xff << j * 8); + rising_threshold |= (threshold_code << j * 8); + writel(rising_threshold, data->base + rising_reg_offset); + +diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c +index 16331a90c1e8..9da8474fe50a 100644 +--- a/drivers/tty/hvc/hvc_opal.c ++++ b/drivers/tty/hvc/hvc_opal.c +@@ -332,7 +332,6 @@ static void udbg_init_opal_common(void) + udbg_putc = udbg_opal_putc; + udbg_getc = udbg_opal_getc; + udbg_getc_poll = udbg_opal_getc_poll; +- tb_ticks_per_usec = 0x200; /* Make udelay not suck */ + } + + void __init hvc_opal_init_early(void) +diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c +index 64338442050e..899e8fe5e00f 100644 +--- a/drivers/tty/pty.c ++++ b/drivers/tty/pty.c +@@ -110,16 +110,19 @@ static void pty_unthrottle(struct tty_struct *tty) + static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) + { + struct tty_struct *to = tty->link; ++ unsigned long flags; + + if (tty->stopped) + return 0; + + if (c > 0) { ++ spin_lock_irqsave(&to->port->lock, flags); + /* Stuff the data into the input queue of the other end */ + c = tty_insert_flip_string(to->port, buf, c); + /* And shovel */ + if (c) + tty_flip_buffer_push(to->port); ++ spin_unlock_irqrestore(&to->port->lock, flags); + } + return c; + } +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index a8bc48b26c23..a9db0887edca 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -3361,6 +3361,10 @@ static int wait_for_connected(struct usb_device *udev, + while (delay_ms < 2000) { + if (status || *portstatus & USB_PORT_STAT_CONNECTION) + break; ++ if (!port_is_power_on(hub, *portstatus)) { ++ status = -ENODEV; ++ break; ++ } + msleep(20); + delay_ms += 20; + status = hub_port_status(hub, *port1, portstatus, portchange); +diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c +index 4cfa72cb0a91..c12a1a6554ba 100644 +--- 
a/drivers/usb/gadget/udc/renesas_usb3.c ++++ b/drivers/usb/gadget/udc/renesas_usb3.c +@@ -334,6 +334,7 @@ struct renesas_usb3 { + struct usb_gadget_driver *driver; + struct extcon_dev *extcon; + struct work_struct extcon_work; ++ struct dentry *dentry; + + struct renesas_usb3_ep *usb3_ep; + int num_usb3_eps; +@@ -2397,8 +2398,12 @@ static void renesas_usb3_debugfs_init(struct renesas_usb3 *usb3, + + file = debugfs_create_file("b_device", 0644, root, usb3, + &renesas_usb3_b_device_fops); +- if (!file) ++ if (!file) { + dev_info(dev, "%s: Can't create debugfs mode\n", __func__); ++ debugfs_remove_recursive(root); ++ } else { ++ usb3->dentry = root; ++ } + } + + /*------- platform_driver ------------------------------------------------*/ +@@ -2406,6 +2411,7 @@ static int renesas_usb3_remove(struct platform_device *pdev) + { + struct renesas_usb3 *usb3 = platform_get_drvdata(pdev); + ++ debugfs_remove_recursive(usb3->dentry); + device_remove_file(&pdev->dev, &dev_attr_role); + + usb_del_gadget_udc(&usb3->gadget); +diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c +index 126991046eb7..0212f0ee8aea 100644 +--- a/drivers/vfio/mdev/mdev_core.c ++++ b/drivers/vfio/mdev/mdev_core.c +@@ -66,34 +66,6 @@ uuid_le mdev_uuid(struct mdev_device *mdev) + } + EXPORT_SYMBOL(mdev_uuid); + +-static int _find_mdev_device(struct device *dev, void *data) +-{ +- struct mdev_device *mdev; +- +- if (!dev_is_mdev(dev)) +- return 0; +- +- mdev = to_mdev_device(dev); +- +- if (uuid_le_cmp(mdev->uuid, *(uuid_le *)data) == 0) +- return 1; +- +- return 0; +-} +- +-static bool mdev_device_exist(struct mdev_parent *parent, uuid_le uuid) +-{ +- struct device *dev; +- +- dev = device_find_child(parent->dev, &uuid, _find_mdev_device); +- if (dev) { +- put_device(dev); +- return true; +- } +- +- return false; +-} +- + /* Should be called holding parent_list_lock */ + static struct mdev_parent *__find_parent_device(struct device *dev) + { +@@ -221,7 +193,6 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops) + } + + kref_init(&parent->ref); +- mutex_init(&parent->lock); + + parent->dev = dev; + parent->ops = ops; +@@ -297,6 +268,10 @@ static void mdev_device_release(struct device *dev) + { + struct mdev_device *mdev = to_mdev_device(dev); + ++ mutex_lock(&mdev_list_lock); ++ list_del(&mdev->next); ++ mutex_unlock(&mdev_list_lock); ++ + dev_dbg(&mdev->dev, "MDEV: destroying\n"); + kfree(mdev); + } +@@ -304,7 +279,7 @@ static void mdev_device_release(struct device *dev) + int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) + { + int ret; +- struct mdev_device *mdev; ++ struct mdev_device *mdev, *tmp; + struct mdev_parent *parent; + struct mdev_type *type = to_mdev_type(kobj); + +@@ -312,21 +287,28 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) + if (!parent) + return -EINVAL; + +- mutex_lock(&parent->lock); ++ mutex_lock(&mdev_list_lock); + + /* Check for duplicate */ +- if (mdev_device_exist(parent, uuid)) { +- ret = -EEXIST; +- goto create_err; ++ list_for_each_entry(tmp, &mdev_list, next) { ++ if (!uuid_le_cmp(tmp->uuid, uuid)) { ++ mutex_unlock(&mdev_list_lock); ++ ret = -EEXIST; ++ goto mdev_fail; ++ } + } + + mdev = kzalloc(sizeof(*mdev), GFP_KERNEL); + if (!mdev) { ++ mutex_unlock(&mdev_list_lock); + ret = -ENOMEM; +- goto create_err; ++ goto mdev_fail; + } + + memcpy(&mdev->uuid, &uuid, sizeof(uuid_le)); ++ list_add(&mdev->next, &mdev_list); ++ mutex_unlock(&mdev_list_lock); ++ + mdev->parent = parent; + 
kref_init(&mdev->ref); + +@@ -338,35 +320,28 @@ int mdev_device_create(struct kobject *kobj, struct device *dev, uuid_le uuid) + ret = device_register(&mdev->dev); + if (ret) { + put_device(&mdev->dev); +- goto create_err; ++ goto mdev_fail; + } + + ret = mdev_device_create_ops(kobj, mdev); + if (ret) +- goto create_failed; ++ goto create_fail; + + ret = mdev_create_sysfs_files(&mdev->dev, type); + if (ret) { + mdev_device_remove_ops(mdev, true); +- goto create_failed; ++ goto create_fail; + } + + mdev->type_kobj = kobj; ++ mdev->active = true; + dev_dbg(&mdev->dev, "MDEV: created\n"); + +- mutex_unlock(&parent->lock); +- +- mutex_lock(&mdev_list_lock); +- list_add(&mdev->next, &mdev_list); +- mutex_unlock(&mdev_list_lock); +- +- return ret; ++ return 0; + +-create_failed: ++create_fail: + device_unregister(&mdev->dev); +- +-create_err: +- mutex_unlock(&parent->lock); ++mdev_fail: + mdev_put_parent(parent); + return ret; + } +@@ -377,44 +352,39 @@ int mdev_device_remove(struct device *dev, bool force_remove) + struct mdev_parent *parent; + struct mdev_type *type; + int ret; +- bool found = false; + + mdev = to_mdev_device(dev); + + mutex_lock(&mdev_list_lock); + list_for_each_entry(tmp, &mdev_list, next) { +- if (tmp == mdev) { +- found = true; ++ if (tmp == mdev) + break; +- } + } + +- if (found) +- list_del(&mdev->next); ++ if (tmp != mdev) { ++ mutex_unlock(&mdev_list_lock); ++ return -ENODEV; ++ } + +- mutex_unlock(&mdev_list_lock); ++ if (!mdev->active) { ++ mutex_unlock(&mdev_list_lock); ++ return -EAGAIN; ++ } + +- if (!found) +- return -ENODEV; ++ mdev->active = false; ++ mutex_unlock(&mdev_list_lock); + + type = to_mdev_type(mdev->type_kobj); + parent = mdev->parent; +- mutex_lock(&parent->lock); + + ret = mdev_device_remove_ops(mdev, force_remove); + if (ret) { +- mutex_unlock(&parent->lock); +- +- mutex_lock(&mdev_list_lock); +- list_add(&mdev->next, &mdev_list); +- mutex_unlock(&mdev_list_lock); +- ++ mdev->active = true; + return ret; + } + + mdev_remove_sysfs_files(dev, type); + device_unregister(dev); +- mutex_unlock(&parent->lock); + mdev_put_parent(parent); + + return 0; +diff --git a/drivers/vfio/mdev/mdev_private.h b/drivers/vfio/mdev/mdev_private.h +index a9cefd70a705..b5819b7d7ef7 100644 +--- a/drivers/vfio/mdev/mdev_private.h ++++ b/drivers/vfio/mdev/mdev_private.h +@@ -20,7 +20,6 @@ struct mdev_parent { + struct device *dev; + const struct mdev_parent_ops *ops; + struct kref ref; +- struct mutex lock; + struct list_head next; + struct kset *mdev_types_kset; + struct list_head type_list; +@@ -34,6 +33,7 @@ struct mdev_device { + struct kref ref; + struct list_head next; + struct kobject *type_kobj; ++ bool active; + }; + + #define to_mdev_device(dev) container_of(dev, struct mdev_device, dev) +diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c +index 4c27f4be3c3d..aa9e792110e3 100644 +--- a/drivers/vfio/platform/vfio_platform_common.c ++++ b/drivers/vfio/platform/vfio_platform_common.c +@@ -681,18 +681,23 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev, + group = vfio_iommu_group_get(dev); + if (!group) { + pr_err("VFIO: No IOMMU group for device %s\n", vdev->name); +- return -EINVAL; ++ ret = -EINVAL; ++ goto put_reset; + } + + ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev); +- if (ret) { +- vfio_iommu_group_put(group, dev); +- return ret; +- } ++ if (ret) ++ goto put_iommu; + + mutex_init(&vdev->igate); + + return 0; ++ ++put_iommu: ++ vfio_iommu_group_put(group, dev); ++put_reset: ++ 
vfio_platform_put_reset(vdev); ++ return ret; + } + EXPORT_SYMBOL_GPL(vfio_platform_probe_common); + +diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c +index d639378e36ac..50eeb74ddc0a 100644 +--- a/drivers/vfio/vfio_iommu_type1.c ++++ b/drivers/vfio/vfio_iommu_type1.c +@@ -83,6 +83,7 @@ struct vfio_dma { + size_t size; /* Map size (bytes) */ + int prot; /* IOMMU_READ/WRITE */ + bool iommu_mapped; ++ bool lock_cap; /* capable(CAP_IPC_LOCK) */ + struct task_struct *task; + struct rb_root pfn_list; /* Ex-user pinned pfn list */ + }; +@@ -246,29 +247,25 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn) + return ret; + } + +-static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap) ++static int vfio_lock_acct(struct vfio_dma *dma, long npage, bool async) + { + struct mm_struct *mm; +- bool is_current; + int ret; + + if (!npage) + return 0; + +- is_current = (task->mm == current->mm); +- +- mm = is_current ? task->mm : get_task_mm(task); ++ mm = async ? get_task_mm(dma->task) : dma->task->mm; + if (!mm) + return -ESRCH; /* process exited */ + + ret = down_write_killable(&mm->mmap_sem); + if (!ret) { + if (npage > 0) { +- if (lock_cap ? !*lock_cap : +- !has_capability(task, CAP_IPC_LOCK)) { ++ if (!dma->lock_cap) { + unsigned long limit; + +- limit = task_rlimit(task, ++ limit = task_rlimit(dma->task, + RLIMIT_MEMLOCK) >> PAGE_SHIFT; + + if (mm->locked_vm + npage > limit) +@@ -282,7 +279,7 @@ static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap) + up_write(&mm->mmap_sem); + } + +- if (!is_current) ++ if (async) + mmput(mm); + + return ret; +@@ -391,7 +388,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, + */ + static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, + long npage, unsigned long *pfn_base, +- bool lock_cap, unsigned long limit) ++ unsigned long limit) + { + unsigned long pfn = 0; + long ret, pinned = 0, lock_acct = 0; +@@ -414,7 +411,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, + * pages are already counted against the user. 
+ */ + if (!rsvd && !vfio_find_vpfn(dma, iova)) { +- if (!lock_cap && current->mm->locked_vm + 1 > limit) { ++ if (!dma->lock_cap && current->mm->locked_vm + 1 > limit) { + put_pfn(*pfn_base, dma->prot); + pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, + limit << PAGE_SHIFT); +@@ -440,7 +437,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, + } + + if (!rsvd && !vfio_find_vpfn(dma, iova)) { +- if (!lock_cap && ++ if (!dma->lock_cap && + current->mm->locked_vm + lock_acct + 1 > limit) { + put_pfn(pfn, dma->prot); + pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", +@@ -453,7 +450,7 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr, + } + + out: +- ret = vfio_lock_acct(current, lock_acct, &lock_cap); ++ ret = vfio_lock_acct(dma, lock_acct, false); + + unpin_out: + if (ret) { +@@ -484,7 +481,7 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, + } + + if (do_accounting) +- vfio_lock_acct(dma->task, locked - unlocked, NULL); ++ vfio_lock_acct(dma, locked - unlocked, true); + + return unlocked; + } +@@ -501,7 +498,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr, + + ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base); + if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) { +- ret = vfio_lock_acct(dma->task, 1, NULL); ++ ret = vfio_lock_acct(dma, 1, true); + if (ret) { + put_pfn(*pfn_base, dma->prot); + if (ret == -ENOMEM) +@@ -528,7 +525,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, + unlocked = vfio_iova_put_vfio_pfn(dma, vpfn); + + if (do_accounting) +- vfio_lock_acct(dma->task, -unlocked, NULL); ++ vfio_lock_acct(dma, -unlocked, true); + + return unlocked; + } +@@ -723,7 +720,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma, + + dma->iommu_mapped = false; + if (do_accounting) { +- vfio_lock_acct(dma->task, -unlocked, NULL); ++ vfio_lock_acct(dma, -unlocked, true); + return 0; + } + return unlocked; +@@ -935,14 +932,12 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, + size_t size = map_size; + long npage; + unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; +- bool lock_cap = capable(CAP_IPC_LOCK); + int ret = 0; + + while (size) { + /* Pin a contiguous chunk of memory */ + npage = vfio_pin_pages_remote(dma, vaddr + dma->size, +- size >> PAGE_SHIFT, &pfn, +- lock_cap, limit); ++ size >> PAGE_SHIFT, &pfn, limit); + if (npage <= 0) { + WARN_ON(!npage); + ret = (int)npage; +@@ -1017,8 +1012,36 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, + dma->iova = iova; + dma->vaddr = vaddr; + dma->prot = prot; +- get_task_struct(current); +- dma->task = current; ++ ++ /* ++ * We need to be able to both add to a task's locked memory and test ++ * against the locked memory limit and we need to be able to do both ++ * outside of this call path as pinning can be asynchronous via the ++ * external interfaces for mdev devices. RLIMIT_MEMLOCK requires a ++ * task_struct and VM locked pages requires an mm_struct, however ++ * holding an indefinite mm reference is not recommended, therefore we ++ * only hold a reference to a task. We could hold a reference to ++ * current, however QEMU uses this call path through vCPU threads, ++ * which can be killed resulting in a NULL mm and failure in the unmap ++ * path when called via a different thread. 
Avoid this problem by ++ * using the group_leader as threads within the same group require ++ * both CLONE_THREAD and CLONE_VM and will therefore use the same ++ * mm_struct. ++ * ++ * Previously we also used the task for testing CAP_IPC_LOCK at the ++ * time of pinning and accounting, however has_capability() makes use ++ * of real_cred, a copy-on-write field, so we can't guarantee that it ++ * matches group_leader, or in fact that it might not change by the ++ * time it's evaluated. If a process were to call MAP_DMA with ++ * CAP_IPC_LOCK but later drop it, it doesn't make sense that they ++ * possibly see different results for an iommu_mapped vfio_dma vs ++ * externally mapped. Therefore track CAP_IPC_LOCK in vfio_dma at the ++ * time of calling MAP_DMA. ++ */ ++ get_task_struct(current->group_leader); ++ dma->task = current->group_leader; ++ dma->lock_cap = capable(CAP_IPC_LOCK); ++ + dma->pfn_list = RB_ROOT; + + /* Insert zero-sized and grow as we map chunks of it */ +@@ -1053,7 +1076,6 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, + struct vfio_domain *d; + struct rb_node *n; + unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; +- bool lock_cap = capable(CAP_IPC_LOCK); + int ret; + + /* Arbitrarily pick the first domain in the list for lookups */ +@@ -1100,8 +1122,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, + + npage = vfio_pin_pages_remote(dma, vaddr, + n >> PAGE_SHIFT, +- &pfn, lock_cap, +- limit); ++ &pfn, limit); + if (npage <= 0) { + WARN_ON(!npage); + ret = (int)npage; +@@ -1378,7 +1399,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu) + if (!is_invalid_reserved_pfn(vpfn->pfn)) + locked++; + } +- vfio_lock_acct(dma->task, locked - unlocked, NULL); ++ vfio_lock_acct(dma, locked - unlocked, true); + } + } + +diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c +index 1c2289ddd555..0fa7d2bd0e48 100644 +--- a/drivers/video/backlight/pwm_bl.c ++++ b/drivers/video/backlight/pwm_bl.c +@@ -301,14 +301,14 @@ static int pwm_backlight_probe(struct platform_device *pdev) + + /* + * If the GPIO is not known to be already configured as output, that +- * is, if gpiod_get_direction returns either GPIOF_DIR_IN or -EINVAL, +- * change the direction to output and set the GPIO as active. ++ * is, if gpiod_get_direction returns either 1 or -EINVAL, change the ++ * direction to output and set the GPIO as active. + * Do not force the GPIO to active when it was already output as it + * could cause backlight flickering or we would enable the backlight too + * early. Leave the decision of the initial backlight state for later. + */ + if (pb->enable_gpio && +- gpiod_get_direction(pb->enable_gpio) != GPIOF_DIR_OUT) ++ gpiod_get_direction(pb->enable_gpio) != 0) + gpiod_direction_output(pb->enable_gpio, 1); + + pb->power_supply = devm_regulator_get(&pdev->dev, "power"); +diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c +index 2a20fc163ed0..4c62ad74aec0 100644 +--- a/drivers/watchdog/da9063_wdt.c ++++ b/drivers/watchdog/da9063_wdt.c +@@ -102,10 +102,23 @@ static int da9063_wdt_set_timeout(struct watchdog_device *wdd, + { + struct da9063 *da9063 = watchdog_get_drvdata(wdd); + unsigned int selector; +- int ret; ++ int ret = 0; + + selector = da9063_wdt_timeout_to_sel(timeout); +- ret = _da9063_wdt_set_timeout(da9063, selector); ++ ++ /* ++ * There are two cases when a set_timeout() will be called: ++ * 1. The watchdog is off and someone wants to set the timeout for the ++ * further use. ++ * 2. 
The watchdog is already running and a new timeout value should be ++ * set. ++ * ++ * The watchdog can't store a timeout value not equal zero without ++ * enabling the watchdog, so the timeout must be buffered by the driver. ++ */ ++ if (watchdog_active(wdd)) ++ ret = _da9063_wdt_set_timeout(da9063, selector); ++ + if (ret) + dev_err(da9063->dev, "Failed to set watchdog timeout (err = %d)\n", + ret); +diff --git a/fs/block_dev.c b/fs/block_dev.c +index 789f55e851ae..3323eec5c164 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -231,7 +231,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, + + ret = bio_iov_iter_get_pages(&bio, iter); + if (unlikely(ret)) +- return ret; ++ goto out; + ret = bio.bi_iter.bi_size; + + if (iov_iter_rw(iter) == READ) { +@@ -260,12 +260,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter, + put_page(bvec->bv_page); + } + +- if (vecs != inline_vecs) +- kfree(vecs); +- + if (unlikely(bio.bi_status)) + ret = blk_status_to_errno(bio.bi_status); + ++out: ++ if (vecs != inline_vecs) ++ kfree(vecs); ++ + bio_uninit(&bio); + + return ret; +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index f5b90dc137ec..28a58f40f3a4 100644 +--- a/fs/btrfs/inode.c ++++ b/fs/btrfs/inode.c +@@ -3162,6 +3162,9 @@ out: + /* once for the tree */ + btrfs_put_ordered_extent(ordered_extent); + ++ /* Try to release some metadata so we don't get an OOM but don't wait */ ++ btrfs_btree_balance_dirty_nodelay(fs_info); ++ + return ret; + } + +@@ -4737,7 +4740,10 @@ delete: + extent_num_bytes, 0, + btrfs_header_owner(leaf), + ino, extent_offset); +- BUG_ON(ret); ++ if (ret) { ++ btrfs_abort_transaction(trans, ret); ++ break; ++ } + if (btrfs_should_throttle_delayed_refs(trans, fs_info)) + btrfs_async_run_delayed_refs(fs_info, + trans->delayed_ref_updates * 2, +@@ -5496,13 +5502,18 @@ void btrfs_evict_inode(struct inode *inode) + trans->block_rsv = rsv; + + ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0); +- if (ret != -ENOSPC && ret != -EAGAIN) ++ if (ret) { ++ trans->block_rsv = &fs_info->trans_block_rsv; ++ btrfs_end_transaction(trans); ++ btrfs_btree_balance_dirty(fs_info); ++ if (ret != -ENOSPC && ret != -EAGAIN) { ++ btrfs_orphan_del(NULL, BTRFS_I(inode)); ++ btrfs_free_block_rsv(fs_info, rsv); ++ goto no_delete; ++ } ++ } else { + break; +- +- trans->block_rsv = &fs_info->trans_block_rsv; +- btrfs_end_transaction(trans); +- trans = NULL; +- btrfs_btree_balance_dirty(fs_info); ++ } + } + + btrfs_free_block_rsv(fs_info, rsv); +@@ -5511,12 +5522,8 @@ void btrfs_evict_inode(struct inode *inode) + * Errors here aren't a big deal, it just means we leave orphan items + * in the tree. They will be cleaned up on the next mount. + */ +- if (ret == 0) { +- trans->block_rsv = root->orphan_block_rsv; +- btrfs_orphan_del(trans, BTRFS_I(inode)); +- } else { +- btrfs_orphan_del(NULL, BTRFS_I(inode)); +- } ++ trans->block_rsv = root->orphan_block_rsv; ++ btrfs_orphan_del(trans, BTRFS_I(inode)); + + trans->block_rsv = &fs_info->trans_block_rsv; + if (!(root == fs_info->tree_root || +diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c +index e172d4843eae..473ad5985aa3 100644 +--- a/fs/btrfs/qgroup.c ++++ b/fs/btrfs/qgroup.c +@@ -2499,6 +2499,21 @@ out: + spin_unlock(&fs_info->qgroup_lock); + } + ++/* ++ * Check if the leaf is the last leaf. Which means all node pointers ++ * are at their last position. 
++ */ ++static bool is_last_leaf(struct btrfs_path *path) ++{ ++ int i; ++ ++ for (i = 1; i < BTRFS_MAX_LEVEL && path->nodes[i]; i++) { ++ if (path->slots[i] != btrfs_header_nritems(path->nodes[i]) - 1) ++ return false; ++ } ++ return true; ++} ++ + /* + * returns < 0 on error, 0 when more leafs are to be scanned. + * returns 1 when done. +@@ -2512,6 +2527,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, + struct ulist *roots = NULL; + struct seq_list tree_mod_seq_elem = SEQ_LIST_INIT(tree_mod_seq_elem); + u64 num_bytes; ++ bool done; + int slot; + int ret; + +@@ -2540,6 +2556,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path, + mutex_unlock(&fs_info->qgroup_rescan_lock); + return ret; + } ++ done = is_last_leaf(path); + + btrfs_item_key_to_cpu(path->nodes[0], &found, + btrfs_header_nritems(path->nodes[0]) - 1); +@@ -2586,6 +2603,8 @@ out: + } + btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem); + ++ if (done && !ret) ++ ret = 1; + return ret; + } + +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index bf4e22df7c97..e1b4a59485df 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -3041,8 +3041,11 @@ out_wake_log_root: + mutex_unlock(&log_root_tree->log_mutex); + + /* +- * The barrier before waitqueue_active is implied by mutex_unlock ++ * The barrier before waitqueue_active is needed so all the updates ++ * above are seen by the woken threads. It might not be necessary, but ++ * proving that seems to be hard. + */ ++ smp_mb(); + if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) + wake_up(&log_root_tree->log_commit_wait[index2]); + out: +@@ -3053,8 +3056,11 @@ out: + mutex_unlock(&root->log_mutex); + + /* +- * The barrier before waitqueue_active is implied by mutex_unlock ++ * The barrier before waitqueue_active is needed so all the updates ++ * above are seen by the woken threads. It might not be necessary, but ++ * proving that seems to be hard. + */ ++ smp_mb(); + if (waitqueue_active(&root->log_commit_wait[index1])) + wake_up(&root->log_commit_wait[index1]); + return ret; +diff --git a/fs/ceph/super.c b/fs/ceph/super.c +index 48ffe720bf09..b79b1211a2b5 100644 +--- a/fs/ceph/super.c ++++ b/fs/ceph/super.c +@@ -254,7 +254,7 @@ static int parse_fsopt_token(char *c, void *private) + case Opt_rasize: + if (intval < 0) + return -EINVAL; +- fsopt->rasize = ALIGN(intval + PAGE_SIZE - 1, PAGE_SIZE); ++ fsopt->rasize = ALIGN(intval, PAGE_SIZE); + break; + case Opt_caps_wanted_delay_min: + if (intval < 1) +diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c +index d262a93d9b31..daf2683f0655 100644 +--- a/fs/crypto/crypto.c ++++ b/fs/crypto/crypto.c +@@ -446,8 +446,17 @@ fail: + */ + static int __init fscrypt_init(void) + { ++ /* ++ * Use an unbound workqueue to allow bios to be decrypted in parallel ++ * even when they happen to complete on the same CPU. This sacrifices ++ * locality, but it's worthwhile since decryption is CPU-intensive. ++ * ++ * Also use a high-priority workqueue to prioritize decryption work, ++ * which blocks reads from completing, over regular application tasks. 
++ */ + fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue", +- WQ_HIGHPRI, 0); ++ WQ_UNBOUND | WQ_HIGHPRI, ++ num_online_cpus()); + if (!fscrypt_read_workqueue) + goto fail; + +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index 9c9eafd6bd76..70266a3355dc 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -379,6 +379,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb, + return -EFSCORRUPTED; + + ext4_lock_group(sb, block_group); ++ if (buffer_verified(bh)) ++ goto verified; + if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, + desc, bh))) { + ext4_unlock_group(sb, block_group); +@@ -401,6 +403,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb, + return -EFSCORRUPTED; + } + set_buffer_verified(bh); ++verified: + ext4_unlock_group(sb, block_group); + return 0; + } +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c +index 95341bc2b3b7..2f46564d3fca 100644 +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -91,6 +91,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb, + return -EFSCORRUPTED; + + ext4_lock_group(sb, block_group); ++ if (buffer_verified(bh)) ++ goto verified; + blk = ext4_inode_bitmap(sb, desc); + if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh, + EXT4_INODES_PER_GROUP(sb) / 8)) { +@@ -108,6 +110,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb, + return -EFSBADCRC; + } + set_buffer_verified(bh); ++verified: + ext4_unlock_group(sb, block_group); + return 0; + } +@@ -1394,7 +1397,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, + ext4_itable_unused_count(sb, gdp)), + sbi->s_inodes_per_block); + +- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) { ++ if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) || ++ ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) - ++ ext4_itable_unused_count(sb, gdp)) < ++ EXT4_FIRST_INO(sb)))) { + ext4_error(sb, "Something is wrong with group %u: " + "used itable blocks: %d; " + "itable unused count: %u", +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index 7d498f4a3f90..b549cfd2d7d3 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -688,6 +688,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping, + goto convert; + } + ++ ret = ext4_journal_get_write_access(handle, iloc.bh); ++ if (ret) ++ goto out; ++ + flags |= AOP_FLAG_NOFS; + + page = grab_cache_page_write_begin(mapping, 0, flags); +@@ -716,7 +720,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping, + out_up_read: + up_read(&EXT4_I(inode)->xattr_sem); + out: +- if (handle) ++ if (handle && (ret != 1)) + ext4_journal_stop(handle); + brelse(iloc.bh); + return ret; +@@ -758,6 +762,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, + + ext4_write_unlock_xattr(inode, &no_expand); + brelse(iloc.bh); ++ mark_inode_dirty(inode); + out: + return copied; + } +@@ -904,7 +909,6 @@ retry_journal: + goto out; + } + +- + page = grab_cache_page_write_begin(mapping, 0, flags); + if (!page) { + ret = -ENOMEM; +@@ -922,6 +926,9 @@ retry_journal: + if (ret < 0) + goto out_release_page; + } ++ ret = ext4_journal_get_write_access(handle, iloc.bh); ++ if (ret) ++ goto out_release_page; + + up_read(&EXT4_I(inode)->xattr_sem); + *pagep = page; +@@ -942,7 +949,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, + unsigned len, unsigned copied, + struct page *page) + { +- int i_size_changed = 0; + int ret; + + ret = ext4_write_inline_data_end(inode, pos, len, copied, page); +@@ 
-960,10 +966,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, + * But it's important to update i_size while still holding page lock: + * page writeout could otherwise come in and zero beyond i_size. + */ +- if (pos+copied > inode->i_size) { ++ if (pos+copied > inode->i_size) + i_size_write(inode, pos+copied); +- i_size_changed = 1; +- } + unlock_page(page); + put_page(page); + +@@ -973,8 +977,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos, + * ordering of page lock and transaction start for journaling + * filesystems. + */ +- if (i_size_changed) +- mark_inode_dirty(inode); ++ mark_inode_dirty(inode); + + return copied; + } +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index c2efe4d2ad87..f9baa59de0e2 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -1388,9 +1388,10 @@ static int ext4_write_end(struct file *file, + loff_t old_size = inode->i_size; + int ret = 0, ret2; + int i_size_changed = 0; ++ int inline_data = ext4_has_inline_data(inode); + + trace_ext4_write_end(inode, pos, len, copied); +- if (ext4_has_inline_data(inode)) { ++ if (inline_data) { + ret = ext4_write_inline_data_end(inode, pos, len, + copied, page); + if (ret < 0) { +@@ -1418,7 +1419,7 @@ static int ext4_write_end(struct file *file, + * ordering of page lock and transaction start for journaling + * filesystems. + */ +- if (i_size_changed) ++ if (i_size_changed || inline_data) + ext4_mark_inode_dirty(handle, inode); + + if (pos + len > inode->i_size && ext4_can_truncate(inode)) +@@ -1492,6 +1493,7 @@ static int ext4_journalled_write_end(struct file *file, + int partial = 0; + unsigned from, to; + int size_changed = 0; ++ int inline_data = ext4_has_inline_data(inode); + + trace_ext4_journalled_write_end(inode, pos, len, copied); + from = pos & (PAGE_SIZE - 1); +@@ -1499,7 +1501,7 @@ static int ext4_journalled_write_end(struct file *file, + + BUG_ON(!ext4_handle_valid(handle)); + +- if (ext4_has_inline_data(inode)) { ++ if (inline_data) { + ret = ext4_write_inline_data_end(inode, pos, len, + copied, page); + if (ret < 0) { +@@ -1530,7 +1532,7 @@ static int ext4_journalled_write_end(struct file *file, + if (old_size < pos) + pagecache_isize_extended(inode, old_size, pos); + +- if (size_changed) { ++ if (size_changed || inline_data) { + ret2 = ext4_mark_inode_dirty(handle, inode); + if (!ret) + ret = ret2; +@@ -2027,11 +2029,7 @@ static int __ext4_journalled_writepage(struct page *page, + } + + if (inline_data) { +- BUFFER_TRACE(inode_bh, "get write access"); +- ret = ext4_journal_get_write_access(handle, inode_bh); +- +- err = ext4_handle_dirty_metadata(handle, inode, inode_bh); +- ++ ret = ext4_mark_inode_dirty(handle, inode); + } else { + ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL, + do_journal_get_write_access); +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index fc32a67a7a19..6b0c1ea95196 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -3103,14 +3103,8 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) + if (!gdp) + continue; + +- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) +- continue; +- if (group != 0) ++ if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) + break; +- ext4_error(sb, "Inode table for bg 0 marked as " +- "needing zeroing"); +- if (sb_rdonly(sb)) +- return ngroups; + } + + return group; +diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c +index 36b535207c88..85142e5df88b 100644 +--- a/fs/f2fs/data.c ++++ b/fs/f2fs/data.c +@@ -1601,7 +1601,13 @@ out: + + redirty_out: + 
redirty_page_for_writepage(wbc, page); +- if (!err) ++ /* ++ * pageout() in MM translates EAGAIN, so calls handle_write_error() ++ * -> mapping_set_error() -> set_bit(AS_EIO, ...). ++ * file_write_and_wait_range() will see EIO error, which is critical ++ * to the return value of fsync() followed by atomic_write failure to user. ++ */ ++ if (!err || wbc->for_reclaim) + return AOP_WRITEPAGE_ACTIVATE; + unlock_page(page); + return err; +diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c +index 72c6a9e9a9b4..87e654c53c31 100644 +--- a/fs/f2fs/file.c ++++ b/fs/f2fs/file.c +@@ -1630,6 +1630,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) + + inode_lock(inode); + ++ down_write(&F2FS_I(inode)->dio_rwsem[WRITE]); ++ + if (f2fs_is_atomic_file(inode)) + goto out; + +@@ -1659,6 +1661,7 @@ inc_stat: + stat_inc_atomic_write(inode); + stat_update_max_atomic_write(inode); + out: ++ up_write(&F2FS_I(inode)->dio_rwsem[WRITE]); + inode_unlock(inode); + mnt_drop_write_file(filp); + return ret; +@@ -1808,9 +1811,11 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) + if (get_user(in, (__u32 __user *)arg)) + return -EFAULT; + +- ret = mnt_want_write_file(filp); +- if (ret) +- return ret; ++ if (in != F2FS_GOING_DOWN_FULLSYNC) { ++ ret = mnt_want_write_file(filp); ++ if (ret) ++ return ret; ++ } + + switch (in) { + case F2FS_GOING_DOWN_FULLSYNC: +@@ -1838,7 +1843,8 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) + } + f2fs_update_time(sbi, REQ_TIME); + out: +- mnt_drop_write_file(filp); ++ if (in != F2FS_GOING_DOWN_FULLSYNC) ++ mnt_drop_write_file(filp); + return ret; + } + +@@ -2490,7 +2496,9 @@ static int f2fs_ioc_setproject(struct file *filp, __u32 projid) + } + f2fs_put_page(ipage, 1); + +- dquot_initialize(inode); ++ err = dquot_initialize(inode); ++ if (err) ++ goto out_unlock; + + transfer_to[PRJQUOTA] = dqget(sb, make_kqid_projid(kprojid)); + if (!IS_ERR(transfer_to[PRJQUOTA])) { +diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c +index e5673a9b2619..f2f897cd23c9 100644 +--- a/fs/f2fs/gc.c ++++ b/fs/f2fs/gc.c +@@ -768,9 +768,14 @@ retry: + set_cold_data(page); + + err = do_write_data_page(&fio); +- if (err == -ENOMEM && is_dirty) { +- congestion_wait(BLK_RW_ASYNC, HZ/50); +- goto retry; ++ if (err) { ++ clear_cold_data(page); ++ if (err == -ENOMEM) { ++ congestion_wait(BLK_RW_ASYNC, HZ/50); ++ goto retry; ++ } ++ if (is_dirty) ++ set_page_dirty(page); + } + } + out: +diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c +index 271516db8939..7c05bd4222b2 100644 +--- a/fs/f2fs/segment.c ++++ b/fs/f2fs/segment.c +@@ -225,6 +225,8 @@ static int __revoke_inmem_pages(struct inode *inode, + + lock_page(page); + ++ f2fs_wait_on_page_writeback(page, DATA, true); ++ + if (recover) { + struct dnode_of_data dn; + struct node_info ni; +@@ -435,6 +437,9 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) + + void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi) + { ++ if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING))) ++ return; ++ + /* try to shrink extent cache when there is no enough memory */ + if (!available_free_memory(sbi, EXTENT_CACHE)) + f2fs_shrink_extent_tree(sbi, EXTENT_CACHE_SHRINK_NUMBER); +diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c +index 933c3d529e65..400c00058bad 100644 +--- a/fs/f2fs/super.c ++++ b/fs/f2fs/super.c +@@ -2663,6 +2663,12 @@ static int __init init_f2fs_fs(void) + { + int err; + ++ if (PAGE_SIZE != F2FS_BLKSIZE) { ++ printk("F2FS not supported on PAGE_SIZE(%lu) != %d\n", ++ PAGE_SIZE, F2FS_BLKSIZE); ++ return -EINVAL; ++ } ++ + 
f2fs_build_trace_ios(); + + err = init_inodecache(); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 928bbc397818..43fbf4495090 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -745,6 +745,13 @@ static int nfs41_sequence_process(struct rpc_task *task, + slot->slot_nr, + slot->seq_nr); + goto out_retry; ++ case -NFS4ERR_RETRY_UNCACHED_REP: ++ case -NFS4ERR_SEQ_FALSE_RETRY: ++ /* ++ * The server thinks we tried to replay a request. ++ * Retry the call after bumping the sequence ID. ++ */ ++ goto retry_new_seq; + case -NFS4ERR_BADSLOT: + /* + * The slot id we used was probably retired. Try again +@@ -769,10 +776,6 @@ static int nfs41_sequence_process(struct rpc_task *task, + goto retry_nowait; + } + goto session_recover; +- case -NFS4ERR_SEQ_FALSE_RETRY: +- if (interrupted) +- goto retry_new_seq; +- goto session_recover; + default: + /* Just update the slot sequence no. */ + slot->seq_done = 1; +@@ -2692,7 +2695,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + if (ret != 0) + goto out; + +- state = nfs4_opendata_to_nfs4_state(opendata); ++ state = _nfs4_opendata_to_nfs4_state(opendata); + ret = PTR_ERR(state); + if (IS_ERR(state)) + goto out; +@@ -2728,6 +2731,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, + nfs4_schedule_stateid_recovery(server, state); + } + out: ++ nfs4_sequence_free_slot(&opendata->o_res.seq_res); + return ret; + } + +diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c +index 7b34534210ce..96867fb159bf 100644 +--- a/fs/nfs/pnfs.c ++++ b/fs/nfs/pnfs.c +@@ -1126,7 +1126,7 @@ _pnfs_return_layout(struct inode *ino) + LIST_HEAD(tmp_list); + nfs4_stateid stateid; + int status = 0; +- bool send; ++ bool send, valid_layout; + + dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino); + +@@ -1147,6 +1147,7 @@ _pnfs_return_layout(struct inode *ino) + goto out_put_layout_hdr; + spin_lock(&ino->i_lock); + } ++ valid_layout = pnfs_layout_is_valid(lo); + pnfs_clear_layoutcommit(ino, &tmp_list); + pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0); + +@@ -1160,7 +1161,8 @@ _pnfs_return_layout(struct inode *ino) + } + + /* Don't send a LAYOUTRETURN if list was initially empty */ +- if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) { ++ if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) || ++ !valid_layout) { + spin_unlock(&ino->i_lock); + dprintk("NFS: %s no layout segments to return\n", __func__); + goto out_put_layout_hdr; +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index f6588cc6816c..c1e923334012 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -1586,6 +1586,8 @@ nfsd4_decode_getdeviceinfo(struct nfsd4_compoundargs *argp, + gdev->gd_maxcount = be32_to_cpup(p++); + num = be32_to_cpup(p++); + if (num) { ++ if (num > 1000) ++ goto xdr_error; + READ_BUF(4 * num); + gdev->gd_notify_types = be32_to_cpup(p++); + for (i = 1; i < num; i++) { +diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c +index b8f8d666e8d4..ba20393d60ef 100644 +--- a/fs/overlayfs/super.c ++++ b/fs/overlayfs/super.c +@@ -232,6 +232,7 @@ static void ovl_put_super(struct super_block *sb) + kfree(ufs); + } + ++/* Sync real dirty inodes in upper filesystem (if it exists) */ + static int ovl_sync_fs(struct super_block *sb, int wait) + { + struct ovl_fs *ufs = sb->s_fs_info; +@@ -240,14 +241,24 @@ static int ovl_sync_fs(struct super_block *sb, int wait) + + if (!ufs->upper_mnt) + return 0; +- upper_sb = ufs->upper_mnt->mnt_sb; +- if (!upper_sb->s_op->sync_fs) ++ ++ /* ++ * If this is a sync(2) call or an 
emergency sync, all the super blocks ++ * will be iterated, including upper_sb, so no need to do anything. ++ * ++ * If this is a syncfs(2) call, then we do need to call ++ * sync_filesystem() on upper_sb, but enough if we do it when being ++ * called with wait == 1. ++ */ ++ if (!wait) + return 0; + +- /* real inodes have already been synced by sync_filesystem(ovl_sb) */ ++ upper_sb = ufs->upper_mnt->mnt_sb; ++ + down_read(&upper_sb->s_umount); +- ret = upper_sb->s_op->sync_fs(upper_sb, wait); ++ ret = sync_filesystem(upper_sb); + up_read(&upper_sb->s_umount); ++ + return ret; + } + +diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c +index 6f337fff38c4..519522d39bde 100644 +--- a/fs/proc/task_mmu.c ++++ b/fs/proc/task_mmu.c +@@ -1275,8 +1275,9 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm, + if (pte_swp_soft_dirty(pte)) + flags |= PM_SOFT_DIRTY; + entry = pte_to_swp_entry(pte); +- frame = swp_type(entry) | +- (swp_offset(entry) << MAX_SWAPFILES_SHIFT); ++ if (pm->show_pfn) ++ frame = swp_type(entry) | ++ (swp_offset(entry) << MAX_SWAPFILES_SHIFT); + flags |= PM_SWAP; + if (is_migration_entry(entry)) + page = migration_entry_to_page(entry); +@@ -1327,11 +1328,14 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, + #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION + else if (is_swap_pmd(pmd)) { + swp_entry_t entry = pmd_to_swp_entry(pmd); +- unsigned long offset = swp_offset(entry); ++ unsigned long offset; + +- offset += (addr & ~PMD_MASK) >> PAGE_SHIFT; +- frame = swp_type(entry) | +- (offset << MAX_SWAPFILES_SHIFT); ++ if (pm->show_pfn) { ++ offset = swp_offset(entry) + ++ ((addr & ~PMD_MASK) >> PAGE_SHIFT); ++ frame = swp_type(entry) | ++ (offset << MAX_SWAPFILES_SHIFT); ++ } + flags |= PM_SWAP; + if (pmd_swp_soft_dirty(pmd)) + flags |= PM_SOFT_DIRTY; +@@ -1349,10 +1353,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, + err = add_to_pagemap(addr, &pme, pm); + if (err) + break; +- if (pm->show_pfn && (flags & PM_PRESENT)) +- frame++; +- else if (flags & PM_SWAP) +- frame += (1 << MAX_SWAPFILES_SHIFT); ++ if (pm->show_pfn) { ++ if (flags & PM_PRESENT) ++ frame++; ++ else if (flags & PM_SWAP) ++ frame += (1 << MAX_SWAPFILES_SHIFT); ++ } + } + spin_unlock(ptl); + return err; +diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c +index 23813c078cc9..0839efa720b3 100644 +--- a/fs/squashfs/cache.c ++++ b/fs/squashfs/cache.c +@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer, + + TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset); + ++ if (unlikely(length < 0)) ++ return -EIO; ++ + while (length) { + entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0); + if (entry->error) { +diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c +index 13d80947bf9e..fcff2e0487fe 100644 +--- a/fs/squashfs/file.c ++++ b/fs/squashfs/file.c +@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n, + } + + for (i = 0; i < blocks; i++) { +- int size = le32_to_cpu(blist[i]); ++ int size = squashfs_block_size(blist[i]); ++ if (size < 0) { ++ err = size; ++ goto failure; ++ } + block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size); + } + n -= blocks; +@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) + sizeof(size)); + if (res < 0) + return res; +- return le32_to_cpu(size); ++ return squashfs_block_size(size); + } + + /* Copy data into page cache */ +diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c +index 
0ed6edbc5c71..86ad9a4b8c36 100644 +--- a/fs/squashfs/fragment.c ++++ b/fs/squashfs/fragment.c +@@ -61,9 +61,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment, + return size; + + *fragment_block = le64_to_cpu(fragment_entry.start_block); +- size = le32_to_cpu(fragment_entry.size); +- +- return size; ++ return squashfs_block_size(fragment_entry.size); + } + + +diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h +index 24d12fd14177..4e6853f084d0 100644 +--- a/fs/squashfs/squashfs_fs.h ++++ b/fs/squashfs/squashfs_fs.h +@@ -129,6 +129,12 @@ + + #define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK)) + ++static inline int squashfs_block_size(__le32 raw) ++{ ++ u32 size = le32_to_cpu(raw); ++ return (size >> 25) ? -EIO : size; ++} ++ + /* + * Inode number ops. Inodes consist of a compressed block number, and an + * uncompressed offset within that block +diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h +index b17476a6909c..8fd7cb5297ab 100644 +--- a/include/drm/drm_dp_helper.h ++++ b/include/drm/drm_dp_helper.h +@@ -453,6 +453,7 @@ + # define DP_PSR_FRAME_CAPTURE (1 << 3) + # define DP_PSR_SELECTIVE_UPDATE (1 << 4) + # define DP_PSR_IRQ_HPD_WITH_CRC_ERRORS (1 << 5) ++# define DP_PSR_ENABLE_PSR2 (1 << 6) /* eDP 1.4a */ + + #define DP_ADAPTER_CTRL 0x1a0 + # define DP_ADAPTER_CTRL_FORCE_LOAD_SENSE (1 << 0) +diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h +index 5e335b6203f4..31c865d1842e 100644 +--- a/include/linux/delayacct.h ++++ b/include/linux/delayacct.h +@@ -29,7 +29,7 @@ + + #ifdef CONFIG_TASK_DELAY_ACCT + struct task_delay_info { +- spinlock_t lock; ++ raw_spinlock_t lock; + unsigned int flags; /* Private per-task flags */ + + /* For each stat XXX, add following, aligned appropriately +@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void) + + static inline void delayacct_blkio_end(struct task_struct *p) + { +- if (current->delays) ++ if (p->delays) + __delayacct_blkio_end(p); + delayacct_clear_flag(DELAYACCT_PF_BLKIO); + } +diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h +index 92f20832fd28..e8ca5e654277 100644 +--- a/include/linux/dma-iommu.h ++++ b/include/linux/dma-iommu.h +@@ -17,6 +17,7 @@ + #define __DMA_IOMMU_H + + #ifdef __KERNEL__ ++#include <linux/types.h> + #include <asm/errno.h> + + #ifdef CONFIG_IOMMU_DMA +diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h +index cdd66a5fbd5e..0a7abe8a407f 100644 +--- a/include/linux/mmc/sdio_ids.h ++++ b/include/linux/mmc/sdio_ids.h +@@ -35,6 +35,7 @@ + #define SDIO_DEVICE_ID_BROADCOM_4335_4339 0x4335 + #define SDIO_DEVICE_ID_BROADCOM_4339 0x4339 + #define SDIO_DEVICE_ID_BROADCOM_43362 0xa962 ++#define SDIO_DEVICE_ID_BROADCOM_43364 0xa9a4 + #define SDIO_DEVICE_ID_BROADCOM_43430 0xa9a6 + #define SDIO_DEVICE_ID_BROADCOM_4345 0x4345 + #define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf +diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h +index bfb3531fd88a..7ad8ddf9ca8a 100644 +--- a/include/linux/netfilter/ipset/ip_set_timeout.h ++++ b/include/linux/netfilter/ipset/ip_set_timeout.h +@@ -65,8 +65,14 @@ ip_set_timeout_set(unsigned long *timeout, u32 value) + static inline u32 + ip_set_timeout_get(const unsigned long *timeout) + { +- return *timeout == IPSET_ELEM_PERMANENT ? 
0 : +- jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; ++ u32 t; ++ ++ if (*timeout == IPSET_ELEM_PERMANENT) ++ return 0; ++ ++ t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC; ++ /* Zero value in userspace means no timeout */ ++ return t == 0 ? 1 : t; + } + + #endif /* __KERNEL__ */ +diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h +index df176d7c2b87..25602afd4844 100644 +--- a/include/linux/regulator/consumer.h ++++ b/include/linux/regulator/consumer.h +@@ -80,6 +80,7 @@ struct regmap; + * These modes can be OR'ed together to make up a mask of valid register modes. + */ + ++#define REGULATOR_MODE_INVALID 0x0 + #define REGULATOR_MODE_FAST 0x1 + #define REGULATOR_MODE_NORMAL 0x2 + #define REGULATOR_MODE_IDLE 0x4 +diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h +index 74fc82d22310..868b60a79c0b 100644 +--- a/include/linux/serial_core.h ++++ b/include/linux/serial_core.h +@@ -348,7 +348,8 @@ struct earlycon_device { + }; + + struct earlycon_id { +- char name[16]; ++ char name[15]; ++ char name_term; /* In case compiler didn't '\0' term name */ + char compatible[128]; + int (*setup)(struct earlycon_device *, const char *options); + }; +diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h +index 34f053a150a9..cf2862bd134a 100644 +--- a/include/linux/thread_info.h ++++ b/include/linux/thread_info.h +@@ -43,11 +43,7 @@ enum { + #define THREAD_ALIGN THREAD_SIZE + #endif + +-#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) +-# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) +-#else +-# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT) +-#endif ++#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) + + /* + * flag set/clear/test wrappers +diff --git a/include/net/tcp.h b/include/net/tcp.h +index 3173dd12b8cc..686e33ea76e7 100644 +--- a/include/net/tcp.h ++++ b/include/net/tcp.h +@@ -372,7 +372,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags); + +-void tcp_enter_quickack_mode(struct sock *sk); ++void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks); + static inline void tcp_dec_quickack_mode(struct sock *sk, + const unsigned int pkts) + { +diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h +index 44202ff897fd..f759e0918037 100644 +--- a/include/soc/tegra/mc.h ++++ b/include/soc/tegra/mc.h +@@ -99,6 +99,8 @@ struct tegra_mc_soc { + u8 client_id_mask; + + const struct tegra_smmu_soc *smmu; ++ ++ u32 intmask; + }; + + struct tegra_mc { +diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h +index 69c37ecbff7e..f3c4b46e39d8 100644 +--- a/include/uapi/sound/asoc.h ++++ b/include/uapi/sound/asoc.h +@@ -139,6 +139,11 @@ + #define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_CHANNELS (1 << 1) + #define SND_SOC_TPLG_DAI_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2) + ++/* DAI clock gating */ ++#define SND_SOC_TPLG_DAI_CLK_GATE_UNDEFINED 0 ++#define SND_SOC_TPLG_DAI_CLK_GATE_GATED 1 ++#define SND_SOC_TPLG_DAI_CLK_GATE_CONT 2 ++ + /* DAI physical PCM data formats. + * Add new formats to the end of the list. 
+ */ +@@ -160,6 +165,18 @@ + #define SND_SOC_TPLG_LNK_FLGBIT_SYMMETRIC_SAMPLEBITS (1 << 2) + #define SND_SOC_TPLG_LNK_FLGBIT_VOICE_WAKEUP (1 << 3) + ++/* DAI topology BCLK parameter ++ * For backward compatibility, the codec is the bclk master by default ++ */ ++#define SND_SOC_TPLG_BCLK_CM 0 /* codec is bclk master */ ++#define SND_SOC_TPLG_BCLK_CS 1 /* codec is bclk slave */ ++ ++/* DAI topology FSYNC parameter ++ * For backward compatibility, the codec is the fsync master by default ++ */ ++#define SND_SOC_TPLG_FSYNC_CM 0 /* codec is fsync master */ ++#define SND_SOC_TPLG_FSYNC_CS 1 /* codec is fsync slave */ ++ + /* + * Block Header. + * This header precedes all object and object arrays below. +@@ -312,11 +329,11 @@ struct snd_soc_tplg_hw_config { + __le32 size; /* in bytes of this structure */ + __le32 id; /* unique ID - - used to match */ + __le32 fmt; /* SND_SOC_DAI_FORMAT_ format value */ +- __u8 clock_gated; /* 1 if clock can be gated to save power */ ++ __u8 clock_gated; /* SND_SOC_TPLG_DAI_CLK_GATE_ value */ + __u8 invert_bclk; /* 1 for inverted BCLK, 0 for normal */ + __u8 invert_fsync; /* 1 for inverted frame clock, 0 for normal */ +- __u8 bclk_master; /* 1 for master of BCLK, 0 for slave */ +- __u8 fsync_master; /* 1 for master of FSYNC, 0 for slave */ ++ __u8 bclk_master; /* SND_SOC_TPLG_BCLK_ value */ ++ __u8 fsync_master; /* SND_SOC_TPLG_FSYNC_ value */ + __u8 mclk_direction; /* 0 for input, 1 for output */ + __le16 reserved; /* for 32bit alignment */ + __le32 mclk_rate; /* MCLK or SYSCLK freqency in Hz */ +diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c +index 0b0aa5854dac..8dd4063647c2 100644 +--- a/kernel/auditfilter.c ++++ b/kernel/auditfilter.c +@@ -407,7 +407,7 @@ static int audit_field_valid(struct audit_entry *entry, struct audit_field *f) + return -EINVAL; + break; + case AUDIT_EXE: +- if (f->op != Audit_equal) ++ if (f->op != Audit_not_equal && f->op != Audit_equal) + return -EINVAL; + if (entry->rule.listnr != AUDIT_FILTER_EXIT) + return -EINVAL; +diff --git a/kernel/auditsc.c b/kernel/auditsc.c +index ecc23e25c9eb..677053a2fb57 100644 +--- a/kernel/auditsc.c ++++ b/kernel/auditsc.c +@@ -471,6 +471,8 @@ static int audit_filter_rules(struct task_struct *tsk, + break; + case AUDIT_EXE: + result = audit_exe_compare(tsk, rule->exe); ++ if (f->op == Audit_not_equal) ++ result = !result; + break; + case AUDIT_UID: + result = audit_uid_comparator(cred->uid, f->op, f->uid); +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 3ceb269c0ebd..450e2cd31ed6 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -4110,7 +4110,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env) + /* hold the map. If the program is rejected by verifier, + * the map will be released by release_maps() or it + * will be used by the valid program until it's unloaded +- * and all maps are released in free_bpf_prog_info() ++ * and all maps are released in free_used_maps() + */ + map = bpf_map_inc(map, false); + if (IS_ERR(map)) { +@@ -4623,7 +4623,7 @@ free_log_buf: + vfree(log_buf); + if (!env->prog->aux->used_maps) + /* if we didn't copy map pointers into bpf_prog_info, release +- * them now. Otherwise free_used_maps() will release them. 
+ */ + release_maps(env); + *prog = env->prog; +diff --git a/kernel/delayacct.c b/kernel/delayacct.c +index e2764d767f18..ca8ac2824f0b 100644 +--- a/kernel/delayacct.c ++++ b/kernel/delayacct.c +@@ -44,23 +44,24 @@ void __delayacct_tsk_init(struct task_struct *tsk) + { + tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL); + if (tsk->delays) +- spin_lock_init(&tsk->delays->lock); ++ raw_spin_lock_init(&tsk->delays->lock); + } + + /* + * Finish delay accounting for a statistic using its timestamps (@start), + * accumalator (@total) and @count + */ +-static void delayacct_end(spinlock_t *lock, u64 *start, u64 *total, u32 *count) ++static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, ++ u32 *count) + { + s64 ns = ktime_get_ns() - *start; + unsigned long flags; + + if (ns > 0) { +- spin_lock_irqsave(lock, flags); ++ raw_spin_lock_irqsave(lock, flags); + *total += ns; + (*count)++; +- spin_unlock_irqrestore(lock, flags); ++ raw_spin_unlock_irqrestore(lock, flags); + } + } + +@@ -127,7 +128,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) + + /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ + +- spin_lock_irqsave(&tsk->delays->lock, flags); ++ raw_spin_lock_irqsave(&tsk->delays->lock, flags); + tmp = d->blkio_delay_total + tsk->delays->blkio_delay; + d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; + tmp = d->swapin_delay_total + tsk->delays->swapin_delay; +@@ -137,7 +138,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) + d->blkio_count += tsk->delays->blkio_count; + d->swapin_count += tsk->delays->swapin_count; + d->freepages_count += tsk->delays->freepages_count; +- spin_unlock_irqrestore(&tsk->delays->lock, flags); ++ raw_spin_unlock_irqrestore(&tsk->delays->lock, flags); + + return 0; + } +@@ -147,10 +148,10 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk) + __u64 ret; + unsigned long flags; + +- spin_lock_irqsave(&tsk->delays->lock, flags); ++ raw_spin_lock_irqsave(&tsk->delays->lock, flags); + ret = nsec_to_clock_t(tsk->delays->blkio_delay + + tsk->delays->swapin_delay); +- spin_unlock_irqrestore(&tsk->delays->lock, flags); ++ raw_spin_unlock_irqrestore(&tsk->delays->lock, flags); + return ret; + } + +diff --git a/kernel/fork.c b/kernel/fork.c +index 98c91bd341b4..91907a3701ce 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -215,10 +215,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) + if (!s) + continue; + +-#ifdef CONFIG_DEBUG_KMEMLEAK + /* Clear stale pointers from reused stack. 
*/ + memset(s->addr, 0, THREAD_SIZE); +-#endif ++ + tsk->stack_vm_area = s; + return s->addr; + } +diff --git a/kernel/hung_task.c b/kernel/hung_task.c +index 751593ed7c0b..32b479468e4d 100644 +--- a/kernel/hung_task.c ++++ b/kernel/hung_task.c +@@ -44,6 +44,7 @@ int __read_mostly sysctl_hung_task_warnings = 10; + + static int __read_mostly did_panic; + static bool hung_task_show_lock; ++static bool hung_task_call_panic; + + static struct task_struct *watchdog_task; + +@@ -127,10 +128,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) + touch_nmi_watchdog(); + + if (sysctl_hung_task_panic) { +- if (hung_task_show_lock) +- debug_show_all_locks(); +- trigger_all_cpu_backtrace(); +- panic("hung_task: blocked tasks"); ++ hung_task_show_lock = true; ++ hung_task_call_panic = true; + } + } + +@@ -193,6 +192,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout) + rcu_read_unlock(); + if (hung_task_show_lock) + debug_show_all_locks(); ++ if (hung_task_call_panic) { ++ trigger_all_cpu_backtrace(); ++ panic("hung_task: blocked tasks"); ++ } + } + + static long hung_timeout_jiffies(unsigned long last_checked, +diff --git a/kernel/kcov.c b/kernel/kcov.c +index b11ef6e51f7e..f1e060b04ef6 100644 +--- a/kernel/kcov.c ++++ b/kernel/kcov.c +@@ -108,7 +108,8 @@ static void kcov_put(struct kcov *kcov) + + void kcov_task_init(struct task_struct *t) + { +- t->kcov_mode = KCOV_MODE_DISABLED; ++ WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED); ++ barrier(); + t->kcov_size = 0; + t->kcov_area = NULL; + t->kcov = NULL; +diff --git a/kernel/kthread.c b/kernel/kthread.c +index 1ef8f3a5b072..4e6d85b63201 100644 +--- a/kernel/kthread.c ++++ b/kernel/kthread.c +@@ -311,8 +311,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data), + task = create->result; + if (!IS_ERR(task)) { + static const struct sched_param param = { .sched_priority = 0 }; ++ char name[TASK_COMM_LEN]; + +- vsnprintf(task->comm, sizeof(task->comm), namefmt, args); ++ /* ++ * task is already visible to other tasks, so updating ++ * COMM must be protected. ++ */ ++ vsnprintf(name, sizeof(name), namefmt, args); ++ set_task_comm(task, name); + /* + * root may have changed our (kthreadd's) priority or CPU mask. + * The kernel thread should not inherit these properties. 
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c +index 0685c4499431..c0bc2c89697a 100644 +--- a/kernel/power/suspend.c ++++ b/kernel/power/suspend.c +@@ -60,7 +60,7 @@ static const struct platform_s2idle_ops *s2idle_ops; + static DECLARE_WAIT_QUEUE_HEAD(s2idle_wait_head); + + enum s2idle_states __read_mostly s2idle_state; +-static DEFINE_SPINLOCK(s2idle_lock); ++static DEFINE_RAW_SPINLOCK(s2idle_lock); + + void s2idle_set_ops(const struct platform_s2idle_ops *ops) + { +@@ -78,12 +78,12 @@ static void s2idle_enter(void) + { + trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true); + +- spin_lock_irq(&s2idle_lock); ++ raw_spin_lock_irq(&s2idle_lock); + if (pm_wakeup_pending()) + goto out; + + s2idle_state = S2IDLE_STATE_ENTER; +- spin_unlock_irq(&s2idle_lock); ++ raw_spin_unlock_irq(&s2idle_lock); + + get_online_cpus(); + cpuidle_resume(); +@@ -97,11 +97,11 @@ static void s2idle_enter(void) + cpuidle_pause(); + put_online_cpus(); + +- spin_lock_irq(&s2idle_lock); ++ raw_spin_lock_irq(&s2idle_lock); + + out: + s2idle_state = S2IDLE_STATE_NONE; +- spin_unlock_irq(&s2idle_lock); ++ raw_spin_unlock_irq(&s2idle_lock); + + trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false); + } +@@ -156,12 +156,12 @@ void s2idle_wake(void) + { + unsigned long flags; + +- spin_lock_irqsave(&s2idle_lock, flags); ++ raw_spin_lock_irqsave(&s2idle_lock, flags); + if (s2idle_state > S2IDLE_STATE_NONE) { + s2idle_state = S2IDLE_STATE_WAKE; + wake_up(&s2idle_wait_head); + } +- spin_unlock_irqrestore(&s2idle_lock, flags); ++ raw_spin_unlock_irqrestore(&s2idle_lock, flags); + } + EXPORT_SYMBOL_GPL(s2idle_wake); + +diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c +index d989cc238198..64825b2df3a5 100644 +--- a/kernel/printk/printk_safe.c ++++ b/kernel/printk/printk_safe.c +@@ -284,7 +284,7 @@ void printk_safe_flush_on_panic(void) + * Make sure that we could access the main ring buffer. + * Do not risk a double release when more CPUs are up. + */ +- if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) { ++ if (raw_spin_is_locked(&logbuf_lock)) { + if (num_online_cpus() > 1) + return; + +diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c +index 2f6fa95de2d8..1ff523dae6e2 100644 +--- a/kernel/stop_machine.c ++++ b/kernel/stop_machine.c +@@ -37,7 +37,7 @@ struct cpu_stop_done { + struct cpu_stopper { + struct task_struct *thread; + +- spinlock_t lock; ++ raw_spinlock_t lock; + bool enabled; /* is this stopper enabled? 
*/ + struct list_head works; /* list of pending works */ + +@@ -81,13 +81,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) + unsigned long flags; + bool enabled; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + enabled = stopper->enabled; + if (enabled) + __cpu_stop_queue_work(stopper, work, &wakeq); + else if (work->done) + cpu_stop_signal_done(work->done); +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + + wake_up_q(&wakeq); + +@@ -237,8 +237,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, + DEFINE_WAKE_Q(wakeq); + int err; + retry: +- spin_lock_irq(&stopper1->lock); +- spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); ++ raw_spin_lock_irq(&stopper1->lock); ++ raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); + + err = -ENOENT; + if (!stopper1->enabled || !stopper2->enabled) +@@ -261,8 +261,8 @@ retry: + __cpu_stop_queue_work(stopper1, work1, &wakeq); + __cpu_stop_queue_work(stopper2, work2, &wakeq); + unlock: +- spin_unlock(&stopper2->lock); +- spin_unlock_irq(&stopper1->lock); ++ raw_spin_unlock(&stopper2->lock); ++ raw_spin_unlock_irq(&stopper1->lock); + + if (unlikely(err == -EDEADLK)) { + while (stop_cpus_in_progress) +@@ -461,9 +461,9 @@ static int cpu_stop_should_run(unsigned int cpu) + unsigned long flags; + int run; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + run = !list_empty(&stopper->works); +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + return run; + } + +@@ -474,13 +474,13 @@ static void cpu_stopper_thread(unsigned int cpu) + + repeat: + work = NULL; +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + if (!list_empty(&stopper->works)) { + work = list_first_entry(&stopper->works, + struct cpu_stop_work, list); + list_del_init(&work->list); + } +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + + if (work) { + cpu_stop_fn_t fn = work->fn; +@@ -554,7 +554,7 @@ static int __init cpu_stop_init(void) + for_each_possible_cpu(cpu) { + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + +- spin_lock_init(&stopper->lock); ++ raw_spin_lock_init(&stopper->lock); + INIT_LIST_HEAD(&stopper->works); + } + +diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c +index b413fab7d75b..43254c5e7e16 100644 +--- a/kernel/trace/trace_events_trigger.c ++++ b/kernel/trace/trace_events_trigger.c +@@ -680,6 +680,8 @@ event_trigger_callback(struct event_command *cmd_ops, + goto out_free; + + out_reg: ++ /* Up the trigger_data count to make sure reg doesn't free it on failure */ ++ event_trigger_init(trigger_ops, trigger_data); + ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file); + /* + * The above returns on success the # of functions enabled, +@@ -687,11 +689,13 @@ event_trigger_callback(struct event_command *cmd_ops, + * Consider no functions a failure too. 
+ */ + if (!ret) { ++ cmd_ops->unreg(glob, trigger_ops, trigger_data, file); + ret = -ENOENT; +- goto out_free; +- } else if (ret < 0) +- goto out_free; +- ret = 0; ++ } else if (ret > 0) ++ ret = 0; ++ ++ /* Down the counter of trigger_data or free it if not used anymore */ ++ event_trigger_free(trigger_ops, trigger_data); + out: + return ret; + +@@ -1392,6 +1396,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops, + goto out; + } + ++ /* Up the trigger_data count to make sure nothing frees it on failure */ ++ event_trigger_init(trigger_ops, trigger_data); ++ + if (trigger) { + number = strsep(&trigger, ":"); + +@@ -1442,6 +1449,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, + goto out_disable; + /* Just return zero, not the number of enabled functions */ + ret = 0; ++ event_trigger_free(trigger_ops, trigger_data); + out: + return ret; + +@@ -1452,7 +1460,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops, + out_free: + if (cmd_ops->set_filter) + cmd_ops->set_filter(NULL, trigger_data, NULL); +- kfree(trigger_data); ++ event_trigger_free(trigger_ops, trigger_data); + kfree(enable_data); + goto out; + } +diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c +index f8d3bd974bcc..ea20274a105a 100644 +--- a/kernel/trace/trace_kprobe.c ++++ b/kernel/trace/trace_kprobe.c +@@ -376,11 +376,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event, + static int + enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) + { ++ struct event_file_link *link = NULL; + int ret = 0; + + if (file) { +- struct event_file_link *link; +- + link = kmalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + ret = -ENOMEM; +@@ -400,6 +399,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file) + else + ret = enable_kprobe(&tk->rp.kp); + } ++ ++ if (ret) { ++ if (file) { ++ /* Notice the if is true on not WARN() */ ++ if (!WARN_ON_ONCE(!link)) ++ list_del_rcu(&link->list); ++ kfree(link); ++ tk->tp.flags &= ~TP_FLAG_TRACE; ++ } else { ++ tk->tp.flags &= ~TP_FLAG_PROFILE; ++ } ++ } + out: + return ret; + } +diff --git a/mm/slub.c b/mm/slub.c +index c38e71cea6d3..10e54c4acd19 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -708,7 +708,7 @@ void object_err(struct kmem_cache *s, struct page *page, + print_trailer(s, page, object); + } + +-static void slab_err(struct kmem_cache *s, struct page *page, ++static __printf(3, 4) void slab_err(struct kmem_cache *s, struct page *page, + const char *fmt, ...) 
+ { + va_list args; +diff --git a/mm/vmalloc.c b/mm/vmalloc.c +index ebff729cc956..9ff21a12ea00 100644 +--- a/mm/vmalloc.c ++++ b/mm/vmalloc.c +@@ -1519,7 +1519,7 @@ static void __vunmap(const void *addr, int deallocate_pages) + addr)) + return; + +- area = remove_vm_area(addr); ++ area = find_vmap_area((unsigned long)addr)->vm; + if (unlikely(!area)) { + WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", + addr); +@@ -1529,6 +1529,7 @@ static void __vunmap(const void *addr, int deallocate_pages) + debug_check_no_locks_freed(addr, get_vm_area_size(area)); + debug_check_no_obj_freed(addr, get_vm_area_size(area)); + ++ remove_vm_area(addr); + if (deallocate_pages) { + int i; + +diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c +index 67eebcb113f3..5bbdd05d0cd3 100644 +--- a/net/ipv4/fib_frontend.c ++++ b/net/ipv4/fib_frontend.c +@@ -282,19 +282,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb) + return ip_hdr(skb)->daddr; + + in_dev = __in_dev_get_rcu(dev); +- BUG_ON(!in_dev); + + net = dev_net(dev); + + scope = RT_SCOPE_UNIVERSE; + if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) { ++ bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev); + struct flowi4 fl4 = { + .flowi4_iif = LOOPBACK_IFINDEX, + .flowi4_oif = l3mdev_master_ifindex_rcu(dev), + .daddr = ip_hdr(skb)->saddr, + .flowi4_tos = RT_TOS(ip_hdr(skb)->tos), + .flowi4_scope = scope, +- .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0, ++ .flowi4_mark = vmark ? skb->mark : 0, + }; + if (!fib_lookup(net, &fl4, &res, 0)) + return FIB_RES_PREFSRC(net, res); +diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c +index abdebca848c9..f0782c91514c 100644 +--- a/net/ipv4/ipconfig.c ++++ b/net/ipv4/ipconfig.c +@@ -781,6 +781,11 @@ static void __init ic_bootp_init_ext(u8 *e) + */ + static inline void __init ic_bootp_init(void) + { ++ /* Re-initialise all name servers to NONE, in case any were set via the ++ * "ip=" or "nfsaddrs=" kernel command line parameters: any IP addresses ++ * specified there will already have been decoded but are no longer ++ * needed ++ */ + ic_nameservers_predef(); + + dev_add_pack(&bootp_packet_type); +@@ -1402,6 +1407,13 @@ static int __init ip_auto_config(void) + int err; + unsigned int i; + ++ /* Initialise all name servers to NONE (but only if the "ip=" or ++ * "nfsaddrs=" kernel command line parameters weren't decoded, otherwise ++ * we'll overwrite the IP addresses specified there) ++ */ ++ if (ic_set_manually == 0) ++ ic_nameservers_predef(); ++ + #ifdef CONFIG_PROC_FS + proc_create("pnp", S_IRUGO, init_net.proc_net, &pnp_seq_fops); + #endif /* CONFIG_PROC_FS */ +@@ -1622,6 +1634,7 @@ static int __init ip_auto_config_setup(char *addrs) + return 1; + } + ++ /* Initialise all name servers to NONE */ + ic_nameservers_predef(); + + /* Parse string for static IP assignment. */ +diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c +index 9a0b952dd09b..06f247ca9197 100644 +--- a/net/ipv4/tcp_bbr.c ++++ b/net/ipv4/tcp_bbr.c +@@ -353,6 +353,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain) + /* Reduce delayed ACKs by rounding up cwnd to the next even number. */ + cwnd = (cwnd + 1) & ~1U; + ++ /* Ensure gain cycling gets inflight above BDP even for small BDPs. 
*/ ++ if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT) ++ cwnd += 2; ++ + return cwnd; + } + +diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c +index c78fb53988a1..1a9b88c8cf72 100644 +--- a/net/ipv4/tcp_dctcp.c ++++ b/net/ipv4/tcp_dctcp.c +@@ -138,7 +138,7 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); +- tcp_enter_quickack_mode(sk); ++ tcp_enter_quickack_mode(sk, 1); + } + + ca->prior_rcv_nxt = tp->rcv_nxt; +@@ -159,7 +159,7 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) + */ + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) + __tcp_send_ack(sk, ca->prior_rcv_nxt); +- tcp_enter_quickack_mode(sk); ++ tcp_enter_quickack_mode(sk, 1); + } + + ca->prior_rcv_nxt = tp->rcv_nxt; +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index b86e7b8beb1d..bdabd748f4bc 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -198,21 +198,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) + } + } + +-static void tcp_incr_quickack(struct sock *sk) ++static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks) + { + struct inet_connection_sock *icsk = inet_csk(sk); + unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); + + if (quickacks == 0) + quickacks = 2; ++ quickacks = min(quickacks, max_quickacks); + if (quickacks > icsk->icsk_ack.quick) +- icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); ++ icsk->icsk_ack.quick = quickacks; + } + +-void tcp_enter_quickack_mode(struct sock *sk) ++void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks) + { + struct inet_connection_sock *icsk = inet_csk(sk); +- tcp_incr_quickack(sk); ++ ++ tcp_incr_quickack(sk, max_quickacks); + icsk->icsk_ack.pingpong = 0; + icsk->icsk_ack.ato = TCP_ATO_MIN; + } +@@ -248,8 +250,10 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) + tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; + } + +-static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) ++static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) + { ++ struct tcp_sock *tp = tcp_sk(sk); ++ + switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { + case INET_ECN_NOT_ECT: + /* Funny extension: if ECT is not set on a segment, +@@ -257,31 +261,31 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) + * it is probably a retransmit. 
+ */ + if (tp->ecn_flags & TCP_ECN_SEEN) +- tcp_enter_quickack_mode((struct sock *)tp); ++ tcp_enter_quickack_mode(sk, 2); + break; + case INET_ECN_CE: +- if (tcp_ca_needs_ecn((struct sock *)tp)) +- tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); ++ if (tcp_ca_needs_ecn(sk)) ++ tcp_ca_event(sk, CA_EVENT_ECN_IS_CE); + + if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { + /* Better not delay acks, sender can have a very low cwnd */ +- tcp_enter_quickack_mode((struct sock *)tp); ++ tcp_enter_quickack_mode(sk, 2); + tp->ecn_flags |= TCP_ECN_DEMAND_CWR; + } + tp->ecn_flags |= TCP_ECN_SEEN; + break; + default: +- if (tcp_ca_needs_ecn((struct sock *)tp)) +- tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); ++ if (tcp_ca_needs_ecn(sk)) ++ tcp_ca_event(sk, CA_EVENT_ECN_NO_CE); + tp->ecn_flags |= TCP_ECN_SEEN; + break; + } + } + +-static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) ++static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) + { +- if (tp->ecn_flags & TCP_ECN_OK) +- __tcp_ecn_check_ce(tp, skb); ++ if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK) ++ __tcp_ecn_check_ce(sk, skb); + } + + static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) +@@ -686,7 +690,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) + /* The _first_ data packet received, initialize + * delayed ACK engine. + */ +- tcp_incr_quickack(sk); ++ tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); + icsk->icsk_ack.ato = TCP_ATO_MIN; + } else { + int m = now - icsk->icsk_ack.lrcvtime; +@@ -702,13 +706,13 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) + /* Too long gap. Apparently sender failed to + * restart window, so that we send ACKs quickly. + */ +- tcp_incr_quickack(sk); ++ tcp_incr_quickack(sk, TCP_MAX_QUICKACKS); + sk_mem_reclaim(sk); + } + } + icsk->icsk_ack.lrcvtime = now; + +- tcp_ecn_check_ce(tp, skb); ++ tcp_ecn_check_ce(sk, skb); + + if (skb->len >= 128) + tcp_grow_window(sk, skb); +@@ -4160,7 +4164,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb) + if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq && + before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST); +- tcp_enter_quickack_mode(sk); ++ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); + + if (tcp_is_sack(tp) && sysctl_tcp_dsack) { + u32 end_seq = TCP_SKB_CB(skb)->end_seq; +@@ -4441,7 +4445,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) + u32 seq, end_seq; + bool fragstolen; + +- tcp_ecn_check_ce(tp, skb); ++ tcp_ecn_check_ce(sk, skb); + + if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP); +@@ -4710,7 +4714,7 @@ queue_and_out: + tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); + + out_of_window: +- tcp_enter_quickack_mode(sk); ++ tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); + inet_csk_schedule_ack(sk); + drop: + tcp_drop(sk, skb); +@@ -4721,8 +4725,6 @@ drop: + if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp))) + goto out_of_window; + +- tcp_enter_quickack_mode(sk); +- + if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { + /* Partial packet, seq < rcv_next < end_seq */ + SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n", +@@ -5793,7 +5795,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb, + * to stand against the temptation 8) --ANK + */ + inet_csk_schedule_ack(sk); +- tcp_enter_quickack_mode(sk); ++ 
tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS); + inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, + TCP_DELACK_MAX, TCP_RTO_MAX); + +diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h +index 51063d9ed0f7..dfd268166e42 100644 +--- a/net/netfilter/ipset/ip_set_hash_gen.h ++++ b/net/netfilter/ipset/ip_set_hash_gen.h +@@ -1241,7 +1241,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set, + pr_debug("Create set %s with family %s\n", + set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6"); + +-#ifndef IP_SET_PROTO_UNDEF ++#ifdef IP_SET_PROTO_UNDEF ++ if (set->family != NFPROTO_UNSPEC) ++ return -IPSET_ERR_INVALID_FAMILY; ++#else + if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6)) + return -IPSET_ERR_INVALID_FAMILY; + #endif +diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c +index 85b549e84104..9a945024a0b6 100644 +--- a/net/netfilter/nf_tables_api.c ++++ b/net/netfilter/nf_tables_api.c +@@ -2710,12 +2710,13 @@ static struct nft_set *nf_tables_set_lookup_byid(const struct net *net, + u32 id = ntohl(nla_get_be32(nla)); + + list_for_each_entry(trans, &net->nft.commit_list, list) { +- struct nft_set *set = nft_trans_set(trans); ++ if (trans->msg_type == NFT_MSG_NEWSET) { ++ struct nft_set *set = nft_trans_set(trans); + +- if (trans->msg_type == NFT_MSG_NEWSET && +- id == nft_trans_set_id(trans) && +- nft_active_genmask(set, genmask)) +- return set; ++ if (id == nft_trans_set_id(trans) && ++ nft_active_genmask(set, genmask)) ++ return set; ++ } + } + return ERR_PTR(-ENOENT); + } +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index b3932846f6c4..b2fcbf012056 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -977,6 +977,11 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, + return err; + } + ++ if (nlk->ngroups == 0) ++ groups = 0; ++ else ++ groups &= (1ULL << nlk->ngroups) - 1; ++ + bound = nlk->bound; + if (bound) { + /* Ensure nlk->portid is up-to-date. 
*/ +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c +index 7e334fd31c15..f8553179bdd7 100644 +--- a/security/integrity/ima/ima_main.c ++++ b/security/integrity/ima/ima_main.c +@@ -379,6 +379,7 @@ int ima_read_file(struct file *file, enum kernel_read_file_id read_id) + + static int read_idmap[READING_MAX_ID] = { + [READING_FIRMWARE] = FIRMWARE_CHECK, ++ [READING_FIRMWARE_PREALLOC_BUFFER] = FIRMWARE_CHECK, + [READING_MODULE] = MODULE_CHECK, + [READING_KEXEC_IMAGE] = KEXEC_KERNEL_CHECK, + [READING_KEXEC_INITRAMFS] = KEXEC_INITRAMFS_CHECK, +diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c +index 2683b9717215..56be1630bd3e 100644 +--- a/sound/pci/emu10k1/emupcm.c ++++ b/sound/pci/emu10k1/emupcm.c +@@ -1850,7 +1850,9 @@ int snd_emu10k1_pcm_efx(struct snd_emu10k1 *emu, int device) + if (!kctl) + return -ENOMEM; + kctl->id.device = device; +- snd_ctl_add(emu->card, kctl); ++ err = snd_ctl_add(emu->card, kctl); ++ if (err < 0) ++ return err; + + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024); + +diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c +index 4f1f69be1865..8c778fa33031 100644 +--- a/sound/pci/emu10k1/memory.c ++++ b/sound/pci/emu10k1/memory.c +@@ -237,13 +237,13 @@ __found_pages: + static int is_valid_page(struct snd_emu10k1 *emu, dma_addr_t addr) + { + if (addr & ~emu->dma_mask) { +- dev_err(emu->card->dev, ++ dev_err_ratelimited(emu->card->dev, + "max memory size is 0x%lx (addr = 0x%lx)!!\n", + emu->dma_mask, (unsigned long)addr); + return 0; + } + if (addr & (EMUPAGESIZE-1)) { +- dev_err(emu->card->dev, "page is not aligned\n"); ++ dev_err_ratelimited(emu->card->dev, "page is not aligned\n"); + return 0; + } + return 1; +@@ -334,7 +334,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst + else + addr = snd_pcm_sgbuf_get_addr(substream, ofs); + if (! is_valid_page(emu, addr)) { +- dev_err(emu->card->dev, ++ dev_err_ratelimited(emu->card->dev, + "emu: failure page = %d\n", idx); + mutex_unlock(&hdr->block_mutex); + return NULL; +diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c +index 73a67bc3586b..e3fb9c61017c 100644 +--- a/sound/pci/fm801.c ++++ b/sound/pci/fm801.c +@@ -1068,11 +1068,19 @@ static int snd_fm801_mixer(struct fm801 *chip) + if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97_sec)) < 0) + return err; + } +- for (i = 0; i < FM801_CONTROLS; i++) +- snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls[i], chip)); ++ for (i = 0; i < FM801_CONTROLS; i++) { ++ err = snd_ctl_add(chip->card, ++ snd_ctl_new1(&snd_fm801_controls[i], chip)); ++ if (err < 0) ++ return err; ++ } + if (chip->multichannel) { +- for (i = 0; i < FM801_CONTROLS_MULTI; i++) +- snd_ctl_add(chip->card, snd_ctl_new1(&snd_fm801_controls_multi[i], chip)); ++ for (i = 0; i < FM801_CONTROLS_MULTI; i++) { ++ err = snd_ctl_add(chip->card, ++ snd_ctl_new1(&snd_fm801_controls_multi[i], chip)); ++ if (err < 0) ++ return err; ++ } + } + return 0; + } +diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c +index 3e73d5c6ccfc..119f3b504765 100644 +--- a/sound/pci/hda/patch_ca0132.c ++++ b/sound/pci/hda/patch_ca0132.c +@@ -38,6 +38,10 @@ + /* Enable this to see controls for tuning purpose. 
*/ + /*#define ENABLE_TUNING_CONTROLS*/ + ++#ifdef ENABLE_TUNING_CONTROLS ++#include <sound/tlv.h> ++#endif ++ + #define FLOAT_ZERO 0x00000000 + #define FLOAT_ONE 0x3f800000 + #define FLOAT_TWO 0x40000000 +@@ -3067,8 +3071,8 @@ static int equalizer_ctl_put(struct snd_kcontrol *kcontrol, + return 1; + } + +-static const DECLARE_TLV_DB_SCALE(voice_focus_db_scale, 2000, 100, 0); +-static const DECLARE_TLV_DB_SCALE(eq_db_scale, -2400, 100, 0); ++static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(voice_focus_db_scale, 2000, 100, 0); ++static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(eq_db_scale, -2400, 100, 0); + + static int add_tuning_control(struct hda_codec *codec, + hda_nid_t pnid, hda_nid_t nid, +diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c +index 94b88b897c3b..3d0dab8282ad 100644 +--- a/sound/soc/soc-pcm.c ++++ b/sound/soc/soc-pcm.c +@@ -1779,8 +1779,10 @@ int dpcm_be_dai_shutdown(struct snd_soc_pcm_runtime *fe, int stream) + continue; + + if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && +- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) +- continue; ++ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_OPEN)) { ++ soc_pcm_hw_free(be_substream); ++ be->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE; ++ } + + dev_dbg(be->dev, "ASoC: close BE %s\n", + be->dai_link->name); +diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c +index 30cdad2eab7f..c1619860a5de 100644 +--- a/sound/soc/soc-topology.c ++++ b/sound/soc/soc-topology.c +@@ -1997,6 +1997,13 @@ static void set_link_hw_format(struct snd_soc_dai_link *link, + + link->dai_fmt = hw_config->fmt & SND_SOC_DAIFMT_FORMAT_MASK; + ++ /* clock gating */ ++ if (hw_config->clock_gated == SND_SOC_TPLG_DAI_CLK_GATE_GATED) ++ link->dai_fmt |= SND_SOC_DAIFMT_GATED; ++ else if (hw_config->clock_gated == ++ SND_SOC_TPLG_DAI_CLK_GATE_CONT) ++ link->dai_fmt |= SND_SOC_DAIFMT_CONT; ++ + /* clock signal polarity */ + invert_bclk = hw_config->invert_bclk; + invert_fsync = hw_config->invert_fsync; +@@ -2010,13 +2017,15 @@ static void set_link_hw_format(struct snd_soc_dai_link *link, + link->dai_fmt |= SND_SOC_DAIFMT_IB_IF; + + /* clock masters */ +- bclk_master = hw_config->bclk_master; +- fsync_master = hw_config->fsync_master; +- if (!bclk_master && !fsync_master) ++ bclk_master = (hw_config->bclk_master == ++ SND_SOC_TPLG_BCLK_CM); ++ fsync_master = (hw_config->fsync_master == ++ SND_SOC_TPLG_FSYNC_CM); ++ if (bclk_master && fsync_master) + link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFM; +- else if (bclk_master && !fsync_master) +- link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFM; + else if (!bclk_master && fsync_master) ++ link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFM; ++ else if (bclk_master && !fsync_master) + link->dai_fmt |= SND_SOC_DAIFMT_CBM_CFS; + else + link->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS; +diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c +index 3cbfae6604f9..d8a46d46bcd2 100644 +--- a/sound/usb/pcm.c ++++ b/sound/usb/pcm.c +@@ -1311,7 +1311,7 @@ static void retire_capture_urb(struct snd_usb_substream *subs, + if (bytes % (runtime->sample_bits >> 3) != 0) { + int oldbytes = bytes; + bytes = frames * stride; +- dev_warn(&subs->dev->dev, ++ dev_warn_ratelimited(&subs->dev->dev, + "Corrected urb data len. 
%d->%d\n", + oldbytes, bytes); + } +diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y +index e81a20ea8d7d..988310cd3049 100644 +--- a/tools/perf/util/parse-events.y ++++ b/tools/perf/util/parse-events.y +@@ -72,6 +72,7 @@ static void inc_group_count(struct list_head *list, + %type <num> value_sym + %type <head> event_config + %type <head> opt_event_config ++%type <head> opt_pmu_config + %type <term> event_term + %type <head> event_pmu + %type <head> event_legacy_symbol +@@ -223,7 +224,7 @@ event_def: event_pmu | + event_bpf_file + + event_pmu: +-PE_NAME opt_event_config ++PE_NAME opt_pmu_config + { + struct list_head *list, *orig_terms, *terms; + +@@ -486,6 +487,17 @@ opt_event_config: + $$ = NULL; + } + ++opt_pmu_config: ++'/' event_config '/' ++{ ++ $$ = $2; ++} ++| ++'/' '/' ++{ ++ $$ = NULL; ++} ++ + start_terms: event_config + { + struct parse_events_state *parse_state = _parse_state; +diff --git a/tools/testing/selftests/intel_pstate/run.sh b/tools/testing/selftests/intel_pstate/run.sh +index c670359becc6..928978804342 100755 +--- a/tools/testing/selftests/intel_pstate/run.sh ++++ b/tools/testing/selftests/intel_pstate/run.sh +@@ -30,9 +30,12 @@ + + EVALUATE_ONLY=0 + ++# Kselftest framework requirement - SKIP code is 4. ++ksft_skip=4 ++ + if ! uname -m | sed -e s/i.86/x86/ -e s/x86_64/x86/ | grep -q x86; then + echo "$0 # Skipped: Test can only run on x86 architectures." +- exit 0 ++ exit $ksft_skip + fi + + max_cpus=$(($(nproc)-1)) +@@ -48,11 +51,12 @@ function run_test () { + + echo "sleeping for 5 seconds" + sleep 5 +- num_freqs=$(cat /proc/cpuinfo | grep MHz | sort -u | wc -l) +- if [ $num_freqs -le 2 ]; then +- cat /proc/cpuinfo | grep MHz | sort -u | tail -1 > /tmp/result.$1 ++ grep MHz /proc/cpuinfo | sort -u > /tmp/result.freqs ++ num_freqs=$(wc -l /tmp/result.freqs | awk ' { print $1 } ') ++ if [ $num_freqs -ge 2 ]; then ++ tail -n 1 /tmp/result.freqs > /tmp/result.$1 + else +- cat /proc/cpuinfo | grep MHz | sort -u > /tmp/result.$1 ++ cp /tmp/result.freqs /tmp/result.$1 + fi + ./msr 0 >> /tmp/result.$1 + +@@ -82,21 +86,20 @@ _max_freq=$(cpupower frequency-info -l | tail -1 | awk ' { print $2 } ') + max_freq=$(($_max_freq / 1000)) + + +-for freq in `seq $max_freq -100 $min_freq` ++[ $EVALUATE_ONLY -eq 0 ] && for freq in `seq $max_freq -100 $min_freq` + do + echo "Setting maximum frequency to $freq" + cpupower frequency-set -g powersave --max=${freq}MHz >& /dev/null +- [ $EVALUATE_ONLY -eq 0 ] && run_test $freq ++ run_test $freq + done + +-echo "==============================================================================" ++[ $EVALUATE_ONLY -eq 0 ] && cpupower frequency-set -g powersave --max=${max_freq}MHz >& /dev/null + ++echo "==============================================================================" + echo "The marketing frequency of the cpu is $mkt_freq MHz" + echo "The maximum frequency of the cpu is $max_freq MHz" + echo "The minimum frequency of the cpu is $min_freq MHz" + +-cpupower frequency-set -g powersave --max=${max_freq}MHz >& /dev/null +- + # make a pretty table + echo "Target Actual Difference MSR(0x199) max_perf_pct" + for freq in `seq $max_freq -100 $min_freq` +@@ -104,10 +107,6 @@ do + result_freq=$(cat /tmp/result.${freq} | grep "cpu MHz" | awk ' { print $4 } ' | awk -F "." 
' { print $1 } ') + msr=$(cat /tmp/result.${freq} | grep "msr" | awk ' { print $3 } ') + max_perf_pct=$(cat /tmp/result.${freq} | grep "max_perf_pct" | awk ' { print $2 } ' ) +- if [ $result_freq -eq $freq ]; then +- echo " $freq $result_freq 0 $msr $(($max_perf_pct*3300))" +- else +- echo " $freq $result_freq $(($result_freq-$freq)) $msr $(($max_perf_pct*$max_freq))" +- fi ++ echo " $freq $result_freq $(($result_freq-$freq)) $msr $(($max_perf_pct*$max_freq))" + done + exit 0 +diff --git a/tools/testing/selftests/memfd/run_tests.sh b/tools/testing/selftests/memfd/run_tests.sh +index daabb350697c..bf83db61013a 100755 +--- a/tools/testing/selftests/memfd/run_tests.sh ++++ b/tools/testing/selftests/memfd/run_tests.sh +@@ -1,6 +1,9 @@ + #!/bin/bash + # please run as root + ++# Kselftest framework requirement - SKIP code is 4. ++ksft_skip=4 ++ + # + # Normal tests requiring no special resources + # +@@ -29,12 +32,13 @@ if [ -n "$freepgs" ] && [ $freepgs -lt $hpages_test ]; then + nr_hugepgs=`cat /proc/sys/vm/nr_hugepages` + hpages_needed=`expr $hpages_test - $freepgs` + ++ if [ $UID != 0 ]; then ++ echo "Please run memfd with hugetlbfs test as root" ++ exit $ksft_skip ++ fi ++ + echo 3 > /proc/sys/vm/drop_caches + echo $(( $hpages_needed + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages +- if [ $? -ne 0 ]; then +- echo "Please run this test as root" +- exit 1 +- fi + while read name size unit; do + if [ "$name" = "HugePages_Free:" ]; then + freepgs=$size +@@ -53,7 +57,7 @@ if [ $freepgs -lt $hpages_test ]; then + fi + printf "Not enough huge pages available (%d < %d)\n" \ + $freepgs $needpgs +- exit 1 ++ exit $ksft_skip + fi + + # +diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c +index d1fc0f9f00fb..ed8c9d360c0f 100644 +--- a/tools/usb/usbip/libsrc/vhci_driver.c ++++ b/tools/usb/usbip/libsrc/vhci_driver.c +@@ -135,11 +135,11 @@ static int refresh_imported_device_list(void) + return 0; + } + +-static int get_nports(void) ++static int get_nports(struct udev_device *hc_device) + { + const char *attr_nports; + +- attr_nports = udev_device_get_sysattr_value(vhci_driver->hc_device, "nports"); ++ attr_nports = udev_device_get_sysattr_value(hc_device, "nports"); + if (!attr_nports) { + err("udev_device_get_sysattr_value nports failed"); + return -1; +@@ -242,35 +242,41 @@ static int read_record(int rhport, char *host, unsigned long host_len, + + int usbip_vhci_driver_open(void) + { ++ int nports; ++ struct udev_device *hc_device; ++ + udev_context = udev_new(); + if (!udev_context) { + err("udev_new failed"); + return -1; + } + +- vhci_driver = calloc(1, sizeof(struct usbip_vhci_driver)); +- + /* will be freed in usbip_driver_close() */ +- vhci_driver->hc_device = ++ hc_device = + udev_device_new_from_subsystem_sysname(udev_context, + USBIP_VHCI_BUS_TYPE, + USBIP_VHCI_DEVICE_NAME); +- if (!vhci_driver->hc_device) { ++ if (!hc_device) { + err("udev_device_new_from_subsystem_sysname failed"); + goto err; + } + +- vhci_driver->nports = get_nports(); +- dbg("available ports: %d", vhci_driver->nports); +- +- if (vhci_driver->nports <= 0) { ++ nports = get_nports(hc_device); ++ if (nports <= 0) { + err("no available ports"); + goto err; +- } else if (vhci_driver->nports > MAXNPORT) { +- err("port number exceeds %d", MAXNPORT); ++ } ++ dbg("available ports: %d", nports); ++ ++ vhci_driver = calloc(1, sizeof(struct usbip_vhci_driver) + ++ nports * sizeof(struct usbip_imported_device)); ++ if (!vhci_driver) { ++ err("vhci_driver allocation failed"); + goto err; + } + ++ 
vhci_driver->nports = nports; ++ vhci_driver->hc_device = hc_device; + vhci_driver->ncontrollers = get_ncontrollers(); + dbg("available controllers: %d", vhci_driver->ncontrollers); + +@@ -285,7 +291,7 @@ int usbip_vhci_driver_open(void) + return 0; + + err: +- udev_device_unref(vhci_driver->hc_device); ++ udev_device_unref(hc_device); + + if (vhci_driver) + free(vhci_driver); +diff --git a/tools/usb/usbip/libsrc/vhci_driver.h b/tools/usb/usbip/libsrc/vhci_driver.h +index 418b404d5121..6c9aca216705 100644 +--- a/tools/usb/usbip/libsrc/vhci_driver.h ++++ b/tools/usb/usbip/libsrc/vhci_driver.h +@@ -13,7 +13,6 @@ + + #define USBIP_VHCI_BUS_TYPE "platform" + #define USBIP_VHCI_DEVICE_NAME "vhci_hcd.0" +-#define MAXNPORT 128 + + enum hub_speed { + HUB_SPEED_HIGH = 0, +@@ -41,7 +40,7 @@ struct usbip_vhci_driver { + + int ncontrollers; + int nports; +- struct usbip_imported_device idev[MAXNPORT]; ++ struct usbip_imported_device idev[]; + }; + + +diff --git a/tools/usb/usbip/src/usbip_detach.c b/tools/usb/usbip/src/usbip_detach.c +index 9db9d21bb2ec..6a8db858caa5 100644 +--- a/tools/usb/usbip/src/usbip_detach.c ++++ b/tools/usb/usbip/src/usbip_detach.c +@@ -43,7 +43,7 @@ void usbip_detach_usage(void) + + static int detach_port(char *port) + { +- int ret; ++ int ret = 0; + uint8_t portnum; + char path[PATH_MAX+1]; + +@@ -73,9 +73,12 @@ static int detach_port(char *port) + } + + ret = usbip_vhci_detach_device(portnum); +- if (ret < 0) +- return -1; ++ if (ret < 0) { ++ ret = -1; ++ goto call_driver_close; ++ } + ++call_driver_close: + usbip_vhci_driver_close(); + + return ret; |
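
(Editor's note, not part of the patch: the usbip hunks above drop the fixed MAXNPORT-sized idev[] array from struct usbip_vhci_driver and instead allocate the trailing array from the nports value read out of sysfs at open time. A minimal standalone sketch of that flexible-array-member allocation pattern follows; the struct and function names here are illustrative only and are not taken from the patch.)

	/*
	 * Sketch of the C99 flexible-array-member pattern: one calloc()
	 * covers the fixed header plus however many trailing entries are
	 * needed, so no compile-time MAXNPORT-style cap is required.
	 */
	#include <stdio.h>
	#include <stdlib.h>

	struct port_info {
		int portnum;
		int status;
	};

	struct port_list {
		int nports;
		struct port_info ports[];	/* flexible array member, must be last */
	};

	static struct port_list *port_list_alloc(int nports)
	{
		struct port_list *pl;

		/* header plus nports trailing entries in a single allocation */
		pl = calloc(1, sizeof(*pl) + nports * sizeof(pl->ports[0]));
		if (!pl)
			return NULL;
		pl->nports = nports;
		return pl;
	}

	int main(void)
	{
		struct port_list *pl = port_list_alloc(8);

		if (!pl)
			return 1;
		for (int i = 0; i < pl->nports; i++)
			pl->ports[i].portnum = i;
		printf("allocated %d ports\n", pl->nports);
		free(pl);
		return 0;
	}
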