author     Mike Pagano <mpagano@gentoo.org>  2018-07-03 09:18:56 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2018-07-03 09:18:56 -0400
commit     59757de106df0c4e1e658baa34090609c99946b5 (patch)
tree       a80eb224241f273a1802a8625cc81b62bfc3847d
parent     kvmclock: Define pvclock_pvti_cpu0_va setter for X86_32. See bug #658544. (diff)
Linux patch 4.17.4
-rw-r--r--  0000_README              |    4
-rw-r--r--  1003_linux-4.17.4.patch  | 6875
2 files changed, 6879 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index af14401a..f45eebec 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch: 1002_linux-4.17.3.patch
From: http://www.kernel.org
Desc: Linux 4.17.3
+Patch: 1003_linux-4.17.4.patch
+From: http://www.kernel.org
+Desc: Linux 4.17.4
+
Patch: 1800_iommu-amd-dma-direct-revert.patch
From: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/patch/?id=e16c4790de39dc861b749674c2a9319507f6f64f
Desc: Revert iommu/amd_iommu: Use CONFIG_DMA_DIRECT_OPS=y and dma_direct_{alloc,free}(). See bug #658538.
diff --git a/1003_linux-4.17.4.patch b/1003_linux-4.17.4.patch
new file mode 100644
index 00000000..76692ff6
--- /dev/null
+++ b/1003_linux-4.17.4.patch
@@ -0,0 +1,6875 @@
+diff --git a/Documentation/ABI/testing/sysfs-class-cxl b/Documentation/ABI/testing/sysfs-class-cxl
+index 8e69345c37cc..bbbabffc682a 100644
+--- a/Documentation/ABI/testing/sysfs-class-cxl
++++ b/Documentation/ABI/testing/sysfs-class-cxl
+@@ -69,7 +69,9 @@ Date: September 2014
+ Contact: linuxppc-dev@lists.ozlabs.org
+ Description: read/write
+ Set the mode for prefaulting in segments into the segment table
+- when performing the START_WORK ioctl. Possible values:
++ when performing the START_WORK ioctl. Only applicable when
++ running under hashed page table mmu.
++ Possible values:
+ none: No prefaulting (default)
+ work_element_descriptor: Treat the work element
+ descriptor as an effective address and
+diff --git a/Documentation/core-api/printk-formats.rst b/Documentation/core-api/printk-formats.rst
+index eb30efdd2e78..25dc591cb110 100644
+--- a/Documentation/core-api/printk-formats.rst
++++ b/Documentation/core-api/printk-formats.rst
+@@ -419,11 +419,10 @@ struct clk
+
+ %pC pll1
+ %pCn pll1
+- %pCr 1560000000
+
+ For printing struct clk structures. %pC and %pCn print the name
+ (Common Clock Framework) or address (legacy clock framework) of the
+-structure; %pCr prints the current clock rate.
++structure.
+
+ Passed by reference.
+
+diff --git a/Makefile b/Makefile
+index 31dc3a08295a..1d740dbe676d 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 17
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Merciless Moray
+
+diff --git a/arch/arm/boot/dts/mt7623.dtsi b/arch/arm/boot/dts/mt7623.dtsi
+index e10c03496524..a115575b38bf 100644
+--- a/arch/arm/boot/dts/mt7623.dtsi
++++ b/arch/arm/boot/dts/mt7623.dtsi
+@@ -22,11 +22,12 @@
+ #include <dt-bindings/phy/phy.h>
+ #include <dt-bindings/reset/mt2701-resets.h>
+ #include <dt-bindings/thermal/thermal.h>
+-#include "skeleton64.dtsi"
+
+ / {
+ compatible = "mediatek,mt7623";
+ interrupt-parent = <&sysirq>;
++ #address-cells = <2>;
++ #size-cells = <2>;
+
+ cpu_opp_table: opp-table {
+ compatible = "operating-points-v2";
+diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+index bbf56f855e46..5938e4c79deb 100644
+--- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
++++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
+@@ -109,6 +109,7 @@
+ };
+
+ memory@80000000 {
++ device_type = "memory";
+ reg = <0 0x80000000 0 0x40000000>;
+ };
+ };
+diff --git a/arch/arm/boot/dts/mt7623n-rfb.dtsi b/arch/arm/boot/dts/mt7623n-rfb.dtsi
+index 256c5fd947bf..43c9d7ca23a0 100644
+--- a/arch/arm/boot/dts/mt7623n-rfb.dtsi
++++ b/arch/arm/boot/dts/mt7623n-rfb.dtsi
+@@ -47,6 +47,7 @@
+ };
+
+ memory@80000000 {
++ device_type = "memory";
+ reg = <0 0x80000000 0 0x40000000>;
+ };
+
+diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
+index 486d4e7433ed..b38f8c240558 100644
+--- a/arch/arm/boot/dts/socfpga.dtsi
++++ b/arch/arm/boot/dts/socfpga.dtsi
+@@ -748,13 +748,13 @@
+ nand0: nand@ff900000 {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+- compatible = "denali,denali-nand-dt";
++ compatible = "altr,socfpga-denali-nand";
+ reg = <0xff900000 0x100000>,
+ <0xffb80000 0x10000>;
+ reg-names = "nand_data", "denali_reg";
+ interrupts = <0x0 0x90 0x4>;
+ dma-mask = <0xffffffff>;
+- clocks = <&nand_clk>;
++ clocks = <&nand_x_clk>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
+index bead79e4b2aa..791ca15c799e 100644
+--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
++++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
+@@ -593,8 +593,7 @@
+ #size-cells = <0>;
+ reg = <0xffda5000 0x100>;
+ interrupts = <0 102 4>;
+- num-chipselect = <4>;
+- bus-num = <0>;
++ num-cs = <4>;
+ /*32bit_access;*/
+ tx-dma-channel = <&pdma 16>;
+ rx-dma-channel = <&pdma 17>;
+@@ -633,7 +632,7 @@
+ nand: nand@ffb90000 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+- compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand";
++ compatible = "altr,socfpga-denali-nand";
+ reg = <0xffb90000 0x72000>,
+ <0xffb80000 0x10000>;
+ reg-names = "nand_data", "denali_reg";
+diff --git a/arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dts b/arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dts
+index b20a710da7bc..7a4fca36c673 100644
+--- a/arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dts
++++ b/arch/arm/boot/dts/sun8i-h3-libretech-all-h3-cc.dts
+@@ -62,8 +62,8 @@
+ reg_vcc1v2: vcc1v2 {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc1v2";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&reg_vcc5v0>;
+@@ -113,8 +113,8 @@
+ reg_vdd_cpux: vdd-cpux {
+ compatible = "regulator-fixed";
+ regulator-name = "vdd-cpux";
+- regulator-min-microvolt = <3300000>;
+- regulator-max-microvolt = <3300000>;
++ regulator-min-microvolt = <1200000>;
++ regulator-max-microvolt = <1200000>;
+ regulator-always-on;
+ regulator-boot-on;
+ vin-supply = <&reg_vcc5v0>;
+diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
+index 3b73fdcf3627..8de1100d1067 100644
+--- a/arch/arm/include/asm/kgdb.h
++++ b/arch/arm/include/asm/kgdb.h
+@@ -77,7 +77,7 @@ extern int kgdb_fault_expected;
+
+ #define KGDB_MAX_NO_CPUS 1
+ #define BUFMAX 400
+-#define NUMREGBYTES (DBG_MAX_REG_NUM << 2)
++#define NUMREGBYTES (GDB_MAX_REGS << 2)
+ #define NUMCRITREGBYTES (32 << 2)
+
+ #define _R0 0
+diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+index c89d0c307f8d..2c63e60754c5 100644
+--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
++++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+@@ -252,8 +252,7 @@
+ interrupts = <0 99 4>;
+ resets = <&rst SPIM0_RESET>;
+ reg-io-width = <4>;
+- num-chipselect = <4>;
+- bus-num = <0>;
++ num-cs = <4>;
+ status = "disabled";
+ };
+
+@@ -265,8 +264,7 @@
+ interrupts = <0 100 4>;
+ resets = <&rst SPIM1_RESET>;
+ reg-io-width = <4>;
+- num-chipselect = <4>;
+- bus-num = <0>;
++ num-cs = <4>;
+ status = "disabled";
+ };
+
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+index 3c31e21cbed7..69693977fe07 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+@@ -35,6 +35,12 @@
+ no-map;
+ };
+
++ /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
++ secmon_reserved_alt: secmon@5000000 {
++ reg = <0x0 0x05000000 0x0 0x300000>;
++ no-map;
++ };
++
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+index 3e3eb31748a3..f63bceb88caa 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+@@ -234,9 +234,6 @@
+
+ bus-width = <4>;
+ cap-sd-highspeed;
+- sd-uhs-sdr12;
+- sd-uhs-sdr25;
+- sd-uhs-sdr50;
+ max-frequency = <100000000>;
+ disable-wp;
+
+diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+index dba365ed4bd5..33c15f2a949e 100644
+--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
++++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+@@ -13,14 +13,6 @@
+ / {
+ compatible = "amlogic,meson-gxl";
+
+- reserved-memory {
+- /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
+- secmon_reserved_alt: secmon@5000000 {
+- reg = <0x0 0x05000000 0x0 0x300000>;
+- no-map;
+- };
+- };
+-
+ soc {
+ usb0: usb@c9000000 {
+ status = "disabled";
+diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
+index ed2f1237ea1e..8259b32f0ced 100644
+--- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
++++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
+@@ -149,7 +149,7 @@
+
+ CP110_LABEL(icu): interrupt-controller@1e0000 {
+ compatible = "marvell,cp110-icu";
+- reg = <0x1e0000 0x10>;
++ reg = <0x1e0000 0x440>;
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ msi-parent = <&gicp>;
+diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
+index 253188fb8cb0..e3e50950a863 100644
+--- a/arch/arm64/crypto/aes-glue.c
++++ b/arch/arm64/crypto/aes-glue.c
+@@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
+ kernel_neon_begin();
+ aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
+ (u8 *)ctx->key_enc, rounds, blocks, walk.iv);
+- err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+ kernel_neon_end();
++ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
+ }
+ if (walk.nbytes) {
+ u8 __aligned(8) tail[AES_BLOCK_SIZE];
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 9d1b06d67c53..df0bd090f0e4 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -937,7 +937,7 @@ static int __init parse_kpti(char *str)
+ __kpti_forced = enabled ? 1 : -1;
+ return 0;
+ }
+-__setup("kpti=", parse_kpti);
++early_param("kpti", parse_kpti);
+ #endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
+
+ #ifdef CONFIG_ARM64_HW_AFDBM
+diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
+index 154b7d30145d..f21209064041 100644
+--- a/arch/arm64/kernel/signal.c
++++ b/arch/arm64/kernel/signal.c
+@@ -830,11 +830,12 @@ static void do_signal(struct pt_regs *regs)
+ unsigned long continue_addr = 0, restart_addr = 0;
+ int retval = 0;
+ struct ksignal ksig;
++ bool syscall = in_syscall(regs);
+
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+- if (in_syscall(regs)) {
++ if (syscall) {
+ continue_addr = regs->pc;
+ restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4);
+ retval = regs->regs[0];
+@@ -886,7 +887,7 @@ static void do_signal(struct pt_regs *regs)
+ * Handle restarting a different system call. As above, if a debugger
+ * has chosen to restart at a different PC, ignore the restart.
+ */
+- if (in_syscall(regs) && regs->pc == restart_addr) {
++ if (syscall && regs->pc == restart_addr) {
+ if (retval == -ERESTART_RESTARTBLOCK)
+ setup_restart_syscall(regs);
+ user_rewind_single_step(current);
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index 5f9a73a4452c..03646e6a2ef4 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
+
+ .macro __idmap_kpti_put_pgtable_ent_ng, type
+ orr \type, \type, #PTE_NG // Same bit for blocks and pages
+- str \type, [cur_\()\type\()p] // Update the entry and ensure it
+- dc civac, cur_\()\type\()p // is visible to all CPUs.
++ str \type, [cur_\()\type\()p] // Update the entry and ensure
++ dmb sy // that it is visible to all
++ dc civac, cur_\()\type\()p // CPUs.
+ .endm
+
+ /*
+diff --git a/arch/m68k/mac/config.c b/arch/m68k/mac/config.c
+index 0c3275aa0197..e522307db47c 100644
+--- a/arch/m68k/mac/config.c
++++ b/arch/m68k/mac/config.c
+@@ -1005,7 +1005,7 @@ int __init mac_platform_init(void)
+ struct resource swim_rsrc = {
+ .flags = IORESOURCE_MEM,
+ .start = (resource_size_t)swim_base,
+- .end = (resource_size_t)swim_base + 0x2000,
++ .end = (resource_size_t)swim_base + 0x1FFF,
+ };
+
+ platform_device_register_simple("swim", -1, &swim_rsrc, 1);
+diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
+index c2a38321c96d..3b420f6d8822 100644
+--- a/arch/m68k/mm/kmap.c
++++ b/arch/m68k/mm/kmap.c
+@@ -89,7 +89,8 @@ static inline void free_io_area(void *addr)
+ for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
+ if (tmp->addr == addr) {
+ *p = tmp->next;
+- __iounmap(tmp->addr, tmp->size);
++ /* remove gap added in get_io_area() */
++ __iounmap(tmp->addr, tmp->size - IO_SIZE);
+ kfree(tmp);
+ return;
+ }
+diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
+index 6b2c6f3baefa..75fb96ca61db 100644
+--- a/arch/mips/ath79/mach-pb44.c
++++ b/arch/mips/ath79/mach-pb44.c
+@@ -34,7 +34,7 @@
+ #define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL)
+
+ static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
+- .dev_id = "i2c-gpio",
++ .dev_id = "i2c-gpio.0",
+ .table = {
+ GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
+ NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
+diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
+index 6054d49e608e..8c9cbf13d32a 100644
+--- a/arch/mips/bcm47xx/setup.c
++++ b/arch/mips/bcm47xx/setup.c
+@@ -212,6 +212,12 @@ static int __init bcm47xx_cpu_fixes(void)
+ */
+ if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
+ cpu_wait = NULL;
++
++ /*
++ * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
++ * Enable ExternalSync for sync instruction to take effect
++ */
++ set_c0_config7(MIPS_CONF7_ES);
+ break;
+ #endif
+ }
+diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
+index a7d0b836f2f7..cea8ad864b3f 100644
+--- a/arch/mips/include/asm/io.h
++++ b/arch/mips/include/asm/io.h
+@@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
+ __val = *__addr; \
+ slow; \
+ \
++ /* prevent prefetching of coherent DMA data prematurely */ \
++ rmb(); \
+ return pfx##ioswab##bwlq(__addr, __val); \
+ }
+
+diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
+index f65859784a4c..eeb131e2048e 100644
+--- a/arch/mips/include/asm/mipsregs.h
++++ b/arch/mips/include/asm/mipsregs.h
+@@ -681,6 +681,8 @@
+ #define MIPS_CONF7_WII (_ULCAST_(1) << 31)
+
+ #define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
++/* ExternalSync */
++#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
+
+ #define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
+ #define MIPS_CONF7_AR (_ULCAST_(1) << 16)
+@@ -2760,6 +2762,7 @@ __BUILD_SET_C0(status)
+ __BUILD_SET_C0(cause)
+ __BUILD_SET_C0(config)
+ __BUILD_SET_C0(config5)
++__BUILD_SET_C0(config7)
+ __BUILD_SET_C0(intcontrol)
+ __BUILD_SET_C0(intctl)
+ __BUILD_SET_C0(srsmap)
+diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
+index f2ee7e1e3342..cff52b283e03 100644
+--- a/arch/mips/kernel/mcount.S
++++ b/arch/mips/kernel/mcount.S
+@@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
+ EXPORT_SYMBOL(_mcount)
+ PTR_LA t1, ftrace_stub
+ PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
+- bne t1, t2, static_trace
++ beq t1, t2, fgraph_trace
+ nop
+
++ MCOUNT_SAVE_REGS
++
++ move a0, ra /* arg1: self return address */
++ jalr t2 /* (1) call *ftrace_trace_function */
++ move a1, AT /* arg2: parent's return address */
++
++ MCOUNT_RESTORE_REGS
++
++fgraph_trace:
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
++ PTR_LA t1, ftrace_stub
+ PTR_L t3, ftrace_graph_return
+ bne t1, t3, ftrace_graph_caller
+ nop
+@@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
+ bne t1, t3, ftrace_graph_caller
+ nop
+ #endif
+- b ftrace_stub
+-#ifdef CONFIG_32BIT
+- addiu sp, sp, 8
+-#else
+- nop
+-#endif
+
+-static_trace:
+- MCOUNT_SAVE_REGS
+-
+- move a0, ra /* arg1: self return address */
+- jalr t2 /* (1) call *ftrace_trace_function */
+- move a1, AT /* arg2: parent's return address */
+-
+- MCOUNT_RESTORE_REGS
+ #ifdef CONFIG_32BIT
+ addiu sp, sp, 8
+ #endif
++
+ .globl ftrace_stub
+ ftrace_stub:
+ RETURN_BACK
+diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
+index 95813df90801..bb2523b4bd8f 100644
+--- a/arch/powerpc/Makefile
++++ b/arch/powerpc/Makefile
+@@ -251,6 +251,7 @@ cpu-as-$(CONFIG_4xx) += -Wa,-m405
+ cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
+ cpu-as-$(CONFIG_E200) += -Wa,-me200
+ cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
++cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
+
+ KBUILD_AFLAGS += $(cpu-as-y)
+ KBUILD_CFLAGS += $(cpu-as-y)
+diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
+index c904477abaf3..d926100da914 100644
+--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
++++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
+@@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void)
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
+ cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+- } else /* DD2.1 and up have DD2_1 */
++ } else if ((version & 0xffff0000) == 0x004e0000)
++ /* DD2.1 and up have DD2_1 */
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
+
+ if ((version & 0xffff0000) == 0x004e0000) {
+diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
+index 51695608c68b..3d1af55e09dc 100644
+--- a/arch/powerpc/kernel/entry_64.S
++++ b/arch/powerpc/kernel/entry_64.S
+@@ -596,6 +596,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
+ * actually hit this code path.
+ */
+
++ isync
+ slbie r6
+ slbie r6 /* Workaround POWER5 < DD2.1 issue */
+ slbmte r7,r0
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index 3c2c2688918f..fe631022ea89 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -1155,6 +1155,9 @@ void fadump_cleanup(void)
+ init_fadump_mem_struct(&fdm,
+ be64_to_cpu(fdm_active->cpu_state_data.destination_address));
+ fadump_invalidate_dump(&fdm);
++ } else if (fw_dump.dump_registered) {
++ /* Un-register Firmware-assisted dump if it was registered. */
++ fadump_unregister_dump(&fdm);
+ }
+ }
+
+diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
+index 4c1012b80d3b..80547dad37da 100644
+--- a/arch/powerpc/kernel/hw_breakpoint.c
++++ b/arch/powerpc/kernel/hw_breakpoint.c
+@@ -178,8 +178,8 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
+ if (cpu_has_feature(CPU_FTR_DAWR)) {
+ length_max = 512 ; /* 64 doublewords */
+ /* DAWR region can't cross 512 boundary */
+- if ((bp->attr.bp_addr >> 10) !=
+- ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
++ if ((bp->attr.bp_addr >> 9) !=
++ ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 9))
+ return -EINVAL;
+ }
+ if (info->len >
+diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
+index d23cf632edf0..0f63dd5972e9 100644
+--- a/arch/powerpc/kernel/ptrace.c
++++ b/arch/powerpc/kernel/ptrace.c
+@@ -2443,6 +2443,7 @@ static int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
+ /* Create a new breakpoint request if one doesn't exist already */
+ hw_breakpoint_init(&attr);
+ attr.bp_addr = hw_brk.address;
++ attr.bp_len = 8;
+ arch_bp_generic_fields(hw_brk.type,
+ &attr.bp_type);
+
+diff --git a/arch/powerpc/mm/pkeys.c b/arch/powerpc/mm/pkeys.c
+index 0eafdf01edc7..e6f500fabf5e 100644
+--- a/arch/powerpc/mm/pkeys.c
++++ b/arch/powerpc/mm/pkeys.c
+@@ -383,9 +383,9 @@ int __arch_override_mprotect_pkey(struct vm_area_struct *vma, int prot,
+ {
+ /*
+ * If the currently associated pkey is execute-only, but the requested
+- * protection requires read or write, move it back to the default pkey.
++ * protection is not execute-only, move it back to the default pkey.
+ */
+- if (vma_is_pkey_exec_only(vma) && (prot & (PROT_READ | PROT_WRITE)))
++ if (vma_is_pkey_exec_only(vma) && (prot != PROT_EXEC))
+ return 0;
+
+ /*
+diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
+index a5d7309c2d05..465cb604b33a 100644
+--- a/arch/powerpc/mm/tlb-radix.c
++++ b/arch/powerpc/mm/tlb-radix.c
+@@ -733,6 +733,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
+ for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
+ if (sib == cpu)
+ continue;
++ if (!cpu_possible(sib))
++ continue;
+ if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
+ flush = true;
+ }
+diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
+index d7532e7b9ab5..75fb23c24ee8 100644
+--- a/arch/powerpc/perf/imc-pmu.c
++++ b/arch/powerpc/perf/imc-pmu.c
+@@ -1146,7 +1146,7 @@ static int init_nest_pmu_ref(void)
+
+ static void cleanup_all_core_imc_memory(void)
+ {
+- int i, nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
++ int i, nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
+ struct imc_mem_info *ptr = core_imc_pmu->mem_info;
+ int size = core_imc_pmu->counter_mem_size;
+
+@@ -1264,7 +1264,7 @@ static int imc_mem_init(struct imc_pmu *pmu_ptr, struct device_node *parent,
+ if (!pmu_ptr->pmu.name)
+ return -ENOMEM;
+
+- nr_cores = DIV_ROUND_UP(num_present_cpus(), threads_per_core);
++ nr_cores = DIV_ROUND_UP(num_possible_cpus(), threads_per_core);
+ pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
+ GFP_KERNEL);
+
+diff --git a/arch/powerpc/platforms/powernv/copy-paste.h b/arch/powerpc/platforms/powernv/copy-paste.h
+index c9a503623431..e9a6c35f8a29 100644
+--- a/arch/powerpc/platforms/powernv/copy-paste.h
++++ b/arch/powerpc/platforms/powernv/copy-paste.h
+@@ -42,5 +42,6 @@ static inline int vas_paste(void *paste_address, int offset)
+ : "b" (offset), "b" (paste_address)
+ : "memory", "cr0");
+
+- return (cr >> CR0_SHIFT) & CR0_MASK;
++ /* We mask with 0xE to ignore SO */
++ return (cr >> CR0_SHIFT) & 0xE;
+ }
+diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
+index 1f12ab1e6030..1c5d0675b43c 100644
+--- a/arch/powerpc/platforms/powernv/idle.c
++++ b/arch/powerpc/platforms/powernv/idle.c
+@@ -79,7 +79,7 @@ static int pnv_save_sprs_for_deep_states(void)
+ uint64_t msr_val = MSR_IDLE;
+ uint64_t psscr_val = pnv_deepest_stop_psscr_val;
+
+- for_each_possible_cpu(cpu) {
++ for_each_present_cpu(cpu) {
+ uint64_t pir = get_hard_smp_processor_id(cpu);
+ uint64_t hsprg0_val = (uint64_t)paca_ptrs[cpu];
+
+@@ -814,7 +814,7 @@ static int __init pnv_init_idle_states(void)
+ int cpu;
+
+ pr_info("powernv: idle: Saving PACA pointers of all CPUs in their thread sibling PACA\n");
+- for_each_possible_cpu(cpu) {
++ for_each_present_cpu(cpu) {
+ int base_cpu = cpu_first_thread_sibling(cpu);
+ int idx = cpu_thread_in_core(cpu);
+ int i;
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 3f9c69d7623a..f7d9b3433a29 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -3642,7 +3642,6 @@ static void pnv_pci_ioda2_release_pe_dma(struct pnv_ioda_pe *pe)
+ WARN_ON(pe->table_group.group);
+ }
+
+- pnv_pci_ioda2_table_free_pages(tbl);
+ iommu_tce_table_put(tbl);
+ }
+
+diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c
+index 02168fe25105..cf9bf9b43ec3 100644
+--- a/arch/um/drivers/vector_kern.c
++++ b/arch/um/drivers/vector_kern.c
+@@ -188,7 +188,7 @@ static int get_transport_options(struct arglist *def)
+ if (strncmp(transport, TRANS_TAP, TRANS_TAP_LEN) == 0)
+ return (vec_rx | VECTOR_BPF);
+ if (strncmp(transport, TRANS_RAW, TRANS_RAW_LEN) == 0)
+- return (vec_rx | vec_tx);
++ return (vec_rx | vec_tx | VECTOR_QDISC_BYPASS);
+ return (vec_rx | vec_tx);
+ }
+
+@@ -504,15 +504,19 @@ static struct vector_queue *create_queue(
+
+ result = kmalloc(sizeof(struct vector_queue), GFP_KERNEL);
+ if (result == NULL)
+- goto out_fail;
++ return NULL;
+ result->max_depth = max_size;
+ result->dev = vp->dev;
+ result->mmsg_vector = kmalloc(
+ (sizeof(struct mmsghdr) * max_size), GFP_KERNEL);
++ if (result->mmsg_vector == NULL)
++ goto out_mmsg_fail;
+ result->skbuff_vector = kmalloc(
+ (sizeof(void *) * max_size), GFP_KERNEL);
+- if (result->mmsg_vector == NULL || result->skbuff_vector == NULL)
+- goto out_fail;
++ if (result->skbuff_vector == NULL)
++ goto out_skb_fail;
++
++ /* further failures can be handled safely by destroy_queue*/
+
+ mmsg_vector = result->mmsg_vector;
+ for (i = 0; i < max_size; i++) {
+@@ -563,6 +567,11 @@ static struct vector_queue *create_queue(
+ result->head = 0;
+ result->tail = 0;
+ return result;
++out_skb_fail:
++ kfree(result->mmsg_vector);
++out_mmsg_fail:
++ kfree(result);
++ return NULL;
+ out_fail:
+ destroy_queue(result);
+ return NULL;
+@@ -1232,9 +1241,8 @@ static int vector_net_open(struct net_device *dev)
+
+ if ((vp->options & VECTOR_QDISC_BYPASS) != 0) {
+ if (!uml_raw_enable_qdisc_bypass(vp->fds->rx_fd))
+- vp->options = vp->options | VECTOR_BPF;
++ vp->options |= VECTOR_BPF;
+ }
+-
+ if ((vp->options & VECTOR_BPF) != 0)
+ vp->bpf = uml_vector_default_bpf(vp->fds->rx_fd, dev->dev_addr);
+
+diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
+index 9de7f1e1dede..7d0df78db727 100644
+--- a/arch/x86/entry/entry_64_compat.S
++++ b/arch/x86/entry/entry_64_compat.S
+@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
+ pushq %rdx /* pt_regs->dx */
+ pushq %rcx /* pt_regs->cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+- pushq %r8 /* pt_regs->r8 */
++ pushq $0 /* pt_regs->r8 = 0 */
+ xorl %r8d, %r8d /* nospec r8 */
+- pushq %r9 /* pt_regs->r9 */
++ pushq $0 /* pt_regs->r9 = 0 */
+ xorl %r9d, %r9d /* nospec r9 */
+- pushq %r10 /* pt_regs->r10 */
++ pushq $0 /* pt_regs->r10 = 0 */
+ xorl %r10d, %r10d /* nospec r10 */
+- pushq %r11 /* pt_regs->r11 */
++ pushq $0 /* pt_regs->r11 = 0 */
+ xorl %r11d, %r11d /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
+@@ -374,13 +374,13 @@ ENTRY(entry_INT80_compat)
+ pushq %rcx /* pt_regs->cx */
+ xorl %ecx, %ecx /* nospec cx */
+ pushq $-ENOSYS /* pt_regs->ax */
+- pushq $0 /* pt_regs->r8 = 0 */
++ pushq %r8 /* pt_regs->r8 */
+ xorl %r8d, %r8d /* nospec r8 */
+- pushq $0 /* pt_regs->r9 = 0 */
++ pushq %r9 /* pt_regs->r9 */
+ xorl %r9d, %r9d /* nospec r9 */
+- pushq $0 /* pt_regs->r10 = 0 */
++ pushq %r10 /* pt_regs->r10*/
+ xorl %r10d, %r10d /* nospec r10 */
+- pushq $0 /* pt_regs->r11 = 0 */
++ pushq %r11 /* pt_regs->r11 */
+ xorl %r11d, %r11d /* nospec r11 */
+ pushq %rbx /* pt_regs->rbx */
+ xorl %ebx, %ebx /* nospec rbx */
+diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
+index 042b5e892ed1..14de0432d288 100644
+--- a/arch/x86/include/asm/barrier.h
++++ b/arch/x86/include/asm/barrier.h
+@@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ {
+ unsigned long mask;
+
+- asm ("cmp %1,%2; sbb %0,%0;"
++ asm volatile ("cmp %1,%2; sbb %0,%0;"
+ :"=r" (mask)
+ :"g"(size),"r" (index)
+ :"cc");
+diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
+index efaf2d4f9c3c..d492752f79e1 100644
+--- a/arch/x86/kernel/apic/x2apic_uv_x.c
++++ b/arch/x86/kernel/apic/x2apic_uv_x.c
+@@ -26,6 +26,7 @@
+ #include <linux/delay.h>
+ #include <linux/crash_dump.h>
+ #include <linux/reboot.h>
++#include <linux/memory.h>
+
+ #include <asm/uv/uv_mmrs.h>
+ #include <asm/uv/uv_hub.h>
+@@ -392,6 +393,51 @@ extern int uv_hub_info_version(void)
+ }
+ EXPORT_SYMBOL(uv_hub_info_version);
+
++/* Default UV memory block size is 2GB */
++static unsigned long mem_block_size = (2UL << 30);
++
++/* Kernel parameter to specify UV mem block size */
++static int parse_mem_block_size(char *ptr)
++{
++ unsigned long size = memparse(ptr, NULL);
++
++ /* Size will be rounded down by set_block_size() below */
++ mem_block_size = size;
++ return 0;
++}
++early_param("uv_memblksize", parse_mem_block_size);
++
++static __init int adj_blksize(u32 lgre)
++{
++ unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
++ unsigned long size;
++
++ for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
++ if (IS_ALIGNED(base, size))
++ break;
++
++ if (size >= mem_block_size)
++ return 0;
++
++ mem_block_size = size;
++ return 1;
++}
++
++static __init void set_block_size(void)
++{
++ unsigned int order = ffs(mem_block_size);
++
++ if (order) {
++ /* adjust for ffs return of 1..64 */
++ set_memory_block_size_order(order - 1);
++ pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
++ } else {
++ /* bad or zero value, default to 1UL << 31 (2GB) */
++ pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
++ set_memory_block_size_order(31);
++ }
++}
++
+ /* Build GAM range lookup table: */
+ static __init void build_uv_gr_table(void)
+ {
+@@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
+ << UV_GAM_RANGE_SHFT);
+ int order = 0;
+ char suffix[] = " KMGTPE";
++ int flag = ' ';
+
+ while (size > 9999 && order < sizeof(suffix)) {
+ size /= 1024;
+ order++;
+ }
+
++ /* adjust max block size to current range start */
++ if (gre->type == 1 || gre->type == 2)
++ if (adj_blksize(lgre))
++ flag = '*';
++
+ if (!index) {
+ pr_info("UV: GAM Range Table...\n");
+- pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
++ pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
+ }
+- pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
++ pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n",
+ index++,
+ (unsigned long)lgre << UV_GAM_RANGE_SHFT,
+ (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
+- size, suffix[order],
++ flag, size, suffix[order],
+ gre->type, gre->nasid, gre->sockid, gre->pnode);
+
++ /* update to next range start */
+ lgre = gre->limit;
+ if (sock_min > gre->sockid)
+ sock_min = gre->sockid;
+@@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void)
+
+ build_socket_tables();
+ build_uv_gr_table();
++ set_block_size();
+ uv_init_hub_info(&hub_info);
+ uv_possible_blades = num_possible_nodes();
+ if (!_node_to_pnode)
+diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
+index 5bbd06f38ff6..f34d89c01edc 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
++++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
+@@ -160,6 +160,11 @@ static struct severity {
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
+ USER
+ ),
++ MCESEV(
++ PANIC, "Data load in unrecoverable area of kernel",
++ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
++ KERNEL
++ ),
+ #endif
+ MCESEV(
+ PANIC, "Action required: unknown MCACOD",
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index 42cf2880d0ed..6f7eda9d5297 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -772,23 +772,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
+ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
+ struct pt_regs *regs)
+ {
+- int i, ret = 0;
+ char *tmp;
++ int i;
+
+ for (i = 0; i < mca_cfg.banks; i++) {
+ m->status = mce_rdmsrl(msr_ops.status(i));
+- if (m->status & MCI_STATUS_VAL) {
+- __set_bit(i, validp);
+- if (quirk_no_way_out)
+- quirk_no_way_out(i, m, regs);
+- }
++ if (!(m->status & MCI_STATUS_VAL))
++ continue;
++
++ __set_bit(i, validp);
++ if (quirk_no_way_out)
++ quirk_no_way_out(i, m, regs);
+
+ if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
++ mce_read_aux(m, i);
+ *msg = tmp;
+- ret = 1;
++ return 1;
+ }
+ }
+- return ret;
++ return 0;
+ }
+
+ /*
+@@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
+ lmce = m.mcgstatus & MCG_STATUS_LMCES;
+
+ /*
++ * Local machine check may already know that we have to panic.
++ * Broadcast machine check begins rendezvous in mce_start()
+ * Go through all banks in exclusion of the other CPUs. This way we
+ * don't report duplicated events on shared banks because the first one
+- * to see it will clear it. If this is a Local MCE, then no need to
+- * perform rendezvous.
++ * to see it will clear it.
+ */
+- if (!lmce)
++ if (lmce) {
++ if (no_way_out)
++ mce_panic("Fatal local machine check", &m, msg);
++ } else {
+ order = mce_start(&no_way_out);
++ }
+
+ for (i = 0; i < cfg->banks; i++) {
+ __clear_bit(i, toclear);
+@@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
+ no_way_out = worst >= MCE_PANIC_SEVERITY;
+ } else {
+ /*
+- * Local MCE skipped calling mce_reign()
+- * If we found a fatal error, we need to panic here.
++ * If there was a fatal machine check we should have
++ * already called mce_panic earlier in this function.
++ * Since we re-read the banks, we might have found
++ * something new. Check again to see if we found a
++ * fatal error. We call "mce_severity()" again to
++ * make sure we have the right "msg".
+ */
+- if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
+- mce_panic("Machine check from unknown source",
+- NULL, NULL);
++ if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
++ mce_severity(&m, cfg->tolerant, &msg, true);
++ mce_panic("Local fatal machine check!", &m, msg);
++ }
+ }
+
+ /*
+diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
+index 6a2cb1442e05..aec38a170dbc 100644
+--- a/arch/x86/kernel/e820.c
++++ b/arch/x86/kernel/e820.c
+@@ -1246,6 +1246,7 @@ void __init e820__memblock_setup(void)
+ {
+ int i;
+ u64 end;
++ u64 addr = 0;
+
+ /*
+ * The bootstrap memblock region count maximum is 128 entries
+@@ -1262,13 +1263,21 @@ void __init e820__memblock_setup(void)
+ struct e820_entry *entry = &e820_table->entries[i];
+
+ end = entry->addr + entry->size;
++ if (addr < entry->addr)
++ memblock_reserve(addr, entry->addr - addr);
++ addr = end;
+ if (end != (resource_size_t)end)
+ continue;
+
++ /*
++ * all !E820_TYPE_RAM ranges (including gap ranges) are put
++ * into memblock.reserved to make sure that struct pages in
++ * such regions are not left uninitialized after bootup.
++ */
+ if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
+- continue;
+-
+- memblock_add(entry->addr, entry->size);
++ memblock_reserve(entry->addr, entry->size);
++ else
++ memblock_add(entry->addr, entry->size);
+ }
+
+ /* Throw away partial pages: */
+diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
+index 697a4ce04308..736348ead421 100644
+--- a/arch/x86/kernel/quirks.c
++++ b/arch/x86/kernel/quirks.c
+@@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
+ /* Skylake */
+ static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
+ {
+- u32 capid0;
++ u32 capid0, capid5;
+
+ pci_read_config_dword(pdev, 0x84, &capid0);
++ pci_read_config_dword(pdev, 0x98, &capid5);
+
+- if ((capid0 & 0xc0) == 0xc0)
++ /*
++ * CAPID0{7:6} indicate whether this is an advanced RAS SKU
++ * CAPID5{8:5} indicate that various NVDIMM usage modes are
++ * enabled, so memory machine check recovery is also enabled.
++ */
++ if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
+ static_branch_inc(&mcsafe_key);
++
+ }
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
+ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
+diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
+index 03f3d7695dac..162a31d80ad5 100644
+--- a/arch/x86/kernel/traps.c
++++ b/arch/x86/kernel/traps.c
+@@ -834,16 +834,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
+ char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
+ "simd exception";
+
+- if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
+- return;
+ cond_local_irq_enable(regs);
+
+ if (!user_mode(regs)) {
+- if (!fixup_exception(regs, trapnr)) {
+- task->thread.error_code = error_code;
+- task->thread.trap_nr = trapnr;
++ if (fixup_exception(regs, trapnr))
++ return;
++
++ task->thread.error_code = error_code;
++ task->thread.trap_nr = trapnr;
++
++ if (notify_die(DIE_TRAP, str, regs, error_code,
++ trapnr, SIGFPE) != NOTIFY_STOP)
+ die(str, regs, error_code);
+- }
+ return;
+ }
+
+diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
+index fec82b577c18..cee58a972cb2 100644
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -706,7 +706,9 @@ void __init init_mem_mapping(void)
+ */
+ int devmem_is_allowed(unsigned long pagenr)
+ {
+- if (page_is_ram(pagenr)) {
++ if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE,
++ IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE)
++ != REGION_DISJOINT) {
+ /*
+ * For disallowed memory regions in the low 1MB range,
+ * request that the page be shown as all zeros.
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index 0a400606dea0..20d8bf5fbceb 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -1350,16 +1350,28 @@ int kern_addr_valid(unsigned long addr)
+ /* Amount of ram needed to start using large blocks */
+ #define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
+
++/* Adjustable memory block size */
++static unsigned long set_memory_block_size;
++int __init set_memory_block_size_order(unsigned int order)
++{
++ unsigned long size = 1UL << order;
++
++ if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
++ return -EINVAL;
++
++ set_memory_block_size = size;
++ return 0;
++}
++
+ static unsigned long probe_memory_block_size(void)
+ {
+ unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
+ unsigned long bz;
+
+- /* If this is UV system, always set 2G block size */
+- if (is_uv_system()) {
+- bz = MAX_BLOCK_SIZE;
++ /* If memory block size has been set, then use it */
++ bz = set_memory_block_size;
++ if (bz)
+ goto done;
+- }
+
+ /* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
+ if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
+diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
+index bed7e7f4e44c..84fbfaba8404 100644
+--- a/arch/x86/platform/efi/efi_64.c
++++ b/arch/x86/platform/efi/efi_64.c
+@@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
+ pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
+ set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
+
+- if (!(pgd_val(*pgd) & _PAGE_PRESENT))
++ if (!pgd_present(*pgd))
+ continue;
+
+ for (i = 0; i < PTRS_PER_P4D; i++) {
+ p4d = p4d_offset(pgd,
+ pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
+
+- if (!(p4d_val(*p4d) & _PAGE_PRESENT))
++ if (!p4d_present(*p4d))
+ continue;
+
+ pud = (pud_t *)p4d_page_vaddr(*p4d);
+diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
+index 2e20ae2fa2d6..e3b18ad49889 100644
+--- a/arch/x86/xen/smp_pv.c
++++ b/arch/x86/xen/smp_pv.c
+@@ -32,6 +32,7 @@
+ #include <xen/interface/vcpu.h>
+ #include <xen/interface/xenpmu.h>
+
++#include <asm/spec-ctrl.h>
+ #include <asm/xen/interface.h>
+ #include <asm/xen/hypercall.h>
+
+@@ -70,6 +71,8 @@ static void cpu_bringup(void)
+ cpu_data(cpu).x86_max_cores = 1;
+ set_cpu_sibling_map(cpu);
+
++ speculative_store_bypass_ht_init();
++
+ xen_setup_cpu_clockevents();
+
+ notify_cpu_starting(cpu);
+@@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
+ }
+ set_cpu_sibling_map(0);
+
++ speculative_store_bypass_ht_init();
++
+ xen_pmu_init(0);
+
+ if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
+diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
+index 32c5207f1226..84a70b8cbe33 100644
+--- a/arch/xtensa/kernel/traps.c
++++ b/arch/xtensa/kernel/traps.c
+@@ -338,7 +338,7 @@ do_unaligned_user (struct pt_regs *regs)
+ info.si_errno = 0;
+ info.si_code = BUS_ADRALN;
+ info.si_addr = (void *) regs->excvaddr;
+- force_sig_info(SIGSEGV, &info, current);
++ force_sig_info(SIGBUS, &info, current);
+
+ }
+ #endif
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 85909b431eb0..b559b9d4f1a2 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -3487,6 +3487,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
+ dst->cpu = src->cpu;
+ dst->__sector = blk_rq_pos(src);
+ dst->__data_len = blk_rq_bytes(src);
++ if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
++ dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
++ dst->special_vec = src->special_vec;
++ }
+ dst->nr_phys_segments = src->nr_phys_segments;
+ dst->ioprio = src->ioprio;
+ dst->extra_len = src->extra_len;
+diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
+index 7d81e6bb461a..b6cabac4b62b 100644
+--- a/crypto/asymmetric_keys/x509_cert_parser.c
++++ b/crypto/asymmetric_keys/x509_cert_parser.c
+@@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
+ return -EINVAL;
+ }
+
++ if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
++ /* Discard the BIT STRING metadata */
++ if (vlen < 1 || *(const u8 *)value != 0)
++ return -EBADMSG;
++
++ value++;
++ vlen--;
++ }
++
+ ctx->cert->raw_sig = value;
+ ctx->cert->raw_sig_size = vlen;
+ return 0;
+diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
+index 2bcffec8dbf0..eb091375c873 100644
+--- a/drivers/acpi/acpi_lpss.c
++++ b/drivers/acpi/acpi_lpss.c
+@@ -22,6 +22,7 @@
+ #include <linux/pm_domain.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/pwm.h>
++#include <linux/suspend.h>
+ #include <linux/delay.h>
+
+ #include "internal.h"
+@@ -229,11 +230,13 @@ static const struct lpss_device_desc lpt_sdio_dev_desc = {
+
+ static const struct lpss_device_desc byt_pwm_dev_desc = {
+ .flags = LPSS_SAVE_CTX,
++ .prv_offset = 0x800,
+ .setup = byt_pwm_setup,
+ };
+
+ static const struct lpss_device_desc bsw_pwm_dev_desc = {
+ .flags = LPSS_SAVE_CTX | LPSS_NO_D3_DELAY,
++ .prv_offset = 0x800,
+ .setup = bsw_pwm_setup,
+ };
+
+@@ -940,9 +943,10 @@ static void lpss_iosf_exit_d3_state(void)
+ mutex_unlock(&lpss_iosf_mutex);
+ }
+
+-static int acpi_lpss_suspend(struct device *dev, bool wakeup)
++static int acpi_lpss_suspend(struct device *dev, bool runtime)
+ {
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
++ bool wakeup = runtime || device_may_wakeup(dev);
+ int ret;
+
+ if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
+@@ -955,13 +959,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup)
+ * wrong status for devices being about to be powered off. See
+ * lpss_iosf_enter_d3_state() for further information.
+ */
+- if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
++ if ((runtime || !pm_suspend_via_firmware()) &&
++ lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+ lpss_iosf_enter_d3_state();
+
+ return ret;
+ }
+
+-static int acpi_lpss_resume(struct device *dev)
++static int acpi_lpss_resume(struct device *dev, bool runtime)
+ {
+ struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+ int ret;
+@@ -970,7 +975,8 @@ static int acpi_lpss_resume(struct device *dev)
+ * This call is kept first to be in symmetry with
+ * acpi_lpss_runtime_suspend() one.
+ */
+- if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
++ if ((runtime || !pm_resume_via_firmware()) &&
++ lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+ lpss_iosf_exit_d3_state();
+
+ ret = acpi_dev_resume(dev);
+@@ -994,12 +1000,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
+ return 0;
+
+ ret = pm_generic_suspend_late(dev);
+- return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
++ return ret ? ret : acpi_lpss_suspend(dev, false);
+ }
+
+ static int acpi_lpss_resume_early(struct device *dev)
+ {
+- int ret = acpi_lpss_resume(dev);
++ int ret = acpi_lpss_resume(dev, false);
+
+ return ret ? ret : pm_generic_resume_early(dev);
+ }
+@@ -1014,7 +1020,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
+
+ static int acpi_lpss_runtime_resume(struct device *dev)
+ {
+- int ret = acpi_lpss_resume(dev);
++ int ret = acpi_lpss_resume(dev, true);
+
+ return ret ? ret : pm_generic_runtime_resume(dev);
+ }
+diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
+index 2c2ed9cf8796..f9413755177b 100644
+--- a/drivers/auxdisplay/Kconfig
++++ b/drivers/auxdisplay/Kconfig
+@@ -14,9 +14,6 @@ menuconfig AUXDISPLAY
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+-config CHARLCD
+- tristate "Character LCD core support" if COMPILE_TEST
+-
+ if AUXDISPLAY
+
+ config HD44780
+@@ -157,8 +154,6 @@ config HT16K33
+ Say yes here to add support for Holtek HT16K33, RAM mapping 16*8
+ LED controller driver with keyscan.
+
+-endif # AUXDISPLAY
+-
+ config ARM_CHARLCD
+ bool "ARM Ltd. Character LCD Driver"
+ depends on PLAT_VERSATILE
+@@ -169,6 +164,8 @@ config ARM_CHARLCD
+ line and the Linux version on the second line, but that's
+ still useful.
+
++endif # AUXDISPLAY
++
+ config PANEL
+ tristate "Parallel port LCD/Keypad Panel support"
+ depends on PARPORT
+@@ -448,3 +445,6 @@ config PANEL_BOOT_MESSAGE
+ printf()-formatted message is valid with newline and escape codes.
+
+ endif # PANEL
++
++config CHARLCD
++ tristate "Character LCD core support" if COMPILE_TEST
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index d680fd030316..f4ba878dd2dc 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -216,6 +216,13 @@ struct device_link *device_link_add(struct device *consumer,
+ link->rpm_active = true;
+ }
+ pm_runtime_new_link(consumer);
++ /*
++ * If the link is being added by the consumer driver at probe
++ * time, balance the decrementation of the supplier's runtime PM
++ * usage counter after consumer probe in driver_probe_device().
++ */
++ if (consumer->links.status == DL_DEV_PROBING)
++ pm_runtime_get_noresume(supplier);
+ }
+ get_device(supplier);
+ link->supplier = supplier;
+@@ -235,12 +242,12 @@ struct device_link *device_link_add(struct device *consumer,
+ switch (consumer->links.status) {
+ case DL_DEV_PROBING:
+ /*
+- * Balance the decrementation of the supplier's
+- * runtime PM usage counter after consumer probe
+- * in driver_probe_device().
++ * Some callers expect the link creation during
++ * consumer driver probe to resume the supplier
++ * even without DL_FLAG_RPM_ACTIVE.
+ */
+ if (flags & DL_FLAG_PM_RUNTIME)
+- pm_runtime_get_sync(supplier);
++ pm_runtime_resume(supplier);
+
+ link->status = DL_STATE_CONSUMER_PROBE;
+ break;
+diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
+index 1ea0e2502e8e..ef6cf3d5d2b5 100644
+--- a/drivers/base/power/domain.c
++++ b/drivers/base/power/domain.c
+@@ -2246,6 +2246,9 @@ int genpd_dev_pm_attach(struct device *dev)
+ genpd_lock(pd);
+ ret = genpd_power_on(pd, 0);
+ genpd_unlock(pd);
++
++ if (ret)
++ genpd_remove_device(pd, dev);
+ out:
+ return ret ? -EPROBE_DEFER : 0;
+ }
+diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
+index 33b36fea1d73..472afeed1d2f 100644
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3397,7 +3397,6 @@ static void cancel_tasks_sync(struct rbd_device *rbd_dev)
+ {
+ dout("%s rbd_dev %p\n", __func__, rbd_dev);
+
+- cancel_delayed_work_sync(&rbd_dev->watch_dwork);
+ cancel_work_sync(&rbd_dev->acquired_lock_work);
+ cancel_work_sync(&rbd_dev->released_lock_work);
+ cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+@@ -3415,6 +3414,7 @@ static void rbd_unregister_watch(struct rbd_device *rbd_dev)
+ rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
+ mutex_unlock(&rbd_dev->watch_mutex);
+
++ cancel_delayed_work_sync(&rbd_dev->watch_dwork);
+ ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
+ }
+
+diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
+index 05ec530b8a3a..330e9b29e145 100644
+--- a/drivers/bluetooth/hci_qca.c
++++ b/drivers/bluetooth/hci_qca.c
+@@ -935,6 +935,12 @@ static int qca_setup(struct hci_uart *hu)
+ } else if (ret == -ENOENT) {
+ /* No patch/nvm-config found, run with original fw/config */
+ ret = 0;
++ } else if (ret == -EAGAIN) {
++ /*
++ * Userspace firmware loader will return -EAGAIN in case no
++ * patch/nvm-config is found, so run with original fw/config.
++ */
++ ret = 0;
+ }
+
+ /* Setup bdaddr */
+diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
+index 91bb98c42a1c..aaf9e5afaad4 100644
+--- a/drivers/char/hw_random/core.c
++++ b/drivers/char/hw_random/core.c
+@@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);
+
+ void hwrng_unregister(struct hwrng *rng)
+ {
++ int err;
++
+ mutex_lock(&rng_mutex);
+
+ list_del(&rng->list);
+- if (current_rng == rng)
+- enable_best_rng();
++ if (current_rng == rng) {
++ err = enable_best_rng();
++ if (err) {
++ drop_current_rng();
++ cur_rng_set_by_user = 0;
++ }
++ }
+
+ if (list_empty(&rng_list)) {
+ mutex_unlock(&rng_mutex);
+diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
+index fd4ea8d87d4b..a3397664f800 100644
+--- a/drivers/char/ipmi/ipmi_bt_sm.c
++++ b/drivers/char/ipmi/ipmi_bt_sm.c
+@@ -504,11 +504,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
+ if (status & BT_H_BUSY) /* clear a leftover H_BUSY */
+ BT_CONTROL(BT_H_BUSY);
+
++ bt->timeout = bt->BT_CAP_req2rsp;
++
+ /* Read BT capabilities if it hasn't been done yet */
+ if (!bt->BT_CAP_outreqs)
+ BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
+ SI_SM_CALL_WITHOUT_DELAY);
+- bt->timeout = bt->BT_CAP_req2rsp;
+ BT_SI_SM_RETURN(SI_SM_IDLE);
+
+ case BT_STATE_XACTION_START:
+diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
+index 230b99288024..e4a04b2d3c32 100644
+--- a/drivers/char/tpm/tpm-dev-common.c
++++ b/drivers/char/tpm/tpm-dev-common.c
+@@ -37,7 +37,7 @@ static void timeout_work(struct work_struct *work)
+ struct file_priv *priv = container_of(work, struct file_priv, work);
+
+ mutex_lock(&priv->buffer_mutex);
+- atomic_set(&priv->data_pending, 0);
++ priv->data_pending = 0;
+ memset(priv->data_buffer, 0, sizeof(priv->data_buffer));
+ mutex_unlock(&priv->buffer_mutex);
+ }
+@@ -46,7 +46,6 @@ void tpm_common_open(struct file *file, struct tpm_chip *chip,
+ struct file_priv *priv)
+ {
+ priv->chip = chip;
+- atomic_set(&priv->data_pending, 0);
+ mutex_init(&priv->buffer_mutex);
+ timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
+ INIT_WORK(&priv->work, timeout_work);
+@@ -58,29 +57,24 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
+ size_t size, loff_t *off)
+ {
+ struct file_priv *priv = file->private_data;
+- ssize_t ret_size;
+- ssize_t orig_ret_size;
++ ssize_t ret_size = 0;
+ int rc;
+
+ del_singleshot_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->work);
+- ret_size = atomic_read(&priv->data_pending);
+- if (ret_size > 0) { /* relay data */
+- orig_ret_size = ret_size;
+- if (size < ret_size)
+- ret_size = size;
++ mutex_lock(&priv->buffer_mutex);
+
+- mutex_lock(&priv->buffer_mutex);
++ if (priv->data_pending) {
++ ret_size = min_t(ssize_t, size, priv->data_pending);
+ rc = copy_to_user(buf, priv->data_buffer, ret_size);
+- memset(priv->data_buffer, 0, orig_ret_size);
++ memset(priv->data_buffer, 0, priv->data_pending);
+ if (rc)
+ ret_size = -EFAULT;
+
+- mutex_unlock(&priv->buffer_mutex);
++ priv->data_pending = 0;
+ }
+
+- atomic_set(&priv->data_pending, 0);
+-
++ mutex_unlock(&priv->buffer_mutex);
+ return ret_size;
+ }
+
+@@ -91,17 +85,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ size_t in_size = size;
+ ssize_t out_size;
+
++ if (in_size > TPM_BUFSIZE)
++ return -E2BIG;
++
++ mutex_lock(&priv->buffer_mutex);
++
+ /* Cannot perform a write until the read has cleared either via
+ * tpm_read or a user_read_timer timeout. This also prevents split
+ * buffered writes from blocking here.
+ */
+- if (atomic_read(&priv->data_pending) != 0)
++ if (priv->data_pending != 0) {
++ mutex_unlock(&priv->buffer_mutex);
+ return -EBUSY;
+-
+- if (in_size > TPM_BUFSIZE)
+- return -E2BIG;
+-
+- mutex_lock(&priv->buffer_mutex);
++ }
+
+ if (copy_from_user
+ (priv->data_buffer, (void __user *) buf, in_size)) {
+@@ -132,7 +128,7 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
+ return out_size;
+ }
+
+- atomic_set(&priv->data_pending, out_size);
++ priv->data_pending = out_size;
+ mutex_unlock(&priv->buffer_mutex);
+
+ /* Set a timeout by which the reader must come claim the result */
+@@ -149,5 +145,5 @@ void tpm_common_release(struct file *file, struct file_priv *priv)
+ del_singleshot_timer_sync(&priv->user_read_timer);
+ flush_work(&priv->work);
+ file->private_data = NULL;
+- atomic_set(&priv->data_pending, 0);
++ priv->data_pending = 0;
+ }
+diff --git a/drivers/char/tpm/tpm-dev.h b/drivers/char/tpm/tpm-dev.h
+index ba3b6f9dacf7..b24cfb4d3ee1 100644
+--- a/drivers/char/tpm/tpm-dev.h
++++ b/drivers/char/tpm/tpm-dev.h
+@@ -8,7 +8,7 @@ struct file_priv {
+ struct tpm_chip *chip;
+
+ /* Data passed to and from the tpm via the read/write calls */
+- atomic_t data_pending;
++ size_t data_pending;
+ struct mutex buffer_mutex;
+
+ struct timer_list user_read_timer; /* user needs to claim result */
+diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
+index 4e4014eabdb9..6122d3276f72 100644
+--- a/drivers/char/tpm/tpm2-space.c
++++ b/drivers/char/tpm/tpm2-space.c
+@@ -102,8 +102,9 @@ static int tpm2_load_context(struct tpm_chip *chip, u8 *buf,
+ * TPM_RC_REFERENCE_H0 means the session has been
+ * flushed outside the space
+ */
+- rc = -ENOENT;
++ *handle = 0;
+ tpm_buf_destroy(&tbuf);
++ return -ENOENT;
+ } else if (rc > 0) {
+ dev_warn(&chip->dev, "%s: failed with a TPM error 0x%04X\n",
+ __func__, rc);
+diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
+index 7d3223fc7161..72b6091eb7b9 100644
+--- a/drivers/clk/at91/clk-pll.c
++++ b/drivers/clk/at91/clk-pll.c
+@@ -132,19 +132,8 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+ {
+ struct clk_pll *pll = to_clk_pll(hw);
+- unsigned int pllr;
+- u16 mul;
+- u8 div;
+-
+- regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
+-
+- div = PLL_DIV(pllr);
+- mul = PLL_MUL(pllr, pll->layout);
+-
+- if (!div || !mul)
+- return 0;
+
+- return (parent_rate / div) * (mul + 1);
++ return (parent_rate / pll->div) * (pll->mul + 1);
+ }
+
+ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
+diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
+index 5eb50c31e455..2c23e7d7ba28 100644
+--- a/drivers/clk/clk-aspeed.c
++++ b/drivers/clk/clk-aspeed.c
+@@ -88,7 +88,7 @@ static const struct aspeed_gate_data aspeed_gates[] = {
+ [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
+ [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
+ [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
+- [ASPEED_CLK_GATE_BCLK] = { 4, 10, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
++ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
+ [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
+ [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
+ [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
+@@ -297,7 +297,7 @@ static const u8 aspeed_resets[] = {
+ [ASPEED_RESET_JTAG_MASTER] = 22,
+ [ASPEED_RESET_MIC] = 18,
+ [ASPEED_RESET_PWM] = 9,
+- [ASPEED_RESET_PCIVGA] = 8,
++ [ASPEED_RESET_PECI] = 10,
+ [ASPEED_RESET_I2C] = 2,
+ [ASPEED_RESET_AHB] = 1,
+ };
+diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
+index d0524ec71aad..d0d320180c51 100644
+--- a/drivers/clk/meson/meson8b.c
++++ b/drivers/clk/meson/meson8b.c
+@@ -246,6 +246,13 @@ static struct clk_regmap meson8b_fclk_div2 = {
+ .ops = &clk_regmap_gate_ops,
+ .parent_names = (const char *[]){ "fclk_div2_div" },
+ .num_parents = 1,
++ /*
++ * FIXME: Ethernet with a RGMII PHYs is not working if
++ * fclk_div2 is disabled. it is currently unclear why this
++ * is. keep it enabled until the Ethernet driver knows how
++ * to manage this clock.
++ */
++ .flags = CLK_IS_CRITICAL,
+ },
+ };
+
+diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
+index 4e88e980fb76..69a7c756658b 100644
+--- a/drivers/clk/renesas/renesas-cpg-mssr.c
++++ b/drivers/clk/renesas/renesas-cpg-mssr.c
+@@ -258,8 +258,9 @@ struct clk *cpg_mssr_clk_src_twocell_get(struct of_phandle_args *clkspec,
+ dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
+ PTR_ERR(clk));
+ else
+- dev_dbg(dev, "clock (%u, %u) is %pC at %pCr Hz\n",
+- clkspec->args[0], clkspec->args[1], clk, clk);
++ dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
++ clkspec->args[0], clkspec->args[1], clk,
++ clk_get_rate(clk));
+ return clk;
+ }
+
+@@ -326,7 +327,7 @@ static void __init cpg_mssr_register_core_clk(const struct cpg_core_clk *core,
+ if (IS_ERR_OR_NULL(clk))
+ goto fail;
+
+- dev_dbg(dev, "Core clock %pC at %pCr Hz\n", clk, clk);
++ dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
+ priv->clks[id] = clk;
+ return;
+
+@@ -392,7 +393,7 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod,
+ if (IS_ERR(clk))
+ goto fail;
+
+- dev_dbg(dev, "Module clock %pC at %pCr Hz\n", clk, clk);
++ dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
+ priv->clks[id] = clk;
+ priv->smstpcr_saved[clock->index / 32].mask |= BIT(clock->index % 32);
+ return;
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 17e566afbb41..bd3f0a9d5e60 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -284,6 +284,7 @@ struct pstate_funcs {
+ static struct pstate_funcs pstate_funcs __read_mostly;
+
+ static int hwp_active __read_mostly;
++static int hwp_mode_bdw __read_mostly;
+ static bool per_cpu_limits __read_mostly;
+
+ static struct cpufreq_driver *intel_pstate_driver __read_mostly;
+@@ -1370,7 +1371,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+ cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
+ cpu->pstate.scaling = pstate_funcs.get_scaling();
+ cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
+- cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
++
++ if (hwp_active && !hwp_mode_bdw) {
++ unsigned int phy_max, current_max;
++
++ intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
++ cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
++ } else {
++ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
++ }
+
+ if (pstate_funcs.get_aperf_mperf_shift)
+ cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+@@ -2252,28 +2261,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
+ static inline void intel_pstate_request_control_from_smm(void) {}
+ #endif /* CONFIG_ACPI */
+
++#define INTEL_PSTATE_HWP_BROADWELL 0x01
++
++#define ICPU_HWP(model, hwp_mode) \
++ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
++
+ static const struct x86_cpu_id hwp_support_ids[] __initconst = {
+- { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
++ ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
++ ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
++ ICPU_HWP(X86_MODEL_ANY, 0),
+ {}
+ };
+
+ static int __init intel_pstate_init(void)
+ {
++ const struct x86_cpu_id *id;
+ int rc;
+
+ if (no_load)
+ return -ENODEV;
+
+- if (x86_match_cpu(hwp_support_ids)) {
++ id = x86_match_cpu(hwp_support_ids);
++ if (id) {
+ copy_cpu_funcs(&core_funcs);
+ if (!no_hwp) {
+ hwp_active++;
++ hwp_mode_bdw = id->driver_data;
+ intel_pstate.attr = hwp_cpufreq_attrs;
+ goto hwp_cpu_matched;
+ }
+ } else {
+- const struct x86_cpu_id *id;
+-
+ id = x86_match_cpu(intel_pstate_cpu_ids);
+ if (!id)
+ return -ENODEV;
+diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
+index 1a8234e706bc..d29e4f041efe 100644
+--- a/drivers/cpuidle/cpuidle-powernv.c
++++ b/drivers/cpuidle/cpuidle-powernv.c
+@@ -43,9 +43,31 @@ struct stop_psscr_table {
+
+ static struct stop_psscr_table stop_psscr_table[CPUIDLE_STATE_MAX] __read_mostly;
+
+-static u64 snooze_timeout __read_mostly;
++static u64 default_snooze_timeout __read_mostly;
+ static bool snooze_timeout_en __read_mostly;
+
++static u64 get_snooze_timeout(struct cpuidle_device *dev,
++ struct cpuidle_driver *drv,
++ int index)
++{
++ int i;
++
++ if (unlikely(!snooze_timeout_en))
++ return default_snooze_timeout;
++
++ for (i = index + 1; i < drv->state_count; i++) {
++ struct cpuidle_state *s = &drv->states[i];
++ struct cpuidle_state_usage *su = &dev->states_usage[i];
++
++ if (s->disabled || su->disable)
++ continue;
++
++ return s->target_residency * tb_ticks_per_usec;
++ }
++
++ return default_snooze_timeout;
++}
++
+ static int snooze_loop(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv,
+ int index)
+@@ -56,7 +78,7 @@ static int snooze_loop(struct cpuidle_device *dev,
+
+ local_irq_enable();
+
+- snooze_exit_time = get_tb() + snooze_timeout;
++ snooze_exit_time = get_tb() + get_snooze_timeout(dev, drv, index);
+ ppc64_runlatch_off();
+ HMT_very_low();
+ while (!need_resched()) {
+@@ -465,11 +487,9 @@ static int powernv_idle_probe(void)
+ cpuidle_state_table = powernv_states;
+ /* Device tree can indicate more idle states */
+ max_idle_state = powernv_add_idle_states();
+- if (max_idle_state > 1) {
++ default_snooze_timeout = TICK_USEC * tb_ticks_per_usec;
++ if (max_idle_state > 1)
+ snooze_timeout_en = true;
+- snooze_timeout = powernv_states[1].target_residency *
+- tb_ticks_per_usec;
+- }
+ } else
+ return -ENODEV;
+
+diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
+index 9d08cea3f1b0..9f5f35362f27 100644
+--- a/drivers/firmware/efi/libstub/tpm.c
++++ b/drivers/firmware/efi/libstub/tpm.c
+@@ -64,7 +64,7 @@ void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
+ efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
+ efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
+ efi_status_t status;
+- efi_physical_addr_t log_location, log_last_entry;
++ efi_physical_addr_t log_location = 0, log_last_entry = 0;
+ struct linux_efi_tpm_eventlog *log_tbl = NULL;
+ unsigned long first_entry_addr, last_entry_addr;
+ size_t log_size, last_entry_size;
+diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
+index 3b73dee6fdc6..e97105ae4158 100644
+--- a/drivers/hwmon/k10temp.c
++++ b/drivers/hwmon/k10temp.c
+@@ -37,6 +37,10 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319");
+ /* Provide lock for writing to NB_SMU_IND_ADDR */
+ static DEFINE_MUTEX(nb_smu_ind_mutex);
+
++#ifndef PCI_DEVICE_ID_AMD_15H_M70H_NB_F3
++#define PCI_DEVICE_ID_AMD_15H_M70H_NB_F3 0x15b3
++#endif
++
+ #ifndef PCI_DEVICE_ID_AMD_17H_DF_F3
+ #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463
+ #endif
+@@ -320,6 +324,7 @@ static const struct pci_device_id k10temp_id_table[] = {
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
++ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M70H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
+diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
+index 3df0efd69ae3..1147bddb8b2c 100644
+--- a/drivers/i2c/algos/i2c-algo-bit.c
++++ b/drivers/i2c/algos/i2c-algo-bit.c
+@@ -649,11 +649,6 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
+ if (bit_adap->getscl == NULL)
+ adap->quirks = &i2c_bit_quirk_no_clk_stretch;
+
+- /* Bring bus to a known state. Looks like STOP if bus is not free yet */
+- setscl(bit_adap, 1);
+- udelay(bit_adap->udelay);
+- setsda(bit_adap, 1);
+-
+ ret = add_adapter(adap);
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
+index 58abb3eced58..20b81bec0b0b 100644
+--- a/drivers/i2c/busses/i2c-gpio.c
++++ b/drivers/i2c/busses/i2c-gpio.c
+@@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
+ * required for an I2C bus.
+ */
+ if (pdata->scl_is_open_drain)
+- gflags = GPIOD_OUT_LOW;
++ gflags = GPIOD_OUT_HIGH;
+ else
+- gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
++ gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
+ priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
+ if (IS_ERR(priv->scl))
+ return PTR_ERR(priv->scl);
+diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
+index f33dadf7b262..562f125235db 100644
+--- a/drivers/iio/accel/sca3000.c
++++ b/drivers/iio/accel/sca3000.c
+@@ -1277,7 +1277,7 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
+ {
+ struct iio_buffer *buffer;
+
+- buffer = iio_kfifo_allocate();
++ buffer = devm_iio_kfifo_allocate(&indio_dev->dev);
+ if (!buffer)
+ return -ENOMEM;
+
+@@ -1287,11 +1287,6 @@ static int sca3000_configure_ring(struct iio_dev *indio_dev)
+ return 0;
+ }
+
+-static void sca3000_unconfigure_ring(struct iio_dev *indio_dev)
+-{
+- iio_kfifo_free(indio_dev->buffer);
+-}
+-
+ static inline
+ int __sca3000_hw_ring_state_set(struct iio_dev *indio_dev, bool state)
+ {
+@@ -1546,8 +1541,6 @@ static int sca3000_remove(struct spi_device *spi)
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+
+- sca3000_unconfigure_ring(indio_dev);
+-
+ return 0;
+ }
+
+diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
+index 70fbf92f9827..03a5f7d6cb0c 100644
+--- a/drivers/iio/adc/ad7791.c
++++ b/drivers/iio/adc/ad7791.c
+@@ -244,58 +244,9 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
+ return -EINVAL;
+ }
+
+-static const char * const ad7791_sample_freq_avail[] = {
+- [AD7791_FILTER_RATE_120] = "120",
+- [AD7791_FILTER_RATE_100] = "100",
+- [AD7791_FILTER_RATE_33_3] = "33.3",
+- [AD7791_FILTER_RATE_20] = "20",
+- [AD7791_FILTER_RATE_16_6] = "16.6",
+- [AD7791_FILTER_RATE_16_7] = "16.7",
+- [AD7791_FILTER_RATE_13_3] = "13.3",
+- [AD7791_FILTER_RATE_9_5] = "9.5",
+-};
+-
+-static ssize_t ad7791_read_frequency(struct device *dev,
+- struct device_attribute *attr, char *buf)
+-{
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+- struct ad7791_state *st = iio_priv(indio_dev);
+- unsigned int rate = st->filter & AD7791_FILTER_RATE_MASK;
+-
+- return sprintf(buf, "%s\n", ad7791_sample_freq_avail[rate]);
+-}
+-
+-static ssize_t ad7791_write_frequency(struct device *dev,
+- struct device_attribute *attr, const char *buf, size_t len)
+-{
+- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+- struct ad7791_state *st = iio_priv(indio_dev);
+- int i, ret;
+-
+- i = sysfs_match_string(ad7791_sample_freq_avail, buf);
+- if (i < 0)
+- return i;
+-
+- ret = iio_device_claim_direct_mode(indio_dev);
+- if (ret)
+- return ret;
+- st->filter &= ~AD7791_FILTER_RATE_MASK;
+- st->filter |= i;
+- ad_sd_write_reg(&st->sd, AD7791_REG_FILTER, sizeof(st->filter),
+- st->filter);
+- iio_device_release_direct_mode(indio_dev);
+-
+- return len;
+-}
+-
+-static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
+- ad7791_read_frequency,
+- ad7791_write_frequency);
+-
+ static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("120 100 33.3 20 16.7 16.6 13.3 9.5");
+
+ static struct attribute *ad7791_attributes[] = {
+- &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+ };
+diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
+index 2b6c9b516070..d76455edd292 100644
+--- a/drivers/infiniband/core/umem.c
++++ b/drivers/infiniband/core/umem.c
+@@ -119,16 +119,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
+ umem->length = size;
+ umem->address = addr;
+ umem->page_shift = PAGE_SHIFT;
+- /*
+- * We ask for writable memory if any of the following
+- * access flags are set. "Local write" and "remote write"
+- * obviously require write access. "Remote atomic" can do
+- * things like fetch and add, which will modify memory, and
+- * "MW bind" can change permissions by binding a window.
+- */
+- umem->writable = !!(access &
+- (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
+- IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
++ umem->writable = ib_access_writable(access);
+
+ if (access & IB_ACCESS_ON_DEMAND) {
+ ret = ib_umem_odp_get(context, umem, access);
+diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
+index 4445d8ee9314..2d34a9c827b7 100644
+--- a/drivers/infiniband/core/uverbs_main.c
++++ b/drivers/infiniband/core/uverbs_main.c
+@@ -734,10 +734,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
+ if (ret)
+ return ret;
+
+- if (!file->ucontext &&
+- (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended))
+- return -EINVAL;
+-
+ if (extended) {
+ if (count < (sizeof(hdr) + sizeof(ex_hdr)))
+ return -EINVAL;
+@@ -757,6 +753,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
+ goto out;
+ }
+
++ /*
++ * Must be after the ib_dev check, as once the RCU clears ib_dev ==
++ * NULL means ucontext == NULL
++ */
++ if (!file->ucontext &&
++ (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
++ ret = -EINVAL;
++ goto out;
++ }
++
+ if (!verify_command_mask(ib_dev, command, extended)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
+index 6ddfb1fade79..def3bc1e6447 100644
+--- a/drivers/infiniband/core/verbs.c
++++ b/drivers/infiniband/core/verbs.c
+@@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
+
+ /* Completion queues */
+
+-struct ib_cq *ib_create_cq(struct ib_device *device,
+- ib_comp_handler comp_handler,
+- void (*event_handler)(struct ib_event *, void *),
+- void *cq_context,
+- const struct ib_cq_init_attr *cq_attr)
++struct ib_cq *__ib_create_cq(struct ib_device *device,
++ ib_comp_handler comp_handler,
++ void (*event_handler)(struct ib_event *, void *),
++ void *cq_context,
++ const struct ib_cq_init_attr *cq_attr,
++ const char *caller)
+ {
+ struct ib_cq *cq;
+
+@@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
+ cq->cq_context = cq_context;
+ atomic_set(&cq->usecnt, 0);
+ cq->res.type = RDMA_RESTRACK_CQ;
++ cq->res.kern_name = caller;
+ rdma_restrack_add(&cq->res);
+ }
+
+ return cq;
+ }
+-EXPORT_SYMBOL(ib_create_cq);
++EXPORT_SYMBOL(__ib_create_cq);
+
+ int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
+ {
+diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
+index e6bdd0c1e80a..ebccc4c84827 100644
+--- a/drivers/infiniband/hw/hfi1/chip.c
++++ b/drivers/infiniband/hw/hfi1/chip.c
+@@ -6829,7 +6829,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd)
+ }
+ rcvmask = HFI1_RCVCTRL_CTXT_ENB;
+ /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */
+- rcvmask |= HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ?
++ rcvmask |= rcd->rcvhdrtail_kvaddr ?
+ HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS;
+ hfi1_rcvctrl(dd, rcvmask, rcd);
+ hfi1_rcd_put(rcd);
+@@ -8355,7 +8355,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd)
+ u32 tail;
+ int present;
+
+- if (!HFI1_CAP_IS_KSET(DMA_RTAIL))
++ if (!rcd->rcvhdrtail_kvaddr)
+ present = (rcd->seq_cnt ==
+ rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd))));
+ else /* is RDMA rtail */
+@@ -11823,7 +11823,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
+ /* reset the tail and hdr addresses, and sequence count */
+ write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR,
+ rcd->rcvhdrq_dma);
+- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL))
++ if (rcd->rcvhdrtail_kvaddr)
+ write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR,
+ rcd->rcvhdrqtailaddr_dma);
+ rcd->seq_cnt = 1;
+@@ -11903,7 +11903,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op,
+ rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
+ if (op & HFI1_RCVCTRL_INTRAVAIL_DIS)
+ rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK;
+- if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma)
++ if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr)
+ rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK;
+ if (op & HFI1_RCVCTRL_TAILUPD_DIS) {
+ /* See comment on RcvCtxtCtrl.TailUpd above */
+diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
+index 852173bf05d0..5343960610fe 100644
+--- a/drivers/infiniband/hw/hfi1/debugfs.c
++++ b/drivers/infiniband/hw/hfi1/debugfs.c
+@@ -1227,7 +1227,8 @@ DEBUGFS_FILE_OPS(fault_stats);
+
+ static void fault_exit_opcode_debugfs(struct hfi1_ibdev *ibd)
+ {
+- debugfs_remove_recursive(ibd->fault_opcode->dir);
++ if (ibd->fault_opcode)
++ debugfs_remove_recursive(ibd->fault_opcode->dir);
+ kfree(ibd->fault_opcode);
+ ibd->fault_opcode = NULL;
+ }
+@@ -1255,6 +1256,7 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd)
+ &ibd->fault_opcode->attr);
+ if (IS_ERR(ibd->fault_opcode->dir)) {
+ kfree(ibd->fault_opcode);
++ ibd->fault_opcode = NULL;
+ return -ENOENT;
+ }
+
+@@ -1278,7 +1280,8 @@ static int fault_init_opcode_debugfs(struct hfi1_ibdev *ibd)
+
+ static void fault_exit_packet_debugfs(struct hfi1_ibdev *ibd)
+ {
+- debugfs_remove_recursive(ibd->fault_packet->dir);
++ if (ibd->fault_packet)
++ debugfs_remove_recursive(ibd->fault_packet->dir);
+ kfree(ibd->fault_packet);
+ ibd->fault_packet = NULL;
+ }
+@@ -1304,6 +1307,7 @@ static int fault_init_packet_debugfs(struct hfi1_ibdev *ibd)
+ &ibd->fault_opcode->attr);
+ if (IS_ERR(ibd->fault_packet->dir)) {
+ kfree(ibd->fault_packet);
++ ibd->fault_packet = NULL;
+ return -ENOENT;
+ }
+
+diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
+index da4aa1a95b11..cf25913bd81c 100644
+--- a/drivers/infiniband/hw/hfi1/file_ops.c
++++ b/drivers/infiniband/hw/hfi1/file_ops.c
+@@ -505,7 +505,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma)
+ ret = -EINVAL;
+ goto done;
+ }
+- if (flags & VM_WRITE) {
++ if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) {
+ ret = -EPERM;
+ goto done;
+ }
+@@ -689,8 +689,8 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
+ * checks to default and disable the send context.
+ */
+ if (uctxt->sc) {
+- set_pio_integrity(uctxt->sc);
+ sc_disable(uctxt->sc);
++ set_pio_integrity(uctxt->sc);
+ }
+
+ hfi1_free_ctxt_rcv_groups(uctxt);
+diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
+index cac2c62bc42d..9c97c180c35e 100644
+--- a/drivers/infiniband/hw/hfi1/hfi.h
++++ b/drivers/infiniband/hw/hfi1/hfi.h
+@@ -1856,6 +1856,7 @@ struct cc_state *get_cc_state_protected(struct hfi1_pportdata *ppd)
+ #define HFI1_HAS_SDMA_TIMEOUT 0x8
+ #define HFI1_HAS_SEND_DMA 0x10 /* Supports Send DMA */
+ #define HFI1_FORCED_FREEZE 0x80 /* driver forced freeze mode */
++#define HFI1_SHUTDOWN 0x100 /* device is shutting down */
+
+ /* IB dword length mask in PBC (lower 11 bits); same for all chips */
+ #define HFI1_PBC_LENGTH_MASK ((1 << 11) - 1)
+diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
+index 6309edf811df..92e802a64fc4 100644
+--- a/drivers/infiniband/hw/hfi1/init.c
++++ b/drivers/infiniband/hw/hfi1/init.c
+@@ -1058,6 +1058,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
+ unsigned pidx;
+ int i;
+
++ if (dd->flags & HFI1_SHUTDOWN)
++ return;
++ dd->flags |= HFI1_SHUTDOWN;
++
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+@@ -1391,6 +1395,7 @@ void hfi1_disable_after_error(struct hfi1_devdata *dd)
+
+ static void remove_one(struct pci_dev *);
+ static int init_one(struct pci_dev *, const struct pci_device_id *);
++static void shutdown_one(struct pci_dev *);
+
+ #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
+ #define PFX DRIVER_NAME ": "
+@@ -1407,6 +1412,7 @@ static struct pci_driver hfi1_pci_driver = {
+ .name = DRIVER_NAME,
+ .probe = init_one,
+ .remove = remove_one,
++ .shutdown = shutdown_one,
+ .id_table = hfi1_pci_tbl,
+ .err_handler = &hfi1_pci_err_handler,
+ };
+@@ -1816,6 +1822,13 @@ static void remove_one(struct pci_dev *pdev)
+ postinit_cleanup(dd);
+ }
+
++static void shutdown_one(struct pci_dev *pdev)
++{
++ struct hfi1_devdata *dd = pci_get_drvdata(pdev);
++
++ shutdown_device(dd);
++}
++
+ /**
+ * hfi1_create_rcvhdrq - create a receive header queue
+ * @dd: the hfi1_ib device
+@@ -1831,7 +1844,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+ u64 reg;
+
+ if (!rcd->rcvhdrq) {
+- dma_addr_t dma_hdrqtail;
+ gfp_t gfp_flags;
+
+ /*
+@@ -1856,13 +1868,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd)
+ goto bail;
+ }
+
+- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) {
++ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) ||
++ HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) {
+ rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent(
+- &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail,
+- gfp_flags);
++ &dd->pcidev->dev, PAGE_SIZE,
++ &rcd->rcvhdrqtailaddr_dma, gfp_flags);
+ if (!rcd->rcvhdrtail_kvaddr)
+ goto bail_free;
+- rcd->rcvhdrqtailaddr_dma = dma_hdrqtail;
+ }
+
+ rcd->rcvhdrq_size = amt;
+diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
+index 40dac4d16eb8..9cac15d10c4f 100644
+--- a/drivers/infiniband/hw/hfi1/pio.c
++++ b/drivers/infiniband/hw/hfi1/pio.c
+@@ -50,8 +50,6 @@
+ #include "qp.h"
+ #include "trace.h"
+
+-#define SC_CTXT_PACKET_EGRESS_TIMEOUT 350 /* in chip cycles */
+-
+ #define SC(name) SEND_CTXT_##name
+ /*
+ * Send Context functions
+@@ -961,15 +959,40 @@ void sc_disable(struct send_context *sc)
+ }
+
+ /* return SendEgressCtxtStatus.PacketOccupancy */
+-#define packet_occupancy(r) \
+- (((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)\
+- >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT)
++static u64 packet_occupancy(u64 reg)
++{
++ return (reg &
++ SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SMASK)
++ >> SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_PACKET_OCCUPANCY_SHIFT;
++}
+
+ /* is egress halted on the context? */
+-#define egress_halted(r) \
+- ((r) & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK)
++static bool egress_halted(u64 reg)
++{
++ return !!(reg & SEND_EGRESS_CTXT_STATUS_CTXT_EGRESS_HALT_STATUS_SMASK);
++}
+
+-/* wait for packet egress, optionally pause for credit return */
++/* is the send context halted? */
++static bool is_sc_halted(struct hfi1_devdata *dd, u32 hw_context)
++{
++ return !!(read_kctxt_csr(dd, hw_context, SC(STATUS)) &
++ SC(STATUS_CTXT_HALTED_SMASK));
++}
++
++/**
++ * sc_wait_for_packet_egress
++ * @sc: valid send context
++ * @pause: wait for credit return
++ *
++ * Wait for packet egress, optionally pause for credit return
++ *
++ * Egress halt and Context halt are not necessarily the same thing, so
++ * check for both.
++ *
++ * NOTE: The context halt bit may not be set immediately. Because of this,
++ * it is necessary to check the SW SFC_HALTED bit (set in the IRQ) and the HW
++ * context bit to determine if the context is halted.
++ */
+ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
+ {
+ struct hfi1_devdata *dd = sc->dd;
+@@ -981,8 +1004,9 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
+ reg_prev = reg;
+ reg = read_csr(dd, sc->hw_context * 8 +
+ SEND_EGRESS_CTXT_STATUS);
+- /* done if egress is stopped */
+- if (egress_halted(reg))
++ /* done if any halt bits, SW or HW are set */
++ if (sc->flags & SCF_HALTED ||
++ is_sc_halted(dd, sc->hw_context) || egress_halted(reg))
+ break;
+ reg = packet_occupancy(reg);
+ if (reg == 0)
+diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
+index 0793a21d76f4..d604b3d5aa3e 100644
+--- a/drivers/infiniband/hw/mlx4/mad.c
++++ b/drivers/infiniband/hw/mlx4/mad.c
+@@ -1934,7 +1934,6 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
+ "buf:%lld\n", wc.wr_id);
+ break;
+ default:
+- BUG_ON(1);
+ break;
+ }
+ } else {
+diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
+index 61d8b06375bb..ed1f253faf97 100644
+--- a/drivers/infiniband/hw/mlx4/mr.c
++++ b/drivers/infiniband/hw/mlx4/mr.c
+@@ -367,6 +367,40 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
+ return block_shift;
+ }
+
++static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
++ u64 length, u64 virt_addr,
++ int access_flags)
++{
++ /*
++ * Force registering the memory as writable if the underlying pages
++ * are writable. This is so rereg can change the access permissions
++ * from readable to writable without having to run through ib_umem_get
++ * again
++ */
++ if (!ib_access_writable(access_flags)) {
++ struct vm_area_struct *vma;
++
++ down_read(&current->mm->mmap_sem);
++ /*
++ * FIXME: Ideally this would iterate over all the vmas that
++ * cover the memory, but for now it requires a single vma to
++ * entirely cover the MR to support RO mappings.
++ */
++ vma = find_vma(current->mm, start);
++ if (vma && vma->vm_end >= start + length &&
++ vma->vm_start <= start) {
++ if (vma->vm_flags & VM_WRITE)
++ access_flags |= IB_ACCESS_LOCAL_WRITE;
++ } else {
++ access_flags |= IB_ACCESS_LOCAL_WRITE;
++ }
++
++ up_read(&current->mm->mmap_sem);
++ }
++
++ return ib_umem_get(context, start, length, access_flags, 0);
++}
++
+ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+@@ -381,10 +415,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+- /* Force registering the memory as writable. */
+- /* Used for memory re-registeration. HCA protects the access */
+- mr->umem = ib_umem_get(pd->uobject->context, start, length,
+- access_flags | IB_ACCESS_LOCAL_WRITE, 0);
++ mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
++ virt_addr, access_flags);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ goto err_free;
+@@ -454,6 +486,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
+ }
+
+ if (flags & IB_MR_REREG_ACCESS) {
++ if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
++ return -EPERM;
++
+ err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
+ convert_access(mr_access_flags));
+
+@@ -467,10 +502,9 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
+
+ mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
+ ib_umem_release(mmr->umem);
+- mmr->umem = ib_umem_get(mr->uobject->context, start, length,
+- mr_access_flags |
+- IB_ACCESS_LOCAL_WRITE,
+- 0);
++ mmr->umem =
++ mlx4_get_umem_mr(mr->uobject->context, start, length,
++ virt_addr, mr_access_flags);
+ if (IS_ERR(mmr->umem)) {
+ err = PTR_ERR(mmr->umem);
+ /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
+diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
+index 77d257ec899b..9f6bc34cd4db 100644
+--- a/drivers/infiniband/hw/mlx5/cq.c
++++ b/drivers/infiniband/hw/mlx5/cq.c
+@@ -637,7 +637,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
+ }
+
+ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
+- struct ib_wc *wc)
++ struct ib_wc *wc, bool is_fatal_err)
+ {
+ struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device);
+ struct mlx5_ib_wc *soft_wc, *next;
+@@ -650,6 +650,10 @@ static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries,
+ mlx5_ib_dbg(dev, "polled software generated completion on CQ 0x%x\n",
+ cq->mcq.cqn);
+
++ if (unlikely(is_fatal_err)) {
++ soft_wc->wc.status = IB_WC_WR_FLUSH_ERR;
++ soft_wc->wc.vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
++ }
+ wc[npolled++] = soft_wc->wc;
+ list_del(&soft_wc->list);
+ kfree(soft_wc);
+@@ -670,12 +674,17 @@ int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
+
+ spin_lock_irqsave(&cq->lock, flags);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+- mlx5_ib_poll_sw_comp(cq, num_entries, wc, &npolled);
++ /* make sure no soft wqe's are waiting */
++ if (unlikely(!list_empty(&cq->wc_list)))
++ soft_polled = poll_soft_wc(cq, num_entries, wc, true);
++
++ mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled,
++ wc + soft_polled, &npolled);
+ goto out;
+ }
+
+ if (unlikely(!list_empty(&cq->wc_list)))
+- soft_polled = poll_soft_wc(cq, num_entries, wc);
++ soft_polled = poll_soft_wc(cq, num_entries, wc, false);
+
+ for (npolled = 0; npolled < num_entries - soft_polled; npolled++) {
+ if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled))
+diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
+index 46072455130c..3461df002f81 100644
+--- a/drivers/infiniband/hw/qib/qib.h
++++ b/drivers/infiniband/hw/qib/qib.h
+@@ -1228,6 +1228,7 @@ static inline struct qib_ibport *to_iport(struct ib_device *ibdev, u8 port)
+ #define QIB_BADINTR 0x8000 /* severe interrupt problems */
+ #define QIB_DCA_ENABLED 0x10000 /* Direct Cache Access enabled */
+ #define QIB_HAS_QSFP 0x20000 /* device (card instance) has QSFP */
++#define QIB_SHUTDOWN 0x40000 /* device is shutting down */
+
+ /*
+ * values for ppd->lflags (_ib_port_ related flags)
+@@ -1423,8 +1424,7 @@ u64 qib_sps_ints(void);
+ /*
+ * dma_addr wrappers - all 0's invalid for hw
+ */
+-dma_addr_t qib_map_page(struct pci_dev *, struct page *, unsigned long,
+- size_t, int);
++int qib_map_page(struct pci_dev *d, struct page *p, dma_addr_t *daddr);
+ struct pci_dev *qib_get_pci_dev(struct rvt_dev_info *rdi);
+
+ /*
+diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
+index 6a8800b65047..49c8e926cc64 100644
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -364,6 +364,8 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
+ goto done;
+ }
+ for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
++ dma_addr_t daddr;
++
+ for (; ntids--; tid++) {
+ if (tid == tidcnt)
+ tid = 0;
+@@ -380,12 +382,14 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
+ ret = -ENOMEM;
+ break;
+ }
++ ret = qib_map_page(dd->pcidev, pagep[i], &daddr);
++ if (ret)
++ break;
++
+ tidlist[i] = tid + tidoff;
+ /* we "know" system pages and TID pages are same size */
+ dd->pageshadow[ctxttid + tid] = pagep[i];
+- dd->physshadow[ctxttid + tid] =
+- qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE,
+- PCI_DMA_FROMDEVICE);
++ dd->physshadow[ctxttid + tid] = daddr;
+ /*
+ * don't need atomic or it's overhead
+ */
+diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
+index 6c68f8a97018..015520289735 100644
+--- a/drivers/infiniband/hw/qib/qib_init.c
++++ b/drivers/infiniband/hw/qib/qib_init.c
+@@ -841,6 +841,10 @@ static void qib_shutdown_device(struct qib_devdata *dd)
+ struct qib_pportdata *ppd;
+ unsigned pidx;
+
++ if (dd->flags & QIB_SHUTDOWN)
++ return;
++ dd->flags |= QIB_SHUTDOWN;
++
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+@@ -1182,6 +1186,7 @@ void qib_disable_after_error(struct qib_devdata *dd)
+
+ static void qib_remove_one(struct pci_dev *);
+ static int qib_init_one(struct pci_dev *, const struct pci_device_id *);
++static void qib_shutdown_one(struct pci_dev *);
+
+ #define DRIVER_LOAD_MSG "Intel " QIB_DRV_NAME " loaded: "
+ #define PFX QIB_DRV_NAME ": "
+@@ -1199,6 +1204,7 @@ static struct pci_driver qib_driver = {
+ .name = QIB_DRV_NAME,
+ .probe = qib_init_one,
+ .remove = qib_remove_one,
++ .shutdown = qib_shutdown_one,
+ .id_table = qib_pci_tbl,
+ .err_handler = &qib_pci_err_handler,
+ };
+@@ -1549,6 +1555,13 @@ static void qib_remove_one(struct pci_dev *pdev)
+ qib_postinit_cleanup(dd);
+ }
+
++static void qib_shutdown_one(struct pci_dev *pdev)
++{
++ struct qib_devdata *dd = pci_get_drvdata(pdev);
++
++ qib_shutdown_device(dd);
++}
++
+ /**
+ * qib_create_rcvhdrq - create a receive header queue
+ * @dd: the qlogic_ib device
+diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c
+index ce83ba9a12ef..16543d5e80c3 100644
+--- a/drivers/infiniband/hw/qib/qib_user_pages.c
++++ b/drivers/infiniband/hw/qib/qib_user_pages.c
+@@ -99,23 +99,27 @@ static int __qib_get_user_pages(unsigned long start_page, size_t num_pages,
+ *
+ * I'm sure we won't be so lucky with other iommu's, so FIXME.
+ */
+-dma_addr_t qib_map_page(struct pci_dev *hwdev, struct page *page,
+- unsigned long offset, size_t size, int direction)
++int qib_map_page(struct pci_dev *hwdev, struct page *page, dma_addr_t *daddr)
+ {
+ dma_addr_t phys;
+
+- phys = pci_map_page(hwdev, page, offset, size, direction);
++ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE, PCI_DMA_FROMDEVICE);
++ if (pci_dma_mapping_error(hwdev, phys))
++ return -ENOMEM;
+
+- if (phys == 0) {
+- pci_unmap_page(hwdev, phys, size, direction);
+- phys = pci_map_page(hwdev, page, offset, size, direction);
++ if (!phys) {
++ pci_unmap_page(hwdev, phys, PAGE_SIZE, PCI_DMA_FROMDEVICE);
++ phys = pci_map_page(hwdev, page, 0, PAGE_SIZE,
++ PCI_DMA_FROMDEVICE);
++ if (pci_dma_mapping_error(hwdev, phys))
++ return -ENOMEM;
+ /*
+ * FIXME: If we get 0 again, we should keep this page,
+ * map another, then free the 0 page.
+ */
+ }
+-
+- return phys;
++ *daddr = phys;
++ return 0;
+ }
+
+ /**
+diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
+index fb52b669bfce..340c17aba3b0 100644
+--- a/drivers/infiniband/sw/rdmavt/cq.c
++++ b/drivers/infiniband/sw/rdmavt/cq.c
+@@ -120,17 +120,20 @@ void rvt_cq_enter(struct rvt_cq *cq, struct ib_wc *entry, bool solicited)
+ if (cq->notify == IB_CQ_NEXT_COMP ||
+ (cq->notify == IB_CQ_SOLICITED &&
+ (solicited || entry->status != IB_WC_SUCCESS))) {
++ struct kthread_worker *worker;
++
+ /*
+ * This will cause send_complete() to be called in
+ * another thread.
+ */
+- spin_lock(&cq->rdi->n_cqs_lock);
+- if (likely(cq->rdi->worker)) {
++ rcu_read_lock();
++ worker = rcu_dereference(cq->rdi->worker);
++ if (likely(worker)) {
+ cq->notify = RVT_CQ_NONE;
+ cq->triggered++;
+- kthread_queue_work(cq->rdi->worker, &cq->comptask);
++ kthread_queue_work(worker, &cq->comptask);
+ }
+- spin_unlock(&cq->rdi->n_cqs_lock);
++ rcu_read_unlock();
+ }
+
+ spin_unlock_irqrestore(&cq->lock, flags);
+@@ -512,7 +515,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
+ int cpu;
+ struct kthread_worker *worker;
+
+- if (rdi->worker)
++ if (rcu_access_pointer(rdi->worker))
+ return 0;
+
+ spin_lock_init(&rdi->n_cqs_lock);
+@@ -524,7 +527,7 @@ int rvt_driver_cq_init(struct rvt_dev_info *rdi)
+ return PTR_ERR(worker);
+
+ set_user_nice(worker->task, MIN_NICE);
+- rdi->worker = worker;
++ RCU_INIT_POINTER(rdi->worker, worker);
+ return 0;
+ }
+
+@@ -536,15 +539,19 @@ void rvt_cq_exit(struct rvt_dev_info *rdi)
+ {
+ struct kthread_worker *worker;
+
+- /* block future queuing from send_complete() */
+- spin_lock_irq(&rdi->n_cqs_lock);
+- worker = rdi->worker;
++ if (!rcu_access_pointer(rdi->worker))
++ return;
++
++ spin_lock(&rdi->n_cqs_lock);
++ worker = rcu_dereference_protected(rdi->worker,
++ lockdep_is_held(&rdi->n_cqs_lock));
+ if (!worker) {
+- spin_unlock_irq(&rdi->n_cqs_lock);
++ spin_unlock(&rdi->n_cqs_lock);
+ return;
+ }
+- rdi->worker = NULL;
+- spin_unlock_irq(&rdi->n_cqs_lock);
++ RCU_INIT_POINTER(rdi->worker, NULL);
++ spin_unlock(&rdi->n_cqs_lock);
++ synchronize_rcu();
+
+ kthread_destroy_worker(worker);
+ }
+diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
+index fff40b097947..3130698fee70 100644
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -886,15 +886,9 @@ isert_login_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_des
+ }
+
+ static void
+-isert_create_send_desc(struct isert_conn *isert_conn,
+- struct isert_cmd *isert_cmd,
+- struct iser_tx_desc *tx_desc)
++__isert_create_send_desc(struct isert_device *device,
++ struct iser_tx_desc *tx_desc)
+ {
+- struct isert_device *device = isert_conn->device;
+- struct ib_device *ib_dev = device->ib_device;
+-
+- ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
+- ISER_HEADERS_LEN, DMA_TO_DEVICE);
+
+ memset(&tx_desc->iser_header, 0, sizeof(struct iser_ctrl));
+ tx_desc->iser_header.flags = ISCSI_CTRL;
+@@ -907,6 +901,20 @@ isert_create_send_desc(struct isert_conn *isert_conn,
+ }
+ }
+
++static void
++isert_create_send_desc(struct isert_conn *isert_conn,
++ struct isert_cmd *isert_cmd,
++ struct iser_tx_desc *tx_desc)
++{
++ struct isert_device *device = isert_conn->device;
++ struct ib_device *ib_dev = device->ib_device;
++
++ ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
++ ISER_HEADERS_LEN, DMA_TO_DEVICE);
++
++ __isert_create_send_desc(device, tx_desc);
++}
++
+ static int
+ isert_init_tx_hdrs(struct isert_conn *isert_conn,
+ struct iser_tx_desc *tx_desc)
+@@ -994,7 +1002,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
+ struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
+ int ret;
+
+- isert_create_send_desc(isert_conn, NULL, tx_desc);
++ __isert_create_send_desc(device, tx_desc);
+
+ memcpy(&tx_desc->iscsi_header, &login->rsp[0],
+ sizeof(struct iscsi_hdr));
+@@ -2108,7 +2116,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
+
+ sig_attrs->check_mask =
+ (se_cmd->prot_checks & TARGET_DIF_CHECK_GUARD ? 0xc0 : 0) |
+- (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
++ (se_cmd->prot_checks & TARGET_DIF_CHECK_APPTAG ? 0x30 : 0) |
+ (se_cmd->prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
+ return 0;
+ }
+diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
+index a89b81b35932..62f9c23d8a7f 100644
+--- a/drivers/input/joystick/xpad.c
++++ b/drivers/input/joystick/xpad.c
+@@ -123,7 +123,7 @@ static const struct xpad_device {
+ u8 mapping;
+ u8 xtype;
+ } xpad_device[] = {
+- { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
++ { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
+ { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
+ { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
+diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
+index 599544c1a91c..243e0fa6e3e3 100644
+--- a/drivers/input/mouse/elan_i2c.h
++++ b/drivers/input/mouse/elan_i2c.h
+@@ -27,6 +27,8 @@
+ #define ETP_DISABLE_POWER 0x0001
+ #define ETP_PRESSURE_OFFSET 25
+
++#define ETP_CALIBRATE_MAX_LEN 3
++
+ /* IAP Firmware handling */
+ #define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
+ #define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
+diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
+index 93967c8139e7..37f954b704a6 100644
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -610,7 +610,7 @@ static ssize_t calibrate_store(struct device *dev,
+ int tries = 20;
+ int retval;
+ int error;
+- u8 val[3];
++ u8 val[ETP_CALIBRATE_MAX_LEN];
+
+ retval = mutex_lock_interruptible(&data->sysfs_mutex);
+ if (retval)
+@@ -1263,6 +1263,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
+ { "ELAN060C", 0 },
+ { "ELAN0611", 0 },
+ { "ELAN0612", 0 },
++ { "ELAN0618", 0 },
+ { "ELAN1000", 0 },
+ { }
+ };
+diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
+index cfcb32559925..c060d270bc4d 100644
+--- a/drivers/input/mouse/elan_i2c_smbus.c
++++ b/drivers/input/mouse/elan_i2c_smbus.c
+@@ -56,7 +56,7 @@
+ static int elan_smbus_initialize(struct i2c_client *client)
+ {
+ u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
+- u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
++ u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
+ int len, error;
+
+ /* Get hello packet */
+@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
+ static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
+ {
+ int error;
++ u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
++
++ BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
+
+ error = i2c_smbus_read_block_data(client,
+- ETP_SMBUS_CALIBRATE_QUERY, val);
++ ETP_SMBUS_CALIBRATE_QUERY, buf);
+ if (error < 0)
+ return error;
+
++ memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
+ return 0;
+ }
+
+@@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
+ {
+ int len;
+
++ BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
++
+ len = i2c_smbus_read_block_data(client,
+ ETP_SMBUS_PACKET_QUERY,
+ &report[ETP_SMBUS_REPORT_OFFSET]);
+diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
+index db47a5e1d114..b68019109e99 100644
+--- a/drivers/input/mouse/elantech.c
++++ b/drivers/input/mouse/elantech.c
+@@ -796,7 +796,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
+ else if (ic_version == 7 && etd->samples[1] == 0x2A)
+ sanity_check = ((packet[3] & 0x1c) == 0x10);
+ else
+- sanity_check = ((packet[0] & 0x0c) == 0x04 &&
++ sanity_check = ((packet[0] & 0x08) == 0x00 &&
+ (packet[3] & 0x1c) == 0x10);
+
+ if (!sanity_check)
+@@ -1169,6 +1169,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
+ { }
+ };
+
++static const char * const middle_button_pnp_ids[] = {
++ "LEN2131", /* ThinkPad P52 w/ NFC */
++ "LEN2132", /* ThinkPad P52 */
++ NULL
++};
++
+ /*
+ * Set the appropriate event bits for the input subsystem
+ */
+@@ -1188,7 +1194,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
+ __clear_bit(EV_REL, dev->evbit);
+
+ __set_bit(BTN_LEFT, dev->keybit);
+- if (dmi_check_system(elantech_dmi_has_middle_button))
++ if (dmi_check_system(elantech_dmi_has_middle_button) ||
++ psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
+ __set_bit(BTN_MIDDLE, dev->keybit);
+ __set_bit(BTN_RIGHT, dev->keybit);
+
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index 8900c3166ebf..47ed5616d026 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
+ else
+ input_report_rel(dev, REL_WHEEL, -wheel);
+
+- input_report_key(dev, BTN_SIDE, BIT(4));
+- input_report_key(dev, BTN_EXTRA, BIT(5));
++ input_report_key(dev, BTN_SIDE, packet[3] & BIT(4));
++ input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5));
+ break;
+ }
+ break;
+@@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
+ input_report_rel(dev, REL_WHEEL, -(s8) packet[3]);
+
+ /* Extra buttons on Genius NewNet 3D */
+- input_report_key(dev, BTN_SIDE, BIT(6));
+- input_report_key(dev, BTN_EXTRA, BIT(7));
++ input_report_key(dev, BTN_SIDE, packet[0] & BIT(6));
++ input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7));
+ break;
+
+ case PSMOUSE_THINKPS:
+ /* Extra button on ThinkingMouse */
+- input_report_key(dev, BTN_EXTRA, BIT(3));
++ input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3));
+
+ /*
+ * Without this bit of weirdness moving up gives wildly
+@@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
+ * Cortron PS2 Trackball reports SIDE button in the
+ * 4th bit of the first byte.
+ */
+- input_report_key(dev, BTN_SIDE, BIT(3));
++ input_report_key(dev, BTN_SIDE, packet[0] & BIT(3));
+ packet[0] |= BIT(3);
+ break;
+
+diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
+index ff7043f74a3d..d196ac3d8b8c 100644
+--- a/drivers/input/touchscreen/silead.c
++++ b/drivers/input/touchscreen/silead.c
+@@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
+ { "GSL3692", 0 },
+ { "MSSL1680", 0 },
+ { "MSSL0001", 0 },
++ { "MSSL0002", 0 },
+ { }
+ };
+ MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
+diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
+index df171cb85822..b38798cc5288 100644
+--- a/drivers/iommu/Kconfig
++++ b/drivers/iommu/Kconfig
+@@ -107,7 +107,6 @@ config IOMMU_PGTABLES_L2
+ # AMD IOMMU support
+ config AMD_IOMMU
+ bool "AMD IOMMU support"
+- select DMA_DIRECT_OPS
+ select SWIOTLB
+ select PCI_MSI
+ select PCI_ATS
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index b0b30a568db7..12c1491a1a9a 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2593,32 +2593,51 @@ static void *alloc_coherent(struct device *dev, size_t size,
+ unsigned long attrs)
+ {
+ u64 dma_mask = dev->coherent_dma_mask;
+- struct protection_domain *domain = get_domain(dev);
+- bool is_direct = false;
+- void *virt_addr;
++ struct protection_domain *domain;
++ struct dma_ops_domain *dma_dom;
++ struct page *page;
++
++ domain = get_domain(dev);
++ if (PTR_ERR(domain) == -EINVAL) {
++ page = alloc_pages(flag, get_order(size));
++ *dma_addr = page_to_phys(page);
++ return page_address(page);
++ } else if (IS_ERR(domain))
++ return NULL;
+
+- if (IS_ERR(domain)) {
+- if (PTR_ERR(domain) != -EINVAL)
++ dma_dom = to_dma_ops_domain(domain);
++ size = PAGE_ALIGN(size);
++ dma_mask = dev->coherent_dma_mask;
++ flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
++ flag |= __GFP_ZERO;
++
++ page = alloc_pages(flag | __GFP_NOWARN, get_order(size));
++ if (!page) {
++ if (!gfpflags_allow_blocking(flag))
+ return NULL;
+- is_direct = true;
+- }
+
+- virt_addr = dma_direct_alloc(dev, size, dma_addr, flag, attrs);
+- if (!virt_addr || is_direct)
+- return virt_addr;
++ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
++ get_order(size), flag);
++ if (!page)
++ return NULL;
++ }
+
+ if (!dma_mask)
+ dma_mask = *dev->dma_mask;
+
+- *dma_addr = __map_single(dev, to_dma_ops_domain(domain),
+- virt_to_phys(virt_addr), PAGE_ALIGN(size),
+- DMA_BIDIRECTIONAL, dma_mask);
++ *dma_addr = __map_single(dev, dma_dom, page_to_phys(page),
++ size, DMA_BIDIRECTIONAL, dma_mask);
++
+ if (*dma_addr == AMD_IOMMU_MAPPING_ERROR)
+ goto out_free;
+- return virt_addr;
++
++ return page_address(page);
+
+ out_free:
+- dma_direct_free(dev, size, virt_addr, *dma_addr, attrs);
++
++ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
++ __free_pages(page, get_order(size));
++
+ return NULL;
+ }
+
+@@ -2629,17 +2648,24 @@ static void free_coherent(struct device *dev, size_t size,
+ void *virt_addr, dma_addr_t dma_addr,
+ unsigned long attrs)
+ {
+- struct protection_domain *domain = get_domain(dev);
++ struct protection_domain *domain;
++ struct dma_ops_domain *dma_dom;
++ struct page *page;
+
++ page = virt_to_page(virt_addr);
+ size = PAGE_ALIGN(size);
+
+- if (!IS_ERR(domain)) {
+- struct dma_ops_domain *dma_dom = to_dma_ops_domain(domain);
++ domain = get_domain(dev);
++ if (IS_ERR(domain))
++ goto free_mem;
+
+- __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
+- }
++ dma_dom = to_dma_ops_domain(domain);
++
++ __unmap_single(dma_dom, dma_addr, size, DMA_BIDIRECTIONAL);
+
+- dma_direct_free(dev, size, virt_addr, dma_addr, attrs);
++free_mem:
++ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
++ __free_pages(page, get_order(size));
+ }
+
+ /*
+diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
+index 5416f2b2ac21..ab16968fced8 100644
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -2309,7 +2309,14 @@ static int its_irq_domain_activate(struct irq_domain *domain,
+ cpu_mask = cpumask_of_node(its_dev->its->numa_node);
+
+ /* Bind the LPI to the first possible CPU */
+- cpu = cpumask_first(cpu_mask);
++ cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
++ if (cpu >= nr_cpu_ids) {
++ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
++ return -EINVAL;
++
++ cpu = cpumask_first(cpu_online_mask);
++ }
++
+ its_dev->event_map.col_map[event] = cpu;
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index b11107497d2e..19478c7b2268 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -1385,6 +1385,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
+
+ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
++static void requeue_bios(struct pool *pool);
++
+ static void check_for_space(struct pool *pool)
+ {
+ int r;
+@@ -1397,8 +1399,10 @@ static void check_for_space(struct pool *pool)
+ if (r)
+ return;
+
+- if (nr_free)
++ if (nr_free) {
+ set_pool_mode(pool, PM_WRITE);
++ requeue_bios(pool);
++ }
+ }
+
+ /*
+@@ -1475,7 +1479,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
+
+ r = dm_pool_alloc_data_block(pool->pmd, result);
+ if (r) {
+- metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
++ if (r == -ENOSPC)
++ set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
++ else
++ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ return r;
+ }
+
+diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
+index e73b0776683c..e302f1558fa0 100644
+--- a/drivers/md/dm-zoned-target.c
++++ b/drivers/md/dm-zoned-target.c
+@@ -788,7 +788,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+
+ /* Chunk BIO work */
+ mutex_init(&dmz->chunk_lock);
+- INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
++ INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
+ dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
+ 0, dev->name);
+ if (!dmz->chunk_wq) {
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 0a7b0107ca78..cabae3e280c2 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1582,10 +1582,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+ * the usage of io->orig_bio in dm_remap_zone_report()
+ * won't be affected by this reassignment.
+ */
+- struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
+- md->queue->bio_split);
++ struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
++ GFP_NOIO, md->queue->bio_split);
+ ci.io->orig_bio = b;
+- bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
+ bio_chain(b, bio);
+ ret = generic_make_request(bio);
+ break;
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index c208c01f63a5..bac480d75d1d 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -2853,7 +2853,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
+ err = 0;
+ }
+ } else if (cmd_match(buf, "re-add")) {
+- if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1)) {
++ if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) &&
++ rdev->saved_raid_disk >= 0) {
+ /* clear_bit is performed _after_ all the devices
+ * have their local Faulty bit cleared. If any writes
+ * happen in the meantime in the local node, they
+@@ -8641,6 +8642,7 @@ static int remove_and_add_spares(struct mddev *mddev,
+ if (mddev->pers->hot_remove_disk(
+ mddev, rdev) == 0) {
+ sysfs_unlink_rdev(mddev, rdev);
++ rdev->saved_raid_disk = rdev->raid_disk;
+ rdev->raid_disk = -1;
+ removed++;
+ }
+diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
+index e33414975065..a4ada1ccf0df 100644
+--- a/drivers/media/dvb-core/dvb_frontend.c
++++ b/drivers/media/dvb-core/dvb_frontend.c
+@@ -275,8 +275,20 @@ static void dvb_frontend_add_event(struct dvb_frontend *fe,
+ wake_up_interruptible (&events->wait_queue);
+ }
+
++static int dvb_frontend_test_event(struct dvb_frontend_private *fepriv,
++ struct dvb_fe_events *events)
++{
++ int ret;
++
++ up(&fepriv->sem);
++ ret = events->eventw != events->eventr;
++ down(&fepriv->sem);
++
++ return ret;
++}
++
+ static int dvb_frontend_get_event(struct dvb_frontend *fe,
+- struct dvb_frontend_event *event, int flags)
++ struct dvb_frontend_event *event, int flags)
+ {
+ struct dvb_frontend_private *fepriv = fe->frontend_priv;
+ struct dvb_fe_events *events = &fepriv->events;
+@@ -294,13 +306,8 @@ static int dvb_frontend_get_event(struct dvb_frontend *fe,
+ if (flags & O_NONBLOCK)
+ return -EWOULDBLOCK;
+
+- up(&fepriv->sem);
+-
+- ret = wait_event_interruptible (events->wait_queue,
+- events->eventw != events->eventr);
+-
+- if (down_interruptible (&fepriv->sem))
+- return -ERESTARTSYS;
++ ret = wait_event_interruptible(events->wait_queue,
++ dvb_frontend_test_event(fepriv, events));
+
+ if (ret < 0)
+ return ret;
+diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
+index c2d3b8f0f487..93f69b3ac911 100644
+--- a/drivers/media/platform/vsp1/vsp1_video.c
++++ b/drivers/media/platform/vsp1/vsp1_video.c
+@@ -849,9 +849,8 @@ static int vsp1_video_setup_pipeline(struct vsp1_pipeline *pipe)
+ return 0;
+ }
+
+-static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
++static void vsp1_video_release_buffers(struct vsp1_video *video)
+ {
+- struct vsp1_video *video = pipe->output->video;
+ struct vsp1_vb2_buffer *buffer;
+ unsigned long flags;
+
+@@ -861,12 +860,18 @@ static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
+ vb2_buffer_done(&buffer->buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ INIT_LIST_HEAD(&video->irqqueue);
+ spin_unlock_irqrestore(&video->irqlock, flags);
++}
++
++static void vsp1_video_cleanup_pipeline(struct vsp1_pipeline *pipe)
++{
++ lockdep_assert_held(&pipe->lock);
+
+ /* Release our partition table allocation */
+- mutex_lock(&pipe->lock);
+ kfree(pipe->part_table);
+ pipe->part_table = NULL;
+- mutex_unlock(&pipe->lock);
++
++ vsp1_dl_list_put(pipe->dl);
++ pipe->dl = NULL;
+ }
+
+ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+@@ -881,8 +886,9 @@ static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+ if (pipe->stream_count == pipe->num_inputs) {
+ ret = vsp1_video_setup_pipeline(pipe);
+ if (ret < 0) {
+- mutex_unlock(&pipe->lock);
++ vsp1_video_release_buffers(video);
+ vsp1_video_cleanup_pipeline(pipe);
++ mutex_unlock(&pipe->lock);
+ return ret;
+ }
+
+@@ -932,13 +938,12 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq)
+ if (ret == -ETIMEDOUT)
+ dev_err(video->vsp1->dev, "pipeline stop timeout\n");
+
+- vsp1_dl_list_put(pipe->dl);
+- pipe->dl = NULL;
++ vsp1_video_cleanup_pipeline(pipe);
+ }
+ mutex_unlock(&pipe->lock);
+
+ media_pipeline_stop(&video->video.entity);
+- vsp1_video_cleanup_pipeline(pipe);
++ vsp1_video_release_buffers(video);
+ vsp1_video_pipeline_put(pipe);
+ }
+
+diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c
+index c110984ca671..5478fe08f9d3 100644
+--- a/drivers/media/rc/ir-mce_kbd-decoder.c
++++ b/drivers/media/rc/ir-mce_kbd-decoder.c
+@@ -130,6 +130,8 @@ static void mce_kbd_rx_timeout(struct timer_list *t)
+
+ for (i = 0; i < MCIR2_MASK_KEYS_START; i++)
+ input_report_key(raw->mce_kbd.idev, kbd_keycodes[i], 0);
++
++ input_sync(raw->mce_kbd.idev);
+ }
+
+ static enum mce_kbd_mode mce_kbd_mode(struct mce_kbd_dec *data)
+diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
+index c76b2101193c..89795d4d0a71 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
++++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
+@@ -1024,6 +1024,9 @@ struct usb_device_id cx231xx_id_table[] = {
+ .driver_info = CX231XX_BOARD_CNXT_RDE_250},
+ {USB_DEVICE(0x0572, 0x58A0),
+ .driver_info = CX231XX_BOARD_CNXT_RDU_250},
++ /* AverMedia DVD EZMaker 7 */
++ {USB_DEVICE(0x07ca, 0xc039),
++ .driver_info = CX231XX_BOARD_CNXT_VIDEO_GRABBER},
+ {USB_DEVICE(0x2040, 0xb110),
+ .driver_info = CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL},
+ {USB_DEVICE(0x2040, 0xb111),
+diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
+index 67ed66712d05..f31ffaf9d2f2 100644
+--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
++++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
+@@ -1151,7 +1151,7 @@ static int dvb_init(struct cx231xx *dev)
+ info.platform_data = &si2157_config;
+ request_module("si2157");
+
+- client = i2c_new_device(adapter, &info);
++ client = i2c_new_device(tuner_i2c, &info);
+ if (client == NULL || client->dev.driver == NULL) {
+ module_put(dvb->i2c_client_demod[0]->dev.driver->owner);
+ i2c_unregister_device(dvb->i2c_client_demod[0]);
+diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
+index aa0082fe5833..b28c997a7ab0 100644
+--- a/drivers/media/usb/uvc/uvc_video.c
++++ b/drivers/media/usb/uvc/uvc_video.c
+@@ -163,14 +163,27 @@ static void uvc_fixup_video_ctrl(struct uvc_streaming *stream,
+ }
+ }
+
++static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
++{
++ /*
++ * Return the size of the video probe and commit controls, which depends
++ * on the protocol version.
++ */
++ if (stream->dev->uvc_version < 0x0110)
++ return 26;
++ else if (stream->dev->uvc_version < 0x0150)
++ return 34;
++ else
++ return 48;
++}
++
+ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
+ struct uvc_streaming_control *ctrl, int probe, u8 query)
+ {
++ u16 size = uvc_video_ctrl_size(stream);
+ u8 *data;
+- u16 size;
+ int ret;
+
+- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
+ if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
+ query == UVC_GET_DEF)
+ return -EIO;
+@@ -225,7 +238,7 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
+ ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
+ ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
+
+- if (size == 34) {
++ if (size >= 34) {
+ ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
+ ctrl->bmFramingInfo = data[30];
+ ctrl->bPreferedVersion = data[31];
+@@ -254,11 +267,10 @@ static int uvc_get_video_ctrl(struct uvc_streaming *stream,
+ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
+ struct uvc_streaming_control *ctrl, int probe)
+ {
++ u16 size = uvc_video_ctrl_size(stream);
+ u8 *data;
+- u16 size;
+ int ret;
+
+- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
+ data = kzalloc(size, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+@@ -275,7 +287,7 @@ static int uvc_set_video_ctrl(struct uvc_streaming *stream,
+ put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
+ put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
+
+- if (size == 34) {
++ if (size >= 34) {
+ put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
+ data[30] = ctrl->bmFramingInfo;
+ data[31] = ctrl->bPreferedVersion;
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index 4312935f1dfc..d03a44d89649 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -871,7 +871,7 @@ static int put_v4l2_ext_controls32(struct file *file,
+ get_user(kcontrols, &kp->controls))
+ return -EFAULT;
+
+- if (!count)
++ if (!count || count > (U32_MAX/sizeof(*ucontrols)))
+ return 0;
+ if (get_user(p, &up->controls))
+ return -EFAULT;
+diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
+index d1c46de89eb4..d9ae983095c5 100644
+--- a/drivers/mfd/intel-lpss-pci.c
++++ b/drivers/mfd/intel-lpss-pci.c
+@@ -124,6 +124,11 @@ static const struct intel_lpss_platform_info apl_i2c_info = {
+ .properties = apl_i2c_properties,
+ };
+
++static const struct intel_lpss_platform_info cnl_i2c_info = {
++ .clk_rate = 216000000,
++ .properties = spt_i2c_properties,
++};
++
+ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ /* BXT A-Step */
+ { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info },
+@@ -207,13 +212,13 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0x9daa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9dab), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0x9dfb), (kernel_ulong_t)&spt_info },
+- { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&spt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&spt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x9dc5), (kernel_ulong_t)&cnl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x9dc6), (kernel_ulong_t)&cnl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x9dc7), (kernel_ulong_t)&spt_uart_info },
+- { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&spt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&spt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&spt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&spt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x9de8), (kernel_ulong_t)&cnl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x9de9), (kernel_ulong_t)&cnl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x9dea), (kernel_ulong_t)&cnl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0x9deb), (kernel_ulong_t)&cnl_i2c_info },
+ /* SPT-H */
+ { PCI_VDEVICE(INTEL, 0xa127), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa128), (kernel_ulong_t)&spt_uart_info },
+@@ -240,10 +245,10 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, 0xa32b), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa347), (kernel_ulong_t)&spt_uart_info },
+- { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&spt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&spt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&spt_i2c_info },
+- { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&spt_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa368), (kernel_ulong_t)&cnl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa369), (kernel_ulong_t)&cnl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info },
++ { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info },
+ { }
+ };
+ MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
+diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c
+index 9e545eb6e8b4..4bcf117a7ba8 100644
+--- a/drivers/mfd/intel-lpss.c
++++ b/drivers/mfd/intel-lpss.c
+@@ -275,11 +275,11 @@ static void intel_lpss_init_dev(const struct intel_lpss *lpss)
+
+ intel_lpss_deassert_reset(lpss);
+
++ intel_lpss_set_remap_addr(lpss);
++
+ if (!intel_lpss_has_idma(lpss))
+ return;
+
+- intel_lpss_set_remap_addr(lpss);
+-
+ /* Make sure that SPI multiblock DMA transfers are re-enabled */
+ if (lpss->type == LPSS_DEV_SPI)
+ writel(value, lpss->priv + LPSS_PRIV_SSP_REG);
+diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
+index d3133a371e27..c649344fd7f2 100644
+--- a/drivers/mfd/twl-core.c
++++ b/drivers/mfd/twl-core.c
+@@ -1177,7 +1177,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
+ twl_priv->ready = true;
+
+ /* setup clock framework */
+- clocks_init(&pdev->dev, pdata ? pdata->clock : NULL);
++ clocks_init(&client->dev, pdata ? pdata->clock : NULL);
+
+ /* read TWL IDCODE Register */
+ if (twl_class_is_4030()) {
+diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
+index 4d6736f9d463..429d6de1dde7 100644
+--- a/drivers/misc/cxl/pci.c
++++ b/drivers/misc/cxl/pci.c
+@@ -514,9 +514,9 @@ static int init_implementation_adapter_regs_psl9(struct cxl *adapter,
+ cxl_p1_write(adapter, CXL_PSL9_FIR_CNTL, psl_fircntl);
+
+ /* Setup the PSL to transmit packets on the PCIe before the
+- * CAPP is enabled
++ * CAPP is enabled. Make sure that CAPP virtual machines are disabled
+ */
+- cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000002A10ULL);
++ cxl_p1_write(adapter, CXL_PSL9_DSNDCTL, 0x0001001000012A10ULL);
+
+ /*
+ * A response to an ASB_Notify request is returned by the
+diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
+index 4b5a4c5d3c01..629e2e156412 100644
+--- a/drivers/misc/cxl/sysfs.c
++++ b/drivers/misc/cxl/sysfs.c
+@@ -353,12 +353,20 @@ static ssize_t prefault_mode_store(struct device *device,
+ struct cxl_afu *afu = to_cxl_afu(device);
+ enum prefault_modes mode = -1;
+
+- if (!strncmp(buf, "work_element_descriptor", 23))
+- mode = CXL_PREFAULT_WED;
+- if (!strncmp(buf, "all", 3))
+- mode = CXL_PREFAULT_ALL;
+ if (!strncmp(buf, "none", 4))
+ mode = CXL_PREFAULT_NONE;
++ else {
++ if (!radix_enabled()) {
++
++ /* only allowed when not in radix mode */
++ if (!strncmp(buf, "work_element_descriptor", 23))
++ mode = CXL_PREFAULT_WED;
++ if (!strncmp(buf, "all", 3))
++ mode = CXL_PREFAULT_ALL;
++ } else {
++ dev_err(device, "Cannot prefault with radix enabled\n");
++ }
++ }
+
+ if (mode == -1)
+ return -EINVAL;
+diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
+index 51e01f03fb99..45c015da2e75 100644
+--- a/drivers/mmc/host/renesas_sdhi_core.c
++++ b/drivers/mmc/host/renesas_sdhi_core.c
+@@ -28,6 +28,7 @@
+ #include <linux/of_device.h>
+ #include <linux/platform_device.h>
+ #include <linux/mmc/host.h>
++#include <linux/mmc/slot-gpio.h>
+ #include <linux/mfd/tmio.h>
+ #include <linux/sh_dma.h>
+ #include <linux/delay.h>
+@@ -534,6 +535,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
+ host->multi_io_quirk = renesas_sdhi_multi_io_quirk;
+ host->dma_ops = dma_ops;
+
++	/* For some SoCs, we disable internal WP. GPIO may override this */
++ if (mmc_can_gpio_ro(host->mmc))
++ mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT;
++
+ /* SDR speeds are only available on Gen2+ */
+ if (mmc_data->flags & TMIO_MMC_MIN_RCAR2) {
+ /* card_busy caused issues on r8a73a4 (pre-Gen2) CD-less SDHI */
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+index 6af946d16d24..eb027cdc8f24 100644
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -87,6 +87,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+ MMC_CAP_CMD23,
++ .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
+ .bus_shift = 2,
+ .scc_offset = 0x1000,
+ .taps = rcar_gen3_scc_taps,
+diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+index 848e50c1638a..4bb46c489d71 100644
+--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+@@ -42,6 +42,7 @@ static const struct renesas_sdhi_of_data of_rz_compatible = {
+ static const struct renesas_sdhi_of_data of_rcar_gen1_compatible = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ,
++ .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
+ };
+
+ /* Definitions for sampling clocks */
+@@ -61,6 +62,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+ MMC_CAP_CMD23,
++ .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
+ .dma_buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .dma_rx_offset = 0x2000,
+ .scc_offset = 0x0300,
+@@ -81,6 +83,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+ MMC_CAP_CMD23,
++ .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT,
+ .bus_shift = 2,
+ .scc_offset = 0x1000,
+ .taps = rcar_gen3_scc_taps,
+diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
+index 692902df2598..3a8a88fa06aa 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -1880,7 +1880,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
+ if (time_after(jiffies, timeo) && !chip_ready(map, adr))
+ break;
+
+- if (chip_ready(map, adr)) {
++ if (chip_good(map, adr, datum)) {
+ xip_enable(map, chip, adr);
+ goto op_done;
+ }
+@@ -2515,7 +2515,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+
+ struct ppb_lock {
+ struct flchip *chip;
+- loff_t offset;
++ unsigned long adr;
+ int locked;
+ };
+
+@@ -2533,8 +2533,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+ unsigned long timeo;
+ int ret;
+
++ adr += chip->start;
+ mutex_lock(&chip->mutex);
+- ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
++ ret = get_chip(map, chip, adr, FL_LOCKING);
+ if (ret) {
+ mutex_unlock(&chip->mutex);
+ return ret;
+@@ -2552,8 +2553,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+
+ if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
+ chip->state = FL_LOCKING;
+- map_write(map, CMD(0xA0), chip->start + adr);
+- map_write(map, CMD(0x00), chip->start + adr);
++ map_write(map, CMD(0xA0), adr);
++ map_write(map, CMD(0x00), adr);
+ } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
+ /*
+ * Unlocking of one specific sector is not supported, so we
+@@ -2591,7 +2592,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
+ map_write(map, CMD(0x00), chip->start);
+
+ chip->state = FL_READY;
+- put_chip(map, chip, adr + chip->start);
++ put_chip(map, chip, adr);
+ mutex_unlock(&chip->mutex);
+
+ return ret;
+@@ -2648,9 +2649,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+ * sectors shall be unlocked, so lets keep their locking
+ * status at "unlocked" (locked=0) for the final re-locking.
+ */
+- if ((adr < ofs) || (adr >= (ofs + len))) {
++ if ((offset < ofs) || (offset >= (ofs + len))) {
+ sect[sectors].chip = &cfi->chips[chipnum];
+- sect[sectors].offset = offset;
++ sect[sectors].adr = adr;
+ sect[sectors].locked = do_ppb_xxlock(
+ map, &cfi->chips[chipnum], adr, 0,
+ DO_XXLOCK_ONEBLOCK_GETLOCK);
+@@ -2664,6 +2665,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+ i++;
+
+ if (adr >> cfi->chipshift) {
++ if (offset >= (ofs + len))
++ break;
+ adr = 0;
+ chipnum++;
+
+@@ -2694,7 +2697,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
+ */
+ for (i = 0; i < sectors; i++) {
+ if (sect[i].locked)
+- do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
++ do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
+ DO_XXLOCK_ONEBLOCK_LOCK);
+ }
+
+diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
+index cfd33e6ca77f..5869e90cc14b 100644
+--- a/drivers/mtd/nand/raw/denali_dt.c
++++ b/drivers/mtd/nand/raw/denali_dt.c
+@@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev)
+ if (ret)
+ return ret;
+
+- denali->clk_x_rate = clk_get_rate(dt->clk);
++ /*
++	 * Hardcode the clock rate for backward compatibility.
++ * This works for both SOCFPGA and UniPhier.
++ */
++ denali->clk_x_rate = 200000000;
+
+ ret = denali_init(denali);
+ if (ret)
+diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
+index 45786e707b7b..26cef218bb43 100644
+--- a/drivers/mtd/nand/raw/mxc_nand.c
++++ b/drivers/mtd/nand/raw/mxc_nand.c
+@@ -48,7 +48,7 @@
+ #define NFC_V1_V2_CONFIG (host->regs + 0x0a)
+ #define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
+ #define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
+-#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
++#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
+ #define NFC_V1_V2_WRPROT (host->regs + 0x12)
+ #define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
+ #define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
+@@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
+ writew(config1, NFC_V1_V2_CONFIG1);
+ /* preset operation */
+
++ /* spare area size in 16-bit half-words */
++ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
++
+ /* Unlock the internal RAM Buffer */
+ writew(0x2, NFC_V1_V2_CONFIG);
+
+diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
+index f28c3a555861..7a881000eeba 100644
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+
+ for (; page < page_end; page++) {
+ res = chip->ecc.read_oob(mtd, chip, page);
+- if (res)
++ if (res < 0)
+ return res;
+
+ bad = chip->oob_poi[chip->badblockpos];
+@@ -2174,7 +2174,6 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const u8 *params = data;
+ int i, ret;
+- u8 status;
+
+ if (chip->exec_op) {
+ const struct nand_sdr_timings *sdr =
+@@ -2188,26 +2187,18 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ };
+ struct nand_operation op = NAND_OPERATION(instrs);
+
+- ret = nand_exec_op(chip, &op);
+- if (ret)
+- return ret;
+-
+- ret = nand_status_op(chip, &status);
+- if (ret)
+- return ret;
+- } else {
+- chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
+- for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+- chip->write_byte(mtd, params[i]);
++ return nand_exec_op(chip, &op);
++ }
+
+- ret = chip->waitfunc(mtd, chip);
+- if (ret < 0)
+- return ret;
++ chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, feature, -1);
++ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
++ chip->write_byte(mtd, params[i]);
+
+- status = ret;
+- }
++ ret = chip->waitfunc(mtd, chip);
++ if (ret < 0)
++ return ret;
+
+- if (status & NAND_STATUS_FAIL)
++ if (ret & NAND_STATUS_FAIL)
+ return -EIO;
+
+ return 0;
+diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
+index 7ed1f87e742a..49c546c97c6f 100644
+--- a/drivers/mtd/nand/raw/nand_macronix.c
++++ b/drivers/mtd/nand/raw/nand_macronix.c
+@@ -17,23 +17,47 @@
+
+ #include <linux/mtd/rawnand.h>
+
++/*
++ * Macronix AC series does not support using SET/GET_FEATURES to change
++ * the timings unlike what is declared in the parameter page. Unflag
++ * this feature to avoid unnecessary downturns.
++ */
++static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
++{
++ unsigned int i;
++ static const char * const broken_get_timings[] = {
++ "MX30LF1G18AC",
++ "MX30LF1G28AC",
++ "MX30LF2G18AC",
++ "MX30LF2G28AC",
++ "MX30LF4G18AC",
++ "MX30LF4G28AC",
++ "MX60LF8G18AC",
++ };
++
++ if (!chip->parameters.supports_set_get_features)
++ return;
++
++ for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
++ if (!strcmp(broken_get_timings[i], chip->parameters.model))
++ break;
++ }
++
++ if (i == ARRAY_SIZE(broken_get_timings))
++ return;
++
++ bitmap_clear(chip->parameters.get_feature_list,
++ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
++ bitmap_clear(chip->parameters.set_feature_list,
++ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
++}
++
+ static int macronix_nand_init(struct nand_chip *chip)
+ {
+ if (nand_is_slc(chip))
+ chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+
+- /*
+- * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change
+- * the timings unlike what is declared in the parameter page. Unflag
+- * this feature to avoid unnecessary downturns.
+- */
+- if (chip->parameters.supports_set_get_features &&
+- !strcmp("MX30LF2G18AC", chip->parameters.model)) {
+- bitmap_clear(chip->parameters.get_feature_list,
+- ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+- bitmap_clear(chip->parameters.set_feature_list,
+- ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+- }
++ macronix_nand_fix_broken_get_timings(chip);
+
+ return 0;
+ }
+diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
+index 0af45b134c0c..5ec4c90a637d 100644
+--- a/drivers/mtd/nand/raw/nand_micron.c
++++ b/drivers/mtd/nand/raw/nand_micron.c
+@@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
+
+ if (p->supports_set_get_features) {
+ set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
++ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
+ set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
++ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
+ }
+
+ return 0;
+diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c
+index 699951523179..8e98f4ab87c1 100644
+--- a/drivers/mtd/spi-nor/intel-spi.c
++++ b/drivers/mtd/spi-nor/intel-spi.c
+@@ -136,6 +136,7 @@
+ * @swseq_reg: Use SW sequencer in register reads/writes
+ * @swseq_erase: Use SW sequencer in erase operation
+ * @erase_64k: 64k erase supported
++ * @atomic_preopcode: Holds preopcode when atomic sequence is requested
+ * @opcodes: Opcodes which are supported. This are programmed by BIOS
+ * before it locks down the controller.
+ */
+@@ -153,6 +154,7 @@ struct intel_spi {
+ bool swseq_reg;
+ bool swseq_erase;
+ bool erase_64k;
++ u8 atomic_preopcode;
+ u8 opcodes[8];
+ };
+
+@@ -474,7 +476,7 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+ int optype)
+ {
+ u32 val = 0, status;
+- u16 preop;
++ u8 atomic_preopcode;
+ int ret;
+
+ ret = intel_spi_opcode_index(ispi, opcode, optype);
+@@ -484,17 +486,42 @@ static int intel_spi_sw_cycle(struct intel_spi *ispi, u8 opcode, int len,
+ if (len > INTEL_SPI_FIFO_SZ)
+ return -EINVAL;
+
++ /*
++ * Always clear it after each SW sequencer operation regardless
++ * of whether it is successful or not.
++ */
++ atomic_preopcode = ispi->atomic_preopcode;
++ ispi->atomic_preopcode = 0;
++
+ /* Only mark 'Data Cycle' bit when there is data to be transferred */
+ if (len > 0)
+ val = ((len - 1) << SSFSTS_CTL_DBC_SHIFT) | SSFSTS_CTL_DS;
+ val |= ret << SSFSTS_CTL_COP_SHIFT;
+ val |= SSFSTS_CTL_FCERR | SSFSTS_CTL_FDONE;
+ val |= SSFSTS_CTL_SCGO;
+- preop = readw(ispi->sregs + PREOP_OPTYPE);
+- if (preop) {
+- val |= SSFSTS_CTL_ACS;
+- if (preop >> 8)
+- val |= SSFSTS_CTL_SPOP;
++ if (atomic_preopcode) {
++ u16 preop;
++
++ switch (optype) {
++ case OPTYPE_WRITE_NO_ADDR:
++ case OPTYPE_WRITE_WITH_ADDR:
++ /* Pick matching preopcode for the atomic sequence */
++ preop = readw(ispi->sregs + PREOP_OPTYPE);
++ if ((preop & 0xff) == atomic_preopcode)
++ ; /* Do nothing */
++ else if ((preop >> 8) == atomic_preopcode)
++ val |= SSFSTS_CTL_SPOP;
++ else
++ return -EINVAL;
++
++ /* Enable atomic sequence */
++ val |= SSFSTS_CTL_ACS;
++ break;
++
++ default:
++ return -EINVAL;
++ }
++
+ }
+ writel(val, ispi->sregs + SSFSTS_CTL);
+
+@@ -538,13 +565,31 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+
+ /*
+ * This is handled with atomic operation and preop code in Intel
+- * controller so skip it here now. If the controller is not locked,
+- * program the opcode to the PREOP register for later use.
++ * controller so we only verify that it is available. If the
++ * controller is not locked, program the opcode to the PREOP
++ * register for later use.
++ *
++	 * When the hardware sequencer is used there is no need to program
++ * any opcodes (it handles them automatically as part of a command).
+ */
+ if (opcode == SPINOR_OP_WREN) {
+- if (!ispi->locked)
++ u16 preop;
++
++ if (!ispi->swseq_reg)
++ return 0;
++
++ preop = readw(ispi->sregs + PREOP_OPTYPE);
++ if ((preop & 0xff) != opcode && (preop >> 8) != opcode) {
++ if (ispi->locked)
++ return -EINVAL;
+ writel(opcode, ispi->sregs + PREOP_OPTYPE);
++ }
+
++ /*
++		 * This enables the atomic sequence on the next SW cycle. Will
++ * be cleared after next operation.
++ */
++ ispi->atomic_preopcode = opcode;
+ return 0;
+ }
+
+@@ -569,6 +614,13 @@ static ssize_t intel_spi_read(struct spi_nor *nor, loff_t from, size_t len,
+ u32 val, status;
+ ssize_t ret;
+
++ /*
++ * Atomic sequence is not expected with HW sequencer reads. Make
++ * sure it is cleared regardless.
++ */
++ if (WARN_ON_ONCE(ispi->atomic_preopcode))
++ ispi->atomic_preopcode = 0;
++
+ switch (nor->read_opcode) {
+ case SPINOR_OP_READ:
+ case SPINOR_OP_READ_FAST:
+@@ -627,6 +679,9 @@ static ssize_t intel_spi_write(struct spi_nor *nor, loff_t to, size_t len,
+ u32 val, status;
+ ssize_t ret;
+
++ /* Not needed with HW sequencer write, make sure it is cleared */
++ ispi->atomic_preopcode = 0;
++
+ while (len > 0) {
+ block_size = min_t(size_t, len, INTEL_SPI_FIFO_SZ);
+
+@@ -707,6 +762,9 @@ static int intel_spi_erase(struct spi_nor *nor, loff_t offs)
+ return 0;
+ }
+
++ /* Not needed with HW sequencer erase, make sure it is cleared */
++ ispi->atomic_preopcode = 0;
++
+ while (len > 0) {
+ writel(offs, ispi->base + FADDR);
+
+diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
+index 753494e042d5..74425af840d6 100644
+--- a/drivers/mtd/ubi/build.c
++++ b/drivers/mtd/ubi/build.c
+@@ -1091,6 +1091,9 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
+ if (ubi->bgt_thread)
+ kthread_stop(ubi->bgt_thread);
+
++#ifdef CONFIG_MTD_UBI_FASTMAP
++ cancel_work_sync(&ubi->fm_work);
++#endif
+ ubi_debugfs_exit_dev(ubi);
+ uif_close(ubi);
+
+diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
+index 250e30fac61b..593a4f9d97e3 100644
+--- a/drivers/mtd/ubi/eba.c
++++ b/drivers/mtd/ubi/eba.c
+@@ -490,6 +490,82 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
+ return err;
+ }
+
++#ifdef CONFIG_MTD_UBI_FASTMAP
++/**
++ * check_mapping - check and fixup a mapping
++ * @ubi: UBI device description object
++ * @vol: volume description object
++ * @lnum: logical eraseblock number
++ * @pnum: physical eraseblock number
++ *
++ * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
++ * operations; if such an operation is interrupted, the mapping still looks
++ * good, but upon first read an ECC error is reported to the upper layer.
++ * Normally during the full scan at attach time this is fixed; for Fastmap
++ * we have to deal with it while reading.
++ * If the PEB behind a LEB shows this symptom, we change the mapping to
++ * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
++ *
++ * Returns 0 on success, negative error code in case of failure.
++ */
++static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
++ int *pnum)
++{
++ int err;
++ struct ubi_vid_io_buf *vidb;
++
++ if (!ubi->fast_attach)
++ return 0;
++
++ vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
++ if (!vidb)
++ return -ENOMEM;
++
++ err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
++ if (err > 0 && err != UBI_IO_BITFLIPS) {
++ int torture = 0;
++
++ switch (err) {
++ case UBI_IO_FF:
++ case UBI_IO_FF_BITFLIPS:
++ case UBI_IO_BAD_HDR:
++ case UBI_IO_BAD_HDR_EBADMSG:
++ break;
++ default:
++ ubi_assert(0);
++ }
++
++ if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
++ torture = 1;
++
++ down_read(&ubi->fm_eba_sem);
++ vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
++ up_read(&ubi->fm_eba_sem);
++ ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
++
++ *pnum = UBI_LEB_UNMAPPED;
++ } else if (err < 0) {
++ ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
++ *pnum, err);
++
++ goto out_free;
++ }
++
++ err = 0;
++
++out_free:
++ ubi_free_vid_buf(vidb);
++
++ return err;
++}
++#else
++static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
++ int *pnum)
++{
++ return 0;
++}
++#endif
++
+ /**
+ * ubi_eba_read_leb - read data.
+ * @ubi: UBI device description object
+@@ -522,7 +598,13 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ return err;
+
+ pnum = vol->eba_tbl->entries[lnum].pnum;
+- if (pnum < 0) {
++ if (pnum >= 0) {
++ err = check_mapping(ubi, vol, lnum, &pnum);
++ if (err < 0)
++ goto out_unlock;
++ }
++
++ if (pnum == UBI_LEB_UNMAPPED) {
+ /*
+ * The logical eraseblock is not mapped, fill the whole buffer
+ * with 0xFF bytes. The exception is static volumes for which
+@@ -930,6 +1012,12 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
+ return err;
+
+ pnum = vol->eba_tbl->entries[lnum].pnum;
++ if (pnum >= 0) {
++ err = check_mapping(ubi, vol, lnum, &pnum);
++ if (err < 0)
++ goto out;
++ }
++
+ if (pnum >= 0) {
+ dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
+ len, offset, vol_id, lnum, pnum);
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 2052a647220e..f66b3b22f328 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -1505,6 +1505,7 @@ int ubi_thread(void *u)
+ }
+
+ dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
++ ubi->thread_enabled = 0;
+ return 0;
+ }
+
+@@ -1514,9 +1515,6 @@ int ubi_thread(void *u)
+ */
+ static void shutdown_work(struct ubi_device *ubi)
+ {
+-#ifdef CONFIG_MTD_UBI_FASTMAP
+- flush_work(&ubi->fm_work);
+-#endif
+ while (!list_empty(&ubi->works)) {
+ struct ubi_work *wrk;
+
+diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
+index 38828ab77eb9..1480c094b57d 100644
+--- a/drivers/net/ethernet/ti/davinci_emac.c
++++ b/drivers/net/ethernet/ti/davinci_emac.c
+@@ -1385,6 +1385,11 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
+ return -EOPNOTSUPP;
+ }
+
++static int match_first_device(struct device *dev, void *data)
++{
++ return !strncmp(dev_name(dev), "davinci_mdio", 12);
++}
++
+ /**
+ * emac_dev_open - EMAC device open
+ * @ndev: The DaVinci EMAC network adapter
+@@ -1484,8 +1489,14 @@ static int emac_dev_open(struct net_device *ndev)
+
+ /* use the first phy on the bus if pdata did not give us a phy id */
+ if (!phydev && !priv->phy_id) {
+- phy = bus_find_device_by_name(&mdio_bus_type, NULL,
+- "davinci_mdio");
++ /* NOTE: we can't use bus_find_device_by_name() here because
++ * the device name is not guaranteed to be 'davinci_mdio'. On
++ * some systems it can be 'davinci_mdio.0' so we need to use
++ * strncmp() against the first part of the string to correctly
++ * match it.
++ */
++ phy = bus_find_device(&mdio_bus_type, NULL, NULL,
++ match_first_device);
+ if (phy) {
+ priv->phy_id = dev_name(phy);
+ if (!priv->phy_id || !*priv->phy_id)
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index a64023690cad..b9e0d30e317a 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -566,14 +566,18 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
+ {
+ struct device *dev = disk_to_dev(disk)->parent;
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+- const char *pol = nd_region->ro ? "only" : "write";
++ int disk_ro = get_disk_ro(disk);
+
+- if (nd_region->ro == get_disk_ro(disk))
++ /*
++	 * Upgrade to read-only if the region is read-only; preserve as
++ * read-only if the disk is already read-only.
++ */
++ if (disk_ro || nd_region->ro == disk_ro)
+ return 0;
+
+- dev_info(dev, "%s read-%s, marking %s read-%s\n",
+- dev_name(&nd_region->dev), pol, disk->disk_name, pol);
+- set_disk_ro(disk, nd_region->ro);
++ dev_info(dev, "%s read-only, marking %s read-only\n",
++ dev_name(&nd_region->dev), disk->disk_name);
++ set_disk_ro(disk, 1);
+
+ return 0;
+
+diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
+index 9d714926ecf5..d7193c4a6ee2 100644
+--- a/drivers/nvdimm/pmem.c
++++ b/drivers/nvdimm/pmem.c
+@@ -299,7 +299,7 @@ static int pmem_attach_disk(struct device *dev,
+ {
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct nd_region *nd_region = to_nd_region(dev->parent);
+- int nid = dev_to_node(dev), fua, wbc;
++ int nid = dev_to_node(dev), fua;
+ struct resource *res = &nsio->res;
+ struct resource bb_res;
+ struct nd_pfn *nd_pfn = NULL;
+@@ -335,7 +335,6 @@ static int pmem_attach_disk(struct device *dev,
+ dev_warn(dev, "unable to guarantee persistence of writes\n");
+ fua = 0;
+ }
+- wbc = nvdimm_has_cache(nd_region);
+
+ if (!devm_request_mem_region(dev, res->start, resource_size(res),
+ dev_name(&ndns->dev))) {
+@@ -382,13 +381,14 @@ static int pmem_attach_disk(struct device *dev,
+ return PTR_ERR(addr);
+ pmem->virt_addr = addr;
+
+- blk_queue_write_cache(q, wbc, fua);
++ blk_queue_write_cache(q, true, fua);
+ blk_queue_make_request(q, pmem_make_request);
+ blk_queue_physical_block_size(q, PAGE_SIZE);
+ blk_queue_logical_block_size(q, pmem_sector_size(ndns));
+ blk_queue_max_hw_sectors(q, UINT_MAX);
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+- blk_queue_flag_set(QUEUE_FLAG_DAX, q);
++ if (pmem->pfn_flags & PFN_MAP)
++ blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+ q->queuedata = pmem;
+
+ disk = alloc_disk_node(0, nid);
+@@ -413,7 +413,7 @@ static int pmem_attach_disk(struct device *dev,
+ put_disk(disk);
+ return -ENOMEM;
+ }
+- dax_write_cache(dax_dev, wbc);
++ dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
+ pmem->dax_dev = dax_dev;
+
+ gendev = disk_to_dev(disk);
+diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
+index a612be6f019d..ec3543b83330 100644
+--- a/drivers/nvdimm/region_devs.c
++++ b/drivers/nvdimm/region_devs.c
+@@ -1132,7 +1132,8 @@ EXPORT_SYMBOL_GPL(nvdimm_has_flush);
+
+ int nvdimm_has_cache(struct nd_region *nd_region)
+ {
+- return is_nd_pmem(&nd_region->dev);
++ return is_nd_pmem(&nd_region->dev) &&
++ !test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
+ }
+ EXPORT_SYMBOL_GPL(nvdimm_has_cache);
+
+diff --git a/drivers/of/platform.c b/drivers/of/platform.c
+index c00d81dfac0b..9c91f97ffbe1 100644
+--- a/drivers/of/platform.c
++++ b/drivers/of/platform.c
+@@ -537,6 +537,9 @@ int of_platform_device_destroy(struct device *dev, void *data)
+ if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS))
+ device_for_each_child(dev, NULL, of_platform_device_destroy);
+
++ of_node_clear_flag(dev->of_node, OF_POPULATED);
++ of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
++
+ if (dev->bus == &platform_bus_type)
+ platform_device_unregister(to_platform_device(dev));
+ #ifdef CONFIG_ARM_AMBA
+@@ -544,8 +547,6 @@ int of_platform_device_destroy(struct device *dev, void *data)
+ amba_device_unregister(to_amba_device(dev));
+ #endif
+
+- of_node_clear_flag(dev->of_node, OF_POPULATED);
+- of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(of_platform_device_destroy);
+diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
+index 65d0b7adfcd4..7edfac6f1914 100644
+--- a/drivers/of/resolver.c
++++ b/drivers/of/resolver.c
+@@ -122,6 +122,11 @@ static int update_usages_of_a_phandle_reference(struct device_node *overlay,
+ goto err_fail;
+ }
+
++ if (offset < 0 || offset + sizeof(__be32) > prop->length) {
++ err = -EINVAL;
++ goto err_fail;
++ }
++
+ *(__be32 *)(prop->value + offset) = cpu_to_be32(phandle);
+ }
+
+diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
+index 6bb37c18292a..ecee50d10d14 100644
+--- a/drivers/of/unittest.c
++++ b/drivers/of/unittest.c
+@@ -165,20 +165,20 @@ static void __init of_unittest_dynamic(void)
+ /* Add a new property - should pass*/
+ prop->name = "new-property";
+ prop->value = "new-property-data";
+- prop->length = strlen(prop->value);
++ prop->length = strlen(prop->value) + 1;
+ unittest(of_add_property(np, prop) == 0, "Adding a new property failed\n");
+
+ /* Try to add an existing property - should fail */
+ prop++;
+ prop->name = "new-property";
+ prop->value = "new-property-data-should-fail";
+- prop->length = strlen(prop->value);
++ prop->length = strlen(prop->value) + 1;
+ unittest(of_add_property(np, prop) != 0,
+ "Adding an existing property should have failed\n");
+
+ /* Try to modify an existing property - should pass */
+ prop->value = "modify-property-data-should-pass";
+- prop->length = strlen(prop->value);
++ prop->length = strlen(prop->value) + 1;
+ unittest(of_update_property(np, prop) == 0,
+ "Updating an existing property should have passed\n");
+
+@@ -186,7 +186,7 @@ static void __init of_unittest_dynamic(void)
+ prop++;
+ prop->name = "modify-property";
+ prop->value = "modify-missing-property-data-should-pass";
+- prop->length = strlen(prop->value);
++ prop->length = strlen(prop->value) + 1;
+ unittest(of_update_property(np, prop) == 0,
+ "Updating a missing property should have passed\n");
+
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 92fa94a6dcc1..9c3f5e3df232 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -591,7 +591,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
+ }
+
+ /* Scaling up? Scale voltage before frequency */
+- if (freq > old_freq) {
++ if (freq >= old_freq) {
+ ret = _set_opp_voltage(dev, reg, new_supply);
+ if (ret)
+ goto restore_voltage;
+diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
+index c75199538c05..da4b457a14e0 100644
+--- a/drivers/pci/host/pci-hyperv.c
++++ b/drivers/pci/host/pci-hyperv.c
+@@ -1596,17 +1596,6 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
+ get_pcichild(hpdev, hv_pcidev_ref_childlist);
+ spin_lock_irqsave(&hbus->device_list_lock, flags);
+
+- /*
+- * When a device is being added to the bus, we set the PCI domain
+- * number to be the device serial number, which is non-zero and
+- * unique on the same VM. The serial numbers start with 1, and
+- * increase by 1 for each device. So device names including this
+- * can have shorter names than based on the bus instance UUID.
+- * Only the first device serial number is used for domain, so the
+- * domain number will not change after the first device is added.
+- */
+- if (list_empty(&hbus->children))
+- hbus->sysdata.domain = desc->ser;
+ list_add_tail(&hpdev->list_entry, &hbus->children);
+ spin_unlock_irqrestore(&hbus->device_list_lock, flags);
+ return hpdev;
+diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
+index 88e917c9120f..5f892065585e 100644
+--- a/drivers/pci/hotplug/pciehp.h
++++ b/drivers/pci/hotplug/pciehp.h
+@@ -121,7 +121,7 @@ struct controller *pcie_init(struct pcie_device *dev);
+ int pcie_init_notification(struct controller *ctrl);
+ int pciehp_enable_slot(struct slot *p_slot);
+ int pciehp_disable_slot(struct slot *p_slot);
+-void pcie_enable_notification(struct controller *ctrl);
++void pcie_reenable_notification(struct controller *ctrl);
+ int pciehp_power_on_slot(struct slot *slot);
+ void pciehp_power_off_slot(struct slot *slot);
+ void pciehp_get_power_status(struct slot *slot, u8 *status);
+diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
+index 332b723ff9e6..44a6a63802d5 100644
+--- a/drivers/pci/hotplug/pciehp_core.c
++++ b/drivers/pci/hotplug/pciehp_core.c
+@@ -283,7 +283,7 @@ static int pciehp_resume(struct pcie_device *dev)
+ ctrl = get_service_data(dev);
+
+ /* reinitialize the chipset's event detection logic */
+- pcie_enable_notification(ctrl);
++ pcie_reenable_notification(ctrl);
+
+ slot = ctrl->slot;
+
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 18a42f8f5dc5..98ea75aa32c7 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -659,7 +659,7 @@ static irqreturn_t pcie_isr(int irq, void *dev_id)
+ return handled;
+ }
+
+-void pcie_enable_notification(struct controller *ctrl)
++static void pcie_enable_notification(struct controller *ctrl)
+ {
+ u16 cmd, mask;
+
+@@ -697,6 +697,17 @@ void pcie_enable_notification(struct controller *ctrl)
+ pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
+ }
+
++void pcie_reenable_notification(struct controller *ctrl)
++{
++ /*
++ * Clear both Presence and Data Link Layer Changed to make sure
++ * those events still fire after we have re-enabled them.
++ */
++ pcie_capability_write_word(ctrl->pcie->port, PCI_EXP_SLTSTA,
++ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC);
++ pcie_enable_notification(ctrl);
++}
++
+ static void pcie_disable_notification(struct controller *ctrl)
+ {
+ u16 mask;
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index b9a131137e64..c816b0683a82 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -753,10 +753,11 @@ static int pci_pm_suspend(struct device *dev)
+ * better to resume the device from runtime suspend here.
+ */
+ if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) ||
+- !pci_dev_keep_suspended(pci_dev))
++ !pci_dev_keep_suspended(pci_dev)) {
+ pm_runtime_resume(dev);
++ pci_dev->state_saved = false;
++ }
+
+- pci_dev->state_saved = false;
+ if (pm->suspend) {
+ pci_power_t prev = pci_dev->current_state;
+ int error;
+diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
+index ac91b6fd0bcd..73ac02796ba9 100644
+--- a/drivers/pci/probe.c
++++ b/drivers/pci/probe.c
+@@ -2638,7 +2638,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
+ for_each_pci_bridge(dev, bus) {
+ cmax = max;
+ max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
+- used_buses += cmax - max;
++
++ /*
++ * Reserve one bus for each bridge now to avoid extending
++ * hotplug bridges too much during the second scan below.
++ */
++ used_buses++;
++ if (cmax - max > 1)
++ used_buses += cmax - max - 1;
+ }
+
+ /* Scan bridges that need to be reconfigured */
+@@ -2661,12 +2668,14 @@ static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
+ * bridges if any.
+ */
+ buses = available_buses / hotplug_bridges;
+- buses = min(buses, available_buses - used_buses);
++ buses = min(buses, available_buses - used_buses + 1);
+ }
+
+ cmax = max;
+ max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
+- used_buses += max - cmax;
++		/* One bus is already accounted for, so don't add it again */
++ if (max - cmax > 1)
++ used_buses += max - cmax - 1;
+ }
+
+ /*
+diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
+index 2990ad1e7c99..785a29ba4f51 100644
+--- a/drivers/pci/quirks.c
++++ b/drivers/pci/quirks.c
+@@ -4230,11 +4230,29 @@ static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
+ * 0xa290-0xa29f PCI Express Root port #{0-16}
+ * 0xa2e7-0xa2ee PCI Express Root port #{17-24}
+ *
++ * Mobile chipsets are also affected, 7th & 8th Generation
++ * Specification update confirms ACS errata 22, status no fix: (7th Generation
++ * Intel Processor Family I/O for U/Y Platforms and 8th Generation Intel
++ * Processor Family I/O for U Quad Core Platforms Specification Update,
++ * August 2017, Revision 002, Document#: 334660-002)[6]
++ * Device IDs from I/O datasheet: (7th Generation Intel Processor Family I/O
++ * for U/Y Platforms and 8th Generation Intel® Processor Family I/O for U
++ * Quad Core Platforms, Vol 1 of 2, August 2017, Document#: 334658-003)[7]
++ *
++ * 0x9d10-0x9d1b PCI Express Root port #{1-12}
++ *
++ * The 300 series chipset suffers from the same bug so include those root
++ * ports here as well.
++ *
++ * 0xa32c-0xa343 PCI Express Root port #{0-24}
++ *
+ * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
+ * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
+ * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
+ * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html
+ * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html
++ * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html
++ * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html
+ */
+ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
+ {
+@@ -4244,6 +4262,8 @@ static bool pci_quirk_intel_spt_pch_acs_match(struct pci_dev *dev)
+ switch (dev->device) {
+ case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
+ case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
++ case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
++ case 0xa32c ... 0xa343: /* 300 series */
+ return true;
+ }
+
+diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
+index b601039d6c69..c4aa411f5935 100644
+--- a/drivers/pinctrl/devicetree.c
++++ b/drivers/pinctrl/devicetree.c
+@@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
+ }
+
+ static int dt_to_map_one_config(struct pinctrl *p,
+- struct pinctrl_dev *pctldev,
++ struct pinctrl_dev *hog_pctldev,
+ const char *statename,
+ struct device_node *np_config)
+ {
++ struct pinctrl_dev *pctldev = NULL;
+ struct device_node *np_pctldev;
+ const struct pinctrl_ops *ops;
+ int ret;
+@@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
+ return -EPROBE_DEFER;
+ }
+ /* If we're creating a hog we can use the passed pctldev */
+- if (pctldev && (np_pctldev == p->dev->of_node))
++ if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
++ pctldev = hog_pctldev;
+ break;
++ }
+ pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
+ if (pctldev)
+ break;
+diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+index 5b63248c8209..7bef929bd7fe 100644
+--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
++++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+@@ -679,12 +679,13 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
+ writel(1 << hwirq,
+ info->base +
+ IRQ_STATUS + 4 * i);
+- continue;
++ goto update_status;
+ }
+ }
+
+ generic_handle_irq(virq);
+
++update_status:
+ /* Update status in case a new IRQ appears */
+ spin_lock_irqsave(&info->irq_lock, flags);
+ status = readl_relaxed(info->base +
+diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+index 90c274490181..4f4ae66a0ee3 100644
+--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
++++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
+@@ -105,12 +105,12 @@ static const struct samsung_pin_bank_data s5pv210_pin_bank[] __initconst = {
+ EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
+ EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40),
+- EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
+ EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44),
+ EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48),
+ EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50),
+ EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54),
++ EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"),
+@@ -630,7 +630,6 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
+ EXYNOS_PIN_BANK_EINTG(4, 0x100, "gpc3", 0x20),
+ EXYNOS_PIN_BANK_EINTG(7, 0x120, "gpc1", 0x24),
+ EXYNOS_PIN_BANK_EINTG(7, 0x140, "gpc2", 0x28),
+- EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpd1", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x1A0, "gpe0", 0x30),
+ EXYNOS_PIN_BANK_EINTG(2, 0x1C0, "gpe1", 0x34),
+@@ -641,6 +640,7 @@ static const struct samsung_pin_bank_data exynos5410_pin_banks0[] __initconst =
+ EXYNOS_PIN_BANK_EINTG(2, 0x260, "gpg2", 0x48),
+ EXYNOS_PIN_BANK_EINTG(4, 0x280, "gph0", 0x4c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2A0, "gph1", 0x50),
++ EXYNOS_PIN_BANK_EINTN(2, 0x160, "gpm5"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x2C0, "gpm7"),
+ EXYNOS_PIN_BANK_EINTN(6, 0x2E0, "gpy0"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x300, "gpy1"),
+diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c
+index 3682e1539251..31c8b8c49e45 100644
+--- a/drivers/platform/chrome/cros_ec_lpc.c
++++ b/drivers/platform/chrome/cros_ec_lpc.c
+@@ -435,7 +435,13 @@ static int __init cros_ec_lpc_init(void)
+ int ret;
+ acpi_status status;
+
+- if (!dmi_check_system(cros_ec_lpc_dmi_table)) {
++ status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
++ &cros_ec_lpc_acpi_device_found, NULL);
++ if (ACPI_FAILURE(status))
++ pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
++
++ if (!cros_ec_lpc_acpi_device_found &&
++ !dmi_check_system(cros_ec_lpc_dmi_table)) {
+ pr_err(DRV_NAME ": unsupported system.\n");
+ return -ENODEV;
+ }
+@@ -450,11 +456,6 @@ static int __init cros_ec_lpc_init(void)
+ return ret;
+ }
+
+- status = acpi_get_devices(ACPI_DRV_NAME, cros_ec_lpc_parse_device,
+- &cros_ec_lpc_acpi_device_found, NULL);
+- if (ACPI_FAILURE(status))
+- pr_warn(DRV_NAME ": Looking for %s failed\n", ACPI_DRV_NAME);
+-
+ if (!cros_ec_lpc_acpi_device_found) {
+ /* Register the device, and it'll get hooked up automatically */
+ ret = platform_device_register(&cros_ec_lpc_device);
+diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
+index 5d6ed1507d29..5561b9e190f8 100644
+--- a/drivers/pwm/pwm-lpss-platform.c
++++ b/drivers/pwm/pwm-lpss-platform.c
+@@ -74,6 +74,10 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev)
+ return pwm_lpss_remove(lpwm);
+ }
+
++static SIMPLE_DEV_PM_OPS(pwm_lpss_platform_pm_ops,
++ pwm_lpss_suspend,
++ pwm_lpss_resume);
++
+ static const struct acpi_device_id pwm_lpss_acpi_match[] = {
+ { "80860F09", (unsigned long)&pwm_lpss_byt_info },
+ { "80862288", (unsigned long)&pwm_lpss_bsw_info },
+@@ -86,6 +90,7 @@ static struct platform_driver pwm_lpss_driver_platform = {
+ .driver = {
+ .name = "pwm-lpss",
+ .acpi_match_table = pwm_lpss_acpi_match,
++ .pm = &pwm_lpss_platform_pm_ops,
+ },
+ .probe = pwm_lpss_probe_platform,
+ .remove = pwm_lpss_remove_platform,
+diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
+index 8db0d40ccacd..4721a264bac2 100644
+--- a/drivers/pwm/pwm-lpss.c
++++ b/drivers/pwm/pwm-lpss.c
+@@ -32,10 +32,13 @@
+ /* Size of each PWM register space if multiple */
+ #define PWM_SIZE 0x400
+
++#define MAX_PWMS 4
++
+ struct pwm_lpss_chip {
+ struct pwm_chip chip;
+ void __iomem *regs;
+ const struct pwm_lpss_boardinfo *info;
++ u32 saved_ctrl[MAX_PWMS];
+ };
+
+ static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
+@@ -177,6 +180,9 @@ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
+ unsigned long c;
+ int ret;
+
++ if (WARN_ON(info->npwm > MAX_PWMS))
++ return ERR_PTR(-ENODEV);
++
+ lpwm = devm_kzalloc(dev, sizeof(*lpwm), GFP_KERNEL);
+ if (!lpwm)
+ return ERR_PTR(-ENOMEM);
+@@ -212,6 +218,30 @@ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
+ }
+ EXPORT_SYMBOL_GPL(pwm_lpss_remove);
+
++int pwm_lpss_suspend(struct device *dev)
++{
++ struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
++ int i;
++
++ for (i = 0; i < lpwm->info->npwm; i++)
++ lpwm->saved_ctrl[i] = readl(lpwm->regs + i * PWM_SIZE + PWM);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(pwm_lpss_suspend);
++
++int pwm_lpss_resume(struct device *dev)
++{
++ struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
++ int i;
++
++ for (i = 0; i < lpwm->info->npwm; i++)
++ writel(lpwm->saved_ctrl[i], lpwm->regs + i * PWM_SIZE + PWM);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(pwm_lpss_resume);
++
+ MODULE_DESCRIPTION("PWM driver for Intel LPSS");
+ MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
+ MODULE_LICENSE("GPL v2");
+diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
+index 98306bb02cfe..7a4238ad1fcb 100644
+--- a/drivers/pwm/pwm-lpss.h
++++ b/drivers/pwm/pwm-lpss.h
+@@ -28,5 +28,7 @@ struct pwm_lpss_boardinfo {
+ struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
+ const struct pwm_lpss_boardinfo *info);
+ int pwm_lpss_remove(struct pwm_lpss_chip *lpwm);
++int pwm_lpss_suspend(struct device *dev);
++int pwm_lpss_resume(struct device *dev);
+
+ #endif /* __PWM_LPSS_H */
+diff --git a/drivers/remoteproc/qcom_q6v5_pil.c b/drivers/remoteproc/qcom_q6v5_pil.c
+index cbbafdcaaecb..56b14c27e275 100644
+--- a/drivers/remoteproc/qcom_q6v5_pil.c
++++ b/drivers/remoteproc/qcom_q6v5_pil.c
+@@ -761,13 +761,11 @@ static int q6v5_start(struct rproc *rproc)
+ }
+
+ /* Assign MBA image access in DDR to q6 */
+- xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
+- qproc->mba_phys,
+- qproc->mba_size);
+- if (xfermemop_ret) {
++ ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true,
++ qproc->mba_phys, qproc->mba_size);
++ if (ret) {
+ dev_err(qproc->dev,
+- "assigning Q6 access to mba memory failed: %d\n",
+- xfermemop_ret);
++ "assigning Q6 access to mba memory failed: %d\n", ret);
+ goto disable_active_clks;
+ }
+
+diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
+index 5ce9bf7b897d..f63adcd95eb0 100644
+--- a/drivers/rpmsg/qcom_smd.c
++++ b/drivers/rpmsg/qcom_smd.c
+@@ -1100,12 +1100,12 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
+ void *info;
+ int ret;
+
+- channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL);
++ channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+ if (!channel)
+ return ERR_PTR(-ENOMEM);
+
+ channel->edge = edge;
+- channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL);
++ channel->name = kstrdup(name, GFP_KERNEL);
+ if (!channel->name)
+ return ERR_PTR(-ENOMEM);
+
+@@ -1156,8 +1156,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
+ return channel;
+
+ free_name_and_channel:
+- devm_kfree(&edge->dev, channel->name);
+- devm_kfree(&edge->dev, channel);
++ kfree(channel->name);
++ kfree(channel);
+
+ return ERR_PTR(ret);
+ }
+@@ -1378,13 +1378,13 @@ static int qcom_smd_parse_edge(struct device *dev,
+ */
+ static void qcom_smd_edge_release(struct device *dev)
+ {
+- struct qcom_smd_channel *channel;
++ struct qcom_smd_channel *channel, *tmp;
+ struct qcom_smd_edge *edge = to_smd_edge(dev);
+
+- list_for_each_entry(channel, &edge->channels, list) {
+- SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED);
+- SET_RX_CHANNEL_INFO(channel, head, 0);
+- SET_RX_CHANNEL_INFO(channel, tail, 0);
++ list_for_each_entry_safe(channel, tmp, &edge->channels, list) {
++ list_del(&channel->list);
++ kfree(channel->name);
++ kfree(channel);
+ }
+
+ kfree(edge);
+diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
+index 2e6fb275acc8..2cd5a7b1a2e3 100644
+--- a/drivers/rtc/rtc-sun6i.c
++++ b/drivers/rtc/rtc-sun6i.c
+@@ -74,7 +74,7 @@
+ #define SUN6I_ALARM_CONFIG_WAKEUP BIT(0)
+
+ #define SUN6I_LOSC_OUT_GATING 0x0060
+-#define SUN6I_LOSC_OUT_GATING_EN BIT(0)
++#define SUN6I_LOSC_OUT_GATING_EN_OFFSET 0
+
+ /*
+ * Get date values
+@@ -255,7 +255,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node)
+ &clkout_name);
+ rtc->ext_losc = clk_register_gate(NULL, clkout_name, rtc->hw.init->name,
+ 0, rtc->base + SUN6I_LOSC_OUT_GATING,
+- SUN6I_LOSC_OUT_GATING_EN, 0,
++ SUN6I_LOSC_OUT_GATING_EN_OFFSET, 0,
+ &rtc->lock);
+ if (IS_ERR(rtc->ext_losc)) {
+ pr_crit("Couldn't register the LOSC external gate\n");
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index 18c4f933e8b9..b415ba42ca73 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -664,6 +664,46 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+ spin_unlock_irqrestore(&dbf->scsi_lock, flags);
+ }
+
++/**
++ * zfcp_dbf_scsi_eh() - Trace event for special cases of scsi_eh callbacks.
++ * @tag: Identifier for event.
++ * @adapter: Pointer to zfcp adapter as context for this event.
++ * @scsi_id: SCSI ID/target to indicate scope of task management function (TMF).
++ * @ret: Return value of calling function.
++ *
++ * This SCSI trace variant does not depend on any of:
++ * scsi_cmnd, zfcp_fsf_req, scsi_device.
++ */
++void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
++ unsigned int scsi_id, int ret)
++{
++ struct zfcp_dbf *dbf = adapter->dbf;
++ struct zfcp_dbf_scsi *rec = &dbf->scsi_buf;
++ unsigned long flags;
++ static int const level = 1;
++
++ if (unlikely(!debug_level_enabled(adapter->dbf->scsi, level)))
++ return;
++
++ spin_lock_irqsave(&dbf->scsi_lock, flags);
++ memset(rec, 0, sizeof(*rec));
++
++ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
++ rec->id = ZFCP_DBF_SCSI_CMND;
++ rec->scsi_result = ret; /* re-use field, int is 4 bytes and fits */
++ rec->scsi_retries = ~0;
++ rec->scsi_allowed = ~0;
++ rec->fcp_rsp_info = ~0;
++ rec->scsi_id = scsi_id;
++ rec->scsi_lun = (u32)ZFCP_DBF_INVALID_LUN;
++ rec->scsi_lun_64_hi = (u32)(ZFCP_DBF_INVALID_LUN >> 32);
++ rec->host_scribble = ~0;
++ memset(rec->scsi_opcode, 0xff, ZFCP_DBF_SCSI_OPCODE);
++
++ debug_event(dbf->scsi, level, rec, sizeof(*rec));
++ spin_unlock_irqrestore(&dbf->scsi_lock, flags);
++}
++
+ static debug_info_t *zfcp_dbf_reg(const char *name, int size, int rec_size)
+ {
+ struct debug_info *d;
+diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
+index 1d91a32db08e..69dfb328dba4 100644
+--- a/drivers/s390/scsi/zfcp_erp.c
++++ b/drivers/s390/scsi/zfcp_erp.c
+@@ -35,11 +35,28 @@ enum zfcp_erp_steps {
+ ZFCP_ERP_STEP_LUN_OPENING = 0x2000,
+ };
+
++/**
++ * enum zfcp_erp_act_type - Type of ERP action object.
++ * @ZFCP_ERP_ACTION_REOPEN_LUN: LUN recovery.
++ * @ZFCP_ERP_ACTION_REOPEN_PORT: Port recovery.
++ * @ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: Forced port recovery.
++ * @ZFCP_ERP_ACTION_REOPEN_ADAPTER: Adapter recovery.
++ * @ZFCP_ERP_ACTION_NONE: Eyecatcher pseudo flag to bitwise or-combine with
++ * either of the first four enum values.
++ * Used to indicate that an ERP action could not be
++ * set up despite a detected need for some recovery.
++ * @ZFCP_ERP_ACTION_FAILED: Eyecatcher pseudo flag to bitwise or-combine with
++ * either of the first four enum values.
++ * Used to indicate that ERP is not needed because
++ * the object has ZFCP_STATUS_COMMON_ERP_FAILED.
++ */
+ enum zfcp_erp_act_type {
+ ZFCP_ERP_ACTION_REOPEN_LUN = 1,
+ ZFCP_ERP_ACTION_REOPEN_PORT = 2,
+ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
+ ZFCP_ERP_ACTION_REOPEN_ADAPTER = 4,
++ ZFCP_ERP_ACTION_NONE = 0xc0,
++ ZFCP_ERP_ACTION_FAILED = 0xe0,
+ };
+
+ enum zfcp_erp_act_state {
+@@ -126,6 +143,49 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
+ }
+ }
+
++static int zfcp_erp_handle_failed(int want, struct zfcp_adapter *adapter,
++ struct zfcp_port *port,
++ struct scsi_device *sdev)
++{
++ int need = want;
++ struct zfcp_scsi_dev *zsdev;
++
++ switch (want) {
++ case ZFCP_ERP_ACTION_REOPEN_LUN:
++ zsdev = sdev_to_zfcp(sdev);
++ if (atomic_read(&zsdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
++ need = 0;
++ break;
++ case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
++ if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
++ need = 0;
++ break;
++ case ZFCP_ERP_ACTION_REOPEN_PORT:
++ if (atomic_read(&port->status) &
++ ZFCP_STATUS_COMMON_ERP_FAILED) {
++ need = 0;
++ /* ensure propagation of failed status to new devices */
++ zfcp_erp_set_port_status(
++ port, ZFCP_STATUS_COMMON_ERP_FAILED);
++ }
++ break;
++ case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
++ if (atomic_read(&adapter->status) &
++ ZFCP_STATUS_COMMON_ERP_FAILED) {
++ need = 0;
++ /* ensure propagation of failed status to new devices */
++ zfcp_erp_set_adapter_status(
++ adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
++ }
++ break;
++ default:
++ need = 0;
++ break;
++ }
++
++ return need;
++}
++
+ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+ struct scsi_device *sdev)
+@@ -249,16 +309,27 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+ int retval = 1, need;
+ struct zfcp_erp_action *act;
+
+- if (!adapter->erp_thread)
+- return -EIO;
++ need = zfcp_erp_handle_failed(want, adapter, port, sdev);
++ if (!need) {
++ need = ZFCP_ERP_ACTION_FAILED; /* marker for trace */
++ goto out;
++ }
++
++ if (!adapter->erp_thread) {
++ need = ZFCP_ERP_ACTION_NONE; /* marker for trace */
++ retval = -EIO;
++ goto out;
++ }
+
+ need = zfcp_erp_required_act(want, adapter, port, sdev);
+ if (!need)
+ goto out;
+
+ act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
+- if (!act)
++ if (!act) {
++ need |= ZFCP_ERP_ACTION_NONE; /* marker for trace */
+ goto out;
++ }
+ atomic_or(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
+ ++adapter->erp_total_count;
+ list_add_tail(&act->list, &adapter->erp_ready_head);
+@@ -269,18 +340,32 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
+ return retval;
+ }
+
++void zfcp_erp_port_forced_no_port_dbf(char *id, struct zfcp_adapter *adapter,
++ u64 port_name, u32 port_id)
++{
++ unsigned long flags;
++ static /* don't waste stack */ struct zfcp_port tmpport;
++
++ write_lock_irqsave(&adapter->erp_lock, flags);
++ /* Stand-in zfcp port with fields just good enough for
++ * zfcp_dbf_rec_trig() and zfcp_dbf_set_common().
++ * Under lock because tmpport is static.
++ */
++ atomic_set(&tmpport.status, -1); /* unknown */
++ tmpport.wwpn = port_name;
++ tmpport.d_id = port_id;
++ zfcp_dbf_rec_trig(id, adapter, &tmpport, NULL,
++ ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
++ ZFCP_ERP_ACTION_NONE);
++ write_unlock_irqrestore(&adapter->erp_lock, flags);
++}
++
+ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
+ int clear_mask, char *id)
+ {
+ zfcp_erp_adapter_block(adapter, clear_mask);
+ zfcp_scsi_schedule_rports_block(adapter);
+
+- /* ensure propagation of failed status to new devices */
+- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+- zfcp_erp_set_adapter_status(adapter,
+- ZFCP_STATUS_COMMON_ERP_FAILED);
+- return -EIO;
+- }
+ return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
+ adapter, NULL, NULL, id, 0);
+ }
+@@ -299,12 +384,8 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear, char *id)
+ zfcp_scsi_schedule_rports_block(adapter);
+
+ write_lock_irqsave(&adapter->erp_lock, flags);
+- if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+- zfcp_erp_set_adapter_status(adapter,
+- ZFCP_STATUS_COMMON_ERP_FAILED);
+- else
+- zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
+- NULL, NULL, id, 0);
++ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
++ NULL, NULL, id, 0);
+ write_unlock_irqrestore(&adapter->erp_lock, flags);
+ }
+
+@@ -345,9 +426,6 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port, int clear,
+ zfcp_erp_port_block(port, clear);
+ zfcp_scsi_schedule_rport_block(port);
+
+- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+- return;
+-
+ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
+ port->adapter, port, NULL, id, 0);
+ }
+@@ -373,12 +451,6 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id)
+ zfcp_erp_port_block(port, clear);
+ zfcp_scsi_schedule_rport_block(port);
+
+- if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+- /* ensure propagation of failed status to new devices */
+- zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
+- return -EIO;
+- }
+-
+ return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
+ port->adapter, port, NULL, id, 0);
+ }
+@@ -418,9 +490,6 @@ static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
+
+ zfcp_erp_lun_block(sdev, clear);
+
+- if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+- return;
+-
+ zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
+ zfcp_sdev->port, sdev, id, act_status);
+ }
+diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
+index e5eed8aac0ce..65d16747c301 100644
+--- a/drivers/s390/scsi/zfcp_ext.h
++++ b/drivers/s390/scsi/zfcp_ext.h
+@@ -52,10 +52,15 @@ extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
+ extern void zfcp_dbf_scsi(char *, int, struct scsi_cmnd *,
+ struct zfcp_fsf_req *);
++extern void zfcp_dbf_scsi_eh(char *tag, struct zfcp_adapter *adapter,
++ unsigned int scsi_id, int ret);
+
+ /* zfcp_erp.c */
+ extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
+ extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
++extern void zfcp_erp_port_forced_no_port_dbf(char *id,
++ struct zfcp_adapter *adapter,
++ u64 port_name, u32 port_id);
+ extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *);
+ extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *);
+ extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 22f9562f415c..0b6f51424745 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -181,6 +181,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
+ if (abrt_req)
+ break;
+
++ zfcp_dbf_scsi_abort("abrt_wt", scpnt, NULL);
+ zfcp_erp_wait(adapter);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret) {
+@@ -277,6 +278,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+ if (fsf_req)
+ break;
+
++ zfcp_dbf_scsi_devreset("wait", scpnt, tm_flags, NULL);
+ zfcp_erp_wait(adapter);
+ ret = fc_block_scsi_eh(scpnt);
+ if (ret) {
+@@ -323,15 +325,16 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
+ {
+ struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+ struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+- int ret;
++ int ret = SUCCESS, fc_ret;
+
+ zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
+ zfcp_erp_wait(adapter);
+- ret = fc_block_scsi_eh(scpnt);
+- if (ret)
+- return ret;
++ fc_ret = fc_block_scsi_eh(scpnt);
++ if (fc_ret)
++ ret = fc_ret;
+
+- return SUCCESS;
++ zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
++ return ret;
+ }
+
+ struct scsi_transport_template *zfcp_scsi_transport_template;
+@@ -602,6 +605,11 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
+ if (port) {
+ zfcp_erp_port_forced_reopen(port, 0, "sctrpi1");
+ put_device(&port->dev);
++ } else {
++ zfcp_erp_port_forced_no_port_dbf(
++ "sctrpin", adapter,
++ rport->port_name /* zfcp_scsi_rport_register */,
++ rport->port_id /* zfcp_scsi_rport_register */);
+ }
+ }
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index 3a9eca163db8..b92f86acb8bb 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -8869,7 +8869,7 @@ static void hpsa_disable_rld_caching(struct ctlr_info *h)
+ kfree(options);
+ }
+
+-static void hpsa_shutdown(struct pci_dev *pdev)
++static void __hpsa_shutdown(struct pci_dev *pdev)
+ {
+ struct ctlr_info *h;
+
+@@ -8884,6 +8884,12 @@ static void hpsa_shutdown(struct pci_dev *pdev)
+ hpsa_disable_interrupt_mode(h); /* pci_init 2 */
+ }
+
++static void hpsa_shutdown(struct pci_dev *pdev)
++{
++ __hpsa_shutdown(pdev);
++ pci_disable_device(pdev);
++}
++
+ static void hpsa_free_device_info(struct ctlr_info *h)
+ {
+ int i;
+@@ -8927,7 +8933,7 @@ static void hpsa_remove_one(struct pci_dev *pdev)
+ scsi_remove_host(h->scsi_host); /* init_one 8 */
+ /* includes hpsa_free_irqs - init_one 4 */
+ /* includes hpsa_disable_interrupt_mode - pci_init 2 */
+- hpsa_shutdown(pdev);
++ __hpsa_shutdown(pdev);
+
+ hpsa_free_device_info(h); /* scan */
+
+diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
+index 9e914f9c3ffb..05abe5aaab7f 100644
+--- a/drivers/scsi/qla2xxx/qla_gs.c
++++ b/drivers/scsi/qla2xxx/qla_gs.c
+@@ -3915,7 +3915,6 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
+ continue;
+ fcport->scan_state = QLA_FCPORT_FOUND;
+- fcport->d_id.b24 = rp->id.b24;
+ found = true;
+ /*
+ * If device was not a fabric device before.
+@@ -3923,7 +3922,10 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
+ qla2x00_clear_loop_id(fcport);
+ fcport->flags |= FCF_FABRIC_DEVICE;
++ } else if (fcport->d_id.b24 != rp->id.b24) {
++ qlt_schedule_sess_for_deletion(fcport);
+ }
++ fcport->d_id.b24 = rp->id.b24;
+ break;
+ }
+
+diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
+index 8f55dd44adae..636960ad029a 100644
+--- a/drivers/scsi/qla2xxx/qla_init.c
++++ b/drivers/scsi/qla2xxx/qla_init.c
+@@ -5037,7 +5037,8 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
+ return;
+
+ if (fcport->fp_speed == PORT_SPEED_UNKNOWN ||
+- fcport->fp_speed > ha->link_data_rate)
++ fcport->fp_speed > ha->link_data_rate ||
++ !ha->flags.gpsc_supported)
+ return;
+
+ rval = qla2x00_set_idma_speed(vha, fcport->loop_id, fcport->fp_speed,
+diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
+index a3dc83f9444d..68560a097ae1 100644
+--- a/drivers/scsi/qla2xxx/qla_isr.c
++++ b/drivers/scsi/qla2xxx/qla_isr.c
+@@ -2494,8 +2494,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
+ ox_id = le16_to_cpu(sts24->ox_id);
+ par_sense_len = sizeof(sts24->data);
+ /* Valid values of the retry delay timer are 0x1-0xffef */
+- if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1)
+- retry_delay = sts24->retry_delay;
++ if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff1) {
++ retry_delay = sts24->retry_delay & 0x3fff;
++ ql_dbg(ql_dbg_io, sp->vha, 0x3033,
++ "%s: scope=%#x retry_delay=%#x\n", __func__,
++ sts24->retry_delay >> 14, retry_delay);
++ }
+ } else {
+ if (scsi_status & SS_SENSE_LEN_VALID)
+ sense_len = le16_to_cpu(sts->req_sense_length);
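
Note on the qla_isr.c hunk above: it keeps only the low 14 bits of sts24->retry_delay and logs the top two bits as a scope qualifier. A small standalone sketch of that bit split; the example value is illustrative, not from the firmware spec:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t raw = 0x8005;			/* illustrative status value */
	unsigned int scope = raw >> 14;		/* top two bits: scope qualifier */
	unsigned int delay = raw & 0x3fff;	/* low 14 bits: retry delay */

	printf("scope=%#x retry_delay=%#x\n", scope, delay);
	return 0;
}
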
+diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
+index 025dc2d3f3de..0266c4d07bc9 100644
+--- a/drivers/scsi/qla2xxx/qla_target.c
++++ b/drivers/scsi/qla2xxx/qla_target.c
+@@ -1230,7 +1230,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
+ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
+ {
+ struct qla_tgt *tgt = sess->tgt;
+- struct qla_hw_data *ha = sess->vha->hw;
+ unsigned long flags;
+
+ if (sess->disc_state == DSC_DELETE_PEND)
+@@ -1247,16 +1246,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
+ return;
+ }
+
+- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ if (sess->deleted == QLA_SESS_DELETED)
+ sess->logout_on_delete = 0;
+
++ spin_lock_irqsave(&sess->vha->work_lock, flags);
+ if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
++ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+ return;
+ }
+ sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
++ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
+
+ sess->disc_state = DSC_DELETE_PEND;
+
+diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
+index 656c98e116a9..e086bb63da46 100644
+--- a/drivers/scsi/scsi_debug.c
++++ b/drivers/scsi/scsi_debug.c
+@@ -5506,9 +5506,9 @@ static void __exit scsi_debug_exit(void)
+ int k = sdebug_add_host;
+
+ stop_all_queued();
+- free_all_queued();
+ for (; k; k--)
+ sdebug_remove_adapter();
++ free_all_queued();
+ driver_unregister(&sdebug_driverfs_driver);
+ bus_unregister(&pseudo_lld_bus);
+ root_device_unregister(pseudo_primary);
+diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c
+index 53efc386b1ad..df7f30a425c6 100644
+--- a/drivers/soc/rockchip/pm_domains.c
++++ b/drivers/soc/rockchip/pm_domains.c
+@@ -255,7 +255,7 @@ static void rockchip_do_pmu_set_power_domain(struct rockchip_pm_domain *pd,
+ return;
+ else if (pd->info->pwr_w_mask)
+ regmap_write(pmu->regmap, pmu->info->pwr_offset,
+- on ? pd->info->pwr_mask :
++ on ? pd->info->pwr_w_mask :
+ (pd->info->pwr_mask | pd->info->pwr_w_mask));
+ else
+ regmap_update_bits(pmu->regmap, pmu->info->pwr_offset,
+diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
+index a4d6a0e2e993..23ad4f9f2143 100644
+--- a/drivers/thermal/broadcom/bcm2835_thermal.c
++++ b/drivers/thermal/broadcom/bcm2835_thermal.c
+@@ -213,8 +213,8 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
+ rate = clk_get_rate(data->clk);
+ if ((rate < 1920000) || (rate > 5000000))
+ dev_warn(&pdev->dev,
+- "Clock %pCn running at %pCr Hz is outside of the recommended range: 1.92 to 5MHz\n",
+- data->clk, data->clk);
++ "Clock %pCn running at %lu Hz is outside of the recommended range: 1.92 to 5MHz\n",
++ data->clk, rate);
+
+ /* register of thermal sensor and get info from DT */
+ tz = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
+diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
+index a4f82ec665fe..2051a5309851 100644
+--- a/drivers/tty/serial/sh-sci.c
++++ b/drivers/tty/serial/sh-sci.c
+@@ -2890,16 +2890,15 @@ static void serial_console_write(struct console *co, const char *s,
+ unsigned long flags;
+ int locked = 1;
+
+- local_irq_save(flags);
+ #if defined(SUPPORT_SYSRQ)
+ if (port->sysrq)
+ locked = 0;
+ else
+ #endif
+ if (oops_in_progress)
+- locked = spin_trylock(&port->lock);
++ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+- spin_lock(&port->lock);
++ spin_lock_irqsave(&port->lock, flags);
+
+ /* first save SCSCR then disable interrupts, keep clock source */
+ ctrl = serial_port_in(port, SCSCR);
+@@ -2919,8 +2918,7 @@ static void serial_console_write(struct console *co, const char *s,
+ serial_port_out(port, SCSCR, ctrl);
+
+ if (locked)
+- spin_unlock(&port->lock);
+- local_irq_restore(flags);
++ spin_unlock_irqrestore(&port->lock, flags);
+ }
+
+ static int serial_console_setup(struct console *co, char *options)
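
Note on the sh-sci.c hunk above: it folds the separate local_irq_save() into the lock calls themselves, so interrupts are only disabled for as long as the lock is actually held. A minimal sketch of the resulting console-write locking pattern; my_console_write is an illustrative name, not the driver's function:

#include <linux/kernel.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>

static void my_console_write(struct uart_port *port, const char *s,
			     unsigned int count)
{
	unsigned long flags;
	int locked = 1;

	if (oops_in_progress)
		/* Don't deadlock if the lock holder crashed mid-print. */
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* ... write s[0..count) to the hardware here ... */

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}
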
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index aa9968d90a48..e3bf65e213cd 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4551,7 +4551,9 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1,
+ * reset. But only on the first attempt,
+ * lest we get into a time out/reset loop
+ */
+- if (r == 0 || (r == -ETIMEDOUT && retries == 0))
++ if (r == 0 || (r == -ETIMEDOUT &&
++ retries == 0 &&
++ udev->speed > USB_SPEED_FULL))
+ break;
+ }
+ udev->descriptor.bMaxPacketSize0 =
+diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c
+index 734a9158946b..e55304d5cf07 100644
+--- a/drivers/video/backlight/as3711_bl.c
++++ b/drivers/video/backlight/as3711_bl.c
+@@ -262,10 +262,10 @@ static int as3711_bl_register(struct platform_device *pdev,
+ static int as3711_backlight_parse_dt(struct device *dev)
+ {
+ struct as3711_bl_pdata *pdata = dev_get_platdata(dev);
+- struct device_node *bl =
+- of_find_node_by_name(dev->parent->of_node, "backlight"), *fb;
++ struct device_node *bl, *fb;
+ int ret;
+
++ bl = of_get_child_by_name(dev->parent->of_node, "backlight");
+ if (!bl) {
+ dev_dbg(dev, "backlight node not found\n");
+ return -ENODEV;
+@@ -279,7 +279,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
+ if (pdata->su1_max_uA <= 0)
+ ret = -EINVAL;
+ if (ret < 0)
+- return ret;
++ goto err_put_bl;
+ }
+
+ fb = of_parse_phandle(bl, "su2-dev", 0);
+@@ -292,7 +292,7 @@ static int as3711_backlight_parse_dt(struct device *dev)
+ if (pdata->su2_max_uA <= 0)
+ ret = -EINVAL;
+ if (ret < 0)
+- return ret;
++ goto err_put_bl;
+
+ if (of_find_property(bl, "su2-feedback-voltage", NULL)) {
+ pdata->su2_feedback = AS3711_SU2_VOLTAGE;
+@@ -314,8 +314,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
+ pdata->su2_feedback = AS3711_SU2_CURR_AUTO;
+ count++;
+ }
+- if (count != 1)
+- return -EINVAL;
++ if (count != 1) {
++ ret = -EINVAL;
++ goto err_put_bl;
++ }
+
+ count = 0;
+ if (of_find_property(bl, "su2-fbprot-lx-sd4", NULL)) {
+@@ -334,8 +336,10 @@ static int as3711_backlight_parse_dt(struct device *dev)
+ pdata->su2_fbprot = AS3711_SU2_GPIO4;
+ count++;
+ }
+- if (count != 1)
+- return -EINVAL;
++ if (count != 1) {
++ ret = -EINVAL;
++ goto err_put_bl;
++ }
+
+ count = 0;
+ if (of_find_property(bl, "su2-auto-curr1", NULL)) {
+@@ -355,11 +359,20 @@ static int as3711_backlight_parse_dt(struct device *dev)
+ * At least one su2-auto-curr* must be specified iff
+ * AS3711_SU2_CURR_AUTO is used
+ */
+- if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO))
+- return -EINVAL;
++ if (!count ^ (pdata->su2_feedback != AS3711_SU2_CURR_AUTO)) {
++ ret = -EINVAL;
++ goto err_put_bl;
++ }
+ }
+
++ of_node_put(bl);
++
+ return 0;
++
++err_put_bl:
++ of_node_put(bl);
++
++ return ret;
+ }
+
+ static int as3711_backlight_probe(struct platform_device *pdev)
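
Note on the backlight hunks (as3711, and max8925/tps65217 below): they switch to of_get_child_by_name(), which searches direct children only, and drop the node reference on every exit path. A minimal sketch of that pattern; the function and property names are made up:

#include <linux/errno.h>
#include <linux/of.h>

static int parse_backlight_child(struct device_node *parent)
{
	struct device_node *np;
	u32 val;
	int ret = 0;

	/* Search direct children only; the returned node holds a reference. */
	np = of_get_child_by_name(parent, "backlight");
	if (!np)
		return -ENODEV;

	if (of_property_read_u32(np, "some-property", &val))
		ret = -EINVAL;	/* val would be consumed here on success */

	of_node_put(np);	/* drop the reference on every path */
	return ret;
}
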
+diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
+index 7b738d60ecc2..f3aa6088f1d9 100644
+--- a/drivers/video/backlight/max8925_bl.c
++++ b/drivers/video/backlight/max8925_bl.c
+@@ -116,7 +116,7 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
+ if (!pdata)
+ return;
+
+- np = of_find_node_by_name(nproot, "backlight");
++ np = of_get_child_by_name(nproot, "backlight");
+ if (!np) {
+ dev_err(&pdev->dev, "failed to find backlight node\n");
+ return;
+@@ -125,6 +125,8 @@ static void max8925_backlight_dt_init(struct platform_device *pdev)
+ if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
+ pdata->dual_string = val;
+
++ of_node_put(np);
++
+ pdev->dev.platform_data = pdata;
+ }
+
+diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c
+index 380917c86276..762e3feed097 100644
+--- a/drivers/video/backlight/tps65217_bl.c
++++ b/drivers/video/backlight/tps65217_bl.c
+@@ -184,11 +184,11 @@ static struct tps65217_bl_pdata *
+ tps65217_bl_parse_dt(struct platform_device *pdev)
+ {
+ struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent);
+- struct device_node *node = of_node_get(tps->dev->of_node);
++ struct device_node *node;
+ struct tps65217_bl_pdata *pdata, *err;
+ u32 val;
+
+- node = of_find_node_by_name(node, "backlight");
++ node = of_get_child_by_name(tps->dev->of_node, "backlight");
+ if (!node)
+ return ERR_PTR(-ENODEV);
+
+diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
+index 73676eb0244a..c592ca513115 100644
+--- a/drivers/video/fbdev/uvesafb.c
++++ b/drivers/video/fbdev/uvesafb.c
+@@ -1044,7 +1044,8 @@ static int uvesafb_setcmap(struct fb_cmap *cmap, struct fb_info *info)
+ info->cmap.len || cmap->start < info->cmap.start)
+ return -EINVAL;
+
+- entries = kmalloc(sizeof(*entries) * cmap->len, GFP_KERNEL);
++ entries = kmalloc_array(cmap->len, sizeof(*entries),
++ GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
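
Note on the uvesafb hunk above: kmalloc_array() refuses multiplications that would wrap, which an open-coded sizeof(*entries) * cmap->len does not. A standalone sketch of the overflow check; check_mul() only mimics what kmalloc_array() does internally, it is not the kernel helper:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static int check_mul(size_t n, size_t size, size_t *bytes)
{
	if (size != 0 && n > SIZE_MAX / size)
		return -1;		/* n * size would overflow */
	*bytes = n * size;
	return 0;
}

int main(void)
{
	size_t bytes;

	if (check_mul(SIZE_MAX / 2, 16, &bytes))
		printf("overflow detected, allocation refused\n");

	if (!check_mul(1024, 16, &bytes))
		printf("would allocate %zu bytes\n", bytes);
	return 0;
}
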
+diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
+index 398d22693234..6e2a9619192d 100644
+--- a/drivers/virt/vboxguest/vboxguest_linux.c
++++ b/drivers/virt/vboxguest/vboxguest_linux.c
+@@ -121,7 +121,9 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
+ if (!buf)
+ return -ENOMEM;
+
+- if (copy_from_user(buf, (void *)arg, hdr.size_in)) {
++ *((struct vbg_ioctl_hdr *)buf) = hdr;
++ if (copy_from_user(buf + sizeof(hdr), (void *)arg + sizeof(hdr),
++ hdr.size_in - sizeof(hdr))) {
+ ret = -EFAULT;
+ goto out;
+ }
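
Note on the vboxguest hunk above: the ioctl header was already fetched and validated earlier in the function, so the kernel copy is reused and only the bytes after the header are read from userspace again, avoiding a double fetch. A minimal sketch under that assumption; the struct and function names here are illustrative, not the driver's:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct demo_hdr {
	__u32 size_in;		/* total input size, header included */
	__u32 size_out;
};

static int demo_fill(void *buf, const void __user *uarg,
		     const struct demo_hdr *hdr)
{
	/* Reuse the already-validated header instead of re-reading it. */
	memcpy(buf, hdr, sizeof(*hdr));

	/* size_in is assumed to have been checked as >= sizeof(*hdr). */
	if (copy_from_user(buf + sizeof(*hdr), uarg + sizeof(*hdr),
			   hdr->size_in - sizeof(*hdr)))
		return -EFAULT;

	return 0;
}
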
+diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
+index 80a778b02f28..caef0e0fd817 100644
+--- a/drivers/w1/w1.c
++++ b/drivers/w1/w1.c
+@@ -751,7 +751,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
+
+ /* slave modules need to be loaded in a context with unlocked mutex */
+ mutex_unlock(&dev->mutex);
+- request_module("w1-family-0x%02x", rn->family);
++ request_module("w1-family-0x%02X", rn->family);
+ mutex_lock(&dev->mutex);
+
+ spin_lock(&w1_flock);
+diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
+index 762378f1811c..08e4af04d6f2 100644
+--- a/drivers/xen/events/events_base.c
++++ b/drivers/xen/events/events_base.c
+@@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq)
+ xen_irq_info_cleanup(info);
+ }
+
+- BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
+-
+ xen_free_irq(irq);
+ }
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index 775a0f2d0b45..b54a55497216 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9475,6 +9475,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ u64 new_idx = 0;
+ u64 root_objectid;
+ int ret;
++ int ret2;
+ bool root_log_pinned = false;
+ bool dest_log_pinned = false;
+
+@@ -9671,7 +9672,8 @@ static int btrfs_rename_exchange(struct inode *old_dir,
+ dest_log_pinned = false;
+ }
+ }
+- ret = btrfs_end_transaction(trans);
++ ret2 = btrfs_end_transaction(trans);
++ ret = ret ? ret : ret2;
+ out_notrans:
+ if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
+ up_read(&fs_info->subvol_sem);
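
Note on the btrfs hunk above: the first error code is preserved even though btrfs_end_transaction() is still called and may fail itself. A standalone illustration of that error-preservation idiom; the step functions are placeholders:

#include <stdio.h>

static int do_rename(void)       { return -5;  }	/* pretend -EIO */
static int end_transaction(void) { return -22; }	/* pretend -EINVAL */

int main(void)
{
	int ret = do_rename();
	int ret2 = end_transaction();

	ret = ret ? ret : ret2;		/* keep the first failure, else take ret2 */
	printf("ret=%d\n", ret);	/* prints -5: the original error survives */
	return 0;
}
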
+diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
+index bf779461df13..2e23b953d304 100644
+--- a/fs/f2fs/checkpoint.c
++++ b/fs/f2fs/checkpoint.c
+@@ -100,8 +100,10 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index,
+ * readonly and make sure do not write checkpoint with non-uptodate
+ * meta page.
+ */
+- if (unlikely(!PageUptodate(page)))
++ if (unlikely(!PageUptodate(page))) {
++ memset(page_address(page), 0, PAGE_SIZE);
+ f2fs_stop_checkpoint(sbi, false);
++ }
+ out:
+ return page;
+ }
+diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
+index e0d9e8f27ed2..f8ef04c9f69d 100644
+--- a/fs/f2fs/inode.c
++++ b/fs/f2fs/inode.c
+@@ -320,10 +320,10 @@ struct inode *f2fs_iget(struct super_block *sb, unsigned long ino)
+ make_now:
+ if (ino == F2FS_NODE_INO(sbi)) {
+ inode->i_mapping->a_ops = &f2fs_node_aops;
+- mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
++ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+ } else if (ino == F2FS_META_INO(sbi)) {
+ inode->i_mapping->a_ops = &f2fs_meta_aops;
+- mapping_set_gfp_mask(inode->i_mapping, GFP_F2FS_ZERO);
++ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+ } else if (S_ISREG(inode->i_mode)) {
+ inode->i_op = &f2fs_file_inode_operations;
+ inode->i_fop = &f2fs_file_operations;
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 5854cc4e1d67..be8d1b16b8d1 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -2020,6 +2020,7 @@ static void write_current_sum_page(struct f2fs_sb_info *sbi,
+ struct f2fs_summary_block *dst;
+
+ dst = (struct f2fs_summary_block *)page_address(page);
++ memset(dst, 0, PAGE_SIZE);
+
+ mutex_lock(&curseg->curseg_mutex);
+
+@@ -3116,6 +3117,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
+
+ page = grab_meta_page(sbi, blkaddr++);
+ kaddr = (unsigned char *)page_address(page);
++ memset(kaddr, 0, PAGE_SIZE);
+
+ /* Step 1: write nat cache */
+ seg_i = CURSEG_I(sbi, CURSEG_HOT_DATA);
+@@ -3140,6 +3142,7 @@ static void write_compacted_summaries(struct f2fs_sb_info *sbi, block_t blkaddr)
+ if (!page) {
+ page = grab_meta_page(sbi, blkaddr++);
+ kaddr = (unsigned char *)page_address(page);
++ memset(kaddr, 0, PAGE_SIZE);
+ written_size = 0;
+ }
+ summary = (struct f2fs_summary *)(kaddr + written_size);
+diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
+index 3325d0769723..492ad0c86fa9 100644
+--- a/fs/f2fs/segment.h
++++ b/fs/f2fs/segment.h
+@@ -375,6 +375,7 @@ static inline void seg_info_to_sit_page(struct f2fs_sb_info *sbi,
+ int i;
+
+ raw_sit = (struct f2fs_sit_block *)page_address(page);
++ memset(raw_sit, 0, PAGE_SIZE);
+ for (i = 0; i < end - start; i++) {
+ rs = &raw_sit->entries[i];
+ se = get_seg_entry(sbi, start + i);
+diff --git a/fs/fuse/control.c b/fs/fuse/control.c
+index b9ea99c5b5b3..5be0339dcceb 100644
+--- a/fs/fuse/control.c
++++ b/fs/fuse/control.c
+@@ -211,10 +211,11 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
+ if (!dentry)
+ return NULL;
+
+- fc->ctl_dentry[fc->ctl_ndents++] = dentry;
+ inode = new_inode(fuse_control_sb);
+- if (!inode)
++ if (!inode) {
++ dput(dentry);
+ return NULL;
++ }
+
+ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+@@ -228,6 +229,9 @@ static struct dentry *fuse_ctl_add_dentry(struct dentry *parent,
+ set_nlink(inode, nlink);
+ inode->i_private = fc;
+ d_add(dentry, inode);
++
++ fc->ctl_dentry[fc->ctl_ndents++] = dentry;
++
+ return dentry;
+ }
+
+@@ -284,7 +288,10 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc)
+ for (i = fc->ctl_ndents - 1; i >= 0; i--) {
+ struct dentry *dentry = fc->ctl_dentry[i];
+ d_inode(dentry)->i_private = NULL;
+- d_drop(dentry);
++ if (!i) {
++ /* Get rid of submounts: */
++ d_invalidate(dentry);
++ }
+ dput(dentry);
+ }
+ drop_nlink(d_inode(fuse_control_sb->s_root));
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 5d06384c2cae..ee6c9baf8158 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -381,8 +381,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
+ if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
+ wake_up(&fc->blocked_waitq);
+
+- if (fc->num_background == fc->congestion_threshold &&
+- fc->connected && fc->sb) {
++ if (fc->num_background == fc->congestion_threshold && fc->sb) {
+ clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
+ clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
+ }
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 24967382a7b1..7a980b4462d9 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1629,8 +1629,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
+ return err;
+
+ if (attr->ia_valid & ATTR_OPEN) {
+- if (fc->atomic_o_trunc)
++ /* This is coming from open(..., ... | O_TRUNC); */
++ WARN_ON(!(attr->ia_valid & ATTR_SIZE));
++ WARN_ON(attr->ia_size != 0);
++ if (fc->atomic_o_trunc) {
++ /*
++ * No need to send request to userspace, since actual
++ * truncation has already been done by OPEN. But still
++ * need to truncate page cache.
++ */
++ i_size_write(inode, 0);
++ truncate_pagecache(inode, 0);
+ return 0;
++ }
+ file = NULL;
+ }
+
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index ef309958e060..9b37cf8142b5 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -1179,6 +1179,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
+ fuse_dev_free(fud);
+ err_put_conn:
+ fuse_conn_put(fc);
++ sb->s_fs_info = NULL;
+ err_fput:
+ fput(file);
+ err:
+diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
+index a50d7813e3ea..180b4b616725 100644
+--- a/fs/nfs/callback_proc.c
++++ b/fs/nfs/callback_proc.c
+@@ -420,11 +420,8 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot,
+ return htonl(NFS4ERR_SEQ_FALSE_RETRY);
+ }
+
+- /* Wraparound */
+- if (unlikely(slot->seq_nr == 0xFFFFFFFFU)) {
+- if (args->csa_sequenceid == 1)
+- return htonl(NFS4_OK);
+- } else if (likely(args->csa_sequenceid == slot->seq_nr + 1))
++ /* Note: wraparound relies on seq_nr being of type u32 */
++ if (likely(args->csa_sequenceid == slot->seq_nr + 1))
+ return htonl(NFS4_OK);
+
+ /* Misordered request */
+diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c
+index 22dc30a679a0..b6f9d84ba19b 100644
+--- a/fs/nfs/nfs4idmap.c
++++ b/fs/nfs/nfs4idmap.c
+@@ -343,7 +343,7 @@ static ssize_t nfs_idmap_lookup_name(__u32 id, const char *type, char *buf,
+ int id_len;
+ ssize_t ret;
+
+- id_len = snprintf(id_str, sizeof(id_str), "%u", id);
++ id_len = nfs_map_numeric_to_string(id, id_str, sizeof(id_str));
+ ret = nfs_idmap_get_key(id_str, id_len, type, buf, buflen, idmap);
+ if (ret < 0)
+ return -EINVAL;
+@@ -627,7 +627,8 @@ static int nfs_idmap_read_and_verify_message(struct idmap_msg *im,
+ if (strcmp(upcall->im_name, im->im_name) != 0)
+ break;
+ /* Note: here we store the NUL terminator too */
+- len = sprintf(id_str, "%d", im->im_id) + 1;
++ len = 1 + nfs_map_numeric_to_string(im->im_id, id_str,
++ sizeof(id_str));
+ ret = nfs_idmap_instantiate(key, authkey, id_str, len);
+ break;
+ case IDMAP_CONV_IDTONAME:
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index b71757e85066..409acdda70dd 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -751,7 +751,7 @@ static int nfs41_sequence_process(struct rpc_task *task,
+ * The slot id we used was probably retired. Try again
+ * using a different slot id.
+ */
+- if (slot->seq_nr < slot->table->target_highest_slotid)
++ if (slot->slot_nr < slot->table->target_highest_slotid)
+ goto session_recover;
+ goto retry_nowait;
+ case -NFS4ERR_SEQ_MISORDERED:
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 1d048dd95464..cfe535c286c3 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -3651,7 +3651,8 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+ nfserr = nfserr_resource;
+ goto err_no_verf;
+ }
+- maxcount = min_t(u32, readdir->rd_maxcount, INT_MAX);
++ maxcount = svc_max_payload(resp->rqstp);
++ maxcount = min_t(u32, readdir->rd_maxcount, maxcount);
+ /*
+ * Note the rfc defines rd_maxcount as the size of the
+ * READDIR4resok structure, which includes the verifier above
+@@ -3665,7 +3666,7 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4
+
+ /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */
+ if (!readdir->rd_dircount)
+- readdir->rd_dircount = INT_MAX;
++ readdir->rd_dircount = svc_max_payload(resp->rqstp);
+
+ readdir->xdr = xdr;
+ readdir->rd_maxcount = maxcount;
+diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
+index 04c4ec6483e5..8ae1cd8611cc 100644
+--- a/fs/ubifs/journal.c
++++ b/fs/ubifs/journal.c
+@@ -1283,10 +1283,11 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
+ int *new_len)
+ {
+ void *buf;
+- int err, dlen, compr_type, out_len, old_dlen;
++ int err, compr_type;
++ u32 dlen, out_len, old_dlen;
+
+ out_len = le32_to_cpu(dn->size);
+- buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS);
++ buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS);
+ if (!buf)
+ return -ENOMEM;
+
+diff --git a/fs/udf/directory.c b/fs/udf/directory.c
+index 0a98a2369738..3835f983cc99 100644
+--- a/fs/udf/directory.c
++++ b/fs/udf/directory.c
+@@ -152,6 +152,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
+ sizeof(struct fileIdentDesc));
+ }
+ }
++ /* Got last entry outside of dir size - fs is corrupted! */
++ if (*nf_pos > dir->i_size)
++ return NULL;
+ return fi;
+ }
+
+diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
+index d3558d897a4d..8d69b9134bef 100644
+--- a/include/dt-bindings/clock/aspeed-clock.h
++++ b/include/dt-bindings/clock/aspeed-clock.h
+@@ -45,7 +45,7 @@
+ #define ASPEED_RESET_JTAG_MASTER 3
+ #define ASPEED_RESET_MIC 4
+ #define ASPEED_RESET_PWM 5
+-#define ASPEED_RESET_PCIVGA 6
++#define ASPEED_RESET_PECI 6
+ #define ASPEED_RESET_I2C 7
+ #define ASPEED_RESET_AHB 8
+
+diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
+index 5c4eee043191..7d047465dfc2 100644
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -1124,8 +1124,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
+ if (!q->limits.chunk_sectors)
+ return q->limits.max_sectors;
+
+- return q->limits.chunk_sectors -
+- (offset & (q->limits.chunk_sectors - 1));
++ return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
++ (offset & (q->limits.chunk_sectors - 1))));
+ }
+
+ static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
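
Note on the blkdev.h hunk above: the size at a given offset is now the distance to the next chunk boundary clamped by the queue's max_sectors. A standalone sketch of the same calculation with example numbers; chunk_sectors is assumed to be a power of two, which the bitmask requires:

#include <stdio.h>

static unsigned int max_size_at(unsigned int max_sectors,
				unsigned int chunk_sectors,
				unsigned long long offset)
{
	unsigned int to_boundary =
		chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));

	return to_boundary < max_sectors ? to_boundary : max_sectors;
}

int main(void)
{
	/* 256-sector chunks; request starts 100 sectors into a chunk. */
	printf("%u\n", max_size_at(64, 256, 100));	/* 64: clamped by max_sectors */
	printf("%u\n", max_size_at(512, 256, 100));	/* 156: distance to boundary */
	return 0;
}
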
+diff --git a/include/linux/compiler.h b/include/linux/compiler.h
+index ab4711c63601..42506e4d1f53 100644
+--- a/include/linux/compiler.h
++++ b/include/linux/compiler.h
+@@ -21,7 +21,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
+ #define unlikely_notrace(x) __builtin_expect(!!(x), 0)
+
+ #define __branch_check__(x, expect, is_constant) ({ \
+- int ______r; \
++ long ______r; \
+ static struct ftrace_likely_data \
+ __attribute__((__aligned__(4))) \
+ __attribute__((section("_ftrace_annotated_branch"))) \
+diff --git a/include/linux/memory.h b/include/linux/memory.h
+index 31ca3e28b0eb..a6ddefc60517 100644
+--- a/include/linux/memory.h
++++ b/include/linux/memory.h
+@@ -38,6 +38,7 @@ struct memory_block {
+
+ int arch_get_memory_phys_device(unsigned long start_pfn);
+ unsigned long memory_block_size_bytes(void);
++int set_memory_block_size_order(unsigned int order);
+
+ /* These states are exposed to userspace as text strings in sysfs */
+ #define MEM_ONLINE (1<<0) /* exposed to userspace */
+diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
+index 3773e26c08c1..bb93a6c693e3 100644
+--- a/include/linux/slub_def.h
++++ b/include/linux/slub_def.h
+@@ -156,8 +156,12 @@ struct kmem_cache {
+
+ #ifdef CONFIG_SYSFS
+ #define SLAB_SUPPORTS_SYSFS
++void sysfs_slab_unlink(struct kmem_cache *);
+ void sysfs_slab_release(struct kmem_cache *);
+ #else
++static inline void sysfs_slab_unlink(struct kmem_cache *s)
++{
++}
+ static inline void sysfs_slab_release(struct kmem_cache *s)
+ {
+ }
+diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
+index 9fc8a825aa28..ba015efb5312 100644
+--- a/include/rdma/ib_verbs.h
++++ b/include/rdma/ib_verbs.h
+@@ -3310,11 +3310,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
+ *
+ * Users can examine the cq structure to determine the actual CQ size.
+ */
+-struct ib_cq *ib_create_cq(struct ib_device *device,
+- ib_comp_handler comp_handler,
+- void (*event_handler)(struct ib_event *, void *),
+- void *cq_context,
+- const struct ib_cq_init_attr *cq_attr);
++struct ib_cq *__ib_create_cq(struct ib_device *device,
++ ib_comp_handler comp_handler,
++ void (*event_handler)(struct ib_event *, void *),
++ void *cq_context,
++ const struct ib_cq_init_attr *cq_attr,
++ const char *caller);
++#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
++ __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
+
+ /**
+ * ib_resize_cq - Modifies the capacity of the CQ.
+@@ -3734,6 +3737,20 @@ static inline int ib_check_mr_access(int flags)
+ return 0;
+ }
+
++static inline bool ib_access_writable(int access_flags)
++{
++ /*
++ * We have writable memory backing the MR if any of the following
++ * access flags are set. "Local write" and "remote write" obviously
++ * require write access. "Remote atomic" can do things like fetch and
++ * add, which will modify memory, and "MW bind" can change permissions
++ * by binding a window.
++ */
++ return access_flags &
++ (IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE |
++ IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND);
++}
++
+ /**
+ * ib_check_mr_status: lightweight check of MR status.
+ * This routine may provide status checks on a selected
+diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
+index 3f4c187e435d..eec495e68823 100644
+--- a/include/rdma/rdma_vt.h
++++ b/include/rdma/rdma_vt.h
+@@ -402,7 +402,7 @@ struct rvt_dev_info {
+ spinlock_t pending_lock; /* protect pending mmap list */
+
+ /* CQ */
+- struct kthread_worker *worker; /* per device cq worker */
++ struct kthread_worker __rcu *worker; /* per device cq worker */
+ u32 n_cqs_allocated; /* number of CQs allocated for device */
+ spinlock_t n_cqs_lock; /* protect count of in use cqs */
+
+diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
+index bc1e507be9ff..776308d2fa9e 100644
+--- a/kernel/locking/rwsem.c
++++ b/kernel/locking/rwsem.c
+@@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
+ might_sleep();
+
+ __down_read(sem);
++ rwsem_set_reader_owned(sem);
+ }
+
+ EXPORT_SYMBOL(down_read_non_owner);
+diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
+index 3e3c2004bb23..449d67edfa4b 100644
+--- a/kernel/printk/printk_safe.c
++++ b/kernel/printk/printk_safe.c
+@@ -82,6 +82,7 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
+ {
+ int add;
+ size_t len;
++ va_list ap;
+
+ again:
+ len = atomic_read(&s->len);
+@@ -100,7 +101,9 @@ static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
+ if (!len)
+ smp_rmb();
+
+- add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
++ va_copy(ap, args);
++ add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
++ va_end(ap);
+ if (!add)
+ return 0;
+
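
Note on the printk_safe hunk above: the store routine can loop and format again, so each attempt must consume a va_copy of the caller's va_list rather than the original. A standalone reminder of that rule; format_twice is only an illustration:

#include <stdarg.h>
#include <stdio.h>

static int format_twice(char *buf, size_t sz, const char *fmt, va_list args)
{
	va_list ap;
	int len;

	va_copy(ap, args);		/* consume a copy, not the original */
	len = vsnprintf(buf, sz, fmt, ap);
	va_end(ap);

	va_copy(ap, args);		/* a retry needs a fresh copy too */
	len = vsnprintf(buf, sz, fmt, ap);
	va_end(ap);

	return len;
}

static int caller(char *buf, size_t sz, const char *fmt, ...)
{
	va_list args;
	int len;

	va_start(args, fmt);
	len = format_twice(buf, sz, fmt, args);
	va_end(args);
	return len;
}

int main(void)
{
	char buf[64];

	caller(buf, sizeof(buf), "%s %d", "value", 42);
	printf("%s\n", buf);
	return 0;
}
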
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index 177de3640c78..8a040bcaa033 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -139,9 +139,13 @@ static void __local_bh_enable(unsigned int cnt)
+ {
+ lockdep_assert_irqs_disabled();
+
++ if (preempt_count() == cnt)
++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
++
+ if (softirq_count() == (cnt & SOFTIRQ_MASK))
+ trace_softirqs_on(_RET_IP_);
+- preempt_count_sub(cnt);
++
++ __preempt_count_sub(cnt);
+ }
+
+ /*
+diff --git a/kernel/time/time.c b/kernel/time/time.c
+index 3044d48ebe56..e8127f4e9e66 100644
+--- a/kernel/time/time.c
++++ b/kernel/time/time.c
+@@ -28,6 +28,7 @@
+ */
+
+ #include <linux/export.h>
++#include <linux/kernel.h>
+ #include <linux/timex.h>
+ #include <linux/capability.h>
+ #include <linux/timekeeper_internal.h>
+@@ -314,9 +315,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
+ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
+ #else
+ # if BITS_PER_LONG == 32
+- return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
++ return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
++ HZ_TO_MSEC_SHR32;
+ # else
+- return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
++ return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
+ # endif
+ #endif
+ }
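
Note on the time.c hunk above: jiffies_to_msecs() now rounds up instead of truncating. A standalone arithmetic check with HZ = 300, where 1000/HZ reduces to 10/3 ms per jiffy:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned long num = 10, den = 3;	/* 10/3 ms per jiffy at HZ=300 */
	unsigned long j = 1;

	printf("floor:    %lu ms\n", (j * num) / den);		/* 3 ms */
	printf("round up: %lu ms\n", DIV_ROUND_UP(j * num, den));	/* 4 ms */
	return 0;
}
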
+diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
+index 7d306b74230f..c44f74daefbf 100644
+--- a/kernel/trace/trace_events_filter.c
++++ b/kernel/trace/trace_events_filter.c
+@@ -78,7 +78,8 @@ static const char * ops[] = { OPS };
+ C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \
+ C(INVALID_FILTER, "Meaningless filter expression"), \
+ C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \
+- C(INVALID_VALUE, "Invalid value (did you forget quotes)?"),
++ C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \
++ C(NO_FILTER, "No filter found"),
+
+ #undef C
+ #define C(a, b) FILT_ERR_##a
+@@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
+ goto out_free;
+ }
+
++ if (!N) {
++ /* No program? */
++ ret = -EINVAL;
++ parse_error(pe, FILT_ERR_NO_FILTER, ptr - str);
++ goto out_free;
++ }
++
+ prog[N].pred = NULL; /* #13 */
+ prog[N].target = 1; /* TRUE */
+ prog[N+1].pred = NULL;
+diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
+index 3d35d062970d..c253c1b46c6b 100644
+--- a/lib/Kconfig.kasan
++++ b/lib/Kconfig.kasan
+@@ -6,6 +6,7 @@ if HAVE_ARCH_KASAN
+ config KASAN
+ bool "KASan: runtime memory debugger"
+ depends on SLUB || (SLAB && !DEBUG_SLAB)
++ select SLUB_DEBUG if SLUB
+ select CONSTRUCTORS
+ select STACKDEPOT
+ help
+diff --git a/lib/vsprintf.c b/lib/vsprintf.c
+index 23920c5ff728..91320e5bfd5b 100644
+--- a/lib/vsprintf.c
++++ b/lib/vsprintf.c
+@@ -1456,9 +1456,6 @@ char *clock(char *buf, char *end, struct clk *clk, struct printf_spec spec,
+ return string(buf, end, NULL, spec);
+
+ switch (fmt[1]) {
+- case 'r':
+- return number(buf, end, clk_get_rate(clk), spec);
+-
+ case 'n':
+ default:
+ #ifdef CONFIG_COMMON_CLK
+diff --git a/mm/gup.c b/mm/gup.c
+index 541904a7c60f..3d8472d48a0b 100644
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -1459,32 +1459,48 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
+ return 1;
+ }
+
+-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
++static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
+ unsigned long end, struct page **pages, int *nr)
+ {
+ unsigned long fault_pfn;
++ int nr_start = *nr;
++
++ fault_pfn = pmd_pfn(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
++ if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
++ return 0;
+
+- fault_pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+- return __gup_device_huge(fault_pfn, addr, end, pages, nr);
++ if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
++ undo_dev_pagemap(nr, nr_start, pages);
++ return 0;
++ }
++ return 1;
+ }
+
+-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
++static int __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
+ unsigned long end, struct page **pages, int *nr)
+ {
+ unsigned long fault_pfn;
++ int nr_start = *nr;
++
++ fault_pfn = pud_pfn(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
++ if (!__gup_device_huge(fault_pfn, addr, end, pages, nr))
++ return 0;
+
+- fault_pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+- return __gup_device_huge(fault_pfn, addr, end, pages, nr);
++ if (unlikely(pud_val(orig) != pud_val(*pudp))) {
++ undo_dev_pagemap(nr, nr_start, pages);
++ return 0;
++ }
++ return 1;
+ }
+ #else
+-static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
++static int __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
+ unsigned long end, struct page **pages, int *nr)
+ {
+ BUILD_BUG();
+ return 0;
+ }
+
+-static int __gup_device_huge_pud(pud_t pud, unsigned long addr,
++static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
+ unsigned long end, struct page **pages, int *nr)
+ {
+ BUILD_BUG();
+@@ -1502,7 +1518,7 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
+ return 0;
+
+ if (pmd_devmap(orig))
+- return __gup_device_huge_pmd(orig, addr, end, pages, nr);
++ return __gup_device_huge_pmd(orig, pmdp, addr, end, pages, nr);
+
+ refs = 0;
+ page = pmd_page(orig) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
+@@ -1540,7 +1556,7 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
+ return 0;
+
+ if (pud_devmap(orig))
+- return __gup_device_huge_pud(orig, addr, end, pages, nr);
++ return __gup_device_huge_pud(orig, pudp, addr, end, pages, nr);
+
+ refs = 0;
+ page = pud_page(orig) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
+diff --git a/mm/ksm.c b/mm/ksm.c
+index e3cbf9a92f3c..e6a9640580fc 100644
+--- a/mm/ksm.c
++++ b/mm/ksm.c
+@@ -199,6 +199,8 @@ struct rmap_item {
+ #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */
+ #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */
+ #define STABLE_FLAG 0x200 /* is listed from the stable tree */
++#define KSM_FLAG_MASK (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG)
++ /* to mask all the flags */
+
+ /* The stable and unstable tree heads */
+ static struct rb_root one_stable_tree[1] = { RB_ROOT };
+@@ -2570,10 +2572,15 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
+ anon_vma_lock_read(anon_vma);
+ anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
+ 0, ULONG_MAX) {
++ unsigned long addr;
++
+ cond_resched();
+ vma = vmac->vma;
+- if (rmap_item->address < vma->vm_start ||
+- rmap_item->address >= vma->vm_end)
++
++ /* Ignore the stable/unstable/seqnr flags */
++ addr = rmap_item->address & ~KSM_FLAG_MASK;
++
++ if (addr < vma->vm_start || addr >= vma->vm_end)
+ continue;
+ /*
+ * Initially we examine only the vma which covers this
+@@ -2587,8 +2594,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
+ if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
+ continue;
+
+- if (!rwc->rmap_one(page, vma,
+- rmap_item->address, rwc->arg)) {
++ if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
+ anon_vma_unlock_read(anon_vma);
+ return;
+ }
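
Note on the ksm.c hunk above: rmap_item->address keeps KSM status flags in its low bits, so they must be stripped before the value is used as a real user address (for example when handed to the rmap_one callback). A standalone illustration with a made-up address:

#include <stdio.h>

#define SEQNR_MASK	0x0ff
#define UNSTABLE_FLAG	0x100
#define STABLE_FLAG	0x200
#define KSM_FLAG_MASK	(SEQNR_MASK | UNSTABLE_FLAG | STABLE_FLAG)

int main(void)
{
	/* Page-aligned address with the STABLE flag and a seqnr of 0x2a set. */
	unsigned long stored = 0x7f0000001000UL | STABLE_FLAG | 0x2a;

	printf("stored value : %#lx (not page aligned)\n", stored);
	printf("real address : %#lx\n", stored & ~KSM_FLAG_MASK);
	return 0;
}
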
+diff --git a/mm/slab_common.c b/mm/slab_common.c
+index 98dcdc352062..65408ced18f1 100644
+--- a/mm/slab_common.c
++++ b/mm/slab_common.c
+@@ -566,10 +566,14 @@ static int shutdown_cache(struct kmem_cache *s)
+ list_del(&s->list);
+
+ if (s->flags & SLAB_TYPESAFE_BY_RCU) {
++#ifdef SLAB_SUPPORTS_SYSFS
++ sysfs_slab_unlink(s);
++#endif
+ list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
+ schedule_work(&slab_caches_to_rcu_destroy_work);
+ } else {
+ #ifdef SLAB_SUPPORTS_SYSFS
++ sysfs_slab_unlink(s);
+ sysfs_slab_release(s);
+ #else
+ slab_kmem_cache_release(s);
+diff --git a/mm/slub.c b/mm/slub.c
+index 44aa7847324a..613c8dc2f409 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -5714,7 +5714,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
+ kset_unregister(s->memcg_kset);
+ #endif
+ kobject_uevent(&s->kobj, KOBJ_REMOVE);
+- kobject_del(&s->kobj);
+ out:
+ kobject_put(&s->kobj);
+ }
+@@ -5799,6 +5798,12 @@ static void sysfs_slab_remove(struct kmem_cache *s)
+ schedule_work(&s->kobj_remove_work);
+ }
+
++void sysfs_slab_unlink(struct kmem_cache *s)
++{
++ if (slab_state >= FULL)
++ kobject_del(&s->kobj);
++}
++
+ void sysfs_slab_release(struct kmem_cache *s)
+ {
+ if (slab_state >= FULL)
+diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
+index e8adad33d0bb..8e531ac9bc87 100644
+--- a/net/sunrpc/xprtrdma/rpc_rdma.c
++++ b/net/sunrpc/xprtrdma/rpc_rdma.c
+@@ -230,7 +230,7 @@ rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
+ */
+ *ppages = alloc_page(GFP_ATOMIC);
+ if (!*ppages)
+- return -EAGAIN;
++ return -ENOBUFS;
+ }
+ seg->mr_page = *ppages;
+ seg->mr_offset = (char *)page_base;
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index 245160373dab..cbf227d12c2b 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -435,22 +435,16 @@ static int sel_release_policy(struct inode *inode, struct file *filp)
+ static ssize_t sel_read_policy(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+ {
+- struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
+ struct policy_load_memory *plm = filp->private_data;
+ int ret;
+
+- mutex_lock(&fsi->mutex);
+-
+ ret = avc_has_perm(&selinux_state,
+ current_sid(), SECINITSID_SECURITY,
+ SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
+ if (ret)
+- goto out;
++ return ret;
+
+- ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
+-out:
+- mutex_unlock(&fsi->mutex);
+- return ret;
++ return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
+ }
+
+ static int sel_mmap_policy_fault(struct vm_fault *vmf)
+@@ -1182,25 +1176,29 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
+ ret = -EINVAL;
+ if (index >= fsi->bool_num || strcmp(name,
+ fsi->bool_pending_names[index]))
+- goto out;
++ goto out_unlock;
+
+ ret = -ENOMEM;
+ page = (char *)get_zeroed_page(GFP_KERNEL);
+ if (!page)
+- goto out;
++ goto out_unlock;
+
+ cur_enforcing = security_get_bool_value(fsi->state, index);
+ if (cur_enforcing < 0) {
+ ret = cur_enforcing;
+- goto out;
++ goto out_unlock;
+ }
+ length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
+ fsi->bool_pending_values[index]);
+- ret = simple_read_from_buffer(buf, count, ppos, page, length);
+-out:
+ mutex_unlock(&fsi->mutex);
++ ret = simple_read_from_buffer(buf, count, ppos, page, length);
++out_free:
+ free_page((unsigned long)page);
+ return ret;
++
++out_unlock:
++ mutex_unlock(&fsi->mutex);
++ goto out_free;
+ }
+
+ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
+@@ -1213,6 +1211,17 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
+ unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
+ const char *name = filep->f_path.dentry->d_name.name;
+
++ if (count >= PAGE_SIZE)
++ return -ENOMEM;
++
++ /* No partial writes. */
++ if (*ppos != 0)
++ return -EINVAL;
++
++ page = memdup_user_nul(buf, count);
++ if (IS_ERR(page))
++ return PTR_ERR(page);
++
+ mutex_lock(&fsi->mutex);
+
+ length = avc_has_perm(&selinux_state,
+@@ -1227,22 +1236,6 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
+ fsi->bool_pending_names[index]))
+ goto out;
+
+- length = -ENOMEM;
+- if (count >= PAGE_SIZE)
+- goto out;
+-
+- /* No partial writes. */
+- length = -EINVAL;
+- if (*ppos != 0)
+- goto out;
+-
+- page = memdup_user_nul(buf, count);
+- if (IS_ERR(page)) {
+- length = PTR_ERR(page);
+- page = NULL;
+- goto out;
+- }
+-
+ length = -EINVAL;
+ if (sscanf(page, "%d", &new_value) != 1)
+ goto out;
+@@ -1274,6 +1267,17 @@ static ssize_t sel_commit_bools_write(struct file *filep,
+ ssize_t length;
+ int new_value;
+
++ if (count >= PAGE_SIZE)
++ return -ENOMEM;
++
++ /* No partial writes. */
++ if (*ppos != 0)
++ return -EINVAL;
++
++ page = memdup_user_nul(buf, count);
++ if (IS_ERR(page))
++ return PTR_ERR(page);
++
+ mutex_lock(&fsi->mutex);
+
+ length = avc_has_perm(&selinux_state,
+@@ -1283,22 +1287,6 @@ static ssize_t sel_commit_bools_write(struct file *filep,
+ if (length)
+ goto out;
+
+- length = -ENOMEM;
+- if (count >= PAGE_SIZE)
+- goto out;
+-
+- /* No partial writes. */
+- length = -EINVAL;
+- if (*ppos != 0)
+- goto out;
+-
+- page = memdup_user_nul(buf, count);
+- if (IS_ERR(page)) {
+- length = PTR_ERR(page);
+- page = NULL;
+- goto out;
+- }
+-
+ length = -EINVAL;
+ if (sscanf(page, "%d", &new_value) != 1)
+ goto out;
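
Note on the selinuxfs hunks above: both write handlers now copy and sanity-check the user buffer before taking fsi->mutex, so no user-memory fault is serviced while the lock is held. A minimal sketch of that ordering; my_write and my_mutex are illustrative names:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(my_mutex);

static ssize_t my_write(const char __user *buf, size_t count, loff_t *ppos)
{
	char *page;
	ssize_t length;
	int new_value;

	if (count >= PAGE_SIZE)
		return -ENOMEM;
	if (*ppos != 0)			/* no partial writes */
		return -EINVAL;

	/* The copy may fault and sleep, so do it before taking the mutex. */
	page = memdup_user_nul(buf, count);
	if (IS_ERR(page))
		return PTR_ERR(page);

	mutex_lock(&my_mutex);
	length = -EINVAL;
	if (sscanf(page, "%d", &new_value) == 1)
		length = count;		/* ... apply new_value here ... */
	mutex_unlock(&my_mutex);

	kfree(page);
	return length;
}
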
+diff --git a/sound/core/timer.c b/sound/core/timer.c
+index 0ddcae495838..e9e73edb4bd8 100644
+--- a/sound/core/timer.c
++++ b/sound/core/timer.c
+@@ -1517,7 +1517,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
+ } else {
+ if (id.subdevice < 0)
+ id.subdevice = 0;
+- else
++ else if (id.subdevice < INT_MAX)
+ id.subdevice++;
+ }
+ }
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 5bc3a7468e17..4d26bb010ddf 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -2887,8 +2887,9 @@ static int hda_codec_runtime_suspend(struct device *dev)
+ list_for_each_entry(pcm, &codec->pcm_list_head, list)
+ snd_pcm_suspend_all(pcm->pcm);
+ state = hda_call_codec_suspend(codec);
+- if (codec_has_clkstop(codec) && codec_has_epss(codec) &&
+- (state & AC_PWRST_CLK_STOP_OK))
++ if (codec->link_down_at_suspend ||
++ (codec_has_clkstop(codec) && codec_has_epss(codec) &&
++ (state & AC_PWRST_CLK_STOP_OK)))
+ snd_hdac_codec_link_down(&codec->core);
+ snd_hdac_link_power(&codec->core, false);
+ return 0;
+diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
+index 681c360f29f9..a8b1b31f161c 100644
+--- a/sound/pci/hda/hda_codec.h
++++ b/sound/pci/hda/hda_codec.h
+@@ -258,6 +258,7 @@ struct hda_codec {
+ unsigned int power_save_node:1; /* advanced PM for each widget */
+ unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
+ unsigned int force_pin_prefix:1; /* Add location prefix */
++ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
+ #ifdef CONFIG_PM
+ unsigned long power_on_acct;
+ unsigned long power_off_acct;
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 7d7eb1354eee..ed39a77f9253 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -3741,6 +3741,11 @@ static int patch_atihdmi(struct hda_codec *codec)
+
+ spec->chmap.channels_max = max(spec->chmap.channels_max, 8u);
+
++ /* AMD GPUs have neither EPSS nor CLKSTOP bits, hence preventing
++ * the link-down as is. Tell the core to allow it.
++ */
++ codec->link_down_at_suspend = 1;
++
+ return 0;
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 06c2c80a045b..cb9a977bf188 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -2542,6 +2542,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
+ SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
+ SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
++ SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
+ SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
+ SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
+ SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
+@@ -4985,7 +4986,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
+ struct alc_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+- spec->shutup = alc_no_shutup; /* reduce click noise */
+ spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
+ codec->power_save_node = 0; /* avoid click noises */
+@@ -5384,6 +5384,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
+ /* for hda_fixup_thinkpad_acpi() */
+ #include "thinkpad_helper.c"
+
++static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
++ hda_fixup_thinkpad_acpi(codec, fix, action);
++}
++
+ /* for dell wmi mic mute led */
+ #include "dell_wmi_helper.c"
+
+@@ -5927,7 +5934,7 @@ static const struct hda_fixup alc269_fixups[] = {
+ },
+ [ALC269_FIXUP_THINKPAD_ACPI] = {
+ .type = HDA_FIXUP_FUNC,
+- .v.func = hda_fixup_thinkpad_acpi,
++ .v.func = alc_fixup_thinkpad_acpi,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_SKU_IGNORE,
+ },
+@@ -6577,8 +6584,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
+ SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
++ SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+- SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
++ SND_PCI_QUIRK(0x17aa, 0x3136, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
+@@ -6756,6 +6764,12 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
+ {0x14, 0x90170110},
+ {0x19, 0x02a11030},
+ {0x21, 0x02211020}),
++ SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
++ {0x14, 0x90170110},
++ {0x19, 0x02a11030},
++ {0x1a, 0x02a11040},
++ {0x1b, 0x01014020},
++ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0x90a60140},
+ {0x14, 0x90170110},
+diff --git a/sound/soc/cirrus/edb93xx.c b/sound/soc/cirrus/edb93xx.c
+index c53bd6f2c2d7..3d011abaa266 100644
+--- a/sound/soc/cirrus/edb93xx.c
++++ b/sound/soc/cirrus/edb93xx.c
+@@ -67,7 +67,7 @@ static struct snd_soc_dai_link edb93xx_dai = {
+ .cpu_dai_name = "ep93xx-i2s",
+ .codec_name = "spi0.0",
+ .codec_dai_name = "cs4271-hifi",
+- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
++ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ops = &edb93xx_ops,
+ };
+diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
+index 934f8aefdd90..0dc3852c4621 100644
+--- a/sound/soc/cirrus/ep93xx-i2s.c
++++ b/sound/soc/cirrus/ep93xx-i2s.c
+@@ -51,7 +51,9 @@
+ #define EP93XX_I2S_WRDLEN_24 (1 << 0)
+ #define EP93XX_I2S_WRDLEN_32 (2 << 0)
+
+-#define EP93XX_I2S_LINCTRLDATA_R_JUST (1 << 2) /* Right justify */
++#define EP93XX_I2S_RXLINCTRLDATA_R_JUST BIT(1) /* Right justify */
++
++#define EP93XX_I2S_TXLINCTRLDATA_R_JUST BIT(2) /* Right justify */
+
+ #define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */
+ #define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */
+@@ -170,25 +172,25 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ unsigned int fmt)
+ {
+ struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai);
+- unsigned int clk_cfg, lin_ctrl;
++ unsigned int clk_cfg;
++ unsigned int txlin_ctrl = 0;
++ unsigned int rxlin_ctrl = 0;
+
+ clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG);
+- lin_ctrl = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXLINCTRLDATA);
+
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ clk_cfg |= EP93XX_I2S_CLKCFG_REL;
+- lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
+ break;
+
+ case SND_SOC_DAIFMT_LEFT_J:
+ clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
+- lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
+ break;
+
+ case SND_SOC_DAIFMT_RIGHT_J:
+ clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
+- lin_ctrl |= EP93XX_I2S_LINCTRLDATA_R_JUST;
++ rxlin_ctrl |= EP93XX_I2S_RXLINCTRLDATA_R_JUST;
++ txlin_ctrl |= EP93XX_I2S_TXLINCTRLDATA_R_JUST;
+ break;
+
+ default:
+@@ -213,32 +215,32 @@ static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_NF:
+ /* Negative bit clock, lrclk low on left word */
+- clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL);
++ clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS);
+ break;
+
+ case SND_SOC_DAIFMT_NB_IF:
+ /* Negative bit clock, lrclk low on right word */
+ clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP;
+- clk_cfg |= EP93XX_I2S_CLKCFG_REL;
++ clk_cfg |= EP93XX_I2S_CLKCFG_LRS;
+ break;
+
+ case SND_SOC_DAIFMT_IB_NF:
+ /* Positive bit clock, lrclk low on left word */
+ clk_cfg |= EP93XX_I2S_CLKCFG_CKP;
+- clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
++ clk_cfg &= ~EP93XX_I2S_CLKCFG_LRS;
+ break;
+
+ case SND_SOC_DAIFMT_IB_IF:
+ /* Positive bit clock, lrclk low on right word */
+- clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL;
++ clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_LRS;
+ break;
+ }
+
+ /* Write new register values */
+ ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg);
+ ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg);
+- ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, lin_ctrl);
+- ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, lin_ctrl);
++ ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, rxlin_ctrl);
++ ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, txlin_ctrl);
+ return 0;
+ }
+
+diff --git a/sound/soc/cirrus/snappercl15.c b/sound/soc/cirrus/snappercl15.c
+index 2334ec19e7eb..11ff7b2672b2 100644
+--- a/sound/soc/cirrus/snappercl15.c
++++ b/sound/soc/cirrus/snappercl15.c
+@@ -72,7 +72,7 @@ static struct snd_soc_dai_link snappercl15_dai = {
+ .codec_dai_name = "tlv320aic23-hifi",
+ .codec_name = "tlv320aic23-codec.0-001a",
+ .platform_name = "ep93xx-i2s",
+- .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
++ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBS_CFS,
+ .ops = &snappercl15_ops,
+ };
+diff --git a/sound/soc/codecs/cs35l35.c b/sound/soc/codecs/cs35l35.c
+index a4a2cb171bdf..bd6226bde45f 100644
+--- a/sound/soc/codecs/cs35l35.c
++++ b/sound/soc/codecs/cs35l35.c
+@@ -1105,6 +1105,7 @@ static struct regmap_config cs35l35_regmap = {
+ .readable_reg = cs35l35_readable_register,
+ .precious_reg = cs35l35_precious_register,
+ .cache_type = REGCACHE_RBTREE,
++ .use_single_rw = true,
+ };
+
+ static irqreturn_t cs35l35_irq(int irq, void *data)
+diff --git a/sound/soc/mediatek/common/mtk-afe-platform-driver.c b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+index 53215b52e4f2..f8a06709f76d 100644
+--- a/sound/soc/mediatek/common/mtk-afe-platform-driver.c
++++ b/sound/soc/mediatek/common/mtk-afe-platform-driver.c
+@@ -64,14 +64,14 @@ static const struct snd_pcm_ops mtk_afe_pcm_ops = {
+ static int mtk_afe_pcm_new(struct snd_soc_pcm_runtime *rtd)
+ {
+ size_t size;
+- struct snd_card *card = rtd->card->snd_card;
+ struct snd_pcm *pcm = rtd->pcm;
+ struct snd_soc_component *component = snd_soc_rtdcom_lookup(rtd, AFE_PCM_NAME);
+ struct mtk_base_afe *afe = snd_soc_component_get_drvdata(component);
+
+ size = afe->mtk_afe_hardware->buffer_bytes_max;
+ return snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+- card->dev, size, size);
++ rtd->platform->dev,
++ size, size);
+ }
+
+ static void mtk_afe_pcm_free(struct snd_pcm *pcm)
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 2d9709104ec5..b2b501ef57d7 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -433,6 +433,8 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
+ static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
+ {
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
++
++ list_del(&data->paths);
+ kfree(data->wlist);
+ kfree(data);
+ }
+diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
+index 36ef45b2e89d..09c4a4a7b5dd 100644
+--- a/tools/perf/util/dso.c
++++ b/tools/perf/util/dso.c
+@@ -354,6 +354,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
+ if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
+ (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
+ (strncmp(name, "[vdso]", 6) == 0) ||
++ (strncmp(name, "[vdso32]", 8) == 0) ||
++ (strncmp(name, "[vdsox32]", 9) == 0) ||
+ (strncmp(name, "[vsyscall]", 10) == 0)) {
+ m->kmod = false;
+
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+index f9157aed1289..d404bed7003a 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+@@ -113,6 +113,7 @@ struct intel_pt_decoder {
+ bool have_cyc;
+ bool fixup_last_mtc;
+ bool have_last_ip;
++ enum intel_pt_param_flags flags;
+ uint64_t pos;
+ uint64_t last_ip;
+ uint64_t ip;
+@@ -226,6 +227,8 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
+ decoder->return_compression = params->return_compression;
+ decoder->branch_enable = params->branch_enable;
+
++ decoder->flags = params->flags;
++
+ decoder->period = params->period;
+ decoder->period_type = params->period_type;
+
+@@ -1097,6 +1100,15 @@ static bool intel_pt_fup_event(struct intel_pt_decoder *decoder)
+ return ret;
+ }
+
++static inline bool intel_pt_fup_with_nlip(struct intel_pt_decoder *decoder,
++ struct intel_pt_insn *intel_pt_insn,
++ uint64_t ip, int err)
++{
++ return decoder->flags & INTEL_PT_FUP_WITH_NLIP && !err &&
++ intel_pt_insn->branch == INTEL_PT_BR_INDIRECT &&
++ ip == decoder->ip + intel_pt_insn->length;
++}
++
+ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
+ {
+ struct intel_pt_insn intel_pt_insn;
+@@ -1109,10 +1121,11 @@ static int intel_pt_walk_fup(struct intel_pt_decoder *decoder)
+ err = intel_pt_walk_insn(decoder, &intel_pt_insn, ip);
+ if (err == INTEL_PT_RETURN)
+ return 0;
+- if (err == -EAGAIN) {
++ if (err == -EAGAIN ||
++ intel_pt_fup_with_nlip(decoder, &intel_pt_insn, ip, err)) {
+ if (intel_pt_fup_event(decoder))
+ return 0;
+- return err;
++ return -EAGAIN;
+ }
+ decoder->set_fup_tx_flags = false;
+ if (err)
+@@ -1376,7 +1389,6 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
+ {
+ intel_pt_log("ERROR: Buffer overflow\n");
+ intel_pt_clear_tx_flags(decoder);
+- decoder->have_tma = false;
+ decoder->cbr = 0;
+ decoder->timestamp_insn_cnt = 0;
+ decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
+@@ -1604,7 +1616,6 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
+ case INTEL_PT_PSB:
+ case INTEL_PT_TSC:
+ case INTEL_PT_TMA:
+- case INTEL_PT_CBR:
+ case INTEL_PT_MODE_TSX:
+ case INTEL_PT_BAD:
+ case INTEL_PT_PSBEND:
+@@ -1620,6 +1631,10 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
+ decoder->pkt_step = 0;
+ return -ENOENT;
+
++ case INTEL_PT_CBR:
++ intel_pt_calc_cbr(decoder);
++ break;
++
+ case INTEL_PT_OVF:
+ return intel_pt_overflow(decoder);
+
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+index fc1752d50019..51c18d67f4ca 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
+@@ -60,6 +60,14 @@ enum {
+ INTEL_PT_ERR_MAX,
+ };
+
++enum intel_pt_param_flags {
++ /*
++ * FUP packet can contain next linear instruction pointer instead of
++ * current linear instruction pointer.
++ */
++ INTEL_PT_FUP_WITH_NLIP = 1 << 0,
++};
++
+ struct intel_pt_state {
+ enum intel_pt_sample_type type;
+ int err;
+@@ -106,6 +114,7 @@ struct intel_pt_params {
+ unsigned int mtc_period;
+ uint32_t tsc_ctc_ratio_n;
+ uint32_t tsc_ctc_ratio_d;
++ enum intel_pt_param_flags flags;
+ };
+
+ struct intel_pt_decoder;
+diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+index ba4c9dd18643..d426761a549d 100644
+--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
++++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+@@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
+ if (len < offs)
+ return INTEL_PT_NEED_MORE_BYTES;
+ byte = buf[offs++];
+- payload |= (byte >> 1) << shift;
++ payload |= ((uint64_t)byte >> 1) << shift;
+ }
+
+ packet->type = INTEL_PT_CYC;
+diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
+index 0effaff57020..38b25e826a45 100644
+--- a/tools/perf/util/intel-pt.c
++++ b/tools/perf/util/intel-pt.c
+@@ -751,6 +751,7 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
+ unsigned int queue_nr)
+ {
+ struct intel_pt_params params = { .get_trace = 0, };
++ struct perf_env *env = pt->machine->env;
+ struct intel_pt_queue *ptq;
+
+ ptq = zalloc(sizeof(struct intel_pt_queue));
+@@ -832,6 +833,9 @@ static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
+ }
+ }
+
++ if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
++ params.flags |= INTEL_PT_FUP_WITH_NLIP;
++
+ ptq->decoder = intel_pt_decoder_new(&params);
+ if (!ptq->decoder)
+ goto out_free;
+@@ -1523,6 +1527,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
+
+ if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
+ switch (ptq->switch_state) {
++ case INTEL_PT_SS_NOT_TRACING:
+ case INTEL_PT_SS_UNKNOWN:
+ case INTEL_PT_SS_EXPECTING_SWITCH_IP:
+ err = intel_pt_next_tid(pt, ptq);
+diff --git a/tools/testing/selftests/ftrace/test.d/functions b/tools/testing/selftests/ftrace/test.d/functions
+index 2a4f16fc9819..8393b1c06027 100644
+--- a/tools/testing/selftests/ftrace/test.d/functions
++++ b/tools/testing/selftests/ftrace/test.d/functions
+@@ -15,14 +15,29 @@ reset_tracer() { # reset the current tracer
+ echo nop > current_tracer
+ }
+
+-reset_trigger() { # reset all current setting triggers
+- grep -v ^# events/*/*/trigger |
++reset_trigger_file() {
++ # remove action triggers first
++ grep -H ':on[^:]*(' $@ |
++ while read line; do
++ cmd=`echo $line | cut -f2- -d: | cut -f1 -d" "`
++ file=`echo $line | cut -f1 -d:`
++ echo "!$cmd" >> $file
++ done
++ grep -Hv ^# $@ |
+ while read line; do
+ cmd=`echo $line | cut -f2- -d: | cut -f1 -d" "`
+- echo "!$cmd" > `echo $line | cut -f1 -d:`
++ file=`echo $line | cut -f1 -d:`
++ echo "!$cmd" > $file
+ done
+ }
+
++reset_trigger() { # reset all current setting triggers
++ if [ -d events/synthetic ]; then
++ reset_trigger_file events/synthetic/*/trigger
++ fi
++ reset_trigger_file events/*/*/trigger
++}
++
+ reset_events_filter() { # reset all current setting filters
+ grep -v ^none events/*/*/filter |
+ while read line; do