author | Alice Ferrazzi <alicef@gentoo.org> | 2017-02-01 12:48:31 +0000 |
---|---|---|
committer | Alice Ferrazzi <alicef@gentoo.org> | 2017-02-01 12:48:31 +0000 |
commit | 687a84c83863dcb0662f4f62c707ce18fcee51c1 (patch) | |
tree | bc35f22206d06d573351ac37ee39665c5a82451e | |
parent | Remove redundant patch. (diff) | |
download | linux-patches-687a84c83863dcb0662f4f62c707ce18fcee51c1.tar.gz linux-patches-687a84c83863dcb0662f4f62c707ce18fcee51c1.tar.bz2 linux-patches-687a84c83863dcb0662f4f62c707ce18fcee51c1.zip |
linux kernel 3.12.70
3.12-69
-rw-r--r-- | 0000_README | 8 |
-rw-r--r-- | 1069_linux-3.12.70.patch | 7026 |
2 files changed, 7034 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 9b876d46..89b165d4 100644 --- a/0000_README +++ b/0000_README @@ -318,6 +318,14 @@ Patch: 1067_linux-3.12.68.patch From: http://www.kernel.org Desc: Linux 3.12.68 +Patch: 1068_linux-3.12.69.patch +From: http://www.kernel.org +Desc: Linux 3.12.69 + +Patch: 1069_linux-3.12.70.patch +From: http://www.kernel.org +Desc: Linux 3.12.70 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1069_linux-3.12.70.patch b/1069_linux-3.12.70.patch new file mode 100644 index 00000000..01821f58 --- /dev/null +++ b/1069_linux-3.12.70.patch @@ -0,0 +1,7026 @@ +diff --git a/Documentation/devicetree/bindings/clock/imx31-clock.txt b/Documentation/devicetree/bindings/clock/imx31-clock.txt +index 19df842c694f..8163d565f697 100644 +--- a/Documentation/devicetree/bindings/clock/imx31-clock.txt ++++ b/Documentation/devicetree/bindings/clock/imx31-clock.txt +@@ -77,7 +77,7 @@ Examples: + clks: ccm@53f80000{ + compatible = "fsl,imx31-ccm"; + reg = <0x53f80000 0x4000>; +- interrupts = <0 31 0x04 0 53 0x04>; ++ interrupts = <31>, <53>; + #clock-cells = <1>; + }; + +diff --git a/Makefile b/Makefile +index f355c0e24cd6..d0e6e38ee77b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 12 +-SUBLEVEL = 69 ++SUBLEVEL = 70 + EXTRAVERSION = + NAME = One Giant Leap for Frogkind + +diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts +index 588ce58a2959..bd81f1da17a6 100644 +--- a/arch/arm/boot/dts/da850-evm.dts ++++ b/arch/arm/boot/dts/da850-evm.dts +@@ -59,6 +59,7 @@ + #size-cells = <1>; + compatible = "m25p64"; + spi-max-frequency = <30000000>; ++ m25p,fast-read; + reg = <0>; + partition@0 { + label = "U-Boot-SPL"; +diff --git a/arch/arm/boot/dts/imx31.dtsi b/arch/arm/boot/dts/imx31.dtsi +index c34f82581248..626e5e374572 100644 +--- a/arch/arm/boot/dts/imx31.dtsi ++++ b/arch/arm/boot/dts/imx31.dtsi +@@ -30,11 +30,11 @@ + }; + }; + +- avic: avic-interrupt-controller@60000000 { ++ avic: interrupt-controller@68000000 { + compatible = "fsl,imx31-avic", "fsl,avic"; + interrupt-controller; + #interrupt-cells = <1>; +- reg = <0x60000000 0x100000>; ++ reg = <0x68000000 0x100000>; + }; + + soc { +@@ -110,13 +110,6 @@ + interrupts = <19>; + clocks = <&clks 25>; + }; +- +- clks: ccm@53f80000{ +- compatible = "fsl,imx31-ccm"; +- reg = <0x53f80000 0x4000>; +- interrupts = <0 31 0x04 0 53 0x04>; +- #clock-cells = <1>; +- }; + }; + + aips@53f00000 { /* AIPS2 */ +@@ -126,6 +119,13 @@ + reg = <0x53f00000 0x100000>; + ranges; + ++ clks: ccm@53f80000{ ++ compatible = "fsl,imx31-ccm"; ++ reg = <0x53f80000 0x4000>; ++ interrupts = <31>, <53>; ++ #clock-cells = <1>; ++ }; ++ + gpt: timer@53f90000 { + compatible = "fsl,imx31-gpt"; + reg = <0x53f90000 0x4000>; +diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h +index 9672e978d50d..569549079bc7 100644 +--- a/arch/arm/include/asm/cputype.h ++++ b/arch/arm/include/asm/cputype.h +@@ -76,6 +76,9 @@ + #define ARM_CPU_XSCALE_ARCH_V2 0x4000 + #define ARM_CPU_XSCALE_ARCH_V3 0x6000 + ++/* Qualcomm implemented cores */ ++#define ARM_CPU_PART_SCORPION 0x510002d0 ++ + extern unsigned int processor_id; + + #ifdef CONFIG_CPU_CP15 +diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c +index 7b95de601357..b3ebae328fac 100644 +--- a/arch/arm/kernel/hw_breakpoint.c ++++ b/arch/arm/kernel/hw_breakpoint.c +@@ -1066,6 +1066,22 @@ static int __init 
arch_hw_breakpoint_init(void) + return 0; + } + ++ /* ++ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD ++ * whenever a WFI is issued, even if the core is not powered down, in ++ * violation of the architecture. When DBGPRSR.SPD is set, accesses to ++ * breakpoint and watchpoint registers are treated as undefined, so ++ * this results in boot time and runtime failures when these are ++ * accessed and we unexpectedly take a trap. ++ * ++ * It's not clear if/how this can be worked around, so we blacklist ++ * Scorpion CPUs to avoid these issues. ++ */ ++ if ((read_cpuid_id() & 0xff00fff0) == ARM_CPU_PART_SCORPION) { ++ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n"); ++ return 0; ++ } ++ + has_ossr = core_has_os_save_restore(); + + /* Determine how many BRPs/WRPs are available. */ +diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c +index f56e5fbfa2fd..25f11492c33f 100644 +--- a/arch/arm/mach-davinci/da850.c ++++ b/arch/arm/mach-davinci/da850.c +@@ -297,6 +297,16 @@ static struct clk emac_clk = { + .gpsc = 1, + }; + ++/* ++ * In order to avoid adding the emac_clk to the clock lookup table twice (and ++ * screwing up the linked list in the process) create a separate clock for ++ * mdio inheriting the rate from emac_clk. ++ */ ++static struct clk mdio_clk = { ++ .name = "mdio", ++ .parent = &emac_clk, ++}; ++ + static struct clk mcasp_clk = { + .name = "mcasp", + .parent = &pll0_sysclk2, +@@ -461,7 +471,7 @@ static struct clk_lookup da850_clks[] = { + CLK(NULL, "arm", &arm_clk), + CLK(NULL, "rmii", &rmii_clk), + CLK("davinci_emac.1", NULL, &emac_clk), +- CLK("davinci_mdio.0", "fck", &emac_clk), ++ CLK("davinci_mdio.0", "fck", &mdio_clk), + CLK("davinci-mcasp.0", NULL, &mcasp_clk), + CLK("da8xx_lcdc.0", "fck", &lcdc_clk), + CLK("da830-mmc.0", NULL, &mmcsd0_clk), +diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c +index 1a468f0fd22e..9d532568b8b3 100644 +--- a/arch/arm/mach-ux500/pm.c ++++ b/arch/arm/mach-ux500/pm.c +@@ -128,8 +128,8 @@ bool prcmu_pending_irq(void) + */ + bool prcmu_is_cpu_in_wfi(int cpu) + { +- return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : +- PRCM_ARM_WFI_STANDBY_WFI0; ++ return readl(PRCM_ARM_WFI_STANDBY) & ++ (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0); + } + + /* +diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c +index 83e4f959ee47..0cad698cdd3c 100644 +--- a/arch/arm/xen/enlighten.c ++++ b/arch/arm/xen/enlighten.c +@@ -260,8 +260,7 @@ static int __init xen_guest_init(void) + * for secondary CPUs as they are brought up. + * For uniformity we use VCPUOP_register_vcpu_info even on cpu0. 
+ */ +- xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info), +- sizeof(struct vcpu_info)); ++ xen_vcpu_info = alloc_percpu(struct vcpu_info); + if (xen_vcpu_info == NULL) + return -ENOMEM; + +diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h +index 6913643bbe54..c136fd53c847 100644 +--- a/arch/arm64/include/uapi/asm/ptrace.h ++++ b/arch/arm64/include/uapi/asm/ptrace.h +@@ -75,6 +75,7 @@ struct user_fpsimd_state { + __uint128_t vregs[32]; + __u32 fpsr; + __u32 fpcr; ++ __u32 __reserved[2]; + }; + + struct user_hwdebug_state { +diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S +index 028a1b91e2b3..c405e2421fd8 100644 +--- a/arch/arm64/kernel/entry.S ++++ b/arch/arm64/kernel/entry.S +@@ -493,7 +493,7 @@ el0_inv: + mov x0, sp + mov x1, #BAD_SYNC + mrs x2, esr_el1 +- b bad_mode ++ b bad_el0_sync + ENDPROC(el0_sync) + + .align 6 +diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c +index 9b9d651446ba..cdf1ec11c015 100644 +--- a/arch/arm64/kernel/ptrace.c ++++ b/arch/arm64/kernel/ptrace.c +@@ -442,6 +442,8 @@ static int hw_break_set(struct task_struct *target, + /* (address, ctrl) registers */ + limit = regset->n * regset->size; + while (count && offset < limit) { ++ if (count < PTRACE_HBP_ADDR_SZ) ++ return -EINVAL; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, + offset, offset + PTRACE_HBP_ADDR_SZ); + if (ret) +@@ -451,6 +453,8 @@ static int hw_break_set(struct task_struct *target, + return ret; + offset += PTRACE_HBP_ADDR_SZ; + ++ if (!count) ++ break; + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, + offset, offset + PTRACE_HBP_CTRL_SZ); + if (ret) +@@ -487,7 +491,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset, + const void *kbuf, const void __user *ubuf) + { + int ret; +- struct user_pt_regs newregs; ++ struct user_pt_regs newregs = task_pt_regs(target)->user_regs; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); + if (ret) +@@ -517,7 +521,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset, + const void *kbuf, const void __user *ubuf) + { + int ret; +- struct user_fpsimd_state newstate; ++ struct user_fpsimd_state newstate = ++ target->thread.fpsimd_state.user_fpsimd; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); + if (ret) +@@ -540,7 +545,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset, + const void *kbuf, const void __user *ubuf) + { + int ret; +- unsigned long tls; ++ unsigned long tls = target->thread.tp_value; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); + if (ret) +diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c +index 7ffadddb645d..7d1f6c5cfa65 100644 +--- a/arch/arm64/kernel/traps.c ++++ b/arch/arm64/kernel/traps.c +@@ -306,16 +306,33 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) + } + + /* +- * bad_mode handles the impossible case in the exception vector. ++ * bad_mode handles the impossible case in the exception vector. This is always ++ * fatal. 
+ */ + asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) + { +- siginfo_t info; +- void __user *pc = (void __user *)instruction_pointer(regs); + console_verbose(); + + pr_crit("Bad mode in %s handler detected, code 0x%08x\n", + handler[reason], esr); ++ ++ die("Oops - bad mode", regs, 0); ++ local_irq_disable(); ++ panic("bad mode"); ++} ++ ++/* ++ * bad_el0_sync handles unexpected, but potentially recoverable synchronous ++ * exceptions taken from EL0. Unlike bad_mode, this returns. ++ */ ++asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr) ++{ ++ siginfo_t info; ++ void __user *pc = (void __user *)instruction_pointer(regs); ++ console_verbose(); ++ ++ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x\n", ++ smp_processor_id(), esr); + __show_regs(regs); + + info.si_signo = SIGILL; +@@ -323,7 +340,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) + info.si_code = ILL_ILLOPC; + info.si_addr = pc; + +- arm64_notify_die("Oops - bad mode", regs, &info, 0); ++ force_sig_info(info.si_signo, &info, current); + } + + void __pte_error(const char *file, int line, unsigned long val) +diff --git a/arch/cris/boot/rescue/Makefile b/arch/cris/boot/rescue/Makefile +index 52bd0bd1dd22..d98edbb30a18 100644 +--- a/arch/cris/boot/rescue/Makefile ++++ b/arch/cris/boot/rescue/Makefile +@@ -10,6 +10,9 @@ + + asflags-y += $(LINUXINCLUDE) + ccflags-y += -O2 $(LINUXINCLUDE) ++ ++ifdef CONFIG_ETRAX_AXISFLASHMAP ++ + arch-$(CONFIG_ETRAX_ARCH_V10) = v10 + arch-$(CONFIG_ETRAX_ARCH_V32) = v32 + +@@ -28,6 +31,11 @@ $(obj)/rescue.bin: $(obj)/rescue.o FORCE + $(call if_changed,objcopy) + cp -p $(obj)/rescue.bin $(objtree) + ++else ++$(obj)/rescue.bin: ++ ++endif ++ + $(obj)/testrescue.bin: $(obj)/testrescue.o + $(OBJCOPY) $(OBJCOPYFLAGS) $(obj)/testrescue.o tr.bin + # Pad it to 784 bytes +diff --git a/arch/m68k/include/asm/delay.h b/arch/m68k/include/asm/delay.h +index d28fa8fe26fe..c598d847d56b 100644 +--- a/arch/m68k/include/asm/delay.h ++++ b/arch/m68k/include/asm/delay.h +@@ -114,6 +114,6 @@ static inline void __udelay(unsigned long usecs) + */ + #define HZSCALE (268435456 / (1000000 / HZ)) + +-#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000)); ++#define ndelay(n) __delay(DIV_ROUND_UP((n) * ((((HZSCALE) >> 11) * (loops_per_jiffy >> 11)) >> 6), 1000)) + + #endif /* defined(_M68K_DELAY_H) */ +diff --git a/arch/powerpc/boot/ps3-head.S b/arch/powerpc/boot/ps3-head.S +index b6fcbaf5027b..3dc44b05fb97 100644 +--- a/arch/powerpc/boot/ps3-head.S ++++ b/arch/powerpc/boot/ps3-head.S +@@ -57,11 +57,6 @@ __system_reset_overlay: + bctr + + 1: +- /* Save the value at addr zero for a null pointer write check later. */ +- +- li r4, 0 +- lwz r3, 0(r4) +- + /* Primary delays then goes to _zimage_start in wrapper. 
*/ + + or 31, 31, 31 /* db16cyc */ +diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c +index 9954d98871d0..029ea3ce1588 100644 +--- a/arch/powerpc/boot/ps3.c ++++ b/arch/powerpc/boot/ps3.c +@@ -119,13 +119,12 @@ void ps3_copy_vectors(void) + flush_cache((void *)0x100, 512); + } + +-void platform_init(unsigned long null_check) ++void platform_init(void) + { + const u32 heapsize = 0x1000000 - (u32)_end; /* 16MiB */ + void *chosen; + unsigned long ft_addr; + u64 rm_size; +- unsigned long val; + + console_ops.write = ps3_console_write; + platform_ops.exit = ps3_exit; +@@ -153,11 +152,6 @@ void platform_init(unsigned long null_check) + + printf(" flat tree at 0x%lx\n\r", ft_addr); + +- val = *(unsigned long *)0; +- +- if (val != null_check) +- printf("null check failed: %lx != %lx\n\r", val, null_check); +- + ((kernel_entry_t)0)(ft_addr, 0, NULL); + + ps3_exit(); +diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c +index 16a7c2326d48..bc47b7986e37 100644 +--- a/arch/powerpc/kernel/ibmebus.c ++++ b/arch/powerpc/kernel/ibmebus.c +@@ -180,6 +180,7 @@ static int ibmebus_create_device(struct device_node *dn) + static int ibmebus_create_devices(const struct of_device_id *matches) + { + struct device_node *root, *child; ++ struct device *dev; + int ret = 0; + + root = of_find_node_by_path("/"); +@@ -188,9 +189,12 @@ static int ibmebus_create_devices(const struct of_device_id *matches) + if (!of_match_node(matches, child)) + continue; + +- if (bus_find_device(&ibmebus_bus_type, NULL, child, +- ibmebus_match_node)) ++ dev = bus_find_device(&ibmebus_bus_type, NULL, child, ++ ibmebus_match_node); ++ if (dev) { ++ put_device(dev); + continue; ++ } + + ret = ibmebus_create_device(child); + if (ret) { +@@ -262,6 +266,7 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, + const char *buf, size_t count) + { + struct device_node *dn = NULL; ++ struct device *dev; + char *path; + ssize_t rc = 0; + +@@ -269,8 +274,10 @@ static ssize_t ibmebus_store_probe(struct bus_type *bus, + if (!path) + return -ENOMEM; + +- if (bus_find_device(&ibmebus_bus_type, NULL, path, +- ibmebus_match_path)) { ++ dev = bus_find_device(&ibmebus_bus_type, NULL, path, ++ ibmebus_match_path); ++ if (dev) { ++ put_device(dev); + printk(KERN_WARNING "%s: %s has already been probed\n", + __func__, path); + rc = -EEXIST; +@@ -306,6 +313,7 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, + if ((dev = bus_find_device(&ibmebus_bus_type, NULL, path, + ibmebus_match_path))) { + of_device_unregister(to_platform_device(dev)); ++ put_device(dev); + + kfree(path); + return count; +diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S +index df930727f73b..6ff0f4ef08be 100644 +--- a/arch/powerpc/kernel/idle_power7.S ++++ b/arch/powerpc/kernel/idle_power7.S +@@ -110,7 +110,7 @@ power7_enter_nap_mode: + std r0,0(r1) + ptesync + ld r0,0(r1) +-1: cmp cr0,r0,r0 ++1: cmpd cr0,r0,r0 + bne 1b + PPC_NAP + b . 
+diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S +index ace34137a501..e23298f065df 100644 +--- a/arch/powerpc/kernel/misc_32.S ++++ b/arch/powerpc/kernel/misc_32.S +@@ -313,7 +313,7 @@ _GLOBAL(flush_instruction_cache) + lis r3, KERNELBASE@h + iccci 0,r3 + #endif +-#elif CONFIG_FSL_BOOKE ++#elif defined(CONFIG_FSL_BOOKE) + BEGIN_FTR_SECTION + mfspr r3,SPRN_L1CSR0 + ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC +diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h +index 29559831c94f..43849c3d6275 100644 +--- a/arch/x86/include/asm/apic.h ++++ b/arch/x86/include/asm/apic.h +@@ -710,9 +710,8 @@ static inline void exiting_irq(void) + + static inline void exiting_ack_irq(void) + { +- irq_exit(); +- /* Ack only at the end to avoid potential reentry */ + ack_APIC_irq(); ++ irq_exit(); + } + + extern void ioapic_zap_locks(void); +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 9364936b47c2..f415fd820c86 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1067,7 +1067,7 @@ static __init int setup_disablecpuid(char *arg) + { + int bit; + +- if (get_option(&arg, &bit) && bit < NCAPINTS*32) ++ if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32) + setup_clear_cpu_cap(bit); + else + return 0; +diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c +index 0271272d55d0..050784bcd71f 100644 +--- a/arch/x86/kernel/cpu/perf_event.c ++++ b/arch/x86/kernel/cpu/perf_event.c +@@ -64,7 +64,7 @@ u64 x86_perf_event_update(struct perf_event *event) + int shift = 64 - x86_pmu.cntval_bits; + u64 prev_raw_count, new_raw_count; + int idx = hwc->idx; +- s64 delta; ++ u64 delta; + + if (idx == INTEL_PMC_IDX_FIXED_BTS) + return 0; +diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c +index 04e7df068f0e..0c6527a168f0 100644 +--- a/arch/x86/kernel/cpu/perf_event_intel.c ++++ b/arch/x86/kernel/cpu/perf_event_intel.c +@@ -2578,7 +2578,7 @@ __init int intel_pmu_init(void) + + /* Support full width counters using alternative MSR range */ + if (x86_pmu.intel_cap.full_width_write) { +- x86_pmu.max_period = x86_pmu.cntval_mask; ++ x86_pmu.max_period = x86_pmu.cntval_mask >> 1; + x86_pmu.perfctr = MSR_IA32_PMC0; + pr_cont("full-width counters, "); + } +diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S +index 1f1c33d0a13c..a78db5ed8b3f 100644 +--- a/arch/x86/kernel/entry_32.S ++++ b/arch/x86/kernel/entry_32.S +@@ -1113,8 +1113,8 @@ ftrace_graph_call: + jmp ftrace_stub + #endif + +-.globl ftrace_stub +-ftrace_stub: ++/* This is weak to keep gas from relaxing the jumps */ ++WEAK(ftrace_stub) + ret + END(ftrace_caller) + +diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S +index ead3e7c9672e..ceb8d113938b 100644 +--- a/arch/x86/kernel/entry_64.S ++++ b/arch/x86/kernel/entry_64.S +@@ -122,7 +122,8 @@ GLOBAL(ftrace_graph_call) + jmp ftrace_stub + #endif + +-GLOBAL(ftrace_stub) ++/* This is weak to keep gas from relaxing the jumps */ ++WEAK(ftrace_stub) + retq + END(ftrace_caller) + +diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c +index 7c3a5a61f2e4..e5d895fa1fe0 100644 +--- a/arch/x86/kernel/smp.c ++++ b/arch/x86/kernel/smp.c +@@ -267,8 +267,8 @@ __visible void smp_reschedule_interrupt(struct pt_regs *regs) + + static inline void smp_entering_irq(void) + { +- ack_APIC_irq(); + irq_enter(); ++ ack_APIC_irq(); + } + + __visible void smp_trace_reschedule_interrupt(struct pt_regs *regs) +diff --git a/arch/x86/kvm/emulate.c 
b/arch/x86/kvm/emulate.c +index 77d373211053..0b45efc5318f 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -744,6 +744,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, + return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); + } + ++static int segmented_write_std(struct x86_emulate_ctxt *ctxt, ++ struct segmented_address addr, ++ void *data, ++ unsigned int size) ++{ ++ int rc; ++ ulong linear; ++ ++ rc = linearize(ctxt, addr, size, true, &linear); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception); ++} ++ + /* + * Fetch the next byte of the instruction being emulated which is pointed to + * by ctxt->_eip, then increment ctxt->_eip. +@@ -1444,7 +1458,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, + &ctxt->exception); + } + +-/* Does not support long mode */ + static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + u16 selector, int seg, + struct desc_struct *desc) +@@ -1458,6 +1471,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + int ret; + u16 dummy; + ++ ++ /* ++ * None of MOV, POP and LSS can load a NULL selector in CPL=3, but ++ * they can load it at CPL<3 (Intel's manual says only LSS can, ++ * but it's wrong). ++ * ++ * However, the Intel manual says that putting IST=1/DPL=3 in ++ * an interrupt gate will result in SS=3 (the AMD manual instead ++ * says it doesn't), so allow SS=3 in __load_segment_descriptor ++ * and only forbid it here. ++ */ ++ if (seg == VCPU_SREG_SS && selector == 3 && ++ ctxt->mode == X86EMUL_MODE_PROT64) ++ return emulate_exception(ctxt, GP_VECTOR, 0, true); ++ + memset(&seg_desc, 0, sizeof seg_desc); + + if (ctxt->mode == X86EMUL_MODE_REAL) { +@@ -1480,20 +1508,34 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, + rpl = selector & 3; + cpl = ctxt->ops->cpl(ctxt); + +- /* NULL selector is not valid for TR, CS and SS (except for long mode) */ +- if ((seg == VCPU_SREG_CS +- || (seg == VCPU_SREG_SS +- && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)) +- || seg == VCPU_SREG_TR) +- && null_selector) +- goto exception; +- + /* TR should be in GDT only */ + if (seg == VCPU_SREG_TR && (selector & (1 << 2))) + goto exception; + +- if (null_selector) /* for NULL selector skip all following checks */ ++ /* NULL selector is not valid for TR, CS and (except for long mode) SS */ ++ if (null_selector) { ++ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR) ++ goto exception; ++ ++ if (seg == VCPU_SREG_SS) { ++ if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl) ++ goto exception; ++ ++ /* ++ * ctxt->ops->set_segment expects the CPL to be in ++ * SS.DPL, so fake an expand-up 32-bit data segment. ++ */ ++ seg_desc.type = 3; ++ seg_desc.p = 1; ++ seg_desc.s = 1; ++ seg_desc.dpl = cpl; ++ seg_desc.d = 1; ++ seg_desc.g = 1; ++ } ++ ++ /* Skip all following checks */ + goto load; ++ } + + ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr); + if (ret != X86EMUL_CONTINUE) +@@ -3179,8 +3221,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt, + } + /* Disable writeback. 
*/ + ctxt->dst.type = OP_NONE; +- return segmented_write(ctxt, ctxt->dst.addr.mem, +- &desc_ptr, 2 + ctxt->op_bytes); ++ return segmented_write_std(ctxt, ctxt->dst.addr.mem, ++ &desc_ptr, 2 + ctxt->op_bytes); + } + + static int em_sgdt(struct x86_emulate_ctxt *ctxt) +diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index a4ce2b2f1418..33d479540373 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -1908,3 +1908,9 @@ void kvm_lapic_init(void) + jump_label_rate_limit(&apic_hw_disabled, HZ); + jump_label_rate_limit(&apic_sw_disabled, HZ); + } ++ ++void kvm_lapic_exit(void) ++{ ++ static_key_deferred_flush(&apic_hw_disabled); ++ static_key_deferred_flush(&apic_sw_disabled); ++} +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h +index fc87568fc409..f1fd0753b6ba 100644 +--- a/arch/x86/kvm/lapic.h ++++ b/arch/x86/kvm/lapic.h +@@ -93,6 +93,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu) + + int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data); + void kvm_lapic_init(void); ++void kvm_lapic_exit(void); + + static inline u32 kvm_apic_get_reg(struct kvm_lapic *apic, int reg_off) + { +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index b81c81bce181..c7f2b3c52d92 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -1052,10 +1052,10 @@ static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12) + return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT); + } + +-static inline bool is_exception(u32 intr_info) ++static inline bool is_nmi(u32 intr_info) + { + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) +- == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); ++ == (INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK); + } + + static void nested_vmx_vmexit(struct kvm_vcpu *vcpu); +@@ -4769,7 +4769,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) + if (is_machine_check(intr_info)) + return handle_machine_check(vcpu); + +- if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR) ++ if (is_nmi(intr_info)) + return 1; /* already handled by vmx_vcpu_run() */ + + if (is_no_device(intr_info)) { +@@ -6653,7 +6653,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) + + switch (exit_reason) { + case EXIT_REASON_EXCEPTION_NMI: +- if (!is_exception(intr_info)) ++ if (is_nmi(intr_info)) + return 0; + else if (is_page_fault(intr_info)) + return enable_ept; +@@ -6962,8 +6962,7 @@ static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) + kvm_machine_check(); + + /* We need to handle NMIs before interrupts are enabled */ +- if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR && +- (exit_intr_info & INTR_INFO_VALID_MASK)) { ++ if (is_nmi(exit_intr_info)) { + kvm_before_handle_nmi(&vmx->vcpu); + asm("int $2"); + kvm_after_handle_nmi(&vmx->vcpu); +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 8562aff68884..69e7b0b9a6bb 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -5573,6 +5573,7 @@ out: + + void kvm_arch_exit(void) + { ++ kvm_lapic_exit(); + perf_unregister_guest_info_callbacks(&kvm_guest_cbs); + + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) +diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c +index a24e9c2e95da..a33c61c5e34a 100644 +--- a/arch/x86/pci/acpi.c ++++ b/arch/x86/pci/acpi.c +@@ -118,6 +118,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = { + DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"), + }, + }, ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */ ++ { ++ .callback = set_nouse_crs, ++ .ident = 
"Supermicro X8DTH", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"), ++ DMI_MATCH(DMI_BIOS_VERSION, "2.0a"), ++ }, ++ }, + + /* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */ + { +diff --git a/block/bsg.c b/block/bsg.c +index 420a5a9f1b23..76801e57f556 100644 +--- a/block/bsg.c ++++ b/block/bsg.c +@@ -675,6 +675,9 @@ bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) + + dprintk("%s: write %Zd bytes\n", bd->name, count); + ++ if (unlikely(segment_eq(get_fs(), KERNEL_DS))) ++ return -EINVAL; ++ + bsg_set_block(bd, file); + + bytes_written = 0; +diff --git a/drivers/base/core.c b/drivers/base/core.c +index 944fecd32e9f..449f7096974d 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -874,11 +874,29 @@ static struct kobject *get_device_parent(struct device *dev, + return NULL; + } + ++static inline bool live_in_glue_dir(struct kobject *kobj, ++ struct device *dev) ++{ ++ if (!kobj || !dev->class || ++ kobj->kset != &dev->class->p->glue_dirs) ++ return false; ++ return true; ++} ++ ++static inline struct kobject *get_glue_dir(struct device *dev) ++{ ++ return dev->kobj.parent; ++} ++ ++/* ++ * make sure cleaning up dir as the last step, we need to make ++ * sure .release handler of kobject is run with holding the ++ * global lock ++ */ + static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) + { + /* see if we live in a "glue" directory */ +- if (!glue_dir || !dev->class || +- glue_dir->kset != &dev->class->p->glue_dirs) ++ if (!live_in_glue_dir(glue_dir, dev)) + return; + + mutex_lock(&gdp_mutex); +@@ -886,11 +904,6 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir) + mutex_unlock(&gdp_mutex); + } + +-static void cleanup_device_parent(struct device *dev) +-{ +- cleanup_glue_dir(dev, dev->kobj.parent); +-} +- + static int device_add_class_symlinks(struct device *dev) + { + int error; +@@ -1054,6 +1067,7 @@ int device_add(struct device *dev) + struct kobject *kobj; + struct class_interface *class_intf; + int error = -EINVAL; ++ struct kobject *glue_dir = NULL; + + dev = get_device(dev); + if (!dev) +@@ -1098,8 +1112,10 @@ int device_add(struct device *dev) + /* first, register with generic layer. */ + /* we require the name to be set before, and pass NULL */ + error = kobject_add(&dev->kobj, dev->kobj.parent, NULL); +- if (error) ++ if (error) { ++ glue_dir = get_glue_dir(dev); + goto Error; ++ } + + /* notify platform of device entry */ + if (platform_notify) +@@ -1182,11 +1198,11 @@ done: + device_remove_file(dev, &dev_attr_uevent); + attrError: + kobject_uevent(&dev->kobj, KOBJ_REMOVE); ++ glue_dir = get_glue_dir(dev); + kobject_del(&dev->kobj); + Error: +- cleanup_device_parent(dev); +- if (parent) +- put_device(parent); ++ cleanup_glue_dir(dev, glue_dir); ++ put_device(parent); + name_error: + kfree(dev->p); + dev->p = NULL; +@@ -1261,6 +1277,7 @@ EXPORT_SYMBOL_GPL(put_device); + void device_del(struct device *dev) + { + struct device *parent = dev->parent; ++ struct kobject *glue_dir = NULL; + struct class_interface *class_intf; + + /* Notify clients of device removal. 
This call must come +@@ -1302,8 +1319,9 @@ void device_del(struct device *dev) + if (platform_notify_remove) + platform_notify_remove(dev); + kobject_uevent(&dev->kobj, KOBJ_REMOVE); +- cleanup_device_parent(dev); ++ glue_dir = get_glue_dir(dev); + kobject_del(&dev->kobj); ++ cleanup_glue_dir(dev, glue_dir); + put_device(parent); + } + EXPORT_SYMBOL_GPL(device_del); +diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c +index 805b4c344006..ee5f2c985f4d 100644 +--- a/drivers/clk/clk-wm831x.c ++++ b/drivers/clk/clk-wm831x.c +@@ -248,7 +248,7 @@ static int wm831x_clkout_is_prepared(struct clk_hw *hw) + if (ret < 0) { + dev_err(wm831x->dev, "Unable to read CLOCK_CONTROL_1: %d\n", + ret); +- return true; ++ return false; + } + + return (ret & WM831X_CLKOUT_ENA) != 0; +diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c +index fc0e502022de..26bfe09ce0fb 100644 +--- a/drivers/clocksource/exynos_mct.c ++++ b/drivers/clocksource/exynos_mct.c +@@ -398,13 +398,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) + return IRQ_HANDLED; + } + +-static int exynos4_local_timer_setup(struct clock_event_device *evt) ++static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt) + { +- struct mct_clock_event_device *mevt; ++ struct clock_event_device *evt = &mevt->evt; + unsigned int cpu = smp_processor_id(); + +- mevt = container_of(evt, struct mct_clock_event_device, evt); +- + mevt->base = EXYNOS4_MCT_L_BASE(cpu); + sprintf(mevt->name, "mct_tick%d", cpu); + +@@ -433,12 +431,15 @@ static int exynos4_local_timer_setup(struct clock_event_device *evt) + return 0; + } + +-static void exynos4_local_timer_stop(struct clock_event_device *evt) ++static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt) + { ++ struct clock_event_device *evt = &mevt->evt; ++ + evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt); + if (mct_int_type == MCT_INT_SPI) { + if (evt->irq != -1) + disable_irq_nosync(evt->irq); ++ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET); + } else { + disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); + } +@@ -456,11 +457,11 @@ static int exynos4_mct_cpu_notify(struct notifier_block *self, + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + mevt = this_cpu_ptr(&percpu_mct_tick); +- exynos4_local_timer_setup(&mevt->evt); ++ exynos4_local_timer_setup(mevt); + break; + case CPU_DYING: + mevt = this_cpu_ptr(&percpu_mct_tick); +- exynos4_local_timer_stop(&mevt->evt); ++ exynos4_local_timer_stop(mevt); + break; + } + +@@ -526,7 +527,7 @@ static void __init exynos4_timer_resources(struct device_node *np, void __iomem + goto out_irq; + + /* Immediately configure the timer on the boot CPU */ +- exynos4_local_timer_setup(&mevt->evt); ++ exynos4_local_timer_setup(mevt); + return; + + out_irq: +diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c +index 7c63b72ecd75..66f549399dc4 100644 +--- a/drivers/crypto/caam/caamalg.c ++++ b/drivers/crypto/caam/caamalg.c +@@ -418,7 +418,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead) + + /* Will read cryptlen */ + append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); +- aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); ++ append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | KEY_VLF | ++ FIFOLD_TYPE_MSG1OUT2 | FIFOLD_TYPE_LASTBOTH); ++ append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); + + /* Write ICV */ + append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | +diff --git a/drivers/gpu/drm/ast/ast_main.c 
b/drivers/gpu/drm/ast/ast_main.c +index 88fc3a5fa7c4..32be5cb1f797 100644 +--- a/drivers/gpu/drm/ast/ast_main.c ++++ b/drivers/gpu/drm/ast/ast_main.c +@@ -120,7 +120,8 @@ static int ast_get_dram_info(struct drm_device *dev) + ast_write32(ast, 0x10000, 0xfc600309); + + do { +- ; ++ if (pci_channel_offline(dev->pdev)) ++ return -EIO; + } while (ast_read32(ast, 0x10000) != 0x01); + data = ast_read32(ast, 0x10004); + +@@ -343,7 +344,9 @@ int ast_driver_load(struct drm_device *dev, unsigned long flags) + ast_detect_chip(dev); + + if (ast->chip != AST1180) { +- ast_get_dram_info(dev); ++ ret = ast_get_dram_info(dev); ++ if (ret) ++ goto out_free; + ast->vram_size = ast_get_vram_info(dev); + DRM_INFO("dram %d %d %d %08x\n", ast->mclk, ast->dram_type, ast->dram_bus_width, ast->vram_size); + } +diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c +index fcb4e9ff1f20..09c155737daf 100644 +--- a/drivers/gpu/drm/gma500/psb_drv.c ++++ b/drivers/gpu/drm/gma500/psb_drv.c +@@ -620,6 +620,9 @@ static const struct file_operations psb_gem_fops = { + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = psb_unlocked_ioctl, ++#ifdef CONFIG_COMPAT ++ .compat_ioctl = drm_compat_ioctl, ++#endif + .mmap = drm_gem_mmap, + .poll = drm_poll, + .read = drm_read, +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c +index 3265792f1990..f7af7a8e4cd0 100644 +--- a/drivers/gpu/drm/radeon/si_dpm.c ++++ b/drivers/gpu/drm/radeon/si_dpm.c +@@ -2943,24 +2943,12 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, + (rdev->pdev->device == 0x6817) || + (rdev->pdev->device == 0x6806)) + max_mclk = 120000; +- } else if (rdev->family == CHIP_VERDE) { +- if ((rdev->pdev->revision == 0x81) || +- (rdev->pdev->revision == 0x83) || +- (rdev->pdev->revision == 0x87) || +- (rdev->pdev->device == 0x6820) || +- (rdev->pdev->device == 0x6821) || +- (rdev->pdev->device == 0x6822) || +- (rdev->pdev->device == 0x6823) || +- (rdev->pdev->device == 0x682A) || +- (rdev->pdev->device == 0x682B)) { +- max_sclk = 75000; +- max_mclk = 80000; +- } + } else if (rdev->family == CHIP_OLAND) { + if ((rdev->pdev->revision == 0xC7) || + (rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || ++ (rdev->pdev->revision == 0x87) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605)) { + max_sclk = 75000; +diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c +index c4ef3bc726e3..e299576004ce 100644 +--- a/drivers/hid/hid-cypress.c ++++ b/drivers/hid/hid-cypress.c +@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc, + if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX)) + return rdesc; + ++ if (*rsize < 4) ++ return rdesc; ++ + for (i = 0; i < *rsize - 4; i++) + if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) { + __u8 tmp; +diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c +index 0918b9136588..2a50ab613238 100644 +--- a/drivers/hwmon/ds620.c ++++ b/drivers/hwmon/ds620.c +@@ -166,7 +166,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da, + if (res) + return res; + +- val = (val * 10 / 625) * 8; ++ val = (clamp_val(val, -128000, 128000) * 10 / 625) * 8; + + mutex_lock(&data->update_lock); + data->temp[attr->index] = val; +diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c +index b4b8b5bef718..3bc0e8224b33 100644 +--- a/drivers/hwmon/g762.c ++++ b/drivers/hwmon/g762.c +@@ -193,14 +193,17 @@ static inline unsigned int rpm_from_cnt(u8 cnt, u32 
clk_freq, u16 p, + * Convert fan RPM value from sysfs into count value for fan controller + * register (FAN_SET_CNT). + */ +-static inline unsigned char cnt_from_rpm(u32 rpm, u32 clk_freq, u16 p, ++static inline unsigned char cnt_from_rpm(unsigned long rpm, u32 clk_freq, u16 p, + u8 clk_div, u8 gear_mult) + { +- if (!rpm) /* to stop the fan, set cnt to 255 */ ++ unsigned long f1 = clk_freq * 30 * gear_mult; ++ unsigned long f2 = p * clk_div; ++ ++ if (!rpm) /* to stop the fan, set cnt to 255 */ + return 0xff; + +- return clamp_val(((clk_freq * 30 * gear_mult) / (rpm * p * clk_div)), +- 0, 255); ++ rpm = clamp_val(rpm, f1 / (255 * f2), ULONG_MAX / f2); ++ return DIV_ROUND_CLOSEST(f1, rpm * f2); + } + + /* helper to grab and cache data, at most one time per second */ +diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c +index c3ccdea3d180..fa3ecec524fa 100644 +--- a/drivers/i2c/i2c-dev.c ++++ b/drivers/i2c/i2c-dev.c +@@ -328,7 +328,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client, + unsigned long arg) + { + struct i2c_smbus_ioctl_data data_arg; +- union i2c_smbus_data temp; ++ union i2c_smbus_data temp = {}; + int datasize, res; + + if (copy_from_user(&data_arg, +diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c +index 4c837e66516b..f93fca41464f 100644 +--- a/drivers/infiniband/core/mad.c ++++ b/drivers/infiniband/core/mad.c +@@ -1598,7 +1598,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv, + if (!class) + goto out; + if (convert_mgmt_class(mad->mad_hdr.mgmt_class) >= +- IB_MGMT_MAX_METHODS) ++ ARRAY_SIZE(class->method_table)) + goto out; + method = class->method_table[convert_mgmt_class( + mad->mad_hdr.mgmt_class)]; +diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c +index 180d7f436ed5..2f861b59cbc1 100644 +--- a/drivers/infiniband/core/multicast.c ++++ b/drivers/infiniband/core/multicast.c +@@ -516,8 +516,11 @@ static void join_handler(int status, struct ib_sa_mcmember_rec *rec, + if (status) + process_join_error(group, status); + else { +- ib_find_pkey(group->port->dev->device, group->port->port_num, +- be16_to_cpu(rec->pkey), &pkey_index); ++ ++ if (ib_find_pkey(group->port->dev->device, ++ group->port->port_num, be16_to_cpu(rec->pkey), ++ &pkey_index)) ++ pkey_index = MCAST_INVALID_PKEY_INDEX; + + spin_lock_irq(&group->port->lock); + group->rec = *rec; +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c +index f55d69500a5f..3a85e7669068 100644 +--- a/drivers/infiniband/hw/mlx4/ah.c ++++ b/drivers/infiniband/hw/mlx4/ah.c +@@ -118,7 +118,9 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr + !(1 << ah->av.eth.stat_rate & dev->caps.stat_rate_support)) + --ah->av.eth.stat_rate; + } +- ++ ah->av.eth.sl_tclass_flowlabel |= ++ cpu_to_be32((ah_attr->grh.traffic_class << 20) | ++ ah_attr->grh.flow_label); + /* + * HW requires multicast LID so we just choose one. 
+ */ +@@ -126,7 +128,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr + ah->av.ib.dlid = cpu_to_be16(0xc000); + + memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16); +- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29); ++ ah->av.eth.sl_tclass_flowlabel |= cpu_to_be32(ah_attr->sl << 29); + + return &ah->ibah; + } +diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c +index f0612645de99..9407a31afe20 100644 +--- a/drivers/infiniband/hw/mlx4/main.c ++++ b/drivers/infiniband/hw/mlx4/main.c +@@ -335,9 +335,11 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port, + if (err) + goto out; + +- props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ? +- IB_WIDTH_4X : IB_WIDTH_1X; +- props->active_speed = IB_SPEED_QDR; ++ props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) || ++ (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? ++ IB_WIDTH_4X : IB_WIDTH_1X; ++ props->active_speed = (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ? ++ IB_SPEED_FDR : IB_SPEED_QDR; + props->port_cap_flags = IB_PORT_CM_SUP; + props->gid_tbl_len = mdev->dev->caps.gid_table_len[port]; + props->max_msg_sz = mdev->dev->caps.max_msg_sz; +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 5be10fb2edf2..a711aab97ae7 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -1094,6 +1094,12 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id + input_dev->name = xpad_device[i].name; + input_dev->phys = xpad->phys; + usb_to_input_id(udev, &input_dev->id); ++ ++ if (xpad->xtype == XTYPE_XBOX360W) { ++ /* x360w controllers and the receiver have different ids */ ++ input_dev->id.product = 0x02a1; ++ } ++ + input_dev->dev.parent = &intf->dev; + + input_set_drvdata(input_dev, xpad); +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index ccb36fb565de..3f3c517f2039 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), + }, + }, ++ { ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), ++ }, ++ }, + { } + }; + +diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c +index 71f9cd108590..557824a7e5b8 100644 +--- a/drivers/iommu/amd_iommu.c ++++ b/drivers/iommu/amd_iommu.c +@@ -1044,7 +1044,7 @@ again: + next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; + left = (head - next_tail) % iommu->cmd_buf_size; + +- if (left <= 2) { ++ if (left <= 0x20) { + struct iommu_cmd sync_cmd; + volatile u64 sem = 0; + int ret; +diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c +index 3ac9c4194814..53dfe1693e50 100644 +--- a/drivers/isdn/gigaset/ser-gigaset.c ++++ b/drivers/isdn/gigaset/ser-gigaset.c +@@ -787,8 +787,10 @@ static int __init ser_gigaset_init(void) + driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, + GIGASET_MODULENAME, GIGASET_DEVNAME, + &ops, THIS_MODULE); +- if (!driver) ++ if (!driver) { ++ rc = -ENOMEM; + goto error; ++ } + + rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc); + if (rc != 0) { +diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c +index 0f64dc596bce..c1b36e208669 100644 +--- a/drivers/md/dm-crypt.c ++++ b/drivers/md/dm-crypt.c +@@ -1283,12 +1283,15 @@ static int crypt_set_key(struct 
crypt_config *cc, char *key) + if (!cc->key_size && strcmp(key, "-")) + goto out; + ++ /* clear the flag since following operations may invalidate previously valid key */ ++ clear_bit(DM_CRYPT_KEY_VALID, &cc->flags); ++ + if (cc->key_size && crypt_decode_key(cc->key, key, cc->key_size) < 0) + goto out; + +- set_bit(DM_CRYPT_KEY_VALID, &cc->flags); +- + r = crypt_setkey_allcpus(cc); ++ if (!r) ++ set_bit(DM_CRYPT_KEY_VALID, &cc->flags); + + out: + /* Hex key string not needed after here, so wipe it. */ +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 81bf511b3182..87e8cd29ca5f 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -6431,7 +6431,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, + /* need to ensure recovery thread has run */ + wait_event_interruptible_timeout(mddev->sb_wait, + !test_bit(MD_RECOVERY_NEEDED, +- &mddev->flags), ++ &mddev->recovery), + msecs_to_jiffies(5000)); + if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { + /* Need to flush page cache, and ensure no-one else opens +diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c +index 8a8f06bcde60..1543f37c272a 100644 +--- a/drivers/md/persistent-data/dm-space-map-metadata.c ++++ b/drivers/md/persistent-data/dm-space-map-metadata.c +@@ -773,15 +773,13 @@ int dm_sm_metadata_create(struct dm_space_map *sm, + memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); + + r = sm_ll_new_metadata(&smm->ll, tm); ++ if (!r) { ++ r = sm_ll_extend(&smm->ll, nr_blocks); ++ } ++ memcpy(&smm->sm, &ops, sizeof(smm->sm)); + if (r) + return r; + +- r = sm_ll_extend(&smm->ll, nr_blocks); +- if (r) +- return r; +- +- memcpy(&smm->sm, &ops, sizeof(smm->sm)); +- + /* + * Now we need to update the newly created data structures with the + * allocated blocks that they were built from. 
+diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 9fbc77c6e132..01757b23e1fc 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -5943,6 +5943,15 @@ static int run(struct mddev *mddev) + stripe = (stripe | (stripe-1)) + 1; + mddev->queue->limits.discard_alignment = stripe; + mddev->queue->limits.discard_granularity = stripe; ++ ++ /* ++ * We use 16-bit counter of active stripes in bi_phys_segments ++ * (minus one for over-loaded initialization) ++ */ ++ blk_queue_max_hw_sectors(mddev->queue, 0xfffe * STRIPE_SECTORS); ++ blk_queue_max_discard_sectors(mddev->queue, ++ 0xfffe * STRIPE_SECTORS); ++ + /* + * unaligned part of discard request will be ignored, so can't + * guarantee discard_zeroes_data +diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c +index 63b42252166a..7a754ec826ac 100644 +--- a/drivers/media/rc/ite-cir.c ++++ b/drivers/media/rc/ite-cir.c +@@ -263,6 +263,8 @@ static void ite_set_carrier_params(struct ite_dev *dev) + + if (allowance > ITE_RXDCR_MAX) + allowance = ITE_RXDCR_MAX; ++ ++ use_demodulator = true; + } + } + +diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c +index 9771cd83c06e..3a615e4c4991 100644 +--- a/drivers/media/tuners/tuner-xc2028.c ++++ b/drivers/media/tuners/tuner-xc2028.c +@@ -289,6 +289,14 @@ static void free_firmware(struct xc2028_data *priv) + int i; + tuner_dbg("%s called\n", __func__); + ++ /* free allocated f/w string */ ++ if (priv->fname != firmware_name) ++ kfree(priv->fname); ++ priv->fname = NULL; ++ ++ priv->state = XC2028_NO_FIRMWARE; ++ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); ++ + if (!priv->firm) + return; + +@@ -299,9 +307,6 @@ static void free_firmware(struct xc2028_data *priv) + + priv->firm = NULL; + priv->firm_size = 0; +- priv->state = XC2028_NO_FIRMWARE; +- +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + } + + static int load_all_firmwares(struct dvb_frontend *fe, +@@ -890,9 +895,9 @@ read_not_reliable: + return 0; + + fail: ++ free_firmware(priv); + priv->state = XC2028_SLEEP; + +- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); + if (retry_count < 8) { + msleep(50); + retry_count++; +@@ -1314,11 +1319,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe) + mutex_lock(&xc2028_list_mutex); + + /* only perform final cleanup if this is the last instance */ +- if (hybrid_tuner_report_instance_count(priv) == 1) { ++ if (hybrid_tuner_report_instance_count(priv) == 1) + free_firmware(priv); +- kfree(priv->ctrl.fname); +- priv->ctrl.fname = NULL; +- } + + if (priv) + hybrid_tuner_release_state(priv); +@@ -1381,16 +1383,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + + /* + * Copy the config data. +- * For the firmware name, keep a local copy of the string, +- * in order to avoid troubles during device release. + */ +- kfree(priv->ctrl.fname); + memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); +- if (p->fname) { +- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); +- if (priv->ctrl.fname == NULL) +- rc = -ENOMEM; +- } + + /* + * If firmware name changed, frees firmware. 
As free_firmware will +@@ -1405,10 +1399,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + + if (priv->state == XC2028_NO_FIRMWARE) { + if (!firmware_name[0]) +- priv->fname = priv->ctrl.fname; ++ priv->fname = kstrdup(p->fname, GFP_KERNEL); + else + priv->fname = firmware_name; + ++ if (!priv->fname) { ++ rc = -ENOMEM; ++ goto unlock; ++ } ++ + rc = request_firmware_nowait(THIS_MODULE, 1, + priv->fname, + priv->i2c_props.adap->dev.parent, +@@ -1421,6 +1420,7 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) + } else + priv->state = XC2028_WAITING_FIRMWARE; + } ++unlock: + mutex_unlock(&priv->lock); + + return rc; +diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c +index 0c0fc52d42c5..b2ef5f2b4c53 100644 +--- a/drivers/mmc/card/mmc_test.c ++++ b/drivers/mmc/card/mmc_test.c +@@ -795,7 +795,7 @@ static int mmc_test_nonblock_transfer(struct mmc_test_card *test, + struct mmc_async_req *cur_areq = &test_areq[0].areq; + struct mmc_async_req *other_areq = &test_areq[1].areq; + int i; +- int ret; ++ int ret = RESULT_OK; + + test_areq[0].test = test; + test_areq[1].test = test; +diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c +index f8aac3044670..f87e6e9ce386 100644 +--- a/drivers/mmc/host/mxs-mmc.c ++++ b/drivers/mmc/host/mxs-mmc.c +@@ -315,6 +315,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) + cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); + cmd1 = cmd->arg; + ++ if (cmd->opcode == MMC_STOP_TRANSMISSION) ++ cmd0 |= BM_SSP_CMD0_APPEND_8CYC; ++ + if (host->sdio_irq_en) { + ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; + cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; +@@ -423,8 +426,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) + ssp->base + HW_SSP_BLOCK_SIZE); + } + +- if ((cmd->opcode == MMC_STOP_TRANSMISSION) || +- (cmd->opcode == SD_IO_RW_EXTENDED)) ++ if (cmd->opcode == SD_IO_RW_EXTENDED) + cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + + cmd1 = cmd->arg; +diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig +index d88529841d3f..2bb9c04cb2c5 100644 +--- a/drivers/mtd/nand/Kconfig ++++ b/drivers/mtd/nand/Kconfig +@@ -531,7 +531,7 @@ config MTD_NAND_FSMC + Flexible Static Memory Controller (FSMC) + + config MTD_NAND_XWAY +- tristate "Support for NAND on Lantiq XWAY SoC" ++ bool "Support for NAND on Lantiq XWAY SoC" + depends on LANTIQ && SOC_TYPE_XWAY + select MTD_NAND_PLATFORM + help +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +index 03e7f0cbda8c..47f0dcbf42ca 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c +@@ -824,23 +824,25 @@ lbl_free_candev: + static void peak_usb_disconnect(struct usb_interface *intf) + { + struct peak_usb_device *dev; ++ struct peak_usb_device *dev_prev_siblings; + + /* unregister as many netdev devices as siblings */ +- for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) { ++ for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) { + struct net_device *netdev = dev->netdev; + char name[IFNAMSIZ]; + ++ dev_prev_siblings = dev->prev_siblings; + dev->state &= ~PCAN_USB_STATE_CONNECTED; + strncpy(name, netdev->name, IFNAMSIZ); + + unregister_netdev(netdev); +- free_candev(netdev); + + kfree(dev->cmd_buf); + dev->next_siblings = NULL; + if (dev->adapter->dev_free) + dev->adapter->dev_free(dev); + ++ free_candev(netdev); + dev_info(&intf->dev, "%s removed\n", name); + } + +diff --git 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +index 97fe8e6dba79..5ef133a5a48b 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +@@ -1776,8 +1776,16 @@ static void bnx2x_get_ringparam(struct net_device *dev, + + ering->rx_max_pending = MAX_RX_AVAIL; + ++ /* If size isn't already set, we give an estimation of the number ++ * of buffers we'll have. We're neglecting some possible conditions ++ * [we couldn't know for certain at this point if number of queues ++ * might shrink] but the number would be correct for the likely ++ * scenario. ++ */ + if (bp->rx_ring_size) + ering->rx_pending = bp->rx_ring_size; ++ else if (BNX2X_NUM_RX_QUEUES(bp)) ++ ering->rx_pending = MAX_RX_AVAIL / BNX2X_NUM_RX_QUEUES(bp); + else + ering->rx_pending = MAX_RX_AVAIL; + +diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c +index 45ce6e2214b3..2deabae1d66e 100644 +--- a/drivers/net/ethernet/brocade/bna/bnad.c ++++ b/drivers/net/ethernet/brocade/bna/bnad.c +@@ -193,6 +193,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) + return 0; + + hw_cons = *(tcb->hw_consumer_index); ++ rmb(); + cons = tcb->consumer_index; + q_depth = tcb->q_depth; + +@@ -2906,13 +2907,12 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) + BNA_QE_INDX_INC(prod, q_depth); + tcb->producer_index = prod; + +- smp_mb(); ++ wmb(); + + if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) + return NETDEV_TX_OK; + + bna_txq_prod_indx_doorbell(tcb); +- smp_mb(); + + return NETDEV_TX_OK; + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 3b5459696310..4ce28987c3c1 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -2723,12 +2723,6 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, + spin_lock_init(&priv->lock); + spin_lock_init(&priv->tx_lock); + +- ret = register_netdev(ndev); +- if (ret) { +- pr_err("%s: ERROR %i registering the device\n", __func__, ret); +- goto error_netdev_register; +- } +- + priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME); + if (IS_ERR(priv->stmmac_clk)) { + pr_warn("%s: warning: cannot get CSR clock\n", __func__); +@@ -2759,13 +2753,23 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, + } + } + ++ ret = register_netdev(ndev); ++ if (ret) { ++ netdev_err(priv->dev, "%s: ERROR %i registering the device\n", ++ __func__, ret); ++ goto error_netdev_register; ++ } ++ + return priv; + ++error_netdev_register: ++ if (priv->pcs != STMMAC_PCS_RGMII && ++ priv->pcs != STMMAC_PCS_TBI && ++ priv->pcs != STMMAC_PCS_RTBI) ++ stmmac_mdio_unregister(ndev); + error_mdio_register: + clk_put(priv->stmmac_clk); + error_clk_get: +- unregister_netdev(ndev); +-error_netdev_register: + netif_napi_del(&priv->napi); + error_free_netdev: + free_netdev(ndev); +diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c +index 2dc16b6efaf0..97f3e626b535 100644 +--- a/drivers/net/ethernet/ti/cpmac.c ++++ b/drivers/net/ethernet/ti/cpmac.c +@@ -557,7 +557,8 @@ fatal_error: + + static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) + { +- int queue, len; ++ int queue; ++ unsigned int len; + struct cpmac_desc *desc; + struct cpmac_priv *priv = netdev_priv(dev); + +@@ -567,7 +568,7 @@ static int cpmac_start_xmit(struct sk_buff 
*skb, struct net_device *dev) + if (unlikely(skb_padto(skb, ETH_ZLEN))) + return NETDEV_TX_OK; + +- len = max(skb->len, ETH_ZLEN); ++ len = max_t(unsigned int, skb->len, ETH_ZLEN); + queue = skb_get_queue_mapping(skb); + netif_stop_subqueue(dev, queue); + +diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c +index 616b4e1dd44c..eb6d0d8a3e06 100644 +--- a/drivers/net/hyperv/netvsc_drv.c ++++ b/drivers/net/hyperv/netvsc_drv.c +@@ -48,6 +48,9 @@ struct net_device_context { + struct work_struct work; + }; + ++/* Restrict GSO size to account for NVGRE */ ++#define NETVSC_GSO_MAX_SIZE 62768 ++ + #define RING_SIZE_MIN 64 + static int ring_size = 128; + module_param(ring_size, int, S_IRUGO); +@@ -435,6 +438,7 @@ static int netvsc_probe(struct hv_device *dev, + + SET_ETHTOOL_OPS(net, ðtool_ops); + SET_NETDEV_DEV(net, &dev->device); ++ netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE); + + ret = register_netdev(net); + if (ret != 0) { +diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c +index 55d89390b4bc..59dcdfcd0c28 100644 +--- a/drivers/net/vmxnet3/vmxnet3_drv.c ++++ b/drivers/net/vmxnet3/vmxnet3_drv.c +@@ -2890,7 +2890,6 @@ vmxnet3_tx_timeout(struct net_device *netdev) + + netdev_err(adapter->netdev, "tx hang\n"); + schedule_work(&adapter->work); +- netif_wake_queue(adapter->netdev); + } + + +@@ -2917,6 +2916,7 @@ vmxnet3_reset_work(struct work_struct *data) + } + rtnl_unlock(); + ++ netif_wake_queue(adapter->netdev); + clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); + } + +diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c +index bb7af78e4eed..6a995e0919dd 100644 +--- a/drivers/pci/hotplug/rpadlpar_core.c ++++ b/drivers/pci/hotplug/rpadlpar_core.c +@@ -259,8 +259,13 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn) + + static int dlpar_add_vio_slot(char *drc_name, struct device_node *dn) + { +- if (vio_find_node(dn)) ++ struct vio_dev *vio_dev; ++ ++ vio_dev = vio_find_node(dn); ++ if (vio_dev) { ++ put_device(&vio_dev->dev); + return -EINVAL; ++ } + + if (!vio_register_device_node(dn)) { + printk(KERN_ERR +@@ -336,6 +341,9 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn) + return -EINVAL; + + vio_unregister_device(vio_dev); ++ ++ put_device(&vio_dev->dev); ++ + return 0; + } + +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 36c3e71d54b5..1b9548fb9102 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -1906,6 +1906,10 @@ bool pci_dev_run_wake(struct pci_dev *dev) + if (!dev->pme_support) + return false; + ++ /* PME-capable in principle, but not from the intended sleep state */ ++ if (!pci_pme_capable(dev, pci_target_state(dev))) ++ return false; ++ + while (bus->parent) { + struct pci_dev *bridge = bus->self; + +diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c +index e758af95c209..b625a1f062bf 100644 +--- a/drivers/pinctrl/sh-pfc/pinctrl.c ++++ b/drivers/pinctrl/sh-pfc/pinctrl.c +@@ -479,7 +479,8 @@ static bool sh_pfc_pinconf_validate(struct sh_pfc *pfc, unsigned int _pin, + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: +- return true; ++ return pin->configs & ++ (SH_PFC_PIN_CFG_PULL_UP | SH_PFC_PIN_CFG_PULL_DOWN); + + case PIN_CONFIG_BIAS_PULL_UP: + return pin->configs & SH_PFC_PIN_CFG_PULL_UP; +diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c +index cf31d3321dab..a7f44f30273b 100644 +--- a/drivers/s390/char/vmlogrdr.c ++++ b/drivers/s390/char/vmlogrdr.c +@@ -873,7 
+873,7 @@ static int __init vmlogrdr_init(void) + goto cleanup; + + for (i=0; i < MAXMINOR; ++i ) { +- sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL); ++ sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!sys_ser[i].buffer) { + rc = -ENOMEM; + break; +diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c +index 371aed75eb83..79f0f2e096cb 100644 +--- a/drivers/s390/scsi/zfcp_dbf.c ++++ b/drivers/s390/scsi/zfcp_dbf.c +@@ -289,11 +289,12 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter, + + + /** +- * zfcp_dbf_rec_run - trace event related to running recovery ++ * zfcp_dbf_rec_run_lvl - trace event related to running recovery ++ * @level: trace level to be used for event + * @tag: identifier for event + * @erp: erp_action running + */ +-void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) ++void zfcp_dbf_rec_run_lvl(int level, char *tag, struct zfcp_erp_action *erp) + { + struct zfcp_dbf *dbf = erp->adapter->dbf; + struct zfcp_dbf_rec *rec = &dbf->rec_buf; +@@ -319,11 +320,21 @@ void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) + else + rec->u.run.rec_count = atomic_read(&erp->adapter->erp_counter); + +- debug_event(dbf->rec, 1, rec, sizeof(*rec)); ++ debug_event(dbf->rec, level, rec, sizeof(*rec)); + spin_unlock_irqrestore(&dbf->rec_lock, flags); + } + + /** ++ * zfcp_dbf_rec_run - trace event related to running recovery ++ * @tag: identifier for event ++ * @erp: erp_action running ++ */ ++void zfcp_dbf_rec_run(char *tag, struct zfcp_erp_action *erp) ++{ ++ zfcp_dbf_rec_run_lvl(1, tag, erp); ++} ++ ++/** + * zfcp_dbf_rec_run_wka - trace wka port event with info like running recovery + * @tag: identifier for event + * @wka_port: well known address port +diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h +index 440aa619da1d..a8165f142550 100644 +--- a/drivers/s390/scsi/zfcp_dbf.h ++++ b/drivers/s390/scsi/zfcp_dbf.h +@@ -2,7 +2,7 @@ + * zfcp device driver + * debug feature declarations + * +- * Copyright IBM Corp. 2008, 2015 ++ * Copyright IBM Corp. 2008, 2016 + */ + + #ifndef ZFCP_DBF_H +@@ -283,6 +283,30 @@ struct zfcp_dbf { + struct zfcp_dbf_scsi scsi_buf; + }; + ++/** ++ * zfcp_dbf_hba_fsf_resp_suppress - true if we should not trace by default ++ * @req: request that has been completed ++ * ++ * Returns true if FCP response with only benign residual under count. ++ */ ++static inline ++bool zfcp_dbf_hba_fsf_resp_suppress(struct zfcp_fsf_req *req) ++{ ++ struct fsf_qtcb *qtcb = req->qtcb; ++ u32 fsf_stat = qtcb->header.fsf_status; ++ struct fcp_resp *fcp_rsp; ++ u8 rsp_flags, fr_status; ++ ++ if (qtcb->prefix.qtcb_type != FSF_IO_COMMAND) ++ return false; /* not an FCP response */ ++ fcp_rsp = (struct fcp_resp *)&qtcb->bottom.io.fcp_rsp; ++ rsp_flags = fcp_rsp->fr_flags; ++ fr_status = fcp_rsp->fr_status; ++ return (fsf_stat == FSF_FCP_RSP_AVAILABLE) && ++ (rsp_flags == FCP_RESID_UNDER) && ++ (fr_status == SAM_STAT_GOOD); ++} ++ + static inline + void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) + { +@@ -304,7 +328,9 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) + zfcp_dbf_hba_fsf_resp("fs_perr", 1, req); + + } else if (qtcb->header.fsf_status != FSF_GOOD) { +- zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req); ++ zfcp_dbf_hba_fsf_resp("fs_ferr", ++ zfcp_dbf_hba_fsf_resp_suppress(req) ++ ? 
5 : 1, req); + + } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || + (req->fsf_command == FSF_QTCB_OPEN_LUN)) { +@@ -388,4 +414,15 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag) + _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL); + } + ++/** ++ * zfcp_dbf_scsi_nullcmnd() - trace NULLify of SCSI command in dev/tgt-reset. ++ * @scmnd: SCSI command that was NULLified. ++ * @fsf_req: request that owned @scmnd. ++ */ ++static inline void zfcp_dbf_scsi_nullcmnd(struct scsi_cmnd *scmnd, ++ struct zfcp_fsf_req *fsf_req) ++{ ++ _zfcp_dbf_scsi("scfc__1", 3, scmnd, fsf_req); ++} ++ + #endif /* ZFCP_DBF_H */ +diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c +index ac86ff90c897..acb0b8c3989d 100644 +--- a/drivers/s390/scsi/zfcp_erp.c ++++ b/drivers/s390/scsi/zfcp_erp.c +@@ -3,7 +3,7 @@ + * + * Error Recovery Procedures (ERP). + * +- * Copyright IBM Corp. 2002, 2015 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -1211,6 +1211,62 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action) + } + } + ++/** ++ * zfcp_erp_try_rport_unblock - unblock rport if no more/new recovery ++ * @port: zfcp_port whose fc_rport we should try to unblock ++ */ ++static void zfcp_erp_try_rport_unblock(struct zfcp_port *port) ++{ ++ unsigned long flags; ++ struct zfcp_adapter *adapter = port->adapter; ++ int port_status; ++ struct Scsi_Host *shost = adapter->scsi_host; ++ struct scsi_device *sdev; ++ ++ write_lock_irqsave(&adapter->erp_lock, flags); ++ port_status = atomic_read(&port->status); ++ if ((port_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 || ++ (port_status & (ZFCP_STATUS_COMMON_ERP_INUSE | ++ ZFCP_STATUS_COMMON_ERP_FAILED)) != 0) { ++ /* new ERP of severity >= port triggered elsewhere meanwhile or ++ * local link down (adapter erp_failed but not clear unblock) ++ */ ++ zfcp_dbf_rec_run_lvl(4, "ertru_p", &port->erp_action); ++ write_unlock_irqrestore(&adapter->erp_lock, flags); ++ return; ++ } ++ spin_lock(shost->host_lock); ++ __shost_for_each_device(sdev, shost) { ++ struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev); ++ int lun_status; ++ ++ if (zsdev->port != port) ++ continue; ++ /* LUN under port of interest */ ++ lun_status = atomic_read(&zsdev->status); ++ if ((lun_status & ZFCP_STATUS_COMMON_ERP_FAILED) != 0) ++ continue; /* unblock rport despite failed LUNs */ ++ /* LUN recovery not given up yet [maybe follow-up pending] */ ++ if ((lun_status & ZFCP_STATUS_COMMON_UNBLOCKED) == 0 || ++ (lun_status & ZFCP_STATUS_COMMON_ERP_INUSE) != 0) { ++ /* LUN blocked: ++ * not yet unblocked [LUN recovery pending] ++ * or meanwhile blocked [new LUN recovery triggered] ++ */ ++ zfcp_dbf_rec_run_lvl(4, "ertru_l", &zsdev->erp_action); ++ spin_unlock(shost->host_lock); ++ write_unlock_irqrestore(&adapter->erp_lock, flags); ++ return; ++ } ++ } ++ /* now port has no child or all children have completed recovery, ++ * and no ERP of severity >= port was meanwhile triggered elsewhere ++ */ ++ zfcp_scsi_schedule_rport_register(port); ++ spin_unlock(shost->host_lock); ++ write_unlock_irqrestore(&adapter->erp_lock, flags); ++} ++ + static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) + { + struct zfcp_adapter *adapter = act->adapter; +@@ -1221,6 +1277,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) + case ZFCP_ERP_ACTION_REOPEN_LUN: + if (!(act->status & ZFCP_STATUS_ERP_NO_REF)) + scsi_device_put(sdev); ++ zfcp_erp_try_rport_unblock(port); + break; + + case 
ZFCP_ERP_ACTION_REOPEN_PORT: +@@ -1231,7 +1288,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) + */ + if (act->step != ZFCP_ERP_STEP_UNINITIALIZED) + if (result == ZFCP_ERP_SUCCEEDED) +- zfcp_scsi_schedule_rport_register(port); ++ zfcp_erp_try_rport_unblock(port); + /* fall through */ + case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED: + put_device(&port->dev); +diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h +index 1f1fe41ecb97..0c8c8b8fc1de 100644 +--- a/drivers/s390/scsi/zfcp_ext.h ++++ b/drivers/s390/scsi/zfcp_ext.h +@@ -3,7 +3,7 @@ + * + * External function declarations. + * +- * Copyright IBM Corp. 2002, 2015 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #ifndef ZFCP_EXT_H +@@ -35,6 +35,8 @@ extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *); + extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *, + struct zfcp_port *, struct scsi_device *, u8, u8); + extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *); ++extern void zfcp_dbf_rec_run_lvl(int level, char *tag, ++ struct zfcp_erp_action *erp); + extern void zfcp_dbf_rec_run_wka(char *, struct zfcp_fc_wka_port *, u64); + extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); + extern void zfcp_dbf_hba_fsf_res(char *, int, struct zfcp_fsf_req *); +diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h +index be1c04b334c5..ea3c76ac0de1 100644 +--- a/drivers/s390/scsi/zfcp_fsf.h ++++ b/drivers/s390/scsi/zfcp_fsf.h +@@ -3,7 +3,7 @@ + * + * Interface to the FSF support functions. + * +- * Copyright IBM Corp. 2002, 2015 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #ifndef FSF_H +@@ -78,6 +78,7 @@ + #define FSF_APP_TAG_CHECK_FAILURE 0x00000082 + #define FSF_REF_TAG_CHECK_FAILURE 0x00000083 + #define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD ++#define FSF_FCP_RSP_AVAILABLE 0x000000AF + #define FSF_UNKNOWN_COMMAND 0x000000E2 + #define FSF_UNKNOWN_OP_SUBTYPE 0x000000E3 + #define FSF_INVALID_COMMAND_OPTION 0x000000E5 +diff --git a/drivers/s390/scsi/zfcp_reqlist.h b/drivers/s390/scsi/zfcp_reqlist.h +index 7c2c6194dfca..703fce59befe 100644 +--- a/drivers/s390/scsi/zfcp_reqlist.h ++++ b/drivers/s390/scsi/zfcp_reqlist.h +@@ -4,7 +4,7 @@ + * Data structure and helper functions for tracking pending FSF + * requests. + * +- * Copyright IBM Corp. 2009 ++ * Copyright IBM Corp. 2009, 2016 + */ + + #ifndef ZFCP_REQLIST_H +@@ -180,4 +180,32 @@ static inline void zfcp_reqlist_move(struct zfcp_reqlist *rl, + spin_unlock_irqrestore(&rl->lock, flags); + } + ++/** ++ * zfcp_reqlist_apply_for_all() - apply a function to every request. ++ * @rl: the requestlist that contains the target requests. ++ * @f: the function to apply to each request; the first parameter of the ++ * function will be the target-request; the second parameter is the same ++ * pointer as given with the argument @data. ++ * @data: freely chosen argument; passed through to @f as second parameter. ++ * ++ * Uses :c:macro:`list_for_each_entry` to iterate over the lists in the hash- ++ * table (not a 'safe' variant, so don't modify the list). ++ * ++ * Holds @rl->lock over the entire request-iteration. 
++ */ ++static inline void ++zfcp_reqlist_apply_for_all(struct zfcp_reqlist *rl, ++ void (*f)(struct zfcp_fsf_req *, void *), void *data) ++{ ++ struct zfcp_fsf_req *req; ++ unsigned long flags; ++ unsigned int i; ++ ++ spin_lock_irqsave(&rl->lock, flags); ++ for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++) ++ list_for_each_entry(req, &rl->buckets[i], list) ++ f(req, data); ++ spin_unlock_irqrestore(&rl->lock, flags); ++} ++ + #endif /* ZFCP_REQLIST_H */ +diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c +index 38ee0df633a3..66c37e77ac7c 100644 +--- a/drivers/s390/scsi/zfcp_scsi.c ++++ b/drivers/s390/scsi/zfcp_scsi.c +@@ -3,7 +3,7 @@ + * + * Interface to Linux SCSI midlayer. + * +- * Copyright IBM Corp. 2002, 2015 ++ * Copyright IBM Corp. 2002, 2016 + */ + + #define KMSG_COMPONENT "zfcp" +@@ -109,9 +109,7 @@ int zfcp_scsi_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scpnt) + } + + if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) { +- /* This could be either +- * open LUN pending: this is temporary, will result in +- * open LUN or ERP_FAILED, so retry command ++ /* This could be + * call to rport_delete pending: mimic retry from + * fc_remote_port_chkready until rport is BLOCKED + */ +@@ -230,6 +228,57 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt) + return retval; + } + ++struct zfcp_scsi_req_filter { ++ u8 tmf_scope; ++ u32 lun_handle; ++ u32 port_handle; ++}; ++ ++static void zfcp_scsi_forget_cmnd(struct zfcp_fsf_req *old_req, void *data) ++{ ++ struct zfcp_scsi_req_filter *filter = ++ (struct zfcp_scsi_req_filter *)data; ++ ++ /* already aborted - prevent side-effects - or not a SCSI command */ ++ if (old_req->data == NULL || old_req->fsf_command != FSF_QTCB_FCP_CMND) ++ return; ++ ++ /* (tmf_scope == FCP_TMF_TGT_RESET || tmf_scope == FCP_TMF_LUN_RESET) */ ++ if (old_req->qtcb->header.port_handle != filter->port_handle) ++ return; ++ ++ if (filter->tmf_scope == FCP_TMF_LUN_RESET && ++ old_req->qtcb->header.lun_handle != filter->lun_handle) ++ return; ++ ++ zfcp_dbf_scsi_nullcmnd((struct scsi_cmnd *)old_req->data, old_req); ++ old_req->data = NULL; ++} ++ ++static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags) ++{ ++ struct zfcp_adapter *adapter = zsdev->port->adapter; ++ struct zfcp_scsi_req_filter filter = { ++ .tmf_scope = FCP_TMF_TGT_RESET, ++ .port_handle = zsdev->port->handle, ++ }; ++ unsigned long flags; ++ ++ if (tm_flags == FCP_TMF_LUN_RESET) { ++ filter.tmf_scope = FCP_TMF_LUN_RESET; ++ filter.lun_handle = zsdev->lun_handle; ++ } ++ ++ /* ++ * abort_lock secures against other processings - in the abort-function ++ * and normal cmnd-handler - of (struct zfcp_fsf_req *)->data ++ */ ++ write_lock_irqsave(&adapter->abort_lock, flags); ++ zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd, ++ &filter); ++ write_unlock_irqrestore(&adapter->abort_lock, flags); ++} ++ + static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) + { + struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device); +@@ -262,8 +311,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags) + if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) { + zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags); + retval = FAILED; +- } else ++ } else { + zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags); ++ zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags); ++ } + + zfcp_fsf_req_free(fsf_req); + return retval; +diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c +index 
1e4479f3331a..55716c5184f7 100644 +--- a/drivers/scsi/mvsas/mv_94xx.c ++++ b/drivers/scsi/mvsas/mv_94xx.c +@@ -621,7 +621,7 @@ static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx) + { + u32 tmp; + tmp = mvs_cr32(mvi, MVS_COMMAND_ACTIVE+(slot_idx >> 3)); +- if (tmp && 1 << (slot_idx % 32)) { ++ if (tmp & 1 << (slot_idx % 32)) { + mv_printk("command active %08X, slot [%x].\n", tmp, slot_idx); + mvs_cw32(mvi, MVS_COMMAND_ACTIVE + (slot_idx >> 3), + 1 << (slot_idx % 32)); +diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c +index 36d62fd53511..ebc939e85b76 100644 +--- a/drivers/scsi/qla2xxx/qla_os.c ++++ b/drivers/scsi/qla2xxx/qla_os.c +@@ -3384,7 +3384,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, + sizeof(struct ct6_dsd), 0, + SLAB_HWCACHE_ALIGN, NULL); + if (!ctx_cachep) +- goto fail_free_gid_list; ++ goto fail_free_srb_mempool; + } + ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ, + ctx_cachep); +@@ -3537,7 +3537,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, + ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long), + GFP_KERNEL); + if (!ha->loop_id_map) +- goto fail_async_pd; ++ goto fail_loop_id_map; + else { + qla2x00_set_reserved_loop_ids(ha); + ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123, +@@ -3546,6 +3546,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, + + return 0; + ++fail_loop_id_map: ++ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma); + fail_async_pd: + dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma); + fail_ex_init_cb: +@@ -3573,6 +3575,10 @@ fail_free_ms_iocb: + dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma); + ha->ms_iocb = NULL; + ha->ms_iocb_dma = 0; ++ ++ if (ha->sns_cmd) ++ dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt), ++ ha->sns_cmd, ha->sns_cmd_dma); + fail_dma_pool: + if (IS_QLA82XX(ha) || ql2xenabledif) { + dma_pool_destroy(ha->fcp_cmnd_dma_pool); +@@ -3590,10 +3596,12 @@ fail_free_nvram: + kfree(ha->nvram); + ha->nvram = NULL; + fail_free_ctx_mempool: +- mempool_destroy(ha->ctx_mempool); ++ if (ha->ctx_mempool) ++ mempool_destroy(ha->ctx_mempool); + ha->ctx_mempool = NULL; + fail_free_srb_mempool: +- mempool_destroy(ha->srb_mempool); ++ if (ha->srb_mempool) ++ mempool_destroy(ha->srb_mempool); + ha->srb_mempool = NULL; + fail_free_gid_list: + dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), +diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c +index 14ad111b2851..970f655f8532 100644 +--- a/drivers/scsi/scsi_sysfs.c ++++ b/drivers/scsi/scsi_sysfs.c +@@ -905,10 +905,6 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) + struct request_queue *rq = sdev->request_queue; + struct scsi_target *starget = sdev->sdev_target; + +- error = scsi_device_set_state(sdev, SDEV_RUNNING); +- if (error) +- return error; +- + error = scsi_target_add(starget); + if (error) + return error; +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 1f65e32db285..0b27d293dd83 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -568,6 +568,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + sg_io_hdr_t *hp; + unsigned char cmnd[MAX_COMMAND_SIZE]; + ++ if (unlikely(segment_eq(get_fs(), KERNEL_DS))) ++ return -EINVAL; ++ + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n", +@@ -766,8 +769,14 @@ 
sg_common_write(Sg_fd * sfp, Sg_request * srp, + return k; /* probably out of space --> ENOMEM */ + } + if (sdp->detached) { +- if (srp->bio) ++ if (srp->bio) { ++ if (srp->rq->cmd != srp->rq->__cmd) ++ kfree(srp->rq->cmd); ++ + blk_end_request_all(srp->rq, -EIO); ++ srp->rq = NULL; ++ } ++ + sg_finish_rem_req(srp); + return -ENODEV; + } +diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c +index a8dc95ebf2d6..7700cef5e177 100644 +--- a/drivers/ssb/pci.c ++++ b/drivers/ssb/pci.c +@@ -846,6 +846,7 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus, + if (err) { + ssb_warn("WARNING: Using fallback SPROM failed (err %d)\n", + err); ++ goto out_free; + } else { + ssb_dbg("Using SPROM revision %d provided by platform\n", + sprom->revision); +diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c +index 72868ceda360..740a8eab262a 100644 +--- a/drivers/staging/iio/adc/ad7606_core.c ++++ b/drivers/staging/iio/adc/ad7606_core.c +@@ -189,7 +189,7 @@ static ssize_t ad7606_store_oversampling_ratio(struct device *dev, + mutex_lock(&indio_dev->mlock); + gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1); + gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1); +- gpio_set_value(st->pdata->gpio_os1, (ret >> 2) & 1); ++ gpio_set_value(st->pdata->gpio_os2, (ret >> 2) & 1); + st->oversampling = lval; + mutex_unlock(&indio_dev->mlock); + +diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c +index b713d63a86f7..ed4ea4ef1420 100644 +--- a/drivers/target/iscsi/iscsi_target_tpg.c ++++ b/drivers/target/iscsi/iscsi_target_tpg.c +@@ -258,7 +258,6 @@ err_out: + iscsi_release_param_list(tpg->param_list); + tpg->param_list = NULL; + } +- kfree(tpg); + return -ENOMEM; + } + +diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c +index 1967bee4f076..9035fbc5e98d 100644 +--- a/drivers/thermal/thermal_hwmon.c ++++ b/drivers/thermal/thermal_hwmon.c +@@ -98,7 +98,7 @@ temp_crit_show(struct device *dev, struct device_attribute *attr, char *buf) + long temperature; + int ret; + +- ret = tz->ops->get_trip_temp(tz, 0, &temperature); ++ ret = tz->ops->get_crit_temp(tz, &temperature); + if (ret) + return ret; + +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 3299168189cc..e93eaea14ccc 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -55,6 +55,7 @@ struct serial_private { + unsigned int nr; + void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES]; + struct pci_serial_quirk *quirk; ++ const struct pciserial_board *board; + int line[0]; + }; + +@@ -3451,6 +3452,7 @@ pciserial_init_ports(struct pci_dev *dev, const struct pciserial_board *board) + } + } + priv->nr = i; ++ priv->board = board; + return priv; + + err_deinit: +@@ -3461,7 +3463,7 @@ err_out: + } + EXPORT_SYMBOL_GPL(pciserial_init_ports); + +-void pciserial_remove_ports(struct serial_private *priv) ++void pciserial_detach_ports(struct serial_private *priv) + { + struct pci_serial_quirk *quirk; + int i; +@@ -3481,7 +3483,11 @@ void pciserial_remove_ports(struct serial_private *priv) + quirk = find_quirk(priv->dev); + if (quirk->exit) + quirk->exit(priv->dev); ++} + ++void pciserial_remove_ports(struct serial_private *priv) ++{ ++ pciserial_detach_ports(priv); + kfree(priv); + } + EXPORT_SYMBOL_GPL(pciserial_remove_ports); +@@ -5039,7 +5045,7 @@ static pci_ers_result_t serial8250_io_error_detected(struct pci_dev *dev, + return PCI_ERS_RESULT_DISCONNECT; + + if (priv) +- 
pciserial_suspend_ports(priv); ++ pciserial_detach_ports(priv); + + pci_disable_device(dev); + +@@ -5064,9 +5070,18 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev) + static void serial8250_io_resume(struct pci_dev *dev) + { + struct serial_private *priv = pci_get_drvdata(dev); ++ const struct pciserial_board *board; + +- if (priv) +- pciserial_resume_ports(priv); ++ if (!priv) ++ return; ++ ++ board = priv->board; ++ kfree(priv); ++ priv = pciserial_init_ports(dev, board); ++ ++ if (!IS_ERR(priv)) { ++ pci_set_drvdata(dev, priv); ++ } + } + + static const struct pci_error_handlers serial8250_err_handler = { +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c +index 3b9b80856c1b..aefe343b4212 100644 +--- a/drivers/tty/sysrq.c ++++ b/drivers/tty/sysrq.c +@@ -925,8 +925,8 @@ static const struct input_device_id sysrq_ids[] = { + { + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | + INPUT_DEVICE_ID_MATCH_KEYBIT, +- .evbit = { BIT_MASK(EV_KEY) }, +- .keybit = { BIT_MASK(KEY_LEFTALT) }, ++ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) }, ++ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) }, + }, + { }, + }; +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 2d269169d08b..c78c4f7efb40 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1588,6 +1588,7 @@ static const struct usb_device_id acm_ids[] = { + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, + { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ ++ { USB_DEVICE(0x2184, 0x0036) }, /* GW Instek AFG-125 */ + { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ + }, + /* Motorola H24 HSPA module: */ +diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c +index ce6225959f2c..15b39065f1dc 100644 +--- a/drivers/usb/core/config.c ++++ b/drivers/usb/core/config.c +@@ -207,6 +207,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum, + if (ifp->desc.bNumEndpoints >= num_ep) + goto skip_to_next_endpoint_or_interface_descriptor; + ++ /* Check for duplicate endpoint addresses */ ++ for (i = 0; i < ifp->desc.bNumEndpoints; ++i) { ++ if (ifp->endpoint[i].desc.bEndpointAddress == ++ d->bEndpointAddress) { ++ dev_warn(ddev, "config %d interface %d altsetting %d has a duplicate endpoint with address 0x%X, skipping\n", ++ cfgno, inum, asnum, d->bEndpointAddress); ++ goto skip_to_next_endpoint_or_interface_descriptor; ++ } ++ } ++ + endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; + ++ifp->desc.bNumEndpoints; + +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 5e788077675b..770cea7de0ec 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -115,6 +115,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem); + + static int usb_reset_and_verify_device(struct usb_device *udev); + static void hub_release(struct kref *kref); ++static int hub_port_disable(struct usb_hub *hub, int port1, int set_state); + + static inline char *portspeed(struct usb_hub *hub, int portstatus) + { +@@ -878,89 +879,6 @@ static int hub_set_port_link_state(struct usb_hub *hub, int port1, + } + + /* +- * If USB 3.0 ports are placed into the Disabled state, they will no longer +- * detect any device connects or disconnects. This is generally not what the +- * USB core wants, since it expects a disabled port to produce a port status +- * change event when a new device connects. 
+- * +- * Instead, set the link state to Disabled, wait for the link to settle into +- * that state, clear any change bits, and then put the port into the RxDetect +- * state. +- */ +-static int hub_usb3_port_disable(struct usb_hub *hub, int port1) +-{ +- int ret; +- int total_time; +- u16 portchange, portstatus; +- +- if (!hub_is_superspeed(hub->hdev)) +- return -EINVAL; +- +- ret = hub_port_status(hub, port1, &portstatus, &portchange); +- if (ret < 0) +- return ret; +- +- /* +- * USB controller Advanced Micro Devices, Inc. [AMD] FCH USB XHCI +- * Controller [1022:7814] will have spurious result making the following +- * usb 3.0 device hotplugging route to the 2.0 root hub and recognized +- * as high-speed device if we set the usb 3.0 port link state to +- * Disabled. Since it's already in USB_SS_PORT_LS_RX_DETECT state, we +- * check the state here to avoid the bug. +- */ +- if ((portstatus & USB_PORT_STAT_LINK_STATE) == +- USB_SS_PORT_LS_RX_DETECT) { +- dev_dbg(&hub->ports[port1 - 1]->dev, +- "Not disabling port; link state is RxDetect\n"); +- return ret; +- } +- +- ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); +- if (ret) +- return ret; +- +- /* Wait for the link to enter the disabled state. */ +- for (total_time = 0; ; total_time += HUB_DEBOUNCE_STEP) { +- ret = hub_port_status(hub, port1, &portstatus, &portchange); +- if (ret < 0) +- return ret; +- +- if ((portstatus & USB_PORT_STAT_LINK_STATE) == +- USB_SS_PORT_LS_SS_DISABLED) +- break; +- if (total_time >= HUB_DEBOUNCE_TIMEOUT) +- break; +- msleep(HUB_DEBOUNCE_STEP); +- } +- if (total_time >= HUB_DEBOUNCE_TIMEOUT) +- dev_warn(hub->intfdev, "Could not disable port %d after %d ms\n", +- port1, total_time); +- +- return hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_RX_DETECT); +-} +- +-static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) +-{ +- struct usb_device *hdev = hub->hdev; +- int ret = 0; +- +- if (hub->ports[port1 - 1]->child && set_state) +- usb_set_device_state(hub->ports[port1 - 1]->child, +- USB_STATE_NOTATTACHED); +- if (!hub->error) { +- if (hub_is_superspeed(hub->hdev)) +- ret = hub_usb3_port_disable(hub, port1); +- else +- ret = usb_clear_port_feature(hdev, port1, +- USB_PORT_FEAT_ENABLE); +- } +- if (ret && ret != -ENODEV) +- dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n", +- port1, ret); +- return ret; +-} +- +-/* + * Disable a port and mark a logical connect-change event, so that some + * time later khubd will disconnect() any existing usb_device on the port + * and will re-enumerate if there actually is a device attached. 
+@@ -3885,6 +3803,26 @@ void usb_unlocked_enable_lpm(struct usb_device *udev) + } + EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); + ++/* usb3 devices use U3 for disabled, make sure remote wakeup is disabled */ ++static void hub_usb3_port_prepare_disable(struct usb_hub *hub, ++ struct usb_port *port_dev) ++{ ++ struct usb_device *udev = port_dev->child; ++ int ret; ++ ++ if (udev && udev->port_is_suspended && udev->do_remote_wakeup) { ++ ret = hub_set_port_link_state(hub, port_dev->portnum, ++ USB_SS_PORT_LS_U0); ++ if (!ret) { ++ msleep(USB_RESUME_TIMEOUT); ++ ret = usb_disable_remote_wakeup(udev); ++ } ++ if (ret) ++ dev_warn(&udev->dev, ++ "Port disable: can't disable remote wake\n"); ++ udev->do_remote_wakeup = 0; ++ } ++} + + #else /* CONFIG_PM */ + +@@ -3892,6 +3830,9 @@ EXPORT_SYMBOL_GPL(usb_unlocked_enable_lpm); + #define hub_resume NULL + #define hub_reset_resume NULL + ++static inline void hub_usb3_port_prepare_disable(struct usb_hub *hub, ++ struct usb_port *port_dev) { } ++ + int usb_disable_lpm(struct usb_device *udev) + { + return 0; +@@ -3921,6 +3862,35 @@ EXPORT_SYMBOL_GPL(usb_enable_ltm); + + #endif /* CONFIG_PM */ + ++/* ++ * USB-3 does not have a similar link state as USB-2 that will avoid negotiating ++ * a connection with a plugged-in cable but will signal the host when the cable ++ * is unplugged. Disable remote wake and set link state to U3 for USB-3 devices ++ */ ++static int hub_port_disable(struct usb_hub *hub, int port1, int set_state) ++{ ++ struct usb_port *port_dev = hub->ports[port1 - 1]; ++ struct usb_device *hdev = hub->hdev; ++ int ret = 0; ++ ++ if (!hub->error) { ++ if (hub_is_superspeed(hub->hdev)) { ++ hub_usb3_port_prepare_disable(hub, port_dev); ++ ret = hub_set_port_link_state(hub, port_dev->portnum, ++ USB_SS_PORT_LS_U3); ++ } else { ++ ret = usb_clear_port_feature(hdev, port1, ++ USB_PORT_FEAT_ENABLE); ++ } ++ } ++ if (port_dev->child && set_state) ++ usb_set_device_state(port_dev->child, USB_STATE_NOTATTACHED); ++ if (ret && ret != -ENODEV) ++ dev_err(hub->intfdev, "cannot disable port %d (err = %d)\n", ++ port1, ret); ++ return ret; ++} ++ + + /* USB 2.0 spec, 7.1.7.3 / fig 7-29: + * +diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c +index 2e252aae51ca..b4e123152533 100644 +--- a/drivers/usb/dwc3/dwc3-pci.c ++++ b/drivers/usb/dwc3/dwc3-pci.c +@@ -30,6 +30,14 @@ + #define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd + #define PCI_DEVICE_ID_INTEL_BYT 0x0f37 + #define PCI_DEVICE_ID_INTEL_MRFLD 0x119e ++#define PCI_DEVICE_ID_INTEL_BSW 0x22B7 ++#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 ++#define PCI_DEVICE_ID_INTEL_SPTH 0xa130 ++#define PCI_DEVICE_ID_INTEL_BXT 0x0aaa ++#define PCI_DEVICE_ID_INTEL_BXT_M 0x1aaa ++#define PCI_DEVICE_ID_INTEL_APL 0x5aaa ++#define PCI_DEVICE_ID_INTEL_KBP 0xa2b0 ++#define PCI_DEVICE_ID_INTEL_GLK 0x31aa + + struct dwc3_pci { + struct device *dev; +@@ -189,8 +197,16 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = { + PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, + PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), + }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT), }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BXT_M), }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 
PCI_DEVICE_ID_INTEL_APL), }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBP), }, ++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), }, + { } /* Terminating Entry */ + }; + MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table); +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index af03ea2c9c78..f4a36f4669bb 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -245,11 +245,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, + if (req->request.status == -EINPROGRESS) + req->request.status = status; + +- if (dwc->ep0_bounced && dep->number == 0) ++ if (dwc->ep0_bounced && dep->number <= 1) + dwc->ep0_bounced = false; +- else +- usb_gadget_unmap_request(&dwc->gadget, &req->request, +- req->direction); ++ ++ usb_gadget_unmap_request(&dwc->gadget, &req->request, ++ req->direction); + + dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", + req, dep->name, req->request.actual, +diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c +index a0b5a13b52b0..2c0f38811ee7 100644 +--- a/drivers/usb/gadget/composite.c ++++ b/drivers/usb/gadget/composite.c +@@ -125,11 +125,16 @@ int config_ep_by_speed(struct usb_gadget *g, + + ep_found: + /* commit results */ +- _ep->maxpacket = usb_endpoint_maxp(chosen_desc); ++ _ep->maxpacket = usb_endpoint_maxp(chosen_desc) & 0x7ff; + _ep->desc = chosen_desc; + _ep->comp_desc = NULL; + _ep->maxburst = 0; +- _ep->mult = 0; ++ _ep->mult = 1; ++ ++ if (g->speed == USB_SPEED_HIGH && (usb_endpoint_xfer_isoc(_ep->desc) || ++ usb_endpoint_xfer_int(_ep->desc))) ++ _ep->mult = ((usb_endpoint_maxp(_ep->desc) & 0x1800) >> 11) + 1; ++ + if (!want_comp_desc) + return 0; + +@@ -146,7 +151,7 @@ ep_found: + switch (usb_endpoint_type(_ep->desc)) { + case USB_ENDPOINT_XFER_ISOC: + /* mult: bits 1:0 of bmAttributes */ +- _ep->mult = comp_desc->bmAttributes & 0x3; ++ _ep->mult = (comp_desc->bmAttributes & 0x3) + 1; + case USB_ENDPOINT_XFER_BULK: + case USB_ENDPOINT_XFER_INT: + _ep->maxburst = comp_desc->bMaxBurst + 1; +@@ -1320,9 +1325,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) + value = min(w_length, (u16) 1); + break; + +- /* function drivers must handle get/set altsetting; if there's +- * no get() method, we know only altsetting zero works. +- */ ++ /* function drivers must handle get/set altsetting */ + case USB_REQ_SET_INTERFACE: + if (ctrl->bRequestType != USB_RECIP_INTERFACE) + goto unknown; +@@ -1331,7 +1334,13 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl) + f = cdev->config->interface[intf]; + if (!f) + break; +- if (w_value && !f->set_alt) ++ ++ /* ++ * If there's no get_alt() method, we know only altsetting zero ++ * works. There is no need to check if set_alt() is not NULL ++ * as we check this in usb_add_function(). 
++ */ ++ if (w_value && !f->get_alt) + break; + value = f->set_alt(f, w_index, w_value); + if (value == USB_GADGET_DELAYED_STATUS) { +diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c +index b8a2376971a4..341976289d15 100644 +--- a/drivers/usb/gadget/dummy_hcd.c ++++ b/drivers/usb/gadget/dummy_hcd.c +@@ -266,7 +266,7 @@ static void nuke(struct dummy *dum, struct dummy_ep *ep) + /* caller must hold lock */ + static void stop_activity(struct dummy *dum) + { +- struct dummy_ep *ep; ++ int i; + + /* prevent any more requests */ + dum->address = 0; +@@ -274,8 +274,8 @@ static void stop_activity(struct dummy *dum) + /* The timer is left running so that outstanding URBs can fail */ + + /* nuke any pending requests first, so driver i/o is quiesced */ +- list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list) +- nuke(dum, ep); ++ for (i = 0; i < DUMMY_ENDPOINTS; ++i) ++ nuke(dum, &dum->ep[i]); + + /* driver now does any non-usb quiescing necessary */ + } +diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c +index 4ac9e9928d67..8fa7ba0f6beb 100644 +--- a/drivers/usb/gadget/inode.c ++++ b/drivers/usb/gadget/inode.c +@@ -1199,7 +1199,7 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) + /* data and/or status stage for control request */ + } else if (dev->state == STATE_DEV_SETUP) { + +- /* IN DATA+STATUS caller makes len <= wLength */ ++ len = min_t(size_t, len, dev->setup_wLength); + if (dev->setup_in) { + retval = setup_req (dev->gadget->ep0, dev->req, len); + if (retval == 0) { +@@ -1829,10 +1829,12 @@ static struct usb_gadget_driver probe_driver = { + * such as configuration notifications. + */ + +-static int is_valid_config (struct usb_config_descriptor *config) ++static int is_valid_config(struct usb_config_descriptor *config, ++ unsigned int total) + { + return config->bDescriptorType == USB_DT_CONFIG + && config->bLength == USB_DT_CONFIG_SIZE ++ && total >= USB_DT_CONFIG_SIZE + && config->bConfigurationValue != 0 + && (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0 + && (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0; +@@ -1849,7 +1851,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) + u32 tag; + char *kbuf; + +- if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ++ if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) || ++ (len > PAGE_SIZE * 4)) + return -EINVAL; + + /* we might need to change message format someday */ +@@ -1873,7 +1876,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) + /* full or low speed config */ + dev->config = (void *) kbuf; + total = le16_to_cpu(dev->config->wTotalLength); +- if (!is_valid_config (dev->config) || total >= length) ++ if (!is_valid_config(dev->config, total) || ++ total > length - USB_DT_DEVICE_SIZE) + goto fail; + kbuf += total; + length -= total; +@@ -1882,10 +1886,13 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) + if (kbuf [1] == USB_DT_CONFIG) { + dev->hs_config = (void *) kbuf; + total = le16_to_cpu(dev->hs_config->wTotalLength); +- if (!is_valid_config (dev->hs_config) || total >= length) ++ if (!is_valid_config(dev->hs_config, total) || ++ total > length - USB_DT_DEVICE_SIZE) + goto fail; + kbuf += total; + length -= total; ++ } else { ++ dev->hs_config = NULL; + } + + /* could support multiple configs, using another encoding! 
*/ +diff --git a/drivers/usb/gadget/uvc_video.c b/drivers/usb/gadget/uvc_video.c +index 71e896d4c5ae..43e8c65fd9ed 100644 +--- a/drivers/usb/gadget/uvc_video.c ++++ b/drivers/usb/gadget/uvc_video.c +@@ -240,7 +240,7 @@ uvc_video_alloc_requests(struct uvc_video *video) + + req_size = video->ep->maxpacket + * max_t(unsigned int, video->ep->maxburst, 1) +- * (video->ep->mult + 1); ++ * (video->ep->mult); + + for (i = 0; i < UVC_NUM_REQUESTS; ++i) { + video->req_buffer[i] = kmalloc(req_size, GFP_KERNEL); +diff --git a/drivers/usb/host/uhci-pci.c b/drivers/usb/host/uhci-pci.c +index 0f228c46eeda..ad458ef4b7e9 100644 +--- a/drivers/usb/host/uhci-pci.c ++++ b/drivers/usb/host/uhci-pci.c +@@ -129,6 +129,10 @@ static int uhci_pci_init(struct usb_hcd *hcd) + if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_HP) + uhci->wait_for_hp = 1; + ++ /* Intel controllers use non-PME wakeup signalling */ ++ if (to_pci_dev(uhci_dev(uhci))->vendor == PCI_VENDOR_ID_INTEL) ++ device_set_run_wake(uhci_dev(uhci), 1); ++ + /* Set up pointers to PCI-specific functions */ + uhci->reset_hc = uhci_pci_reset_hc; + uhci->check_and_reset_hc = uhci_pci_check_and_reset_hc; +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index 8a79270ca44d..f97a382e3e76 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -1221,6 +1221,35 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + return 0; + } + ++/* ++ * Workaround for missing Cold Attach Status (CAS) if device re-plugged in S3. ++ * warm reset a USB3 device stuck in polling or compliance mode after resume. ++ * See Intel 100/c230 series PCH specification update Doc #332692-006 Errata #8 ++ */ ++static bool xhci_port_missing_cas_quirk(int port_index, ++ __le32 __iomem **port_array) ++{ ++ u32 portsc; ++ ++ portsc = readl(port_array[port_index]); ++ ++ /* if any of these are set we are not stuck */ ++ if (portsc & (PORT_CONNECT | PORT_CAS)) ++ return false; ++ ++ if (((portsc & PORT_PLS_MASK) != XDEV_POLLING) && ++ ((portsc & PORT_PLS_MASK) != XDEV_COMP_MODE)) ++ return false; ++ ++ /* clear wakeup/change bits, and do a warm port reset */ ++ portsc &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS); ++ portsc |= PORT_WR; ++ writel(portsc, port_array[port_index]); ++ /* flush write */ ++ readl(port_array[port_index]); ++ return true; ++} ++ + int xhci_bus_resume(struct usb_hcd *hcd) + { + struct xhci_hcd *xhci = hcd_to_xhci(hcd); +@@ -1255,6 +1284,14 @@ int xhci_bus_resume(struct usb_hcd *hcd) + int slot_id; + + temp = xhci_readl(xhci, port_array[port_index]); ++ ++ /* warm reset CAS limited ports stuck in polling/compliance */ ++ if ((xhci->quirks & XHCI_MISSING_CAS) && ++ (hcd->speed >= HCD_USB3) && ++ xhci_port_missing_cas_quirk(port_index, port_array)) { ++ xhci_dbg(xhci, "reset stuck port %d\n", port_index); ++ continue; ++ } + if (DEV_SUPERSPEED(temp)) + temp &= ~(PORT_RWC_BITS | PORT_CEC | PORT_WAKE_BITS); + else +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index bc5307f9367f..34323aa444e3 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -865,6 +865,40 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) + xhci->devs[slot_id] = NULL; + } + ++/* ++ * Free a virt_device structure. ++ * If the virt_device added a tt_info (a hub) and has children pointing to ++ * that tt_info, then free the child first. Recursive. ++ * We can't rely on udev at this point to find child-parent relationships. 
++ */ ++void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_id) ++{ ++ struct xhci_virt_device *vdev; ++ struct list_head *tt_list_head; ++ struct xhci_tt_bw_info *tt_info, *next; ++ int i; ++ ++ vdev = xhci->devs[slot_id]; ++ if (!vdev) ++ return; ++ ++ tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts); ++ list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) { ++ /* is this a hub device that added a tt_info to the tts list */ ++ if (tt_info->slot_id == slot_id) { ++ /* are any devices using this tt_info? */ ++ for (i = 1; i < HCS_MAX_SLOTS(xhci->hcs_params1); i++) { ++ vdev = xhci->devs[i]; ++ if (vdev && (vdev->tt_info == tt_info)) ++ xhci_free_virt_devices_depth_first( ++ xhci, i); ++ } ++ } ++ } ++ /* we are now at a leaf device */ ++ xhci_free_virt_device(xhci, slot_id); ++} ++ + int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, + struct usb_device *udev, gfp_t flags) + { +@@ -1735,8 +1769,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) + } + } + +- for (i = 1; i < MAX_HC_SLOTS; ++i) +- xhci_free_virt_device(xhci, i); ++ for (i = HCS_MAX_SLOTS(xhci->hcs_params1); i > 0; i--) ++ xhci_free_virt_devices_depth_first(xhci, i); + + if (xhci->segment_pool) + dma_pool_destroy(xhci->segment_pool); +@@ -2270,7 +2304,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) + * "physically contiguous and 64-byte (cache line) aligned". + */ + xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, +- GFP_KERNEL); ++ flags); + if (!xhci->dcbaa) + goto fail; + memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); +@@ -2365,7 +2399,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) + + xhci->erst.entries = dma_alloc_coherent(dev, + sizeof(struct xhci_erst_entry) * ERST_NUM_SEGS, &dma, +- GFP_KERNEL); ++ flags); + if (!xhci->erst.entries) + goto fail; + xhci_dbg_trace(xhci, trace_xhci_dbg_init, +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 1ee8c97ae6be..6b11f6df76aa 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -41,6 +41,9 @@ + #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f + #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f ++#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 ++#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 ++#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8 + + static const char hcd_name[] = "xhci_hcd"; + +@@ -138,9 +141,17 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + if (pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || +- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { ++ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || ++ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI || ++ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) { + xhci->quirks |= XHCI_PME_STUCK_QUIRK; + } ++ if (pdev->vendor == PCI_VENDOR_ID_INTEL && ++ (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || ++ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) ++ xhci->quirks |= XHCI_MISSING_CAS; ++ + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_ASROCK_P67) { + xhci->quirks |= XHCI_RESET_ON_RESUME; +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 4bcea54f60cd..8f1159612593 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ 
-948,13 +948,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg) + spin_lock_irqsave(&xhci->lock, flags); + + ep->stop_cmds_pending--; +- if (xhci->xhc_state & XHCI_STATE_DYING) { +- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, +- "Stop EP timer ran, but another timer marked " +- "xHCI as DYING, exiting."); +- spin_unlock_irqrestore(&xhci->lock, flags); +- return; +- } + if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) { + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, + "Stop EP timer ran, but no command pending, " +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index ea185eaeae28..04ba50b05075 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -1538,19 +1538,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) + xhci_urb_free_priv(xhci, urb_priv); + return ret; + } +- if ((xhci->xhc_state & XHCI_STATE_DYING) || +- (xhci->xhc_state & XHCI_STATE_HALTED)) { +- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, +- "Ep 0x%x: URB %p to be canceled on " +- "non-responsive xHCI host.", +- urb->ep->desc.bEndpointAddress, urb); +- /* Let the stop endpoint command watchdog timer (which set this +- * state) finish cleaning up the endpoint TD lists. We must +- * have caught it in the middle of dropping a lock and giving +- * back an URB. +- */ +- goto done; +- } + + ep_index = xhci_get_endpoint_index(&urb->ep->desc); + ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index]; +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index 0419137c4732..83bfb60d19c0 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -286,6 +286,8 @@ struct xhci_op_regs { + #define XDEV_U2 (0x2 << 5) + #define XDEV_U3 (0x3 << 5) + #define XDEV_INACTIVE (0x6 << 5) ++#define XDEV_POLLING (0x7 << 5) ++#define XDEV_COMP_MODE (0xa << 5) + #define XDEV_RESUME (0xf << 5) + /* true: port has power (see HCC_PPC) */ + #define PORT_POWER (1 << 9) +@@ -1555,6 +1557,7 @@ struct xhci_hcd { + #define XHCI_SLOW_SUSPEND (1 << 17) + #define XHCI_SPURIOUS_WAKEUP (1 << 18) + #define XHCI_PME_STUCK_QUIRK (1 << 20) ++#define XHCI_MISSING_CAS (1 << 24) + unsigned int num_active_eps; + unsigned int limit_active_eps; + /* There are two roothubs to keep track of bus suspend info for */ +diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h +index f7b13fd25257..a3dcbd55e436 100644 +--- a/drivers/usb/musb/musbhsdma.h ++++ b/drivers/usb/musb/musbhsdma.h +@@ -157,5 +157,5 @@ struct musb_dma_controller { + void __iomem *base; + u8 channel_count; + u8 used_channels; +- u8 irq; ++ int irq; + }; +diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c +index 22cf07d62e4c..0b8efff8524c 100644 +--- a/drivers/usb/phy/phy-am335x-control.c ++++ b/drivers/usb/phy/phy-am335x-control.c +@@ -85,7 +85,9 @@ struct phy_control *am335x_get_phy_control(struct device *dev) + return NULL; + + dev = bus_find_device(&platform_bus_type, NULL, node, match); ++ of_node_put(node); + ctrl_usb = dev_get_drvdata(dev); ++ put_device(dev); + if (!ctrl_usb) + return NULL; + return &ctrl_usb->phy_ctrl; +diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c +index c2a4171ab9cb..a4e5be5aea46 100644 +--- a/drivers/usb/serial/ch341.c ++++ b/drivers/usb/serial/ch341.c +@@ -97,6 +97,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request, + r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request, + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, + value, index, NULL, 0, 
DEFAULT_TIMEOUT); ++ if (r < 0) ++ dev_err(&dev->dev, "failed to send control message: %d\n", r); + + return r; + } +@@ -114,7 +116,20 @@ static int ch341_control_in(struct usb_device *dev, + r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request, + USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, + value, index, buf, bufsize, DEFAULT_TIMEOUT); +- return r; ++ if (r < bufsize) { ++ if (r >= 0) { ++ dev_err(&dev->dev, ++ "short control message received (%d < %u)\n", ++ r, bufsize); ++ r = -EIO; ++ } ++ ++ dev_err(&dev->dev, "failed to receive control message: %d\n", ++ r); ++ return r; ++ } ++ ++ return 0; + } + + static int ch341_set_baudrate(struct usb_device *dev, +@@ -156,9 +171,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control) + + static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) + { ++ const unsigned int size = 2; + char *buffer; + int r; +- const unsigned size = 8; + unsigned long flags; + + buffer = kmalloc(size, GFP_KERNEL); +@@ -169,15 +184,10 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv) + if (r < 0) + goto out; + +- /* setup the private status if available */ +- if (r == 2) { +- r = 0; +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; +- priv->multi_status_change = 0; +- spin_unlock_irqrestore(&priv->lock, flags); +- } else +- r = -EPROTO; ++ spin_lock_irqsave(&priv->lock, flags); ++ priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT; ++ priv->multi_status_change = 0; ++ spin_unlock_irqrestore(&priv->lock, flags); + + out: kfree(buffer); + return r; +@@ -187,9 +197,9 @@ out: kfree(buffer); + + static int ch341_configure(struct usb_device *dev, struct ch341_private *priv) + { ++ const unsigned int size = 2; + char *buffer; + int r; +- const unsigned size = 8; + + buffer = kmalloc(size, GFP_KERNEL); + if (!buffer) +@@ -252,7 +262,6 @@ static int ch341_port_probe(struct usb_serial_port *port) + + spin_lock_init(&priv->lock); + priv->baud_rate = DEFAULT_BAUD_RATE; +- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR; + + r = ch341_configure(port->serial->dev, priv); + if (r < 0) +@@ -316,15 +325,15 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) + + r = ch341_configure(serial->dev, priv); + if (r) +- goto out; ++ return r; + + r = ch341_set_handshake(serial->dev, priv->line_control); + if (r) +- goto out; ++ return r; + + r = ch341_set_baudrate(serial->dev, priv); + if (r) +- goto out; ++ return r; + + dev_dbg(&port->dev, "%s - submitting interrupt urb", __func__); + r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL); +@@ -332,12 +341,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port) + dev_err(&port->dev, "%s - failed submitting interrupt urb," + " error %d\n", __func__, r); + ch341_close(port); +- goto out; ++ return r; + } + + r = usb_serial_generic_open(tty, port); ++ if (r) ++ goto err_kill_interrupt_urb; + +-out: return r; ++ return 0; ++ ++err_kill_interrupt_urb: ++ usb_kill_urb(port->interrupt_in_urb); ++ ++ return r; + } + + /* Old_termios contains the original termios settings and +@@ -352,26 +368,25 @@ static void ch341_set_termios(struct tty_struct *tty, + + baud_rate = tty_get_baud_rate(tty); + +- priv->baud_rate = baud_rate; +- + if (baud_rate) { +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); +- spin_unlock_irqrestore(&priv->lock, flags); ++ priv->baud_rate = baud_rate; + 
ch341_set_baudrate(port->serial->dev, priv); +- } else { +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); +- spin_unlock_irqrestore(&priv->lock, flags); + } + +- ch341_set_handshake(port->serial->dev, priv->line_control); +- + /* Unimplemented: + * (cflag & CSIZE) : data bits [5, 8] + * (cflag & PARENB) : parity {NONE, EVEN, ODD} + * (cflag & CSTOPB) : stop bits [1, 2] + */ ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ if (C_BAUD(tty) == B0) ++ priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS); ++ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0) ++ priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS); ++ spin_unlock_irqrestore(&priv->lock, flags); ++ ++ ch341_set_handshake(port->serial->dev, priv->line_control); + } + + static void ch341_break_ctl(struct tty_struct *tty, int break_state) +@@ -570,14 +585,23 @@ static int ch341_tiocmget(struct tty_struct *tty) + + static int ch341_reset_resume(struct usb_serial *serial) + { +- struct ch341_private *priv; +- +- priv = usb_get_serial_port_data(serial->port[0]); ++ struct usb_serial_port *port = serial->port[0]; ++ struct ch341_private *priv = usb_get_serial_port_data(port); ++ int ret; + + /* reconfigure ch341 serial port after bus-reset */ + ch341_configure(serial->dev, priv); + +- return 0; ++ if (test_bit(ASYNCB_INITIALIZED, &port->port.flags)) { ++ ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO); ++ if (ret) { ++ dev_err(&port->dev, "failed to submit interrupt urb: %d\n", ++ ret); ++ return ret; ++ } ++ } ++ ++ return usb_serial_generic_resume(serial); + } + + static struct usb_serial_driver ch341_device = { +diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c +index 781426230d69..bb3c7f09f059 100644 +--- a/drivers/usb/serial/cyberjack.c ++++ b/drivers/usb/serial/cyberjack.c +@@ -51,6 +51,7 @@ + #define CYBERJACK_PRODUCT_ID 0x0100 + + /* Function prototypes */ ++static int cyberjack_attach(struct usb_serial *serial); + static int cyberjack_port_probe(struct usb_serial_port *port); + static int cyberjack_port_remove(struct usb_serial_port *port); + static int cyberjack_open(struct tty_struct *tty, +@@ -78,6 +79,7 @@ static struct usb_serial_driver cyberjack_device = { + .description = "Reiner SCT Cyberjack USB card reader", + .id_table = id_table, + .num_ports = 1, ++ .attach = cyberjack_attach, + .port_probe = cyberjack_port_probe, + .port_remove = cyberjack_port_remove, + .open = cyberjack_open, +@@ -101,6 +103,14 @@ struct cyberjack_private { + short wrsent; /* Data already sent */ + }; + ++static int cyberjack_attach(struct usb_serial *serial) ++{ ++ if (serial->num_bulk_out < serial->num_ports) ++ return -ENODEV; ++ ++ return 0; ++} ++ + static int cyberjack_port_probe(struct usb_serial_port *port) + { + struct cyberjack_private *priv; +diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c +index 04b5ed90ffb2..9f1381dfce8c 100644 +--- a/drivers/usb/serial/garmin_gps.c ++++ b/drivers/usb/serial/garmin_gps.c +@@ -1049,6 +1049,7 @@ static int garmin_write_bulk(struct usb_serial_port *port, + "%s - usb_submit_urb(write bulk) failed with status = %d\n", + __func__, status); + count = status; ++ kfree(buffer); + } + + /* we are done with this urb, so let the host driver +diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c +index 0d037cc40e51..75e5ed82d17e 100644 +--- a/drivers/usb/serial/io_edgeport.c ++++ b/drivers/usb/serial/io_edgeport.c +@@ -2781,6 +2781,11 @@ static int 
edge_startup(struct usb_serial *serial) + EDGE_COMPATIBILITY_MASK1, + EDGE_COMPATIBILITY_MASK2 }; + ++ if (serial->num_bulk_in < 1 || serial->num_interrupt_in < 1) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ + dev = serial->dev; + + /* create our private serial structure */ +diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c +index 0385bc4efefa..d569d773e1ce 100644 +--- a/drivers/usb/serial/io_ti.c ++++ b/drivers/usb/serial/io_ti.c +@@ -1390,8 +1390,7 @@ static int download_fw(struct edgeport_serial *serial) + + dev_dbg(dev, "%s - Download successful -- Device rebooting...\n", __func__); + +- /* return an error on purpose */ +- return -ENODEV; ++ return 1; + } + + stayinbootmode: +@@ -1399,7 +1398,7 @@ stayinbootmode: + dev_dbg(dev, "%s - STAYING IN BOOT MODE\n", __func__); + serial->product_info.TiMode = TI_MODE_BOOT; + +- return 0; ++ return 1; + } + + +@@ -2409,6 +2408,13 @@ static int edge_startup(struct usb_serial *serial) + struct edgeport_serial *edge_serial; + int status; + ++ /* Make sure we have the required endpoints when in download mode. */ ++ if (serial->interface->cur_altsetting->desc.bNumEndpoints > 1) { ++ if (serial->num_bulk_in < serial->num_ports || ++ serial->num_bulk_out < serial->num_ports) ++ return -ENODEV; ++ } ++ + /* create our private serial structure */ + edge_serial = kzalloc(sizeof(struct edgeport_serial), GFP_KERNEL); + if (edge_serial == NULL) { +@@ -2420,11 +2426,14 @@ static int edge_startup(struct usb_serial *serial) + usb_set_serial_data(serial, edge_serial); + + status = download_fw(edge_serial); +- if (status) { ++ if (status < 0) { + kfree(edge_serial); + return status; + } + ++ if (status > 0) ++ return 1; /* bind but do not register any ports */ ++ + return 0; + } + +diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c +index 57c439a24b5a..66ca41f83ffc 100644 +--- a/drivers/usb/serial/iuu_phoenix.c ++++ b/drivers/usb/serial/iuu_phoenix.c +@@ -69,6 +69,16 @@ struct iuu_private { + u32 clk; + }; + ++static int iuu_attach(struct usb_serial *serial) ++{ ++ unsigned char num_ports = serial->num_ports; ++ ++ if (serial->num_bulk_in < num_ports || serial->num_bulk_out < num_ports) ++ return -ENODEV; ++ ++ return 0; ++} ++ + static int iuu_port_probe(struct usb_serial_port *port) + { + struct iuu_private *priv; +@@ -1197,6 +1207,7 @@ static struct usb_serial_driver iuu_device = { + .tiocmset = iuu_tiocmset, + .set_termios = iuu_set_termios, + .init_termios = iuu_init_termios, ++ .attach = iuu_attach, + .port_probe = iuu_port_probe, + .port_remove = iuu_port_remove, + }; +diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c +index 5f1d382e55cf..05c567bf5cfa 100644 +--- a/drivers/usb/serial/keyspan_pda.c ++++ b/drivers/usb/serial/keyspan_pda.c +@@ -697,6 +697,19 @@ MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw"); + MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw"); + #endif + ++static int keyspan_pda_attach(struct usb_serial *serial) ++{ ++ unsigned char num_ports = serial->num_ports; ++ ++ if (serial->num_bulk_out < num_ports || ++ serial->num_interrupt_in < num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int keyspan_pda_port_probe(struct usb_serial_port *port) + { + +@@ -774,6 +787,7 @@ static struct usb_serial_driver keyspan_pda_device = { + .break_ctl = keyspan_pda_break_ctl, + .tiocmget = keyspan_pda_tiocmget, + .tiocmset = keyspan_pda_tiocmset, ++ .attach = 
keyspan_pda_attach, + .port_probe = keyspan_pda_port_probe, + .port_remove = keyspan_pda_port_remove, + }; +diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c +index 1b4054fe52a5..b6794baf0a3b 100644 +--- a/drivers/usb/serial/kl5kusb105.c ++++ b/drivers/usb/serial/kl5kusb105.c +@@ -198,10 +198,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port, + status_buf, KLSI_STATUSBUF_LEN, + 10000 + ); +- if (rc < 0) +- dev_err(&port->dev, "Reading line status failed (error = %d)\n", +- rc); +- else { ++ if (rc != KLSI_STATUSBUF_LEN) { ++ dev_err(&port->dev, "reading line status failed: %d\n", rc); ++ if (rc >= 0) ++ rc = -EIO; ++ } else { + status = get_unaligned_le16(status_buf); + + dev_info(&port->serial->dev->dev, "read status %x %x", +@@ -304,7 +305,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) + rc = usb_serial_generic_open(tty, port); + if (rc) { + retval = rc; +- goto exit; ++ goto err_free_cfg; + } + + rc = usb_control_msg(port->serial->dev, +@@ -319,21 +320,38 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port) + if (rc < 0) { + dev_err(&port->dev, "Enabling read failed (error = %d)\n", rc); + retval = rc; ++ goto err_generic_close; + } else + dev_dbg(&port->dev, "%s - enabled reading\n", __func__); + + rc = klsi_105_get_line_state(port, &line_state); +- if (rc >= 0) { +- spin_lock_irqsave(&priv->lock, flags); +- priv->line_state = line_state; +- spin_unlock_irqrestore(&priv->lock, flags); +- dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, line_state); +- retval = 0; +- } else ++ if (rc < 0) { + retval = rc; ++ goto err_disable_read; ++ } ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ priv->line_state = line_state; ++ spin_unlock_irqrestore(&priv->lock, flags); ++ dev_dbg(&port->dev, "%s - read line state 0x%lx\n", __func__, ++ line_state); ++ ++ return 0; + +-exit: ++err_disable_read: ++ usb_control_msg(port->serial->dev, ++ usb_sndctrlpipe(port->serial->dev, 0), ++ KL5KUSB105A_SIO_CONFIGURE, ++ USB_TYPE_VENDOR | USB_DIR_OUT, ++ KL5KUSB105A_SIO_CONFIGURE_READ_OFF, ++ 0, /* index */ ++ NULL, 0, ++ KLSI_TIMEOUT); ++err_generic_close: ++ usb_serial_generic_close(port); ++err_free_cfg: + kfree(cfg); ++ + return retval; + } + +diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c +index efa75b4e51f2..63fa400a822f 100644 +--- a/drivers/usb/serial/kobil_sct.c ++++ b/drivers/usb/serial/kobil_sct.c +@@ -52,6 +52,7 @@ + + + /* Function prototypes */ ++static int kobil_attach(struct usb_serial *serial); + static int kobil_port_probe(struct usb_serial_port *probe); + static int kobil_port_remove(struct usb_serial_port *probe); + static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port); +@@ -87,6 +88,7 @@ static struct usb_serial_driver kobil_device = { + .description = "KOBIL USB smart card terminal", + .id_table = id_table, + .num_ports = 1, ++ .attach = kobil_attach, + .port_probe = kobil_port_probe, + .port_remove = kobil_port_remove, + .ioctl = kobil_ioctl, +@@ -114,6 +116,16 @@ struct kobil_private { + }; + + ++static int kobil_attach(struct usb_serial *serial) ++{ ++ if (serial->num_interrupt_out < serial->num_ports) { ++ dev_err(&serial->interface->dev, "missing interrupt-out endpoint\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int kobil_port_probe(struct usb_serial_port *port) + { + struct usb_serial *serial = port->serial; +diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c +index 
d40e1dccb998..c5274908ea92 100644 +--- a/drivers/usb/serial/mos7720.c ++++ b/drivers/usb/serial/mos7720.c +@@ -66,8 +66,6 @@ struct moschip_port { + struct urb *write_urb_pool[NUM_URBS]; + }; + +-static struct usb_serial_driver moschip7720_2port_driver; +- + #define USB_VENDOR_ID_MOSCHIP 0x9710 + #define MOSCHIP_DEVICE_ID_7720 0x7720 + #define MOSCHIP_DEVICE_ID_7715 0x7715 +@@ -966,25 +964,6 @@ static void mos7720_bulk_out_data_callback(struct urb *urb) + tty_port_tty_wakeup(&mos7720_port->port->port); + } + +-/* +- * mos77xx_probe +- * this function installs the appropriate read interrupt endpoint callback +- * depending on whether the device is a 7720 or 7715, thus avoiding costly +- * run-time checks in the high-frequency callback routine itself. +- */ +-static int mos77xx_probe(struct usb_serial *serial, +- const struct usb_device_id *id) +-{ +- if (id->idProduct == MOSCHIP_DEVICE_ID_7715) +- moschip7720_2port_driver.read_int_callback = +- mos7715_interrupt_callback; +- else +- moschip7720_2port_driver.read_int_callback = +- mos7720_interrupt_callback; +- +- return 0; +-} +- + static int mos77xx_calc_num_ports(struct usb_serial *serial) + { + u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); +@@ -1916,6 +1895,11 @@ static int mos7720_startup(struct usb_serial *serial) + u16 product; + int ret_val; + ++ if (serial->num_bulk_in < 2 || serial->num_bulk_out < 2) { ++ dev_err(&serial->interface->dev, "missing bulk endpoints\n"); ++ return -ENODEV; ++ } ++ + product = le16_to_cpu(serial->dev->descriptor.idProduct); + dev = serial->dev; + +@@ -1940,19 +1924,18 @@ static int mos7720_startup(struct usb_serial *serial) + tmp->interrupt_in_endpointAddress; + serial->port[1]->interrupt_in_urb = NULL; + serial->port[1]->interrupt_in_buffer = NULL; ++ ++ if (serial->port[0]->interrupt_in_urb) { ++ struct urb *urb = serial->port[0]->interrupt_in_urb; ++ ++ urb->complete = mos7715_interrupt_callback; ++ } + } + + /* setting configuration feature to one */ + usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), + (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); + +- /* start the interrupt urb */ +- ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); +- if (ret_val) +- dev_err(&dev->dev, +- "%s - Error %d submitting control urb\n", +- __func__, ret_val); +- + #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT + if (product == MOSCHIP_DEVICE_ID_7715) { + ret_val = mos7715_parport_init(serial); +@@ -1960,6 +1943,13 @@ static int mos7720_startup(struct usb_serial *serial) + return ret_val; + } + #endif ++ /* start the interrupt urb */ ++ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); ++ if (ret_val) { ++ dev_err(&dev->dev, "failed to submit interrupt urb: %d\n", ++ ret_val); ++ } ++ + /* LSR For Port 1 */ + read_mos_reg(serial, 0, LSR, &data); + dev_dbg(&dev->dev, "LSR:%x\n", data); +@@ -1969,6 +1959,8 @@ static int mos7720_startup(struct usb_serial *serial) + + static void mos7720_release(struct usb_serial *serial) + { ++ usb_kill_urb(serial->port[0]->interrupt_in_urb); ++ + #ifdef CONFIG_USB_SERIAL_MOS7715_PARPORT + /* close the parallel port */ + +@@ -2051,7 +2043,6 @@ static struct usb_serial_driver moschip7720_2port_driver = { + .close = mos7720_close, + .throttle = mos7720_throttle, + .unthrottle = mos7720_unthrottle, +- .probe = mos77xx_probe, + .attach = mos7720_startup, + .release = mos7720_release, + .port_probe = mos7720_port_probe, +@@ -2065,7 +2056,7 @@ static struct usb_serial_driver moschip7720_2port_driver = { + .chars_in_buffer = 
mos7720_chars_in_buffer, + .break_ctl = mos7720_break, + .read_bulk_callback = mos7720_bulk_in_callback, +- .read_int_callback = NULL /* dynamically assigned in probe() */ ++ .read_int_callback = mos7720_interrupt_callback, + }; + + static struct usb_serial_driver * const serial_drivers[] = { +diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c +index 29b33ecd048b..0b1659026d85 100644 +--- a/drivers/usb/serial/mos7840.c ++++ b/drivers/usb/serial/mos7840.c +@@ -2192,6 +2192,17 @@ static int mos7840_calc_num_ports(struct usb_serial *serial) + return mos7840_num_ports; + } + ++static int mos7840_attach(struct usb_serial *serial) ++{ ++ if (serial->num_bulk_in < serial->num_ports || ++ serial->num_bulk_out < serial->num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int mos7840_port_probe(struct usb_serial_port *port) + { + struct usb_serial *serial = port->serial; +@@ -2472,6 +2483,7 @@ static struct usb_serial_driver moschip7840_4port_device = { + .tiocmset = mos7840_tiocmset, + .tiocmiwait = usb_serial_generic_tiocmiwait, + .get_icount = usb_serial_generic_get_icount, ++ .attach = mos7840_attach, + .port_probe = mos7840_port_probe, + .port_remove = mos7840_port_remove, + .read_bulk_callback = mos7840_bulk_in_callback, +diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c +index 5739bf6f7200..24720f656387 100644 +--- a/drivers/usb/serial/omninet.c ++++ b/drivers/usb/serial/omninet.c +@@ -39,6 +39,7 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port, + const unsigned char *buf, int count); + static int omninet_write_room(struct tty_struct *tty); + static void omninet_disconnect(struct usb_serial *serial); ++static int omninet_attach(struct usb_serial *serial); + static int omninet_port_probe(struct usb_serial_port *port); + static int omninet_port_remove(struct usb_serial_port *port); + +@@ -57,6 +58,7 @@ static struct usb_serial_driver zyxel_omninet_device = { + .description = "ZyXEL - omni.net lcd plus usb", + .id_table = id_table, + .num_ports = 1, ++ .attach = omninet_attach, + .port_probe = omninet_port_probe, + .port_remove = omninet_port_remove, + .open = omninet_open, +@@ -105,6 +107,17 @@ struct omninet_data { + __u8 od_outseq; /* Sequence number for bulk_out URBs */ + }; + ++static int omninet_attach(struct usb_serial *serial) ++{ ++ /* The second bulk-out endpoint is used for writing. 
*/ ++ if (serial->num_bulk_out < 2) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int omninet_port_probe(struct usb_serial_port *port) + { + struct omninet_data *od; +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 2bc169692965..99dff08b560b 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -269,6 +269,8 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_CC864_SINGLE 0x1006 + #define TELIT_PRODUCT_DE910_DUAL 0x1010 + #define TELIT_PRODUCT_UE910_V2 0x1012 ++#define TELIT_PRODUCT_LE922_USBCFG1 0x1040 ++#define TELIT_PRODUCT_LE922_USBCFG2 0x1041 + #define TELIT_PRODUCT_LE922_USBCFG0 0x1042 + #define TELIT_PRODUCT_LE922_USBCFG3 0x1043 + #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 +@@ -1212,6 +1214,10 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1), ++ .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, ++ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2), ++ .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3), + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff), +@@ -1856,6 +1862,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, ++ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */ + { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ +diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c +index a2080ac7b7e5..da6404c868e9 100644 +--- a/drivers/usb/serial/oti6858.c ++++ b/drivers/usb/serial/oti6858.c +@@ -135,6 +135,7 @@ static int oti6858_tiocmget(struct tty_struct *tty); + static int oti6858_tiocmset(struct tty_struct *tty, + unsigned int set, unsigned int clear); + static int oti6858_tiocmiwait(struct tty_struct *tty, unsigned long arg); ++static int oti6858_attach(struct usb_serial *serial); + static int oti6858_port_probe(struct usb_serial_port *port); + static int oti6858_port_remove(struct usb_serial_port *port); + +@@ -159,6 +160,7 @@ static struct usb_serial_driver oti6858_device = { + .write_bulk_callback = oti6858_write_bulk_callback, + .write_room = oti6858_write_room, + .chars_in_buffer = oti6858_chars_in_buffer, ++ .attach = oti6858_attach, + .port_probe = oti6858_port_probe, + .port_remove = oti6858_port_remove, + }; +@@ -328,6 +330,20 @@ static void send_data(struct work_struct *work) + usb_serial_port_softint(port); + } + ++static int oti6858_attach(struct usb_serial *serial) ++{ ++ unsigned char num_ports = serial->num_ports; ++ ++ if (serial->num_bulk_in < num_ports || ++ serial->num_bulk_out < num_ports || ++ serial->num_interrupt_in < num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ 
+ static int oti6858_port_probe(struct usb_serial_port *port) + { + struct oti6858_private *priv; +diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c +index e47f9c642404..23f11751e05a 100644 +--- a/drivers/usb/serial/pl2303.c ++++ b/drivers/usb/serial/pl2303.c +@@ -176,9 +176,17 @@ static int pl2303_vendor_write(__u16 value, __u16 index, + static int pl2303_startup(struct usb_serial *serial) + { + struct pl2303_serial_private *spriv; ++ unsigned char num_ports = serial->num_ports; + enum pl2303_type type = type_0; + unsigned char *buf; + ++ if (serial->num_bulk_in < num_ports || ++ serial->num_bulk_out < num_ports || ++ serial->num_interrupt_in < num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ + spriv = kzalloc(sizeof(*spriv), GFP_KERNEL); + if (!spriv) + return -ENOMEM; +diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c +index 58ab9e52a938..d0ee758dff0b 100644 +--- a/drivers/usb/serial/quatech2.c ++++ b/drivers/usb/serial/quatech2.c +@@ -409,16 +409,12 @@ static void qt2_close(struct usb_serial_port *port) + { + struct usb_serial *serial; + struct qt2_port_private *port_priv; +- unsigned long flags; + int i; + + serial = port->serial; + port_priv = usb_get_serial_port_data(port); + +- spin_lock_irqsave(&port_priv->urb_lock, flags); + usb_kill_urb(port_priv->write_urb); +- port_priv->urb_in_use = false; +- spin_unlock_irqrestore(&port_priv->urb_lock, flags); + + /* flush the port transmit buffer */ + i = usb_control_msg(serial->dev, +diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c +index 5b793c352267..ab754d23244c 100644 +--- a/drivers/usb/serial/spcp8x5.c ++++ b/drivers/usb/serial/spcp8x5.c +@@ -155,6 +155,19 @@ static int spcp8x5_probe(struct usb_serial *serial, + return 0; + } + ++static int spcp8x5_attach(struct usb_serial *serial) ++{ ++ unsigned char num_ports = serial->num_ports; ++ ++ if (serial->num_bulk_in < num_ports || ++ serial->num_bulk_out < num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ return -ENODEV; ++ } ++ ++ return 0; ++} ++ + static int spcp8x5_port_probe(struct usb_serial_port *port) + { + const struct usb_device_id *id = usb_get_serial_data(port->serial); +@@ -479,6 +492,7 @@ static struct usb_serial_driver spcp8x5_device = { + .tiocmget = spcp8x5_tiocmget, + .tiocmset = spcp8x5_tiocmset, + .probe = spcp8x5_probe, ++ .attach = spcp8x5_attach, + .port_probe = spcp8x5_port_probe, + .port_remove = spcp8x5_port_remove, + }; +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c +index 11b402935fbd..a7c3f0800de9 100644 +--- a/drivers/usb/serial/ti_usb_3410_5052.c ++++ b/drivers/usb/serial/ti_usb_3410_5052.c +@@ -341,6 +341,13 @@ static int ti_startup(struct usb_serial *serial) + goto free_tdev; + } + ++ if (serial->num_bulk_in < serial->num_ports || ++ serial->num_bulk_out < serial->num_ports) { ++ dev_err(&serial->interface->dev, "missing endpoints\n"); ++ status = -ENODEV; ++ goto free_tdev; ++ } ++ + return 0; + + free_tdev: +diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c +index 275aa3fc4087..f636e2eb0dd8 100644 +--- a/drivers/vfio/pci/vfio_pci.c ++++ b/drivers/vfio/pci/vfio_pci.c +@@ -468,8 +468,9 @@ static long vfio_pci_ioctl(void *device_data, + + } else if (cmd == VFIO_DEVICE_SET_IRQS) { + struct vfio_irq_set hdr; ++ size_t size; + u8 *data = NULL; +- int ret = 0; ++ int max, ret = 0; + + minsz = offsetofend(struct vfio_irq_set, count); + +@@ -477,23 
+478,31 @@ static long vfio_pci_ioctl(void *device_data, + return -EFAULT; + + if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS || ++ hdr.count >= (U32_MAX - hdr.start) || + hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | + VFIO_IRQ_SET_ACTION_TYPE_MASK)) + return -EINVAL; + +- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { +- size_t size; +- int max = vfio_pci_get_irq_count(vdev, hdr.index); ++ max = vfio_pci_get_irq_count(vdev, hdr.index); ++ if (hdr.start >= max || hdr.start + hdr.count > max) ++ return -EINVAL; + +- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) +- size = sizeof(uint8_t); +- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) +- size = sizeof(int32_t); +- else +- return -EINVAL; ++ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { ++ case VFIO_IRQ_SET_DATA_NONE: ++ size = 0; ++ break; ++ case VFIO_IRQ_SET_DATA_BOOL: ++ size = sizeof(uint8_t); ++ break; ++ case VFIO_IRQ_SET_DATA_EVENTFD: ++ size = sizeof(int32_t); ++ break; ++ default: ++ return -EINVAL; ++ } + +- if (hdr.argsz - minsz < hdr.count * size || +- hdr.start >= max || hdr.start + hdr.count > max) ++ if (size) { ++ if (hdr.argsz - minsz < hdr.count * size) + return -EINVAL; + + data = memdup_user((void __user *)(arg + minsz), +diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c +index 641bc87bdb96..05b0834e26e0 100644 +--- a/drivers/vfio/pci/vfio_pci_intrs.c ++++ b/drivers/vfio/pci/vfio_pci_intrs.c +@@ -465,7 +465,7 @@ static int vfio_msi_enable(struct vfio_pci_device *vdev, int nvec, bool msix) + if (!is_irq_none(vdev)) + return -EINVAL; + +- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); ++ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); + if (!vdev->ctx) + return -ENOMEM; + +diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c +index 1abbf80ffb19..9733b8a7fea7 100644 +--- a/drivers/vme/bridges/vme_ca91cx42.c ++++ b/drivers/vme/bridges/vme_ca91cx42.c +@@ -468,7 +468,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled, + vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]); + pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]); + +- *pci_base = (dma_addr_t)vme_base + pci_offset; ++ *pci_base = (dma_addr_t)*vme_base + pci_offset; + *size = (unsigned long long)((vme_bound - *vme_base) + granularity); + + *enabled = 0; +diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c +index 27accc4cc999..c17116f63eb1 100644 +--- a/drivers/xen/gntdev.c ++++ b/drivers/xen/gntdev.c +@@ -763,7 +763,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) + + vma->vm_ops = &gntdev_vmops; + +- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_IO; ++ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP; + + if (use_ptemod) + vma->vm_flags |= VM_DONTCOPY; +diff --git a/fs/9p/acl.c b/fs/9p/acl.c +index 7af425f53bee..9686c1f17653 100644 +--- a/fs/9p/acl.c ++++ b/fs/9p/acl.c +@@ -320,32 +320,26 @@ static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name, + case ACL_TYPE_ACCESS: + name = POSIX_ACL_XATTR_ACCESS; + if (acl) { +- umode_t mode = inode->i_mode; +- retval = posix_acl_equiv_mode(acl, &mode); +- if (retval < 0) ++ struct iattr iattr; ++ ++ retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl); ++ if (retval) + goto err_out; +- else { +- struct iattr iattr; +- if (retval == 0) { +- /* +- * ACL can be represented +- * by the mode bits. So don't +- * update ACL. 
+- */ +- acl = NULL; +- value = NULL; +- size = 0; +- } +- /* Updte the mode bits */ +- iattr.ia_mode = ((mode & S_IALLUGO) | +- (inode->i_mode & ~S_IALLUGO)); +- iattr.ia_valid = ATTR_MODE; +- /* FIXME should we update ctime ? +- * What is the following setxattr update the +- * mode ? ++ if (!acl) { ++ /* ++ * ACL can be represented ++ * by the mode bits. So don't ++ * update ACL. + */ +- v9fs_vfs_setattr_dotl(dentry, &iattr); ++ value = NULL; ++ size = 0; + } ++ iattr.ia_valid = ATTR_MODE; ++ /* FIXME should we update ctime ? ++ * What is the following setxattr update the ++ * mode ? ++ */ ++ v9fs_vfs_setattr_dotl(dentry, &iattr); + } + break; + case ACL_TYPE_DEFAULT: +diff --git a/fs/block_dev.c b/fs/block_dev.c +index 1e86823a9cbd..e833c974409c 100644 +--- a/fs/block_dev.c ++++ b/fs/block_dev.c +@@ -634,7 +634,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, + return true; /* already a holder */ + else if (bdev->bd_holder != NULL) + return false; /* held by someone else */ +- else if (bdev->bd_contains == bdev) ++ else if (whole == bdev) + return true; /* is a whole device which isn't held */ + + else if (whole->bd_holder == bd_may_claim) +@@ -1672,6 +1672,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) + spin_lock(&inode_sb_list_lock); + list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { + struct address_space *mapping = inode->i_mapping; ++ struct block_device *bdev; + + spin_lock(&inode->i_lock); + if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || +@@ -1692,8 +1693,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg) + */ + iput(old_inode); + old_inode = inode; ++ bdev = I_BDEV(inode); + +- func(I_BDEV(inode), arg); ++ mutex_lock(&bdev->bd_mutex); ++ if (bdev->bd_openers) ++ func(bdev, arg); ++ mutex_unlock(&bdev->bd_mutex); + + spin_lock(&inode_sb_list_lock); + } +diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c +index 0890c83643e9..d6d53e5e7945 100644 +--- a/fs/btrfs/acl.c ++++ b/fs/btrfs/acl.c +@@ -118,11 +118,9 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans, + case ACL_TYPE_ACCESS: + name = POSIX_ACL_XATTR_ACCESS; + if (acl) { +- ret = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (ret < 0) ++ ret = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (ret) + return ret; +- if (ret == 0) +- acl = NULL; + } + ret = 0; + break; +diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c +index 34f33e16b08f..269ac79ea25c 100644 +--- a/fs/btrfs/delayed-inode.c ++++ b/fs/btrfs/delayed-inode.c +@@ -1805,14 +1805,6 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, + struct btrfs_delayed_node *delayed_node; + int ret = 0; + +- /* +- * we don't do delayed inode updates during log recovery because it +- * leads to enospc problems. This means we also can't do +- * delayed inode refs +- */ +- if (BTRFS_I(inode)->root->fs_info->log_root_recovering) +- return -EAGAIN; +- + delayed_node = btrfs_get_or_create_delayed_node(inode); + if (IS_ERR(delayed_node)) + return PTR_ERR(delayed_node); +diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c +index 85bcb25384c0..854af9e95f4c 100644 +--- a/fs/btrfs/extent_io.c ++++ b/fs/btrfs/extent_io.c +@@ -4865,11 +4865,20 @@ int read_extent_buffer_pages(struct extent_io_tree *tree, + lock_page(page); + } + locked_pages++; ++ } ++ /* ++ * We need to firstly lock all pages to make sure that ++ * the uptodate bit of our pages won't be affected by ++ * clear_extent_buffer_uptodate(). 
++ */ ++ for (i = start_i; i < num_pages; i++) { ++ page = eb->pages[i]; + if (!PageUptodate(page)) { + num_reads++; + all_uptodate = 0; + } + } ++ + if (all_uptodate) { + if (start_i == 0) + set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); +diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c +index be3bf0be13c7..4c56a5028786 100644 +--- a/fs/btrfs/tree-log.c ++++ b/fs/btrfs/tree-log.c +@@ -1739,12 +1739,11 @@ static noinline int find_dir_range(struct btrfs_root *root, + next: + /* check the next slot in the tree to see if it is a valid item */ + nritems = btrfs_header_nritems(path->nodes[0]); ++ path->slots[0]++; + if (path->slots[0] >= nritems) { + ret = btrfs_next_leaf(root, path); + if (ret) + goto out; +- } else { +- path->slots[0]++; + } + + btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); +diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h +index 37e4a72a7d1c..ae4e35bdc2cd 100644 +--- a/fs/cifs/cifs_fs_sb.h ++++ b/fs/cifs/cifs_fs_sb.h +@@ -45,6 +45,9 @@ + #define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */ + #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */ + #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */ ++#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible ++ * root mountable ++ */ + + struct cifs_sb_info { + struct rb_root tlink_tree; +@@ -65,5 +68,6 @@ struct cifs_sb_info { + char *mountdata; /* options received at mount time or via DFS refs */ + struct backing_dev_info bdi; + struct delayed_work prune_tlinks; ++ char *prepath; + }; + #endif /* _CIFS_FS_SB_H */ +diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c +index 037b8f7e8a94..75aacb731c54 100644 +--- a/fs/cifs/cifsfs.c ++++ b/fs/cifs/cifsfs.c +@@ -586,6 +586,9 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb) + char *s, *p; + char sep; + ++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) ++ return dget(sb->s_root); ++ + full_path = cifs_build_path_to_root(vol, cifs_sb, + cifs_sb_master_tcon(cifs_sb)); + if (full_path == NULL) +@@ -665,10 +668,14 @@ cifs_do_mount(struct file_system_type *fs_type, + cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); + if (cifs_sb->mountdata == NULL) { + root = ERR_PTR(-ENOMEM); +- goto out_cifs_sb; ++ goto out_free; + } + +- cifs_setup_cifs_sb(volume_info, cifs_sb); ++ rc = cifs_setup_cifs_sb(volume_info, cifs_sb); ++ if (rc) { ++ root = ERR_PTR(rc); ++ goto out_free; ++ } + + rc = cifs_mount(cifs_sb, volume_info); + if (rc) { +@@ -676,7 +683,7 @@ cifs_do_mount(struct file_system_type *fs_type, + cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n", + rc); + root = ERR_PTR(rc); +- goto out_mountdata; ++ goto out_free; + } + + mnt_data.vol = volume_info; +@@ -719,9 +726,9 @@ out: + cifs_cleanup_volume_info(volume_info); + return root; + +-out_mountdata: ++out_free: ++ kfree(cifs_sb->prepath); + kfree(cifs_sb->mountdata); +-out_cifs_sb: + kfree(cifs_sb); + out_nls: + unload_nls(volume_info->local_nls); +diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h +index fa30efe15ba2..4b87feaa507f 100644 +--- a/fs/cifs/cifsglob.h ++++ b/fs/cifs/cifsglob.h +@@ -594,6 +594,8 @@ struct TCP_Server_Info { + #ifdef CONFIG_CIFS_SMB2 + unsigned int max_read; + unsigned int max_write; ++ struct delayed_work reconnect; /* reconnect workqueue job */ ++ struct mutex reconnect_mutex; /* prevent simultaneous reconnects */ + #endif /* CONFIG_CIFS_SMB2 */ + }; + +@@ -760,6 +762,7 @@ cap_unix(struct cifs_ses *ses) + struct cifs_tcon { + struct list_head 
tcon_list; + int tc_count; ++ struct list_head rlist; /* reconnect list */ + struct list_head openFileList; + spinlock_t open_file_lock; /* protects list above */ + struct cifs_ses *ses; /* pointer to session associated with */ +diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h +index c6bfe5b368f9..44d825cdf85e 100644 +--- a/fs/cifs/cifsproto.h ++++ b/fs/cifs/cifsproto.h +@@ -179,7 +179,7 @@ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, + extern int cifs_readv_from_socket(struct TCP_Server_Info *server, + struct kvec *iov_orig, unsigned int nr_segs, + unsigned int to_read); +-extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, ++extern int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, + struct cifs_sb_info *cifs_sb); + extern int cifs_match_super(struct super_block *, void *); + extern void cifs_cleanup_volume_info(struct smb_vol *pvolume_info); +@@ -199,6 +199,9 @@ extern void cifs_add_pending_open_locked(struct cifs_fid *fid, + struct tcon_link *tlink, + struct cifs_pending_open *open); + extern void cifs_del_pending_open(struct cifs_pending_open *open); ++extern void cifs_put_tcp_session(struct TCP_Server_Info *server, ++ int from_reconnect); ++extern void cifs_put_tcon(struct cifs_tcon *tcon); + + #if IS_ENABLED(CONFIG_CIFS_DFS_UPCALL) + extern void cifs_dfs_release_automount_timer(void); +diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c +index 54f507bd2c09..bd54422a260d 100644 +--- a/fs/cifs/connect.c ++++ b/fs/cifs/connect.c +@@ -52,6 +52,9 @@ + #include "nterr.h" + #include "rfc1002pdu.h" + #include "fscache.h" ++#ifdef CONFIG_CIFS_SMB2 ++#include "smb2proto.h" ++#endif + + #define CIFS_PORT 445 + #define RFC1001_PORT 139 +@@ -2060,8 +2063,8 @@ cifs_find_tcp_session(struct smb_vol *vol) + return NULL; + } + +-static void +-cifs_put_tcp_session(struct TCP_Server_Info *server) ++void ++cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) + { + struct task_struct *task; + +@@ -2078,6 +2081,19 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) + + cancel_delayed_work_sync(&server->echo); + ++#ifdef CONFIG_CIFS_SMB2 ++ if (from_reconnect) ++ /* ++ * Avoid deadlock here: reconnect work calls ++ * cifs_put_tcp_session() at its end. Need to be sure ++ * that reconnect work does nothing with server pointer after ++ * that step. 
++ */ ++ cancel_delayed_work(&server->reconnect); ++ else ++ cancel_delayed_work_sync(&server->reconnect); ++#endif ++ + spin_lock(&GlobalMid_Lock); + server->tcpStatus = CifsExiting; + spin_unlock(&GlobalMid_Lock); +@@ -2142,6 +2158,10 @@ cifs_get_tcp_session(struct smb_vol *volume_info) + INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); + INIT_LIST_HEAD(&tcp_ses->smb_ses_list); + INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); ++#ifdef CONFIG_CIFS_SMB2 ++ INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server); ++ mutex_init(&tcp_ses->reconnect_mutex); ++#endif + memcpy(&tcp_ses->srcaddr, &volume_info->srcaddr, + sizeof(tcp_ses->srcaddr)); + memcpy(&tcp_ses->dstaddr, &volume_info->dstaddr, +@@ -2294,7 +2314,7 @@ cifs_put_smb_ses(struct cifs_ses *ses) + spin_unlock(&cifs_tcp_ses_lock); + + sesInfoFree(ses); +- cifs_put_tcp_session(server); ++ cifs_put_tcp_session(server, 0); + } + + #ifdef CONFIG_KEYS +@@ -2467,7 +2487,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) + mutex_unlock(&ses->session_mutex); + + /* existing SMB ses has a server reference already */ +- cifs_put_tcp_session(server); ++ cifs_put_tcp_session(server, 0); + free_xid(xid); + return ses; + } +@@ -2557,7 +2577,7 @@ cifs_find_tcon(struct cifs_ses *ses, const char *unc) + return NULL; + } + +-static void ++void + cifs_put_tcon(struct cifs_tcon *tcon) + { + unsigned int xid; +@@ -2722,6 +2742,24 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) + return 1; + } + ++static int ++match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data) ++{ ++ struct cifs_sb_info *old = CIFS_SB(sb); ++ struct cifs_sb_info *new = mnt_data->cifs_sb; ++ ++ if (old->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) { ++ if (!(new->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)) ++ return 0; ++ /* The prepath should be null terminated strings */ ++ if (strcmp(new->prepath, old->prepath)) ++ return 0; ++ ++ return 1; ++ } ++ return 0; ++} ++ + int + cifs_match_super(struct super_block *sb, void *data) + { +@@ -2749,7 +2787,8 @@ cifs_match_super(struct super_block *sb, void *data) + + if (!match_server(tcp_srv, volume_info) || + !match_session(ses, volume_info) || +- !match_tcon(tcon, volume_info->UNC)) { ++ !match_tcon(tcon, volume_info->UNC) || ++ !match_prepath(sb, mnt_data)) { + rc = 0; + goto out; + } +@@ -3165,7 +3204,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, + } + } + +-void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, ++int cifs_setup_cifs_sb(struct smb_vol *pvolume_info, + struct cifs_sb_info *cifs_sb) + { + INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); +@@ -3247,6 +3286,15 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, + + if ((pvolume_info->cifs_acl) && (pvolume_info->dynperm)) + cifs_dbg(VFS, "mount option dynperm ignored if cifsacl mount option supported\n"); ++ ++ ++ if (pvolume_info->prepath) { ++ cifs_sb->prepath = kstrdup(pvolume_info->prepath, GFP_KERNEL); ++ if (cifs_sb->prepath == NULL) ++ return -ENOMEM; ++ } ++ ++ return 0; + } + + static void +@@ -3417,6 +3465,44 @@ cifs_get_volume_info(char *mount_data, const char *devname) + return volume_info; + } + ++static int ++cifs_are_all_path_components_accessible(struct TCP_Server_Info *server, ++ unsigned int xid, ++ struct cifs_tcon *tcon, ++ struct cifs_sb_info *cifs_sb, ++ char *full_path) ++{ ++ int rc; ++ char *s; ++ char sep, tmp; ++ ++ sep = CIFS_DIR_SEP(cifs_sb); ++ s = full_path; ++ ++ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, 
""); ++ while (rc == 0) { ++ /* skip separators */ ++ while (*s == sep) ++ s++; ++ if (!*s) ++ break; ++ /* next separator */ ++ while (*s && *s != sep) ++ s++; ++ ++ /* ++ * temporarily null-terminate the path at the end of ++ * the current component ++ */ ++ tmp = *s; ++ *s = 0; ++ rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ++ full_path); ++ *s = tmp; ++ } ++ return rc; ++} ++ + int + cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) + { +@@ -3543,6 +3629,17 @@ remote_path_check: + kfree(full_path); + goto mount_fail_check; + } ++ if (rc != -EREMOTE) { ++ rc = cifs_are_all_path_components_accessible(server, ++ xid, tcon, cifs_sb, ++ full_path); ++ if (rc != 0) { ++ cifs_dbg(VFS, "cannot query dirs between root and final path, " ++ "enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); ++ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; ++ rc = 0; ++ } ++ } + kfree(full_path); + } + +@@ -3606,7 +3703,7 @@ mount_fail_check: + else if (ses) + cifs_put_smb_ses(ses); + else +- cifs_put_tcp_session(server); ++ cifs_put_tcp_session(server, 0); + bdi_destroy(&cifs_sb->bdi); + } + +@@ -3799,6 +3896,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb) + + bdi_destroy(&cifs_sb->bdi); + kfree(cifs_sb->mountdata); ++ kfree(cifs_sb->prepath); + unload_nls(cifs_sb->local_nls); + kfree(cifs_sb); + } +@@ -3904,7 +4002,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) + ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); + if (IS_ERR(ses)) { + tcon = (struct cifs_tcon *)ses; +- cifs_put_tcp_session(master_tcon->ses->server); ++ cifs_put_tcp_session(master_tcon->ses->server, 0); + goto out; + } + +diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c +index 7347f1678fa7..39660990e4b0 100644 +--- a/fs/cifs/dir.c ++++ b/fs/cifs/dir.c +@@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry) + struct dentry *temp; + int namelen; + int dfsplen; ++ int pplen = 0; + char *full_path; + char dirsep; + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); +@@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry) + dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); + else + dfsplen = 0; ++ ++ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) ++ pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0; ++ + cifs_bp_rename_retry: +- namelen = dfsplen; ++ namelen = dfsplen + pplen; + seq = read_seqbegin(&rename_lock); + rcu_read_lock(); + for (temp = direntry; !IS_ROOT(temp);) { +@@ -137,7 +142,7 @@ cifs_bp_rename_retry: + } + } + rcu_read_unlock(); +- if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { ++ if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) { + cifs_dbg(FYI, "did not end path lookup where expected. 
namelen=%ddfsplen=%d\n", + namelen, dfsplen); + /* presumably this is only possible if racing with a rename +@@ -153,6 +158,17 @@ cifs_bp_rename_retry: + those safely to '/' if any are found in the middle of the prepath */ + /* BB test paths to Windows with '/' in the midst of prepath */ + ++ if (pplen) { ++ int i; ++ ++ cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath); ++ memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1); ++ full_path[dfsplen] = '\\'; ++ for (i = 0; i < pplen-1; i++) ++ if (full_path[dfsplen+1+i] == '/') ++ full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb); ++ } ++ + if (dfsplen) { + strncpy(full_path, tcon->treeName, dfsplen); + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { +diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c +index ab9f992ca479..518cf900682f 100644 +--- a/fs/cifs/inode.c ++++ b/fs/cifs/inode.c +@@ -937,12 +937,29 @@ struct inode *cifs_root_iget(struct super_block *sb) + struct inode *inode = NULL; + long rc; + struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); ++ char *path = NULL; ++ int len; ++ ++ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) ++ && cifs_sb->prepath) { ++ len = strlen(cifs_sb->prepath); ++ path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL); ++ if (path == NULL) ++ return ERR_PTR(-ENOMEM); ++ path[0] = '/'; ++ memcpy(path+1, cifs_sb->prepath, len); ++ } else { ++ path = kstrdup("", GFP_KERNEL); ++ if (path == NULL) ++ return ERR_PTR(-ENOMEM); ++ } + + xid = get_xid(); ++ convert_delimiter(path, CIFS_DIR_SEP(cifs_sb)); + if (tcon->unix_ext) +- rc = cifs_get_inode_info_unix(&inode, "", sb, xid); ++ rc = cifs_get_inode_info_unix(&inode, path, sb, xid); + else +- rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL); ++ rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL); + + if (!inode) { + inode = ERR_PTR(rc); +@@ -970,6 +987,7 @@ struct inode *cifs_root_iget(struct super_block *sb) + } + + out: ++ kfree(path); + /* can not call macro free_xid here since in a void func + * TODO: This is no longer true + */ +diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c +index 45992944e238..b87b07504947 100644 +--- a/fs/cifs/smb2file.c ++++ b/fs/cifs/smb2file.c +@@ -241,7 +241,7 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile) + * and check it for zero before using. 
+ */ + max_buf = tlink_tcon(cfile->tlink)->ses->server->maxBuf; +- if (!max_buf) { ++ if (max_buf < sizeof(struct smb2_lock_element)) { + free_xid(xid); + return -EINVAL; + } +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index 1a6dde4bce62..30d0751626e3 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -282,7 +282,7 @@ out: + case SMB2_CHANGE_NOTIFY: + case SMB2_QUERY_INFO: + case SMB2_SET_INFO: +- return -EAGAIN; ++ rc = -EAGAIN; + } + unload_nls(nls_codepage); + return rc; +@@ -1560,6 +1560,54 @@ smb2_echo_callback(struct mid_q_entry *mid) + add_credits(server, credits_received, CIFS_ECHO_OP); + } + ++void smb2_reconnect_server(struct work_struct *work) ++{ ++ struct TCP_Server_Info *server = container_of(work, ++ struct TCP_Server_Info, reconnect.work); ++ struct cifs_ses *ses; ++ struct cifs_tcon *tcon, *tcon2; ++ struct list_head tmp_list; ++ int tcon_exist = false; ++ ++ /* Prevent simultaneous reconnects that can corrupt tcon->rlist list */ ++ mutex_lock(&server->reconnect_mutex); ++ ++ INIT_LIST_HEAD(&tmp_list); ++ cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); ++ ++ spin_lock(&cifs_tcp_ses_lock); ++ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { ++ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { ++ if (tcon->need_reconnect) { ++ tcon->tc_count++; ++ list_add_tail(&tcon->rlist, &tmp_list); ++ tcon_exist = true; ++ } ++ } ++ } ++ /* ++ * Get the reference to server struct to be sure that the last call of ++ * cifs_put_tcon() in the loop below won't release the server pointer. ++ */ ++ if (tcon_exist) ++ server->srv_count++; ++ ++ spin_unlock(&cifs_tcp_ses_lock); ++ ++ list_for_each_entry_safe(tcon, tcon2, &tmp_list, rlist) { ++ smb2_reconnect(SMB2_ECHO, tcon); ++ list_del_init(&tcon->rlist); ++ cifs_put_tcon(tcon); ++ } ++ ++ cifs_dbg(FYI, "Reconnecting tcons finished\n"); ++ mutex_unlock(&server->reconnect_mutex); ++ ++ /* now we can safely release srv struct */ ++ if (tcon_exist) ++ cifs_put_tcp_session(server, 1); ++} ++ + int + SMB2_echo(struct TCP_Server_Info *server) + { +@@ -1572,32 +1620,11 @@ SMB2_echo(struct TCP_Server_Info *server) + cifs_dbg(FYI, "In echo request\n"); + + if (server->tcpStatus == CifsNeedNegotiate) { +- struct list_head *tmp, *tmp2; +- struct cifs_ses *ses; +- struct cifs_tcon *tcon; +- +- cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n"); +- spin_lock(&cifs_tcp_ses_lock); +- list_for_each(tmp, &server->smb_ses_list) { +- ses = list_entry(tmp, struct cifs_ses, smb_ses_list); +- list_for_each(tmp2, &ses->tcon_list) { +- tcon = list_entry(tmp2, struct cifs_tcon, +- tcon_list); +- /* add check for persistent handle reconnect */ +- if (tcon && tcon->need_reconnect) { +- spin_unlock(&cifs_tcp_ses_lock); +- rc = smb2_reconnect(SMB2_ECHO, tcon); +- spin_lock(&cifs_tcp_ses_lock); +- } +- } +- } +- spin_unlock(&cifs_tcp_ses_lock); ++ /* No need to send echo on newly established connections */ ++ queue_delayed_work(cifsiod_wq, &server->reconnect, 0); ++ return rc; + } + +- /* if no session, renegotiate failed above */ +- if (server->tcpStatus == CifsNeedNegotiate) +- return -EIO; +- + rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); + if (rc) + return rc; +diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h +index 5793f3e39a31..d45f772a35c9 100644 +--- a/fs/cifs/smb2proto.h ++++ b/fs/cifs/smb2proto.h +@@ -89,6 +89,7 @@ extern int smb2_open_file(const unsigned int xid, + extern int smb2_unlock_range(struct cifsFileInfo *cfile, + struct file_lock *flock, const unsigned int xid); + extern int 
smb2_push_mandatory_locks(struct cifsFileInfo *cfile); ++extern void smb2_reconnect_server(struct work_struct *work); + + /* + * SMB2 Worker functions - most of protocol specific implementation details +diff --git a/fs/dcache.c b/fs/dcache.c +index 11ded5b0b853..9a5e9082feb1 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -2623,6 +2623,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) + dentry->d_parent = dentry; + list_del_init(&dentry->d_child); + anon->d_parent = dparent; ++ if (likely(!d_unhashed(anon))) { ++ hlist_bl_lock(&anon->d_sb->s_anon); ++ __hlist_bl_del(&anon->d_hash); ++ anon->d_hash.pprev = NULL; ++ hlist_bl_unlock(&anon->d_sb->s_anon); ++ } + list_move(&anon->d_child, &dparent->d_subdirs); + + write_seqcount_end(&dentry->d_seq); +@@ -2677,7 +2683,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) + * could splice into our tree? */ + __d_materialise_dentry(dentry, alias); + write_sequnlock(&rename_lock); +- __d_drop(alias); + goto found; + } else { + /* Nope, but we must(!) avoid directory +diff --git a/fs/exec.c b/fs/exec.c +index d8b46a197172..f33c0fff702c 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -19,7 +19,7 @@ + * current->executable is only used by the procfs. This allows a dispatch + * table to check for several different types of binary formats. We keep + * trying until we recognize the file or we run out of supported binary +- * formats. ++ * formats. + */ + + #include <linux/slab.h> +@@ -1098,6 +1098,13 @@ int flush_old_exec(struct linux_binprm * bprm) + flush_thread(); + current->personality &= ~bprm->per_clear; + ++ /* ++ * We have to apply CLOEXEC before we change whether the process is ++ * dumpable (in setup_new_exec) to avoid a race with a process in userspace ++ * trying to access the should-be-closed file descriptors of a process ++ * undergoing exec(2). 
++ */ ++ do_close_on_exec(current->files); + return 0; + + out: +@@ -1148,7 +1155,6 @@ void setup_new_exec(struct linux_binprm * bprm) + current->self_exec_id++; + + flush_signal_handlers(current, 0); +- do_close_on_exec(current->files); + } + EXPORT_SYMBOL(setup_new_exec); + +diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c +index 110b6b371a4e..48c3c2d7d261 100644 +--- a/fs/ext2/acl.c ++++ b/fs/ext2/acl.c +@@ -206,15 +206,11 @@ ext2_set_acl(struct inode *inode, int type, struct posix_acl *acl) + case ACL_TYPE_ACCESS: + name_index = EXT2_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (error < 0) ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (error) + return error; +- else { +- inode->i_ctime = CURRENT_TIME_SEC; +- mark_inode_dirty(inode); +- if (error == 0) +- acl = NULL; +- } ++ inode->i_ctime = CURRENT_TIME_SEC; ++ mark_inode_dirty(inode); + } + break; + +diff --git a/fs/ext3/acl.c b/fs/ext3/acl.c +index dbb5ad59a7fc..2f994bbf73a7 100644 +--- a/fs/ext3/acl.c ++++ b/fs/ext3/acl.c +@@ -205,15 +205,11 @@ ext3_set_acl(handle_t *handle, struct inode *inode, int type, + case ACL_TYPE_ACCESS: + name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); + if (error < 0) + return error; +- else { +- inode->i_ctime = CURRENT_TIME_SEC; +- ext3_mark_inode_dirty(handle, inode); +- if (error == 0) +- acl = NULL; +- } ++ inode->i_ctime = CURRENT_TIME_SEC; ++ ext3_mark_inode_dirty(handle, inode); + } + break; + +diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c +index 39a54a0e9fe4..c844f1bfb451 100644 +--- a/fs/ext4/acl.c ++++ b/fs/ext4/acl.c +@@ -211,15 +211,11 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type, + case ACL_TYPE_ACCESS: + name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (error < 0) ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (error) + return error; +- else { +- inode->i_ctime = ext4_current_time(inode); +- ext4_mark_inode_dirty(handle, inode); +- if (error == 0) +- acl = NULL; +- } ++ inode->i_ctime = ext4_current_time(inode); ++ ext4_mark_inode_dirty(handle, inode); + } + break; + +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index b7e491056f9c..a4d6e9a953f9 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -339,8 +339,10 @@ static int ext4_update_inline_data(handle_t *handle, struct inode *inode, + + len -= EXT4_MIN_INLINE_DATA_SIZE; + value = kzalloc(len, GFP_NOFS); +- if (!value) ++ if (!value) { ++ error = -ENOMEM; + goto out; ++ } + + error = ext4_xattr_ibody_get(inode, i.name_index, i.name, + value, len); +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 4a3735a795d0..50fc2d1da9a9 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -701,6 +701,20 @@ has_zeroout: + int ret = check_block_validity(inode, map); + if (ret != 0) + return ret; ++ ++ /* ++ * Inodes with freshly allocated blocks where contents will be ++ * visible after transaction commit must be on transaction's ++ * ordered data list. 
++ */ ++ if (map->m_flags & EXT4_MAP_NEW && ++ !(map->m_flags & EXT4_MAP_UNWRITTEN) && ++ !IS_NOQUOTA(inode) && ++ ext4_should_order_data(inode)) { ++ ret = ext4_jbd2_file_inode(handle, inode); ++ if (ret) ++ return ret; ++ } + } + return retval; + } +@@ -1065,15 +1079,6 @@ static int ext4_write_end(struct file *file, + int i_size_changed = 0; + + trace_ext4_write_end(inode, pos, len, copied); +- if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) { +- ret = ext4_jbd2_file_inode(handle, inode); +- if (ret) { +- unlock_page(page); +- page_cache_release(page); +- goto errout; +- } +- } +- + if (ext4_has_inline_data(inode)) { + ret = ext4_write_inline_data_end(inode, pos, len, + copied, page); +@@ -4098,6 +4103,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) + struct inode *inode; + journal_t *journal = EXT4_SB(sb)->s_journal; + long ret; ++ loff_t size; + int block; + uid_t i_uid; + gid_t i_gid; +@@ -4189,6 +4195,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) + ei->i_file_acl |= + ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; + inode->i_size = ext4_isize(raw_inode); ++ if ((size = i_size_read(inode)) < 0) { ++ EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size); ++ ret = -EIO; ++ goto bad_inode; ++ } + ei->i_disksize = inode->i_size; + #ifdef CONFIG_QUOTA + ei->i_reserved_quota = 0; +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 96f4c72fbbd2..2b4ed2bf9569 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -668,7 +668,7 @@ static void ext4_mb_mark_free_simple(struct super_block *sb, + ext4_grpblk_t min; + ext4_grpblk_t max; + ext4_grpblk_t chunk; +- unsigned short border; ++ unsigned int border; + + BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb)); + +@@ -2243,7 +2243,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) + struct ext4_group_info *grinfo; + struct sg { + struct ext4_group_info info; +- ext4_grpblk_t counters[16]; ++ ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; + } sg; + + group--; +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 483bc328643d..6362896f5875 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -3257,10 +3257,15 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp, + ext4_set_bit(s++, buf); + count++; + } +- for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) { +- ext4_set_bit(EXT4_B2C(sbi, s++), buf); +- count++; ++ j = ext4_bg_num_gdb(sb, grp); ++ if (s + j > EXT4_BLOCKS_PER_GROUP(sb)) { ++ ext4_error(sb, "Invalid number of block group " ++ "descriptor blocks: %d", j); ++ j = EXT4_BLOCKS_PER_GROUP(sb) - s; + } ++ count += j; ++ for (; j > 0; j--) ++ ext4_set_bit(EXT4_B2C(sbi, s++), buf); + } + if (!count) + return 0; +@@ -3363,7 +3368,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + char *orig_data = kstrdup(data, GFP_KERNEL); + struct buffer_head *bh; + struct ext4_super_block *es = NULL; +- struct ext4_sb_info *sbi; ++ struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); + ext4_fsblk_t block; + ext4_fsblk_t sb_block = get_sb_block(&data); + ext4_fsblk_t logical_sb_block; +@@ -3383,16 +3388,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; + ext4_group_t first_not_zeroed; + +- sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); +- if (!sbi) +- goto out_free_orig; ++ if ((data && !orig_data) || !sbi) ++ goto out_free_base; + + sbi->s_blockgroup_lock = + kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL); +- if 
(!sbi->s_blockgroup_lock) { +- kfree(sbi); +- goto out_free_orig; +- } ++ if (!sbi->s_blockgroup_lock) ++ goto out_free_base; ++ + sb->s_fs_info = sbi; + sbi->s_sb = sb; + sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS; +@@ -3538,11 +3541,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + */ + sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; + +- if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, +- &journal_devnum, &journal_ioprio, 0)) { +- ext4_msg(sb, KERN_WARNING, +- "failed to parse options in superblock: %s", +- sbi->s_es->s_mount_opts); ++ if (sbi->s_es->s_mount_opts[0]) { ++ char *s_mount_opts = kstrndup(sbi->s_es->s_mount_opts, ++ sizeof(sbi->s_es->s_mount_opts), ++ GFP_KERNEL); ++ if (!s_mount_opts) ++ goto failed_mount; ++ if (!parse_options(s_mount_opts, sb, &journal_devnum, ++ &journal_ioprio, 0)) { ++ ext4_msg(sb, KERN_WARNING, ++ "failed to parse options in superblock: %s", ++ s_mount_opts); ++ } ++ kfree(s_mount_opts); + } + sbi->s_def_mount_opt = sbi->s_mount_opt; + if (!parse_options((char *) data, sb, &journal_devnum, +@@ -3689,12 +3700,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + + sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); + sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); +- if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0) +- goto cantfind_ext4; + + sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb); + if (sbi->s_inodes_per_block == 0) + goto cantfind_ext4; ++ if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || ++ sbi->s_inodes_per_group > blocksize * 8) { ++ ext4_msg(sb, KERN_ERR, "invalid inodes per group: %lu\n", ++ sbi->s_blocks_per_group); ++ goto failed_mount; ++ } + sbi->s_itb_per_group = sbi->s_inodes_per_group / + sbi->s_inodes_per_block; + sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb); +@@ -3778,13 +3793,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + } + sbi->s_cluster_ratio = clustersize / blocksize; + +- if (sbi->s_inodes_per_group > blocksize * 8) { +- ext4_msg(sb, KERN_ERR, +- "#inodes per group too big: %lu", +- sbi->s_inodes_per_group); +- goto failed_mount; +- } +- + /* Do we have standard group size of clustersize * 8 blocks ? */ + if (sbi->s_blocks_per_group == clustersize << 3) + set_opt2(sb, STD_GROUP_SIZE); +@@ -4173,7 +4181,9 @@ no_journal: + } + + ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. " +- "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts, ++ "Opts: %.*s%s%s", descr, ++ (int) sizeof(sbi->s_es->s_mount_opts), ++ sbi->s_es->s_mount_opts, + *sbi->s_es->s_mount_opts ? "; " : "", orig_data); + + if (es->s_error_count) +@@ -4242,8 +4252,8 @@ failed_mount: + out_fail: + sb->s_fs_info = NULL; + kfree(sbi->s_blockgroup_lock); ++out_free_base: + kfree(sbi); +-out_free_orig: + kfree(orig_data); + return err ? 
err : ret; + } +diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c +index b7826ec1b470..f4fefc57ff56 100644 +--- a/fs/f2fs/acl.c ++++ b/fs/f2fs/acl.c +@@ -223,12 +223,10 @@ static int f2fs_set_acl(struct inode *inode, int type, struct posix_acl *acl) + case ACL_TYPE_ACCESS: + name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (error < 0) ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (error) + return error; + set_acl_inode(fi, inode->i_mode); +- if (error == 0) +- acl = NULL; + } + break; + +diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c +index a84b0a8e6854..52355ba40c15 100644 +--- a/fs/f2fs/debug.c ++++ b/fs/f2fs/debug.c +@@ -294,6 +294,7 @@ static int stat_open(struct inode *inode, struct file *file) + } + + static const struct file_operations stat_fops = { ++ .owner = THIS_MODULE, + .open = stat_open, + .read = seq_read, + .llseek = seq_lseek, +diff --git a/fs/fuse/file.c b/fs/fuse/file.c +index 8ef52e12cd57..f6314cd3e3b0 100644 +--- a/fs/fuse/file.c ++++ b/fs/fuse/file.c +@@ -2393,6 +2393,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, + loff_t i_size; + size_t count = iov_length(iov, nr_segs); + struct fuse_io_priv *io; ++ bool is_sync = is_sync_kiocb(iocb); + + pos = offset; + inode = file->f_mapping->host; +@@ -2428,7 +2429,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, + * to wait on real async I/O requests, so we must submit this request + * synchronously. + */ +- if (!is_sync_kiocb(iocb) && (offset + count > i_size) && rw == WRITE) ++ if (!is_sync && (offset + count > i_size) && rw == WRITE) + io->async = false; + + if (rw == WRITE) +@@ -2440,7 +2441,7 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, + fuse_aio_complete(io, ret < 0 ? 
ret : 0, -1); + + /* we have a non-extending, async request, so return */ +- if (!is_sync_kiocb(iocb)) ++ if (!is_sync) + return -EIOCBQUEUED; + + ret = wait_on_sync_kiocb(iocb); +diff --git a/fs/generic_acl.c b/fs/generic_acl.c +index b3f3676796d3..7855cfb938f6 100644 +--- a/fs/generic_acl.c ++++ b/fs/generic_acl.c +@@ -82,19 +82,21 @@ generic_acl_set(struct dentry *dentry, const char *name, const void *value, + return PTR_ERR(acl); + } + if (acl) { ++ struct posix_acl *old_acl; ++ + error = posix_acl_valid(acl); + if (error) + goto failed; + switch (type) { + case ACL_TYPE_ACCESS: +- error = posix_acl_equiv_mode(acl, &inode->i_mode); ++ old_acl = acl; ++ error = posix_acl_update_mode(inode, &inode->i_mode, ++ &acl); + if (error < 0) + goto failed; ++ if (!acl) ++ posix_acl_release(old_acl); + inode->i_ctime = CURRENT_TIME; +- if (error == 0) { +- posix_acl_release(acl); +- acl = NULL; +- } + break; + case ACL_TYPE_DEFAULT: + if (!S_ISDIR(inode->i_mode)) { +diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c +index f69ac0af5496..a61b0c2b57ab 100644 +--- a/fs/gfs2/acl.c ++++ b/fs/gfs2/acl.c +@@ -268,15 +268,13 @@ static int gfs2_xattr_system_set(struct dentry *dentry, const char *name, + + if (type == ACL_TYPE_ACCESS) { + umode_t mode = inode->i_mode; +- error = posix_acl_equiv_mode(acl, &mode); ++ struct posix_acl *old_acl = acl; + +- if (error <= 0) { +- posix_acl_release(acl); +- acl = NULL; +- +- if (error < 0) +- return error; +- } ++ error = posix_acl_update_mode(inode, &mode, &acl); ++ if (error < 0) ++ goto out_release; ++ if (!acl) ++ posix_acl_release(old_acl); + + error = gfs2_set_mode(inode, mode); + if (error) +diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c +index b609cc14c72e..9f7cc491ffb1 100644 +--- a/fs/hfsplus/posix_acl.c ++++ b/fs/hfsplus/posix_acl.c +@@ -72,8 +72,8 @@ static int hfsplus_set_posix_acl(struct inode *inode, + case ACL_TYPE_ACCESS: + xattr_name = POSIX_ACL_XATTR_ACCESS; + if (acl) { +- err = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (err < 0) ++ err = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (err) + return err; + } + err = 0; +diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c +index bd8471fb9a6a..889be3fef4bc 100644 +--- a/fs/hfsplus/xattr.c ++++ b/fs/hfsplus/xattr.c +@@ -69,8 +69,9 @@ static int can_set_system_xattr(struct inode *inode, const char *name, + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl) { +- err = posix_acl_equiv_mode(acl, &inode->i_mode); +- posix_acl_release(acl); ++ struct posix_acl *old_acl = acl; ++ err = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ posix_acl_release(old_acl); + if (err < 0) + return err; + mark_inode_dirty(inode); +diff --git a/fs/ioprio.c b/fs/ioprio.c +index 31666c92b46a..563435684c3c 100644 +--- a/fs/ioprio.c ++++ b/fs/ioprio.c +@@ -149,8 +149,10 @@ static int get_task_ioprio(struct task_struct *p) + if (ret) + goto out; + ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM); ++ task_lock(p); + if (p->io_context) + ret = p->io_context->ioprio; ++ task_unlock(p); + out: + return ret; + } +diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c +index 223283c30111..9335b8d3cf52 100644 +--- a/fs/jffs2/acl.c ++++ b/fs/jffs2/acl.c +@@ -243,9 +243,10 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) + case ACL_TYPE_ACCESS: + xprefix = JFFS2_XPREFIX_ACL_ACCESS; + if (acl) { +- umode_t mode = inode->i_mode; +- rc = posix_acl_equiv_mode(acl, &mode); +- if (rc < 0) ++ umode_t mode; ++ ++ rc = posix_acl_update_mode(inode, &mode, &acl); ++ if (rc) + return 
rc; + if (inode->i_mode != mode) { + struct iattr attr; +@@ -257,8 +258,6 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl) + if (rc < 0) + return rc; + } +- if (rc == 0) +- acl = NULL; + } + break; + case ACL_TYPE_DEFAULT: +diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c +index d3472f4cd530..8c9b6a06dcbb 100644 +--- a/fs/jfs/xattr.c ++++ b/fs/jfs/xattr.c +@@ -693,8 +693,9 @@ static int can_set_system_xattr(struct inode *inode, const char *name, + return rc; + } + if (acl) { +- rc = posix_acl_equiv_mode(acl, &inode->i_mode); +- posix_acl_release(acl); ++ struct posix_acl *old_acl = acl; ++ rc = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ posix_acl_release(old_acl); + if (rc < 0) { + printk(KERN_ERR + "posix_acl_equiv_mode returned %d\n", +diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c +index b9670301d7d3..24e6448b7c80 100644 +--- a/fs/nfs/dir.c ++++ b/fs/nfs/dir.c +@@ -1487,6 +1487,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, + switch (err) { + case -ENOENT: + d_add(dentry, NULL); ++ nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); + break; + case -EISDIR: + case -ENOTDIR: +diff --git a/fs/nfs/file.c b/fs/nfs/file.c +index 1e6bfdbc1aff..0a0b5063e50e 100644 +--- a/fs/nfs/file.c ++++ b/fs/nfs/file.c +@@ -425,7 +425,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, + */ + if (!PageUptodate(page)) { + unsigned pglen = nfs_page_length(page); +- unsigned end = offset + len; ++ unsigned end = offset + copied; + + if (pglen == 0) { + zero_user_segments(page, 0, offset, +diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c +index efac602edb37..91de91430b31 100644 +--- a/fs/nfs/nfs4filelayoutdev.c ++++ b/fs/nfs/nfs4filelayoutdev.c +@@ -827,7 +827,8 @@ nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) + nfs4_wait_ds_connect(ds); + } + out_test_devid: +- if (filelayout_test_devid_unavailable(devid)) ++ if (ret->ds_clp == NULL || ++ filelayout_test_devid_unavailable(devid)) + ret = NULL; + out: + return ret; +diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c +index b4f788e0ca31..23095b017752 100644 +--- a/fs/ocfs2/acl.c ++++ b/fs/ocfs2/acl.c +@@ -270,20 +270,14 @@ static int ocfs2_set_acl(handle_t *handle, + case ACL_TYPE_ACCESS: + name_index = OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS; + if (acl) { +- umode_t mode = inode->i_mode; +- ret = posix_acl_equiv_mode(acl, &mode); +- if (ret < 0) ++ umode_t mode; ++ ret = posix_acl_update_mode(inode, &mode, &acl); ++ if (ret) ++ return ret; ++ ret = ocfs2_acl_set_mode(inode, di_bh, ++ handle, mode); ++ if (ret) + return ret; +- else { +- if (ret == 0) +- acl = NULL; +- +- ret = ocfs2_acl_set_mode(inode, di_bh, +- handle, mode); +- if (ret) +- return ret; +- +- } + } + break; + case ACL_TYPE_DEFAULT: +diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c +index 416a2ab68ac1..9c93df0f241d 100644 +--- a/fs/ocfs2/dlmglue.c ++++ b/fs/ocfs2/dlmglue.c +@@ -3302,6 +3302,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb, + mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name, + lockres->l_level, new_level); + ++ /* ++ * On DLM_LKF_VALBLK, fsdlm behaves differently with o2cb. It always ++ * expects DLM_LKF_VALBLK being set if the LKB has LVB, so that ++ * we can recover correctly from node failure. Otherwise, we may get ++ * invalid LVB in LKB, but without DLM_SBF_VALNOTVALID being set. 
++ */ ++ if (!ocfs2_is_o2cb_active() && ++ lockres->l_ops->flags & LOCK_TYPE_USES_LVB) ++ lvb = 1; ++ + if (lvb) + dlm_flags |= DLM_LKF_VALBLK; + +diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c +index 54ba0afacf00..7201b56e8f2c 100644 +--- a/fs/ocfs2/file.c ++++ b/fs/ocfs2/file.c +@@ -1100,6 +1100,7 @@ out: + int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) + { + int status = 0, size_change; ++ int inode_locked = 0; + struct inode *inode = dentry->d_inode; + struct super_block *sb = inode->i_sb; + struct ocfs2_super *osb = OCFS2_SB(sb); +@@ -1145,6 +1146,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) + mlog_errno(status); + goto bail_unlock_rw; + } ++ inode_locked = 1; + + if (size_change && attr->ia_size != i_size_read(inode)) { + status = inode_newsize_ok(inode, attr->ia_size); +@@ -1225,7 +1227,10 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) + bail_commit: + ocfs2_commit_trans(osb, handle); + bail_unlock: +- ocfs2_inode_unlock(inode, 1); ++ if (status) { ++ ocfs2_inode_unlock(inode, 1); ++ inode_locked = 0; ++ } + bail_unlock_rw: + if (size_change) + ocfs2_rw_unlock(inode, 1); +@@ -1241,6 +1246,8 @@ bail: + if (status < 0) + mlog_errno(status); + } ++ if (inode_locked) ++ ocfs2_inode_unlock(inode, 1); + + return status; + } +diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c +index 39abf89697ed..88610b3cbc04 100644 +--- a/fs/ocfs2/stackglue.c ++++ b/fs/ocfs2/stackglue.c +@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl"; + */ + static struct ocfs2_stack_plugin *active_stack; + ++inline int ocfs2_is_o2cb_active(void) ++{ ++ return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB); ++} ++EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active); ++ + static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name) + { + struct ocfs2_stack_plugin *p; +diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h +index 1ec56fdb8d0d..fa49d8a1dc7b 100644 +--- a/fs/ocfs2/stackglue.h ++++ b/fs/ocfs2/stackglue.h +@@ -289,4 +289,7 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p + int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin); + void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin); + ++/* In ocfs2_downconvert_lock(), we need to know which stack we are using */ ++int ocfs2_is_o2cb_active(void); ++ + #endif /* STACKGLUE_H */ +diff --git a/fs/posix_acl.c b/fs/posix_acl.c +index 3542f1f814e2..1da000aabb08 100644 +--- a/fs/posix_acl.c ++++ b/fs/posix_acl.c +@@ -407,6 +407,37 @@ posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p) + } + EXPORT_SYMBOL(posix_acl_create); + ++/** ++ * posix_acl_update_mode - update mode in set_acl ++ * ++ * Update the file mode when setting an ACL: compute the new file permission ++ * bits based on the ACL. In addition, if the ACL is equivalent to the new ++ * file mode, set *acl to NULL to indicate that no ACL should be set. ++ * ++ * As with chmod, clear the setgid bit if the caller is not in the owning group ++ * or capable of CAP_FSETID (see inode_change_ok). ++ * ++ * Called from set_acl inode operations. 
++ */ ++int posix_acl_update_mode(struct inode *inode, umode_t *mode_p, ++ struct posix_acl **acl) ++{ ++ umode_t mode = inode->i_mode; ++ int error; ++ ++ error = posix_acl_equiv_mode(*acl, &mode); ++ if (error < 0) ++ return error; ++ if (error == 0) ++ *acl = NULL; ++ if (!in_group_p(inode->i_gid) && ++ !capable_wrt_inode_uidgid(inode, CAP_FSETID)) ++ mode &= ~S_ISGID; ++ *mode_p = mode; ++ return 0; ++} ++EXPORT_SYMBOL(posix_acl_update_mode); ++ + int + posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode) + { +diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c +index 71290463a1d3..c615a4592572 100644 +--- a/fs/proc/proc_sysctl.c ++++ b/fs/proc/proc_sysctl.c +@@ -666,7 +666,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx) + ctl_dir = container_of(head, struct ctl_dir, header); + + if (!dir_emit_dots(file, ctx)) +- return 0; ++ goto out; + + pos = 2; + +@@ -676,6 +676,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx) + break; + } + } ++out: + sysctl_head_finish(head); + return 0; + } +diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c +index 06c04f73da65..a86ad7ec7957 100644 +--- a/fs/reiserfs/xattr_acl.c ++++ b/fs/reiserfs/xattr_acl.c +@@ -288,13 +288,9 @@ reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode, + case ACL_TYPE_ACCESS: + name = POSIX_ACL_XATTR_ACCESS; + if (acl) { +- error = posix_acl_equiv_mode(acl, &inode->i_mode); +- if (error < 0) ++ error = posix_acl_update_mode(inode, &inode->i_mode, &acl); ++ if (error) + return error; +- else { +- if (error == 0) +- acl = NULL; +- } + } + break; + case ACL_TYPE_DEFAULT: +diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c +index 349f31a30f40..fdf2ca1dd771 100644 +--- a/fs/ubifs/tnc.c ++++ b/fs/ubifs/tnc.c +@@ -34,6 +34,11 @@ + #include <linux/slab.h> + #include "ubifs.h" + ++static int try_read_node(const struct ubifs_info *c, void *buf, int type, ++ int len, int lnum, int offs); ++static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key, ++ struct ubifs_zbranch *zbr, void *node); ++ + /* + * Returned codes of 'matches_name()' and 'fallible_matches_name()' functions. + * @NAME_LESS: name corresponding to the first argument is less than second +@@ -419,7 +424,19 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr, + return 0; + } + +- err = ubifs_tnc_read_node(c, zbr, node); ++ if (c->replaying) { ++ err = fallible_read_node(c, &zbr->key, zbr, node); ++ /* ++ * When the node was not found, return -ENOENT, 0 otherwise. ++ * Negative return codes stay as-is. 
++ */ ++ if (err == 0) ++ err = -ENOENT; ++ else if (err == 1) ++ err = 0; ++ } else { ++ err = ubifs_tnc_read_node(c, zbr, node); ++ } + if (err) + return err; + +@@ -2783,7 +2800,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c, + if (nm->name) { + if (err) { + /* Handle collisions */ +- err = resolve_collision(c, key, &znode, &n, nm); ++ if (c->replaying) ++ err = fallible_resolve_collision(c, key, &znode, &n, ++ nm, 0); ++ else ++ err = resolve_collision(c, key, &znode, &n, nm); + dbg_tnc("rc returned %d, znode %p, n %d", + err, znode, n); + if (unlikely(err < 0)) +diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c +index 0e2f37efedd0..9c7b5ce06f4f 100644 +--- a/fs/xfs/xfs_acl.c ++++ b/fs/xfs/xfs_acl.c +@@ -402,16 +402,15 @@ xfs_xattr_acl_set(struct dentry *dentry, const char *name, + goto out_release; + + if (type == ACL_TYPE_ACCESS) { +- umode_t mode = inode->i_mode; +- error = posix_acl_equiv_mode(acl, &mode); ++ umode_t mode; ++ struct posix_acl *old_acl = acl; + +- if (error <= 0) { +- posix_acl_release(acl); +- acl = NULL; ++ error = posix_acl_update_mode(inode, &mode, &acl); + +- if (error < 0) +- return error; +- } ++ if (error) ++ goto out_release; ++ if (!acl) ++ posix_acl_release(old_acl); + + error = xfs_set_mode(inode, mode); + if (error) +diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c +index 5b166a07d55e..48dcb167cce5 100644 +--- a/fs/xfs/xfs_log_recover.c ++++ b/fs/xfs/xfs_log_recover.c +@@ -3923,6 +3923,7 @@ xlog_recover_clear_agi_bucket( + agi->agi_unlinked[bucket] = cpu_to_be32(NULLAGINO); + offset = offsetof(xfs_agi_t, agi_unlinked) + + (sizeof(xfs_agino_t) * bucket); ++ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF); + xfs_trans_log_buf(tp, agibp, offset, + (offset + sizeof(xfs_agino_t) - 1)); + +diff --git a/include/linux/capability.h b/include/linux/capability.h +index aa93e5ef594c..c2eb39ff1a53 100644 +--- a/include/linux/capability.h ++++ b/include/linux/capability.h +@@ -40,8 +40,6 @@ struct inode; + struct dentry; + struct user_namespace; + +-struct user_namespace *current_user_ns(void); +- + extern const kernel_cap_t __cap_empty_set; + extern const kernel_cap_t __cap_init_eff_set; + +diff --git a/include/linux/cpu.h b/include/linux/cpu.h +index 801ff9e73679..d1fcdcbc01e4 100644 +--- a/include/linux/cpu.h ++++ b/include/linux/cpu.h +@@ -119,22 +119,16 @@ enum { + { .notifier_call = fn, .priority = pri }; \ + register_cpu_notifier(&fn##_nb); \ + } +-#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ +-#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) +-#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ +-#ifdef CONFIG_HOTPLUG_CPU + extern int register_cpu_notifier(struct notifier_block *nb); + extern void unregister_cpu_notifier(struct notifier_block *nb); +-#else + +-#ifndef MODULE +-extern int register_cpu_notifier(struct notifier_block *nb); +-#else ++#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */ ++#define cpu_notifier(fn, pri) do { (void)(fn); } while (0) ++ + static inline int register_cpu_notifier(struct notifier_block *nb) + { + return 0; + } +-#endif + + static inline void unregister_cpu_notifier(struct notifier_block *nb) + { +diff --git a/include/linux/cred.h b/include/linux/cred.h +index 6c58dd7cb9ac..cd3fb73dc421 100644 +--- a/include/linux/cred.h ++++ b/include/linux/cred.h +@@ -345,7 +345,10 @@ extern struct user_namespace init_user_ns; + #ifdef CONFIG_USER_NS + #define current_user_ns() (current_cred_xxx(user_ns)) + #else +-#define 
current_user_ns() (&init_user_ns) ++static inline struct user_namespace *current_user_ns(void) ++{ ++ return &init_user_ns; ++} + #endif + + +diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h +index 113788389b3d..3f66ce8f0819 100644 +--- a/include/linux/jump_label_ratelimit.h ++++ b/include/linux/jump_label_ratelimit.h +@@ -14,6 +14,7 @@ struct static_key_deferred { + + #ifdef HAVE_JUMP_LABEL + extern void static_key_slow_dec_deferred(struct static_key_deferred *key); ++extern void static_key_deferred_flush(struct static_key_deferred *key); + extern void + jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl); + +@@ -25,6 +26,9 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key) + { + static_key_slow_dec(&key->key); + } ++static inline void static_key_deferred_flush(struct static_key_deferred *key) ++{ ++} + static inline void + jump_label_rate_limit(struct static_key_deferred *key, + unsigned long rl) +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 41239f739d51..0a793dcd975f 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1829,14 +1829,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen) + return NAPI_GRO_CB(skb)->frag0_len < hlen; + } + ++static inline void skb_gro_frag0_invalidate(struct sk_buff *skb) ++{ ++ NAPI_GRO_CB(skb)->frag0 = NULL; ++ NAPI_GRO_CB(skb)->frag0_len = 0; ++} ++ + static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen, + unsigned int offset) + { + if (!pskb_may_pull(skb, hlen)) + return NULL; + +- NAPI_GRO_CB(skb)->frag0 = NULL; +- NAPI_GRO_CB(skb)->frag0_len = 0; ++ skb_gro_frag0_invalidate(skb); + return skb->data + offset; + } + +diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h +index 7931efe71175..43cb8d59d0a7 100644 +--- a/include/linux/posix_acl.h ++++ b/include/linux/posix_acl.h +@@ -89,6 +89,7 @@ extern int posix_acl_permission(struct inode *, const struct posix_acl *, int); + extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t); + extern int posix_acl_equiv_mode(const struct posix_acl *, umode_t *); + extern int posix_acl_create(struct posix_acl **, gfp_t, umode_t *); ++extern int posix_acl_update_mode(struct inode *, umode_t *, struct posix_acl **); + extern int posix_acl_chmod(struct posix_acl **, gfp_t, umode_t); + + extern struct posix_acl *get_posix_acl(struct inode *, int); +diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h +index e52958d7c2d1..3018528bd1bf 100644 +--- a/include/uapi/linux/can.h ++++ b/include/uapi/linux/can.h +@@ -158,5 +158,6 @@ struct can_filter { + }; + + #define CAN_INV_FILTER 0x20000000U /* to be set in can_filter.can_id */ ++#define CAN_RAW_FILTER_MAX 512 /* maximum number of can_filter set via setsockopt() */ + + #endif /* CAN_H */ +diff --git a/kernel/cpu.c b/kernel/cpu.c +index 92599d897125..c1f258a0a10e 100644 +--- a/kernel/cpu.c ++++ b/kernel/cpu.c +@@ -182,8 +182,6 @@ static int cpu_notify(unsigned long val, void *v) + return __cpu_notify(val, v, -1, NULL); + } + +-#ifdef CONFIG_HOTPLUG_CPU +- + static void cpu_notify_nofail(unsigned long val, void *v) + { + BUG_ON(cpu_notify(val, v)); +@@ -198,6 +196,7 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb) + } + EXPORT_SYMBOL(unregister_cpu_notifier); + ++#ifdef CONFIG_HOTPLUG_CPU + /** + * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU + * @cpu: a CPU id +diff --git a/kernel/jump_label.c 
b/kernel/jump_label.c +index 297a9247a3b3..9ce813e99a56 100644 +--- a/kernel/jump_label.c ++++ b/kernel/jump_label.c +@@ -113,6 +113,12 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key) + } + EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred); + ++void static_key_deferred_flush(struct static_key_deferred *key) ++{ ++ flush_delayed_work(&key->work); ++} ++EXPORT_SYMBOL_GPL(static_key_deferred_flush); ++ + void jump_label_rate_limit(struct static_key_deferred *key, + unsigned long rl) + { +diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c +index 51a83343df68..132c6a00e301 100644 +--- a/kernel/rtmutex.c ++++ b/kernel/rtmutex.c +@@ -64,8 +64,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock) + + static void fixup_rt_mutex_waiters(struct rt_mutex *lock) + { +- if (!rt_mutex_has_waiters(lock)) +- clear_rt_mutex_waiters(lock); ++ unsigned long owner, *p = (unsigned long *) &lock->owner; ++ ++ if (rt_mutex_has_waiters(lock)) ++ return; ++ ++ /* ++ * The rbtree has no waiters enqueued, now make sure that the ++ * lock->owner still has the waiters bit set, otherwise the ++ * following can happen: ++ * ++ * CPU 0 CPU 1 CPU2 ++ * l->owner=T1 ++ * rt_mutex_lock(l) ++ * lock(l->lock) ++ * l->owner = T1 | HAS_WAITERS; ++ * enqueue(T2) ++ * boost() ++ * unlock(l->lock) ++ * block() ++ * ++ * rt_mutex_lock(l) ++ * lock(l->lock) ++ * l->owner = T1 | HAS_WAITERS; ++ * enqueue(T3) ++ * boost() ++ * unlock(l->lock) ++ * block() ++ * signal(->T2) signal(->T3) ++ * lock(l->lock) ++ * dequeue(T2) ++ * deboost() ++ * unlock(l->lock) ++ * lock(l->lock) ++ * dequeue(T3) ++ * ==> wait list is empty ++ * deboost() ++ * unlock(l->lock) ++ * lock(l->lock) ++ * fixup_rt_mutex_waiters() ++ * if (wait_list_empty(l) { ++ * l->owner = owner ++ * owner = l->owner & ~HAS_WAITERS; ++ * ==> l->owner = T1 ++ * } ++ * lock(l->lock) ++ * rt_mutex_unlock(l) fixup_rt_mutex_waiters() ++ * if (wait_list_empty(l) { ++ * owner = l->owner & ~HAS_WAITERS; ++ * cmpxchg(l->owner, T1, NULL) ++ * ===> Success (l->owner = NULL) ++ * ++ * l->owner = owner ++ * ==> l->owner = T1 ++ * } ++ * ++ * With the check for the waiter bit in place T3 on CPU2 will not ++ * overwrite. All tasks fiddling with the waiters bit are ++ * serialized by l->lock, so nothing else can modify the waiters ++ * bit. If the bit is set then nothing can change l->owner either ++ * so the simple RMW is safe. The cmpxchg() will simply fail if it ++ * happens in the middle of the RMW because the waiters bit is ++ * still set. 
++ */ ++ owner = READ_ONCE(*p); ++ if (owner & RT_MUTEX_HAS_WAITERS) ++ WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); + } + + /* +diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h +index 53a66c85261b..1823c094fe96 100644 +--- a/kernel/rtmutex_common.h ++++ b/kernel/rtmutex_common.h +@@ -96,8 +96,9 @@ task_top_pi_waiter(struct task_struct *p) + + static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) + { +- return (struct task_struct *) +- ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL); ++ unsigned long owner = (unsigned long) READ_ONCE(lock->owner); ++ ++ return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL); + } + + /* +diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c +index 8a95408b1345..f27eb5db3260 100644 +--- a/kernel/time/tick-broadcast.c ++++ b/kernel/time/tick-broadcast.c +@@ -778,6 +778,9 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc) + { + int cpu = smp_processor_id(); + ++ if (!bc) ++ return; ++ + /* Set it up only once ! */ + if (bc->event_handler != tick_handle_oneshot_broadcast) { + int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC; +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index 2aaf11bdfb17..24d50334d51c 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -1114,23 +1114,32 @@ free: + } + + /* +- * When releasing a hugetlb pool reservation, any surplus pages that were +- * allocated to satisfy the reservation must be explicitly freed if they were +- * never used. +- * Called with hugetlb_lock held. ++ * This routine has two main purposes: ++ * 1) Decrement the reservation count (resv_huge_pages) by the value passed ++ * in unused_resv_pages. This corresponds to the prior adjustments made ++ * to the associated reservation map. ++ * 2) Free any unused surplus pages that may have been allocated to satisfy ++ * the reservation. As many as unused_resv_pages may be freed. ++ * ++ * Called with hugetlb_lock held. However, the lock could be dropped (and ++ * reacquired) during calls to cond_resched_lock. Whenever dropping the lock, ++ * we must make sure nobody else can claim pages we are in the process of ++ * freeing. Do this by ensuring resv_huge_page always is greater than the ++ * number of huge pages we plan to free when dropping the lock. + */ + static void return_unused_surplus_pages(struct hstate *h, + unsigned long unused_resv_pages) + { + unsigned long nr_pages; + +- /* Uncommit the reservation */ +- h->resv_huge_pages -= unused_resv_pages; +- + /* Cannot return gigantic pages currently */ + if (h->order >= MAX_ORDER) +- return; ++ goto out; + ++ /* ++ * Part (or even all) of the reservation could have been backed ++ * by pre-allocated pages. Only free surplus pages. ++ */ + nr_pages = min(unused_resv_pages, h->surplus_huge_pages); + + /* +@@ -1140,12 +1149,22 @@ static void return_unused_surplus_pages(struct hstate *h, + * when the nodes with surplus pages have no free pages. + * free_pool_huge_page() will balance the the freed pages across the + * on-line nodes with memory and will handle the hstate accounting. ++ * ++ * Note that we decrement resv_huge_pages as we free the pages. If ++ * we drop the lock, resv_huge_pages will still be sufficiently large ++ * to cover subsequent pages we may free. 
+ */ + while (nr_pages--) { ++ h->resv_huge_pages--; ++ unused_resv_pages--; + if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1)) +- break; ++ goto out; + cond_resched_lock(&hugetlb_lock); + } ++ ++out: ++ /* Fully uncommit the reservation */ ++ h->resv_huge_pages -= unused_resv_pages; + } + + /* +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 7abab3b7d140..8927c8d0ff4e 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -5279,15 +5279,18 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) + sizeof(arch_zone_lowest_possible_pfn)); + memset(arch_zone_highest_possible_pfn, 0, + sizeof(arch_zone_highest_possible_pfn)); +- arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions(); +- arch_zone_highest_possible_pfn[0] = max_zone_pfn[0]; +- for (i = 1; i < MAX_NR_ZONES; i++) { ++ ++ start_pfn = find_min_pfn_with_active_regions(); ++ ++ for (i = 0; i < MAX_NR_ZONES; i++) { + if (i == ZONE_MOVABLE) + continue; +- arch_zone_lowest_possible_pfn[i] = +- arch_zone_highest_possible_pfn[i-1]; +- arch_zone_highest_possible_pfn[i] = +- max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]); ++ ++ end_pfn = max(max_zone_pfn[i], start_pfn); ++ arch_zone_lowest_possible_pfn[i] = start_pfn; ++ arch_zone_highest_possible_pfn[i] = end_pfn; ++ ++ start_pfn = end_pfn; + } + arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0; + arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0; +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 6dc33d9dc2cf..dc23ad3ecf4c 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -231,6 +231,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, + int nid = shrinkctl->nid; + long batch_size = shrinker->batch ? shrinker->batch + : SHRINK_BATCH; ++ long scanned = 0, next_deferred; + + freeable = shrinker->count_objects(shrinker, shrinkctl); + if (freeable == 0) +@@ -253,7 +254,9 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, + "shrink_slab: %pF negative objects to delete nr=%ld\n", + shrinker->scan_objects, total_scan); + total_scan = freeable; +- } ++ next_deferred = nr; ++ } else ++ next_deferred = total_scan; + + /* + * We need to avoid excessive windup on filesystem shrinkers +@@ -310,17 +313,22 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, + + count_vm_events(SLABS_SCANNED, nr_to_scan); + total_scan -= nr_to_scan; ++ scanned += nr_to_scan; + + cond_resched(); + } + ++ if (next_deferred >= scanned) ++ next_deferred -= scanned; ++ else ++ next_deferred = 0; + /* + * move the unused scan count back into the shrinker in a + * manner that handles concurrent updates. If we exhausted the + * scan, there is no need to do an update. 
+ */ +- if (total_scan > 0) +- new_nr = atomic_long_add_return(total_scan, ++ if (next_deferred > 0) ++ new_nr = atomic_long_add_return(next_deferred, + &shrinker->nr_deferred[nid]); + else + new_nr = atomic_long_read(&shrinker->nr_deferred[nid]); +diff --git a/net/can/raw.c b/net/can/raw.c +index 641e1c895123..e10699cc72bd 100644 +--- a/net/can/raw.c ++++ b/net/can/raw.c +@@ -470,6 +470,9 @@ static int raw_setsockopt(struct socket *sock, int level, int optname, + if (optlen % sizeof(struct can_filter) != 0) + return -EINVAL; + ++ if (optlen > CAN_RAW_FILTER_MAX * sizeof(struct can_filter)) ++ return -EINVAL; ++ + count = optlen / sizeof(struct can_filter); + + if (count > 1) { +diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c +index 469f3138d0f6..ecdf164c80fe 100644 +--- a/net/ceph/messenger.c ++++ b/net/ceph/messenger.c +@@ -1972,6 +1972,19 @@ static int process_connect(struct ceph_connection *con) + + dout("process_connect on %p tag %d\n", con, (int)con->in_tag); + ++ if (con->auth_reply_buf) { ++ /* ++ * Any connection that defines ->get_authorizer() ++ * should also define ->verify_authorizer_reply(). ++ * See get_connect_authorizer(). ++ */ ++ ret = con->ops->verify_authorizer_reply(con, 0); ++ if (ret < 0) { ++ con->error_msg = "bad authorize reply"; ++ return ret; ++ } ++ } ++ + switch (con->in_reply.tag) { + case CEPH_MSGR_TAG_FEATURES: + pr_err("%s%lld %s feature set mismatch," +diff --git a/net/core/dev.c b/net/core/dev.c +index fa6d9a47f71f..6b0ddf661f92 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3969,7 +3969,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb) + pinfo->nr_frags && + !PageHighMem(skb_frag_page(frag0))) { + NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0); +- NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0); ++ NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int, ++ skb_frag_size(frag0), ++ skb->end - skb->tail); + } + } + +diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c +index f27d126239b1..5b40f7319504 100644 +--- a/net/core/drop_monitor.c ++++ b/net/core/drop_monitor.c +@@ -80,6 +80,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) + struct nlattr *nla; + struct sk_buff *skb; + unsigned long flags; ++ void *msg_header; + + al = sizeof(struct net_dm_alert_msg); + al += dm_hit_limit * sizeof(struct net_dm_drop_point); +@@ -87,21 +88,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data) + + skb = genlmsg_new(al, GFP_KERNEL); + +- if (skb) { +- genlmsg_put(skb, 0, 0, &net_drop_monitor_family, +- 0, NET_DM_CMD_ALERT); +- nla = nla_reserve(skb, NLA_UNSPEC, +- sizeof(struct net_dm_alert_msg)); +- msg = nla_data(nla); +- memset(msg, 0, al); +- } else { +- mod_timer(&data->send_timer, jiffies + HZ / 10); ++ if (!skb) ++ goto err; ++ ++ msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family, ++ 0, NET_DM_CMD_ALERT); ++ if (!msg_header) { ++ nlmsg_free(skb); ++ skb = NULL; ++ goto err; ++ } ++ nla = nla_reserve(skb, NLA_UNSPEC, ++ sizeof(struct net_dm_alert_msg)); ++ if (!nla) { ++ nlmsg_free(skb); ++ skb = NULL; ++ goto err; + } ++ msg = nla_data(nla); ++ memset(msg, 0, al); ++ goto out; + ++err: ++ mod_timer(&data->send_timer, jiffies + HZ / 10); ++out: + spin_lock_irqsave(&data->lock, flags); + swap(data->skb, skb); + spin_unlock_irqrestore(&data->lock, flags); + ++ if (skb) { ++ struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data; ++ struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh); ++ ++ genlmsg_end(skb, genlmsg_data(gnlh)); ++ } ++ + return skb; + } + 
+diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c +index 931bc8d6d8ee..38ab073783e2 100644 +--- a/net/ipv4/igmp.c ++++ b/net/ipv4/igmp.c +@@ -221,9 +221,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay) + static void igmp_gq_start_timer(struct in_device *in_dev) + { + int tv = net_random() % in_dev->mr_maxdelay; ++ unsigned long exp = jiffies + tv + 2; ++ ++ if (in_dev->mr_gq_running && ++ time_after_eq(exp, (in_dev->mr_gq_timer).expires)) ++ return; + + in_dev->mr_gq_running = 1; +- if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2)) ++ if (!mod_timer(&in_dev->mr_gq_timer, exp)) + in_dev_hold(in_dev); + } + +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c +index d82de7228100..1a6ef4c8cd8b 100644 +--- a/net/ipv6/ip6_offload.c ++++ b/net/ipv6/ip6_offload.c +@@ -177,6 +177,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, + ops = rcu_dereference(inet6_offloads[proto]); + if (!ops || !ops->callbacks.gro_receive) { + __pskb_pull(skb, skb_gro_offset(skb)); ++ skb_gro_frag0_invalidate(skb); + proto = ipv6_gso_pull_exthdrs(skb, proto); + skb_gro_pull(skb, -skb_transport_offset(skb)); + skb_reset_transport_header(skb); +diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c +index c4e69763c602..c2afb29dc1d7 100644 +--- a/net/ipv6/raw.c ++++ b/net/ipv6/raw.c +@@ -585,8 +585,11 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, + } + + offset += skb_transport_offset(skb); +- if (skb_copy_bits(skb, offset, &csum, 2)) +- BUG(); ++ err = skb_copy_bits(skb, offset, &csum, 2); ++ if (err < 0) { ++ ip6_flush_pending_frames(sk); ++ goto out; ++ } + + /* in case cksum was not initialized */ + if (unlikely(csum)) +diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c +index 2ea40d1877a6..042e5d839623 100644 +--- a/net/sched/cls_api.c ++++ b/net/sched/cls_api.c +@@ -136,12 +136,14 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n) + unsigned long cl; + unsigned long fh; + int err; +- int tp_created = 0; ++ int tp_created; + + if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN)) + return -EPERM; + + replay: ++ tp_created = 0; ++ + err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL); + if (err < 0) + return err; +diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c +index 9d7e6097ef5b..6d0531a2a5c9 100644 +--- a/net/sunrpc/auth_gss/svcauth_gss.c ++++ b/net/sunrpc/auth_gss/svcauth_gss.c +@@ -1485,7 +1485,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) + case RPC_GSS_PROC_DESTROY: + if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq)) + goto auth_err; +- rsci->h.expiry_time = get_seconds(); ++ rsci->h.expiry_time = seconds_since_boot(); + set_bit(CACHE_NEGATIVE, &rsci->h.flags); + if (resv->iov_len + 4 > PAGE_SIZE) + goto drop; +diff --git a/scripts/kconfig/nconf.gui.c b/scripts/kconfig/nconf.gui.c +index 8275f0e55106..4b2f44c20caf 100644 +--- a/scripts/kconfig/nconf.gui.c ++++ b/scripts/kconfig/nconf.gui.c +@@ -364,12 +364,14 @@ int dialog_inputbox(WINDOW *main_window, + WINDOW *prompt_win; + WINDOW *form_win; + PANEL *panel; +- int i, x, y; ++ int i, x, y, lines, columns, win_lines, win_cols; + int res = -1; + int cursor_position = strlen(init); + int cursor_form_win; + char *result = *resultp; + ++ getmaxyx(stdscr, lines, columns); ++ + if (strlen(init)+1 > *result_len) { + *result_len = strlen(init)+1; + *resultp = result = realloc(result, *result_len); +@@ -386,14 +388,19 @@ int dialog_inputbox(WINDOW *main_window, + if (title) + prompt_width = 
max(prompt_width, strlen(title)); + ++ win_lines = min(prompt_lines+6, lines-2); ++ win_cols = min(prompt_width+7, columns-2); ++ prompt_lines = max(win_lines-6, 0); ++ prompt_width = max(win_cols-7, 0); ++ + /* place dialog in middle of screen */ +- y = (getmaxy(stdscr)-(prompt_lines+4))/2; +- x = (getmaxx(stdscr)-(prompt_width+4))/2; ++ y = (lines-win_lines)/2; ++ x = (columns-win_cols)/2; + + strncpy(result, init, *result_len); + + /* create the windows */ +- win = newwin(prompt_lines+6, prompt_width+7, y, x); ++ win = newwin(win_lines, win_cols, y, x); + prompt_win = derwin(win, prompt_lines+1, prompt_width, 2, 2); + form_win = derwin(win, 1, prompt_width, prompt_lines+3, 2); + keypad(form_win, TRUE); +diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c +index b30489856741..a798c75c7726 100644 +--- a/security/apparmor/apparmorfs.c ++++ b/security/apparmor/apparmorfs.c +@@ -380,6 +380,8 @@ void __aa_fs_profile_migrate_dents(struct aa_profile *old, + + for (i = 0; i < AAFS_PROF_SIZEOF; i++) { + new->dents[i] = old->dents[i]; ++ if (new->dents[i]) ++ new->dents[i]->d_inode->i_mtime = CURRENT_TIME; + old->dents[i] = NULL; + } + } +diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c +index 031d2d9dd695..47d0f9ecd3bc 100644 +--- a/security/apparmor/audit.c ++++ b/security/apparmor/audit.c +@@ -212,7 +212,8 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, + + if (sa->aad->type == AUDIT_APPARMOR_KILL) + (void)send_sig_info(SIGKILL, NULL, +- sa->aad->tsk ? sa->aad->tsk : current); ++ sa->type == LSM_AUDIT_DATA_TASK && sa->aad->tsk ? ++ sa->aad->tsk : current); + + if (sa->aad->type == AUDIT_APPARMOR_ALLOWED) + return complain_error(sa->aad->error); +diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c +index 0c23888b9816..1c7763766135 100644 +--- a/security/apparmor/domain.c ++++ b/security/apparmor/domain.c +@@ -348,7 +348,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + file_inode(bprm->file)->i_uid, + file_inode(bprm->file)->i_mode + }; +- const char *name = NULL, *target = NULL, *info = NULL; ++ const char *name = NULL, *info = NULL; + int error = cap_bprm_set_creds(bprm); + if (error) + return error; +@@ -403,6 +403,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + if (cxt->onexec) { + struct file_perms cp; + info = "change_profile onexec"; ++ new_profile = aa_get_newest_profile(cxt->onexec); + if (!(perms.allow & AA_MAY_ONEXEC)) + goto audit; + +@@ -417,7 +418,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + + if (!(cp.allow & AA_MAY_ONEXEC)) + goto audit; +- new_profile = aa_get_newest_profile(cxt->onexec); + goto apply; + } + +@@ -437,7 +437,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + new_profile = aa_get_newest_profile(ns->unconfined); + info = "ux fallback"; + } else { +- error = -ENOENT; ++ error = -EACCES; + info = "profile not found"; + /* remove MAY_EXEC to audit as failure */ + perms.allow &= ~MAY_EXEC; +@@ -449,10 +449,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + if (!new_profile) { + error = -ENOMEM; + info = "could not create null profile"; +- } else { ++ } else + error = -EACCES; +- target = new_profile->base.hname; +- } + perms.xindex |= AA_X_UNSAFE; + } else + /* fail exec */ +@@ -463,7 +461,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + * fail the exec. 
+ */ + if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) { +- aa_put_profile(new_profile); + error = -EPERM; + goto cleanup; + } +@@ -478,10 +475,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + + if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { + error = may_change_ptraced_domain(current, new_profile); +- if (error) { +- aa_put_profile(new_profile); ++ if (error) + goto audit; +- } + } + + /* Determine if secure exec is needed. +@@ -502,7 +497,6 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) + bprm->unsafe |= AA_SECURE_X_NEEDED; + } + apply: +- target = new_profile->base.hname; + /* when transitioning profiles clear unsafe personality bits */ + bprm->per_clear |= PER_CLEAR_ON_SETID; + +@@ -510,15 +504,19 @@ x_clear: + aa_put_profile(cxt->profile); + /* transfer new profile reference will be released when cxt is freed */ + cxt->profile = new_profile; ++ new_profile = NULL; + + /* clear out all temporary/transitional state from the context */ + aa_clear_task_cxt_trans(cxt); + + audit: + error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC, +- name, target, cond.uid, info, error); ++ name, ++ new_profile ? new_profile->base.hname : NULL, ++ cond.uid, info, error); + + cleanup: ++ aa_put_profile(new_profile); + aa_put_profile(profile); + kfree(buffer); + +diff --git a/security/apparmor/file.c b/security/apparmor/file.c +index fdaa50cb1876..a4f7f1a5a798 100644 +--- a/security/apparmor/file.c ++++ b/security/apparmor/file.c +@@ -110,7 +110,8 @@ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms, + int type = AUDIT_APPARMOR_AUTO; + struct common_audit_data sa; + struct apparmor_audit_data aad = {0,}; +- sa.type = LSM_AUDIT_DATA_NONE; ++ sa.type = LSM_AUDIT_DATA_TASK; ++ sa.u.tsk = NULL; + sa.aad = &aad; + aad.op = op, + aad.fs.request = request; +diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h +index 001c43aa0406..a1c04fe86790 100644 +--- a/security/apparmor/include/match.h ++++ b/security/apparmor/include/match.h +@@ -62,6 +62,7 @@ struct table_set_header { + #define YYTD_ID_ACCEPT2 6 + #define YYTD_ID_NXT 7 + #define YYTD_ID_TSIZE 8 ++#define YYTD_ID_MAX 8 + + #define YYTD_DATA8 1 + #define YYTD_DATA16 2 +diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h +index c28b0f20ab53..52275f040a5f 100644 +--- a/security/apparmor/include/policy.h ++++ b/security/apparmor/include/policy.h +@@ -403,6 +403,8 @@ static inline int AUDIT_MODE(struct aa_profile *profile) + return profile->audit; + } + ++bool policy_view_capable(void); ++bool policy_admin_capable(void); + bool aa_may_manage_policy(int op); + + #endif /* __AA_POLICY_H */ +diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c +index fb99e18123b4..00a92de97c82 100644 +--- a/security/apparmor/lsm.c ++++ b/security/apparmor/lsm.c +@@ -762,51 +762,49 @@ __setup("apparmor=", apparmor_enabled_setup); + /* set global flag turning off the ability to load policy */ + static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; +- if (aa_g_lock_policy) +- return -EACCES; + return param_set_bool(val, kp); + } + + static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_view_capable()) + return -EPERM; + return param_get_bool(buffer, kp); + } + + static int param_set_aabool(const char *val, const struct kernel_param *kp) + { +- if 
(!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + return param_set_bool(val, kp); + } + + static int param_get_aabool(char *buffer, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_view_capable()) + return -EPERM; + return param_get_bool(buffer, kp); + } + + static int param_set_aauint(const char *val, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + return param_set_uint(val, kp); + } + + static int param_get_aauint(char *buffer, const struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_view_capable()) + return -EPERM; + return param_get_uint(buffer, kp); + } + + static int param_get_audit(char *buffer, struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_view_capable()) + return -EPERM; + + if (!apparmor_enabled) +@@ -818,7 +816,7 @@ static int param_get_audit(char *buffer, struct kernel_param *kp) + static int param_set_audit(const char *val, struct kernel_param *kp) + { + int i; +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + + if (!apparmor_enabled) +@@ -839,7 +837,7 @@ static int param_set_audit(const char *val, struct kernel_param *kp) + + static int param_get_mode(char *buffer, struct kernel_param *kp) + { +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + + if (!apparmor_enabled) +@@ -851,7 +849,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp) + static int param_set_mode(const char *val, struct kernel_param *kp) + { + int i; +- if (!capable(CAP_MAC_ADMIN)) ++ if (!policy_admin_capable()) + return -EPERM; + + if (!apparmor_enabled) +diff --git a/security/apparmor/match.c b/security/apparmor/match.c +index 727eb4200d5c..3f900fcca8fb 100644 +--- a/security/apparmor/match.c ++++ b/security/apparmor/match.c +@@ -47,6 +47,8 @@ static struct table_header *unpack_table(char *blob, size_t bsize) + * it every time we use td_id as an index + */ + th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1; ++ if (th.td_id > YYTD_ID_MAX) ++ goto out; + th.td_flags = be16_to_cpu(*(u16 *) (blob + 2)); + th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8)); + blob += sizeof(struct table_header); +@@ -61,7 +63,9 @@ static struct table_header *unpack_table(char *blob, size_t bsize) + + table = kvzalloc(tsize); + if (table) { +- *table = th; ++ table->td_id = th.td_id; ++ table->td_flags = th.td_flags; ++ table->td_lolen = th.td_lolen; + if (th.td_flags == YYTD_DATA8) + UNPACK_ARRAY(table->td_data, blob, th.td_lolen, + u8, byte_to_byte); +@@ -73,14 +77,14 @@ static struct table_header *unpack_table(char *blob, size_t bsize) + u32, be32_to_cpu); + else + goto fail; ++ /* if table was vmalloced make sure the page tables are synced ++ * before it is used, as it goes live to all cpus. ++ */ ++ if (is_vmalloc_addr(table)) ++ vm_unmap_aliases(); + } + + out: +- /* if table was vmalloced make sure the page tables are synced +- * before it is used, as it goes live to all cpus. 
+- */ +- if (is_vmalloc_addr(table)) +- vm_unmap_aliases(); + return table; + fail: + kvfree(table); +diff --git a/security/apparmor/path.c b/security/apparmor/path.c +index 35b394a75d76..5505e0563bc8 100644 +--- a/security/apparmor/path.c ++++ b/security/apparmor/path.c +@@ -25,7 +25,6 @@ + #include "include/path.h" + #include "include/policy.h" + +- + /* modified from dcache.c */ + static int prepend(char **buffer, int buflen, const char *str, int namelen) + { +@@ -39,6 +38,38 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen) + + #define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT) + ++/* If the path is not connected to the expected root, ++ * check if it is a sysctl and handle specially else remove any ++ * leading / that __d_path may have returned. ++ * Unless ++ * specifically directed to connect the path, ++ * OR ++ * if in a chroot and doing chroot relative paths and the path ++ * resolves to the namespace root (would be connected outside ++ * of chroot) and specifically directed to connect paths to ++ * namespace root. ++ */ ++static int disconnect(const struct path *path, char *buf, char **name, ++ int flags) ++{ ++ int error = 0; ++ ++ if (!(flags & PATH_CONNECT_PATH) && ++ !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && ++ our_mnt(path->mnt))) { ++ /* disconnected path, don't return pathname starting ++ * with '/' ++ */ ++ error = -EACCES; ++ if (**name == '/') ++ *name = *name + 1; ++ } else if (**name != '/') ++ /* CONNECT_PATH with missing root */ ++ error = prepend(name, *name - buf, "/", 1); ++ ++ return error; ++} ++ + /** + * d_namespace_path - lookup a name associated with a given path + * @path: path to lookup (NOT NULL) +@@ -74,7 +105,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, + * control instead of hard coded /proc + */ + return prepend(name, *name - buf, "/proc", 5); +- } ++ } else ++ return disconnect(path, buf, name, flags); + return 0; + } + +@@ -120,29 +152,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, + goto out; + } + +- /* If the path is not connected to the expected root, +- * check if it is a sysctl and handle specially else remove any +- * leading / that __d_path may have returned. +- * Unless +- * specifically directed to connect the path, +- * OR +- * if in a chroot and doing chroot relative paths and the path +- * resolves to the namespace root (would be connected outside +- * of chroot) and specifically directed to connect paths to +- * namespace root. 
+- */ +- if (!connected) { +- if (!(flags & PATH_CONNECT_PATH) && +- !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && +- our_mnt(path->mnt))) { +- /* disconnected path, don't return pathname starting +- * with '/' +- */ +- error = -EACCES; +- if (*res == '/') +- *name = res + 1; +- } +- } ++ if (!connected) ++ error = disconnect(path, buf, name, flags); + + out: + return error; +diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c +index 705c2879d3a9..179e68d7dc5f 100644 +--- a/security/apparmor/policy.c ++++ b/security/apparmor/policy.c +@@ -766,7 +766,9 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name) + struct aa_profile *profile; + + rcu_read_lock(); +- profile = aa_get_profile(__find_child(&parent->base.profiles, name)); ++ do { ++ profile = __find_child(&parent->base.profiles, name); ++ } while (profile && !aa_get_profile_not0(profile)); + rcu_read_unlock(); + + /* refcount released by caller */ +@@ -916,6 +918,22 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info, + &sa, NULL); + } + ++bool policy_view_capable(void) ++{ ++ struct user_namespace *user_ns = current_user_ns(); ++ bool response = false; ++ ++ if (ns_capable(user_ns, CAP_MAC_ADMIN)) ++ response = true; ++ ++ return response; ++} ++ ++bool policy_admin_capable(void) ++{ ++ return policy_view_capable() && !aa_g_lock_policy; ++} ++ + /** + * aa_may_manage_policy - can the current task manage policy + * @op: the policy manipulation operation being done +@@ -930,7 +948,7 @@ bool aa_may_manage_policy(int op) + return 0; + } + +- if (!capable(CAP_MAC_ADMIN)) { ++ if (!policy_admin_capable()) { + audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES); + return 0; + } +@@ -1067,7 +1085,7 @@ static int __lookup_replace(struct aa_namespace *ns, const char *hname, + */ + ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) + { +- const char *ns_name, *name = NULL, *info = NULL; ++ const char *ns_name, *info = NULL; + struct aa_namespace *ns = NULL; + struct aa_load_ent *ent, *tmp; + int op = OP_PROF_REPL; +@@ -1082,18 +1100,15 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) + /* released below */ + ns = aa_prepare_namespace(ns_name); + if (!ns) { +- info = "failed to prepare namespace"; +- error = -ENOMEM; +- name = ns_name; +- goto fail; ++ error = audit_policy(op, GFP_KERNEL, ns_name, ++ "failed to prepare namespace", -ENOMEM); ++ goto free; + } + + mutex_lock(&ns->lock); + /* setup parent and ns info */ + list_for_each_entry(ent, &lh, list) { + struct aa_policy *policy; +- +- name = ent->new->base.hname; + error = __lookup_replace(ns, ent->new->base.hname, noreplace, + &ent->old, &info); + if (error) +@@ -1121,7 +1136,6 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) + if (!p) { + error = -ENOENT; + info = "parent does not exist"; +- name = ent->new->base.hname; + goto fail_lock; + } + rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); +@@ -1163,7 +1177,7 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) + list_del_init(&ent->list); + op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL; + +- audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error); ++ audit_policy(op, GFP_ATOMIC, ent->new->base.hname, NULL, error); + + if (ent->old) { + __replace_profile(ent->old, ent->new, 1); +@@ -1187,14 +1201,14 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) + /* parent replaced in this atomic set? 
*/ + if (newest != parent) { + aa_get_profile(newest); +- aa_put_profile(parent); + rcu_assign_pointer(ent->new->parent, newest); +- } else +- aa_put_profile(newest); ++ aa_put_profile(parent); ++ } + /* aafs interface uses replacedby */ + rcu_assign_pointer(ent->new->replacedby->profile, + aa_get_profile(ent->new)); +- __list_add_profile(&parent->base.profiles, ent->new); ++ __list_add_profile(&newest->base.profiles, ent->new); ++ aa_put_profile(newest); + } else { + /* aafs interface uses replacedby */ + rcu_assign_pointer(ent->new->replacedby->profile, +@@ -1214,9 +1228,22 @@ out: + + fail_lock: + mutex_unlock(&ns->lock); +-fail: +- error = audit_policy(op, GFP_KERNEL, name, info, error); + ++ /* audit cause of failure */ ++ op = (!ent->old) ? OP_PROF_LOAD : OP_PROF_REPL; ++ audit_policy(op, GFP_KERNEL, ent->new->base.hname, info, error); ++ /* audit status that rest of profiles in the atomic set failed too */ ++ info = "valid profile in failed atomic policy load"; ++ list_for_each_entry(tmp, &lh, list) { ++ if (tmp == ent) { ++ info = "unchecked profile in failed atomic policy load"; ++ /* skip entry that caused failure */ ++ continue; ++ } ++ op = (!ent->old) ? OP_PROF_LOAD : OP_PROF_REPL; ++ audit_policy(op, GFP_KERNEL, tmp->new->base.hname, info, error); ++ } ++free: + list_for_each_entry_safe(ent, tmp, &lh, list) { + list_del_init(&ent->list); + aa_load_ent_free(ent); +diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c +index a689f10930b5..dac2121bc873 100644 +--- a/security/apparmor/policy_unpack.c ++++ b/security/apparmor/policy_unpack.c +@@ -583,6 +583,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e) + error = PTR_ERR(profile->policy.dfa); + profile->policy.dfa = NULL; + goto fail; ++ } else if (!profile->policy.dfa) { ++ error = -EPROTO; ++ goto fail; + } + if (!unpack_u32(e, &profile->policy.start[0], "start")) + /* default start state */ +@@ -676,7 +679,7 @@ static bool verify_xindex(int xindex, int table_size) + int index, xtype; + xtype = xindex & AA_X_TYPE_MASK; + index = xindex & AA_X_INDEX_MASK; +- if (xtype == AA_X_TABLE && index > table_size) ++ if (xtype == AA_X_TABLE && index >= table_size) + return 0; + return 1; + } +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index c036e60c34fe..63a335dfd629 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -3234,6 +3234,7 @@ enum { + CXT_FIXUP_HEADPHONE_MIC, + CXT_FIXUP_GPIO1, + CXT_FIXUP_ASPIRE_DMIC, ++ CXT_FIXUP_HP_GATE_MIC, + }; + + static void cxt_fixup_stereo_dmic(struct hda_codec *codec, +@@ -3310,6 +3311,17 @@ static void cxt_fixup_headphone_mic(struct hda_codec *codec, + } + + ++static void cxt_fixup_hp_gate_mic_jack(struct hda_codec *codec, ++ const struct hda_fixup *fix, ++ int action) ++{ ++ /* the mic pin (0x19) doesn't give an unsolicited event; ++ * probe the mic pin together with the headphone pin (0x16) ++ */ ++ if (action == HDA_FIXUP_ACT_PROBE) ++ snd_hda_jack_set_gating_jack(codec, 0x19, 0x16); ++} ++ + /* ThinkPad X200 & co with cxt5051 */ + static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = { + { 0x16, 0x042140ff }, /* HP (seq# overridden) */ +@@ -3403,6 +3415,10 @@ static const struct hda_fixup cxt_fixups[] = { + .chained = true, + .chain_id = CXT_FIXUP_GPIO1, + }, ++ [CXT_FIXUP_HP_GATE_MIC] = { ++ .type = HDA_FIXUP_FUNC, ++ .v.func = cxt_fixup_hp_gate_mic_jack, ++ }, + }; + + static const struct snd_pci_quirk cxt5051_fixups[] = { +@@ -3414,6 +3430,7 @@ static const struct 
snd_pci_quirk cxt5051_fixups[] = { + static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), + SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), ++ SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC), + SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), + SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), + SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 06e80327567c..8b816bf65405 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -2194,6 +2194,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { + SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC), + SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601), + SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS), ++ SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3), + SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT), + SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP), + SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP), +@@ -4982,6 +4983,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), + SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP), + SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP), ++ SND_PCI_QUIRK(0x1043, 0x1963, "ASUS X71SL", ALC662_FIXUP_ASUS_MODE8), + SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_CHMAP), + SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT), + SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2), +diff --git a/sound/usb/card.c b/sound/usb/card.c +index 96a09226be7d..96a429945e3a 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -205,7 +205,6 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int + if (! snd_usb_parse_audio_interface(chip, interface)) { + usb_set_interface(dev, interface, 0); /* reset the current interface */ + usb_driver_claim_interface(&usb_audio_driver, iface, (void *)-1L); +- return -EINVAL; + } + + return 0; +diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c +index c21a3df9a0df..d4d036fca6cb 100644 +--- a/sound/usb/hiface/pcm.c ++++ b/sound/usb/hiface/pcm.c +@@ -445,6 +445,8 @@ static int hiface_pcm_prepare(struct snd_pcm_substream *alsa_sub) + + mutex_lock(&rt->stream_mutex); + ++ hiface_pcm_stream_stop(rt); ++ + sub->dma_off = 0; + sub->period_off = 0; + +diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c +index 86f46b46f214..afcaafce643c 100644 +--- a/sound/usb/mixer.c ++++ b/sound/usb/mixer.c +@@ -893,9 +893,10 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, + case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ + case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */ + case USB_ID(0x046d, 0x0991): ++ case USB_ID(0x046d, 0x09a2): /* QuickCam Communicate Deluxe/S7500 */ + /* Most audio usb devices lie about volume resolution. + * Most Logitech webcams have res = 384. 
+- * Proboly there is some logitech magic behind this number --fishor ++ * Probably there is some logitech magic behind this number --fishor + */ + if (!strcmp(kctl->id.name, "Mic Capture Volume")) { + snd_printk(KERN_INFO +diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c +index 95199e4eea97..f928bfc4852f 100644 +--- a/tools/perf/util/trace-event-scripting.c ++++ b/tools/perf/util/trace-event-scripting.c +@@ -91,7 +91,8 @@ static void register_python_scripting(struct scripting_ops *scripting_ops) + if (err) + die("error registering py script extension"); + +- scripting_context = malloc(sizeof(struct scripting_context)); ++ if (scripting_context == NULL) ++ scripting_context = malloc(sizeof(*scripting_context)); + } + + #ifdef NO_LIBPYTHON +@@ -154,7 +155,8 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops) + if (err) + die("error registering pl script extension"); + +- scripting_context = malloc(sizeof(struct scripting_context)); ++ if (scripting_context == NULL) ++ scripting_context = malloc(sizeof(*scripting_context)); + } + + #ifdef NO_LIBPERL +diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests +index c09a682df56a..16058bbea7a8 100644 +--- a/tools/testing/selftests/net/run_netsocktests ++++ b/tools/testing/selftests/net/run_netsocktests +@@ -1,4 +1,4 @@ +-#!/bin/bash ++#!/bin/sh + + echo "--------------------" + echo "running socket test" |