author | Mike Pagano <mpagano@gentoo.org> | 2015-09-29 13:50:31 -0400
---|---|---
committer | Mike Pagano <mpagano@gentoo.org> | 2015-09-29 13:50:31 -0400
commit | a246795e14884680031e6838755d88dfa0ce1790 (patch) |
tree | 05437298d272dff8b5b5378ed0071266a85edf3a |
parent | dm crypt: constrain crypt device's max_segment_size to PAGE_SIZE. See bug #56... (diff) |
download | linux-patches-a246795e14884680031e6838755d88dfa0ce1790.tar.gz linux-patches-a246795e14884680031e6838755d88dfa0ce1790.tar.bz2 linux-patches-a246795e14884680031e6838755d88dfa0ce1790.zip |
Linux patch 4.1.9 (tag: 4.1-13)
-rw-r--r-- | 0000_README | 4
-rw-r--r-- | 1008_linux-4.1.9.patch | 5955
2 files changed, 5959 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 4a96d2ef..46b8cb0f 100644 --- a/0000_README +++ b/0000_README @@ -75,6 +75,10 @@ Patch: 1007_linux-4.1.8.patch From: http://www.kernel.org Desc: Linux 4.1.8 +Patch: 1008_linux-4.1.9.patch +From: http://www.kernel.org +Desc: Linux 4.1.9 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1008_linux-4.1.9.patch b/1008_linux-4.1.9.patch new file mode 100644 index 00000000..000c373f --- /dev/null +++ b/1008_linux-4.1.9.patch @@ -0,0 +1,5955 @@ +diff --git a/Makefile b/Makefile +index dbf3baa5fabb..e071176b2ce6 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 1 +-SUBLEVEL = 8 ++SUBLEVEL = 9 + EXTRAVERSION = + NAME = Series 4800 + +diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c +index bd245d34952d..a0765e7ed6c7 100644 +--- a/arch/arm/boot/compressed/decompress.c ++++ b/arch/arm/boot/compressed/decompress.c +@@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2); + + int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) + { +- return decompress(input, len, NULL, NULL, output, NULL, error); ++ return __decompress(input, len, NULL, NULL, output, 0, NULL, error); + } +diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c +index d9631ecddd56..d6223cbcb661 100644 +--- a/arch/arm/kvm/arm.c ++++ b/arch/arm/kvm/arm.c +@@ -450,7 +450,7 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) + * Map the VGIC hardware resources before running a vcpu the first + * time on this VM. + */ +- if (unlikely(!vgic_ready(kvm))) { ++ if (unlikely(irqchip_in_kernel(kvm) && !vgic_ready(kvm))) { + ret = kvm_vgic_map_resources(kvm); + if (ret) + return ret; +diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig +index 7796af4b1d6f..6f0a3b41b009 100644 +--- a/arch/arm64/Kconfig ++++ b/arch/arm64/Kconfig +@@ -101,6 +101,10 @@ config NO_IOPORT_MAP + config STACKTRACE_SUPPORT + def_bool y + ++config ILLEGAL_POINTER_VALUE ++ hex ++ default 0xdead000000000000 ++ + config LOCKDEP_SUPPORT + def_bool y + +@@ -409,6 +413,22 @@ config ARM64_ERRATUM_845719 + + If unsure, say Y. + ++config ARM64_ERRATUM_843419 ++ bool "Cortex-A53: 843419: A load or store might access an incorrect address" ++ depends on MODULES ++ default y ++ help ++ This option builds kernel modules using the large memory model in ++ order to avoid the use of the ADRP instruction, which can cause ++ a subsequent memory access to use an incorrect address on Cortex-A53 ++ parts up to r0p4. ++ ++ Note that the kernel itself must be linked with a version of ld ++ which fixes potentially affected ADRP instructions through the ++ use of veneers. ++ ++ If unsure, say Y. 
++ + endmenu + + +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile +index 4d2a925998f9..81151663ef38 100644 +--- a/arch/arm64/Makefile ++++ b/arch/arm64/Makefile +@@ -30,6 +30,10 @@ endif + + CHECKFLAGS += -D__aarch64__ + ++ifeq ($(CONFIG_ARM64_ERRATUM_843419), y) ++CFLAGS_MODULE += -mcmodel=large ++endif ++ + # Default value + head-y := arch/arm64/kernel/head.o + +diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h +index f800d45ea226..44a59c20e773 100644 +--- a/arch/arm64/include/asm/memory.h ++++ b/arch/arm64/include/asm/memory.h +@@ -114,6 +114,14 @@ extern phys_addr_t memstart_addr; + #define PHYS_OFFSET ({ memstart_addr; }) + + /* ++ * The maximum physical address that the linear direct mapping ++ * of system RAM can cover. (PAGE_OFFSET can be interpreted as ++ * a 2's complement signed quantity and negated to derive the ++ * maximum size of the linear mapping.) ++ */ ++#define MAX_MEMBLOCK_ADDR ({ memstart_addr - PAGE_OFFSET - 1; }) ++ ++/* + * PFNs are used to describe any physical page; this means + * PFN 0 == physical address 0. + * +diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c +index 3dca15634e69..c31e59fe2cb8 100644 +--- a/arch/arm64/kernel/fpsimd.c ++++ b/arch/arm64/kernel/fpsimd.c +@@ -157,6 +157,7 @@ void fpsimd_thread_switch(struct task_struct *next) + void fpsimd_flush_thread(void) + { + memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); ++ fpsimd_flush_task_state(current); + set_thread_flag(TIF_FOREIGN_FPSTATE); + } + +diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S +index 19f915e8f6e0..36aa31ff2c06 100644 +--- a/arch/arm64/kernel/head.S ++++ b/arch/arm64/kernel/head.S +@@ -565,6 +565,11 @@ CPU_LE( movk x0, #0x30d0, lsl #16 ) // Clear EE and E0E on LE systems + msr hstr_el2, xzr // Disable CP15 traps to EL2 + #endif + ++ /* EL2 debug */ ++ mrs x0, pmcr_el0 // Disable debug access traps ++ ubfx x0, x0, #11, #5 // to EL2 and allow access to ++ msr mdcr_el2, x0 // all PMU counters from EL1 ++ + /* Stage-2 translation */ + msr vttbr_el2, xzr + +diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c +index 67bf4107f6ef..876eb8df50bf 100644 +--- a/arch/arm64/kernel/module.c ++++ b/arch/arm64/kernel/module.c +@@ -332,12 +332,14 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, + AARCH64_INSN_IMM_ADR); + break; ++#ifndef CONFIG_ARM64_ERRATUM_843419 + case R_AARCH64_ADR_PREL_PG_HI21_NC: + overflow_check = false; + case R_AARCH64_ADR_PREL_PG_HI21: + ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, + AARCH64_INSN_IMM_ADR); + break; ++#endif + case R_AARCH64_ADD_ABS_LO12_NC: + case R_AARCH64_LDST8_ABS_LO12_NC: + overflow_check = false; +diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c +index c0cff3410166..c58aee062590 100644 +--- a/arch/arm64/kernel/signal32.c ++++ b/arch/arm64/kernel/signal32.c +@@ -212,14 +212,32 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) + + /* + * VFP save/restore code. ++ * ++ * We have to be careful with endianness, since the fpsimd context-switch ++ * code operates on 128-bit (Q) register values whereas the compat ABI ++ * uses an array of 64-bit (D) registers. Consequently, we need to swap ++ * the two halves of each Q register when running on a big-endian CPU. 
+ */ ++union __fpsimd_vreg { ++ __uint128_t raw; ++ struct { ++#ifdef __AARCH64EB__ ++ u64 hi; ++ u64 lo; ++#else ++ u64 lo; ++ u64 hi; ++#endif ++ }; ++}; ++ + static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) + { + struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; + compat_ulong_t magic = VFP_MAGIC; + compat_ulong_t size = VFP_STORAGE_SIZE; + compat_ulong_t fpscr, fpexc; +- int err = 0; ++ int i, err = 0; + + /* + * Save the hardware registers to the fpsimd_state structure. +@@ -235,10 +253,15 @@ static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame) + /* + * Now copy the FP registers. Since the registers are packed, + * we can copy the prefix we want (V0-V15) as it is. +- * FIXME: Won't work if big endian. + */ +- err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs, +- sizeof(frame->ufp.fpregs)); ++ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { ++ union __fpsimd_vreg vreg = { ++ .raw = fpsimd->vregs[i >> 1], ++ }; ++ ++ __put_user_error(vreg.lo, &frame->ufp.fpregs[i], err); ++ __put_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); ++ } + + /* Create an AArch32 fpscr from the fpsr and the fpcr. */ + fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) | +@@ -263,7 +286,7 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) + compat_ulong_t magic = VFP_MAGIC; + compat_ulong_t size = VFP_STORAGE_SIZE; + compat_ulong_t fpscr; +- int err = 0; ++ int i, err = 0; + + __get_user_error(magic, &frame->magic, err); + __get_user_error(size, &frame->size, err); +@@ -273,12 +296,14 @@ static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame) + if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) + return -EINVAL; + +- /* +- * Copy the FP registers into the start of the fpsimd_state. +- * FIXME: Won't work if big endian. +- */ +- err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs, +- sizeof(frame->ufp.fpregs)); ++ /* Copy the FP registers into the start of the fpsimd_state. */ ++ for (i = 0; i < ARRAY_SIZE(frame->ufp.fpregs); i += 2) { ++ union __fpsimd_vreg vreg; ++ ++ __get_user_error(vreg.lo, &frame->ufp.fpregs[i], err); ++ __get_user_error(vreg.hi, &frame->ufp.fpregs[i + 1], err); ++ fpsimd.vregs[i >> 1] = vreg.raw; ++ } + + /* Extract the fpsr and the fpcr from the fpscr */ + __get_user_error(fpscr, &frame->ufp.fpscr, err); +diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S +index 5befd010e232..64f9e60b31da 100644 +--- a/arch/arm64/kvm/hyp.S ++++ b/arch/arm64/kvm/hyp.S +@@ -844,8 +844,6 @@ + mrs x3, cntv_ctl_el0 + and x3, x3, #3 + str w3, [x0, #VCPU_TIMER_CNTV_CTL] +- bic x3, x3, #1 // Clear Enable +- msr cntv_ctl_el0, x3 + + isb + +@@ -853,6 +851,9 @@ + str x3, [x0, #VCPU_TIMER_CNTV_CVAL] + + 1: ++ // Disable the virtual timer ++ msr cntv_ctl_el0, xzr ++ + // Allow physical timer/counter access for the host + mrs x2, cnthctl_el2 + orr x2, x2, #3 +@@ -947,13 +948,15 @@ ENTRY(__kvm_vcpu_run) + // Guest context + add x2, x0, #VCPU_CONTEXT + ++ // We must restore the 32-bit state before the sysregs, thanks ++ // to Cortex-A57 erratum #852523. ++ restore_guest_32bit_state + bl __restore_sysregs + bl __restore_fpsimd + + skip_debug_state x3, 1f + bl __restore_debug + 1: +- restore_guest_32bit_state + restore_guest_regs + + // That's it, no more messing around. 
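(Aside: the compat VFP save/restore hunk above replaces the bulk `__copy_to_user`/`__copy_from_user` of the V registers — flagged "FIXME: Won't work if big endian" — with per-register copies through `union __fpsimd_vreg`, so each 128-bit Q register is split into the two 64-bit D-register halves the AArch32 ABI expects regardless of CPU endianness. Below is a minimal standalone sketch of that union trick, not kernel code: a hypothetical userspace harness assuming a GCC/Clang 64-bit target with `__uint128_t` support.)

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the patch's __fpsimd_vreg: one 128-bit Q register viewed as
 * two 64-bit D-register halves. The #ifdef keeps "lo" mapped to the
 * architecturally low half on both endiannesses. */
union fpsimd_vreg {
	__uint128_t raw;
	struct {
#ifdef __AARCH64EB__	/* big-endian AArch64 */
		uint64_t hi;
		uint64_t lo;
#else
		uint64_t lo;
		uint64_t hi;
#endif
	};
};

int main(void)
{
	union fpsimd_vreg v;

	/* Build a Q-register value whose halves are easy to tell apart. */
	v.raw = ((__uint128_t)0x1111222233334444ULL << 64) |
		0x5555666677778888ULL;

	/* The compat sigframe stores D[2i] = lo, D[2i+1] = hi, which is
	 * the order the patched __put_user_error/__get_user_error loops
	 * copy in. These prints are endian-independent. */
	printf("lo (D[2i])   = 0x%016llx\n", (unsigned long long)v.lo);
	printf("hi (D[2i+1]) = 0x%016llx\n", (unsigned long long)v.hi);
	return 0;
}
```

On a little-endian CPU the struct order matches memory order, so the old bulk copy happened to work; on big-endian the two halves of each Q register sit swapped in memory, which is why the per-element copy through the union is needed.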
+diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c +index 28a09529f206..3a7692745868 100644 +--- a/arch/m32r/boot/compressed/misc.c ++++ b/arch/m32r/boot/compressed/misc.c +@@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data, + free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE; + + puts("\nDecompressing Linux... "); +- decompress(input_data, input_len, NULL, NULL, output_data, NULL, error); ++ __decompress(input_data, input_len, NULL, NULL, output_data, 0, ++ NULL, error); + puts("done.\nBooting the kernel.\n"); + } +diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c +index 54831069a206..080cd53bac36 100644 +--- a/arch/mips/boot/compressed/decompress.c ++++ b/arch/mips/boot/compressed/decompress.c +@@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start) + puts("\n"); + + /* Decompress the kernel with according algorithm */ +- decompress((char *)zimage_start, zimage_size, 0, 0, +- (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error); ++ __decompress((char *)zimage_start, zimage_size, 0, 0, ++ (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error); + + /* FIXME: should we flush cache here? */ + puts("Now, booting the kernel...\n"); +diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c +index 6983fcd48131..2b95e34fa9e8 100644 +--- a/arch/mips/math-emu/cp1emu.c ++++ b/arch/mips/math-emu/cp1emu.c +@@ -1137,7 +1137,7 @@ emul: + break; + + case mfhc_op: +- if (!cpu_has_mips_r2) ++ if (!cpu_has_mips_r2_r6) + goto sigill; + + /* copregister rd -> gpr[rt] */ +@@ -1148,7 +1148,7 @@ emul: + break; + + case mthc_op: +- if (!cpu_has_mips_r2) ++ if (!cpu_has_mips_r2_r6) + goto sigill; + + /* copregister rd <- gpr[rt] */ +@@ -1181,6 +1181,24 @@ emul: + } + break; + ++ case bc1eqz_op: ++ case bc1nez_op: ++ if (!cpu_has_mips_r6 || delay_slot(xcp)) ++ return SIGILL; ++ ++ cond = likely = 0; ++ switch (MIPSInst_RS(ir)) { ++ case bc1eqz_op: ++ if (get_fpr32(¤t->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1) ++ cond = 1; ++ break; ++ case bc1nez_op: ++ if (!(get_fpr32(¤t->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)) ++ cond = 1; ++ break; ++ } ++ goto branch_common; ++ + case bc_op: + if (delay_slot(xcp)) + return SIGILL; +@@ -1207,7 +1225,7 @@ emul: + case bct_op: + break; + } +- ++branch_common: + set_delay_slot(xcp); + if (cond) { + /* +diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c +index f3191db6e2e9..c0eab24f6a9e 100644 +--- a/arch/parisc/kernel/irq.c ++++ b/arch/parisc/kernel/irq.c +@@ -507,8 +507,8 @@ void do_cpu_irq_mask(struct pt_regs *regs) + struct pt_regs *old_regs; + unsigned long eirr_val; + int irq, cpu = smp_processor_id(); +-#ifdef CONFIG_SMP + struct irq_desc *desc; ++#ifdef CONFIG_SMP + cpumask_t dest; + #endif + +@@ -521,8 +521,12 @@ void do_cpu_irq_mask(struct pt_regs *regs) + goto set_out; + irq = eirr_to_irq(eirr_val); + +-#ifdef CONFIG_SMP ++ /* Filter out spurious interrupts, mostly from serial port at bootup */ + desc = irq_to_desc(irq); ++ if (unlikely(!desc->action)) ++ goto set_out; ++ ++#ifdef CONFIG_SMP + cpumask_copy(&dest, desc->irq_data.affinity); + if (irqd_is_per_cpu(&desc->irq_data) && + !cpumask_test_cpu(smp_processor_id(), &dest)) { +diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S +index 7ef22e3387e0..0b8d26d3ba43 100644 +--- a/arch/parisc/kernel/syscall.S ++++ b/arch/parisc/kernel/syscall.S +@@ -821,7 +821,7 @@ cas2_action: + /* 64bit CAS */ + #ifdef CONFIG_64BIT + 19: ldd,ma 0(%sr3,%r26), %r29 +- sub,= %r29, %r25, %r0 ++ 
sub,*= %r29, %r25, %r0 + b,n cas2_end + 20: std,ma %r24, 0(%sr3,%r26) + copy %r0, %r28 +diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile +index 73eddda53b8e..4eec430d8fa8 100644 +--- a/arch/powerpc/boot/Makefile ++++ b/arch/powerpc/boot/Makefile +@@ -28,6 +28,9 @@ BOOTCFLAGS += -m64 + endif + ifdef CONFIG_CPU_BIG_ENDIAN + BOOTCFLAGS += -mbig-endian ++else ++BOOTCFLAGS += -mlittle-endian ++BOOTCFLAGS += $(call cc-option,-mabi=elfv2) + endif + + BOOTAFLAGS := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc +diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h +index 43e6ad424c7f..88d27e3258d2 100644 +--- a/arch/powerpc/include/asm/pgtable-ppc64.h ++++ b/arch/powerpc/include/asm/pgtable-ppc64.h +@@ -135,7 +135,19 @@ + #define pte_iterate_hashed_end() } while(0) + + #ifdef CONFIG_PPC_HAS_HASH_64K +-#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr) ++/* ++ * We expect this to be called only for user addresses or kernel virtual ++ * addresses other than the linear mapping. ++ */ ++#define pte_pagesize_index(mm, addr, pte) \ ++ ({ \ ++ unsigned int psize; \ ++ if (is_kernel_addr(addr)) \ ++ psize = MMU_PAGE_4K; \ ++ else \ ++ psize = get_slice_psize(mm, addr); \ ++ psize; \ ++ }) + #else + #define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K + #endif +diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h +index 7a4ede16b283..b77ef369c0f0 100644 +--- a/arch/powerpc/include/asm/rtas.h ++++ b/arch/powerpc/include/asm/rtas.h +@@ -343,6 +343,7 @@ extern void rtas_power_off(void); + extern void rtas_halt(void); + extern void rtas_os_term(char *str); + extern int rtas_get_sensor(int sensor, int index, int *state); ++extern int rtas_get_sensor_fast(int sensor, int index, int *state); + extern int rtas_get_power_level(int powerdomain, int *level); + extern int rtas_set_power_level(int powerdomain, int level, int *setlevel); + extern bool rtas_indicator_present(int token, int *maxindex); +diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h +index 58abeda64cb7..15cca17cba4b 100644 +--- a/arch/powerpc/include/asm/switch_to.h ++++ b/arch/powerpc/include/asm/switch_to.h +@@ -29,6 +29,7 @@ static inline void save_early_sprs(struct thread_struct *prev) {} + + extern void enable_kernel_fp(void); + extern void enable_kernel_altivec(void); ++extern void enable_kernel_vsx(void); + extern int emulate_altivec(struct pt_regs *); + extern void __giveup_vsx(struct task_struct *); + extern void giveup_vsx(struct task_struct *); +diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c +index 9ee61d15653d..cb565ad0a5b6 100644 +--- a/arch/powerpc/kernel/eeh.c ++++ b/arch/powerpc/kernel/eeh.c +@@ -310,11 +310,26 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity) + if (!(pe->type & EEH_PE_PHB)) { + if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG)) + eeh_pci_enable(pe, EEH_OPT_THAW_MMIO); ++ ++ /* ++ * The config space of some PCI devices can't be accessed ++ * when their PEs are in frozen state. Otherwise, fenced ++ * PHB might be seen. Those PEs are identified with flag ++ * EEH_PE_CFG_RESTRICTED, indicating EEH_PE_CFG_BLOCKED ++ * is set automatically when the PE is put to EEH_PE_ISOLATED. ++ * ++ * Restoring BARs possibly triggers PCI config access in ++ * (OPAL) firmware and then causes fenced PHB. If the ++ * PCI config is blocked with flag EEH_PE_CFG_BLOCKED, it's ++ * pointless to restore BARs and dump config space. 
++ */ + eeh_ops->configure_bridge(pe); +- eeh_pe_restore_bars(pe); ++ if (!(pe->state & EEH_PE_CFG_BLOCKED)) { ++ eeh_pe_restore_bars(pe); + +- pci_regs_buf[0] = 0; +- eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen); ++ pci_regs_buf[0] = 0; ++ eeh_pe_traverse(pe, eeh_dump_pe_log, &loglen); ++ } + } + + eeh_ops->get_log(pe, severity, pci_regs_buf, loglen); +@@ -1118,9 +1133,6 @@ void eeh_add_device_late(struct pci_dev *dev) + return; + } + +- if (eeh_has_flag(EEH_PROBE_MODE_DEV)) +- eeh_ops->probe(pdn, NULL); +- + /* + * The EEH cache might not be removed correctly because of + * unbalanced kref to the device during unplug time, which +@@ -1144,6 +1156,9 @@ void eeh_add_device_late(struct pci_dev *dev) + dev->dev.archdata.edev = NULL; + } + ++ if (eeh_has_flag(EEH_PROBE_MODE_DEV)) ++ eeh_ops->probe(pdn, NULL); ++ + edev->pdev = dev; + dev->dev.archdata.edev = edev; + +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c +index febb50dd5328..0596373cd1c3 100644 +--- a/arch/powerpc/kernel/process.c ++++ b/arch/powerpc/kernel/process.c +@@ -204,8 +204,6 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread); + #endif /* CONFIG_ALTIVEC */ + + #ifdef CONFIG_VSX +-#if 0 +-/* not currently used, but some crazy RAID module might want to later */ + void enable_kernel_vsx(void) + { + WARN_ON(preemptible()); +@@ -220,7 +218,6 @@ void enable_kernel_vsx(void) + #endif /* CONFIG_SMP */ + } + EXPORT_SYMBOL(enable_kernel_vsx); +-#endif + + void giveup_vsx(struct task_struct *tsk) + { +diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c +index 7a488c108410..caffb10e7aa3 100644 +--- a/arch/powerpc/kernel/rtas.c ++++ b/arch/powerpc/kernel/rtas.c +@@ -584,6 +584,23 @@ int rtas_get_sensor(int sensor, int index, int *state) + } + EXPORT_SYMBOL(rtas_get_sensor); + ++int rtas_get_sensor_fast(int sensor, int index, int *state) ++{ ++ int token = rtas_token("get-sensor-state"); ++ int rc; ++ ++ if (token == RTAS_UNKNOWN_SERVICE) ++ return -ENOENT; ++ ++ rc = rtas_call(token, 2, 2, state, sensor, index); ++ WARN_ON(rc == RTAS_BUSY || (rc >= RTAS_EXTENDED_DELAY_MIN && ++ rc <= RTAS_EXTENDED_DELAY_MAX)); ++ ++ if (rc < 0) ++ return rtas_error_rc(rc); ++ return rc; ++} ++ + bool rtas_indicator_present(int token, int *maxindex) + { + int proplen, count, i; +diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c +index 43dafb9d6a46..4d87122cf6a7 100644 +--- a/arch/powerpc/mm/hugepage-hash64.c ++++ b/arch/powerpc/mm/hugepage-hash64.c +@@ -85,7 +85,6 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, + BUG_ON(index >= 4096); + + vpn = hpt_vpn(ea, vsid, ssize); +- hash = hpt_hash(vpn, shift, ssize); + hpte_slot_array = get_hpte_slot_array(pmdp); + if (psize == MMU_PAGE_4K) { + /* +@@ -101,6 +100,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, + valid = hpte_valid(hpte_slot_array, index); + if (valid) { + /* update the hpte bits */ ++ hash = hpt_hash(vpn, shift, ssize); + hidx = hpte_hash_index(hpte_slot_array, index); + if (hidx & _PTEIDX_SECONDARY) + hash = ~hash; +@@ -126,6 +126,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, + if (!valid) { + unsigned long hpte_group; + ++ hash = hpt_hash(vpn, shift, ssize); + /* insert new entry */ + pa = pmd_pfn(__pmd(old_pmd)) << PAGE_SHIFT; + new_pmd |= _PAGE_HASHPTE; +diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c +index 02e4a1745516..3b6647e574b6 100644 +--- 
a/arch/powerpc/platforms/pseries/ras.c ++++ b/arch/powerpc/platforms/pseries/ras.c +@@ -189,7 +189,8 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) + int state; + int critical; + +- status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state); ++ status = rtas_get_sensor_fast(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, ++ &state); + + if (state > 3) + critical = 1; /* Time Critical */ +diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c +index df6a7041922b..e6e8b241d717 100644 +--- a/arch/powerpc/platforms/pseries/setup.c ++++ b/arch/powerpc/platforms/pseries/setup.c +@@ -268,6 +268,11 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act + eeh_dev_init(PCI_DN(np), pci->phb); + } + break; ++ case OF_RECONFIG_DETACH_NODE: ++ pci = PCI_DN(np); ++ if (pci) ++ list_del(&pci->list); ++ break; + default: + err = NOTIFY_DONE; + break; +diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c +index 42506b371b74..4da604ebf6fd 100644 +--- a/arch/s390/boot/compressed/misc.c ++++ b/arch/s390/boot/compressed/misc.c +@@ -167,7 +167,7 @@ unsigned long decompress_kernel(void) + #endif + + puts("Uncompressing Linux... "); +- decompress(input_data, input_len, NULL, NULL, output, NULL, error); ++ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); + puts("Ok, booting the kernel.\n"); + return (unsigned long) output; + } +diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c +index 95470a472d2c..208a9753ab38 100644 +--- a/arch/sh/boot/compressed/misc.c ++++ b/arch/sh/boot/compressed/misc.c +@@ -132,7 +132,7 @@ void decompress_kernel(void) + + puts("Uncompressing Linux... "); + cache_control(CACHE_ENABLE); +- decompress(input_data, input_len, NULL, NULL, output, NULL, error); ++ __decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error); + cache_control(CACHE_DISABLE); + puts("Ok, booting the kernel.\n"); + } +diff --git a/arch/unicore32/boot/compressed/misc.c b/arch/unicore32/boot/compressed/misc.c +index 176d5bda3559..5c65dfee278c 100644 +--- a/arch/unicore32/boot/compressed/misc.c ++++ b/arch/unicore32/boot/compressed/misc.c +@@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start, + output_ptr = get_unaligned_le32(tmp); + + arch_decomp_puts("Uncompressing Linux..."); +- decompress(input_data, input_data_end - input_data, NULL, NULL, +- output_data, NULL, error); ++ __decompress(input_data, input_data_end - input_data, NULL, NULL, ++ output_data, 0, NULL, error); + arch_decomp_puts(" done, booting the kernel.\n"); + return output_ptr; + } +diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c +index a107b935e22f..e28437e0f708 100644 +--- a/arch/x86/boot/compressed/misc.c ++++ b/arch/x86/boot/compressed/misc.c +@@ -424,7 +424,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, + #endif + + debug_putstr("\nDecompressing Linux... "); +- decompress(input_data, input_len, NULL, NULL, output, NULL, error); ++ __decompress(input_data, input_len, NULL, NULL, output, output_len, ++ NULL, error); + parse_elf(output); + /* + * 32-bit always performs relocations. 
64-bit relocations are only +diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c +index c8140e12816a..c23ab1ee3a9a 100644 +--- a/arch/x86/mm/init_32.c ++++ b/arch/x86/mm/init_32.c +@@ -137,6 +137,7 @@ page_table_range_init_count(unsigned long start, unsigned long end) + + vaddr = start; + pgd_idx = pgd_index(vaddr); ++ pmd_idx = pmd_index(vaddr); + + for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) { + for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); +diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c +index b79685e06b70..279c5d674edf 100644 +--- a/block/blk-mq-sysfs.c ++++ b/block/blk-mq-sysfs.c +@@ -141,15 +141,26 @@ static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page) + + static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg) + { +- char *start_page = page; + struct request *rq; ++ int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg); ++ ++ list_for_each_entry(rq, list, queuelist) { ++ const int rq_len = 2 * sizeof(rq) + 2; ++ ++ /* if the output will be truncated */ ++ if (PAGE_SIZE - 1 < len + rq_len) { ++ /* backspacing if it can't hold '\t...\n' */ ++ if (PAGE_SIZE - 1 < len + 5) ++ len -= rq_len; ++ len += snprintf(page + len, PAGE_SIZE - 1 - len, ++ "\t...\n"); ++ break; ++ } ++ len += snprintf(page + len, PAGE_SIZE - 1 - len, ++ "\t%p\n", rq); ++ } + +- page += sprintf(page, "%s:\n", msg); +- +- list_for_each_entry(rq, list, queuelist) +- page += sprintf(page, "\t%p\n", rq); +- +- return page - start_page; ++ return len; + } + + static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page) +diff --git a/drivers/base/node.c b/drivers/base/node.c +index a2aa65b4215d..b10479c87357 100644 +--- a/drivers/base/node.c ++++ b/drivers/base/node.c +@@ -388,6 +388,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) + for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { + int page_nid; + ++ /* ++ * memory block could have several absent sections from start. 
++ * skip pfn range from absent section ++ */ ++ if (!pfn_present(pfn)) { ++ pfn = round_down(pfn + PAGES_PER_SECTION, ++ PAGES_PER_SECTION) - 1; ++ continue; ++ } ++ + page_nid = get_nid_for_pfn(pfn); + if (page_nid < 0) + continue; +diff --git a/drivers/crypto/vmx/aes.c b/drivers/crypto/vmx/aes.c +index ab300ea19434..41f93334cc44 100644 +--- a/drivers/crypto/vmx/aes.c ++++ b/drivers/crypto/vmx/aes.c +@@ -80,6 +80,7 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key, + + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); + pagefault_enable(); +@@ -97,6 +98,7 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + } else { + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + aes_p8_encrypt(src, dst, &ctx->enc_key); + pagefault_enable(); + } +@@ -111,6 +113,7 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) + } else { + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + aes_p8_decrypt(src, dst, &ctx->dec_key); + pagefault_enable(); + } +diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c +index 1a559b7dddb5..c8e7f653e5d3 100644 +--- a/drivers/crypto/vmx/aes_cbc.c ++++ b/drivers/crypto/vmx/aes_cbc.c +@@ -81,6 +81,7 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key, + + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); + pagefault_enable(); +@@ -108,6 +109,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, + } else { + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt(desc, &walk); +@@ -143,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, + } else { + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + + blkcipher_walk_init(&walk, dst, src, nbytes); + ret = blkcipher_walk_virt(desc, &walk); +diff --git a/drivers/crypto/vmx/aes_ctr.c b/drivers/crypto/vmx/aes_ctr.c +index 96dbee4bf4a6..266e708d63df 100644 +--- a/drivers/crypto/vmx/aes_ctr.c ++++ b/drivers/crypto/vmx/aes_ctr.c +@@ -79,6 +79,7 @@ static int p8_aes_ctr_setkey(struct crypto_tfm *tfm, const u8 *key, + + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); + pagefault_enable(); + +@@ -97,6 +98,7 @@ static void p8_aes_ctr_final(struct p8_aes_ctr_ctx *ctx, + + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + aes_p8_encrypt(ctrblk, keystream, &ctx->enc_key); + pagefault_enable(); + +@@ -127,6 +129,7 @@ static int p8_aes_ctr_crypt(struct blkcipher_desc *desc, + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + aes_p8_ctr32_encrypt_blocks(walk.src.virt.addr, walk.dst.virt.addr, + (nbytes & AES_BLOCK_MASK)/AES_BLOCK_SIZE, &ctx->enc_key, walk.iv); + pagefault_enable(); +diff --git a/drivers/crypto/vmx/ghash.c b/drivers/crypto/vmx/ghash.c +index d0ffe277af5c..917b3f09e724 100644 +--- a/drivers/crypto/vmx/ghash.c ++++ b/drivers/crypto/vmx/ghash.c +@@ -116,6 +116,7 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key, + + pagefault_disable(); + 
enable_kernel_altivec(); ++ enable_kernel_vsx(); + enable_kernel_fp(); + gcm_init_p8(ctx->htable, (const u64 *) key); + pagefault_enable(); +@@ -142,6 +143,7 @@ static int p8_ghash_update(struct shash_desc *desc, + GHASH_DIGEST_SIZE - dctx->bytes); + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, + GHASH_DIGEST_SIZE); +@@ -154,6 +156,7 @@ static int p8_ghash_update(struct shash_desc *desc, + if (len) { + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, src, len); + pagefault_enable(); +@@ -182,6 +185,7 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out) + dctx->buffer[i] = 0; + pagefault_disable(); + enable_kernel_altivec(); ++ enable_kernel_vsx(); + enable_kernel_fp(); + gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, + GHASH_DIGEST_SIZE); +diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c +index c097d3a82bda..a9b01bcf7d0a 100644 +--- a/drivers/gpu/drm/radeon/radeon_combios.c ++++ b/drivers/gpu/drm/radeon/radeon_combios.c +@@ -3387,6 +3387,14 @@ void radeon_combios_asic_init(struct drm_device *dev) + rdev->pdev->subsystem_device == 0x30ae) + return; + ++ /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume ++ * - it hangs on resume inside the dynclk 1 table. ++ */ ++ if (rdev->family == CHIP_RS480 && ++ rdev->pdev->subsystem_vendor == 0x103c && ++ rdev->pdev->subsystem_device == 0x280a) ++ return; ++ + /* DYN CLK 1 */ + table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE); + if (table) +diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h +index b716b0815644..bebf11a6622a 100644 +--- a/drivers/infiniband/core/uverbs.h ++++ b/drivers/infiniband/core/uverbs.h +@@ -85,7 +85,7 @@ + */ + + struct ib_uverbs_device { +- struct kref ref; ++ atomic_t refcount; + int num_comp_vectors; + struct completion comp; + struct device *dev; +@@ -94,6 +94,7 @@ struct ib_uverbs_device { + struct cdev cdev; + struct rb_root xrcd_tree; + struct mutex xrcd_tree_mutex; ++ struct kobject kobj; + }; + + struct ib_uverbs_event_file { +diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c +index a9f048990dfc..ccc2494b4ea7 100644 +--- a/drivers/infiniband/core/uverbs_cmd.c ++++ b/drivers/infiniband/core/uverbs_cmd.c +@@ -2244,6 +2244,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, + next->send_flags = user_wr->send_flags; + + if (is_ud) { ++ if (next->opcode != IB_WR_SEND && ++ next->opcode != IB_WR_SEND_WITH_IMM) { ++ ret = -EINVAL; ++ goto out_put; ++ } ++ + next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah, + file->ucontext); + if (!next->wr.ud.ah) { +@@ -2283,9 +2289,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file, + user_wr->wr.atomic.compare_add; + next->wr.atomic.swap = user_wr->wr.atomic.swap; + next->wr.atomic.rkey = user_wr->wr.atomic.rkey; ++ case IB_WR_SEND: + break; + default: +- break; ++ ret = -EINVAL; ++ goto out_put; + } + } + +diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c +index 88cce9bb72fe..09686d49d4c1 100644 +--- a/drivers/infiniband/core/uverbs_main.c ++++ b/drivers/infiniband/core/uverbs_main.c +@@ -129,14 +129,18 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, + static void ib_uverbs_add_one(struct ib_device *device); + static void ib_uverbs_remove_one(struct ib_device 
*device); + +-static void ib_uverbs_release_dev(struct kref *ref) ++static void ib_uverbs_release_dev(struct kobject *kobj) + { + struct ib_uverbs_device *dev = +- container_of(ref, struct ib_uverbs_device, ref); ++ container_of(kobj, struct ib_uverbs_device, kobj); + +- complete(&dev->comp); ++ kfree(dev); + } + ++static struct kobj_type ib_uverbs_dev_ktype = { ++ .release = ib_uverbs_release_dev, ++}; ++ + static void ib_uverbs_release_event_file(struct kref *ref) + { + struct ib_uverbs_event_file *file = +@@ -302,13 +306,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, + return context->device->dealloc_ucontext(context); + } + ++static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev) ++{ ++ complete(&dev->comp); ++} ++ + static void ib_uverbs_release_file(struct kref *ref) + { + struct ib_uverbs_file *file = + container_of(ref, struct ib_uverbs_file, ref); + + module_put(file->device->ib_dev->owner); +- kref_put(&file->device->ref, ib_uverbs_release_dev); ++ if (atomic_dec_and_test(&file->device->refcount)) ++ ib_uverbs_comp_dev(file->device); + + kfree(file); + } +@@ -742,9 +752,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) + int ret; + + dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev); +- if (dev) +- kref_get(&dev->ref); +- else ++ if (!atomic_inc_not_zero(&dev->refcount)) + return -ENXIO; + + if (!try_module_get(dev->ib_dev->owner)) { +@@ -765,6 +773,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp) + mutex_init(&file->mutex); + + filp->private_data = file; ++ kobject_get(&dev->kobj); + + return nonseekable_open(inode, filp); + +@@ -772,13 +781,16 @@ err_module: + module_put(dev->ib_dev->owner); + + err: +- kref_put(&dev->ref, ib_uverbs_release_dev); ++ if (atomic_dec_and_test(&dev->refcount)) ++ ib_uverbs_comp_dev(dev); ++ + return ret; + } + + static int ib_uverbs_close(struct inode *inode, struct file *filp) + { + struct ib_uverbs_file *file = filp->private_data; ++ struct ib_uverbs_device *dev = file->device; + + ib_uverbs_cleanup_ucontext(file, file->ucontext); + +@@ -786,6 +798,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) + kref_put(&file->async_file->ref, ib_uverbs_release_event_file); + + kref_put(&file->ref, ib_uverbs_release_file); ++ kobject_put(&dev->kobj); + + return 0; + } +@@ -881,10 +894,11 @@ static void ib_uverbs_add_one(struct ib_device *device) + if (!uverbs_dev) + return; + +- kref_init(&uverbs_dev->ref); ++ atomic_set(&uverbs_dev->refcount, 1); + init_completion(&uverbs_dev->comp); + uverbs_dev->xrcd_tree = RB_ROOT; + mutex_init(&uverbs_dev->xrcd_tree_mutex); ++ kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype); + + spin_lock(&map_lock); + devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES); +@@ -911,6 +925,7 @@ static void ib_uverbs_add_one(struct ib_device *device) + cdev_init(&uverbs_dev->cdev, NULL); + uverbs_dev->cdev.owner = THIS_MODULE; + uverbs_dev->cdev.ops = device->mmap ? 
&uverbs_mmap_fops : &uverbs_fops; ++ uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj; + kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum); + if (cdev_add(&uverbs_dev->cdev, base, 1)) + goto err_cdev; +@@ -941,9 +956,10 @@ err_cdev: + clear_bit(devnum, overflow_map); + + err: +- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); ++ if (atomic_dec_and_test(&uverbs_dev->refcount)) ++ ib_uverbs_comp_dev(uverbs_dev); + wait_for_completion(&uverbs_dev->comp); +- kfree(uverbs_dev); ++ kobject_put(&uverbs_dev->kobj); + return; + } + +@@ -963,9 +979,10 @@ static void ib_uverbs_remove_one(struct ib_device *device) + else + clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map); + +- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev); ++ if (atomic_dec_and_test(&uverbs_dev->refcount)) ++ ib_uverbs_comp_dev(uverbs_dev); + wait_for_completion(&uverbs_dev->comp); +- kfree(uverbs_dev); ++ kobject_put(&uverbs_dev->kobj); + } + + static char *uverbs_devnode(struct device *dev, umode_t *mode) +diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c +index f50a546224ad..33fdd50123f7 100644 +--- a/drivers/infiniband/hw/mlx4/ah.c ++++ b/drivers/infiniband/hw/mlx4/ah.c +@@ -148,9 +148,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr) + enum rdma_link_layer ll; + + memset(ah_attr, 0, sizeof *ah_attr); +- ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; + ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24; + ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num); ++ if (ll == IB_LINK_LAYER_ETHERNET) ++ ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29; ++ else ++ ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28; ++ + ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? 
be16_to_cpu(ah->av.ib.dlid) : 0; + if (ah->av.ib.stat_rate) + ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET; +diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c +index 0176caa5792c..2857ed89725e 100644 +--- a/drivers/infiniband/hw/mlx4/cq.c ++++ b/drivers/infiniband/hw/mlx4/cq.c +@@ -629,7 +629,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries, + * simulated FLUSH_ERR completions + */ + list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) { +- mlx4_ib_qp_sw_comp(qp, num_entries, wc, npolled, 1); ++ mlx4_ib_qp_sw_comp(qp, num_entries, wc + *npolled, npolled, 1); + if (*npolled >= num_entries) + goto out; + } +diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c +index ed327e6c8fdc..a0559a8af4f4 100644 +--- a/drivers/infiniband/hw/mlx4/mcg.c ++++ b/drivers/infiniband/hw/mlx4/mcg.c +@@ -206,15 +206,16 @@ static int send_mad_to_wire(struct mlx4_ib_demux_ctx *ctx, struct ib_mad *mad) + { + struct mlx4_ib_dev *dev = ctx->dev; + struct ib_ah_attr ah_attr; ++ unsigned long flags; + +- spin_lock(&dev->sm_lock); ++ spin_lock_irqsave(&dev->sm_lock, flags); + if (!dev->sm_ah[ctx->port - 1]) { + /* port is not yet Active, sm_ah not ready */ +- spin_unlock(&dev->sm_lock); ++ spin_unlock_irqrestore(&dev->sm_lock, flags); + return -EAGAIN; + } + mlx4_ib_query_ah(dev->sm_ah[ctx->port - 1], &ah_attr); +- spin_unlock(&dev->sm_lock); ++ spin_unlock_irqrestore(&dev->sm_lock, flags); + return mlx4_ib_send_to_wire(dev, mlx4_master_func_num(dev->dev), + ctx->port, IB_QPT_GSI, 0, 1, IB_QP1_QKEY, + &ah_attr, NULL, mad); +diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c +index 6797108ce873..69fb5ba94d0f 100644 +--- a/drivers/infiniband/hw/mlx4/sysfs.c ++++ b/drivers/infiniband/hw/mlx4/sysfs.c +@@ -640,6 +640,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) + struct mlx4_port *p; + int i; + int ret; ++ int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port_num) == ++ IB_LINK_LAYER_ETHERNET; + + p = kzalloc(sizeof *p, GFP_KERNEL); + if (!p) +@@ -657,7 +659,8 @@ static int add_port(struct mlx4_ib_dev *dev, int port_num, int slave) + + p->pkey_group.name = "pkey_idx"; + p->pkey_group.attrs = +- alloc_group_attrs(show_port_pkey, store_port_pkey, ++ alloc_group_attrs(show_port_pkey, ++ is_eth ? NULL : store_port_pkey, + dev->dev->caps.pkey_table_len[port_num]); + if (!p->pkey_group.attrs) { + ret = -ENOMEM; +diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c +index 71c593583864..0c52f078759c 100644 +--- a/drivers/infiniband/hw/mlx5/mr.c ++++ b/drivers/infiniband/hw/mlx5/mr.c +@@ -1119,19 +1119,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, + return &mr->ibmr; + + error: +- /* +- * Destroy the umem *before* destroying the MR, to ensure we +- * will not have any in-flight notifiers when destroying the +- * MR. +- * +- * As the MR is completely invalid to begin with, and this +- * error path is only taken if we can't push the mr entry into +- * the pagefault tree, this is safe. +- */ +- + ib_umem_release(umem); +- /* Kill the MR, and return an error code. 
*/ +- clean_mr(mr); + return ERR_PTR(err); + } + +diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c +index ad843c786e72..5afaa218508d 100644 +--- a/drivers/infiniband/hw/qib/qib_keys.c ++++ b/drivers/infiniband/hw/qib/qib_keys.c +@@ -86,6 +86,10 @@ int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) + * unrestricted LKEY. + */ + rkt->gen++; ++ /* ++ * bits are capped in qib_verbs.c to insure enough bits ++ * for generation number ++ */ + mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | + ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen) + << 8); +diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c +index 4a3599890ea5..9dd5d9a0556b 100644 +--- a/drivers/infiniband/hw/qib/qib_verbs.c ++++ b/drivers/infiniband/hw/qib/qib_verbs.c +@@ -40,6 +40,7 @@ + #include <linux/rculist.h> + #include <linux/mm.h> + #include <linux/random.h> ++#include <linux/vmalloc.h> + + #include "qib.h" + #include "qib_common.h" +@@ -2089,10 +2090,16 @@ int qib_register_ib_device(struct qib_devdata *dd) + * the LKEY). The remaining bits act as a generation number or tag. + */ + spin_lock_init(&dev->lk_table.lock); ++ /* insure generation is at least 4 bits see keys.c */ ++ if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) { ++ qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n", ++ ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS); ++ ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS; ++ } + dev->lk_table.max = 1 << ib_qib_lkey_table_size; + lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); + dev->lk_table.table = (struct qib_mregion __rcu **) +- __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); ++ vmalloc(lk_tab_size); + if (dev->lk_table.table == NULL) { + ret = -ENOMEM; + goto err_lk; +@@ -2265,7 +2272,7 @@ err_tx: + sizeof(struct qib_pio_header), + dev->pio_hdrs, dev->pio_hdrs_phys); + err_hdrs: +- free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size)); ++ vfree(dev->lk_table.table); + err_lk: + kfree(dev->qp_table); + err_qpt: +@@ -2319,8 +2326,7 @@ void qib_unregister_ib_device(struct qib_devdata *dd) + sizeof(struct qib_pio_header), + dev->pio_hdrs, dev->pio_hdrs_phys); + lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); +- free_pages((unsigned long) dev->lk_table.table, +- get_order(lk_tab_size)); ++ vfree(dev->lk_table.table); + kfree(dev->qp_table); + } + +diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h +index bfc8948fdd35..44ca28c83fe6 100644 +--- a/drivers/infiniband/hw/qib/qib_verbs.h ++++ b/drivers/infiniband/hw/qib/qib_verbs.h +@@ -647,6 +647,8 @@ struct qib_qpn_table { + struct qpn_map map[QPNMAP_ENTRIES]; + }; + ++#define MAX_LKEY_TABLE_BITS 23 ++ + struct qib_lkey_table { + spinlock_t lock; /* protect changes in this struct */ + u32 next; /* next unused index (speeds search) */ +diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c +index 6a594aac2290..c933d882c35c 100644 +--- a/drivers/infiniband/ulp/iser/iscsi_iser.c ++++ b/drivers/infiniband/ulp/iser/iscsi_iser.c +@@ -201,6 +201,7 @@ iser_initialize_task_headers(struct iscsi_task *task, + goto out; + } + ++ tx_desc->mapped = true; + tx_desc->dma_addr = dma_addr; + tx_desc->tx_sg[0].addr = tx_desc->dma_addr; + tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; +@@ -360,16 +361,19 @@ iscsi_iser_task_xmit(struct iscsi_task *task) + static void iscsi_iser_cleanup_task(struct iscsi_task *task) + { + struct iscsi_iser_task *iser_task = 
task->dd_data; +- struct iser_tx_desc *tx_desc = &iser_task->desc; +- struct iser_conn *iser_conn = task->conn->dd_data; ++ struct iser_tx_desc *tx_desc = &iser_task->desc; ++ struct iser_conn *iser_conn = task->conn->dd_data; + struct iser_device *device = iser_conn->ib_conn.device; + + /* DEVICE_REMOVAL event might have already released the device */ + if (!device) + return; + +- ib_dma_unmap_single(device->ib_device, +- tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); ++ if (likely(tx_desc->mapped)) { ++ ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, ++ ISER_HEADERS_LEN, DMA_TO_DEVICE); ++ tx_desc->mapped = false; ++ } + + /* mgmt tasks do not need special cleanup */ + if (!task->sc) +diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h +index 262ba1f8ee50..d2b6caf7694d 100644 +--- a/drivers/infiniband/ulp/iser/iscsi_iser.h ++++ b/drivers/infiniband/ulp/iser/iscsi_iser.h +@@ -270,6 +270,7 @@ enum iser_desc_type { + * sg[1] optionally points to either of immediate data + * unsolicited data-out or control + * @num_sge: number sges used on this TX task ++ * @mapped: Is the task header mapped + */ + struct iser_tx_desc { + struct iser_hdr iser_header; +@@ -278,6 +279,7 @@ struct iser_tx_desc { + u64 dma_addr; + struct ib_sge tx_sg[2]; + int num_sge; ++ bool mapped; + }; + + #define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \ +diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c +index 3e2118e8ed87..0a47f42fec24 100644 +--- a/drivers/infiniband/ulp/iser/iser_initiator.c ++++ b/drivers/infiniband/ulp/iser/iser_initiator.c +@@ -454,7 +454,7 @@ int iser_send_data_out(struct iscsi_conn *conn, + unsigned long buf_offset; + unsigned long data_seg_len; + uint32_t itt; +- int err = 0; ++ int err; + struct ib_sge *tx_dsg; + + itt = (__force uint32_t)hdr->itt; +@@ -475,7 +475,9 @@ int iser_send_data_out(struct iscsi_conn *conn, + memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr)); + + /* build the tx desc */ +- iser_initialize_task_headers(task, tx_desc); ++ err = iser_initialize_task_headers(task, tx_desc); ++ if (err) ++ goto send_data_out_error; + + mem_reg = &iser_task->rdma_reg[ISER_DIR_OUT]; + tx_dsg = &tx_desc->tx_sg[1]; +@@ -502,7 +504,7 @@ int iser_send_data_out(struct iscsi_conn *conn, + + send_data_out_error: + kmem_cache_free(ig.desc_cache, tx_desc); +- iser_err("conn %p failed err %d\n",conn, err); ++ iser_err("conn %p failed err %d\n", conn, err); + return err; + } + +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c +index 75c01b27bd0b..025f93105444 100644 +--- a/drivers/infiniband/ulp/srp/ib_srp.c ++++ b/drivers/infiniband/ulp/srp/ib_srp.c +@@ -2761,6 +2761,13 @@ static int srp_sdev_count(struct Scsi_Host *host) + return c; + } + ++/* ++ * Return values: ++ * < 0 upon failure. Caller is responsible for SRP target port cleanup. ++ * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port ++ * removal has been scheduled. ++ * 0 and target->state != SRP_TARGET_REMOVED upon success. 
++ */ + static int srp_add_target(struct srp_host *host, struct srp_target_port *target) + { + struct srp_rport_identifiers ids; +@@ -3266,7 +3273,7 @@ static ssize_t srp_create_target(struct device *dev, + srp_free_ch_ib(target, ch); + srp_free_req_data(target, ch); + target->ch_count = ch - target->ch; +- break; ++ goto connected; + } + } + +@@ -3276,6 +3283,7 @@ static ssize_t srp_create_target(struct device *dev, + node_idx++; + } + ++connected: + target->scsi_host->nr_hw_queues = target->ch_count; + + ret = srp_add_target(host, target); +@@ -3298,6 +3306,8 @@ out: + mutex_unlock(&host->add_target_mutex); + + scsi_host_put(target->scsi_host); ++ if (ret < 0) ++ scsi_host_put(target->scsi_host); + + return ret; + +diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c +index a18f41b89b6a..2ae522f0d2b2 100644 +--- a/drivers/input/evdev.c ++++ b/drivers/input/evdev.c +@@ -290,19 +290,14 @@ static int evdev_flush(struct file *file, fl_owner_t id) + { + struct evdev_client *client = file->private_data; + struct evdev *evdev = client->evdev; +- int retval; + +- retval = mutex_lock_interruptible(&evdev->mutex); +- if (retval) +- return retval; ++ mutex_lock(&evdev->mutex); + +- if (!evdev->exist || client->revoked) +- retval = -ENODEV; +- else +- retval = input_flush_device(&evdev->handle, file); ++ if (evdev->exist && !client->revoked) ++ input_flush_device(&evdev->handle, file); + + mutex_unlock(&evdev->mutex); +- return retval; ++ return 0; + } + + static void evdev_free(struct device *dev) +diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c +index abeedc9a78c2..2570f2a25dc4 100644 +--- a/drivers/iommu/fsl_pamu.c ++++ b/drivers/iommu/fsl_pamu.c +@@ -41,7 +41,6 @@ struct pamu_isr_data { + + static struct paace *ppaact; + static struct paace *spaact; +-static struct ome *omt __initdata; + + /* + * Table for matching compatible strings, for device tree +@@ -50,7 +49,7 @@ static struct ome *omt __initdata; + * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0" + * string would be used. + */ +-static const struct of_device_id guts_device_ids[] __initconst = { ++static const struct of_device_id guts_device_ids[] = { + { .compatible = "fsl,qoriq-device-config-1.0", }, + { .compatible = "fsl,qoriq-device-config-2.0", }, + {} +@@ -599,7 +598,7 @@ found_cpu_node: + * Memory accesses to QMAN and BMAN private memory need not be coherent, so + * clear the PAACE entry coherency attribute for them. + */ +-static void __init setup_qbman_paace(struct paace *ppaace, int paace_type) ++static void setup_qbman_paace(struct paace *ppaace, int paace_type) + { + switch (paace_type) { + case QMAN_PAACE: +@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int paace_type) + * this table to translate device transaction to appropriate corenet + * transaction. 
+ */ +-static void __init setup_omt(struct ome *omt) ++static void setup_omt(struct ome *omt) + { + struct ome *ome; + +@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt) + * Get the maximum number of PAACT table entries + * and subwindows supported by PAMU + */ +-static void __init get_pamu_cap_values(unsigned long pamu_reg_base) ++static void get_pamu_cap_values(unsigned long pamu_reg_base) + { + u32 pc_val; + +@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base) + } + + /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */ +-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, +- phys_addr_t ppaact_phys, phys_addr_t spaact_phys, +- phys_addr_t omt_phys) ++static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size, ++ phys_addr_t ppaact_phys, phys_addr_t spaact_phys, ++ phys_addr_t omt_phys) + { + u32 *pc; + struct pamu_mmap_regs *pamu_regs; +@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu + } + + /* Enable all device LIODNS */ +-static void __init setup_liodns(void) ++static void setup_liodns(void) + { + int i, len; + struct paace *ppaace; +@@ -846,7 +845,7 @@ struct ccsr_law { + /* + * Create a coherence subdomain for a given memory block. + */ +-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id) ++static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id) + { + struct device_node *np; + const __be32 *iprop; +@@ -988,7 +987,7 @@ error: + static const struct { + u32 svr; + u32 port_id; +-} port_id_map[] __initconst = { ++} port_id_map[] = { + {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */ + {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */ + {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */ +@@ -1006,7 +1005,7 @@ static const struct { + + #define SVR_SECURITY 0x80000 /* The Security (E) bit */ + +-static int __init fsl_pamu_probe(struct platform_device *pdev) ++static int fsl_pamu_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; + void __iomem *pamu_regs = NULL; +@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev) + int irq; + phys_addr_t ppaact_phys; + phys_addr_t spaact_phys; ++ struct ome *omt; + phys_addr_t omt_phys; + size_t mem_size = 0; + unsigned int order = 0; +@@ -1200,7 +1200,7 @@ error: + return ret; + } + +-static struct platform_driver fsl_of_pamu_driver __initdata = { ++static struct platform_driver fsl_of_pamu_driver = { + .driver = { + .name = "fsl-of-pamu", + }, +diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c +index c87c4b1bfc00..c23427951ec1 100644 +--- a/drivers/iommu/intel-iommu.c ++++ b/drivers/iommu/intel-iommu.c +@@ -681,6 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu + struct context_entry *context; + u64 *entry; + ++ entry = &root->lo; + if (ecs_enabled(iommu)) { + if (devfn >= 0x80) { + devfn -= 0x80; +@@ -688,7 +689,6 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu + } + devfn *= 2; + } +- entry = &root->lo; + if (*entry & 1) + context = phys_to_virt(*entry & VTD_PAGE_MASK); + else { +diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c +index 4e460216bd16..e29d5d7fe220 100644 +--- a/drivers/iommu/io-pgtable-arm.c ++++ b/drivers/iommu/io-pgtable-arm.c +@@ -200,6 +200,10 @@ typedef u64 arm_lpae_iopte; + + static bool selftest_running = false; + ++static 
int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, ++ unsigned long iova, size_t size, int lvl, ++ arm_lpae_iopte *ptep); ++ + static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, + unsigned long iova, phys_addr_t paddr, + arm_lpae_iopte prot, int lvl, +@@ -207,10 +211,21 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, + { + arm_lpae_iopte pte = prot; + +- /* We require an unmap first */ + if (iopte_leaf(*ptep, lvl)) { ++ /* We require an unmap first */ + WARN_ON(!selftest_running); + return -EEXIST; ++ } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { ++ /* ++ * We need to unmap and free the old table before ++ * overwriting it with a block entry. ++ */ ++ arm_lpae_iopte *tblp; ++ size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data); ++ ++ tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data); ++ if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz)) ++ return -EINVAL; + } + + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) +diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c +index c845d99ecf6b..e0ff5f4d7fed 100644 +--- a/drivers/iommu/tegra-smmu.c ++++ b/drivers/iommu/tegra-smmu.c +@@ -26,6 +26,7 @@ struct tegra_smmu { + const struct tegra_smmu_soc *soc; + + unsigned long pfn_mask; ++ unsigned long tlb_mask; + + unsigned long *asids; + struct mutex lock; +@@ -65,7 +66,8 @@ static inline u32 smmu_readl(struct tegra_smmu *smmu, unsigned long offset) + #define SMMU_TLB_CONFIG 0x14 + #define SMMU_TLB_CONFIG_HIT_UNDER_MISS (1 << 29) + #define SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION (1 << 28) +-#define SMMU_TLB_CONFIG_ACTIVE_LINES(x) ((x) & 0x3f) ++#define SMMU_TLB_CONFIG_ACTIVE_LINES(smmu) \ ++ ((smmu)->soc->num_tlb_lines & (smmu)->tlb_mask) + + #define SMMU_PTC_CONFIG 0x18 + #define SMMU_PTC_CONFIG_ENABLE (1 << 29) +@@ -716,6 +718,9 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, + smmu->pfn_mask = BIT_MASK(mc->soc->num_address_bits - PAGE_SHIFT) - 1; + dev_dbg(dev, "address bits: %u, PFN mask: %#lx\n", + mc->soc->num_address_bits, smmu->pfn_mask); ++ smmu->tlb_mask = (smmu->soc->num_tlb_lines << 1) - 1; ++ dev_dbg(dev, "TLB lines: %u, mask: %#lx\n", smmu->soc->num_tlb_lines, ++ smmu->tlb_mask); + + value = SMMU_PTC_CONFIG_ENABLE | SMMU_PTC_CONFIG_INDEX_MAP(0x3f); + +@@ -725,7 +730,7 @@ struct tegra_smmu *tegra_smmu_probe(struct device *dev, + smmu_writel(smmu, value, SMMU_PTC_CONFIG); + + value = SMMU_TLB_CONFIG_HIT_UNDER_MISS | +- SMMU_TLB_CONFIG_ACTIVE_LINES(0x20); ++ SMMU_TLB_CONFIG_ACTIVE_LINES(smmu); + + if (soc->supports_round_robin_arbitration) + value |= SMMU_TLB_CONFIG_ROUND_ROBIN_ARBITRATION; +diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c +index 8c91fd5eb6fd..3ac9c4194814 100644 +--- a/drivers/isdn/gigaset/ser-gigaset.c ++++ b/drivers/isdn/gigaset/ser-gigaset.c +@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty) + cs->hw.ser->tty = tty; + atomic_set(&cs->hw.ser->refcnt, 1); + init_completion(&cs->hw.ser->dead_cmp); +- + tty->disc_data = cs; + ++ /* Set the amount of data we're willing to receive per call ++ * from the hardware driver to half of the input buffer size ++ * to leave some reserve. ++ * Note: We don't do flow control towards the hardware driver. ++ * If more data is received than will fit into the input buffer, ++ * it will be dropped and an error will be logged. This should ++ * never happen as the device is slow and the buffer size ample. ++ */ ++ tty->receive_room = RBUFSIZE/2; ++ + /* OK.. 
Initialization of the datastructures and the HW is done.. Now + * startup system and notify the LL that we are ready to run + */ +diff --git a/drivers/md/md.c b/drivers/md/md.c +index e4621511d118..e8c44fcb1ad1 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -5365,6 +5365,8 @@ static void __md_stop(struct mddev *mddev) + { + struct md_personality *pers = mddev->pers; + mddev_detach(mddev); ++ /* Ensure ->event_work is done */ ++ flush_workqueue(md_misc_wq); + spin_lock(&mddev->lock); + mddev->ready = 0; + mddev->pers = NULL; +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index f55c3f35b746..fe0122771642 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -3566,6 +3566,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) + /* far_copies must be 1 */ + conf->prev.stride = conf->dev_sectors; + } ++ conf->reshape_safe = conf->reshape_progress; + spin_lock_init(&conf->device_lock); + INIT_LIST_HEAD(&conf->retry_list); + +@@ -3770,7 +3771,6 @@ static int run(struct mddev *mddev) + } + conf->offset_diff = min_offset_diff; + +- conf->reshape_safe = conf->reshape_progress; + clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); + clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); + set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); +@@ -4113,6 +4113,7 @@ static int raid10_start_reshape(struct mddev *mddev) + conf->reshape_progress = size; + } else + conf->reshape_progress = 0; ++ conf->reshape_safe = conf->reshape_progress; + spin_unlock_irq(&conf->device_lock); + + if (mddev->delta_disks && mddev->bitmap) { +@@ -4180,6 +4181,7 @@ abort: + rdev->new_data_offset = rdev->data_offset; + smp_wmb(); + conf->reshape_progress = MaxSector; ++ conf->reshape_safe = MaxSector; + mddev->reshape_position = MaxSector; + spin_unlock_irq(&conf->device_lock); + return ret; +@@ -4534,6 +4536,7 @@ static void end_reshape(struct r10conf *conf) + md_finish_reshape(conf->mddev); + smp_wmb(); + conf->reshape_progress = MaxSector; ++ conf->reshape_safe = MaxSector; + spin_unlock_irq(&conf->device_lock); + + /* read-ahead size must cover two whole stripes, which is +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index b6793d2e051f..23af6772f146 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -2151,6 +2151,9 @@ static int resize_stripes(struct r5conf *conf, int newsize) + if (!sc) + return -ENOMEM; + ++ /* Need to ensure auto-resizing doesn't interfere */ ++ mutex_lock(&conf->cache_size_mutex); ++ + for (i = conf->max_nr_stripes; i; i--) { + nsh = alloc_stripe(sc, GFP_KERNEL); + if (!nsh) +@@ -2167,6 +2170,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) + kmem_cache_free(sc, nsh); + } + kmem_cache_destroy(sc); ++ mutex_unlock(&conf->cache_size_mutex); + return -ENOMEM; + } + /* Step 2 - Must use GFP_NOIO now. 
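
/*
 * Editorial aside, not part of the patch: the raid5 hunks around this
 * point all revolve around one locking pattern.  A new cache_size_mutex
 * serializes the paths that change the stripe cache size, while the
 * memory-shrinker path only ever trylocks it, so reclaim backs off
 * instead of deadlocking against an in-flight resize.  A minimal sketch
 * of that shape, with hypothetical names and POSIX threads so it builds
 * outside the kernel:
 */
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t cache_size_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t cache_entries = 256;

/* Resize path: may sleep, so it is allowed to wait for the lock. */
static void cache_resize(size_t new_size)
{
	pthread_mutex_lock(&cache_size_lock);
	cache_entries = new_size;	/* grow or shrink structures here */
	pthread_mutex_unlock(&cache_size_lock);
}

/* Reclaim path: must make progress or bail, never block on the resizer. */
static size_t cache_shrink(size_t nr_to_scan)
{
	size_t freed = 0;

	if (pthread_mutex_trylock(&cache_size_lock) != 0)
		return 0;		/* resize in flight: report no progress */

	while (freed < nr_to_scan && cache_entries > 0) {
		cache_entries--;	/* stand-in for drop_one_stripe() */
		freed++;
	}
	pthread_mutex_unlock(&cache_size_lock);
	return freed;
}
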
+@@ -2213,6 +2217,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) + } else + err = -ENOMEM; + ++ mutex_unlock(&conf->cache_size_mutex); + /* Step 4, return new stripes to service */ + while(!list_empty(&newstripes)) { + nsh = list_entry(newstripes.next, struct stripe_head, lru); +@@ -2240,7 +2245,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) + static int drop_one_stripe(struct r5conf *conf) + { + struct stripe_head *sh; +- int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; ++ int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; + + spin_lock_irq(conf->hash_locks + hash); + sh = get_free_stripe(conf, hash); +@@ -5846,12 +5851,14 @@ static void raid5d(struct md_thread *thread) + pr_debug("%d stripes handled\n", handled); + + spin_unlock_irq(&conf->device_lock); +- if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { ++ if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && ++ mutex_trylock(&conf->cache_size_mutex)) { + grow_one_stripe(conf, __GFP_NOWARN); + /* Set flag even if allocation failed. This helps + * slow down allocation requests when mem is short + */ + set_bit(R5_DID_ALLOC, &conf->cache_state); ++ mutex_unlock(&conf->cache_size_mutex); + } + + async_tx_issue_pending_all(); +@@ -5883,18 +5890,22 @@ raid5_set_cache_size(struct mddev *mddev, int size) + return -EINVAL; + + conf->min_nr_stripes = size; ++ mutex_lock(&conf->cache_size_mutex); + while (size < conf->max_nr_stripes && + drop_one_stripe(conf)) + ; ++ mutex_unlock(&conf->cache_size_mutex); + + + err = md_allow_write(mddev); + if (err) + return err; + ++ mutex_lock(&conf->cache_size_mutex); + while (size > conf->max_nr_stripes) + if (!grow_one_stripe(conf, GFP_KERNEL)) + break; ++ mutex_unlock(&conf->cache_size_mutex); + + return 0; + } +@@ -6360,11 +6371,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, + struct shrink_control *sc) + { + struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); +- int ret = 0; +- while (ret < sc->nr_to_scan) { +- if (drop_one_stripe(conf) == 0) +- return SHRINK_STOP; +- ret++; ++ unsigned long ret = SHRINK_STOP; ++ ++ if (mutex_trylock(&conf->cache_size_mutex)) { ++ ret= 0; ++ while (ret < sc->nr_to_scan && ++ conf->max_nr_stripes > conf->min_nr_stripes) { ++ if (drop_one_stripe(conf) == 0) { ++ ret = SHRINK_STOP; ++ break; ++ } ++ ret++; ++ } ++ mutex_unlock(&conf->cache_size_mutex); + } + return ret; + } +@@ -6433,6 +6452,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) + goto abort; + spin_lock_init(&conf->device_lock); + seqcount_init(&conf->gen_lock); ++ mutex_init(&conf->cache_size_mutex); + init_waitqueue_head(&conf->wait_for_stripe); + init_waitqueue_head(&conf->wait_for_overlap); + INIT_LIST_HEAD(&conf->handle_list); +diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h +index 896d603ad0da..03472fbbd882 100644 +--- a/drivers/md/raid5.h ++++ b/drivers/md/raid5.h +@@ -482,7 +482,8 @@ struct r5conf { + */ + int active_name; + char cache_name[2][32]; +- struct kmem_cache *slab_cache; /* for allocating stripes */ ++ struct kmem_cache *slab_cache; /* for allocating stripes */ ++ struct mutex cache_size_mutex; /* Protect changes to cache size */ + + int seq_flush, seq_write; + int quiesce; +diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c +index a30cc2f7e4f1..ddf59ee5ca40 100644 +--- a/drivers/media/platform/am437x/am437x-vpfe.c ++++ b/drivers/media/platform/am437x/am437x-vpfe.c +@@ -1185,14 +1185,24 @@ static int 
vpfe_initialize_device(struct vpfe_device *vpfe) + static int vpfe_release(struct file *file) + { + struct vpfe_device *vpfe = video_drvdata(file); ++ bool fh_singular; + int ret; + + mutex_lock(&vpfe->lock); + +- if (v4l2_fh_is_singular_file(file)) +- vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev); ++ /* Save the singular status before we call the clean-up helper */ ++ fh_singular = v4l2_fh_is_singular_file(file); ++ ++ /* the release helper will cleanup any on-going streaming */ + ret = _vb2_fop_release(file, NULL); + ++ /* ++ * If this was the last open file. ++ * Then de-initialize hw module. ++ */ ++ if (fh_singular) ++ vpfe_ccdc_close(&vpfe->ccdc, vpfe->pdev); ++ + mutex_unlock(&vpfe->lock); + + return ret; +@@ -1577,7 +1587,7 @@ static int vpfe_s_fmt(struct file *file, void *priv, + return -EBUSY; + } + +- ret = vpfe_try_fmt(file, priv, fmt); ++ ret = vpfe_try_fmt(file, priv, &format); + if (ret) + return ret; + +diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c +index 18d0a871747f..947d8be7b245 100644 +--- a/drivers/media/platform/omap3isp/isp.c ++++ b/drivers/media/platform/omap3isp/isp.c +@@ -829,14 +829,14 @@ static int isp_pipeline_link_notify(struct media_link *link, u32 flags, + int ret; + + if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && +- !(link->flags & MEDIA_LNK_FL_ENABLED)) { ++ !(flags & MEDIA_LNK_FL_ENABLED)) { + /* Powering off entities is assumed to never fail. */ + isp_pipeline_pm_power(source, -sink_use); + isp_pipeline_pm_power(sink, -source_use); + return 0; + } + +- if (notification == MEDIA_DEV_NOTIFY_POST_LINK_CH && ++ if (notification == MEDIA_DEV_NOTIFY_PRE_LINK_CH && + (flags & MEDIA_LNK_FL_ENABLED)) { + + ret = isp_pipeline_pm_power(source, sink_use); +diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c +index f8c5e47a30aa..0aba9ff92102 100644 +--- a/drivers/media/rc/rc-main.c ++++ b/drivers/media/rc/rc-main.c +@@ -1191,9 +1191,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env) + { + struct rc_dev *dev = to_rc_dev(device); + +- if (!dev || !dev->input_dev) +- return -ENODEV; +- + if (dev->rc_map.name) + ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name); + if (dev->driver_name) +diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c +index 511e9a25c151..16c4d26f51e7 100644 +--- a/drivers/memory/tegra/tegra114.c ++++ b/drivers/memory/tegra/tegra114.c +@@ -935,6 +935,7 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = { + .num_swgroups = ARRAY_SIZE(tegra114_swgroups), + .supports_round_robin_arbitration = false, + .supports_request_limit = false, ++ .num_tlb_lines = 32, + .num_asids = 4, + .ops = &tegra114_smmu_ops, + }; +diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c +index 278d40b854c1..b153d0b732cf 100644 +--- a/drivers/memory/tegra/tegra124.c ++++ b/drivers/memory/tegra/tegra124.c +@@ -981,6 +981,7 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = { + .num_swgroups = ARRAY_SIZE(tegra124_swgroups), + .supports_round_robin_arbitration = true, + .supports_request_limit = true, ++ .num_tlb_lines = 32, + .num_asids = 128, + .ops = &tegra124_smmu_ops, + }; +diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c +index 71fe9376fe53..f422b18f45f3 100644 +--- a/drivers/memory/tegra/tegra30.c ++++ b/drivers/memory/tegra/tegra30.c +@@ -957,6 +957,7 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = { + .num_swgroups = ARRAY_SIZE(tegra30_swgroups), + .supports_round_robin_arbitration = 
false, + .supports_request_limit = false, ++ .num_tlb_lines = 16, + .num_asids = 4, + .ops = &tegra30_smmu_ops, + }; +diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c +index 1ef01647265f..4f1b0bdb9cf8 100644 +--- a/drivers/misc/cxl/pci.c ++++ b/drivers/misc/cxl/pci.c +@@ -778,14 +778,9 @@ int cxl_reset(struct cxl *adapter) + { + struct pci_dev *dev = to_pci_dev(adapter->dev.parent); + int rc; +- int i; +- u32 val; + + dev_info(&dev->dev, "CXL reset\n"); + +- for (i = 0; i < adapter->slices; i++) +- cxl_remove_afu(adapter->afu[i]); +- + /* pcie_warm_reset requests a fundamental pci reset which includes a + * PERST assert/deassert. PERST triggers a loading of the image + * if "user" or "factory" is selected in sysfs */ +@@ -794,20 +789,6 @@ int cxl_reset(struct cxl *adapter) + return rc; + } + +- /* the PERST done above fences the PHB. So, reset depends on EEH +- * to unbind the driver, tell Sapphire to reinit the PHB, and rebind +- * the driver. Do an mmio read explictly to ensure EEH notices the +- * fenced PHB. Retry for a few seconds before giving up. */ +- i = 0; +- while (((val = mmio_read32be(adapter->p1_mmio)) != 0xffffffff) && +- (i < 5)) { +- msleep(500); +- i++; +- } +- +- if (val != 0xffffffff) +- dev_err(&dev->dev, "cxl: PERST failed to trigger EEH\n"); +- + return rc; + } + +@@ -1062,8 +1043,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id) + int slice; + int rc; + +- pci_dev_get(dev); +- + if (cxl_verbose) + dump_cxl_config_space(dev); + +diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c +index 92e7671426eb..588fb7908642 100644 +--- a/drivers/mmc/core/core.c ++++ b/drivers/mmc/core/core.c +@@ -330,8 +330,10 @@ EXPORT_SYMBOL(mmc_start_bkops); + */ + static void mmc_wait_data_done(struct mmc_request *mrq) + { +- mrq->host->context_info.is_done_rcv = true; +- wake_up_interruptible(&mrq->host->context_info.wait); ++ struct mmc_context_info *context_info = &mrq->host->context_info; ++ ++ context_info->is_done_rcv = true; ++ wake_up_interruptible(&context_info->wait); + } + + static void mmc_wait_done(struct mmc_request *mrq) +diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c +index 7a3fc16d0a6c..53cfc7cedefe 100644 +--- a/drivers/mmc/host/sdhci-pci.c ++++ b/drivers/mmc/host/sdhci-pci.c +@@ -549,6 +549,7 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) + static const struct sdhci_pci_fixes sdhci_o2 = { + .probe = sdhci_pci_o2_probe, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, ++ .quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD, + .probe_slot = sdhci_pci_o2_probe_slot, + .resume = sdhci_pci_o2_resume, + }; +diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c +index bec8a307f8cd..fd41b91436ec 100644 +--- a/drivers/mmc/host/sdhci.c ++++ b/drivers/mmc/host/sdhci.c +@@ -1146,6 +1146,7 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host) + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); + break; + case MMC_TIMING_UHS_DDR50: ++ case MMC_TIMING_MMC_DDR52: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); + break; + case MMC_TIMING_MMC_HS400: +@@ -1598,7 +1599,8 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) + (ios->timing == MMC_TIMING_UHS_SDR25) || + (ios->timing == MMC_TIMING_UHS_SDR50) || + (ios->timing == MMC_TIMING_UHS_SDR104) || +- (ios->timing == MMC_TIMING_UHS_DDR50))) { ++ (ios->timing == MMC_TIMING_UHS_DDR50) || ++ (ios->timing == MMC_TIMING_MMC_DDR52))) { + u16 preset; + + sdhci_enable_preset_value(host, true); +diff --git 
a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c +index d5fe5d5f490f..16d87bf8ac3c 100644 +--- a/drivers/net/bonding/bond_main.c ++++ b/drivers/net/bonding/bond_main.c +@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev, + call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); + } + ++static struct slave *bond_get_old_active(struct bonding *bond, ++ struct slave *new_active) ++{ ++ struct slave *slave; ++ struct list_head *iter; ++ ++ bond_for_each_slave(bond, slave, iter) { ++ if (slave == new_active) ++ continue; ++ ++ if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) ++ return slave; ++ } ++ ++ return NULL; ++} ++ + /* bond_do_fail_over_mac + * + * Perform special MAC address swapping for fail_over_mac settings +@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond, + if (!new_active) + return; + ++ if (!old_active) ++ old_active = bond_get_old_active(bond, new_active); ++ + if (old_active) { + ether_addr_copy(tmp_mac, new_active->dev->dev_addr); + ether_addr_copy(saddr.sa_data, +@@ -1902,6 +1922,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, + bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; + netdev_info(bond_dev, "Destroying bond %s\n", + bond_dev->name); ++ bond_remove_proc_entry(bond); + unregister_netdevice(bond_dev); + } + return ret; +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index 069952fa5d64..0d8af5bb5907 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -10757,7 +10757,7 @@ static ssize_t tg3_show_temp(struct device *dev, + tg3_ape_scratchpad_read(tp, &temperature, attr->index, + sizeof(temperature)); + spin_unlock_bh(&tp->lock); +- return sprintf(buf, "%u\n", temperature); ++ return sprintf(buf, "%u\n", temperature * 1000); + } + + +diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c +index caae6cb2bc1a..a1c30ee60888 100644 +--- a/drivers/net/ethernet/brocade/bna/bnad.c ++++ b/drivers/net/ethernet/brocade/bna/bnad.c +@@ -675,6 +675,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) + if (!next_cmpl->valid) + break; + } ++ packets++; + + /* TODO: BNA_CQ_EF_LOCAL ? 
*/ + if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | +@@ -691,7 +692,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget) + else + bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); + +- packets++; + rcb->rxq->rx_packets++; + rcb->rxq->rx_bytes += totlen; + ccb->bytes_per_intr += totlen; +diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c +index c754b2027281..c9da1b5d4804 100644 +--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c ++++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c +@@ -216,7 +216,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring, + + static inline bool fm10k_page_is_reserved(struct page *page) + { +- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; ++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); + } + + static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer, +diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h +index c2bd4f98a837..212d668dabb3 100644 +--- a/drivers/net/ethernet/intel/igb/igb.h ++++ b/drivers/net/ethernet/intel/igb/igb.h +@@ -540,6 +540,7 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va, + struct sk_buff *skb); + int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr); + int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr); ++void igb_set_flag_queue_pairs(struct igb_adapter *, const u32); + #ifdef CONFIG_IGB_HWMON + void igb_sysfs_exit(struct igb_adapter *adapter); + int igb_sysfs_init(struct igb_adapter *adapter); +diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c +index d5673eb90c54..0afc0913e5b9 100644 +--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c ++++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c +@@ -2991,6 +2991,7 @@ static int igb_set_channels(struct net_device *netdev, + { + struct igb_adapter *adapter = netdev_priv(netdev); + unsigned int count = ch->combined_count; ++ unsigned int max_combined = 0; + + /* Verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) +@@ -3001,11 +3002,13 @@ static int igb_set_channels(struct net_device *netdev, + return -EINVAL; + + /* Verify the number of channels doesn't exceed hw limits */ +- if (count > igb_max_channels(adapter)) ++ max_combined = igb_max_channels(adapter); ++ if (count > max_combined) + return -EINVAL; + + if (count != adapter->rss_queues) { + adapter->rss_queues = count; ++ igb_set_flag_queue_pairs(adapter, max_combined); + + /* Hardware has to reinitialize queues and interrupts to + * match the new configuration. 
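
/*
 * Editorial aside, not part of the patch: the fm10k/igb/ixgbe/ixgbevf
 * hunks in this region all make the same mechanical change, replacing a
 * direct read of page->pfmemalloc with the page_is_pfmemalloc() helper.
 * The point is encapsulation: once the mm layer changed how "this page
 * came from the emergency reserve" is stored (in this era, reportedly as
 * a sentinel value in page->index), every open-coded field access became
 * wrong at once.  A toy sketch of the accessor idea; toy_page is a
 * stand-in, not the kernel's struct page:
 */
#include <stdbool.h>

struct toy_page {
	unsigned long index;	/* (unsigned long)-1 doubles as the pfmemalloc mark */
};

static inline void toy_set_page_pfmemalloc(struct toy_page *page)
{
	page->index = (unsigned long)-1;
}

static inline bool toy_page_is_pfmemalloc(const struct toy_page *page)
{
	/* Callers never inspect the encoding themselves. */
	return page->index == (unsigned long)-1;
}

/* RX reuse check in the style of the drivers above: never recycle
 * remote-node or reserve pages. */
static inline bool toy_page_is_reserved(const struct toy_page *page,
					int page_node, int local_node)
{
	return page_node != local_node || toy_page_is_pfmemalloc(page);
}
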
+diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c +index a0a9b1fcb5e8..4f6bf996851e 100644 +--- a/drivers/net/ethernet/intel/igb/igb_main.c ++++ b/drivers/net/ethernet/intel/igb/igb_main.c +@@ -1205,10 +1205,14 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, + + /* allocate q_vector and rings */ + q_vector = adapter->q_vector[v_idx]; +- if (!q_vector) ++ if (!q_vector) { + q_vector = kzalloc(size, GFP_KERNEL); +- else ++ } else if (size > ksize(q_vector)) { ++ kfree_rcu(q_vector, rcu); ++ q_vector = kzalloc(size, GFP_KERNEL); ++ } else { + memset(q_vector, 0, size); ++ } + if (!q_vector) + return -ENOMEM; + +@@ -2901,6 +2905,14 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter) + + adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); + ++ igb_set_flag_queue_pairs(adapter, max_rss_queues); ++} ++ ++void igb_set_flag_queue_pairs(struct igb_adapter *adapter, ++ const u32 max_rss_queues) ++{ ++ struct e1000_hw *hw = &adapter->hw; ++ + /* Determine if we need to pair queues. */ + switch (hw->mac.type) { + case e1000_82575: +@@ -6584,7 +6596,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring, + + static inline bool igb_page_is_reserved(struct page *page) + { +- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; ++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); + } + + static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer, +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index 5be12a00e1f4..463ff47200f1 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -1829,7 +1829,7 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, + + static inline bool ixgbe_page_is_reserved(struct page *page) + { +- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; ++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); + } + + /** +diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +index e71cdde9cb01..1d7b00b038a2 100644 +--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c ++++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +@@ -765,7 +765,7 @@ static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, + + static inline bool ixgbevf_page_is_reserved(struct page *page) + { +- return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; ++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); + } + + /** +diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c +index 2619c9fbf42d..983b1d51244d 100644 +--- a/drivers/net/ethernet/mellanox/mlx4/eq.c ++++ b/drivers/net/ethernet/mellanox/mlx4/eq.c +@@ -573,7 +573,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) + continue; + mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", + __func__, i, port); +- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; ++ s_info = &priv->mfunc.master.vf_oper[i].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( +@@ -608,7 +608,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) + continue; + if (i == mlx4_master_func_num(dev)) + continue; +- s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; ++ s_info = 
&priv->mfunc.master.vf_oper[i].vport[port].state; + if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { + eqe->event.port_change.port = + cpu_to_be32( +diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c +index cf98cc9bbc8d..73b6fc21ea00 100644 +--- a/drivers/net/ethernet/rocker/rocker.c ++++ b/drivers/net/ethernet/rocker/rocker.c +@@ -4587,6 +4587,7 @@ static void rocker_remove_ports(struct rocker *rocker) + rocker_port = rocker->ports[i]; + rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE); + unregister_netdev(rocker_port->dev); ++ free_netdev(rocker_port->dev); + } + kfree(rocker->ports); + } +diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h +index ad3996038018..799c2929c536 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/descs.h ++++ b/drivers/net/ethernet/stmicro/stmmac/descs.h +@@ -158,6 +158,8 @@ struct dma_desc { + u32 buffer2_size:13; + u32 reserved4:3; + } etx; /* -- enhanced -- */ ++ ++ u64 all_flags; + } des01; + unsigned int des2; + unsigned int des3; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +index 6249a4ec08f0..573708123338 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +@@ -38,7 +38,6 @@ struct rk_priv_data { + bool clock_input; + + struct clk *clk_mac; +- struct clk *clk_mac_pll; + struct clk *gmac_clkin; + struct clk *mac_clk_rx; + struct clk *mac_clk_tx; +@@ -208,7 +207,7 @@ static int gmac_clk_init(struct rk_priv_data *bsp_priv) + dev_info(dev, "%s: clock input from PHY\n", __func__); + } else { + if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) +- clk_set_rate(bsp_priv->clk_mac_pll, 50000000); ++ clk_set_rate(bsp_priv->clk_mac, 50000000); + } + + return 0; +diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +index 1e2bcf5f89e1..7d944449f5ef 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +@@ -240,6 +240,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, + static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + int mode, int end) + { ++ p->des01.all_flags = 0; + p->des01.erx.own = 1; + p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; + +@@ -254,7 +255,7 @@ static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, + + static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) + { +- p->des01.etx.own = 0; ++ p->des01.all_flags = 0; + if (mode == STMMAC_CHAIN_MODE) + ehn_desc_tx_set_on_chain(p, end); + else +diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +index 35ad4f427ae2..48c3456445b2 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c ++++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c +@@ -123,6 +123,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x, + static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, + int end) + { ++ p->des01.all_flags = 0; + p->des01.rx.own = 1; + p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1; + +@@ -137,7 +138,7 @@ static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, + + static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end) + { +- p->des01.tx.own = 0; ++ p->des01.all_flags = 0; + if (mode == STMMAC_CHAIN_MODE) + ndesc_tx_set_on_chain(p, end); + 
else +diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +index 2c5ce2baca87..c274cdc5df1e 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c ++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +@@ -829,8 +829,11 @@ static int stmmac_init_phy(struct net_device *dev) + + phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface); + +- if (IS_ERR(phydev)) { ++ if (IS_ERR_OR_NULL(phydev)) { + pr_err("%s: Could not attach to PHY\n", dev->name); ++ if (!phydev) ++ return -ENODEV; ++ + return PTR_ERR(phydev); + } + +@@ -1189,41 +1192,41 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) + goto err_tx_skbuff; + + if (priv->extend_desc) { +- priv->dma_erx = dma_alloc_coherent(priv->device, rxsize * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); ++ priv->dma_erx = dma_zalloc_coherent(priv->device, rxsize * ++ sizeof(struct ++ dma_extended_desc), ++ &priv->dma_rx_phy, ++ GFP_KERNEL); + if (!priv->dma_erx) + goto err_dma; + +- priv->dma_etx = dma_alloc_coherent(priv->device, txsize * +- sizeof(struct +- dma_extended_desc), +- &priv->dma_tx_phy, +- GFP_KERNEL); ++ priv->dma_etx = dma_zalloc_coherent(priv->device, txsize * ++ sizeof(struct ++ dma_extended_desc), ++ &priv->dma_tx_phy, ++ GFP_KERNEL); + if (!priv->dma_etx) { + dma_free_coherent(priv->device, priv->dma_rx_size * +- sizeof(struct dma_extended_desc), +- priv->dma_erx, priv->dma_rx_phy); ++ sizeof(struct dma_extended_desc), ++ priv->dma_erx, priv->dma_rx_phy); + goto err_dma; + } + } else { +- priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * +- sizeof(struct dma_desc), +- &priv->dma_rx_phy, +- GFP_KERNEL); ++ priv->dma_rx = dma_zalloc_coherent(priv->device, rxsize * ++ sizeof(struct dma_desc), ++ &priv->dma_rx_phy, ++ GFP_KERNEL); + if (!priv->dma_rx) + goto err_dma; + +- priv->dma_tx = dma_alloc_coherent(priv->device, txsize * +- sizeof(struct dma_desc), +- &priv->dma_tx_phy, +- GFP_KERNEL); ++ priv->dma_tx = dma_zalloc_coherent(priv->device, txsize * ++ sizeof(struct dma_desc), ++ &priv->dma_tx_phy, ++ GFP_KERNEL); + if (!priv->dma_tx) { + dma_free_coherent(priv->device, priv->dma_rx_size * +- sizeof(struct dma_desc), +- priv->dma_rx, priv->dma_rx_phy); ++ sizeof(struct dma_desc), ++ priv->dma_rx, priv->dma_rx_phy); + goto err_dma; + } + } +diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c +index 63c7810e1545..7fbca37a1adf 100644 +--- a/drivers/net/virtio_net.c ++++ b/drivers/net/virtio_net.c +@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev) + else + vi->hdr_len = sizeof(struct virtio_net_hdr); + +- if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) ++ if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || ++ virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) + vi->any_header_sg = true; + + if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) +diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +index 23806c243a53..fd4a5353d216 100644 +--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c ++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { + {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ + {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ + {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/ ++ {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/ + 
{RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ + {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ + {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ +diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +index 57966e3c8e8d..3fa2fb7c8e4e 100644 +--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c ++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c +@@ -2180,7 +2180,7 @@ static int _rtl8821ae_set_media_status(struct ieee80211_hw *hw, + + rtl_write_byte(rtlpriv, MSR, bt_msr); + rtlpriv->cfg->ops->led_control(hw, ledaction); +- if ((bt_msr & 0xfc) == MSR_AP) ++ if ((bt_msr & MSR_MASK) == MSR_AP) + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00); + else + rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66); +diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h +index 53668fc8f23e..1d6110f9c1fb 100644 +--- a/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h ++++ b/drivers/net/wireless/rtlwifi/rtl8821ae/reg.h +@@ -429,6 +429,7 @@ + #define MSR_ADHOC 0x01 + #define MSR_INFRA 0x02 + #define MSR_AP 0x03 ++#define MSR_MASK 0x03 + + #define RRSR_RSC_OFFSET 21 + #define RRSR_SHORT_OFFSET 23 +diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c +index 0d2594395ffb..0866c5dfdf87 100644 +--- a/drivers/net/xen-netback/netback.c ++++ b/drivers/net/xen-netback/netback.c +@@ -1571,13 +1571,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) + smp_rmb(); + + while (dc != dp) { +- BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); ++ BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); + pending_idx = + queue->dealloc_ring[pending_index(dc++)]; + +- pending_idx_release[gop-queue->tx_unmap_ops] = ++ pending_idx_release[gop - queue->tx_unmap_ops] = + pending_idx; +- queue->pages_to_unmap[gop-queue->tx_unmap_ops] = ++ queue->pages_to_unmap[gop - queue->tx_unmap_ops] = + queue->mmap_pages[pending_idx]; + gnttab_set_unmap_op(gop, + idx_to_kaddr(queue, pending_idx), +diff --git a/drivers/nfc/st21nfca/st21nfca.c b/drivers/nfc/st21nfca/st21nfca.c +index d251f7229c4e..051286562fab 100644 +--- a/drivers/nfc/st21nfca/st21nfca.c ++++ b/drivers/nfc/st21nfca/st21nfca.c +@@ -148,14 +148,14 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) + ST21NFCA_DEVICE_MGNT_GATE, + ST21NFCA_DEVICE_MGNT_PIPE); + if (r < 0) +- goto free_info; ++ return r; + + /* Get pipe list */ + r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, + ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list), + &skb_pipe_list); + if (r < 0) +- goto free_info; ++ return r; + + /* Complete the existing gate_pipe table */ + for (i = 0; i < skb_pipe_list->len; i++) { +@@ -181,6 +181,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) + info->src_host_id != ST21NFCA_ESE_HOST_ID) { + pr_err("Unexpected apdu_reader pipe on host %x\n", + info->src_host_id); ++ kfree_skb(skb_pipe_info); + continue; + } + +@@ -200,6 +201,7 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) + hdev->pipes[st21nfca_gates[j].pipe].dest_host = + info->src_host_id; + } ++ kfree_skb(skb_pipe_info); + } + + /* +@@ -214,13 +216,12 @@ static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) + st21nfca_gates[i].gate, + st21nfca_gates[i].pipe); + if (r < 0) +- goto free_info; ++ goto free_list; + } + } + + memcpy(hdev->init_data.gates, st21nfca_gates, sizeof(st21nfca_gates)); +-free_info: +- kfree_skb(skb_pipe_info); ++free_list: + 
kfree_skb(skb_pipe_list); + return r; + } +diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c +index cde35c5d0191..d91f721a05b6 100644 +--- a/drivers/of/fdt.c ++++ b/drivers/of/fdt.c +@@ -955,7 +955,9 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, + } + + #ifdef CONFIG_HAVE_MEMBLOCK +-#define MAX_PHYS_ADDR ((phys_addr_t)~0) ++#ifndef MAX_MEMBLOCK_ADDR ++#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) ++#endif + + void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) + { +@@ -972,16 +974,16 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) + } + size &= PAGE_MASK; + +- if (base > MAX_PHYS_ADDR) { ++ if (base > MAX_MEMBLOCK_ADDR) { + pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + +- if (base + size - 1 > MAX_PHYS_ADDR) { ++ if (base + size - 1 > MAX_MEMBLOCK_ADDR) { + pr_warning("Ignoring memory range 0x%llx - 0x%llx\n", +- ((u64)MAX_PHYS_ADDR) + 1, base + size); +- size = MAX_PHYS_ADDR - base + 1; ++ ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size); ++ size = MAX_MEMBLOCK_ADDR - base + 1; + } + + if (base + size < phys_offset) { +diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c +index dceb9ddfd99a..a32c1f6c252c 100644 +--- a/drivers/parisc/lba_pci.c ++++ b/drivers/parisc/lba_pci.c +@@ -1556,8 +1556,11 @@ lba_driver_probe(struct parisc_device *dev) + if (lba_dev->hba.lmmio_space.flags) + pci_add_resource_offset(&resources, &lba_dev->hba.lmmio_space, + lba_dev->hba.lmmio_space_offset); +- if (lba_dev->hba.gmmio_space.flags) +- pci_add_resource(&resources, &lba_dev->hba.gmmio_space); ++ if (lba_dev->hba.gmmio_space.flags) { ++ /* pci_add_resource(&resources, &lba_dev->hba.gmmio_space); */ ++ pr_warn("LBA: Not registering GMMIO space %pR\n", ++ &lba_dev->hba.gmmio_space); ++ } + + pci_add_resource(&resources, &lba_dev->hba.bus_num); + +diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig +index 944f50015ed0..73de4efcbe6e 100644 +--- a/drivers/pci/Kconfig ++++ b/drivers/pci/Kconfig +@@ -2,7 +2,7 @@ + # PCI configuration + # + config PCI_BUS_ADDR_T_64BIT +- def_bool y if (ARCH_DMA_ADDR_T_64BIT || (64BIT && !PARISC)) ++ def_bool y if (ARCH_DMA_ADDR_T_64BIT || 64BIT) + depends on PCI + + config PCI_MSI +diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c +index 2f797cb7e205..774781450885 100644 +--- a/drivers/pinctrl/pinctrl-at91.c ++++ b/drivers/pinctrl/pinctrl-at91.c +@@ -320,6 +320,9 @@ static const struct pinctrl_ops at91_pctrl_ops = { + static void __iomem *pin_to_controller(struct at91_pinctrl *info, + unsigned int bank) + { ++ if (!gpio_chips[bank]) ++ return NULL; ++ + return gpio_chips[bank]->regbase; + } + +@@ -729,6 +732,10 @@ static int at91_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, + pin = &pins_conf[i]; + at91_pin_dbg(info->dev, pin); + pio = pin_to_controller(info, pin->bank); ++ ++ if (!pio) ++ continue; ++ + mask = pin_to_mask(pin->pin); + at91_mux_disable_interrupt(pio, mask); + switch (pin->mux) { +@@ -848,6 +855,10 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev, + *config = 0; + dev_dbg(info->dev, "%s:%d, pin_id=%d", __func__, __LINE__, pin_id); + pio = pin_to_controller(info, pin_to_bank(pin_id)); ++ ++ if (!pio) ++ return -EINVAL; ++ + pin = pin_id % MAX_NB_GPIO_PER_BANK; + + if (at91_mux_get_multidrive(pio, pin)) +@@ -889,6 +900,10 @@ static int at91_pinconf_set(struct pinctrl_dev *pctldev, + "%s:%d, pin_id=%d, config=0x%lx", + __func__, __LINE__, pin_id, config); + pio = pin_to_controller(info, 
pin_to_bank(pin_id)); ++ ++ if (!pio) ++ return -EINVAL; ++ + pin = pin_id % MAX_NB_GPIO_PER_BANK; + mask = pin_to_mask(pin); + +diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c +index cb7cd8d79329..cd78f1166b33 100644 +--- a/drivers/platform/x86/ideapad-laptop.c ++++ b/drivers/platform/x86/ideapad-laptop.c +@@ -852,6 +852,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { + }, + }, + { ++ .ident = "Lenovo Yoga 3 14", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), ++ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 3 14"), ++ }, ++ }, ++ { + .ident = "Lenovo Yoga 3 Pro 1370", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), +diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c +index 4337c3bc6ace..afea84c7a155 100644 +--- a/drivers/rtc/rtc-abx80x.c ++++ b/drivers/rtc/rtc-abx80x.c +@@ -28,7 +28,7 @@ + #define ABX8XX_REG_WD 0x07 + + #define ABX8XX_REG_CTRL1 0x10 +-#define ABX8XX_CTRL_WRITE BIT(1) ++#define ABX8XX_CTRL_WRITE BIT(0) + #define ABX8XX_CTRL_12_24 BIT(6) + + #define ABX8XX_REG_CFG_KEY 0x1f +diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c +index 76cbad7a99d3..c5a2523b0185 100644 +--- a/drivers/rtc/rtc-s3c.c ++++ b/drivers/rtc/rtc-s3c.c +@@ -39,6 +39,7 @@ struct s3c_rtc { + void __iomem *base; + struct clk *rtc_clk; + struct clk *rtc_src_clk; ++ bool clk_disabled; + + struct s3c_rtc_data *data; + +@@ -71,9 +72,12 @@ static void s3c_rtc_enable_clk(struct s3c_rtc *info) + unsigned long irq_flags; + + spin_lock_irqsave(&info->alarm_clk_lock, irq_flags); +- clk_enable(info->rtc_clk); +- if (info->data->needs_src_clk) +- clk_enable(info->rtc_src_clk); ++ if (info->clk_disabled) { ++ clk_enable(info->rtc_clk); ++ if (info->data->needs_src_clk) ++ clk_enable(info->rtc_src_clk); ++ info->clk_disabled = false; ++ } + spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags); + } + +@@ -82,9 +86,12 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info) + unsigned long irq_flags; + + spin_lock_irqsave(&info->alarm_clk_lock, irq_flags); +- if (info->data->needs_src_clk) +- clk_disable(info->rtc_src_clk); +- clk_disable(info->rtc_clk); ++ if (!info->clk_disabled) { ++ if (info->data->needs_src_clk) ++ clk_disable(info->rtc_src_clk); ++ clk_disable(info->rtc_clk); ++ info->clk_disabled = true; ++ } + spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags); + } + +@@ -128,6 +135,11 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) + + s3c_rtc_disable_clk(info); + ++ if (enabled) ++ s3c_rtc_enable_clk(info); ++ else ++ s3c_rtc_disable_clk(info); ++ + return 0; + } + +diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c +index 8c70d785ba73..ab60287ee72d 100644 +--- a/drivers/rtc/rtc-s5m.c ++++ b/drivers/rtc/rtc-s5m.c +@@ -635,6 +635,16 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info) + case S2MPS13X: + data[0] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT); + ret = regmap_write(info->regmap, info->regs->ctrl, data[0]); ++ if (ret < 0) ++ break; ++ ++ /* ++ * Should set WUDR & (RUDR or AUDR) bits to high after writing ++ * RTC_CTRL register like writing Alarm registers. We can't find ++ * the description from datasheet but vendor code does that ++ * really. 
++ */ ++ ret = s5m8767_rtc_set_alarm_reg(info); + break; + + default: +diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c +index 94e909c5a503..00d18c2bdb0f 100644 +--- a/fs/btrfs/transaction.c ++++ b/fs/btrfs/transaction.c +@@ -1875,8 +1875,11 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, + spin_unlock(&root->fs_info->trans_lock); + + wait_for_commit(root, prev_trans); ++ ret = prev_trans->aborted; + + btrfs_put_transaction(prev_trans); ++ if (ret) ++ goto cleanup_transaction; + } else { + spin_unlock(&root->fs_info->trans_lock); + } +diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c +index 8b7898b7670f..64a9bca976d0 100644 +--- a/fs/cifs/ioctl.c ++++ b/fs/cifs/ioctl.c +@@ -67,6 +67,12 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file, + goto out_drop_write; + } + ++ if (src_file.file->f_op->unlocked_ioctl != cifs_ioctl) { ++ rc = -EBADF; ++ cifs_dbg(VFS, "src file seems to be from a different filesystem type\n"); ++ goto out_fput; ++ } ++ + if ((!src_file.file->private_data) || (!dst_file->private_data)) { + rc = -EBADF; + cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n"); +diff --git a/fs/coredump.c b/fs/coredump.c +index bbbe139ab280..8dd099dc5f9b 100644 +--- a/fs/coredump.c ++++ b/fs/coredump.c +@@ -506,10 +506,10 @@ void do_coredump(const siginfo_t *siginfo) + const struct cred *old_cred; + struct cred *cred; + int retval = 0; +- int flag = 0; + int ispipe; + struct files_struct *displaced; +- bool need_nonrelative = false; ++ /* require nonrelative corefile path and be extra careful */ ++ bool need_suid_safe = false; + bool core_dumped = false; + static atomic_t core_dump_count = ATOMIC_INIT(0); + struct coredump_params cprm = { +@@ -543,9 +543,8 @@ void do_coredump(const siginfo_t *siginfo) + */ + if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) { + /* Setuid core dump mode */ +- flag = O_EXCL; /* Stop rewrite attacks */ + cred->fsuid = GLOBAL_ROOT_UID; /* Dump root private */ +- need_nonrelative = true; ++ need_suid_safe = true; + } + + retval = coredump_wait(siginfo->si_signo, &core_state); +@@ -626,7 +625,7 @@ void do_coredump(const siginfo_t *siginfo) + if (cprm.limit < binfmt->min_coredump) + goto fail_unlock; + +- if (need_nonrelative && cn.corename[0] != '/') { ++ if (need_suid_safe && cn.corename[0] != '/') { + printk(KERN_WARNING "Pid %d(%s) can only dump core "\ + "to fully qualified path!\n", + task_tgid_vnr(current), current->comm); +@@ -634,8 +633,35 @@ void do_coredump(const siginfo_t *siginfo) + goto fail_unlock; + } + ++ /* ++ * Unlink the file if it exists unless this is a SUID ++ * binary - in that case, we're running around with root ++ * privs and don't want to unlink another user's coredump. ++ */ ++ if (!need_suid_safe) { ++ mm_segment_t old_fs; ++ ++ old_fs = get_fs(); ++ set_fs(KERNEL_DS); ++ /* ++ * If it doesn't exist, that's fine. If there's some ++ * other problem, we'll catch it at the filp_open(). ++ */ ++ (void) sys_unlink((const char __user *)cn.corename); ++ set_fs(old_fs); ++ } ++ ++ /* ++ * There is a race between unlinking and creating the ++ * file, but if that causes an EEXIST here, that's ++ * fine - another process raced with us while creating ++ * the corefile, and the other process won. To userspace, ++ * what matters is that at least one of the two processes ++ * writes its coredump successfully, not which one. 
++ */ + cprm.file = filp_open(cn.corename, +- O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag, ++ O_CREAT | 2 | O_NOFOLLOW | ++ O_LARGEFILE | O_EXCL, + 0600); + if (IS_ERR(cprm.file)) + goto fail_unlock; +@@ -652,11 +678,15 @@ void do_coredump(const siginfo_t *siginfo) + if (!S_ISREG(inode->i_mode)) + goto close_fail; + /* +- * Dont allow local users get cute and trick others to coredump +- * into their pre-created files. ++ * Don't dump core if the filesystem changed owner or mode ++ * of the file during file creation. This is an issue when ++ * a process dumps core while its cwd is e.g. on a vfat ++ * filesystem. + */ + if (!uid_eq(inode->i_uid, current_fsuid())) + goto close_fail; ++ if ((inode->i_mode & 0677) != 0600) ++ goto close_fail; + if (!(cprm.file->f_mode & FMODE_CAN_WRITE)) + goto close_fail; + if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file)) +diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c +index 8db0b464483f..63cd2c147221 100644 +--- a/fs/ecryptfs/dentry.c ++++ b/fs/ecryptfs/dentry.c +@@ -45,20 +45,20 @@ + static int ecryptfs_d_revalidate(struct dentry *dentry, unsigned int flags) + { + struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); +- int rc; +- +- if (!(lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) +- return 1; ++ int rc = 1; + + if (flags & LOOKUP_RCU) + return -ECHILD; + +- rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); ++ if (lower_dentry->d_flags & DCACHE_OP_REVALIDATE) ++ rc = lower_dentry->d_op->d_revalidate(lower_dentry, flags); ++ + if (d_really_is_positive(dentry)) { +- struct inode *lower_inode = +- ecryptfs_inode_to_lower(d_inode(dentry)); ++ struct inode *inode = d_inode(dentry); + +- fsstack_copy_attr_all(d_inode(dentry), lower_inode); ++ fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode)); ++ if (!inode->i_nlink) ++ return 0; + } + return rc; + } +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 6b4eb94b04a5..ff89971e3ee0 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -324,6 +324,22 @@ static void save_error_info(struct super_block *sb, const char *func, + ext4_commit_super(sb, 1); + } + ++/* ++ * The del_gendisk() function uninitializes the disk-specific data ++ * structures, including the bdi structure, without telling anyone ++ * else. Once this happens, any attempt to call mark_buffer_dirty() ++ * (for example, by ext4_commit_super), will cause a kernel OOPS. ++ * This is a kludge to prevent these oops until we can put in a proper ++ * hook in del_gendisk() to inform the VFS and file system layers. ++ */ ++static int block_device_ejected(struct super_block *sb) ++{ ++ struct inode *bd_inode = sb->s_bdev->bd_inode; ++ struct backing_dev_info *bdi = inode_to_bdi(bd_inode); ++ ++ return bdi->dev == NULL; ++} ++ + static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn) + { + struct super_block *sb = journal->j_private; +@@ -4591,7 +4607,7 @@ static int ext4_commit_super(struct super_block *sb, int sync) + struct buffer_head *sbh = EXT4_SB(sb)->s_sbh; + int error = 0; + +- if (!sbh) ++ if (!sbh || block_device_ejected(sb)) + return error; + if (buffer_write_io_error(sbh)) { + /* +@@ -4807,10 +4823,11 @@ static int ext4_freeze(struct super_block *sb) + error = jbd2_journal_flush(journal); + if (error < 0) + goto out; ++ ++ /* Journal blocked and flushed, clear needs_recovery flag. */ ++ EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); + } + +- /* Journal blocked and flushed, clear needs_recovery flag. 
*/ +- EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); + error = ext4_commit_super(sb, 1); + out: + if (journal) +@@ -4828,8 +4845,11 @@ static int ext4_unfreeze(struct super_block *sb) + if (sb->s_flags & MS_RDONLY) + return 0; + +- /* Reset the needs_recovery flag before the fs is unlocked. */ +- EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); ++ if (EXT4_SB(sb)->s_journal) { ++ /* Reset the needs_recovery flag before the fs is unlocked. */ ++ EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); ++ } ++ + ext4_commit_super(sb, 1); + return 0; + } +diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c +index d3fa6bd9503e..221719eac5de 100644 +--- a/fs/hfs/bnode.c ++++ b/fs/hfs/bnode.c +@@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) + page_cache_release(page); + goto fail; + } +- page_cache_release(page); + node->page[i] = page; + } + +@@ -398,11 +397,11 @@ node_error: + + void hfs_bnode_free(struct hfs_bnode *node) + { +- //int i; ++ int i; + +- //for (i = 0; i < node->tree->pages_per_bnode; i++) +- // if (node->page[i]) +- // page_cache_release(node->page[i]); ++ for (i = 0; i < node->tree->pages_per_bnode; i++) ++ if (node->page[i]) ++ page_cache_release(node->page[i]); + kfree(node); + } + +diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c +index 9f4ee7f52026..6fc766df0461 100644 +--- a/fs/hfs/brec.c ++++ b/fs/hfs/brec.c +@@ -131,13 +131,16 @@ skip: + hfs_bnode_write(node, entry, data_off + key_len, entry_len); + hfs_bnode_dump(node); + +- if (new_node) { +- /* update parent key if we inserted a key +- * at the start of the first node +- */ +- if (!rec && new_node != node) +- hfs_brec_update_parent(fd); ++ /* ++ * update parent key if we inserted a key ++ * at the start of the node and it is not the new node ++ */ ++ if (!rec && new_node != node) { ++ hfs_bnode_read_key(node, fd->search_key, data_off + size); ++ hfs_brec_update_parent(fd); ++ } + ++ if (new_node) { + hfs_bnode_put(fd->bnode); + if (!new_node->parent) { + hfs_btree_inc_height(tree); +@@ -166,9 +169,6 @@ skip: + goto again; + } + +- if (!rec) +- hfs_brec_update_parent(fd); +- + return 0; + } + +@@ -366,6 +366,8 @@ again: + if (IS_ERR(parent)) + return PTR_ERR(parent); + __hfs_brec_find(parent, fd); ++ if (fd->record < 0) ++ return -ENOENT; + hfs_bnode_dump(parent); + rec = fd->record; + +diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c +index 759708fd9331..63924662aaf3 100644 +--- a/fs/hfsplus/bnode.c ++++ b/fs/hfsplus/bnode.c +@@ -454,7 +454,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid) + page_cache_release(page); + goto fail; + } +- page_cache_release(page); + node->page[i] = page; + } + +@@ -566,13 +565,11 @@ node_error: + + void hfs_bnode_free(struct hfs_bnode *node) + { +-#if 0 + int i; + + for (i = 0; i < node->tree->pages_per_bnode; i++) + if (node->page[i]) + page_cache_release(node->page[i]); +-#endif + kfree(node); + } + +diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c +index 4227dc4f7437..8c44654ce274 100644 +--- a/fs/jbd2/checkpoint.c ++++ b/fs/jbd2/checkpoint.c +@@ -417,12 +417,12 @@ int jbd2_cleanup_journal_tail(journal_t *journal) + * journal_clean_one_cp_list + * + * Find all the written-back checkpoint buffers in the given list and +- * release them. ++ * release them. If 'destroy' is set, clean all buffers unconditionally. + * + * Called with j_list_lock held. + * Returns 1 if we freed the transaction, 0 otherwise. 
+ */ +-static int journal_clean_one_cp_list(struct journal_head *jh) ++static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy) + { + struct journal_head *last_jh; + struct journal_head *next_jh = jh; +@@ -436,7 +436,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh) + do { + jh = next_jh; + next_jh = jh->b_cpnext; +- ret = __try_to_free_cp_buf(jh); ++ if (!destroy) ++ ret = __try_to_free_cp_buf(jh); ++ else ++ ret = __jbd2_journal_remove_checkpoint(jh) + 1; + if (!ret) + return freed; + if (ret == 2) +@@ -459,10 +462,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh) + * journal_clean_checkpoint_list + * + * Find all the written-back checkpoint buffers in the journal and release them. ++ * If 'destroy' is set, release all buffers unconditionally. + * + * Called with j_list_lock held. + */ +-void __jbd2_journal_clean_checkpoint_list(journal_t *journal) ++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy) + { + transaction_t *transaction, *last_transaction, *next_transaction; + int ret; +@@ -476,7 +480,8 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal) + do { + transaction = next_transaction; + next_transaction = transaction->t_cpnext; +- ret = journal_clean_one_cp_list(transaction->t_checkpoint_list); ++ ret = journal_clean_one_cp_list(transaction->t_checkpoint_list, ++ destroy); + /* + * This function only frees up some memory if possible so we + * dont have an obligation to finish processing. Bail out if +@@ -492,7 +497,7 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal) + * we can possibly see not yet submitted buffers on io_list + */ + ret = journal_clean_one_cp_list(transaction-> +- t_checkpoint_io_list); ++ t_checkpoint_io_list, destroy); + if (need_resched()) + return; + /* +@@ -506,6 +511,28 @@ void __jbd2_journal_clean_checkpoint_list(journal_t *journal) + } + + /* ++ * Remove buffers from all checkpoint lists as journal is aborted and we just ++ * need to free memory ++ */ ++void jbd2_journal_destroy_checkpoint(journal_t *journal) ++{ ++ /* ++ * We loop because __jbd2_journal_clean_checkpoint_list() may abort ++ * early due to a need of rescheduling. ++ */ ++ while (1) { ++ spin_lock(&journal->j_list_lock); ++ if (!journal->j_checkpoint_transactions) { ++ spin_unlock(&journal->j_list_lock); ++ break; ++ } ++ __jbd2_journal_clean_checkpoint_list(journal, true); ++ spin_unlock(&journal->j_list_lock); ++ cond_resched(); ++ } ++} ++ ++/* + * journal_remove_checkpoint: called after a buffer has been committed + * to disk (either by being write-back flushed to disk, or being + * committed to the log). 
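
/*
 * Editorial aside, not part of the patch: jbd2_journal_destroy_checkpoint()
 * above is a classic "drain under a lock, in bounded passes" loop.  Each
 * pass takes the list lock, checks for emptiness, frees what it can, then
 * drops the lock and yields (cond_resched()) so a partially-aborted journal
 * can be torn down without hogging the CPU.  A self-contained sketch of
 * that shape with hypothetical names, using POSIX threads in place of the
 * kernel's spinlocks:
 */
#include <pthread.h>
#include <sched.h>
#include <stddef.h>

struct node {
	struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *checkpoint_list;

/* One bounded pass; may stop early, like journal_clean_one_cp_list(). */
static void clean_some(int budget)
{
	while (checkpoint_list && budget-- > 0)
		checkpoint_list = checkpoint_list->next;	/* real code frees here */
}

static void destroy_all(void)
{
	for (;;) {
		pthread_mutex_lock(&list_lock);
		if (!checkpoint_list) {		/* nothing left: done */
			pthread_mutex_unlock(&list_lock);
			break;
		}
		clean_some(16);
		pthread_mutex_unlock(&list_lock);
		sched_yield();			/* stand-in for cond_resched() */
	}
}
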
+diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c +index b73e0215baa7..362e5f614450 100644 +--- a/fs/jbd2/commit.c ++++ b/fs/jbd2/commit.c +@@ -510,7 +510,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) + * frees some memory + */ + spin_lock(&journal->j_list_lock); +- __jbd2_journal_clean_checkpoint_list(journal); ++ __jbd2_journal_clean_checkpoint_list(journal, false); + spin_unlock(&journal->j_list_lock); + + jbd_debug(3, "JBD2: commit phase 1\n"); +diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c +index 112fad9e1e20..7003c0925760 100644 +--- a/fs/jbd2/journal.c ++++ b/fs/jbd2/journal.c +@@ -1708,8 +1708,17 @@ int jbd2_journal_destroy(journal_t *journal) + while (journal->j_checkpoint_transactions != NULL) { + spin_unlock(&journal->j_list_lock); + mutex_lock(&journal->j_checkpoint_mutex); +- jbd2_log_do_checkpoint(journal); ++ err = jbd2_log_do_checkpoint(journal); + mutex_unlock(&journal->j_checkpoint_mutex); ++ /* ++ * If checkpointing failed, just free the buffers to avoid ++ * looping forever ++ */ ++ if (err) { ++ jbd2_journal_destroy_checkpoint(journal); ++ spin_lock(&journal->j_list_lock); ++ break; ++ } + spin_lock(&journal->j_list_lock); + } + +diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c +index 6f5f0f425e86..fecd9201dbad 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayout.c ++++ b/fs/nfs/flexfilelayout/flexfilelayout.c +@@ -1039,6 +1039,11 @@ static int ff_layout_write_done_cb(struct rpc_task *task, + hdr->res.verf->committed == NFS_DATA_SYNC) + ff_layout_set_layoutcommit(hdr); + ++ /* zero out fattr since we don't care DS attr at all */ ++ hdr->fattr.valid = 0; ++ if (task->tk_status >= 0) ++ nfs_writeback_update_inode(hdr); ++ + return 0; + } + +diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c +index f13e1969eedd..b28fa4cbea52 100644 +--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c ++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c +@@ -500,16 +500,19 @@ int ff_layout_encode_ds_ioerr(struct nfs4_flexfile_layout *flo, + range->offset, range->length)) + continue; + /* offset(8) + length(8) + stateid(NFS4_STATEID_SIZE) +- * + deviceid(NFS4_DEVICEID4_SIZE) + status(4) + opnum(4) ++ * + array length + deviceid(NFS4_DEVICEID4_SIZE) ++ * + status(4) + opnum(4) + */ + p = xdr_reserve_space(xdr, +- 24 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); ++ 28 + NFS4_STATEID_SIZE + NFS4_DEVICEID4_SIZE); + if (unlikely(!p)) + return -ENOBUFS; + p = xdr_encode_hyper(p, err->offset); + p = xdr_encode_hyper(p, err->length); + p = xdr_encode_opaque_fixed(p, &err->stateid, + NFS4_STATEID_SIZE); ++ /* Encode 1 error */ ++ *p++ = cpu_to_be32(1); + p = xdr_encode_opaque_fixed(p, &err->deviceid, + NFS4_DEVICEID4_SIZE); + *p++ = cpu_to_be32(err->status); +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 5d25b9d97c29..976ba792fbc6 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -1270,13 +1270,6 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat + return 0; + } + +-static int nfs_ctime_need_update(const struct inode *inode, const struct nfs_fattr *fattr) +-{ +- if (!(fattr->valid & NFS_ATTR_FATTR_CTIME)) +- return 0; +- return timespec_compare(&fattr->ctime, &inode->i_ctime) > 0; +-} +- + static atomic_long_t nfs_attr_generation_counter; + + static unsigned long nfs_read_attr_generation_counter(void) +@@ -1425,7 +1418,6 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n + const struct nfs_inode *nfsi = NFS_I(inode); + + 
return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 || +- nfs_ctime_need_update(inode, fattr) || + ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); + } + +@@ -1488,6 +1480,13 @@ static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr + { + unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; + ++ /* ++ * Don't revalidate the pagecache if we hold a delegation, but do ++ * force an attribute update ++ */ ++ if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) ++ invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED; ++ + if (S_ISDIR(inode->i_mode)) + invalid |= NFS_INO_INVALID_DATA; + nfs_set_cache_invalid(inode, invalid); +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index d3f205126609..c245874d7e9d 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -1152,6 +1152,8 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) + return 0; + if ((delegation->type & fmode) != fmode) + return 0; ++ if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) ++ return 0; + if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) + return 0; + nfs_mark_delegation_referenced(delegation); +@@ -1216,6 +1218,7 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state) + } + + static void nfs_clear_open_stateid_locked(struct nfs4_state *state, ++ nfs4_stateid *arg_stateid, + nfs4_stateid *stateid, fmode_t fmode) + { + clear_bit(NFS_O_RDWR_STATE, &state->flags); +@@ -1234,8 +1237,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, + if (stateid == NULL) + return; + /* Handle races with OPEN */ +- if (!nfs4_stateid_match_other(stateid, &state->open_stateid) || +- !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { ++ if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || ++ (nfs4_stateid_match_other(stateid, &state->open_stateid) && ++ !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { + nfs_resync_open_stateid_locked(state); + return; + } +@@ -1244,10 +1248,12 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, + nfs4_stateid_copy(&state->open_stateid, stateid); + } + +-static void nfs_clear_open_stateid(struct nfs4_state *state, nfs4_stateid *stateid, fmode_t fmode) ++static void nfs_clear_open_stateid(struct nfs4_state *state, ++ nfs4_stateid *arg_stateid, ++ nfs4_stateid *stateid, fmode_t fmode) + { + write_seqlock(&state->seqlock); +- nfs_clear_open_stateid_locked(state, stateid, fmode); ++ nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); + write_sequnlock(&state->seqlock); + if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) + nfs4_schedule_state_manager(state->owner->so_server->nfs_client); +@@ -2413,7 +2419,7 @@ static int _nfs4_do_open(struct inode *dir, + goto err_free_label; + state = ctx->state; + +- if ((opendata->o_arg.open_flags & O_EXCL) && ++ if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL) && + (opendata->o_arg.createmode != NFS4_CREATE_GUARDED)) { + nfs4_exclusive_attrset(opendata, sattr); + +@@ -2672,7 +2678,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data) + goto out_release; + } + } +- nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode); ++ nfs_clear_open_stateid(state, &calldata->arg.stateid, ++ res_stateid, calldata->arg.fmode); + out_release: + nfs_release_seqid(calldata->arg.seqid); + nfs_refresh_inode(calldata->inode, calldata->res.fattr); +@@ -8571,6 +8578,7 @@ static const struct 
nfs4_minor_version_ops nfs_v4_2_minor_ops = { + .reboot_recovery_ops = &nfs41_reboot_recovery_ops, + .nograce_recovery_ops = &nfs41_nograce_recovery_ops, + .state_renewal_ops = &nfs41_state_renewal_ops, ++ .mig_recovery_ops = &nfs41_mig_recovery_ops, + }; + #endif + +diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c +index 7b4552678536..069914ce7641 100644 +--- a/fs/nfs/pagelist.c ++++ b/fs/nfs/pagelist.c +@@ -77,8 +77,8 @@ EXPORT_SYMBOL_GPL(nfs_pgheader_init); + void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos) + { + spin_lock(&hdr->lock); +- if (pos < hdr->io_start + hdr->good_bytes) { +- set_bit(NFS_IOHDR_ERROR, &hdr->flags); ++ if (!test_and_set_bit(NFS_IOHDR_ERROR, &hdr->flags) ++ || pos < hdr->io_start + hdr->good_bytes) { + clear_bit(NFS_IOHDR_EOF, &hdr->flags); + hdr->good_bytes = pos - hdr->io_start; + hdr->error = error; +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c +index f37e25b6311c..1705c78ee2d8 100644 +--- a/fs/nfs/pnfs_nfs.c ++++ b/fs/nfs/pnfs_nfs.c +@@ -359,26 +359,31 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) + return false; + } + ++/* ++ * Checks if 'dsaddrs1' contains a subset of 'dsaddrs2'. If it does, ++ * declare a match. ++ */ + static bool + _same_data_server_addrs_locked(const struct list_head *dsaddrs1, + const struct list_head *dsaddrs2) + { + struct nfs4_pnfs_ds_addr *da1, *da2; +- +- /* step through both lists, comparing as we go */ +- for (da1 = list_first_entry(dsaddrs1, typeof(*da1), da_node), +- da2 = list_first_entry(dsaddrs2, typeof(*da2), da_node); +- da1 != NULL && da2 != NULL; +- da1 = list_entry(da1->da_node.next, typeof(*da1), da_node), +- da2 = list_entry(da2->da_node.next, typeof(*da2), da_node)) { +- if (!same_sockaddr((struct sockaddr *)&da1->da_addr, +- (struct sockaddr *)&da2->da_addr)) +- return false; ++ struct sockaddr *sa1, *sa2; ++ bool match = false; ++ ++ list_for_each_entry(da1, dsaddrs1, da_node) { ++ sa1 = (struct sockaddr *)&da1->da_addr; ++ match = false; ++ list_for_each_entry(da2, dsaddrs2, da_node) { ++ sa2 = (struct sockaddr *)&da2->da_addr; ++ match = same_sockaddr(sa1, sa2); ++ if (match) ++ break; ++ } ++ if (!match) ++ break; + } +- if (da1 == NULL && da2 == NULL) +- return true; +- +- return false; ++ return match; + } + + /* +diff --git a/fs/nfs/write.c b/fs/nfs/write.c +index daf355642845..07115b9b1ad2 100644 +--- a/fs/nfs/write.c ++++ b/fs/nfs/write.c +@@ -1383,24 +1383,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, + { + struct nfs_pgio_args *argp = &hdr->args; + struct nfs_pgio_res *resp = &hdr->res; ++ u64 size = argp->offset + resp->count; + + if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) ++ fattr->size = size; ++ if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { ++ fattr->valid &= ~NFS_ATTR_FATTR_SIZE; + return; +- if (argp->offset + resp->count != fattr->size) +- return; +- if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) ++ } ++ if (size != fattr->size) + return; + /* Set attribute barrier */ + nfs_fattr_set_barrier(fattr); ++ /* ...and update size */ ++ fattr->valid |= NFS_ATTR_FATTR_SIZE; + } + + void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) + { +- struct nfs_fattr *fattr = hdr->res.fattr; ++ struct nfs_fattr *fattr = &hdr->fattr; + struct inode *inode = hdr->inode; + +- if (fattr == NULL) +- return; + spin_lock(&inode->i_lock); + nfs_writeback_check_extend(hdr, fattr); + nfs_post_op_update_inode_force_wcc_locked(inode, fattr); +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index 
6e13504f736e..397798368b1a 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -777,13 +777,16 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp) + list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations); + } + +-static void ++static bool + unhash_delegation_locked(struct nfs4_delegation *dp) + { + struct nfs4_file *fp = dp->dl_stid.sc_file; + + lockdep_assert_held(&state_lock); + ++ if (list_empty(&dp->dl_perfile)) ++ return false; ++ + dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID; + /* Ensure that deleg break won't try to requeue it */ + ++dp->dl_time; +@@ -792,16 +795,21 @@ unhash_delegation_locked(struct nfs4_delegation *dp) + list_del_init(&dp->dl_recall_lru); + list_del_init(&dp->dl_perfile); + spin_unlock(&fp->fi_lock); ++ return true; + } + + static void destroy_delegation(struct nfs4_delegation *dp) + { ++ bool unhashed; ++ + spin_lock(&state_lock); +- unhash_delegation_locked(dp); ++ unhashed = unhash_delegation_locked(dp); + spin_unlock(&state_lock); +- put_clnt_odstate(dp->dl_clnt_odstate); +- nfs4_put_deleg_lease(dp->dl_stid.sc_file); +- nfs4_put_stid(&dp->dl_stid); ++ if (unhashed) { ++ put_clnt_odstate(dp->dl_clnt_odstate); ++ nfs4_put_deleg_lease(dp->dl_stid.sc_file); ++ nfs4_put_stid(&dp->dl_stid); ++ } + } + + static void revoke_delegation(struct nfs4_delegation *dp) +@@ -1004,16 +1012,20 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop) + sop->so_ops->so_free(sop); + } + +-static void unhash_ol_stateid(struct nfs4_ol_stateid *stp) ++static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp) + { + struct nfs4_file *fp = stp->st_stid.sc_file; + + lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock); + ++ if (list_empty(&stp->st_perfile)) ++ return false; ++ + spin_lock(&fp->fi_lock); +- list_del(&stp->st_perfile); ++ list_del_init(&stp->st_perfile); + spin_unlock(&fp->fi_lock); + list_del(&stp->st_perstateowner); ++ return true; + } + + static void nfs4_free_ol_stateid(struct nfs4_stid *stid) +@@ -1063,25 +1075,27 @@ static void put_ol_stateid_locked(struct nfs4_ol_stateid *stp, + list_add(&stp->st_locks, reaplist); + } + +-static void unhash_lock_stateid(struct nfs4_ol_stateid *stp) ++static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp) + { + struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); + + lockdep_assert_held(&oo->oo_owner.so_client->cl_lock); + + list_del_init(&stp->st_locks); +- unhash_ol_stateid(stp); + nfs4_unhash_stid(&stp->st_stid); ++ return unhash_ol_stateid(stp); + } + + static void release_lock_stateid(struct nfs4_ol_stateid *stp) + { + struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner); ++ bool unhashed; + + spin_lock(&oo->oo_owner.so_client->cl_lock); +- unhash_lock_stateid(stp); ++ unhashed = unhash_lock_stateid(stp); + spin_unlock(&oo->oo_owner.so_client->cl_lock); +- nfs4_put_stid(&stp->st_stid); ++ if (unhashed) ++ nfs4_put_stid(&stp->st_stid); + } + + static void unhash_lockowner_locked(struct nfs4_lockowner *lo) +@@ -1129,7 +1143,7 @@ static void release_lockowner(struct nfs4_lockowner *lo) + while (!list_empty(&lo->lo_owner.so_stateids)) { + stp = list_first_entry(&lo->lo_owner.so_stateids, + struct nfs4_ol_stateid, st_perstateowner); +- unhash_lock_stateid(stp); ++ WARN_ON(!unhash_lock_stateid(stp)); + put_ol_stateid_locked(stp, &reaplist); + } + spin_unlock(&clp->cl_lock); +@@ -1142,21 +1156,26 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp, + { + struct nfs4_ol_stateid *stp; + ++ 
lockdep_assert_held(&open_stp->st_stid.sc_client->cl_lock); ++ + while (!list_empty(&open_stp->st_locks)) { + stp = list_entry(open_stp->st_locks.next, + struct nfs4_ol_stateid, st_locks); +- unhash_lock_stateid(stp); ++ WARN_ON(!unhash_lock_stateid(stp)); + put_ol_stateid_locked(stp, reaplist); + } + } + +-static void unhash_open_stateid(struct nfs4_ol_stateid *stp, ++static bool unhash_open_stateid(struct nfs4_ol_stateid *stp, + struct list_head *reaplist) + { ++ bool unhashed; ++ + lockdep_assert_held(&stp->st_stid.sc_client->cl_lock); + +- unhash_ol_stateid(stp); ++ unhashed = unhash_ol_stateid(stp); + release_open_stateid_locks(stp, reaplist); ++ return unhashed; + } + + static void release_open_stateid(struct nfs4_ol_stateid *stp) +@@ -1164,8 +1183,8 @@ static void release_open_stateid(struct nfs4_ol_stateid *stp) + LIST_HEAD(reaplist); + + spin_lock(&stp->st_stid.sc_client->cl_lock); +- unhash_open_stateid(stp, &reaplist); +- put_ol_stateid_locked(stp, &reaplist); ++ if (unhash_open_stateid(stp, &reaplist)) ++ put_ol_stateid_locked(stp, &reaplist); + spin_unlock(&stp->st_stid.sc_client->cl_lock); + free_ol_stateid_reaplist(&reaplist); + } +@@ -1210,8 +1229,8 @@ static void release_openowner(struct nfs4_openowner *oo) + while (!list_empty(&oo->oo_owner.so_stateids)) { + stp = list_first_entry(&oo->oo_owner.so_stateids, + struct nfs4_ol_stateid, st_perstateowner); +- unhash_open_stateid(stp, &reaplist); +- put_ol_stateid_locked(stp, &reaplist); ++ if (unhash_open_stateid(stp, &reaplist)) ++ put_ol_stateid_locked(stp, &reaplist); + } + spin_unlock(&clp->cl_lock); + free_ol_stateid_reaplist(&reaplist); +@@ -1714,7 +1733,7 @@ __destroy_client(struct nfs4_client *clp) + spin_lock(&state_lock); + while (!list_empty(&clp->cl_delegations)) { + dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); +- unhash_delegation_locked(dp); ++ WARN_ON(!unhash_delegation_locked(dp)); + list_add(&dp->dl_recall_lru, &reaplist); + } + spin_unlock(&state_lock); +@@ -4346,7 +4365,7 @@ nfs4_laundromat(struct nfsd_net *nn) + new_timeo = min(new_timeo, t); + break; + } +- unhash_delegation_locked(dp); ++ WARN_ON(!unhash_delegation_locked(dp)); + list_add(&dp->dl_recall_lru, &reaplist); + } + spin_unlock(&state_lock); +@@ -4714,7 +4733,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + if (check_for_locks(stp->st_stid.sc_file, + lockowner(stp->st_stateowner))) + break; +- unhash_lock_stateid(stp); ++ WARN_ON(!unhash_lock_stateid(stp)); + spin_unlock(&cl->cl_lock); + nfs4_put_stid(s); + ret = nfs_ok; +@@ -4930,20 +4949,23 @@ out: + static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) + { + struct nfs4_client *clp = s->st_stid.sc_client; ++ bool unhashed; + LIST_HEAD(reaplist); + + s->st_stid.sc_type = NFS4_CLOSED_STID; + spin_lock(&clp->cl_lock); +- unhash_open_stateid(s, &reaplist); ++ unhashed = unhash_open_stateid(s, &reaplist); + + if (clp->cl_minorversion) { +- put_ol_stateid_locked(s, &reaplist); ++ if (unhashed) ++ put_ol_stateid_locked(s, &reaplist); + spin_unlock(&clp->cl_lock); + free_ol_stateid_reaplist(&reaplist); + } else { + spin_unlock(&clp->cl_lock); + free_ol_stateid_reaplist(&reaplist); +- move_to_close_lru(s, clp->net); ++ if (unhashed) ++ move_to_close_lru(s, clp->net); + } + } + +@@ -5982,7 +6004,7 @@ nfsd_inject_add_lock_to_list(struct nfs4_ol_stateid *lst, + + static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, + struct list_head *collect, +- void (*func)(struct nfs4_ol_stateid *)) ++ bool 
(*func)(struct nfs4_ol_stateid *)) + { + struct nfs4_openowner *oop; + struct nfs4_ol_stateid *stp, *st_next; +@@ -5996,9 +6018,9 @@ static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max, + list_for_each_entry_safe(lst, lst_next, + &stp->st_locks, st_locks) { + if (func) { +- func(lst); +- nfsd_inject_add_lock_to_list(lst, +- collect); ++ if (func(lst)) ++ nfsd_inject_add_lock_to_list(lst, ++ collect); + } + ++count; + /* +@@ -6268,7 +6290,7 @@ static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max, + continue; + + atomic_inc(&clp->cl_refcount); +- unhash_delegation_locked(dp); ++ WARN_ON(!unhash_delegation_locked(dp)); + list_add(&dp->dl_recall_lru, victims); + } + ++count; +@@ -6598,7 +6620,7 @@ nfs4_state_shutdown_net(struct net *net) + spin_lock(&state_lock); + list_for_each_safe(pos, next, &nn->del_recall_lru) { + dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); +- unhash_delegation_locked(dp); ++ WARN_ON(!unhash_delegation_locked(dp)); + list_add(&dp->dl_recall_lru, &reaplist); + } + spin_unlock(&state_lock); +diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c +index d4d84451e0e6..3dd1b616b92b 100644 +--- a/fs/nfsd/nfs4xdr.c ++++ b/fs/nfsd/nfs4xdr.c +@@ -2139,6 +2139,27 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp, + return nfsd4_encode_user(xdr, rqstp, ace->who_uid); + } + ++static inline __be32 ++nfsd4_encode_layout_type(struct xdr_stream *xdr, enum pnfs_layouttype layout_type) ++{ ++ __be32 *p; ++ ++ if (layout_type) { ++ p = xdr_reserve_space(xdr, 8); ++ if (!p) ++ return nfserr_resource; ++ *p++ = cpu_to_be32(1); ++ *p++ = cpu_to_be32(layout_type); ++ } else { ++ p = xdr_reserve_space(xdr, 4); ++ if (!p) ++ return nfserr_resource; ++ *p++ = cpu_to_be32(0); ++ } ++ ++ return 0; ++} ++ + #define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ + FATTR4_WORD0_RDATTR_ERROR) + #define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID +@@ -2692,20 +2713,16 @@ out_acl: + p = xdr_encode_hyper(p, stat.ino); + } + #ifdef CONFIG_NFSD_PNFS +- if ((bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) || +- (bmval2 & FATTR4_WORD2_LAYOUT_TYPES)) { +- if (exp->ex_layout_type) { +- p = xdr_reserve_space(xdr, 8); +- if (!p) +- goto out_resource; +- *p++ = cpu_to_be32(1); +- *p++ = cpu_to_be32(exp->ex_layout_type); +- } else { +- p = xdr_reserve_space(xdr, 4); +- if (!p) +- goto out_resource; +- *p++ = cpu_to_be32(0); +- } ++ if (bmval1 & FATTR4_WORD1_FS_LAYOUT_TYPES) { ++ status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type); ++ if (status) ++ goto out; ++ } ++ ++ if (bmval2 & FATTR4_WORD2_LAYOUT_TYPES) { ++ status = nfsd4_encode_layout_type(xdr, exp->ex_layout_type); ++ if (status) ++ goto out; + } + + if (bmval2 & FATTR4_WORD2_LAYOUT_BLKSIZE) { +diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h +index edb640ae9a94..eb1cebed3f36 100644 +--- a/include/linux/jbd2.h ++++ b/include/linux/jbd2.h +@@ -1042,8 +1042,9 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block); + extern void jbd2_journal_commit_transaction(journal_t *); + + /* Checkpoint list management */ +-void __jbd2_journal_clean_checkpoint_list(journal_t *journal); ++void __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy); + int __jbd2_journal_remove_checkpoint(struct journal_head *); ++void jbd2_journal_destroy_checkpoint(journal_t *journal); + void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); + + +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 
0755b9fd03a7..b2085582d44e 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1002,6 +1002,34 @@ static inline int page_mapped(struct page *page) + } + + /* ++ * Return true only if the page has been allocated with ++ * ALLOC_NO_WATERMARKS and the low watermark was not ++ * met implying that the system is under some pressure. ++ */ ++static inline bool page_is_pfmemalloc(struct page *page) ++{ ++ /* ++ * Page index cannot be this large so this must be ++ * a pfmemalloc page. ++ */ ++ return page->index == -1UL; ++} ++ ++/* ++ * Only to be called by the page allocator on a freshly allocated ++ * page. ++ */ ++static inline void set_page_pfmemalloc(struct page *page) ++{ ++ page->index = -1UL; ++} ++ ++static inline void clear_page_pfmemalloc(struct page *page) ++{ ++ page->index = 0; ++} ++ ++/* + * Different kinds of faults, as returned by handle_mm_fault(). + * Used to decide whether a process gets delivered SIGBUS or + * just gets major/minor fault counters bumped up. +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 8d37e26a1007..c0c6b33535fb 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -63,15 +63,6 @@ struct page { + union { + pgoff_t index; /* Our offset within mapping. */ + void *freelist; /* sl[aou]b first free object */ +- bool pfmemalloc; /* If set by the page allocator, +- * ALLOC_NO_WATERMARKS was set +- * and the low watermark was not +- * met implying that the system +- * is under some pressure. The +- * caller should try ensure +- * this page is only used to +- * free other pages. +- */ + }; + + union { +diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h +index f15154a879c7..eb1c55b8255a 100644 +--- a/include/linux/skbuff.h ++++ b/include/linux/skbuff.h +@@ -1590,20 +1590,16 @@ static inline void __skb_fill_page_desc(struct sk_buff *skb, int i, + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + /* +- * Propagate page->pfmemalloc to the skb if we can. The problem is +- * that not all callers have unique ownership of the page. If +- * pfmemalloc is set, we check the mapping as a mapping implies +- * page->index is set (index and pfmemalloc share space). +- * If it's a valid mapping, we cannot use page->pfmemalloc but we +- * do not lose pfmemalloc information as the pages would not be +- * allocated using __GFP_MEMALLOC. ++ * Propagate page pfmemalloc to the skb if we can. The problem is ++ * that not all callers have unique ownership of the page but rely ++ * on page_is_pfmemalloc doing the right thing(tm). 
+ */ + frag->page.p = page; + frag->page_offset = off; + skb_frag_size_set(frag, size); + + page = compound_head(page); +- if (page->pfmemalloc && !page->mapping) ++ if (page_is_pfmemalloc(page)) + skb->pfmemalloc = true; + } + +@@ -2250,7 +2246,7 @@ static inline struct page *dev_alloc_page(void) + static inline void skb_propagate_pfmemalloc(struct page *page, + struct sk_buff *skb) + { +- if (page && page->pfmemalloc) ++ if (page_is_pfmemalloc(page)) + skb->pfmemalloc = true; + } + +diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h +index 7591788e9fbf..357e44c1a46b 100644 +--- a/include/linux/sunrpc/xprtsock.h ++++ b/include/linux/sunrpc/xprtsock.h +@@ -42,6 +42,7 @@ struct sock_xprt { + /* + * Connection of transports + */ ++ unsigned long sock_state; + struct delayed_work connect_worker; + struct sockaddr_storage srcaddr; + unsigned short srcport; +@@ -76,6 +77,8 @@ struct sock_xprt { + */ + #define TCP_RPC_REPLY (1UL << 6) + ++#define XPRT_SOCK_CONNECTING 1U ++ + #endif /* __KERNEL__ */ + + #endif /* _LINUX_SUNRPC_XPRTSOCK_H */ +diff --git a/include/net/act_api.h b/include/net/act_api.h +index 3ee4c92afd1b..931738bc5bba 100644 +--- a/include/net/act_api.h ++++ b/include/net/act_api.h +@@ -99,7 +99,6 @@ struct tc_action_ops { + + int tcf_hash_search(struct tc_action *a, u32 index); + void tcf_hash_destroy(struct tc_action *a); +-int tcf_hash_release(struct tc_action *a, int bind); + u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); + int tcf_hash_check(u32 index, struct tc_action *a, int bind); + int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, +@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, + void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); + void tcf_hash_insert(struct tc_action *a); + ++int __tcf_hash_release(struct tc_action *a, bool bind, bool strict); ++ ++static inline int tcf_hash_release(struct tc_action *a, bool bind) ++{ ++ return __tcf_hash_release(a, bind, false); ++} ++ + int tcf_register_action(struct tc_action_ops *a, unsigned int mask); + int tcf_unregister_action(struct tc_action_ops *a); + int tcf_action_destroy(struct list_head *actions, int bind); +diff --git a/include/net/ip.h b/include/net/ip.h +index d14af7edd197..f41fc497b21b 100644 +--- a/include/net/ip.h ++++ b/include/net/ip.h +@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) + } + + /* datagram.c */ ++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); + int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); + + void ip4_datagram_release_cb(struct sock *sk); +diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h +index 63deb8d9f82a..d298857cd845 100644 +--- a/include/soc/tegra/mc.h ++++ b/include/soc/tegra/mc.h +@@ -59,6 +59,7 @@ struct tegra_smmu_soc { + bool supports_round_robin_arbitration; + bool supports_request_limit; + ++ unsigned int num_tlb_lines; + unsigned int num_asids; + + const struct tegra_smmu_ops *ops; +diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h +index fd1a02cb3c82..003dca933803 100644 +--- a/include/trace/events/sunrpc.h ++++ b/include/trace/events/sunrpc.h +@@ -529,18 +529,21 @@ TRACE_EVENT(svc_xprt_do_enqueue, + + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) +- __field(struct svc_rqst *, rqst) ++ __field_struct(struct sockaddr_storage, ss) ++ __field(int, pid) ++ __field(unsigned long, flags) + ), + + TP_fast_assign( + 
__entry->xprt = xprt; +- __entry->rqst = rqst; ++ xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); ++ __entry->pid = rqst? rqst->rq_task->pid : 0; ++ __entry->flags = xprt ? xprt->xpt_flags : 0; + ), + + TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt, +- (struct sockaddr *)&__entry->xprt->xpt_remote, +- __entry->rqst ? __entry->rqst->rq_task->pid : 0, +- show_svc_xprt_flags(__entry->xprt->xpt_flags)) ++ (struct sockaddr *)&__entry->ss, ++ __entry->pid, show_svc_xprt_flags(__entry->flags)) + ); + + TRACE_EVENT(svc_xprt_dequeue, +@@ -589,16 +592,20 @@ TRACE_EVENT(svc_handle_xprt, + TP_STRUCT__entry( + __field(struct svc_xprt *, xprt) + __field(int, len) ++ __field_struct(struct sockaddr_storage, ss) ++ __field(unsigned long, flags) + ), + + TP_fast_assign( + __entry->xprt = xprt; ++ xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); + __entry->len = len; ++ __entry->flags = xprt ? xprt->xpt_flags : 0; + ), + + TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt, +- (struct sockaddr *)&__entry->xprt->xpt_remote, __entry->len, +- show_svc_xprt_flags(__entry->xprt->xpt_flags)) ++ (struct sockaddr *)&__entry->ss, ++ __entry->len, show_svc_xprt_flags(__entry->flags)) + ); + #endif /* _TRACE_SUNRPC_H */ + +diff --git a/kernel/fork.c b/kernel/fork.c +index 03c1eaaa6ef5..8209fa2d36ef 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -1854,13 +1854,21 @@ static int check_unshare_flags(unsigned long unshare_flags) + CLONE_NEWUSER|CLONE_NEWPID)) + return -EINVAL; + /* +- * Not implemented, but pretend it works if there is nothing to +- * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND +- * needs to unshare vm. ++ * Not implemented, but pretend it works if there is nothing ++ * to unshare. Note that unsharing the address space or the ++ * signal handlers also need to unshare the signal queues (aka ++ * CLONE_THREAD). + */ + if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) { +- /* FIXME: get_task_mm() increments ->mm_users */ +- if (atomic_read(¤t->mm->mm_users) > 1) ++ if (!thread_group_empty(current)) ++ return -EINVAL; ++ } ++ if (unshare_flags & (CLONE_SIGHAND | CLONE_VM)) { ++ if (atomic_read(¤t->sighand->count) > 1) ++ return -EINVAL; ++ } ++ if (unshare_flags & CLONE_VM) { ++ if (!current_is_single_threaded()) + return -EINVAL; + } + +@@ -1929,16 +1937,16 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) + if (unshare_flags & CLONE_NEWUSER) + unshare_flags |= CLONE_THREAD | CLONE_FS; + /* +- * If unsharing a thread from a thread group, must also unshare vm. +- */ +- if (unshare_flags & CLONE_THREAD) +- unshare_flags |= CLONE_VM; +- /* + * If unsharing vm, must also unshare signal handlers. + */ + if (unshare_flags & CLONE_VM) + unshare_flags |= CLONE_SIGHAND; + /* ++ * If unsharing a signal handlers, must also unshare the signal queues. ++ */ ++ if (unshare_flags & CLONE_SIGHAND) ++ unshare_flags |= CLONE_THREAD; ++ /* + * If unsharing namespace, must also unshare filesystem information. 
+ */ + if (unshare_flags & CLONE_NEWNS) +diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c +index 6dd0335ea61b..0234361b24b8 100644 +--- a/lib/decompress_bunzip2.c ++++ b/lib/decompress_bunzip2.c +@@ -743,12 +743,12 @@ exit_0: + } + + #ifdef PREBOOT +-STATIC int INIT decompress(unsigned char *buf, long len, ++STATIC int INIT __decompress(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), +- unsigned char *outbuf, ++ unsigned char *outbuf, long olen, + long *pos, +- void(*error)(char *x)) ++ void (*error)(char *x)) + { + return bunzip2(buf, len - 4, fill, flush, outbuf, pos, error); + } +diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c +index d4c7891635ec..555c06bf20da 100644 +--- a/lib/decompress_inflate.c ++++ b/lib/decompress_inflate.c +@@ -1,4 +1,5 @@ + #ifdef STATIC ++#define PREBOOT + /* Pre-boot environment: included */ + + /* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots +@@ -33,23 +34,23 @@ static long INIT nofill(void *buffer, unsigned long len) + } + + /* Included from initramfs et al code */ +-STATIC int INIT gunzip(unsigned char *buf, long len, ++STATIC int INIT __gunzip(unsigned char *buf, long len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), +- unsigned char *out_buf, ++ unsigned char *out_buf, long out_len, + long *pos, + void(*error)(char *x)) { + u8 *zbuf; + struct z_stream_s *strm; + int rc; +- size_t out_len; + + rc = -1; + if (flush) { + out_len = 0x8000; /* 32 K */ + out_buf = malloc(out_len); + } else { +- out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */ ++ if (!out_len) ++ out_len = ((size_t)~0) - (size_t)out_buf; /* no limit */ + } + if (!out_buf) { + error("Out of memory while allocating output buffer"); +@@ -181,4 +182,24 @@ gunzip_nomem1: + return rc; /* returns Z_OK (0) if successful */ + } + +-#define decompress gunzip ++#ifndef PREBOOT ++STATIC int INIT gunzip(unsigned char *buf, long len, ++ long (*fill)(void*, unsigned long), ++ long (*flush)(void*, unsigned long), ++ unsigned char *out_buf, ++ long *pos, ++ void (*error)(char *x)) ++{ ++ return __gunzip(buf, len, fill, flush, out_buf, 0, pos, error); ++} ++#else ++STATIC int INIT __decompress(unsigned char *buf, long len, ++ long (*fill)(void*, unsigned long), ++ long (*flush)(void*, unsigned long), ++ unsigned char *out_buf, long out_len, ++ long *pos, ++ void (*error)(char *x)) ++{ ++ return __gunzip(buf, len, fill, flush, out_buf, out_len, pos, error); ++} ++#endif +diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c +index 40f66ebe57b7..036fc882cd72 100644 +--- a/lib/decompress_unlz4.c ++++ b/lib/decompress_unlz4.c +@@ -196,12 +196,12 @@ exit_0: + } + + #ifdef PREBOOT +-STATIC int INIT decompress(unsigned char *buf, long in_len, ++STATIC int INIT __decompress(unsigned char *buf, long in_len, + long (*fill)(void*, unsigned long), + long (*flush)(void*, unsigned long), +- unsigned char *output, ++ unsigned char *output, long out_len, + long *posp, +- void(*error)(char *x) ++ void (*error)(char *x) + ) + { + return unlz4(buf, in_len - 4, fill, flush, output, posp, error); +diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c +index 0be83af62b88..decb64629c14 100644 +--- a/lib/decompress_unlzma.c ++++ b/lib/decompress_unlzma.c +@@ -667,13 +667,12 @@ exit_0: + } + + #ifdef PREBOOT +-STATIC int INIT decompress(unsigned char *buf, long in_len, ++STATIC int INIT __decompress(unsigned char *buf, long in_len, + long (*fill)(void*, unsigned long), + 
long (*flush)(void*, unsigned long), +- unsigned char *output, ++ unsigned char *output, long out_len, + long *posp, +- void(*error)(char *x) +- ) ++ void (*error)(char *x)) + { + return unlzma(buf, in_len - 4, fill, flush, output, posp, error); + } +diff --git a/lib/decompress_unlzo.c b/lib/decompress_unlzo.c +index b94a31bdd87d..f4c158e3a022 100644 +--- a/lib/decompress_unlzo.c ++++ b/lib/decompress_unlzo.c +@@ -31,6 +31,7 @@ + */ + + #ifdef STATIC ++#define PREBOOT + #include "lzo/lzo1x_decompress_safe.c" + #else + #include <linux/decompress/unlzo.h> +@@ -287,4 +288,14 @@ exit: + return ret; + } + +-#define decompress unlzo ++#ifdef PREBOOT ++STATIC int INIT __decompress(unsigned char *buf, long len, ++ long (*fill)(void*, unsigned long), ++ long (*flush)(void*, unsigned long), ++ unsigned char *out_buf, long olen, ++ long *pos, ++ void (*error)(char *x)) ++{ ++ return unlzo(buf, len, fill, flush, out_buf, pos, error); ++} ++#endif +diff --git a/lib/decompress_unxz.c b/lib/decompress_unxz.c +index b07a78340e9d..25d59a95bd66 100644 +--- a/lib/decompress_unxz.c ++++ b/lib/decompress_unxz.c +@@ -394,4 +394,14 @@ error_alloc_state: + * This macro is used by architecture-specific files to decompress + * the kernel image. + */ +-#define decompress unxz ++#ifdef XZ_PREBOOT ++STATIC int INIT __decompress(unsigned char *buf, long len, ++ long (*fill)(void*, unsigned long), ++ long (*flush)(void*, unsigned long), ++ unsigned char *out_buf, long olen, ++ long *pos, ++ void (*error)(char *x)) ++{ ++ return unxz(buf, len, fill, flush, out_buf, pos, error); ++} ++#endif +diff --git a/lib/rhashtable.c b/lib/rhashtable.c +index 8609378e6505..cf910e48f8f2 100644 +--- a/lib/rhashtable.c ++++ b/lib/rhashtable.c +@@ -612,6 +612,8 @@ next: + iter->skip = 0; + } + ++ iter->p = NULL; ++ + /* Ensure we see any new tables. */ + smp_rmb(); + +@@ -622,8 +624,6 @@ next: + return ERR_PTR(-EAGAIN); + } + +- iter->p = NULL; +- + out: + + return obj; +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index ebffa0e4a9c0..18490f3bd7f1 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -983,12 +983,15 @@ static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, + set_page_owner(page, order, gfp_flags); + + /* +- * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was necessary to ++ * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to + * allocate the page. The expectation is that the caller is taking + * steps that will free more memory. The caller should avoid the page + * being used for !PFMEMALLOC purposes. 
+ */ +- page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); ++ if (alloc_flags & ALLOC_NO_WATERMARKS) ++ set_page_pfmemalloc(page); ++ else ++ clear_page_pfmemalloc(page); + + return 0; + } +diff --git a/mm/slab.c b/mm/slab.c +index 7eb38dd1cefa..3dd2d1ff9d5d 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -1602,7 +1602,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, + } + + /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */ +- if (unlikely(page->pfmemalloc)) ++ if (page_is_pfmemalloc(page)) + pfmemalloc_active = true; + + nr_pages = (1 << cachep->gfporder); +@@ -1613,7 +1613,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, + add_zone_page_state(page_zone(page), + NR_SLAB_UNRECLAIMABLE, nr_pages); + __SetPageSlab(page); +- if (page->pfmemalloc) ++ if (page_is_pfmemalloc(page)) + SetPageSlabPfmemalloc(page); + + if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) { +diff --git a/mm/slub.c b/mm/slub.c +index 54c0876b43d5..08342c523a85 100644 +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -1427,7 +1427,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node) + inc_slabs_node(s, page_to_nid(page), page->objects); + page->slab_cache = s; + __SetPageSlab(page); +- if (page->pfmemalloc) ++ if (page_is_pfmemalloc(page)) + SetPageSlabPfmemalloc(page); + + start = page_address(page); +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 0d024fc8aa8e..1a17bd7c0ce5 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -1153,7 +1153,7 @@ cull_mlocked: + if (PageSwapCache(page)) + try_to_free_swap(page); + unlock_page(page); +- putback_lru_page(page); ++ list_add(&page->lru, &ret_pages); + continue; + + activate_locked: +diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c +index e97572b5d2cc..0ff6e1bbca91 100644 +--- a/net/bridge/br_forward.c ++++ b/net/bridge/br_forward.c +@@ -42,6 +42,7 @@ int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) + } else { + skb_push(skb, ETH_HLEN); + br_drop_fake_rtable(skb); ++ skb_sender_cpu_clear(skb); + dev_queue_xmit(skb); + } + +diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c +index e29ad70b3000..d1f910c0d586 100644 +--- a/net/bridge/br_mdb.c ++++ b/net/bridge/br_mdb.c +@@ -348,7 +348,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, + return -ENOMEM; + rcu_assign_pointer(*pp, p); + +- br_mdb_notify(br->dev, port, group, RTM_NEWMDB); + return 0; + } + +@@ -371,6 +370,7 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br, + if (!p || p->br != br || p->state == BR_STATE_DISABLED) + return -EINVAL; + ++ memset(&ip, 0, sizeof(ip)); + ip.proto = entry->addr.proto; + if (ip.proto == htons(ETH_P_IP)) + ip.u.ip4 = entry->addr.u.ip4; +@@ -417,6 +417,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry) + if (!netif_running(br->dev) || br->multicast_disabled) + return -EINVAL; + ++ memset(&ip, 0, sizeof(ip)); + ip.proto = entry->addr.proto; + if (ip.proto == htons(ETH_P_IP)) { + if (timer_pending(&br->ip4_other_query.timer)) +diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c +index 4b5c236998ff..a7559ef312bd 100644 +--- a/net/bridge/br_netlink.c ++++ b/net/bridge/br_netlink.c +@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void) + + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ + + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ + + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ ++ + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */ ++ + nla_total_size(1) /* 
IFLA_BRPORT_PROXYARP_WIFI */ + + 0; + } + +@@ -504,6 +506,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = { + [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, + [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, + [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, ++ [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 }, ++ [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 }, + }; + + /* Change the state of the port and notify spanning tree */ +@@ -711,9 +715,17 @@ static int br_port_slave_changelink(struct net_device *brdev, + struct nlattr *tb[], + struct nlattr *data[]) + { ++ struct net_bridge *br = netdev_priv(brdev); ++ int ret; ++ + if (!data) + return 0; +- return br_setport(br_port_get_rtnl(dev), data); ++ ++ spin_lock_bh(&br->lock); ++ ret = br_setport(br_port_get_rtnl(dev), data); ++ spin_unlock_bh(&br->lock); ++ ++ return ret; + } + + static int br_port_fill_slave_info(struct sk_buff *skb, +diff --git a/net/core/datagram.c b/net/core/datagram.c +index b80fb91bb3f7..617088aee21d 100644 +--- a/net/core/datagram.c ++++ b/net/core/datagram.c +@@ -131,6 +131,35 @@ out_noerr: + goto out; + } + ++static struct sk_buff *skb_set_peeked(struct sk_buff *skb) ++{ ++ struct sk_buff *nskb; ++ ++ if (skb->peeked) ++ return skb; ++ ++ /* We have to unshare an skb before modifying it. */ ++ if (!skb_shared(skb)) ++ goto done; ++ ++ nskb = skb_clone(skb, GFP_ATOMIC); ++ if (!nskb) ++ return ERR_PTR(-ENOMEM); ++ ++ skb->prev->next = nskb; ++ skb->next->prev = nskb; ++ nskb->prev = skb->prev; ++ nskb->next = skb->next; ++ ++ consume_skb(skb); ++ skb = nskb; ++ ++done: ++ skb->peeked = 1; ++ ++ return skb; ++} ++ + /** + * __skb_recv_datagram - Receive a datagram skbuff + * @sk: socket +@@ -165,7 +194,9 @@ out_noerr: + struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, + int *peeked, int *off, int *err) + { ++ struct sk_buff_head *queue = &sk->sk_receive_queue; + struct sk_buff *skb, *last; ++ unsigned long cpu_flags; + long timeo; + /* + * Caller is allowed not to check sk->sk_err before skb_recv_datagram() +@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, + * Look at current nfs client by the way... + * However, this function was correct in any case. 
8) + */ +- unsigned long cpu_flags; +- struct sk_buff_head *queue = &sk->sk_receive_queue; + int _off = *off; + + last = (struct sk_buff *)queue; +@@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, + _off -= skb->len; + continue; + } +- skb->peeked = 1; ++ ++ skb = skb_set_peeked(skb); ++ error = PTR_ERR(skb); ++ if (IS_ERR(skb)) ++ goto unlock_err; ++ + atomic_inc(&skb->users); + } else + __skb_unlink(skb, queue); +@@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, + + return NULL; + ++unlock_err: ++ spin_unlock_irqrestore(&queue->lock, cpu_flags); + no_packet: + *err = error; + return NULL; +@@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) + !skb->csum_complete_sw) + netdev_rx_csum_fault(skb->dev); + } +- skb->csum_valid = !sum; ++ if (!skb_shared(skb)) ++ skb->csum_valid = !sum; + return sum; + } + EXPORT_SYMBOL(__skb_checksum_complete_head); +@@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) + netdev_rx_csum_fault(skb->dev); + } + +- /* Save full packet checksum */ +- skb->csum = csum; +- skb->ip_summed = CHECKSUM_COMPLETE; +- skb->csum_complete_sw = 1; +- skb->csum_valid = !sum; ++ if (!skb_shared(skb)) { ++ /* Save full packet checksum */ ++ skb->csum = csum; ++ skb->ip_summed = CHECKSUM_COMPLETE; ++ skb->csum_complete_sw = 1; ++ skb->csum_valid = !sum; ++ } + + return sum; + } +diff --git a/net/core/dev.c b/net/core/dev.c +index aa82f9ab6a36..a42b232805a5 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -672,10 +672,6 @@ int dev_get_iflink(const struct net_device *dev) + if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink) + return dev->netdev_ops->ndo_get_iflink(dev); + +- /* If dev->rtnl_link_ops is set, it's a virtual interface. 
*/ +- if (dev->rtnl_link_ops) +- return 0; +- + return dev->ifindex; + } + EXPORT_SYMBOL(dev_get_iflink); +@@ -3341,6 +3337,8 @@ static int enqueue_to_backlog(struct sk_buff *skb, int cpu, + local_irq_save(flags); + + rps_lock(sd); ++ if (!netif_running(skb->dev)) ++ goto drop; + qlen = skb_queue_len(&sd->input_pkt_queue); + if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { + if (qlen) { +@@ -3362,6 +3360,7 @@ enqueue: + goto enqueue; + } + ++drop: + sd->dropped++; + rps_unlock(sd); + +@@ -3667,8 +3666,6 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc) + + pt_prev = NULL; + +- rcu_read_lock(); +- + another_round: + skb->skb_iif = skb->dev->ifindex; + +@@ -3678,7 +3675,7 @@ another_round: + skb->protocol == cpu_to_be16(ETH_P_8021AD)) { + skb = skb_vlan_untag(skb); + if (unlikely(!skb)) +- goto unlock; ++ goto out; + } + + #ifdef CONFIG_NET_CLS_ACT +@@ -3708,7 +3705,7 @@ skip_taps: + if (static_key_false(&ingress_needed)) { + skb = handle_ing(skb, &pt_prev, &ret, orig_dev); + if (!skb) +- goto unlock; ++ goto out; + } + + skb->tc_verd = 0; +@@ -3725,7 +3722,7 @@ ncls: + if (vlan_do_receive(&skb)) + goto another_round; + else if (unlikely(!skb)) +- goto unlock; ++ goto out; + } + + rx_handler = rcu_dereference(skb->dev->rx_handler); +@@ -3737,7 +3734,7 @@ ncls: + switch (rx_handler(&skb)) { + case RX_HANDLER_CONSUMED: + ret = NET_RX_SUCCESS; +- goto unlock; ++ goto out; + case RX_HANDLER_ANOTHER: + goto another_round; + case RX_HANDLER_EXACT: +@@ -3791,8 +3788,7 @@ drop: + ret = NET_RX_DROP; + } + +-unlock: +- rcu_read_unlock(); ++out: + return ret; + } + +@@ -3823,29 +3819,30 @@ static int __netif_receive_skb(struct sk_buff *skb) + + static int netif_receive_skb_internal(struct sk_buff *skb) + { ++ int ret; ++ + net_timestamp_check(netdev_tstamp_prequeue, skb); + + if (skb_defer_rx_timestamp(skb)) + return NET_RX_SUCCESS; + ++ rcu_read_lock(); ++ + #ifdef CONFIG_RPS + if (static_key_false(&rps_needed)) { + struct rps_dev_flow voidflow, *rflow = &voidflow; +- int cpu, ret; +- +- rcu_read_lock(); +- +- cpu = get_rps_cpu(skb->dev, skb, &rflow); ++ int cpu = get_rps_cpu(skb->dev, skb, &rflow); + + if (cpu >= 0) { + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + rcu_read_unlock(); + return ret; + } +- rcu_read_unlock(); + } + #endif +- return __netif_receive_skb(skb); ++ ret = __netif_receive_skb(skb); ++ rcu_read_unlock(); ++ return ret; + } + + /** +@@ -4390,8 +4387,10 @@ static int process_backlog(struct napi_struct *napi, int quota) + struct sk_buff *skb; + + while ((skb = __skb_dequeue(&sd->process_queue))) { ++ rcu_read_lock(); + local_irq_enable(); + __netif_receive_skb(skb); ++ rcu_read_unlock(); + local_irq_disable(); + input_queue_head_incr(sd); + if (++work >= quota) { +@@ -6027,6 +6026,7 @@ static void rollback_registered_many(struct list_head *head) + unlist_netdevice(dev); + + dev->reg_state = NETREG_UNREGISTERING; ++ on_each_cpu(flush_backlog, dev, 1); + } + + synchronize_net(); +@@ -6297,7 +6297,8 @@ static int netif_alloc_netdev_queues(struct net_device *dev) + struct netdev_queue *tx; + size_t sz = count * sizeof(*tx); + +- BUG_ON(count < 1 || count > 0xffff); ++ if (count < 1 || count > 0xffff) ++ return -EINVAL; + + tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); + if (!tx) { +@@ -6650,8 +6651,6 @@ void netdev_run_todo(void) + + dev->reg_state = NETREG_UNREGISTERED; + +- on_each_cpu(flush_backlog, dev, 1); +- + netdev_wait_allrefs(dev); + + /* paranoia */ +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index 
508155b283dd..043ea1867d0f 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -3490,8 +3490,10 @@ static int pktgen_thread_worker(void *arg) + pktgen_rem_thread(t); + + /* Wait for kthread_stop */ +- while (!kthread_should_stop()) { ++ for (;;) { + set_current_state(TASK_INTERRUPTIBLE); ++ if (kthread_should_stop()) ++ break; + schedule(); + } + __set_current_state(TASK_RUNNING); +diff --git a/net/core/request_sock.c b/net/core/request_sock.c +index 87b22c0bc08c..b42f0e26f89e 100644 +--- a/net/core/request_sock.c ++++ b/net/core/request_sock.c +@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue) + spin_lock_bh(&queue->syn_wait_lock); + while ((req = lopt->syn_table[i]) != NULL) { + lopt->syn_table[i] = req->dl_next; ++ /* Because of following del_timer_sync(), ++ * we must release the spinlock here ++ * or risk a dead lock. ++ */ ++ spin_unlock_bh(&queue->syn_wait_lock); + atomic_inc(&lopt->qlen_dec); +- if (del_timer(&req->rsk_timer)) ++ if (del_timer_sync(&req->rsk_timer)) + reqsk_put(req); + reqsk_put(req); ++ spin_lock_bh(&queue->syn_wait_lock); + } + spin_unlock_bh(&queue->syn_wait_lock); + } +diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c +index 8de36824018d..fe95cb704aaa 100644 +--- a/net/core/rtnetlink.c ++++ b/net/core/rtnetlink.c +@@ -1287,10 +1287,6 @@ static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = { + [IFLA_INFO_SLAVE_DATA] = { .type = NLA_NESTED }, + }; + +-static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = { +- [IFLA_VF_INFO] = { .type = NLA_NESTED }, +-}; +- + static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { + [IFLA_VF_MAC] = { .len = sizeof(struct ifla_vf_mac) }, + [IFLA_VF_VLAN] = { .len = sizeof(struct ifla_vf_vlan) }, +@@ -1437,96 +1433,98 @@ static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[]) + return 0; + } + +-static int do_setvfinfo(struct net_device *dev, struct nlattr *attr) ++static int do_setvfinfo(struct net_device *dev, struct nlattr **tb) + { +- int rem, err = -EINVAL; +- struct nlattr *vf; + const struct net_device_ops *ops = dev->netdev_ops; ++ int err = -EINVAL; + +- nla_for_each_nested(vf, attr, rem) { +- switch (nla_type(vf)) { +- case IFLA_VF_MAC: { +- struct ifla_vf_mac *ivm; +- ivm = nla_data(vf); +- err = -EOPNOTSUPP; +- if (ops->ndo_set_vf_mac) +- err = ops->ndo_set_vf_mac(dev, ivm->vf, +- ivm->mac); +- break; +- } +- case IFLA_VF_VLAN: { +- struct ifla_vf_vlan *ivv; +- ivv = nla_data(vf); +- err = -EOPNOTSUPP; +- if (ops->ndo_set_vf_vlan) +- err = ops->ndo_set_vf_vlan(dev, ivv->vf, +- ivv->vlan, +- ivv->qos); +- break; +- } +- case IFLA_VF_TX_RATE: { +- struct ifla_vf_tx_rate *ivt; +- struct ifla_vf_info ivf; +- ivt = nla_data(vf); +- err = -EOPNOTSUPP; +- if (ops->ndo_get_vf_config) +- err = ops->ndo_get_vf_config(dev, ivt->vf, +- &ivf); +- if (err) +- break; +- err = -EOPNOTSUPP; +- if (ops->ndo_set_vf_rate) +- err = ops->ndo_set_vf_rate(dev, ivt->vf, +- ivf.min_tx_rate, +- ivt->rate); +- break; +- } +- case IFLA_VF_RATE: { +- struct ifla_vf_rate *ivt; +- ivt = nla_data(vf); +- err = -EOPNOTSUPP; +- if (ops->ndo_set_vf_rate) +- err = ops->ndo_set_vf_rate(dev, ivt->vf, +- ivt->min_tx_rate, +- ivt->max_tx_rate); +- break; +- } +- case IFLA_VF_SPOOFCHK: { +- struct ifla_vf_spoofchk *ivs; +- ivs = nla_data(vf); +- err = -EOPNOTSUPP; +- if (ops->ndo_set_vf_spoofchk) +- err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, +- ivs->setting); +- break; +- } +- case IFLA_VF_LINK_STATE: { +- struct ifla_vf_link_state *ivl; +- ivl = 
nla_data(vf); +- err = -EOPNOTSUPP; +- if (ops->ndo_set_vf_link_state) +- err = ops->ndo_set_vf_link_state(dev, ivl->vf, +- ivl->link_state); +- break; +- } +- case IFLA_VF_RSS_QUERY_EN: { +- struct ifla_vf_rss_query_en *ivrssq_en; ++ if (tb[IFLA_VF_MAC]) { ++ struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]); + +- ivrssq_en = nla_data(vf); +- err = -EOPNOTSUPP; +- if (ops->ndo_set_vf_rss_query_en) +- err = ops->ndo_set_vf_rss_query_en(dev, +- ivrssq_en->vf, +- ivrssq_en->setting); +- break; +- } +- default: +- err = -EINVAL; +- break; +- } +- if (err) +- break; ++ err = -EOPNOTSUPP; ++ if (ops->ndo_set_vf_mac) ++ err = ops->ndo_set_vf_mac(dev, ivm->vf, ++ ivm->mac); ++ if (err < 0) ++ return err; ++ } ++ ++ if (tb[IFLA_VF_VLAN]) { ++ struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]); ++ ++ err = -EOPNOTSUPP; ++ if (ops->ndo_set_vf_vlan) ++ err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan, ++ ivv->qos); ++ if (err < 0) ++ return err; ++ } ++ ++ if (tb[IFLA_VF_TX_RATE]) { ++ struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]); ++ struct ifla_vf_info ivf; ++ ++ err = -EOPNOTSUPP; ++ if (ops->ndo_get_vf_config) ++ err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf); ++ if (err < 0) ++ return err; ++ ++ err = -EOPNOTSUPP; ++ if (ops->ndo_set_vf_rate) ++ err = ops->ndo_set_vf_rate(dev, ivt->vf, ++ ivf.min_tx_rate, ++ ivt->rate); ++ if (err < 0) ++ return err; ++ } ++ ++ if (tb[IFLA_VF_RATE]) { ++ struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]); ++ ++ err = -EOPNOTSUPP; ++ if (ops->ndo_set_vf_rate) ++ err = ops->ndo_set_vf_rate(dev, ivt->vf, ++ ivt->min_tx_rate, ++ ivt->max_tx_rate); ++ if (err < 0) ++ return err; + } ++ ++ if (tb[IFLA_VF_SPOOFCHK]) { ++ struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]); ++ ++ err = -EOPNOTSUPP; ++ if (ops->ndo_set_vf_spoofchk) ++ err = ops->ndo_set_vf_spoofchk(dev, ivs->vf, ++ ivs->setting); ++ if (err < 0) ++ return err; ++ } ++ ++ if (tb[IFLA_VF_LINK_STATE]) { ++ struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]); ++ ++ err = -EOPNOTSUPP; ++ if (ops->ndo_set_vf_link_state) ++ err = ops->ndo_set_vf_link_state(dev, ivl->vf, ++ ivl->link_state); ++ if (err < 0) ++ return err; ++ } ++ ++ if (tb[IFLA_VF_RSS_QUERY_EN]) { ++ struct ifla_vf_rss_query_en *ivrssq_en; ++ ++ err = -EOPNOTSUPP; ++ ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]); ++ if (ops->ndo_set_vf_rss_query_en) ++ err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf, ++ ivrssq_en->setting); ++ if (err < 0) ++ return err; ++ } ++ + return err; + } + +@@ -1722,14 +1720,21 @@ static int do_setlink(const struct sk_buff *skb, + } + + if (tb[IFLA_VFINFO_LIST]) { ++ struct nlattr *vfinfo[IFLA_VF_MAX + 1]; + struct nlattr *attr; + int rem; ++ + nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) { +- if (nla_type(attr) != IFLA_VF_INFO) { ++ if (nla_type(attr) != IFLA_VF_INFO || ++ nla_len(attr) < NLA_HDRLEN) { + err = -EINVAL; + goto errout; + } +- err = do_setvfinfo(dev, attr); ++ err = nla_parse_nested(vfinfo, IFLA_VF_MAX, attr, ++ ifla_vf_policy); ++ if (err < 0) ++ goto errout; ++ err = do_setvfinfo(dev, vfinfo); + if (err < 0) + goto errout; + status |= DO_SETLINK_NOTIFY; +diff --git a/net/core/skbuff.c b/net/core/skbuff.c +index 41ec02242ea7..a2e4e47b2839 100644 +--- a/net/core/skbuff.c ++++ b/net/core/skbuff.c +@@ -340,7 +340,7 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size) + + if (skb && frag_size) { + skb->head_frag = 1; +- if (virt_to_head_page(data)->pfmemalloc) ++ if (page_is_pfmemalloc(virt_to_head_page(data))) + skb->pfmemalloc = 
1; + } + return skb; +diff --git a/net/dsa/slave.c b/net/dsa/slave.c +index 827cda560a55..57978c5b2c91 100644 +--- a/net/dsa/slave.c ++++ b/net/dsa/slave.c +@@ -732,7 +732,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p, + return -ENODEV; + + /* Use already configured phy mode */ +- p->phy_interface = p->phy->interface; ++ if (p->phy_interface == PHY_INTERFACE_MODE_NA) ++ p->phy_interface = p->phy->interface; + phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, + p->phy_interface); + +diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c +index 90c0e8386116..574fad9cca05 100644 +--- a/net/ipv4/datagram.c ++++ b/net/ipv4/datagram.c +@@ -20,7 +20,7 @@ + #include <net/route.h> + #include <net/tcp_states.h> + +-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ++int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) + { + struct inet_sock *inet = inet_sk(sk); + struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; +@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) + + sk_dst_reset(sk); + +- lock_sock(sk); +- + oif = sk->sk_bound_dev_if; + saddr = inet->inet_saddr; + if (ipv4_is_multicast(usin->sin_addr.s_addr)) { +@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) + sk_dst_set(sk, &rt->dst); + err = 0; + out: +- release_sock(sk); + return err; + } ++EXPORT_SYMBOL(__ip4_datagram_connect); ++ ++int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ++{ ++ int res; ++ ++ lock_sock(sk); ++ res = __ip4_datagram_connect(sk, uaddr, addr_len); ++ release_sock(sk); ++ return res; ++} + EXPORT_SYMBOL(ip4_datagram_connect); + + /* Because UDP xmit path can manipulate sk_dst_cache without holding +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c +index 09b62e17dd8c..0ca933db1b41 100644 +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -1780,8 +1780,6 @@ void fib_table_flush_external(struct fib_table *tb) + if (hlist_empty(&n->leaf)) { + put_child_root(pn, n->key, NULL); + node_free(n); +- } else { +- leaf_pull_suffix(pn, n); + } + } + } +@@ -1852,8 +1850,6 @@ int fib_table_flush(struct fib_table *tb) + if (hlist_empty(&n->leaf)) { + put_child_root(pn, n->key, NULL); + node_free(n); +- } else { +- leaf_pull_suffix(pn, n); + } + } + +@@ -2457,7 +2453,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, + key = l->key + 1; + iter->pos++; + +- if (pos-- <= 0) ++ if (--pos <= 0) + break; + + l = NULL; +diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c +index 8976ca423a07..b27fc401c6a9 100644 +--- a/net/ipv4/inet_connection_sock.c ++++ b/net/ipv4/inet_connection_sock.c +@@ -584,7 +584,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue, + } + + spin_unlock(&queue->syn_wait_lock); +- if (del_timer(&req->rsk_timer)) ++ if (del_timer_sync(&req->rsk_timer)) + reqsk_put(req); + return found; + } +diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c +index cc1da6d9cb35..cae22a1a8777 100644 +--- a/net/ipv4/ip_fragment.c ++++ b/net/ipv4/ip_fragment.c +@@ -342,7 +342,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) + ihl = ip_hdrlen(skb); + + /* Determine the position of this fragment. */ +- end = offset + skb->len - ihl; ++ end = offset + skb->len - skb_network_offset(skb) - ihl; + err = -EINVAL; + + /* Is this the final fragment? 
*/ +@@ -372,7 +372,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) + goto err; + + err = -ENOMEM; +- if (!pskb_pull(skb, ihl)) ++ if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) + goto err; + + err = pskb_trim_rcsum(skb, end - offset); +@@ -613,6 +613,9 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, + iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0; + iph->tot_len = htons(len); + iph->tos |= ecn; ++ ++ ip_send_check(iph); ++ + IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); + qp->q.fragments = NULL; + qp->q.fragments_tail = NULL; +diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c +index 4c2c3ba4ba65..626d9e56a6bd 100644 +--- a/net/ipv4/ip_tunnel.c ++++ b/net/ipv4/ip_tunnel.c +@@ -586,7 +586,8 @@ int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t, + EXPORT_SYMBOL(ip_tunnel_encap); + + static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, +- struct rtable *rt, __be16 df) ++ struct rtable *rt, __be16 df, ++ const struct iphdr *inner_iph) + { + struct ip_tunnel *tunnel = netdev_priv(dev); + int pkt_size = skb->len - tunnel->hlen - dev->hard_header_len; +@@ -603,7 +604,8 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb, + + if (skb->protocol == htons(ETH_P_IP)) { + if (!skb_is_gso(skb) && +- (df & htons(IP_DF)) && mtu < pkt_size) { ++ (inner_iph->frag_off & htons(IP_DF)) && ++ mtu < pkt_size) { + memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); + return -E2BIG; +@@ -737,7 +739,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, + goto tx_error; + } + +- if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off)) { ++ if (tnl_update_pmtu(dev, skb, rt, tnl_params->frag_off, inner_iph)) { + ip_rt_put(rt); + goto tx_error; + } +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index fc1c658ec6c1..441ca6f38981 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) + req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); + if (req) { + nsk = tcp_check_req(sk, skb, req, false); +- if (!nsk) ++ if (!nsk || nsk == sk) + reqsk_put(req); + return nsk; + } +diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c +index 83aa604f9273..1b8c5ba7d5f7 100644 +--- a/net/ipv4/udp.c ++++ b/net/ipv4/udp.c +@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb) + + skb->sk = sk; + skb->destructor = sock_efree; +- dst = sk->sk_rx_dst; ++ dst = READ_ONCE(sk->sk_rx_dst); + + if (dst) + dst = dst_check(dst, 0); +- if (dst) +- skb_dst_set_noref(skb, dst); ++ if (dst) { ++ /* DST_NOCACHE can not be used without taking a reference */ ++ if (dst->flags & DST_NOCACHE) { ++ if (likely(atomic_inc_not_zero(&dst->__refcnt))) ++ skb_dst_set(skb, dst); ++ } else { ++ skb_dst_set_noref(skb, dst); ++ } ++ } + } + + int udp_rcv(struct sk_buff *skb) +diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c +index 62d908e64eeb..b10a88986a98 100644 +--- a/net/ipv6/datagram.c ++++ b/net/ipv6/datagram.c +@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a) + return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); + } + +-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ++static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) + { + struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; + struct inet_sock *inet = inet_sk(sk); +@@ -56,7 +56,7 @@ int 
ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) + if (usin->sin6_family == AF_INET) { + if (__ipv6_only_sock(sk)) + return -EAFNOSUPPORT; +- err = ip4_datagram_connect(sk, uaddr, addr_len); ++ err = __ip4_datagram_connect(sk, uaddr, addr_len); + goto ipv4_connected; + } + +@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) + sin.sin_addr.s_addr = daddr->s6_addr32[3]; + sin.sin_port = usin->sin6_port; + +- err = ip4_datagram_connect(sk, +- (struct sockaddr *) &sin, +- sizeof(sin)); ++ err = __ip4_datagram_connect(sk, ++ (struct sockaddr *) &sin, ++ sizeof(sin)); + + ipv4_connected: + if (err) +@@ -204,6 +204,16 @@ out: + fl6_sock_release(flowlabel); + return err; + } ++ ++int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) ++{ ++ int res; ++ ++ lock_sock(sk); ++ res = __ip6_datagram_connect(sk, uaddr, addr_len); ++ release_sock(sk); ++ return res; ++} + EXPORT_SYMBOL_GPL(ip6_datagram_connect); + + int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, +diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c +index f2e464eba5ef..57990c929cd8 100644 +--- a/net/ipv6/ip6_input.c ++++ b/net/ipv6/ip6_input.c +@@ -331,10 +331,10 @@ int ip6_mc_input(struct sk_buff *skb) + if (offset < 0) + goto out; + +- if (!ipv6_is_mld(skb, nexthdr, offset)) +- goto out; ++ if (ipv6_is_mld(skb, nexthdr, offset)) ++ deliver = true; + +- deliver = true; ++ goto out; + } + /* unknown RA - process it normally */ + } +diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c +index e893cd18612f..08b62047c67f 100644 +--- a/net/ipv6/ip6_offload.c ++++ b/net/ipv6/ip6_offload.c +@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = { + static const struct net_offload sit_offload = { + .callbacks = { + .gso_segment = ipv6_gso_segment, +- .gro_receive = ipv6_gro_receive, +- .gro_complete = ipv6_gro_complete, + }, + }; + +diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c +index 3adffb300238..e541d68dba8b 100644 +--- a/net/ipv6/tcp_ipv6.c ++++ b/net/ipv6/tcp_ipv6.c +@@ -946,7 +946,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb) + &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); + if (req) { + nsk = tcp_check_req(sk, skb, req, false); +- if (!nsk) ++ if (!nsk || nsk == sk) + reqsk_put(req); + return nsk; + } +diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c +index 667111ee6a20..5787f15a3a12 100644 +--- a/net/mac80211/tx.c ++++ b/net/mac80211/tx.c +@@ -301,9 +301,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) + if (tx->sdata->vif.type == NL80211_IFTYPE_WDS) + return TX_CONTINUE; + +- if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT) +- return TX_CONTINUE; +- + if (tx->flags & IEEE80211_TX_PS_BUFFERED) + return TX_CONTINUE; + +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index bf6e76643f78..4856d975492d 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -355,25 +355,52 @@ err1: + return NULL; + } + ++ ++static void ++__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, ++ unsigned int order) ++{ ++ struct netlink_sock *nlk = nlk_sk(sk); ++ struct sk_buff_head *queue; ++ struct netlink_ring *ring; ++ ++ queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; ++ ring = tx_ring ? 
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index bf6e76643f78..4856d975492d 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -355,25 +355,52 @@ err1:
+ 	return NULL;
+ }
+ 
++
++static void
++__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
++		   unsigned int order)
++{
++	struct netlink_sock *nlk = nlk_sk(sk);
++	struct sk_buff_head *queue;
++	struct netlink_ring *ring;
++
++	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
++	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
++
++	spin_lock_bh(&queue->lock);
++
++	ring->frame_max = req->nm_frame_nr - 1;
++	ring->head = 0;
++	ring->frame_size = req->nm_frame_size;
++	ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
++
++	swap(ring->pg_vec_len, req->nm_block_nr);
++	swap(ring->pg_vec_order, order);
++	swap(ring->pg_vec, pg_vec);
++
++	__skb_queue_purge(queue);
++	spin_unlock_bh(&queue->lock);
++
++	WARN_ON(atomic_read(&nlk->mapped));
++
++	if (pg_vec)
++		free_pg_vec(pg_vec, order, req->nm_block_nr);
++}
++
+ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+-			    bool closing, bool tx_ring)
++			    bool tx_ring)
+ {
+ 	struct netlink_sock *nlk = nlk_sk(sk);
+ 	struct netlink_ring *ring;
+-	struct sk_buff_head *queue;
+ 	void **pg_vec = NULL;
+ 	unsigned int order = 0;
+-	int err;
+ 
+ 	ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
+-	queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
+ 
+-	if (!closing) {
+-		if (atomic_read(&nlk->mapped))
+-			return -EBUSY;
+-		if (atomic_read(&ring->pending))
+-			return -EBUSY;
+-	}
++	if (atomic_read(&nlk->mapped))
++		return -EBUSY;
++	if (atomic_read(&ring->pending))
++		return -EBUSY;
+ 
+ 	if (req->nm_block_nr) {
+ 		if (ring->pg_vec != NULL)
+@@ -405,31 +432,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
+ 			return -EINVAL;
+ 	}
+ 
+-	err = -EBUSY;
+ 	mutex_lock(&nlk->pg_vec_lock);
+-	if (closing || atomic_read(&nlk->mapped) == 0) {
+-		err = 0;
+-		spin_lock_bh(&queue->lock);
+-
+-		ring->frame_max = req->nm_frame_nr - 1;
+-		ring->head = 0;
+-		ring->frame_size = req->nm_frame_size;
+-		ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
+-
+-		swap(ring->pg_vec_len, req->nm_block_nr);
+-		swap(ring->pg_vec_order, order);
+-		swap(ring->pg_vec, pg_vec);
+-
+-		__skb_queue_purge(queue);
+-		spin_unlock_bh(&queue->lock);
+-
+-		WARN_ON(atomic_read(&nlk->mapped));
++	if (atomic_read(&nlk->mapped) == 0) {
++		__netlink_set_ring(sk, req, tx_ring, pg_vec, order);
++		mutex_unlock(&nlk->pg_vec_lock);
++		return 0;
+ 	}
++
+ 	mutex_unlock(&nlk->pg_vec_lock);
+ 
+ 	if (pg_vec)
+ 		free_pg_vec(pg_vec, order, req->nm_block_nr);
+-	return err;
++
++	return -EBUSY;
+ }
+ 
+ static void netlink_mm_open(struct vm_area_struct *vma)
+@@ -898,10 +913,10 @@ static void netlink_sock_destruct(struct sock *sk)
+ 
+ 		memset(&req, 0, sizeof(req));
+ 		if (nlk->rx_ring.pg_vec)
+-			netlink_set_ring(sk, &req, true, false);
++			__netlink_set_ring(sk, &req, false, NULL, 0);
+ 		memset(&req, 0, sizeof(req));
+ 		if (nlk->tx_ring.pg_vec)
+-			netlink_set_ring(sk, &req, true, true);
++			__netlink_set_ring(sk, &req, true, NULL, 0);
+ 	}
+ #endif /* CONFIG_NETLINK_MMAP */
+ 
+@@ -1079,6 +1094,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
+ 
+ 	err = __netlink_insert(table, sk);
+ 	if (err) {
++		/* In case the hashtable backend returns with -EBUSY
++		 * from here, it must not escape to the caller.
++		 */
++		if (unlikely(err == -EBUSY))
++			err = -EOVERFLOW;
+ 		if (err == -EEXIST)
+ 			err = -EADDRINUSE;
+ 		nlk_sk(sk)->portid = 0;
+@@ -2197,7 +2217,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
+ 			return -EINVAL;
+ 		if (copy_from_user(&req, optval, sizeof(req)))
+ 			return -EFAULT;
+-		err = netlink_set_ring(sk, &req, false,
++		err = netlink_set_ring(sk, &req,
+ 				       optname == NETLINK_TX_RING);
+ 		break;
+ 	}
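Note: two things happen in the af_netlink.c diff above. Ring setup and teardown are split so that the destructor path (__netlink_set_ring) skips the -EBUSY checks that only make sense for setup, and netlink_insert() stops leaking the hash backend's transient -EBUSY to userspace, per the comment in the hunk. A sketch of the errno-normalisation idea, with hypothetical table and helper names:

    /* Map backend-internal errnos onto the ones bind(2) callers expect. */
    static int portid_insert_example(struct example_table *t, struct sock *sk)
    {
    	int err = example_table_insert(t, sk);	/* hypothetical backend call */

    	if (unlikely(err == -EBUSY))		/* backend busy, e.g. table resize */
    		err = -EOVERFLOW;		/* stable, documented failure */
    	if (err == -EEXIST)
    		err = -EADDRINUSE;		/* portid already taken */
    	return err;
    }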
+diff --git a/net/nfc/nci/hci.c b/net/nfc/nci/hci.c
+index ed54ec533836..b33fed6d1584 100644
+--- a/net/nfc/nci/hci.c
++++ b/net/nfc/nci/hci.c
+@@ -233,7 +233,7 @@ int nci_hci_send_cmd(struct nci_dev *ndev, u8 gate, u8 cmd,
+ 	r = nci_request(ndev, nci_hci_send_data_req, (unsigned long)&data,
+ 			msecs_to_jiffies(NCI_DATA_TIMEOUT));
+ 
+-	if (r == NCI_STATUS_OK)
++	if (r == NCI_STATUS_OK && skb)
+ 		*skb = conn_info->rx_skb;
+ 
+ 	return r;
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index fe1610ddeacf..e1ea5d43b01e 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -2307,7 +2307,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
+ 		}
+ 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
+ 					  addr, hlen);
+-		if (tp_len > dev->mtu + dev->hard_header_len) {
++		if (likely(tp_len >= 0) &&
++		    tp_len > dev->mtu + dev->hard_header_len) {
+ 			struct ethhdr *ehdr;
+ 			/* Earlier code assumed this would be a VLAN pkt,
+ 			 * double-check this now that we have the actual
+@@ -2688,7 +2689,7 @@ static int packet_release(struct socket *sock)
+ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
+ {
+ 	struct packet_sock *po = pkt_sk(sk);
+-	const struct net_device *dev_curr;
++	struct net_device *dev_curr;
+ 	__be16 proto_curr;
+ 	bool need_rehook;
+ 
+@@ -2712,15 +2713,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
+ 
+ 		po->num = proto;
+ 		po->prot_hook.type = proto;
+-
+-		if (po->prot_hook.dev)
+-			dev_put(po->prot_hook.dev);
+-
+ 		po->prot_hook.dev = dev;
+ 
+ 		po->ifindex = dev ? dev->ifindex : 0;
+ 		packet_cached_dev_assign(po, dev);
+ 	}
++	if (dev_curr)
++		dev_put(dev_curr);
+ 
+ 	if (proto == 0 || !need_rehook)
+ 		goto out_unlock;
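Note: the packet_do_bind() hunk above reorders reference counting. The old code dropped the device reference before the protocol hook was fully switched over, leaving a window in which the hook could point at a device whose last reference was already gone. Sketch of the corrected ordering, assuming the caller holds the socket's bind lock as in the hunk (the function name is hypothetical):

    static void rebind_dev_example(struct packet_sock *po, struct net_device *dev)
    {
    	struct net_device *dev_curr = po->prot_hook.dev;	/* old device, if any */

    	po->prot_hook.dev = dev;			/* publish the new device first */
    	po->ifindex = dev ? dev->ifindex : 0;
    	if (dev_curr)
    		dev_put(dev_curr);			/* release the old one only afterwards */
    }

Dropping the const qualifier from dev_curr is required because dev_put() takes a non-const pointer.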
+diff --git a/net/rds/info.c b/net/rds/info.c
+index 9a6b4f66187c..140a44a5f7b7 100644
+--- a/net/rds/info.c
++++ b/net/rds/info.c
+@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
+ 
+ 	/* check for all kinds of wrapping and the like */
+ 	start = (unsigned long)optval;
+-	if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
++	if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
+ 		ret = -EINVAL;
+ 		goto out;
+ 	}
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 3d43e4979f27..f8d9c2a2c451 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
+ }
+ EXPORT_SYMBOL(tcf_hash_destroy);
+ 
+-int tcf_hash_release(struct tc_action *a, int bind)
++int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
+ {
+ 	struct tcf_common *p = a->priv;
+ 	int ret = 0;
+@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
+ 	if (p) {
+ 		if (bind)
+ 			p->tcfc_bindcnt--;
+-		else if (p->tcfc_bindcnt > 0)
++		else if (strict && p->tcfc_bindcnt > 0)
+ 			return -EPERM;
+ 
+ 		p->tcfc_refcnt--;
+@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
+ 			ret = 1;
+ 		}
+ 	}
++
+ 	return ret;
+ }
+-EXPORT_SYMBOL(tcf_hash_release);
++EXPORT_SYMBOL(__tcf_hash_release);
+ 
+ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ 			   struct tc_action *a)
+@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
+ 		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
+ 		hlist_for_each_entry_safe(p, n, head, tcfc_head) {
+ 			a->priv = p;
+-			ret = tcf_hash_release(a, 0);
++			ret = __tcf_hash_release(a, false, true);
+ 			if (ret == ACT_P_DELETED) {
+ 				module_put(a->ops->owner);
+ 				n_i++;
+@@ -413,7 +414,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
+ 	int ret = 0;
+ 
+ 	list_for_each_entry_safe(a, tmp, actions, list) {
+-		ret = tcf_hash_release(a, bind);
++		ret = __tcf_hash_release(a, bind, true);
+ 		if (ret == ACT_P_DELETED)
+ 			module_put(a->ops->owner);
+ 		else if (ret < 0)
+diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
+index dc6a2d324bd8..521ffca91228 100644
+--- a/net/sched/act_bpf.c
++++ b/net/sched/act_bpf.c
+@@ -27,9 +27,10 @@
+ struct tcf_bpf_cfg {
+ 	struct bpf_prog *filter;
+ 	struct sock_filter *bpf_ops;
+-	char *bpf_name;
++	const char *bpf_name;
+ 	u32 bpf_fd;
+ 	u16 bpf_num_ops;
++	bool is_ebpf;
+ };
+ 
+ static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
+@@ -200,6 +201,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
+ 	cfg->bpf_ops = bpf_ops;
+ 	cfg->bpf_num_ops = bpf_num_ops;
+ 	cfg->filter = fp;
++	cfg->is_ebpf = false;
+ 
+ 	return 0;
+ }
+@@ -234,18 +236,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
+ 	cfg->bpf_fd = bpf_fd;
+ 	cfg->bpf_name = name;
+ 	cfg->filter = fp;
++	cfg->is_ebpf = true;
+ 
+ 	return 0;
+ }
+ 
++static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
++{
++	if (cfg->is_ebpf)
++		bpf_prog_put(cfg->filter);
++	else
++		bpf_prog_destroy(cfg->filter);
++
++	kfree(cfg->bpf_ops);
++	kfree(cfg->bpf_name);
++}
++
++static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
++				  struct tcf_bpf_cfg *cfg)
++{
++	cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
++	cfg->filter = prog->filter;
++
++	cfg->bpf_ops = prog->bpf_ops;
++	cfg->bpf_name = prog->bpf_name;
++}
++
+ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ 			struct nlattr *est, struct tc_action *act,
+ 			int replace, int bind)
+ {
+ 	struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
++	struct tcf_bpf_cfg cfg, old;
+ 	struct tc_act_bpf *parm;
+ 	struct tcf_bpf *prog;
+-	struct tcf_bpf_cfg cfg;
+ 	bool is_bpf, is_ebpf;
+ 	int ret;
+ 
+@@ -294,6 +318,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ 	prog = to_bpf(act);
+ 	spin_lock_bh(&prog->tcf_lock);
+ 
++	if (ret != ACT_P_CREATED)
++		tcf_bpf_prog_fill_cfg(prog, &old);
++
+ 	prog->bpf_ops = cfg.bpf_ops;
+ 	prog->bpf_name = cfg.bpf_name;
+ 
+@@ -309,29 +336,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
+ 
+ 	if (ret == ACT_P_CREATED)
+ 		tcf_hash_insert(act);
++	else
++		tcf_bpf_cfg_cleanup(&old);
+ 
+ 	return ret;
+ 
+ destroy_fp:
+-	if (is_ebpf)
+-		bpf_prog_put(cfg.filter);
+-	else
+-		bpf_prog_destroy(cfg.filter);
+-
+-	kfree(cfg.bpf_ops);
+-	kfree(cfg.bpf_name);
+-
++	tcf_bpf_cfg_cleanup(&cfg);
+ 	return ret;
+ }
+ 
+ static void tcf_bpf_cleanup(struct tc_action *act, int bind)
+ {
+-	const struct tcf_bpf *prog = act->priv;
++	struct tcf_bpf_cfg tmp;
+ 
+-	if (tcf_bpf_is_ebpf(prog))
+-		bpf_prog_put(prog->filter);
+-	else
+-		bpf_prog_destroy(prog->filter);
++	tcf_bpf_prog_fill_cfg(act->priv, &tmp);
++	tcf_bpf_cfg_cleanup(&tmp);
+ }
+ 
+ static struct tc_action_ops act_bpf_ops __read_mostly = {
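Note: the one-liner in net/rds/info.c above is subtler than it looks. PAGE_SIZE is an unsigned long, so on 64-bit the old expression len + PAGE_SIZE - 1 was computed in 64 bits and could never be smaller than len; the wrap-around test was dead code (and for plain int arithmetic such a wrap would be undefined behaviour anyway). The rewritten test bounds len before any addition happens:

    /* Sketch: reject lengths whose page-rounding would overflow an int. */
    static int check_info_len_example(int len)
    {
    	if (len < 0)
    		return -EINVAL;
    	if (len > INT_MAX - PAGE_SIZE + 1)	/* len + PAGE_SIZE - 1 would exceed INT_MAX */
    		return -EINVAL;
    	return 0;
    }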
+diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
+index 91bd9c19471d..c0b86f2bfe22 100644
+--- a/net/sched/cls_bpf.c
++++ b/net/sched/cls_bpf.c
+@@ -364,7 +364,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
+ 		goto errout;
+ 
+ 	if (oldprog) {
+-		list_replace_rcu(&prog->link, &oldprog->link);
++		list_replace_rcu(&oldprog->link, &prog->link);
+ 		tcf_unbind_filter(tp, &oldprog->res);
+ 		call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
+ 	} else {
+diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
+index a620c4e288a5..75df923f5c03 100644
+--- a/net/sched/cls_flow.c
++++ b/net/sched/cls_flow.c
+@@ -419,6 +419,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
+ 	if (!fnew)
+ 		goto err2;
+ 
++	tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
++
+ 	fold = (struct flow_filter *)*arg;
+ 	if (fold) {
+ 		err = -EINVAL;
+@@ -480,7 +482,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
+ 		fnew->mask = ~0U;
+ 		fnew->tp = tp;
+ 		get_random_bytes(&fnew->hashrnd, 4);
+-		tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
+ 	}
+ 
+ 	fnew->perturb_timer.function = flow_perturbation;
+@@ -520,7 +521,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
+ 	if (*arg == 0)
+ 		list_add_tail_rcu(&fnew->list, &head->filters);
+ 	else
+-		list_replace_rcu(&fnew->list, &fold->list);
++		list_replace_rcu(&fold->list, &fnew->list);
+ 
+ 	*arg = (unsigned long)fnew;
+ 
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index c244c45b78d7..9291598b5aad 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -162,10 +162,10 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
+ 	skb = dequeue_head(flow);
+ 	len = qdisc_pkt_len(skb);
+ 	q->backlogs[idx] -= len;
+-	kfree_skb(skb);
+ 	sch->q.qlen--;
+ 	qdisc_qstats_drop(sch);
+ 	qdisc_qstats_backlog_dec(sch, skb);
++	kfree_skb(skb);
+ 	flow->dropped++;
+ 	return idx;
+ }
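Note: the cls_bpf.c and cls_flow.c hunks above fix the same mistake: list_replace_rcu() takes (old, new), and passing the arguments reversed splices the stale filter back into the RCU-protected list while unlinking the new one. The fq_codel hunk is a related lifetime fix, moving kfree_skb() after the last helper that still dereferences the skb. A sketch of the replace pattern, with hypothetical names:

    #include <linux/rculist.h>

    /* Swap an old filter for a new one in an RCU list: (old, new) order. */
    static void swap_entry_example(struct list_head *old_entry,
    			       struct list_head *new_entry)
    {
    	list_replace_rcu(old_entry, new_entry);
    	/* readers may still hold old_entry; free it only via call_rcu(),
    	 * as the cls_bpf hunk does with __cls_bpf_delete_prog */
    }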
+diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
+index 1d4fe24af06a..d109d308ec3a 100644
+--- a/net/sunrpc/xprt.c
++++ b/net/sunrpc/xprt.c
+@@ -611,6 +611,7 @@ static void xprt_autoclose(struct work_struct *work)
+ 	xprt->ops->close(xprt);
+ 	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
+ 	xprt_release_write(xprt, NULL);
++	wake_up_bit(&xprt->state, XPRT_LOCKED);
+ }
+ 
+ /**
+@@ -720,6 +721,7 @@ void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
+ 	xprt->ops->release_xprt(xprt, NULL);
+ out:
+ 	spin_unlock_bh(&xprt->transport_lock);
++	wake_up_bit(&xprt->state, XPRT_LOCKED);
+ }
+ 
+ /**
+@@ -1389,6 +1391,10 @@ out:
+ static void xprt_destroy(struct rpc_xprt *xprt)
+ {
+ 	dprintk("RPC: destroying transport %p\n", xprt);
++
++	/* Exclude transport connect/disconnect handlers */
++	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
++
+ 	del_timer_sync(&xprt->timer);
+ 
+ 	rpc_xprt_debugfs_unregister(xprt);
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 66891e32c5e3..5e3ad598d3f5 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -834,6 +834,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
+ 	sk->sk_user_data = NULL;
+ 
+ 	xs_restore_old_callbacks(transport, sk);
++	xprt_clear_connected(xprt);
+ 	write_unlock_bh(&sk->sk_callback_lock);
+ 	xs_sock_reset_connection_flags(xprt);
+ 
+@@ -1433,6 +1434,7 @@ out:
+ static void xs_tcp_state_change(struct sock *sk)
+ {
+ 	struct rpc_xprt *xprt;
++	struct sock_xprt *transport;
+ 
+ 	read_lock_bh(&sk->sk_callback_lock);
+ 	if (!(xprt = xprt_from_sock(sk)))
+@@ -1444,13 +1446,12 @@ static void xs_tcp_state_change(struct sock *sk)
+ 			sock_flag(sk, SOCK_ZAPPED),
+ 			sk->sk_shutdown);
+ 
++	transport = container_of(xprt, struct sock_xprt, xprt);
+ 	trace_rpc_socket_state_change(xprt, sk->sk_socket);
+ 	switch (sk->sk_state) {
+ 	case TCP_ESTABLISHED:
+ 		spin_lock(&xprt->transport_lock);
+ 		if (!xprt_test_and_set_connected(xprt)) {
+-			struct sock_xprt *transport = container_of(xprt,
+-					struct sock_xprt, xprt);
+ 
+ 			/* Reset TCP record info */
+ 			transport->tcp_offset = 0;
+@@ -1459,6 +1460,8 @@ static void xs_tcp_state_change(struct sock *sk)
+ 			transport->tcp_flags =
+ 				TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
+ 			xprt->connect_cookie++;
++			clear_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
++			xprt_clear_connecting(xprt);
+ 
+ 			xprt_wake_pending_tasks(xprt, -EAGAIN);
+ 		}
+@@ -1494,6 +1497,9 @@ static void xs_tcp_state_change(struct sock *sk)
+ 		smp_mb__after_atomic();
+ 		break;
+ 	case TCP_CLOSE:
++		if (test_and_clear_bit(XPRT_SOCK_CONNECTING,
++				       &transport->sock_state))
++			xprt_clear_connecting(xprt);
+ 		xs_sock_mark_closed(xprt);
+ 	}
+  out:
+@@ -2110,6 +2116,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+ 	/* Tell the socket layer to start connecting... */
+ 	xprt->stat.connect_count++;
+ 	xprt->stat.connect_start = jiffies;
++	set_bit(XPRT_SOCK_CONNECTING, &transport->sock_state);
+ 	ret = kernel_connect(sock, xs_addr(xprt), xprt->addrlen, O_NONBLOCK);
+ 	switch (ret) {
+ 	case 0:
+@@ -2174,7 +2181,6 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ 	case -EINPROGRESS:
+ 	case -EALREADY:
+ 		xprt_unlock_connect(xprt, transport);
+-		xprt_clear_connecting(xprt);
+ 		return;
+ 	case -EINVAL:
+ 		/* Happens, for instance, if the user specified a link
+@@ -2216,13 +2222,14 @@ static void xs_connect(struct rpc_xprt *xprt, struct rpc_task *task)
+ 
+ 	WARN_ON_ONCE(!xprt_lock_connect(xprt, task, transport));
+ 
+-	/* Start by resetting any existing state */
+-	xs_reset_transport(transport);
+-
+-	if (transport->sock != NULL && !RPC_IS_SOFTCONN(task)) {
++	if (transport->sock != NULL) {
+ 		dprintk("RPC: xs_connect delayed xprt %p for %lu "
+ 				"seconds\n",
+ 				xprt, xprt->reestablish_timeout / HZ);
++
++		/* Start by resetting any existing state */
++		xs_reset_transport(transport);
++
+ 		queue_delayed_work(rpciod_workqueue,
+ 				   &transport->connect_worker,
+ 				   xprt->reestablish_timeout);
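Note: the sunrpc changes above turn XPRT_LOCKED into something xprt_destroy() can sleep on: every path that gives the lock up now issues wake_up_bit(), and destruction begins with wait_on_bit_lock(), so it cannot run concurrently with connect/disconnect handlers. Sketch of the wait/wake pairing; the explicit barrier below is the generic requirement documented for wake_up_bit(), while in the hunks the surrounding unlock operations provide the ordering:

    static void teardown_example(struct rpc_xprt *xprt)
    {
    	/* sleep until the bit can be acquired; from here on we own it */
    	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);
    	/* ... now safe to tear down timers, workers, sockets ... */
    }

    static void release_lock_example(struct rpc_xprt *xprt)
    {
    	clear_bit(XPRT_LOCKED, &xprt->state);
    	smp_mb__after_atomic();		/* order the clear before the wakeup */
    	wake_up_bit(&xprt->state, XPRT_LOCKED);
    }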
+diff --git a/net/tipc/socket.c b/net/tipc/socket.c
+index f485600c4507..20cc6df07157 100644
+--- a/net/tipc/socket.c
++++ b/net/tipc/socket.c
+@@ -2009,6 +2009,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
+ 	res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
+ 	if (res)
+ 		goto exit;
++	security_sk_clone(sock->sk, new_sock->sk);
+ 
+ 	new_sk = new_sock->sk;
+ 	new_tsock = tipc_sk(new_sk);
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 91f6928560e1..6fe862594e9b 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1134,7 +1134,7 @@ static const struct hda_fixup alc880_fixups[] = {
+ 		/* override all pins as BIOS on old Amilo is broken */
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x14, 0x0121411f }, /* HP */
++			{ 0x14, 0x0121401f }, /* HP */
+ 			{ 0x15, 0x99030120 }, /* speaker */
+ 			{ 0x16, 0x99030130 }, /* bass speaker */
+ 			{ 0x17, 0x411111f0 }, /* N/A */
+@@ -1154,7 +1154,7 @@ static const struct hda_fixup alc880_fixups[] = {
+ 		/* almost compatible with FUJITSU, but no bass and SPDIF */
+ 		.type = HDA_FIXUP_PINS,
+ 		.v.pins = (const struct hda_pintbl[]) {
+-			{ 0x14, 0x0121411f }, /* HP */
++			{ 0x14, 0x0121401f }, /* HP */
+ 			{ 0x15, 0x99030120 }, /* speaker */
+ 			{ 0x16, 0x411111f0 }, /* N/A */
+ 			{ 0x17, 0x411111f0 }, /* N/A */
+@@ -1363,7 +1363,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_FIXUP_W810),
+ 	SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_FIXUP_MEDION_RIM),
+ 	SND_PCI_QUIRK(0x1631, 0xe011, "PB 13201056", ALC880_FIXUP_6ST_AUTOMUTE),
+-	SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_FIXUP_F1734),
++	SND_PCI_QUIRK(0x1734, 0x107c, "FSC Amilo M1437", ALC880_FIXUP_FUJITSU),
+ 	SND_PCI_QUIRK(0x1734, 0x1094, "FSC Amilo M1451G", ALC880_FIXUP_FUJITSU),
+ 	SND_PCI_QUIRK(0x1734, 0x10ac, "FSC AMILO Xi 1526", ALC880_FIXUP_F1734),
+ 	SND_PCI_QUIRK(0x1734, 0x10b0, "FSC Amilo Pi1556", ALC880_FIXUP_FUJITSU),
+@@ -5118,8 +5118,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+-	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x06db, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06dd, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
++	SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
+ 	SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
+@@ -6454,6 +6457,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
+ 	SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x05fe, "Dell XPS 15", ALC668_FIXUP_DELL_XPS13),
+ 	SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_XPS13),
++	SND_PCI_QUIRK(0x1028, 0x060d, "Dell M3800", ALC668_FIXUP_DELL_XPS13),
+ 	SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ 	SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
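Note: the single line added to tipc_accept() above matters for LSMs. A socket created inside the kernel on behalf of accept() starts with no security state, so SELinux and friends would otherwise see the child as unlabelled; security_sk_clone() copies the listener's label across, matching what the TCP accept path already does. Shape of the fix in context, using the names from the hunk:

    res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, 1);
    if (res)
    	goto exit;
    security_sk_clone(sock->sk, new_sock->sk);	/* child inherits listener's LSM state */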
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 8b7e391dd0b8..cd8ed2e393a2 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -2522,7 +2522,7 @@ static int restore_mixer_value(struct usb_mixer_elem_list *list)
+ 		for (c = 0; c < MAX_CHANNELS; c++) {
+ 			if (!(cval->cmask & (1 << c)))
+ 				continue;
+-			if (cval->cached & (1 << c)) {
++			if (cval->cached & (1 << (c + 1))) {
+ 				err = snd_usb_set_cur_mix_value(cval, c + 1, idx,
+ 								cval->cache_val[idx]);
+ 				if (err < 0)
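Note: the final hunk is an off-by-one in the mixer resume path. USB audio control requests use 1-based channel numbers (0 is the master channel), and the driver records cached values under that 1-based index, so testing bit c of cval->cached with the 0-based loop index restored the wrong channels. Sketch of the corrected loop, with a hypothetical restore helper standing in for snd_usb_set_cur_mix_value():

    for (c = 0; c < MAX_CHANNELS; c++) {
    	if (!(cval->cmask & (1 << c)))		/* channel c not present */
    		continue;
    	if (cval->cached & (1 << (c + 1)))	/* cache bits are 1-based */
    		restore_channel_example(cval, c + 1, idx);
    }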