| author | Mike Pagano <mpagano@gentoo.org> | 2014-12-07 14:45:30 -0500 |
|---|---|---|
| committer | Mike Pagano <mpagano@gentoo.org> | 2014-12-07 14:45:30 -0500 |
| commit | bf8c7bd93f8c55a5df99ce47e1ab52c43d07923d (patch) | |
| tree | 2f2d65ffcce75d138ebdb86a0592493dc4d34d7e | |
| parent | Linux patch 3.12.32 (diff) | |
| download | linux-patches-bf8c7bd93f8c55a5df99ce47e1ab52c43d07923d.tar.gz linux-patches-bf8c7bd93f8c55a5df99ce47e1ab52c43d07923d.tar.bz2 linux-patches-bf8c7bd93f8c55a5df99ce47e1ab52c43d07923d.zip | |
Linux patches 3.12.33 and 3.12.34 (tag: 3.12-35)
| -rw-r--r-- | 0000_README | 12 |
|---|---|---|
| -rw-r--r-- | 1032_linux-3.12.33.patch | 6968 |
| -rw-r--r-- | 1033_linux-3.12.34.patch | 4434 |

3 files changed, 11414 insertions(+), 0 deletions(-)
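The 0000_README hunk below registers three incremental patches (1031–1033), each fetched from kernel.org and applied in the order listed, each one bumping SUBLEVEL by one (3.12.32 → 3.12.33 → 3.12.34, as the Makefile hunk shows). For illustration only, a minimal sketch of consuming that ordering with plain patch(1) on an extracted vanilla 3.12 tree — this is not the Gentoo ebuild machinery, and the directory names are hypothetical:

```python
#!/usr/bin/env python3
"""Sketch: apply the incremental genpatches named in 0000_README, in order,
to a vanilla 3.12 source tree. Paths below are assumptions, not repo layout."""
import subprocess
from pathlib import Path

KERNEL_TREE = Path("linux-3.12")      # hypothetical vanilla 3.12 checkout
PATCH_DIR = Path("linux-patches")     # hypothetical clone of this repository

# Order matters: each patch expects the tree left by the previous one.
PATCHES = [
    "1031_linux-3.12.32.patch",
    "1032_linux-3.12.33.patch",
    "1033_linux-3.12.34.patch",
]

for name in PATCHES:
    print(f"applying {name}")
    with open(PATCH_DIR / name) as patch_file:
        # Equivalent to: patch -p1 < ../linux-patches/<name>, run inside the tree
        subprocess.run(["patch", "-p1"], stdin=patch_file,
                       cwd=KERNEL_TREE, check=True)
```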
diff --git a/0000_README b/0000_README index 0f4cf382..da28ca8a 100644 --- a/0000_README +++ b/0000_README @@ -166,6 +166,18 @@ Patch: 1030_linux-3.12.31.patch From: http://www.kernel.org Desc: Linux 3.12.31 +Patch: 1031_linux-3.12.32.patch +From: http://www.kernel.org +Desc: Linux 3.12.32 + +Patch: 1032_linux-3.12.33.patch +From: http://www.kernel.org +Desc: Linux 3.12.33 + +Patch: 1033_linux-3.12.34.patch +From: http://www.kernel.org +Desc: Linux 3.12.34 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1032_linux-3.12.33.patch b/1032_linux-3.12.33.patch new file mode 100644 index 00000000..7a176bdd --- /dev/null +++ b/1032_linux-3.12.33.patch @@ -0,0 +1,6968 @@ +diff --git a/Makefile b/Makefile +index a51d98fee407..db42c15c5def 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 12 +-SUBLEVEL = 32 ++SUBLEVEL = 33 + EXTRAVERSION = + NAME = One Giant Leap for Frogkind + +diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h +index e4abdaac6f9f..b7d4dab219b1 100644 +--- a/arch/arc/include/asm/cache.h ++++ b/arch/arc/include/asm/cache.h +@@ -61,4 +61,31 @@ extern void read_decode_cache_bcr(void); + + #endif /* !__ASSEMBLY__ */ + ++/* Instruction cache related Auxiliary registers */ ++#define ARC_REG_IC_BCR 0x77 /* Build Config reg */ ++#define ARC_REG_IC_IVIC 0x10 ++#define ARC_REG_IC_CTRL 0x11 ++#define ARC_REG_IC_IVIL 0x19 ++#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4) ++#define ARC_REG_IC_PTAG 0x1E ++#endif ++ ++/* Bit val in IC_CTRL */ ++#define IC_CTRL_CACHE_DISABLE 0x1 ++ ++/* Data cache related Auxiliary registers */ ++#define ARC_REG_DC_BCR 0x72 /* Build Config reg */ ++#define ARC_REG_DC_IVDC 0x47 ++#define ARC_REG_DC_CTRL 0x48 ++#define ARC_REG_DC_IVDL 0x4A ++#define ARC_REG_DC_FLSH 0x4B ++#define ARC_REG_DC_FLDL 0x4C ++#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4) ++#define ARC_REG_DC_PTAG 0x5C ++#endif ++ ++/* Bit val in DC_CTRL */ ++#define DC_CTRL_INV_MODE_FLUSH 0x40 ++#define DC_CTRL_FLUSH_STATUS 0x100 ++ + #endif /* _ASM_CACHE_H */ +diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S +index 0f944f024513..a2bca37ef4dd 100644 +--- a/arch/arc/kernel/head.S ++++ b/arch/arc/kernel/head.S +@@ -12,10 +12,42 @@ + * to skip certain things during boot on simulator + */ + ++#include <linux/linkage.h> + #include <asm/asm-offsets.h> + #include <asm/entry.h> +-#include <linux/linkage.h> + #include <asm/arcregs.h> ++#include <asm/cache.h> ++ ++.macro CPU_EARLY_SETUP ++ ++ ; Setting up Vectror Table (in case exception happens in early boot ++ sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE] ++ ++ ; Disable I-cache/D-cache if kernel so configured ++ lr r5, [ARC_REG_IC_BCR] ++ breq r5, 0, 1f ; I$ doesn't exist ++ lr r5, [ARC_REG_IC_CTRL] ++#ifdef CONFIG_ARC_HAS_ICACHE ++ bclr r5, r5, 0 ; 0 - Enable, 1 is Disable ++#else ++ bset r5, r5, 0 ; I$ exists, but is not used ++#endif ++ sr r5, [ARC_REG_IC_CTRL] ++ ++1: ++ lr r5, [ARC_REG_DC_BCR] ++ breq r5, 0, 1f ; D$ doesn't exist ++ lr r5, [ARC_REG_DC_CTRL] ++ bclr r5, r5, 6 ; Invalidate (discard w/o wback) ++#ifdef CONFIG_ARC_HAS_DCACHE ++ bclr r5, r5, 0 ; Enable (+Inv) ++#else ++ bset r5, r5, 0 ; Disable (+Inv) ++#endif ++ sr r5, [ARC_REG_DC_CTRL] ++ ++1: ++.endm + + .cpu A7 + +@@ -24,13 +56,13 @@ + .globl stext + stext: + ;------------------------------------------------------------------- +- ; Don't clobber r0-r4 yet. 
It might have bootloader provided info ++ ; Don't clobber r0-r2 yet. It might have bootloader provided info + ;------------------------------------------------------------------- + +- sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE] ++ CPU_EARLY_SETUP + + #ifdef CONFIG_SMP +- ; Only Boot (Master) proceeds. Others wait in platform dependent way ++ ; Ensure Boot (Master) proceeds. Others wait in platform dependent way + ; IDENTITY Reg [ 3 2 1 0 ] + ; (cpu-id) ^^^ => Zero for UP ARC700 + ; => #Core-ID if SMP (Master 0) +@@ -39,7 +71,8 @@ stext: + ; need to make sure only boot cpu takes this path. + GET_CPU_ID r5 + cmp r5, 0 +- jnz arc_platform_smp_wait_to_boot ++ mov.ne r0, r5 ++ jne arc_platform_smp_wait_to_boot + #endif + ; Clear BSS before updating any globals + ; XXX: use ZOL here +@@ -101,7 +134,7 @@ stext: + + first_lines_of_secondary: + +- sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE] ++ CPU_EARLY_SETUP + + ; setup per-cpu idle task as "current" on this CPU + ld r0, [@secondary_idle_tsk] +diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c +index 5a1259cd948c..780103417a84 100644 +--- a/arch/arc/mm/cache_arc700.c ++++ b/arch/arc/mm/cache_arc700.c +@@ -73,37 +73,9 @@ + #include <asm/cachectl.h> + #include <asm/setup.h> + +-/* Instruction cache related Auxiliary registers */ +-#define ARC_REG_IC_BCR 0x77 /* Build Config reg */ +-#define ARC_REG_IC_IVIC 0x10 +-#define ARC_REG_IC_CTRL 0x11 +-#define ARC_REG_IC_IVIL 0x19 +-#if (CONFIG_ARC_MMU_VER > 2) +-#define ARC_REG_IC_PTAG 0x1E +-#endif +- +-/* Bit val in IC_CTRL */ +-#define IC_CTRL_CACHE_DISABLE 0x1 +- +-/* Data cache related Auxiliary registers */ +-#define ARC_REG_DC_BCR 0x72 /* Build Config reg */ +-#define ARC_REG_DC_IVDC 0x47 +-#define ARC_REG_DC_CTRL 0x48 +-#define ARC_REG_DC_IVDL 0x4A +-#define ARC_REG_DC_FLSH 0x4B +-#define ARC_REG_DC_FLDL 0x4C +-#if (CONFIG_ARC_MMU_VER > 2) +-#define ARC_REG_DC_PTAG 0x5C +-#endif +- +-/* Bit val in DC_CTRL */ +-#define DC_CTRL_INV_MODE_FLUSH 0x40 +-#define DC_CTRL_FLUSH_STATUS 0x100 +- +-char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) ++char *arc_cache_mumbojumbo(int c, char *buf, int len) + { + int n = 0; +- unsigned int c = smp_processor_id(); + + #define PR_CACHE(p, enb, str) \ + { \ +@@ -169,72 +141,43 @@ void read_decode_cache_bcr(void) + */ + void arc_cache_init(void) + { +- unsigned int cpu = smp_processor_id(); +- struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; +- struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; +- unsigned int dcache_does_alias, temp; ++ unsigned int __maybe_unused cpu = smp_processor_id(); ++ struct cpuinfo_arc_cache __maybe_unused *ic, __maybe_unused *dc; + char str[256]; + + printk(arc_cache_mumbojumbo(0, str, sizeof(str))); + +- if (!ic->ver) +- goto chk_dc; +- + #ifdef CONFIG_ARC_HAS_ICACHE +- /* 1. 
Confirm some of I-cache params which Linux assumes */ +- if (ic->line_len != ARC_ICACHE_LINE_LEN) +- panic("Cache H/W doesn't match kernel Config"); +- +- if (ic->ver != CONFIG_ARC_MMU_VER) +- panic("Cache ver doesn't match MMU ver\n"); +-#endif +- +- /* Enable/disable I-Cache */ +- temp = read_aux_reg(ARC_REG_IC_CTRL); +- +-#ifdef CONFIG_ARC_HAS_ICACHE +- temp &= ~IC_CTRL_CACHE_DISABLE; +-#else +- temp |= IC_CTRL_CACHE_DISABLE; ++ ic = &cpuinfo_arc700[cpu].icache; ++ if (ic->ver) { ++ if (ic->line_len != ARC_ICACHE_LINE_LEN) ++ panic("ICache line [%d] != kernel Config [%d]", ++ ic->line_len, ARC_ICACHE_LINE_LEN); ++ ++ if (ic->ver != CONFIG_ARC_MMU_VER) ++ panic("Cache ver [%d] doesn't match MMU ver [%d]\n", ++ ic->ver, CONFIG_ARC_MMU_VER); ++ } + #endif + +- write_aux_reg(ARC_REG_IC_CTRL, temp); +- +-chk_dc: +- if (!dc->ver) +- return; +- + #ifdef CONFIG_ARC_HAS_DCACHE +- if (dc->line_len != ARC_DCACHE_LINE_LEN) +- panic("Cache H/W doesn't match kernel Config"); ++ dc = &cpuinfo_arc700[cpu].dcache; ++ if (dc->ver) { ++ unsigned int dcache_does_alias; + +- /* check for D-Cache aliasing */ +- dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE; ++ if (dc->line_len != ARC_DCACHE_LINE_LEN) ++ panic("DCache line [%d] != kernel Config [%d]", ++ dc->line_len, ARC_DCACHE_LINE_LEN); + +- if (dcache_does_alias && !cache_is_vipt_aliasing()) +- panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); +- else if (!dcache_does_alias && cache_is_vipt_aliasing()) +- panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); +-#endif ++ /* check for D-Cache aliasing */ ++ dcache_does_alias = (dc->sz / dc->assoc) > PAGE_SIZE; + +- /* Set the default Invalidate Mode to "simpy discard dirty lines" +- * as this is more frequent then flush before invalidate +- * Ofcourse we toggle this default behviour when desired +- */ +- temp = read_aux_reg(ARC_REG_DC_CTRL); +- temp &= ~DC_CTRL_INV_MODE_FLUSH; +- +-#ifdef CONFIG_ARC_HAS_DCACHE +- /* Enable D-Cache: Clear Bit 0 */ +- write_aux_reg(ARC_REG_DC_CTRL, temp & ~IC_CTRL_CACHE_DISABLE); +-#else +- /* Flush D cache */ +- write_aux_reg(ARC_REG_DC_FLSH, 0x1); +- /* Disable D cache */ +- write_aux_reg(ARC_REG_DC_CTRL, temp | IC_CTRL_CACHE_DISABLE); ++ if (dcache_does_alias && !cache_is_vipt_aliasing()) ++ panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); ++ else if (!dcache_does_alias && cache_is_vipt_aliasing()) ++ panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); ++ } + #endif +- +- return; + } + + #define OP_INV 0x1 +diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h +index ce35c9af0c28..370ae7cc588a 100644 +--- a/arch/mips/include/asm/ftrace.h ++++ b/arch/mips/include/asm/ftrace.h +@@ -24,7 +24,7 @@ do { \ + asm volatile ( \ + "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\ + " li %[" STR(error) "], 0\n" \ +- "2:\n" \ ++ "2: .insn\n" \ + \ + ".section .fixup, \"ax\"\n" \ + "3: li %[" STR(error) "], 1\n" \ +@@ -46,7 +46,7 @@ do { \ + asm volatile ( \ + "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\ + " li %[" STR(error) "], 0\n" \ +- "2:\n" \ ++ "2: .insn\n" \ + \ + ".section .fixup, \"ax\"\n" \ + "3: li %[" STR(error) "], 1\n" \ +diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c +index db7a050f5c2c..a39b415fec25 100644 +--- a/arch/mips/mm/tlbex.c ++++ b/arch/mips/mm/tlbex.c +@@ -1095,6 +1095,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep) + struct mips_huge_tlb_info { + int huge_pte; + int restore_scratch; ++ bool need_reload_pte; + }; + + static struct mips_huge_tlb_info +@@ -1109,6 +1110,7 
@@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l, + + rv.huge_pte = scratch; + rv.restore_scratch = 0; ++ rv.need_reload_pte = false; + + if (check_for_high_segbits) { + UASM_i_MFC0(p, tmp, C0_BADVADDR); +@@ -1297,6 +1299,7 @@ static void build_r4000_tlb_refill_handler(void) + } else { + htlb_info.huge_pte = K0; + htlb_info.restore_scratch = 0; ++ htlb_info.need_reload_pte = true; + vmalloc_mode = refill_noscratch; + /* + * create the plain linear handler +@@ -1333,7 +1336,8 @@ static void build_r4000_tlb_refill_handler(void) + } + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT + uasm_l_tlb_huge_update(&l, p); +- UASM_i_LW(&p, K0, 0, K1); ++ if (htlb_info.need_reload_pte) ++ UASM_i_LW(&p, htlb_info.huge_pte, 0, K1); + build_huge_update_entries(&p, htlb_info.huge_pte, K1); + build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, + htlb_info.restore_scratch); +diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c +index 7cfdaae1721a..679df556f4ab 100644 +--- a/arch/powerpc/platforms/pseries/dlpar.c ++++ b/arch/powerpc/platforms/pseries/dlpar.c +@@ -380,7 +380,7 @@ static int dlpar_online_cpu(struct device_node *dn) + BUG_ON(get_cpu_current_state(cpu) + != CPU_STATE_OFFLINE); + cpu_maps_update_done(); +- rc = cpu_up(cpu); ++ rc = device_online(get_cpu_device(cpu)); + if (rc) + goto out; + cpu_maps_update_begin(); +@@ -471,7 +471,7 @@ static int dlpar_offline_cpu(struct device_node *dn) + if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { + set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); + cpu_maps_update_done(); +- rc = cpu_down(cpu); ++ rc = device_offline(get_cpu_device(cpu)); + if (rc) + goto out; + cpu_maps_update_begin(); +diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c +index 3716e6952554..e8ab93c3e638 100644 +--- a/arch/um/drivers/ubd_kern.c ++++ b/arch/um/drivers/ubd_kern.c +@@ -1277,7 +1277,7 @@ static void do_ubd_request(struct request_queue *q) + + while(1){ + struct ubd *dev = q->queuedata; +- if(dev->end_sg == 0){ ++ if(dev->request == NULL){ + struct request *req = blk_fetch_request(q); + if(req == NULL) + return; +@@ -1299,7 +1299,8 @@ static void do_ubd_request(struct request_queue *q) + return; + } + prepare_flush_request(req, io_req); +- submit_request(io_req, dev); ++ if (submit_request(io_req, dev) == false) ++ return; + } + + while(dev->start_sg < dev->end_sg){ +diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S +index 4299eb05023c..92a2e9333620 100644 +--- a/arch/x86/ia32/ia32entry.S ++++ b/arch/x86/ia32/ia32entry.S +@@ -151,6 +151,16 @@ ENTRY(ia32_sysenter_target) + 1: movl (%rbp),%ebp + _ASM_EXTABLE(1b,ia32_badarg) + ASM_CLAC ++ ++ /* ++ * Sysenter doesn't filter flags, so we need to clear NT ++ * ourselves. To save a few cycles, we can check whether ++ * NT was set instead of doing an unconditional popfq. 
++ */ ++ testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp) ++ jnz sysenter_fix_flags ++sysenter_flags_fixed: ++ + orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) + CFI_REMEMBER_STATE +@@ -184,6 +194,8 @@ sysexit_from_sys_call: + TRACE_IRQS_ON + ENABLE_INTERRUPTS_SYSEXIT32 + ++ CFI_RESTORE_STATE ++ + #ifdef CONFIG_AUDITSYSCALL + .macro auditsys_entry_common + movl %esi,%r9d /* 6th arg: 4th syscall arg */ +@@ -226,7 +238,6 @@ sysexit_from_sys_call: + .endm + + sysenter_auditsys: +- CFI_RESTORE_STATE + auditsys_entry_common + movl %ebp,%r9d /* reload 6th syscall arg */ + jmp sysenter_dispatch +@@ -235,6 +246,11 @@ sysexit_audit: + auditsys_exit sysexit_from_sys_call + #endif + ++sysenter_fix_flags: ++ pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) ++ popfq_cfi ++ jmp sysenter_flags_fixed ++ + sysenter_tracesys: + #ifdef CONFIG_AUDITSYSCALL + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) +diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h +index 9c999c1674fa..01f15b227d7e 100644 +--- a/arch/x86/include/asm/elf.h ++++ b/arch/x86/include/asm/elf.h +@@ -155,8 +155,9 @@ do { \ + #define elf_check_arch(x) \ + ((x)->e_machine == EM_X86_64) + +-#define compat_elf_check_arch(x) \ +- (elf_check_arch_ia32(x) || (x)->e_machine == EM_X86_64) ++#define compat_elf_check_arch(x) \ ++ (elf_check_arch_ia32(x) || \ ++ (IS_ENABLED(CONFIG_X86_X32_ABI) && (x)->e_machine == EM_X86_64)) + + #if __USER32_DS != __USER_DS + # error "The following code assumes __USER32_DS == __USER_DS" +diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h +index 847b165b9f9e..7cb77dd749df 100644 +--- a/arch/x86/include/asm/kvm_host.h ++++ b/arch/x86/include/asm/kvm_host.h +@@ -974,6 +974,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) + kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); + } + ++static inline u64 get_canonical(u64 la) ++{ ++ return ((int64_t)la << 16) >> 16; ++} ++ ++static inline bool is_noncanonical_address(u64 la) ++{ ++#ifdef CONFIG_X86_64 ++ return get_canonical(la) != la; ++#else ++ return false; ++#endif ++} ++ + #define TSS_IOPB_BASE_OFFSET 0x66 + #define TSS_BASE_SIZE 0x68 + #define TSS_IOPB_SIZE (65536 / 8) +@@ -1032,7 +1046,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v); + void kvm_vcpu_reset(struct kvm_vcpu *vcpu); + + void kvm_define_shared_msr(unsigned index, u32 msr); +-void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); ++int kvm_set_shared_msr(unsigned index, u64 val, u64 mask); + + bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); + +diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h +index 0e79420376eb..990a2fe1588d 100644 +--- a/arch/x86/include/uapi/asm/vmx.h ++++ b/arch/x86/include/uapi/asm/vmx.h +@@ -67,6 +67,7 @@ + #define EXIT_REASON_EPT_MISCONFIG 49 + #define EXIT_REASON_INVEPT 50 + #define EXIT_REASON_PREEMPTION_TIMER 52 ++#define EXIT_REASON_INVVPID 53 + #define EXIT_REASON_WBINVD 54 + #define EXIT_REASON_XSETBV 55 + #define EXIT_REASON_APIC_WRITE 56 +@@ -114,6 +115,7 @@ + { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ + { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ + { EXIT_REASON_INVD, "INVD" }, \ ++ { EXIT_REASON_INVVPID, "INVVPID" }, \ + { EXIT_REASON_INVPCID, "INVPCID" } + + #endif /* _UAPIVMX_H */ +diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c +index a7eb82d9b012..7170f1738793 100644 +--- 
a/arch/x86/kernel/apic/apic.c ++++ b/arch/x86/kernel/apic/apic.c +@@ -1282,7 +1282,7 @@ void setup_local_APIC(void) + unsigned int value, queued; + int i, j, acked = 0; + unsigned long long tsc = 0, ntsc; +- long long max_loops = cpu_khz; ++ long long max_loops = cpu_khz ? cpu_khz : 1000000; + + if (cpu_has_tsc) + rdtscll(tsc); +@@ -1379,7 +1379,7 @@ void setup_local_APIC(void) + break; + } + if (queued) { +- if (cpu_has_tsc) { ++ if (cpu_has_tsc && cpu_khz) { + rdtscll(ntsc); + max_loops = (cpu_khz << 10) - (ntsc - tsc); + } else +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index 3533e2c082a3..d5f63dacf030 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -1135,7 +1135,7 @@ void syscall_init(void) + /* Flags to clear on syscall */ + wrmsrl(MSR_SYSCALL_MASK, + X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF| +- X86_EFLAGS_IOPL|X86_EFLAGS_AC); ++ X86_EFLAGS_IOPL|X86_EFLAGS_AC|X86_EFLAGS_NT); + } + + /* +diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c +index 1d8152b764a7..f4a9985ca88e 100644 +--- a/arch/x86/kernel/cpu/intel.c ++++ b/arch/x86/kernel/cpu/intel.c +@@ -384,6 +384,13 @@ static void init_intel(struct cpuinfo_x86 *c) + detect_extended_topology(c); + + l2 = init_intel_cacheinfo(c); ++ ++ /* Detect legacy cache sizes if init_intel_cacheinfo did not */ ++ if (l2 == 0) { ++ cpu_detect_cache_sizes(c); ++ l2 = c->x86_cache_size; ++ } ++ + if (c->cpuid_level > 9) { + unsigned eax = cpuid_eax(10); + /* Check for version and the number of counters */ +@@ -498,6 +505,13 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) + */ + if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) + size = 256; ++ ++ /* ++ * Intel Quark SoC X1000 contains a 4-way set associative ++ * 16K cache with a 16 byte cache line and 256 lines per tag ++ */ ++ if ((c->x86 == 5) && (c->x86_model == 9)) ++ size = 16; + return size; + } + #endif +@@ -703,7 +717,8 @@ static const struct cpu_dev intel_cpu_dev = { + [3] = "OverDrive PODP5V83", + [4] = "Pentium MMX", + [7] = "Mobile Pentium 75 - 200", +- [8] = "Mobile Pentium MMX" ++ [8] = "Mobile Pentium MMX", ++ [9] = "Quark SoC X1000", + } + }, + { .vendor = X86_VENDOR_INTEL, .family = 6, .model_names = +diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c +index 9e5de6813e1f..b88fc86309bc 100644 +--- a/arch/x86/kernel/signal.c ++++ b/arch/x86/kernel/signal.c +@@ -673,6 +673,11 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs) + * handler too. + */ + regs->flags &= ~(X86_EFLAGS_DF|X86_EFLAGS_RF|X86_EFLAGS_TF); ++ /* ++ * Ensure the signal handler starts with the new fpu state. 
++ */ ++ if (used_math()) ++ drop_init_fpu(current); + } + signal_setup_done(failed, ksig, test_thread_flag(TIF_SINGLESTEP)); + } +diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c +index 930e5d48f560..a7fef605b1c0 100644 +--- a/arch/x86/kernel/tsc.c ++++ b/arch/x86/kernel/tsc.c +@@ -974,14 +974,17 @@ void __init tsc_init(void) + + x86_init.timers.tsc_pre_init(); + +- if (!cpu_has_tsc) ++ if (!cpu_has_tsc) { ++ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); + return; ++ } + + tsc_khz = x86_platform.calibrate_tsc(); + cpu_khz = tsc_khz; + + if (!tsc_khz) { + mark_tsc_unstable("could not calculate TSC khz"); ++ setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); + return; + } + +diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c +index 422fd8223470..f5869fc65d66 100644 +--- a/arch/x86/kernel/xsave.c ++++ b/arch/x86/kernel/xsave.c +@@ -268,8 +268,6 @@ int save_xstate_sig(void __user *buf, void __user *buf_fx, int size) + if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate)) + return -1; + +- drop_init_fpu(tsk); /* trigger finit */ +- + return 0; + } + +@@ -399,8 +397,11 @@ int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) + set_used_math(); + } + +- if (use_eager_fpu()) ++ if (use_eager_fpu()) { ++ preempt_disable(); + math_state_restore(); ++ preempt_enable(); ++ } + + return err; + } else { +diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c +index 3ee4472cef19..ab1d45928ce7 100644 +--- a/arch/x86/kvm/emulate.c ++++ b/arch/x86/kvm/emulate.c +@@ -498,11 +498,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) + masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); + } + +-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) +-{ +- register_address_increment(ctxt, &ctxt->_eip, rel); +-} +- + static u32 desc_limit_scaled(struct desc_struct *desc) + { + u32 limit = get_desc_limit(desc); +@@ -576,6 +571,38 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt) + return emulate_exception(ctxt, NM_VECTOR, 0, false); + } + ++static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst, ++ int cs_l) ++{ ++ switch (ctxt->op_bytes) { ++ case 2: ++ ctxt->_eip = (u16)dst; ++ break; ++ case 4: ++ ctxt->_eip = (u32)dst; ++ break; ++ case 8: ++ if ((cs_l && is_noncanonical_address(dst)) || ++ (!cs_l && (dst & ~(u32)-1))) ++ return emulate_gp(ctxt, 0); ++ ctxt->_eip = dst; ++ break; ++ default: ++ WARN(1, "unsupported eip assignment size\n"); ++ } ++ return X86EMUL_CONTINUE; ++} ++ ++static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) ++{ ++ return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64); ++} ++ ++static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) ++{ ++ return assign_eip_near(ctxt, ctxt->_eip + rel); ++} ++ + static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) + { + u16 selector; +@@ -1964,13 +1991,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt) + case 2: /* call near abs */ { + long int old_eip; + old_eip = ctxt->_eip; +- ctxt->_eip = ctxt->src.val; ++ rc = assign_eip_near(ctxt, ctxt->src.val); ++ if (rc != X86EMUL_CONTINUE) ++ break; + ctxt->src.val = old_eip; + rc = em_push(ctxt); + break; + } + case 4: /* jmp abs */ +- ctxt->_eip = ctxt->src.val; ++ rc = assign_eip_near(ctxt, ctxt->src.val); + break; + case 5: /* jmp far */ + rc = em_jmp_far(ctxt); +@@ -2002,10 +2031,14 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) + + static int em_ret(struct x86_emulate_ctxt *ctxt) + 
{ +- ctxt->dst.type = OP_REG; +- ctxt->dst.addr.reg = &ctxt->_eip; +- ctxt->dst.bytes = ctxt->op_bytes; +- return em_pop(ctxt); ++ int rc; ++ unsigned long eip; ++ ++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ ++ return assign_eip_near(ctxt, eip); + } + + static int em_ret_far(struct x86_emulate_ctxt *ctxt) +@@ -2283,7 +2316,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) + { + const struct x86_emulate_ops *ops = ctxt->ops; + struct desc_struct cs, ss; +- u64 msr_data; ++ u64 msr_data, rcx, rdx; + int usermode; + u16 cs_sel = 0, ss_sel = 0; + +@@ -2299,6 +2332,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) + else + usermode = X86EMUL_MODE_PROT32; + ++ rcx = reg_read(ctxt, VCPU_REGS_RCX); ++ rdx = reg_read(ctxt, VCPU_REGS_RDX); ++ + cs.dpl = 3; + ss.dpl = 3; + ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); +@@ -2316,6 +2352,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) + ss_sel = cs_sel + 8; + cs.d = 0; + cs.l = 1; ++ if (is_noncanonical_address(rcx) || ++ is_noncanonical_address(rdx)) ++ return emulate_gp(ctxt, 0); + break; + } + cs_sel |= SELECTOR_RPL_MASK; +@@ -2324,8 +2363,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) + ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); + ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); + +- ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX); +- *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX); ++ ctxt->_eip = rdx; ++ *reg_write(ctxt, VCPU_REGS_RSP) = rcx; + + return X86EMUL_CONTINUE; + } +@@ -2864,10 +2903,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt) + + static int em_call(struct x86_emulate_ctxt *ctxt) + { ++ int rc; + long rel = ctxt->src.val; + + ctxt->src.val = (unsigned long)ctxt->_eip; +- jmp_rel(ctxt, rel); ++ rc = jmp_rel(ctxt, rel); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; + return em_push(ctxt); + } + +@@ -2899,11 +2941,12 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) + static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) + { + int rc; ++ unsigned long eip; + +- ctxt->dst.type = OP_REG; +- ctxt->dst.addr.reg = &ctxt->_eip; +- ctxt->dst.bytes = ctxt->op_bytes; +- rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); ++ rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); ++ if (rc != X86EMUL_CONTINUE) ++ return rc; ++ rc = assign_eip_near(ctxt, eip); + if (rc != X86EMUL_CONTINUE) + return rc; + rsp_increment(ctxt, ctxt->src.val); +@@ -3193,20 +3236,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt) + + static int em_loop(struct x86_emulate_ctxt *ctxt) + { ++ int rc = X86EMUL_CONTINUE; ++ + register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1); + if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && + (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) +- jmp_rel(ctxt, ctxt->src.val); ++ rc = jmp_rel(ctxt, ctxt->src.val); + +- return X86EMUL_CONTINUE; ++ return rc; + } + + static int em_jcxz(struct x86_emulate_ctxt *ctxt) + { ++ int rc = X86EMUL_CONTINUE; ++ + if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) +- jmp_rel(ctxt, ctxt->src.val); ++ rc = jmp_rel(ctxt, ctxt->src.val); + +- return X86EMUL_CONTINUE; ++ return rc; + } + + static int em_in(struct x86_emulate_ctxt *ctxt) +@@ -4558,7 +4605,7 @@ special_insn: + break; + case 0x70 ... 
0x7f: /* jcc (short) */ + if (test_cc(ctxt->b, ctxt->eflags)) +- jmp_rel(ctxt, ctxt->src.val); ++ rc = jmp_rel(ctxt, ctxt->src.val); + break; + case 0x8d: /* lea r16/r32, m */ + ctxt->dst.val = ctxt->src.addr.mem.ea; +@@ -4587,7 +4634,7 @@ special_insn: + break; + case 0xe9: /* jmp rel */ + case 0xeb: /* jmp rel short */ +- jmp_rel(ctxt, ctxt->src.val); ++ rc = jmp_rel(ctxt, ctxt->src.val); + ctxt->dst.type = OP_NONE; /* Disable writeback. */ + break; + case 0xf4: /* hlt */ +@@ -4707,7 +4754,7 @@ twobyte_insn: + break; + case 0x80 ... 0x8f: /* jnz rel, etc*/ + if (test_cc(ctxt->b, ctxt->eflags)) +- jmp_rel(ctxt, ctxt->src.val); ++ rc = jmp_rel(ctxt, ctxt->src.val); + break; + case 0x90 ... 0x9f: /* setcc r/m8 */ + ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); +diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c +index 518d86471b76..298781d4cfb4 100644 +--- a/arch/x86/kvm/i8254.c ++++ b/arch/x86/kvm/i8254.c +@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu) + return; + + timer = &pit->pit_state.timer; ++ mutex_lock(&pit->pit_state.lock); + if (hrtimer_cancel(timer)) + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); ++ mutex_unlock(&pit->pit_state.lock); + } + + static void destroy_pit_timer(struct kvm_pit *pit) +diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c +index 612c717747dd..5dcdff58b679 100644 +--- a/arch/x86/kvm/svm.c ++++ b/arch/x86/kvm/svm.c +@@ -3204,7 +3204,7 @@ static int wrmsr_interception(struct vcpu_svm *svm) + msr.host_initiated = false; + + svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; +- if (svm_set_msr(&svm->vcpu, &msr)) { ++ if (kvm_set_msr(&svm->vcpu, &msr)) { + trace_kvm_msr_write_ex(ecx, data); + kvm_inject_gp(&svm->vcpu, 0); + } else { +@@ -3486,9 +3486,9 @@ static int handle_exit(struct kvm_vcpu *vcpu) + + if (exit_code >= ARRAY_SIZE(svm_exit_handlers) + || !svm_exit_handlers[exit_code]) { +- kvm_run->exit_reason = KVM_EXIT_UNKNOWN; +- kvm_run->hw.hardware_exit_reason = exit_code; +- return 0; ++ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code); ++ kvm_queue_exception(vcpu, UD_VECTOR); ++ return 1; + } + + return svm_exit_handlers[exit_code](svm); +diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c +index 59181e653826..c7663b16cdbe 100644 +--- a/arch/x86/kvm/vmx.c ++++ b/arch/x86/kvm/vmx.c +@@ -2540,12 +2540,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) + break; + msr = find_msr_entry(vmx, msr_index); + if (msr) { ++ u64 old_msr_data = msr->data; + msr->data = data; + if (msr - vmx->guest_msrs < vmx->save_nmsrs) { + preempt_disable(); +- kvm_set_shared_msr(msr->index, msr->data, +- msr->mask); ++ ret = kvm_set_shared_msr(msr->index, msr->data, ++ msr->mask); + preempt_enable(); ++ if (ret) ++ msr->data = old_msr_data; + } + break; + } +@@ -5113,7 +5116,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu) + msr.data = data; + msr.index = ecx; + msr.host_initiated = false; +- if (vmx_set_msr(vcpu, &msr) != 0) { ++ if (kvm_set_msr(vcpu, &msr) != 0) { + trace_kvm_msr_write_ex(ecx, data); + kvm_inject_gp(vcpu, 0); + return 1; +@@ -6385,6 +6388,12 @@ static int handle_invept(struct kvm_vcpu *vcpu) + return 1; + } + ++static int handle_invvpid(struct kvm_vcpu *vcpu) ++{ ++ kvm_queue_exception(vcpu, UD_VECTOR); ++ return 1; ++} ++ + /* + * The exit handlers return 1 if the exit was handled fully and guest execution + * may resume. 
Otherwise they set the kvm_run parameter to indicate what needs +@@ -6430,6 +6439,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = { + [EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op, + [EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op, + [EXIT_REASON_INVEPT] = handle_invept, ++ [EXIT_REASON_INVVPID] = handle_invvpid, + }; + + static const int kvm_vmx_max_exit_handlers = +@@ -6656,7 +6666,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) + case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: + case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: + case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: +- case EXIT_REASON_INVEPT: ++ case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID: + /* + * VMX instructions trap unconditionally. This allows L1 to + * emulate them for its L2 guest, i.e., allows 3-level nesting! +@@ -6812,10 +6822,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) + && kvm_vmx_exit_handlers[exit_reason]) + return kvm_vmx_exit_handlers[exit_reason](vcpu); + else { +- vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; +- vcpu->run->hw.hardware_exit_reason = exit_reason; ++ WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); ++ kvm_queue_exception(vcpu, UD_VECTOR); ++ return 1; + } +- return 0; + } + + static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 77046f7177d5..590fd966b37a 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -225,20 +225,25 @@ static void kvm_shared_msr_cpu_online(void) + shared_msr_update(i, shared_msrs_global.msrs[i]); + } + +-void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) ++int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) + { + unsigned int cpu = smp_processor_id(); + struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); ++ int err; + + if (((value ^ smsr->values[slot].curr) & mask) == 0) +- return; ++ return 0; + smsr->values[slot].curr = value; +- wrmsrl(shared_msrs_global.msrs[slot], value); ++ err = wrmsrl_safe(shared_msrs_global.msrs[slot], value); ++ if (err) ++ return 1; ++ + if (!smsr->registered) { + smsr->urn.on_user_return = kvm_on_user_return; + user_return_notifier_register(&smsr->urn); + smsr->registered = true; + } ++ return 0; + } + EXPORT_SYMBOL_GPL(kvm_set_shared_msr); + +@@ -910,7 +915,6 @@ void kvm_enable_efer_bits(u64 mask) + } + EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); + +- + /* + * Writes msr value into into the appropriate "register". + * Returns 0 on success, non-0 otherwise. +@@ -918,8 +922,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); + */ + int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) + { ++ switch (msr->index) { ++ case MSR_FS_BASE: ++ case MSR_GS_BASE: ++ case MSR_KERNEL_GS_BASE: ++ case MSR_CSTAR: ++ case MSR_LSTAR: ++ if (is_noncanonical_address(msr->data)) ++ return 1; ++ break; ++ case MSR_IA32_SYSENTER_EIP: ++ case MSR_IA32_SYSENTER_ESP: ++ /* ++ * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if ++ * non-canonical address is written on Intel but not on ++ * AMD (which ignores the top 32-bits, because it does ++ * not implement 64-bit SYSENTER). ++ * ++ * 64-bit code should hence be able to write a non-canonical ++ * value on AMD. Making the address canonical ensures that ++ * vmentry does not fail on Intel after writing a non-canonical ++ * value, and that something deterministic happens if the guest ++ * invokes 64-bit SYSENTER. 
++ */ ++ msr->data = get_canonical(msr->data); ++ } + return kvm_x86_ops->set_msr(vcpu, msr); + } ++EXPORT_SYMBOL_GPL(kvm_set_msr); + + /* + * Adapt set_msr() to msr_io()'s calling convention +diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c +index bb32480c2d71..aabdf762f592 100644 +--- a/arch/x86/mm/pageattr.c ++++ b/arch/x86/mm/pageattr.c +@@ -389,7 +389,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr) + psize = page_level_size(level); + pmask = page_level_mask(level); + offset = virt_addr & ~pmask; +- phys_addr = pte_pfn(*pte) << PAGE_SHIFT; ++ phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT; + return (phys_addr | offset); + } + EXPORT_SYMBOL_GPL(slow_virt_to_phys); +diff --git a/block/blk-settings.c b/block/blk-settings.c +index 53309333c2f0..ec00a0f75212 100644 +--- a/block/blk-settings.c ++++ b/block/blk-settings.c +@@ -553,7 +553,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + bottom = max(b->physical_block_size, b->io_min) + alignment; + + /* Verify that top and bottom intervals line up */ +- if (max(top, bottom) & (min(top, bottom) - 1)) { ++ if (max(top, bottom) % min(top, bottom)) { + t->misaligned = 1; + ret = -1; + } +@@ -594,7 +594,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, + + /* Find lowest common alignment_offset */ + t->alignment_offset = lcm(t->alignment_offset, alignment) +- & (max(t->physical_block_size, t->io_min) - 1); ++ % max(t->physical_block_size, t->io_min); + + /* Verify that new alignment_offset is on a logical block boundary */ + if (t->alignment_offset & (t->logical_block_size - 1)) { +diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c +index a5ffcc988f0b..1b4988b4bc11 100644 +--- a/block/scsi_ioctl.c ++++ b/block/scsi_ioctl.c +@@ -506,7 +506,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, + + if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { + err = DRIVER_ERROR << 24; +- goto out; ++ goto error; + } + + memset(sense, 0, sizeof(sense)); +@@ -516,7 +516,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, + + blk_execute_rq(q, disk, rq, 0); + +-out: + err = rq->errors & 0xff; /* only 8 bit SCSI status */ + if (err) { + if (rq->sense_len && rq->sense) { +diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c +index a19c027b29bd..83187f497c7c 100644 +--- a/crypto/algif_skcipher.c ++++ b/crypto/algif_skcipher.c +@@ -49,7 +49,7 @@ struct skcipher_ctx { + struct ablkcipher_request req; + }; + +-#define MAX_SGL_ENTS ((PAGE_SIZE - sizeof(struct skcipher_sg_list)) / \ ++#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \ + sizeof(struct scatterlist) - 1) + + static inline int skcipher_sndbuf(struct sock *sk) +diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c +index 7171d52e12ca..85752c668473 100644 +--- a/drivers/acpi/ec.c ++++ b/drivers/acpi/ec.c +@@ -129,6 +129,7 @@ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ + static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ + static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ + static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ ++static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ + + /* -------------------------------------------------------------------------- + Transaction Management +@@ -193,6 +194,8 @@ static bool advance_transaction(struct acpi_ec *ec) + t->rdata[t->ri++] = acpi_ec_read_data(ec); + if (t->rlen == 
t->ri) { + t->flags |= ACPI_EC_COMMAND_COMPLETE; ++ if (t->command == ACPI_EC_COMMAND_QUERY) ++ pr_debug("hardware QR_EC completion\n"); + wakeup = true; + } + } else +@@ -204,7 +207,15 @@ static bool advance_transaction(struct acpi_ec *ec) + } + return wakeup; + } else { +- if ((status & ACPI_EC_FLAG_IBF) == 0) { ++ if (EC_FLAGS_QUERY_HANDSHAKE && ++ !(status & ACPI_EC_FLAG_SCI) && ++ (t->command == ACPI_EC_COMMAND_QUERY)) { ++ t->flags |= ACPI_EC_COMMAND_POLL; ++ t->rdata[t->ri++] = 0x00; ++ t->flags |= ACPI_EC_COMMAND_COMPLETE; ++ pr_debug("software QR_EC completion\n"); ++ wakeup = true; ++ } else if ((status & ACPI_EC_FLAG_IBF) == 0) { + acpi_ec_write_cmd(ec, t->command); + t->flags |= ACPI_EC_COMMAND_POLL; + } else +@@ -982,6 +993,18 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) + } + + /* ++ * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for ++ * which case, we complete the QR_EC without issuing it to the firmware. ++ * https://bugzilla.kernel.org/show_bug.cgi?id=86211 ++ */ ++static int ec_flag_query_handshake(const struct dmi_system_id *id) ++{ ++ pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n"); ++ EC_FLAGS_QUERY_HANDSHAKE = 1; ++ return 0; ++} ++ ++/* + * On some hardware it is necessary to clear events accumulated by the EC during + * sleep. These ECs stop reporting GPEs until they are manually polled, if too + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) +@@ -1051,6 +1074,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { + { + ec_clear_on_resume, "Samsung hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, ++ { ++ ec_flag_query_handshake, "Acer hardware", { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL}, + {}, + }; + +diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c +index b603720b877d..37acda6fa7e4 100644 +--- a/drivers/ata/libata-sff.c ++++ b/drivers/ata/libata-sff.c +@@ -2008,13 +2008,15 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, + + DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); + +- /* software reset. causes dev0 to be selected */ +- iowrite8(ap->ctl, ioaddr->ctl_addr); +- udelay(20); /* FIXME: flush */ +- iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); +- udelay(20); /* FIXME: flush */ +- iowrite8(ap->ctl, ioaddr->ctl_addr); +- ap->last_ctl = ap->ctl; ++ if (ap->ioaddr.ctl_addr) { ++ /* software reset. 
causes dev0 to be selected */ ++ iowrite8(ap->ctl, ioaddr->ctl_addr); ++ udelay(20); /* FIXME: flush */ ++ iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); ++ udelay(20); /* FIXME: flush */ ++ iowrite8(ap->ctl, ioaddr->ctl_addr); ++ ap->last_ctl = ap->ctl; ++ } + + /* wait the port to become ready */ + return ata_sff_wait_after_reset(&ap->link, devmask, deadline); +@@ -2215,10 +2217,6 @@ void ata_sff_error_handler(struct ata_port *ap) + + spin_unlock_irqrestore(ap->lock, flags); + +- /* ignore ata_sff_softreset if ctl isn't accessible */ +- if (softreset == ata_sff_softreset && !ap->ioaddr.ctl_addr) +- softreset = NULL; +- + /* ignore built-in hardresets if SCR access is not available */ + if ((hardreset == sata_std_hardreset || + hardreset == sata_sff_hardreset) && !sata_scr_valid(&ap->link)) +diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c +index 96c6a79ef606..79dedbae282c 100644 +--- a/drivers/ata/pata_serverworks.c ++++ b/drivers/ata/pata_serverworks.c +@@ -252,12 +252,18 @@ static void serverworks_set_dmamode(struct ata_port *ap, struct ata_device *adev + pci_write_config_byte(pdev, 0x54, ultra_cfg); + } + +-static struct scsi_host_template serverworks_sht = { ++static struct scsi_host_template serverworks_osb4_sht = { ++ ATA_BMDMA_SHT(DRV_NAME), ++ .sg_tablesize = LIBATA_DUMB_MAX_PRD, ++}; ++ ++static struct scsi_host_template serverworks_csb_sht = { + ATA_BMDMA_SHT(DRV_NAME), + }; + + static struct ata_port_operations serverworks_osb4_port_ops = { + .inherits = &ata_bmdma_port_ops, ++ .qc_prep = ata_bmdma_dumb_qc_prep, + .cable_detect = serverworks_cable_detect, + .mode_filter = serverworks_osb4_filter, + .set_piomode = serverworks_set_piomode, +@@ -266,6 +272,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = { + + static struct ata_port_operations serverworks_csb_port_ops = { + .inherits = &serverworks_osb4_port_ops, ++ .qc_prep = ata_bmdma_qc_prep, + .mode_filter = serverworks_csb_filter, + }; + +@@ -405,6 +412,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id + } + }; + const struct ata_port_info *ppi[] = { &info[id->driver_data], NULL }; ++ struct scsi_host_template *sht = &serverworks_csb_sht; + int rc; + + rc = pcim_enable_device(pdev); +@@ -418,6 +426,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id + /* Select non UDMA capable OSB4 if we can't do fixups */ + if (rc < 0) + ppi[0] = &info[1]; ++ sht = &serverworks_osb4_sht; + } + /* setup CSB5/CSB6 : South Bridge and IDE option RAID */ + else if ((pdev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) || +@@ -434,7 +443,7 @@ static int serverworks_init_one(struct pci_dev *pdev, const struct pci_device_id + ppi[1] = &ata_dummy_port_info; + } + +- return ata_pci_bmdma_init_one(pdev, ppi, &serverworks_sht, NULL, 0); ++ return ata_pci_bmdma_init_one(pdev, ppi, sht, NULL, 0); + } + + #ifdef CONFIG_PM +diff --git a/drivers/base/core.c b/drivers/base/core.c +index 34abf4d8a45f..944fecd32e9f 100644 +--- a/drivers/base/core.c ++++ b/drivers/base/core.c +@@ -812,12 +812,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj) + return &dir->kobj; + } + ++static DEFINE_MUTEX(gdp_mutex); + + static struct kobject *get_device_parent(struct device *dev, + struct device *parent) + { + if (dev->class) { +- static DEFINE_MUTEX(gdp_mutex); + struct kobject *kobj = NULL; + struct kobject *parent_kobj; + struct kobject *k; +@@ -881,7 +881,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject 
*glue_dir) + glue_dir->kset != &dev->class->p->glue_dirs) + return; + ++ mutex_lock(&gdp_mutex); + kobject_put(glue_dir); ++ mutex_unlock(&gdp_mutex); + } + + static void cleanup_device_parent(struct device *dev) +diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c +index 7a58be457eb5..9f7990187653 100644 +--- a/drivers/base/regmap/regmap.c ++++ b/drivers/base/regmap/regmap.c +@@ -1403,8 +1403,10 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, + if (val_bytes == 1) { + wval = (void *)val; + } else { +- if (!val_count) +- return -EINVAL; ++ if (!val_count) { ++ ret = -EINVAL; ++ goto out; ++ } + + wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL); + if (!wval) { +diff --git a/drivers/block/drbd/drbd_interval.c b/drivers/block/drbd/drbd_interval.c +index 89c497c630b4..04a14e0f8878 100644 +--- a/drivers/block/drbd/drbd_interval.c ++++ b/drivers/block/drbd/drbd_interval.c +@@ -79,6 +79,7 @@ bool + drbd_insert_interval(struct rb_root *root, struct drbd_interval *this) + { + struct rb_node **new = &root->rb_node, *parent = NULL; ++ sector_t this_end = this->sector + (this->size >> 9); + + BUG_ON(!IS_ALIGNED(this->size, 512)); + +@@ -87,6 +88,8 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this) + rb_entry(*new, struct drbd_interval, rb); + + parent = *new; ++ if (here->end < this_end) ++ here->end = this_end; + if (this->sector < here->sector) + new = &(*new)->rb_left; + else if (this->sector > here->sector) +@@ -99,6 +102,7 @@ drbd_insert_interval(struct rb_root *root, struct drbd_interval *this) + return false; + } + ++ this->end = this_end; + rb_link_node(&this->rb, parent, new); + rb_insert_augmented(&this->rb, root, &augment_callbacks); + return true; +diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c +index aeeb62e0981a..a86841886acc 100644 +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -3220,7 +3220,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, + page_count = (u32) calc_pages_for(offset, length); + pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); + if (IS_ERR(pages)) +- ret = PTR_ERR(pages); ++ return PTR_ERR(pages); + + ret = -ENOMEM; + obj_request = rbd_obj_request_create(object_name, offset, length, +diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c +index 6620b73d0490..6beaaf83680e 100644 +--- a/drivers/block/xen-blkback/blkback.c ++++ b/drivers/block/xen-blkback/blkback.c +@@ -755,6 +755,7 @@ again: + BUG_ON(new_map_idx >= segs_to_map); + if (unlikely(map[new_map_idx].status != 0)) { + pr_debug(DRV_PFX "invalid buffer -- could not remap it\n"); ++ put_free_pages(blkif, &pages[seg_idx]->page, 1); + pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE; + ret |= 1; + goto next; +diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c +index 2acabdaecec8..1685b3c50db1 100644 +--- a/drivers/bluetooth/ath3k.c ++++ b/drivers/bluetooth/ath3k.c +@@ -27,6 +27,7 @@ + #include <linux/device.h> + #include <linux/firmware.h> + #include <linux/usb.h> ++#include <asm/unaligned.h> + #include <net/bluetooth/bluetooth.h> + + #define VERSION "1.0" +@@ -50,60 +51,68 @@ + #define ATH3K_NAME_LEN 0xFF + + struct ath3k_version { +- unsigned int rom_version; +- unsigned int build_version; +- unsigned int ram_version; +- unsigned char ref_clock; +- unsigned char reserved[0x07]; +-}; ++ __le32 rom_version; ++ __le32 build_version; ++ __le32 ram_version; ++ __u8 ref_clock; ++ __u8 reserved[7]; ++} __packed; + + static struct usb_device_id ath3k_table[] 
= { + /* Atheros AR3011 */ + { USB_DEVICE(0x0CF3, 0x3000) }, + + /* Atheros AR3011 with sflash firmware*/ ++ { USB_DEVICE(0x0489, 0xE027) }, ++ { USB_DEVICE(0x0489, 0xE03D) }, ++ { USB_DEVICE(0x0930, 0x0215) }, + { USB_DEVICE(0x0CF3, 0x3002) }, + { USB_DEVICE(0x0CF3, 0xE019) }, + { USB_DEVICE(0x13d3, 0x3304) }, +- { USB_DEVICE(0x0930, 0x0215) }, +- { USB_DEVICE(0x0489, 0xE03D) }, +- { USB_DEVICE(0x0489, 0xE027) }, + + /* Atheros AR9285 Malbec with sflash firmware */ + { USB_DEVICE(0x03F0, 0x311D) }, + + /* Atheros AR3012 with sflash firmware*/ +- { USB_DEVICE(0x0CF3, 0x0036) }, +- { USB_DEVICE(0x0CF3, 0x3004) }, +- { USB_DEVICE(0x0CF3, 0x3008) }, +- { USB_DEVICE(0x0CF3, 0x311D) }, +- { USB_DEVICE(0x0CF3, 0x817a) }, +- { USB_DEVICE(0x13d3, 0x3375) }, ++ { USB_DEVICE(0x0489, 0xe04d) }, ++ { USB_DEVICE(0x0489, 0xe04e) }, ++ { USB_DEVICE(0x0489, 0xe057) }, ++ { USB_DEVICE(0x0489, 0xe056) }, ++ { USB_DEVICE(0x0489, 0xe05f) }, ++ { USB_DEVICE(0x04c5, 0x1330) }, + { USB_DEVICE(0x04CA, 0x3004) }, + { USB_DEVICE(0x04CA, 0x3005) }, + { USB_DEVICE(0x04CA, 0x3006) }, + { USB_DEVICE(0x04CA, 0x3007) }, + { USB_DEVICE(0x04CA, 0x3008) }, +- { USB_DEVICE(0x13d3, 0x3362) }, ++ { USB_DEVICE(0x04CA, 0x300b) }, ++ { USB_DEVICE(0x0930, 0x0219) }, ++ { USB_DEVICE(0x0930, 0x0220) }, ++ { USB_DEVICE(0x0930, 0x0227) }, ++ { USB_DEVICE(0x0b05, 0x17d0) }, ++ { USB_DEVICE(0x0CF3, 0x0036) }, ++ { USB_DEVICE(0x0CF3, 0x3004) }, ++ { USB_DEVICE(0x0CF3, 0x3008) }, ++ { USB_DEVICE(0x0CF3, 0x311D) }, ++ { USB_DEVICE(0x0CF3, 0x311E) }, ++ { USB_DEVICE(0x0CF3, 0x311F) }, ++ { USB_DEVICE(0x0cf3, 0x3121) }, ++ { USB_DEVICE(0x0CF3, 0x817a) }, ++ { USB_DEVICE(0x0cf3, 0xe003) }, + { USB_DEVICE(0x0CF3, 0xE004) }, + { USB_DEVICE(0x0CF3, 0xE005) }, +- { USB_DEVICE(0x0930, 0x0219) }, +- { USB_DEVICE(0x0489, 0xe057) }, ++ { USB_DEVICE(0x13d3, 0x3362) }, ++ { USB_DEVICE(0x13d3, 0x3375) }, + { USB_DEVICE(0x13d3, 0x3393) }, +- { USB_DEVICE(0x0489, 0xe04e) }, +- { USB_DEVICE(0x0489, 0xe056) }, +- { USB_DEVICE(0x0489, 0xe04d) }, +- { USB_DEVICE(0x04c5, 0x1330) }, + { USB_DEVICE(0x13d3, 0x3402) }, +- { USB_DEVICE(0x0cf3, 0x3121) }, +- { USB_DEVICE(0x0cf3, 0xe003) }, ++ { USB_DEVICE(0x13d3, 0x3432) }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xE02C) }, + + /* Atheros AR5BBU22 with sflash firmware */ +- { USB_DEVICE(0x0489, 0xE03C) }, + { USB_DEVICE(0x0489, 0xE036) }, ++ { USB_DEVICE(0x0489, 0xE03C) }, + + { } /* Terminating entry */ + }; +@@ -116,34 +125,42 @@ MODULE_DEVICE_TABLE(usb, ath3k_table); + static struct usb_device_id ath3k_blist_tbl[] = { + + /* Atheros AR3012 with sflash firmware*/ ++ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0b05, 
0x17d0), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, + + /* Atheros AR5BBU22 with sflash firmware */ +- { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, + + { } /* Terminating entry */ + }; +@@ -335,7 +352,8 @@ static int ath3k_load_patch(struct usb_device *udev) + unsigned char fw_state; + char filename[ATH3K_NAME_LEN] = {0}; + const struct firmware *firmware; +- struct ath3k_version fw_version, pt_version; ++ struct ath3k_version fw_version; ++ __u32 pt_rom_version, pt_build_version; + int ret; + + ret = ath3k_get_state(udev, &fw_state); +@@ -356,7 +374,7 @@ static int ath3k_load_patch(struct usb_device *udev) + } + + snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu", +- fw_version.rom_version); ++ le32_to_cpu(fw_version.rom_version)); + + ret = request_firmware(&firmware, filename, &udev->dev); + if (ret < 0) { +@@ -364,12 +382,13 @@ static int ath3k_load_patch(struct usb_device *udev) + return ret; + } + +- pt_version.rom_version = *(int *)(firmware->data + firmware->size - 8); +- pt_version.build_version = *(int *) +- (firmware->data + firmware->size - 4); ++ pt_rom_version = get_unaligned_le32(firmware->data + ++ firmware->size - 8); ++ pt_build_version = get_unaligned_le32(firmware->data + ++ firmware->size - 4); + +- if ((pt_version.rom_version != fw_version.rom_version) || +- (pt_version.build_version <= 
fw_version.build_version)) { ++ if (pt_rom_version != le32_to_cpu(fw_version.rom_version) || ++ pt_build_version <= le32_to_cpu(fw_version.build_version)) { + BT_ERR("Patch file version did not match with firmware"); + release_firmware(firmware); + return -EINVAL; +@@ -418,7 +437,7 @@ static int ath3k_load_syscfg(struct usb_device *udev) + } + + snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s", +- fw_version.rom_version, clk_value, ".dfu"); ++ le32_to_cpu(fw_version.rom_version), clk_value, ".dfu"); + + ret = request_firmware(&firmware, filename, &udev->dev); + if (ret < 0) { +diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c +index 9a9f51875df5..5592b71f3dae 100644 +--- a/drivers/bluetooth/btmrvl_main.c ++++ b/drivers/bluetooth/btmrvl_main.c +@@ -628,12 +628,17 @@ struct btmrvl_private *btmrvl_add_card(void *card) + init_waitqueue_head(&priv->main_thread.wait_q); + priv->main_thread.task = kthread_run(btmrvl_service_main_thread, + &priv->main_thread, "btmrvl_main_service"); ++ if (IS_ERR(priv->main_thread.task)) ++ goto err_thread; + + priv->btmrvl_dev.card = card; + priv->btmrvl_dev.tx_dnld_rdy = true; + + return priv; + ++err_thread: ++ btmrvl_free_adapter(priv); ++ + err_adapter: + kfree(priv); + +diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c +index 238dea6f6c5f..64f19159515f 100644 +--- a/drivers/bluetooth/btusb.c ++++ b/drivers/bluetooth/btusb.c +@@ -49,6 +49,7 @@ static struct usb_driver btusb_driver; + #define BTUSB_WRONG_SCO_MTU 0x40 + #define BTUSB_ATH3012 0x80 + #define BTUSB_INTEL 0x100 ++#define BTUSB_INTEL_BOOT 0x200 + + static struct usb_device_id btusb_table[] = { + /* Generic Bluetooth USB device */ +@@ -101,21 +102,31 @@ static struct usb_device_id btusb_table[] = { + { USB_DEVICE(0x0c10, 0x0000) }, + + /* Broadcom BCM20702A0 */ ++ { USB_DEVICE(0x0489, 0xe042) }, ++ { USB_DEVICE(0x04ca, 0x2003) }, + { USB_DEVICE(0x0b05, 0x17b5) }, + { USB_DEVICE(0x0b05, 0x17cb) }, +- { USB_DEVICE(0x04ca, 0x2003) }, +- { USB_DEVICE(0x0489, 0xe042) }, + { USB_DEVICE(0x413c, 0x8197) }, + + /* Foxconn - Hon Hai */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01) }, + +- /*Broadcom devices with vendor specific id */ ++ /* Broadcom devices with vendor specific id */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, + ++ /* ASUSTek Computer - Broadcom based */ ++ { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01) }, ++ + /* Belkin F8065bf - Broadcom based */ + { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01) }, + ++ /* IMC Networks - Broadcom based */ ++ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01) }, ++ ++ /* Intel Bluetooth USB Bootloader (RAM module) */ ++ { USB_DEVICE(0x8087, 0x0a5a), ++ .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, ++ + { } /* Terminating entry */ + }; + +@@ -129,53 +140,61 @@ static struct usb_device_id blacklist_table[] = { + { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, + + /* Atheros 3011 with sflash firmware */ ++ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, ++ { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, ++ { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, + { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, +- { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, +- { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, +- { USB_DEVICE(0x0489, 
0xe027), .driver_info = BTUSB_IGNORE }, + + /* Atheros AR9285 Malbec with sflash firmware */ + { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, + + /* Atheros 3012 with sflash firmware */ +- { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, +- { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, ++ { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, + + /* Atheros AR5BBU12 with sflash firmware */ + { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, + + /* Atheros AR5BBU12 with sflash firmware */ +- { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 }, ++ { 
USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, + + /* Broadcom BCM2035 */ +- { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, +- { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, + { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, ++ { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, ++ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, + + /* Broadcom BCM2045 */ + { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU }, +@@ -1448,6 +1467,9 @@ static int btusb_probe(struct usb_interface *intf, + if (id->driver_info & BTUSB_INTEL) + hdev->setup = btusb_setup_intel; + ++ if (id->driver_info & BTUSB_INTEL_BOOT) ++ set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); ++ + /* Interface numbers are hardcoded in the specification */ + data->isoc = usb_ifnum_to_if(data->udev, 1); + +@@ -1485,6 +1507,18 @@ static int btusb_probe(struct usb_interface *intf, + data->isoc = NULL; + } + ++ if (id->driver_info & BTUSB_INTEL_BOOT) { ++ /* A bug in the bootloader causes that interrupt interface is ++ * only enabled after receiving SetInterface(0, AltSetting=0). ++ */ ++ err = usb_set_interface(data->udev, 0, 0); ++ if (err < 0) { ++ BT_ERR("failed to set interface 0, alt 0 %d", err); ++ hci_free_dev(hdev); ++ return err; ++ } ++ } ++ + if (data->isoc) { + err = usb_driver_claim_interface(&btusb_driver, + data->isoc, data); +diff --git a/drivers/char/random.c b/drivers/char/random.c +index 7a744d391756..f6b25db16791 100644 +--- a/drivers/char/random.c ++++ b/drivers/char/random.c +@@ -930,8 +930,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out) + * pool while mixing, and hash one final time. + */ + sha_transform(hash.w, extract, workspace); +- memset(extract, 0, sizeof(extract)); +- memset(workspace, 0, sizeof(workspace)); ++ memzero_explicit(extract, sizeof(extract)); ++ memzero_explicit(workspace, sizeof(workspace)); + + /* + * In case the hash function has some recognizable output +@@ -954,7 +954,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out) + } + + memcpy(out, &hash, EXTRACT_SIZE); +- memset(&hash, 0, sizeof(hash)); ++ memzero_explicit(&hash, sizeof(hash)); + } + + static ssize_t extract_entropy(struct entropy_store *r, void *buf, +@@ -1002,7 +1002,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf, + } + + /* Wipe data just returned from memory */ +- memset(tmp, 0, sizeof(tmp)); ++ memzero_explicit(tmp, sizeof(tmp)); + + return ret; + } +@@ -1040,7 +1040,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, + } + + /* Wipe data just returned from memory */ +- memset(tmp, 0, sizeof(tmp)); ++ memzero_explicit(tmp, sizeof(tmp)); + + return ret; + } +diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c +index 04548f7023af..d15590856325 100644 +--- a/drivers/cpufreq/cpufreq.c ++++ b/drivers/cpufreq/cpufreq.c +@@ -412,7 +412,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq); + show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); + show_one(scaling_min_freq, min); + show_one(scaling_max_freq, max); +-show_one(scaling_cur_freq, cur); ++ ++static ssize_t show_scaling_cur_freq( ++ struct cpufreq_policy *policy, char *buf) ++{ ++ ssize_t ret; ++ ++ if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) ++ ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu)); ++ else ++ ret = sprintf(buf, "%u\n", policy->cur); ++ return ret; ++} + + static int __cpufreq_set_policy(struct 
cpufreq_policy *policy, + struct cpufreq_policy *new_policy); +@@ -815,11 +826,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy, + if (ret) + goto err_out_kobj_put; + } +- if (cpufreq_driver->target) { +- ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); +- if (ret) +- goto err_out_kobj_put; +- } ++ ++ ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); ++ if (ret) ++ goto err_out_kobj_put; ++ + if (cpufreq_driver->bios_limit) { + ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); + if (ret) +diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c +index f033fadb58e6..132a9139c19f 100644 +--- a/drivers/cpufreq/intel_pstate.c ++++ b/drivers/cpufreq/intel_pstate.c +@@ -600,6 +600,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) + if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { + limits.min_perf_pct = 100; + limits.min_perf = int_tofp(1); ++ limits.max_policy_pct = 100; + limits.max_perf_pct = 100; + limits.max_perf = int_tofp(1); + limits.no_turbo = 0; +diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c +index df6575f1430d..682288ced4ac 100644 +--- a/drivers/edac/cpc925_edac.c ++++ b/drivers/edac/cpc925_edac.c +@@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci) + + if (apiexcp & UECC_EXCP_DETECTED) { + cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); +- edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, ++ edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, + pfn, offset, 0, + csrow, -1, -1, + mci->ctl_name, ""); +diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c +index 1c4056a50383..2697deae3ab7 100644 +--- a/drivers/edac/e7xxx_edac.c ++++ b/drivers/edac/e7xxx_edac.c +@@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) + static void process_ce_no_info(struct mem_ctl_info *mci) + { + edac_dbg(3, "\n"); +- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, ++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, + "e7xxx CE log register overflow", ""); + } + +diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c +index be10a74b16ea..7d5b3697f4b5 100644 +--- a/drivers/edac/i3200_edac.c ++++ b/drivers/edac/i3200_edac.c +@@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci, + -1, -1, + "i3000 UE", ""); + } else if (log & I3200_ECCERRLOG_CE) { +- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, ++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, + 0, 0, eccerrlog_syndrome(log), + eccerrlog_row(channel, log), + -1, -1, +- "i3000 UE", ""); ++ "i3000 CE", ""); + } + } + } +diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c +index 3e3e431c8301..b93b0d006ebb 100644 +--- a/drivers/edac/i82860_edac.c ++++ b/drivers/edac/i82860_edac.c +@@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci, + dimm->location[0], dimm->location[1], -1, + "i82860 UE", ""); + else +- edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, ++ edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, + info->eap, 0, info->derrsyn, + dimm->location[0], dimm->location[1], -1, + "i82860 CE", ""); +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c +index 7fc9f7272b56..e8f6418b6dec 100644 +--- a/drivers/gpu/drm/ast/ast_mode.c ++++ b/drivers/gpu/drm/ast/ast_mode.c +@@ -1012,8 +1012,8 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int 
height) + srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0; + data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4); + data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4); +- data32.b[2] = srcdata32[0].b[1] | (srcdata32[1].b[0] >> 4); +- data32.b[3] = srcdata32[0].b[3] | (srcdata32[1].b[2] >> 4); ++ data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4); ++ data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4); + + writel(data32.ul, dstxor); + csum += data32.ul; +diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c +index bfcfd0c202ad..73fed3518581 100644 +--- a/drivers/gpu/drm/cirrus/cirrus_drv.c ++++ b/drivers/gpu/drm/cirrus/cirrus_drv.c +@@ -32,6 +32,8 @@ static struct drm_driver driver; + static DEFINE_PCI_DEVICE_TABLE(pciidlist) = { + { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0, + 0, 0 }, ++ { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN, ++ 0x0001, 0, 0, 0 }, + {0,} + }; + +diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c +index 71a831ae73e9..a7daa2a3ac82 100644 +--- a/drivers/gpu/drm/i915/intel_pm.c ++++ b/drivers/gpu/drm/i915/intel_pm.c +@@ -5347,24 +5347,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) + static struct i915_power_well *hsw_pwr; + + /* Display audio driver power well request */ +-void i915_request_power_well(void) ++int i915_request_power_well(void) + { +- if (WARN_ON(!hsw_pwr)) +- return; ++ if (!hsw_pwr) ++ return -ENODEV; + + spin_lock_irq(&hsw_pwr->lock); + if (!hsw_pwr->count++ && + !hsw_pwr->i915_request) + __intel_set_power_well(hsw_pwr->device, true); + spin_unlock_irq(&hsw_pwr->lock); ++ return 0; + } + EXPORT_SYMBOL_GPL(i915_request_power_well); + + /* Display audio driver power well release */ +-void i915_release_power_well(void) ++int i915_release_power_well(void) + { +- if (WARN_ON(!hsw_pwr)) +- return; ++ if (!hsw_pwr) ++ return -ENODEV; ++ + + spin_lock_irq(&hsw_pwr->lock); + WARN_ON(!hsw_pwr->count); +@@ -5372,9 +5374,30 @@ void i915_release_power_well(void) + !hsw_pwr->i915_request) + __intel_set_power_well(hsw_pwr->device, false); + spin_unlock_irq(&hsw_pwr->lock); ++ return 0; + } + EXPORT_SYMBOL_GPL(i915_release_power_well); + ++/* ++ * Private interface for the audio driver to get CDCLK in kHz. ++ * ++ * Caller must request power well using i915_request_power_well() prior to ++ * making the call. 
++ */ ++int i915_get_cdclk_freq(void) ++{ ++ struct drm_i915_private *dev_priv; ++ ++ if (!hsw_pwr) ++ return -ENODEV; ++ ++ dev_priv = container_of(hsw_pwr, struct drm_i915_private, ++ power_well); ++ ++ return intel_ddi_get_cdclk_freq(dev_priv); ++} ++EXPORT_SYMBOL_GPL(i915_get_cdclk_freq); ++ + int i915_init_power_well(struct drm_device *dev) + { + struct drm_i915_private *dev_priv = dev->dev_private; +diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c +index 2d9b9d7a7992..f3edd2841f2d 100644 +--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c ++++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c +@@ -124,6 +124,7 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len, + struct dcb_output *outp) + { + u16 dcb = dcb_outp(bios, idx, ver, len); ++ memset(outp, 0x00, sizeof(*outp)); + if (dcb) { + if (*ver >= 0x20) { + u32 conn = nv_ro32(bios, dcb + 0x00); +diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c +index 835caba026d3..5f79e511c2a6 100644 +--- a/drivers/gpu/drm/qxl/qxl_display.c ++++ b/drivers/gpu/drm/qxl/qxl_display.c +@@ -508,7 +508,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, + struct qxl_framebuffer *qfb; + struct qxl_bo *bo, *old_bo = NULL; + struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); +- uint32_t width, height, base_offset; + bool recreate_primary = false; + int ret; + int surf_id; +@@ -538,9 +537,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, + if (qcrtc->index == 0) + recreate_primary = true; + +- width = mode->hdisplay; +- height = mode->vdisplay; +- base_offset = 0; ++ if (bo->surf.stride * bo->surf.height > qdev->vram_size) { ++ DRM_ERROR("Mode doesn't fit in vram size (vgamem)"); ++ return -EINVAL; ++ } + + ret = qxl_bo_reserve(bo, false); + if (ret != 0) +@@ -554,10 +554,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, + if (recreate_primary) { + qxl_io_destroy_primary(qdev); + qxl_io_log(qdev, +- "recreate primary: %dx%d (was %dx%d,%d,%d)\n", +- width, height, bo->surf.width, +- bo->surf.height, bo->surf.stride, bo->surf.format); +- qxl_io_create_primary(qdev, base_offset, bo); ++ "recreate primary: %dx%d,%d,%d\n", ++ bo->surf.width, bo->surf.height, ++ bo->surf.stride, bo->surf.format); ++ qxl_io_create_primary(qdev, 0, bo); + bo->is_primary = true; + surf_id = 0; + } else { +diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c +index 83895f2d16c6..f5cdc865752a 100644 +--- a/drivers/gpu/drm/radeon/si_dpm.c ++++ b/drivers/gpu/drm/radeon/si_dpm.c +@@ -6220,7 +6220,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev, + if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && + index == 0) { + /* XXX disable for A0 tahiti */ +- si_pi->ulv.supported = true; ++ si_pi->ulv.supported = false; + si_pi->ulv.pl = *pl; + si_pi->ulv.one_pcie_lane_in_ulv = false; + si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +index fc43c0601236..dab6fab5dc99 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +@@ -1939,6 +1939,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, + DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) + }; + int i; ++ u32 assumed_bpp = 2; ++ ++ /* ++ * If using screen objects, then assume 32-bpp because that's what the ++ * SVGA device is assuming ++ */ ++ if (dev_priv->sou_priv) ++ assumed_bpp = 4; + + /* Add preferred mode */ + 
{ +@@ -1949,8 +1957,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, + mode->vdisplay = du->pref_height; + vmw_guess_mode_timing(mode); + +- if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, +- mode->vdisplay)) { ++ if (vmw_kms_validate_mode_vram(dev_priv, ++ mode->hdisplay * assumed_bpp, ++ mode->vdisplay)) { + drm_mode_probed_add(connector, mode); + } else { + drm_mode_destroy(dev, mode); +@@ -1972,7 +1981,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector, + bmode->vdisplay > max_height) + continue; + +- if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, ++ if (!vmw_kms_validate_mode_vram(dev_priv, ++ bmode->hdisplay * assumed_bpp, + bmode->vdisplay)) + continue; + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index b921bc55a19b..28f6cdc5aaf9 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -288,6 +288,11 @@ + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 + #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 + ++#define USB_VENDOR_ID_ELAN 0x04f3 ++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 ++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b ++#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f ++ + #define USB_VENDOR_ID_ELECOM 0x056e + #define USB_DEVICE_ID_ELECOM_BM084 0x0061 + +@@ -312,6 +317,7 @@ + + #define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 + #define USB_DEVICE_ID_ETURBOTOUCH 0x0006 ++#define USB_DEVICE_ID_ETURBOTOUCH_2968 0x2968 + + #define USB_VENDOR_ID_EZKEY 0x0518 + #define USB_DEVICE_ID_BTC_8193 0x0002 +@@ -691,6 +697,8 @@ + + #define USB_VENDOR_ID_PENMOUNT 0x14e1 + #define USB_DEVICE_ID_PENMOUNT_PCI 0x3500 ++#define USB_DEVICE_ID_PENMOUNT_1610 0x1610 ++#define USB_DEVICE_ID_PENMOUNT_1640 0x1640 + + #define USB_VENDOR_ID_PETALYNX 0x18b1 + #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 +@@ -746,6 +754,9 @@ + #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001 + #define USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE 0x0600 + ++#define USB_VENDOR_ID_SEMICO 0x1a2c ++#define USB_DEVICE_ID_SEMICO_USB_KEYKOARD 0x0023 ++ + #define USB_VENDOR_ID_SENNHEISER 0x1395 + #define USB_DEVICE_ID_SENNHEISER_BTD500USB 0x002c + +diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c +index 44df131d390a..bcc3193d297c 100644 +--- a/drivers/hid/usbhid/hid-core.c ++++ b/drivers/hid/usbhid/hid-core.c +@@ -82,7 +82,7 @@ static int hid_start_in(struct hid_device *hid) + struct usbhid_device *usbhid = hid->driver_data; + + spin_lock_irqsave(&usbhid->lock, flags); +- if (hid->open > 0 && ++ if ((hid->open > 0 || hid->quirks & HID_QUIRK_ALWAYS_POLL) && + !test_bit(HID_DISCONNECTED, &usbhid->iofl) && + !test_bit(HID_SUSPENDED, &usbhid->iofl) && + !test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) { +@@ -292,6 +292,8 @@ static void hid_irq_in(struct urb *urb) + case 0: /* success */ + usbhid_mark_busy(usbhid); + usbhid->retry_delay = 0; ++ if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open) ++ break; + hid_input_report(urb->context, HID_INPUT_REPORT, + urb->transfer_buffer, + urb->actual_length, 1); +@@ -536,7 +538,8 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re + int head; + struct usbhid_device *usbhid = hid->driver_data; + +- if ((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN) ++ if (((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN) || ++ test_bit(HID_DISCONNECTED, &usbhid->iofl)) + return; + + if (usbhid->urbout && dir == USB_DIR_OUT && report->type == HID_OUTPUT_REPORT) { +@@ -734,8 +737,10 @@ void usbhid_close(struct 
hid_device *hid) + if (!--hid->open) { + spin_unlock_irq(&usbhid->lock); + hid_cancel_delayed_stuff(usbhid); +- usb_kill_urb(usbhid->urbin); +- usbhid->intf->needs_remote_wakeup = 0; ++ if (!(hid->quirks & HID_QUIRK_ALWAYS_POLL)) { ++ usb_kill_urb(usbhid->urbin); ++ usbhid->intf->needs_remote_wakeup = 0; ++ } + } else { + spin_unlock_irq(&usbhid->lock); + } +@@ -1119,6 +1124,19 @@ static int usbhid_start(struct hid_device *hid) + + set_bit(HID_STARTED, &usbhid->iofl); + ++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) { ++ ret = usb_autopm_get_interface(usbhid->intf); ++ if (ret) ++ goto fail; ++ usbhid->intf->needs_remote_wakeup = 1; ++ ret = hid_start_in(hid); ++ if (ret) { ++ dev_err(&hid->dev, ++ "failed to start in urb: %d\n", ret); ++ } ++ usb_autopm_put_interface(usbhid->intf); ++ } ++ + /* Some keyboards don't work until their LEDs have been set. + * Since BIOSes do set the LEDs, it must be safe for any device + * that supports the keyboard boot protocol. +@@ -1151,6 +1169,9 @@ static void usbhid_stop(struct hid_device *hid) + if (WARN_ON(!usbhid)) + return; + ++ if (hid->quirks & HID_QUIRK_ALWAYS_POLL) ++ usbhid->intf->needs_remote_wakeup = 0; ++ + clear_bit(HID_STARTED, &usbhid->iofl); + spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ + set_bit(HID_DISCONNECTED, &usbhid->iofl); +@@ -1338,6 +1359,9 @@ static void usbhid_disconnect(struct usb_interface *intf) + return; + + usbhid = hid->driver_data; ++ spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */ ++ set_bit(HID_DISCONNECTED, &usbhid->iofl); ++ spin_unlock_irq(&usbhid->lock); + hid_destroy_device(hid); + kfree(usbhid); + } +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c +index da22a5e0d86f..19b5fc350354 100644 +--- a/drivers/hid/usbhid/hid-quirks.c ++++ b/drivers/hid/usbhid/hid-quirks.c +@@ -49,6 +49,7 @@ static const struct hid_blacklist { + + { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, ++ { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, + { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, +@@ -69,6 +70,9 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, ++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, ++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, ++ { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL }, + { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, +@@ -76,6 +80,8 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, 
HID_QUIRK_NO_INIT_REPORTS }, ++ { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET }, ++ { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, +@@ -115,6 +121,7 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, ++ { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS2, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_HD, HID_QUIRK_NO_INIT_REPORTS }, +diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c +index 2f6d43bd0728..174445303b9f 100644 +--- a/drivers/i2c/busses/i2c-at91.c ++++ b/drivers/i2c/busses/i2c-at91.c +@@ -434,7 +434,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) + } + } + +- ret = wait_for_completion_io_timeout(&dev->cmd_complete, ++ ret = wait_for_completion_timeout(&dev->cmd_complete, + dev->adapter.timeout); + if (ret == 0) { + dev_err(dev->dev, "controller timed out\n"); +diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c +index 71a2c5f63b9c..af6f2570a55c 100644 +--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c ++++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c +@@ -71,7 +71,7 @@ int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf) + goto st_sensors_free_memory; + } + +- for (i = 0; i < n * num_data_channels; i++) { ++ for (i = 0; i < n * byte_for_channel; i++) { + if (i < n) + buf[i] = rx_array[i]; + else +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index 75e3b102ce45..7b717d8b6897 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -125,6 +125,10 @@ static const struct xpad_device { + { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, + { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, + { 0x044f, 0x0f07, "Thrustmaster, Inc. 
Controller", 0, XTYPE_XBOX }, ++ { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, ++ { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 }, ++ { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 }, ++ { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 }, + { 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 }, + { 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX }, + { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX }, +@@ -137,10 +141,17 @@ static const struct xpad_device { + { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, + { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, ++ { 0x0738, 0x4718, "Mad Catz Street Fighter IV FightStick SE", 0, XTYPE_XBOX360 }, ++ { 0x0738, 0x4726, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 }, + { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, ++ { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, + { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, ++ { 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 }, + { 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX }, + { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX }, + { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX }, +@@ -153,28 +164,51 @@ static const struct xpad_device { + { 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX }, + { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX }, + { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, + { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX }, ++ { 0x0e8f, 0x3008, "Generic xbox control (dealextreme)", 0, XTYPE_XBOX }, ++ { 0x0f0d, 0x000a, "Hori Co. 
DOA4 FightStick", 0, XTYPE_XBOX360 }, + { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, + { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, + { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, + { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, + { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 }, + { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, +- { 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, +- { 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, ++ { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, ++ { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, ++ { 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 }, ++ { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 }, ++ { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 }, ++ { 0x1689, 0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, + { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, + { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf023, "MLG Pro Circuit Controller (Xbox)", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf038, "Street Fighter IV FightStick TE", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf900, "Harmonix Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5b02, "Thrustmaster, Inc. 
GPX Controller", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, + { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, + { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN } + }; +@@ -258,6 +292,9 @@ static struct usb_device_id xpad_table[] = { + XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ + XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ + XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ ++ XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ ++ XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ ++ XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ + { } + }; + +diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h +index 5f6d3e1d28a0..2b888f1e6421 100644 +--- a/drivers/input/serio/i8042-x86ia64io.h ++++ b/drivers/input/serio/i8042-x86ia64io.h +@@ -101,6 +101,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { + }, + { + .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), ++ DMI_MATCH(DMI_PRODUCT_NAME, "X750LN"), ++ }, ++ }, ++ { ++ .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), + DMI_MATCH(DMI_PRODUCT_NAME , "ProLiant"), + DMI_MATCH(DMI_PRODUCT_VERSION, "8500"), +@@ -616,6 +622,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = { + }, + }, + { ++ /* Fujitsu A544 laptop */ ++ /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"), ++ }, ++ }, ++ { ++ /* Fujitsu AH544 laptop */ ++ /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"), ++ }, ++ }, ++ { + /* Fujitsu U574 laptop */ + /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ + .matches = { +diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c +index 2b56855c2c77..de019ebb7e29 100644 +--- a/drivers/input/serio/serio.c ++++ b/drivers/input/serio/serio.c +@@ -506,8 +506,8 @@ static void serio_init_port(struct serio *serio) + spin_lock_init(&serio->lock); + mutex_init(&serio->drv_mutex); + device_initialize(&serio->dev); +- dev_set_name(&serio->dev, "serio%ld", +- (long)atomic_inc_return(&serio_no) - 1); ++ dev_set_name(&serio->dev, "serio%lu", ++ (unsigned long)atomic_inc_return(&serio_no) - 1); + serio->dev.bus = &serio_bus; + serio->dev.release = serio_release_port; + serio->dev.groups = serio_device_attr_groups; +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c +index 5056c45be97f..a42efc7f69ed 100644 +--- a/drivers/md/dm-bufio.c ++++ b/drivers/md/dm-bufio.c +@@ -463,6 +463,7 @@ static void __relink_lru(struct dm_buffer *b, int dirty) + c->n_buffers[dirty]++; + b->list_mode = dirty; + list_move(&b->lru_list, &c->lru[dirty]); ++ b->last_accessed = jiffies; + } + + /*---------------------------------------------------------------- +@@ -1455,9 +1456,9 @@ static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, + list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) { + freed += __cleanup_old_buffer(b, gfp_mask, 0); + if (!--nr_to_scan) +- break; ++ return freed; ++ dm_bufio_cond_resched(); + } +- dm_bufio_cond_resched(); + } + return freed; + } +diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c +index 08d9a207259a..c69d0b787746 100644 +--- a/drivers/md/dm-log-userspace-transfer.c ++++ 
b/drivers/md/dm-log-userspace-transfer.c +@@ -272,7 +272,7 @@ int dm_ulog_tfr_init(void) + + r = cn_add_callback(&ulog_cn_id, "dmlogusr", cn_ulog_callback); + if (r) { +- cn_del_callback(&ulog_cn_id); ++ kfree(prealloced_cn_msg); + return r; + } + +diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c +index 1e344b033277..22e8c2032f6d 100644 +--- a/drivers/media/dvb-frontends/ds3000.c ++++ b/drivers/media/dvb-frontends/ds3000.c +@@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config, + memcpy(&state->frontend.ops, &ds3000_ops, + sizeof(struct dvb_frontend_ops)); + state->frontend.demodulator_priv = state; ++ ++ /* ++ * Some devices like T480 starts with voltage on. Be sure ++ * to turn voltage off during init, as this can otherwise ++ * interfere with Unicable SCR systems. ++ */ ++ ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF); + return &state->frontend; + + error3: +diff --git a/drivers/media/i2c/tda7432.c b/drivers/media/i2c/tda7432.c +index 72af644fa051..cf93021a6500 100644 +--- a/drivers/media/i2c/tda7432.c ++++ b/drivers/media/i2c/tda7432.c +@@ -293,7 +293,7 @@ static int tda7432_s_ctrl(struct v4l2_ctrl *ctrl) + if (t->mute->val) { + lf |= TDA7432_MUTE; + lr |= TDA7432_MUTE; +- lf |= TDA7432_MUTE; ++ rf |= TDA7432_MUTE; + rr |= TDA7432_MUTE; + } + /* Mute & update balance*/ +diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c +index 9d103344f34a..81b21d937201 100644 +--- a/drivers/media/usb/em28xx/em28xx-video.c ++++ b/drivers/media/usb/em28xx/em28xx-video.c +@@ -695,13 +695,16 @@ static int em28xx_stop_streaming(struct vb2_queue *vq) + } + + spin_lock_irqsave(&dev->slock, flags); ++ if (dev->usb_ctl.vid_buf != NULL) { ++ vb2_buffer_done(&dev->usb_ctl.vid_buf->vb, VB2_BUF_STATE_ERROR); ++ dev->usb_ctl.vid_buf = NULL; ++ } + while (!list_empty(&vidq->active)) { + struct em28xx_buffer *buf; + buf = list_entry(vidq->active.next, struct em28xx_buffer, list); + list_del(&buf->list); + vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); + } +- dev->usb_ctl.vid_buf = NULL; + spin_unlock_irqrestore(&dev->slock, flags); + + return 0; +@@ -723,13 +726,16 @@ int em28xx_stop_vbi_streaming(struct vb2_queue *vq) + } + + spin_lock_irqsave(&dev->slock, flags); ++ if (dev->usb_ctl.vbi_buf != NULL) { ++ vb2_buffer_done(&dev->usb_ctl.vbi_buf->vb, VB2_BUF_STATE_ERROR); ++ dev->usb_ctl.vbi_buf = NULL; ++ } + while (!list_empty(&vbiq->active)) { + struct em28xx_buffer *buf; + buf = list_entry(vbiq->active.next, struct em28xx_buffer, list); + list_del(&buf->list); + vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); + } +- dev->usb_ctl.vbi_buf = NULL; + spin_unlock_irqrestore(&dev->slock, flags); + + return 0; +diff --git a/drivers/media/usb/ttusb-dec/ttusbdecfe.c b/drivers/media/usb/ttusb-dec/ttusbdecfe.c +index 5c45c9d0712d..9c29552aedec 100644 +--- a/drivers/media/usb/ttusb-dec/ttusbdecfe.c ++++ b/drivers/media/usb/ttusb-dec/ttusbdecfe.c +@@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00 }; + ++ if (cmd->msg_len > sizeof(b) - 4) ++ return -EINVAL; ++ + memcpy(&b[4], cmd->msg, cmd->msg_len); + + state->config->send_command(fe, 0x72, +diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c +index c3bb2502225b..753ad4cfc118 100644 +--- a/drivers/media/usb/uvc/uvc_driver.c ++++ b/drivers/media/usb/uvc/uvc_driver.c +@@ -2210,6 +2210,15 @@ static struct usb_device_id uvc_ids[] = { 
+ .bInterfaceSubClass = 1, + .bInterfaceProtocol = 0, + .driver_info = UVC_QUIRK_PROBE_DEF }, ++ /* Dell XPS M1330 (OmniVision OV7670 webcam) */ ++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE ++ | USB_DEVICE_ID_MATCH_INT_INFO, ++ .idVendor = 0x05a9, ++ .idProduct = 0x7670, ++ .bInterfaceClass = USB_CLASS_VIDEO, ++ .bInterfaceSubClass = 1, ++ .bInterfaceProtocol = 0, ++ .driver_info = UVC_QUIRK_PROBE_DEF }, + /* Apple Built-In iSight */ + { .match_flags = USB_DEVICE_ID_MATCH_DEVICE + | USB_DEVICE_ID_MATCH_INT_INFO, +diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c +index 037d7a55aa8c..767abc9e5581 100644 +--- a/drivers/media/v4l2-core/v4l2-common.c ++++ b/drivers/media/v4l2-core/v4l2-common.c +@@ -431,16 +431,13 @@ static unsigned int clamp_align(unsigned int x, unsigned int min, + /* Bits that must be zero to be aligned */ + unsigned int mask = ~((1 << align) - 1); + ++ /* Clamp to aligned min and max */ ++ x = clamp(x, (min + ~mask) & mask, max & mask); ++ + /* Round to nearest aligned value */ + if (align) + x = (x + (1 << (align - 1))) & mask; + +- /* Clamp to aligned value of min and max */ +- if (x < min) +- x = (min + ~mask) & mask; +- else if (x > max) +- x = max & mask; +- + return x; + } + +diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c +index 6ed83feb0c52..c2a7804014f4 100644 +--- a/drivers/mfd/rtsx_pcr.c ++++ b/drivers/mfd/rtsx_pcr.c +@@ -1172,7 +1172,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, + pcr->msi_en = msi_en; + if (pcr->msi_en) { + ret = pci_enable_msi(pcidev); +- if (ret < 0) ++ if (ret) + pcr->msi_en = false; + } + +diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c +index 54e8ba45c3ad..2985efd7bdb2 100644 +--- a/drivers/mmc/host/rtsx_pci_sdmmc.c ++++ b/drivers/mmc/host/rtsx_pci_sdmmc.c +@@ -342,6 +342,13 @@ static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, + } + + if (rsp_type == SD_RSP_TYPE_R2) { ++ /* ++ * The controller offloads the last byte {CRC-7, end bit 1'b1} ++ * of response type R2. Assign dummy CRC, 0, and end bit to the ++ * byte(ptr[16], goes into the LSB of resp[3] later). 
++ */ ++ ptr[16] = 1; ++ + for (i = 0; i < 4; i++) { + cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4); + dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", +diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c +index 27ae563d0caa..b2a4c22507d9 100644 +--- a/drivers/mmc/host/sdhci-pci.c ++++ b/drivers/mmc/host/sdhci-pci.c +@@ -37,6 +37,13 @@ + #define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 + #define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 + #define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50 ++#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 ++#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9 ++#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa ++#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb ++#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 ++#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 ++#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 + + /* + * PCI registers +@@ -169,6 +176,10 @@ static const struct sdhci_pci_fixes sdhci_cafe = { + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, + }; + ++static const struct sdhci_pci_fixes sdhci_intel_qrk = { ++ .quirks = SDHCI_QUIRK_NO_HISPD_BIT, ++}; ++ + static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot) + { + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; +@@ -359,6 +370,28 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { + .own_cd_for_runtime_pm = true, + }; + ++/* Define Host controllers for Intel Merrifield platform */ ++#define INTEL_MRFL_EMMC_0 0 ++#define INTEL_MRFL_EMMC_1 1 ++ ++static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot) ++{ ++ if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) && ++ (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1)) ++ /* SD support is not ready yet */ ++ return -ENODEV; ++ ++ slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | ++ MMC_CAP_1_8V_DDR; ++ ++ return 0; ++} ++ ++static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = { ++ .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, ++ .probe_slot = intel_mrfl_mmc_probe_slot, ++}; ++ + /* O2Micro extra registers */ + #define O2_SD_LOCK_WP 0xD3 + #define O2_SD_MULTI_VCC3V 0xEE +@@ -832,6 +865,14 @@ static const struct pci_device_id pci_ids[] = { + + { + .vendor = PCI_VENDOR_ID_INTEL, ++ .device = PCI_DEVICE_ID_INTEL_QRK_SD, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .driver_data = (kernel_ulong_t)&sdhci_intel_qrk, ++ }, ++ ++ { ++ .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_MRST_SD0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, +@@ -942,6 +983,54 @@ static const struct pci_device_id pci_ids[] = { + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, + }, + ++ ++ { ++ .vendor = PCI_VENDOR_ID_INTEL, ++ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO0, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd, ++ }, ++ ++ { ++ .vendor = PCI_VENDOR_ID_INTEL, ++ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO1, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, ++ }, ++ ++ { ++ .vendor = PCI_VENDOR_ID_INTEL, ++ .device = PCI_DEVICE_ID_INTEL_CLV_SDIO2, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, ++ }, ++ ++ { ++ .vendor = PCI_VENDOR_ID_INTEL, ++ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC0, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, ++ }, ++ ++ { ++ .vendor = PCI_VENDOR_ID_INTEL, ++ .device = PCI_DEVICE_ID_INTEL_CLV_EMMC1, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = 
PCI_ANY_ID, ++ .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, ++ }, ++ ++ { ++ .vendor = PCI_VENDOR_ID_INTEL, ++ .device = PCI_DEVICE_ID_INTEL_MRFL_MMC, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, ++ }, + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_8120, +diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c +index f5aa4b02cfa6..85cd77c9cd12 100644 +--- a/drivers/mtd/ubi/fastmap.c ++++ b/drivers/mtd/ubi/fastmap.c +@@ -330,6 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai, + av = tmp_av; + else { + ubi_err("orphaned volume in fastmap pool!"); ++ kmem_cache_free(ai->aeb_slab_cache, new_aeb); + return UBI_BAD_FASTMAP; + } + +diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig +index b45b240889f5..367aabc6fd48 100644 +--- a/drivers/net/Kconfig ++++ b/drivers/net/Kconfig +@@ -135,6 +135,7 @@ config MACVLAN + config MACVTAP + tristate "MAC-VLAN based tap driver" + depends on MACVLAN ++ depends on INET + help + This adds a specialized tap character device driver that is based + on the MAC-VLAN network interface, called macvtap. A macvtap device +@@ -205,6 +206,7 @@ config RIONET_RX_SIZE + + config TUN + tristate "Universal TUN/TAP device driver support" ++ depends on INET + select CRC32 + ---help--- + TUN/TAP provides packet reception and transmission for user space +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c +index 052d1832d3fb..4abd98efdc34 100644 +--- a/drivers/net/macvtap.c ++++ b/drivers/net/macvtap.c +@@ -17,6 +17,7 @@ + #include <linux/idr.h> + #include <linux/fs.h> + ++#include <net/ipv6.h> + #include <net/net_namespace.h> + #include <net/rtnetlink.h> + #include <net/sock.h> +@@ -566,6 +567,8 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb, + break; + case VIRTIO_NET_HDR_GSO_UDP: + gso_type = SKB_GSO_UDP; ++ if (skb->protocol == htons(ETH_P_IPV6)) ++ ipv6_proxy_select_ident(skb); + break; + default: + return -EINVAL; +diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c +index 72ff14b811c6..5a1897d86e94 100644 +--- a/drivers/net/ppp/ppp_generic.c ++++ b/drivers/net/ppp/ppp_generic.c +@@ -601,7 +601,7 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + if (file == ppp->owner) + ppp_shutdown_interface(ppp); + } +- if (atomic_long_read(&file->f_count) <= 2) { ++ if (atomic_long_read(&file->f_count) < 2) { + ppp_release(NULL, file); + err = 0; + } else +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 10636cbd3807..495830a8ee28 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -65,6 +65,7 @@ + #include <linux/nsproxy.h> + #include <linux/virtio_net.h> + #include <linux/rcupdate.h> ++#include <net/ipv6.h> + #include <net/net_namespace.h> + #include <net/netns/generic.h> + #include <net/rtnetlink.h> +@@ -1103,6 +1104,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, + break; + } + ++ skb_reset_network_header(skb); ++ + if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { + pr_debug("GSO!\n"); + switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { +@@ -1114,6 +1117,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, + break; + case VIRTIO_NET_HDR_GSO_UDP: + skb_shinfo(skb)->gso_type = SKB_GSO_UDP; ++ if (skb->protocol == htons(ETH_P_IPV6)) ++ ipv6_proxy_select_ident(skb); + break; + default: + tun->dev->stats.rx_frame_errors++; +@@ -1143,7 +1148,6 @@ static ssize_t tun_get_user(struct tun_struct 
*tun, struct tun_file *tfile, + skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; + } + +- skb_reset_network_header(skb); + skb_probe_transport_header(skb, 0); + + rxhash = skb_get_rxhash(skb); +diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c +index 3ecb2133dee6..b8b8f9948a0f 100644 +--- a/drivers/net/usb/ax88179_178a.c ++++ b/drivers/net/usb/ax88179_178a.c +@@ -698,6 +698,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p) + { + struct usbnet *dev = netdev_priv(net); + struct sockaddr *addr = p; ++ int ret; + + if (netif_running(net)) + return -EBUSY; +@@ -707,8 +708,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p) + memcpy(net->dev_addr, addr->sa_data, ETH_ALEN); + + /* Set the MAC address */ +- return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ++ ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, + ETH_ALEN, net->dev_addr); ++ if (ret < 0) ++ return ret; ++ ++ return 0; + } + + static const struct net_device_ops ax88179_netdev_ops = { +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index f1735fb61362..23969eaf88c1 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -1341,9 +1341,6 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) + if (!in6_dev) + goto out; + +- if (!pskb_may_pull(skb, skb->len)) +- goto out; +- + iphdr = ipv6_hdr(skb); + saddr = &iphdr->saddr; + daddr = &iphdr->daddr; +@@ -1683,6 +1680,8 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, + struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats); + union vxlan_addr loopback; + union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; ++ struct net_device *dev = skb->dev; ++ int len = skb->len; + + skb->pkt_type = PACKET_HOST; + skb->encapsulation = 0; +@@ -1704,16 +1703,16 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, + + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->tx_packets++; +- tx_stats->tx_bytes += skb->len; ++ tx_stats->tx_bytes += len; + u64_stats_update_end(&tx_stats->syncp); + + if (netif_rx(skb) == NET_RX_SUCCESS) { + u64_stats_update_begin(&rx_stats->syncp); + rx_stats->rx_packets++; +- rx_stats->rx_bytes += skb->len; ++ rx_stats->rx_bytes += len; + u64_stats_update_end(&rx_stats->syncp); + } else { +- skb->dev->stats.rx_dropped++; ++ dev->stats.rx_dropped++; + } + } + +@@ -1888,7 +1887,8 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) + return arp_reduce(dev, skb); + #if IS_ENABLED(CONFIG_IPV6) + else if (ntohs(eth->h_proto) == ETH_P_IPV6 && +- skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) && ++ pskb_may_pull(skb, sizeof(struct ipv6hdr) ++ + sizeof(struct nd_msg)) && + ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) { + struct nd_msg *msg; + +@@ -1897,6 +1897,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) + msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) + return neigh_reduce(dev, skb); + } ++ eth = eth_hdr(skb); + #endif + } + +diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h +index bc82d55d77c0..71f70c724fc9 100644 +--- a/drivers/net/wireless/rt2x00/rt2800.h ++++ b/drivers/net/wireless/rt2x00/rt2800.h +@@ -54,6 +54,7 @@ + * RF5592 2.4G/5G 2T2R + * RF3070 2.4G 1T1R + * RF5360 2.4G 1T1R ++ * RF5362 2.4G 1T1R + * RF5370 2.4G 1T1R + * RF5390 2.4G 1T1R + */ +@@ -74,6 +75,7 @@ + #define RF3070 0x3070 + #define RF3290 0x3290 + #define RF5360 0x5360 ++#define 
RF5362 0x5362 + #define RF5370 0x5370 + #define RF5372 0x5372 + #define RF5390 0x5390 +@@ -2147,7 +2149,7 @@ struct mac_iveiv_entry { + /* Bits [7-4] for RF3320 (RT3370/RT3390), on other chipsets reserved */ + #define RFCSR3_PA1_BIAS_CCK FIELD8(0x70) + #define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80) +-/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */ ++/* Bits for RF3290/RF5360/RF5362/RF5370/RF5372/RF5390/RF5392 */ + #define RFCSR3_VCOCAL_EN FIELD8(0x80) + /* Bits for RF3050 */ + #define RFCSR3_BIT1 FIELD8(0x02) +diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c +index 446eadeaaef6..3bbd03c4906d 100644 +--- a/drivers/net/wireless/rt2x00/rt2800lib.c ++++ b/drivers/net/wireless/rt2x00/rt2800lib.c +@@ -3154,6 +3154,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, + break; + case RF3070: + case RF5360: ++ case RF5362: + case RF5370: + case RF5372: + case RF5390: +@@ -3171,6 +3172,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, + rt2x00_rf(rt2x00dev, RF3290) || + rt2x00_rf(rt2x00dev, RF3322) || + rt2x00_rf(rt2x00dev, RF5360) || ++ rt2x00_rf(rt2x00dev, RF5362) || + rt2x00_rf(rt2x00dev, RF5370) || + rt2x00_rf(rt2x00dev, RF5372) || + rt2x00_rf(rt2x00dev, RF5390) || +@@ -4269,6 +4271,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) + case RF3070: + case RF3290: + case RF5360: ++ case RF5362: + case RF5370: + case RF5372: + case RF5390: +@@ -7032,6 +7035,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) + case RF3320: + case RF3322: + case RF5360: ++ case RF5362: + case RF5370: + case RF5372: + case RF5390: +@@ -7555,6 +7559,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) + rt2x00_rf(rt2x00dev, RF3320) || + rt2x00_rf(rt2x00dev, RF3322) || + rt2x00_rf(rt2x00dev, RF5360) || ++ rt2x00_rf(rt2x00dev, RF5362) || + rt2x00_rf(rt2x00dev, RF5370) || + rt2x00_rf(rt2x00dev, RF5372) || + rt2x00_rf(rt2x00dev, RF5390) || +@@ -7682,6 +7687,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) + case RF3070: + case RF3290: + case RF5360: ++ case RF5362: + case RF5370: + case RF5372: + case RF5390: +diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c +index 4feb35aef990..a44862ad84ec 100644 +--- a/drivers/net/wireless/rt2x00/rt2800usb.c ++++ b/drivers/net/wireless/rt2x00/rt2800usb.c +@@ -992,6 +992,7 @@ static struct usb_device_id rt2800usb_device_table[] = { + { USB_DEVICE(0x07d1, 0x3c15) }, + { USB_DEVICE(0x07d1, 0x3c16) }, + { USB_DEVICE(0x07d1, 0x3c17) }, ++ { USB_DEVICE(0x2001, 0x3317) }, + { USB_DEVICE(0x2001, 0x3c1b) }, + /* Draytek */ + { USB_DEVICE(0x07fa, 0x7712) }, +@@ -1064,6 +1065,7 @@ static struct usb_device_id rt2800usb_device_table[] = { + /* Ovislink */ + { USB_DEVICE(0x1b75, 0x3071) }, + { USB_DEVICE(0x1b75, 0x3072) }, ++ { USB_DEVICE(0x1b75, 0xa200) }, + /* Para */ + { USB_DEVICE(0x20b8, 0x8888) }, + /* Pegatron */ +@@ -1180,6 +1182,8 @@ static struct usb_device_id rt2800usb_device_table[] = { + /* Linksys */ + { USB_DEVICE(0x13b1, 0x002f) }, + { USB_DEVICE(0x1737, 0x0079) }, ++ /* Logitec */ ++ { USB_DEVICE(0x0789, 0x0170) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x3572) }, + /* Sitecom */ +@@ -1203,6 +1207,8 @@ static struct usb_device_id rt2800usb_device_table[] = { + { USB_DEVICE(0x050d, 0x1103) }, + /* Cameo */ + { USB_DEVICE(0x148f, 0xf301) }, ++ /* D-Link */ ++ { USB_DEVICE(0x2001, 0x3c1f) }, + /* Edimax */ + { USB_DEVICE(0x7392, 0x7733) }, + /* Hawking */ +@@ -1216,6 +1222,7 @@ static struct 
usb_device_id rt2800usb_device_table[] = { + { USB_DEVICE(0x0789, 0x016b) }, + /* NETGEAR */ + { USB_DEVICE(0x0846, 0x9012) }, ++ { USB_DEVICE(0x0846, 0x9013) }, + { USB_DEVICE(0x0846, 0x9019) }, + /* Planex */ + { USB_DEVICE(0x2019, 0xed19) }, +@@ -1224,6 +1231,7 @@ static struct usb_device_id rt2800usb_device_table[] = { + /* Sitecom */ + { USB_DEVICE(0x0df6, 0x0067) }, + { USB_DEVICE(0x0df6, 0x006a) }, ++ { USB_DEVICE(0x0df6, 0x006e) }, + /* ZyXEL */ + { USB_DEVICE(0x0586, 0x3421) }, + #endif +@@ -1231,6 +1239,8 @@ static struct usb_device_id rt2800usb_device_table[] = { + /* Arcadyan */ + { USB_DEVICE(0x043e, 0x7a12) }, + { USB_DEVICE(0x043e, 0x7a32) }, ++ /* ASUS */ ++ { USB_DEVICE(0x0b05, 0x17e8) }, + /* Azurewave */ + { USB_DEVICE(0x13d3, 0x3329) }, + { USB_DEVICE(0x13d3, 0x3365) }, +@@ -1240,6 +1250,9 @@ static struct usb_device_id rt2800usb_device_table[] = { + { USB_DEVICE(0x2001, 0x3c1c) }, + { USB_DEVICE(0x2001, 0x3c1d) }, + { USB_DEVICE(0x2001, 0x3c1e) }, ++ { USB_DEVICE(0x2001, 0x3c20) }, ++ { USB_DEVICE(0x2001, 0x3c22) }, ++ { USB_DEVICE(0x2001, 0x3c23) }, + /* LG innotek */ + { USB_DEVICE(0x043e, 0x7a22) }, + { USB_DEVICE(0x043e, 0x7a42) }, +@@ -1262,12 +1275,18 @@ static struct usb_device_id rt2800usb_device_table[] = { + { USB_DEVICE(0x043e, 0x7a32) }, + /* AVM GmbH */ + { USB_DEVICE(0x057c, 0x8501) }, +- /* D-Link DWA-160-B2 */ ++ /* Buffalo */ ++ { USB_DEVICE(0x0411, 0x0241) }, ++ { USB_DEVICE(0x0411, 0x0253) }, ++ /* D-Link */ + { USB_DEVICE(0x2001, 0x3c1a) }, ++ { USB_DEVICE(0x2001, 0x3c21) }, + /* Proware */ + { USB_DEVICE(0x043e, 0x7a13) }, + /* Ralink */ + { USB_DEVICE(0x148f, 0x5572) }, ++ /* TRENDnet */ ++ { USB_DEVICE(0x20f4, 0x724a) }, + #endif + #ifdef CONFIG_RT2800USB_UNKNOWN + /* +@@ -1337,6 +1356,7 @@ static struct usb_device_id rt2800usb_device_table[] = { + { USB_DEVICE(0x1d4d, 0x0010) }, + /* Planex */ + { USB_DEVICE(0x2019, 0xab24) }, ++ { USB_DEVICE(0x2019, 0xab29) }, + /* Qcom */ + { USB_DEVICE(0x18e8, 0x6259) }, + /* RadioShack */ +@@ -1348,6 +1368,7 @@ static struct usb_device_id rt2800usb_device_table[] = { + { USB_DEVICE(0x0df6, 0x0053) }, + { USB_DEVICE(0x0df6, 0x0069) }, + { USB_DEVICE(0x0df6, 0x006f) }, ++ { USB_DEVICE(0x0df6, 0x0078) }, + /* SMC */ + { USB_DEVICE(0x083a, 0xa512) }, + { USB_DEVICE(0x083a, 0xc522) }, +diff --git a/drivers/of/base.c b/drivers/of/base.c +index 7d4c70f859e3..6c18ab2a16f1 100644 +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -1057,52 +1057,6 @@ int of_property_read_string(struct device_node *np, const char *propname, + EXPORT_SYMBOL_GPL(of_property_read_string); + + /** +- * of_property_read_string_index - Find and read a string from a multiple +- * strings property. +- * @np: device node from which the property value is to be read. +- * @propname: name of the property to be searched. +- * @index: index of the string in the list of strings +- * @out_string: pointer to null terminated return string, modified only if +- * return value is 0. +- * +- * Search for a property in a device tree node and retrieve a null +- * terminated string value (pointer to data, not a copy) in the list of strings +- * contained in that property. +- * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if +- * property does not have a value, and -EILSEQ if the string is not +- * null-terminated within the length of the property data. +- * +- * The out_string pointer is modified only if a valid string can be decoded. 
+- */ +-int of_property_read_string_index(struct device_node *np, const char *propname, +- int index, const char **output) +-{ +- struct property *prop = of_find_property(np, propname, NULL); +- int i = 0; +- size_t l = 0, total = 0; +- const char *p; +- +- if (!prop) +- return -EINVAL; +- if (!prop->value) +- return -ENODATA; +- if (strnlen(prop->value, prop->length) >= prop->length) +- return -EILSEQ; +- +- p = prop->value; +- +- for (i = 0; total < prop->length; total += l, p += l) { +- l = strlen(p) + 1; +- if (i++ == index) { +- *output = p; +- return 0; +- } +- } +- return -ENODATA; +-} +-EXPORT_SYMBOL_GPL(of_property_read_string_index); +- +-/** + * of_property_match_string() - Find string in a list and return index + * @np: pointer to node containing string list property + * @propname: string list property name +@@ -1128,7 +1082,7 @@ int of_property_match_string(struct device_node *np, const char *propname, + end = p + prop->length; + + for (i = 0; p < end; i++, p += l) { +- l = strlen(p) + 1; ++ l = strnlen(p, end - p) + 1; + if (p + l > end) + return -EILSEQ; + pr_debug("comparing %s with %s\n", string, p); +@@ -1140,39 +1094,41 @@ int of_property_match_string(struct device_node *np, const char *propname, + EXPORT_SYMBOL_GPL(of_property_match_string); + + /** +- * of_property_count_strings - Find and return the number of strings from a +- * multiple strings property. ++ * of_property_read_string_util() - Utility helper for parsing string properties + * @np: device node from which the property value is to be read. + * @propname: name of the property to be searched. ++ * @out_strs: output array of string pointers. ++ * @sz: number of array elements to read. ++ * @skip: Number of strings to skip over at beginning of list. + * +- * Search for a property in a device tree node and retrieve the number of null +- * terminated string contain in it. Returns the number of strings on +- * success, -EINVAL if the property does not exist, -ENODATA if property +- * does not have a value, and -EILSEQ if the string is not null-terminated +- * within the length of the property data. ++ * Don't call this function directly. It is a utility helper for the ++ * of_property_read_string*() family of functions. + */ +-int of_property_count_strings(struct device_node *np, const char *propname) ++int of_property_read_string_helper(struct device_node *np, const char *propname, ++ const char **out_strs, size_t sz, int skip) + { + struct property *prop = of_find_property(np, propname, NULL); +- int i = 0; +- size_t l = 0, total = 0; +- const char *p; ++ int l = 0, i = 0; ++ const char *p, *end; + + if (!prop) + return -EINVAL; + if (!prop->value) + return -ENODATA; +- if (strnlen(prop->value, prop->length) >= prop->length) +- return -EILSEQ; +- + p = prop->value; ++ end = p + prop->length; + +- for (i = 0; total < prop->length; total += l, p += l, i++) +- l = strlen(p) + 1; +- +- return i; ++ for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) { ++ l = strnlen(p, end - p) + 1; ++ if (p + l > end) ++ return -EILSEQ; ++ if (out_strs && i >= skip) ++ *out_strs++ = p; ++ } ++ i -= skip; ++ return i <= 0 ? 
-ENODATA : i; + } +-EXPORT_SYMBOL_GPL(of_property_count_strings); ++EXPORT_SYMBOL_GPL(of_property_read_string_helper); + + static int __of_parse_phandle_with_args(const struct device_node *np, + const char *list_name, +diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c +index 0eb5c38b4e07..f5e8dc7a725c 100644 +--- a/drivers/of/selftest.c ++++ b/drivers/of/selftest.c +@@ -126,8 +126,9 @@ static void __init of_selftest_parse_phandle_with_args(void) + selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); + } + +-static void __init of_selftest_property_match_string(void) ++static void __init of_selftest_property_string(void) + { ++ const char *strings[4]; + struct device_node *np; + int rc; + +@@ -145,13 +146,66 @@ static void __init of_selftest_property_match_string(void) + rc = of_property_match_string(np, "phandle-list-names", "third"); + selftest(rc == 2, "third expected:0 got:%i\n", rc); + rc = of_property_match_string(np, "phandle-list-names", "fourth"); +- selftest(rc == -ENODATA, "unmatched string; rc=%i", rc); ++ selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); + rc = of_property_match_string(np, "missing-property", "blah"); +- selftest(rc == -EINVAL, "missing property; rc=%i", rc); ++ selftest(rc == -EINVAL, "missing property; rc=%i\n", rc); + rc = of_property_match_string(np, "empty-property", "blah"); +- selftest(rc == -ENODATA, "empty property; rc=%i", rc); ++ selftest(rc == -ENODATA, "empty property; rc=%i\n", rc); + rc = of_property_match_string(np, "unterminated-string", "blah"); +- selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc); ++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); ++ ++ /* of_property_count_strings() tests */ ++ rc = of_property_count_strings(np, "string-property"); ++ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc); ++ rc = of_property_count_strings(np, "phandle-list-names"); ++ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc); ++ rc = of_property_count_strings(np, "unterminated-string"); ++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); ++ rc = of_property_count_strings(np, "unterminated-string-list"); ++ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc); ++ ++ /* of_property_read_string_index() tests */ ++ rc = of_property_read_string_index(np, "string-property", 0, strings); ++ selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc); ++ strings[0] = NULL; ++ rc = of_property_read_string_index(np, "string-property", 1, strings); ++ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); ++ rc = of_property_read_string_index(np, "phandle-list-names", 0, strings); ++ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc); ++ rc = of_property_read_string_index(np, "phandle-list-names", 1, strings); ++ selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc); ++ rc = of_property_read_string_index(np, "phandle-list-names", 2, strings); ++ selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc); ++ strings[0] = NULL; ++ rc = of_property_read_string_index(np, "phandle-list-names", 3, strings); ++ selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); ++ strings[0] = NULL; ++ rc = of_property_read_string_index(np, "unterminated-string", 0, strings); ++ selftest(rc == -EILSEQ && 
strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); ++ rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings); ++ selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc); ++ strings[0] = NULL; ++ rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */ ++ selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc); ++ strings[1] = NULL; ++ ++ /* of_property_read_string_array() tests */ ++ rc = of_property_read_string_array(np, "string-property", strings, 4); ++ selftest(rc == 1, "Incorrect string count; rc=%i\n", rc); ++ rc = of_property_read_string_array(np, "phandle-list-names", strings, 4); ++ selftest(rc == 3, "Incorrect string count; rc=%i\n", rc); ++ rc = of_property_read_string_array(np, "unterminated-string", strings, 4); ++ selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc); ++ /* -- An incorrectly formed string should cause a failure */ ++ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4); ++ selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc); ++ /* -- parsing the correctly formed strings should still work: */ ++ strings[2] = NULL; ++ rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2); ++ selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc); ++ strings[1] = NULL; ++ rc = of_property_read_string_array(np, "phandle-list-names", strings, 1); ++ selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]); + } + + static int __init of_selftest(void) +@@ -167,7 +221,7 @@ static int __init of_selftest(void) + + pr_info("start of selftest - you will see error messages\n"); + of_selftest_parse_phandle_with_args(); +- of_selftest_property_match_string(); ++ of_selftest_property_string(); + pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL"); + return 0; + } +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c +index c9076bdaf2c1..59a8d325a697 100644 +--- a/drivers/platform/x86/acer-wmi.c ++++ b/drivers/platform/x86/acer-wmi.c +@@ -572,6 +572,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] = { + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"), + }, + }, ++ { ++ /* ++ * Note no video_set_backlight_video_vendor, we must use the ++ * acer interface, as there is no native backlight interface. 
++ */ ++ .ident = "Acer KAV80", ++ .matches = { ++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"), ++ DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"), ++ }, ++ }, + {} + }; + +diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c +index ce4b96c15eba..85a54b39e3b7 100644 +--- a/drivers/regulator/max77693.c ++++ b/drivers/regulator/max77693.c +@@ -231,7 +231,7 @@ static int max77693_pmic_probe(struct platform_device *pdev) + struct max77693_pmic_dev *max77693_pmic; + struct max77693_regulator_data *rdata = NULL; + int num_rdata, i, ret; +- struct regulator_config config; ++ struct regulator_config config = { }; + + num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata); + if (!rdata || num_rdata <= 0) { +diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +index f85b9e5c1f05..80a1f9f40aac 100644 +--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c ++++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c +@@ -740,7 +740,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess) + pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); + + node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); +- WARN_ON(node && (node != se_nacl)); ++ if (WARN_ON(node && (node != se_nacl))) { ++ /* ++ * The nacl no longer matches what we think it should be. ++ * Most likely a new dynamic acl has been added while ++ * someone dropped the hardware lock. It clearly is a ++ * bug elsewhere, but this bit can't make things worse. ++ */ ++ btree_insert32(&lport->lport_fcport_map, nacl->nport_id, ++ node, GFP_ATOMIC); ++ } + + pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", + se_nacl, nacl->nport_wwnn, nacl->nport_id); +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c +index f1322343d789..5d61ccbd31c2 100644 +--- a/drivers/spi/spi-fsl-dspi.c ++++ b/drivers/spi/spi-fsl-dspi.c +@@ -45,7 +45,7 @@ + + #define SPI_TCR 0x08 + +-#define SPI_CTAR(x) (0x0c + (x * 4)) ++#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4)) + #define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27) + #define SPI_CTAR_CPOL(x) ((x) << 26) + #define SPI_CTAR_CPHA(x) ((x) << 25) +@@ -69,7 +69,7 @@ + + #define SPI_PUSHR 0x34 + #define SPI_PUSHR_CONT (1 << 31) +-#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28) ++#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28) + #define SPI_PUSHR_EOQ (1 << 27) + #define SPI_PUSHR_CTCNT (1 << 26) + #define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16) +diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c +index 9c511a954d21..b1a9ba893fab 100644 +--- a/drivers/spi/spi-pl022.c ++++ b/drivers/spi/spi-pl022.c +@@ -1075,7 +1075,7 @@ err_rxdesc: + pl022->sgt_tx.nents, DMA_TO_DEVICE); + err_tx_sgmap: + dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, +- pl022->sgt_tx.nents, DMA_FROM_DEVICE); ++ pl022->sgt_rx.nents, DMA_FROM_DEVICE); + err_rx_sgmap: + sg_free_table(&pl022->sgt_tx); + err_alloc_tx_sg: +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index fa28c75c6d04..5b0e57210066 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -1287,7 +1287,9 @@ static int pxa2xx_spi_suspend(struct device *dev) + if (status != 0) + return status; + write_SSCR0(0, drv_data->ioaddr); +- clk_disable_unprepare(ssp->clk); ++ ++ if (!pm_runtime_suspended(dev)) ++ clk_disable_unprepare(ssp->clk); + + return 0; + } +@@ -1301,7 +1303,8 @@ static int pxa2xx_spi_resume(struct device *dev) + pxa2xx_spi_dma_resume(drv_data); + + /* Enable the SSP clock */ +- clk_prepare_enable(ssp->clk); ++ if 
(!pm_runtime_suspended(dev)) ++ clk_prepare_enable(ssp->clk); + + /* Start the queue running */ + status = spi_master_resume(drv_data->master); +diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c +index 6330af656a0f..bc23d66a7a1e 100644 +--- a/drivers/staging/iio/impedance-analyzer/ad5933.c ++++ b/drivers/staging/iio/impedance-analyzer/ad5933.c +@@ -115,6 +115,7 @@ static const struct iio_chan_spec ad5933_channels[] = { + .channel = 0, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .address = AD5933_REG_TEMP_DATA, ++ .scan_index = -1, + .scan_type = { + .sign = 's', + .realbits = 14, +@@ -124,9 +125,7 @@ static const struct iio_chan_spec ad5933_channels[] = { + .type = IIO_VOLTAGE, + .indexed = 1, + .channel = 0, +- .extend_name = "real_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | +- BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "real", + .address = AD5933_REG_REAL_DATA, + .scan_index = 0, + .scan_type = { +@@ -138,9 +137,7 @@ static const struct iio_chan_spec ad5933_channels[] = { + .type = IIO_VOLTAGE, + .indexed = 1, + .channel = 0, +- .extend_name = "imag_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | +- BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "imag", + .address = AD5933_REG_IMAG_DATA, + .scan_index = 1, + .scan_type = { +@@ -746,14 +743,14 @@ static int ad5933_probe(struct i2c_client *client, + indio_dev->name = id->name; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = ad5933_channels; +- indio_dev->num_channels = 1; /* only register temp0_input */ ++ indio_dev->num_channels = ARRAY_SIZE(ad5933_channels); + + ret = ad5933_register_ring_funcs_and_init(indio_dev); + if (ret) + goto error_disable_reg; + +- /* skip temp0_input, register in0_(real|imag)_raw */ +- ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2); ++ ret = iio_buffer_register(indio_dev, ad5933_channels, ++ ARRAY_SIZE(ad5933_channels)); + if (ret) + goto error_unreg_ring; + +diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h +index 07318203a836..e8c98cf57070 100644 +--- a/drivers/staging/iio/meter/ade7758.h ++++ b/drivers/staging/iio/meter/ade7758.h +@@ -119,7 +119,6 @@ struct ade7758_state { + u8 *tx; + u8 *rx; + struct mutex buf_lock; +- const struct iio_chan_spec *ade7758_ring_channels; + struct spi_transfer ring_xfer[4]; + struct spi_message ring_msg; + /* +diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c +index 6005d4aab0c3..6f0886f764c7 100644 +--- a/drivers/staging/iio/meter/ade7758_core.c ++++ b/drivers/staging/iio/meter/ade7758_core.c +@@ -630,9 +630,6 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_VOLTAGE, + .indexed = 1, + .channel = 0, +- .extend_name = "raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE), + .scan_index = 0, + .scan_type = { +@@ -644,9 +641,6 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_CURRENT, + .indexed = 1, + .channel = 0, +- .extend_name = "raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT), + .scan_index = 1, + .scan_type = { +@@ -658,9 +652,7 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_POWER, + .indexed = 1, + .channel = 0, +- .extend_name = "apparent_raw", +- .info_mask_separate 
= BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "apparent", + .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR), + .scan_index = 2, + .scan_type = { +@@ -672,9 +664,7 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_POWER, + .indexed = 1, + .channel = 0, +- .extend_name = "active_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "active", + .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR), + .scan_index = 3, + .scan_type = { +@@ -686,9 +676,7 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_POWER, + .indexed = 1, + .channel = 0, +- .extend_name = "reactive_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "reactive", + .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR), + .scan_index = 4, + .scan_type = { +@@ -700,9 +688,6 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_VOLTAGE, + .indexed = 1, + .channel = 1, +- .extend_name = "raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE), + .scan_index = 5, + .scan_type = { +@@ -714,9 +699,6 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_CURRENT, + .indexed = 1, + .channel = 1, +- .extend_name = "raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT), + .scan_index = 6, + .scan_type = { +@@ -728,9 +710,7 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_POWER, + .indexed = 1, + .channel = 1, +- .extend_name = "apparent_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "apparent", + .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR), + .scan_index = 7, + .scan_type = { +@@ -742,9 +722,7 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_POWER, + .indexed = 1, + .channel = 1, +- .extend_name = "active_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "active", + .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR), + .scan_index = 8, + .scan_type = { +@@ -756,9 +734,7 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_POWER, + .indexed = 1, + .channel = 1, +- .extend_name = "reactive_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "reactive", + .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR), + .scan_index = 9, + .scan_type = { +@@ -770,9 +746,6 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_VOLTAGE, + .indexed = 1, + .channel = 2, +- .extend_name = "raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE), + .scan_index = 10, + .scan_type = { +@@ -784,9 +757,6 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_CURRENT, + .indexed = 1, + .channel = 2, +- .extend_name = "raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), + .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT), 
+ .scan_index = 11, + .scan_type = { +@@ -798,9 +768,7 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_POWER, + .indexed = 1, + .channel = 2, +- .extend_name = "apparent_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "apparent", + .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR), + .scan_index = 12, + .scan_type = { +@@ -812,9 +780,7 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_POWER, + .indexed = 1, + .channel = 2, +- .extend_name = "active_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "active", + .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR), + .scan_index = 13, + .scan_type = { +@@ -826,9 +792,7 @@ static const struct iio_chan_spec ade7758_channels[] = { + .type = IIO_POWER, + .indexed = 1, + .channel = 2, +- .extend_name = "reactive_raw", +- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), +- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), ++ .extend_name = "reactive", + .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR), + .scan_index = 14, + .scan_type = { +@@ -872,13 +836,14 @@ static int ade7758_probe(struct spi_device *spi) + goto error_free_rx; + } + st->us = spi; +- st->ade7758_ring_channels = &ade7758_channels[0]; + mutex_init(&st->buf_lock); + + indio_dev->name = spi->dev.driver->name; + indio_dev->dev.parent = &spi->dev; + indio_dev->info = &ade7758_info; + indio_dev->modes = INDIO_DIRECT_MODE; ++ indio_dev->channels = ade7758_channels; ++ indio_dev->num_channels = ARRAY_SIZE(ade7758_channels); + + ret = ade7758_configure_ring(indio_dev); + if (ret) +diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c +index 7d5db7175578..46eb15d25d4b 100644 +--- a/drivers/staging/iio/meter/ade7758_ring.c ++++ b/drivers/staging/iio/meter/ade7758_ring.c +@@ -89,11 +89,10 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p) + **/ + static int ade7758_ring_preenable(struct iio_dev *indio_dev) + { +- struct ade7758_state *st = iio_priv(indio_dev); + unsigned channel; + int ret; + +- if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) ++ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) + return -EINVAL; + + ret = iio_sw_buffer_preenable(indio_dev); +@@ -104,7 +103,7 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev) + indio_dev->masklength); + + ade7758_write_waveform_type(&indio_dev->dev, +- st->ade7758_ring_channels[channel].address); ++ indio_dev->channels[channel].address); + + return 0; + } +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index c7a3c5e2b1b3..a3ce91234b77 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -1322,7 +1322,8 @@ int core_dev_add_initiator_node_lun_acl( + * Check to see if there are any existing persistent reservation APTPL + * pre-registrations that need to be enabled for this LUN ACL.. 
+ */ +- core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); ++ core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl, ++ lacl->mapped_lun); + return 0; + } + +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c +index dfe3db7942ea..a1e1ecdab86c 100644 +--- a/drivers/target/target_core_pr.c ++++ b/drivers/target/target_core_pr.c +@@ -944,10 +944,10 @@ int core_scsi3_check_aptpl_registration( + struct se_device *dev, + struct se_portal_group *tpg, + struct se_lun *lun, +- struct se_lun_acl *lun_acl) ++ struct se_node_acl *nacl, ++ u32 mapped_lun) + { +- struct se_node_acl *nacl = lun_acl->se_lun_nacl; +- struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun]; ++ struct se_dev_entry *deve = nacl->device_list[mapped_lun]; + + if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) + return 0; +diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h +index ed75cdd32cb0..14a0a2e6f9c4 100644 +--- a/drivers/target/target_core_pr.h ++++ b/drivers/target/target_core_pr.h +@@ -55,7 +55,7 @@ extern int core_scsi3_alloc_aptpl_registration( + unsigned char *, u16, u32, int, int, u8); + extern int core_scsi3_check_aptpl_registration(struct se_device *, + struct se_portal_group *, struct se_lun *, +- struct se_lun_acl *); ++ struct se_node_acl *, u32); + extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, + struct se_node_acl *); + extern void core_scsi3_free_all_registrations(struct se_device *); +diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c +index b9a6ec0aa5fe..d72583597427 100644 +--- a/drivers/target/target_core_tpg.c ++++ b/drivers/target/target_core_tpg.c +@@ -40,6 +40,7 @@ + #include <target/target_core_fabric.h> + + #include "target_core_internal.h" ++#include "target_core_pr.h" + + extern struct se_device *g_lun0_dev; + +@@ -165,6 +166,13 @@ void core_tpg_add_node_to_devs( + + core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, + lun_access, acl, tpg); ++ /* ++ * Check to see if there are any existing persistent reservation ++ * APTPL pre-registrations that need to be enabled for this dynamic ++ * LUN ACL now.. ++ */ ++ core_scsi3_check_aptpl_registration(dev, tpg, lun, acl, ++ lun->unpacked_lun); + spin_lock(&tpg->tpg_lun_lock); + } + spin_unlock(&tpg->tpg_lun_lock); +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 334c3364837d..b37371ee9f95 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -1856,8 +1856,7 @@ static void transport_complete_qf(struct se_cmd *cmd) + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { + trace_target_cmd_complete(cmd); + ret = cmd->se_tfo->queue_status(cmd); +- if (ret) +- goto out; ++ goto out; + } + + switch (cmd->data_direction) { +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index b5180c10f71d..6015b6c7708f 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -353,7 +353,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios, + * The spd_hi, spd_vhi, spd_shi, spd_warp kludge... + * Die! Die! Die! 
+ */ +- if (baud == 38400) ++ if (try == 0 && baud == 38400) + baud = altbaud; + + /* +diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c +index d3448a90f0f9..25d07412e08e 100644 +--- a/drivers/tty/tty_io.c ++++ b/drivers/tty/tty_io.c +@@ -1701,6 +1701,7 @@ int tty_release(struct inode *inode, struct file *filp) + int pty_master, tty_closing, o_tty_closing, do_sleep; + int idx; + char buf[64]; ++ long timeout = 0; + + if (tty_paranoia_check(tty, inode, __func__)) + return 0; +@@ -1785,7 +1786,11 @@ int tty_release(struct inode *inode, struct file *filp) + __func__, tty_name(tty, buf)); + tty_unlock_pair(tty, o_tty); + mutex_unlock(&tty_mutex); +- schedule(); ++ schedule_timeout_killable(timeout); ++ if (timeout < 120 * HZ) ++ timeout = 2 * timeout + 1; ++ else ++ timeout = MAX_SCHEDULE_TIMEOUT; + } + + /* +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index 669836ae53e0..99fd026161d5 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -875,11 +875,12 @@ static void acm_tty_set_termios(struct tty_struct *tty, + /* FIXME: Needs to clear unsupported bits in the termios */ + acm->clocal = ((termios->c_cflag & CLOCAL) != 0); + +- if (!newline.dwDTERate) { ++ if (C_BAUD(tty) == B0) { + newline.dwDTERate = acm->line.dwDTERate; + newctrl &= ~ACM_CTRL_DTR; +- } else ++ } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) { + newctrl |= ACM_CTRL_DTR; ++ } + + if (newctrl != acm->ctrlout) + acm_set_control(acm, acm->ctrlout = newctrl); +@@ -1580,6 +1581,7 @@ static const struct usb_device_id acm_ids[] = { + { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, ++ { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */ + { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ + }, + /* Motorola H24 HSPA module: */ +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index d6a8d23f047b..830063cb4343 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -2053,6 +2053,8 @@ int usb_alloc_streams(struct usb_interface *interface, + return -EINVAL; + if (dev->speed != USB_SPEED_SUPER) + return -EINVAL; ++ if (dev->state < USB_STATE_CONFIGURED) ++ return -ENODEV; + + /* Streams only apply to bulk endpoints. 
*/ + for (i = 0; i < num_eps; i++) +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 48d3eed8e250..420bf9bb09e5 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -4347,6 +4347,9 @@ check_highspeed (struct usb_hub *hub, struct usb_device *udev, int port1) + struct usb_qualifier_descriptor *qual; + int status; + ++ if (udev->quirks & USB_QUIRK_DEVICE_QUALIFIER) ++ return; ++ + qual = kmalloc (sizeof *qual, GFP_KERNEL); + if (qual == NULL) + return; +diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c +index 280ff96a3945..0d088e9d4ded 100644 +--- a/drivers/usb/core/quirks.c ++++ b/drivers/usb/core/quirks.c +@@ -92,6 +92,16 @@ static const struct usb_device_id usb_quirk_list[] = { + { USB_DEVICE(0x04e8, 0x6601), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + ++ /* Elan Touchscreen */ ++ { USB_DEVICE(0x04f3, 0x0089), .driver_info = ++ USB_QUIRK_DEVICE_QUALIFIER }, ++ ++ { USB_DEVICE(0x04f3, 0x009b), .driver_info = ++ USB_QUIRK_DEVICE_QUALIFIER }, ++ ++ { USB_DEVICE(0x04f3, 0x016f), .driver_info = ++ USB_QUIRK_DEVICE_QUALIFIER }, ++ + /* Roland SC-8820 */ + { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, + +diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c +index 056da977ebdf..4a1922cafc8e 100644 +--- a/drivers/usb/dwc3/ep0.c ++++ b/drivers/usb/dwc3/ep0.c +@@ -251,7 +251,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc) + + /* stall is always issued on EP0 */ + dep = dwc->eps[0]; +- __dwc3_gadget_ep_set_halt(dep, 1); ++ __dwc3_gadget_ep_set_halt(dep, 1, false); + dep->flags = DWC3_EP_ENABLED; + dwc->delayed_status = false; + +@@ -461,7 +461,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc, + return -EINVAL; + if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) + break; +- ret = __dwc3_gadget_ep_set_halt(dep, set); ++ ret = __dwc3_gadget_ep_set_halt(dep, set, true); + if (ret) + return -EINVAL; + break; +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index c37da0c9a076..be149b82564f 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -532,12 +532,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, + if (!usb_endpoint_xfer_isoc(desc)) + return 0; + +- memset(&trb_link, 0, sizeof(trb_link)); +- + /* Link TRB for ISOC. 
The HWO bit is never reset */ + trb_st_hw = &dep->trb_pool[0]; + + trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; ++ memset(trb_link, 0, sizeof(*trb_link)); + + trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); + trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); +@@ -588,7 +587,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) + + /* make sure HW endpoint isn't stalled */ + if (dep->flags & DWC3_EP_STALL) +- __dwc3_gadget_ep_set_halt(dep, 0); ++ __dwc3_gadget_ep_set_halt(dep, 0, false); + + reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); + reg &= ~DWC3_DALEPENA_EP(dep->number); +@@ -1186,7 +1185,7 @@ out0: + return ret; + } + +-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) ++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) + { + struct dwc3_gadget_ep_cmd_params params; + struct dwc3 *dwc = dep->dwc; +@@ -1195,6 +1194,14 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) + memset(¶ms, 0x00, sizeof(params)); + + if (value) { ++ if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) || ++ (!list_empty(&dep->req_queued) || ++ !list_empty(&dep->request_list)))) { ++ dev_dbg(dwc->dev, "%s: pending request, cannot halt\n", ++ dep->name); ++ return -EAGAIN; ++ } ++ + ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, + DWC3_DEPCMD_SETSTALL, ¶ms); + if (ret) +@@ -1234,7 +1241,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value) + goto out; + } + +- ret = __dwc3_gadget_ep_set_halt(dep, value); ++ ret = __dwc3_gadget_ep_set_halt(dep, value, false); + out: + spin_unlock_irqrestore(&dwc->lock, flags); + +@@ -1254,7 +1261,7 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep) + if (dep->number == 0 || dep->number == 1) + return dwc3_gadget_ep0_set_halt(ep, 1); + else +- return dwc3_gadget_ep_set_halt(ep, 1); ++ return __dwc3_gadget_ep_set_halt(dep, 1, false); + } + + /* -------------------------------------------------------------------------- */ +diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h +index a0ee75b68a80..ac62558231be 100644 +--- a/drivers/usb/dwc3/gadget.h ++++ b/drivers/usb/dwc3/gadget.h +@@ -85,7 +85,7 @@ void dwc3_ep0_out_start(struct dwc3 *dwc); + int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value); + int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, + gfp_t gfp_flags); +-int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value); ++int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol); + + /** + * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW +diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c +index ab1065afbbd0..3384486c2884 100644 +--- a/drivers/usb/gadget/f_acm.c ++++ b/drivers/usb/gadget/f_acm.c +@@ -430,11 +430,12 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) + if (acm->notify->driver_data) { + VDBG(cdev, "reset acm control interface %d\n", intf); + usb_ep_disable(acm->notify); +- } else { +- VDBG(cdev, "init acm ctrl interface %d\n", intf); ++ } ++ ++ if (!acm->notify->desc) + if (config_ep_by_speed(cdev->gadget, f, acm->notify)) + return -EINVAL; +- } ++ + usb_ep_enable(acm->notify); + acm->notify->driver_data = acm; + +diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c +index 59891b1c48fc..a4aa92376271 100644 +--- a/drivers/usb/gadget/udc-core.c ++++ b/drivers/usb/gadget/udc-core.c +@@ -455,6 +455,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev, + { + struct usb_udc *udc = 
container_of(dev, struct usb_udc, dev); + ++ if (!udc->driver) { ++ dev_err(dev, "soft-connect without a gadget driver\n"); ++ return -EOPNOTSUPP; ++ } ++ + if (sysfs_streq(buf, "connect")) { + usb_gadget_udc_start(udc->gadget, udc->driver); + usb_gadget_connect(udc->gadget); +diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c +index bd6cc0bea150..5c91ff379345 100644 +--- a/drivers/usb/musb/musb_cppi41.c ++++ b/drivers/usb/musb/musb_cppi41.c +@@ -190,7 +190,8 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer) + } + } + +- if (!list_empty(&controller->early_tx_list)) { ++ if (!list_empty(&controller->early_tx_list) && ++ !hrtimer_is_queued(&controller->early_tx)) { + ret = HRTIMER_RESTART; + hrtimer_forward_now(&controller->early_tx, + ktime_set(0, 50 * NSEC_PER_USEC)); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 95b32d0e2b26..34b16b226f74 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -155,6 +155,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ + { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ ++ { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */ + { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ + { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */ + { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index 87d816bbf9e0..74c20472c25b 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -146,6 +146,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = { + * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report. + */ + static struct usb_device_id id_table_combined [] = { ++ { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, +@@ -675,6 +676,8 @@ static struct usb_device_id id_table_combined [] = { + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, + { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, ++ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) }, ++ { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, + { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index 5937b2d242f2..6786b705ccf6 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -30,6 +30,12 @@ + + /*** third-party PIDs (using FTDI_VID) ***/ + ++/* ++ * Certain versions of the official Windows FTDI driver reprogrammed ++ * counterfeit FTDI devices to PID 0. Support these devices anyway. ++ */ ++#define FTDI_BRICK_PID 0x0000 ++ + #define FTDI_LUMEL_PD12_PID 0x6002 + + /* +@@ -143,8 +149,12 @@ + * Xsens Technologies BV products (http://www.xsens.com). 
+ */ + #define XSENS_VID 0x2639 +-#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ ++#define XSENS_AWINDA_STATION_PID 0x0101 ++#define XSENS_AWINDA_DONGLE_PID 0x0102 + #define XSENS_MTW_PID 0x0200 /* Xsens MTw */ ++#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ ++ ++/* Xsens devices using FTDI VID */ + #define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */ + #define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */ + #define XSENS_CONVERTER_2_PID 0xD38A +diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c +index 78b48c31abf5..efa75b4e51f2 100644 +--- a/drivers/usb/serial/kobil_sct.c ++++ b/drivers/usb/serial/kobil_sct.c +@@ -336,7 +336,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port, + port->interrupt_out_urb->transfer_buffer_length = length; + + priv->cur_pos = priv->cur_pos + length; +- result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO); ++ result = usb_submit_urb(port->interrupt_out_urb, ++ GFP_ATOMIC); + dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result); + todo = priv->filled - priv->cur_pos; + +@@ -351,7 +352,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port, + if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID || + priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) { + result = usb_submit_urb(port->interrupt_in_urb, +- GFP_NOIO); ++ GFP_ATOMIC); + dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result); + } + } +diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c +index cbe779f578f9..df495ea0d977 100644 +--- a/drivers/usb/serial/opticon.c ++++ b/drivers/usb/serial/opticon.c +@@ -219,7 +219,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port, + + /* The conncected devices do not have a bulk write endpoint, + * to transmit data to de barcode device the control endpoint is used */ +- dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); ++ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC); + if (!dr) { + dev_err(&port->dev, "out of memory\n"); + count = -ENOMEM; +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index e47aabe0c760..8b3484134ab0 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb); + #define TELIT_PRODUCT_DE910_DUAL 0x1010 + #define TELIT_PRODUCT_UE910_V2 0x1012 + #define TELIT_PRODUCT_LE920 0x1200 ++#define TELIT_PRODUCT_LE910 0x1201 + + /* ZTE PRODUCTS */ + #define ZTE_VENDOR_ID 0x19d2 +@@ -361,6 +362,7 @@ static void option_instat_callback(struct urb *urb); + + /* Haier products */ + #define HAIER_VENDOR_ID 0x201e ++#define HAIER_PRODUCT_CE81B 0x10f8 + #define HAIER_PRODUCT_CE100 0x2009 + + /* Cinterion (formerly Siemens) products */ +@@ -588,6 +590,11 @@ static const struct option_blacklist_info zte_1255_blacklist = { + .reserved = BIT(3) | BIT(4), + }; + ++static const struct option_blacklist_info telit_le910_blacklist = { ++ .sendsetup = BIT(0), ++ .reserved = BIT(1) | BIT(2), ++}; ++ + static const struct option_blacklist_info telit_le920_blacklist = { + .sendsetup = BIT(0), + .reserved = BIT(1) | BIT(5), +@@ -1137,6 +1144,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, ++ { USB_DEVICE(TELIT_VENDOR_ID, 
TELIT_PRODUCT_LE910), ++ .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), + .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ +@@ -1612,6 +1621,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, + { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, + { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) }, + /* Pirelli */ + { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) }, + { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) }, +diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c +index 22c7d4360fa2..b1d815eb6d0b 100644 +--- a/drivers/usb/storage/transport.c ++++ b/drivers/usb/storage/transport.c +@@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us) + */ + if (result == USB_STOR_XFER_LONG) + fake_sense = 1; ++ ++ /* ++ * Sometimes a device will mistakenly skip the data phase ++ * and go directly to the status phase without sending a ++ * zero-length packet. If we get a 13-byte response here, ++ * check whether it really is a CSW. ++ */ ++ if (result == USB_STOR_XFER_SHORT && ++ srb->sc_data_direction == DMA_FROM_DEVICE && ++ transfer_length - scsi_get_resid(srb) == ++ US_BULK_CS_WRAP_LEN) { ++ struct scatterlist *sg = NULL; ++ unsigned int offset = 0; ++ ++ if (usb_stor_access_xfer_buf((unsigned char *) bcs, ++ US_BULK_CS_WRAP_LEN, srb, &sg, ++ &offset, FROM_XFER_BUF) == ++ US_BULK_CS_WRAP_LEN && ++ bcs->Signature == ++ cpu_to_le32(US_BULK_CS_SIGN)) { ++ usb_stor_dbg(us, "Device skipped data phase\n"); ++ scsi_set_resid(srb, transfer_length); ++ goto skipped_data_phase; ++ } ++ } + } + + /* See flow chart on pg 15 of the Bulk Only Transport spec for +@@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us) + if (result != USB_STOR_XFER_GOOD) + return USB_STOR_TRANSPORT_ERROR; + ++ skipped_data_phase: + /* check bulk status */ + residue = le32_to_cpu(bcs->Residue); + usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n", +diff --git a/drivers/video/console/bitblit.c b/drivers/video/console/bitblit.c +index 61b182bf32a2..dbfe4eecf12e 100644 +--- a/drivers/video/console/bitblit.c ++++ b/drivers/video/console/bitblit.c +@@ -205,7 +205,6 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, + static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, + int bottom_only) + { +- int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; + unsigned int cw = vc->vc_font.width; + unsigned int ch = vc->vc_font.height; + unsigned int rw = info->var.xres - (vc->vc_cols*cw); +@@ -214,7 +213,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info, + unsigned int bs = info->var.yres - bh; + struct fb_fillrect region; + +- region.color = attr_bgcol_ec(bgshift, vc, info); ++ region.color = 0; + region.rop = ROP_COPY; + + if (rw && !bottom_only) { +diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c +index 41b32ae23dac..5a3cbf6dff4d 100644 +--- a/drivers/video/console/fbcon_ccw.c ++++ b/drivers/video/console/fbcon_ccw.c +@@ -197,9 +197,8 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info, + unsigned int bh = info->var.xres - (vc->vc_rows*ch); + unsigned int bs = vc->vc_rows*ch; + struct fb_fillrect region; +- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; + +- region.color = attr_bgcol_ec(bgshift,vc,info); ++ region.color = 0; + region.rop = ROP_COPY; + + if (rw && !bottom_only) { +diff --git a/drivers/video/console/fbcon_cw.c b/drivers/video/console/fbcon_cw.c +index a93670ef7f89..e7ee44db4e98 100644 +--- a/drivers/video/console/fbcon_cw.c ++++ b/drivers/video/console/fbcon_cw.c +@@ -180,9 +180,8 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info, + unsigned int bh = info->var.xres - (vc->vc_rows*ch); + unsigned int rs = info->var.yres - rw; + struct fb_fillrect region; +- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; + +- region.color = attr_bgcol_ec(bgshift,vc,info); ++ region.color = 0; + region.rop = ROP_COPY; + + if (rw && !bottom_only) { +diff --git a/drivers/video/console/fbcon_ud.c b/drivers/video/console/fbcon_ud.c +index ff0872c0498b..19e3714abfe8 100644 +--- a/drivers/video/console/fbcon_ud.c ++++ b/drivers/video/console/fbcon_ud.c +@@ -227,9 +227,8 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, + unsigned int rw = info->var.xres - (vc->vc_cols*cw); + unsigned int bh = info->var.yres - (vc->vc_rows*ch); + struct fb_fillrect region; +- int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; + +- region.color = attr_bgcol_ec(bgshift,vc,info); ++ region.color = 0; + region.rop = ROP_COPY; + + if (rw && !bottom_only) { +diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c +index 98917fc872a4..e2ccc0f8e40a 100644 +--- a/drivers/virtio/virtio_pci.c ++++ b/drivers/virtio/virtio_pci.c +@@ -792,6 +792,7 @@ static int virtio_pci_restore(struct device *dev) + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + struct virtio_driver *drv; ++ unsigned status = 0; + int ret; + + drv = container_of(vp_dev->vdev.dev.driver, +@@ -802,14 +803,40 @@ static int virtio_pci_restore(struct device *dev) + return ret; + + pci_set_master(pci_dev); ++ /* We always start by resetting the device, in case a previous ++ * driver messed it up. */ ++ vp_reset(&vp_dev->vdev); ++ ++ /* Acknowledge that we've seen the device. */ ++ status |= VIRTIO_CONFIG_S_ACKNOWLEDGE; ++ vp_set_status(&vp_dev->vdev, status); ++ ++ /* Maybe driver failed before freeze. ++ * Restore the failed status, for debugging. */ ++ status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED; ++ vp_set_status(&vp_dev->vdev, status); ++ ++ if (!drv) ++ return 0; ++ ++ /* We have a driver! 
*/ ++ status |= VIRTIO_CONFIG_S_DRIVER; ++ vp_set_status(&vp_dev->vdev, status); ++ + vp_finalize_features(&vp_dev->vdev); + +- if (drv && drv->restore) ++ if (drv->restore) { + ret = drv->restore(&vp_dev->vdev); ++ if (ret) { ++ status |= VIRTIO_CONFIG_S_FAILED; ++ vp_set_status(&vp_dev->vdev, status); ++ return ret; ++ } ++ } + + /* Finally, tell the device we're all set */ +- if (!ret) +- vp_set_status(&vp_dev->vdev, vp_dev->saved_status); ++ status |= VIRTIO_CONFIG_S_DRIVER_OK; ++ vp_set_status(&vp_dev->vdev, status); + + return ret; + } +diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c +index d4731e9808ea..ced6aa4ca3c3 100644 +--- a/fs/btrfs/file-item.c ++++ b/fs/btrfs/file-item.c +@@ -420,7 +420,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, + ret = 0; + fail: + while (ret < 0 && !list_empty(&tmplist)) { +- sums = list_entry(&tmplist, struct btrfs_ordered_sum, list); ++ sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list); + list_del(&sums->list); + kfree(sums); + } +diff --git a/fs/buffer.c b/fs/buffer.c +index fe2182ec812d..333adbc7ed5a 100644 +--- a/fs/buffer.c ++++ b/fs/buffer.c +@@ -2089,6 +2089,7 @@ int generic_write_end(struct file *file, struct address_space *mapping, + struct page *page, void *fsdata) + { + struct inode *inode = mapping->host; ++ loff_t old_size = inode->i_size; + int i_size_changed = 0; + + copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); +@@ -2108,6 +2109,8 @@ int generic_write_end(struct file *file, struct address_space *mapping, + unlock_page(page); + page_cache_release(page); + ++ if (old_size < pos) ++ pagecache_isize_extended(inode, old_size, pos); + /* + * Don't mark the inode dirty under page lock. First, it unnecessarily + * makes the holding time of page lock longer. Second, it forces lock +@@ -2325,6 +2328,11 @@ static int cont_expand_zero(struct file *file, struct address_space *mapping, + err = 0; + + balance_dirty_pages_ratelimited(mapping); ++ ++ if (unlikely(fatal_signal_pending(current))) { ++ err = -EINTR; ++ goto out; ++ } + } + + /* page covers the boundary, find the boundary offset */ +diff --git a/fs/dcache.c b/fs/dcache.c +index e15f90c0e96a..d449b1aed5ad 100644 +--- a/fs/dcache.c ++++ b/fs/dcache.c +@@ -2736,6 +2736,9 @@ static int prepend(char **buffer, int *buflen, const char *str, int namelen) + * the beginning of the name. The sequence number check at the caller will + * retry it again when a d_move() does happen. So any garbage in the buffer + * due to mismatched pointer and length will be discarded. ++ * ++ * Data dependency barrier is needed to make sure that we see that terminating ++ * NUL. Alpha strikes again, film at 11... 
+ */ + static int prepend_name(char **buffer, int *buflen, struct qstr *name) + { +@@ -2743,6 +2746,8 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name) + u32 dlen = ACCESS_ONCE(name->len); + char *p; + ++ smp_read_barrier_depends(); ++ + *buflen -= dlen + 1; + if (*buflen < 0) + return -ENAMETOOLONG; +diff --git a/fs/ext3/super.c b/fs/ext3/super.c +index c50c76190373..03fd6bce713b 100644 +--- a/fs/ext3/super.c ++++ b/fs/ext3/super.c +@@ -1354,13 +1354,6 @@ set_qf_format: + "not specified."); + return 0; + } +- } else { +- if (sbi->s_jquota_fmt) { +- ext3_msg(sb, KERN_ERR, "error: journaled quota format " +- "specified with no journaling " +- "enabled."); +- return 0; +- } + } + #endif + return 1; +diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c +index 3285aa5a706a..b610779a958c 100644 +--- a/fs/ext4/bitmap.c ++++ b/fs/ext4/bitmap.c +@@ -24,8 +24,7 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, + __u32 provided, calculated; + struct ext4_sb_info *sbi = EXT4_SB(sb); + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(sb)) + return 1; + + provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo); +@@ -46,8 +45,7 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group, + __u32 csum; + struct ext4_sb_info *sbi = EXT4_SB(sb); + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(sb)) + return; + + csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); +@@ -65,8 +63,7 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, + struct ext4_sb_info *sbi = EXT4_SB(sb); + int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8; + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(sb)) + return 1; + + provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo); +@@ -91,8 +88,7 @@ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group, + __u32 csum; + struct ext4_sb_info *sbi = EXT4_SB(sb); + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(sb)) + return; + + csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 54d94db2cf03..29c4e30bf4ca 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -2093,6 +2093,7 @@ int do_journal_get_write_access(handle_t *handle, + #define CONVERT_INLINE_DATA 2 + + extern struct inode *ext4_iget(struct super_block *, unsigned long); ++extern struct inode *ext4_iget_normal(struct super_block *, unsigned long); + extern int ext4_write_inode(struct inode *, struct writeback_control *); + extern int ext4_setattr(struct dentry *, struct iattr *); + extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, +@@ -2323,10 +2324,18 @@ extern int ext4_register_li_request(struct super_block *sb, + static inline int ext4_has_group_desc_csum(struct super_block *sb) + { + return EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_GDT_CSUM | +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM); ++ EXT4_FEATURE_RO_COMPAT_GDT_CSUM) || ++ (EXT4_SB(sb)->s_chksum_driver != NULL); + } + ++static inline int ext4_has_metadata_csum(struct super_block *sb) ++{ ++ WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb, ++ EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && ++ !EXT4_SB(sb)->s_chksum_driver); ++ ++ return (EXT4_SB(sb)->s_chksum_driver != NULL); ++} + static inline ext4_fsblk_t 
ext4_blocks_count(struct ext4_super_block *es) + { + return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) | +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 8dd96591b2f8..33a676515df0 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -74,8 +74,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode, + { + struct ext4_extent_tail *et; + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(inode->i_sb)) + return 1; + + et = find_ext4_extent_tail(eh); +@@ -89,8 +88,7 @@ static void ext4_extent_block_csum_set(struct inode *inode, + { + struct ext4_extent_tail *et; + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(inode->i_sb)) + return; + + et = find_ext4_extent_tail(eh); +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c +index 5b5971c20af1..fbc6df7b895d 100644 +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -864,6 +864,10 @@ got: + struct buffer_head *block_bitmap_bh; + + block_bitmap_bh = ext4_read_block_bitmap(sb, group); ++ if (!block_bitmap_bh) { ++ err = -EIO; ++ goto out; ++ } + BUFFER_TRACE(block_bitmap_bh, "get block bitmap access"); + err = ext4_journal_get_write_access(handle, block_bitmap_bh); + if (err) { +@@ -988,8 +992,7 @@ got: + spin_unlock(&sbi->s_next_gen_lock); + + /* Precompute checksum seed for inode metadata */ +- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { ++ if (ext4_has_metadata_csum(sb)) { + __u32 csum; + __le32 inum = cpu_to_le32(inode->i_ino); + __le32 gen = cpu_to_le32(inode->i_generation); +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index 46b366897553..b7e491056f9c 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -1126,8 +1126,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle, + memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE, + inline_size - EXT4_INLINE_DOTDOT_SIZE); + +- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(inode->i_sb)) + csum_size = sizeof(struct ext4_dir_entry_tail); + + inode->i_size = inode->i_sb->s_blocksize; +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index d65a6260ad61..a58a796bb92b 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -83,8 +83,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw, + + if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != + cpu_to_le32(EXT4_OS_LINUX) || +- !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ !ext4_has_metadata_csum(inode->i_sb)) + return 1; + + provided = le16_to_cpu(raw->i_checksum_lo); +@@ -105,8 +104,7 @@ static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw, + + if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != + cpu_to_le32(EXT4_OS_LINUX) || +- !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ !ext4_has_metadata_csum(inode->i_sb)) + return; + + csum = ext4_inode_csum(inode, raw, ei); +@@ -2629,6 +2627,20 @@ static int ext4_nonda_switch(struct super_block *sb) + return 0; + } + ++/* We always reserve for an inode update; the superblock could be there too */ ++static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len) ++{ ++ if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, ++ EXT4_FEATURE_RO_COMPAT_LARGE_FILE))) ++ return 1; ++ ++ if (pos + len <= 0x7fffffffULL) ++ return 1; ++ ++ /* We might need to update the superblock to set 
LARGE_FILE */ ++ return 2; ++} ++ + static int ext4_da_write_begin(struct file *file, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +@@ -2679,7 +2691,8 @@ retry_grab: + * of file which has an already mapped buffer. + */ + retry_journal: +- handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1); ++ handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, ++ ext4_da_write_credits(inode, pos, len)); + if (IS_ERR(handle)) { + page_cache_release(page); + return PTR_ERR(handle); +@@ -4062,8 +4075,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) + ei->i_extra_isize = 0; + + /* Precompute checksum seed for inode metadata */ +- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { ++ if (ext4_has_metadata_csum(sb)) { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + __u32 csum; + __le32 inum = cpu_to_le32(inode->i_ino); +@@ -4251,6 +4263,13 @@ bad_inode: + return ERR_PTR(ret); + } + ++struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino) ++{ ++ if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO) ++ return ERR_PTR(-EIO); ++ return ext4_iget(sb, ino); ++} ++ + static int ext4_inode_blocks_set(handle_t *handle, + struct ext4_inode *raw_inode, + struct ext4_inode_info *ei) +@@ -4655,8 +4674,12 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) + ext4_orphan_del(NULL, inode); + goto err_out; + } +- } else ++ } else { ++ loff_t oldsize = inode->i_size; ++ + i_size_write(inode, attr->ia_size); ++ pagecache_isize_extended(inode, oldsize, inode->i_size); ++ } + + /* + * Blocks are going to be removed from the inode. Wait +diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c +index d011b69ae8ae..54d49119f692 100644 +--- a/fs/ext4/ioctl.c ++++ b/fs/ext4/ioctl.c +@@ -347,8 +347,7 @@ flags_out: + if (!inode_owner_or_capable(inode)) + return -EPERM; + +- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { ++ if (ext4_has_metadata_csum(inode->i_sb)) { + ext4_warning(sb, "Setting inode version is not " + "supported with metadata_csum enabled."); + return -ENOTTY; +@@ -548,9 +547,17 @@ group_add_out: + } + + case EXT4_IOC_SWAP_BOOT: ++ { ++ int err; + if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; +- return swap_inode_boot_loader(sb, inode); ++ err = mnt_want_write_file(filp); ++ if (err) ++ return err; ++ err = swap_inode_boot_loader(sb, inode); ++ mnt_drop_write_file(filp); ++ return err; ++ } + + case EXT4_IOC_RESIZE_FS: { + ext4_fsblk_t n_blocks_count; +diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c +index 214461e42a05..b69ca478e08d 100644 +--- a/fs/ext4/mmp.c ++++ b/fs/ext4/mmp.c +@@ -20,8 +20,7 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp) + + int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp) + { +- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(sb)) + return 1; + + return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp); +@@ -29,8 +28,7 @@ int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp) + + void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp) + { +- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(sb)) + return; + + mmp->mmp_checksum = ext4_mmp_csum(sb, mmp); +diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c +index 5a0408d7b114..7e6954cbcef7 100644 +--- a/fs/ext4/namei.c ++++ b/fs/ext4/namei.c +@@ -123,8 +123,7 @@ 
static struct buffer_head *__ext4_read_dirblock(struct inode *inode, + "directory leaf block found instead of index block"); + return ERR_PTR(-EIO); + } +- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) || ++ if (!ext4_has_metadata_csum(inode->i_sb) || + buffer_verified(bh)) + return bh; + +@@ -339,8 +338,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent) + { + struct ext4_dir_entry_tail *t; + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(inode->i_sb)) + return 1; + + t = get_dirent_tail(inode, dirent); +@@ -361,8 +359,7 @@ static void ext4_dirent_csum_set(struct inode *inode, + { + struct ext4_dir_entry_tail *t; + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(inode->i_sb)) + return; + + t = get_dirent_tail(inode, dirent); +@@ -437,8 +434,7 @@ static int ext4_dx_csum_verify(struct inode *inode, + struct dx_tail *t; + int count_offset, limit, count; + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(inode->i_sb)) + return 1; + + c = get_dx_countlimit(inode, dirent, &count_offset); +@@ -467,8 +463,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent) + struct dx_tail *t; + int count_offset, limit, count; + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(inode->i_sb)) + return; + + c = get_dx_countlimit(inode, dirent, &count_offset); +@@ -556,8 +551,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize) + unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - + EXT4_DIR_REC_LEN(2) - infosize; + +- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(dir->i_sb)) + entry_space -= sizeof(struct dx_tail); + return entry_space / sizeof(struct dx_entry); + } +@@ -566,8 +560,7 @@ static inline unsigned dx_node_limit(struct inode *dir) + { + unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); + +- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(dir->i_sb)) + entry_space -= sizeof(struct dx_tail); + return entry_space / sizeof(struct dx_entry); + } +@@ -1430,7 +1423,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi + dentry->d_name.name); + return ERR_PTR(-EIO); + } +- inode = ext4_iget(dir->i_sb, ino); ++ inode = ext4_iget_normal(dir->i_sb, ino); + if (inode == ERR_PTR(-ESTALE)) { + EXT4_ERROR_INODE(dir, + "deleted inode referenced: %u", +@@ -1461,7 +1454,7 @@ struct dentry *ext4_get_parent(struct dentry *child) + return ERR_PTR(-EIO); + } + +- return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino)); ++ return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino)); + } + + /* +@@ -1535,8 +1528,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, + int csum_size = 0; + int err = 0, i; + +- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(dir->i_sb)) + csum_size = sizeof(struct ext4_dir_entry_tail); + + bh2 = ext4_append(handle, dir, &newblock); +@@ -1705,8 +1697,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry, + int csum_size = 0; + int err; + +- if 
(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(inode->i_sb)) + csum_size = sizeof(struct ext4_dir_entry_tail); + + if (!de) { +@@ -1773,8 +1764,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, + struct fake_dirent *fde; + int csum_size = 0; + +- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(inode->i_sb)) + csum_size = sizeof(struct ext4_dir_entry_tail); + + blocksize = dir->i_sb->s_blocksize; +@@ -1890,8 +1880,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, + ext4_lblk_t block, blocks; + int csum_size = 0; + +- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(inode->i_sb)) + csum_size = sizeof(struct ext4_dir_entry_tail); + + sb = dir->i_sb; +@@ -2153,8 +2142,7 @@ static int ext4_delete_entry(handle_t *handle, + return err; + } + +- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(dir->i_sb)) + csum_size = sizeof(struct ext4_dir_entry_tail); + + BUFFER_TRACE(bh, "get_write_access"); +@@ -2373,8 +2361,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir, + int csum_size = 0; + int err; + +- if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(dir->i_sb)) + csum_size = sizeof(struct ext4_dir_entry_tail); + + if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { +diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c +index f3b84cd9de56..2400ad1c3d12 100644 +--- a/fs/ext4/resize.c ++++ b/fs/ext4/resize.c +@@ -1071,7 +1071,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data, + break; + + if (meta_bg == 0) +- backup_block = group * bpg + blk_off; ++ backup_block = ((ext4_fsblk_t)group) * bpg + blk_off; + else + backup_block = (ext4_group_first_block_no(sb, group) + + ext4_bg_has_super(sb, group)); +@@ -1200,8 +1200,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb, + { + struct buffer_head *bh; + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(sb)) + return 0; + + bh = ext4_get_bitmap(sb, group_data->inode_bitmap); +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index b52a34bc7600..6795499fefab 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -140,8 +140,7 @@ static __le32 ext4_superblock_csum(struct super_block *sb, + int ext4_superblock_csum_verify(struct super_block *sb, + struct ext4_super_block *es) + { +- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(sb)) + return 1; + + return es->s_checksum == ext4_superblock_csum(sb, es); +@@ -151,8 +150,7 @@ void ext4_superblock_csum_set(struct super_block *sb) + { + struct ext4_super_block *es = EXT4_SB(sb)->s_es; + +- if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(sb)) + return; + + es->s_checksum = ext4_superblock_csum(sb, es); +@@ -977,7 +975,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb, + * Currently we don't know the generation for parent directory, so + * a generation of 0 means "accept any" + */ +- inode = ext4_iget(sb, ino); ++ inode = ext4_iget_normal(sb, ino); + if (IS_ERR(inode)) + return ERR_CAST(inode); + if (generation && inode->i_generation != generation) { +@@ -1687,13 +1685,6 @@ 
static int parse_options(char *options, struct super_block *sb, + "not specified"); + return 0; + } +- } else { +- if (sbi->s_jquota_fmt) { +- ext4_msg(sb, KERN_ERR, "journaled quota format " +- "specified with no journaling " +- "enabled"); +- return 0; +- } + } + #endif + if (test_opt(sb, DIOREAD_NOLOCK)) { +@@ -1991,8 +1982,7 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, + __u16 crc = 0; + __le32 le_group = cpu_to_le32(block_group); + +- if ((sbi->s_es->s_feature_ro_compat & +- cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) { ++ if (ext4_has_metadata_csum(sbi->s_sb)) { + /* Use new metadata_csum algorithm */ + __le16 save_csum; + __u32 csum32; +@@ -2010,6 +2000,10 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, + } + + /* old crc16 code */ ++ if (!(sbi->s_es->s_feature_ro_compat & ++ cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM))) ++ return 0; ++ + offset = offsetof(struct ext4_group_desc, bg_checksum); + + crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); +@@ -3139,8 +3133,7 @@ static int set_journal_csum_feature_set(struct super_block *sb) + int compat, incompat; + struct ext4_sb_info *sbi = EXT4_SB(sb); + +- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { ++ if (ext4_has_metadata_csum(sb)) { + /* journal checksum v3 */ + compat = 0; + incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; +@@ -3447,8 +3440,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + } + + /* Precompute checksum seed for all metadata */ +- if (EXT4_HAS_RO_COMPAT_FEATURE(sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (ext4_has_metadata_csum(sb)) + sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, + sizeof(es->s_uuid)); + +@@ -3466,6 +3458,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + #ifdef CONFIG_EXT4_FS_POSIX_ACL + set_opt(sb, POSIX_ACL); + #endif ++ /* don't forget to enable journal_csum when metadata_csum is enabled. 
*/ ++ if (ext4_has_metadata_csum(sb)) ++ set_opt(sb, JOURNAL_CHECKSUM); ++ + if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) + set_opt(sb, JOURNAL_DATA); + else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) +diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c +index 298e9c8da364..a5d2f1b6c5c5 100644 +--- a/fs/ext4/xattr.c ++++ b/fs/ext4/xattr.c +@@ -141,8 +141,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode, + sector_t block_nr, + struct ext4_xattr_header *hdr) + { +- if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && ++ if (ext4_has_metadata_csum(inode->i_sb) && + (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr))) + return 0; + return 1; +@@ -152,8 +151,7 @@ static void ext4_xattr_block_csum_set(struct inode *inode, + sector_t block_nr, + struct ext4_xattr_header *hdr) + { +- if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, +- EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) ++ if (!ext4_has_metadata_csum(inode->i_sb)) + return; + + hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr); +@@ -189,14 +187,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size) + } + + static int +-ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end) ++ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end, ++ void *value_start) + { +- while (!IS_LAST_ENTRY(entry)) { +- struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry); ++ struct ext4_xattr_entry *e = entry; ++ ++ while (!IS_LAST_ENTRY(e)) { ++ struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e); + if ((void *)next >= end) + return -EIO; +- entry = next; ++ e = next; + } ++ ++ while (!IS_LAST_ENTRY(entry)) { ++ if (entry->e_value_size != 0 && ++ (value_start + le16_to_cpu(entry->e_value_offs) < ++ (void *)e + sizeof(__u32) || ++ value_start + le16_to_cpu(entry->e_value_offs) + ++ le32_to_cpu(entry->e_value_size) > end)) ++ return -EIO; ++ entry = EXT4_XATTR_NEXT(entry); ++ } ++ + return 0; + } + +@@ -213,7 +225,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh) + return -EIO; + if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh))) + return -EIO; +- error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size); ++ error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size, ++ bh->b_data); + if (!error) + set_buffer_verified(bh); + return error; +@@ -329,7 +342,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name, + header = IHDR(inode, raw_inode); + entry = IFIRST(header); + end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; +- error = ext4_xattr_check_names(entry, end); ++ error = ext4_xattr_check_names(entry, end, entry); + if (error) + goto cleanup; + error = ext4_xattr_find_entry(&entry, name_index, name, +@@ -457,7 +470,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size) + raw_inode = ext4_raw_inode(&iloc); + header = IHDR(inode, raw_inode); + end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; +- error = ext4_xattr_check_names(IFIRST(header), end); ++ error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header)); + if (error) + goto cleanup; + error = ext4_xattr_list_entries(dentry, IFIRST(header), +@@ -972,7 +985,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i, + is->s.here = is->s.first; + is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; + if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { +- error = 
ext4_xattr_check_names(IFIRST(header), is->s.end); ++ error = ext4_xattr_check_names(IFIRST(header), is->s.end, ++ IFIRST(header)); + if (error) + return error; + /* Find the named attribute. */ +diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c +index 20dbfabbf874..c4166471ddf0 100644 +--- a/fs/jbd2/recovery.c ++++ b/fs/jbd2/recovery.c +@@ -525,6 +525,7 @@ static int do_one_pass(journal_t *journal, + !jbd2_descr_block_csum_verify(journal, + bh->b_data)) { + err = -EIO; ++ brelse(bh); + goto failed; + } + +diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h +index 413ef89c2d1b..046fee8b6e9b 100644 +--- a/fs/jffs2/jffs2_fs_sb.h ++++ b/fs/jffs2/jffs2_fs_sb.h +@@ -134,8 +134,6 @@ struct jffs2_sb_info { + struct rw_semaphore wbuf_sem; /* Protects the write buffer */ + + struct delayed_work wbuf_dwork; /* write-buffer write-out work */ +- int wbuf_queued; /* non-zero delayed work is queued */ +- spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and and wbuf_queued */ + + unsigned char *oobbuf; + int oobavail; /* How many bytes are available for JFFS2 in OOB */ +diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c +index a6597d60d76d..09ed55190ee2 100644 +--- a/fs/jffs2/wbuf.c ++++ b/fs/jffs2/wbuf.c +@@ -1162,10 +1162,6 @@ static void delayed_wbuf_sync(struct work_struct *work) + struct jffs2_sb_info *c = work_to_sb(work); + struct super_block *sb = OFNI_BS_2SFFJ(c); + +- spin_lock(&c->wbuf_dwork_lock); +- c->wbuf_queued = 0; +- spin_unlock(&c->wbuf_dwork_lock); +- + if (!(sb->s_flags & MS_RDONLY)) { + jffs2_dbg(1, "%s()\n", __func__); + jffs2_flush_wbuf_gc(c, 0); +@@ -1180,14 +1176,9 @@ void jffs2_dirty_trigger(struct jffs2_sb_info *c) + if (sb->s_flags & MS_RDONLY) + return; + +- spin_lock(&c->wbuf_dwork_lock); +- if (!c->wbuf_queued) { ++ delay = msecs_to_jiffies(dirty_writeback_interval * 10); ++ if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay)) + jffs2_dbg(1, "%s()\n", __func__); +- delay = msecs_to_jiffies(dirty_writeback_interval * 10); +- queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay); +- c->wbuf_queued = 1; +- } +- spin_unlock(&c->wbuf_dwork_lock); + } + + int jffs2_nand_flash_setup(struct jffs2_sb_info *c) +@@ -1211,7 +1202,6 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c) + + /* Initialise write buffer */ + init_rwsem(&c->wbuf_sem); +- spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); + c->wbuf_pagesize = c->mtd->writesize; + c->wbuf_ofs = 0xFFFFFFFF; +@@ -1251,7 +1241,6 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) { + + /* Initialize write buffer */ + init_rwsem(&c->wbuf_sem); +- spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); + c->wbuf_pagesize = c->mtd->erasesize; + +@@ -1311,7 +1300,6 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { + + /* Initialize write buffer */ + init_rwsem(&c->wbuf_sem); +- spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); + + c->wbuf_pagesize = c->mtd->writesize; +@@ -1346,7 +1334,6 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) { + return 0; + + init_rwsem(&c->wbuf_sem); +- spin_lock_init(&c->wbuf_dwork_lock); + INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); + + c->wbuf_pagesize = c->mtd->writesize; +diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c +index 1812f026960c..6ae664b489af 100644 +--- a/fs/lockd/mon.c ++++ b/fs/lockd/mon.c +@@ -159,6 +159,12 @@ static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res, + + msg.rpc_proc = 
&clnt->cl_procinfo[proc]; + status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN); ++ if (status == -ECONNREFUSED) { ++ dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n", ++ status); ++ rpc_force_rebind(clnt); ++ status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN); ++ } + if (status < 0) + dprintk("lockd: NSM upcall RPC failed, status=%d\n", + status); +diff --git a/fs/namei.c b/fs/namei.c +index 3ac674b793bf..1004966437f9 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -3159,7 +3159,8 @@ static int do_tmpfile(int dfd, struct filename *pathname, + if (error) + goto out2; + audit_inode(pathname, nd->path.dentry, 0); +- error = may_open(&nd->path, op->acc_mode, op->open_flag); ++ /* Don't check for other permissions, the inode was just created */ ++ error = may_open(&nd->path, MAY_OPEN, op->open_flag); + if (error) + goto out2; + file->f_path.mnt = nd->path.mnt; +diff --git a/fs/namespace.c b/fs/namespace.c +index 4b14bfc4cfce..d00750d2f91e 100644 +--- a/fs/namespace.c ++++ b/fs/namespace.c +@@ -2747,6 +2747,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root, + /* make sure we can reach put_old from new_root */ + if (!is_path_reachable(old_mnt, old.dentry, &new)) + goto out4; ++ /* make certain new is below the root */ ++ if (!is_path_reachable(new_mnt, new.dentry, &root)) ++ goto out4; + root_mp->m_count++; /* pin it so it won't go away */ + br_write_lock(&vfsmount_lock); + detach_mnt(new_mnt, &parent_path); +diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c +index 08c8e023c157..25024d5060da 100644 +--- a/fs/nfsd/nfs4proc.c ++++ b/fs/nfsd/nfs4proc.c +@@ -1233,7 +1233,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp) + */ + if (argp->opcnt == resp->opcnt) + return false; +- ++ if (next->opnum == OP_ILLEGAL) ++ return false; + nextd = OPDESC(next); + /* + * Rest of 2.6.3.1.1: certain operations will return WRONGSEC +diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c +index e9a80e4553a3..fafac65804d6 100644 +--- a/fs/nfsd/vfs.c ++++ b/fs/nfsd/vfs.c +@@ -509,6 +509,9 @@ set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key) + char *buf = NULL; + int error = 0; + ++ if (!pacl) ++ return vfs_setxattr(dentry, key, NULL, 0, 0); ++ + buflen = posix_acl_xattr_size(pacl->a_count); + buf = kmalloc(buflen, GFP_KERNEL); + error = -ENOMEM; +diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c +index 12823845d324..14120a3c6195 100644 +--- a/fs/pstore/inode.c ++++ b/fs/pstore/inode.c +@@ -319,10 +319,10 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count, + compressed ? 
".enc.z" : ""); + break; + case PSTORE_TYPE_CONSOLE: +- sprintf(name, "console-%s", psname); ++ sprintf(name, "console-%s-%lld", psname, id); + break; + case PSTORE_TYPE_FTRACE: +- sprintf(name, "ftrace-%s", psname); ++ sprintf(name, "ftrace-%s-%lld", psname, id); + break; + case PSTORE_TYPE_MCE: + sprintf(name, "mce-%s-%lld", psname, id); +diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c +index 7f30bdc57d13..f56a35758112 100644 +--- a/fs/quota/dquot.c ++++ b/fs/quota/dquot.c +@@ -637,7 +637,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type) + dqstats_inc(DQST_LOOKUPS); + err = sb->dq_op->write_dquot(dquot); + if (!ret && err) +- err = ret; ++ ret = err; + dqput(dquot); + spin_lock(&dq_list_lock); + } +diff --git a/fs/super.c b/fs/super.c +index fb68a4c90c98..3e39572b2f51 100644 +--- a/fs/super.c ++++ b/fs/super.c +@@ -81,6 +81,8 @@ static unsigned long super_cache_scan(struct shrinker *shrink, + inodes = list_lru_count_node(&sb->s_inode_lru, sc->nid); + dentries = list_lru_count_node(&sb->s_dentry_lru, sc->nid); + total_objects = dentries + inodes + fs_objects + 1; ++ if (!total_objects) ++ total_objects = 1; + + /* proportion the scan between the caches */ + dentries = mult_frac(sc->nr_to_scan, dentries, total_objects); +diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c +index ff8229340cd5..26b69b2d4a45 100644 +--- a/fs/ubifs/commit.c ++++ b/fs/ubifs/commit.c +@@ -166,15 +166,10 @@ static int do_commit(struct ubifs_info *c) + err = ubifs_orphan_end_commit(c); + if (err) + goto out; +- old_ltail_lnum = c->ltail_lnum; +- err = ubifs_log_end_commit(c, new_ltail_lnum); +- if (err) +- goto out; + err = dbg_check_old_index(c, &zroot); + if (err) + goto out; + +- mutex_lock(&c->mst_mutex); + c->mst_node->cmt_no = cpu_to_le64(c->cmt_no); + c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum); + c->mst_node->root_lnum = cpu_to_le32(zroot.lnum); +@@ -203,8 +198,9 @@ static int do_commit(struct ubifs_info *c) + c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS); + else + c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS); +- err = ubifs_write_master(c); +- mutex_unlock(&c->mst_mutex); ++ ++ old_ltail_lnum = c->ltail_lnum; ++ err = ubifs_log_end_commit(c, new_ltail_lnum); + if (err) + goto out; + +diff --git a/fs/ubifs/log.c b/fs/ubifs/log.c +index 36bd4efd0819..06649d21b056 100644 +--- a/fs/ubifs/log.c ++++ b/fs/ubifs/log.c +@@ -106,10 +106,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c) + h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs; + t = (long long)c->ltail_lnum * c->leb_size; + +- if (h >= t) ++ if (h > t) + return c->log_bytes - h + t; +- else ++ else if (h != t) + return t - h; ++ else if (c->lhead_lnum != c->ltail_lnum) ++ return 0; ++ else ++ return c->log_bytes; + } + + /** +@@ -447,9 +451,9 @@ out: + * @ltail_lnum: new log tail LEB number + * + * This function is called on when the commit operation was finished. It +- * moves log tail to new position and unmaps LEBs which contain obsolete data. +- * Returns zero in case of success and a negative error code in case of +- * failure. ++ * moves log tail to new position and updates the master node so that it stores ++ * the new log tail LEB number. Returns zero in case of success and a negative ++ * error code in case of failure. 
+ */ + int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum) + { +@@ -477,7 +481,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum) + spin_unlock(&c->buds_lock); + + err = dbg_check_bud_bytes(c); ++ if (err) ++ goto out; + ++ err = ubifs_write_master(c); ++ ++out: + mutex_unlock(&c->log_mutex); + return err; + } +diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c +index ab83ace9910a..1a4bb9e8b3b8 100644 +--- a/fs/ubifs/master.c ++++ b/fs/ubifs/master.c +@@ -352,10 +352,9 @@ int ubifs_read_master(struct ubifs_info *c) + * ubifs_write_master - write master node. + * @c: UBIFS file-system description object + * +- * This function writes the master node. The caller has to take the +- * @c->mst_mutex lock before calling this function. Returns zero in case of +- * success and a negative error code in case of failure. The master node is +- * written twice to enable recovery. ++ * This function writes the master node. Returns zero in case of success and a ++ * negative error code in case of failure. The master node is written twice to ++ * enable recovery. + */ + int ubifs_write_master(struct ubifs_info *c) + { +diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c +index 3e4aa7281e04..151c0b4873fb 100644 +--- a/fs/ubifs/super.c ++++ b/fs/ubifs/super.c +@@ -1971,7 +1971,6 @@ static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi) + mutex_init(&c->lp_mutex); + mutex_init(&c->tnc_mutex); + mutex_init(&c->log_mutex); +- mutex_init(&c->mst_mutex); + mutex_init(&c->umount_mutex); + mutex_init(&c->bu_mutex); + mutex_init(&c->write_reserve_mutex); +diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h +index e8c8cfe1435c..7ab9c710c749 100644 +--- a/fs/ubifs/ubifs.h ++++ b/fs/ubifs/ubifs.h +@@ -1042,7 +1042,6 @@ struct ubifs_debug_info; + * + * @mst_node: master node + * @mst_offs: offset of valid master node +- * @mst_mutex: protects the master node area, @mst_node, and @mst_offs + * + * @max_bu_buf_len: maximum bulk-read buffer length + * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu +@@ -1282,7 +1281,6 @@ struct ubifs_info { + + struct ubifs_mst_node *mst_node; + int mst_offs; +- struct mutex mst_mutex; + + int max_bu_buf_len; + struct mutex bu_mutex; +diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c +index 5dcc68019d1b..dc602b564255 100644 +--- a/fs/xfs/xfs_mount.c ++++ b/fs/xfs/xfs_mount.c +@@ -318,7 +318,6 @@ reread: + * Initialize the mount structure from the superblock. + */ + xfs_sb_from_disk(&mp->m_sb, XFS_BUF_TO_SBP(bp)); +- xfs_sb_quota_from_disk(&mp->m_sb); + + /* + * We must be able to do sector-sized and sector-aligned IO. +diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c +index 38b7df67ba7c..1351ff0d77ab 100644 +--- a/fs/xfs/xfs_sb.c ++++ b/fs/xfs/xfs_sb.c +@@ -406,10 +406,11 @@ xfs_sb_quota_from_disk(struct xfs_sb *sbp) + } + } + +-void +-xfs_sb_from_disk( ++static void ++__xfs_sb_from_disk( + struct xfs_sb *to, +- xfs_dsb_t *from) ++ xfs_dsb_t *from, ++ bool convert_xquota) + { + to->sb_magicnum = be32_to_cpu(from->sb_magicnum); + to->sb_blocksize = be32_to_cpu(from->sb_blocksize); +@@ -465,6 +466,17 @@ xfs_sb_from_disk( + to->sb_pad = 0; + to->sb_pquotino = be64_to_cpu(from->sb_pquotino); + to->sb_lsn = be64_to_cpu(from->sb_lsn); ++ /* Convert on-disk flags to in-memory flags? 
*/ ++ if (convert_xquota) ++ xfs_sb_quota_from_disk(to); ++} ++ ++void ++xfs_sb_from_disk( ++ struct xfs_sb *to, ++ xfs_dsb_t *from) ++{ ++ __xfs_sb_from_disk(to, from, true); + } + + static inline void +@@ -580,7 +592,11 @@ xfs_sb_verify( + struct xfs_mount *mp = bp->b_target->bt_mount; + struct xfs_sb sb; + +- xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp)); ++ /* ++ * Use call variant which doesn't convert quota flags from disk ++ * format, because xfs_mount_validate_sb checks the on-disk flags. ++ */ ++ __xfs_sb_from_disk(&sb, XFS_BUF_TO_SBP(bp), false); + + /* + * Only check the in progress field for the primary superblock as +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h +index 0c5e50e319be..b521d1cd54fa 100644 +--- a/include/drm/drm_pciids.h ++++ b/include/drm/drm_pciids.h +@@ -74,7 +74,6 @@ + {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ +- {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ + {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ + {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ +diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h +index cfdc884405b7..baa6f11b1837 100644 +--- a/include/drm/i915_powerwell.h ++++ b/include/drm/i915_powerwell.h +@@ -30,7 +30,8 @@ + #define _I915_POWERWELL_H_ + + /* For use by hda_i915 driver */ +-extern void i915_request_power_well(void); +-extern void i915_release_power_well(void); ++extern int i915_request_power_well(void); ++extern int i915_release_power_well(void); ++extern int i915_get_cdclk_freq(void); + + #endif /* _I915_POWERWELL_H_ */ +diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h +index 0e6f765aa1f5..b1056783c105 100644 +--- a/include/linux/blkdev.h ++++ b/include/linux/blkdev.h +@@ -1198,10 +1198,9 @@ static inline int queue_alignment_offset(struct request_queue *q) + static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) + { + unsigned int granularity = max(lim->physical_block_size, lim->io_min); +- unsigned int alignment = (sector << 9) & (granularity - 1); ++ unsigned int alignment = sector_div(sector, granularity >> 9) << 9; + +- return (granularity + lim->alignment_offset - alignment) +- & (granularity - 1); ++ return (granularity + lim->alignment_offset - alignment) % granularity; + } + + static inline int bdev_alignment_offset(struct block_device *bdev) +diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h +index 24545cd90a25..02ae99e8e6d3 100644 +--- a/include/linux/compiler-gcc.h ++++ b/include/linux/compiler-gcc.h +@@ -37,6 +37,9 @@ + __asm__ ("" : "=r"(__ptr) : "0"(ptr)); \ + (typeof(ptr)) (__ptr + (off)); }) + ++/* Make the optimizer believe the variable can be manipulated arbitrarily. 
*/ ++#define OPTIMIZER_HIDE_VAR(var) __asm__ ("" : "=r" (var) : "0" (var)) ++ + #ifdef __CHECKER__ + #define __must_be_array(arr) 0 + #else +diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h +index dc1bd3dcf11f..5529c5239421 100644 +--- a/include/linux/compiler-intel.h ++++ b/include/linux/compiler-intel.h +@@ -15,6 +15,7 @@ + */ + #undef barrier + #undef RELOC_HIDE ++#undef OPTIMIZER_HIDE_VAR + + #define barrier() __memory_barrier() + +@@ -23,6 +24,12 @@ + __ptr = (unsigned long) (ptr); \ + (typeof(ptr)) (__ptr + (off)); }) + ++/* This should act as an optimization barrier on var. ++ * Given that this compiler does not have inline assembly, a compiler barrier ++ * is the best we can do. ++ */ ++#define OPTIMIZER_HIDE_VAR(var) barrier() ++ + /* Intel ECC compiler doesn't support __builtin_types_compatible_p() */ + #define __must_be_array(a) 0 + +diff --git a/include/linux/compiler.h b/include/linux/compiler.h +index 92669cd182a6..a2329c5e6206 100644 +--- a/include/linux/compiler.h ++++ b/include/linux/compiler.h +@@ -170,6 +170,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); + (typeof(ptr)) (__ptr + (off)); }) + #endif + ++#ifndef OPTIMIZER_HIDE_VAR ++#define OPTIMIZER_HIDE_VAR(var) barrier() ++#endif ++ + /* Not-quite-unique ID. */ + #ifndef __UNIQUE_ID + # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) +diff --git a/include/linux/hid.h b/include/linux/hid.h +index 31b9d299ef6c..00c88fccd162 100644 +--- a/include/linux/hid.h ++++ b/include/linux/hid.h +@@ -286,6 +286,7 @@ struct hid_item { + #define HID_QUIRK_HIDINPUT_FORCE 0x00000080 + #define HID_QUIRK_NO_EMPTY_INPUT 0x00000100 + #define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200 ++#define HID_QUIRK_ALWAYS_POLL 0x00000400 + #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 + #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 + #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 +diff --git a/include/linux/mm.h b/include/linux/mm.h +index 2b3a5330dcf2..36556b2e07f8 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -1009,6 +1009,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, + + extern void truncate_pagecache(struct inode *inode, loff_t new); + extern void truncate_setsize(struct inode *inode, loff_t newsize); ++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); + void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); + int truncate_inode_page(struct address_space *mapping, struct page *page); + int generic_error_remove_page(struct address_space *mapping, struct page *page); +diff --git a/include/linux/of.h b/include/linux/of.h +index f95aee391e30..9007c86b1568 100644 +--- a/include/linux/of.h ++++ b/include/linux/of.h +@@ -252,14 +252,12 @@ extern int of_property_read_u64(const struct device_node *np, + extern int of_property_read_string(struct device_node *np, + const char *propname, + const char **out_string); +-extern int of_property_read_string_index(struct device_node *np, +- const char *propname, +- int index, const char **output); + extern int of_property_match_string(struct device_node *np, + const char *propname, + const char *string); +-extern int of_property_count_strings(struct device_node *np, +- const char *propname); ++extern int of_property_read_string_helper(struct device_node *np, ++ const char *propname, ++ const char **out_strs, size_t sz, int index); + extern int of_device_is_compatible(const struct device_node *device, + const char *); + 
extern int of_device_is_available(const struct device_node *device); +@@ -434,15 +432,9 @@ static inline int of_property_read_string(struct device_node *np, + return -ENOSYS; + } + +-static inline int of_property_read_string_index(struct device_node *np, +- const char *propname, int index, +- const char **out_string) +-{ +- return -ENOSYS; +-} +- +-static inline int of_property_count_strings(struct device_node *np, +- const char *propname) ++static inline int of_property_read_string_helper(struct device_node *np, ++ const char *propname, ++ const char **out_strs, size_t sz, int index) + { + return -ENOSYS; + } +@@ -544,6 +536,70 @@ static inline int of_node_to_nid(struct device_node *np) + #endif + + /** ++ * of_property_read_string_array() - Read an array of strings from a multiple ++ * strings property. ++ * @np: device node from which the property value is to be read. ++ * @propname: name of the property to be searched. ++ * @out_strs: output array of string pointers. ++ * @sz: number of array elements to read. ++ * ++ * Search for a property in a device tree node and retrieve a list of ++ * terminated string values (pointer to data, not a copy) in that property. ++ * ++ * If @out_strs is NULL, the number of strings in the property is returned. ++ */ ++static inline int of_property_read_string_array(struct device_node *np, ++ const char *propname, const char **out_strs, ++ size_t sz) ++{ ++ return of_property_read_string_helper(np, propname, out_strs, sz, 0); ++} ++ ++/** ++ * of_property_count_strings() - Find and return the number of strings from a ++ * multiple strings property. ++ * @np: device node from which the property value is to be read. ++ * @propname: name of the property to be searched. ++ * ++ * Search for a property in a device tree node and retrieve the number of null ++ * terminated string contain in it. Returns the number of strings on ++ * success, -EINVAL if the property does not exist, -ENODATA if property ++ * does not have a value, and -EILSEQ if the string is not null-terminated ++ * within the length of the property data. ++ */ ++static inline int of_property_count_strings(struct device_node *np, ++ const char *propname) ++{ ++ return of_property_read_string_helper(np, propname, NULL, 0, 0); ++} ++ ++/** ++ * of_property_read_string_index() - Find and read a string from a multiple ++ * strings property. ++ * @np: device node from which the property value is to be read. ++ * @propname: name of the property to be searched. ++ * @index: index of the string in the list of strings ++ * @out_string: pointer to null terminated return string, modified only if ++ * return value is 0. ++ * ++ * Search for a property in a device tree node and retrieve a null ++ * terminated string value (pointer to data, not a copy) in the list of strings ++ * contained in that property. ++ * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if ++ * property does not have a value, and -EILSEQ if the string is not ++ * null-terminated within the length of the property data. ++ * ++ * The out_string pointer is modified only if a valid string can be decoded. ++ */ ++static inline int of_property_read_string_index(struct device_node *np, ++ const char *propname, ++ int index, const char **output) ++{ ++ int rc = of_property_read_string_helper(np, propname, output, 1, index); ++ return rc < 0 ? rc : 0; ++} ++ ++/** + * of_property_read_bool - Findfrom a property + * @np: device node from which the property value is to be read. 
+ * @propname: name of the property to be searched. +diff --git a/include/linux/oom.h b/include/linux/oom.h +index da60007075b5..297cda528855 100644 +--- a/include/linux/oom.h ++++ b/include/linux/oom.h +@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p) + extern unsigned long oom_badness(struct task_struct *p, + struct mem_cgroup *memcg, const nodemask_t *nodemask, + unsigned long totalpages); ++ ++extern int oom_kills_count(void); ++extern void note_oom_kill(void); + extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, + unsigned int points, unsigned long totalpages, + struct mem_cgroup *memcg, nodemask_t *nodemask, +diff --git a/include/linux/string.h b/include/linux/string.h +index ac889c5ea11b..0ed878d0465c 100644 +--- a/include/linux/string.h ++++ b/include/linux/string.h +@@ -129,7 +129,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4); + #endif + + extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, +- const void *from, size_t available); ++ const void *from, size_t available); + + /** + * strstarts - does @str start with @prefix? +@@ -141,7 +141,8 @@ static inline bool strstarts(const char *str, const char *prefix) + return strncmp(str, prefix, strlen(prefix)) == 0; + } + +-extern size_t memweight(const void *ptr, size_t bytes); ++size_t memweight(const void *ptr, size_t bytes); ++void memzero_explicit(void *s, size_t count); + + /** + * kbasename - return the last part of a pathname. +diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h +index c3ddcdc36598..3fb428883460 100644 +--- a/include/linux/usb/quirks.h ++++ b/include/linux/usb/quirks.h +@@ -47,4 +47,7 @@ + /* device generates spurious wakeup, ignore remote wakeup capability */ + #define USB_QUIRK_IGNORE_REMOTE_WAKEUP 0x00000200 + ++/* device can't handle device_qualifier descriptor requests */ ++#define USB_QUIRK_DEVICE_QUALIFIER 0x00000100 ++ + #endif /* __LINUX_USB_QUIRKS_H */ +diff --git a/include/net/ipv6.h b/include/net/ipv6.h +index 6b4956e4408f..ea97c94fbc7d 100644 +--- a/include/net/ipv6.h ++++ b/include/net/ipv6.h +@@ -661,6 +661,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add + return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); + } + ++extern void ipv6_proxy_select_ident(struct sk_buff *skb); ++ + extern int ip6_dst_hoplimit(struct dst_entry *dst); + + /* +diff --git a/kernel/freezer.c b/kernel/freezer.c +index aa6a8aadb911..8f9279b9c6d7 100644 +--- a/kernel/freezer.c ++++ b/kernel/freezer.c +@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p) + if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK)) + return false; + ++ if (test_thread_flag(TIF_MEMDIE)) ++ return false; ++ + if (pm_nosig_freezing || cgroup_freezing(p)) + return true; + +diff --git a/kernel/module.c b/kernel/module.c +index 7b15ff67c5aa..f3c612e45330 100644 +--- a/kernel/module.c ++++ b/kernel/module.c +@@ -1882,7 +1882,9 @@ static void free_module(struct module *mod) + + /* We leave it in list to prevent duplicate loads, but make sure + * that noone uses it while it's being deconstructed. 
*/ ++ mutex_lock(&module_mutex); + mod->state = MODULE_STATE_UNFORMED; ++ mutex_unlock(&module_mutex); + + /* Remove dynamic debug info */ + ddebug_remove_module(mod->name); +diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c +index 424c2d4265c9..77e6b83c0431 100644 +--- a/kernel/posix-timers.c ++++ b/kernel/posix-timers.c +@@ -634,6 +634,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, + goto out; + } + } else { ++ memset(&event.sigev_value, 0, sizeof(event.sigev_value)); + event.sigev_notify = SIGEV_SIGNAL; + event.sigev_signo = SIGALRM; + event.sigev_value.sival_int = new_timer->it_id; +diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c +index 0121dab83f43..7ef5244c3164 100644 +--- a/kernel/power/hibernate.c ++++ b/kernel/power/hibernate.c +@@ -491,8 +491,14 @@ int hibernation_restore(int platform_mode) + error = dpm_suspend_start(PMSG_QUIESCE); + if (!error) { + error = resume_target_kernel(platform_mode); +- dpm_resume_end(PMSG_RECOVER); ++ /* ++ * The above should either succeed and jump to the new kernel, ++ * or return with an error. Otherwise things are just ++ * undefined, so let's be paranoid. ++ */ ++ BUG_ON(!error); + } ++ dpm_resume_end(PMSG_RECOVER); + pm_restore_gfp_mask(); + ftrace_start(); + resume_console(); +diff --git a/kernel/power/process.c b/kernel/power/process.c +index 14f9a8d4725d..f1fe7ec110bb 100644 +--- a/kernel/power/process.c ++++ b/kernel/power/process.c +@@ -107,6 +107,28 @@ static int try_to_freeze_tasks(bool user_only) + return todo ? -EBUSY : 0; + } + ++/* ++ * Returns true if all freezable tasks (except for current) are frozen already ++ */ ++static bool check_frozen_processes(void) ++{ ++ struct task_struct *g, *p; ++ bool ret = true; ++ ++ read_lock(&tasklist_lock); ++ for_each_process_thread(g, p) { ++ if (p != current && !freezer_should_skip(p) && ++ !frozen(p)) { ++ ret = false; ++ goto done; ++ } ++ } ++done: ++ read_unlock(&tasklist_lock); ++ ++ return ret; ++} ++ + /** + * freeze_processes - Signal user space processes to enter the refrigerator. + * The current thread will not be frozen. The same process that calls +@@ -117,6 +139,7 @@ static int try_to_freeze_tasks(bool user_only) + int freeze_processes(void) + { + int error; ++ int oom_kills_saved; + + error = __usermodehelper_disable(UMH_FREEZING); + if (error) +@@ -130,12 +153,27 @@ int freeze_processes(void) + + printk("Freezing user space processes ... "); + pm_freezing = true; ++ oom_kills_saved = oom_kills_count(); + error = try_to_freeze_tasks(true); + if (!error) { +- printk("done."); + __usermodehelper_set_disable_depth(UMH_DISABLED); + oom_killer_disable(); ++ ++ /* ++ * There might have been an OOM kill while we were ++ * freezing tasks and the killed task might be still ++ * on the way out so we have to double check for race. 
++ */ ++ if (oom_kills_count() != oom_kills_saved && ++ !check_frozen_processes()) { ++ __usermodehelper_set_disable_depth(UMH_ENABLED); ++ printk("OOM in progress."); ++ error = -EBUSY; ++ goto done; ++ } ++ printk("done."); + } ++done: + printk("\n"); + BUG_ON(in_atomic()); + +diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c +index 559329d9bd2f..d8ce71bbb5bf 100644 +--- a/kernel/trace/trace_syscalls.c ++++ b/kernel/trace/trace_syscalls.c +@@ -312,7 +312,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) + int size; + + syscall_nr = trace_get_syscall_nr(current, regs); +- if (syscall_nr < 0) ++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls) + return; + if (!test_bit(syscall_nr, tr->enabled_enter_syscalls)) + return; +@@ -354,7 +354,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) + int syscall_nr; + + syscall_nr = trace_get_syscall_nr(current, regs); +- if (syscall_nr < 0) ++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls) + return; + if (!test_bit(syscall_nr, tr->enabled_exit_syscalls)) + return; +@@ -557,7 +557,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) + int size; + + syscall_nr = trace_get_syscall_nr(current, regs); +- if (syscall_nr < 0) ++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls) + return; + if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) + return; +@@ -631,7 +631,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) + int size; + + syscall_nr = trace_get_syscall_nr(current, regs); +- if (syscall_nr < 0) ++ if (syscall_nr < 0 || syscall_nr >= NR_syscalls) + return; + if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) + return; +diff --git a/lib/bitmap.c b/lib/bitmap.c +index 06f7e4fe8d2d..e5c4ebe586ba 100644 +--- a/lib/bitmap.c ++++ b/lib/bitmap.c +@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst, + lower = src[off + k]; + if (left && off + k == lim - 1) + lower &= mask; +- dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; ++ dst[k] = lower >> rem; ++ if (rem) ++ dst[k] |= upper << (BITS_PER_LONG - rem); + if (left && k == lim - 1) + dst[k] &= mask; + } +@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst, + upper = src[k]; + if (left && k == lim - 1) + upper &= (1UL << left) - 1; +- dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; ++ dst[k + off] = upper << rem; ++ if (rem) ++ dst[k + off] |= lower >> (BITS_PER_LONG - rem); + if (left && k + off == lim - 1) + dst[k + off] &= (1UL << left) - 1; + } +diff --git a/lib/string.c b/lib/string.c +index e5878de4f101..43d0781daf47 100644 +--- a/lib/string.c ++++ b/lib/string.c +@@ -586,6 +586,22 @@ void *memset(void *s, int c, size_t count) + EXPORT_SYMBOL(memset); + #endif + ++/** ++ * memzero_explicit - Fill a region of memory (e.g. sensitive ++ * keying data) with 0s. ++ * @s: Pointer to the start of the area. ++ * @count: The size of the area. ++ * ++ * memzero_explicit() doesn't need an arch-specific version as ++ * it just invokes the one of memset() implicitly. 
++ */ ++void memzero_explicit(void *s, size_t count) ++{ ++ memset(s, 0, count); ++ OPTIMIZER_HIDE_VAR(s); ++} ++EXPORT_SYMBOL(memzero_explicit); ++ + #ifndef __HAVE_ARCH_MEMCPY + /** + * memcpy - Copy one area of memory to another +diff --git a/mm/huge_memory.c b/mm/huge_memory.c +index 10532dd43abc..e497843f5f65 100644 +--- a/mm/huge_memory.c ++++ b/mm/huge_memory.c +@@ -192,7 +192,7 @@ retry: + preempt_disable(); + if (cmpxchg(&huge_zero_page, NULL, zero_page)) { + preempt_enable(); +- __free_page(zero_page); ++ __free_pages(zero_page, compound_order(zero_page)); + goto retry; + } + +@@ -224,7 +224,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink, + if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { + struct page *zero_page = xchg(&huge_zero_page, NULL); + BUG_ON(zero_page == NULL); +- __free_page(zero_page); ++ __free_pages(zero_page, compound_order(zero_page)); + return HPAGE_PMD_NR; + } + +diff --git a/mm/oom_kill.c b/mm/oom_kill.c +index a9b5b7ffc476..712a0f80451d 100644 +--- a/mm/oom_kill.c ++++ b/mm/oom_kill.c +@@ -406,6 +406,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, + dump_tasks(memcg, nodemask); + } + ++/* ++ * Number of OOM killer invocations (including memcg OOM killer). ++ * Primarily used by PM freezer to check for potential races with ++ * OOM killed frozen task. ++ */ ++static atomic_t oom_kills = ATOMIC_INIT(0); ++ ++int oom_kills_count(void) ++{ ++ return atomic_read(&oom_kills); ++} ++ ++void note_oom_kill(void) ++{ ++ atomic_inc(&oom_kills); ++} ++ + #define K(x) ((x) << (PAGE_SHIFT-10)) + /* + * Must be called while holding a reference to p, which will be released upon +diff --git a/mm/page_alloc.c b/mm/page_alloc.c +index 2f91223dbe93..7abab3b7d140 100644 +--- a/mm/page_alloc.c ++++ b/mm/page_alloc.c +@@ -1589,7 +1589,7 @@ again: + } + + __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); +- if (zone_page_state(zone, NR_ALLOC_BATCH) == 0 && ++ if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && + !zone_is_fair_depleted(zone)) + zone_set_flag(zone, ZONE_FAIR_DEPLETED); + +@@ -2250,6 +2250,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, + } + + /* ++ * PM-freezer should be notified that there might be an OOM killer on ++ * its way to kill and wake somebody up. This is too early and we might ++ * end up not killing anything but false positives are acceptable. ++ * See freeze_processes. ++ */ ++ note_oom_kill(); ++ ++ /* + * Go through the zonelist yet one more time, keep very high watermark + * here, this is only to catch a parallel oom killing, we must fail if + * we're still under heavy pressure. 
+@@ -5652,9 +5660,8 @@ static void __setup_per_zone_wmarks(void) + zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); + + __mod_zone_page_state(zone, NR_ALLOC_BATCH, +- high_wmark_pages(zone) - +- low_wmark_pages(zone) - +- zone_page_state(zone, NR_ALLOC_BATCH)); ++ high_wmark_pages(zone) - low_wmark_pages(zone) - ++ atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); + + setup_zone_migrate_reserve(zone); + spin_unlock_irqrestore(&zone->lock, flags); +diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c +index 6d757e3a872a..e007236f345a 100644 +--- a/mm/page_cgroup.c ++++ b/mm/page_cgroup.c +@@ -170,6 +170,7 @@ static void free_page_cgroup(void *addr) + sizeof(struct page_cgroup) * PAGES_PER_SECTION; + + BUG_ON(PageReserved(page)); ++ kmemleak_free(addr); + free_pages_exact(addr, table_size); + } + } +diff --git a/mm/percpu.c b/mm/percpu.c +index 9bc1bf914cc8..25e2ea52db82 100644 +--- a/mm/percpu.c ++++ b/mm/percpu.c +@@ -1910,8 +1910,6 @@ void __init setup_per_cpu_areas(void) + + if (pcpu_setup_first_chunk(ai, fc) < 0) + panic("Failed to initialize percpu areas."); +- +- pcpu_free_alloc_info(ai); + } + + #endif /* CONFIG_SMP */ +diff --git a/mm/truncate.c b/mm/truncate.c +index 2e84fe59190b..827ad8d2b5cd 100644 +--- a/mm/truncate.c ++++ b/mm/truncate.c +@@ -20,6 +20,7 @@ + #include <linux/buffer_head.h> /* grr. try_to_release_page, + do_invalidatepage */ + #include <linux/cleancache.h> ++#include <linux/rmap.h> + #include "internal.h" + + static void clear_exceptional_entry(struct address_space *mapping, +@@ -661,12 +662,67 @@ EXPORT_SYMBOL(truncate_pagecache); + */ + void truncate_setsize(struct inode *inode, loff_t newsize) + { ++ loff_t oldsize = inode->i_size; ++ + i_size_write(inode, newsize); ++ if (newsize > oldsize) ++ pagecache_isize_extended(inode, oldsize, newsize); + truncate_pagecache(inode, newsize); + } + EXPORT_SYMBOL(truncate_setsize); + + /** ++ * pagecache_isize_extended - update pagecache after extension of i_size ++ * @inode: inode for which i_size was extended ++ * @from: original inode size ++ * @to: new inode size ++ * ++ * Handle extension of inode size either caused by extending truncate or by ++ * write starting after current i_size. We mark the page straddling current ++ * i_size RO so that page_mkwrite() is called on the nearest write access to ++ * the page. This way filesystem can be sure that page_mkwrite() is called on ++ * the page before user writes to the page via mmap after the i_size has been ++ * changed. ++ * ++ * The function must be called after i_size is updated so that page fault ++ * coming after we unlock the page will already see the new i_size. ++ * The function must be called while we still hold i_mutex - this not only ++ * makes sure i_size is stable but also that userspace cannot observe new ++ * i_size value before we are prepared to store mmap writes at new inode size. ++ */ ++void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to) ++{ ++ int bsize = 1 << inode->i_blkbits; ++ loff_t rounded_from; ++ struct page *page; ++ pgoff_t index; ++ ++ WARN_ON(to > inode->i_size); ++ ++ if (from >= to || bsize == PAGE_CACHE_SIZE) ++ return; ++ /* Page straddling @from will not have any hole block created? */ ++ rounded_from = round_up(from, bsize); ++ if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1))) ++ return; ++ ++ index = from >> PAGE_CACHE_SHIFT; ++ page = find_lock_page(inode->i_mapping, index); ++ /* Page not cached? 
Nothing to do */ ++ if (!page) ++ return; ++ /* ++ * See clear_page_dirty_for_io() for details why set_page_dirty() ++ * is needed. ++ */ ++ if (page_mkclean(page)) ++ set_page_dirty(page); ++ unlock_page(page); ++ page_cache_release(page); ++} ++EXPORT_SYMBOL(pagecache_isize_extended); ++ ++/** + * truncate_pagecache_range - unmap and remove pagecache that is hole-punched + * @inode: inode + * @lstart: offset of beginning of hole +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index 9f1014ab86c6..ec12b169931b 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -534,7 +534,7 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi) + return 1; + + attrlen = rtnh_attrlen(rtnh); +- if (attrlen < 0) { ++ if (attrlen > 0) { + struct nlattr *nla, *attrs = rtnh_attrs(rtnh); + + nla = nla_find(attrs, attrlen, RTA_GATEWAY); +diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c +index c1cb9475fadf..c2dcee28d071 100644 +--- a/net/ipv4/ip_output.c ++++ b/net/ipv4/ip_output.c +@@ -1478,6 +1478,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, + struct sk_buff *nskb; + struct sock *sk; + struct inet_sock *inet; ++ int err; + + if (ip_options_echo(&replyopts.opt.opt, skb)) + return; +@@ -1514,8 +1515,13 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, + sock_net_set(sk, net); + __skb_queue_head_init(&sk->sk_write_queue); + sk->sk_sndbuf = sysctl_wmem_default; +- ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, len, 0, +- &ipc, &rt, MSG_DONTWAIT); ++ err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, ++ len, 0, &ipc, &rt, MSG_DONTWAIT); ++ if (unlikely(err)) { ++ ip_flush_pending_frames(sk); ++ goto out; ++ } ++ + nskb = skb_peek(&sk->sk_write_queue); + if (nskb) { + if (arg->csumoffset >= 0) +@@ -1527,7 +1533,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, + skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); + ip_push_pending_frames(sk, &fl4); + } +- ++out: + put_cpu_var(unicast_sock); + + ip_rt_put(rt); +diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c +index 8469d2338727..ff3f84f38e6d 100644 +--- a/net/ipv4/ip_tunnel_core.c ++++ b/net/ipv4/ip_tunnel_core.c +@@ -91,11 +91,12 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto) + skb_pull_rcsum(skb, hdr_len); + + if (inner_proto == htons(ETH_P_TEB)) { +- struct ethhdr *eh = (struct ethhdr *)skb->data; ++ struct ethhdr *eh; + + if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) + return -ENOMEM; + ++ eh = (struct ethhdr *)skb->data; + if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN)) + skb->protocol = eh->h_proto; + else +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index cbe5adaad338..a880ccc10f61 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -2909,61 +2909,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt); + #endif + + #ifdef CONFIG_TCP_MD5SIG +-static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; ++static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool); + static DEFINE_MUTEX(tcp_md5sig_mutex); +- +-static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) +-{ +- int cpu; +- +- for_each_possible_cpu(cpu) { +- struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu); +- +- if (p->md5_desc.tfm) +- crypto_free_hash(p->md5_desc.tfm); +- } +- free_percpu(pool); +-} ++static bool tcp_md5sig_pool_populated = false; + + static void __tcp_alloc_md5sig_pool(void) + { + int cpu; +- struct 
tcp_md5sig_pool __percpu *pool; +- +- pool = alloc_percpu(struct tcp_md5sig_pool); +- if (!pool) +- return; + + for_each_possible_cpu(cpu) { +- struct crypto_hash *hash; +- +- hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); +- if (IS_ERR_OR_NULL(hash)) +- goto out_free; ++ if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) { ++ struct crypto_hash *hash; + +- per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; ++ hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); ++ if (IS_ERR_OR_NULL(hash)) ++ return; ++ per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash; ++ } + } +- /* before setting tcp_md5sig_pool, we must commit all writes +- * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool() ++ /* before setting tcp_md5sig_pool_populated, we must commit all writes ++ * to memory. See smp_rmb() in tcp_get_md5sig_pool() + */ + smp_wmb(); +- tcp_md5sig_pool = pool; +- return; +-out_free: +- __tcp_free_md5sig_pool(pool); ++ tcp_md5sig_pool_populated = true; + } + + bool tcp_alloc_md5sig_pool(void) + { +- if (unlikely(!tcp_md5sig_pool)) { ++ if (unlikely(!tcp_md5sig_pool_populated)) { + mutex_lock(&tcp_md5sig_mutex); + +- if (!tcp_md5sig_pool) ++ if (!tcp_md5sig_pool_populated) + __tcp_alloc_md5sig_pool(); + + mutex_unlock(&tcp_md5sig_mutex); + } +- return tcp_md5sig_pool != NULL; ++ return tcp_md5sig_pool_populated; + } + EXPORT_SYMBOL(tcp_alloc_md5sig_pool); + +@@ -2977,13 +2958,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool); + */ + struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) + { +- struct tcp_md5sig_pool __percpu *p; +- + local_bh_disable(); +- p = ACCESS_ONCE(tcp_md5sig_pool); +- if (p) +- return __this_cpu_ptr(p); + ++ if (tcp_md5sig_pool_populated) { ++ /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */ ++ smp_rmb(); ++ return this_cpu_ptr(&tcp_md5sig_pool); ++ } + local_bh_enable(); + return NULL; + } +diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c +index 798eb0f79078..4bd870af05d6 100644 +--- a/net/ipv6/output_core.c ++++ b/net/ipv6/output_core.c +@@ -3,10 +3,48 @@ + * not configured or static. These functions are needed by GSO/GRO implementation. + */ + #include <linux/export.h> ++#include <linux/random.h> ++#include <net/ip.h> + #include <net/ipv6.h> + #include <net/ip6_fib.h> + #include <net/addrconf.h> + ++/* This function exists only for tap drivers that must support broken ++ * clients requesting UFO without specifying an IPv6 fragment ID. ++ * ++ * This is similar to ipv6_select_ident() but we use an independent hash ++ * seed to limit information leakage. ++ * ++ * The network header must be set before calling this. 
++ */ ++void ipv6_proxy_select_ident(struct sk_buff *skb) ++{ ++ static u32 ip6_proxy_idents_hashrnd __read_mostly; ++ struct in6_addr buf[2]; ++ struct in6_addr *addrs; ++ static bool done = false; ++ u32 hash, id; ++ ++ addrs = skb_header_pointer(skb, ++ skb_network_offset(skb) + ++ offsetof(struct ipv6hdr, saddr), ++ sizeof(buf), buf); ++ if (!addrs) ++ return; ++ ++ if (!done) { ++ get_random_bytes(&ip6_proxy_idents_hashrnd, ++ sizeof(ip6_proxy_idents_hashrnd)); ++ done = true; ++ } ++ ++ hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd); ++ hash = __ipv6_addr_jhash(&addrs[0], hash); ++ ++ id = ip_idents_reserve(hash, 1); ++ skb_shinfo(skb)->ip6_frag_id = htonl(id); ++} ++EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident); + + int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) + { +diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c +index e126605cec66..8753b77d4223 100644 +--- a/net/mac80211/rate.c ++++ b/net/mac80211/rate.c +@@ -454,7 +454,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif, + */ + if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) { + u32 basic_rates = vif->bss_conf.basic_rates; +- s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0; ++ s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0; + + rate = &sband->bitrates[rates[0].idx]; + +diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c +index 2a4f35e7b5c0..2735facbbf91 100644 +--- a/net/netlink/af_netlink.c ++++ b/net/netlink/af_netlink.c +@@ -699,7 +699,7 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg, + * after validation, the socket and the ring may only be used by a + * single process, otherwise we fall back to copying. + */ +- if (atomic_long_read(&sk->sk_socket->file->f_count) > 2 || ++ if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 || + atomic_read(&nlk->mapped) > 1) + excl = false; + +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c +index 9add08a2be02..d43b62c4a8e5 100644 +--- a/security/integrity/evm/evm_main.c ++++ b/security/integrity/evm/evm_main.c +@@ -296,9 +296,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name, + { + const struct evm_ima_xattr_data *xattr_data = xattr_value; + +- if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0) +- && (xattr_data->type == EVM_XATTR_HMAC)) +- return -EPERM; ++ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) { ++ if (!xattr_value_len) ++ return -EINVAL; ++ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG) ++ return -EPERM; ++ } + return evm_protect_xattr(dentry, xattr_name, xattr_value, + xattr_value_len); + } +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index 630b8adf0ce5..3ba608a61bbf 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -439,6 +439,7 @@ next_inode: + list_entry(sbsec->isec_head.next, + struct inode_security_struct, list); + struct inode *inode = isec->inode; ++ list_del_init(&isec->list); + spin_unlock(&sbsec->isec_lock); + inode = igrab(inode); + if (inode) { +@@ -447,7 +448,6 @@ next_inode: + iput(inode); + } + spin_lock(&sbsec->isec_lock); +- list_del_init(&isec->list); + goto next_inode; + } + spin_unlock(&sbsec->isec_lock); +diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c +index af49721ba0e3..c4ac3c1e19af 100644 +--- a/sound/core/pcm_compat.c ++++ b/sound/core/pcm_compat.c +@@ -206,6 +206,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream, + if (err < 0) + return err; + ++ if (clear_user(src, sizeof(*src))) ++ return -EFAULT; + if 
(put_user(status.state, &src->state) || + compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) || + compat_put_timespec(&status.tstamp, &src->tstamp) || +diff --git a/sound/pci/hda/hda_i915.c b/sound/pci/hda/hda_i915.c +index 76c13d5b3ca0..9e136beaa591 100644 +--- a/sound/pci/hda/hda_i915.c ++++ b/sound/pci/hda/hda_i915.c +@@ -22,20 +22,28 @@ + #include <drm/i915_powerwell.h> + #include "hda_i915.h" + +-static void (*get_power)(void); +-static void (*put_power)(void); ++static int (*get_power)(void); ++static int (*put_power)(void); ++static int (*get_cdclk)(void); + +-void hda_display_power(bool enable) ++int hda_display_power(bool enable) + { + if (!get_power || !put_power) +- return; ++ return -ENODEV; + + snd_printdd("HDA display power %s \n", + enable ? "Enable" : "Disable"); + if (enable) +- get_power(); ++ return get_power(); + else +- put_power(); ++ return put_power(); ++} ++ ++int haswell_get_cdclk(void) ++{ ++ if (!get_cdclk) ++ return -EINVAL; ++ return get_cdclk(); + } + + int hda_i915_init(void) +@@ -55,6 +63,10 @@ int hda_i915_init(void) + return -ENODEV; + } + ++ get_cdclk = symbol_request(i915_get_cdclk_freq); ++ if (!get_cdclk) /* may have abnormal BCLK and audio playback rate */ ++ snd_printd("hda-i915: get_cdclk symbol get fail\n"); ++ + snd_printd("HDA driver get symbol successfully from i915 module\n"); + + return err; +@@ -70,6 +82,10 @@ int hda_i915_exit(void) + symbol_put(i915_release_power_well); + put_power = NULL; + } ++ if (get_cdclk) { ++ symbol_put(i915_get_cdclk_freq); ++ get_cdclk = NULL; ++ } + + return 0; + } +diff --git a/sound/pci/hda/hda_i915.h b/sound/pci/hda/hda_i915.h +index 5a63da2c53e5..26869fafe11a 100644 +--- a/sound/pci/hda/hda_i915.h ++++ b/sound/pci/hda/hda_i915.h +@@ -17,11 +17,13 @@ + #define __SOUND_HDA_I915_H + + #ifdef CONFIG_SND_HDA_I915 +-void hda_display_power(bool enable); ++int hda_display_power(bool enable); ++int haswell_get_cdclk(void); + int hda_i915_init(void); + int hda_i915_exit(void); + #else +-static inline void hda_display_power(bool enable) {} ++static inline int hda_display_power(bool enable) { return 0; } ++static inline int haswell_get_cdclk(void) { return -EINVAL; } + static inline int hda_i915_init(void) + { + return -ENODEV; +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 37806a97c878..86e63b665777 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -748,6 +748,54 @@ static inline void mark_runtime_wc(struct azx *chip, struct azx_dev *azx_dev, + } + #endif + ++#ifdef CONFIG_SND_HDA_I915 ++/* Intel HSW/BDW display HDA controller Extended Mode registers. ++ * EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display ++ * Clock) to 24MHz BCLK: BCLK = CDCLK * M / N ++ * The values will be lost when the display power well is disabled. 
++ */ ++#define ICH6_REG_EM4 0x100c ++#define ICH6_REG_EM5 0x1010 ++ ++static void haswell_set_bclk(struct azx *chip) ++{ ++ int cdclk_freq; ++ unsigned int bclk_m, bclk_n; ++ ++ cdclk_freq = haswell_get_cdclk(); ++ if (cdclk_freq < 0) ++ return; ++ ++ switch (cdclk_freq) { ++ case 337500: ++ bclk_m = 16; ++ bclk_n = 225; ++ break; ++ ++ case 450000: ++ default: /* default CDCLK 450MHz */ ++ bclk_m = 4; ++ bclk_n = 75; ++ break; ++ ++ case 540000: ++ bclk_m = 4; ++ bclk_n = 90; ++ break; ++ ++ case 675000: ++ bclk_m = 8; ++ bclk_n = 225; ++ break; ++ } ++ ++ azx_writew(chip, EM4, bclk_m); ++ azx_writew(chip, EM5, bclk_n); ++} ++#else ++static inline void haswell_set_bclk(struct azx *chip) {} ++#endif ++ + static int azx_acquire_irq(struct azx *chip, int do_disconnect); + static int azx_send_cmd(struct hda_bus *bus, unsigned int val); + /* +@@ -2951,8 +2999,10 @@ static int azx_resume(struct device *dev) + if (chip->disabled || chip->init_failed) + return 0; + +- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { + hda_display_power(true); ++ haswell_set_bclk(chip); ++ } + pci_set_power_state(pci, PCI_D0); + pci_restore_state(pci); + if (pci_enable_device(pci) < 0) { +@@ -3015,8 +3065,10 @@ static int azx_runtime_resume(struct device *dev) + if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) + return 0; + +- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { + hda_display_power(true); ++ haswell_set_bclk(chip); ++ } + + /* Read STATESTS before controller reset */ + status = azx_readw(chip, STATESTS); +@@ -3744,6 +3796,10 @@ static int azx_first_init(struct azx *chip) + + /* initialize chip */ + azx_init_pci(chip); ++ ++ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) ++ haswell_set_bclk(chip); ++ + azx_init_chip(chip, (probe_only[dev] & 2) == 0); + + /* codec detection */ +@@ -3902,8 +3958,12 @@ static int azx_probe_continue(struct azx *chip) + snd_printk(KERN_ERR SFX "Error request power-well from i915\n"); + goto out_free; + } ++ err = hda_display_power(true); ++ if (err < 0) { ++ snd_printk(KERN_ERR SFX "Cannot turn on display power on i915\n"); ++ goto out_free; ++ } + #endif +- hda_display_power(true); + } + + err = azx_first_init(chip); +@@ -4025,6 +4085,9 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { + /* BayTrail */ + { PCI_DEVICE(0x8086, 0x0f04), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM }, ++ /* Braswell */ ++ { PCI_DEVICE(0x8086, 0x2284), ++ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, + /* ICH */ + { PCI_DEVICE(0x8086, 0x2668), + .driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC | +diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c +index 27c99528b823..14c57789b5c9 100644 +--- a/sound/pci/hda/patch_hdmi.c ++++ b/sound/pci/hda/patch_hdmi.c +@@ -2857,6 +2857,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { + { .id = 0x80862808, .name = "Broadwell HDMI", .patch = patch_generic_hdmi }, + { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi }, + { .id = 0x80862882, .name = "Valleyview2 HDMI", .patch = patch_generic_hdmi }, ++{ .id = 0x80862883, .name = "Braswell HDMI", .patch = patch_generic_hdmi }, + { .id = 0x808629fb, .name = "Crestline HDMI", .patch = patch_generic_hdmi }, + {} /* terminator */ + }; +@@ -2913,6 +2914,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862807"); + MODULE_ALIAS("snd-hda-codec-id:80862808"); + MODULE_ALIAS("snd-hda-codec-id:80862880"); + 
MODULE_ALIAS("snd-hda-codec-id:80862882"); ++MODULE_ALIAS("snd-hda-codec-id:80862883"); + MODULE_ALIAS("snd-hda-codec-id:808629fb"); + + MODULE_LICENSE("GPL"); +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 4136cc25154e..d3fa7b76a21b 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -675,9 +675,9 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w, + int shared; + struct snd_kcontrol *kcontrol; + bool wname_in_long_name, kcname_in_long_name; +- char *long_name; ++ char *long_name = NULL; + const char *name; +- int ret; ++ int ret = 0; + + if (dapm->codec) + prefix = dapm->codec->name_prefix; +@@ -742,15 +742,17 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w, + + kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name, + prefix); +- kfree(long_name); +- if (!kcontrol) +- return -ENOMEM; ++ if (!kcontrol) { ++ ret = -ENOMEM; ++ goto exit_free; ++ } ++ + kcontrol->private_free = dapm_kcontrol_free; + + ret = dapm_kcontrol_data_alloc(w, kcontrol); + if (ret) { + snd_ctl_free_one(kcontrol); +- return ret; ++ goto exit_free; + } + + ret = snd_ctl_add(card, kcontrol); +@@ -758,17 +760,18 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w, + dev_err(dapm->dev, + "ASoC: failed to add widget %s dapm kcontrol %s: %d\n", + w->name, name, ret); +- return ret; ++ goto exit_free; + } + } + + ret = dapm_kcontrol_add_widget(kcontrol, w); +- if (ret) +- return ret; ++ if (ret == 0) ++ w->kcontrols[kci] = kcontrol; + +- w->kcontrols[kci] = kcontrol; ++exit_free: ++ kfree(long_name); + +- return 0; ++ return ret; + } + + /* create new dapm mixer control */ +diff --git a/sound/usb/card.c b/sound/usb/card.c +index fda227e3bbac..4476b9047adc 100644 +--- a/sound/usb/card.c ++++ b/sound/usb/card.c +@@ -589,18 +589,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, + { + struct snd_card *card; + struct list_head *p; ++ bool was_shutdown; + + if (chip == (void *)-1L) + return; + + card = chip->card; + down_write(&chip->shutdown_rwsem); ++ was_shutdown = chip->shutdown; + chip->shutdown = 1; + up_write(&chip->shutdown_rwsem); + + mutex_lock(®ister_mutex); +- chip->num_interfaces--; +- if (chip->num_interfaces <= 0) { ++ if (!was_shutdown) { + struct snd_usb_endpoint *ep; + + snd_card_disconnect(card); +@@ -620,6 +621,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, + list_for_each(p, &chip->mixer_list) { + snd_usb_mixer_disconnect(p); + } ++ } ++ ++ chip->num_interfaces--; ++ if (chip->num_interfaces <= 0) { + usb_chip[chip->index] = NULL; + mutex_unlock(®ister_mutex); + snd_card_free_when_closed(card); +diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c +index dec997188dfb..a650aa48c786 100644 +--- a/virt/kvm/iommu.c ++++ b/virt/kvm/iommu.c +@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm, + gfn_t base_gfn, unsigned long npages); + + static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, +- unsigned long size) ++ unsigned long npages) + { + gfn_t end_gfn; + pfn_t pfn; + + pfn = gfn_to_pfn_memslot(slot, gfn); +- end_gfn = gfn + (size >> PAGE_SHIFT); ++ end_gfn = gfn + npages; + gfn += 1; + + if (is_error_noslot_pfn(pfn)) +@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) + * Pin all pages we are about to map in memory. This is + * important because we unmap and unpin in 4kb steps later. 
+ */ +- pfn = kvm_pin_pages(slot, gfn, page_size); ++ pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT); + if (is_error_noslot_pfn(pfn)) { + gfn += 1; + continue; +@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) + if (r) { + printk(KERN_ERR "kvm_iommu_map_address:" + "iommu failed to map pfn=%llx\n", pfn); +- kvm_unpin_pages(kvm, pfn, page_size); ++ kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT); + goto unmap_pages; + } + diff --git a/1033_linux-3.12.34.patch b/1033_linux-3.12.34.patch new file mode 100644 index 00000000..0e98c971 --- /dev/null +++ b/1033_linux-3.12.34.patch @@ -0,0 +1,4434 @@ +diff --git a/Makefile b/Makefile +index db42c15c5def..9f1b68fba514 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 12 +-SUBLEVEL = 33 ++SUBLEVEL = 34 + EXTRAVERSION = + NAME = One Giant Leap for Frogkind + +diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S +index 75189f13cf54..de5143e4ad04 100644 +--- a/arch/arm/boot/compressed/head.S ++++ b/arch/arm/boot/compressed/head.S +@@ -399,8 +399,7 @@ dtb_check_done: + add sp, sp, r6 + #endif + +- tst r4, #1 +- bleq cache_clean_flush ++ bl cache_clean_flush + + adr r0, BSYM(restart) + add r0, r0, r6 +@@ -1053,6 +1052,8 @@ cache_clean_flush: + b call_cache_fn + + __armv4_mpu_cache_flush: ++ tst r4, #1 ++ movne pc, lr + mov r2, #1 + mov r3, #0 + mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache +@@ -1070,6 +1071,8 @@ __armv4_mpu_cache_flush: + mov pc, lr + + __fa526_cache_flush: ++ tst r4, #1 ++ movne pc, lr + mov r1, #0 + mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache + mcr p15, 0, r1, c7, c5, 0 @ flush I cache +@@ -1078,13 +1081,16 @@ __fa526_cache_flush: + + __armv6_mmu_cache_flush: + mov r1, #0 +- mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D ++ tst r4, #1 ++ mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D + mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB +- mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified ++ mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified + mcr p15, 0, r1, c7, c10, 4 @ drain WB + mov pc, lr + + __armv7_mmu_cache_flush: ++ tst r4, #1 ++ bne iflush + mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 + tst r10, #0xf << 16 @ hierarchical cache (ARMv7) + mov r10, #0 +@@ -1145,6 +1151,8 @@ iflush: + mov pc, lr + + __armv5tej_mmu_cache_flush: ++ tst r4, #1 ++ movne pc, lr + 1: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache + bne 1b + mcr p15, 0, r0, c7, c5, 0 @ flush I cache +@@ -1152,6 +1160,8 @@ __armv5tej_mmu_cache_flush: + mov pc, lr + + __armv4_mmu_cache_flush: ++ tst r4, #1 ++ movne pc, lr + mov r2, #64*1024 @ default: 32K dcache size (*2) + mov r11, #32 @ default: 32 byte line size + mrc p15, 0, r3, c0, c0, 1 @ read cache type +@@ -1185,6 +1195,8 @@ no_cache_id: + + __armv3_mmu_cache_flush: + __armv3_mpu_cache_flush: ++ tst r4, #1 ++ movne pc, lr + mov r1, #0 + mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 + mov pc, lr +diff --git a/arch/arm/kernel/kprobes-common.c b/arch/arm/kernel/kprobes-common.c +index 18a76282970e..380c20fb9c85 100644 +--- a/arch/arm/kernel/kprobes-common.c ++++ b/arch/arm/kernel/kprobes-common.c +@@ -14,6 +14,7 @@ + #include <linux/kernel.h> + #include <linux/kprobes.h> + #include <asm/system_info.h> ++#include <asm/opcodes.h> + + #include "kprobes.h" + +@@ -305,7 +306,8 @@ kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi) + + if (handler) { + /* We can emulate the instruction in (possibly) modified form */ +- asi->insn[0] = (insn & 
0xfff00000) | (rn << 16) | reglist; ++ asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) | ++ (rn << 16) | reglist); + asi->insn_handler = handler; + return INSN_GOOD; + } +@@ -334,13 +336,14 @@ prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, + #ifdef CONFIG_THUMB2_KERNEL + if (thumb) { + u16 *thumb_insn = (u16 *)asi->insn; +- thumb_insn[1] = 0x4770; /* Thumb bx lr */ +- thumb_insn[2] = 0x4770; /* Thumb bx lr */ ++ /* Thumb bx lr */ ++ thumb_insn[1] = __opcode_to_mem_thumb16(0x4770); ++ thumb_insn[2] = __opcode_to_mem_thumb16(0x4770); + return insn; + } +- asi->insn[1] = 0xe12fff1e; /* ARM bx lr */ ++ asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */ + #else +- asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */ ++ asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */ + #endif + /* Make an ARM instruction unconditional */ + if (insn < 0xe0000000) +@@ -360,12 +363,12 @@ set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi, + if (thumb) { + u16 *ip = (u16 *)asi->insn; + if (is_wide_instruction(insn)) +- *ip++ = insn >> 16; +- *ip++ = insn; ++ *ip++ = __opcode_to_mem_thumb16(insn >> 16); ++ *ip++ = __opcode_to_mem_thumb16(insn); + return; + } + #endif +- asi->insn[0] = insn; ++ asi->insn[0] = __opcode_to_mem_arm(insn); + } + + /* +diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c +index 6123daf397a7..241222c66a13 100644 +--- a/arch/arm/kernel/kprobes-thumb.c ++++ b/arch/arm/kernel/kprobes-thumb.c +@@ -11,6 +11,7 @@ + #include <linux/kernel.h> + #include <linux/kprobes.h> + #include <linux/module.h> ++#include <asm/opcodes.h> + + #include "kprobes.h" + +@@ -163,9 +164,9 @@ t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi) + enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi); + + /* Fixup modified instruction to have halfwords in correct order...*/ +- insn = asi->insn[0]; +- ((u16 *)asi->insn)[0] = insn >> 16; +- ((u16 *)asi->insn)[1] = insn & 0xffff; ++ insn = __mem_to_opcode_arm(asi->insn[0]); ++ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16); ++ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff); + + return ret; + } +@@ -1153,7 +1154,7 @@ t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi) + { + insn &= ~0x00ff; + insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */ +- ((u16 *)asi->insn)[0] = insn; ++ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn); + asi->insn_handler = t16_emulate_hiregs; + return INSN_GOOD; + } +@@ -1182,8 +1183,10 @@ t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi) + * and call it with R9=SP and LR in the register list represented + * by R8. + */ +- ((u16 *)asi->insn)[0] = 0xe929; /* 1st half STMDB R9!,{} */ +- ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */ ++ /* 1st half STMDB R9!,{} */ ++ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929); ++ /* 2nd half (register list) */ ++ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff); + asi->insn_handler = t16_emulate_push; + return INSN_GOOD; + } +@@ -1232,8 +1235,10 @@ t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi) + * and call it with R9=SP and PC in the register list represented + * by R8. 
+ */ +- ((u16 *)asi->insn)[0] = 0xe8b9; /* 1st half LDMIA R9!,{} */ +- ((u16 *)asi->insn)[1] = insn & 0x1ff; /* 2nd half (register list) */ ++ /* 1st half LDMIA R9!,{} */ ++ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9); ++ /* 2nd half (register list) */ ++ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff); + asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc + : t16_emulate_pop_nopc; + return INSN_GOOD; +diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c +index 170e9f34003f..1c6ece51781c 100644 +--- a/arch/arm/kernel/kprobes.c ++++ b/arch/arm/kernel/kprobes.c +@@ -26,6 +26,7 @@ + #include <linux/stop_machine.h> + #include <linux/stringify.h> + #include <asm/traps.h> ++#include <asm/opcodes.h> + #include <asm/cacheflush.h> + + #include "kprobes.h" +@@ -62,10 +63,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) + #ifdef CONFIG_THUMB2_KERNEL + thumb = true; + addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */ +- insn = ((u16 *)addr)[0]; ++ insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]); + if (is_wide_instruction(insn)) { +- insn <<= 16; +- insn |= ((u16 *)addr)[1]; ++ u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]); ++ insn = __opcode_thumb32_compose(insn, inst2); + decode_insn = thumb32_kprobe_decode_insn; + } else + decode_insn = thumb16_kprobe_decode_insn; +@@ -73,7 +74,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) + thumb = false; + if (addr & 0x3) + return -EINVAL; +- insn = *p->addr; ++ insn = __mem_to_opcode_arm(*p->addr); + decode_insn = arm_kprobe_decode_insn; + #endif + +diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig +index b3b1b883bd08..426f531754ec 100644 +--- a/arch/arm/mm/Kconfig ++++ b/arch/arm/mm/Kconfig +@@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS + + config KUSER_HELPERS + bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS ++ depends on MMU + default y + help + Warning: disabling this option may break user programs. +diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S +index 6e0ed93d51fe..c17967fdf5f6 100644 +--- a/arch/arm64/lib/clear_user.S ++++ b/arch/arm64/lib/clear_user.S +@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 ) + sub x1, x1, #2 + 4: adds x1, x1, #1 + b.mi 5f +- strb wzr, [x0] ++USER(9f, strb wzr, [x0] ) + 5: mov x0, #0 + ret + ENDPROC(__clear_user) +diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h +index 0a3eada1863b..f395cde7b593 100644 +--- a/arch/parisc/include/uapi/asm/shmbuf.h ++++ b/arch/parisc/include/uapi/asm/shmbuf.h +@@ -36,23 +36,16 @@ struct shmid64_ds { + unsigned int __unused2; + }; + +-#ifdef CONFIG_64BIT +-/* The 'unsigned int' (formerly 'unsigned long') data types below will +- * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on +- * a wide kernel, but if some of these values are meant to contain pointers +- * they may need to be 'long long' instead. 
-PB XXX FIXME +- */ +-#endif + struct shminfo64 { +- unsigned int shmmax; +- unsigned int shmmin; +- unsigned int shmmni; +- unsigned int shmseg; +- unsigned int shmall; +- unsigned int __unused1; +- unsigned int __unused2; +- unsigned int __unused3; +- unsigned int __unused4; ++ unsigned long shmmax; ++ unsigned long shmmin; ++ unsigned long shmmni; ++ unsigned long shmseg; ++ unsigned long shmall; ++ unsigned long __unused1; ++ unsigned long __unused2; ++ unsigned long __unused3; ++ unsigned long __unused4; + }; + + #endif /* _PARISC_SHMBUF_H */ +diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S +index 10a0c2aad8cf..b24732d1bdbf 100644 +--- a/arch/parisc/kernel/syscall_table.S ++++ b/arch/parisc/kernel/syscall_table.S +@@ -286,11 +286,11 @@ + ENTRY_COMP(msgsnd) + ENTRY_COMP(msgrcv) + ENTRY_SAME(msgget) /* 190 */ +- ENTRY_SAME(msgctl) +- ENTRY_SAME(shmat) ++ ENTRY_COMP(msgctl) ++ ENTRY_COMP(shmat) + ENTRY_SAME(shmdt) + ENTRY_SAME(shmget) +- ENTRY_SAME(shmctl) /* 195 */ ++ ENTRY_COMP(shmctl) /* 195 */ + ENTRY_SAME(ni_syscall) /* streams1 */ + ENTRY_SAME(ni_syscall) /* streams2 */ + ENTRY_SAME(lstat64) +@@ -323,7 +323,7 @@ + ENTRY_SAME(epoll_ctl) /* 225 */ + ENTRY_SAME(epoll_wait) + ENTRY_SAME(remap_file_pages) +- ENTRY_SAME(semtimedop) ++ ENTRY_COMP(semtimedop) + ENTRY_COMP(mq_open) + ENTRY_SAME(mq_unlink) /* 230 */ + ENTRY_COMP(mq_timedsend) +diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h +index 905832aa9e9e..a0ed182ae73c 100644 +--- a/arch/sparc/include/asm/atomic_32.h ++++ b/arch/sparc/include/asm/atomic_32.h +@@ -21,7 +21,7 @@ + + extern int __atomic_add_return(int, atomic_t *); + extern int atomic_cmpxchg(atomic_t *, int, int); +-#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) ++extern int atomic_xchg(atomic_t *, int); + extern int __atomic_add_unless(atomic_t *, int, int); + extern void atomic_set(atomic_t *, int); + +diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h +index 1fae1a02e3c2..ae0f9a7a314d 100644 +--- a/arch/sparc/include/asm/cmpxchg_32.h ++++ b/arch/sparc/include/asm/cmpxchg_32.h +@@ -11,22 +11,14 @@ + #ifndef __ARCH_SPARC_CMPXCHG__ + #define __ARCH_SPARC_CMPXCHG__ + +-static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) +-{ +- __asm__ __volatile__("swap [%2], %0" +- : "=&r" (val) +- : "0" (val), "r" (m) +- : "memory"); +- return val; +-} +- ++extern unsigned long __xchg_u32(volatile u32 *m, u32 new); + extern void __xchg_called_with_bad_pointer(void); + + static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) + { + switch (size) { + case 4: +- return xchg_u32(ptr, x); ++ return __xchg_u32(ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h +index 432afa838861..55841c184e6d 100644 +--- a/arch/sparc/include/asm/vio.h ++++ b/arch/sparc/include/asm/vio.h +@@ -118,12 +118,18 @@ struct vio_disk_attr_info { + u8 vdisk_type; + #define VD_DISK_TYPE_SLICE 0x01 /* Slice in block device */ + #define VD_DISK_TYPE_DISK 0x02 /* Entire block device */ +- u16 resv1; ++ u8 vdisk_mtype; /* v1.1 */ ++#define VD_MEDIA_TYPE_FIXED 0x01 /* Fixed device */ ++#define VD_MEDIA_TYPE_CD 0x02 /* CD Device */ ++#define VD_MEDIA_TYPE_DVD 0x03 /* DVD Device */ ++ u8 resv1; + u32 vdisk_block_size; + u64 operations; +- u64 vdisk_size; ++ u64 vdisk_size; /* v1.1 */ + u64 max_xfer_size; +- u64 resv2[2]; ++ u32 phys_block_size; /* v1.2 */ ++ u32 
resv2; ++ u64 resv3[1]; + }; + + struct vio_disk_desc { +@@ -259,7 +265,7 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr, + unsigned int ring_size) + { + return (dr->pending - +- ((dr->prod - dr->cons) & (ring_size - 1))); ++ ((dr->prod - dr->cons) & (ring_size - 1)) - 1); + } + + #define VIO_MAX_TYPE_LEN 32 +diff --git a/arch/sparc/include/uapi/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h +index a34ad079487e..4c7c12d69bea 100644 +--- a/arch/sparc/include/uapi/asm/swab.h ++++ b/arch/sparc/include/uapi/asm/swab.h +@@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr) + { + __u16 ret; + +- __asm__ __volatile__ ("lduha [%1] %2, %0" ++ __asm__ __volatile__ ("lduha [%2] %3, %0" + : "=r" (ret) +- : "r" (addr), "i" (ASI_PL)); ++ : "m" (*addr), "r" (addr), "i" (ASI_PL)); + return ret; + } + #define __arch_swab16p __arch_swab16p +@@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr) + { + __u32 ret; + +- __asm__ __volatile__ ("lduwa [%1] %2, %0" ++ __asm__ __volatile__ ("lduwa [%2] %3, %0" + : "=r" (ret) +- : "r" (addr), "i" (ASI_PL)); ++ : "m" (*addr), "r" (addr), "i" (ASI_PL)); + return ret; + } + #define __arch_swab32p __arch_swab32p +@@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr) + { + __u64 ret; + +- __asm__ __volatile__ ("ldxa [%1] %2, %0" ++ __asm__ __volatile__ ("ldxa [%2] %3, %0" + : "=r" (ret) +- : "r" (addr), "i" (ASI_PL)); ++ : "m" (*addr), "r" (addr), "i" (ASI_PL)); + return ret; + } + #define __arch_swab64p __arch_swab64p +diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c +index 8f76f23dac38..f9c6813c132d 100644 +--- a/arch/sparc/kernel/pci_schizo.c ++++ b/arch/sparc/kernel/pci_schizo.c +@@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) + { + unsigned long csr_reg, csr, csr_error_bits; + irqreturn_t ret = IRQ_NONE; +- u16 stat; ++ u32 stat; + + csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL; + csr = upa_readq(csr_reg); +@@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) + pbm->name); + ret = IRQ_HANDLED; + } +- pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat); ++ pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat); + if (stat & (PCI_STATUS_PARITY | + PCI_STATUS_SIG_TARGET_ABORT | + PCI_STATUS_REC_TARGET_ABORT | +@@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm) + PCI_STATUS_SIG_SYSTEM_ERROR)) { + printk("%s: PCI bus error, PCI_STATUS[%04x]\n", + pbm->name, stat); +- pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff); ++ pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff); + ret = IRQ_HANDLED; + } + return ret; +diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c +index 2b4e03e9cd4b..226ff1af1d26 100644 +--- a/arch/sparc/kernel/smp_64.c ++++ b/arch/sparc/kernel/smp_64.c +@@ -822,13 +822,17 @@ void arch_send_call_function_single_ipi(int cpu) + void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) + { + clear_softint(1 << irq); ++ irq_enter(); + generic_smp_call_function_interrupt(); ++ irq_exit(); + } + + void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) + { + clear_softint(1 << irq); ++ irq_enter(); + generic_smp_call_function_single_interrupt(); ++ irq_exit(); + } + + static void tsb_sync(void *info) +diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c +index 1d32b54089aa..8f2f94d53434 100644 +--- a/arch/sparc/lib/atomic32.c ++++ 
b/arch/sparc/lib/atomic32.c +@@ -40,6 +40,19 @@ int __atomic_add_return(int i, atomic_t *v) + } + EXPORT_SYMBOL(__atomic_add_return); + ++int atomic_xchg(atomic_t *v, int new) ++{ ++ int ret; ++ unsigned long flags; ++ ++ spin_lock_irqsave(ATOMIC_HASH(v), flags); ++ ret = v->counter; ++ v->counter = new; ++ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); ++ return ret; ++} ++EXPORT_SYMBOL(atomic_xchg); ++ + int atomic_cmpxchg(atomic_t *v, int old, int new) + { + int ret; +@@ -132,3 +145,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) + return (unsigned long)prev; + } + EXPORT_SYMBOL(__cmpxchg_u32); ++ ++unsigned long __xchg_u32(volatile u32 *ptr, u32 new) ++{ ++ unsigned long flags; ++ u32 prev; ++ ++ spin_lock_irqsave(ATOMIC_HASH(ptr), flags); ++ prev = *ptr; ++ *ptr = new; ++ spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); ++ ++ return (unsigned long)prev; ++} ++EXPORT_SYMBOL(__xchg_u32); +diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h +index 89270b4318db..c2f19a83498d 100644 +--- a/arch/x86/include/asm/cpufeature.h ++++ b/arch/x86/include/asm/cpufeature.h +@@ -203,6 +203,7 @@ + #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */ + #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */ + #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */ ++#define X86_FEATURE_VMMCALL (8*32+15) /* Prefer vmmcall to vmcall */ + + + /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ +diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h +index c7678e43465b..e62cf897f781 100644 +--- a/arch/x86/include/asm/kvm_para.h ++++ b/arch/x86/include/asm/kvm_para.h +@@ -2,6 +2,7 @@ + #define _ASM_X86_KVM_PARA_H + + #include <asm/processor.h> ++#include <asm/alternative.h> + #include <uapi/asm/kvm_para.h> + + extern void kvmclock_init(void); +@@ -16,10 +17,15 @@ static inline bool kvm_check_and_clear_guest_paused(void) + } + #endif /* CONFIG_KVM_GUEST */ + +-/* This instruction is vmcall. On non-VT architectures, it will generate a +- * trap that we will then rewrite to the appropriate instruction. ++#ifdef CONFIG_DEBUG_RODATA ++#define KVM_HYPERCALL \ ++ ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL) ++#else ++/* On AMD processors, vmcall will generate a trap that we will ++ * then rewrite to the appropriate instruction. + */ + #define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1" ++#endif + + /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall + * instruction. The hypervisor may replace it with something else but only the +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index 28233b9e45cc..ee51e67df1b1 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -509,6 +509,13 @@ static void early_init_amd(struct cpuinfo_x86 *c) + } + #endif + ++ /* ++ * This is only needed to tell the kernel whether to use VMCALL ++ * and VMMCALL. VMMCALL is never executed except under virt, so ++ * we can set it unconditionally. 
++ */ ++ set_cpu_cap(c, X86_FEATURE_VMMCALL); ++ + /* F16h erratum 793, CVE-2013-6885 */ + if (c->x86 == 0x16 && c->x86_model <= 0xf) { + u64 val; +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c +index 7461f50d5bb1..0686fe313b3b 100644 +--- a/arch/x86/kernel/ptrace.c ++++ b/arch/x86/kernel/ptrace.c +@@ -1441,15 +1441,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, + force_sig_info(SIGTRAP, &info, tsk); + } + +- +-#ifdef CONFIG_X86_32 +-# define IS_IA32 1 +-#elif defined CONFIG_IA32_EMULATION +-# define IS_IA32 is_compat_task() +-#else +-# define IS_IA32 0 +-#endif +- + /* + * We must return the syscall number to actually look up in the table. + * This can be -1L to skip running any syscall at all. +@@ -1487,7 +1478,7 @@ long syscall_trace_enter(struct pt_regs *regs) + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->orig_ax); + +- if (IS_IA32) ++ if (is_ia32_task()) + audit_syscall_entry(AUDIT_ARCH_I386, + regs->orig_ax, + regs->bx, regs->cx, +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 590fd966b37a..790551bc4f15 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -4871,7 +4871,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu) + + ++vcpu->stat.insn_emulation_fail; + trace_kvm_emulate_insn_failed(vcpu); +- if (!is_guest_mode(vcpu)) { ++ if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) { + vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; + vcpu->run->internal.ndata = 0; +diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h +index 51940fec6990..513effd48060 100644 +--- a/arch/xtensa/include/uapi/asm/unistd.h ++++ b/arch/xtensa/include/uapi/asm/unistd.h +@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1) + #define __NR_pivot_root 175 + __SYSCALL(175, sys_pivot_root, 2) + #define __NR_umount 176 +-__SYSCALL(176, sys_umount, 2) ++__SYSCALL(176, sys_oldumount, 1) ++#define __ARCH_WANT_SYS_OLDUMOUNT + #define __NR_swapoff 177 + __SYSCALL(177, sys_swapoff, 1) + #define __NR_sync 178 +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index a875de67fb7c..4432c9dc9c7a 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -61,6 +61,7 @@ enum board_ids { + /* board IDs by feature in alphabetical order */ + board_ahci, + board_ahci_ign_iferr, ++ board_ahci_nomsi, + board_ahci_noncq, + board_ahci_nosntf, + board_ahci_yes_fbs, +@@ -120,6 +121,13 @@ static const struct ata_port_info ahci_port_info[] = { + .udma_mask = ATA_UDMA6, + .port_ops = &ahci_ops, + }, ++ [board_ahci_nomsi] = { ++ AHCI_HFLAGS (AHCI_HFLAG_NO_MSI), ++ .flags = AHCI_FLAG_COMMON, ++ .pio_mask = ATA_PIO4, ++ .udma_mask = ATA_UDMA6, ++ .port_ops = &ahci_ops, ++ }, + [board_ahci_noncq] = { + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ), + .flags = AHCI_FLAG_COMMON, +@@ -312,6 +320,11 @@ static const struct pci_device_id ahci_pci_tbl[] = { + { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ + { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ ++ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ ++ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */ ++ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ ++ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ ++ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ + + /* JMicron 
360/1/3/5/6, match class to avoid IDE function */ + { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, +@@ -474,10 +487,10 @@ static const struct pci_device_id ahci_pci_tbl[] = { + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ + + /* +- * Samsung SSDs found on some macbooks. NCQ times out. +- * https://bugzilla.kernel.org/show_bug.cgi?id=60731 ++ * Samsung SSDs found on some macbooks. NCQ times out if MSI is ++ * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731 + */ +- { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq }, ++ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi }, + + /* Enmotus */ + { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, +diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c +index 5814deb6963d..0ebadf93b6c5 100644 +--- a/drivers/block/sunvdc.c ++++ b/drivers/block/sunvdc.c +@@ -9,6 +9,7 @@ + #include <linux/blkdev.h> + #include <linux/hdreg.h> + #include <linux/genhd.h> ++#include <linux/cdrom.h> + #include <linux/slab.h> + #include <linux/spinlock.h> + #include <linux/completion.h> +@@ -22,8 +23,8 @@ + + #define DRV_MODULE_NAME "sunvdc" + #define PFX DRV_MODULE_NAME ": " +-#define DRV_MODULE_VERSION "1.0" +-#define DRV_MODULE_RELDATE "June 25, 2007" ++#define DRV_MODULE_VERSION "1.1" ++#define DRV_MODULE_RELDATE "February 13, 2013" + + static char version[] = + DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; +@@ -32,7 +33,7 @@ MODULE_DESCRIPTION("Sun LDOM virtual disk client driver"); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_MODULE_VERSION); + +-#define VDC_TX_RING_SIZE 256 ++#define VDC_TX_RING_SIZE 512 + + #define WAITING_FOR_LINK_UP 0x01 + #define WAITING_FOR_TX_SPACE 0x02 +@@ -65,11 +66,9 @@ struct vdc_port { + u64 operations; + u32 vdisk_size; + u8 vdisk_type; ++ u8 vdisk_mtype; + + char disk_name[32]; +- +- struct vio_disk_geom geom; +- struct vio_disk_vtoc label; + }; + + static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) +@@ -79,9 +78,16 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) + + /* Ordered from largest major to lowest */ + static struct vio_version vdc_versions[] = { ++ { .major = 1, .minor = 1 }, + { .major = 1, .minor = 0 }, + }; + ++static inline int vdc_version_supported(struct vdc_port *port, ++ u16 major, u16 minor) ++{ ++ return port->vio.ver.major == major && port->vio.ver.minor >= minor; ++} ++ + #define VDCBLK_NAME "vdisk" + static int vdc_major; + #define PARTITION_SHIFT 3 +@@ -94,18 +100,54 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr) + static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo) + { + struct gendisk *disk = bdev->bd_disk; +- struct vdc_port *port = disk->private_data; ++ sector_t nsect = get_capacity(disk); ++ sector_t cylinders = nsect; + +- geo->heads = (u8) port->geom.num_hd; +- geo->sectors = (u8) port->geom.num_sec; +- geo->cylinders = port->geom.num_cyl; ++ geo->heads = 0xff; ++ geo->sectors = 0x3f; ++ sector_div(cylinders, geo->heads * geo->sectors); ++ geo->cylinders = cylinders; ++ if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect) ++ geo->cylinders = 0xffff; + + return 0; + } + ++/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev ++ * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD. ++ * Needed to be able to install inside an ldom from an iso image. 
++ */ ++static int vdc_ioctl(struct block_device *bdev, fmode_t mode, ++ unsigned command, unsigned long argument) ++{ ++ int i; ++ struct gendisk *disk; ++ ++ switch (command) { ++ case CDROMMULTISESSION: ++ pr_debug(PFX "Multisession CDs not supported\n"); ++ for (i = 0; i < sizeof(struct cdrom_multisession); i++) ++ if (put_user(0, (char __user *)(argument + i))) ++ return -EFAULT; ++ return 0; ++ ++ case CDROM_GET_CAPABILITY: ++ disk = bdev->bd_disk; ++ ++ if (bdev->bd_disk && (disk->flags & GENHD_FL_CD)) ++ return 0; ++ return -EINVAL; ++ ++ default: ++ pr_debug(PFX "ioctl %08x not supported\n", command); ++ return -EINVAL; ++ } ++} ++ + static const struct block_device_operations vdc_fops = { + .owner = THIS_MODULE, + .getgeo = vdc_getgeo, ++ .ioctl = vdc_ioctl, + }; + + static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for) +@@ -165,9 +207,9 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg) + struct vio_disk_attr_info *pkt = arg; + + viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] " +- "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n", ++ "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n", + pkt->tag.stype, pkt->operations, +- pkt->vdisk_size, pkt->vdisk_type, ++ pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype, + pkt->xfer_mode, pkt->vdisk_block_size, + pkt->max_xfer_size); + +@@ -192,8 +234,11 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg) + } + + port->operations = pkt->operations; +- port->vdisk_size = pkt->vdisk_size; + port->vdisk_type = pkt->vdisk_type; ++ if (vdc_version_supported(port, 1, 1)) { ++ port->vdisk_size = pkt->vdisk_size; ++ port->vdisk_mtype = pkt->vdisk_mtype; ++ } + if (pkt->max_xfer_size < port->max_xfer_size) + port->max_xfer_size = pkt->max_xfer_size; + port->vdisk_block_size = pkt->vdisk_block_size; +@@ -236,7 +281,9 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, + + __blk_end_request(req, (desc->status ? -EIO : 0), desc->size); + +- if (blk_queue_stopped(port->disk->queue)) ++ /* restart blk queue when ring is half emptied */ ++ if (blk_queue_stopped(port->disk->queue) && ++ vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50) + blk_start_queue(port->disk->queue); + } + +@@ -388,12 +435,6 @@ static int __send_request(struct request *req) + for (i = 0; i < nsg; i++) + len += sg[i].length; + +- if (unlikely(vdc_tx_dring_avail(dr) < 1)) { +- blk_stop_queue(port->disk->queue); +- err = -ENOMEM; +- goto out; +- } +- + desc = vio_dring_cur(dr); + + err = ldc_map_sg(port->vio.lp, sg, nsg, +@@ -433,21 +474,32 @@ static int __send_request(struct request *req) + port->req_id++; + dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1); + } +-out: + + return err; + } + +-static void do_vdc_request(struct request_queue *q) ++static void do_vdc_request(struct request_queue *rq) + { +- while (1) { +- struct request *req = blk_fetch_request(q); ++ struct request *req; + +- if (!req) +- break; ++ while ((req = blk_peek_request(rq)) != NULL) { ++ struct vdc_port *port; ++ struct vio_dring_state *dr; + +- if (__send_request(req) < 0) +- __blk_end_request_all(req, -EIO); ++ port = req->rq_disk->private_data; ++ dr = &port->vio.drings[VIO_DRIVER_TX_RING]; ++ if (unlikely(vdc_tx_dring_avail(dr) < 1)) ++ goto wait; ++ ++ blk_start_request(req); ++ ++ if (__send_request(req) < 0) { ++ blk_requeue_request(rq, req); ++wait: ++ /* Avoid pointless unplugs. 
*/ ++ blk_stop_queue(rq); ++ break; ++ } + } + } + +@@ -656,25 +708,27 @@ static int probe_disk(struct vdc_port *port) + if (comp.err) + return comp.err; + +- err = generic_request(port, VD_OP_GET_VTOC, +- &port->label, sizeof(port->label)); +- if (err < 0) { +- printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err); +- return err; +- } +- +- err = generic_request(port, VD_OP_GET_DISKGEOM, +- &port->geom, sizeof(port->geom)); +- if (err < 0) { +- printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns " +- "error %d\n", err); +- return err; ++ if (vdc_version_supported(port, 1, 1)) { ++ /* vdisk_size should be set during the handshake, if it wasn't ++ * then the underlying disk is reserved by another system ++ */ ++ if (port->vdisk_size == -1) ++ return -ENODEV; ++ } else { ++ struct vio_disk_geom geom; ++ ++ err = generic_request(port, VD_OP_GET_DISKGEOM, ++ &geom, sizeof(geom)); ++ if (err < 0) { ++ printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns " ++ "error %d\n", err); ++ return err; ++ } ++ port->vdisk_size = ((u64)geom.num_cyl * ++ (u64)geom.num_hd * ++ (u64)geom.num_sec); + } + +- port->vdisk_size = ((u64)port->geom.num_cyl * +- (u64)port->geom.num_hd * +- (u64)port->geom.num_sec); +- + q = blk_init_queue(do_vdc_request, &port->vio.lock); + if (!q) { + printk(KERN_ERR PFX "%s: Could not allocate queue.\n", +@@ -691,6 +745,10 @@ static int probe_disk(struct vdc_port *port) + + port->disk = g; + ++ /* Each segment in a request is up to an aligned page in size. */ ++ blk_queue_segment_boundary(q, PAGE_SIZE - 1); ++ blk_queue_max_segment_size(q, PAGE_SIZE); ++ + blk_queue_max_segments(q, port->ring_cookies); + blk_queue_max_hw_sectors(q, port->max_xfer_size); + g->major = vdc_major; +@@ -704,9 +762,32 @@ static int probe_disk(struct vdc_port *port) + + set_capacity(g, port->vdisk_size); + +- printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n", ++ if (vdc_version_supported(port, 1, 1)) { ++ switch (port->vdisk_mtype) { ++ case VD_MEDIA_TYPE_CD: ++ pr_info(PFX "Virtual CDROM %s\n", port->disk_name); ++ g->flags |= GENHD_FL_CD; ++ g->flags |= GENHD_FL_REMOVABLE; ++ set_disk_ro(g, 1); ++ break; ++ ++ case VD_MEDIA_TYPE_DVD: ++ pr_info(PFX "Virtual DVD %s\n", port->disk_name); ++ g->flags |= GENHD_FL_CD; ++ g->flags |= GENHD_FL_REMOVABLE; ++ set_disk_ro(g, 1); ++ break; ++ ++ case VD_MEDIA_TYPE_FIXED: ++ pr_info(PFX "Virtual Hard disk %s\n", port->disk_name); ++ break; ++ } ++ } ++ ++ pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n", + g->disk_name, +- port->vdisk_size, (port->vdisk_size >> (20 - 9))); ++ port->vdisk_size, (port->vdisk_size >> (20 - 9)), ++ port->vio.ver.major, port->vio.ver.minor); + + add_disk(g); + +@@ -765,6 +846,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) + else + snprintf(port->disk_name, sizeof(port->disk_name), + VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26)); ++ port->vdisk_size = -1; + + err = vio_driver_init(&port->vio, vdev, VDEV_DISK, + vdc_versions, ARRAY_SIZE(vdc_versions), +diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c +index 5f1197929f0c..ab11c16352f8 100644 +--- a/drivers/char/hw_random/pseries-rng.c ++++ b/drivers/char/hw_random/pseries-rng.c +@@ -17,19 +17,30 @@ + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + ++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++ ++#include <linux/kernel.h> + #include <linux/module.h> + #include <linux/hw_random.h> + #include <asm/vio.h> + + #define MODULE_NAME "pseries-rng" + +-static int 
pseries_rng_data_read(struct hwrng *rng, u32 *data) ++static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) + { +- if (plpar_hcall(H_RANDOM, (unsigned long *)data) != H_SUCCESS) { +- printk(KERN_ERR "pseries rng hcall error\n"); +- return 0; ++ u64 buffer[PLPAR_HCALL_BUFSIZE]; ++ size_t size = max < 8 ? max : 8; ++ int rc; ++ ++ rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer); ++ if (rc != H_SUCCESS) { ++ pr_err_ratelimited("H_RANDOM call failed %d\n", rc); ++ return -EIO; + } +- return 8; ++ memcpy(data, buffer, size); ++ ++ /* The hypervisor interface returns 64 bits */ ++ return size; + } + + /** +@@ -48,7 +59,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev) + + static struct hwrng pseries_rng = { + .name = MODULE_NAME, +- .data_read = pseries_rng_data_read, ++ .read = pseries_rng_read, + }; + + static int __init pseries_rng_probe(struct vio_dev *dev, +diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c +index e732bd962e98..af351f478b14 100644 +--- a/drivers/crypto/caam/caamhash.c ++++ b/drivers/crypto/caam/caamhash.c +@@ -832,8 +832,9 @@ static int ahash_update_ctx(struct ahash_request *req) + edesc->sec4_sg + sec4_sg_src_index, + chained); + if (*next_buflen) { +- sg_copy_part(next_buf, req->src, to_hash - +- *buflen, req->nbytes); ++ scatterwalk_map_and_copy(next_buf, req->src, ++ to_hash - *buflen, ++ *next_buflen, 0); + state->current_buf = !state->current_buf; + } + } else { +@@ -866,7 +867,8 @@ static int ahash_update_ctx(struct ahash_request *req) + kfree(edesc); + } + } else if (*next_buflen) { +- sg_copy(buf + *buflen, req->src, req->nbytes); ++ scatterwalk_map_and_copy(buf + *buflen, req->src, 0, ++ req->nbytes, 0); + *buflen = *next_buflen; + *next_buflen = last_buflen; + } +@@ -1213,8 +1215,9 @@ static int ahash_update_no_ctx(struct ahash_request *req) + src_map_to_sec4_sg(jrdev, req->src, src_nents, + edesc->sec4_sg + 1, chained); + if (*next_buflen) { +- sg_copy_part(next_buf, req->src, to_hash - *buflen, +- req->nbytes); ++ scatterwalk_map_and_copy(next_buf, req->src, ++ to_hash - *buflen, ++ *next_buflen, 0); + state->current_buf = !state->current_buf; + } + +@@ -1245,7 +1248,8 @@ static int ahash_update_no_ctx(struct ahash_request *req) + kfree(edesc); + } + } else if (*next_buflen) { +- sg_copy(buf + *buflen, req->src, req->nbytes); ++ scatterwalk_map_and_copy(buf + *buflen, req->src, 0, ++ req->nbytes, 0); + *buflen = *next_buflen; + *next_buflen = 0; + } +@@ -1402,7 +1406,8 @@ static int ahash_update_first(struct ahash_request *req) + } + + if (*next_buflen) +- sg_copy_part(next_buf, req->src, to_hash, req->nbytes); ++ scatterwalk_map_and_copy(next_buf, req->src, to_hash, ++ *next_buflen, 0); + + sh_len = desc_len(sh_desc); + desc = edesc->hw_desc; +@@ -1435,7 +1440,8 @@ static int ahash_update_first(struct ahash_request *req) + state->update = ahash_update_no_ctx; + state->finup = ahash_finup_no_ctx; + state->final = ahash_final_no_ctx; +- sg_copy(next_buf, req->src, req->nbytes); ++ scatterwalk_map_and_copy(next_buf, req->src, 0, ++ req->nbytes, 0); + } + #ifdef DEBUG + print_hex_dump(KERN_ERR, "next buf@"__stringify(__LINE__)": ", +diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h +index e0037c8ee243..ce28a563effc 100644 +--- a/drivers/crypto/caam/sg_sw_sec4.h ++++ b/drivers/crypto/caam/sg_sw_sec4.h +@@ -116,41 +116,3 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg, + } + return nents; + } +- +-/* Copy from len bytes of 
sg to dest, starting from beginning */ +-static inline void sg_copy(u8 *dest, struct scatterlist *sg, unsigned int len) +-{ +- struct scatterlist *current_sg = sg; +- int cpy_index = 0, next_cpy_index = current_sg->length; +- +- while (next_cpy_index < len) { +- memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), +- current_sg->length); +- current_sg = scatterwalk_sg_next(current_sg); +- cpy_index = next_cpy_index; +- next_cpy_index += current_sg->length; +- } +- if (cpy_index < len) +- memcpy(dest + cpy_index, (u8 *) sg_virt(current_sg), +- len - cpy_index); +-} +- +-/* Copy sg data, from to_skip to end, to dest */ +-static inline void sg_copy_part(u8 *dest, struct scatterlist *sg, +- int to_skip, unsigned int end) +-{ +- struct scatterlist *current_sg = sg; +- int sg_index, cpy_index; +- +- sg_index = current_sg->length; +- while (sg_index <= to_skip) { +- current_sg = scatterwalk_sg_next(current_sg); +- sg_index += current_sg->length; +- } +- cpy_index = sg_index - to_skip; +- memcpy(dest, (u8 *) sg_virt(current_sg) + +- current_sg->length - cpy_index, cpy_index); +- current_sg = scatterwalk_sg_next(current_sg); +- if (end - sg_index) +- sg_copy(dest + cpy_index, current_sg, end - sg_index); +-} +diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c +index d7d5c8af92b9..6d4456898007 100644 +--- a/drivers/firewire/core-cdev.c ++++ b/drivers/firewire/core-cdev.c +@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client, + _IOC_SIZE(cmd) > sizeof(buffer)) + return -ENOTTY; + +- if (_IOC_DIR(cmd) == _IOC_READ) +- memset(&buffer, 0, _IOC_SIZE(cmd)); ++ memset(&buffer, 0, sizeof(buffer)); + + if (_IOC_DIR(cmd) & _IOC_WRITE) + if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) +diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c +index ceba819891f4..0fc5fd6b3b41 100644 +--- a/drivers/gpu/drm/radeon/cik.c ++++ b/drivers/gpu/drm/radeon/cik.c +@@ -3343,8 +3343,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev) + /* init the CE partitions. 
CE only used for gfx on CIK */ + radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); + radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); +- radeon_ring_write(ring, 0xc000); +- radeon_ring_write(ring, 0xc000); ++ radeon_ring_write(ring, 0x8000); ++ radeon_ring_write(ring, 0x8000); + + /* setup clear context state */ + radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); +@@ -8105,6 +8105,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev) + u32 num_heads = 0, lb_size; + int i; + ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + radeon_update_display_priority(rdev); + + for (i = 0; i < rdev->num_crtc; i++) { +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c +index 7ca58fc7a1c6..20b00a0f42b4 100644 +--- a/drivers/gpu/drm/radeon/evergreen.c ++++ b/drivers/gpu/drm/radeon/evergreen.c +@@ -2312,6 +2312,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev) + u32 num_heads = 0, lb_size; + int i; + ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + radeon_update_display_priority(rdev); + + for (i = 0; i < rdev->num_crtc; i++) { +@@ -2520,6 +2523,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; + WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); ++ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0); + } + } else { + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); +diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c +index d71333033b2b..f98dcbeb9a72 100644 +--- a/drivers/gpu/drm/radeon/r100.c ++++ b/drivers/gpu/drm/radeon/r100.c +@@ -3189,6 +3189,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) + uint32_t pixel_bytes1 = 0; + uint32_t pixel_bytes2 = 0; + ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + radeon_update_display_priority(rdev); + + if (rdev->mode_info.crtcs[0]->base.enabled) { +diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c +index e0daa4fdb073..bbe84591f159 100644 +--- a/drivers/gpu/drm/radeon/rs600.c ++++ b/drivers/gpu/drm/radeon/rs600.c +@@ -826,6 +826,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev) + u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; + /* FIXME: implement full support */ + ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + radeon_update_display_priority(rdev); + + if (rdev->mode_info.crtcs[0]->base.enabled) +diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c +index 3c38f0af78fb..d33b4ad39b25 100644 +--- a/drivers/gpu/drm/radeon/rs690.c ++++ b/drivers/gpu/drm/radeon/rs690.c +@@ -585,6 +585,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev) + u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; + u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; + ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + radeon_update_display_priority(rdev); + + if (rdev->mode_info.crtcs[0]->base.enabled) +diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c +index 873eb4b193b4..9de81c5487e9 100644 +--- a/drivers/gpu/drm/radeon/rv515.c ++++ b/drivers/gpu/drm/radeon/rv515.c +@@ -1279,6 +1279,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev) + struct drm_display_mode *mode0 = NULL; + struct drm_display_mode *mode1 = NULL; + ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + radeon_update_display_priority(rdev); + + if 
(rdev->mode_info.crtcs[0]->base.enabled) +diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c +index 53769e9cf595..50482e763d80 100644 +--- a/drivers/gpu/drm/radeon/si.c ++++ b/drivers/gpu/drm/radeon/si.c +@@ -2230,6 +2230,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev) + u32 num_heads = 0, lb_size; + int i; + ++ if (!rdev->mode_info.mode_config_initialized) ++ return; ++ + radeon_update_display_priority(rdev); + + for (i = 0; i < rdev->num_crtc; i++) { +diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c +index a06e12552886..694af4958a98 100644 +--- a/drivers/input/evdev.c ++++ b/drivers/input/evdev.c +@@ -757,20 +757,23 @@ static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p) + */ + static int evdev_handle_get_val(struct evdev_client *client, + struct input_dev *dev, unsigned int type, +- unsigned long *bits, unsigned int max, +- unsigned int size, void __user *p, int compat) ++ unsigned long *bits, unsigned int maxbit, ++ unsigned int maxlen, void __user *p, ++ int compat) + { + int ret; + unsigned long *mem; ++ size_t len; + +- mem = kmalloc(sizeof(unsigned long) * max, GFP_KERNEL); ++ len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long); ++ mem = kmalloc(len, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + spin_lock_irq(&dev->event_lock); + spin_lock(&client->buffer_lock); + +- memcpy(mem, bits, sizeof(unsigned long) * max); ++ memcpy(mem, bits, len); + + spin_unlock(&dev->event_lock); + +@@ -778,7 +781,7 @@ static int evdev_handle_get_val(struct evdev_client *client, + + spin_unlock_irq(&client->buffer_lock); + +- ret = bits_to_user(mem, max, size, p, compat); ++ ret = bits_to_user(mem, maxbit, maxlen, p, compat); + if (ret < 0) + evdev_queue_syn_dropped(client); + +diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c +index 7c5d72a6a26a..642a42f719b1 100644 +--- a/drivers/input/mouse/alps.c ++++ b/drivers/input/mouse/alps.c +@@ -873,7 +873,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) + { + struct alps_data *priv = psmouse->private; + +- if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ ++ /* ++ * Check if we are dealing with a bare PS/2 packet, presumably from ++ * a device connected to the external PS/2 port. Because bare PS/2 ++ * protocol does not have enough constant bits to self-synchronize ++ * properly we only do this if the device is fully synchronized. ++ */ ++ if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { + if (psmouse->pktcnt == 3) { + alps_report_bare_ps2_packet(psmouse, psmouse->packet, + true); +@@ -903,6 +909,21 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) + psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", + psmouse->pktcnt - 1, + psmouse->packet[psmouse->pktcnt - 1]); ++ ++ if (priv->proto_version == ALPS_PROTO_V3 && ++ psmouse->pktcnt == psmouse->pktsize) { ++ /* ++ * Some Dell boxes, such as Latitude E6440 or E7440 ++ * with closed lid, quite often smash last byte of ++ * otherwise valid packet with 0xff. Given that the ++ * next packet is very likely to be valid let's ++ * report PSMOUSE_FULL_PACKET but not process data, ++ * rather than reporting PSMOUSE_BAD_DATA and ++ * filling the logs. 
++ */ ++ return PSMOUSE_FULL_PACKET; ++ } ++ + return PSMOUSE_BAD_DATA; + } + +@@ -1816,6 +1837,9 @@ int alps_init(struct psmouse *psmouse) + /* We are having trouble resyncing ALPS touchpads so disable it for now */ + psmouse->resync_time = 0; + ++ /* Allow 2 invalid packets without resetting device */ ++ psmouse->resetafter = psmouse->pktsize * 2; ++ + return 0; + + init_fail: +diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c +index 4777a73cd390..b6d370ba408f 100644 +--- a/drivers/input/serio/altera_ps2.c ++++ b/drivers/input/serio/altera_ps2.c +@@ -75,7 +75,7 @@ static void altera_ps2_close(struct serio *io) + { + struct ps2if *ps2if = io->port_data; + +- writel(0, ps2if->base); /* disable rx irq */ ++ writel(0, ps2if->base + 4); /* disable rx irq */ + } + + /* +diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c +index 7e45c9f6e6b7..b08c16bd816e 100644 +--- a/drivers/input/touchscreen/wm97xx-core.c ++++ b/drivers/input/touchscreen/wm97xx-core.c +@@ -70,11 +70,11 @@ + * Documentation/input/input-programming.txt for more details. + */ + +-static int abs_x[3] = {350, 3900, 5}; ++static int abs_x[3] = {150, 4000, 5}; + module_param_array(abs_x, int, NULL, 0); + MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz"); + +-static int abs_y[3] = {320, 3750, 40}; ++static int abs_y[3] = {200, 4000, 40}; + module_param_array(abs_y, int, NULL, 0); + MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz"); + +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c +index a42efc7f69ed..140be2dd3e23 100644 +--- a/drivers/md/dm-bufio.c ++++ b/drivers/md/dm-bufio.c +@@ -1418,9 +1418,9 @@ static void drop_buffers(struct dm_bufio_client *c) + + /* + * Test if the buffer is unused and too old, and commit it. +- * At if noio is set, we must not do any I/O because we hold +- * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to +- * different bufio client. ++ * And if GFP_NOFS is used, we must not do any I/O because we hold ++ * dm_bufio_clients_lock and we would risk deadlock if the I/O gets ++ * rerouted to different bufio client. + */ + static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, + unsigned long max_jiffies) +@@ -1428,7 +1428,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, + if (jiffies - b->last_accessed < max_jiffies) + return 0; + +- if (!(gfp & __GFP_IO)) { ++ if (!(gfp & __GFP_FS)) { + if (test_bit(B_READING, &b->state) || + test_bit(B_WRITING, &b->state) || + test_bit(B_DIRTY, &b->state)) +@@ -1470,7 +1470,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) + unsigned long freed; + + c = container_of(shrink, struct dm_bufio_client, shrinker); +- if (sc->gfp_mask & __GFP_IO) ++ if (sc->gfp_mask & __GFP_FS) + dm_bufio_lock(c); + else if (!dm_bufio_trylock(c)) + return SHRINK_STOP; +@@ -1487,7 +1487,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) + unsigned long count; + + c = container_of(shrink, struct dm_bufio_client, shrinker); +- if (sc->gfp_mask & __GFP_IO) ++ if (sc->gfp_mask & __GFP_FS) + dm_bufio_lock(c); + else if (!dm_bufio_trylock(c)) + return 0; +diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c +index 4880b69e2e9e..59715389b3cf 100644 +--- a/drivers/md/dm-raid.c ++++ b/drivers/md/dm-raid.c +@@ -785,8 +785,7 @@ struct dm_raid_superblock { + __le32 layout; + __le32 stripe_sectors; + +- __u8 pad[452]; /* Round struct to 512 bytes. 
*/ +- /* Always set to 0 when writing. */ ++ /* Remainder of a logical block is zero-filled when writing (see super_sync()). */ + } __packed; + + static int read_disk_sb(struct md_rdev *rdev, int size) +@@ -823,7 +822,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev) + test_bit(Faulty, &(rs->dev[i].rdev.flags))) + failed_devices |= (1ULL << i); + +- memset(sb, 0, sizeof(*sb)); ++ memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); + + sb->magic = cpu_to_le32(DM_RAID_MAGIC); + sb->features = cpu_to_le32(0); /* No features yet */ +@@ -858,7 +857,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) + uint64_t events_sb, events_refsb; + + rdev->sb_start = 0; +- rdev->sb_size = sizeof(*sb); ++ rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); ++ if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { ++ DMERR("superblock size of a logical block is no longer valid"); ++ return -EINVAL; ++ } + + ret = read_disk_sb(rdev, rdev->sb_size); + if (ret) +diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c +index f8c36d30eca8..0396d7fc1d8b 100644 +--- a/drivers/md/dm-thin.c ++++ b/drivers/md/dm-thin.c +@@ -1504,6 +1504,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) + return DM_MAPIO_SUBMITTED; + } + ++ /* ++ * We must hold the virtual cell before doing the lookup, otherwise ++ * there's a race with discard. ++ */ ++ build_virtual_key(tc->td, block, &key); ++ if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) ++ return DM_MAPIO_SUBMITTED; ++ + r = dm_thin_find_block(td, block, 0, &result); + + /* +@@ -1527,13 +1535,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) + * shared flag will be set in their case. + */ + thin_defer_bio(tc, bio); ++ cell_defer_no_holder_no_free(tc, &cell1); + return DM_MAPIO_SUBMITTED; + } + +- build_virtual_key(tc->td, block, &key); +- if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result)) +- return DM_MAPIO_SUBMITTED; +- + build_data_key(tc->td, result.block, &key); + if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) { + cell_defer_no_holder_no_free(tc, &cell1); +@@ -1554,6 +1559,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) + * of doing so. Just error it. + */ + bio_io_error(bio); ++ cell_defer_no_holder_no_free(tc, &cell1); + return DM_MAPIO_SUBMITTED; + } + /* fall through */ +@@ -1564,6 +1570,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) + * provide the hint to load the metadata into cache. + */ + thin_defer_bio(tc, bio); ++ cell_defer_no_holder_no_free(tc, &cell1); + return DM_MAPIO_SUBMITTED; + + default: +@@ -1573,6 +1580,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) + * pool is switched to fail-io mode. + */ + bio_io_error(bio); ++ cell_defer_no_holder_no_free(tc, &cell1); + return DM_MAPIO_SUBMITTED; + } + } +diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h +index 37d367bb9aa8..bf2b80d5c470 100644 +--- a/drivers/md/persistent-data/dm-btree-internal.h ++++ b/drivers/md/persistent-data/dm-btree-internal.h +@@ -42,6 +42,12 @@ struct btree_node { + } __packed; + + ++/* ++ * Locks a block using the btree node validator. 
++ */ ++int bn_read_lock(struct dm_btree_info *info, dm_block_t b, ++ struct dm_block **result); ++ + void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, + struct dm_btree_value_type *vt); + +diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c +index cf9fd676ae44..1b5e13ec7f96 100644 +--- a/drivers/md/persistent-data/dm-btree-spine.c ++++ b/drivers/md/persistent-data/dm-btree-spine.c +@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = { + + /*----------------------------------------------------------------*/ + +-static int bn_read_lock(struct dm_btree_info *info, dm_block_t b, ++int bn_read_lock(struct dm_btree_info *info, dm_block_t b, + struct dm_block **result) + { + return dm_tm_read_lock(info->tm, b, &btree_node_validator, result); +diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c +index 468e371ee9b2..9701d29c94e1 100644 +--- a/drivers/md/persistent-data/dm-btree.c ++++ b/drivers/md/persistent-data/dm-btree.c +@@ -828,22 +828,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_highest_key); + * FIXME: We shouldn't use a recursive algorithm when we have limited stack + * space. Also this only works for single level trees. + */ +-static int walk_node(struct ro_spine *s, dm_block_t block, ++static int walk_node(struct dm_btree_info *info, dm_block_t block, + int (*fn)(void *context, uint64_t *keys, void *leaf), + void *context) + { + int r; + unsigned i, nr; ++ struct dm_block *node; + struct btree_node *n; + uint64_t keys; + +- r = ro_step(s, block); +- n = ro_node(s); ++ r = bn_read_lock(info, block, &node); ++ if (r) ++ return r; ++ ++ n = dm_block_data(node); + + nr = le32_to_cpu(n->header.nr_entries); + for (i = 0; i < nr; i++) { + if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) { +- r = walk_node(s, value64(n, i), fn, context); ++ r = walk_node(info, value64(n, i), fn, context); + if (r) + goto out; + } else { +@@ -855,7 +859,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block, + } + + out: +- ro_pop(s); ++ dm_tm_unlock(info->tm, node); + return r; + } + +@@ -863,15 +867,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root, + int (*fn)(void *context, uint64_t *keys, void *leaf), + void *context) + { +- int r; +- struct ro_spine spine; +- + BUG_ON(info->levels > 1); +- +- init_ro_spine(&spine, info); +- r = walk_node(&spine, root, fn, context); +- exit_ro_spine(&spine); +- +- return r; ++ return walk_node(info, root, fn, context); + } + EXPORT_SYMBOL_GPL(dm_btree_walk); +diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c +index 5c9e3123ad2e..661f7f2a9e8b 100644 +--- a/drivers/media/usb/usbvision/usbvision-video.c ++++ b/drivers/media/usb/usbvision/usbvision-video.c +@@ -446,6 +446,7 @@ static int usbvision_v4l2_close(struct file *file) + if (usbvision->remove_pending) { + printk(KERN_INFO "%s: Final disconnect\n", __func__); + usbvision_release(usbvision); ++ return 0; + } + mutex_unlock(&usbvision->v4l2_lock); + +@@ -1221,6 +1222,7 @@ static int usbvision_radio_close(struct file *file) + if (usbvision->remove_pending) { + printk(KERN_INFO "%s: Final disconnect\n", __func__); + usbvision_release(usbvision); ++ return err_code; + } + + mutex_unlock(&usbvision->v4l2_lock); +diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c +index 25f8f93decb6..8d70fcf18901 100644 +--- a/drivers/memstick/host/rtsx_pci_ms.c ++++ b/drivers/memstick/host/rtsx_pci_ms.c +@@ 
-591,6 +591,7 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev) + pcr->slots[RTSX_MS_CARD].card_event = NULL; + msh = host->msh; + host->eject = true; ++ cancel_work_sync(&host->handle_req); + + mutex_lock(&host->host_mutex); + if (host->req) { +diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c +index 398faff8be7a..ade8bdfc03af 100644 +--- a/drivers/net/ethernet/sun/sunvnet.c ++++ b/drivers/net/ethernet/sun/sunvnet.c +@@ -656,7 +656,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) + spin_lock_irqsave(&port->vio.lock, flags); + + dr = &port->vio.drings[VIO_DRIVER_TX_RING]; +- if (unlikely(vnet_tx_dring_avail(dr) < 2)) { ++ if (unlikely(vnet_tx_dring_avail(dr) < 1)) { + if (!netif_queue_stopped(dev)) { + netif_stop_queue(dev); + +@@ -704,7 +704,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev) + dev->stats.tx_bytes += skb->len; + + dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1); +- if (unlikely(vnet_tx_dring_avail(dr) < 2)) { ++ if (unlikely(vnet_tx_dring_avail(dr) < 1)) { + netif_stop_queue(dev); + if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr)) + netif_wake_queue(dev); +diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c +index bf0d55e2dd63..6adbef89c4b0 100644 +--- a/drivers/net/ieee802154/fakehard.c ++++ b/drivers/net/ieee802154/fakehard.c +@@ -376,17 +376,20 @@ static int ieee802154fake_probe(struct platform_device *pdev) + + err = wpan_phy_register(phy); + if (err) +- goto out; ++ goto err_phy_reg; + + err = register_netdev(dev); +- if (err < 0) +- goto out; ++ if (err) ++ goto err_netdev_reg; + + dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); + return 0; + +-out: +- unregister_netdev(dev); ++err_netdev_reg: ++ wpan_phy_unregister(phy); ++err_phy_reg: ++ free_netdev(dev); ++ wpan_phy_free(phy); + return err; + } + +diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c +index 4abd98efdc34..89d21fc47a16 100644 +--- a/drivers/net/macvtap.c ++++ b/drivers/net/macvtap.c +@@ -625,6 +625,8 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb, + if (skb->ip_summed == CHECKSUM_PARTIAL) { + vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + vnet_hdr->csum_start = skb_checksum_start_offset(skb); ++ if (vlan_tx_tag_present(skb)) ++ vnet_hdr->csum_start += VLAN_HLEN; + vnet_hdr->csum_offset = skb->csum_offset; + } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; +diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c +index 1aff970be33e..1dc628ffce2b 100644 +--- a/drivers/net/ppp/pptp.c ++++ b/drivers/net/ppp/pptp.c +@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr, + int len = sizeof(struct sockaddr_pppox); + struct sockaddr_pppox sp; + +- sp.sa_family = AF_PPPOX; ++ memset(&sp.sa_addr, 0, sizeof(sp.sa_addr)); ++ ++ sp.sa_family = AF_PPPOX; + sp.sa_protocol = PX_PROTO_PPTP; + sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; + +diff --git a/drivers/net/tun.c b/drivers/net/tun.c +index 495830a8ee28..d72d06301642 100644 +--- a/drivers/net/tun.c ++++ b/drivers/net/tun.c +@@ -1189,6 +1189,10 @@ static ssize_t tun_put_user(struct tun_struct *tun, + struct tun_pi pi = { 0, skb->protocol }; + ssize_t total = 0; + int vlan_offset = 0, copied; ++ int vlan_hlen = 0; ++ ++ if (vlan_tx_tag_present(skb)) ++ vlan_hlen = VLAN_HLEN; + + if (!(tun->flags & TUN_NO_PI)) { + if ((len -= sizeof(pi)) < 0) +@@ -1240,7 +1244,8 @@ static 
ssize_t tun_put_user(struct tun_struct *tun, + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; +- gso.csum_start = skb_checksum_start_offset(skb); ++ gso.csum_start = skb_checksum_start_offset(skb) + ++ vlan_hlen; + gso.csum_offset = skb->csum_offset; + } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { + gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; +@@ -1253,10 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun, + } + + copied = total; +- total += skb->len; +- if (!vlan_tx_tag_present(skb)) { +- len = min_t(int, skb->len, len); +- } else { ++ len = min_t(int, skb->len + vlan_hlen, len); ++ total += skb->len + vlan_hlen; ++ if (vlan_hlen) { + int copy, ret; + struct { + __be16 h_vlan_proto; +@@ -1267,8 +1271,6 @@ static ssize_t tun_put_user(struct tun_struct *tun, + veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); + + vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); +- len = min_t(int, skb->len + VLAN_HLEN, len); +- total += VLAN_HLEN; + + copy = min_t(int, vlan_offset, len); + ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); +diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c +index 2d8bf4232502..7f22d27070fc 100644 +--- a/drivers/net/usb/qmi_wwan.c ++++ b/drivers/net/usb/qmi_wwan.c +@@ -756,6 +756,7 @@ static const struct usb_device_id products[] = { + {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ + {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ ++ {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */ + + /* 4. Gobi 1000 devices */ + {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ +diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c +index 23969eaf88c1..5407c11a9f14 100644 +--- a/drivers/net/vxlan.c ++++ b/drivers/net/vxlan.c +@@ -282,13 +282,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) + return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); + } + +-/* Find VXLAN socket based on network namespace and UDP port */ +-static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) ++/* Find VXLAN socket based on network namespace, address family and UDP port */ ++static struct vxlan_sock *vxlan_find_sock(struct net *net, ++ sa_family_t family, __be16 port) + { + struct vxlan_sock *vs; + + hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { +- if (inet_sk(vs->sock->sk)->inet_sport == port) ++ if (inet_sk(vs->sock->sk)->inet_sport == port && ++ inet_sk(vs->sock->sk)->sk.sk_family == family) + return vs; + } + return NULL; +@@ -307,11 +309,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id) + } + + /* Look up VNI in a per net namespace table */ +-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) ++static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, ++ sa_family_t family, __be16 port) + { + struct vxlan_sock *vs; + +- vs = vxlan_find_sock(net, port); ++ vs = vxlan_find_sock(net, family, port); + if (!vs) + return NULL; + +@@ -1783,7 +1786,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, + struct vxlan_dev *dst_vxlan; + + ip_rt_put(rt); +- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); ++ dst_vxlan = vxlan_find_vni(dev_net(dev), vni, ++ dst->sa.sa_family, dst_port); + if 
(!dst_vxlan) + goto tx_error; + vxlan_encap_bypass(skb, vxlan, dst_vxlan); +@@ -1836,7 +1840,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, + struct vxlan_dev *dst_vxlan; + + dst_release(ndst); +- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port); ++ dst_vxlan = vxlan_find_vni(dev_net(dev), vni, ++ dst->sa.sa_family, dst_port); + if (!dst_vxlan) + goto tx_error; + vxlan_encap_bypass(skb, vxlan, dst_vxlan); +@@ -1987,6 +1992,7 @@ static int vxlan_init(struct net_device *dev) + { + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); ++ bool ipv6 = vxlan->flags & VXLAN_F_IPV6; + struct vxlan_sock *vs; + + dev->tstats = alloc_percpu(struct pcpu_tstats); +@@ -1994,7 +2000,8 @@ static int vxlan_init(struct net_device *dev) + return -ENOMEM; + + spin_lock(&vn->sock_lock); +- vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port); ++ vs = vxlan_find_sock(dev_net(dev), ipv6 ? AF_INET6 : AF_INET, ++ vxlan->dst_port); + if (vs) { + /* If we have a socket with same port already, reuse it */ + atomic_inc(&vs->refcnt); +@@ -2439,7 +2446,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, + return vs; + + spin_lock(&vn->sock_lock); +- vs = vxlan_find_sock(net, port); ++ vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port); + if (vs) { + if (vs->rcv == rcv) + atomic_inc(&vs->refcnt); +@@ -2584,7 +2591,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev, + if (data[IFLA_VXLAN_PORT]) + vxlan->dst_port = nla_get_be16(data[IFLA_VXLAN_PORT]); + +- if (vxlan_find_vni(net, vni, vxlan->dst_port)) { ++ if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET, ++ vxlan->dst_port)) { + pr_info("duplicate VNI %u\n", vni); + return -EEXIST; + } +diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h +index 80b47508647c..c8d1e378f631 100644 +--- a/drivers/net/wireless/iwlwifi/iwl-trans.h ++++ b/drivers/net/wireless/iwlwifi/iwl-trans.h +@@ -484,6 +484,7 @@ enum iwl_trans_state { + * Set during transport allocation. + * @hw_id_str: a string with info about HW ID. Set during transport allocation. + * @pm_support: set to true in start_hw if link pm is supported ++ * @ltr_enabled: set to true if the LTR is enabled + * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. + * The user should use iwl_trans_{alloc,free}_tx_cmd. 
+ * @dev_cmd_headroom: room needed for the transport's private use before the +@@ -508,6 +509,7 @@ struct iwl_trans { + u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; + + bool pm_support; ++ bool ltr_enabled; + + /* The following fields are internal only */ + struct kmem_cache *dev_cmd_pool; +diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +index 8e7ab41079ca..4dacb20bf490 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h ++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h +@@ -66,13 +66,46 @@ + + /* Power Management Commands, Responses, Notifications */ + ++/** ++ * enum iwl_ltr_config_flags - masks for LTR config command flags ++ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status ++ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow ++ * memory access ++ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR ++ * reg change ++ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from ++ * D0 to D3 ++ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register ++ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register ++ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD ++ */ ++enum iwl_ltr_config_flags { ++ LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0), ++ LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1), ++ LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2), ++ LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3), ++ LTR_CFG_FLAG_SW_SET_SHORT = BIT(4), ++ LTR_CFG_FLAG_SW_SET_LONG = BIT(5), ++ LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6), ++}; ++ ++/** ++ * struct iwl_ltr_config_cmd - configures the LTR ++ * @flags: See %enum iwl_ltr_config_flags ++ */ ++struct iwl_ltr_config_cmd { ++ __le32 flags; ++ __le32 static_long; ++ __le32 static_short; ++} __packed; ++ + /* Radio LP RX Energy Threshold measured in dBm */ + #define POWER_LPRX_RSSI_THRESHOLD 75 + #define POWER_LPRX_RSSI_THRESHOLD_MAX 94 + #define POWER_LPRX_RSSI_THRESHOLD_MIN 30 + + /** +- * enum iwl_scan_flags - masks for power table command flags ++ * enum iwl_power_flags - masks for power table command flags + * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off + * receiver and transmitter. '0' - does not allow. 
+ * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, +diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h +index 66264cc5a016..cd59ae18ea8f 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h ++++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h +@@ -138,6 +138,7 @@ enum { + + /* Power - legacy power table command */ + POWER_TABLE_CMD = 0x77, ++ LTR_CONFIG = 0xee, + + /* Thermal Throttling*/ + REPLY_THERMAL_MNG_BACKOFF = 0x7e, +diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c +index c76299a3a1e0..08f12006ca77 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/fw.c ++++ b/drivers/net/wireless/iwlwifi/mvm/fw.c +@@ -424,6 +424,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm) + goto error; + } + ++ if (mvm->trans->ltr_enabled) { ++ struct iwl_ltr_config_cmd cmd = { ++ .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE), ++ }; ++ ++ WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0, ++ sizeof(cmd), &cmd)); ++ } ++ + IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); + return 0; + error: +diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c +index 1fd08baa0d32..e3cdc97380d3 100644 +--- a/drivers/net/wireless/iwlwifi/mvm/ops.c ++++ b/drivers/net/wireless/iwlwifi/mvm/ops.c +@@ -303,6 +303,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { + CMD(REPLY_BEACON_FILTERING_CMD), + CMD(REPLY_THERMAL_MNG_BACKOFF), + CMD(MAC_PM_POWER_TABLE), ++ CMD(LTR_CONFIG), + }; + #undef CMD + +diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c +index dc875f4befef..67536a26df39 100644 +--- a/drivers/net/wireless/iwlwifi/pcie/trans.c ++++ b/drivers/net/wireless/iwlwifi/pcie/trans.c +@@ -121,6 +121,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans) + { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u16 lctl; ++ u16 cap; + + /* + * HW bug W/A for instability in PCIe bus L0S->L1 transition. +@@ -131,16 +132,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans) + * power savings, even without L1. + */ + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); +- if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { +- /* L1-ASPM enabled; disable(!) L0S */ ++ if (lctl & PCI_EXP_LNKCTL_ASPM_L1) + iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); +- dev_info(trans->dev, "L1 Enabled; Disabling L0S\n"); +- } else { +- /* L1-ASPM disabled; enable(!) L0S */ ++ else + iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); +- dev_info(trans->dev, "L1 Disabled; Enabling L0S\n"); +- } + trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); ++ ++ pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); ++ trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN; ++ dev_info(trans->dev, "L1 %sabled - LTR %sabled\n", ++ (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis", ++ trans->ltr_enabled ? 
"En" : "Dis"); + } + + /* +diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c +index 2cd3f54e1efa..38b8b7139ba3 100644 +--- a/drivers/net/wireless/mac80211_hwsim.c ++++ b/drivers/net/wireless/mac80211_hwsim.c +@@ -2261,7 +2261,7 @@ static int __init init_mac80211_hwsim(void) + printk(KERN_DEBUG + "mac80211_hwsim: device_bind_driver failed (%d)\n", + err); +- goto failed_hw; ++ goto failed_bind; + } + + skb_queue_head_init(&data->pending); +@@ -2563,6 +2563,8 @@ failed_mon: + return err; + + failed_hw: ++ device_release_driver(data->dev); ++failed_bind: + device_unregister(data->dev); + failed_drvdata: + ieee80211_free_hw(hw); +diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c +index 1b8bdb7e9bf4..72b73657576b 100644 +--- a/drivers/parport/parport_serial.c ++++ b/drivers/parport/parport_serial.c +@@ -62,6 +62,7 @@ enum parport_pc_pci_cards { + timedia_9079a, + timedia_9079b, + timedia_9079c, ++ wch_ch353_1s1p, + wch_ch353_2s1p, + sunix_2s1p, + }; +@@ -148,6 +149,7 @@ static struct parport_pc_pci cards[] = { + /* timedia_9079a */ { 1, { { 2, 3 }, } }, + /* timedia_9079b */ { 1, { { 2, 3 }, } }, + /* timedia_9079c */ { 1, { { 2, 3 }, } }, ++ /* wch_ch353_1s1p*/ { 1, { { 1, -1}, } }, + /* wch_ch353_2s1p*/ { 1, { { 2, -1}, } }, + /* sunix_2s1p */ { 1, { { 3, -1 }, } }, + }; +@@ -253,6 +255,7 @@ static struct pci_device_id parport_serial_pci_tbl[] = { + { 0x1409, 0x7168, 0x1409, 0xd079, 0, 0, timedia_9079c }, + + /* WCH CARDS */ ++ { 0x4348, 0x5053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, wch_ch353_1s1p}, + { 0x4348, 0x7053, 0x4348, 0x3253, 0, 0, wch_ch353_2s1p}, + + /* +@@ -479,6 +482,12 @@ static struct pciserial_board pci_parport_serial_boards[] = { + .base_baud = 921600, + .uart_offset = 8, + }, ++ [wch_ch353_1s1p] = { ++ .flags = FL_BASE0|FL_BASE_BARS, ++ .num_ports = 1, ++ .base_baud = 115200, ++ .uart_offset = 8, ++ }, + [wch_ch353_2s1p] = { + .flags = FL_BASE0|FL_BASE_BARS, + .num_ports = 2, +diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig +index 0c657d6af03d..51cf8083b299 100644 +--- a/drivers/pcmcia/Kconfig ++++ b/drivers/pcmcia/Kconfig +@@ -202,6 +202,7 @@ config PCMCIA_SA1111 + depends on ARM && SA1111 && PCMCIA + select PCMCIA_SOC_COMMON + select PCMCIA_SA11XX_BASE if ARCH_SA1100 ++ select PCMCIA_PXA2XX if ARCH_LUBBOCK && SA1111 + help + Say Y here to include support for SA1111-based PCMCIA or CF + sockets, found on the Jornada 720, Graphicsmaster and other +@@ -217,7 +218,6 @@ config PCMCIA_PXA2XX + || ARCOM_PCMCIA || ARCH_PXA_ESERIES || MACH_STARGATE2 \ + || MACH_VPAC270 || MACH_BALLOON3 || MACH_COLIBRI \ + || MACH_COLIBRI320 || MACH_H4700) +- select PCMCIA_SA1111 if ARCH_LUBBOCK && SA1111 + select PCMCIA_SOC_COMMON + help + Say Y here to include support for the PXA2xx PCMCIA controller +diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile +index 7745b512a87c..fd55a6951402 100644 +--- a/drivers/pcmcia/Makefile ++++ b/drivers/pcmcia/Makefile +@@ -49,6 +49,7 @@ sa1100_cs-y += sa1100_generic.o + sa1100_cs-$(CONFIG_SA1100_ASSABET) += sa1100_assabet.o + sa1100_cs-$(CONFIG_SA1100_CERF) += sa1100_cerf.o + sa1100_cs-$(CONFIG_SA1100_COLLIE) += pxa2xx_sharpsl.o ++sa1100_cs-$(CONFIG_SA1100_H3100) += sa1100_h3600.o + sa1100_cs-$(CONFIG_SA1100_H3600) += sa1100_h3600.o + sa1100_cs-$(CONFIG_SA1100_NANOENGINE) += sa1100_nanoengine.o + sa1100_cs-$(CONFIG_SA1100_SHANNON) += sa1100_shannon.o +diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c +index b8f5acf02261..de24232c5191 100644 +--- 
a/drivers/pcmcia/at91_cf.c ++++ b/drivers/pcmcia/at91_cf.c +@@ -245,7 +245,7 @@ static int at91_cf_dt_init(struct platform_device *pdev) + } + #endif + +-static int __init at91_cf_probe(struct platform_device *pdev) ++static int at91_cf_probe(struct platform_device *pdev) + { + struct at91_cf_socket *cf; + struct at91_cf_data *board = pdev->dev.platform_data; +@@ -354,7 +354,7 @@ fail0a: + return status; + } + +-static int __exit at91_cf_remove(struct platform_device *pdev) ++static int at91_cf_remove(struct platform_device *pdev) + { + struct at91_cf_socket *cf = platform_get_drvdata(pdev); + +@@ -404,14 +404,13 @@ static struct platform_driver at91_cf_driver = { + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(at91_cf_dt_ids), + }, +- .remove = __exit_p(at91_cf_remove), ++ .probe = at91_cf_probe, ++ .remove = at91_cf_remove, + .suspend = at91_cf_suspend, + .resume = at91_cf_resume, + }; + +-/*--------------------------------------------------------------------------*/ +- +-module_platform_driver_probe(at91_cf_driver, at91_cf_probe); ++module_platform_driver(at91_cf_driver); + + MODULE_DESCRIPTION("AT91 Compact Flash Driver"); + MODULE_AUTHOR("David Brownell"); +diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c +index 3baa3ef09682..40e040314503 100644 +--- a/drivers/pcmcia/sa1111_jornada720.c ++++ b/drivers/pcmcia/sa1111_jornada720.c +@@ -9,6 +9,7 @@ + #include <linux/device.h> + #include <linux/errno.h> + #include <linux/init.h> ++#include <linux/io.h> + + #include <mach/hardware.h> + #include <asm/hardware/sa1111.h> +@@ -94,6 +95,7 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = { + int pcmcia_jornada720_init(struct device *dev) + { + int ret = -ENODEV; ++ struct sa1111_dev *sadev = SA1111_DEV(dev); + + if (machine_is_jornada720()) { + unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3; +@@ -101,12 +103,12 @@ int pcmcia_jornada720_init(struct device *dev) + GRER |= 0x00000002; + + /* Set GPIO_A<3:1> to be outputs for PCMCIA/CF power controller: */ +- sa1111_set_io_dir(dev, pin, 0, 0); +- sa1111_set_io(dev, pin, 0); +- sa1111_set_sleep_io(dev, pin, 0); ++ sa1111_set_io_dir(sadev, pin, 0, 0); ++ sa1111_set_io(sadev, pin, 0); ++ sa1111_set_sleep_io(sadev, pin, 0); + + sa11xx_drv_pcmcia_ops(&jornada720_pcmcia_ops); +- ret = sa1111_pcmcia_add(dev, &jornada720_pcmcia_ops, ++ ret = sa1111_pcmcia_add(sadev, &jornada720_pcmcia_ops, + sa11xx_drv_pcmcia_add_one); + } + +diff --git a/drivers/platform/x86/pvpanic.c b/drivers/platform/x86/pvpanic.c +index 47ae0c47d4b5..469e182c5461 100644 +--- a/drivers/platform/x86/pvpanic.c ++++ b/drivers/platform/x86/pvpanic.c +@@ -71,6 +71,7 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, + + static struct notifier_block pvpanic_panic_nb = { + .notifier_call = pvpanic_panic_notify, ++ .priority = 1, /* let this called before broken drm_fb_helper */ + }; + + +diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c +index 9ba3642cb19e..066e3198838d 100644 +--- a/drivers/scsi/scsi_error.c ++++ b/drivers/scsi/scsi_error.c +@@ -1746,8 +1746,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost) + * is no point trying to lock the door of an off-line device. 
+ */ + shost_for_each_device(sdev, shost) { +- if (scsi_device_online(sdev) && sdev->locked) ++ if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) { + scsi_eh_lock_door(sdev); ++ sdev->was_reset = 0; ++ } + } + + /* +diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c +index d02088f7dc33..162e01a27d40 100644 +--- a/drivers/staging/zram/zram_drv.c ++++ b/drivers/staging/zram/zram_drv.c +@@ -430,7 +430,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index, + } + + if (page_zero_filled(uncmem)) { +- kunmap_atomic(user_mem); ++ if (user_mem) ++ kunmap_atomic(user_mem); + /* Free memory associated with this sector now. */ + zram_free_page(zram, index); + +diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c +index 6d402cf84cf1..ee1f7c52bd52 100644 +--- a/drivers/tty/serial/8250/8250_pci.c ++++ b/drivers/tty/serial/8250/8250_pci.c +@@ -1558,6 +1558,7 @@ pci_wch_ch353_setup(struct serial_private *priv, + #define PCI_DEVICE_ID_WCH_CH352_2S 0x3253 + #define PCI_DEVICE_ID_WCH_CH353_4S 0x3453 + #define PCI_DEVICE_ID_WCH_CH353_2S1PF 0x5046 ++#define PCI_DEVICE_ID_WCH_CH353_1S1P 0x5053 + #define PCI_DEVICE_ID_WCH_CH353_2S1P 0x7053 + #define PCI_VENDOR_ID_AGESTAR 0x5372 + #define PCI_DEVICE_ID_AGESTAR_9375 0x6872 +@@ -2159,6 +2160,14 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { + .subdevice = PCI_ANY_ID, + .setup = pci_omegapci_setup, + }, ++ /* WCH CH353 1S1P card (16550 clone) */ ++ { ++ .vendor = PCI_VENDOR_ID_WCH, ++ .device = PCI_DEVICE_ID_WCH_CH353_1S1P, ++ .subvendor = PCI_ANY_ID, ++ .subdevice = PCI_ANY_ID, ++ .setup = pci_wch_ch353_setup, ++ }, + /* WCH CH353 2S1P card (16550 clone) */ + { + .vendor = PCI_VENDOR_ID_WCH, +@@ -3228,6 +3237,7 @@ static const struct pci_device_id blacklist[] = { + + /* multi-io cards handled by parport_serial */ + { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */ ++ { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */ + }; + + /* +diff --git a/drivers/vlynq/vlynq.c b/drivers/vlynq/vlynq.c +index 7b07135ab26e..c0227f9418eb 100644 +--- a/drivers/vlynq/vlynq.c ++++ b/drivers/vlynq/vlynq.c +@@ -762,7 +762,8 @@ static int vlynq_remove(struct platform_device *pdev) + + device_unregister(&dev->dev); + iounmap(dev->local); +- release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start); ++ release_mem_region(dev->regs_start, ++ dev->regs_end - dev->regs_start + 1); + + kfree(dev); + +diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c +index 0b2fefbfcd10..1abbf80ffb19 100644 +--- a/drivers/vme/bridges/vme_ca91cx42.c ++++ b/drivers/vme/bridges/vme_ca91cx42.c +@@ -869,14 +869,13 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image, + + spin_lock(&image->lock); + +- /* The following code handles VME address alignment problem +- * in order to assure the maximal data width cycle. +- * We cannot use memcpy_xxx directly here because it +- * may cut data transfer in 8-bits cycles, thus making +- * D16 cycle impossible. +- * From the other hand, the bridge itself assures that +- * maximal configured data cycle is used and splits it +- * automatically for non-aligned addresses. ++ /* The following code handles VME address alignment. We cannot use ++ * memcpy_xxx here because it may cut data transfers in to 8-bit ++ * cycles when D16 or D32 cycles are required on the VME bus. 
++ * On the other hand, the bridge itself assures that the maximum data ++ * cycle configured for the transfer is used and splits it ++ * automatically for non-aligned addresses, so we don't want the ++ * overhead of needlessly forcing small transfers for the entire cycle. + */ + if ((uintptr_t)addr & 0x1) { + *(u8 *)buf = ioread8(addr); +@@ -896,9 +895,9 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image, + } + + count32 = (count - done) & ~0x3; +- if (count32 > 0) { +- memcpy_fromio(buf + done, addr + done, (unsigned int)count); +- done += count32; ++ while (done < count32) { ++ *(u32 *)(buf + done) = ioread32(addr + done); ++ done += 4; + } + + if ((count - done) & 0x2) { +@@ -930,7 +929,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image, + spin_lock(&image->lock); + + /* Here we apply for the same strategy we do in master_read +- * function in order to assure D16 cycle when required. ++ * function in order to assure the correct cycles. + */ + if ((uintptr_t)addr & 0x1) { + iowrite8(*(u8 *)buf, addr); +@@ -950,9 +949,9 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image, + } + + count32 = (count - done) & ~0x3; +- if (count32 > 0) { +- memcpy_toio(addr + done, buf + done, count32); +- done += count32; ++ while (done < count32) { ++ iowrite32(*(u32 *)(buf + done), addr + done); ++ done += 4; + } + + if ((count - done) & 0x2) { +diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c +index 7db4e6395e23..ef9028f87da3 100644 +--- a/drivers/vme/bridges/vme_tsi148.c ++++ b/drivers/vme/bridges/vme_tsi148.c +@@ -741,7 +741,7 @@ static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled, + reg_join(vme_bound_high, vme_bound_low, &vme_bound); + reg_join(pci_offset_high, pci_offset_low, &pci_offset); + +- *pci_base = (dma_addr_t)vme_base + pci_offset; ++ *pci_base = (dma_addr_t)(*vme_base + pci_offset); + + *enabled = 0; + *aspace = 0; +@@ -910,11 +910,15 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled, + unsigned long long pci_bound, vme_offset, pci_base; + struct vme_bridge *tsi148_bridge; + struct tsi148_driver *bridge; ++ struct pci_bus_region region; ++ struct pci_dev *pdev; + + tsi148_bridge = image->parent; + + bridge = tsi148_bridge->driver_priv; + ++ pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev); ++ + /* Verify input data */ + if (vme_base & 0xFFFF) { + dev_err(tsi148_bridge->parent, "Invalid VME Window " +@@ -949,7 +953,9 @@ static int tsi148_master_set(struct vme_master_resource *image, int enabled, + pci_bound = 0; + vme_offset = 0; + } else { +- pci_base = (unsigned long long)image->bus_resource.start; ++ pcibios_resource_to_bus(pdev, ®ion, ++ &image->bus_resource); ++ pci_base = region.start; + + /* + * Bound address is a valid address for the window, adjust +@@ -1276,8 +1282,8 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf, + spin_lock(&image->lock); + + /* The following code handles VME address alignment. We cannot use +- * memcpy_xxx directly here because it may cut small data transfers in +- * to 8-bit cycles, thus making D16 cycle impossible. ++ * memcpy_xxx here because it may cut data transfers in to 8-bit ++ * cycles when D16 or D32 cycles are required on the VME bus. 
+ * On the other hand, the bridge itself assures that the maximum data + * cycle configured for the transfer is used and splits it + * automatically for non-aligned addresses, so we don't want the +@@ -1301,9 +1307,9 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf, + } + + count32 = (count - done) & ~0x3; +- if (count32 > 0) { +- memcpy_fromio(buf + done, addr + done, count32); +- done += count32; ++ while (done < count32) { ++ *(u32 *)(buf + done) = ioread32(addr + done); ++ done += 4; + } + + if ((count - done) & 0x2) { +@@ -1363,7 +1369,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, + spin_lock(&image->lock); + + /* Here we apply for the same strategy we do in master_read +- * function in order to assure D16 cycle when required. ++ * function in order to assure the correct cycles. + */ + if ((uintptr_t)addr & 0x1) { + iowrite8(*(u8 *)buf, addr); +@@ -1383,9 +1389,9 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf, + } + + count32 = (count - done) & ~0x3; +- if (count32 > 0) { +- memcpy_toio(addr + done, buf + done, count32); +- done += count32; ++ while (done < count32) { ++ iowrite32(*(u32 *)(buf + done), addr + done); ++ done += 4; + } + + if ((count - done) & 0x2) { +diff --git a/fs/cifs/file.c b/fs/cifs/file.c +index a2793c93d6ed..f9715276a257 100644 +--- a/fs/cifs/file.c ++++ b/fs/cifs/file.c +@@ -2590,8 +2590,8 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov, + if (rc > 0) { + ssize_t err; + +- err = generic_write_sync(file, pos, rc); +- if (err < 0 && rc > 0) ++ err = generic_write_sync(file, iocb->ki_pos - rc, rc); ++ if (err < 0) + rc = err; + } + +diff --git a/fs/ext4/file.c b/fs/ext4/file.c +index 1b890101397b..7b316011bfef 100644 +--- a/fs/ext4/file.c ++++ b/fs/ext4/file.c +@@ -152,7 +152,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov, + if (ret > 0) { + ssize_t err; + +- err = generic_write_sync(file, pos, ret); ++ err = generic_write_sync(file, iocb->ki_pos - ret, ret); + if (err < 0 && ret > 0) + ret = err; + } +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index a58a796bb92b..ba68d211d748 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -3970,8 +3970,8 @@ void ext4_set_inode_flags(struct inode *inode) + new_fl |= S_NOATIME; + if (flags & EXT4_DIRSYNC_FL) + new_fl |= S_DIRSYNC; +- set_mask_bits(&inode->i_flags, +- S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl); ++ inode_set_flags(inode, new_fl, ++ S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); + } + + /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */ +diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c +index 4a4fea002673..64112185f47c 100644 +--- a/fs/hfsplus/dir.c ++++ b/fs/hfsplus/dir.c +@@ -212,13 +212,31 @@ static int hfsplus_readdir(struct file *file, struct dir_context *ctx) + be32_to_cpu(entry.folder.id), DT_DIR)) + break; + } else if (type == HFSPLUS_FILE) { ++ u16 mode; ++ unsigned type = DT_UNKNOWN; ++ + if (fd.entrylength < sizeof(struct hfsplus_cat_file)) { + pr_err("small file entry\n"); + err = -EIO; + goto out; + } ++ ++ mode = be16_to_cpu(entry.file.permissions.mode); ++ if (S_ISREG(mode)) ++ type = DT_REG; ++ else if (S_ISLNK(mode)) ++ type = DT_LNK; ++ else if (S_ISFIFO(mode)) ++ type = DT_FIFO; ++ else if (S_ISCHR(mode)) ++ type = DT_CHR; ++ else if (S_ISBLK(mode)) ++ type = DT_BLK; ++ else if (S_ISSOCK(mode)) ++ type = DT_SOCK; ++ + if (!dir_emit(ctx, strbuf, len, +- be32_to_cpu(entry.file.id), DT_REG)) ++ be32_to_cpu(entry.file.id), 
type)) + break; + } else { + pr_err("bad catalog entry type\n"); +diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c +index fbb212fbb1ef..f0f601c83ad6 100644 +--- a/fs/hfsplus/extents.c ++++ b/fs/hfsplus/extents.c +@@ -498,11 +498,13 @@ int hfsplus_file_extend(struct inode *inode) + goto insert_extent; + } + out: +- mutex_unlock(&hip->extents_lock); + if (!res) { + hip->alloc_blocks += len; ++ mutex_unlock(&hip->extents_lock); + hfsplus_mark_inode_dirty(inode, HFSPLUS_I_ALLOC_DIRTY); ++ return 0; + } ++ mutex_unlock(&hip->extents_lock); + return res; + + insert_extent: +@@ -556,11 +558,13 @@ void hfsplus_file_truncate(struct inode *inode) + + blk_cnt = (inode->i_size + HFSPLUS_SB(sb)->alloc_blksz - 1) >> + HFSPLUS_SB(sb)->alloc_blksz_shift; ++ ++ mutex_lock(&hip->extents_lock); ++ + alloc_cnt = hip->alloc_blocks; + if (blk_cnt == alloc_cnt) +- goto out; ++ goto out_unlock; + +- mutex_lock(&hip->extents_lock); + res = hfs_find_init(HFSPLUS_SB(sb)->ext_tree, &fd); + if (res) { + mutex_unlock(&hip->extents_lock); +@@ -592,10 +596,10 @@ void hfsplus_file_truncate(struct inode *inode) + hfs_brec_remove(&fd); + } + hfs_find_exit(&fd); +- mutex_unlock(&hip->extents_lock); + + hip->alloc_blocks = blk_cnt; +-out: ++out_unlock: ++ mutex_unlock(&hip->extents_lock); + hip->phys_size = inode->i_size; + hip->fs_blocks = (inode->i_size + sb->s_blocksize - 1) >> + sb->s_blocksize_bits; +diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c +index 968eab5bc1f5..68537e8b7a09 100644 +--- a/fs/hfsplus/options.c ++++ b/fs/hfsplus/options.c +@@ -75,7 +75,7 @@ int hfsplus_parse_options_remount(char *input, int *force) + int token; + + if (!input) +- return 0; ++ return 1; + + while ((p = strsep(&input, ",")) != NULL) { + if (!*p) +diff --git a/fs/inode.c b/fs/inode.c +index 1e6e8468f2d8..d9134a0f5dd9 100644 +--- a/fs/inode.c ++++ b/fs/inode.c +@@ -1871,3 +1871,34 @@ void inode_dio_done(struct inode *inode) + wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); + } + EXPORT_SYMBOL(inode_dio_done); ++ ++/* ++ * inode_set_flags - atomically set some inode flags ++ * ++ * Note: the caller should be holding i_mutex, or else be sure that ++ * they have exclusive access to the inode structure (i.e., while the ++ * inode is being instantiated). The reason for the cmpxchg() loop ++ * --- which wouldn't be necessary if all code paths which modify ++ * i_flags actually followed this rule, is that there is at least one ++ * code path which doesn't today --- for example, ++ * __generic_file_aio_write() calls file_remove_suid() without holding ++ * i_mutex --- so we use cmpxchg() out of an abundance of caution. ++ * ++ * In the long run, i_mutex is overkill, and we should probably look ++ * at using the i_lock spinlock to protect i_flags, and then make sure ++ * it is so documented in include/linux/fs.h and that all code follows ++ * the locking convention!! 
++ */ ++void inode_set_flags(struct inode *inode, unsigned int flags, ++ unsigned int mask) ++{ ++ unsigned int old_flags, new_flags; ++ ++ WARN_ON_ONCE(flags & ~mask); ++ do { ++ old_flags = ACCESS_ONCE(inode->i_flags); ++ new_flags = (old_flags & ~mask) | flags; ++ } while (unlikely(cmpxchg(&inode->i_flags, old_flags, ++ new_flags) != old_flags)); ++} ++EXPORT_SYMBOL(inode_set_flags); +diff --git a/fs/ioprio.c b/fs/ioprio.c +index e50170ca7c33..31666c92b46a 100644 +--- a/fs/ioprio.c ++++ b/fs/ioprio.c +@@ -157,14 +157,16 @@ out: + + int ioprio_best(unsigned short aprio, unsigned short bprio) + { +- unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); +- unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); ++ unsigned short aclass; ++ unsigned short bclass; + +- if (aclass == IOPRIO_CLASS_NONE) +- aclass = IOPRIO_CLASS_BE; +- if (bclass == IOPRIO_CLASS_NONE) +- bclass = IOPRIO_CLASS_BE; ++ if (!ioprio_valid(aprio)) ++ aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); ++ if (!ioprio_valid(bprio)) ++ bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM); + ++ aclass = IOPRIO_PRIO_CLASS(aprio); ++ bclass = IOPRIO_PRIO_CLASS(bprio); + if (aclass == bclass) + return min(aprio, bprio); + if (aclass > bclass) +diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c +index 7f464c513ba0..6b0f816201a2 100644 +--- a/fs/jfs/jfs_inode.c ++++ b/fs/jfs/jfs_inode.c +@@ -29,20 +29,20 @@ + void jfs_set_inode_flags(struct inode *inode) + { + unsigned int flags = JFS_IP(inode)->mode2; +- +- inode->i_flags &= ~(S_IMMUTABLE | S_APPEND | +- S_NOATIME | S_DIRSYNC | S_SYNC); ++ unsigned int new_fl = 0; + + if (flags & JFS_IMMUTABLE_FL) +- inode->i_flags |= S_IMMUTABLE; ++ new_fl |= S_IMMUTABLE; + if (flags & JFS_APPEND_FL) +- inode->i_flags |= S_APPEND; ++ new_fl |= S_APPEND; + if (flags & JFS_NOATIME_FL) +- inode->i_flags |= S_NOATIME; ++ new_fl |= S_NOATIME; + if (flags & JFS_DIRSYNC_FL) +- inode->i_flags |= S_DIRSYNC; ++ new_fl |= S_DIRSYNC; + if (flags & JFS_SYNC_FL) +- inode->i_flags |= S_SYNC; ++ new_fl |= S_SYNC; ++ inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND | S_NOATIME | ++ S_DIRSYNC | S_SYNC); + } + + void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip) +diff --git a/fs/minix/bitmap.c b/fs/minix/bitmap.c +index 4bc50dac8e97..742942a983be 100644 +--- a/fs/minix/bitmap.c ++++ b/fs/minix/bitmap.c +@@ -96,7 +96,7 @@ int minix_new_block(struct inode * inode) + unsigned long minix_count_free_blocks(struct super_block *sb) + { + struct minix_sb_info *sbi = minix_sb(sb); +- u32 bits = sbi->s_nzones - (sbi->s_firstdatazone + 1); ++ u32 bits = sbi->s_nzones - sbi->s_firstdatazone + 1; + + return (count_free(sbi->s_zmap, sb->s_blocksize, bits) + << sbi->s_log_zone_size); +diff --git a/fs/minix/inode.c b/fs/minix/inode.c +index 0332109162a5..a2e71752f011 100644 +--- a/fs/minix/inode.c ++++ b/fs/minix/inode.c +@@ -266,12 +266,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) + block = minix_blocks_needed(sbi->s_ninodes, s->s_blocksize); + if (sbi->s_imap_blocks < block) { + printk("MINIX-fs: file system does not have enough " +- "imap blocks allocated. Refusing to mount\n"); ++ "imap blocks allocated. 
Refusing to mount.\n"); + goto out_no_bitmap; + } + + block = minix_blocks_needed( +- (sbi->s_nzones - (sbi->s_firstdatazone + 1)), ++ (sbi->s_nzones - sbi->s_firstdatazone + 1), + s->s_blocksize); + if (sbi->s_zmap_blocks < block) { + printk("MINIX-fs: file system does not have enough " +diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c +index 5d8ccecf5f5c..3ed1be9aade3 100644 +--- a/fs/nfs/delegation.c ++++ b/fs/nfs/delegation.c +@@ -109,6 +109,8 @@ again: + continue; + if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) + continue; ++ if (!nfs4_valid_open_stateid(state)) ++ continue; + if (!nfs4_stateid_match(&state->stateid, stateid)) + continue; + get_nfs_open_context(ctx); +@@ -177,7 +179,11 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation * + { + int res = 0; + +- res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync); ++ if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) ++ res = nfs4_proc_delegreturn(inode, ++ delegation->cred, ++ &delegation->stateid, ++ issync); + nfs_free_delegation(delegation); + return res; + } +@@ -364,11 +370,13 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation + { + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; + struct nfs_inode *nfsi = NFS_I(inode); +- int err; ++ int err = 0; + + if (delegation == NULL) + return 0; + do { ++ if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) ++ break; + err = nfs_delegation_claim_opens(inode, &delegation->stateid); + if (!issync || err != -EAGAIN) + break; +@@ -589,10 +597,23 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl + rcu_read_unlock(); + } + ++static void nfs_revoke_delegation(struct inode *inode) ++{ ++ struct nfs_delegation *delegation; ++ rcu_read_lock(); ++ delegation = rcu_dereference(NFS_I(inode)->delegation); ++ if (delegation != NULL) { ++ set_bit(NFS_DELEGATION_REVOKED, &delegation->flags); ++ nfs_mark_return_delegation(NFS_SERVER(inode), delegation); ++ } ++ rcu_read_unlock(); ++} ++ + void nfs_remove_bad_delegation(struct inode *inode) + { + struct nfs_delegation *delegation; + ++ nfs_revoke_delegation(inode); + delegation = nfs_inode_detach_delegation(inode); + if (delegation) { + nfs_inode_find_state_and_recover(inode, &delegation->stateid); +diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h +index 9a79c7a99d6d..e02b090ab9da 100644 +--- a/fs/nfs/delegation.h ++++ b/fs/nfs/delegation.h +@@ -31,6 +31,7 @@ enum { + NFS_DELEGATION_RETURN_IF_CLOSED, + NFS_DELEGATION_REFERENCED, + NFS_DELEGATION_RETURNING, ++ NFS_DELEGATION_REVOKED, + }; + + int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); +diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c +index af5f3ffcb157..d751a2383c24 100644 +--- a/fs/nfs/direct.c ++++ b/fs/nfs/direct.c +@@ -179,6 +179,7 @@ static void nfs_direct_req_free(struct kref *kref) + { + struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref); + ++ nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo); + if (dreq->l_ctx != NULL) + nfs_put_lock_context(dreq->l_ctx); + if (dreq->ctx != NULL) +diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c +index 7f5799d098fd..e5eb677ca9ce 100644 +--- a/fs/nfs/inode.c ++++ b/fs/nfs/inode.c +@@ -598,7 +598,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) + { + struct inode *inode = dentry->d_inode; + int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; +- int err; ++ int err = 0; + + 
trace_nfs_getattr_enter(inode); + /* Flush out writes to the server in order to update c/mtime. */ +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 9f7f1a0d30dc..759875038791 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -1579,7 +1579,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct + nfs_inode_find_state_and_recover(state->inode, + stateid); + nfs4_schedule_stateid_recovery(server, state); +- return 0; ++ return -EAGAIN; + case -NFS4ERR_DELAY: + case -NFS4ERR_GRACE: + set_bit(NFS_DELEGATED_STATE, &state->flags); +@@ -2026,46 +2026,60 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta + return ret; + } + ++static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state) ++{ ++ nfs_remove_bad_delegation(state->inode); ++ write_seqlock(&state->seqlock); ++ nfs4_stateid_copy(&state->stateid, &state->open_stateid); ++ write_sequnlock(&state->seqlock); ++ clear_bit(NFS_DELEGATED_STATE, &state->flags); ++} ++ ++static void nfs40_clear_delegation_stateid(struct nfs4_state *state) ++{ ++ if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL) ++ nfs_finish_clear_delegation_stateid(state); ++} ++ ++static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state) ++{ ++ /* NFSv4.0 doesn't allow for delegation recovery on open expire */ ++ nfs40_clear_delegation_stateid(state); ++ return nfs4_open_expired(sp, state); ++} ++ + #if defined(CONFIG_NFS_V4_1) +-static void nfs41_clear_delegation_stateid(struct nfs4_state *state) ++static void nfs41_check_delegation_stateid(struct nfs4_state *state) + { + struct nfs_server *server = NFS_SERVER(state->inode); +- nfs4_stateid *stateid = &state->stateid; ++ nfs4_stateid stateid; + struct nfs_delegation *delegation; +- struct rpc_cred *cred = NULL; +- int status = -NFS4ERR_BAD_STATEID; +- +- /* If a state reset has been done, test_stateid is unneeded */ +- if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) +- return; ++ struct rpc_cred *cred; ++ int status; + + /* Get the delegation credential for use by test/free_stateid */ + rcu_read_lock(); + delegation = rcu_dereference(NFS_I(state->inode)->delegation); +- if (delegation != NULL && +- nfs4_stateid_match(&delegation->stateid, stateid)) { +- cred = get_rpccred(delegation->cred); +- rcu_read_unlock(); +- status = nfs41_test_stateid(server, stateid, cred); +- trace_nfs4_test_delegation_stateid(state, NULL, status); +- } else ++ if (delegation == NULL) { + rcu_read_unlock(); ++ return; ++ } ++ ++ nfs4_stateid_copy(&stateid, &delegation->stateid); ++ cred = get_rpccred(delegation->cred); ++ rcu_read_unlock(); ++ status = nfs41_test_stateid(server, &stateid, cred); ++ trace_nfs4_test_delegation_stateid(state, NULL, status); + + if (status != NFS_OK) { + /* Free the stateid unless the server explicitly + * informs us the stateid is unrecognized. 
*/ + if (status != -NFS4ERR_BAD_STATEID) +- nfs41_free_stateid(server, stateid, cred); +- nfs_remove_bad_delegation(state->inode); +- +- write_seqlock(&state->seqlock); +- nfs4_stateid_copy(&state->stateid, &state->open_stateid); +- write_sequnlock(&state->seqlock); +- clear_bit(NFS_DELEGATED_STATE, &state->flags); ++ nfs41_free_stateid(server, &stateid, cred); ++ nfs_finish_clear_delegation_stateid(state); + } + +- if (cred != NULL) +- put_rpccred(cred); ++ put_rpccred(cred); + } + + /** +@@ -2109,7 +2123,7 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st + { + int status; + +- nfs41_clear_delegation_stateid(state); ++ nfs41_check_delegation_stateid(state); + status = nfs41_check_open_stateid(state); + if (status != NFS_OK) + status = nfs4_open_expired(sp, state); +@@ -7902,7 +7916,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = { + static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { + .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, + .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, +- .recover_open = nfs4_open_expired, ++ .recover_open = nfs40_open_expired, + .recover_lock = nfs4_lock_expired, + .establish_clid = nfs4_init_clientid, + }; +diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c +index 08fdb77852ac..e31952112eb4 100644 +--- a/fs/nilfs2/file.c ++++ b/fs/nilfs2/file.c +@@ -56,11 +56,9 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) + mutex_unlock(&inode->i_mutex); + + nilfs = inode->i_sb->s_fs_info; +- if (!err && nilfs_test_opt(nilfs, BARRIER)) { +- err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); +- if (err != -EIO) +- err = 0; +- } ++ if (!err) ++ err = nilfs_flush_device(nilfs); ++ + return err; + } + +diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c +index b44bdb291b84..4915e543dc51 100644 +--- a/fs/nilfs2/ioctl.c ++++ b/fs/nilfs2/ioctl.c +@@ -694,11 +694,9 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, + return ret; + + nilfs = inode->i_sb->s_fs_info; +- if (nilfs_test_opt(nilfs, BARRIER)) { +- ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); +- if (ret == -EIO) +- return ret; +- } ++ ret = nilfs_flush_device(nilfs); ++ if (ret < 0) ++ return ret; + + if (argp != NULL) { + down_read(&nilfs->ns_segctor_sem); +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index a1a191634abc..0b7d2cad0426 100644 +--- a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -1833,6 +1833,7 @@ static void nilfs_segctor_complete_write(struct nilfs_sc_info *sci) + nilfs_set_next_segment(nilfs, segbuf); + + if (update_sr) { ++ nilfs->ns_flushed_device = 0; + nilfs_set_last_segment(nilfs, segbuf->sb_pseg_start, + segbuf->sb_sum.seg_seq, nilfs->ns_cno++); + +@@ -2216,6 +2217,8 @@ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, + sci->sc_dsync_end = end; + + err = nilfs_segctor_do_construct(sci, SC_LSEG_DSYNC); ++ if (!err) ++ nilfs->ns_flushed_device = 0; + + nilfs_transaction_unlock(sb); + return err; +diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c +index 7ac2a122ca1d..0bdc0245e0aa 100644 +--- a/fs/nilfs2/super.c ++++ b/fs/nilfs2/super.c +@@ -310,6 +310,9 @@ int nilfs_commit_super(struct super_block *sb, int flag) + nilfs->ns_sbsize)); + } + clear_nilfs_sb_dirty(nilfs); ++ nilfs->ns_flushed_device = 1; ++ /* make sure store to ns_flushed_device cannot be reordered */ ++ smp_wmb(); + return nilfs_sync_super(sb, flag); + } + +@@ -514,6 +517,9 @@ static int nilfs_sync_fs(struct super_block *sb, 
int wait) + } + up_write(&nilfs->ns_sem); + ++ if (!err) ++ err = nilfs_flush_device(nilfs); ++ + return err; + } + +diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h +index de8cc53b4a5c..005e1dcf8afb 100644 +--- a/fs/nilfs2/the_nilfs.h ++++ b/fs/nilfs2/the_nilfs.h +@@ -45,6 +45,7 @@ enum { + /** + * struct the_nilfs - struct to supervise multiple nilfs mount points + * @ns_flags: flags ++ * @ns_flushed_device: flag indicating if all volatile data was flushed + * @ns_bdev: block device + * @ns_sem: semaphore for shared states + * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts +@@ -98,6 +99,7 @@ enum { + */ + struct the_nilfs { + unsigned long ns_flags; ++ int ns_flushed_device; + + struct block_device *ns_bdev; + struct rw_semaphore ns_sem; +@@ -353,4 +355,24 @@ static inline int nilfs_segment_is_active(struct the_nilfs *nilfs, __u64 n) + return n == nilfs->ns_segnum || n == nilfs->ns_nextnum; + } + ++static inline int nilfs_flush_device(struct the_nilfs *nilfs) ++{ ++ int err; ++ ++ if (!nilfs_test_opt(nilfs, BARRIER) || nilfs->ns_flushed_device) ++ return 0; ++ ++ nilfs->ns_flushed_device = 1; ++ /* ++ * the store to ns_flushed_device must not be reordered after ++ * blkdev_issue_flush(). ++ */ ++ smp_wmb(); ++ ++ err = blkdev_issue_flush(nilfs->ns_bdev, GFP_KERNEL, NULL); ++ if (err != -EIO) ++ err = 0; ++ return err; ++} ++ + #endif /* _THE_NILFS_H */ +diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c +index a0b2f345da2b..86ddab916b66 100644 +--- a/fs/ntfs/file.c ++++ b/fs/ntfs/file.c +@@ -2133,7 +2133,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, + ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos); + mutex_unlock(&inode->i_mutex); + if (ret > 0) { +- int err = generic_write_sync(file, pos, ret); ++ int err = generic_write_sync(file, iocb->ki_pos - ret, ret); + if (err < 0) + ret = err; + } +diff --git a/fs/sync.c b/fs/sync.c +index 905f3f6b3d85..354831ddf54b 100644 +--- a/fs/sync.c ++++ b/fs/sync.c +@@ -219,23 +219,6 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd) + return do_fsync(fd, 1); + } + +-/** +- * generic_write_sync - perform syncing after a write if file / inode is sync +- * @file: file to which the write happened +- * @pos: offset where the write started +- * @count: length of the write +- * +- * This is just a simple wrapper about our general syncing function. +- */ +-int generic_write_sync(struct file *file, loff_t pos, loff_t count) +-{ +- if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host)) +- return 0; +- return vfs_fsync_range(file, pos, pos + count - 1, +- (file->f_flags & __O_SYNC) ? 0 : 1); +-} +-EXPORT_SYMBOL(generic_write_sync); +- + /* + * sys_sync_file_range() permits finely controlled syncing over a segment of + * a file in the range offset .. (offset+nbytes-1) inclusive. 
If nbytes is +diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c +index d56b136e68fe..aa606453a0f8 100644 +--- a/fs/xfs/xfs_file.c ++++ b/fs/xfs/xfs_file.c +@@ -811,7 +811,7 @@ xfs_file_aio_write( + XFS_STATS_ADD(xs_write_bytes, ret); + + /* Handle various SYNC-type writes */ +- err = generic_write_sync(file, pos, ret); ++ err = generic_write_sync(file, iocb->ki_pos - ret, ret); + if (err < 0) + ret = err; + } +diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h +index dbbf8aa7731b..48028261924c 100644 +--- a/include/linux/clocksource.h ++++ b/include/linux/clocksource.h +@@ -289,7 +289,7 @@ extern struct clocksource* clocksource_get_next(void); + extern void clocksource_change_rating(struct clocksource *cs, int rating); + extern void clocksource_suspend(void); + extern void clocksource_resume(void); +-extern struct clocksource * __init __weak clocksource_default_clock(void); ++extern struct clocksource * __init clocksource_default_clock(void); + extern void clocksource_mark_unstable(struct clocksource *cs); + + extern void +diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h +index 7032518f8542..60023e5d3169 100644 +--- a/include/linux/crash_dump.h ++++ b/include/linux/crash_dump.h +@@ -14,14 +14,13 @@ + extern unsigned long long elfcorehdr_addr; + extern unsigned long long elfcorehdr_size; + +-extern int __weak elfcorehdr_alloc(unsigned long long *addr, +- unsigned long long *size); +-extern void __weak elfcorehdr_free(unsigned long long addr); +-extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos); +-extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); +-extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, +- unsigned long from, unsigned long pfn, +- unsigned long size, pgprot_t prot); ++extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size); ++extern void elfcorehdr_free(unsigned long long addr); ++extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos); ++extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); ++extern int remap_oldmem_pfn_range(struct vm_area_struct *vma, ++ unsigned long from, unsigned long pfn, ++ unsigned long size, pgprot_t prot); + + extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, + unsigned long, int); +diff --git a/include/linux/fs.h b/include/linux/fs.h +index 164d2a91667f..9cb726aa09fc 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -2217,7 +2217,13 @@ extern int filemap_fdatawrite_range(struct address_space *mapping, + extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, + int datasync); + extern int vfs_fsync(struct file *file, int datasync); +-extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); ++static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count) ++{ ++ if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host)) ++ return 0; ++ return vfs_fsync_range(file, pos, pos + count - 1, ++ (file->f_flags & __O_SYNC) ? 
0 : 1); ++} + extern void emergency_sync(void); + extern void emergency_remount(void); + #ifdef CONFIG_BLOCK +@@ -2490,6 +2496,9 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, + void inode_dio_wait(struct inode *inode); + void inode_dio_done(struct inode *inode); + ++extern void inode_set_flags(struct inode *inode, unsigned int flags, ++ unsigned int mask); ++ + extern const struct file_operations generic_ro_fops; + + #define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) +diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h +index 79640e015a86..f738f922542d 100644 +--- a/include/linux/inetdevice.h ++++ b/include/linux/inetdevice.h +@@ -234,7 +234,7 @@ static inline void in_dev_put(struct in_device *idev) + static __inline__ __be32 inet_make_mask(int logmask) + { + if (logmask) +- return htonl(~((1<<(32-logmask))-1)); ++ return htonl(~((1U<<(32-logmask))-1)); + return 0; + } + +diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h +index c6e091bf39a5..bdfc95bddde9 100644 +--- a/include/linux/kgdb.h ++++ b/include/linux/kgdb.h +@@ -283,7 +283,7 @@ struct kgdb_io { + + extern struct kgdb_arch arch_kgdb_ops; + +-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); ++extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs); + + #ifdef CONFIG_SERIAL_KGDB_NMI + extern int kgdb_register_nmi_console(void); +diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h +index 2b307018979d..715671e4c7e6 100644 +--- a/include/linux/nfs_xdr.h ++++ b/include/linux/nfs_xdr.h +@@ -1223,11 +1223,22 @@ struct nfs41_free_stateid_res { + unsigned int status; + }; + ++static inline void ++nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) ++{ ++ kfree(cinfo->buckets); ++} ++ + #else + + struct pnfs_ds_commit_info { + }; + ++static inline void ++nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo) ++{ ++} ++ + #endif /* CONFIG_NFS_V4_1 */ + + struct nfs_page; +diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h +index 3794c5ad20fe..3848934ab162 100644 +--- a/include/net/sctp/sctp.h ++++ b/include/net/sctp/sctp.h +@@ -454,6 +454,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat + asoc->pmtu_pending = 0; + } + ++static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk) ++{ ++ return !list_empty(&chunk->list); ++} ++ + /* Walk through a list of TLV parameters. Don't trust the + * individual parameter lengths and instead depend on + * the chunk length to indicate when to stop. 
Make sure +diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h +index 4ef75af340b6..c91b6f5c07a5 100644 +--- a/include/net/sctp/sm.h ++++ b/include/net/sctp/sm.h +@@ -249,9 +249,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *, + int, __be16); + struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, + union sctp_addr *addr); +-int sctp_verify_asconf(const struct sctp_association *asoc, +- struct sctp_paramhdr *param_hdr, void *chunk_end, +- struct sctp_paramhdr **errp); ++bool sctp_verify_asconf(const struct sctp_association *asoc, ++ struct sctp_chunk *chunk, bool addr_param_needed, ++ struct sctp_paramhdr **errp); + struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, + struct sctp_chunk *asconf); + int sctp_process_asconf_ack(struct sctp_association *asoc, +diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h +index 5dda450eb55b..2ec9fbcd06f9 100644 +--- a/include/uapi/linux/netfilter/xt_bpf.h ++++ b/include/uapi/linux/netfilter/xt_bpf.h +@@ -6,6 +6,8 @@ + + #define XT_BPF_MAX_NUM_INSTR 64 + ++struct sk_filter; ++ + struct xt_bpf_info { + __u16 bpf_program_num_elem; + struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR]; +diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c +index b0e99deb6d05..a0f0ab2ac2a8 100644 +--- a/ipc/ipc_sysctl.c ++++ b/ipc/ipc_sysctl.c +@@ -123,7 +123,6 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) + { + struct ctl_table ipc_table; +- size_t lenp_bef = *lenp; + int oldval; + int rc; + +@@ -133,7 +132,7 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write, + + rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); + +- if (write && !rc && lenp_bef == *lenp) { ++ if (write && !rc) { + int newval = *((int *)(ipc_table.data)); + /* + * The file "auto_msgmni" has correctly been set. +diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c +index 43c307dc9453..00c4459f76df 100644 +--- a/kernel/audit_tree.c ++++ b/kernel/audit_tree.c +@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count) + chunk->owners[i].index = i; + } + fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch); ++ chunk->mark.mask = FS_IN_IGNORED; + return chunk; + } + +diff --git a/kernel/rcutree.c b/kernel/rcutree.c +index 32618b3fe4e6..e27526232b5f 100644 +--- a/kernel/rcutree.c ++++ b/kernel/rcutree.c +@@ -1131,6 +1131,22 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) + } + + /* ++ * Awaken the grace-period kthread for the specified flavor of RCU. ++ * Don't do a self-awaken, and don't bother awakening when there is ++ * nothing for the grace-period kthread to do (as in several CPUs ++ * raced to awaken, and we lost), and finally don't try to awaken ++ * a kthread that has not yet been created. ++ */ ++static void rcu_gp_kthread_wake(struct rcu_state *rsp) ++{ ++ if (current == rsp->gp_kthread || ++ !ACCESS_ONCE(rsp->gp_flags) || ++ !rsp->gp_kthread) ++ return; ++ wake_up(&rsp->gp_wq); ++} ++ ++/* + * If there is room, assign a ->completed number to any callbacks on + * this CPU that have not already been assigned. Also accelerate any + * callbacks that were previously assigned a ->completed number that has +@@ -1528,7 +1544,7 @@ static void rsp_wakeup(struct irq_work *work) + struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work); + + /* Wake up rcu_gp_kthread() to start the grace period. 
*/ +- wake_up(&rsp->gp_wq); ++ rcu_gp_kthread_wake(rsp); + } + + /* +@@ -1602,7 +1618,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) + { + WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); + raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); +- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ ++ rcu_gp_kthread_wake(rsp); + } + + /* +@@ -2172,7 +2188,7 @@ static void force_quiescent_state(struct rcu_state *rsp) + } + rsp->gp_flags |= RCU_GP_FLAG_FQS; + raw_spin_unlock_irqrestore(&rnp_old->lock, flags); +- wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ ++ rcu_gp_kthread_wake(rsp); + } + + /* +diff --git a/mm/filemap.c b/mm/filemap.c +index b012daefc2d7..e94c70380deb 100644 +--- a/mm/filemap.c ++++ b/mm/filemap.c +@@ -2723,8 +2723,8 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, + if (ret > 0) { + ssize_t err; + +- err = generic_write_sync(file, pos, ret); +- if (err < 0 && ret > 0) ++ err = generic_write_sync(file, iocb->ki_pos - ret, ret); ++ if (err < 0) + ret = err; + } + return ret; +diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c +index 6e7a236525b6..06f19b9e159a 100644 +--- a/net/ceph/crypto.c ++++ b/net/ceph/crypto.c +@@ -89,11 +89,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void) + + static const u8 *aes_iv = (u8 *)CEPH_AES_IV; + ++/* ++ * Should be used for buffers allocated with ceph_kvmalloc(). ++ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt ++ * in-buffer (msg front). ++ * ++ * Dispose of @sgt with teardown_sgtable(). ++ * ++ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table() ++ * in cases where a single sg is sufficient. No attempt to reduce the ++ * number of sgs by squeezing physically contiguous pages together is ++ * made though, for simplicity. 
++ */ ++static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg, ++ const void *buf, unsigned int buf_len) ++{ ++ struct scatterlist *sg; ++ const bool is_vmalloc = is_vmalloc_addr(buf); ++ unsigned int off = offset_in_page(buf); ++ unsigned int chunk_cnt = 1; ++ unsigned int chunk_len = PAGE_ALIGN(off + buf_len); ++ int i; ++ int ret; ++ ++ if (buf_len == 0) { ++ memset(sgt, 0, sizeof(*sgt)); ++ return -EINVAL; ++ } ++ ++ if (is_vmalloc) { ++ chunk_cnt = chunk_len >> PAGE_SHIFT; ++ chunk_len = PAGE_SIZE; ++ } ++ ++ if (chunk_cnt > 1) { ++ ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS); ++ if (ret) ++ return ret; ++ } else { ++ WARN_ON(chunk_cnt != 1); ++ sg_init_table(prealloc_sg, 1); ++ sgt->sgl = prealloc_sg; ++ sgt->nents = sgt->orig_nents = 1; ++ } ++ ++ for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) { ++ struct page *page; ++ unsigned int len = min(chunk_len - off, buf_len); ++ ++ if (is_vmalloc) ++ page = vmalloc_to_page(buf); ++ else ++ page = virt_to_page(buf); ++ ++ sg_set_page(sg, page, len, off); ++ ++ off = 0; ++ buf += len; ++ buf_len -= len; ++ } ++ WARN_ON(buf_len != 0); ++ ++ return 0; ++} ++ ++static void teardown_sgtable(struct sg_table *sgt) ++{ ++ if (sgt->orig_nents > 1) ++ sg_free_table(sgt); ++} ++ + static int ceph_aes_encrypt(const void *key, int key_len, + void *dst, size_t *dst_len, + const void *src, size_t src_len) + { +- struct scatterlist sg_in[2], sg_out[1]; ++ struct scatterlist sg_in[2], prealloc_sg; ++ struct sg_table sg_out; + struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); + struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; + int ret; +@@ -109,16 +180,18 @@ static int ceph_aes_encrypt(const void *key, int key_len, + + *dst_len = src_len + zero_padding; + +- crypto_blkcipher_setkey((void *)tfm, key, key_len); + sg_init_table(sg_in, 2); + sg_set_buf(&sg_in[0], src, src_len); + sg_set_buf(&sg_in[1], pad, zero_padding); +- sg_init_table(sg_out, 1); +- sg_set_buf(sg_out, dst, *dst_len); ++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); ++ if (ret) ++ goto out_tfm; ++ ++ crypto_blkcipher_setkey((void *)tfm, key, key_len); + iv = crypto_blkcipher_crt(tfm)->iv; + ivsize = crypto_blkcipher_ivsize(tfm); +- + memcpy(iv, aes_iv, ivsize); ++ + /* + print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, + key, key_len, 1); +@@ -127,16 +200,22 @@ static int ceph_aes_encrypt(const void *key, int key_len, + print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, + pad, zero_padding, 1); + */ +- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, ++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, + src_len + zero_padding); +- crypto_free_blkcipher(tfm); +- if (ret < 0) ++ if (ret < 0) { + pr_err("ceph_aes_crypt failed %d\n", ret); ++ goto out_sg; ++ } + /* + print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, + dst, *dst_len, 1); + */ +- return 0; ++ ++out_sg: ++ teardown_sgtable(&sg_out); ++out_tfm: ++ crypto_free_blkcipher(tfm); ++ return ret; + } + + static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, +@@ -144,7 +223,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, + const void *src1, size_t src1_len, + const void *src2, size_t src2_len) + { +- struct scatterlist sg_in[3], sg_out[1]; ++ struct scatterlist sg_in[3], prealloc_sg; ++ struct sg_table sg_out; + struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); + struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; + int ret; +@@ -160,17 +240,19 @@ static int 
ceph_aes_encrypt2(const void *key, int key_len, void *dst, + + *dst_len = src1_len + src2_len + zero_padding; + +- crypto_blkcipher_setkey((void *)tfm, key, key_len); + sg_init_table(sg_in, 3); + sg_set_buf(&sg_in[0], src1, src1_len); + sg_set_buf(&sg_in[1], src2, src2_len); + sg_set_buf(&sg_in[2], pad, zero_padding); +- sg_init_table(sg_out, 1); +- sg_set_buf(sg_out, dst, *dst_len); ++ ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len); ++ if (ret) ++ goto out_tfm; ++ ++ crypto_blkcipher_setkey((void *)tfm, key, key_len); + iv = crypto_blkcipher_crt(tfm)->iv; + ivsize = crypto_blkcipher_ivsize(tfm); +- + memcpy(iv, aes_iv, ivsize); ++ + /* + print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, + key, key_len, 1); +@@ -181,23 +263,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, + print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, + pad, zero_padding, 1); + */ +- ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, ++ ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, + src1_len + src2_len + zero_padding); +- crypto_free_blkcipher(tfm); +- if (ret < 0) ++ if (ret < 0) { + pr_err("ceph_aes_crypt2 failed %d\n", ret); ++ goto out_sg; ++ } + /* + print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, + dst, *dst_len, 1); + */ +- return 0; ++ ++out_sg: ++ teardown_sgtable(&sg_out); ++out_tfm: ++ crypto_free_blkcipher(tfm); ++ return ret; + } + + static int ceph_aes_decrypt(const void *key, int key_len, + void *dst, size_t *dst_len, + const void *src, size_t src_len) + { +- struct scatterlist sg_in[1], sg_out[2]; ++ struct sg_table sg_in; ++ struct scatterlist sg_out[2], prealloc_sg; + struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); + struct blkcipher_desc desc = { .tfm = tfm }; + char pad[16]; +@@ -209,16 +298,16 @@ static int ceph_aes_decrypt(const void *key, int key_len, + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + +- crypto_blkcipher_setkey((void *)tfm, key, key_len); +- sg_init_table(sg_in, 1); + sg_init_table(sg_out, 2); +- sg_set_buf(sg_in, src, src_len); + sg_set_buf(&sg_out[0], dst, *dst_len); + sg_set_buf(&sg_out[1], pad, sizeof(pad)); ++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); ++ if (ret) ++ goto out_tfm; + ++ crypto_blkcipher_setkey((void *)tfm, key, key_len); + iv = crypto_blkcipher_crt(tfm)->iv; + ivsize = crypto_blkcipher_ivsize(tfm); +- + memcpy(iv, aes_iv, ivsize); + + /* +@@ -227,12 +316,10 @@ static int ceph_aes_decrypt(const void *key, int key_len, + print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, + src, src_len, 1); + */ +- +- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); +- crypto_free_blkcipher(tfm); ++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); + if (ret < 0) { + pr_err("ceph_aes_decrypt failed %d\n", ret); +- return ret; ++ goto out_sg; + } + + if (src_len <= *dst_len) +@@ -250,7 +337,12 @@ static int ceph_aes_decrypt(const void *key, int key_len, + print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, + dst, *dst_len, 1); + */ +- return 0; ++ ++out_sg: ++ teardown_sgtable(&sg_in); ++out_tfm: ++ crypto_free_blkcipher(tfm); ++ return ret; + } + + static int ceph_aes_decrypt2(const void *key, int key_len, +@@ -258,7 +350,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len, + void *dst2, size_t *dst2_len, + const void *src, size_t src_len) + { +- struct scatterlist sg_in[1], sg_out[3]; ++ struct sg_table sg_in; ++ struct scatterlist sg_out[3], prealloc_sg; + struct crypto_blkcipher *tfm = 
ceph_crypto_alloc_cipher(); + struct blkcipher_desc desc = { .tfm = tfm }; + char pad[16]; +@@ -270,17 +363,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len, + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + +- sg_init_table(sg_in, 1); +- sg_set_buf(sg_in, src, src_len); + sg_init_table(sg_out, 3); + sg_set_buf(&sg_out[0], dst1, *dst1_len); + sg_set_buf(&sg_out[1], dst2, *dst2_len); + sg_set_buf(&sg_out[2], pad, sizeof(pad)); ++ ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len); ++ if (ret) ++ goto out_tfm; + + crypto_blkcipher_setkey((void *)tfm, key, key_len); + iv = crypto_blkcipher_crt(tfm)->iv; + ivsize = crypto_blkcipher_ivsize(tfm); +- + memcpy(iv, aes_iv, ivsize); + + /* +@@ -289,12 +382,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len, + print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, + src, src_len, 1); + */ +- +- ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); +- crypto_free_blkcipher(tfm); ++ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len); + if (ret < 0) { + pr_err("ceph_aes_decrypt failed %d\n", ret); +- return ret; ++ goto out_sg; + } + + if (src_len <= *dst1_len) +@@ -324,7 +415,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len, + dst2, *dst2_len, 1); + */ + +- return 0; ++out_sg: ++ teardown_sgtable(&sg_in); ++out_tfm: ++ crypto_free_blkcipher(tfm); ++ return ret; + } + + +diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c +index f2e15738534d..8f7bd56955b0 100644 +--- a/net/ipv4/fib_rules.c ++++ b/net/ipv4/fib_rules.c +@@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res) + else + res->tclassid = 0; + #endif ++ ++ if (err == -ESRCH) ++ err = -ENETUNREACH; ++ + return err; + } + EXPORT_SYMBOL_GPL(__fib_lookup); +diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c +index 99988b86f6af..88774ccb3dda 100644 +--- a/net/ipv6/ip6_gre.c ++++ b/net/ipv6/ip6_gre.c +@@ -965,8 +965,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu) + else + dev->flags &= ~IFF_POINTOPOINT; + +- dev->iflink = p->link; +- + /* Precalculate GRE options length */ + if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { + if (t->parms.o_flags&GRE_CSUM) +@@ -1269,6 +1267,8 @@ static int ip6gre_tunnel_init(struct net_device *dev) + if (!dev->tstats) + return -ENOMEM; + ++ dev->iflink = tunnel->parms.link; ++ + return 0; + } + +@@ -1285,7 +1285,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev) + dev_hold(dev); + } + +- + static struct inet6_protocol ip6gre_protocol __read_mostly = { + .handler = ip6gre_rcv, + .err_handler = ip6gre_err, +@@ -1462,6 +1461,8 @@ static int ip6gre_tap_init(struct net_device *dev) + if (!dev->tstats) + return -ENOMEM; + ++ dev->iflink = tunnel->parms.link; ++ + return 0; + } + +diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c +index 7c26d8a3fa1b..f8a70a120e75 100644 +--- a/net/ipv6/ip6_tunnel.c ++++ b/net/ipv6/ip6_tunnel.c +@@ -266,9 +266,6 @@ static int ip6_tnl_create2(struct net_device *dev) + int err; + + t = netdev_priv(dev); +- err = ip6_tnl_dev_init(dev); +- if (err < 0) +- goto out; + + err = register_netdevice(dev); + if (err < 0) +@@ -1448,6 +1445,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu) + + + static const struct net_device_ops ip6_tnl_netdev_ops = { ++ .ndo_init = ip6_tnl_dev_init, + .ndo_uninit = ip6_tnl_dev_uninit, + .ndo_start_xmit = ip6_tnl_xmit, + .ndo_do_ioctl = ip6_tnl_ioctl, +@@ -1532,16 +1530,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev) + 
struct ip6_tnl *t = netdev_priv(dev); + struct net *net = dev_net(dev); + struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); +- int err = ip6_tnl_dev_init_gen(dev); +- +- if (err) +- return err; + + t->parms.proto = IPPROTO_IPV6; + dev_hold(dev); + +- ip6_tnl_link_config(t); +- + rcu_assign_pointer(ip6n->tnls_wc[0], t); + return 0; + } +diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c +index ebdf18bbcc02..8e8fc32a080f 100644 +--- a/net/ipv6/sit.c ++++ b/net/ipv6/sit.c +@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev) + struct sit_net *sitn = net_generic(net, sit_net_id); + int err; + +- err = ipip6_tunnel_init(dev); +- if (err < 0) +- goto out; +- ipip6_tunnel_clone_6rd(dev, sitn); ++ memcpy(dev->dev_addr, &t->parms.iph.saddr, 4); ++ memcpy(dev->broadcast, &t->parms.iph.daddr, 4); + + if ((__force u16)t->parms.i_flags & SIT_ISATAP) + dev->priv_flags |= IFF_ISATAP; +@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev) + if (err < 0) + goto out; + +- strcpy(t->parms.name, dev->name); ++ ipip6_tunnel_clone_6rd(dev, sitn); ++ + dev->rtnl_link_ops = &sit_link_ops; + + dev_hold(dev); +@@ -1279,6 +1278,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu) + } + + static const struct net_device_ops ipip6_netdev_ops = { ++ .ndo_init = ipip6_tunnel_init, + .ndo_uninit = ipip6_tunnel_uninit, + .ndo_start_xmit = sit_tunnel_xmit, + .ndo_do_ioctl = ipip6_tunnel_ioctl, +@@ -1313,9 +1313,7 @@ static int ipip6_tunnel_init(struct net_device *dev) + + tunnel->dev = dev; + tunnel->net = dev_net(dev); +- +- memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4); +- memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4); ++ strcpy(tunnel->parms.name, dev->name); + + ipip6_tunnel_bind_dev(dev); + dev->tstats = alloc_percpu(struct pcpu_tstats); +@@ -1334,7 +1332,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev) + + tunnel->dev = dev; + tunnel->net = dev_net(dev); +- strcpy(tunnel->parms.name, dev->name); + + iph->version = 4; + iph->protocol = IPPROTO_IPV6; +diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c +index e096025b477f..6857ae49dc8c 100644 +--- a/net/ipx/af_ipx.c ++++ b/net/ipx/af_ipx.c +@@ -1778,6 +1778,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, + struct ipxhdr *ipx = NULL; + struct sk_buff *skb; + int copied, rc; ++ bool locked = true; + + lock_sock(sk); + /* put the autobinding in */ +@@ -1804,6 +1805,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, + if (sock_flag(sk, SOCK_ZAPPED)) + goto out; + ++ release_sock(sk); ++ locked = false; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, + flags & MSG_DONTWAIT, &rc); + if (!skb) +@@ -1837,7 +1840,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock, + out_free: + skb_free_datagram(sk, skb); + out: +- release_sock(sk); ++ if (locked) ++ release_sock(sk); + return rc; + } + +diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c +index d019b42e4a65..31da72ce76ef 100644 +--- a/net/mac80211/iface.c ++++ b/net/mac80211/iface.c +@@ -749,10 +749,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, + u32 hw_reconf_flags = 0; + int i, flushed; + struct ps_data *ps; ++ bool cancel_scan; + + clear_bit(SDATA_STATE_RUNNING, &sdata->state); + +- if (rcu_access_pointer(local->scan_sdata) == sdata) ++ cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata; ++ if (cancel_scan) + ieee80211_scan_cancel(local); + + /* +@@ -959,6 +961,9 @@ static void ieee80211_do_stop(struct 
ieee80211_sub_if_data *sdata, + + ieee80211_recalc_ps(local, -1); + ++ if (cancel_scan) ++ flush_delayed_work(&local->scan_work); ++ + if (local->open_count == 0) { + ieee80211_stop_device(local); + +diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c +index 5fa4ee07dd7a..023bc33bab9a 100644 +--- a/net/mac80211/mlme.c ++++ b/net/mac80211/mlme.c +@@ -1265,7 +1265,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, + ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); + else + mod_timer(&ifmgd->chswitch_timer, +- TU_TO_EXP_TIME(count * cbss->beacon_interval)); ++ TU_TO_EXP_TIME((count - 1) * ++ cbss->beacon_interval)); + } + + static u32 ieee80211_handle_pwr_constr(struct ieee80211_sub_if_data *sdata, +diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c +index 1e5bd0d75732..275cb85bfa31 100644 +--- a/net/mac80211/rx.c ++++ b/net/mac80211/rx.c +@@ -1646,11 +1646,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + sc = le16_to_cpu(hdr->seq_ctrl); + frag = sc & IEEE80211_SCTL_FRAG; + +- if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || +- is_multicast_ether_addr(hdr->addr1))) { +- /* not fragmented */ ++ if (likely(!ieee80211_has_morefrags(fc) && frag == 0)) ++ goto out; ++ ++ if (is_multicast_ether_addr(hdr->addr1)) { ++ rx->local->dot11MulticastReceivedFrameCount++; + goto out; + } ++ + I802_DEBUG_INC(rx->local->rx_handlers_fragments); + + if (skb_linearize(rx->skb)) +@@ -1743,10 +1746,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx) + out: + if (rx->sta) + rx->sta->rx_packets++; +- if (is_multicast_ether_addr(hdr->addr1)) +- rx->local->dot11MulticastReceivedFrameCount++; +- else +- ieee80211_led_rx(rx->local); ++ ieee80211_led_rx(rx->local); + return RX_CONTINUE; + } + +diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c +index d92cc317bf8b..09172d7abee2 100644 +--- a/net/netfilter/nfnetlink_log.c ++++ b/net/netfilter/nfnetlink_log.c +@@ -45,7 +45,8 @@ + #define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE + #define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ + #define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ +-#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ ++/* max packet size is limited by 16-bit struct nfattr nfa_len field */ ++#define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN) + + #define PRINTR(x, args...) 
do { if (net_ratelimit()) \ + printk(x, ## args); } while (0); +@@ -255,6 +256,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode, + + case NFULNL_COPY_PACKET: + inst->copy_mode = mode; ++ if (range == 0) ++ range = NFULNL_COPY_RANGE_MAX; + inst->copy_range = min_t(unsigned int, + range, NFULNL_COPY_RANGE_MAX); + break; +@@ -345,26 +348,25 @@ nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size) + return skb; + } + +-static int ++static void + __nfulnl_send(struct nfulnl_instance *inst) + { +- int status = -1; +- + if (inst->qlen > 1) { + struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, + NLMSG_DONE, + sizeof(struct nfgenmsg), + 0); +- if (!nlh) ++ if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n", ++ inst->skb->len, skb_tailroom(inst->skb))) { ++ kfree_skb(inst->skb); + goto out; ++ } + } +- status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, +- MSG_DONTWAIT); +- ++ nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, ++ MSG_DONTWAIT); ++out: + inst->qlen = 0; + inst->skb = NULL; +-out: +- return status; + } + + static void +@@ -651,7 +653,8 @@ nfulnl_log_packet(struct net *net, + + nla_total_size(sizeof(u_int32_t)) /* gid */ + + nla_total_size(plen) /* prefix */ + + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) +- + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); ++ + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)) ++ + nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */ + + if (in && skb_mac_header_was_set(skb)) { + size += nla_total_size(skb->dev->hard_header_len) +@@ -680,8 +683,7 @@ nfulnl_log_packet(struct net *net, + break; + + case NFULNL_COPY_PACKET: +- if (inst->copy_range == 0 +- || inst->copy_range > skb->len) ++ if (inst->copy_range > skb->len) + data_len = skb->len; + else + data_len = inst->copy_range; +@@ -694,8 +696,7 @@ nfulnl_log_packet(struct net *net, + goto unlock_and_release; + } + +- if (inst->skb && +- size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) { ++ if (inst->skb && size > skb_tailroom(inst->skb)) { + /* either the queue len is too high or we don't have + * enough room in the skb left. flush to userspace. */ + __nfulnl_flush(inst); +diff --git a/net/sctp/associola.c b/net/sctp/associola.c +index ad5cd6f20e78..737050f1b2b2 100644 +--- a/net/sctp/associola.c ++++ b/net/sctp/associola.c +@@ -1645,6 +1645,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack( + * ack chunk whose serial number matches that of the request. + */ + list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) { ++ if (sctp_chunk_pending(ack)) ++ continue; + if (ack->subh.addip_hdr->serial == serial) { + sctp_chunk_hold(ack); + return ack; +diff --git a/net/sctp/auth.c b/net/sctp/auth.c +index 43b871f6cddf..4b842e9618ad 100644 +--- a/net/sctp/auth.c ++++ b/net/sctp/auth.c +@@ -868,8 +868,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep, + list_add(&cur_key->key_list, sh_keys); + + cur_key->key = key; +- sctp_auth_key_hold(key); +- + return 0; + nomem: + if (!replace) +diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c +index 5856932fdc38..560cd418a181 100644 +--- a/net/sctp/inqueue.c ++++ b/net/sctp/inqueue.c +@@ -141,18 +141,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) + } else { + /* Nothing to do. Next chunk in the packet, please. */ + ch = (sctp_chunkhdr_t *) chunk->chunk_end; +- + /* Force chunk->skb->data to chunk->chunk_end. 
*/ +- skb_pull(chunk->skb, +- chunk->chunk_end - chunk->skb->data); +- +- /* Verify that we have at least chunk headers +- * worth of buffer left. +- */ +- if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) { +- sctp_chunk_free(chunk); +- chunk = queue->in_progress = NULL; +- } ++ skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data); ++ /* We are guaranteed to pull a SCTP header. */ + } + } + +@@ -188,24 +179,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) + skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); + chunk->subh.v = NULL; /* Subheader is no longer valid. */ + +- if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) { ++ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) < ++ skb_tail_pointer(chunk->skb)) { + /* This is not a singleton */ + chunk->singleton = 0; + } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { +- /* RFC 2960, Section 6.10 Bundling +- * +- * Partial chunks MUST NOT be placed in an SCTP packet. +- * If the receiver detects a partial chunk, it MUST drop +- * the chunk. +- * +- * Since the end of the chunk is past the end of our buffer +- * (which contains the whole packet, we can freely discard +- * the whole packet. +- */ +- sctp_chunk_free(chunk); +- chunk = queue->in_progress = NULL; +- +- return NULL; ++ /* Discard inside state machine. */ ++ chunk->pdiscard = 1; ++ chunk->chunk_end = skb_tail_pointer(chunk->skb); + } else { + /* We are at the end of the packet, so mark the chunk + * in case we need to send a SACK. +diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c +index 1e06f3b23108..d800160f974c 100644 +--- a/net/sctp/sm_make_chunk.c ++++ b/net/sctp/sm_make_chunk.c +@@ -2622,6 +2622,9 @@ do_addr_param: + addr_param = param.v + sizeof(sctp_addip_param_t); + + af = sctp_get_af_specific(param_type2af(param.p->type)); ++ if (af == NULL) ++ break; ++ + af->from_addr_param(&addr, addr_param, + htons(asoc->peer.port), 0); + +@@ -3123,50 +3126,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc, + return SCTP_ERROR_NO_ERROR; + } + +-/* Verify the ASCONF packet before we process it. */ +-int sctp_verify_asconf(const struct sctp_association *asoc, +- struct sctp_paramhdr *param_hdr, void *chunk_end, +- struct sctp_paramhdr **errp) { +- sctp_addip_param_t *asconf_param; ++/* Verify the ASCONF packet before we process it. 
*/ ++bool sctp_verify_asconf(const struct sctp_association *asoc, ++ struct sctp_chunk *chunk, bool addr_param_needed, ++ struct sctp_paramhdr **errp) ++{ ++ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr; + union sctp_params param; +- int length, plen; ++ bool addr_param_seen = false; + +- param.v = (sctp_paramhdr_t *) param_hdr; +- while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) { +- length = ntohs(param.p->length); +- *errp = param.p; +- +- if (param.v > chunk_end - length || +- length < sizeof(sctp_paramhdr_t)) +- return 0; ++ sctp_walk_params(param, addip, addip_hdr.params) { ++ size_t length = ntohs(param.p->length); + ++ *errp = param.p; + switch (param.p->type) { ++ case SCTP_PARAM_ERR_CAUSE: ++ break; ++ case SCTP_PARAM_IPV4_ADDRESS: ++ if (length != sizeof(sctp_ipv4addr_param_t)) ++ return false; ++ addr_param_seen = true; ++ break; ++ case SCTP_PARAM_IPV6_ADDRESS: ++ if (length != sizeof(sctp_ipv6addr_param_t)) ++ return false; ++ addr_param_seen = true; ++ break; + case SCTP_PARAM_ADD_IP: + case SCTP_PARAM_DEL_IP: + case SCTP_PARAM_SET_PRIMARY: +- asconf_param = (sctp_addip_param_t *)param.v; +- plen = ntohs(asconf_param->param_hdr.length); +- if (plen < sizeof(sctp_addip_param_t) + +- sizeof(sctp_paramhdr_t)) +- return 0; ++ /* In ASCONF chunks, these need to be first. */ ++ if (addr_param_needed && !addr_param_seen) ++ return false; ++ length = ntohs(param.addip->param_hdr.length); ++ if (length < sizeof(sctp_addip_param_t) + ++ sizeof(sctp_paramhdr_t)) ++ return false; + break; + case SCTP_PARAM_SUCCESS_REPORT: + case SCTP_PARAM_ADAPTATION_LAYER_IND: + if (length != sizeof(sctp_addip_param_t)) +- return 0; +- ++ return false; + break; + default: +- break; ++ /* This is unkown to us, reject! */ ++ return false; + } +- +- param.v += WORD_ROUND(length); + } + +- if (param.v != chunk_end) +- return 0; ++ /* Remaining sanity checks. */ ++ if (addr_param_needed && !addr_param_seen) ++ return false; ++ if (!addr_param_needed && addr_param_seen) ++ return false; ++ if (param.v != chunk->chunk_end) ++ return false; + +- return 1; ++ return true; + } + + /* Process an incoming ASCONF chunk with the next expected serial no. and +@@ -3175,16 +3191,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc, + struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, + struct sctp_chunk *asconf) + { ++ sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr; ++ bool all_param_pass = true; ++ union sctp_params param; + sctp_addiphdr_t *hdr; + union sctp_addr_param *addr_param; + sctp_addip_param_t *asconf_param; + struct sctp_chunk *asconf_ack; +- + __be16 err_code; + int length = 0; + int chunk_len; + __u32 serial; +- int all_param_pass = 1; + + chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); + hdr = (sctp_addiphdr_t *)asconf->skb->data; +@@ -3212,9 +3229,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, + goto done; + + /* Process the TLVs contained within the ASCONF chunk. */ +- while (chunk_len > 0) { ++ sctp_walk_params(param, addip, addip_hdr.params) { ++ /* Skip preceeding address parameters. 
*/ ++ if (param.p->type == SCTP_PARAM_IPV4_ADDRESS || ++ param.p->type == SCTP_PARAM_IPV6_ADDRESS) ++ continue; ++ + err_code = sctp_process_asconf_param(asoc, asconf, +- asconf_param); ++ param.addip); + /* ADDIP 4.1 A7) + * If an error response is received for a TLV parameter, + * all TLVs with no response before the failed TLV are +@@ -3222,28 +3244,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, + * the failed response are considered unsuccessful unless + * a specific success indication is present for the parameter. + */ +- if (SCTP_ERROR_NO_ERROR != err_code) +- all_param_pass = 0; +- ++ if (err_code != SCTP_ERROR_NO_ERROR) ++ all_param_pass = false; + if (!all_param_pass) +- sctp_add_asconf_response(asconf_ack, +- asconf_param->crr_id, err_code, +- asconf_param); ++ sctp_add_asconf_response(asconf_ack, param.addip->crr_id, ++ err_code, param.addip); + + /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add + * an IP address sends an 'Out of Resource' in its response, it + * MUST also fail any subsequent add or delete requests bundled + * in the ASCONF. + */ +- if (SCTP_ERROR_RSRC_LOW == err_code) ++ if (err_code == SCTP_ERROR_RSRC_LOW) + goto done; +- +- /* Move to the next ASCONF param. */ +- length = ntohs(asconf_param->param_hdr.length); +- asconf_param = (void *)asconf_param + length; +- chunk_len -= length; + } +- + done: + asoc->peer.addip_serial++; + +diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c +index 1dbcc6a4d800..bf12098bbe1c 100644 +--- a/net/sctp/sm_statefuns.c ++++ b/net/sctp/sm_statefuns.c +@@ -171,6 +171,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk, + { + __u16 chunk_length = ntohs(chunk->chunk_hdr->length); + ++ /* Previously already marked? */ ++ if (unlikely(chunk->pdiscard)) ++ return 0; + if (unlikely(chunk_length < required_length)) + return 0; + +@@ -3592,9 +3595,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, + struct sctp_chunk *asconf_ack = NULL; + struct sctp_paramhdr *err_param = NULL; + sctp_addiphdr_t *hdr; +- union sctp_addr_param *addr_param; + __u32 serial; +- int length; + + if (!sctp_vtag_verify(chunk, asoc)) { + sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, +@@ -3619,17 +3620,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net, + hdr = (sctp_addiphdr_t *)chunk->skb->data; + serial = ntohl(hdr->serial); + +- addr_param = (union sctp_addr_param *)hdr->params; +- length = ntohs(addr_param->p.length); +- if (length < sizeof(sctp_paramhdr_t)) +- return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, +- (void *)addr_param, commands); +- + /* Verify the ASCONF chunk before processing it. */ +- if (!sctp_verify_asconf(asoc, +- (sctp_paramhdr_t *)((void *)addr_param + length), +- (void *)chunk->chunk_end, +- &err_param)) ++ if (!sctp_verify_asconf(asoc, chunk, true, &err_param)) + return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, + (void *)err_param, commands); + +@@ -3747,10 +3739,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net, + rcvd_serial = ntohl(addip_hdr->serial); + + /* Verify the ASCONF-ACK chunk before processing it. 
*/ +- if (!sctp_verify_asconf(asoc, +- (sctp_paramhdr_t *)addip_hdr->params, +- (void *)asconf_ack->chunk_end, +- &err_param)) ++ if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param)) + return sctp_sf_violation_paramlen(net, ep, asoc, type, arg, + (void *)err_param, commands); + +diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c +index d42a584cf829..ea4b9a8a90bd 100644 +--- a/sound/usb/mixer_quirks.c ++++ b/sound/usb/mixer_quirks.c +@@ -802,6 +802,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl, + return changed; + } + ++static void kctl_private_value_free(struct snd_kcontrol *kctl) ++{ ++ kfree((void *)kctl->private_value); ++} ++ + static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer, + int validx, int bUnitID) + { +@@ -836,6 +841,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer, + return -ENOMEM; + } + ++ kctl->private_free = kctl_private_value_free; + err = snd_ctl_add(mixer->chip->card, kctl); + if (err < 0) + return err; |