author     Mike Pagano <mpagano@gentoo.org>  2018-09-09 19:28:47 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2018-11-14 09:00:41 -0500
commit     eedc648b507d860a378fca7dcd0cb8f7b111eeda
tree       4041ec45f92884d0cce2368a809efb7cda8842f8
parent     Linux patch 4.14.68
Linux patch 4.14.69
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--   0000_README              |    4
-rw-r--r--   1068_linux-4.14.69.patch | 3362
2 files changed, 3366 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 4fd9ed95..2a8e1bbd 100644
--- a/0000_README
+++ b/0000_README
@@ -315,6 +315,10 @@ Patch:  1067_linux-4.14.68.patch
 From:   http://www.kernel.org
 Desc:   Linux 4.14.68
 
+Patch:  1068_linux-4.14.69.patch
+From:   http://www.kernel.org
+Desc:   Linux 4.14.69
+
 Patch:  1500_XATTR_USER_PREFIX.patch
 From:   https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc:   Support for namespace user.pax.* on tmpfs.
diff --git a/1068_linux-4.14.69.patch b/1068_linux-4.14.69.patch
new file mode 100644
index 00000000..461b50e1
--- /dev/null
+++ b/1068_linux-4.14.69.patch
@@ -0,0 +1,3362 @@
+diff --git a/Makefile b/Makefile
+index 3da579058926..3ecda1d2e23a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 68
++SUBLEVEL = 69
+ EXTRAVERSION =
+ NAME = Petit Gorille
+ 
+diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
+index a48976dc9bcd..918c3938ef66 100644
+--- a/arch/alpha/kernel/osf_sys.c
++++ b/arch/alpha/kernel/osf_sys.c
+@@ -530,24 +530,19 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, const char __user *, path,
+ SYSCALL_DEFINE1(osf_utsname, char __user *, name)
+ {
+ 	int error;
++	char tmp[5 * 32];
+ 
+ 	down_read(&uts_sem);
+-	error = -EFAULT;
+-	if (copy_to_user(name + 0, utsname()->sysname, 32))
+-		goto out;
+-	if (copy_to_user(name + 32, utsname()->nodename, 32))
+-		goto out;
+-	if (copy_to_user(name + 64, utsname()->release, 32))
+-		goto out;
+-	if (copy_to_user(name + 96, utsname()->version, 32))
+-		goto out;
+-	if (copy_to_user(name + 128, utsname()->machine, 32))
+-		goto out;
++	memcpy(tmp + 0 * 32, utsname()->sysname, 32);
++	memcpy(tmp + 1 * 32, utsname()->nodename, 32);
++	memcpy(tmp + 2 * 32, utsname()->release, 32);
++	memcpy(tmp + 3 * 32, utsname()->version, 32);
++	memcpy(tmp + 4 * 32, utsname()->machine, 32);
++	up_read(&uts_sem);
+ 
+-	error = 0;
+- out:
+-	up_read(&uts_sem);
+-	return error;
++	if (copy_to_user(name, tmp, sizeof(tmp)))
++		return -EFAULT;
++	return 0;
+ }
+ 
+ SYSCALL_DEFINE0(getpagesize)
+@@ -567,18 +562,21 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
+ {
+ 	int len, err = 0;
+ 	char *kname;
++	char tmp[32];
+ 
+-	if (namelen > 32)
++	if (namelen < 0 || namelen > 32)
+ 		namelen = 32;
+ 
+ 	down_read(&uts_sem);
+ 	kname = utsname()->domainname;
+ 	len = strnlen(kname, namelen);
+-	if (copy_to_user(name, kname, min(len + 1, namelen)))
+-		err = -EFAULT;
++	len = min(len + 1, namelen);
++	memcpy(tmp, kname, len);
+ 	up_read(&uts_sem);
+ 
+-	return err;
++	if (copy_to_user(name, tmp, len))
++		return -EFAULT;
++	return 0;
+ }
+ 
+ /*
+@@ -739,13 +737,14 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
+ 	};
+ 	unsigned long offset;
+ 	const char *res;
+-	long len, err = -EINVAL;
++	long len;
++	char tmp[__NEW_UTS_LEN + 1];
+ 
+ 	offset = command-1;
+ 	if (offset >= ARRAY_SIZE(sysinfo_table)) {
+ 		/* Digital UNIX has a few unpublished interfaces here */
+ 		printk("sysinfo(%d)", command);
+-		goto out;
++		return -EINVAL;
+ 	}
+ 
+ 	down_read(&uts_sem);
+@@ -753,13 +752,11 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
+ 	len = strlen(res)+1;
+ 	if ((unsigned long)len > (unsigned long)count)
+ 		len = count;
+-	if (copy_to_user(buf, res, len))
+-		err = -EFAULT;
+-	else
+-		err = 0;
++	memcpy(tmp, res, len);
+ 	up_read(&uts_sem);
+- out:
+-	return err;
++	if (copy_to_user(buf, tmp, len))
++		return -EFAULT;
++	return 0;
+ }
+ 
+ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
+diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
+index 92a9740c533f..3b1db7b9ec50 100644
+--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
++++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
+@@ -206,6 +206,7 @@
+ 		#address-cells = <1>;
+ 		#size-cells = <0>;
+ 		reg = <0x70>;
++		reset-gpio = <&gpio TEGRA_GPIO(BB, 0) GPIO_ACTIVE_LOW>;
+ 	};
+ };
+ 
+diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
+index 1bbb89d37f57..c30cd78b6918 100644
+--- a/arch/arm64/Kconfig
++++ b/arch/arm64/Kconfig
+@@ -693,7 +693,6 @@ config NEED_PER_CPU_EMBED_FIRST_CHUNK
+ 
+ config HOLES_IN_ZONE
+ 	def_bool y
+-	depends on NUMA
+ 
+ source kernel/Kconfig.preempt
+ source kernel/Kconfig.hz
+diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
+index 5a23010af600..1e7a33592e29 100644
+--- a/arch/powerpc/include/asm/fadump.h
++++ b/arch/powerpc/include/asm/fadump.h
+@@ -195,9 +195,6 @@ struct fadump_crash_info_header {
+ 	struct cpumask	online_mask;
+ };
+ 
+-/* Crash memory ranges */
+-#define INIT_CRASHMEM_RANGES	(INIT_MEMBLOCK_REGIONS + 2)
+-
+ struct fad_crash_memory_ranges {
+ 	unsigned long long	base;
+ 	unsigned long long	size;
+diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
+index d0020bc1f209..5a6470383ca3 100644
+--- a/arch/powerpc/kernel/fadump.c
++++ b/arch/powerpc/kernel/fadump.c
+@@ -47,8 +47,10 @@ static struct fadump_mem_struct fdm;
+ static const struct fadump_mem_struct *fdm_active;
+ 
+ static DEFINE_MUTEX(fadump_mutex);
+-struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
++struct fad_crash_memory_ranges *crash_memory_ranges;
++int crash_memory_ranges_size;
+ int crash_mem_ranges;
++int max_crash_mem_ranges;
+ 
+ /* Scan the Firmware Assisted dump configuration details. */
+ int __init early_init_dt_scan_fw_dump(unsigned long node,
+@@ -843,38 +845,88 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
+ 	return 0;
+ }
+ 
+-static inline void fadump_add_crash_memory(unsigned long long base,
+-					unsigned long long end)
++static void free_crash_memory_ranges(void)
++{
++	kfree(crash_memory_ranges);
++	crash_memory_ranges = NULL;
++	crash_memory_ranges_size = 0;
++	max_crash_mem_ranges = 0;
++}
++
++/*
++ * Allocate or reallocate crash memory ranges array in incremental units
++ * of PAGE_SIZE.
++ */
++static int allocate_crash_memory_ranges(void)
++{
++	struct fad_crash_memory_ranges *new_array;
++	u64 new_size;
++
++	new_size = crash_memory_ranges_size + PAGE_SIZE;
++	pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
++		 new_size);
++
++	new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
++	if (new_array == NULL) {
++		pr_err("Insufficient memory for setting up crash memory ranges\n");
++		free_crash_memory_ranges();
++		return -ENOMEM;
++	}
++
++	crash_memory_ranges = new_array;
++	crash_memory_ranges_size = new_size;
++	max_crash_mem_ranges = (new_size /
++				sizeof(struct fad_crash_memory_ranges));
++	return 0;
++}
++
++static inline int fadump_add_crash_memory(unsigned long long base,
++					  unsigned long long end)
+ {
+ 	if (base == end)
+-		return;
++		return 0;
++
++	if (crash_mem_ranges == max_crash_mem_ranges) {
++		int ret;
++
++		ret = allocate_crash_memory_ranges();
++		if (ret)
++			return ret;
++	}
+ 
+ 	pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
+ 		crash_mem_ranges, base, end - 1, (end - base));
+ 	crash_memory_ranges[crash_mem_ranges].base = base;
+ 	crash_memory_ranges[crash_mem_ranges].size = end - base;
+ 	crash_mem_ranges++;
++	return 0;
+ }
+ 
+-static void fadump_exclude_reserved_area(unsigned long long start,
++static int fadump_exclude_reserved_area(unsigned long long start,
+ 					unsigned long long end)
+ {
+ 	unsigned long long ra_start, ra_end;
++	int ret = 0;
+ 
+ 	ra_start = fw_dump.reserve_dump_area_start;
+ 	ra_end = ra_start + fw_dump.reserve_dump_area_size;
+ 
+ 	if ((ra_start < end) && (ra_end > start)) {
+ 		if ((start < ra_start) && (end > ra_end)) {
+-			fadump_add_crash_memory(start, ra_start);
+-			fadump_add_crash_memory(ra_end, end);
++			ret = fadump_add_crash_memory(start, ra_start);
++			if (ret)
++				return ret;
++
++			ret = fadump_add_crash_memory(ra_end, end);
+ 		} else if (start < ra_start) {
+-			fadump_add_crash_memory(start, ra_start);
++			ret = fadump_add_crash_memory(start, ra_start);
+ 		} else if (ra_end < end) {
+-			fadump_add_crash_memory(ra_end, end);
++			ret = fadump_add_crash_memory(ra_end, end);
+ 		}
+ 	} else
+-		fadump_add_crash_memory(start, end);
++		ret = fadump_add_crash_memory(start, end);
++
++	return ret;
+ }
+ 
+ static int fadump_init_elfcore_header(char *bufp)
+@@ -914,10 +966,11 @@ static int fadump_init_elfcore_header(char *bufp)
+  * Traverse through memblock structure and setup crash memory ranges. These
+  * ranges will be used create PT_LOAD program headers in elfcore header.
+  */
+-static void fadump_setup_crash_memory_ranges(void)
++static int fadump_setup_crash_memory_ranges(void)
+ {
+ 	struct memblock_region *reg;
+ 	unsigned long long start, end;
++	int ret;
+ 
+ 	pr_debug("Setup crash memory ranges.\n");
+ 	crash_mem_ranges = 0;
+@@ -928,7 +981,9 @@ static void fadump_setup_crash_memory_ranges(void)
+ 	 * specified during fadump registration. We need to create a separate
+ 	 * program header for this chunk with the correct offset.
+ 	 */
+-	fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
++	ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
++	if (ret)
++		return ret;
+ 
+ 	for_each_memblock(memory, reg) {
+ 		start = (unsigned long long)reg->base;
+@@ -948,8 +1003,12 @@ static void fadump_setup_crash_memory_ranges(void)
+ 		}
+ 
+ 		/* add this range excluding the reserved dump area. */
+-		fadump_exclude_reserved_area(start, end);
++		ret = fadump_exclude_reserved_area(start, end);
++		if (ret)
++			return ret;
+ 	}
++
++	return 0;
+ }
+ 
+ /*
+@@ -1072,6 +1131,7 @@ static int register_fadump(void)
+ {
+ 	unsigned long addr;
+ 	void *vaddr;
++	int ret;
+ 
+ 	/*
+ 	 * If no memory is reserved then we can not register for firmware-
+@@ -1080,7 +1140,9 @@ static int register_fadump(void)
+ 	if (!fw_dump.reserve_dump_area_size)
+ 		return -ENODEV;
+ 
+-	fadump_setup_crash_memory_ranges();
++	ret = fadump_setup_crash_memory_ranges();
++	if (ret)
++		return ret;
+ 
+ 	addr = be64_to_cpu(fdm.rmr_region.destination_address) +
+ 			be64_to_cpu(fdm.rmr_region.source_len);
+ 	/* Initialize fadump crash info header. */
+@@ -1158,6 +1220,7 @@ void fadump_cleanup(void)
+ 	} else if (fw_dump.dump_registered) {
+ 		/* Un-register Firmware-assisted dump if it was registered. */
+ 		fadump_unregister_dump(&fdm);
++		free_crash_memory_ranges();
+ 	}
+ }
+ 
+diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
+index 816055927ee4..d735937d975c 100644
+--- a/arch/powerpc/mm/mmu_context_iommu.c
++++ b/arch/powerpc/mm/mmu_context_iommu.c
+@@ -130,6 +130,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ 	long i, j, ret = 0, locked_entries = 0;
+ 	unsigned int pageshift;
+ 	unsigned long flags;
++	unsigned long cur_ua;
+ 	struct page *page = NULL;
+ 
+ 	mutex_lock(&mem_list_mutex);
+@@ -178,7 +179,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ 	}
+ 
+ 	for (i = 0; i < entries; ++i) {
+-		if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
++		cur_ua = ua + (i << PAGE_SHIFT);
++		if (1 != get_user_pages_fast(cur_ua,
+ 					1/* pages */, 1/* iswrite */, &page)) {
+ 			ret = -EFAULT;
+ 			for (j = 0; j < i; ++j)
+@@ -197,7 +199,7 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ 		if (is_migrate_cma_page(page)) {
+ 			if (mm_iommu_move_page_from_cma(page))
+ 				goto populate;
+-			if (1 != get_user_pages_fast(ua + (i << PAGE_SHIFT),
++			if (1 != get_user_pages_fast(cur_ua,
+ 						1/* pages */, 1/* iswrite */,
+ 						&page)) {
+ 				ret = -EFAULT;
+@@ -211,20 +213,21 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
+ 		}
+ populate:
+ 		pageshift = PAGE_SHIFT;
+-		if (PageCompound(page)) {
++		if (mem->pageshift > PAGE_SHIFT && PageCompound(page)) {
+ 			pte_t *pte;
+ 			struct page *head = compound_head(page);
+ 			unsigned int compshift = compound_order(head);
++			unsigned int pteshift;
+ 
+ 			local_irq_save(flags); /* disables as well */
+-			pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+-			local_irq_restore(flags);
++			pte = find_linux_pte(mm->pgd, cur_ua, NULL, &pteshift);
+ 
+ 			/* Double check it is still the same pinned page */
+ 			if (pte && pte_page(*pte) == head &&
+-			    pageshift == compshift)
+-				pageshift = max_t(unsigned int, pageshift,
++			    pteshift == compshift + PAGE_SHIFT)
++				pageshift = max_t(unsigned int, pteshift,
+ 						PAGE_SHIFT);
++			local_irq_restore(flags);
+ 		}
+ 		mem->pageshift = min(mem->pageshift, pageshift);
+ 		mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
+diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
+index 677b29ef4532..e919696c7137 100644
+--- a/arch/powerpc/platforms/powernv/pci-ioda.c
++++ b/arch/powerpc/platforms/powernv/pci-ioda.c
+@@ -3286,12 +3286,49 @@ static void pnv_pci_ioda_create_dbgfs(void)
+ #endif /* CONFIG_DEBUG_FS */
+ }
+ 
++static void pnv_pci_enable_bridge(struct pci_bus *bus)
++{
++	struct pci_dev *dev = bus->self;
++	struct pci_bus *child;
++
++	/* Empty bus ? bail */
++	if (list_empty(&bus->devices))
++		return;
++
++	/*
++	 * If there's a bridge associated with that bus enable it. This works
++	 * around races in the generic code if the enabling is done during
++	 * parallel probing. This can be removed once those races have been
++	 * fixed.
++	 */
++	if (dev) {
++		int rc = pci_enable_device(dev);
++		if (rc)
++			pci_err(dev, "Error enabling bridge (%d)\n", rc);
++		pci_set_master(dev);
++	}
++
++	/* Perform the same to child busses */
++	list_for_each_entry(child, &bus->children, node)
++		pnv_pci_enable_bridge(child);
++}
++
++static void pnv_pci_enable_bridges(void)
++{
++	struct pci_controller *hose;
++
++	list_for_each_entry(hose, &hose_list, list_node)
++		pnv_pci_enable_bridge(hose->bus);
++}
++
+ static void pnv_pci_ioda_fixup(void)
+ {
+ 	pnv_pci_ioda_setup_PEs();
+ 	pnv_pci_ioda_setup_iommu_api();
+ 	pnv_pci_ioda_create_dbgfs();
+ 
++	pnv_pci_enable_bridges();
++
+ #ifdef CONFIG_EEH
+ 	eeh_init();
+ 	eeh_addr_cache_build();
+diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
+index 5e1ef9150182..2edc673be137 100644
+--- a/arch/powerpc/platforms/pseries/ras.c
++++ b/arch/powerpc/platforms/pseries/ras.c
+@@ -360,7 +360,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
+ 	}
+ 
+ 	savep = __va(regs->gpr[3]);
+-	regs->gpr[3] = savep[0];	/* restore original r3 */
++	regs->gpr[3] = be64_to_cpu(savep[0]);	/* restore original r3 */
+ 
+ 	/* If it isn't an extended log we can use the per cpu 64bit buffer */
+ 	h = (struct rtas_error_log *)&savep[1];
+diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
+index 990703b7cf4d..4b7719b2a73c 100644
+--- a/arch/sparc/kernel/sys_sparc_32.c
++++ b/arch/sparc/kernel/sys_sparc_32.c
+@@ -204,23 +204,27 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
+ 
+ asmlinkage long sys_getdomainname(char __user *name, int len)
+ {
+-	int nlen, err;
+-	
++	int nlen, err;
++	char tmp[__NEW_UTS_LEN + 1];
++
+ 	if (len < 0)
+ 		return -EINVAL;
+ 
+-	down_read(&uts_sem);
+-	
++	down_read(&uts_sem);
++
+ 	nlen = strlen(utsname()->domainname) + 1;
+ 	err = -EINVAL;
+ 	if (nlen > len)
+-		goto out;
++		goto out_unlock;
++	memcpy(tmp, utsname()->domainname, nlen);
+ 
+-	err = -EFAULT;
+-	if (!copy_to_user(name, utsname()->domainname, nlen))
+-		err = 0;
++	up_read(&uts_sem);
+ 
+-out:
++	if (copy_to_user(name, tmp, nlen))
++		return -EFAULT;
++	return 0;
++
++out_unlock:
+ 	up_read(&uts_sem);
+ 	return err;
+ }
+diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
+index 55416db482ad..d79c1c74873c 100644
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -527,23 +527,27 @@ extern void check_pending(int signum);
+ 
+ SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
+ {
+-	int nlen, err;
++	int nlen, err;
++	char tmp[__NEW_UTS_LEN + 1];
+ 
+ 	if (len < 0)
+ 		return -EINVAL;
+ 
+-	down_read(&uts_sem);
+-	
++	down_read(&uts_sem);
++
+ 	nlen = strlen(utsname()->domainname) + 1;
+ 	err = -EINVAL;
+ 	if (nlen > len)
+-		goto out;
++		goto out_unlock;
++	memcpy(tmp, utsname()->domainname, nlen);
++
++	up_read(&uts_sem);
+ 
+-	err = -EFAULT;
+-	if (!copy_to_user(name, utsname()->domainname, nlen))
+-		err = 0;
++	if (copy_to_user(name, tmp, nlen))
++		return -EFAULT;
++	return 0;
+ 
+-out:
++out_unlock:
+ 	up_read(&uts_sem);
+ 	return err;
+ }
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index f24cd9f1799a..928b0c6083c9 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -532,7 +532,7 @@ static int bzImage64_cleanup(void *loader_data)
+ static int bzImage64_verify_sig(const char *kernel, unsigned long kernel_len)
+ {
+ 	return verify_pefile_signature(kernel, kernel_len,
+-				       NULL,
++				       VERIFY_USE_SECONDARY_KEYRING,
+ 				       VERIFYING_KEXEC_PE_SIGNATURE);
+ }
+ #endif
+diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
+index 8958b35f6008..a466ee14ad41 100644
+--- a/arch/x86/kvm/vmx.c
++++ b/arch/x86/kvm/vmx.c
+@@ -200,12 +200,14 @@ static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_
+ 
+ static const struct {
+ 	const char *option;
+-	enum vmx_l1d_flush_state cmd;
++	bool for_parse;
+ } vmentry_l1d_param[] = {
+-	{"auto",	VMENTER_L1D_FLUSH_AUTO},
+-	{"never",	VMENTER_L1D_FLUSH_NEVER},
+-	{"cond",	VMENTER_L1D_FLUSH_COND},
+-	{"always",	VMENTER_L1D_FLUSH_ALWAYS},
++	[VMENTER_L1D_FLUSH_AUTO]	 = {"auto", true},
++	[VMENTER_L1D_FLUSH_NEVER]	 = {"never", true},
++	[VMENTER_L1D_FLUSH_COND]	 = {"cond", true},
++	[VMENTER_L1D_FLUSH_ALWAYS]	 = {"always", true},
++	[VMENTER_L1D_FLUSH_EPT_DISABLED] = {"EPT disabled", false},
++	[VMENTER_L1D_FLUSH_NOT_REQUIRED] = {"not required", false},
+ };
+ 
+ #define L1D_CACHE_ORDER 4
+@@ -289,8 +291,9 @@ static int vmentry_l1d_flush_parse(const char *s)
+ 
+ 	if (s) {
+ 		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+-			if (sysfs_streq(s, vmentry_l1d_param[i].option))
+-				return vmentry_l1d_param[i].cmd;
++			if (vmentry_l1d_param[i].for_parse &&
++			    sysfs_streq(s, vmentry_l1d_param[i].option))
++				return i;
+ 		}
+ 	}
+ 	return -EINVAL;
+@@ -300,13 +303,13 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+ {
+ 	int l1tf, ret;
+ 
+-	if (!boot_cpu_has(X86_BUG_L1TF))
+-		return 0;
+-
+ 	l1tf = vmentry_l1d_flush_parse(s);
+ 	if (l1tf < 0)
+ 		return l1tf;
+ 
++	if (!boot_cpu_has(X86_BUG_L1TF))
++		return 0;
++
+ 	/*
+ 	 * Has vmx_init() run already? If not then this is the pre init
+ 	 * parameter parsing. In that case just store the value and let
+@@ -326,6 +329,9 @@ static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+ 
+ static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
+ {
++	if (WARN_ON_ONCE(l1tf_vmx_mitigation >= ARRAY_SIZE(vmentry_l1d_param)))
++		return sprintf(s, "???\n");
++
+ 	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
+ }
+ 
+diff --git a/arch/xtensa/include/asm/cacheasm.h b/arch/xtensa/include/asm/cacheasm.h
+index 2041abb10a23..34545ecfdd6b 100644
+--- a/arch/xtensa/include/asm/cacheasm.h
++++ b/arch/xtensa/include/asm/cacheasm.h
+@@ -31,16 +31,32 @@
+  *
+  */
+ 
+-	.macro	__loop_cache_all ar at insn size line_width
+ 
+-	movi	\ar, 0
++	.macro	__loop_cache_unroll ar at insn size line_width max_immed
++
++	.if	(1 << (\line_width)) > (\max_immed)
++	.set	_reps, 1
++	.elseif	(2 << (\line_width)) > (\max_immed)
++	.set	_reps, 2
++	.else
++	.set	_reps, 4
++	.endif
++
++	__loopi	\ar, \at, \size, (_reps << (\line_width))
++	.set	_index, 0
++	.rep	_reps
++	\insn	\ar, _index << (\line_width)
++	.set	_index, _index + 1
++	.endr
++	__endla	\ar, \at, _reps << (\line_width)
++
++	.endm
++
+ 
+-	__loopi	\ar, \at, \size, (4 << (\line_width))
+-	\insn	\ar, 0 << (\line_width)
+-	\insn	\ar, 1 << (\line_width)
+-	\insn	\ar, 2 << (\line_width)
+-	\insn	\ar, 3 << (\line_width)
+-	__endla	\ar, \at, 4 << (\line_width)
++	.macro	__loop_cache_all ar at insn size line_width max_immed
++
++	movi	\ar, 0
++	__loop_cache_unroll \ar, \at, \insn, \size, \line_width, \max_immed
+ 
+ 	.endm
+ 
+@@ -57,14 +73,9 @@
+ 	.endm
+ 
+ 
+-	.macro	__loop_cache_page ar at insn line_width
++	.macro	__loop_cache_page ar at insn line_width max_immed
+ 
+-	__loopi	\ar, \at, PAGE_SIZE, 4 << (\line_width)
+-	\insn	\ar, 0 << (\line_width)
+-	\insn	\ar, 1 << (\line_width)
+-	\insn	\ar, 2 << (\line_width)
+-	\insn	\ar, 3 << (\line_width)
+-	__endla	\ar, \at, 4 << (\line_width)
++	__loop_cache_unroll \ar, \at, \insn, PAGE_SIZE, \line_width, \max_immed
+ 
+ 	.endm
+ 
+@@ -72,7 +83,8 @@
+ 	.macro	___unlock_dcache_all ar at
+ 
+ #if XCHAL_DCACHE_LINE_LOCKABLE && XCHAL_DCACHE_SIZE
+-	__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
++	__loop_cache_all \ar \at diu XCHAL_DCACHE_SIZE \
++		XCHAL_DCACHE_LINEWIDTH 240
+ #endif
+ 
+ 	.endm
+ 
+@@ -81,7 +93,8 @@
+ 	.macro	___unlock_icache_all ar at
+ 
+ #if XCHAL_ICACHE_LINE_LOCKABLE && XCHAL_ICACHE_SIZE
+-	__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE XCHAL_ICACHE_LINEWIDTH
++	__loop_cache_all \ar \at iiu XCHAL_ICACHE_SIZE \
++		XCHAL_ICACHE_LINEWIDTH 240
+ #endif
+ 
+ 	.endm
+ 
+@@ -90,7 +103,8 @@
+ 	.macro	___flush_invalidate_dcache_all ar at
+ 
+ #if XCHAL_DCACHE_SIZE
+-	__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
++	__loop_cache_all \ar \at diwbi XCHAL_DCACHE_SIZE \
++		XCHAL_DCACHE_LINEWIDTH 240
+ #endif
+ 
+ 	.endm
+ 
+@@ -99,7 +113,8 @@
+ 	.macro	___flush_dcache_all ar at
+ 
+ #if XCHAL_DCACHE_SIZE
+-	__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE XCHAL_DCACHE_LINEWIDTH
++	__loop_cache_all \ar \at diwb XCHAL_DCACHE_SIZE \
++		XCHAL_DCACHE_LINEWIDTH 240
+ #endif
+ 
+ 	.endm
+ 
+@@ -108,8 +123,8 @@
+ 	.macro	___invalidate_dcache_all ar at
+ 
+ #if XCHAL_DCACHE_SIZE
+-	__loop_cache_all \ar \at dii __stringify(DCACHE_WAY_SIZE) \
+-			XCHAL_DCACHE_LINEWIDTH
++	__loop_cache_all \ar \at dii XCHAL_DCACHE_SIZE \
++		XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+ 
+ 	.endm
+ 
+@@ -118,8 +133,8 @@
+ 	.macro	___invalidate_icache_all ar at
+ 
+ #if XCHAL_ICACHE_SIZE
+-	__loop_cache_all \ar \at iii __stringify(ICACHE_WAY_SIZE) \
+-			XCHAL_ICACHE_LINEWIDTH
++	__loop_cache_all \ar \at iii XCHAL_ICACHE_SIZE \
++		XCHAL_ICACHE_LINEWIDTH 1020
+ #endif
+ 
+ 	.endm
+ 
+@@ -166,7 +181,7 @@
+ 	.macro	___flush_invalidate_dcache_page ar as
+ 
+ #if XCHAL_DCACHE_SIZE
+-	__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH
++	__loop_cache_page \ar \as dhwbi XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+ 
+ 	.endm
+ 
+@@ -175,7 +190,7 @@
+ 	.macro ___flush_dcache_page ar as
+ 
+ #if XCHAL_DCACHE_SIZE
+-	__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH
++	__loop_cache_page \ar \as dhwb XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+ 
+ 	.endm
+ 
+@@ -184,7 +199,7 @@
+ 	.macro	___invalidate_dcache_page ar as
+ 
+ #if XCHAL_DCACHE_SIZE
+-	__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH
++	__loop_cache_page \ar \as dhi XCHAL_DCACHE_LINEWIDTH 1020
+ #endif
+ 
+ 	.endm
+ 
+@@ -193,7 +208,7 @@
+ 	.macro	___invalidate_icache_page ar as
+ 
+ #if XCHAL_ICACHE_SIZE
+-	__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH
++	__loop_cache_page \ar \as ihi XCHAL_ICACHE_LINEWIDTH 1020
+ #endif
+ 
+ 	.endm
+diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
+index 5d53e504acae..4b571f3ea009 100644
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -887,7 +887,8 @@ static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
+ 	if (ret)
+ 		return ret;
+ 
+-	return bfq_io_set_weight_legacy(of_css(of), NULL, weight);
++	ret = bfq_io_set_weight_legacy(of_css(of), NULL, weight);
++	return ret ?: nbytes;
+ }
+ 
+ static int bfqg_print_stat(struct seq_file *sf, void *v)
+diff --git a/block/blk-core.c b/block/blk-core.c
+index 68bae6338ad4..1d27e2a152e0 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -1025,6 +1025,7 @@ out_exit_flush_rq:
+ 		q->exit_rq_fn(q, q->fq->flush_rq);
+ out_free_flush_queue:
+ 	blk_free_flush_queue(q->fq);
++	q->fq = NULL;
+ 	return -ENOMEM;
+ }
+ EXPORT_SYMBOL(blk_init_allocated_queue);
+@@ -3458,9 +3459,11 @@ EXPORT_SYMBOL(blk_finish_plug);
+  */
+ void blk_pm_runtime_init(struct request_queue *q, struct device *dev)
+ {
+-	/* not support for RQF_PM and ->rpm_status in blk-mq yet */
+-	if (q->mq_ops)
++	/* Don't enable runtime PM for blk-mq until it is ready */
++	if (q->mq_ops) {
++		pm_runtime_disable(dev);
+ 		return;
++	}
+ 
+ 	q->dev = dev;
+ 	q->rpm_status = RPM_ACTIVE;
+diff --git a/certs/system_keyring.c b/certs/system_keyring.c
+index 6251d1b27f0c..81728717523d 100644
+--- a/certs/system_keyring.c
++++ b/certs/system_keyring.c
+@@ -15,6 +15,7 @@
+ #include <linux/cred.h>
+ #include <linux/err.h>
+ #include <linux/slab.h>
++#include <linux/verification.h>
+ #include <keys/asymmetric-type.h>
+ #include <keys/system_keyring.h>
+ #include <crypto/pkcs7.h>
+@@ -230,7 +231,7 @@ int verify_pkcs7_signature(const void *data, size_t len,
+ 
+ 	if (!trusted_keys) {
+ 		trusted_keys = builtin_trusted_keys;
+-	} else if (trusted_keys == (void *)1UL) {
++	} else if (trusted_keys == VERIFY_USE_SECONDARY_KEYRING) {
+ #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
+ 		trusted_keys = secondary_trusted_keys;
+ #else
+diff --git a/crypto/asymmetric_keys/pkcs7_key_type.c b/crypto/asymmetric_keys/pkcs7_key_type.c
+index 1063b644efcd..b2aa925a84bc 100644
+--- a/crypto/asymmetric_keys/pkcs7_key_type.c
++++ b/crypto/asymmetric_keys/pkcs7_key_type.c
+@@ -62,7 +62,7 @@ static int pkcs7_preparse(struct key_preparsed_payload *prep)
+ 
+ 	return verify_pkcs7_signature(NULL, 0,
+ 				      prep->data, prep->datalen,
+-				      (void *)1UL, usage,
++				      VERIFY_USE_SECONDARY_KEYRING, usage,
+ 				      pkcs7_view_content, prep);
+ }
+ 
+diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
+index f149d3e61234..1e2648e4c286 100644
+--- a/drivers/block/zram/zram_drv.c
++++ b/drivers/block/zram/zram_drv.c
+@@ -321,6 +321,7 @@ static ssize_t backing_dev_store(struct device *dev,
+ 		struct device_attribute *attr, const char *buf, size_t len)
+ {
+ 	char *file_name;
++	size_t sz;
+ 	struct file *backing_dev = NULL;
+ 	struct inode *inode;
+ 	struct address_space *mapping;
+@@ -341,7 +342,11 @@ static ssize_t backing_dev_store(struct device *dev,
+ 		goto out;
+ 	}
+ 
+-	strlcpy(file_name, buf, len);
++	strlcpy(file_name, buf, PATH_MAX);
++	/* ignore trailing newline */
++	sz = strlen(file_name);
++	if (sz > 0 && file_name[sz - 1] == '\n')
++		file_name[sz - 1] = 0x00;
+ 
+ 	backing_dev = filp_open(file_name, O_RDWR|O_LARGEFILE, 0);
+ 	if (IS_ERR(backing_dev)) {
+diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
+index 43e14bb512c8..6a16d22bc604 100644
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -555,12 +555,20 @@ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_stop);
+ 
+ void cpufreq_dbs_governor_limits(struct cpufreq_policy *policy)
+ {
+-	struct policy_dbs_info *policy_dbs = policy->governor_data;
++	struct policy_dbs_info *policy_dbs;
++
++	/* Protect gov->gdbs_data against cpufreq_dbs_governor_exit() */
++	mutex_lock(&gov_dbs_data_mutex);
++	policy_dbs = policy->governor_data;
++	if (!policy_dbs)
++		goto out;
+ 
+ 	mutex_lock(&policy_dbs->update_mutex);
+ 	cpufreq_policy_apply_limits(policy);
+ 	gov_update_sample_delay(policy_dbs, 0);
+-
+ 	mutex_unlock(&policy_dbs->update_mutex);
++
++out:
++	mutex_unlock(&gov_dbs_data_mutex);
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_dbs_governor_limits);
+diff --git a/drivers/crypto/caam/caamalg_qi.c b/drivers/crypto/caam/caamalg_qi.c
+index e7966e37a5aa..ecc6d755d3c1 100644
+--- a/drivers/crypto/caam/caamalg_qi.c
++++ b/drivers/crypto/caam/caamalg_qi.c
+@@ -350,10 +350,8 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ 	int ret = 0;
+ 
+ 	if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) {
+-		crypto_ablkcipher_set_flags(ablkcipher,
+-					    CRYPTO_TFM_RES_BAD_KEY_LEN);
+ 		dev_err(jrdev, "key size mismatch\n");
+-		return -EINVAL;
++		goto badkey;
+ 	}
+ 
+ 	memcpy(ctx->key, key, keylen);
+@@ -388,7 +386,7 @@ static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
+ 	return ret;
+ badkey:
+ 	crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+-	return 0;
++	return -EINVAL;
+ }
+ 
+ /*
+diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
+index 7ff4a25440ac..6f3f81bb880b 100644
+--- a/drivers/crypto/caam/caampkc.c
++++ b/drivers/crypto/caam/caampkc.c
+@@ -71,8 +71,8 @@ static void rsa_priv_f2_unmap(struct device *dev, struct rsa_edesc *edesc,
+ 	dma_unmap_single(dev, pdb->d_dma, key->d_sz, DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, pdb->p_dma, p_sz, DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
++	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
++	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
+ }
+ 
+ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
+@@ -90,8 +90,8 @@ static void rsa_priv_f3_unmap(struct device *dev, struct rsa_edesc *edesc,
+ 	dma_unmap_single(dev, pdb->dp_dma, p_sz, DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, pdb->dq_dma, q_sz, DMA_TO_DEVICE);
+ 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
+-	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_TO_DEVICE);
++	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
++	dma_unmap_single(dev, pdb->tmp2_dma, q_sz, DMA_BIDIRECTIONAL);
+ }
+ 
+ /* RSA Job Completion handler */
+@@ -417,13 +417,13 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+ 		goto unmap_p;
+ 	}
+ 
+-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
++	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
+ 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+ 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
+ 		goto unmap_q;
+ 	}
+ 
+-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
++	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
+ 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+ 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
+ 		goto unmap_tmp1;
+@@ -451,7 +451,7 @@ static int set_rsa_priv_f2_pdb(struct akcipher_request *req,
+ 	return 0;
+ 
+ unmap_tmp1:
+-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
++	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+ unmap_q:
+ 	dma_unmap_single(dev, pdb->q_dma, q_sz, DMA_TO_DEVICE);
+ unmap_p:
+@@ -504,13 +504,13 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
+ 		goto unmap_dq;
+ 	}
+ 
+-	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_TO_DEVICE);
++	pdb->tmp1_dma = dma_map_single(dev, key->tmp1, p_sz, DMA_BIDIRECTIONAL);
+ 	if (dma_mapping_error(dev, pdb->tmp1_dma)) {
+ 		dev_err(dev, "Unable to map RSA tmp1 memory\n");
+ 		goto unmap_qinv;
+ 	}
+ 
+-	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_TO_DEVICE);
++	pdb->tmp2_dma = dma_map_single(dev, key->tmp2, q_sz, DMA_BIDIRECTIONAL);
+ 	if (dma_mapping_error(dev, pdb->tmp2_dma)) {
+ 		dev_err(dev, "Unable to map RSA tmp2 memory\n");
+ 		goto unmap_tmp1;
+@@ -538,7 +538,7 @@ static int set_rsa_priv_f3_pdb(struct akcipher_request *req,
+ 	return 0;
+ 
+ unmap_tmp1:
+-	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_TO_DEVICE);
++	dma_unmap_single(dev, pdb->tmp1_dma, p_sz, DMA_BIDIRECTIONAL);
+ unmap_qinv:
+ 	dma_unmap_single(dev, pdb->c_dma, p_sz, DMA_TO_DEVICE);
+ unmap_dq:
+diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
+index d258953ff488..7fa1be184553 100644
+--- a/drivers/crypto/caam/jr.c
++++ b/drivers/crypto/caam/jr.c
+@@ -190,7 +190,8 @@ static void caam_jr_dequeue(unsigned long devarg)
+ 		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);
+ 
+ 		/* Unmap just-run descriptor so we can post-process */
+-		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
++		dma_unmap_single(dev,
++				 caam_dma_to_cpu(jrp->outring[hw_idx].desc),
+ 				 jrp->entinfo[sw_idx].desc_size,
+ 				 DMA_TO_DEVICE);
+ 
+diff --git a/drivers/crypto/vmx/aes_cbc.c b/drivers/crypto/vmx/aes_cbc.c
+index 5285ece4f33a..b71895871be3 100644
+--- a/drivers/crypto/vmx/aes_cbc.c
++++ b/drivers/crypto/vmx/aes_cbc.c
+@@ -107,24 +107,23 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
+ 		ret = crypto_skcipher_encrypt(req);
+ 		skcipher_request_zero(req);
+ 	} else {
+-		preempt_disable();
+-		pagefault_disable();
+-		enable_kernel_vsx();
+-
+ 		blkcipher_walk_init(&walk, dst, src, nbytes);
+ 		ret = blkcipher_walk_virt(desc, &walk);
+ 		while ((nbytes = walk.nbytes)) {
++			preempt_disable();
++			pagefault_disable();
++			enable_kernel_vsx();
+ 			aes_p8_cbc_encrypt(walk.src.virt.addr,
+ 					   walk.dst.virt.addr,
+ 					   nbytes & AES_BLOCK_MASK,
+ 					   &ctx->enc_key, walk.iv, 1);
++			disable_kernel_vsx();
++			pagefault_enable();
++			preempt_enable();
++
+ 			nbytes &= AES_BLOCK_SIZE - 1;
+ 			ret = blkcipher_walk_done(desc, &walk, nbytes);
+ 		}
+-
+-		disable_kernel_vsx();
+-		pagefault_enable();
+-		preempt_enable();
+ 	}
+ 
+ 	return ret;
+@@ -147,24 +146,23 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
+ 		ret = crypto_skcipher_decrypt(req);
+ 		skcipher_request_zero(req);
+ 	} else {
+-		preempt_disable();
+-		pagefault_disable();
+-		enable_kernel_vsx();
+-
+ 		blkcipher_walk_init(&walk, dst, src, nbytes);
+ 		ret = blkcipher_walk_virt(desc, &walk);
+ 		while ((nbytes = walk.nbytes)) {
++			preempt_disable();
++			pagefault_disable();
++			enable_kernel_vsx();
+ 			aes_p8_cbc_encrypt(walk.src.virt.addr,
+ 					   walk.dst.virt.addr,
+ 					   nbytes & AES_BLOCK_MASK,
+ 					   &ctx->dec_key, walk.iv, 0);
++			disable_kernel_vsx();
++			pagefault_enable();
++			preempt_enable();
++
+ 			nbytes &= AES_BLOCK_SIZE - 1;
+ 			ret = blkcipher_walk_done(desc, &walk, nbytes);
+ 		}
+-
+-		disable_kernel_vsx();
+-		pagefault_enable();
+-		preempt_enable();
+ 	}
+ 
+ 	return ret;
+diff --git a/drivers/crypto/vmx/aes_xts.c b/drivers/crypto/vmx/aes_xts.c
+index 8bd9aff0f55f..e9954a7d4694 100644
+--- a/drivers/crypto/vmx/aes_xts.c
++++ b/drivers/crypto/vmx/aes_xts.c
+@@ -116,32 +116,39 @@ static int p8_aes_xts_crypt(struct blkcipher_desc *desc,
+ 		ret = enc? crypto_skcipher_encrypt(req) : crypto_skcipher_decrypt(req);
+ 		skcipher_request_zero(req);
+ 	} else {
++		blkcipher_walk_init(&walk, dst, src, nbytes);
++
++		ret = blkcipher_walk_virt(desc, &walk);
++
+ 		preempt_disable();
+ 		pagefault_disable();
+ 		enable_kernel_vsx();
+ 
+-		blkcipher_walk_init(&walk, dst, src, nbytes);
+-
+-		ret = blkcipher_walk_virt(desc, &walk);
+ 		iv = walk.iv;
+ 		memset(tweak, 0, AES_BLOCK_SIZE);
+ 		aes_p8_encrypt(iv, tweak, &ctx->tweak_key);
+ 
++		disable_kernel_vsx();
++		pagefault_enable();
++		preempt_enable();
++
+ 		while ((nbytes = walk.nbytes)) {
++			preempt_disable();
++			pagefault_disable();
++			enable_kernel_vsx();
+ 			if (enc)
+ 				aes_p8_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ 						nbytes & AES_BLOCK_MASK, &ctx->enc_key, NULL, tweak);
+ 			else
+ 				aes_p8_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+ 						nbytes & AES_BLOCK_MASK, &ctx->dec_key, NULL, tweak);
++			disable_kernel_vsx();
++			pagefault_enable();
++			preempt_enable();
+ 
+ 			nbytes &= AES_BLOCK_SIZE - 1;
+ 			ret = blkcipher_walk_done(desc, &walk, nbytes);
+ 		}
+-
+-		disable_kernel_vsx();
+-		pagefault_enable();
+-		preempt_enable();
+ 	}
+ 	return ret;
+ }
+diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
+index 35e9fb885486..95e96f04bf6f 100644
+--- a/drivers/extcon/extcon.c
++++ b/drivers/extcon/extcon.c
+@@ -433,8 +433,8 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
+ 		return index;
+ 
+ 	spin_lock_irqsave(&edev->lock, flags);
+-
+ 	state = !!(edev->state & BIT(index));
++	spin_unlock_irqrestore(&edev->lock, flags);
+ 
+ 	/*
+ 	 * Call functions in a raw notifier chain for the specific one
+@@ -448,6 +448,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
+ 	 */
+ 	raw_notifier_call_chain(&edev->nh_all, state, edev);
+ 
++	spin_lock_irqsave(&edev->lock, flags);
+ 	/* This could be in interrupt handler */
+ 	prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
+ 	if (!prop_buf) {
+diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
+index 709efe2357ea..05ae8c4a8a1b 100644
+--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
++++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
+@@ -782,6 +782,9 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
+ 			    I915_USERPTR_UNSYNCHRONIZED))
+ 		return -EINVAL;
+ 
++	if (!args->user_size)
++		return -EINVAL;
++
+ 	if (offset_in_page(args->user_ptr | args->user_size))
+ 		return -EINVAL;
+ 
+diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
+index 05964347008d..d96b09fea835 100644
+--- a/drivers/hv/channel.c
++++ b/drivers/hv/channel.c
+@@ -541,11 +541,8 @@ static void reset_channel_cb(void *arg)
+ 	channel->onchannel_callback = NULL;
+ }
+ 
+-static int vmbus_close_internal(struct vmbus_channel *channel)
++void vmbus_reset_channel_cb(struct vmbus_channel *channel)
+ {
+-	struct vmbus_channel_close_channel *msg;
+-	int ret;
+-
+ 	/*
+ 	 * vmbus_on_event(), running in the per-channel tasklet, can race
+ 	 * with vmbus_close_internal() in the case of SMP guest, e.g., when
+@@ -555,6 +552,29 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
+ 	 */
+ 	tasklet_disable(&channel->callback_event);
+ 
++	channel->sc_creation_callback = NULL;
++
++	/* Stop the callback asap */
++	if (channel->target_cpu != get_cpu()) {
++		put_cpu();
++		smp_call_function_single(channel->target_cpu, reset_channel_cb,
++					 channel, true);
++	} else {
++		reset_channel_cb(channel);
++		put_cpu();
++	}
++
++	/* Re-enable tasklet for use on re-open */
++	tasklet_enable(&channel->callback_event);
++}
++
++static int vmbus_close_internal(struct vmbus_channel *channel)
++{
++	struct vmbus_channel_close_channel *msg;
++	int ret;
++
++	vmbus_reset_channel_cb(channel);
++
+ 	/*
+ 	 * In case a device driver's probe() fails (e.g.,
+ 	 * util_probe() -> vmbus_open() returns -ENOMEM) and the device is
+@@ -568,16 +588,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
+ 	}
+ 
+ 	channel->state = CHANNEL_OPEN_STATE;
+-	channel->sc_creation_callback = NULL;
+-	/* Stop callback and cancel the timer asap */
+-	if (channel->target_cpu != get_cpu()) {
+-		put_cpu();
+-		smp_call_function_single(channel->target_cpu, reset_channel_cb,
+-					 channel, true);
+-	} else {
+-		reset_channel_cb(channel);
+-		put_cpu();
+-	}
+ 
+ 	/* Send a closing message */
+ 
+@@ -620,8 +630,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
+ 		get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+ 
+ out:
+-	/* re-enable tasklet for use on re-open */
+-	tasklet_enable(&channel->callback_event);
+ 	return ret;
+ }
+ 
+diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
+index 1939c0ca3741..1700b4e7758d 100644
+--- a/drivers/hv/channel_mgmt.c
++++ b/drivers/hv/channel_mgmt.c
+@@ -881,6 +881,12 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
+ 		return;
+ 	}
+ 
++	/*
++	 * Before setting channel->rescind in vmbus_rescind_cleanup(), we
++	 * should make sure the channel callback is not running any more.
++	 */
++	vmbus_reset_channel_cb(channel);
++
+ 	/*
+ 	 * Now wait for offer handling to complete.
+ 	 */
+diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
+index 565f7d8d3304..f2761b385541 100644
+--- a/drivers/iio/accel/sca3000.c
++++ b/drivers/iio/accel/sca3000.c
+@@ -797,6 +797,7 @@ static int sca3000_write_raw(struct iio_dev *indio_dev,
+ 		mutex_lock(&st->lock);
+ 		ret = sca3000_write_3db_freq(st, val);
+ 		mutex_unlock(&st->lock);
++		return ret;
+ 	default:
+ 		return -EINVAL;
+ 	}
+diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
+index 99eba524f6dd..1642b55f70da 100644
+--- a/drivers/iio/frequency/ad9523.c
++++ b/drivers/iio/frequency/ad9523.c
+@@ -508,7 +508,7 @@ static ssize_t ad9523_store(struct device *dev,
+ 		return ret;
+ 
+ 	if (!state)
+-		return 0;
++		return len;
+ 
+ 	mutex_lock(&indio_dev->mlock);
+ 	switch ((u32)this_attr->address) {
+@@ -642,7 +642,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
+ 		code = (AD9523_CLK_DIST_DIV_PHASE_REV(ret) * 3141592) /
+ 			AD9523_CLK_DIST_DIV_REV(ret);
+ 		*val = code / 1000000;
+-		*val2 = (code % 1000000) * 10;
++		*val2 = code % 1000000;
+ 		return IIO_VAL_INT_PLUS_MICRO;
+ 	default:
+ 		return -EINVAL;
+diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
+index 9eb12c2e3c74..83cfe44f070e 100644
+--- a/drivers/infiniband/sw/rxe/rxe_comp.c
++++ b/drivers/infiniband/sw/rxe/rxe_comp.c
+@@ -276,6 +276,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
+ 	case IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE:
+ 		if (wqe->wr.opcode != IB_WR_RDMA_READ &&
+ 		    wqe->wr.opcode != IB_WR_RDMA_READ_WITH_INV) {
++			wqe->status = IB_WC_FATAL_ERR;
+ 			return COMPST_ERROR;
+ 		}
+ 		reset_retry_counters(qp);
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 97c2225829ea..60105ba77889 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -1713,8 +1713,7 @@ static bool srpt_close_ch(struct srpt_rdma_ch *ch)
+ 	int ret;
+ 
+ 	if (!srpt_set_ch_state(ch, CH_DRAINING)) {
+-		pr_debug("%s-%d: already closed\n", ch->sess_name,
+-			 ch->qp->qp_num);
++		pr_debug("%s: already closed\n", ch->sess_name);
+ 		return false;
+ 	}
+ 
+diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
+index e3dbb6101b4a..c0d1c4db5794 100644
+--- a/drivers/iommu/dmar.c
++++ b/drivers/iommu/dmar.c
+@@ -1336,8 +1336,8 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
+ 	qi_submit_sync(&desc, iommu);
+ }
+ 
+-void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+-			u64 addr, unsigned mask)
++void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
++			u16 qdep, u64 addr, unsigned mask)
+ {
+ 	struct qi_desc desc;
+ 
+@@ -1352,7 +1352,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
+ 		qdep = 0;
+ 
+ 	desc.low = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
+-		   QI_DIOTLB_TYPE;
++		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
+ 
+ 	qi_submit_sync(&desc, iommu);
+ }
+diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
+index e8414bcf8390..aaf3fed97477 100644
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -422,6 +422,7 @@ struct device_domain_info {
+ 	struct list_head global; /* link to global list */
+ 	u8 bus;			/* PCI bus number */
+ 	u8 devfn;		/* PCI devfn number */
++	u16 pfsid;		/* SRIOV physical function source ID */
+ 	u8 pasid_supported:3;
+ 	u8 pasid_enabled:1;
+ 	u8 pri_supported:1;
+@@ -1502,6 +1503,20 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
+ 		return;
+ 
+ 	pdev = to_pci_dev(info->dev);
++	/* For IOMMU that supports device IOTLB throttling (DIT), we assign
++	 * PFSID to the invalidation desc of a VF such that IOMMU HW can gauge
++	 * queue depth at PF level. If DIT is not set, PFSID will be treated as
++	 * reserved, which should be set to 0.
++	 */
++	if (!ecap_dit(info->iommu->ecap))
++		info->pfsid = 0;
++	else {
++		struct pci_dev *pf_pdev;
++
++		/* pdev will be returned if device is not a vf */
++		pf_pdev = pci_physfn(pdev);
++		info->pfsid = PCI_DEVID(pf_pdev->bus->number, pf_pdev->devfn);
++	}
+ 
+ #ifdef CONFIG_INTEL_IOMMU_SVM
+ 	/* The PCIe spec, in its wisdom, declares that the behaviour of
+@@ -1567,7 +1582,8 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
+ 
+ 		sid = info->bus << 8 | info->devfn;
+ 		qdep = info->ats_qdep;
+-		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
++		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
++				   qdep, addr, mask);
+ 	}
+ 	spin_unlock_irqrestore(&device_domain_lock, flags);
+ }
+diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
+index a7040163dd43..b8b2b3533f46 100644
+--- a/drivers/mailbox/mailbox-xgene-slimpro.c
++++ b/drivers/mailbox/mailbox-xgene-slimpro.c
+@@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
+ 	platform_set_drvdata(pdev, ctx);
+ 
+ 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+-	mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+-	if (!mb_base)
+-		return -ENOMEM;
++	mb_base = devm_ioremap_resource(&pdev->dev, regs);
++	if (IS_ERR(mb_base))
++		return PTR_ERR(mb_base);
+ 
+ 	/* Setup mailbox links */
+ 	for (i = 0; i < MBOX_CNT; i++) {
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index 930b00f6a3a2..5adb0c850b6c 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -456,8 +456,10 @@ static int bch_writeback_thread(void *arg)
+ 		 * data on cache. BCACHE_DEV_DETACHING flag is set in
+ 		 * bch_cached_dev_detach().
+ 		 */
+-		if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
++		if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) {
++			up_write(&dc->writeback_lock);
+ 			break;
++		}
+ 	}
+ 
+ 	up_write(&dc->writeback_lock);
+diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
+index 4a4e9c75fc4c..0a5a45f3ec5f 100644
+--- a/drivers/md/dm-cache-metadata.c
++++ b/drivers/md/dm-cache-metadata.c
+@@ -362,7 +362,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
+ 	disk_super->version = cpu_to_le32(cmd->version);
+ 	memset(disk_super->policy_name, 0, sizeof(disk_super->policy_name));
+ 	memset(disk_super->policy_version, 0, sizeof(disk_super->policy_version));
+-	disk_super->policy_hint_size = 0;
++	disk_super->policy_hint_size = cpu_to_le32(0);
+ 
+ 	__copy_sm_root(cmd, disk_super);
+ 
+@@ -700,6 +700,7 @@ static int __commit_transaction(struct dm_cache_metadata *cmd,
+ 	disk_super->policy_version[0] = cpu_to_le32(cmd->policy_version[0]);
+ 	disk_super->policy_version[1] = cpu_to_le32(cmd->policy_version[1]);
+ 	disk_super->policy_version[2] = cpu_to_le32(cmd->policy_version[2]);
++	disk_super->policy_hint_size = cpu_to_le32(cmd->policy_hint_size);
+ 
+ 	disk_super->read_hits = cpu_to_le32(cmd->stats.read_hits);
+ 	disk_super->read_misses = cpu_to_le32(cmd->stats.read_misses);
+@@ -1321,6 +1322,7 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
+ 
+ 	dm_oblock_t oblock;
+ 	unsigned flags;
++	bool dirty = true;
+ 
+ 	dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+ 	memcpy(&mapping, mapping_value_le, sizeof(mapping));
+@@ -1331,8 +1333,10 @@ static int __load_mapping_v1(struct dm_cache_metadata *cmd,
+ 		dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
+ 		memcpy(&hint, hint_value_le, sizeof(hint));
+ 	}
++	if (cmd->clean_when_opened)
++		dirty = flags & M_DIRTY;
+ 
+-	r = fn(context, oblock, to_cblock(cb), flags & M_DIRTY,
++	r = fn(context, oblock, to_cblock(cb), dirty,
+ 	       le32_to_cpu(hint), hints_valid);
+ 	if (r) {
+ 		DMERR("policy couldn't load cache block %llu",
+@@ -1360,7 +1364,7 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
+ 
+ 	dm_oblock_t oblock;
+ 	unsigned flags;
+-	bool dirty;
++	bool dirty = true;
+ 
+ 	dm_array_cursor_get_value(mapping_cursor, (void **) &mapping_value_le);
+ 	memcpy(&mapping, mapping_value_le, sizeof(mapping));
+@@ -1371,8 +1375,9 @@ static int __load_mapping_v2(struct dm_cache_metadata *cmd,
+ 		dm_array_cursor_get_value(hint_cursor, (void **) &hint_value_le);
+ 		memcpy(&hint, hint_value_le, sizeof(hint));
+ 	}
++	if (cmd->clean_when_opened)
++		dirty = dm_bitset_cursor_get_value(dirty_cursor);
+ 
+-	dirty = dm_bitset_cursor_get_value(dirty_cursor);
+ 	r = fn(context, oblock, to_cblock(cb), dirty,
+ 	       le32_to_cpu(hint), hints_valid);
+ 	if (r) {
+diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
+index f575110454b6..c60d29d09687 100644
+--- a/drivers/md/dm-crypt.c
++++ b/drivers/md/dm-crypt.c
+@@ -3072,11 +3072,11 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+ 	 */
+ 	limits->max_segment_size = PAGE_SIZE;
+ 
+-	if (cc->sector_size != (1 << SECTOR_SHIFT)) {
+-		limits->logical_block_size = cc->sector_size;
+-		limits->physical_block_size = cc->sector_size;
+-		blk_limits_io_min(limits, cc->sector_size);
+-	}
++	limits->logical_block_size =
++		max_t(unsigned short, limits->logical_block_size, cc->sector_size);
++	limits->physical_block_size =
++		max_t(unsigned, limits->physical_block_size, cc->sector_size);
++	limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size);
+ }
+ 
+ static struct target_type crypt_target = {
+diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
+index cbc56372ff97..898286ed47a1 100644
+--- a/drivers/md/dm-integrity.c
++++ b/drivers/md/dm-integrity.c
+@@ -177,7 +177,7 @@ struct dm_integrity_c {
+ 	__u8 sectors_per_block;
+ 
+ 	unsigned char mode;
+-	bool suspending;
++	int suspending;
+ 
+ 	int failed;
+ 
+@@ -2209,7 +2209,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
+ 
+ 	del_timer_sync(&ic->autocommit_timer);
+ 
+-	ic->suspending = true;
++	WRITE_ONCE(ic->suspending, 1);
+ 
+ 	queue_work(ic->commit_wq, &ic->commit_work);
+ 	drain_workqueue(ic->commit_wq);
+@@ -2219,7 +2219,7 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
+ 		dm_integrity_flush_buffers(ic);
+ 	}
+ 
+-	ic->suspending = false;
++	WRITE_ONCE(ic->suspending, 0);
+ 
+ 	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
+ 
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 72ae5dc50532..6cf9ad4e4e16 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -2514,6 +2514,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
+ 	case PM_WRITE:
+ 		if (old_mode != new_mode)
+ 			notify_of_pool_mode_change(pool, "write");
++		if (old_mode == PM_OUT_OF_DATA_SPACE)
++			cancel_delayed_work_sync(&pool->no_space_timeout);
+ 		pool->out_of_data_space = false;
+ 		pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
+ 		dm_pool_metadata_read_write(pool->pmd);
+diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
+index 698fa764999c..59b0c1fce9be 100644
+--- a/drivers/media/i2c/tvp5150.c
++++ b/drivers/media/i2c/tvp5150.c
+@@ -871,7 +871,7 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
+ 	f = &format->format;
+ 
+ 	f->width = decoder->rect.width;
+-	f->height = decoder->rect.height;
++	f->height = decoder->rect.height / 2;
+ 
+ 	f->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ 	f->field = V4L2_FIELD_ALTERNATE;
+diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
+index c37ccbfd52f2..96c07fa1802a 100644
+--- a/drivers/mfd/hi655x-pmic.c
++++ b/drivers/mfd/hi655x-pmic.c
+@@ -49,7 +49,7 @@ static struct regmap_config hi655x_regmap_config = {
+ 	.reg_bits = 32,
+ 	.reg_stride = HI655X_STRIDE,
+ 	.val_bits = 8,
+-	.max_register = HI655X_BUS_ADDR(0xFFF),
++	.max_register = HI655X_BUS_ADDR(0x400) - HI655X_STRIDE,
+ };
+ 
+ static struct resource pwrkey_resources[] = {
+diff --git a/drivers/misc/cxl/main.c b/drivers/misc/cxl/main.c
+index c1ba0d42cbc8..e0f29b8a872d 100644
+--- a/drivers/misc/cxl/main.c
++++ b/drivers/misc/cxl/main.c
+@@ -287,7 +287,7 @@ int cxl_adapter_context_get(struct cxl *adapter)
+ 	int rc;
+ 
+ 	rc = atomic_inc_unless_negative(&adapter->contexts_num);
+-	return rc >= 0 ? 0 : -EBUSY;
++	return rc ? 0 : -EBUSY;
+ }
+ 
+ void cxl_adapter_context_put(struct cxl *adapter)
+diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
+index 56c6f79a5c5a..5f8b583c6e41 100644
+--- a/drivers/misc/vmw_balloon.c
++++ b/drivers/misc/vmw_balloon.c
+@@ -341,7 +341,13 @@ static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
+ 		success = false;
+ 	}
+ 
+-	if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
++	/*
++	 * 2MB pages are only supported with batching. If batching is for some
++	 * reason disabled, do not use 2MB pages, since otherwise the legacy
++	 * mechanism is used with 2MB pages, causing a failure.
++	 */
++	if ((b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS) &&
++	    (b->capabilities & VMW_BALLOON_BATCHED_CMDS))
+ 		b->supported_page_sizes = 2;
+ 	else
+ 		b->supported_page_sizes = 1;
+@@ -450,7 +456,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+ 
+ 	pfn32 = (u32)pfn;
+ 	if (pfn32 != pfn)
+-		return -1;
++		return -EINVAL;
+ 
+ 	STATS_INC(b->stats.lock[false]);
+ 
+@@ -460,7 +466,7 @@ static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+ 
+ 	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
+ 	STATS_INC(b->stats.lock_fail[false]);
+-	return 1;
++	return -EIO;
+ }
+ 
+ static int vmballoon_send_batched_lock(struct vmballoon *b,
+@@ -597,11 +603,12 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
+ 
+ 	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
+ 								target);
+-	if (locked > 0) {
++	if (locked) {
+ 		STATS_INC(b->stats.refused_alloc[false]);
+ 
+-		if (hv_status == VMW_BALLOON_ERROR_RESET ||
+-				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
++		if (locked == -EIO &&
++		    (hv_status == VMW_BALLOON_ERROR_RESET ||
++		     hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED)) {
+ 			vmballoon_free_page(page, false);
+ 			return -EIO;
+ 		}
+@@ -617,7 +624,7 @@ static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
+ 		} else {
+ 			vmballoon_free_page(page, false);
+ 		}
+-		return -EIO;
++		return locked;
+ 	}
+ 
+ 	/* track allocated page */
+@@ -1029,29 +1036,30 @@ static void vmballoon_vmci_cleanup(struct vmballoon *b)
+  */
+ static int vmballoon_vmci_init(struct vmballoon *b)
+ {
+-	int error = 0;
++	unsigned long error, dummy;
+ 
+-	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) != 0) {
+-		error = vmci_doorbell_create(&b->vmci_doorbell,
+-				VMCI_FLAG_DELAYED_CB,
+-				VMCI_PRIVILEGE_FLAG_RESTRICTED,
+-				vmballoon_doorbell, b);
+-
+-		if (error == VMCI_SUCCESS) {
+-			VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET,
+-					b->vmci_doorbell.context,
+-					b->vmci_doorbell.resource, error);
+-			STATS_INC(b->stats.doorbell_set);
+-		}
+-	}
++	if ((b->capabilities & VMW_BALLOON_SIGNALLED_WAKEUP_CMD) == 0)
++		return 0;
+ 
+-	if (error != 0) {
+-		vmballoon_vmci_cleanup(b);
++	error = vmci_doorbell_create(&b->vmci_doorbell, VMCI_FLAG_DELAYED_CB,
++				     VMCI_PRIVILEGE_FLAG_RESTRICTED,
++				     vmballoon_doorbell, b);
+ 
+-		return -EIO;
+-	}
++	if (error != VMCI_SUCCESS)
++		goto fail;
++
++	error = VMWARE_BALLOON_CMD(VMCI_DOORBELL_SET, b->vmci_doorbell.context,
++				   b->vmci_doorbell.resource, dummy);
++
++	STATS_INC(b->stats.doorbell_set);
++
++	if (error != VMW_BALLOON_SUCCESS)
++		goto fail;
+ 
+ 	return 0;
++fail:
++	vmballoon_vmci_cleanup(b);
++	return -EIO;
+ }
+ 
+ /*
+@@ -1289,7 +1297,14 @@ static int __init vmballoon_init(void)
+ 
+ 	return 0;
+ }
+-module_init(vmballoon_init);
++
++/*
++ * Using late_initcall() instead of module_init() allows the balloon to use the
++ * VMCI doorbell even when the balloon is built into the kernel. Otherwise the
++ * VMCI is probed only after the balloon is initialized. If the balloon is used
++ * as a module, late_initcall() is equivalent to module_init().
++ */
++late_initcall(vmballoon_init);
+ 
+ static void __exit vmballoon_exit(void)
+ {
+diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+index 8bae88a150fd..713658be6661 100644
+--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
++++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+@@ -44,7 +44,7 @@
+ /* DM_CM_RST */
+ #define RST_DTRANRST1		BIT(9)
+ #define RST_DTRANRST0		BIT(8)
+-#define RST_RESERVED_BITS	GENMASK_ULL(32, 0)
++#define RST_RESERVED_BITS	GENMASK_ULL(31, 0)
+ 
+ /* DM_CM_INFO1 and DM_CM_INFO1_MASK */
+ #define INFO1_CLEAR		0
+diff --git a/drivers/net/wireless/marvell/libertas/dev.h b/drivers/net/wireless/marvell/libertas/dev.h
+index dd1ee1f0af48..469134930026 100644
+--- a/drivers/net/wireless/marvell/libertas/dev.h
++++ b/drivers/net/wireless/marvell/libertas/dev.h
+@@ -104,6 +104,7 @@ struct lbs_private {
+ 	u8 fw_ready;
+ 	u8 surpriseremoved;
+ 	u8 setup_fw_on_resume;
++	u8 power_up_on_resume;
+ 	int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
+ 	void (*reset_card) (struct lbs_private *priv);
+ 	int (*power_save) (struct lbs_private *priv);
+diff --git a/drivers/net/wireless/marvell/libertas/if_sdio.c b/drivers/net/wireless/marvell/libertas/if_sdio.c
+index 2300e796c6ab..43743c26c071 100644
+--- a/drivers/net/wireless/marvell/libertas/if_sdio.c
++++ b/drivers/net/wireless/marvell/libertas/if_sdio.c
+@@ -1290,15 +1290,23 @@ static void if_sdio_remove(struct sdio_func *func)
+ static int if_sdio_suspend(struct device *dev)
+ {
+ 	struct sdio_func *func = dev_to_sdio_func(dev);
+-	int ret;
+ 	struct if_sdio_card *card = sdio_get_drvdata(func);
++	struct lbs_private *priv = card->priv;
++	int ret;
+ 
+ 	mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);
++	priv->power_up_on_resume = false;
+ 
+ 	/* If we're powered off anyway, just let the mmc layer remove the
+ 	 * card. */
+-	if (!lbs_iface_active(card->priv))
+-		return -ENOSYS;
++	if (!lbs_iface_active(priv)) {
++		if (priv->fw_ready) {
++			priv->power_up_on_resume = true;
++			if_sdio_power_off(card);
++		}
++
++		return 0;
++	}
+ 
+ 	dev_info(dev, "%s: suspend: PM flags = 0x%x\n",
+ 		 sdio_func_id(func), flags);
+@@ -1306,9 +1314,14 @@ static int if_sdio_suspend(struct device *dev)
+ 	/* If we aren't being asked to wake on anything, we should bail out
+ 	 * and let the SD stack power down the card.
+ 	 */
+-	if (card->priv->wol_criteria == EHS_REMOVE_WAKEUP) {
++	if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
+ 		dev_info(dev, "Suspend without wake params -- powering down card\n");
+-		return -ENOSYS;
++		if (priv->fw_ready) {
++			priv->power_up_on_resume = true;
++			if_sdio_power_off(card);
++		}
++
++		return 0;
+ 	}
+ 
+ 	if (!(flags & MMC_PM_KEEP_POWER)) {
+@@ -1321,7 +1334,7 @@ static int if_sdio_suspend(struct device *dev)
+ 	if (ret)
+ 		return ret;
+ 
+-	ret = lbs_suspend(card->priv);
++	ret = lbs_suspend(priv);
+ 	if (ret)
+ 		return ret;
+ 
+@@ -1336,6 +1349,11 @@ static int if_sdio_resume(struct device *dev)
+ 
+ 	dev_info(dev, "%s: resume: we're back\n", sdio_func_id(func));
+ 
++	if (card->priv->power_up_on_resume) {
++		if_sdio_power_on(card);
++		wait_event(card->pwron_waitq, card->priv->fw_ready);
++	}
++
+ 	ret = lbs_resume(card->priv);
+ 
+ 	return ret;
+diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
+index 2fffd42767c7..fb5ab5812a22 100644
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -808,9 +808,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
+ 		 * overshoots the remainder by 4 bytes, assume it was
+ 		 * including 'status'.
+ 		 */
+-		if (out_field[1] - 8 == remainder)
++		if (out_field[1] - 4 == remainder)
+ 			return remainder;
+-		return out_field[1] - 4;
++		return out_field[1] - 8;
+ 	} else if (cmd == ND_CMD_CALL) {
+ 		struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
+ 
+diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
+index 4c22cb395040..f7b8a86fa5c5 100644
+--- a/drivers/pwm/pwm-tiehrpwm.c
++++ b/drivers/pwm/pwm-tiehrpwm.c
+@@ -33,10 +33,6 @@
+ #define TBCTL			0x00
+ #define TBPRD			0x0A
+ 
+-#define TBCTL_RUN_MASK		(BIT(15) | BIT(14))
+-#define TBCTL_STOP_NEXT		0
+-#define TBCTL_STOP_ON_CYCLE	BIT(14)
+-#define TBCTL_FREE_RUN		(BIT(15) | BIT(14))
+ #define TBCTL_PRDLD_MASK	BIT(3)
+ #define TBCTL_PRDLD_SHDW	0
+ #define TBCTL_PRDLD_IMDT	BIT(3)
+@@ -360,7 +356,7 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	/* Channels polarity can be configured from action qualifier module */
+ 	configure_polarity(pc, pwm->hwpwm);
+ 
+-	/* Enable TBCLK before enabling PWM device */
++	/* Enable TBCLK */
+ 	ret = clk_enable(pc->tbclk);
+ 	if (ret) {
+ 		dev_err(chip->dev, "Failed to enable TBCLK for %s: %d\n",
+@@ -368,9 +364,6 @@ static int ehrpwm_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 		return ret;
+ 	}
+ 
+-	/* Enable time counter for free_run */
+-	ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_FREE_RUN);
+-
+ 	return 0;
+ }
+ 
+@@ -388,6 +381,8 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 		aqcsfrc_mask = AQCSFRC_CSFA_MASK;
+ 	}
+ 
++	/* Update shadow register first before modifying active register */
++	ehrpwm_modify(pc->mmio_base, AQCSFRC, aqcsfrc_mask, aqcsfrc_val);
+ 	/*
+ 	 * Changes to immediate action on Action Qualifier. This puts
+ 	 * Action Qualifier control on PWM output from next TBCLK
+@@ -400,9 +395,6 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+ 	/* Disabling TBCLK on PWM disable */
+ 	clk_disable(pc->tbclk);
+ 
+-	/* Stop Time base counter */
+-	ehrpwm_modify(pc->mmio_base, TBCTL, TBCTL_RUN_MASK, TBCTL_STOP_NEXT);
+-
+ 	/* Disable clock on PWM disable */
+ 	pm_runtime_put_sync(chip->dev);
+ }
+diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
+index 13f7cd11c07e..ac6e6a6a194c 100644
+--- a/drivers/rtc/rtc-omap.c
++++ b/drivers/rtc/rtc-omap.c
+@@ -817,13 +817,6 @@ static int omap_rtc_probe(struct platform_device *pdev)
+ 			goto err;
+ 	}
+ 
+-	if (rtc->is_pmic_controller) {
+-		if (!pm_power_off) {
+-			omap_rtc_power_off_rtc = rtc;
+-			pm_power_off = omap_rtc_power_off;
+-		}
+-	}
+-
+ 	/* Support ext_wakeup pinconf */
+ 	rtc_pinctrl_desc.name = dev_name(&pdev->dev);
+ 
+@@ -833,6 +826,13 @@ static int omap_rtc_probe(struct platform_device *pdev)
+ 		return PTR_ERR(rtc->pctldev);
+ 	}
+ 
++	if (rtc->is_pmic_controller) {
++		if (!pm_power_off) {
++			omap_rtc_power_off_rtc = rtc;
++			pm_power_off = omap_rtc_power_off;
++		}
++	}
++
+ 	return 0;
+ 
+ err:
+diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
+index 4a001634023e..02bd1eba045b 100644
+--- a/drivers/spi/spi-cadence.c
++++ b/drivers/spi/spi-cadence.c
+@@ -319,7 +319,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
+ 		 */
+ 		if (cdns_spi_read(xspi, CDNS_SPI_ISR) &
+ 		    CDNS_SPI_IXR_TXFULL)
+-			usleep_range(10, 20);
++			udelay(10);
+ 
+ 		if (xspi->txbuf)
+ 			cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
+diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
+index 6ddb6ef1fda4..c5bbe08771a4 100644
+--- a/drivers/spi/spi-davinci.c
++++ b/drivers/spi/spi-davinci.c
+@@ -217,7 +217,7 @@ static void
davinci_spi_chipselect(struct spi_device *spi, int value) + pdata = &dspi->pdata; + + /* program delay transfers if tx_delay is non zero */ +- if (spicfg->wdelay) ++ if (spicfg && spicfg->wdelay) + spidat1 |= SPIDAT1_WDEL; + + /* +diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c +index d89127f4a46d..ca013dd4ff6b 100644 +--- a/drivers/spi/spi-fsl-dspi.c ++++ b/drivers/spi/spi-fsl-dspi.c +@@ -1006,30 +1006,30 @@ static int dspi_probe(struct platform_device *pdev) + goto out_master_put; + } + ++ dspi->clk = devm_clk_get(&pdev->dev, "dspi"); ++ if (IS_ERR(dspi->clk)) { ++ ret = PTR_ERR(dspi->clk); ++ dev_err(&pdev->dev, "unable to get clock\n"); ++ goto out_master_put; ++ } ++ ret = clk_prepare_enable(dspi->clk); ++ if (ret) ++ goto out_master_put; ++ + dspi_init(dspi); + dspi->irq = platform_get_irq(pdev, 0); + if (dspi->irq < 0) { + dev_err(&pdev->dev, "can't get platform irq\n"); + ret = dspi->irq; +- goto out_master_put; ++ goto out_clk_put; + } + + ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0, + pdev->name, dspi); + if (ret < 0) { + dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n"); +- goto out_master_put; +- } +- +- dspi->clk = devm_clk_get(&pdev->dev, "dspi"); +- if (IS_ERR(dspi->clk)) { +- ret = PTR_ERR(dspi->clk); +- dev_err(&pdev->dev, "unable to get clock\n"); +- goto out_master_put; ++ goto out_clk_put; + } +- ret = clk_prepare_enable(dspi->clk); +- if (ret) +- goto out_master_put; + + if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { + ret = dspi_request_dma(dspi, res->start); +diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c +index 4cb515a3104c..3a2e46e49405 100644 +--- a/drivers/spi/spi-pxa2xx.c ++++ b/drivers/spi/spi-pxa2xx.c +@@ -1480,6 +1480,10 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { + { PCI_VDEVICE(INTEL, 0x31c2), LPSS_BXT_SSP }, + { PCI_VDEVICE(INTEL, 0x31c4), LPSS_BXT_SSP }, + { PCI_VDEVICE(INTEL, 0x31c6), LPSS_BXT_SSP }, ++ /* ICL-LP */ ++ { PCI_VDEVICE(INTEL, 0x34aa), LPSS_CNL_SSP }, ++ { PCI_VDEVICE(INTEL, 0x34ab), LPSS_CNL_SSP }, ++ { PCI_VDEVICE(INTEL, 0x34fb), LPSS_CNL_SSP }, + /* APL */ + { PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP }, + { PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP }, +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index c8cb0b398cb1..6db8844ef3ec 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -195,6 +195,7 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, + { + struct uart_port *uport = uart_port_check(state); + unsigned long page; ++ unsigned long flags = 0; + int retval = 0; + + if (uport->type == PORT_UNKNOWN) +@@ -209,15 +210,18 @@ static int uart_port_startup(struct tty_struct *tty, struct uart_state *state, + * Initialise and allocate the transmit and temporary + * buffer. 
+ */ +- if (!state->xmit.buf) { +- /* This is protected by the per port mutex */ +- page = get_zeroed_page(GFP_KERNEL); +- if (!page) +- return -ENOMEM; ++ page = get_zeroed_page(GFP_KERNEL); ++ if (!page) ++ return -ENOMEM; + ++ uart_port_lock(state, flags); ++ if (!state->xmit.buf) { + state->xmit.buf = (unsigned char *) page; + uart_circ_clear(&state->xmit); ++ } else { ++ free_page(page); + } ++ uart_port_unlock(uport, flags); + + retval = uport->ops->startup(uport); + if (retval == 0) { +@@ -276,6 +280,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) + { + struct uart_port *uport = uart_port_check(state); + struct tty_port *port = &state->port; ++ unsigned long flags = 0; + + /* + * Set the TTY IO error marker +@@ -308,10 +313,12 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) + /* + * Free the transmit buffer page. + */ ++ uart_port_lock(state, flags); + if (state->xmit.buf) { + free_page((unsigned long)state->xmit.buf); + state->xmit.buf = NULL; + } ++ uart_port_unlock(uport, flags); + } + + /** +diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c +index f741ba8df01b..11d73b5fc885 100644 +--- a/drivers/video/fbdev/core/fbmem.c ++++ b/drivers/video/fbdev/core/fbmem.c +@@ -1716,12 +1716,12 @@ static int do_register_framebuffer(struct fb_info *fb_info) + return 0; + } + +-static int do_unregister_framebuffer(struct fb_info *fb_info) ++static int unbind_console(struct fb_info *fb_info) + { + struct fb_event event; +- int i, ret = 0; ++ int ret; ++ int i = fb_info->node; + +- i = fb_info->node; + if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) + return -EINVAL; + +@@ -1736,17 +1736,29 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) + unlock_fb_info(fb_info); + console_unlock(); + ++ return ret; ++} ++ ++static int __unlink_framebuffer(struct fb_info *fb_info); ++ ++static int do_unregister_framebuffer(struct fb_info *fb_info) ++{ ++ struct fb_event event; ++ int ret; ++ ++ ret = unbind_console(fb_info); ++ + if (ret) + return -EINVAL; + + pm_vt_switch_unregister(fb_info->dev); + +- unlink_framebuffer(fb_info); ++ __unlink_framebuffer(fb_info); + if (fb_info->pixmap.addr && + (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) + kfree(fb_info->pixmap.addr); + fb_destroy_modelist(&fb_info->modelist); +- registered_fb[i] = NULL; ++ registered_fb[fb_info->node] = NULL; + num_registered_fb--; + fb_cleanup_device(fb_info); + event.info = fb_info; +@@ -1759,7 +1771,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) + return 0; + } + +-int unlink_framebuffer(struct fb_info *fb_info) ++static int __unlink_framebuffer(struct fb_info *fb_info) + { + int i; + +@@ -1771,6 +1783,20 @@ int unlink_framebuffer(struct fb_info *fb_info) + device_destroy(fb_class, MKDEV(FB_MAJOR, i)); + fb_info->dev = NULL; + } ++ ++ return 0; ++} ++ ++int unlink_framebuffer(struct fb_info *fb_info) ++{ ++ int ret; ++ ++ ret = __unlink_framebuffer(fb_info); ++ if (ret) ++ return ret; ++ ++ unbind_console(fb_info); ++ + return 0; + } + EXPORT_SYMBOL(unlink_framebuffer); +diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c +index f329eee6dc93..352abc39e891 100644 +--- a/fs/9p/xattr.c ++++ b/fs/9p/xattr.c +@@ -105,7 +105,7 @@ int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, + { + struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len}; + struct iov_iter from; +- int retval; ++ int retval, err; + + iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len); + +@@ -126,7 +126,9 @@ int 
v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, + retval); + else + p9_client_write(fid, 0, &from, &retval); +- p9_client_clunk(fid); ++ err = p9_client_clunk(fid); ++ if (!retval && err) ++ retval = err; + return retval; + } + +diff --git a/fs/nfs/blocklayout/dev.c b/fs/nfs/blocklayout/dev.c +index 95f74bd2c067..70c4165d2d74 100644 +--- a/fs/nfs/blocklayout/dev.c ++++ b/fs/nfs/blocklayout/dev.c +@@ -204,7 +204,7 @@ static bool bl_map_stripe(struct pnfs_block_dev *dev, u64 offset, + chunk = div_u64(offset, dev->chunk_size); + div_u64_rem(chunk, dev->nr_children, &chunk_idx); + +- if (chunk_idx > dev->nr_children) { ++ if (chunk_idx >= dev->nr_children) { + dprintk("%s: invalid chunk idx %d (%lld/%lld)\n", + __func__, chunk_idx, offset, dev->chunk_size); + /* error, should not happen */ +diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c +index 516b2248cafe..2c3f398995f6 100644 +--- a/fs/nfs/callback_proc.c ++++ b/fs/nfs/callback_proc.c +@@ -433,11 +433,14 @@ validate_seqid(const struct nfs4_slot_table *tbl, const struct nfs4_slot *slot, + * a match. If the slot is in use and the sequence numbers match, the + * client is still waiting for a response to the original request. + */ +-static bool referring_call_exists(struct nfs_client *clp, ++static int referring_call_exists(struct nfs_client *clp, + uint32_t nrclists, +- struct referring_call_list *rclists) ++ struct referring_call_list *rclists, ++ spinlock_t *lock) ++ __releases(lock) ++ __acquires(lock) + { +- bool status = 0; ++ int status = 0; + int i, j; + struct nfs4_session *session; + struct nfs4_slot_table *tbl; +@@ -460,8 +463,10 @@ static bool referring_call_exists(struct nfs_client *clp, + + for (j = 0; j < rclist->rcl_nrefcalls; j++) { + ref = &rclist->rcl_refcalls[j]; ++ spin_unlock(lock); + status = nfs4_slot_wait_on_seqid(tbl, ref->rc_slotid, + ref->rc_sequenceid, HZ >> 1) < 0; ++ spin_lock(lock); + if (status) + goto out; + } +@@ -538,7 +543,8 @@ __be32 nfs4_callback_sequence(void *argp, void *resp, + * related callback was received before the response to the original + * call. + */ +- if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists)) { ++ if (referring_call_exists(clp, args->csa_nrclists, args->csa_rclists, ++ &tbl->slot_tbl_lock) < 0) { + status = htonl(NFS4ERR_DELAY); + goto out_unlock; + } +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 51deff8e1f86..dda4a3a3ef6e 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -547,8 +547,15 @@ nfs4_async_handle_exception(struct rpc_task *task, struct nfs_server *server, + ret = -EIO; + return ret; + out_retry: +- if (ret == 0) ++ if (ret == 0) { + exception->retry = 1; ++ /* ++ * For NFS4ERR_MOVED, the client transport will need to ++ * be recomputed after migration recovery has completed. ++ */ ++ if (errorcode == -NFS4ERR_MOVED) ++ rpc_task_release_transport(task); ++ } + return ret; + } + +diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c +index 60da59be83b6..4a3dd66175fe 100644 +--- a/fs/nfs/pnfs_nfs.c ++++ b/fs/nfs/pnfs_nfs.c +@@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_commit_release); + + /* The generic layer is about to remove the req from the commit list. + * If this will make the bucket empty, it will need to put the lseg reference. 
+- * Note this must be called holding i_lock ++ * Note this must be called holding nfsi->commit_mutex + */ + void + pnfs_generic_clear_request_commit(struct nfs_page *req, +@@ -149,9 +149,7 @@ restart: + if (list_empty(&b->written)) { + freeme = b->wlseg; + b->wlseg = NULL; +- spin_unlock(&cinfo->inode->i_lock); + pnfs_put_lseg(freeme); +- spin_lock(&cinfo->inode->i_lock); + goto restart; + } + } +@@ -167,7 +165,7 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx) + LIST_HEAD(pages); + int i; + +- spin_lock(&cinfo->inode->i_lock); ++ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); + for (i = idx; i < fl_cinfo->nbuckets; i++) { + bucket = &fl_cinfo->buckets[i]; + if (list_empty(&bucket->committing)) +@@ -177,12 +175,12 @@ static void pnfs_generic_retry_commit(struct nfs_commit_info *cinfo, int idx) + list_for_each(pos, &bucket->committing) + cinfo->ds->ncommitting--; + list_splice_init(&bucket->committing, &pages); +- spin_unlock(&cinfo->inode->i_lock); ++ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); + nfs_retry_commit(&pages, freeme, cinfo, i); + pnfs_put_lseg(freeme); +- spin_lock(&cinfo->inode->i_lock); ++ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); + } +- spin_unlock(&cinfo->inode->i_lock); ++ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); + } + + static unsigned int +@@ -222,13 +220,13 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages, + struct list_head *pos; + + bucket = &cinfo->ds->buckets[data->ds_commit_index]; +- spin_lock(&cinfo->inode->i_lock); ++ mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); + list_for_each(pos, &bucket->committing) + cinfo->ds->ncommitting--; + list_splice_init(&bucket->committing, pages); + data->lseg = bucket->clseg; + bucket->clseg = NULL; +- spin_unlock(&cinfo->inode->i_lock); ++ mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); + + } + +diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c +index 7fa7d68baa6d..1d4f9997236f 100644 +--- a/fs/overlayfs/readdir.c ++++ b/fs/overlayfs/readdir.c +@@ -623,6 +623,21 @@ static int ovl_fill_real(struct dir_context *ctx, const char *name, + return orig_ctx->actor(orig_ctx, name, namelen, offset, ino, d_type); + } + ++static bool ovl_is_impure_dir(struct file *file) ++{ ++ struct ovl_dir_file *od = file->private_data; ++ struct inode *dir = d_inode(file->f_path.dentry); ++ ++ /* ++ * Only upper dir can be impure, but if we are in the middle of ++ * iterating a lower real dir, dir could be copied up and marked ++ * impure. We only want the impure cache if we started iterating ++ * a real upper dir to begin with. ++ */ ++ return od->is_upper && ovl_test_flag(OVL_IMPURE, dir); ++ ++} ++ + static int ovl_iterate_real(struct file *file, struct dir_context *ctx) + { + int err; +@@ -646,7 +661,7 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx) + rdt.parent_ino = stat.ino; + } + +- if (ovl_test_flag(OVL_IMPURE, d_inode(dir))) { ++ if (ovl_is_impure_dir(file)) { + rdt.cache = ovl_cache_get_impure(&file->f_path); + if (IS_ERR(rdt.cache)) + return PTR_ERR(rdt.cache); +@@ -676,7 +691,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) + * entries. 
+ */ + if (ovl_same_sb(dentry->d_sb) && +- (ovl_test_flag(OVL_IMPURE, d_inode(dentry)) || ++ (ovl_is_impure_dir(file) || + OVL_TYPE_MERGE(ovl_path_type(dentry->d_parent)))) { + return ovl_iterate_real(file, ctx); + } +diff --git a/fs/quota/quota.c b/fs/quota/quota.c +index 43612e2a73af..3f02bab0db4e 100644 +--- a/fs/quota/quota.c ++++ b/fs/quota/quota.c +@@ -18,6 +18,7 @@ + #include <linux/quotaops.h> + #include <linux/types.h> + #include <linux/writeback.h> ++#include <linux/nospec.h> + + static int check_quotactl_permission(struct super_block *sb, int type, int cmd, + qid_t id) +@@ -703,6 +704,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, + + if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS)) + return -EINVAL; ++ type = array_index_nospec(type, MAXQUOTAS); + /* + * Quota not supported on this fs? Check this before s_quota_types + * since they needn't be set if quota is not supported at all. +diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c +index 8ae1cd8611cc..69051f7a9606 100644 +--- a/fs/ubifs/journal.c ++++ b/fs/ubifs/journal.c +@@ -665,6 +665,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, + spin_lock(&ui->ui_lock); + ui->synced_i_size = ui->ui_size; + spin_unlock(&ui->ui_lock); ++ if (xent) { ++ spin_lock(&host_ui->ui_lock); ++ host_ui->synced_i_size = host_ui->ui_size; ++ spin_unlock(&host_ui->ui_lock); ++ } + mark_inode_clean(c, ui); + mark_inode_clean(c, host_ui); + return 0; +@@ -1283,11 +1288,10 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in + int *new_len) + { + void *buf; +- int err, compr_type; +- u32 dlen, out_len, old_dlen; ++ int err, dlen, compr_type, out_len, old_dlen; + + out_len = le32_to_cpu(dn->size); +- buf = kmalloc_array(out_len, WORST_COMPR_FACTOR, GFP_NOFS); ++ buf = kmalloc(out_len * WORST_COMPR_FACTOR, GFP_NOFS); + if (!buf) + return -ENOMEM; + +@@ -1389,7 +1393,16 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, + else if (err) + goto out_free; + else { +- if (le32_to_cpu(dn->size) <= dlen) ++ int dn_len = le32_to_cpu(dn->size); ++ ++ if (dn_len <= 0 || dn_len > UBIFS_BLOCK_SIZE) { ++ ubifs_err(c, "bad data node (block %u, inode %lu)", ++ blk, inode->i_ino); ++ ubifs_dump_node(c, dn); ++ goto out_free; ++ } ++ ++ if (dn_len <= dlen) + dlen = 0; /* Nothing to do */ + else { + err = truncate_data_node(c, inode, blk, dn, &dlen); +diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c +index 6c3a1abd0e22..780a436d8c45 100644 +--- a/fs/ubifs/lprops.c ++++ b/fs/ubifs/lprops.c +@@ -1091,10 +1091,6 @@ static int scan_check_cb(struct ubifs_info *c, + } + } + +- buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); +- if (!buf) +- return -ENOMEM; +- + /* + * After an unclean unmount, empty and freeable LEBs + * may contain garbage - do not scan them. 
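The two scan_check_cb() hunks in this lprops.c diff move the leb_size-sized __vmalloc() below the early "do not scan" checks: previously the buffer was allocated first, so the LPT_SCAN_CONTINUE early returns leaked it for every empty or freeable LEB left over from an unclean unmount. Below is a minimal userspace sketch of that allocate-late pattern; scan_ctx, quick_checks() and scan_buffer() are hypothetical stand-ins, and malloc() stands in for the kernel's __vmalloc():

#include <stdlib.h>

/* Hypothetical stand-ins for the kernel-side pieces. */
struct scan_ctx {
	size_t leb_size;
};

enum { LEB_SKIP, LEB_NEEDS_SCAN };

static int quick_checks(struct scan_ctx *ctx, int lnum)
{
	(void)ctx;
	/* e.g. empty or freeable LEBs after an unclean unmount: skip them */
	return (lnum % 2) ? LEB_NEEDS_SCAN : LEB_SKIP;
}

static int scan_buffer(struct scan_ctx *ctx, int lnum, void *buf)
{
	(void)ctx; (void)lnum; (void)buf;
	return 0;
}

static int scan_one_leb(struct scan_ctx *ctx, int lnum)
{
	void *buf;
	int ret;

	/* Cheap early-outs first: a skipped LEB never pays for
	 * (or leaks) the buffer. */
	if (quick_checks(ctx, lnum) == LEB_SKIP)
		return 0;

	/* Allocate the large scan buffer only once a scan is certain. */
	buf = malloc(ctx->leb_size);
	if (!buf)
		return -1;

	ret = scan_buffer(ctx, lnum, buf);
	free(buf);
	return ret;
}

int main(void)
{
	struct scan_ctx ctx = { .leb_size = 64 * 1024 };
	return scan_one_leb(&ctx, 1);
}

Structured this way, every return path before the allocation is trivially leak-free, which is what the following hunk restores by performing the allocation immediately before ubifs_scan().
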
+@@ -1113,6 +1109,10 @@ static int scan_check_cb(struct ubifs_info *c, + return LPT_SCAN_CONTINUE; + } + ++ buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL); ++ if (!buf) ++ return -ENOMEM; ++ + sleb = ubifs_scan(c, lnum, 0, buf, 0); + if (IS_ERR(sleb)) { + ret = PTR_ERR(sleb); +diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c +index c13eae819cbc..d47f16c0d582 100644 +--- a/fs/ubifs/xattr.c ++++ b/fs/ubifs/xattr.c +@@ -152,6 +152,12 @@ static int create_xattr(struct ubifs_info *c, struct inode *host, + ui->data_len = size; + + mutex_lock(&host_ui->ui_mutex); ++ ++ if (!host->i_nlink) { ++ err = -ENOENT; ++ goto out_noent; ++ } ++ + host->i_ctime = current_time(host); + host_ui->xattr_cnt += 1; + host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); +@@ -183,6 +189,7 @@ out_cancel: + host_ui->xattr_size -= CALC_XATTR_BYTES(size); + host_ui->xattr_names -= fname_len(nm); + host_ui->flags &= ~UBIFS_CRYPT_FL; ++out_noent: + mutex_unlock(&host_ui->ui_mutex); + out_free: + make_bad_inode(inode); +@@ -234,6 +241,12 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, + mutex_unlock(&ui->ui_mutex); + + mutex_lock(&host_ui->ui_mutex); ++ ++ if (!host->i_nlink) { ++ err = -ENOENT; ++ goto out_noent; ++ } ++ + host->i_ctime = current_time(host); + host_ui->xattr_size -= CALC_XATTR_BYTES(old_size); + host_ui->xattr_size += CALC_XATTR_BYTES(size); +@@ -255,6 +268,7 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, + out_cancel: + host_ui->xattr_size -= CALC_XATTR_BYTES(size); + host_ui->xattr_size += CALC_XATTR_BYTES(old_size); ++out_noent: + mutex_unlock(&host_ui->ui_mutex); + make_bad_inode(inode); + out_free: +@@ -483,6 +497,12 @@ static int remove_xattr(struct ubifs_info *c, struct inode *host, + return err; + + mutex_lock(&host_ui->ui_mutex); ++ ++ if (!host->i_nlink) { ++ err = -ENOENT; ++ goto out_noent; ++ } ++ + host->i_ctime = current_time(host); + host_ui->xattr_cnt -= 1; + host_ui->xattr_size -= CALC_DENT_SIZE(fname_len(nm)); +@@ -502,6 +522,7 @@ out_cancel: + host_ui->xattr_size += CALC_DENT_SIZE(fname_len(nm)); + host_ui->xattr_size += CALC_XATTR_BYTES(ui->data_len); + host_ui->xattr_names += fname_len(nm); ++out_noent: + mutex_unlock(&host_ui->ui_mutex); + ubifs_release_budget(c, &req); + make_bad_inode(inode); +@@ -541,6 +562,9 @@ static int ubifs_xattr_remove(struct inode *host, const char *name) + + ubifs_assert(inode_is_locked(host)); + ++ if (!host->i_nlink) ++ return -ENOENT; ++ + if (fname_len(&nm) > UBIFS_MAX_NLEN) + return -ENAMETOOLONG; + +diff --git a/fs/xattr.c b/fs/xattr.c +index 61cd28ba25f3..be2ce57cd6ad 100644 +--- a/fs/xattr.c ++++ b/fs/xattr.c +@@ -541,7 +541,7 @@ getxattr(struct dentry *d, const char __user *name, void __user *value, + if (error > 0) { + if ((strcmp(kname, XATTR_NAME_POSIX_ACL_ACCESS) == 0) || + (strcmp(kname, XATTR_NAME_POSIX_ACL_DEFAULT) == 0)) +- posix_acl_fix_xattr_to_user(kvalue, size); ++ posix_acl_fix_xattr_to_user(kvalue, error); + if (size && copy_to_user(value, kvalue, error)) + error = -EFAULT; + } else if (error == -ERANGE && size >= XATTR_SIZE_MAX) { +diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h +index ba74eaa8eadf..0c51f753652d 100644 +--- a/include/linux/hyperv.h ++++ b/include/linux/hyperv.h +@@ -1026,6 +1026,8 @@ extern int vmbus_establish_gpadl(struct vmbus_channel *channel, + extern int vmbus_teardown_gpadl(struct vmbus_channel *channel, + u32 gpadl_handle); + ++void vmbus_reset_channel_cb(struct vmbus_channel *channel); ++ + extern int vmbus_recvpacket(struct vmbus_channel 
*channel, + void *buffer, + u32 bufferlen, +diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h +index 485a5b48f038..a6ab2f51f703 100644 +--- a/include/linux/intel-iommu.h ++++ b/include/linux/intel-iommu.h +@@ -112,6 +112,7 @@ + * Extended Capability Register + */ + ++#define ecap_dit(e) ((e >> 41) & 0x1) + #define ecap_pasid(e) ((e >> 40) & 0x1) + #define ecap_pss(e) ((e >> 35) & 0x1f) + #define ecap_eafs(e) ((e >> 34) & 0x1) +@@ -281,6 +282,7 @@ enum { + #define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32) + #define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16) + #define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK) ++#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) + #define QI_DEV_IOTLB_SIZE 1 + #define QI_DEV_IOTLB_MAX_INVS 32 + +@@ -305,6 +307,7 @@ enum { + #define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32) + #define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16) + #define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4) ++#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) + #define QI_DEV_EIOTLB_MAX_INVS 32 + + #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) +@@ -450,9 +453,8 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, + u8 fm, u64 type); + extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type); +-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep, +- u64 addr, unsigned mask); +- ++extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, ++ u16 qdep, u64 addr, unsigned mask); + extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); + + extern int dmar_ir_support(void); +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 9d6fae809c09..b1abbcc614cf 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -2292,4 +2292,16 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) + /* provide the legacy pci_dma_* API */ + #include <linux/pci-dma-compat.h> + ++#define pci_printk(level, pdev, fmt, arg...) \ ++ dev_printk(level, &(pdev)->dev, fmt, ##arg) ++ ++#define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) ++#define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) ++#define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) ++#define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) ++#define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) ++#define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) ++#define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) ++#define pci_dbg(pdev, fmt, arg...) 
dev_dbg(&(pdev)->dev, fmt, ##arg) ++ + #endif /* LINUX_PCI_H */ +diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h +index 71c237e8240e..166fc4e76df6 100644 +--- a/include/linux/sunrpc/clnt.h ++++ b/include/linux/sunrpc/clnt.h +@@ -156,6 +156,7 @@ int rpc_switch_client_transport(struct rpc_clnt *, + + void rpc_shutdown_client(struct rpc_clnt *); + void rpc_release_client(struct rpc_clnt *); ++void rpc_task_release_transport(struct rpc_task *); + void rpc_task_release_client(struct rpc_task *); + + int rpcb_create_local(struct net *); +diff --git a/include/linux/verification.h b/include/linux/verification.h +index a10549a6c7cd..cfa4730d607a 100644 +--- a/include/linux/verification.h ++++ b/include/linux/verification.h +@@ -12,6 +12,12 @@ + #ifndef _LINUX_VERIFICATION_H + #define _LINUX_VERIFICATION_H + ++/* ++ * Indicate that both builtin trusted keys and secondary trusted keys ++ * should be used. ++ */ ++#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL) ++ + /* + * The use to which an asymmetric key is being put. + */ +diff --git a/include/video/udlfb.h b/include/video/udlfb.h +index 1252a7a89bc0..85e32ee739fc 100644 +--- a/include/video/udlfb.h ++++ b/include/video/udlfb.h +@@ -88,7 +88,7 @@ struct dlfb_data { + #define MIN_RAW_PIX_BYTES 2 + #define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES) + +-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */ ++#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */ + #define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */ + + /* remove these once align.h patch is taken into kernel */ +diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c +index bf8c8fd72589..7c51f065b212 100644 +--- a/kernel/livepatch/core.c ++++ b/kernel/livepatch/core.c +@@ -605,6 +605,9 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) + if (!func->old_name || !func->new_func) + return -EINVAL; + ++ if (strlen(func->old_name) >= KSYM_NAME_LEN) ++ return -EINVAL; ++ + INIT_LIST_HEAD(&func->stack_node); + func->patched = false; + func->transition = false; +@@ -678,6 +681,9 @@ static int klp_init_object(struct klp_patch *patch, struct klp_object *obj) + if (!obj->funcs) + return -EINVAL; + ++ if (klp_is_module(obj) && strlen(obj->name) >= MODULE_NAME_LEN) ++ return -EINVAL; ++ + obj->patched = false; + obj->mod = NULL; + +diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig +index e8517b63eb37..dd2b5a4d89a5 100644 +--- a/kernel/power/Kconfig ++++ b/kernel/power/Kconfig +@@ -105,6 +105,7 @@ config PM_SLEEP + def_bool y + depends on SUSPEND || HIBERNATE_CALLBACKS + select PM ++ select SRCU + + config PM_SLEEP_SMP + def_bool y +diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c +index d482fd61ac67..64f8046586b6 100644 +--- a/kernel/printk/printk_safe.c ++++ b/kernel/printk/printk_safe.c +@@ -309,12 +309,12 @@ static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args) + return printk_safe_log_store(s, fmt, args); + } + +-void printk_nmi_enter(void) ++void notrace printk_nmi_enter(void) + { + this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK); + } + +-void printk_nmi_exit(void) ++void notrace printk_nmi_exit(void) + { + this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK); + } +diff --git a/kernel/sys.c b/kernel/sys.c +index de4ed027dfd7..e25ec93aea22 100644 +--- a/kernel/sys.c ++++ b/kernel/sys.c +@@ -1176,18 +1176,19 @@ static int override_release(char __user *release, size_t len) + + 
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) + { +- int errno = 0; ++ struct new_utsname tmp; + + down_read(&uts_sem); +- if (copy_to_user(name, utsname(), sizeof *name)) +- errno = -EFAULT; ++ memcpy(&tmp, utsname(), sizeof(tmp)); + up_read(&uts_sem); ++ if (copy_to_user(name, &tmp, sizeof(tmp))) ++ return -EFAULT; + +- if (!errno && override_release(name->release, sizeof(name->release))) +- errno = -EFAULT; +- if (!errno && override_architecture(name)) +- errno = -EFAULT; +- return errno; ++ if (override_release(name->release, sizeof(name->release))) ++ return -EFAULT; ++ if (override_architecture(name)) ++ return -EFAULT; ++ return 0; + } + + #ifdef __ARCH_WANT_SYS_OLD_UNAME +@@ -1196,55 +1197,46 @@ SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name) + */ + SYSCALL_DEFINE1(uname, struct old_utsname __user *, name) + { +- int error = 0; ++ struct old_utsname tmp; + + if (!name) + return -EFAULT; + + down_read(&uts_sem); +- if (copy_to_user(name, utsname(), sizeof(*name))) +- error = -EFAULT; ++ memcpy(&tmp, utsname(), sizeof(tmp)); + up_read(&uts_sem); ++ if (copy_to_user(name, &tmp, sizeof(tmp))) ++ return -EFAULT; + +- if (!error && override_release(name->release, sizeof(name->release))) +- error = -EFAULT; +- if (!error && override_architecture(name)) +- error = -EFAULT; +- return error; ++ if (override_release(name->release, sizeof(name->release))) ++ return -EFAULT; ++ if (override_architecture(name)) ++ return -EFAULT; ++ return 0; + } + + SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name) + { +- int error; ++ struct oldold_utsname tmp = {}; + + if (!name) + return -EFAULT; +- if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname))) +- return -EFAULT; + + down_read(&uts_sem); +- error = __copy_to_user(&name->sysname, &utsname()->sysname, +- __OLD_UTS_LEN); +- error |= __put_user(0, name->sysname + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->nodename, &utsname()->nodename, +- __OLD_UTS_LEN); +- error |= __put_user(0, name->nodename + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->release, &utsname()->release, +- __OLD_UTS_LEN); +- error |= __put_user(0, name->release + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->version, &utsname()->version, +- __OLD_UTS_LEN); +- error |= __put_user(0, name->version + __OLD_UTS_LEN); +- error |= __copy_to_user(&name->machine, &utsname()->machine, +- __OLD_UTS_LEN); +- error |= __put_user(0, name->machine + __OLD_UTS_LEN); ++ memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN); ++ memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN); ++ memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN); ++ memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN); ++ memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN); + up_read(&uts_sem); ++ if (copy_to_user(name, &tmp, sizeof(tmp))) ++ return -EFAULT; + +- if (!error && override_architecture(name)) +- error = -EFAULT; +- if (!error && override_release(name->release, sizeof(name->release))) +- error = -EFAULT; +- return error ? 
-EFAULT : 0; ++ if (override_architecture(name)) ++ return -EFAULT; ++ if (override_release(name->release, sizeof(name->release))) ++ return -EFAULT; ++ return 0; + } + #endif + +@@ -1258,17 +1250,18 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len) + + if (len < 0 || len > __NEW_UTS_LEN) + return -EINVAL; +- down_write(&uts_sem); + errno = -EFAULT; + if (!copy_from_user(tmp, name, len)) { +- struct new_utsname *u = utsname(); ++ struct new_utsname *u; + ++ down_write(&uts_sem); ++ u = utsname(); + memcpy(u->nodename, tmp, len); + memset(u->nodename + len, 0, sizeof(u->nodename) - len); + errno = 0; + uts_proc_notify(UTS_PROC_HOSTNAME); ++ up_write(&uts_sem); + } +- up_write(&uts_sem); + return errno; + } + +@@ -1276,8 +1269,9 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len) + + SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) + { +- int i, errno; ++ int i; + struct new_utsname *u; ++ char tmp[__NEW_UTS_LEN + 1]; + + if (len < 0) + return -EINVAL; +@@ -1286,11 +1280,11 @@ SYSCALL_DEFINE2(gethostname, char __user *, name, int, len) + i = 1 + strlen(u->nodename); + if (i > len) + i = len; +- errno = 0; +- if (copy_to_user(name, u->nodename, i)) +- errno = -EFAULT; ++ memcpy(tmp, u->nodename, i); + up_read(&uts_sem); +- return errno; ++ if (copy_to_user(name, tmp, i)) ++ return -EFAULT; ++ return 0; + } + + #endif +@@ -1309,17 +1303,18 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len) + if (len < 0 || len > __NEW_UTS_LEN) + return -EINVAL; + +- down_write(&uts_sem); + errno = -EFAULT; + if (!copy_from_user(tmp, name, len)) { +- struct new_utsname *u = utsname(); ++ struct new_utsname *u; + ++ down_write(&uts_sem); ++ u = utsname(); + memcpy(u->domainname, tmp, len); + memset(u->domainname + len, 0, sizeof(u->domainname) - len); + errno = 0; + uts_proc_notify(UTS_PROC_DOMAINNAME); ++ up_write(&uts_sem); + } +- up_write(&uts_sem); + return errno; + } + +diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c +index e73dcab8e9f0..71a8ee6e60dc 100644 +--- a/kernel/trace/blktrace.c ++++ b/kernel/trace/blktrace.c +@@ -1809,6 +1809,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev, + mutex_lock(&q->blk_trace_mutex); + + if (attr == &dev_attr_enable) { ++ if (!!value == !!q->blk_trace) { ++ ret = 0; ++ goto out_unlock_bdev; ++ } + if (value) + ret = blk_trace_setup_queue(q, bdev); + else +diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c +index b7302c37c064..e9cbb96cd99e 100644 +--- a/kernel/trace/trace.c ++++ b/kernel/trace/trace.c +@@ -7545,7 +7545,9 @@ rb_simple_write(struct file *filp, const char __user *ubuf, + + if (buffer) { + mutex_lock(&trace_types_lock); +- if (val) { ++ if (!!val == tracer_tracing_is_on(tr)) { ++ val = 0; /* do nothing */ ++ } else if (val) { + tracer_tracing_on(tr); + if (tr->current_trace->start) + tr->current_trace->start(tr); +diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c +index 7197ff9f0bbd..ea0d90a31fc9 100644 +--- a/kernel/trace/trace_uprobe.c ++++ b/kernel/trace/trace_uprobe.c +@@ -967,7 +967,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file) + + list_del_rcu(&link->list); + /* synchronize with u{,ret}probe_trace_func */ +- synchronize_sched(); ++ synchronize_rcu(); + kfree(link); + + if (!list_empty(&tu->tp.files)) +diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c +index c490f1e4313b..ed80a88980f0 100644 +--- a/kernel/user_namespace.c ++++ b/kernel/user_namespace.c +@@ -650,7 +650,16 @@ static ssize_t 
map_write(struct file *file, const char __user *buf, + unsigned idx; + struct uid_gid_extent *extent = NULL; + char *kbuf = NULL, *pos, *next_line; +- ssize_t ret = -EINVAL; ++ ssize_t ret; ++ ++ /* Only allow < page size writes at the beginning of the file */ ++ if ((*ppos != 0) || (count >= PAGE_SIZE)) ++ return -EINVAL; ++ ++ /* Slurp in the user data */ ++ kbuf = memdup_user_nul(buf, count); ++ if (IS_ERR(kbuf)) ++ return PTR_ERR(kbuf); + + /* + * The userns_state_mutex serializes all writes to any given map. +@@ -684,19 +693,6 @@ static ssize_t map_write(struct file *file, const char __user *buf, + if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN)) + goto out; + +- /* Only allow < page size writes at the beginning of the file */ +- ret = -EINVAL; +- if ((*ppos != 0) || (count >= PAGE_SIZE)) +- goto out; +- +- /* Slurp in the user data */ +- kbuf = memdup_user_nul(buf, count); +- if (IS_ERR(kbuf)) { +- ret = PTR_ERR(kbuf); +- kbuf = NULL; +- goto out; +- } +- + /* Parse the user data */ + ret = -EINVAL; + pos = kbuf; +diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c +index 233cd8fc6910..258033d62cb3 100644 +--- a/kernel/utsname_sysctl.c ++++ b/kernel/utsname_sysctl.c +@@ -18,7 +18,7 @@ + + #ifdef CONFIG_PROC_SYSCTL + +-static void *get_uts(struct ctl_table *table, int write) ++static void *get_uts(struct ctl_table *table) + { + char *which = table->data; + struct uts_namespace *uts_ns; +@@ -26,21 +26,9 @@ static void *get_uts(struct ctl_table *table, int write) + uts_ns = current->nsproxy->uts_ns; + which = (which - (char *)&init_uts_ns) + (char *)uts_ns; + +- if (!write) +- down_read(&uts_sem); +- else +- down_write(&uts_sem); + return which; + } + +-static void put_uts(struct ctl_table *table, int write, void *which) +-{ +- if (!write) +- up_read(&uts_sem); +- else +- up_write(&uts_sem); +-} +- + /* + * Special case of dostring for the UTS structure. This has locks + * to observe. Should this be in kernel/sys.c ???? +@@ -50,13 +38,34 @@ static int proc_do_uts_string(struct ctl_table *table, int write, + { + struct ctl_table uts_table; + int r; ++ char tmp_data[__NEW_UTS_LEN + 1]; ++ + memcpy(&uts_table, table, sizeof(uts_table)); +- uts_table.data = get_uts(table, write); ++ uts_table.data = tmp_data; ++ ++ /* ++ * Buffer the value in tmp_data so that proc_dostring() can be called ++ * without holding any locks. ++ * We also need to read the original value in the write==1 case to ++ * support partial writes. ++ */ ++ down_read(&uts_sem); ++ memcpy(tmp_data, get_uts(table), sizeof(tmp_data)); ++ up_read(&uts_sem); + r = proc_dostring(&uts_table, write, buffer, lenp, ppos); +- put_uts(table, write, uts_table.data); + +- if (write) ++ if (write) { ++ /* ++ * Write back the new value. ++ * Note that, since we dropped uts_sem, the result can ++ * theoretically be incorrect if there are two parallel writes ++ * at non-zero offsets to the same sysctl. ++ */ ++ down_write(&uts_sem); ++ memcpy(get_uts(table), tmp_data, sizeof(tmp_data)); ++ up_write(&uts_sem); + proc_sys_poll_notify(table->poll); ++ } + + return r; + } +diff --git a/mm/memory.c b/mm/memory.c +index c9657f013a4d..93d5d324904b 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -392,15 +392,6 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) + { + struct mmu_table_batch **batch = &tlb->batch; + +- /* +- * When there's less then two users of this mm there cannot be a +- * concurrent page-table walk. 
+- */ +- if (atomic_read(&tlb->mm->mm_users) < 2) { +- __tlb_remove_table(table); +- return; +- } +- + if (*batch == NULL) { + *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN); + if (*batch == NULL) { +diff --git a/mm/readahead.c b/mm/readahead.c +index c4ca70239233..59aa0d06f254 100644 +--- a/mm/readahead.c ++++ b/mm/readahead.c +@@ -380,6 +380,7 @@ ondemand_readahead(struct address_space *mapping, + { + struct backing_dev_info *bdi = inode_to_bdi(mapping->host); + unsigned long max_pages = ra->ra_pages; ++ unsigned long add_pages; + pgoff_t prev_offset; + + /* +@@ -469,10 +470,17 @@ readit: + * Will this read hit the readahead marker made by itself? + * If so, trigger the readahead marker hit now, and merge + * the resulted next readahead window into the current one. ++ * Take care of maximum IO pages as above. + */ + if (offset == ra->start && ra->size == ra->async_size) { +- ra->async_size = get_next_ra_size(ra, max_pages); +- ra->size += ra->async_size; ++ add_pages = get_next_ra_size(ra, max_pages); ++ if (ra->size + add_pages <= max_pages) { ++ ra->async_size = add_pages; ++ ra->size += add_pages; ++ } else { ++ ra->size = max_pages; ++ ra->async_size = max_pages >> 1; ++ } + } + + return ra_submit(ra, mapping, filp); +diff --git a/net/9p/client.c b/net/9p/client.c +index b433aff5ff13..3ec5a82929b2 100644 +--- a/net/9p/client.c ++++ b/net/9p/client.c +@@ -955,7 +955,7 @@ static int p9_client_version(struct p9_client *c) + { + int err = 0; + struct p9_req_t *req; +- char *version; ++ char *version = NULL; + int msize; + + p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n", +diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c +index 985046ae4231..38e21a1e97bc 100644 +--- a/net/9p/trans_fd.c ++++ b/net/9p/trans_fd.c +@@ -185,6 +185,8 @@ static void p9_mux_poll_stop(struct p9_conn *m) + spin_lock_irqsave(&p9_poll_lock, flags); + list_del_init(&m->poll_pending_link); + spin_unlock_irqrestore(&p9_poll_lock, flags); ++ ++ flush_work(&p9_poll_work); + } + + /** +@@ -951,7 +953,7 @@ p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) + if (err < 0) + return err; + +- if (valid_ipaddr4(addr) < 0) ++ if (addr == NULL || valid_ipaddr4(addr) < 0) + return -EINVAL; + + csocket = NULL; +@@ -1001,6 +1003,9 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) + + csocket = NULL; + ++ if (addr == NULL) ++ return -EINVAL; ++ + if (strlen(addr) >= UNIX_PATH_MAX) { + pr_err("%s (%d): address too long: %s\n", + __func__, task_pid_nr(current), addr); +diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c +index 6d8e3031978f..f58467a49090 100644 +--- a/net/9p/trans_rdma.c ++++ b/net/9p/trans_rdma.c +@@ -646,6 +646,9 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) + struct rdma_conn_param conn_param; + struct ib_qp_init_attr qp_attr; + ++ if (addr == NULL) ++ return -EINVAL; ++ + /* Parse the transport specific mount options */ + err = parse_opts(args, &opts); + if (err < 0) +diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c +index 3aa5a93ad107..da0d3b257459 100644 +--- a/net/9p/trans_virtio.c ++++ b/net/9p/trans_virtio.c +@@ -189,7 +189,7 @@ static int pack_sg_list(struct scatterlist *sg, int start, + s = rest_of_page(data); + if (s > count) + s = count; +- BUG_ON(index > limit); ++ BUG_ON(index >= limit); + /* Make sure we don't terminate early. 
*/ + sg_unmark_end(&sg[index]); + sg_set_buf(&sg[index++], data, s); +@@ -234,6 +234,7 @@ pack_sg_list_p(struct scatterlist *sg, int start, int limit, + s = PAGE_SIZE - data_off; + if (s > count) + s = count; ++ BUG_ON(index >= limit); + /* Make sure we don't terminate early. */ + sg_unmark_end(&sg[index]); + sg_set_page(&sg[index++], pdata[i++], s, data_off); +@@ -406,6 +407,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, + p9_debug(P9_DEBUG_TRANS, "virtio request\n"); + + if (uodata) { ++ __le32 sz; + int n = p9_get_mapped_pages(chan, &out_pages, uodata, + outlen, &offs, &need_drop); + if (n < 0) +@@ -416,6 +418,12 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, + memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4); + outlen = n; + } ++ /* The size field of the message must include the length of the ++ * header and the length of the data. We didn't actually know ++ * the length of the data until this point so add it in now. ++ */ ++ sz = cpu_to_le32(req->tc->size + outlen); ++ memcpy(&req->tc->sdata[0], &sz, sizeof(sz)); + } else if (uidata) { + int n = p9_get_mapped_pages(chan, &in_pages, uidata, + inlen, &offs, &need_drop); +@@ -643,6 +651,9 @@ p9_virtio_create(struct p9_client *client, const char *devname, char *args) + int ret = -ENOENT; + int found = 0; + ++ if (devname == NULL) ++ return -EINVAL; ++ + mutex_lock(&virtio_9p_lock); + list_for_each_entry(chan, &virtio_chan_list, chan_list) { + if (!strncmp(devname, chan->tag, chan->tag_len) && +diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c +index 325c56043007..c10bdf63eae7 100644 +--- a/net/9p/trans_xen.c ++++ b/net/9p/trans_xen.c +@@ -95,6 +95,9 @@ static int p9_xen_create(struct p9_client *client, const char *addr, char *args) + { + struct xen_9pfs_front_priv *priv; + ++ if (addr == NULL) ++ return -EINVAL; ++ + read_lock(&xen_9pfs_lock); + list_for_each_entry(priv, &xen_9pfs_devs, list) { + if (!strcmp(priv->tag, addr)) { +diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c +index e6ff5128e61a..ca53efa17be1 100644 +--- a/net/ieee802154/6lowpan/tx.c ++++ b/net/ieee802154/6lowpan/tx.c +@@ -265,9 +265,24 @@ netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) + /* We must take a copy of the skb before we modify/replace the ipv6 + * header as the header could be used elsewhere + */ +- skb = skb_unshare(skb, GFP_ATOMIC); +- if (!skb) +- return NET_XMIT_DROP; ++ if (unlikely(skb_headroom(skb) < ldev->needed_headroom || ++ skb_tailroom(skb) < ldev->needed_tailroom)) { ++ struct sk_buff *nskb; ++ ++ nskb = skb_copy_expand(skb, ldev->needed_headroom, ++ ldev->needed_tailroom, GFP_ATOMIC); ++ if (likely(nskb)) { ++ consume_skb(skb); ++ skb = nskb; ++ } else { ++ kfree_skb(skb); ++ return NET_XMIT_DROP; ++ } ++ } else { ++ skb = skb_unshare(skb, GFP_ATOMIC); ++ if (!skb) ++ return NET_XMIT_DROP; ++ } + + ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset); + if (ret < 0) { +diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c +index 7e253455f9dd..bcd1a5e6ebf4 100644 +--- a/net/mac802154/tx.c ++++ b/net/mac802154/tx.c +@@ -63,8 +63,21 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) + int ret; + + if (!(local->hw.flags & IEEE802154_HW_TX_OMIT_CKSUM)) { +- u16 crc = crc_ccitt(0, skb->data, skb->len); ++ struct sk_buff *nskb; ++ u16 crc; ++ ++ if (unlikely(skb_tailroom(skb) < IEEE802154_FCS_LEN)) { ++ nskb = skb_copy_expand(skb, 0, IEEE802154_FCS_LEN, ++ GFP_ATOMIC); ++ if (likely(nskb)) { ++ consume_skb(skb); ++ skb = nskb; ++ 
} else {
++ goto err_tx;
++ }
++ }
+
++ crc = crc_ccitt(0, skb->data, skb->len);
+ put_unaligned_le16(crc, skb_put(crc ? skb : skb, 2));
+ }
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 2ad827db2704..6d118357d9dc 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -965,10 +965,20 @@ out:
+ }
+ EXPORT_SYMBOL_GPL(rpc_bind_new_program);
+
++void rpc_task_release_transport(struct rpc_task *task)
++{
++ struct rpc_xprt *xprt = task->tk_xprt;
++
++ if (xprt) {
++ task->tk_xprt = NULL;
++ xprt_put(xprt);
++ }
++}
++EXPORT_SYMBOL_GPL(rpc_task_release_transport);
++
+ void rpc_task_release_client(struct rpc_task *task)
+ {
+ struct rpc_clnt *clnt = task->tk_client;
+- struct rpc_xprt *xprt = task->tk_xprt;
+
+ if (clnt != NULL) {
+ /* Remove from client task list */
+@@ -979,12 +989,14 @@ void rpc_task_release_client(struct rpc_task *task)
+
+ rpc_release_client(clnt);
+ }
++ rpc_task_release_transport(task);
++}
+
+- if (xprt != NULL) {
+- task->tk_xprt = NULL;
+-
+- xprt_put(xprt);
+- }
++static
++void rpc_task_set_transport(struct rpc_task *task, struct rpc_clnt *clnt)
++{
++ if (!task->tk_xprt)
++ task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
+ }
+
+ static
+@@ -992,8 +1004,7 @@ void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
+ {
+
+ if (clnt != NULL) {
+- if (task->tk_xprt == NULL)
+- task->tk_xprt = xprt_iter_get_next(&clnt->cl_xpi);
++ rpc_task_set_transport(task, clnt);
+ task->tk_client = clnt;
+ atomic_inc(&clnt->cl_count);
+ if (clnt->cl_softrtry)
+@@ -1529,6 +1540,7 @@ call_start(struct rpc_task *task)
+ clnt->cl_program->version[clnt->cl_vers]->counts[idx]++;
+ clnt->cl_stats->rpccnt++;
+ task->tk_action = call_reserve;
++ rpc_task_set_transport(task, clnt);
+ }
+
+ /*
+diff --git a/security/commoncap.c b/security/commoncap.c
+index 1c1f64582bb5..ae26ef006988 100644
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -388,7 +388,7 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
+ if (strcmp(name, "capability") != 0)
+ return -EOPNOTSUPP;
+
+- dentry = d_find_alias(inode);
++ dentry = d_find_any_alias(inode);
+ if (!dentry)
+ return -EINVAL;
+
+diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
+index 5547457566a7..bbb9823e93b9 100644
+--- a/tools/perf/util/auxtrace.c
++++ b/tools/perf/util/auxtrace.c
+@@ -197,6 +197,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues,
+ for (i = 0; i < queues->nr_queues; i++) {
+ list_splice_tail(&queues->queue_array[i].head,
+ &queue_array[i].head);
++ queue_array[i].tid = queues->queue_array[i].tid;
++ queue_array[i].cpu = queues->queue_array[i].cpu;
++ queue_array[i].set = queues->queue_array[i].set;
+ queue_array[i].priv = queues->queue_array[i].priv;
+ }
+
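The auxtrace_queues__grow() hunk above fixes a grow-and-migrate bug: the old loop spliced each queue's buffer list and copied priv into the new, larger array but silently dropped the tid, cpu and set fields, resetting them to zero on every grow. Below is a standalone sketch of the underlying rule, that every field of per-element state must be carried across a reallocation; struct queue and grow_queues() are hypothetical stand-ins, not the perf code:

#include <stdlib.h>

/* Hypothetical per-queue state, standing in for struct auxtrace_queue. */
struct queue {
	int tid;
	int cpu;
	unsigned int set;
	void *priv;
};

/* Grow the array, migrating the complete state of every element. */
static int grow_queues(struct queue **arr, unsigned int *nr,
		       unsigned int new_nr)
{
	struct queue *nq;
	unsigned int i;

	if (new_nr <= *nr)
		return 0;

	nq = calloc(new_nr, sizeof(*nq));
	if (!nq)
		return -1;

	/* Copying whole elements makes it impossible to forget a field;
	 * the pre-fix kernel loop copied priv but dropped tid/cpu/set. */
	for (i = 0; i < *nr; i++)
		nq[i] = (*arr)[i];

	free(*arr);
	*arr = nq;
	*nr = new_nr;
	return 0;
}

int main(void)
{
	struct queue *q = calloc(4, sizeof(*q));
	unsigned int nr = 4;

	if (!q || grow_queues(&q, &nr, 8))
		return 1;
	free(q);
	return 0;
}

Whole-struct assignment sidesteps this class of bug entirely; the kernel loop cannot use it directly because each element embeds a list_head whose nodes must be re-spliced with list_splice_tail() rather than copied bitwise, which is why the remaining fields are copied one by one there.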