| author | Mike Pagano <mpagano@gentoo.org> | 2016-05-11 20:14:21 -0400 |
|---|---|---|
| committer | Mike Pagano <mpagano@gentoo.org> | 2016-05-11 20:14:21 -0400 |
| commit | f12a24cc0065e56c3476ffdafab6ff0dcd99419d (patch) | |
| tree | 8514bc765dcdf233753035ee3439a32a4b6eb61b | |
| parent | Linux patch 4.4.9 (diff) | |
| download | linux-patches-f12a24cc0065e56c3476ffdafab6ff0dcd99419d.tar.gz linux-patches-f12a24cc0065e56c3476ffdafab6ff0dcd99419d.tar.bz2 linux-patches-f12a24cc0065e56c3476ffdafab6ff0dcd99419d.zip | |
Linux patch 4.4.10 (tag: 4.4-12)
| -rw-r--r-- | 0000_README | 4 |
| -rw-r--r-- | 1009_linux-4.4.10.patch | 1780 |
2 files changed, 1784 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 18110faf..06b25654 100644 --- a/0000_README +++ b/0000_README @@ -79,6 +79,10 @@ Patch: 1008_linux-4.4.9.patch From: http://www.kernel.org Desc: Linux 4.4.9 +Patch: 1009_linux-4.4.10.patch +From: http://www.kernel.org +Desc: Linux 4.4.10 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1009_linux-4.4.10.patch b/1009_linux-4.4.10.patch new file mode 100644 index 00000000..1462b54e --- /dev/null +++ b/1009_linux-4.4.10.patch @@ -0,0 +1,1780 @@ +diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt +index c2340eeeb97f..c000832a7fb9 100644 +--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt ++++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt +@@ -30,6 +30,10 @@ Optional properties: + - target-supply : regulator for SATA target power + - phys : reference to the SATA PHY node + - phy-names : must be "sata-phy" ++- ports-implemented : Mask that indicates which ports that the HBA supports ++ are available for software to use. Useful if PORTS_IMPL ++ is not programmed by the BIOS, which is true with ++ some embedded SOC's. + + Required properties when using sub-nodes: + - #address-cells : number of cells to encode an address +diff --git a/MAINTAINERS b/MAINTAINERS +index 4c3e1d2ac31b..ab65bbecb159 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -4097,8 +4097,8 @@ F: Documentation/efi-stub.txt + F: arch/ia64/kernel/efi.c + F: arch/x86/boot/compressed/eboot.[ch] + F: arch/x86/include/asm/efi.h +-F: arch/x86/platform/efi/* +-F: drivers/firmware/efi/* ++F: arch/x86/platform/efi/ ++F: drivers/firmware/efi/ + F: include/linux/efi*.h + + EFI VARIABLE FILESYSTEM +diff --git a/Makefile b/Makefile +index 0722cdf52152..5b5f462f834c 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 4 +-SUBLEVEL = 9 ++SUBLEVEL = 10 + EXTRAVERSION = + NAME = Blurry Fish Butt + +diff --git a/arch/arc/include/asm/io.h b/arch/arc/include/asm/io.h +index 27b17adea50d..cb69299a492e 100644 +--- a/arch/arc/include/asm/io.h ++++ b/arch/arc/include/asm/io.h +@@ -13,6 +13,15 @@ + #include <asm/byteorder.h> + #include <asm/page.h> + ++#ifdef CONFIG_ISA_ARCV2 ++#include <asm/barrier.h> ++#define __iormb() rmb() ++#define __iowmb() wmb() ++#else ++#define __iormb() do { } while (0) ++#define __iowmb() do { } while (0) ++#endif ++ + extern void __iomem *ioremap(unsigned long physaddr, unsigned long size); + extern void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, + unsigned long flags); +@@ -22,6 +31,15 @@ extern void iounmap(const void __iomem *addr); + #define ioremap_wc(phy, sz) ioremap(phy, sz) + #define ioremap_wt(phy, sz) ioremap(phy, sz) + ++/* ++ * io{read,write}{16,32}be() macros ++ */ ++#define ioread16be(p) ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) ++#define ioread32be(p) ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) ++ ++#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); }) ++#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); }) ++ + /* Change struct page to physical address */ + #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) + +@@ -99,15 +117,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr) + + } + +-#ifdef CONFIG_ISA_ARCV2 +-#include <asm/barrier.h> +-#define __iormb() 
rmb() +-#define __iowmb() wmb() +-#else +-#define __iormb() do { } while (0) +-#define __iowmb() do { } while (0) +-#endif +- + /* + * MMIO can also get buffered/optimized in micro-arch, so barriers needed + * Based on ARM model for the typical use case +diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c +index 47905a50e075..318394ed5c7a 100644 +--- a/arch/arm/mach-cns3xxx/pcie.c ++++ b/arch/arm/mach-cns3xxx/pcie.c +@@ -220,13 +220,13 @@ static void cns3xxx_write_config(struct cns3xxx_pcie *cnspci, + u32 mask = (0x1ull << (size * 8)) - 1; + int shift = (where % 4) * 8; + +- v = readl_relaxed(base + (where & 0xffc)); ++ v = readl_relaxed(base); + + v &= ~(mask << shift); + v |= (val & mask) << shift; + +- writel_relaxed(v, base + (where & 0xffc)); +- readl_relaxed(base + (where & 0xffc)); ++ writel_relaxed(v, base); ++ readl_relaxed(base); + } + + static void __init cns3xxx_pcie_hw_init(struct cns3xxx_pcie *cnspci) +diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c +index 7c21760f590f..875a2bab64f6 100644 +--- a/arch/arm/mach-exynos/pm_domains.c ++++ b/arch/arm/mach-exynos/pm_domains.c +@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on) + if (IS_ERR(pd->clk[i])) + break; + +- if (IS_ERR(pd->clk[i])) ++ if (IS_ERR(pd->pclk[i])) + continue; /* Skip on first power up */ + if (clk_set_parent(pd->clk[i], pd->pclk[i])) + pr_err("%s: error setting parent to clock%d\n", +diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S +index 5d94b7a2fb10..c160fa3007e9 100644 +--- a/arch/arm/mach-socfpga/headsmp.S ++++ b/arch/arm/mach-socfpga/headsmp.S +@@ -13,6 +13,7 @@ + #include <asm/assembler.h> + + .arch armv7-a ++ .arm + + ENTRY(secondary_trampoline) + /* CPU1 will always fetch from 0x0 when it is brought out of reset. +diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h +index e4396a7d0f7c..4afe66aa1400 100644 +--- a/arch/powerpc/include/asm/word-at-a-time.h ++++ b/arch/powerpc/include/asm/word-at-a-time.h +@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits) + "andc %1,%1,%2\n\t" + "popcntd %0,%1" + : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) +- : "r" (bits)); ++ : "b" (bits)); + + return leading_zero_bits; + } +diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c +index ef2ad2d682da..646bf4d222c1 100644 +--- a/arch/powerpc/kernel/process.c ++++ b/arch/powerpc/kernel/process.c +@@ -569,24 +569,6 @@ static void tm_reclaim_thread(struct thread_struct *thr, + if (!MSR_TM_SUSPENDED(mfmsr())) + return; + +- /* +- * Use the current MSR TM suspended bit to track if we have +- * checkpointed state outstanding. +- * On signal delivery, we'd normally reclaim the checkpointed +- * state to obtain stack pointer (see:get_tm_stackpointer()). +- * This will then directly return to userspace without going +- * through __switch_to(). However, if the stack frame is bad, +- * we need to exit this thread which calls __switch_to() which +- * will again attempt to reclaim the already saved tm state. +- * Hence we need to check that we've not already reclaimed +- * this state. +- * We do this using the current MSR, rather tracking it in +- * some specific thread_struct bit, as it has the additional +- * benifit of checking for a potential TM bad thing exception. 
+- */ +- if (!MSR_TM_SUSPENDED(mfmsr())) +- return; +- + tm_reclaim(thr, thr->regs->msr, cause); + + /* Having done the reclaim, we now have the checkpointed +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c +index 2c5aaf8c2e2f..05538582a809 100644 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c +@@ -385,6 +385,9 @@ static void intel_thermal_interrupt(void) + { + __u64 msr_val; + ++ if (static_cpu_has(X86_FEATURE_HWP)) ++ wrmsrl_safe(MSR_HWP_STATUS, 0); ++ + rdmsrl(MSR_IA32_THERM_STATUS, msr_val); + + /* Check for violation of core thermal thresholds*/ +diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c +index b285d4e8c68e..5da924bbf0a0 100644 +--- a/arch/x86/kernel/sysfb_efi.c ++++ b/arch/x86/kernel/sysfb_efi.c +@@ -106,14 +106,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id) + continue; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + resource_size_t start, end; ++ unsigned long flags; ++ ++ flags = pci_resource_flags(dev, i); ++ if (!(flags & IORESOURCE_MEM)) ++ continue; ++ ++ if (flags & IORESOURCE_UNSET) ++ continue; ++ ++ if (pci_resource_len(dev, i) == 0) ++ continue; + + start = pci_resource_start(dev, i); +- if (start == 0) +- break; + end = pci_resource_end(dev, i); + if (screen_info.lfb_base >= start && + screen_info.lfb_base < end) { + found_bar = 1; ++ break; + } + } + } +diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c +index 92ae6acac8a7..6aa0f4d9eea6 100644 +--- a/arch/x86/kernel/tsc_msr.c ++++ b/arch/x86/kernel/tsc_msr.c +@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void) + + if (freq_desc_tables[cpu_index].msr_plat) { + rdmsr(MSR_PLATFORM_INFO, lo, hi); +- ratio = (lo >> 8) & 0x1f; ++ ratio = (lo >> 8) & 0xff; + } else { + rdmsr(MSR_IA32_PERF_STATUS, lo, hi); + ratio = (hi >> 8) & 0x1f; +diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c +index 6979186dbd4b..9f77943653fb 100644 +--- a/drivers/acpi/acpi_processor.c ++++ b/drivers/acpi/acpi_processor.c +@@ -491,6 +491,58 @@ static void acpi_processor_remove(struct acpi_device *device) + } + #endif /* CONFIG_ACPI_HOTPLUG_CPU */ + ++#ifdef CONFIG_X86 ++static bool acpi_hwp_native_thermal_lvt_set; ++static acpi_status __init acpi_hwp_native_thermal_lvt_osc(acpi_handle handle, ++ u32 lvl, ++ void *context, ++ void **rv) ++{ ++ u8 sb_uuid_str[] = "4077A616-290C-47BE-9EBD-D87058713953"; ++ u32 capbuf[2]; ++ struct acpi_osc_context osc_context = { ++ .uuid_str = sb_uuid_str, ++ .rev = 1, ++ .cap.length = 8, ++ .cap.pointer = capbuf, ++ }; ++ ++ if (acpi_hwp_native_thermal_lvt_set) ++ return AE_CTRL_TERMINATE; ++ ++ capbuf[0] = 0x0000; ++ capbuf[1] = 0x1000; /* set bit 12 */ ++ ++ if (ACPI_SUCCESS(acpi_run_osc(handle, &osc_context))) { ++ if (osc_context.ret.pointer && osc_context.ret.length > 1) { ++ u32 *capbuf_ret = osc_context.ret.pointer; ++ ++ if (capbuf_ret[1] & 0x1000) { ++ acpi_handle_info(handle, ++ "_OSC native thermal LVT Acked\n"); ++ acpi_hwp_native_thermal_lvt_set = true; ++ } ++ } ++ kfree(osc_context.ret.pointer); ++ } ++ ++ return AE_OK; ++} ++ ++void __init acpi_early_processor_osc(void) ++{ ++ if (boot_cpu_has(X86_FEATURE_HWP)) { ++ acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, ++ ACPI_UINT32_MAX, ++ acpi_hwp_native_thermal_lvt_osc, ++ NULL, NULL, NULL); ++ acpi_get_devices(ACPI_PROCESSOR_DEVICE_HID, ++ acpi_hwp_native_thermal_lvt_osc, ++ NULL, NULL); ++ } ++} ++#endif ++ + /* + * The following ACPI IDs are 
known to be suitable for representing as + * processor devices. +diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c +index bc32f3194afe..28c50c6b5f45 100644 +--- a/drivers/acpi/acpica/dsmethod.c ++++ b/drivers/acpi/acpica/dsmethod.c +@@ -417,6 +417,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, + obj_desc->method.mutex->mutex. + original_sync_level = + obj_desc->method.mutex->mutex.sync_level; ++ ++ obj_desc->method.mutex->mutex.thread_id = ++ acpi_os_get_thread_id(); + } + } + +diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c +index a212cefae524..ca4f28432d87 100644 +--- a/drivers/acpi/bus.c ++++ b/drivers/acpi/bus.c +@@ -1004,6 +1004,9 @@ static int __init acpi_bus_init(void) + goto error1; + } + ++ /* Set capability bits for _OSC under processor scope */ ++ acpi_early_processor_osc(); ++ + /* + * _OSC method may exist in module level code, + * so it must be run after ACPI_FULL_INITIALIZATION +diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h +index 11d87bf67e73..0f3f41c13b38 100644 +--- a/drivers/acpi/internal.h ++++ b/drivers/acpi/internal.h +@@ -130,6 +130,12 @@ void acpi_early_processor_set_pdc(void); + static inline void acpi_early_processor_set_pdc(void) {} + #endif + ++#ifdef CONFIG_X86 ++void acpi_early_processor_osc(void); ++#else ++static inline void acpi_early_processor_osc(void) {} ++#endif ++ + /* -------------------------------------------------------------------------- + Embedded Controller + -------------------------------------------------------------------------- */ +diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c +index 04975b851c23..639adb1f8abd 100644 +--- a/drivers/ata/ahci_platform.c ++++ b/drivers/ata/ahci_platform.c +@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev) + if (rc) + return rc; + ++ of_property_read_u32(dev->of_node, ++ "ports-implemented", &hpriv->force_port_map); ++ + if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) + hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ; + +diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c +index e2c6d9e0c5ac..e916bff6cee8 100644 +--- a/drivers/ata/ahci_xgene.c ++++ b/drivers/ata/ahci_xgene.c +@@ -739,9 +739,9 @@ static int xgene_ahci_probe(struct platform_device *pdev) + dev_warn(&pdev->dev, "%s: Error reading device info. 
Assume version1\n", + __func__); + version = XGENE_AHCI_V1; +- } +- if (info->valid & ACPI_VALID_CID) ++ } else if (info->valid & ACPI_VALID_CID) { + version = XGENE_AHCI_V2; ++ } + } + } + #endif +diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c +index 998c6a85ad89..9628fa131757 100644 +--- a/drivers/ata/libahci.c ++++ b/drivers/ata/libahci.c +@@ -467,6 +467,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv) + dev_info(dev, "forcing port_map 0x%x -> 0x%x\n", + port_map, hpriv->force_port_map); + port_map = hpriv->force_port_map; ++ hpriv->saved_port_map = port_map; + } + + if (hpriv->mask_port_map) { +diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c +index 93b3f99b6865..8f1ce6d57a08 100644 +--- a/drivers/block/nbd.c ++++ b/drivers/block/nbd.c +@@ -618,8 +618,8 @@ static void nbd_request_handler(struct request_queue *q) + req, req->cmd_type); + + if (unlikely(!nbd->sock)) { +- dev_err(disk_to_dev(nbd->disk), +- "Attempted send on closed socket\n"); ++ dev_err_ratelimited(disk_to_dev(nbd->disk), ++ "Attempted send on closed socket\n"); + req->errors++; + nbd_end_request(nbd, req); + spin_lock_irq(q->queue_lock); +diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c +index 3ace102a2a0a..bbf206e3da0d 100644 +--- a/drivers/clk/clk-divider.c ++++ b/drivers/clk/clk-divider.c +@@ -422,6 +422,12 @@ const struct clk_ops clk_divider_ops = { + }; + EXPORT_SYMBOL_GPL(clk_divider_ops); + ++const struct clk_ops clk_divider_ro_ops = { ++ .recalc_rate = clk_divider_recalc_rate, ++ .round_rate = clk_divider_round_rate, ++}; ++EXPORT_SYMBOL_GPL(clk_divider_ro_ops); ++ + static struct clk *_register_divider(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 shift, u8 width, +@@ -445,7 +451,10 @@ static struct clk *_register_divider(struct device *dev, const char *name, + return ERR_PTR(-ENOMEM); + + init.name = name; +- init.ops = &clk_divider_ops; ++ if (clk_divider_flags & CLK_DIVIDER_READ_ONLY) ++ init.ops = &clk_divider_ro_ops; ++ else ++ init.ops = &clk_divider_ops; + init.flags = flags | CLK_IS_BASIC; + init.parent_names = (parent_name ? &parent_name: NULL); + init.num_parents = (parent_name ? 
1 : 0); +diff --git a/drivers/clk/meson/clkc.c b/drivers/clk/meson/clkc.c +index c83ae1367abc..d920d410b51d 100644 +--- a/drivers/clk/meson/clkc.c ++++ b/drivers/clk/meson/clkc.c +@@ -198,7 +198,7 @@ meson_clk_register_fixed_rate(const struct clk_conf *clk_conf, + } + + void __init meson_clk_register_clks(const struct clk_conf *clk_confs, +- size_t nr_confs, ++ unsigned int nr_confs, + void __iomem *clk_base) + { + unsigned int i; +diff --git a/drivers/clk/nxp/clk-lpc18xx-ccu.c b/drivers/clk/nxp/clk-lpc18xx-ccu.c +index 13aabbb3acbe..558da89555af 100644 +--- a/drivers/clk/nxp/clk-lpc18xx-ccu.c ++++ b/drivers/clk/nxp/clk-lpc18xx-ccu.c +@@ -222,7 +222,7 @@ static void lpc18xx_ccu_register_branch_gate_div(struct lpc18xx_clk_branch *bran + div->width = 1; + + div_hw = &div->hw; +- div_ops = &clk_divider_ops; ++ div_ops = &clk_divider_ro_ops; + } + + branch->gate.reg = branch->offset + reg_base; +diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c +index 66c18bc97857..bdc4b2d07a23 100644 +--- a/drivers/clk/qcom/gcc-msm8960.c ++++ b/drivers/clk/qcom/gcc-msm8960.c +@@ -2753,7 +2753,7 @@ static struct clk_rcg ce3_src = { + }, + .freq_tbl = clk_tbl_ce3, + .clkr = { +- .enable_reg = 0x2c08, ++ .enable_reg = 0x36c0, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "ce3_src", +@@ -2769,7 +2769,7 @@ static struct clk_branch ce3_core_clk = { + .halt_reg = 0x2fdc, + .halt_bit = 5, + .clkr = { +- .enable_reg = 0x36c4, ++ .enable_reg = 0x36cc, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "ce3_core_clk", +diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c +index be6c7fd8315d..9b6c8188efac 100644 +--- a/drivers/clk/rockchip/clk.c ++++ b/drivers/clk/rockchip/clk.c +@@ -70,7 +70,7 @@ static struct clk *rockchip_clk_register_branch(const char *name, + if (gate_offset >= 0) { + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) +- return ERR_PTR(-ENOMEM); ++ goto err_gate; + + gate->flags = gate_flags; + gate->reg = base + gate_offset; +@@ -82,7 +82,7 @@ static struct clk *rockchip_clk_register_branch(const char *name, + if (div_width > 0) { + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) +- return ERR_PTR(-ENOMEM); ++ goto err_div; + + div->flags = div_flags; + div->reg = base + muxdiv_offset; +@@ -90,7 +90,9 @@ static struct clk *rockchip_clk_register_branch(const char *name, + div->width = div_width; + div->lock = lock; + div->table = div_table; +- div_ops = &clk_divider_ops; ++ div_ops = (div_flags & CLK_DIVIDER_READ_ONLY) ++ ? 
&clk_divider_ro_ops ++ : &clk_divider_ops; + } + + clk = clk_register_composite(NULL, name, parent_names, num_parents, +@@ -100,6 +102,11 @@ static struct clk *rockchip_clk_register_branch(const char *name, + flags); + + return clk; ++err_div: ++ kfree(gate); ++err_gate: ++ kfree(mux); ++ return ERR_PTR(-ENOMEM); + } + + static struct clk *rockchip_clk_register_frac_branch(const char *name, +diff --git a/drivers/clk/versatile/clk-sp810.c b/drivers/clk/versatile/clk-sp810.c +index a1cdef6b0f90..897c36c1754a 100644 +--- a/drivers/clk/versatile/clk-sp810.c ++++ b/drivers/clk/versatile/clk-sp810.c +@@ -92,6 +92,7 @@ static void __init clk_sp810_of_setup(struct device_node *node) + int num = ARRAY_SIZE(parent_names); + char name[12]; + struct clk_init_data init; ++ static int instance; + int i; + bool deprecated; + +@@ -118,7 +119,7 @@ static void __init clk_sp810_of_setup(struct device_node *node) + deprecated = !of_find_property(node, "assigned-clock-parents", NULL); + + for (i = 0; i < ARRAY_SIZE(sp810->timerclken); i++) { +- snprintf(name, ARRAY_SIZE(name), "timerclken%d", i); ++ snprintf(name, sizeof(name), "sp810_%d_%d", instance, i); + + sp810->timerclken[i].sp810 = sp810; + sp810->timerclken[i].channel = i; +@@ -139,5 +140,6 @@ static void __init clk_sp810_of_setup(struct device_node *node) + } + + of_clk_add_provider(node, clk_sp810_timerclken_of_get, sp810); ++ instance++; + } + CLK_OF_DECLARE(sp810, "arm,sp810", clk_sp810_of_setup); +diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c +index 545069d5fdfb..e342565e8715 100644 +--- a/drivers/cpuidle/cpuidle-arm.c ++++ b/drivers/cpuidle/cpuidle-arm.c +@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev, + * call the CPU ops suspend protocol with idle index as a + * parameter. 
+ */ +- arm_cpuidle_suspend(idx); ++ ret = arm_cpuidle_suspend(idx); + + cpu_pm_exit(); + } +diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +index b8fbbd7699e4..73628c7599e7 100644 +--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c ++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +@@ -540,6 +540,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata, + if (!metadata_size) { + if (bo->metadata_size) { + kfree(bo->metadata); ++ bo->metadata = NULL; + bo->metadata_size = 0; + } + return 0; +diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +index 1e0bba29e167..1cd6de575305 100644 +--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c ++++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder, + && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) + adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; + ++ /* vertical FP must be at least 1 */ ++ if (mode->crtc_vsync_start == mode->crtc_vdisplay) ++ adjusted_mode->crtc_vsync_start++; ++ + /* get the native mode for scaling */ + if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) + amdgpu_panel_mode_fixup(encoder, adjusted_mode); +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index bc7b8faba84d..7e461dca564c 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -2838,7 +2838,14 @@ enum skl_disp_power_wells { + #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) + #define BXT_RP_STATE_CAP 0x138170 + +-#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) ++/* ++ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS ++ * 8300) freezing up around GPU hangs. Looks as if even ++ * scheduling/timer interrupts start misbehaving if the RPS ++ * EI/thresholds are "bad", leading to a very sluggish or even ++ * frozen machine. ++ */ ++#define INTERVAL_1_28_US(us) roundup(((us) * 100) >> 7, 25) + #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) + #define INTERVAL_0_833_US(us) (((us) * 6) / 5) + #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? 
\ +diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c +index 9e530a739354..fc28c512ece3 100644 +--- a/drivers/gpu/drm/i915/intel_csr.c ++++ b/drivers/gpu/drm/i915/intel_csr.c +@@ -180,7 +180,8 @@ struct stepping_info { + static const struct stepping_info skl_stepping_info[] = { + {'A', '0'}, {'B', '0'}, {'C', '0'}, + {'D', '0'}, {'E', '0'}, {'F', '0'}, +- {'G', '0'}, {'H', '0'}, {'I', '0'} ++ {'G', '0'}, {'H', '0'}, {'I', '0'}, ++ {'J', '0'}, {'K', '0'} + }; + + static struct stepping_info bxt_stepping_info[] = { +diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c +index 7e6158b889da..3c6b07683bd9 100644 +--- a/drivers/gpu/drm/i915/intel_ddi.c ++++ b/drivers/gpu/drm/i915/intel_ddi.c +@@ -464,9 +464,17 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, + } else if (IS_BROADWELL(dev)) { + ddi_translations_fdi = bdw_ddi_translations_fdi; + ddi_translations_dp = bdw_ddi_translations_dp; +- ddi_translations_edp = bdw_ddi_translations_edp; ++ ++ if (dev_priv->edp_low_vswing) { ++ ddi_translations_edp = bdw_ddi_translations_edp; ++ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); ++ } else { ++ ddi_translations_edp = bdw_ddi_translations_dp; ++ n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); ++ } ++ + ddi_translations_hdmi = bdw_ddi_translations_hdmi; +- n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); ++ + n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); + n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); + hdmi_default_entry = 7; +@@ -3188,12 +3196,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder, + intel_ddi_clock_get(encoder, pipe_config); + } + +-static void intel_ddi_destroy(struct drm_encoder *encoder) +-{ +- /* HDMI has nothing special to destroy, so we can go with this. 
*/ +- intel_dp_encoder_destroy(encoder); +-} +- + static bool intel_ddi_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *pipe_config) + { +@@ -3212,7 +3214,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder, + } + + static const struct drm_encoder_funcs intel_ddi_funcs = { +- .destroy = intel_ddi_destroy, ++ .reset = intel_dp_encoder_reset, ++ .destroy = intel_dp_encoder_destroy, + }; + + static struct intel_connector * +@@ -3284,6 +3287,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port) + intel_encoder->post_disable = intel_ddi_post_disable; + intel_encoder->get_hw_state = intel_ddi_get_hw_state; + intel_encoder->get_config = intel_ddi_get_config; ++ intel_encoder->suspend = intel_dp_encoder_suspend; + + intel_dig_port->port = port; + intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & +diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c +index 78b8ec84d576..e55a82a99e7f 100644 +--- a/drivers/gpu/drm/i915/intel_dp.c ++++ b/drivers/gpu/drm/i915/intel_dp.c +@@ -5035,7 +5035,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder) + kfree(intel_dig_port); + } + +-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) ++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) + { + struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + +@@ -5077,7 +5077,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) + edp_panel_vdd_schedule_off(intel_dp); + } + +-static void intel_dp_encoder_reset(struct drm_encoder *encoder) ++void intel_dp_encoder_reset(struct drm_encoder *encoder) + { + struct intel_dp *intel_dp; + +diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h +index 0d00f07b7163..f34a219ec5c4 100644 +--- a/drivers/gpu/drm/i915/intel_drv.h ++++ b/drivers/gpu/drm/i915/intel_drv.h +@@ -1204,6 +1204,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp, + void intel_dp_start_link_train(struct intel_dp *intel_dp); + void intel_dp_stop_link_train(struct intel_dp *intel_dp); + void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); ++void intel_dp_encoder_reset(struct drm_encoder *encoder); ++void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder); + void intel_dp_encoder_destroy(struct drm_encoder *encoder); + int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc); + bool intel_dp_compute_config(struct intel_encoder *encoder, +diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c +index e6c035b0fc1c..4b8ed9f2dabc 100644 +--- a/drivers/gpu/drm/i915/intel_hdmi.c ++++ b/drivers/gpu/drm/i915/intel_hdmi.c +@@ -1388,8 +1388,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force) + hdmi_to_dig_port(intel_hdmi)); + } + +- if (!live_status) +- DRM_DEBUG_KMS("Live status not up!"); ++ if (!live_status) { ++ DRM_DEBUG_KMS("HDMI live status down\n"); ++ /* ++ * Live status register is not reliable on all intel platforms. ++ * So consider live_status only for certain platforms, for ++ * others, read EDID to determine presence of sink. 
++ */ ++ if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv)) ++ live_status = true; ++ } + + intel_hdmi_unset_edid(connector); + +diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c +index adf74f4366bb..0b04b9282f56 100644 +--- a/drivers/gpu/drm/radeon/atombios_encoders.c ++++ b/drivers/gpu/drm/radeon/atombios_encoders.c +@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, + && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2))) + adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2; + ++ /* vertical FP must be at least 1 */ ++ if (mode->crtc_vsync_start == mode->crtc_vdisplay) ++ adjusted_mode->crtc_vsync_start++; ++ + /* get the native mode for scaling */ + if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) { + radeon_panel_mode_fixup(encoder, adjusted_mode); +diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c +index a0e28f3a278d..0585fd2031dd 100644 +--- a/drivers/gpu/ipu-v3/ipu-common.c ++++ b/drivers/gpu/ipu-v3/ipu-common.c +@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) + goto err_register; + } + +- pdev->dev.of_node = of_node; + pdev->dev.parent = dev; + + ret = platform_device_add_data(pdev, ®->pdata, +@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base) + platform_device_put(pdev); + goto err_register; + } ++ ++ /* ++ * Set of_node only after calling platform_device_add. Otherwise ++ * the platform:imx-ipuv3-crtc modalias won't be used. ++ */ ++ pdev->dev.of_node = of_node; + } + + return 0; +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index 8b78a7f1f779..909ab0176ef2 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -255,6 +255,7 @@ + #define USB_DEVICE_ID_CORSAIR_K90 0x1b02 + + #define USB_VENDOR_ID_CREATIVELABS 0x041e ++#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51 0x322c + #define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801 + + #define USB_VENDOR_ID_CVTOUCH 0x1ff7 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c +index 7dd0953cd70f..dc8e6adf95a4 100644 +--- a/drivers/hid/usbhid/hid-quirks.c ++++ b/drivers/hid/usbhid/hid-quirks.c +@@ -70,6 +70,7 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL }, ++ { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL }, +diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c +index 3c0f47ac8e53..5c02d7bbc7f2 100644 +--- a/drivers/hid/wacom_wac.c ++++ b/drivers/hid/wacom_wac.c +@@ -3449,6 +3449,10 @@ static const struct wacom_features wacom_features_0x33E = + { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63, + INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16, + .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; ++static const struct wacom_features wacom_features_0x343 = ++ { "Wacom DTK1651", 34616, 19559, 1023, 0, ++ DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4, ++ WACOM_DTU_OFFSET, WACOM_DTU_OFFSET }; + + static const struct 
wacom_features wacom_features_HID_ANY_ID = + { "Wacom HID", .type = HID_GENERIC }; +@@ -3614,6 +3618,7 @@ const struct hid_device_id wacom_ids[] = { + { USB_DEVICE_WACOM(0x33C) }, + { USB_DEVICE_WACOM(0x33D) }, + { USB_DEVICE_WACOM(0x33E) }, ++ { USB_DEVICE_WACOM(0x343) }, + { USB_DEVICE_WACOM(0x4001) }, + { USB_DEVICE_WACOM(0x4004) }, + { USB_DEVICE_WACOM(0x5000) }, +diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c +index b13936dacc78..f2a7f72f7aa6 100644 +--- a/drivers/iio/magnetometer/ak8975.c ++++ b/drivers/iio/magnetometer/ak8975.c +@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data) + int rc; + int irq; + ++ init_waitqueue_head(&data->data_ready_queue); ++ clear_bit(0, &data->flags); + if (client->irq) + irq = client->irq; + else +@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data) + return rc; + } + +- init_waitqueue_head(&data->data_ready_queue); +- clear_bit(0, &data->flags); + data->eoc_irq = irq; + + return rc; +@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client, + int eoc_gpio; + int err; + const char *name = NULL; +- enum asahi_compass_chipset chipset; ++ enum asahi_compass_chipset chipset = AK_MAX_TYPE; + + /* Grab and set up the supplied GPIO. */ + if (client->dev.platform_data) +diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c +index de9cd6901752..bc147582bed9 100644 +--- a/drivers/infiniband/hw/cxgb4/cq.c ++++ b/drivers/infiniband/hw/cxgb4/cq.c +@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq, + cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS, + &cq->bar2_qid, + user ? &cq->bar2_pa : NULL); +- if (user && !cq->bar2_va) { ++ if (user && !cq->bar2_pa) { + pr_warn(MOD "%s: cqid %u not in BAR2 range.\n", + pci_name(rdev->lldi.pdev), cq->cqid); + ret = -EINVAL; +diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c +index aa515afee724..53aa7515f542 100644 +--- a/drivers/infiniband/hw/cxgb4/qp.c ++++ b/drivers/infiniband/hw/cxgb4/qp.c +@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid, + + if (pbar2_pa) + *pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK; ++ ++ if (is_t4(rdev->lldi.adapter_type)) ++ return NULL; ++ + return rdev->bar2_kva + bar2_qoffset; + } + +@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, + /* + * User mode must have bar2 access. 
+ */ +- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) { ++ if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) { + pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n", + pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); + goto free_dma; +diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c +index 9bbadaaf6bc3..7b3845aa5983 100644 +--- a/drivers/input/touchscreen/zforce_ts.c ++++ b/drivers/input/touchscreen/zforce_ts.c +@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload) + point.coord_x = point.coord_y = 0; + } + +- point.state = payload[9 * i + 5] & 0x03; +- point.id = (payload[9 * i + 5] & 0xfc) >> 2; ++ point.state = payload[9 * i + 5] & 0x0f; ++ point.id = (payload[9 * i + 5] & 0xf0) >> 4; + + /* determine touch major, minor and orientation */ + point.area_major = max(payload[9 * i + 6], +diff --git a/drivers/md/md.c b/drivers/md/md.c +index b1e1f6b95782..c57fdf847b47 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -293,6 +293,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio) + * go away inside make_request + */ + sectors = bio_sectors(bio); ++ /* bio could be mergeable after passing to underlayer */ ++ bio->bi_rw &= ~REQ_NOMERGE; + mddev->pers->make_request(mddev, bio); + + cpu = part_stat_lock(); +diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c +index 6255513f54c7..68aa31ae553a 100644 +--- a/drivers/mfd/intel-lpss.c ++++ b/drivers/mfd/intel-lpss.c +@@ -445,6 +445,7 @@ int intel_lpss_probe(struct device *dev, + err_remove_ltr: + intel_lpss_debugfs_remove(lpss); + intel_lpss_ltr_hide(lpss); ++ intel_lpss_unregister_clock(lpss); + + err_clk_register: + ida_simple_remove(&intel_lpss_devid_ida, lpss->devid); +diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c +index 8f8793004b9f..1b271b99c49e 100644 +--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c ++++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c +@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah, + }; + static const int inc[4] = { 0, 100, 0, 0 }; + ++ memset(&mask_m, 0, sizeof(int8_t) * 123); ++ memset(&mask_p, 0, sizeof(int8_t) * 123); ++ + cur_bin = -6000; + upper = bin + 100; + lower = bin - 100; +@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah, + int tmp, new; + int i; + +- int8_t mask_m[123]; +- int8_t mask_p[123]; + int cur_bb_spur; + bool is2GHz = IS_CHAN_2GHZ(chan); + +- memset(&mask_m, 0, sizeof(int8_t) * 123); +- memset(&mask_p, 0, sizeof(int8_t) * 123); +- + for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { + cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); + if (AR_NO_SPUR == cur_bb_spur) +diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c +index db6624527d99..53d7445a5d12 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c ++++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c +@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah, + int i; + struct chan_centers centers; + +- int8_t mask_m[123]; +- int8_t mask_p[123]; + int cur_bb_spur; + bool is2GHz = IS_CHAN_2GHZ(chan); + +- memset(&mask_m, 0, sizeof(int8_t) * 123); +- memset(&mask_p, 0, sizeof(int8_t) * 123); +- + ath9k_hw_get_channel_centers(ah, chan, ¢ers); + freq = centers.synth_center; + +diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c +index a6c8a4f7bfe9..d6c4f0f60839 100644 +--- 
a/drivers/net/wireless/mwifiex/sta_ioctl.c ++++ b/drivers/net/wireless/mwifiex/sta_ioctl.c +@@ -313,6 +313,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, + mwifiex_dbg(adapter, ERROR, + "Attempt to reconnect on csa closed chan(%d)\n", + bss_desc->channel); ++ ret = -1; + goto done; + } + +diff --git a/drivers/nvmem/mxs-ocotp.c b/drivers/nvmem/mxs-ocotp.c +index 8ba19bba3156..2bb3c5799ac4 100644 +--- a/drivers/nvmem/mxs-ocotp.c ++++ b/drivers/nvmem/mxs-ocotp.c +@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size, + if (ret) + goto close_banks; + +- while (val_size) { ++ while (val_size >= reg_size) { + if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) { + /* fill up non-data register */ + *buf = 0; +@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size, + } + + buf++; +- val_size--; ++ val_size -= reg_size; + offset += reg_size; + } + +diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c +index db9446c612da..b0d92b84bcdc 100644 +--- a/drivers/scsi/lpfc/lpfc_init.c ++++ b/drivers/scsi/lpfc/lpfc_init.c +@@ -2855,7 +2855,7 @@ lpfc_online(struct lpfc_hba *phba) + } + + vports = lpfc_create_vport_work_array(phba); +- if (vports != NULL) ++ if (vports != NULL) { + for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + struct Scsi_Host *shost; + shost = lpfc_shost_from_vport(vports[i]); +@@ -2872,7 +2872,8 @@ lpfc_online(struct lpfc_hba *phba) + } + spin_unlock_irq(shost->host_lock); + } +- lpfc_destroy_vport_work_array(phba, vports); ++ } ++ lpfc_destroy_vport_work_array(phba, vports); + + lpfc_unblock_mgmt_io(phba); + return 0; +diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c +index 534c58937a56..4a65c5bda146 100644 +--- a/drivers/soc/rockchip/pm_domains.c ++++ b/drivers/soc/rockchip/pm_domains.c +@@ -419,6 +419,7 @@ static int rockchip_pm_domain_probe(struct platform_device *pdev) + if (error) { + dev_err(dev, "failed to handle node %s: %d\n", + node->name, error); ++ of_node_put(node); + goto err_out; + } + } +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index bdc0f2f24f19..a2b43a6e7fa7 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -108,6 +108,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ + { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ + { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ ++ { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */ + { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ + { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. 
*/ +@@ -117,6 +118,7 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ + { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ ++ { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ +@@ -140,6 +142,8 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ + { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ + { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ ++ { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */ ++ { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */ + { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ + { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ + { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */ +diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c +index 12eab503efd1..364bc44610c1 100644 +--- a/drivers/xen/balloon.c ++++ b/drivers/xen/balloon.c +@@ -152,6 +152,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq); + static void balloon_process(struct work_struct *work); + static DECLARE_DELAYED_WORK(balloon_worker, balloon_process); + ++static void release_memory_resource(struct resource *resource); ++ + /* When ballooning out (allocating memory to return to Xen) we don't really + want the kernel to try too hard since that can trigger the oom killer. */ + #define GFP_BALLOON \ +@@ -268,6 +270,20 @@ static struct resource *additional_memory_resource(phys_addr_t size) + return NULL; + } + ++#ifdef CONFIG_SPARSEMEM ++ { ++ unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT); ++ unsigned long pfn = res->start >> PAGE_SHIFT; ++ ++ if (pfn > limit) { ++ pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n", ++ pfn, limit); ++ release_memory_resource(res); ++ return NULL; ++ } ++ } ++#endif ++ + return res; + } + +diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c +index 38272ad24551..f4edd6df3df2 100644 +--- a/drivers/xen/evtchn.c ++++ b/drivers/xen/evtchn.c +@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u) + { + unsigned int new_size; + evtchn_port_t *new_ring, *old_ring; +- unsigned int p, c; + + /* + * Ensure the ring is large enough to capture all possible +@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u) + /* + * Copy the old ring contents to the new ring. + * +- * If the ring contents crosses the end of the current ring, +- * it needs to be copied in two chunks. ++ * To take care of wrapping, a full ring, and the new index ++ * pointing into the second half, simply copy the old contents ++ * twice. 
+ * + * +---------+ +------------------+ +- * |34567 12| -> | 1234567 | +- * +-----p-c-+ +------------------+ ++ * |34567 12| -> |34567 1234567 12| ++ * +-----p-c-+ +-------c------p---+ + */ +- p = evtchn_ring_offset(u, u->ring_prod); +- c = evtchn_ring_offset(u, u->ring_cons); +- if (p < c) { +- memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring)); +- memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring)); +- } else +- memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring)); ++ memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring)); ++ memcpy(new_ring + u->ring_size, old_ring, ++ u->ring_size * sizeof(*u->ring)); + + u->ring = new_ring; + u->ring_size = new_size; +diff --git a/fs/pnode.c b/fs/pnode.c +index 6367e1e435c6..99899705b105 100644 +--- a/fs/pnode.c ++++ b/fs/pnode.c +@@ -198,10 +198,15 @@ static struct mount *next_group(struct mount *m, struct mount *origin) + + /* all accesses are serialized by namespace_sem */ + static struct user_namespace *user_ns; +-static struct mount *last_dest, *last_source, *dest_master; ++static struct mount *last_dest, *first_source, *last_source, *dest_master; + static struct mountpoint *mp; + static struct hlist_head *list; + ++static inline bool peers(struct mount *m1, struct mount *m2) ++{ ++ return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id; ++} ++ + static int propagate_one(struct mount *m) + { + struct mount *child; +@@ -212,24 +217,26 @@ static int propagate_one(struct mount *m) + /* skip if mountpoint isn't covered by it */ + if (!is_subdir(mp->m_dentry, m->mnt.mnt_root)) + return 0; +- if (m->mnt_group_id == last_dest->mnt_group_id) { ++ if (peers(m, last_dest)) { + type = CL_MAKE_SHARED; + } else { + struct mount *n, *p; ++ bool done; + for (n = m; ; n = p) { + p = n->mnt_master; +- if (p == dest_master || IS_MNT_MARKED(p)) { +- while (last_dest->mnt_master != p) { +- last_source = last_source->mnt_master; +- last_dest = last_source->mnt_parent; +- } +- if (n->mnt_group_id != last_dest->mnt_group_id) { +- last_source = last_source->mnt_master; +- last_dest = last_source->mnt_parent; +- } ++ if (p == dest_master || IS_MNT_MARKED(p)) + break; +- } + } ++ do { ++ struct mount *parent = last_source->mnt_parent; ++ if (last_source == first_source) ++ break; ++ done = parent->mnt_master == p; ++ if (done && peers(n, parent)) ++ break; ++ last_source = last_source->mnt_master; ++ } while (!done); ++ + type = CL_SLAVE; + /* beginning of peer group among the slaves? */ + if (IS_MNT_SHARED(m)) +@@ -281,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, + */ + user_ns = current->nsproxy->mnt_ns->user_ns; + last_dest = dest_mnt; ++ first_source = source_mnt; + last_source = source_mnt; + mp = dest_mp; + list = tree_list; +diff --git a/fs/proc/base.c b/fs/proc/base.c +index b7de324bec11..e8bbf6cdb437 100644 +--- a/fs/proc/base.c ++++ b/fs/proc/base.c +@@ -954,7 +954,8 @@ static ssize_t environ_read(struct file *file, char __user *buf, + int ret = 0; + struct mm_struct *mm = file->private_data; + +- if (!mm) ++ /* Ensure the process spawned far enough to have an environment. 
*/ ++ if (!mm || !mm->env_end) + return 0; + + page = (char *)__get_free_page(GFP_TEMPORARY); +diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h +index c56988ac63f7..7cd0171963ae 100644 +--- a/include/linux/clk-provider.h ++++ b/include/linux/clk-provider.h +@@ -384,6 +384,7 @@ struct clk_divider { + #define CLK_DIVIDER_MAX_AT_ZERO BIT(6) + + extern const struct clk_ops clk_divider_ops; ++extern const struct clk_ops clk_divider_ro_ops; + + unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, + unsigned int val, const struct clk_div_table *table, +diff --git a/include/linux/hash.h b/include/linux/hash.h +index 1afde47e1528..79c52fa81cac 100644 +--- a/include/linux/hash.h ++++ b/include/linux/hash.h +@@ -32,12 +32,28 @@ + #error Wordsize not 32 or 64 + #endif + ++/* ++ * The above primes are actively bad for hashing, since they are ++ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes ++ * real problems. Besides, the "prime" part is pointless for the ++ * multiplicative hash. ++ * ++ * Although a random odd number will do, it turns out that the golden ++ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice ++ * properties. ++ * ++ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2. ++ * (See Knuth vol 3, section 6.4, exercise 9.) ++ */ ++#define GOLDEN_RATIO_32 0x61C88647 ++#define GOLDEN_RATIO_64 0x61C8864680B583EBull ++ + static __always_inline u64 hash_64(u64 val, unsigned int bits) + { + u64 hash = val; + +-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 +- hash = hash * GOLDEN_RATIO_PRIME_64; ++#if BITS_PER_LONG == 64 ++ hash = hash * GOLDEN_RATIO_64; + #else + /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ + u64 n = hash; +diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h +index 0816c872b689..a6cc576fd467 100644 +--- a/include/net/ip_vs.h ++++ b/include/net/ip_vs.h +@@ -1588,6 +1588,23 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) + } + #endif /* CONFIG_IP_VS_NFCT */ + ++/* Really using conntrack? 
*/ ++static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp, ++ struct sk_buff *skb) ++{ ++#ifdef CONFIG_IP_VS_NFCT ++ enum ip_conntrack_info ctinfo; ++ struct nf_conn *ct; ++ ++ if (!(cp->flags & IP_VS_CONN_F_NFCT)) ++ return false; ++ ct = nf_ct_get(skb, &ctinfo); ++ if (ct && !nf_ct_is_untracked(ct)) ++ return true; ++#endif ++ return false; ++} ++ + static inline int + ip_vs_dest_conn_overhead(struct ip_vs_dest *dest) + { +diff --git a/include/xen/page.h b/include/xen/page.h +index 96294ac93755..9dc46cb8a0fd 100644 +--- a/include/xen/page.h ++++ b/include/xen/page.h +@@ -15,9 +15,9 @@ + */ + + #define xen_pfn_to_page(xen_pfn) \ +- ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT))) ++ (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT))) + #define page_to_xen_pfn(page) \ +- (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT) ++ ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT)) + + #define XEN_PFN_PER_PAGE (PAGE_SIZE / XEN_PAGE_SIZE) + +diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c +index d202d991edae..996f0fd34312 100644 +--- a/kernel/trace/trace_events.c ++++ b/kernel/trace/trace_events.c +@@ -2107,8 +2107,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file) + trace_create_file("filter", 0644, file->dir, file, + &ftrace_event_filter_fops); + +- trace_create_file("trigger", 0644, file->dir, file, +- &event_trigger_fops); ++ /* ++ * Only event directories that can be enabled should have ++ * triggers. ++ */ ++ if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) ++ trace_create_file("trigger", 0644, file->dir, file, ++ &event_trigger_fops); + + trace_create_file("format", 0444, file->dir, call, + &ftrace_event_format_fops); +diff --git a/lib/test-string_helpers.c b/lib/test-string_helpers.c +index 98866a770770..25b5cbfb7615 100644 +--- a/lib/test-string_helpers.c ++++ b/lib/test-string_helpers.c +@@ -327,36 +327,67 @@ out: + } + + #define string_get_size_maxbuf 16 +-#define test_string_get_size_one(size, blk_size, units, exp_result) \ ++#define test_string_get_size_one(size, blk_size, exp_result10, exp_result2) \ + do { \ +- BUILD_BUG_ON(sizeof(exp_result) >= string_get_size_maxbuf); \ +- __test_string_get_size((size), (blk_size), (units), \ +- (exp_result)); \ ++ BUILD_BUG_ON(sizeof(exp_result10) >= string_get_size_maxbuf); \ ++ BUILD_BUG_ON(sizeof(exp_result2) >= string_get_size_maxbuf); \ ++ __test_string_get_size((size), (blk_size), (exp_result10), \ ++ (exp_result2)); \ + } while (0) + + +-static __init void __test_string_get_size(const u64 size, const u64 blk_size, +- const enum string_size_units units, +- const char *exp_result) ++static __init void test_string_get_size_check(const char *units, ++ const char *exp, ++ char *res, ++ const u64 size, ++ const u64 blk_size) + { +- char buf[string_get_size_maxbuf]; +- +- string_get_size(size, blk_size, units, buf, sizeof(buf)); +- if (!memcmp(buf, exp_result, strlen(exp_result) + 1)) ++ if (!memcmp(res, exp, strlen(exp) + 1)) + return; + +- buf[sizeof(buf) - 1] = '\0'; +- pr_warn("Test 'test_string_get_size_one' failed!\n"); +- pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %d\n", ++ res[string_get_size_maxbuf - 1] = '\0'; ++ ++ pr_warn("Test 'test_string_get_size' failed!\n"); ++ pr_warn("string_get_size(size = %llu, blk_size = %llu, units = %s)\n", + size, blk_size, units); +- pr_warn("expected: '%s', got '%s'\n", exp_result, buf); ++ pr_warn("expected: '%s', got '%s'\n", exp, res); ++} ++ ++static __init 
void __test_string_get_size(const u64 size, const u64 blk_size,
++					const char *exp_result10,
++					const char *exp_result2)
++{
++	char buf10[string_get_size_maxbuf];
++	char buf2[string_get_size_maxbuf];
++
++	string_get_size(size, blk_size, STRING_UNITS_10, buf10, sizeof(buf10));
++	string_get_size(size, blk_size, STRING_UNITS_2, buf2, sizeof(buf2));
++
++	test_string_get_size_check("STRING_UNITS_10", exp_result10, buf10,
++				   size, blk_size);
++
++	test_string_get_size_check("STRING_UNITS_2", exp_result2, buf2,
++				   size, blk_size);
+ }
+
+ static __init void test_string_get_size(void)
+ {
+-	test_string_get_size_one(16384, 512, STRING_UNITS_2, "8.00 MiB");
+-	test_string_get_size_one(8192, 4096, STRING_UNITS_10, "32.7 MB");
+-	test_string_get_size_one(1, 512, STRING_UNITS_10, "512 B");
++	/* small values */
++	test_string_get_size_one(0, 512, "0 B", "0 B");
++	test_string_get_size_one(1, 512, "512 B", "512 B");
++	test_string_get_size_one(1100, 1, "1.10 kB", "1.07 KiB");
++
++	/* normal values */
++	test_string_get_size_one(16384, 512, "8.39 MB", "8.00 MiB");
++	test_string_get_size_one(500118192, 512, "256 GB", "238 GiB");
++	test_string_get_size_one(8192, 4096, "33.6 MB", "32.0 MiB");
++
++	/* weird block sizes */
++	test_string_get_size_one(3000, 1900, "5.70 MB", "5.44 MiB");
++
++	/* huge values */
++	test_string_get_size_one(U64_MAX, 4096, "75.6 ZB", "64.0 ZiB");
++	test_string_get_size_one(4096, U64_MAX, "75.6 ZB", "64.0 ZiB");
+ }
+
+ static int __init test_string_helpers_init(void)
+diff --git a/mm/compaction.c b/mm/compaction.c
+index de3e1e71cd9f..7881e072dc33 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -880,16 +880,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
+ 		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
+ 							ISOLATE_UNEVICTABLE);
+
+-		/*
+-		 * In case of fatal failure, release everything that might
+-		 * have been isolated in the previous iteration, and signal
+-		 * the failure back to caller.
+-		 */
+-		if (!pfn) {
+-			putback_movable_pages(&cc->migratepages);
+-			cc->nr_migratepages = 0;
++		if (!pfn)
+ 			break;
+-		}
+
+ 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
+ 			break;
+diff --git a/mm/page-writeback.c b/mm/page-writeback.c
+index d15d88c8efa1..e40c9364582d 100644
+--- a/mm/page-writeback.c
++++ b/mm/page-writeback.c
+@@ -1899,7 +1899,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
+ 	if (gdtc->dirty > gdtc->bg_thresh)
+ 		return true;
+
+-	if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
++	if (wb_stat(wb, WB_RECLAIMABLE) >
++	    wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
+ 		return true;
+
+ 	if (mdtc) {
+@@ -1913,7 +1914,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
+ 		if (mdtc->dirty > mdtc->bg_thresh)
+ 			return true;
+
+-		if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
++		if (wb_stat(wb, WB_RECLAIMABLE) >
++		    wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
+ 			return true;
+ 	}
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index c69531afbd8f..6cf5cadeaef7 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -6193,7 +6193,7 @@ int __meminit init_per_zone_wmark_min(void)
+ 	setup_per_zone_inactive_ratio();
+ 	return 0;
+ }
+-module_init(init_per_zone_wmark_min)
++core_initcall(init_per_zone_wmark_min)
+
+ /*
+  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
+diff --git a/mm/zswap.c b/mm/zswap.c
+index bf14508afd64..340261946fda 100644
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
+ static LIST_HEAD(zswap_pools);
+ /* protects zswap_pools list modification */
+ static DEFINE_SPINLOCK(zswap_pools_lock);
++/* pool counter to provide unique names to zpool */
++static atomic_t zswap_pools_count = ATOMIC_INIT(0);
+
+ /* used by param callback function */
+ static bool zswap_init_started;
+@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
+ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ {
+ 	struct zswap_pool *pool;
++	char name[38]; /* 'zswap' + 32 char (max) num + \0 */
+ 	gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+
+ 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
+ 		return NULL;
+ 	}
+
+-	pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
++	/* unique name for each pool specifically required by zsmalloc */
++	snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
++
++	pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
+ 	if (!pool->zpool) {
+ 		pr_err("%s zpool not available\n", type);
+ 		goto error;
+diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
+index a49c705fb86b..5f19133c5530 100644
+--- a/net/batman-adv/distributed-arp-table.c
++++ b/net/batman-adv/distributed-arp-table.c
+@@ -553,6 +553,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+  * be sent to
+  * @bat_priv: the bat priv with all the soft interface information
+  * @ip_dst: ipv4 to look up in the DHT
++ * @vid: VLAN identifier
+  *
+  * An originator O is selected if and only if its DHT_ID value is one of three
+  * closest values (from the LEFT, with wrap around if needed) then the hash
+@@ -561,7 +562,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
+  * Returns the candidate array of size BATADV_DAT_CANDIDATE_NUM.
+  */
+ static struct batadv_dat_candidate *
+-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
++batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
++			     unsigned short vid)
+ {
+ 	int select;
+ 	batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
+@@ -577,7 +579,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+ 		return NULL;
+
+ 	dat.ip = ip_dst;
+-	dat.vid = 0;
++	dat.vid = vid;
+ 	ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
+ 						    BATADV_DAT_ADDR_MAX);
+
+@@ -597,6 +599,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+  * @bat_priv: the bat priv with all the soft interface information
+  * @skb: payload to send
+  * @ip: the DHT key
++ * @vid: VLAN identifier
+  * @packet_subtype: unicast4addr packet subtype to use
+  *
+  * This function copies the skb with pskb_copy() and is sent as unicast packet
+@@ -607,7 +610,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+  */
+ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
+ 				 struct sk_buff *skb, __be32 ip,
+-				 int packet_subtype)
++				 unsigned short vid, int packet_subtype)
+ {
+ 	int i;
+ 	bool ret = false;
+@@ -616,7 +619,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
+ 	struct sk_buff *tmp_skb;
+ 	struct batadv_dat_candidate *cand;
+
+-	cand = batadv_dat_select_candidates(bat_priv, ip);
++	cand = batadv_dat_select_candidates(bat_priv, ip, vid);
+ 	if (!cand)
+ 		goto out;
+
+@@ -1004,7 +1007,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
+ 		ret = true;
+ 	} else {
+ 		/* Send the request to the DHT */
+-		ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
++		ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
+ 					   BATADV_P_DAT_DHT_GET);
+ 	}
+ out:
+@@ -1132,8 +1135,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
+ 	/* Send the ARP reply to the candidates for both the IP addresses that
+ 	 * the node obtained from the ARP reply
+ 	 */
+-	batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
+-	batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
++	batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
++	batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
+ }
+
+ /**
+diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
+index 3207667e69de..d8a2f33e60e5 100644
+--- a/net/batman-adv/routing.c
++++ b/net/batman-adv/routing.c
+@@ -104,6 +104,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
+ 		neigh_node = NULL;
+
+ 	spin_lock_bh(&orig_node->neigh_list_lock);
++	/* curr_router used earlier may not be the current orig_ifinfo->router
++	 * anymore because it was dereferenced outside of the neigh_list_lock
++	 * protected region. After the new best neighbor has replace the current
++	 * best neighbor the reference counter needs to decrease. Consequently,
++	 * the code needs to ensure the curr_router variable contains a pointer
++	 * to the replaced best neighbor.
++	 */
++	curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
++
+ 	rcu_assign_pointer(orig_ifinfo->router, neigh_node);
+ 	spin_unlock_bh(&orig_node->neigh_list_lock);
+ 	batadv_orig_ifinfo_free_ref(orig_ifinfo);
+diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
+index f664324805eb..0e0c3b8ed927 100644
+--- a/net/batman-adv/send.c
++++ b/net/batman-adv/send.c
+@@ -630,6 +630,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->bcast_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+@@ -657,6 +660,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
+
+ 		if (pending) {
+ 			hlist_del(&forw_packet->list);
++			if (!forw_packet->own)
++				atomic_inc(&bat_priv->batman_queue_left);
++
+ 			batadv_forw_packet_free(forw_packet);
+ 		}
+ 	}
+diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
+index ac4d08de5df4..720f1a5b81ac 100644
+--- a/net/batman-adv/soft-interface.c
++++ b/net/batman-adv/soft-interface.c
+@@ -407,11 +407,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	 */
+ 	nf_reset(skb);
+
++	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
++		goto dropped;
++
+ 	vid = batadv_get_vid(skb, 0);
+ 	ethhdr = eth_hdr(skb);
+
+ 	switch (ntohs(ethhdr->h_proto)) {
+ 	case ETH_P_8021Q:
++		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
++			goto dropped;
++
+ 		vhdr = (struct vlan_ethhdr *)skb->data;
+
+ 		if (vhdr->h_vlan_encapsulated_proto != ethertype)
+@@ -423,8 +429,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
+ 	}
+
+ 	/* skb->dev & skb->pkt_type are set here */
+-	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+-		goto dropped;
+ 	skb->protocol = eth_type_trans(skb, soft_iface);
+
+ 	/* should not be necessary anymore as we use skb_pull_rcsum()
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 7a2b7915093b..bcb0a1b64556 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -1750,7 +1750,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+
+ 		ret = dev_alloc_name(ndev, ndev->name);
+ 		if (ret < 0) {
+-			free_netdev(ndev);
++			ieee80211_if_free(ndev);
+ 			return ret;
+ 		}
+
+@@ -1836,7 +1836,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
+
+ 		ret = register_netdevice(ndev);
+ 		if (ret) {
+-			free_netdev(ndev);
++			ieee80211_if_free(ndev);
+ 			return ret;
+ 		}
+ 	}
+diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
+index f57b4dcdb233..4da560005b0e 100644
+--- a/net/netfilter/ipvs/ip_vs_core.c
++++ b/net/netfilter/ipvs/ip_vs_core.c
+@@ -1757,15 +1757,34 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
+ 	cp = pp->conn_in_get(ipvs, af, skb, &iph);
+
+ 	conn_reuse_mode = sysctl_conn_reuse_mode(ipvs);
+-	if (conn_reuse_mode && !iph.fragoffs &&
+-	    is_new_conn(skb, &iph) && cp &&
+-	    ((unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
+-	      unlikely(!atomic_read(&cp->dest->weight))) ||
+-	     unlikely(is_new_conn_expected(cp, conn_reuse_mode)))) {
+-		if (!atomic_read(&cp->n_control))
+-			ip_vs_conn_expire_now(cp);
+-		__ip_vs_conn_put(cp);
+-		cp = NULL;
++	if (conn_reuse_mode && !iph.fragoffs && is_new_conn(skb, &iph) && cp) {
++		bool uses_ct = false, resched = false;
++
++		if (unlikely(sysctl_expire_nodest_conn(ipvs)) && cp->dest &&
++		    unlikely(!atomic_read(&cp->dest->weight))) {
++			resched = true;
++			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++		} else if (is_new_conn_expected(cp, conn_reuse_mode)) {
++			uses_ct = ip_vs_conn_uses_conntrack(cp, skb);
++			if (!atomic_read(&cp->n_control)) {
++				resched = true;
++			} else {
++				/* Do not reschedule controlling connection
++				 * that uses conntrack while it is still
++				 * referenced by controlled connection(s).
++				 */
++				resched = !uses_ct;
++			}
++		}
++
++		if (resched) {
++			if (!atomic_read(&cp->n_control))
++				ip_vs_conn_expire_now(cp);
++			__ip_vs_conn_put(cp);
++			if (uses_ct)
++				return NF_DROP;
++			cp = NULL;
++		}
+ 	}
+
+ 	if (unlikely(!cp)) {
+diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
+index 1b8d594e493a..0a6eb5c0d9e9 100644
+--- a/net/netfilter/ipvs/ip_vs_pe_sip.c
++++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
+@@ -70,10 +70,10 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ 	const char *dptr;
+ 	int retc;
+
+-	ip_vs_fill_iph_skb(p->af, skb, false, &iph);
++	retc = ip_vs_fill_iph_skb(p->af, skb, false, &iph);
+
+ 	/* Only useful with UDP */
+-	if (iph.protocol != IPPROTO_UDP)
++	if (!retc || iph.protocol != IPPROTO_UDP)
+ 		return -EINVAL;
+ 	/* todo: IPv6 fragments:
+ 	 * I think this only should be done for the first fragment. /HS
+@@ -88,7 +88,7 @@ ip_vs_sip_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
+ 	dptr = skb->data + dataoff;
+ 	datalen = skb->len - dataoff;
+
+-	if (get_callid(dptr, dataoff, datalen, &matchoff, &matchlen))
++	if (get_callid(dptr, 0, datalen, &matchoff, &matchlen))
+ 		return -EINVAL;
+
+ 	/* N.B: pe_data is only set on success,