diff options
author | Mike Pagano <mpagano@gentoo.org> | 2022-07-21 16:08:13 -0400 |
---|---|---|
committer | Mike Pagano <mpagano@gentoo.org> | 2022-07-21 16:08:13 -0400 |
commit | 3cced38822768a8795123158b3e2a5d184c9be68 (patch) | |
tree | 024c0c07ebd1b96b846f16eac575a788e1ce7d9a | |
parent | Linux patch 5.10.131 (diff) | |
download | linux-patches-5.10-141.tar.gz linux-patches-5.10-141.tar.bz2 linux-patches-5.10-141.zip |
Linux patch 5.10.132 (tag: 5.10-141)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4 | ||||
-rw-r--r-- | 1131_linux-5.10.132.patch | 2956 |
2 files changed, 2960 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 7e7a9fd2..04169db1 100644 --- a/0000_README +++ b/0000_README @@ -567,6 +567,10 @@ Patch: 1130_linux-5.10.131.patch From: http://www.kernel.org Desc: Linux 5.10.131 +Patch: 1131_linux-5.10.132.patch +From: http://www.kernel.org +Desc: Linux 5.10.132 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1131_linux-5.10.132.patch b/1131_linux-5.10.132.patch new file mode 100644 index 00000000..23d80f8d --- /dev/null +++ b/1131_linux-5.10.132.patch @@ -0,0 +1,2956 @@ +diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst +index 4822a058a81d7..0b1f3235aa773 100644 +--- a/Documentation/networking/ip-sysctl.rst ++++ b/Documentation/networking/ip-sysctl.rst +@@ -988,7 +988,7 @@ cipso_cache_enable - BOOLEAN + cipso_cache_bucket_size - INTEGER + The CIPSO label cache consists of a fixed size hash table with each + hash bucket containing a number of cache entries. This variable limits +- the number of entries in each hash bucket; the larger the value the ++ the number of entries in each hash bucket; the larger the value is, the + more CIPSO label mappings that can be cached. When the number of + entries in a given hash bucket reaches this limit adding new entries + causes the oldest entry in the bucket to be removed to make room. +@@ -1080,7 +1080,7 @@ ip_autobind_reuse - BOOLEAN + option should only be set by experts. + Default: 0 + +-ip_dynaddr - BOOLEAN ++ip_dynaddr - INTEGER + If set non-zero, enables support for dynamic addresses. 
+ If set to a non-zero value larger than 1, a kernel log + message will be printed when dynamic address rewriting +diff --git a/Makefile b/Makefile +index 53f1a45ae69b0..5bee8f281b061 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 10 +-SUBLEVEL = 131 ++SUBLEVEL = 132 + EXTRAVERSION = + NAME = Dare mighty things + +diff --git a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi +index e6aa0c33754de..966038ecc5bfb 100644 +--- a/arch/arm/boot/dts/imx6qdl-ts7970.dtsi ++++ b/arch/arm/boot/dts/imx6qdl-ts7970.dtsi +@@ -226,7 +226,7 @@ + reg = <0x28>; + #gpio-cells = <2>; + gpio-controller; +- ngpio = <32>; ++ ngpios = <62>; + }; + + sgtl5000: codec@a { +diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi +index 12f57278ba4a5..33f76d14341ef 100644 +--- a/arch/arm/boot/dts/sama5d2.dtsi ++++ b/arch/arm/boot/dts/sama5d2.dtsi +@@ -1125,7 +1125,7 @@ + clocks = <&pmc PMC_TYPE_PERIPHERAL 55>, <&pmc PMC_TYPE_GCK 55>; + clock-names = "pclk", "gclk"; + assigned-clocks = <&pmc PMC_TYPE_CORE PMC_I2S1_MUX>; +- assigned-parrents = <&pmc PMC_TYPE_GCK 55>; ++ assigned-clock-parents = <&pmc PMC_TYPE_GCK 55>; + status = "disabled"; + }; + +diff --git a/arch/arm/boot/dts/stm32mp151.dtsi b/arch/arm/boot/dts/stm32mp151.dtsi +index 7a0ef01de969e..9919fc86bdc34 100644 +--- a/arch/arm/boot/dts/stm32mp151.dtsi ++++ b/arch/arm/boot/dts/stm32mp151.dtsi +@@ -543,7 +543,7 @@ + compatible = "st,stm32-cec"; + reg = <0x40016000 0x400>; + interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>; +- clocks = <&rcc CEC_K>, <&clk_lse>; ++ clocks = <&rcc CEC_K>, <&rcc CEC>; + clock-names = "cec", "hdmi-cec"; + status = "disabled"; + }; +diff --git a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts +index f19ed981da9d9..3706216ffb40b 100644 +--- a/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts ++++ 
b/arch/arm/boot/dts/sun8i-h2-plus-orangepi-zero.dts +@@ -169,7 +169,7 @@ + flash@0 { + #address-cells = <1>; + #size-cells = <1>; +- compatible = "mxicy,mx25l1606e", "winbond,w25q128"; ++ compatible = "mxicy,mx25l1606e", "jedec,spi-nor"; + reg = <0>; + spi-max-frequency = <40000000>; + }; +diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h +index 92282558caf7c..2b8970d8e5a2f 100644 +--- a/arch/arm/include/asm/mach/map.h ++++ b/arch/arm/include/asm/mach/map.h +@@ -27,6 +27,7 @@ enum { + MT_HIGH_VECTORS, + MT_MEMORY_RWX, + MT_MEMORY_RW, ++ MT_MEMORY_RO, + MT_ROM, + MT_MEMORY_RWX_NONCACHED, + MT_MEMORY_RW_DTCM, +diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h +index 91d6b7856be4b..73c83f4d33b3b 100644 +--- a/arch/arm/include/asm/ptrace.h ++++ b/arch/arm/include/asm/ptrace.h +@@ -164,5 +164,31 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs) + ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \ + }) + ++ ++/* ++ * Update ITSTATE after normal execution of an IT block instruction. 
++ * ++ * The 8 IT state bits are split into two parts in CPSR: ++ * ITSTATE<1:0> are in CPSR<26:25> ++ * ITSTATE<7:2> are in CPSR<15:10> ++ */ ++static inline unsigned long it_advance(unsigned long cpsr) ++{ ++ if ((cpsr & 0x06000400) == 0) { ++ /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ ++ cpsr &= ~PSR_IT_MASK; ++ } else { ++ /* We need to shift left ITSTATE<4:0> */ ++ const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */ ++ unsigned long it = cpsr & mask; ++ it <<= 1; ++ it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */ ++ it &= mask; ++ cpsr &= ~mask; ++ cpsr |= it; ++ } ++ return cpsr; ++} ++ + #endif /* __ASSEMBLY__ */ + #endif +diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c +index ea81e89e77400..bcefe3f51744c 100644 +--- a/arch/arm/mm/alignment.c ++++ b/arch/arm/mm/alignment.c +@@ -935,6 +935,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + if (type == TYPE_LDST) + do_alignment_finish_ldst(addr, instr, regs, offset); + ++ if (thumb_mode(regs)) ++ regs->ARM_cpsr = it_advance(regs->ARM_cpsr); ++ + return 0; + + bad_or_fault: +diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c +index 3e3001998460b..86f213f1b44b8 100644 +--- a/arch/arm/mm/mmu.c ++++ b/arch/arm/mm/mmu.c +@@ -296,6 +296,13 @@ static struct mem_type mem_types[] __ro_after_init = { + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, + .domain = DOMAIN_KERNEL, + }, ++ [MT_MEMORY_RO] = { ++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | ++ L_PTE_XN | L_PTE_RDONLY, ++ .prot_l1 = PMD_TYPE_TABLE, ++ .prot_sect = PMD_TYPE_SECT, ++ .domain = DOMAIN_KERNEL, ++ }, + [MT_ROM] = { + .prot_sect = PMD_TYPE_SECT, + .domain = DOMAIN_KERNEL, +@@ -490,6 +497,7 @@ static void __init build_mem_type_table(void) + + /* Also setup NX memory mapping */ + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN; ++ mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_XN; + } + if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { + /* +@@ 
-569,6 +577,7 @@ static void __init build_mem_type_table(void) + mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; ++ mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; + #endif + + /* +@@ -588,6 +597,8 @@ static void __init build_mem_type_table(void) + mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED; ++ mem_types[MT_MEMORY_RO].prot_sect |= PMD_SECT_S; ++ mem_types[MT_MEMORY_RO].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED; + mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= PMD_SECT_S; + mem_types[MT_MEMORY_RWX_NONCACHED].prot_pte |= L_PTE_SHARED; +@@ -648,6 +659,8 @@ static void __init build_mem_type_table(void) + mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd; + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot; ++ mem_types[MT_MEMORY_RO].prot_sect |= ecc_mask | cp->pmd; ++ mem_types[MT_MEMORY_RO].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot; + mem_types[MT_MEMORY_RWX_NONCACHED].prot_sect |= ecc_mask; + mem_types[MT_ROM].prot_sect |= cp->pmd; +@@ -1342,7 +1355,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc) + map.pfn = __phys_to_pfn(__atags_pointer & SECTION_MASK); + map.virtual = FDT_FIXED_BASE; + map.length = FDT_FIXED_SIZE; +- map.type = MT_ROM; ++ map.type = MT_MEMORY_RO; + create_mapping(&map); + } + +diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c +index fb9f3eb6bf483..8bc7a2d6d6c7f 100644 +--- a/arch/arm/mm/proc-v7-bugs.c ++++ b/arch/arm/mm/proc-v7-bugs.c +@@ -108,8 +108,7 @@ static unsigned int spectre_v2_install_workaround(unsigned int method) + #else + static unsigned int 
spectre_v2_install_workaround(unsigned int method) + { +- pr_info("CPU%u: Spectre V2: workarounds disabled by configuration\n", +- smp_processor_id()); ++ pr_info_once("Spectre V2: workarounds disabled by configuration\n"); + + return SPECTRE_VULNERABLE; + } +@@ -209,10 +208,10 @@ static int spectre_bhb_install_workaround(int method) + return SPECTRE_VULNERABLE; + + spectre_bhb_method = method; +- } + +- pr_info("CPU%u: Spectre BHB: using %s workaround\n", +- smp_processor_id(), spectre_bhb_method_name(method)); ++ pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n", ++ smp_processor_id(), spectre_bhb_method_name(method)); ++ } + + return SPECTRE_MITIGATED; + } +diff --git a/arch/arm/probes/decode.h b/arch/arm/probes/decode.h +index 9731735989921..facc889d05eee 100644 +--- a/arch/arm/probes/decode.h ++++ b/arch/arm/probes/decode.h +@@ -14,6 +14,7 @@ + #include <linux/types.h> + #include <linux/stddef.h> + #include <asm/probes.h> ++#include <asm/ptrace.h> + #include <asm/kprobes.h> + + void __init arm_probes_decode_init(void); +@@ -35,31 +36,6 @@ void __init find_str_pc_offset(void); + #endif + + +-/* +- * Update ITSTATE after normal execution of an IT block instruction. 
+- * +- * The 8 IT state bits are split into two parts in CPSR: +- * ITSTATE<1:0> are in CPSR<26:25> +- * ITSTATE<7:2> are in CPSR<15:10> +- */ +-static inline unsigned long it_advance(unsigned long cpsr) +- { +- if ((cpsr & 0x06000400) == 0) { +- /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */ +- cpsr &= ~PSR_IT_MASK; +- } else { +- /* We need to shift left ITSTATE<4:0> */ +- const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */ +- unsigned long it = cpsr & mask; +- it <<= 1; +- it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */ +- it &= mask; +- cpsr &= ~mask; +- cpsr |= it; +- } +- return cpsr; +-} +- + static inline void __kprobes bx_write_pc(long pcv, struct pt_regs *regs) + { + long cpsr = regs->ARM_cpsr; +diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h +index 6d5c6463bc07e..de99a19e72d72 100644 +--- a/arch/sh/include/asm/io.h ++++ b/arch/sh/include/asm/io.h +@@ -271,8 +271,12 @@ static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size, + #endif /* CONFIG_HAVE_IOREMAP_PROT */ + + #else /* CONFIG_MMU */ +-#define iounmap(addr) do { } while (0) +-#define ioremap(offset, size) ((void __iomem *)(unsigned long)(offset)) ++static inline void __iomem *ioremap(phys_addr_t offset, size_t size) ++{ ++ return (void __iomem *)(unsigned long)offset; ++} ++ ++static inline void iounmap(volatile void __iomem *addr) { } + #endif /* CONFIG_MMU */ + + #define ioremap_uc ioremap +diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c +index 05e117137b459..efe13ab366f47 100644 +--- a/arch/x86/kernel/head64.c ++++ b/arch/x86/kernel/head64.c +@@ -419,6 +419,8 @@ static void __init clear_bss(void) + { + memset(__bss_start, 0, + (unsigned long) __bss_stop - (unsigned long) __bss_start); ++ memset(__brk_base, 0, ++ (unsigned long) __brk_limit - (unsigned long) __brk_base); + } + + static unsigned long get_cmd_line_ptr(void) +diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c 
+index 7dfb1e8089284..bd218470d1459 100644 +--- a/arch/x86/kernel/ima_arch.c ++++ b/arch/x86/kernel/ima_arch.c +@@ -88,6 +88,8 @@ const char * const *arch_get_ima_policy(void) + if (IS_ENABLED(CONFIG_IMA_ARCH_POLICY) && arch_ima_get_secureboot()) { + if (IS_ENABLED(CONFIG_MODULE_SIG)) + set_module_sig_enforced(); ++ if (IS_ENABLED(CONFIG_KEXEC_SIG)) ++ set_kexec_sig_enforced(); + return sb_arch_rules; + } + return NULL; +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index da547752580a3..c71f702c037de 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -8142,15 +8142,17 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, + */ + static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) + { +- struct kvm_lapic_irq lapic_irq; +- +- lapic_irq.shorthand = APIC_DEST_NOSHORT; +- lapic_irq.dest_mode = APIC_DEST_PHYSICAL; +- lapic_irq.level = 0; +- lapic_irq.dest_id = apicid; +- lapic_irq.msi_redir_hint = false; ++ /* ++ * All other fields are unused for APIC_DM_REMRD, but may be consumed by ++ * common code, e.g. for tracing. Defer initialization to the compiler. ++ */ ++ struct kvm_lapic_irq lapic_irq = { ++ .delivery_mode = APIC_DM_REMRD, ++ .dest_mode = APIC_DEST_PHYSICAL, ++ .shorthand = APIC_DEST_NOSHORT, ++ .dest_id = apicid, ++ }; + +- lapic_irq.delivery_mode = APIC_DM_REMRD; + kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL); + } + +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index c7a47603537f2..63d8c6c7d1254 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -78,10 +78,20 @@ static uint8_t __pte2cachemode_tbl[8] = { + [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC, + }; + +-/* Check that the write-protect PAT entry is set for write-protect */ ++/* ++ * Check that the write-protect PAT entry is set for write-protect. 
++ * To do this without making assumptions how PAT has been set up (Xen has ++ * another layout than the kernel), translate the _PAGE_CACHE_MODE_WP cache ++ * mode via the __cachemode2pte_tbl[] into protection bits (those protection ++ * bits will select a cache mode of WP or better), and then translate the ++ * protection bits back into the cache mode using __pte2cm_idx() and the ++ * __pte2cachemode_tbl[] array. This will return the really used cache mode. ++ */ + bool x86_has_pat_wp(void) + { +- return __pte2cachemode_tbl[_PAGE_CACHE_MODE_WP] == _PAGE_CACHE_MODE_WP; ++ uint16_t prot = __cachemode2pte_tbl[_PAGE_CACHE_MODE_WP]; ++ ++ return __pte2cachemode_tbl[__pte2cm_idx(prot)] == _PAGE_CACHE_MODE_WP; + } + + enum page_cache_mode pgprot2cachemode(pgprot_t pgprot) +diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c +index 73621bc119768..3704476bb83a0 100644 +--- a/drivers/cpufreq/pmac32-cpufreq.c ++++ b/drivers/cpufreq/pmac32-cpufreq.c +@@ -471,6 +471,10 @@ static int pmac_cpufreq_init_MacRISC3(struct device_node *cpunode) + if (slew_done_gpio_np) + slew_done_gpio = read_gpio(slew_done_gpio_np); + ++ of_node_put(volt_gpio_np); ++ of_node_put(freq_gpio_np); ++ of_node_put(slew_done_gpio_np); ++ + /* If we use the frequency GPIOs, calculate the min/max speeds based + * on the bus frequencies + */ +diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c +index ecaa538b2d357..ef78781934919 100644 +--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c ++++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c +@@ -790,6 +790,7 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo + ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, + DRM_MODE_CONNECTOR_DisplayPort); + if (ret) { ++ drm_dp_mst_put_port_malloc(port); + intel_connector_free(intel_connector); + return NULL; + } +diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c 
b/drivers/gpu/drm/i915/gt/intel_gt.c +index 6615eb5147e23..a33887f2464fa 100644 +--- a/drivers/gpu/drm/i915/gt/intel_gt.c ++++ b/drivers/gpu/drm/i915/gt/intel_gt.c +@@ -736,6 +736,20 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt) + mutex_lock(>->tlb_invalidate_lock); + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + ++ spin_lock_irq(&uncore->lock); /* serialise invalidate with GT reset */ ++ ++ for_each_engine(engine, gt, id) { ++ struct reg_and_bit rb; ++ ++ rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num); ++ if (!i915_mmio_reg_offset(rb.reg)) ++ continue; ++ ++ intel_uncore_write_fw(uncore, rb.reg, rb.bit); ++ } ++ ++ spin_unlock_irq(&uncore->lock); ++ + for_each_engine(engine, gt, id) { + /* + * HW architecture suggest typical invalidation time at 40us, +@@ -750,7 +764,6 @@ void intel_gt_invalidate_tlbs(struct intel_gt *gt) + if (!i915_mmio_reg_offset(rb.reg)) + continue; + +- intel_uncore_write_fw(uncore, rb.reg, rb.bit); + if (__intel_wait_for_register_fw(uncore, + rb.reg, rb.bit, 0, + timeout_us, timeout_ms, +diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c +index 95d41c01d0e04..35d55f98a06f5 100644 +--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c ++++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c +@@ -4788,8 +4788,8 @@ static int live_lrc_layout(void *arg) + continue; + + hw = shmem_pin_map(engine->default_state); +- if (IS_ERR(hw)) { +- err = PTR_ERR(hw); ++ if (!hw) { ++ err = -ENOMEM; + break; + } + hw += LRC_STATE_OFFSET / sizeof(*hw); +@@ -4965,8 +4965,8 @@ static int live_lrc_fixed(void *arg) + continue; + + hw = shmem_pin_map(engine->default_state); +- if (IS_ERR(hw)) { +- err = PTR_ERR(hw); ++ if (!hw) { ++ err = -ENOMEM; + break; + } + hw += LRC_STATE_OFFSET / sizeof(*hw); +diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c +index a70261809cdd2..1dfc457bbefc8 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_drv.c ++++ 
b/drivers/gpu/drm/panfrost/panfrost_drv.c +@@ -427,8 +427,8 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, + + if (args->retained) { + if (args->madv == PANFROST_MADV_DONTNEED) +- list_add_tail(&bo->base.madv_list, +- &pfdev->shrinker_list); ++ list_move_tail(&bo->base.madv_list, ++ &pfdev->shrinker_list); + else if (args->madv == PANFROST_MADV_WILLNEED) + list_del_init(&bo->base.madv_list); + } +diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c +index 7fc45b13a52c2..13596961ae17f 100644 +--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c ++++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c +@@ -491,7 +491,7 @@ err_map: + err_pages: + drm_gem_shmem_put_pages(&bo->base); + err_bo: +- drm_gem_object_put(&bo->base.base); ++ panfrost_gem_mapping_put(bomapping); + return ret; + } + +diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c +index 03d2366118dd4..d5f1fabc45d79 100644 +--- a/drivers/irqchip/irq-or1k-pic.c ++++ b/drivers/irqchip/irq-or1k-pic.c +@@ -66,7 +66,6 @@ static struct or1k_pic_dev or1k_pic_level = { + .name = "or1k-PIC-level", + .irq_unmask = or1k_pic_unmask, + .irq_mask = or1k_pic_mask, +- .irq_mask_ack = or1k_pic_mask_ack, + }, + .handle = handle_level_irq, + .flags = IRQ_LEVEL | IRQ_NOPROBE, +diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c +index 1c42417810fcd..1a3fba352cadb 100644 +--- a/drivers/net/can/xilinx_can.c ++++ b/drivers/net/can/xilinx_can.c +@@ -259,7 +259,7 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { + .tseg2_min = 1, + .tseg2_max = 128, + .sjw_max = 128, +- .brp_min = 2, ++ .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, + }; +@@ -272,7 +272,7 @@ static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { + .tseg2_min = 1, + .tseg2_max = 16, + .sjw_max = 16, +- .brp_min = 2, ++ .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, + }; +diff --git 
a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +index fc5ea434a27c9..a0ce213c473bc 100644 +--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c ++++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +@@ -385,7 +385,7 @@ static void aq_pci_shutdown(struct pci_dev *pdev) + } + } + +-static int aq_suspend_common(struct device *dev, bool deep) ++static int aq_suspend_common(struct device *dev) + { + struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev)); + +@@ -398,17 +398,15 @@ static int aq_suspend_common(struct device *dev, bool deep) + if (netif_running(nic->ndev)) + aq_nic_stop(nic); + +- if (deep) { +- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); +- aq_nic_set_power(nic); +- } ++ aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); ++ aq_nic_set_power(nic); + + rtnl_unlock(); + + return 0; + } + +-static int atl_resume_common(struct device *dev, bool deep) ++static int atl_resume_common(struct device *dev) + { + struct pci_dev *pdev = to_pci_dev(dev); + struct aq_nic_s *nic; +@@ -421,11 +419,6 @@ static int atl_resume_common(struct device *dev, bool deep) + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + +- if (deep) { +- /* Reinitialize Nic/Vecs objects */ +- aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); +- } +- + if (netif_running(nic->ndev)) { + ret = aq_nic_init(nic); + if (ret) +@@ -450,22 +443,22 @@ err_exit: + + static int aq_pm_freeze(struct device *dev) + { +- return aq_suspend_common(dev, true); ++ return aq_suspend_common(dev); + } + + static int aq_pm_suspend_poweroff(struct device *dev) + { +- return aq_suspend_common(dev, true); ++ return aq_suspend_common(dev); + } + + static int aq_pm_thaw(struct device *dev) + { +- return atl_resume_common(dev, true); ++ return atl_resume_common(dev); + } + + static int aq_pm_resume_restore(struct device *dev) + { +- return atl_resume_common(dev, true); ++ return atl_resume_common(dev); + } + + static const struct 
dev_pm_ops aq_pm_ops = { +diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c +index eea4bd3116e8d..969af4dd64055 100644 +--- a/drivers/net/ethernet/faraday/ftgmac100.c ++++ b/drivers/net/ethernet/faraday/ftgmac100.c +@@ -1747,6 +1747,19 @@ cleanup_clk: + return rc; + } + ++static bool ftgmac100_has_child_node(struct device_node *np, const char *name) ++{ ++ struct device_node *child_np = of_get_child_by_name(np, name); ++ bool ret = false; ++ ++ if (child_np) { ++ ret = true; ++ of_node_put(child_np); ++ } ++ ++ return ret; ++} ++ + static int ftgmac100_probe(struct platform_device *pdev) + { + struct resource *res; +@@ -1860,7 +1873,7 @@ static int ftgmac100_probe(struct platform_device *pdev) + + /* Display what we found */ + phy_attached_info(phy); +- } else if (np && !of_get_child_by_name(np, "mdio")) { ++ } else if (np && !ftgmac100_has_child_node(np, "mdio")) { + /* Support legacy ASPEED devicetree descriptions that decribe a + * MAC with an embedded MDIO controller but have no "mdio" + * child node. 
Automatically scan the MDIO bus for available +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +index d06532d0baa43..634777fd7db9b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +@@ -231,8 +231,7 @@ mlx5e_set_ktls_rx_priv_ctx(struct tls_context *tls_ctx, + struct mlx5e_ktls_offload_context_rx **ctx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_RX); + +- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_rx *) > +- TLS_OFFLOAD_CONTEXT_SIZE_RX); ++ BUILD_BUG_ON(sizeof(priv_rx) > TLS_DRIVER_STATE_SIZE_RX); + + *ctx = priv_rx; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +index b140e13fdcc88..679747db3110c 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +@@ -63,8 +63,7 @@ mlx5e_set_ktls_tx_priv_ctx(struct tls_context *tls_ctx, + struct mlx5e_ktls_offload_context_tx **ctx = + __tls_driver_ctx(tls_ctx, TLS_OFFLOAD_CTX_DIR_TX); + +- BUILD_BUG_ON(sizeof(struct mlx5e_ktls_offload_context_tx *) > +- TLS_OFFLOAD_CONTEXT_SIZE_TX); ++ BUILD_BUG_ON(sizeof(priv_tx) > TLS_DRIVER_STATE_SIZE_TX); + + *ctx = priv_tx; + } +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +index 78f6a6f0a7e0a..ff4f10d0f090b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +@@ -536,7 +536,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env) + u32 in[MLX5_ST_SZ_DW(query_vnic_env_in)] = {}; + struct mlx5_core_dev *mdev = priv->mdev; + +- if (!MLX5_CAP_GEN(priv->mdev, nic_receive_steering_discard)) ++ if (!mlx5e_stats_grp_vnic_env_num_stats(priv)) + return; + + MLX5_SET(query_vnic_env_in, in, 
opcode, MLX5_CMD_OP_QUERY_VNIC_ENV); +diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c +index fa1a872c4bc83..5b7413305be63 100644 +--- a/drivers/net/ethernet/sfc/ef10.c ++++ b/drivers/net/ethernet/sfc/ef10.c +@@ -1916,7 +1916,10 @@ static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx) + + efx_update_sw_stats(efx, stats); + out: ++ /* releasing a DMA coherent buffer with BH disabled can panic */ ++ spin_unlock_bh(&efx->stats_lock); + efx_nic_free_buffer(efx, &stats_buf); ++ spin_lock_bh(&efx->stats_lock); + return rc; + } + +diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c +index 84041cd587d78..b44acb6e3953f 100644 +--- a/drivers/net/ethernet/sfc/ef10_sriov.c ++++ b/drivers/net/ethernet/sfc/ef10_sriov.c +@@ -411,8 +411,9 @@ fail1: + static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) + { + struct pci_dev *dev = efx->pci_dev; ++ struct efx_ef10_nic_data *nic_data = efx->nic_data; + unsigned int vfs_assigned = pci_vfs_assigned(dev); +- int rc = 0; ++ int i, rc = 0; + + if (vfs_assigned && !force) { + netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; " +@@ -420,10 +421,13 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force) + return -EBUSY; + } + +- if (!vfs_assigned) ++ if (!vfs_assigned) { ++ for (i = 0; i < efx->vf_count; i++) ++ nic_data->vf[i].pci_dev = NULL; + pci_disable_sriov(dev); +- else ++ } else { + rc = -EBUSY; ++ } + + efx_ef10_sriov_free_vf_vswitching(efx); + efx->vf_count = 0; +diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +index 2342d497348ea..fd1b0cc6b5faf 100644 +--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c ++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +@@ -363,6 +363,7 @@ bypass_clk_reset_gpio: + data->fix_mac_speed = tegra_eqos_fix_speed; + data->init = tegra_eqos_init; + data->bsp_priv = eqos; ++ 
data->sph_disable = 1; + + err = tegra_eqos_init(pdev, eqos); + if (err < 0) +diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c +index 96068e0d841ae..dcbe278086dca 100644 +--- a/drivers/net/phy/sfp.c ++++ b/drivers/net/phy/sfp.c +@@ -2427,7 +2427,7 @@ static int sfp_probe(struct platform_device *pdev) + + platform_set_drvdata(pdev, sfp); + +- err = devm_add_action(sfp->dev, sfp_cleanup, sfp); ++ err = devm_add_action_or_reset(sfp->dev, sfp_cleanup, sfp); + if (err < 0) + return err; + +diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c +index dbac4c03d21a1..a0335407be423 100644 +--- a/drivers/net/xen-netback/rx.c ++++ b/drivers/net/xen-netback/rx.c +@@ -495,6 +495,7 @@ void xenvif_rx_action(struct xenvif_queue *queue) + queue->rx_copy.completed = &completed_skbs; + + while (xenvif_rx_ring_slots_available(queue) && ++ !skb_queue_empty(&queue->rx_queue) && + work_done < RX_BATCH_SIZE) { + xenvif_rx_skb(queue); + work_done++; +diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c +index 3943a30053b3b..f426dcdfcdd6a 100644 +--- a/drivers/nfc/nxp-nci/i2c.c ++++ b/drivers/nfc/nxp-nci/i2c.c +@@ -122,7 +122,9 @@ static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy, + skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN); + + r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len); +- if (r != frame_len) { ++ if (r < 0) { ++ goto fw_read_exit_free_skb; ++ } else if (r != frame_len) { + nfc_err(&client->dev, + "Invalid frame length: %u (expected %zu)\n", + r, frame_len); +@@ -166,7 +168,9 @@ static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy, + return 0; + + r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen); +- if (r != header.plen) { ++ if (r < 0) { ++ goto nci_read_exit_free_skb; ++ } else if (r != header.plen) { + nfc_err(&client->dev, + "Invalid frame payload length: %u (expected %u)\n", + r, header.plen); +diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c +index 
af2902d70b196..ab060b4911ffd 100644 +--- a/drivers/nvme/host/core.c ++++ b/drivers/nvme/host/core.c +@@ -4460,6 +4460,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl) + nvme_stop_keep_alive(ctrl); + flush_work(&ctrl->async_event_work); + cancel_work_sync(&ctrl->fw_act_work); ++ if (ctrl->ops->stop_ctrl) ++ ctrl->ops->stop_ctrl(ctrl); + } + EXPORT_SYMBOL_GPL(nvme_stop_ctrl); + +diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h +index 8e40a6306e53d..58cf9e39d613e 100644 +--- a/drivers/nvme/host/nvme.h ++++ b/drivers/nvme/host/nvme.h +@@ -478,6 +478,7 @@ struct nvme_ctrl_ops { + void (*free_ctrl)(struct nvme_ctrl *ctrl); + void (*submit_async_event)(struct nvme_ctrl *ctrl); + void (*delete_ctrl)(struct nvme_ctrl *ctrl); ++ void (*stop_ctrl)(struct nvme_ctrl *ctrl); + int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); + }; + +diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c +index 3622c5c9515fa..ce129655ef0a3 100644 +--- a/drivers/nvme/host/pci.c ++++ b/drivers/nvme/host/pci.c +@@ -3234,7 +3234,8 @@ static const struct pci_device_id nvme_id_table[] = { + NVME_QUIRK_DISABLE_WRITE_ZEROES| + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ +- .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, ++ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN | ++ NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | + NVME_QUIRK_IGNORE_DEV_SUBNQN, }, +diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c +index 8eacc9bd58f5a..b61924394032a 100644 +--- a/drivers/nvme/host/rdma.c ++++ b/drivers/nvme/host/rdma.c +@@ -1057,6 +1057,14 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, + } + } + ++static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl) ++{ ++ struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); ++ ++ cancel_work_sync(&ctrl->err_work); ++ cancel_delayed_work_sync(&ctrl->reconnect_work); ++} ++ + static void 
nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl) + { + struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); +@@ -2236,9 +2244,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = { + + static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) + { +- cancel_work_sync(&ctrl->err_work); +- cancel_delayed_work_sync(&ctrl->reconnect_work); +- + nvme_rdma_teardown_io_queues(ctrl, shutdown); + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); + if (shutdown) +@@ -2288,6 +2293,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = { + .submit_async_event = nvme_rdma_submit_async_event, + .delete_ctrl = nvme_rdma_delete_ctrl, + .get_address = nvmf_get_address, ++ .stop_ctrl = nvme_rdma_stop_ctrl, + }; + + /* +diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c +index 7e39320337072..fe8c27bbc3f20 100644 +--- a/drivers/nvme/host/tcp.c ++++ b/drivers/nvme/host/tcp.c +@@ -1149,8 +1149,7 @@ done: + } else if (ret < 0) { + dev_err(queue->ctrl->ctrl.device, + "failed to send request %d\n", ret); +- if (ret != -EPIPE && ret != -ECONNRESET) +- nvme_tcp_fail_request(queue->request); ++ nvme_tcp_fail_request(queue->request); + nvme_tcp_done_send_req(queue); + } + return ret; +@@ -2136,9 +2135,6 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) + + static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown) + { +- cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); +- cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); +- + nvme_tcp_teardown_io_queues(ctrl, shutdown); + blk_mq_quiesce_queue(ctrl->admin_q); + if (shutdown) +@@ -2178,6 +2174,12 @@ out_fail: + nvme_tcp_reconnect_or_remove(ctrl); + } + ++static void nvme_tcp_stop_ctrl(struct nvme_ctrl *ctrl) ++{ ++ cancel_work_sync(&to_tcp_ctrl(ctrl)->err_work); ++ cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work); ++} ++ + static void nvme_tcp_free_ctrl(struct nvme_ctrl *nctrl) + { + struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); +@@ -2500,6 +2502,7 @@ static 
const struct nvme_ctrl_ops nvme_tcp_ctrl_ops = { + .submit_async_event = nvme_tcp_submit_async_event, + .delete_ctrl = nvme_tcp_delete_ctrl, + .get_address = nvmf_get_address, ++ .stop_ctrl = nvme_tcp_stop_ctrl, + }; + + static bool +diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c +index 9c65d560d48f7..e792318c38946 100644 +--- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c ++++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c +@@ -235,11 +235,11 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, + const struct aspeed_sig_expr **funcs; + const struct aspeed_sig_expr ***prios; + +- pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name); +- + if (!pdesc) + return -EINVAL; + ++ pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name); ++ + prios = pdesc->prios; + + if (!prios) +diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c +index e94e59283ecb9..012639f6d3354 100644 +--- a/drivers/platform/x86/hp-wmi.c ++++ b/drivers/platform/x86/hp-wmi.c +@@ -62,6 +62,7 @@ enum hp_wmi_event_ids { + HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D, + HPWMI_PEAKSHIFT_PERIOD = 0x0F, + HPWMI_BATTERY_CHARGE_PERIOD = 0x10, ++ HPWMI_SANITIZATION_MODE = 0x17, + }; + + struct bios_args { +@@ -629,6 +630,8 @@ static void hp_wmi_notify(u32 value, void *context) + break; + case HPWMI_BATTERY_CHARGE_PERIOD: + break; ++ case HPWMI_SANITIZATION_MODE: ++ break; + default: + pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); + break; +diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +index cd41dc061d874..dfe7e6370d84f 100644 +--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c ++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +@@ -2738,6 +2738,7 @@ static int slave_configure_v3_hw(struct scsi_device *sdev) + struct hisi_hba *hisi_hba = shost_priv(shost); + struct device *dev = hisi_hba->dev; + int ret = sas_slave_configure(sdev); ++ unsigned int max_sectors; + + if 
(ret) + return ret; +@@ -2755,6 +2756,12 @@ static int slave_configure_v3_hw(struct scsi_device *sdev) + } + } + ++ /* Set according to IOMMU IOVA caching limit */ ++ max_sectors = min_t(size_t, queue_max_hw_sectors(sdev->request_queue), ++ (PAGE_SIZE * 32) >> SECTOR_SHIFT); ++ ++ blk_queue_max_hw_sectors(sdev->request_queue, max_sectors); ++ + return 0; + } + +diff --git a/drivers/soc/ixp4xx/ixp4xx-npe.c b/drivers/soc/ixp4xx/ixp4xx-npe.c +index 6065aaab67403..8482a4892b83b 100644 +--- a/drivers/soc/ixp4xx/ixp4xx-npe.c ++++ b/drivers/soc/ixp4xx/ixp4xx-npe.c +@@ -735,7 +735,7 @@ static const struct of_device_id ixp4xx_npe_of_match[] = { + static struct platform_driver ixp4xx_npe_driver = { + .driver = { + .name = "ixp4xx-npe", +- .of_match_table = of_match_ptr(ixp4xx_npe_of_match), ++ .of_match_table = ixp4xx_npe_of_match, + }, + .probe = ixp4xx_npe_probe, + .remove = ixp4xx_npe_remove, +diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c +index 7f629544060db..a027cfd49df8a 100644 +--- a/drivers/spi/spi-amd.c ++++ b/drivers/spi/spi-amd.c +@@ -28,6 +28,7 @@ + #define AMD_SPI_RX_COUNT_REG 0x4B + #define AMD_SPI_STATUS_REG 0x4C + ++#define AMD_SPI_FIFO_SIZE 70 + #define AMD_SPI_MEM_SIZE 200 + + /* M_CMD OP codes for SPI */ +@@ -245,6 +246,11 @@ static int amd_spi_master_transfer(struct spi_master *master, + return 0; + } + ++static size_t amd_spi_max_transfer_size(struct spi_device *spi) ++{ ++ return AMD_SPI_FIFO_SIZE; ++} ++ + static int amd_spi_probe(struct platform_device *pdev) + { + struct device *dev = &pdev->dev; +@@ -278,6 +284,8 @@ static int amd_spi_probe(struct platform_device *pdev) + master->flags = SPI_MASTER_HALF_DUPLEX; + master->setup = amd_spi_master_setup; + master->transfer_one_message = amd_spi_master_transfer; ++ master->max_transfer_size = amd_spi_max_transfer_size; ++ master->max_message_size = amd_spi_max_transfer_size; + + /* Register the controller with SPI framework */ + err = devm_spi_register_master(dev, master); +diff --git 
a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c +index cae61d1ebec5a..98ce484f1089d 100644 +--- a/drivers/tty/serial/8250/8250_core.c ++++ b/drivers/tty/serial/8250/8250_core.c +@@ -23,6 +23,7 @@ + #include <linux/sysrq.h> + #include <linux/delay.h> + #include <linux/platform_device.h> ++#include <linux/pm_runtime.h> + #include <linux/tty.h> + #include <linux/ratelimit.h> + #include <linux/tty_flip.h> +@@ -571,6 +572,9 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev) + + up->port.dev = dev; + ++ if (uart_console_enabled(&up->port)) ++ pm_runtime_get_sync(up->port.dev); ++ + serial8250_apply_quirks(up); + uart_add_one_port(drv, &up->port); + } +diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c +index 9cf5177815a87..43884e8b51610 100644 +--- a/drivers/tty/serial/8250/8250_port.c ++++ b/drivers/tty/serial/8250/8250_port.c +@@ -2953,8 +2953,10 @@ static int serial8250_request_std_resource(struct uart_8250_port *up) + case UPIO_MEM32BE: + case UPIO_MEM16: + case UPIO_MEM: +- if (!port->mapbase) ++ if (!port->mapbase) { ++ ret = -EINVAL; + break; ++ } + + if (!request_mem_region(port->mapbase, size, "serial")) { + ret = -EBUSY; +diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c +index 07b19e97f850d..9900ee3f90683 100644 +--- a/drivers/tty/serial/amba-pl011.c ++++ b/drivers/tty/serial/amba-pl011.c +@@ -1326,6 +1326,15 @@ static void pl011_stop_rx(struct uart_port *port) + pl011_dma_rx_stop(uap); + } + ++static void pl011_throttle_rx(struct uart_port *port) ++{ ++ unsigned long flags; ++ ++ spin_lock_irqsave(&port->lock, flags); ++ pl011_stop_rx(port); ++ spin_unlock_irqrestore(&port->lock, flags); ++} ++ + static void pl011_enable_ms(struct uart_port *port) + { + struct uart_amba_port *uap = +@@ -1717,9 +1726,10 @@ static int pl011_allocate_irq(struct uart_amba_port *uap) + */ + static void pl011_enable_interrupts(struct uart_amba_port *uap) + { ++ 
unsigned long flags; + unsigned int i; + +- spin_lock_irq(&uap->port.lock); ++ spin_lock_irqsave(&uap->port.lock, flags); + + /* Clear out any spuriously appearing RX interrupts */ + pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR); +@@ -1741,7 +1751,14 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap) + if (!pl011_dma_rx_running(uap)) + uap->im |= UART011_RXIM; + pl011_write(uap->im, uap, REG_IMSC); +- spin_unlock_irq(&uap->port.lock); ++ spin_unlock_irqrestore(&uap->port.lock, flags); ++} ++ ++static void pl011_unthrottle_rx(struct uart_port *port) ++{ ++ struct uart_amba_port *uap = container_of(port, struct uart_amba_port, port); ++ ++ pl011_enable_interrupts(uap); + } + + static int pl011_startup(struct uart_port *port) +@@ -2116,6 +2133,8 @@ static const struct uart_ops amba_pl011_pops = { + .stop_tx = pl011_stop_tx, + .start_tx = pl011_start_tx, + .stop_rx = pl011_stop_rx, ++ .throttle = pl011_throttle_rx, ++ .unthrottle = pl011_unthrottle_rx, + .enable_ms = pl011_enable_ms, + .break_ctl = pl011_break_ctl, + .startup = pl011_startup, +diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c +index 81faead3c4f80..263c33260d8a8 100644 +--- a/drivers/tty/serial/samsung_tty.c ++++ b/drivers/tty/serial/samsung_tty.c +@@ -361,8 +361,7 @@ static void enable_tx_dma(struct s3c24xx_uart_port *ourport) + /* Enable tx dma mode */ + ucon = rd_regl(port, S3C2410_UCON); + ucon &= ~(S3C64XX_UCON_TXBURST_MASK | S3C64XX_UCON_TXMODE_MASK); +- ucon |= (dma_get_cache_alignment() >= 16) ? 
+- S3C64XX_UCON_TXBURST_16 : S3C64XX_UCON_TXBURST_1; ++ ucon |= S3C64XX_UCON_TXBURST_1; + ucon |= S3C64XX_UCON_TXMODE_DMA; + wr_regl(port, S3C2410_UCON, ucon); + +@@ -634,7 +633,7 @@ static void enable_rx_dma(struct s3c24xx_uart_port *ourport) + S3C64XX_UCON_DMASUS_EN | + S3C64XX_UCON_TIMEOUT_EN | + S3C64XX_UCON_RXMODE_MASK); +- ucon |= S3C64XX_UCON_RXBURST_16 | ++ ucon |= S3C64XX_UCON_RXBURST_1 | + 0xf << S3C64XX_UCON_TIMEOUT_SHIFT | + S3C64XX_UCON_EMPTYINT_EN | + S3C64XX_UCON_TIMEOUT_EN | +diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c +index 32d09d024f6c9..b578f7090b637 100644 +--- a/drivers/tty/serial/serial_core.c ++++ b/drivers/tty/serial/serial_core.c +@@ -1941,11 +1941,6 @@ static int uart_proc_show(struct seq_file *m, void *v) + } + #endif + +-static inline bool uart_console_enabled(struct uart_port *port) +-{ +- return uart_console(port) && (port->cons->flags & CON_ENABLED); +-} +- + static void uart_port_spin_lock_init(struct uart_port *port) + { + spin_lock_init(&port->lock); +diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c +index 8cd9e5b077b64..9377da1e97c08 100644 +--- a/drivers/tty/serial/stm32-usart.c ++++ b/drivers/tty/serial/stm32-usart.c +@@ -70,6 +70,8 @@ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE, + *cr3 |= USART_CR3_DEM; + over8 = *cr1 & USART_CR1_OVER8; + ++ *cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK); ++ + if (over8) + rs485_deat_dedt = delay_ADE * baud * 8; + else +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 0a6336d54a650..f043fd7e0f924 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -855,7 +855,7 @@ static void delete_char(struct vc_data *vc, unsigned int nr) + unsigned short *p = (unsigned short *) vc->vc_pos; + + vc_uniscr_delete(vc, nr); +- scr_memcpyw(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2); ++ scr_memmovew(p, p + nr, (vc->vc_cols - vc->state.x - nr) * 2); + scr_memsetw(p + vc->vc_cols 
- vc->state.x - nr, vc->vc_video_erase_char, + nr * 2); + vc->vc_need_wrap = 0; +diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c +index 05fe6ded66a52..94e9d336855bc 100644 +--- a/drivers/usb/dwc3/gadget.c ++++ b/drivers/usb/dwc3/gadget.c +@@ -3781,7 +3781,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) + } + + evt->count = 0; +- evt->flags &= ~DWC3_EVENT_PENDING; + ret = IRQ_HANDLED; + + /* Unmask interrupt */ +@@ -3794,6 +3793,9 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt) + dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval); + } + ++ /* Keep the clearing of DWC3_EVENT_PENDING at the end */ ++ evt->flags &= ~DWC3_EVENT_PENDING; ++ + return ret; + } + +diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c +index b74621dc2a658..8f980fc6efc19 100644 +--- a/drivers/usb/serial/ftdi_sio.c ++++ b/drivers/usb/serial/ftdi_sio.c +@@ -1023,6 +1023,9 @@ static const struct usb_device_id id_table_combined[] = { + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_DISPLAY_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_LITE_PID) }, + { USB_DEVICE(FTDI_VID, CHETCO_SEASMART_ANALOG_PID) }, ++ /* Belimo Automation devices */ ++ { USB_DEVICE(FTDI_VID, BELIMO_ZTH_PID) }, ++ { USB_DEVICE(FTDI_VID, BELIMO_ZIP_PID) }, + /* ICP DAS I-756xU devices */ + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7560U_PID) }, + { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, +diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h +index d1a9564697a4b..4e92c165c86bf 100644 +--- a/drivers/usb/serial/ftdi_sio_ids.h ++++ b/drivers/usb/serial/ftdi_sio_ids.h +@@ -1568,6 +1568,12 @@ + #define CHETCO_SEASMART_LITE_PID 0xA5AE /* SeaSmart Lite USB Adapter */ + #define CHETCO_SEASMART_ANALOG_PID 0xA5AF /* SeaSmart Analog Adapter */ + ++/* ++ * Belimo Automation ++ */ ++#define BELIMO_ZTH_PID 0x8050 ++#define BELIMO_ZIP_PID 0xC811 ++ + /* + * Unjo AB + */ +diff --git a/drivers/usb/typec/class.c 
b/drivers/usb/typec/class.c +index c7d44daa05c4a..9d3a35b2046d3 100644 +--- a/drivers/usb/typec/class.c ++++ b/drivers/usb/typec/class.c +@@ -1444,6 +1444,7 @@ void typec_set_pwr_opmode(struct typec_port *port, + partner->usb_pd = 1; + sysfs_notify(&partner_dev->kobj, NULL, + "supports_usb_power_delivery"); ++ kobject_uevent(&partner_dev->kobj, KOBJ_CHANGE); + } + put_device(partner_dev); + } +diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c +index 5c970e6f664c8..e8ef0c66e558f 100644 +--- a/drivers/virtio/virtio_mmio.c ++++ b/drivers/virtio/virtio_mmio.c +@@ -62,6 +62,7 @@ + #include <linux/list.h> + #include <linux/module.h> + #include <linux/platform_device.h> ++#include <linux/pm.h> + #include <linux/slab.h> + #include <linux/spinlock.h> + #include <linux/virtio.h> +@@ -543,6 +544,28 @@ static const struct virtio_config_ops virtio_mmio_config_ops = { + .get_shm_region = vm_get_shm_region, + }; + ++#ifdef CONFIG_PM_SLEEP ++static int virtio_mmio_freeze(struct device *dev) ++{ ++ struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev); ++ ++ return virtio_device_freeze(&vm_dev->vdev); ++} ++ ++static int virtio_mmio_restore(struct device *dev) ++{ ++ struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev); ++ ++ if (vm_dev->version == 1) ++ writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); ++ ++ return virtio_device_restore(&vm_dev->vdev); ++} ++ ++static const struct dev_pm_ops virtio_mmio_pm_ops = { ++ SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore) ++}; ++#endif + + static void virtio_mmio_release_dev(struct device *_d) + { +@@ -787,6 +810,9 @@ static struct platform_driver virtio_mmio_driver = { + .name = "virtio-mmio", + .of_match_table = virtio_mmio_match, + .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match), ++#ifdef CONFIG_PM_SLEEP ++ .pm = &virtio_mmio_pm_ops, ++#endif + }, + }; + +diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c +index 4a5248097d7aa..779b7745cdc48 100644 +--- a/fs/btrfs/inode.c 
++++ b/fs/btrfs/inode.c +@@ -7480,7 +7480,19 @@ static int btrfs_dio_iomap_begin(struct inode *inode, loff_t start, + if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || + em->block_start == EXTENT_MAP_INLINE) { + free_extent_map(em); +- ret = -ENOTBLK; ++ /* ++ * If we are in a NOWAIT context, return -EAGAIN in order to ++ * fallback to buffered IO. This is not only because we can ++ * block with buffered IO (no support for NOWAIT semantics at ++ * the moment) but also to avoid returning short reads to user ++ * space - this happens if we were able to read some data from ++ * previous non-compressed extents and then when we fallback to ++ * buffered IO, at btrfs_file_read_iter() by calling ++ * filemap_read(), we fail to fault in pages for the read buffer, ++ * in which case filemap_read() returns a short read (the number ++ * of bytes previously read is > 0, so it does not return -EFAULT). ++ */ ++ ret = (flags & IOMAP_NOWAIT) ? -EAGAIN : -ENOTBLK; + goto unlock_err; + } + +diff --git a/fs/exec.c b/fs/exec.c +index bcd86f2d176c3..d37a82206fa31 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1286,7 +1286,7 @@ int begin_new_exec(struct linux_binprm * bprm) + bprm->mm = NULL; + + #ifdef CONFIG_POSIX_TIMERS +- exit_itimers(me->signal); ++ exit_itimers(me); + flush_itimer_signals(); + #endif + +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 6641b74ad4620..0f49bf547b848 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -4691,16 +4691,17 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) + return -EOPNOTSUPP; + + ext4_fc_start_update(inode); ++ inode_lock(inode); ++ ret = ext4_convert_inline_data(inode); ++ inode_unlock(inode); ++ if (ret) ++ goto exit; + + if (mode & FALLOC_FL_PUNCH_HOLE) { + ret = ext4_punch_hole(file, offset, len); + goto exit; + } + +- ret = ext4_convert_inline_data(inode); +- if (ret) +- goto exit; +- + if (mode & FALLOC_FL_COLLAPSE_RANGE) { + ret = ext4_collapse_range(file, offset, len); + goto exit; 
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 72e3f55f1e07a..bd0d0a10ca429 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -4042,15 +4042,6 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) + + trace_ext4_punch_hole(inode, offset, length, 0); + +- ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA); +- if (ext4_has_inline_data(inode)) { +- down_write(&EXT4_I(inode)->i_mmap_sem); +- ret = ext4_convert_inline_data(inode); +- up_write(&EXT4_I(inode)->i_mmap_sem); +- if (ret) +- return ret; +- } +- + /* + * Write out all dirty pages to avoid race conditions + * Then release them. +diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h +index 9ca165bc97d2b..ace27a89fbb07 100644 +--- a/fs/nilfs2/nilfs.h ++++ b/fs/nilfs2/nilfs.h +@@ -198,6 +198,9 @@ static inline int nilfs_acl_chmod(struct inode *inode) + + static inline int nilfs_init_acl(struct inode *inode, struct inode *dir) + { ++ if (S_ISLNK(inode->i_mode)) ++ return 0; ++ + inode->i_mode &= ~current_umask(); + return 0; + } +diff --git a/fs/remap_range.c b/fs/remap_range.c +index e6099beefa97d..e8e00e217d6c9 100644 +--- a/fs/remap_range.c ++++ b/fs/remap_range.c +@@ -71,7 +71,8 @@ static int generic_remap_checks(struct file *file_in, loff_t pos_in, + * Otherwise, make sure the count is also block-aligned, having + * already confirmed the starting offsets' block alignment. + */ +- if (pos_in + count == size_in) { ++ if (pos_in + count == size_in && ++ (!(remap_flags & REMAP_FILE_DEDUP) || pos_out + count == size_out)) { + bcount = ALIGN(size_in, bs) - pos_in; + } else { + if (!IS_ALIGNED(count, bs)) +diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h +index fee0b5547cd0a..c9fafca1c30c5 100644 +--- a/include/linux/cgroup-defs.h ++++ b/include/linux/cgroup-defs.h +@@ -260,7 +260,8 @@ struct css_set { + * List of csets participating in the on-going migration either as + * source or destination. Protected by cgroup_mutex. 
+ */ +- struct list_head mg_preload_node; ++ struct list_head mg_src_preload_node; ++ struct list_head mg_dst_preload_node; + struct list_head mg_node; + + /* +diff --git a/include/linux/kexec.h b/include/linux/kexec.h +index 037192c3a46f7..a1f12e959bbad 100644 +--- a/include/linux/kexec.h ++++ b/include/linux/kexec.h +@@ -442,6 +442,12 @@ static inline int kexec_crash_loaded(void) { return 0; } + #define kexec_in_progress false + #endif /* CONFIG_KEXEC_CORE */ + ++#ifdef CONFIG_KEXEC_SIG ++void set_kexec_sig_enforced(void); ++#else ++static inline void set_kexec_sig_enforced(void) {} ++#endif ++ + #endif /* !defined(__ASSEBMLY__) */ + + #endif /* LINUX_KEXEC_H */ +diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h +index fa75f325dad53..eeacb4a16fe3f 100644 +--- a/include/linux/sched/task.h ++++ b/include/linux/sched/task.h +@@ -82,7 +82,7 @@ static inline void exit_thread(struct task_struct *tsk) + extern void do_group_exit(int); + + extern void exit_files(struct task_struct *); +-extern void exit_itimers(struct signal_struct *); ++extern void exit_itimers(struct task_struct *); + + extern pid_t kernel_clone(struct kernel_clone_args *kargs); + struct task_struct *fork_idle(int); +diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h +index 35b26743dbb28..9c1292ea47fdc 100644 +--- a/include/linux/serial_core.h ++++ b/include/linux/serial_core.h +@@ -394,6 +394,11 @@ static const bool earlycon_acpi_spcr_enable EARLYCON_USED_OR_UNUSED; + static inline int setup_earlycon(char *buf) { return 0; } + #endif + ++static inline bool uart_console_enabled(struct uart_port *port) ++{ ++ return uart_console(port) && (port->cons->flags & CON_ENABLED); ++} ++ + struct uart_port *uart_get_console(struct uart_port *ports, int nr, + struct console *c); + int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, +diff --git a/include/net/raw.h b/include/net/raw.h +index 8ad8df5948536..c51a635671a73 100644 +--- 
a/include/net/raw.h ++++ b/include/net/raw.h +@@ -75,7 +75,7 @@ static inline bool raw_sk_bound_dev_eq(struct net *net, int bound_dev_if, + int dif, int sdif) + { + #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) +- return inet_bound_dev_eq(!!net->ipv4.sysctl_raw_l3mdev_accept, ++ return inet_bound_dev_eq(READ_ONCE(net->ipv4.sysctl_raw_l3mdev_accept), + bound_dev_if, dif, sdif); + #else + return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); +diff --git a/include/net/sock.h b/include/net/sock.h +index 2c11eb4abdd24..83854cec4a471 100644 +--- a/include/net/sock.h ++++ b/include/net/sock.h +@@ -1445,7 +1445,7 @@ void __sk_mem_reclaim(struct sock *sk, int amount); + /* sysctl_mem values are in pages, we convert them in SK_MEM_QUANTUM units */ + static inline long sk_prot_mem_limits(const struct sock *sk, int index) + { +- long val = sk->sk_prot->sysctl_mem[index]; ++ long val = READ_ONCE(sk->sk_prot->sysctl_mem[index]); + + #if PAGE_SIZE > SK_MEM_QUANTUM + val <<= PAGE_SHIFT - SK_MEM_QUANTUM_SHIFT; +diff --git a/include/net/tls.h b/include/net/tls.h +index 745b3bc6ce91d..d9cb597cab46a 100644 +--- a/include/net/tls.h ++++ b/include/net/tls.h +@@ -707,7 +707,7 @@ int tls_sw_fallback_init(struct sock *sk, + struct tls_crypto_info *crypto_info); + + #ifdef CONFIG_TLS_DEVICE +-void tls_device_init(void); ++int tls_device_init(void); + void tls_device_cleanup(void); + void tls_device_sk_destruct(struct sock *sk); + int tls_set_device_offload(struct sock *sk, struct tls_context *ctx); +@@ -727,7 +727,7 @@ static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk) + return tls_get_ctx(sk)->rx_conf == TLS_HW; + } + #else +-static inline void tls_device_init(void) {} ++static inline int tls_device_init(void) { return 0; } + static inline void tls_device_cleanup(void) {} + + static inline int +diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h +index a966d4b5ab377..905b151bc3dd9 100644 +--- a/include/trace/events/sock.h ++++ b/include/trace/events/sock.h 
+@@ -98,7 +98,7 @@ TRACE_EVENT(sock_exceed_buf_limit, + + TP_STRUCT__entry( + __array(char, name, 32) +- __field(long *, sysctl_mem) ++ __array(long, sysctl_mem, 3) + __field(long, allocated) + __field(int, sysctl_rmem) + __field(int, rmem_alloc) +@@ -110,7 +110,9 @@ TRACE_EVENT(sock_exceed_buf_limit, + + TP_fast_assign( + strncpy(__entry->name, prot->name, 32); +- __entry->sysctl_mem = prot->sysctl_mem; ++ __entry->sysctl_mem[0] = READ_ONCE(prot->sysctl_mem[0]); ++ __entry->sysctl_mem[1] = READ_ONCE(prot->sysctl_mem[1]); ++ __entry->sysctl_mem[2] = READ_ONCE(prot->sysctl_mem[2]); + __entry->allocated = allocated; + __entry->sysctl_rmem = sk_get_rmem0(sk, prot); + __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); +diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c +index 0853289d321a5..5046c99deba86 100644 +--- a/kernel/cgroup/cgroup.c ++++ b/kernel/cgroup/cgroup.c +@@ -736,7 +736,8 @@ struct css_set init_css_set = { + .task_iters = LIST_HEAD_INIT(init_css_set.task_iters), + .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets), + .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links), +- .mg_preload_node = LIST_HEAD_INIT(init_css_set.mg_preload_node), ++ .mg_src_preload_node = LIST_HEAD_INIT(init_css_set.mg_src_preload_node), ++ .mg_dst_preload_node = LIST_HEAD_INIT(init_css_set.mg_dst_preload_node), + .mg_node = LIST_HEAD_INIT(init_css_set.mg_node), + + /* +@@ -1211,7 +1212,8 @@ static struct css_set *find_css_set(struct css_set *old_cset, + INIT_LIST_HEAD(&cset->threaded_csets); + INIT_HLIST_NODE(&cset->hlist); + INIT_LIST_HEAD(&cset->cgrp_links); +- INIT_LIST_HEAD(&cset->mg_preload_node); ++ INIT_LIST_HEAD(&cset->mg_src_preload_node); ++ INIT_LIST_HEAD(&cset->mg_dst_preload_node); + INIT_LIST_HEAD(&cset->mg_node); + + /* Copy the set of subsystem state objects generated in +@@ -2556,21 +2558,27 @@ int cgroup_migrate_vet_dst(struct cgroup *dst_cgrp) + */ + void cgroup_migrate_finish(struct cgroup_mgctx *mgctx) + { +- LIST_HEAD(preloaded); + 
struct css_set *cset, *tmp_cset; + + lockdep_assert_held(&cgroup_mutex); + + spin_lock_irq(&css_set_lock); + +- list_splice_tail_init(&mgctx->preloaded_src_csets, &preloaded); +- list_splice_tail_init(&mgctx->preloaded_dst_csets, &preloaded); ++ list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_src_csets, ++ mg_src_preload_node) { ++ cset->mg_src_cgrp = NULL; ++ cset->mg_dst_cgrp = NULL; ++ cset->mg_dst_cset = NULL; ++ list_del_init(&cset->mg_src_preload_node); ++ put_css_set_locked(cset); ++ } + +- list_for_each_entry_safe(cset, tmp_cset, &preloaded, mg_preload_node) { ++ list_for_each_entry_safe(cset, tmp_cset, &mgctx->preloaded_dst_csets, ++ mg_dst_preload_node) { + cset->mg_src_cgrp = NULL; + cset->mg_dst_cgrp = NULL; + cset->mg_dst_cset = NULL; +- list_del_init(&cset->mg_preload_node); ++ list_del_init(&cset->mg_dst_preload_node); + put_css_set_locked(cset); + } + +@@ -2612,7 +2620,7 @@ void cgroup_migrate_add_src(struct css_set *src_cset, + + src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root); + +- if (!list_empty(&src_cset->mg_preload_node)) ++ if (!list_empty(&src_cset->mg_src_preload_node)) + return; + + WARN_ON(src_cset->mg_src_cgrp); +@@ -2623,7 +2631,7 @@ void cgroup_migrate_add_src(struct css_set *src_cset, + src_cset->mg_src_cgrp = src_cgrp; + src_cset->mg_dst_cgrp = dst_cgrp; + get_css_set(src_cset); +- list_add_tail(&src_cset->mg_preload_node, &mgctx->preloaded_src_csets); ++ list_add_tail(&src_cset->mg_src_preload_node, &mgctx->preloaded_src_csets); + } + + /** +@@ -2648,7 +2656,7 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) + + /* look up the dst cset for each src cset and link it to src */ + list_for_each_entry_safe(src_cset, tmp_cset, &mgctx->preloaded_src_csets, +- mg_preload_node) { ++ mg_src_preload_node) { + struct css_set *dst_cset; + struct cgroup_subsys *ss; + int ssid; +@@ -2667,7 +2675,7 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) + if (src_cset == dst_cset) { + src_cset->mg_src_cgrp = 
NULL; + src_cset->mg_dst_cgrp = NULL; +- list_del_init(&src_cset->mg_preload_node); ++ list_del_init(&src_cset->mg_src_preload_node); + put_css_set(src_cset); + put_css_set(dst_cset); + continue; +@@ -2675,8 +2683,8 @@ int cgroup_migrate_prepare_dst(struct cgroup_mgctx *mgctx) + + src_cset->mg_dst_cset = dst_cset; + +- if (list_empty(&dst_cset->mg_preload_node)) +- list_add_tail(&dst_cset->mg_preload_node, ++ if (list_empty(&dst_cset->mg_dst_preload_node)) ++ list_add_tail(&dst_cset->mg_dst_preload_node, + &mgctx->preloaded_dst_csets); + else + put_css_set(dst_cset); +@@ -2922,7 +2930,8 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp) + goto out_finish; + + spin_lock_irq(&css_set_lock); +- list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, mg_preload_node) { ++ list_for_each_entry(src_cset, &mgctx.preloaded_src_csets, ++ mg_src_preload_node) { + struct task_struct *task, *ntask; + + /* all tasks in src_csets need to be migrated */ +diff --git a/kernel/exit.c b/kernel/exit.c +index d13d67fc5f4e2..ab900b661867f 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -782,7 +782,7 @@ void __noreturn do_exit(long code) + + #ifdef CONFIG_POSIX_TIMERS + hrtimer_cancel(&tsk->signal->real_timer); +- exit_itimers(tsk->signal); ++ exit_itimers(tsk); + #endif + if (tsk->mm) + setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); +diff --git a/kernel/kexec_file.c b/kernel/kexec_file.c +index 2e0f0b3fb9ab0..fff11916aba33 100644 +--- a/kernel/kexec_file.c ++++ b/kernel/kexec_file.c +@@ -29,6 +29,15 @@ + #include <linux/vmalloc.h> + #include "kexec_internal.h" + ++#ifdef CONFIG_KEXEC_SIG ++static bool sig_enforce = IS_ENABLED(CONFIG_KEXEC_SIG_FORCE); ++ ++void set_kexec_sig_enforced(void) ++{ ++ sig_enforce = true; ++} ++#endif ++ + static int kexec_calculate_store_digests(struct kimage *image); + + /* +@@ -159,7 +168,7 @@ kimage_validate_signature(struct kimage *image) + image->kernel_buf_len); + if (ret) { + +- if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) { ++ if 
(sig_enforce) { + pr_notice("Enforced kernel signature verification failed (%d).\n", ret); + return ret; + } +diff --git a/kernel/signal.c b/kernel/signal.c +index 6bb2df4f6109d..d05f783d5a5e6 100644 +--- a/kernel/signal.c ++++ b/kernel/signal.c +@@ -1912,12 +1912,12 @@ bool do_notify_parent(struct task_struct *tsk, int sig) + bool autoreap = false; + u64 utime, stime; + +- BUG_ON(sig == -1); ++ WARN_ON_ONCE(sig == -1); + +- /* do_notify_parent_cldstop should have been called instead. */ +- BUG_ON(task_is_stopped_or_traced(tsk)); ++ /* do_notify_parent_cldstop should have been called instead. */ ++ WARN_ON_ONCE(task_is_stopped_or_traced(tsk)); + +- BUG_ON(!tsk->ptrace && ++ WARN_ON_ONCE(!tsk->ptrace && + (tsk->group_leader != tsk || !thread_group_empty(tsk))); + + /* Wake up all pidfd waiters */ +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 8832440a4938e..f0dd1a3b66eb9 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -557,14 +557,14 @@ static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, + if (*negp) { + if (*lvalp > (unsigned long) INT_MAX + 1) + return -EINVAL; +- *valp = -*lvalp; ++ WRITE_ONCE(*valp, -*lvalp); + } else { + if (*lvalp > (unsigned long) INT_MAX) + return -EINVAL; +- *valp = *lvalp; ++ WRITE_ONCE(*valp, *lvalp); + } + } else { +- int val = *valp; ++ int val = READ_ONCE(*valp); + if (val < 0) { + *negp = true; + *lvalp = -(unsigned long)val; +@@ -583,9 +583,9 @@ static int do_proc_douintvec_conv(unsigned long *lvalp, + if (write) { + if (*lvalp > UINT_MAX) + return -EINVAL; +- *valp = *lvalp; ++ WRITE_ONCE(*valp, *lvalp); + } else { +- unsigned int val = *valp; ++ unsigned int val = READ_ONCE(*valp); + *lvalp = (unsigned long)val; + } + return 0; +@@ -959,7 +959,7 @@ static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp, + if ((param->min && *param->min > tmp) || + (param->max && *param->max < tmp)) + return -EINVAL; +- *valp = tmp; ++ WRITE_ONCE(*valp, tmp); + } + + return 0; +@@ -1025,7 +1025,7 @@ 
static int do_proc_douintvec_minmax_conv(unsigned long *lvalp, + (param->max && *param->max < tmp)) + return -ERANGE; + +- *valp = tmp; ++ WRITE_ONCE(*valp, tmp); + } + + return 0; +@@ -1193,9 +1193,9 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, + err = -EINVAL; + break; + } +- *i = val; ++ WRITE_ONCE(*i, val); + } else { +- val = convdiv * (*i) / convmul; ++ val = convdiv * READ_ONCE(*i) / convmul; + if (!first) + proc_put_char(&buffer, &left, '\t'); + proc_put_long(&buffer, &left, val, false); +@@ -1276,9 +1276,12 @@ static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp, + if (write) { + if (*lvalp > INT_MAX / HZ) + return 1; +- *valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ); ++ if (*negp) ++ WRITE_ONCE(*valp, -*lvalp * HZ); ++ else ++ WRITE_ONCE(*valp, *lvalp * HZ); + } else { +- int val = *valp; ++ int val = READ_ONCE(*valp); + unsigned long lval; + if (val < 0) { + *negp = true; +@@ -1324,9 +1327,9 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp, + + if (jif > INT_MAX) + return 1; +- *valp = (int)jif; ++ WRITE_ONCE(*valp, (int)jif); + } else { +- int val = *valp; ++ int val = READ_ONCE(*valp); + unsigned long lval; + if (val < 0) { + *negp = true; +@@ -1394,8 +1397,8 @@ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, + * @ppos: the current position in the file + * + * Reads/writes up to table->maxlen/sizeof(unsigned int) integer +- * values from/to the user buffer, treated as an ASCII string. +- * The values read are assumed to be in 1/1000 seconds, and ++ * values from/to the user buffer, treated as an ASCII string. ++ * The values read are assumed to be in 1/1000 seconds, and + * are converted into jiffies. + * + * Returns 0 on success. 
+@@ -2811,6 +2814,17 @@ static struct ctl_table vm_table[] = { + .extra1 = SYSCTL_ZERO, + .extra2 = &two_hundred, + }, ++#ifdef CONFIG_NUMA ++ { ++ .procname = "numa_stat", ++ .data = &sysctl_vm_numa_stat, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = sysctl_vm_numa_stat_handler, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++#endif + #ifdef CONFIG_HUGETLB_PAGE + { + .procname = "nr_hugepages", +@@ -2827,15 +2841,6 @@ static struct ctl_table vm_table[] = { + .mode = 0644, + .proc_handler = &hugetlb_mempolicy_sysctl_handler, + }, +- { +- .procname = "numa_stat", +- .data = &sysctl_vm_numa_stat, +- .maxlen = sizeof(int), +- .mode = 0644, +- .proc_handler = sysctl_vm_numa_stat_handler, +- .extra1 = SYSCTL_ZERO, +- .extra2 = SYSCTL_ONE, +- }, + #endif + { + .procname = "hugetlb_shm_group", +diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c +index dd5697d7347b1..b624788023d8f 100644 +--- a/kernel/time/posix-timers.c ++++ b/kernel/time/posix-timers.c +@@ -1051,15 +1051,24 @@ retry_delete: + } + + /* +- * This is called by do_exit or de_thread, only when there are no more +- * references to the shared signal_struct. ++ * This is called by do_exit or de_thread, only when nobody else can ++ * modify the signal->posix_timers list. Yet we need sighand->siglock ++ * to prevent the race with /proc/pid/timers. 
+ */ +-void exit_itimers(struct signal_struct *sig) ++void exit_itimers(struct task_struct *tsk) + { ++ struct list_head timers; + struct k_itimer *tmr; + +- while (!list_empty(&sig->posix_timers)) { +- tmr = list_entry(sig->posix_timers.next, struct k_itimer, list); ++ if (list_empty(&tsk->signal->posix_timers)) ++ return; ++ ++ spin_lock_irq(&tsk->sighand->siglock); ++ list_replace_init(&tsk->signal->posix_timers, &timers); ++ spin_unlock_irq(&tsk->sighand->siglock); ++ ++ while (!list_empty(&timers)) { ++ tmr = list_first_entry(&timers, struct k_itimer, list); + itimer_delete(tmr); + } + } +diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c +index 3ed1723b68d56..fd54168294456 100644 +--- a/kernel/trace/trace_events_hist.c ++++ b/kernel/trace/trace_events_hist.c +@@ -3943,6 +3943,8 @@ static int parse_var_defs(struct hist_trigger_data *hist_data) + + s = kstrdup(field_str, GFP_KERNEL); + if (!s) { ++ kfree(hist_data->attrs->var_defs.name[n_vars]); ++ hist_data->attrs->var_defs.name[n_vars] = NULL; + ret = -ENOMEM; + goto free; + } +diff --git a/mm/memory.c b/mm/memory.c +index 72236b1ce5903..cc50fa0f4590d 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -4365,6 +4365,19 @@ static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) + + static vm_fault_t create_huge_pud(struct vm_fault *vmf) + { ++#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ ++ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) ++ /* No support for anonymous transparent PUD pages yet */ ++ if (vma_is_anonymous(vmf->vma)) ++ return VM_FAULT_FALLBACK; ++ if (vmf->vma->vm_ops->huge_fault) ++ return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); ++#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ ++ return VM_FAULT_FALLBACK; ++} ++ ++static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) ++{ + #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) + /* No support for anonymous transparent PUD pages yet */ 
+@@ -4379,19 +4392,7 @@ static vm_fault_t create_huge_pud(struct vm_fault *vmf) + split: + /* COW or write-notify not handled on PUD level: split pud.*/ + __split_huge_pud(vmf->vma, vmf->pud, vmf->address); +-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +- return VM_FAULT_FALLBACK; +-} +- +-static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud) +-{ +-#ifdef CONFIG_TRANSPARENT_HUGEPAGE +- /* No support for anonymous transparent PUD pages yet */ +- if (vma_is_anonymous(vmf->vma)) +- return VM_FAULT_FALLBACK; +- if (vmf->vma->vm_ops->huge_fault) +- return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD); +-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ ++#endif /* CONFIG_TRANSPARENT_HUGEPAGE && CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ + return VM_FAULT_FALLBACK; + } + +diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c +index 68c0d0f928908..10a2c7bca7199 100644 +--- a/net/bridge/br_netfilter_hooks.c ++++ b/net/bridge/br_netfilter_hooks.c +@@ -1012,9 +1012,24 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, + return okfn(net, sk, skb); + + ops = nf_hook_entries_get_hook_ops(e); +- for (i = 0; i < e->num_hook_entries && +- ops[i]->priority <= NF_BR_PRI_BRNF; i++) +- ; ++ for (i = 0; i < e->num_hook_entries; i++) { ++ /* These hooks have already been called */ ++ if (ops[i]->priority < NF_BR_PRI_BRNF) ++ continue; ++ ++ /* These hooks have not been called yet, run them. */ ++ if (ops[i]->priority > NF_BR_PRI_BRNF) ++ break; ++ ++ /* take a closer look at NF_BR_PRI_BRNF. */ ++ if (ops[i]->hook == br_nf_pre_routing) { ++ /* This hook diverted the skb to this function, ++ * hooks after this have not been run yet. 
++ */ ++ i++; ++ break; ++ } ++ } + + nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev, + sk, net, okfn); +diff --git a/net/core/filter.c b/net/core/filter.c +index 246947fbc9581..34ae30503ac4f 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -5624,7 +5624,6 @@ static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len + if (err) + return err; + +- ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + + return seg6_lookup_nexthop(skb, NULL, 0); +diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c +index 742218594741a..e77283069c7b7 100644 +--- a/net/ipv4/af_inet.c ++++ b/net/ipv4/af_inet.c +@@ -1245,7 +1245,7 @@ static int inet_sk_reselect_saddr(struct sock *sk) + if (new_saddr == old_saddr) + return 0; + +- if (sock_net(sk)->ipv4.sysctl_ip_dynaddr > 1) { ++ if (READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) > 1) { + pr_info("%s(): shifting inet->saddr from %pI4 to %pI4\n", + __func__, &old_saddr, &new_saddr); + } +@@ -1300,7 +1300,7 @@ int inet_sk_rebuild_header(struct sock *sk) + * Other protocols have to map its equivalent state to TCP_SYN_SENT. + * DCCP maps its DCCP_REQUESTING state to TCP_SYN_SENT. 
-acme + */ +- if (!sock_net(sk)->ipv4.sysctl_ip_dynaddr || ++ if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_ip_dynaddr) || + sk->sk_state != TCP_SYN_SENT || + (sk->sk_userlocks & SOCK_BINDADDR_LOCK) || + (err = inet_sk_reselect_saddr(sk)) != 0) +diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c +index ca217a6f488f6..d4a4160159a92 100644 +--- a/net/ipv4/cipso_ipv4.c ++++ b/net/ipv4/cipso_ipv4.c +@@ -240,7 +240,7 @@ static int cipso_v4_cache_check(const unsigned char *key, + struct cipso_v4_map_cache_entry *prev_entry = NULL; + u32 hash; + +- if (!cipso_v4_cache_enabled) ++ if (!READ_ONCE(cipso_v4_cache_enabled)) + return -ENOENT; + + hash = cipso_v4_map_cache_hash(key, key_len); +@@ -297,13 +297,14 @@ static int cipso_v4_cache_check(const unsigned char *key, + int cipso_v4_cache_add(const unsigned char *cipso_ptr, + const struct netlbl_lsm_secattr *secattr) + { ++ int bkt_size = READ_ONCE(cipso_v4_cache_bucketsize); + int ret_val = -EPERM; + u32 bkt; + struct cipso_v4_map_cache_entry *entry = NULL; + struct cipso_v4_map_cache_entry *old_entry = NULL; + u32 cipso_ptr_len; + +- if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) ++ if (!READ_ONCE(cipso_v4_cache_enabled) || bkt_size <= 0) + return 0; + + cipso_ptr_len = cipso_ptr[1]; +@@ -323,7 +324,7 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr, + + bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); + spin_lock_bh(&cipso_v4_cache[bkt].lock); +- if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { ++ if (cipso_v4_cache[bkt].size < bkt_size) { + list_add(&entry->list, &cipso_v4_cache[bkt].list); + cipso_v4_cache[bkt].size += 1; + } else { +@@ -1200,7 +1201,8 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def, + /* This will send packets using the "optimized" format when + * possible as specified in section 3.4.2.6 of the + * CIPSO draft. 
*/ +- if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) ++ if (READ_ONCE(cipso_v4_rbm_optfmt) && ret_val > 0 && ++ ret_val <= 10) + tag_len = 14; + else + tag_len = 4 + ret_val; +@@ -1604,7 +1606,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option) + * all the CIPSO validations here but it doesn't + * really specify _exactly_ what we need to validate + * ... so, just make it a sysctl tunable. */ +- if (cipso_v4_rbm_strictvalid) { ++ if (READ_ONCE(cipso_v4_rbm_strictvalid)) { + if (cipso_v4_map_lvl_valid(doi_def, + tag[3]) < 0) { + err_offset = opt_iter + 3; +diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c +index c8c7b76c3b2e2..70c866308abea 100644 +--- a/net/ipv4/fib_semantics.c ++++ b/net/ipv4/fib_semantics.c +@@ -1229,7 +1229,7 @@ static int fib_check_nh_nongw(struct net *net, struct fib_nh *nh, + + nh->fib_nh_dev = in_dev->dev; + dev_hold(nh->fib_nh_dev); +- nh->fib_nh_scope = RT_SCOPE_HOST; ++ nh->fib_nh_scope = RT_SCOPE_LINK; + if (!netif_carrier_ok(nh->fib_nh_dev)) + nh->fib_nh_flags |= RTNH_F_LINKDOWN; + err = 0; +@@ -1831,7 +1831,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event, + goto nla_put_failure; + if (nexthop_is_blackhole(fi->nh)) + rtm->rtm_type = RTN_BLACKHOLE; +- if (!fi->fib_net->ipv4.sysctl_nexthop_compat_mode) ++ if (!READ_ONCE(fi->fib_net->ipv4.sysctl_nexthop_compat_mode)) + goto offload; + } + +diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c +index ffc5332f13906..a28f525e2c474 100644 +--- a/net/ipv4/fib_trie.c ++++ b/net/ipv4/fib_trie.c +@@ -497,7 +497,7 @@ static void tnode_free(struct key_vector *tn) + tn = container_of(head, struct tnode, rcu)->kv; + } + +- if (tnode_free_size >= sysctl_fib_sync_mem) { ++ if (tnode_free_size >= READ_ONCE(sysctl_fib_sync_mem)) { + tnode_free_size = 0; + synchronize_rcu(); + } +diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c +index cd65d3146c300..0fa0da1d71f57 100644 +--- a/net/ipv4/icmp.c ++++ b/net/ipv4/icmp.c +@@ -261,11 
+261,12 @@ bool icmp_global_allow(void) + spin_lock(&icmp_global.lock); + delta = min_t(u32, now - icmp_global.stamp, HZ); + if (delta >= HZ / 50) { +- incr = sysctl_icmp_msgs_per_sec * delta / HZ ; ++ incr = READ_ONCE(sysctl_icmp_msgs_per_sec) * delta / HZ; + if (incr) + WRITE_ONCE(icmp_global.stamp, now); + } +- credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst); ++ credit = min_t(u32, icmp_global.credit + incr, ++ READ_ONCE(sysctl_icmp_msgs_burst)); + if (credit) { + /* We want to use a credit of one in average, but need to randomize + * it for security reasons. +@@ -289,7 +290,7 @@ static bool icmpv4_mask_allow(struct net *net, int type, int code) + return true; + + /* Limit if icmp type is enabled in ratemask. */ +- if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask)) ++ if (!((1 << type) & READ_ONCE(net->ipv4.sysctl_icmp_ratemask))) + return true; + + return false; +@@ -327,7 +328,8 @@ static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt, + + vif = l3mdev_master_ifindex(dst->dev); + peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1); +- rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit); ++ rc = inet_peer_xrlim_allow(peer, ++ READ_ONCE(net->ipv4.sysctl_icmp_ratelimit)); + if (peer) + inet_putpeer(peer); + out: +diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c +index ff327a62c9ce9..a18668552d33d 100644 +--- a/net/ipv4/inetpeer.c ++++ b/net/ipv4/inetpeer.c +@@ -148,16 +148,20 @@ static void inet_peer_gc(struct inet_peer_base *base, + struct inet_peer *gc_stack[], + unsigned int gc_cnt) + { ++ int peer_threshold, peer_maxttl, peer_minttl; + struct inet_peer *p; + __u32 delta, ttl; + int i; + +- if (base->total >= inet_peer_threshold) ++ peer_threshold = READ_ONCE(inet_peer_threshold); ++ peer_maxttl = READ_ONCE(inet_peer_maxttl); ++ peer_minttl = READ_ONCE(inet_peer_minttl); ++ ++ if (base->total >= peer_threshold) + ttl = 0; /* be aggressive */ + else +- ttl = inet_peer_maxttl +- - (inet_peer_maxttl 
- inet_peer_minttl) / HZ * +- base->total / inet_peer_threshold * HZ; ++ ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ * ++ base->total / peer_threshold * HZ; + for (i = 0; i < gc_cnt; i++) { + p = gc_stack[i]; + +diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c +index 8bd3f5e3c0e7a..2a17dc9413ae9 100644 +--- a/net/ipv4/nexthop.c ++++ b/net/ipv4/nexthop.c +@@ -882,7 +882,7 @@ static void __remove_nexthop_fib(struct net *net, struct nexthop *nh) + /* __ip6_del_rt does a release, so do a hold here */ + fib6_info_hold(f6i); + ipv6_stub->ip6_del_rt(net, f6i, +- !net->ipv4.sysctl_nexthop_compat_mode); ++ !READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)); + } + } + +@@ -1194,7 +1194,8 @@ out: + if (!rc) { + nh_base_seq_inc(net); + nexthop_notify(RTM_NEWNEXTHOP, new_nh, &cfg->nlinfo); +- if (replace_notify && net->ipv4.sysctl_nexthop_compat_mode) ++ if (replace_notify && ++ READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode)) + nexthop_replace_notify(net, new_nh, &cfg->nlinfo); + } + +diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c +index a3ec2a08027b8..19c13ad5c121b 100644 +--- a/net/ipv4/tcp.c ++++ b/net/ipv4/tcp.c +@@ -2490,7 +2490,8 @@ static void tcp_orphan_update(struct timer_list *unused) + + static bool tcp_too_many_orphans(int shift) + { +- return READ_ONCE(tcp_orphan_cache) << shift > sysctl_tcp_max_orphans; ++ return READ_ONCE(tcp_orphan_cache) << shift > ++ READ_ONCE(sysctl_tcp_max_orphans); + } + + bool tcp_check_oom(struct sock *sk, int shift) +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index e67505c6d8562..cdf215442d373 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -5641,7 +5641,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, + if (nexthop_is_blackhole(rt->nh)) + rtm->rtm_type = RTN_BLACKHOLE; + +- if (net->ipv4.sysctl_nexthop_compat_mode && ++ if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) && + rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0) + goto nla_put_failure; + +diff --git a/net/ipv6/seg6_iptunnel.c 
b/net/ipv6/seg6_iptunnel.c +index 4d4399c5c5ea9..40ac23242c378 100644 +--- a/net/ipv6/seg6_iptunnel.c ++++ b/net/ipv6/seg6_iptunnel.c +@@ -188,6 +188,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) + } + #endif + ++ hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); ++ + skb_postpush_rcsum(skb, hdr, tot_len); + + return 0; +@@ -240,6 +242,8 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh) + } + #endif + ++ hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); ++ + skb_postpush_rcsum(skb, hdr, sizeof(struct ipv6hdr) + hdrlen); + + return 0; +@@ -301,7 +305,6 @@ static int seg6_do_srh(struct sk_buff *skb) + break; + } + +- ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + + return 0; +diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c +index eba23279912df..11f7da4139f66 100644 +--- a/net/ipv6/seg6_local.c ++++ b/net/ipv6/seg6_local.c +@@ -435,7 +435,6 @@ static int input_action_end_b6(struct sk_buff *skb, struct seg6_local_lwt *slwt) + if (err) + goto drop; + +- ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + + seg6_lookup_nexthop(skb, NULL, 0); +@@ -467,7 +466,6 @@ static int input_action_end_b6_encap(struct sk_buff *skb, + if (err) + goto drop; + +- ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); + skb_set_transport_header(skb, sizeof(struct ipv6hdr)); + + seg6_lookup_nexthop(skb, NULL, 0); +diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c +index 2fb99325135a0..b9404b0560871 100644 +--- a/net/mac80211/wme.c ++++ b/net/mac80211/wme.c +@@ -145,8 +145,8 @@ u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata, + bool qos; + + /* all mesh/ocb stations are required to support WME */ +- if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT || +- sdata->vif.type == NL80211_IFTYPE_OCB) ++ if (sta 
&& (sdata->vif.type == NL80211_IFTYPE_MESH_POINT || ++ sdata->vif.type == NL80211_IFTYPE_OCB)) + qos = true; + else if (sta) + qos = sta->sta.wme; +diff --git a/net/tipc/socket.c b/net/tipc/socket.c +index 42283dc6c5b7c..38256aabf4f1d 100644 +--- a/net/tipc/socket.c ++++ b/net/tipc/socket.c +@@ -489,6 +489,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock, + sock_init_data(sock, sk); + tipc_set_sk_state(sk, TIPC_OPEN); + if (tipc_sk_insert(tsk)) { ++ sk_free(sk); + pr_warn("Socket create failed; port number exhausted\n"); + return -EINVAL; + } +diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c +index 3c82286e5bcca..6ae2ce411b4bf 100644 +--- a/net/tls/tls_device.c ++++ b/net/tls/tls_device.c +@@ -1390,9 +1390,9 @@ static struct notifier_block tls_dev_notifier = { + .notifier_call = tls_dev_event, + }; + +-void __init tls_device_init(void) ++int __init tls_device_init(void) + { +- register_netdevice_notifier(&tls_dev_notifier); ++ return register_netdevice_notifier(&tls_dev_notifier); + } + + void __exit tls_device_cleanup(void) +diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c +index 58d22d6b86ae6..e537085b184fe 100644 +--- a/net/tls/tls_main.c ++++ b/net/tls/tls_main.c +@@ -905,7 +905,12 @@ static int __init tls_register(void) + if (err) + return err; + +- tls_device_init(); ++ err = tls_device_init(); ++ if (err) { ++ unregister_pernet_subsys(&tls_proc_ops); ++ return err; ++ } ++ + tcp_register_ulp(&tcp_tls_ulp_ops); + + return 0; +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index a6dd47eb086da..168c3b78ac47b 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -73,7 +73,7 @@ static struct shash_desc *init_desc(char type, uint8_t hash_algo) + { + long rc; + const char *algo; +- struct crypto_shash **tfm, *tmp_tfm = NULL; ++ struct crypto_shash **tfm, *tmp_tfm; + struct shash_desc *desc; + + if (type == EVM_XATTR_HMAC) { +@@ -118,16 +118,13 @@ 
unlock: + alloc: + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm), + GFP_KERNEL); +- if (!desc) { +- crypto_free_shash(tmp_tfm); ++ if (!desc) + return ERR_PTR(-ENOMEM); +- } + + desc->tfm = *tfm; + + rc = crypto_shash_init(desc); + if (rc) { +- crypto_free_shash(tmp_tfm); + kfree(desc); + return ERR_PTR(rc); + } +diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c +index 3dd8c2e4314ea..7122a359a268e 100644 +--- a/security/integrity/ima/ima_appraise.c ++++ b/security/integrity/ima/ima_appraise.c +@@ -396,7 +396,8 @@ int ima_appraise_measurement(enum ima_hooks func, + goto out; + } + +- status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint); ++ status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, ++ rc < 0 ? 0 : rc, iint); + switch (status) { + case INTEGRITY_PASS: + case INTEGRITY_PASS_IMMUTABLE: +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c +index f6a7e9643b546..b1e5e7749e416 100644 +--- a/security/integrity/ima/ima_crypto.c ++++ b/security/integrity/ima/ima_crypto.c +@@ -205,6 +205,7 @@ out_array: + + crypto_free_shash(ima_algo_array[i].tfm); + } ++ kfree(ima_algo_array); + out: + crypto_free_shash(ima_shash_tfm); + return rc; +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 53b7ea86f3f84..6b5d7b4760eda 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -937,6 +937,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { + SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), + SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE), ++ SND_PCI_QUIRK(0x103c, 0x82b4, "HP ProDesk 600 G3", CXT_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", 
CXT_FIXUP_MUTE_LED_GPIO), + SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK), +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index f7645720d29c3..6155261264083 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -6725,6 +6725,7 @@ enum { + ALC298_FIXUP_LENOVO_SPK_VOLUME, + ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER, + ALC269_FIXUP_ATIV_BOOK_8, ++ ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE, + ALC221_FIXUP_HP_MIC_NO_PRESENCE, + ALC256_FIXUP_ASUS_HEADSET_MODE, + ALC256_FIXUP_ASUS_MIC, +@@ -7651,6 +7652,16 @@ static const struct hda_fixup alc269_fixups[] = { + .chained = true, + .chain_id = ALC269_FIXUP_NO_SHUTUP + }, ++ [ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE] = { ++ .type = HDA_FIXUP_PINS, ++ .v.pins = (const struct hda_pintbl[]) { ++ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ ++ { 0x1a, 0x01813030 }, /* use as headphone mic, without its own jack detect */ ++ { } ++ }, ++ .chained = true, ++ .chain_id = ALC269_FIXUP_HEADSET_MODE ++ }, + [ALC221_FIXUP_HP_MIC_NO_PRESENCE] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { +@@ -8633,6 +8644,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x129c, "Acer SWIFT SF314-55", ALC256_FIXUP_ACER_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1025, 0x129d, "Acer SWIFT SF313-51", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1300, "Acer SWIFT SF314-56", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC), + SND_PCI_QUIRK(0x1025, 0x132a, "Acer TravelMate B114-21", ALC233_FIXUP_ACER_HEADSET_MIC), +@@ -8642,6 +8654,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1025, 0x1430, 
"Acer TravelMate B311R-31", ALC256_FIXUP_ACER_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1025, 0x1466, "Acer Aspire A515-56", ALC255_FIXUP_ACER_HEADPHONE_AND_MIC), + SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z), ++ SND_PCI_QUIRK(0x1028, 0x053c, "Dell Latitude E5430", ALC292_FIXUP_DELL_E7X), + SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS), + SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X), + SND_PCI_QUIRK(0x1028, 0x05be, "Dell Latitude E6540", ALC292_FIXUP_DELL_E7X), +@@ -8756,6 +8769,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), + SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), ++ SND_PCI_QUIRK(0x103c, 0x2b5e, "HP 288 Pro G2 MT", ALC221_FIXUP_HP_288PRO_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x103c, 0x8077, "HP", ALC256_FIXUP_HP_HEADSET_MIC), +@@ -9073,6 +9087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { + SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC), ++ SND_PCI_QUIRK(0x1d72, 0x1945, "Redmi G", ALC256_FIXUP_ASUS_HEADSET_MIC), + SND_PCI_QUIRK(0x1d72, 0x1947, "RedmiBook Air", ALC255_FIXUP_XIAOMI_HEADSET_MIC), + SND_PCI_QUIRK(0x8086, 0x2074, "Intel NUC 8", ALC233_FIXUP_INTEL_NUC8_DMIC), + SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED), +@@ -10926,6 +10941,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { + SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), + SND_PCI_QUIRK(0x103c, 0x8719, "HP", 
ALC897_FIXUP_HP_HSMIC_VERB), + SND_PCI_QUIRK(0x103c, 0x873e, "HP", ALC671_FIXUP_HP_HEADSET_MIC2), ++ SND_PCI_QUIRK(0x103c, 0x877e, "HP 288 Pro G6", ALC671_FIXUP_HP_HEADSET_MIC2), + SND_PCI_QUIRK(0x103c, 0x885f, "HP 288 Pro G8", ALC671_FIXUP_HP_HEADSET_MIC2), + SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE), + SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50), +diff --git a/sound/soc/codecs/cs47l15.c b/sound/soc/codecs/cs47l15.c +index 254f9d96e766d..7c20642f160ac 100644 +--- a/sound/soc/codecs/cs47l15.c ++++ b/sound/soc/codecs/cs47l15.c +@@ -122,6 +122,9 @@ static int cs47l15_in1_adc_put(struct snd_kcontrol *kcontrol, + snd_soc_kcontrol_component(kcontrol); + struct cs47l15 *cs47l15 = snd_soc_component_get_drvdata(component); + ++ if (!!ucontrol->value.integer.value[0] == cs47l15->in1_lp_mode) ++ return 0; ++ + switch (ucontrol->value.integer.value[0]) { + case 0: + /* Set IN1 to normal mode */ +@@ -150,7 +153,7 @@ static int cs47l15_in1_adc_put(struct snd_kcontrol *kcontrol, + break; + } + +- return 0; ++ return 1; + } + + static const struct snd_kcontrol_new cs47l15_snd_controls[] = { +diff --git a/sound/soc/codecs/madera.c b/sound/soc/codecs/madera.c +index 680f31a6493a2..bbab4bc1f6b50 100644 +--- a/sound/soc/codecs/madera.c ++++ b/sound/soc/codecs/madera.c +@@ -618,7 +618,13 @@ int madera_out1_demux_put(struct snd_kcontrol *kcontrol, + end: + snd_soc_dapm_mutex_unlock(dapm); + +- return snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); ++ ret = snd_soc_dapm_mux_update_power(dapm, kcontrol, mux, e, NULL); ++ if (ret < 0) { ++ dev_err(madera->dev, "Failed to update demux power state: %d\n", ret); ++ return ret; ++ } ++ ++ return change; + } + EXPORT_SYMBOL_GPL(madera_out1_demux_put); + +@@ -893,7 +899,7 @@ static int madera_adsp_rate_put(struct snd_kcontrol *kcontrol, + struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; + const int adsp_num = e->shift_l; + const unsigned int item = 
ucontrol->value.enumerated.item[0]; +- int ret; ++ int ret = 0; + + if (item >= e->items) + return -EINVAL; +@@ -910,10 +916,10 @@ static int madera_adsp_rate_put(struct snd_kcontrol *kcontrol, + "Cannot change '%s' while in use by active audio paths\n", + kcontrol->id.name); + ret = -EBUSY; +- } else { ++ } else if (priv->adsp_rate_cache[adsp_num] != e->values[item]) { + /* Volatile register so defer until the codec is powered up */ + priv->adsp_rate_cache[adsp_num] = e->values[item]; +- ret = 0; ++ ret = 1; + } + + mutex_unlock(&priv->rate_lock); +diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c +index 4c0e87e22b97b..f066e016a874a 100644 +--- a/sound/soc/codecs/sgtl5000.c ++++ b/sound/soc/codecs/sgtl5000.c +@@ -1797,6 +1797,9 @@ static int sgtl5000_i2c_remove(struct i2c_client *client) + { + struct sgtl5000_priv *sgtl5000 = i2c_get_clientdata(client); + ++ regmap_write(sgtl5000->regmap, SGTL5000_CHIP_DIG_POWER, SGTL5000_DIG_POWER_DEFAULT); ++ regmap_write(sgtl5000->regmap, SGTL5000_CHIP_ANA_POWER, SGTL5000_ANA_POWER_DEFAULT); ++ + clk_disable_unprepare(sgtl5000->mclk); + regulator_bulk_disable(sgtl5000->num_supplies, sgtl5000->supplies); + regulator_bulk_free(sgtl5000->num_supplies, sgtl5000->supplies); +@@ -1804,6 +1807,11 @@ static int sgtl5000_i2c_remove(struct i2c_client *client) + return 0; + } + ++static void sgtl5000_i2c_shutdown(struct i2c_client *client) ++{ ++ sgtl5000_i2c_remove(client); ++} ++ + static const struct i2c_device_id sgtl5000_id[] = { + {"sgtl5000", 0}, + {}, +@@ -1824,6 +1832,7 @@ static struct i2c_driver sgtl5000_i2c_driver = { + }, + .probe = sgtl5000_i2c_probe, + .remove = sgtl5000_i2c_remove, ++ .shutdown = sgtl5000_i2c_shutdown, + .id_table = sgtl5000_id, + }; + +diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h +index 56ec5863f2507..3a808c762299e 100644 +--- a/sound/soc/codecs/sgtl5000.h ++++ b/sound/soc/codecs/sgtl5000.h +@@ -80,6 +80,7 @@ + /* + * SGTL5000_CHIP_DIG_POWER + */ ++#define 
SGTL5000_DIG_POWER_DEFAULT 0x0000 + #define SGTL5000_ADC_EN 0x0040 + #define SGTL5000_DAC_EN 0x0020 + #define SGTL5000_DAP_POWERUP 0x0010 +diff --git a/sound/soc/codecs/tas2764.c b/sound/soc/codecs/tas2764.c +index 14a193e48dc76..37588804a6b5f 100644 +--- a/sound/soc/codecs/tas2764.c ++++ b/sound/soc/codecs/tas2764.c +@@ -42,10 +42,12 @@ static void tas2764_reset(struct tas2764_priv *tas2764) + gpiod_set_value_cansleep(tas2764->reset_gpio, 0); + msleep(20); + gpiod_set_value_cansleep(tas2764->reset_gpio, 1); ++ usleep_range(1000, 2000); + } + + snd_soc_component_write(tas2764->component, TAS2764_SW_RST, + TAS2764_RST); ++ usleep_range(1000, 2000); + } + + static int tas2764_set_bias_level(struct snd_soc_component *component, +@@ -107,8 +109,10 @@ static int tas2764_codec_resume(struct snd_soc_component *component) + struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); + int ret; + +- if (tas2764->sdz_gpio) ++ if (tas2764->sdz_gpio) { + gpiod_set_value_cansleep(tas2764->sdz_gpio, 1); ++ usleep_range(1000, 2000); ++ } + + ret = snd_soc_component_update_bits(component, TAS2764_PWR_CTRL, + TAS2764_PWR_CTRL_MASK, +@@ -131,7 +135,8 @@ static const char * const tas2764_ASI1_src[] = { + }; + + static SOC_ENUM_SINGLE_DECL( +- tas2764_ASI1_src_enum, TAS2764_TDM_CFG2, 4, tas2764_ASI1_src); ++ tas2764_ASI1_src_enum, TAS2764_TDM_CFG2, TAS2764_TDM_CFG2_SCFG_SHIFT, ++ tas2764_ASI1_src); + + static const struct snd_kcontrol_new tas2764_asi1_mux = + SOC_DAPM_ENUM("ASI1 Source", tas2764_ASI1_src_enum); +@@ -329,20 +334,22 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) + { + struct snd_soc_component *component = dai->component; + struct tas2764_priv *tas2764 = snd_soc_component_get_drvdata(component); +- u8 tdm_rx_start_slot = 0, asi_cfg_1 = 0; +- int iface; ++ u8 tdm_rx_start_slot = 0, asi_cfg_0 = 0, asi_cfg_1 = 0; + int ret; + + switch (fmt & SND_SOC_DAIFMT_INV_MASK) { ++ case SND_SOC_DAIFMT_NB_IF: ++ asi_cfg_0 ^= 
TAS2764_TDM_CFG0_FRAME_START; ++ fallthrough; + case SND_SOC_DAIFMT_NB_NF: + asi_cfg_1 = TAS2764_TDM_CFG1_RX_RISING; + break; ++ case SND_SOC_DAIFMT_IB_IF: ++ asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; ++ fallthrough; + case SND_SOC_DAIFMT_IB_NF: + asi_cfg_1 = TAS2764_TDM_CFG1_RX_FALLING; + break; +- default: +- dev_err(tas2764->dev, "ASI format Inverse is not found\n"); +- return -EINVAL; + } + + ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1, +@@ -353,13 +360,13 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) + + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_I2S: ++ asi_cfg_0 ^= TAS2764_TDM_CFG0_FRAME_START; ++ fallthrough; + case SND_SOC_DAIFMT_DSP_A: +- iface = TAS2764_TDM_CFG2_SCFG_I2S; + tdm_rx_start_slot = 1; + break; + case SND_SOC_DAIFMT_DSP_B: + case SND_SOC_DAIFMT_LEFT_J: +- iface = TAS2764_TDM_CFG2_SCFG_LEFT_J; + tdm_rx_start_slot = 0; + break; + default: +@@ -368,14 +375,15 @@ static int tas2764_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) + return -EINVAL; + } + +- ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1, +- TAS2764_TDM_CFG1_MASK, +- (tdm_rx_start_slot << TAS2764_TDM_CFG1_51_SHIFT)); ++ ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG0, ++ TAS2764_TDM_CFG0_FRAME_START, ++ asi_cfg_0); + if (ret < 0) + return ret; + +- ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG2, +- TAS2764_TDM_CFG2_SCFG_MASK, iface); ++ ret = snd_soc_component_update_bits(component, TAS2764_TDM_CFG1, ++ TAS2764_TDM_CFG1_MASK, ++ (tdm_rx_start_slot << TAS2764_TDM_CFG1_51_SHIFT)); + if (ret < 0) + return ret; + +@@ -501,8 +509,10 @@ static int tas2764_codec_probe(struct snd_soc_component *component) + + tas2764->component = component; + +- if (tas2764->sdz_gpio) ++ if (tas2764->sdz_gpio) { + gpiod_set_value_cansleep(tas2764->sdz_gpio, 1); ++ usleep_range(1000, 2000); ++ } + + tas2764_reset(tas2764); + +@@ -526,12 +536,12 @@ static int tas2764_codec_probe(struct 
snd_soc_component *component) + } + + static DECLARE_TLV_DB_SCALE(tas2764_digital_tlv, 1100, 50, 0); +-static DECLARE_TLV_DB_SCALE(tas2764_playback_volume, -10000, 50, 0); ++static DECLARE_TLV_DB_SCALE(tas2764_playback_volume, -10050, 50, 1); + + static const struct snd_kcontrol_new tas2764_snd_controls[] = { + SOC_SINGLE_TLV("Speaker Volume", TAS2764_DVC, 0, + TAS2764_DVC_MAX, 1, tas2764_playback_volume), +- SOC_SINGLE_TLV("Amp Gain Volume", TAS2764_CHNL_0, 0, 0x14, 0, ++ SOC_SINGLE_TLV("Amp Gain Volume", TAS2764_CHNL_0, 1, 0x14, 0, + tas2764_digital_tlv), + }; + +@@ -556,7 +566,7 @@ static const struct reg_default tas2764_reg_defaults[] = { + { TAS2764_SW_RST, 0x00 }, + { TAS2764_PWR_CTRL, 0x1a }, + { TAS2764_DVC, 0x00 }, +- { TAS2764_CHNL_0, 0x00 }, ++ { TAS2764_CHNL_0, 0x28 }, + { TAS2764_TDM_CFG0, 0x09 }, + { TAS2764_TDM_CFG1, 0x02 }, + { TAS2764_TDM_CFG2, 0x0a }, +diff --git a/sound/soc/codecs/tas2764.h b/sound/soc/codecs/tas2764.h +index 67d6fd903c42c..f015f22a083b5 100644 +--- a/sound/soc/codecs/tas2764.h ++++ b/sound/soc/codecs/tas2764.h +@@ -47,6 +47,7 @@ + #define TAS2764_TDM_CFG0_MASK GENMASK(3, 1) + #define TAS2764_TDM_CFG0_44_1_48KHZ BIT(3) + #define TAS2764_TDM_CFG0_88_2_96KHZ (BIT(3) | BIT(1)) ++#define TAS2764_TDM_CFG0_FRAME_START BIT(0) + + /* TDM Configuration Reg1 */ + #define TAS2764_TDM_CFG1 TAS2764_REG(0X0, 0x09) +@@ -66,10 +67,7 @@ + #define TAS2764_TDM_CFG2_RXS_16BITS 0x0 + #define TAS2764_TDM_CFG2_RXS_24BITS BIT(0) + #define TAS2764_TDM_CFG2_RXS_32BITS BIT(1) +-#define TAS2764_TDM_CFG2_SCFG_MASK GENMASK(5, 4) +-#define TAS2764_TDM_CFG2_SCFG_I2S 0x0 +-#define TAS2764_TDM_CFG2_SCFG_LEFT_J BIT(4) +-#define TAS2764_TDM_CFG2_SCFG_RIGHT_J BIT(5) ++#define TAS2764_TDM_CFG2_SCFG_SHIFT 4 + + /* TDM Configuration Reg3 */ + #define TAS2764_TDM_CFG3 TAS2764_REG(0X0, 0x0c) +diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c +index 4238929b23751..d0cef982215dc 100644 +--- a/sound/soc/codecs/wm5110.c ++++ b/sound/soc/codecs/wm5110.c +@@ 
-413,6 +413,7 @@ static int wm5110_put_dre(struct snd_kcontrol *kcontrol, + unsigned int rnew = (!!ucontrol->value.integer.value[1]) << mc->rshift; + unsigned int lold, rold; + unsigned int lena, rena; ++ bool change = false; + int ret; + + snd_soc_dapm_mutex_lock(dapm); +@@ -440,8 +441,8 @@ static int wm5110_put_dre(struct snd_kcontrol *kcontrol, + goto err; + } + +- ret = regmap_update_bits(arizona->regmap, ARIZONA_DRE_ENABLE, +- mask, lnew | rnew); ++ ret = regmap_update_bits_check(arizona->regmap, ARIZONA_DRE_ENABLE, ++ mask, lnew | rnew, &change); + if (ret) { + dev_err(arizona->dev, "Failed to set DRE: %d\n", ret); + goto err; +@@ -454,6 +455,9 @@ static int wm5110_put_dre(struct snd_kcontrol *kcontrol, + if (!rnew && rold) + wm5110_clear_pga_volume(arizona, mc->rshift); + ++ if (change) ++ ret = 1; ++ + err: + snd_soc_dapm_mutex_unlock(dapm); + +diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c +index 87c891c462910..3b3868df9f670 100644 +--- a/sound/soc/intel/skylake/skl-nhlt.c ++++ b/sound/soc/intel/skylake/skl-nhlt.c +@@ -201,7 +201,6 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, + struct nhlt_fmt_cfg *fmt_cfg; + struct wav_fmt_ext *wav_fmt; + unsigned long rate; +- bool present = false; + int rate_index = 0; + u16 channels, bps; + u8 clk_src; +@@ -214,9 +213,12 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, + if (fmt->fmt_count == 0) + return; + ++ fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config; + for (i = 0; i < fmt->fmt_count; i++) { +- fmt_cfg = &fmt->fmt_config[i]; +- wav_fmt = &fmt_cfg->fmt_ext; ++ struct nhlt_fmt_cfg *saved_fmt_cfg = fmt_cfg; ++ bool present = false; ++ ++ wav_fmt = &saved_fmt_cfg->fmt_ext; + + channels = wav_fmt->fmt.channels; + bps = wav_fmt->fmt.bits_per_sample; +@@ -234,12 +236,18 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, + * derive the rate. 
+ */ + for (j = i; j < fmt->fmt_count; j++) { +- fmt_cfg = &fmt->fmt_config[j]; +- wav_fmt = &fmt_cfg->fmt_ext; ++ struct nhlt_fmt_cfg *tmp_fmt_cfg = fmt_cfg; ++ ++ wav_fmt = &tmp_fmt_cfg->fmt_ext; + if ((fs == wav_fmt->fmt.samples_per_sec) && +- (bps == wav_fmt->fmt.bits_per_sample)) ++ (bps == wav_fmt->fmt.bits_per_sample)) { + channels = max_t(u16, channels, + wav_fmt->fmt.channels); ++ saved_fmt_cfg = tmp_fmt_cfg; ++ } ++ /* Move to the next nhlt_fmt_cfg */ ++ tmp_fmt_cfg = (struct nhlt_fmt_cfg *)(tmp_fmt_cfg->config.caps + ++ tmp_fmt_cfg->config.size); + } + + rate = channels * bps * fs; +@@ -255,8 +263,11 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, + + /* Fill rate and parent for sclk/sclkfs */ + if (!present) { ++ struct nhlt_fmt_cfg *first_fmt_cfg; ++ ++ first_fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config; + i2s_config_ext = (struct skl_i2s_config_blob_ext *) +- fmt->fmt_config[0].config.caps; ++ first_fmt_cfg->config.caps; + + /* MCLK Divider Source Select */ + if (is_legacy_blob(i2s_config_ext->hdr.sig)) { +@@ -270,6 +281,9 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, + + parent = skl_get_parent_clk(clk_src); + ++ /* Move to the next nhlt_fmt_cfg */ ++ fmt_cfg = (struct nhlt_fmt_cfg *)(fmt_cfg->config.caps + ++ fmt_cfg->config.size); + /* + * Do not copy the config data if there is no parent + * clock available for this clock source select +@@ -278,9 +292,9 @@ static void skl_get_ssp_clks(struct skl_dev *skl, struct skl_ssp_clk *ssp_clks, + continue; + + sclk[id].rate_cfg[rate_index].rate = rate; +- sclk[id].rate_cfg[rate_index].config = fmt_cfg; ++ sclk[id].rate_cfg[rate_index].config = saved_fmt_cfg; + sclkfs[id].rate_cfg[rate_index].rate = rate; +- sclkfs[id].rate_cfg[rate_index].config = fmt_cfg; ++ sclkfs[id].rate_cfg[rate_index].config = saved_fmt_cfg; + sclk[id].parent_name = parent->name; + sclkfs[id].parent_name = parent->name; + +@@ -294,13 +308,13 @@ static void 
skl_get_mclk(struct skl_dev *skl, struct skl_ssp_clk *mclk, + { + struct skl_i2s_config_blob_ext *i2s_config_ext; + struct skl_i2s_config_blob_legacy *i2s_config; +- struct nhlt_specific_cfg *fmt_cfg; ++ struct nhlt_fmt_cfg *fmt_cfg; + struct skl_clk_parent_src *parent; + u32 clkdiv, div_ratio; + u8 clk_src; + +- fmt_cfg = &fmt->fmt_config[0].config; +- i2s_config_ext = (struct skl_i2s_config_blob_ext *)fmt_cfg->caps; ++ fmt_cfg = (struct nhlt_fmt_cfg *)fmt->fmt_config; ++ i2s_config_ext = (struct skl_i2s_config_blob_ext *)fmt_cfg->config.caps; + + /* MCLK Divider Source Select and divider */ + if (is_legacy_blob(i2s_config_ext->hdr.sig)) { +@@ -329,7 +343,7 @@ static void skl_get_mclk(struct skl_dev *skl, struct skl_ssp_clk *mclk, + return; + + mclk[id].rate_cfg[0].rate = parent->rate/div_ratio; +- mclk[id].rate_cfg[0].config = &fmt->fmt_config[0]; ++ mclk[id].rate_cfg[0].config = fmt_cfg; + mclk[id].parent_name = parent->name; + } + +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index f2f7f2dde93cf..754c1f16ee83f 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -62,6 +62,8 @@ struct snd_soc_dapm_widget * + snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm, + const struct snd_soc_dapm_widget *widget); + ++static unsigned int soc_dapm_read(struct snd_soc_dapm_context *dapm, int reg); ++ + /* dapm power sequences - make this per codec in the future */ + static int dapm_up_seq[] = { + [snd_soc_dapm_pre] = 1, +@@ -442,6 +444,9 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget, + + snd_soc_dapm_add_path(widget->dapm, data->widget, + widget, NULL, NULL); ++ } else if (e->reg != SND_SOC_NOPM) { ++ data->value = soc_dapm_read(widget->dapm, e->reg) & ++ (e->mask << e->shift_l); + } + break; + default: +diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c +index 15bfcdbdfaa4e..0f26d6c31ce50 100644 +--- a/sound/soc/soc-ops.c ++++ b/sound/soc/soc-ops.c +@@ -517,7 +517,7 @@ int 
snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, + return -EINVAL; + if (mc->platform_max && tmp > mc->platform_max) + return -EINVAL; +- if (tmp > mc->max - mc->min + 1) ++ if (tmp > mc->max - mc->min) + return -EINVAL; + + if (invert) +@@ -538,7 +538,7 @@ int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, + return -EINVAL; + if (mc->platform_max && tmp > mc->platform_max) + return -EINVAL; +- if (tmp > mc->max - mc->min + 1) ++ if (tmp > mc->max - mc->min) + return -EINVAL; + + if (invert) +diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c +index 347636a80b487..4012097a9d60b 100644 +--- a/sound/soc/sof/intel/hda-loader.c ++++ b/sound/soc/sof/intel/hda-loader.c +@@ -79,9 +79,9 @@ out_put: + } + + /* +- * first boot sequence has some extra steps. core 0 waits for power +- * status on core 1, so power up core 1 also momentarily, keep it in +- * reset/stall and then turn it off ++ * first boot sequence has some extra steps. ++ * power on all host managed cores and only unstall/run the boot core to boot the ++ * DSP then turn off all non boot cores (if any) is powered on. + */ + static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag) + { +@@ -115,7 +115,7 @@ static int cl_dsp_init(struct snd_sof_dev *sdev, int stream_tag) + ((stream_tag - 1) << 9))); + + /* step 3: unset core 0 reset state & unstall/run core 0 */ +- ret = hda_dsp_core_run(sdev, BIT(0)); ++ ret = hda_dsp_core_run(sdev, chip->init_core_mask); + if (ret < 0) { + if (hda->boot_iteration == HDA_FW_BOOT_ATTEMPTS) + dev_err(sdev->dev, |