 0000_README             |     4 +
 1069_linux-6.6.70.patch | 12214 ++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 12218 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index d03d6cf7..e68eec73 100644
--- a/0000_README
+++ b/0000_README
@@ -319,6 +319,10 @@ Patch: 1068_linux-6.6.69.patch
From: https://www.kernel.org
Desc: Linux 6.6.69
+Patch: 1069_linux-6.6.70.patch
+From: https://www.kernel.org
+Desc: Linux 6.6.70
+
Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
Desc: Enable link security restrictions by default.
diff --git a/1069_linux-6.6.70.patch b/1069_linux-6.6.70.patch
new file mode 100644
index 00000000..0ccd04cd
--- /dev/null
+++ b/1069_linux-6.6.70.patch
@@ -0,0 +1,12214 @@
+diff --git a/Documentation/admin-guide/media/building.rst b/Documentation/admin-guide/media/building.rst
+index a0647342991637..7a413ba07f93bb 100644
+--- a/Documentation/admin-guide/media/building.rst
++++ b/Documentation/admin-guide/media/building.rst
+@@ -15,7 +15,7 @@ Please notice, however, that, if:
+
+ you should use the main media development tree ``master`` branch:
+
+- https://git.linuxtv.org/media_tree.git/
++ https://git.linuxtv.org/media.git/
+
+ In this case, you may find some useful information at the
+ `LinuxTv wiki pages <https://linuxtv.org/wiki>`_:
+diff --git a/Documentation/admin-guide/media/saa7134.rst b/Documentation/admin-guide/media/saa7134.rst
+index 51eae7eb5ab7f4..18d7cbc897db4b 100644
+--- a/Documentation/admin-guide/media/saa7134.rst
++++ b/Documentation/admin-guide/media/saa7134.rst
+@@ -67,7 +67,7 @@ Changes / Fixes
+ Please mail to linux-media AT vger.kernel.org unified diffs against
+ the linux media git tree:
+
+- https://git.linuxtv.org/media_tree.git/
++ https://git.linuxtv.org/media.git/
+
+ This is done by committing a patch at a clone of the git tree and
+ submitting the patch using ``git send-email``. Don't forget to
+diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst
+index 3cf806733083c7..8209c7a7c3970e 100644
+--- a/Documentation/arch/arm64/silicon-errata.rst
++++ b/Documentation/arch/arm64/silicon-errata.rst
+@@ -244,8 +244,9 @@ stable kernels.
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Hisilicon | Hip08 SMMU PMCG | #162001800 | N/A |
+ +----------------+-----------------+-----------------+-----------------------------+
+-| Hisilicon | Hip08 SMMU PMCG | #162001900 | N/A |
+-| | Hip09 SMMU PMCG | | |
++| Hisilicon | Hip{08,09,09A,10| #162001900 | N/A |
++| | ,10C,11} | | |
++| | SMMU PMCG | | |
+ +----------------+-----------------+-----------------+-----------------------------+
+ +----------------+-----------------+-----------------+-----------------------------+
+ | Qualcomm Tech. | Kryo/Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 |
+diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
+index 987aa83c264943..e956f524e379dc 100644
+--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
++++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7533.yaml
+@@ -87,7 +87,7 @@ properties:
+ adi,dsi-lanes:
+ description: Number of DSI data lanes connected to the DSI host.
+ $ref: /schemas/types.yaml#/definitions/uint32
+- enum: [ 1, 2, 3, 4 ]
++ enum: [ 2, 3, 4 ]
+
+ ports:
+ description:
+diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst
+index 10eced6c2e4625..47e8ac5b7099f7 100644
+--- a/Documentation/i2c/busses/i2c-i801.rst
++++ b/Documentation/i2c/busses/i2c-i801.rst
+@@ -48,6 +48,8 @@ Supported adapters:
+ * Intel Raptor Lake (PCH)
+ * Intel Meteor Lake (SOC and PCH)
+ * Intel Birch Stream (SOC)
++ * Intel Arrow Lake (SOC)
++ * Intel Panther Lake (SOC)
+
+ Datasheets: Publicly available at the Intel website
+
+diff --git a/Makefile b/Makefile
+index ec4d9d1d9b7ae7..4c0dd62e02e465 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 69
++SUBLEVEL = 70
+ EXTRAVERSION =
+ NAME = Pinguïn Aangedreven
+
+diff --git a/arch/arc/Makefile b/arch/arc/Makefile
+index 2390dd042e3636..fb98478ed1ab09 100644
+--- a/arch/arc/Makefile
++++ b/arch/arc/Makefile
+@@ -6,7 +6,7 @@
+ KBUILD_DEFCONFIG := haps_hs_smp_defconfig
+
+ ifeq ($(CROSS_COMPILE),)
+-CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
++CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux- arc-linux-gnu-)
+ endif
+
+ cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
+diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
+index 6e65ff12d5c7dc..8fe21f868f72d4 100644
+--- a/arch/loongarch/kernel/numa.c
++++ b/arch/loongarch/kernel/numa.c
+@@ -226,32 +226,6 @@ static void __init node_mem_init(unsigned int node)
+
+ #ifdef CONFIG_ACPI_NUMA
+
+-/*
+- * Sanity check to catch more bad NUMA configurations (they are amazingly
+- * common). Make sure the nodes cover all memory.
+- */
+-static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
+-{
+- int i;
+- u64 numaram, biosram;
+-
+- numaram = 0;
+- for (i = 0; i < mi->nr_blks; i++) {
+- u64 s = mi->blk[i].start >> PAGE_SHIFT;
+- u64 e = mi->blk[i].end >> PAGE_SHIFT;
+-
+- numaram += e - s;
+- numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
+- if ((s64)numaram < 0)
+- numaram = 0;
+- }
+- max_pfn = max_low_pfn;
+- biosram = max_pfn - absent_pages_in_range(0, max_pfn);
+-
+- BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT)));
+- return true;
+-}
+-
+ static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
+ {
+ static unsigned long num_physpages;
+@@ -396,7 +370,7 @@ int __init init_numa_memory(void)
+ return -EINVAL;
+
+ init_node_memblock();
+- if (numa_meminfo_cover_memory(&numa_meminfo) == false)
++ if (!memblock_validate_numa_coverage(SZ_1M))
+ return -EINVAL;
+
+ for_each_node_mask(node, node_possible_map) {
+diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
+index d43db8150767be..dddf4f31c219ac 100644
+--- a/arch/powerpc/kernel/setup-common.c
++++ b/arch/powerpc/kernel/setup-common.c
+@@ -601,7 +601,6 @@ struct seq_buf ppc_hw_desc __initdata = {
+ .buffer = ppc_hw_desc_buf,
+ .size = sizeof(ppc_hw_desc_buf),
+ .len = 0,
+- .readpos = 0,
+ };
+
+ static __init void probe_machine(void)
+diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
+index 1245000a8792fd..2fb7d53cf3338d 100644
+--- a/arch/x86/entry/vsyscall/vsyscall_64.c
++++ b/arch/x86/entry/vsyscall/vsyscall_64.c
+@@ -76,7 +76,7 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+ if (!show_unhandled_signals)
+ return;
+
+- printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
++ printk_ratelimited("%s%s[%d] %s ip:%lx cs:%x sp:%lx ax:%lx si:%lx di:%lx\n",
+ level, current->comm, task_pid_nr(current),
+ message, regs->ip, regs->cs,
+ regs->sp, regs->ax, regs->si, regs->di);
+diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
+index f4db78b09c8f0b..5a83fbd9bc0b44 100644
+--- a/arch/x86/include/asm/ptrace.h
++++ b/arch/x86/include/asm/ptrace.h
+@@ -56,18 +56,64 @@ struct pt_regs {
+
+ #else /* __i386__ */
+
++struct fred_cs {
++ /* CS selector */
++ u64 cs : 16,
++ /* Stack level at event time */
++ sl : 2,
++ /* IBT in WAIT_FOR_ENDBRANCH state */
++ wfe : 1,
++ : 45;
++};
++
++struct fred_ss {
++ /* SS selector */
++ u64 ss : 16,
++ /* STI state */
++ sti : 1,
++ /* Set if syscall, sysenter or INT n */
++ swevent : 1,
++ /* Event is NMI type */
++ nmi : 1,
++ : 13,
++ /* Event vector */
++ vector : 8,
++ : 8,
++ /* Event type */
++ type : 4,
++ : 4,
++ /* Event was incident to enclave execution */
++ enclave : 1,
++ /* CPU was in long mode */
++ lm : 1,
++ /*
++ * Nested exception during FRED delivery, not set
++ * for #DF.
++ */
++ nested : 1,
++ : 1,
++ /*
++ * The length of the instruction causing the event.
++ * Only set for INTO, INT1, INT3, INT n, SYSCALL
++ * and SYSENTER. 0 otherwise.
++ */
++ insnlen : 4;
++};
++
+ struct pt_regs {
+-/*
+- * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+- * unless syscall needs a complete, fully filled "struct pt_regs".
+- */
++ /*
++ * C ABI says these regs are callee-preserved. They aren't saved on
++ * kernel entry unless syscall needs a complete, fully filled
++ * "struct pt_regs".
++ */
+ unsigned long r15;
+ unsigned long r14;
+ unsigned long r13;
+ unsigned long r12;
+ unsigned long bp;
+ unsigned long bx;
+-/* These regs are callee-clobbered. Always saved on kernel entry. */
++
++ /* These regs are callee-clobbered. Always saved on kernel entry. */
+ unsigned long r11;
+ unsigned long r10;
+ unsigned long r9;
+@@ -77,18 +123,50 @@ struct pt_regs {
+ unsigned long dx;
+ unsigned long si;
+ unsigned long di;
+-/*
+- * On syscall entry, this is syscall#. On CPU exception, this is error code.
+- * On hw interrupt, it's IRQ number:
+- */
++
++ /*
++ * orig_ax is used on entry for:
++ * - the syscall number (syscall, sysenter, int80)
++ * - error_code stored by the CPU on traps and exceptions
++ * - the interrupt number for device interrupts
++ *
++ * A FRED stack frame starts here:
++ * 1) It _always_ includes an error code;
++ *
++ * 2) The return frame for ERET[US] starts here, but
++ * the content of orig_ax is ignored.
++ */
+ unsigned long orig_ax;
+-/* Return frame for iretq */
++
++ /* The IRETQ return frame starts here */
+ unsigned long ip;
+- unsigned long cs;
++
++ union {
++ /* CS selector */
++ u16 cs;
++ /* The extended 64-bit data slot containing CS */
++ u64 csx;
++ /* The FRED CS extension */
++ struct fred_cs fred_cs;
++ };
++
+ unsigned long flags;
+ unsigned long sp;
+- unsigned long ss;
+-/* top of stack page */
++
++ union {
++ /* SS selector */
++ u16 ss;
++ /* The extended 64-bit data slot containing SS */
++ u64 ssx;
++ /* The FRED SS extension */
++ struct fred_ss fred_ss;
++ };
++
++ /*
++ * Top of stack on IDT systems, while FRED systems have extra fields
++ * defined above for storing exception related information, e.g. CR2 or
++ * DR6.
++ */
+ };
+
+ #endif /* !__i386__ */
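
The unions above let the same stack slot be read three ways: as the legacy 16-bit selector, as the full 64-bit data slot, or through the FRED bitfield layout. A minimal user-space sketch of that aliasing, assuming only the bit layout shown in the hunk (illustrative, not kernel code) — the later cet.c hunk clears exactly the wfe bit shown here:

#include <stdint.h>
#include <stdio.h>

struct fred_cs {
	/* CS selector, stack level, WAIT_FOR_ENDBRANCH bit, reserved */
	uint64_t cs : 16, sl : 2, wfe : 1, : 45;
};

union cs_slot {
	uint16_t cs;			/* legacy selector view */
	uint64_t csx;			/* full 64-bit data slot */
	struct fred_cs fred_cs;		/* FRED bitfield view */
};

int main(void)
{
	union cs_slot c = { .csx = 0 };

	c.fred_cs.cs = 0x10;	/* selector lands in the low 16 bits */
	c.fred_cs.wfe = 1;	/* bit 18 of the extended slot */
	printf("cs=%#x csx=%#llx\n", c.cs, (unsigned long long)c.csx);
	/* prints cs=0x10 csx=0x40010 */

	c.fred_cs.wfe = 0;	/* what ibt_clear_fred_wfe() does */
	printf("csx=%#llx\n", (unsigned long long)c.csx);	/* 0x10 */
	return 0;
}
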
+diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
+index 580636cdc257b7..4d3c9d00d6b6b2 100644
+--- a/arch/x86/include/asm/tlb.h
++++ b/arch/x86/include/asm/tlb.h
+@@ -34,4 +34,8 @@ static inline void __tlb_remove_table(void *table)
+ free_page_and_swap_cache(table);
+ }
+
++static inline void invlpg(unsigned long addr)
++{
++ asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
++}
+ #endif /* _ASM_X86_TLB_H */
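
The new one-liner keeps the INVLPG asm-constraint boilerplate in one place (the tlb.c hunk below converts the open-coded copy to use it). INVLPG is privileged, so a user-space sketch of the same constraint shape has to substitute an unprivileged instruction; CLFLUSH takes an address operand the same way (assumes an x86-64 build):

#include <stdio.h>

static inline void flush_line(unsigned long addr)
{
	/* "r": addr is passed in a register; "memory": compiler barrier,
	 * so loads/stores aren't cached or reordered across the flush */
	asm volatile("clflush (%0)" :: "r" (addr) : "memory");
}

int main(void)
{
	unsigned long x = 42;

	flush_line((unsigned long)&x);
	printf("%lu\n", x);	/* still 42; only the cache line is flushed */
	return 0;
}
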
+diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
+index 3269a0e23d3ab8..15fc9fc3dcf052 100644
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -99,9 +99,9 @@ obj-$(CONFIG_TRACING) += trace.o
+ obj-$(CONFIG_RETHOOK) += rethook.o
+ obj-$(CONFIG_CRASH_CORE) += crash_core_$(BITS).o
+ obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
+-obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
++obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o
+ obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
+-obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
++obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o crash.o
+ obj-y += kprobes/
+ obj-$(CONFIG_MODULES) += module.o
+ obj-$(CONFIG_X86_32) += doublefault_32.o
+diff --git a/arch/x86/kernel/cet.c b/arch/x86/kernel/cet.c
+index d2c732a34e5d90..303bf74d175b30 100644
+--- a/arch/x86/kernel/cet.c
++++ b/arch/x86/kernel/cet.c
+@@ -81,6 +81,34 @@ static void do_user_cp_fault(struct pt_regs *regs, unsigned long error_code)
+
+ static __ro_after_init bool ibt_fatal = true;
+
++/*
++ * By definition, all missing-ENDBRANCH #CPs are a result of WFE && !ENDBR.
++ *
++ * For the kernel IBT no ENDBR selftest where #CPs are deliberately triggered,
++ * the WFE state of the interrupted context needs to be cleared to let execution
++ * continue. Otherwise when the CPU resumes from the instruction that just
++ * caused the previous #CP, another missing-ENDBRANCH #CP is raised and the CPU
++ * enters a dead loop.
++ *
++ * This is not a problem with IDT because it doesn't preserve WFE and IRET doesn't
++ * set WFE. But FRED provides space on the entry stack (in an expanded CS area)
++ * to save and restore the WFE state, thus the WFE state is no longer clobbered,
++ * so software must clear it.
++ */
++static void ibt_clear_fred_wfe(struct pt_regs *regs)
++{
++ /*
++ * No need to do any FRED checks.
++ *
++ * For IDT event delivery, the high-order 48 bits of CS are pushed
++ * as 0s into the stack, and later IRET ignores these bits.
++ *
++ * For FRED, a test to check if fred_cs.wfe is set would be dropped
++ * by compilers.
++ */
++ regs->fred_cs.wfe = 0;
++}
++
+ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
+ {
+ if ((error_code & CP_EC) != CP_ENDBR) {
+@@ -90,6 +118,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
+
+ if (unlikely(regs->ip == (unsigned long)&ibt_selftest_noendbr)) {
+ regs->ax = 0;
++ ibt_clear_fred_wfe(regs);
+ return;
+ }
+
+@@ -97,6 +126,7 @@ static void do_kernel_cp_fault(struct pt_regs *regs, unsigned long error_code)
+ if (!ibt_fatal) {
+ printk(KERN_DEFAULT CUT_HERE);
+ __warn(__FILE__, __LINE__, (void *)regs->ip, TAINT_WARN, regs, NULL);
++ ibt_clear_fred_wfe(regs);
+ return;
+ }
+ BUG();
+diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
+index bcb2d640a0cd85..6328cf56e59be2 100644
+--- a/arch/x86/kernel/cpu/mshyperv.c
++++ b/arch/x86/kernel/cpu/mshyperv.c
+@@ -209,7 +209,9 @@ static void hv_machine_shutdown(void)
+ if (kexec_in_progress)
+ hyperv_cleanup();
+ }
++#endif /* CONFIG_KEXEC_CORE */
+
++#ifdef CONFIG_CRASH_DUMP
+ static void hv_machine_crash_shutdown(struct pt_regs *regs)
+ {
+ if (hv_crash_handler)
+@@ -221,7 +223,64 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
+ /* Disable the hypercall page when there is only 1 active CPU. */
+ hyperv_cleanup();
+ }
+-#endif /* CONFIG_KEXEC_CORE */
++#endif /* CONFIG_CRASH_DUMP */
++
++static u64 hv_ref_counter_at_suspend;
++static void (*old_save_sched_clock_state)(void);
++static void (*old_restore_sched_clock_state)(void);
++
++/*
++ * Hyper-V clock counter resets during hibernation. Save and restore clock
++ * offset during suspend/resume, while also considering the time passed
++ * before suspend. This is to make sure that sched_clock using hv tsc page
++ * based clocksource, proceeds from where it left off during suspend and
++ * it shows correct time for the timestamps of kernel messages after resume.
++ */
++static void save_hv_clock_tsc_state(void)
++{
++ hv_ref_counter_at_suspend = hv_read_reference_counter();
++}
++
++static void restore_hv_clock_tsc_state(void)
++{
++ /*
++ * Adjust the offsets used by hv tsc clocksource to
++ * account for the time spent before hibernation.
++ * adjusted value = reference counter (time) at suspend
++ * - reference counter (time) now.
++ */
++ hv_adj_sched_clock_offset(hv_ref_counter_at_suspend - hv_read_reference_counter());
++}
++
++/*
++ * Functions to override save_sched_clock_state and restore_sched_clock_state
++ * functions of x86_platform. The Hyper-V clock counter is reset during
++ * suspend-resume and the offset used to measure time needs to be
++ * corrected, post resume.
++ */
++static void hv_save_sched_clock_state(void)
++{
++ old_save_sched_clock_state();
++ save_hv_clock_tsc_state();
++}
++
++static void hv_restore_sched_clock_state(void)
++{
++ restore_hv_clock_tsc_state();
++ old_restore_sched_clock_state();
++}
++
++static void __init x86_setup_ops_for_tsc_pg_clock(void)
++{
++ if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE))
++ return;
++
++ old_save_sched_clock_state = x86_platform.save_sched_clock_state;
++ x86_platform.save_sched_clock_state = hv_save_sched_clock_state;
++
++ old_restore_sched_clock_state = x86_platform.restore_sched_clock_state;
++ x86_platform.restore_sched_clock_state = hv_restore_sched_clock_state;
++}
+ #endif /* CONFIG_HYPERV */
+
+ static uint32_t __init ms_hyperv_platform(void)
+@@ -493,9 +552,13 @@ static void __init ms_hyperv_init_platform(void)
+ no_timer_check = 1;
+ #endif
+
+-#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
++#if IS_ENABLED(CONFIG_HYPERV)
++#if defined(CONFIG_KEXEC_CORE)
+ machine_ops.shutdown = hv_machine_shutdown;
++#endif
++#if defined(CONFIG_CRASH_DUMP)
+ machine_ops.crash_shutdown = hv_machine_crash_shutdown;
++#endif
+ #endif
+ if (ms_hyperv.features & HV_ACCESS_TSC_INVARIANT) {
+ /*
+@@ -572,6 +635,7 @@ static void __init ms_hyperv_init_platform(void)
+
+ /* Register Hyper-V specific clocksource */
+ hv_init_clocksource();
++ x86_setup_ops_for_tsc_pg_clock();
+ hv_vtl_init_platform();
+ #endif
+ /*
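
The sched-clock overrides above follow the usual wrap-and-chain pattern for x86_platform hooks: stash the old function pointer, then install a wrapper that runs it plus the new Hyper-V step. A standalone sketch of the pattern (names are illustrative, not the kernel's):

#include <stdio.h>

static void (*old_save)(void);

static void platform_save(void)
{
	puts("generic save_sched_clock_state");
}

static void hv_save(void)
{
	old_save();				/* keep original behaviour */
	puts("save Hyper-V reference counter");	/* then add the new step */
}

int main(void)
{
	void (*save_hook)(void) = platform_save;

	old_save = save_hook;	/* remember the original */
	save_hook = hv_save;	/* install the wrapper */

	save_hook();		/* runs both, in order */
	return 0;
}
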
+diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
+index a61c12c0127097..0de509c02d18b9 100644
+--- a/arch/x86/kernel/kexec-bzimage64.c
++++ b/arch/x86/kernel/kexec-bzimage64.c
+@@ -263,11 +263,13 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
+ memset(&params->hd0_info, 0, sizeof(params->hd0_info));
+ memset(&params->hd1_info, 0, sizeof(params->hd1_info));
+
++#ifdef CONFIG_CRASH_DUMP
+ if (image->type == KEXEC_TYPE_CRASH) {
+ ret = crash_setup_memmap_entries(image, params);
+ if (ret)
+ return ret;
+ } else
++#endif
+ setup_e820_entries(params);
+
+ nr_e820_entries = params->e820_entries;
+@@ -428,12 +430,14 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
+ return ERR_PTR(-EINVAL);
+ }
+
++#ifdef CONFIG_CRASH_DUMP
+ /* Allocate and load backup region */
+ if (image->type == KEXEC_TYPE_CRASH) {
+ ret = crash_load_segments(image);
+ if (ret)
+ return ERR_PTR(ret);
+ }
++#endif
+
+ /*
+ * Load purgatory. For 64bit entry point, purgatory code can be
+diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
+index b8ab9ee5896c19..38d88c8b56ec07 100644
+--- a/arch/x86/kernel/kvm.c
++++ b/arch/x86/kernel/kvm.c
+@@ -769,7 +769,7 @@ static struct notifier_block kvm_pv_reboot_nb = {
+ * won't be valid. In cases like kexec, in which you install a new kernel, this
+ * means a random memory location will be kept being written.
+ */
+-#ifdef CONFIG_KEXEC_CORE
++#ifdef CONFIG_CRASH_DUMP
+ static void kvm_crash_shutdown(struct pt_regs *regs)
+ {
+ kvm_guest_cpu_offline(true);
+@@ -852,7 +852,7 @@ static void __init kvm_guest_init(void)
+ kvm_guest_cpu_init();
+ #endif
+
+-#ifdef CONFIG_KEXEC_CORE
++#ifdef CONFIG_CRASH_DUMP
+ machine_ops.crash_shutdown = kvm_crash_shutdown;
+ #endif
+
+diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
+index 2fa12d1dc67602..aaeac2deb85dc6 100644
+--- a/arch/x86/kernel/machine_kexec_64.c
++++ b/arch/x86/kernel/machine_kexec_64.c
+@@ -545,6 +545,8 @@ int arch_kimage_file_post_load_cleanup(struct kimage *image)
+ }
+ #endif /* CONFIG_KEXEC_FILE */
+
++#ifdef CONFIG_CRASH_DUMP
++
+ static int
+ kexec_mark_range(unsigned long start, unsigned long end, bool protect)
+ {
+@@ -589,6 +591,7 @@ void arch_kexec_unprotect_crashkres(void)
+ {
+ kexec_mark_crashkres(false);
+ }
++#endif
+
+ /*
+ * During a traditional boot under SME, SME will encrypt the kernel,
+diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
+index d595ef7c1de05e..dd19a4db741afd 100644
+--- a/arch/x86/kernel/process_64.c
++++ b/arch/x86/kernel/process_64.c
+@@ -117,7 +117,7 @@ void __show_regs(struct pt_regs *regs, enum show_regs_mode mode,
+
+ printk("%sFS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
+ log_lvl, fs, fsindex, gs, gsindex, shadowgs);
+- printk("%sCS: %04lx DS: %04x ES: %04x CR0: %016lx\n",
++ printk("%sCS: %04x DS: %04x ES: %04x CR0: %016lx\n",
+ log_lvl, regs->cs, ds, es, cr0);
+ printk("%sCR2: %016lx CR3: %016lx CR4: %016lx\n",
+ log_lvl, cr2, cr3, cr4);
+diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
+index 830425e6d38e2f..f3130f762784a1 100644
+--- a/arch/x86/kernel/reboot.c
++++ b/arch/x86/kernel/reboot.c
+@@ -796,7 +796,7 @@ struct machine_ops machine_ops __ro_after_init = {
+ .emergency_restart = native_machine_emergency_restart,
+ .restart = native_machine_restart,
+ .halt = native_machine_halt,
+-#ifdef CONFIG_KEXEC_CORE
++#ifdef CONFIG_CRASH_DUMP
+ .crash_shutdown = native_machine_crash_shutdown,
+ #endif
+ };
+@@ -826,7 +826,7 @@ void machine_halt(void)
+ machine_ops.halt();
+ }
+
+-#ifdef CONFIG_KEXEC_CORE
++#ifdef CONFIG_CRASH_DUMP
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+ machine_ops.crash_shutdown(regs);
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index eb129277dcdd64..8bcecabd475bdf 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -547,7 +547,7 @@ static void __init reserve_crashkernel(void)
+ bool high = false;
+ int ret;
+
+- if (!IS_ENABLED(CONFIG_KEXEC_CORE))
++ if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
+ return;
+
+ total_mem = memblock_phys_mem_size();
+diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
+index 96a771f9f930a6..52c3823b721191 100644
+--- a/arch/x86/kernel/smp.c
++++ b/arch/x86/kernel/smp.c
+@@ -282,7 +282,7 @@ struct smp_ops smp_ops = {
+ .smp_cpus_done = native_smp_cpus_done,
+
+ .stop_other_cpus = native_stop_other_cpus,
+-#if defined(CONFIG_KEXEC_CORE)
++#if defined(CONFIG_CRASH_DUMP)
+ .crash_stop_other_cpus = kdump_nmi_shootdown_cpus,
+ #endif
+ .smp_send_reschedule = native_smp_send_reschedule,
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index c7fa5396c0f05c..2c67bfc3cf3204 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -448,37 +448,6 @@ int __node_distance(int from, int to)
+ }
+ EXPORT_SYMBOL(__node_distance);
+
+-/*
+- * Sanity check to catch more bad NUMA configurations (they are amazingly
+- * common). Make sure the nodes cover all memory.
+- */
+-static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
+-{
+- u64 numaram, e820ram;
+- int i;
+-
+- numaram = 0;
+- for (i = 0; i < mi->nr_blks; i++) {
+- u64 s = mi->blk[i].start >> PAGE_SHIFT;
+- u64 e = mi->blk[i].end >> PAGE_SHIFT;
+- numaram += e - s;
+- numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
+- if ((s64)numaram < 0)
+- numaram = 0;
+- }
+-
+- e820ram = max_pfn - absent_pages_in_range(0, max_pfn);
+-
+- /* We seem to lose 3 pages somewhere. Allow 1M of slack. */
+- if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
+- printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
+- (numaram << PAGE_SHIFT) >> 20,
+- (e820ram << PAGE_SHIFT) >> 20);
+- return false;
+- }
+- return true;
+-}
+-
+ /*
+ * Mark all currently memblock-reserved physical memory (which covers the
+ * kernel's own memory ranges) as hot-unswappable.
+@@ -584,7 +553,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
+ return -EINVAL;
+ }
+ }
+- if (!numa_meminfo_cover_memory(mi))
++
++ if (!memblock_validate_numa_coverage(SZ_1M))
+ return -EINVAL;
+
+ /* Finally register nodes. */
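
Both this hunk and the LoongArch one earlier drop their per-arch page-counting sanity checks in favor of the shared memblock_validate_numa_coverage(SZ_1M), which tolerates up to 1 MiB of memory not assigned to any node. A rough standalone model of that check (not the memblock implementation):

#include <stdbool.h>
#include <stdio.h>

struct blk { unsigned long start, end; int nid; };

static bool validate_numa_coverage(const struct blk *blks, int n,
				   unsigned long total_bytes,
				   unsigned long threshold)
{
	unsigned long covered = 0;

	for (int i = 0; i < n; i++)
		if (blks[i].nid >= 0)	/* skip NUMA_NO_NODE (-1) blocks */
			covered += blks[i].end - blks[i].start;

	return total_bytes - covered <= threshold;
}

int main(void)
{
	/* second block has no node assigned: 1 MiB uncovered */
	struct blk blks[] = {
		{ 0,       1 << 20, 0 },
		{ 1 << 20, 2 << 20, -1 },
	};

	/* exactly at the 1 MiB slack: still acceptable, prints 1 */
	printf("%d\n", validate_numa_coverage(blks, 2, 2 << 20, 1 << 20));
	return 0;
}
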
+diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
+index 2fbae48f0b470a..64f594826a2822 100644
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -19,6 +19,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/apic.h>
+ #include <asm/perf_event.h>
++#include <asm/tlb.h>
+
+ #include "mm_internal.h"
+
+@@ -1145,7 +1146,7 @@ STATIC_NOPV void native_flush_tlb_one_user(unsigned long addr)
+ bool cpu_pcide;
+
+ /* Flush 'addr' from the kernel PCID: */
+- asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
++ invlpg(addr);
+
+ /* If PTI is off there is no user PCID and nothing to flush. */
+ if (!static_cpu_has(X86_FEATURE_PTI))
+diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
+index 70be57e8f51caa..ade22feee7aeb1 100644
+--- a/arch/x86/xen/enlighten_hvm.c
++++ b/arch/x86/xen/enlighten_hvm.c
+@@ -141,7 +141,9 @@ static void xen_hvm_shutdown(void)
+ if (kexec_in_progress)
+ xen_reboot(SHUTDOWN_soft_reset);
+ }
++#endif
+
++#ifdef CONFIG_CRASH_DUMP
+ static void xen_hvm_crash_shutdown(struct pt_regs *regs)
+ {
+ native_machine_crash_shutdown(regs);
+@@ -229,6 +231,8 @@ static void __init xen_hvm_guest_init(void)
+
+ #ifdef CONFIG_KEXEC_CORE
+ machine_ops.shutdown = xen_hvm_shutdown;
++#endif
++#ifdef CONFIG_CRASH_DUMP
+ machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
+ #endif
+ }
+diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
+index 6b201e64d8abc8..bfd57d07f4b5ee 100644
+--- a/arch/x86/xen/mmu_pv.c
++++ b/arch/x86/xen/mmu_pv.c
+@@ -2517,7 +2517,7 @@ int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ }
+ EXPORT_SYMBOL_GPL(xen_remap_pfn);
+
+-#ifdef CONFIG_KEXEC_CORE
++#ifdef CONFIG_VMCORE_INFO
+ phys_addr_t paddr_vmcoreinfo_note(void)
+ {
+ if (xen_pv_domain())
+diff --git a/crypto/ecc.c b/crypto/ecc.c
+index f53fb4d6af992b..21504280aca2e5 100644
+--- a/crypto/ecc.c
++++ b/crypto/ecc.c
+@@ -66,6 +66,28 @@ const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
+ }
+ EXPORT_SYMBOL(ecc_get_curve);
+
++void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
++ u64 *out, unsigned int ndigits)
++{
++ int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64));
++ unsigned int o = nbytes & 7;
++ __be64 msd = 0;
++
++ /* diff > 0: not enough input bytes: set most significant digits to 0 */
++ if (diff > 0) {
++ ndigits -= diff;
++ memset(&out[ndigits - 1], 0, diff * sizeof(u64));
++ }
++
++ if (o) {
++ memcpy((u8 *)&msd + sizeof(msd) - o, in, o);
++ out[--ndigits] = be64_to_cpu(msd);
++ in += o;
++ }
++ ecc_swap_digits(in, out, ndigits);
++}
++EXPORT_SYMBOL(ecc_digits_from_bytes);
++
+ static u64 *ecc_alloc_digits_space(unsigned int ndigits)
+ {
+ size_t len = ndigits * sizeof(u64);
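
The new helper turns a big-endian byte string into the library's little-endian array of u64 "digits": it zero-fills when the input is short and assembles a partial most-significant digit when the length isn't a multiple of eight. A user-space rendition of the same conversion (sketch only; ecc_swap_digits() is open-coded here):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void digits_from_bytes(const uint8_t *in, unsigned int nbytes,
			      uint64_t *out, unsigned int ndigits)
{
	int diff = ndigits - (nbytes + 7) / 8;
	unsigned int o = nbytes & 7;

	if (diff > 0) {		/* short input: clear the top digits */
		ndigits -= diff;
		memset(&out[ndigits], 0, diff * sizeof(uint64_t));
	}
	if (o) {		/* partial most-significant digit */
		uint64_t msd = 0;

		for (unsigned int i = 0; i < o; i++)
			msd = (msd << 8) | in[i];
		out[--ndigits] = msd;
		in += o;
	}
	/* full 8-byte digits; input is most-significant bytes first */
	for (unsigned int i = ndigits; i-- > 0; ) {
		uint64_t d = 0;

		for (int j = 0; j < 8; j++)
			d = (d << 8) | *in++;
		out[i] = d;
	}
}

int main(void)
{
	const uint8_t in[9] = { 0x01, 0xaa, 0xbb, 0xcc, 0xdd,
				0xee, 0xff, 0x11, 0x22 };
	uint64_t out[2];

	digits_from_bytes(in, sizeof(in), out, 2);
	printf("out[1]=%#llx out[0]=%#llx\n",
	       (unsigned long long)out[1], (unsigned long long)out[0]);
	/* out[1]=0x1 out[0]=0xaabbccddeeff1122 */
	return 0;
}
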
+diff --git a/crypto/ecdsa.c b/crypto/ecdsa.c
+index 3f9ec273a121fd..da04df3c8ecf4d 100644
+--- a/crypto/ecdsa.c
++++ b/crypto/ecdsa.c
+@@ -35,40 +35,27 @@ struct ecdsa_signature_ctx {
+ static int ecdsa_get_signature_rs(u64 *dest, size_t hdrlen, unsigned char tag,
+ const void *value, size_t vlen, unsigned int ndigits)
+ {
+- size_t keylen = ndigits * sizeof(u64);
+- ssize_t diff = vlen - keylen;
++ size_t bufsize = ndigits * sizeof(u64);
+ const char *d = value;
+- u8 rs[ECC_MAX_BYTES];
+
+- if (!value || !vlen)
++ if (!value || !vlen || vlen > bufsize + 1)
+ return -EINVAL;
+
+- /* diff = 0: 'value' has exacly the right size
+- * diff > 0: 'value' has too many bytes; one leading zero is allowed that
+- * makes the value a positive integer; error on more
+- * diff < 0: 'value' is missing leading zeros, which we add
++ /*
++ * vlen may be 1 byte larger than bufsize due to a leading zero byte
++ * (necessary if the most significant bit of the integer is set).
+ */
+- if (diff > 0) {
++ if (vlen > bufsize) {
+ /* skip over leading zeros that make 'value' a positive int */
+ if (*d == 0) {
+ vlen -= 1;
+- diff--;
+ d++;
+- }
+- if (diff)
++ } else {
+ return -EINVAL;
++ }
+ }
+- if (-diff >= keylen)
+- return -EINVAL;
+-
+- if (diff) {
+- /* leading zeros not given in 'value' */
+- memset(rs, 0, -diff);
+- }
+-
+- memcpy(&rs[-diff], d, vlen);
+
+- ecc_swap_digits((u64 *)rs, dest, ndigits);
++ ecc_digits_from_bytes(d, vlen, dest, ndigits);
+
+ return 0;
+ }
+@@ -138,7 +125,7 @@ static int ecdsa_verify(struct akcipher_request *req)
+ {
+ struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
+- size_t keylen = ctx->curve->g.ndigits * sizeof(u64);
++ size_t bufsize = ctx->curve->g.ndigits * sizeof(u64);
+ struct ecdsa_signature_ctx sig_ctx = {
+ .curve = ctx->curve,
+ };
+@@ -165,14 +152,14 @@ static int ecdsa_verify(struct akcipher_request *req)
+ goto error;
+
+ /* if the hash is shorter then we will add leading zeros to fit to ndigits */
+- diff = keylen - req->dst_len;
++ diff = bufsize - req->dst_len;
+ if (diff >= 0) {
+ if (diff)
+ memset(rawhash, 0, diff);
+ memcpy(&rawhash[diff], buffer + req->src_len, req->dst_len);
+ } else if (diff < 0) {
+ /* given hash is longer, we take the left-most bytes */
+- memcpy(&rawhash, buffer + req->src_len, keylen);
++ memcpy(&rawhash, buffer + req->src_len, bufsize);
+ }
+
+ ecc_swap_digits((u64 *)rawhash, hash, ctx->curve->g.ndigits);
+@@ -222,9 +209,8 @@ static int ecdsa_ecc_ctx_reset(struct ecc_ctx *ctx)
+ static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsigned int keylen)
+ {
+ struct ecc_ctx *ctx = akcipher_tfm_ctx(tfm);
++ unsigned int digitlen, ndigits;
+ const unsigned char *d = key;
+- const u64 *digits = (const u64 *)&d[1];
+- unsigned int ndigits;
+ int ret;
+
+ ret = ecdsa_ecc_ctx_reset(ctx);
+@@ -238,12 +224,17 @@ static int ecdsa_set_pub_key(struct crypto_akcipher *tfm, const void *key, unsig
+ return -EINVAL;
+
+ keylen--;
+- ndigits = (keylen >> 1) / sizeof(u64);
++ digitlen = keylen >> 1;
++
++ ndigits = DIV_ROUND_UP(digitlen, sizeof(u64));
+ if (ndigits != ctx->curve->g.ndigits)
+ return -EINVAL;
+
+- ecc_swap_digits(digits, ctx->pub_key.x, ndigits);
+- ecc_swap_digits(&digits[ndigits], ctx->pub_key.y, ndigits);
++ d++;
++
++ ecc_digits_from_bytes(d, digitlen, ctx->pub_key.x, ndigits);
++ ecc_digits_from_bytes(&d[digitlen], digitlen, ctx->pub_key.y, ndigits);
++
+ ret = ecc_is_pubkey_valid_full(ctx->curve, &ctx->pub_key);
+
+ ctx->pub_key_set = ret == 0;
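
The rewritten r/s parser above hinges on one DER detail: INTEGER is a signed encoding, so a value whose top bit is set carries exactly one 0x00 pad byte. That is why vlen may legitimately be bufsize + 1 and anything longer is rejected. A compact check of that rule:

#include <stddef.h>
#include <stdio.h>

/* returns 0 if d/vlen is a plausible unsigned DER INTEGER body for a
 * bufsize-byte value, -1 otherwise (mirrors the -EINVAL cases above) */
static int check_der_int(const unsigned char *d, size_t vlen, size_t bufsize)
{
	if (!vlen || vlen > bufsize + 1)
		return -1;
	if (vlen > bufsize && d[0] != 0)
		return -1;	/* oversized without the sign pad byte */
	return 0;
}

int main(void)
{
	unsigned char padded[33] = { 0x00, 0x80 };	/* top bit set, padded */
	unsigned char bogus[34]  = { 0x00, 0x00 };	/* two pad bytes: bad */

	printf("%d\n", check_der_int(padded, sizeof(padded), 32));	/* 0 */
	printf("%d\n", check_der_int(bogus, sizeof(bogus), 32));	/* -1 */
	return 0;
}
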
+diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
+index 6496ff5a6ba20d..1a31106a14e446 100644
+--- a/drivers/acpi/arm64/iort.c
++++ b/drivers/acpi/arm64/iort.c
+@@ -1712,6 +1712,15 @@ static struct acpi_platform_list pmcg_plat_info[] __initdata = {
+ /* HiSilicon Hip09 Platform */
+ {"HISI ", "HIP09 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
+ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
++ {"HISI ", "HIP09A ", 0, ACPI_SIG_IORT, greater_than_or_equal,
++ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
++ /* HiSilicon Hip10/11 Platform uses the same SMMU IP with Hip09 */
++ {"HISI ", "HIP10 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
++ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
++ {"HISI ", "HIP10C ", 0, ACPI_SIG_IORT, greater_than_or_equal,
++ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
++ {"HISI ", "HIP11 ", 0, ACPI_SIG_IORT, greater_than_or_equal,
++ "Erratum #162001900", IORT_SMMU_V3_PMCG_HISI_HIP09},
+ { }
+ };
+
+diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
+index fe5e30662017de..c80b5aa7628ae9 100644
+--- a/drivers/bluetooth/btusb.c
++++ b/drivers/bluetooth/btusb.c
+@@ -620,6 +620,9 @@ static const struct usb_device_id quirks_table[] = {
+ { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x13d3, 0x3606), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
+
+ /* MediaTek MT7922A Bluetooth devices */
+ { USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
+@@ -658,6 +661,37 @@ static const struct usb_device_id quirks_table[] = {
+ { USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x35f5, 0x7922), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x13d3, 0x3614), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x13d3, 0x3615), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x04ca, 0x38e4), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x13d3, 0x3605), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x13d3, 0x3607), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
++
++ /* Additional MediaTek MT7925 Bluetooth devices */
++ { USB_DEVICE(0x0489, 0xe111), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH },
++ { USB_DEVICE(0x0489, 0xe113), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x13d3, 0x3602), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
++ { USB_DEVICE(0x13d3, 0x3603), .driver_info = BTUSB_MEDIATEK |
++ BTUSB_WIDEBAND_SPEECH |
++ BTUSB_VALID_LE_STATES },
+
+ /* Additional Realtek 8723AE Bluetooth devices */
+ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
+@@ -858,6 +892,10 @@ struct btusb_data {
+
+ int (*setup_on_usb)(struct hci_dev *hdev);
+
++ int (*suspend)(struct hci_dev *hdev);
++ int (*resume)(struct hci_dev *hdev);
++ int (*disconnect)(struct hci_dev *hdev);
++
+ int oob_wake_irq; /* irq for out-of-band wake-on-bt */
+ unsigned cmd_timeout_cnt;
+
+@@ -4609,6 +4647,9 @@ static void btusb_disconnect(struct usb_interface *intf)
+ if (data->diag)
+ usb_set_intfdata(data->diag, NULL);
+
++ if (data->disconnect)
++ data->disconnect(hdev);
++
+ hci_unregister_dev(hdev);
+
+ if (intf == data->intf) {
+@@ -4657,6 +4698,9 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message)
+
+ cancel_work_sync(&data->work);
+
++ if (data->suspend)
++ data->suspend(data->hdev);
++
+ btusb_stop_traffic(data);
+ usb_kill_anchored_urbs(&data->tx_anchor);
+
+@@ -4760,6 +4804,9 @@ static int btusb_resume(struct usb_interface *intf)
+ btusb_submit_isoc_urb(hdev, GFP_NOIO);
+ }
+
++ if (data->resume)
++ data->resume(hdev);
++
+ spin_lock_irq(&data->txlock);
+ play_deferred(data);
+ clear_bit(BTUSB_SUSPENDING, &data->flags);
+diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
+index 8b3e5f84e89a77..ce44dbfd47e275 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.c
++++ b/drivers/clk/qcom/clk-alpha-pll.c
+@@ -52,6 +52,7 @@
+ #define PLL_CONFIG_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL])
+ #define PLL_CONFIG_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U])
+ #define PLL_CONFIG_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U1])
++#define PLL_CONFIG_CTL_U2(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U2])
+ #define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL])
+ #define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U])
+ #define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1])
+@@ -227,6 +228,32 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
+ [PLL_OFF_ALPHA_VAL] = 0x24,
+ [PLL_OFF_ALPHA_VAL_U] = 0x28,
+ },
++ [CLK_ALPHA_PLL_TYPE_ZONDA_OLE] = {
++ [PLL_OFF_L_VAL] = 0x04,
++ [PLL_OFF_ALPHA_VAL] = 0x08,
++ [PLL_OFF_USER_CTL] = 0x0c,
++ [PLL_OFF_USER_CTL_U] = 0x10,
++ [PLL_OFF_CONFIG_CTL] = 0x14,
++ [PLL_OFF_CONFIG_CTL_U] = 0x18,
++ [PLL_OFF_CONFIG_CTL_U1] = 0x1c,
++ [PLL_OFF_CONFIG_CTL_U2] = 0x20,
++ [PLL_OFF_TEST_CTL] = 0x24,
++ [PLL_OFF_TEST_CTL_U] = 0x28,
++ [PLL_OFF_TEST_CTL_U1] = 0x2c,
++ [PLL_OFF_OPMODE] = 0x30,
++ [PLL_OFF_STATUS] = 0x3c,
++ },
++ [CLK_ALPHA_PLL_TYPE_NSS_HUAYRA] = {
++ [PLL_OFF_L_VAL] = 0x04,
++ [PLL_OFF_ALPHA_VAL] = 0x08,
++ [PLL_OFF_TEST_CTL] = 0x0c,
++ [PLL_OFF_TEST_CTL_U] = 0x10,
++ [PLL_OFF_USER_CTL] = 0x14,
++ [PLL_OFF_CONFIG_CTL] = 0x18,
++ [PLL_OFF_CONFIG_CTL_U] = 0x1c,
++ [PLL_OFF_STATUS] = 0x20,
++ },
++
+ };
+ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
+
+diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
+index 3fd0ef41c72c89..52dc5b9b546a15 100644
+--- a/drivers/clk/qcom/clk-alpha-pll.h
++++ b/drivers/clk/qcom/clk-alpha-pll.h
+@@ -21,6 +21,7 @@ enum {
+ CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
+ CLK_ALPHA_PLL_TYPE_AGERA,
+ CLK_ALPHA_PLL_TYPE_ZONDA,
++ CLK_ALPHA_PLL_TYPE_ZONDA_OLE,
+ CLK_ALPHA_PLL_TYPE_LUCID_EVO,
+ CLK_ALPHA_PLL_TYPE_LUCID_OLE,
+ CLK_ALPHA_PLL_TYPE_RIVIAN_EVO,
+@@ -28,6 +29,7 @@ enum {
+ CLK_ALPHA_PLL_TYPE_BRAMMO_EVO,
+ CLK_ALPHA_PLL_TYPE_STROMER,
+ CLK_ALPHA_PLL_TYPE_STROMER_PLUS,
++ CLK_ALPHA_PLL_TYPE_NSS_HUAYRA,
+ CLK_ALPHA_PLL_TYPE_MAX,
+ };
+
+@@ -42,6 +44,7 @@ enum {
+ PLL_OFF_CONFIG_CTL,
+ PLL_OFF_CONFIG_CTL_U,
+ PLL_OFF_CONFIG_CTL_U1,
++ PLL_OFF_CONFIG_CTL_U2,
+ PLL_OFF_TEST_CTL,
+ PLL_OFF_TEST_CTL_U,
+ PLL_OFF_TEST_CTL_U1,
+@@ -119,6 +122,7 @@ struct alpha_pll_config {
+ u32 config_ctl_val;
+ u32 config_ctl_hi_val;
+ u32 config_ctl_hi1_val;
++ u32 config_ctl_hi2_val;
+ u32 user_ctl_val;
+ u32 user_ctl_hi_val;
+ u32 user_ctl_hi1_val;
+@@ -173,6 +177,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
+
+ extern const struct clk_ops clk_alpha_pll_zonda_ops;
+ #define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
++#define clk_alpha_pll_zonda_ole_ops clk_alpha_pll_zonda_ops
+
+ extern const struct clk_ops clk_alpha_pll_lucid_evo_ops;
+ extern const struct clk_ops clk_alpha_pll_reset_lucid_evo_ops;
+diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
+index 8ff7cd4e20bb11..5eec1457e13967 100644
+--- a/drivers/clocksource/hyperv_timer.c
++++ b/drivers/clocksource/hyperv_timer.c
+@@ -27,7 +27,8 @@
+ #include <asm/mshyperv.h>
+
+ static struct clock_event_device __percpu *hv_clock_event;
+-static u64 hv_sched_clock_offset __ro_after_init;
++/* Note: offset can hold negative values after hibernation. */
++static u64 hv_sched_clock_offset __read_mostly;
+
+ /*
+ * If false, we're using the old mechanism for stimer0 interrupts
+@@ -456,6 +457,17 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
+ hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ }
+
++/*
++ * Called during resume from hibernation, from overridden
++ * x86_platform.restore_sched_clock_state routine. This is to adjust offsets
++ * used to calculate time for hv tsc page based sched_clock, to account for
++ * time spent before hibernation.
++ */
++void hv_adj_sched_clock_offset(u64 offset)
++{
++ hv_sched_clock_offset -= offset;
++}
++
+ #ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
+ static int hv_cs_enable(struct clocksource *cs)
+ {
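
The arithmetic behind hv_adj_sched_clock_offset(): sched_clock reports ref_counter - hv_sched_clock_offset, so folding the suspend-to-resume gap into the offset makes the clock resume exactly where it stopped. Worked through with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long ref_at_suspend = 1000, ref_at_resume = 5000;
	unsigned long long offset = 200;	/* set once at boot */

	/* just before suspend: sched_clock = 1000 - 200 = 800 */
	printf("%llu\n", ref_at_suspend - offset);

	/* hv_adj_sched_clock_offset(ref_at_suspend - now), i.e.
	 * offset -= (suspend - resume); unsigned wraparound makes this
	 * equivalent to offset += (resume - suspend) = +4000 */
	offset -= ref_at_suspend - ref_at_resume;

	/* just after resume: sched_clock is still 800 */
	printf("%llu\n", ref_at_resume - offset);
	return 0;
}
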
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+index 3263b5fa182d20..f99e3b812ee44b 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+@@ -319,7 +319,7 @@ svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
+ spage = migrate_pfn_to_page(migrate->src[i]);
+ if (spage && !is_zone_device_page(spage)) {
+ src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
+- DMA_TO_DEVICE);
++ DMA_BIDIRECTIONAL);
+ r = dma_mapping_error(dev, src[i]);
+ if (r) {
+ dev_err(dev, "%s: fail %d dma_map_page\n",
+@@ -634,7 +634,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
+ goto out_oom;
+ }
+
+- dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
++ dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ r = dma_mapping_error(dev, dst[i]);
+ if (r) {
+ dev_err(adev->dev, "%s: fail %d dma_map_page\n", __func__, r);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index 9ec9792f115a8a..385a5a75fdf873 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -1219,10 +1219,6 @@ static bool is_dsc_need_re_compute(
+ if (dc_link->type != dc_connection_mst_branch)
+ return false;
+
+- if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
+- dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
+- return false;
+-
+ for (i = 0; i < MAX_PIPES; i++)
+ stream_on_link[i] = NULL;
+
+@@ -1243,6 +1239,18 @@ static bool is_dsc_need_re_compute(
+ if (!aconnector)
+ continue;
+
++ /*
++ * Check if cached virtual MST DSC caps are available and DSC is supported
++ * this change takes care of newer MST DSC capable devices that report their
++ * DPCD caps as per specifications in their Virtual DPCD registers.
++
++ * TODO: implement the check for older MST DSC devices that do not conform to
++ * specifications.
++ */
++ if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported ||
++ aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
++ continue;
++
+ stream_on_link[new_stream_on_link_num] = aconnector;
+ new_stream_on_link_num++;
+
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+index 61f4a38e7d2bf6..8f786592143b6c 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+@@ -153,7 +153,16 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
+ ADV7511_AUDIO_CFG3_LEN_MASK, len);
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
+ ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
+- regmap_write(adv7511->regmap, 0x73, 0x1);
++
++ /* send current Audio infoframe values while updating */
++ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
++ BIT(5), BIT(5));
++
++ regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
++
++ /* use Audio infoframe updated info */
++ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
++ BIT(5), 0);
+
+ return 0;
+ }
+@@ -184,8 +193,9 @@ static int audio_startup(struct device *dev, void *data)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
+ BIT(7) | BIT(6), BIT(7));
+ /* use Audio infoframe updated info */
+- regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
++ regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+ BIT(5), 0);
++
+ /* enable SPDIF receiver */
+ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+index ef2b6ce544d0a8..1aa4153b40e0c1 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+@@ -1225,8 +1225,10 @@ static int adv7511_probe(struct i2c_client *i2c)
+ return ret;
+
+ ret = adv7511_init_regulators(adv7511);
+- if (ret)
+- return dev_err_probe(dev, ret, "failed to init regulators\n");
++ if (ret) {
++ dev_err_probe(dev, ret, "failed to init regulators\n");
++ goto err_of_node_put;
++ }
+
+ /*
+ * The power down GPIO is optional. If present, toggle it from active to
+@@ -1346,6 +1348,8 @@ static int adv7511_probe(struct i2c_client *i2c)
+ i2c_unregister_device(adv7511->i2c_edid);
+ uninit_regulators:
+ adv7511_uninit_regulators(adv7511);
++err_of_node_put:
++ of_node_put(adv7511->host_node);
+
+ return ret;
+ }
+@@ -1354,6 +1358,8 @@ static void adv7511_remove(struct i2c_client *i2c)
+ {
+ struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
+
++ of_node_put(adv7511->host_node);
++
+ adv7511_uninit_regulators(adv7511);
+
+ drm_bridge_remove(&adv7511->bridge);
+diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+index 7e3e56441aedc5..6a4733c7082700 100644
+--- a/drivers/gpu/drm/bridge/adv7511/adv7533.c
++++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
+@@ -175,7 +175,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+
+ of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
+
+- if (num_lanes < 1 || num_lanes > 4)
++ if (num_lanes < 2 || num_lanes > 4)
+ return -EINVAL;
+
+ adv->num_dsi_lanes = num_lanes;
+@@ -184,8 +184,6 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
+ if (!adv->host_node)
+ return -ENODEV;
+
+- of_node_put(adv->host_node);
+-
+ adv->use_timing_gen = !of_property_read_bool(np,
+ "adi,disable-timing-generator");
+
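
The adv7533/adv7511 changes above are a reference-lifetime fix: the host-node lookup in parse_dt (of_graph_get_remote_node() upstream) returns the node with a reference held, and the old code dropped that reference immediately while the driver kept using the node. The reference now lives until remove() or the probe error path. The bug shape, in a toy refcount model:

#include <assert.h>
#include <stdio.h>

static int refcount = 1;	/* the node itself */

static void node_get(void) { refcount++; }
static void node_put(void) { refcount--; }

int main(void)
{
	node_get();	/* parse_dt: lookup returns a held reference */

	/* old code called node_put() right here, then kept dereferencing
	 * a node it no longer owned; new code holds on... */
	printf("driver bound, refcount=%d\n", refcount);	/* 2 */

	node_put();	/* ...and drops it in remove()/error unwind */
	assert(refcount == 1);
	return 0;
}
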
+diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
+index 9e113e9473260a..6e8c182b2559e7 100644
+--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
++++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
+@@ -133,7 +133,7 @@ static void gen11_rc6_enable(struct intel_rc6 *rc6)
+ GEN9_MEDIA_PG_ENABLE |
+ GEN11_MEDIA_SAMPLER_PG_ENABLE;
+
+- if (GRAPHICS_VER(gt->i915) >= 12) {
++ if (GRAPHICS_VER(gt->i915) >= 12 && !IS_DG1(gt->i915)) {
+ for (i = 0; i < I915_MAX_VCS; i++)
+ if (HAS_ENGINE(gt, _VCS(i)))
+ pg_enable |= (VDN_HCP_POWERGATE_ENABLE(i) |
+diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
+index 97d27e01a6ee27..982007a112c2a0 100644
+--- a/drivers/i2c/busses/Kconfig
++++ b/drivers/i2c/busses/Kconfig
+@@ -159,6 +159,8 @@ config I2C_I801
+ Raptor Lake (PCH)
+ Meteor Lake (SOC and PCH)
+ Birch Stream (SOC)
++ Arrow Lake (SOC)
++ Panther Lake (SOC)
+
+ This driver can also be built as a module. If so, the module
+ will be called i2c-i801.
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 2b8bcd121ffa5d..18c04f5e41d9c5 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -80,6 +80,9 @@
+ * Meteor Lake SoC-S (SOC) 0xae22 32 hard yes yes yes
+ * Meteor Lake PCH-S (PCH) 0x7f23 32 hard yes yes yes
+ * Birch Stream (SOC) 0x5796 32 hard yes yes yes
++ * Arrow Lake-H (SOC) 0x7722 32 hard yes yes yes
++ * Panther Lake-H (SOC) 0xe322 32 hard yes yes yes
++ * Panther Lake-P (SOC) 0xe422 32 hard yes yes yes
+ *
+ * Features supported by this driver:
+ * Software PEC no
+@@ -234,6 +237,7 @@
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
+ #define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
+ #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
++#define PCI_DEVICE_ID_INTEL_ARROW_LAKE_H_SMBUS 0x7722
+ #define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
+ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS 0x7aa3
+ #define PCI_DEVICE_ID_INTEL_METEOR_LAKE_P_SMBUS 0x7e22
+@@ -256,6 +260,8 @@
+ #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323
+ #define PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS 0xa3a3
+ #define PCI_DEVICE_ID_INTEL_METEOR_LAKE_SOC_S_SMBUS 0xae22
++#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_H_SMBUS 0xe322
++#define PCI_DEVICE_ID_INTEL_PANTHER_LAKE_P_SMBUS 0xe422
+
+ struct i801_mux_config {
+ char *gpio_chip;
+@@ -1046,6 +1052,9 @@ static const struct pci_device_id i801_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
++ { PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
++ { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
++ { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { 0, }
+ };
+
+diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
+index fbc1ffbd2fa7d6..658396c9eeabf9 100644
+--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
++++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
+@@ -91,14 +91,6 @@
+
+ #define SLIMPRO_IIC_MSG_DWORD_COUNT 3
+
+-/* PCC related defines */
+-#define PCC_SIGNATURE 0x50424300
+-#define PCC_STS_CMD_COMPLETE BIT(0)
+-#define PCC_STS_SCI_DOORBELL BIT(1)
+-#define PCC_STS_ERR BIT(2)
+-#define PCC_STS_PLAT_NOTIFY BIT(3)
+-#define PCC_CMD_GENERATE_DB_INT BIT(15)
+-
+ struct slimpro_i2c_dev {
+ struct i2c_adapter adapter;
+ struct device *dev;
+@@ -160,11 +152,11 @@ static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg)
+
+ /* Check if platform sends interrupt */
+ if (!xgene_word_tst_and_clr(&generic_comm_base->status,
+- PCC_STS_SCI_DOORBELL))
++ PCC_STATUS_SCI_DOORBELL))
+ return;
+
+ if (xgene_word_tst_and_clr(&generic_comm_base->status,
+- PCC_STS_CMD_COMPLETE)) {
++ PCC_STATUS_CMD_COMPLETE)) {
+ msg = generic_comm_base + 1;
+
+ /* Response message msg[1] contains the return value. */
+@@ -186,10 +178,10 @@ static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg)
+ cpu_to_le32(PCC_SIGNATURE | ctx->mbox_idx));
+
+ WRITE_ONCE(generic_comm_base->command,
+- cpu_to_le16(SLIMPRO_MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INT));
++ cpu_to_le16(SLIMPRO_MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INTR));
+
+ status = le16_to_cpu(READ_ONCE(generic_comm_base->status));
+- status &= ~PCC_STS_CMD_COMPLETE;
++ status &= ~PCC_STATUS_CMD_COMPLETE;
+ WRITE_ONCE(generic_comm_base->status, cpu_to_le16(status));
+
+ /* Copy the message to the PCC comm space */
+diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
+index b64fd365f83fb8..fa6810aa6a4a7a 100644
+--- a/drivers/iio/adc/ad7192.c
++++ b/drivers/iio/adc/ad7192.c
+@@ -16,7 +16,9 @@
+ #include <linux/err.h>
+ #include <linux/sched.h>
+ #include <linux/delay.h>
+-#include <linux/of.h>
++#include <linux/module.h>
++#include <linux/mod_devicetable.h>
++#include <linux/property.h>
+
+ #include <linux/iio/iio.h>
+ #include <linux/iio/sysfs.h>
+@@ -360,19 +362,19 @@ static inline bool ad7192_valid_external_frequency(u32 freq)
+ freq <= AD7192_EXT_FREQ_MHZ_MAX);
+ }
+
+-static int ad7192_of_clock_select(struct ad7192_state *st)
++static int ad7192_clock_select(struct ad7192_state *st)
+ {
+- struct device_node *np = st->sd.spi->dev.of_node;
++ struct device *dev = &st->sd.spi->dev;
+ unsigned int clock_sel;
+
+ clock_sel = AD7192_CLK_INT;
+
+ /* use internal clock */
+ if (!st->mclk) {
+- if (of_property_read_bool(np, "adi,int-clock-output-enable"))
++ if (device_property_read_bool(dev, "adi,int-clock-output-enable"))
+ clock_sel = AD7192_CLK_INT_CO;
+ } else {
+- if (of_property_read_bool(np, "adi,clock-xtal"))
++ if (device_property_read_bool(dev, "adi,clock-xtal"))
+ clock_sel = AD7192_CLK_EXT_MCLK1_2;
+ else
+ clock_sel = AD7192_CLK_EXT_MCLK2;
+@@ -381,7 +383,7 @@ static int ad7192_of_clock_select(struct ad7192_state *st)
+ return clock_sel;
+ }
+
+-static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
++static int ad7192_setup(struct iio_dev *indio_dev, struct device *dev)
+ {
+ struct ad7192_state *st = iio_priv(indio_dev);
+ bool rej60_en, refin2_en;
+@@ -403,7 +405,7 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
+ id &= AD7192_ID_MASK;
+
+ if (id != st->chip_info->chip_id)
+- dev_warn(&st->sd.spi->dev, "device ID query failed (0x%X != 0x%X)\n",
++ dev_warn(dev, "device ID query failed (0x%X != 0x%X)\n",
+ id, st->chip_info->chip_id);
+
+ st->mode = AD7192_MODE_SEL(AD7192_MODE_IDLE) |
+@@ -412,31 +414,31 @@ static int ad7192_setup(struct iio_dev *indio_dev, struct device_node *np)
+
+ st->conf = AD7192_CONF_GAIN(0);
+
+- rej60_en = of_property_read_bool(np, "adi,rejection-60-Hz-enable");
++ rej60_en = device_property_read_bool(dev, "adi,rejection-60-Hz-enable");
+ if (rej60_en)
+ st->mode |= AD7192_MODE_REJ60;
+
+- refin2_en = of_property_read_bool(np, "adi,refin2-pins-enable");
++ refin2_en = device_property_read_bool(dev, "adi,refin2-pins-enable");
+ if (refin2_en && st->chip_info->chip_id != CHIPID_AD7195)
+ st->conf |= AD7192_CONF_REFSEL;
+
+ st->conf &= ~AD7192_CONF_CHOP;
+ st->f_order = AD7192_NO_SYNC_FILTER;
+
+- buf_en = of_property_read_bool(np, "adi,buffer-enable");
++ buf_en = device_property_read_bool(dev, "adi,buffer-enable");
+ if (buf_en)
+ st->conf |= AD7192_CONF_BUF;
+
+- bipolar = of_property_read_bool(np, "bipolar");
++ bipolar = device_property_read_bool(dev, "bipolar");
+ if (!bipolar)
+ st->conf |= AD7192_CONF_UNIPOLAR;
+
+- burnout_curr_en = of_property_read_bool(np,
+- "adi,burnout-currents-enable");
++ burnout_curr_en = device_property_read_bool(dev,
++ "adi,burnout-currents-enable");
+ if (burnout_curr_en && buf_en) {
+ st->conf |= AD7192_CONF_BURN;
+ } else if (burnout_curr_en) {
+- dev_warn(&st->sd.spi->dev,
++ dev_warn(dev,
+ "Can't enable burnout currents: see CHOP or buffer\n");
+ }
+
+@@ -1036,9 +1038,10 @@ static int ad7192_probe(struct spi_device *spi)
+ }
+ st->int_vref_mv = ret / 1000;
+
+- st->chip_info = of_device_get_match_data(&spi->dev);
++ st->chip_info = spi_get_device_match_data(spi);
+ if (!st->chip_info)
+- st->chip_info = (void *)spi_get_device_id(spi)->driver_data;
++ return -ENODEV;
++
+ indio_dev->name = st->chip_info->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+@@ -1065,7 +1068,7 @@ static int ad7192_probe(struct spi_device *spi)
+ if (IS_ERR(st->mclk))
+ return PTR_ERR(st->mclk);
+
+- st->clock_sel = ad7192_of_clock_select(st);
++ st->clock_sel = ad7192_clock_select(st);
+
+ if (st->clock_sel == AD7192_CLK_EXT_MCLK1_2 ||
+ st->clock_sel == AD7192_CLK_EXT_MCLK2) {
+@@ -1077,7 +1080,7 @@ static int ad7192_probe(struct spi_device *spi)
+ }
+ }
+
+- ret = ad7192_setup(indio_dev, spi->dev.of_node);
++ ret = ad7192_setup(indio_dev, &spi->dev);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
+index e836c9c477f675..c6053e82ecf6f3 100644
+--- a/drivers/infiniband/core/uverbs_cmd.c
++++ b/drivers/infiniband/core/uverbs_cmd.c
+@@ -161,7 +161,7 @@ static const void __user *uverbs_request_next_ptr(struct uverbs_req_iter *iter,
+ {
+ const void __user *res = iter->cur;
+
+- if (iter->cur + len > iter->end)
++ if (len > iter->end - iter->cur)
+ return (void __force __user *)ERR_PTR(-ENOSPC);
+ iter->cur += len;
+ return res;
+@@ -2009,11 +2009,13 @@ static int ib_uverbs_post_send(struct uverbs_attr_bundle *attrs)
+ ret = uverbs_request_start(attrs, &iter, &cmd, sizeof(cmd));
+ if (ret)
+ return ret;
+- wqes = uverbs_request_next_ptr(&iter, cmd.wqe_size * cmd.wr_count);
++ wqes = uverbs_request_next_ptr(&iter, size_mul(cmd.wqe_size,
++ cmd.wr_count));
+ if (IS_ERR(wqes))
+ return PTR_ERR(wqes);
+- sgls = uverbs_request_next_ptr(
+- &iter, cmd.sge_count * sizeof(struct ib_uverbs_sge));
++ sgls = uverbs_request_next_ptr(&iter,
++ size_mul(cmd.sge_count,
++ sizeof(struct ib_uverbs_sge)));
+ if (IS_ERR(sgls))
+ return PTR_ERR(sgls);
+ ret = uverbs_request_finish(&iter);
+@@ -2199,11 +2201,11 @@ ib_uverbs_unmarshall_recv(struct uverbs_req_iter *iter, u32 wr_count,
+ if (wqe_size < sizeof(struct ib_uverbs_recv_wr))
+ return ERR_PTR(-EINVAL);
+
+- wqes = uverbs_request_next_ptr(iter, wqe_size * wr_count);
++ wqes = uverbs_request_next_ptr(iter, size_mul(wqe_size, wr_count));
+ if (IS_ERR(wqes))
+ return ERR_CAST(wqes);
+- sgls = uverbs_request_next_ptr(
+- iter, sge_count * sizeof(struct ib_uverbs_sge));
++ sgls = uverbs_request_next_ptr(iter, size_mul(sge_count,
++ sizeof(struct ib_uverbs_sge)));
+ if (IS_ERR(sgls))
+ return ERR_CAST(sgls);
+ ret = uverbs_request_finish(iter);
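
Two hardening moves in this hunk: the length check is rewritten so a huge caller-supplied len cannot overflow the pointer addition, and the byte counts are computed with size_mul() so the multiplications saturate instead of wrapping. The first idea, in isolation:

#include <stdint.h>
#include <stdio.h>

static int in_bounds(const char *cur, const char *end, size_t len)
{
	/* WRONG: cur + len may wrap past the end of the address space
	 * and compare as "small":  return cur + len <= end;           */

	/* RIGHT: end - cur is the non-negative space remaining */
	return len <= (size_t)(end - cur);
}

int main(void)
{
	char buf[16];

	printf("%d\n", in_bounds(buf, buf + sizeof(buf), 8));	      /* 1 */
	printf("%d\n", in_bounds(buf, buf + sizeof(buf), SIZE_MAX)); /* 0 */
	return 0;
}
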
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index df589726060144..13c65ec5825687 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -147,7 +147,7 @@ int bnxt_re_query_device(struct ib_device *ibdev,
+
+ ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
+ ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
+- ib_attr->hw_ver = rdev->en_dev->pdev->subsystem_device;
++ ib_attr->hw_ver = rdev->en_dev->pdev->revision;
+ ib_attr->max_qp = dev_attr->max_qp;
+ ib_attr->max_qp_wr = dev_attr->max_qp_wqes;
+ ib_attr->device_cap_flags =
+@@ -992,23 +992,22 @@ static int bnxt_re_setup_swqe_size(struct bnxt_re_qp *qp,
+ align = sizeof(struct sq_send_hdr);
+ ilsize = ALIGN(init_attr->cap.max_inline_data, align);
+
+- sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
+- if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
+- return -EINVAL;
+- /* For gen p4 and gen p5 backward compatibility mode
+- * wqe size is fixed to 128 bytes
++ /* For gen p4 and gen p5 fixed wqe compatibility mode
++ * wqe size is fixed to 128 bytes - ie 6 SGEs
+ */
+- if (sq->wqe_size < bnxt_re_get_swqe_size(dev_attr->max_qp_sges) &&
+- qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+- sq->wqe_size = bnxt_re_get_swqe_size(dev_attr->max_qp_sges);
++ if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) {
++ sq->wqe_size = bnxt_re_get_swqe_size(BNXT_STATIC_MAX_SGE);
++ sq->max_sge = BNXT_STATIC_MAX_SGE;
++ } else {
++ sq->wqe_size = bnxt_re_get_wqe_size(ilsize, sq->max_sge);
++ if (sq->wqe_size > bnxt_re_get_swqe_size(dev_attr->max_qp_sges))
++ return -EINVAL;
++ }
+
+ if (init_attr->cap.max_inline_data) {
+ qplqp->max_inline_data = sq->wqe_size -
+ sizeof(struct sq_send_hdr);
+ init_attr->cap.max_inline_data = qplqp->max_inline_data;
+- if (qplqp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
+- sq->max_sge = qplqp->max_inline_data /
+- sizeof(struct sq_sge);
+ }
+
+ return 0;
+@@ -1154,6 +1153,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
+ /* Shadow QP SQ depth should be same as QP1 RQ depth */
+ qp->qplib_qp.sq.wqe_size = bnxt_re_get_wqe_size(0, 6);
+ qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
++ qp->qplib_qp.sq.max_sw_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.sq.max_sge = 2;
+ /* Q full delta can be 1 since it is internal QP */
+ qp->qplib_qp.sq.q_full_delta = 1;
+@@ -1165,6 +1165,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
+
+ qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size(6);
+ qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
++ qp->qplib_qp.rq.max_sw_wqe = qp1_qp->rq.max_wqe;
+ qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
+ /* Q full delta can be 1 since it is internal QP */
+ qp->qplib_qp.rq.q_full_delta = 1;
+@@ -1226,6 +1227,7 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
+ */
+ entries = bnxt_re_init_depth(init_attr->cap.max_recv_wr + 1, uctx);
+ rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
++ rq->max_sw_wqe = rq->max_wqe;
+ rq->q_full_delta = 0;
+ rq->sg_info.pgsize = PAGE_SIZE;
+ rq->sg_info.pgshft = PAGE_SHIFT;
+@@ -1285,6 +1287,7 @@ static int bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
+ 0 : BNXT_QPLIB_RESERVED_QP_WRS;
+ entries = bnxt_re_init_depth(entries + diff + 1, uctx);
+ sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + diff + 1);
++ sq->max_sw_wqe = bnxt_qplib_get_depth(sq, qplqp->wqe_mode, true);
+ sq->q_full_delta = diff + 1;
+ /*
+ * Reserving one slot for Phantom WQE. Application can
+@@ -2055,18 +2058,20 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ }
+ }
+
+- if (qp_attr_mask & IB_QP_PATH_MTU) {
+- qp->qplib_qp.modify_flags |=
+- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+- qp->qplib_qp.path_mtu = __from_ib_mtu(qp_attr->path_mtu);
+- qp->qplib_qp.mtu = ib_mtu_enum_to_int(qp_attr->path_mtu);
+- } else if (qp_attr->qp_state == IB_QPS_RTR) {
+- qp->qplib_qp.modify_flags |=
+- CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
+- qp->qplib_qp.path_mtu =
+- __from_ib_mtu(iboe_get_mtu(rdev->netdev->mtu));
+- qp->qplib_qp.mtu =
+- ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
++ if (qp_attr->qp_state == IB_QPS_RTR) {
++ enum ib_mtu qpmtu;
++
++ qpmtu = iboe_get_mtu(rdev->netdev->mtu);
++ if (qp_attr_mask & IB_QP_PATH_MTU) {
++ if (ib_mtu_enum_to_int(qp_attr->path_mtu) >
++ ib_mtu_enum_to_int(qpmtu))
++ return -EINVAL;
++ qpmtu = qp_attr->path_mtu;
++ }
++
++ qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_PATH_MTU;
++ qp->qplib_qp.path_mtu = __from_ib_mtu(qpmtu);
++ qp->qplib_qp.mtu = ib_mtu_enum_to_int(qpmtu);
+ }
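Editor's note: the rewritten RTR branch above derives the MTU ceiling from the netdev first and only then honours IB_QP_PATH_MTU, returning -EINVAL for a request above the wire MTU instead of silently programming it. The same clamp in isolation, working in bytes (names are illustrative; the driver compares enum values via ib_mtu_enum_to_int()):

    #include <errno.h>

    /* Start from the interface ceiling; the caller may lower it
     * but never raise it. Returns 0 and the chosen MTU, or -EINVAL.
     */
    static int pick_path_mtu(int netdev_mtu, int requested,
                             int have_request, int *chosen)
    {
        int limit = netdev_mtu;

        if (have_request) {
            if (requested > limit)
                return -EINVAL;    /* cannot exceed the wire MTU */
            limit = requested;
        }
        *chosen = limit;
        return 0;
    }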
+
+ if (qp_attr_mask & IB_QP_TIMEOUT) {
+@@ -2153,6 +2158,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
+ entries = bnxt_re_init_depth(qp_attr->cap.max_recv_wr, uctx);
+ qp->qplib_qp.rq.max_wqe =
+ min_t(u32, entries, dev_attr->max_qp_wqes + 1);
++ qp->qplib_qp.rq.max_sw_wqe = qp->qplib_qp.rq.max_wqe;
+ qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
+ qp_attr->cap.max_recv_wr;
+ qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
+@@ -2710,7 +2716,8 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
+ wr = wr->next;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+- bnxt_ud_qp_hw_stall_workaround(qp);
++ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
++ bnxt_ud_qp_hw_stall_workaround(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+ return rc;
+ }
+@@ -2822,7 +2829,8 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ wr = wr->next;
+ }
+ bnxt_qplib_post_send_db(&qp->qplib_qp);
+- bnxt_ud_qp_hw_stall_workaround(qp);
++ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
++ bnxt_ud_qp_hw_stall_workaround(qp);
+ spin_unlock_irqrestore(&qp->sq_lock, flags);
+
+ return rc;
+@@ -4167,9 +4175,6 @@ int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata)
+ resp.cqe_sz = sizeof(struct cq_base);
+ resp.max_cqd = dev_attr->max_cq_wqes;
+
+- resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
+- resp.mode = rdev->chip_ctx->modes.wqe_mode;
+-
+ if (rdev->chip_ctx->modes.db_push)
+ resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
+
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 0373d0e9db6329..c7e51cc2ea2687 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -128,13 +128,13 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
+ }
+ }
+
+-static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
++static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev)
+ {
+ struct bnxt_qplib_chip_ctx *cctx;
+
+ cctx = rdev->chip_ctx;
+- cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
+- mode : BNXT_QPLIB_WQE_MODE_STATIC;
++ cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p7(rdev->chip_ctx) ?
++ BNXT_QPLIB_WQE_MODE_VARIABLE : BNXT_QPLIB_WQE_MODE_STATIC;
+ if (bnxt_re_hwrm_qcaps(rdev))
+ dev_err(rdev_to_dev(rdev),
+ "Failed to query hwrm qcaps\n");
+@@ -155,7 +155,7 @@ static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
+ kfree(chip_ctx);
+ }
+
+-static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
++static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
+ {
+ struct bnxt_qplib_chip_ctx *chip_ctx;
+ struct bnxt_en_dev *en_dev;
+@@ -177,7 +177,7 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
+ rdev->qplib_res.dattr = &rdev->dev_attr;
+ rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
+
+- bnxt_re_set_drv_mode(rdev, wqe_mode);
++ bnxt_re_set_drv_mode(rdev);
+
+ bnxt_re_set_db_offset(rdev);
+ rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
+@@ -1440,7 +1440,7 @@ static void bnxt_re_worker(struct work_struct *work)
+ schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
+ }
+
+-static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
++static int bnxt_re_dev_init(struct bnxt_re_dev *rdev)
+ {
+ struct bnxt_re_ring_attr rattr = {};
+ struct bnxt_qplib_creq_ctx *creq;
+@@ -1458,7 +1458,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
+ }
+ set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+
+- rc = bnxt_re_setup_chip_ctx(rdev, wqe_mode);
++ rc = bnxt_re_setup_chip_ctx(rdev);
+ if (rc) {
+ bnxt_unregister_dev(rdev->en_dev);
+ clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
+@@ -1609,7 +1609,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
+ return rc;
+ }
+
+-static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
++static int bnxt_re_add_device(struct auxiliary_device *adev)
+ {
+ struct bnxt_aux_priv *aux_priv =
+ container_of(adev, struct bnxt_aux_priv, aux_dev);
+@@ -1626,7 +1626,7 @@ static int bnxt_re_add_device(struct auxiliary_device *adev, u8 wqe_mode)
+ goto exit;
+ }
+
+- rc = bnxt_re_dev_init(rdev, wqe_mode);
++ rc = bnxt_re_dev_init(rdev);
+ if (rc)
+ goto re_dev_dealloc;
+
+@@ -1756,7 +1756,8 @@ static int bnxt_re_probe(struct auxiliary_device *adev,
+ int rc;
+
+ mutex_lock(&bnxt_re_mutex);
+- rc = bnxt_re_add_device(adev, BNXT_QPLIB_WQE_MODE_STATIC);
++
++ rc = bnxt_re_add_device(adev);
+ if (rc) {
+ mutex_unlock(&bnxt_re_mutex);
+ return rc;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index b624c255eee6fa..871a49315c880f 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -639,13 +639,6 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
+ if (rc)
+ return rc;
+-
+- srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
+- GFP_KERNEL);
+- if (!srq->swq) {
+- rc = -ENOMEM;
+- goto fail;
+- }
+ srq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_SRQ,
+@@ -674,9 +667,17 @@ int bnxt_qplib_create_srq(struct bnxt_qplib_res *res,
+ spin_lock_init(&srq->lock);
+ srq->start_idx = 0;
+ srq->last_idx = srq->hwq.max_elements - 1;
+- for (idx = 0; idx < srq->hwq.max_elements; idx++)
+- srq->swq[idx].next_idx = idx + 1;
+- srq->swq[srq->last_idx].next_idx = -1;
++ if (!srq->hwq.is_user) {
++ srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
++ GFP_KERNEL);
++ if (!srq->swq) {
++ rc = -ENOMEM;
++ goto fail;
++ }
++ for (idx = 0; idx < srq->hwq.max_elements; idx++)
++ srq->swq[idx].next_idx = idx + 1;
++ srq->swq[srq->last_idx].next_idx = -1;
++ }
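Editor's note: this create_srq change, like the create_qp hunks further down, allocates the software shadow queue only when the queue is kernel-owned; user-space queues keep their own indices, so the swq pointer stays NULL and must be skipped on free. A kernel-style sketch of the pattern, with illustrative names (shadow_ring is not a driver type):

    #include <linux/slab.h>

    struct shadow_ring {
        int *next_idx;
    };

    static int shadow_ring_init(struct shadow_ring *r, unsigned int n,
                                bool is_user)
    {
        unsigned int i;

        if (is_user)
            return 0;    /* user space keeps its own ring */
        if (!n)
            return -EINVAL;

        r->next_idx = kcalloc(n, sizeof(*r->next_idx), GFP_KERNEL);
        if (!r->next_idx)
            return -ENOMEM;
        for (i = 0; i < n; i++)
            r->next_idx[i] = i + 1;
        r->next_idx[n - 1] = -1;    /* terminate, as create_srq does */
        return 0;
    }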
+
+ srq->id = le32_to_cpu(resp.xid);
+ srq->dbinfo.hwq = &srq->hwq;
+@@ -806,13 +807,13 @@ static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
+ {
+ int indx;
+
+- que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
++ que->swq = kcalloc(que->max_sw_wqe, sizeof(*que->swq), GFP_KERNEL);
+ if (!que->swq)
+ return -ENOMEM;
+
+ que->swq_start = 0;
+- que->swq_last = que->max_wqe - 1;
+- for (indx = 0; indx < que->max_wqe; indx++)
++ que->swq_last = que->max_sw_wqe - 1;
++ for (indx = 0; indx < que->max_sw_wqe; indx++)
+ que->swq[indx].next_idx = indx + 1;
+ que->swq[que->swq_last].next_idx = 0; /* Make it circular */
+ que->swq_last = 0;
+@@ -848,7 +849,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+- hwq_attr.depth = bnxt_qplib_get_depth(sq);
++ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, false);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
+ if (rc)
+@@ -876,7 +877,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+- hwq_attr.depth = bnxt_qplib_get_depth(rq);
++ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
+ if (rc)
+@@ -980,9 +981,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ u32 tbl_indx;
+ u16 nsge;
+
+- if (res->dattr)
+- qp->dev_cap_flags = res->dattr->dev_cap_flags;
+-
++ qp->is_host_msn_tbl = _is_host_msn_table(res->dattr->dev_cap_flags2);
+ sq->dbinfo.flags = 0;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_CREATE_QP,
+@@ -999,7 +998,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
+
+- if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
++ if (qp->is_host_msn_tbl) {
+ psn_sz = sizeof(struct sq_msn_search);
+ qp->msn = 0;
+ }
+@@ -1008,13 +1007,18 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &sq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+- hwq_attr.depth = bnxt_qplib_get_depth(sq);
++ hwq_attr.depth = bnxt_qplib_get_depth(sq, qp->wqe_mode, true);
+ hwq_attr.aux_stride = psn_sz;
+ hwq_attr.aux_depth = psn_sz ? bnxt_qplib_set_sq_size(sq, qp->wqe_mode)
+ : 0;
+ /* Update msn tbl size */
+- if (BNXT_RE_HW_RETX(qp->dev_cap_flags) && psn_sz) {
+- hwq_attr.aux_depth = roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
++ if (qp->is_host_msn_tbl && psn_sz) {
++ if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC)
++ hwq_attr.aux_depth =
++ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
++ else
++ hwq_attr.aux_depth =
++ roundup_pow_of_two(bnxt_qplib_set_sq_size(sq, qp->wqe_mode)) / 2;
+ qp->msn_tbl_sz = hwq_attr.aux_depth;
+ qp->msn = 0;
+ }
+@@ -1024,13 +1028,14 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ if (rc)
+ return rc;
+
+- rc = bnxt_qplib_alloc_init_swq(sq);
+- if (rc)
+- goto fail_sq;
+-
+- if (psn_sz)
+- bnxt_qplib_init_psn_ptr(qp, psn_sz);
++ if (!sq->hwq.is_user) {
++ rc = bnxt_qplib_alloc_init_swq(sq);
++ if (rc)
++ goto fail_sq;
+
++ if (psn_sz)
++ bnxt_qplib_init_psn_ptr(qp, psn_sz);
++ }
+ req.sq_size = cpu_to_le32(bnxt_qplib_set_sq_size(sq, qp->wqe_mode));
+ pbl = &sq->hwq.pbl[PBL_LVL_0];
+ req.sq_pbl = cpu_to_le64(pbl->pg_map_arr[0]);
+@@ -1049,16 +1054,18 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ hwq_attr.res = res;
+ hwq_attr.sginfo = &rq->sg_info;
+ hwq_attr.stride = sizeof(struct sq_sge);
+- hwq_attr.depth = bnxt_qplib_get_depth(rq);
++ hwq_attr.depth = bnxt_qplib_get_depth(rq, qp->wqe_mode, false);
+ hwq_attr.aux_stride = 0;
+ hwq_attr.aux_depth = 0;
+ hwq_attr.type = HWQ_TYPE_QUEUE;
+ rc = bnxt_qplib_alloc_init_hwq(&rq->hwq, &hwq_attr);
+ if (rc)
+ goto sq_swq;
+- rc = bnxt_qplib_alloc_init_swq(rq);
+- if (rc)
+- goto fail_rq;
++ if (!rq->hwq.is_user) {
++ rc = bnxt_qplib_alloc_init_swq(rq);
++ if (rc)
++ goto fail_rq;
++ }
+
+ req.rq_size = cpu_to_le32(rq->max_wqe);
+ pbl = &rq->hwq.pbl[PBL_LVL_0];
+@@ -1154,9 +1161,11 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+ rq->dbinfo.db = qp->dpi->dbr;
+ rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
+ }
++ spin_lock_bh(&rcfw->tbl_lock);
+ tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+ rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+ rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
++ spin_unlock_bh(&rcfw->tbl_lock);
+
+ return 0;
+ fail:
+@@ -1638,7 +1647,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
+ if (!swq->psn_search)
+ return;
+ /* Handle MSN differently on cap flags */
+- if (BNXT_RE_HW_RETX(qp->dev_cap_flags)) {
++ if (qp->is_host_msn_tbl) {
+ bnxt_qplib_fill_msn_search(qp, wqe, swq);
+ return;
+ }
+@@ -1820,7 +1829,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ }
+
+ swq = bnxt_qplib_get_swqe(sq, &wqe_idx);
+- bnxt_qplib_pull_psn_buff(qp, sq, swq, BNXT_RE_HW_RETX(qp->dev_cap_flags));
++ bnxt_qplib_pull_psn_buff(qp, sq, swq, qp->is_host_msn_tbl);
+
+ idx = 0;
+ swq->slot_idx = hwq->prod;
+@@ -2010,7 +2019,7 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
+ rc = -EINVAL;
+ goto done;
+ }
+- if (!BNXT_RE_HW_RETX(qp->dev_cap_flags) || msn_update) {
++ if (!qp->is_host_msn_tbl || msn_update) {
+ swq->next_psn = sq->psn & BTH_PSN_MASK;
+ bnxt_qplib_fill_psn_search(qp, wqe, swq);
+ }
+@@ -2491,7 +2500,7 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
+ }
+ sq = &qp->sq;
+
+- cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_wqe;
++ cqe_sq_cons = le16_to_cpu(hwcqe->sq_cons_idx) % sq->max_sw_wqe;
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+ "%s: QP in Flush QP = %p\n", __func__, qp);
+@@ -2534,10 +2543,12 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
+ bnxt_qplib_add_flush_qp(qp);
+ } else {
+ /* Before we complete, do WA 9060 */
+- if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
+- cqe_sq_cons)) {
+- *lib_qp = qp;
+- goto out;
++ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
++ if (do_wa9060(qp, cq, cq_cons, sq->swq_last,
++ cqe_sq_cons)) {
++ *lib_qp = qp;
++ goto out;
++ }
+ }
+ if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
+ cqe->status = CQ_REQ_STATUS_OK;
+@@ -2881,7 +2892,7 @@ static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
+ cqe_cons = le16_to_cpu(hwcqe->sq_cons_idx);
+ if (cqe_cons == 0xFFFF)
+ goto do_rq;
+- cqe_cons %= sq->max_wqe;
++ cqe_cons %= sq->max_sw_wqe;
+
+ if (qp->sq.flushed) {
+ dev_dbg(&cq->hwq.pdev->dev,
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index 5d4c49089a20f4..b5c53e864fbb39 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -113,7 +113,6 @@ struct bnxt_qplib_sge {
+ u32 size;
+ };
+
+-#define BNXT_QPLIB_QP_MAX_SGL 6
+ struct bnxt_qplib_swq {
+ u64 wr_id;
+ int next_idx;
+@@ -153,7 +152,7 @@ struct bnxt_qplib_swqe {
+ #define BNXT_QPLIB_SWQE_FLAGS_UC_FENCE BIT(2)
+ #define BNXT_QPLIB_SWQE_FLAGS_SOLICIT_EVENT BIT(3)
+ #define BNXT_QPLIB_SWQE_FLAGS_INLINE BIT(4)
+- struct bnxt_qplib_sge sg_list[BNXT_QPLIB_QP_MAX_SGL];
++ struct bnxt_qplib_sge sg_list[BNXT_VAR_MAX_SGE];
+ int num_sge;
+ /* Max inline data is 96 bytes */
+ u32 inline_len;
+@@ -251,6 +250,7 @@ struct bnxt_qplib_q {
+ struct bnxt_qplib_db_info dbinfo;
+ struct bnxt_qplib_sg_info sg_info;
+ u32 max_wqe;
++ u32 max_sw_wqe;
+ u16 wqe_size;
+ u16 q_full_delta;
+ u16 max_sge;
+@@ -340,7 +340,7 @@ struct bnxt_qplib_qp {
+ struct list_head rq_flush;
+ u32 msn;
+ u32 msn_tbl_sz;
+- u16 dev_cap_flags;
++ bool is_host_msn_tbl;
+ };
+
+ #define BNXT_QPLIB_MAX_CQE_ENTRY_SIZE sizeof(struct cq_base)
+@@ -585,15 +585,22 @@ static inline void bnxt_qplib_swq_mod_start(struct bnxt_qplib_q *que, u32 idx)
+ que->swq_start = que->swq[idx].next_idx;
+ }
+
+-static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que)
++static inline u32 bnxt_qplib_get_depth(struct bnxt_qplib_q *que, u8 wqe_mode, bool is_sq)
+ {
+- return (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
++ u32 slots;
++
++ /* Queue depth is the number of slots. */
++ slots = (que->wqe_size * que->max_wqe) / sizeof(struct sq_sge);
++ /* For variable WQE mode, need to align the slots to 256 */
++ if (wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE && is_sq)
++ slots = ALIGN(slots, BNXT_VAR_MAX_SLOT_ALIGN);
++ return slots;
+ }
+
+ static inline u32 bnxt_qplib_set_sq_size(struct bnxt_qplib_q *que, u8 wqe_mode)
+ {
+ return (wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+- que->max_wqe : bnxt_qplib_get_depth(que);
++ que->max_wqe : bnxt_qplib_get_depth(que, wqe_mode, true);
+ }
+
+ static inline u32 bnxt_qplib_set_sq_max_slot(u8 wqe_mode)
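Editor's note: bnxt_qplib_get_depth() above now counts depth in sq_sge-sized slots and, for a variable-WQE send queue, pads the slot count to BNXT_VAR_MAX_SLOT_ALIGN (256, added to qplib_sp.h later in this patch). The arithmetic in isolation, with a worked example:

    #include <stdbool.h>

    #define SLOT_SZ         16U     /* sizeof(struct sq_sge) */
    #define VAR_SLOT_ALIGN  256U    /* BNXT_VAR_MAX_SLOT_ALIGN */

    /* E.g. wqe_size = 128, max_wqe = 1000:
     * (128 * 1000) / 16 = 8000 slots, padded up to 8192 for a
     * variable-WQE SQ. Static-WQE queues keep the raw slot count.
     */
    static unsigned int depth_in_slots(unsigned int wqe_size,
                                       unsigned int max_wqe, bool var_sq)
    {
        unsigned int slots = (wqe_size * max_wqe) / SLOT_SZ;

        if (var_sq)
            slots = (slots + VAR_SLOT_ALIGN - 1) & ~(VAR_SLOT_ALIGN - 1);
        return slots;
    }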
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+index f9e7aa3757cfb2..c2152122a4329d 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+@@ -523,6 +523,12 @@ static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
+
+ #define BNXT_RE_HW_RETX(a) _is_hw_retx_supported((a))
+
++static inline bool _is_host_msn_table(u16 dev_cap_ext_flags2)
++{
++ return (dev_cap_ext_flags2 & CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK) ==
++ CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE;
++}
++
+ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
+ {
+ return cctx->modes.dbr_pacing;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index 0b98577cd7082e..74c3f6b26c4d3a 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -95,11 +95,13 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ struct bnxt_qplib_cmdqmsg msg = {};
+ struct creq_query_func_resp_sb *sb;
+ struct bnxt_qplib_rcfw_sbuf sbuf;
++ struct bnxt_qplib_chip_ctx *cctx;
+ struct cmdq_query_func req = {};
+ u8 *tqm_alloc;
+ int i, rc;
+ u32 temp;
+
++ cctx = rcfw->res->cctx;
+ bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+ CMDQ_BASE_OPCODE_QUERY_FUNC,
+ sizeof(req));
+@@ -127,14 +129,21 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ attr->max_qp_init_rd_atom =
+ sb->max_qp_init_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ?
+ BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_init_rd_atom;
+- attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr);
+- /*
+- * 128 WQEs needs to be reserved for the HW (8916). Prevent
+- * reporting the max number
+- */
+- attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+- attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
+- 6 : sb->max_sge;
++ attr->max_qp_wqes = le16_to_cpu(sb->max_qp_wr) - 1;
++ if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx)) {
++ /*
++	 * 128 WQEs need to be reserved for the HW (8916). Prevent
++	 * reporting the max number on legacy devices.
++ */
++ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
++ }
++
++	/* Adjust max_qp_wqes for variable wqe mode */
++ if (cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
++ attr->max_qp_wqes = BNXT_VAR_MAX_WQE - 1;
++
++ attr->max_qp_sges = cctx->modes.wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE ?
++ min_t(u32, sb->max_sge_var_wqe, BNXT_VAR_MAX_SGE) : 6;
+ attr->max_cq = le32_to_cpu(sb->max_cq);
+ attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
+ if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
+@@ -165,6 +174,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ attr->max_sgid = le32_to_cpu(sb->max_gid);
+ attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
+ attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
++ attr->dev_cap_flags2 = le16_to_cpu(sb->dev_cap_ext_flags_2);
+
+ bnxt_qplib_query_version(rcfw, attr->fw_ver);
+
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+index 755765e68eaab2..aeacd0a9a92cc4 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -40,6 +40,7 @@
+ #ifndef __BNXT_QPLIB_SP_H__
+ #define __BNXT_QPLIB_SP_H__
+
++#include <rdma/bnxt_re-abi.h>
+ #define BNXT_QPLIB_RESERVED_QP_WRS 128
+
+ struct bnxt_qplib_dev_attr {
+@@ -73,6 +74,7 @@ struct bnxt_qplib_dev_attr {
+ u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
+ bool is_atomic;
+ u16 dev_cap_flags;
++ u16 dev_cap_flags2;
+ u32 max_dpi;
+ };
+
+@@ -351,4 +353,11 @@ int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
+ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_cc_param *cc_param);
+
++#define BNXT_VAR_MAX_WQE 4352
++#define BNXT_VAR_MAX_SLOT_ALIGN 256
++#define BNXT_VAR_MAX_SGE 13
++#define BNXT_RE_MAX_RQ_WQES 65536
++
++#define BNXT_STATIC_MAX_SGE 6
++
+ #endif /* __BNXT_QPLIB_SP_H__*/
+diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+index 2909608f4b5de4..cb4e7e19fbaf08 100644
+--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
++++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+@@ -2157,8 +2157,36 @@ struct creq_query_func_resp_sb {
+ __le32 tqm_alloc_reqs[12];
+ __le32 max_dpi;
+ u8 max_sge_var_wqe;
+- u8 reserved_8;
++ u8 dev_cap_ext_flags;
++ #define CREQ_QUERY_FUNC_RESP_SB_ATOMIC_OPS_NOT_SUPPORTED 0x1UL
++ #define CREQ_QUERY_FUNC_RESP_SB_DRV_VERSION_RGTR_SUPPORTED 0x2UL
++ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_QP_BATCH_SUPPORTED 0x4UL
++ #define CREQ_QUERY_FUNC_RESP_SB_DESTROY_QP_BATCH_SUPPORTED 0x8UL
++ #define CREQ_QUERY_FUNC_RESP_SB_ROCE_STATS_EXT_CTX_SUPPORTED 0x10UL
++ #define CREQ_QUERY_FUNC_RESP_SB_CREATE_SRQ_SGE_SUPPORTED 0x20UL
++ #define CREQ_QUERY_FUNC_RESP_SB_FIXED_SIZE_WQE_DISABLED 0x40UL
++ #define CREQ_QUERY_FUNC_RESP_SB_DCN_SUPPORTED 0x80UL
+ __le16 max_inline_data_var_wqe;
++ __le32 start_qid;
++ u8 max_msn_table_size;
++ u8 reserved8_1;
++ __le16 dev_cap_ext_flags_2;
++ #define CREQ_QUERY_FUNC_RESP_SB_OPTIMIZE_MODIFY_QP_SUPPORTED 0x1UL
++ #define CREQ_QUERY_FUNC_RESP_SB_CHANGE_UDP_SRC_PORT_WQE_SUPPORTED 0x2UL
++ #define CREQ_QUERY_FUNC_RESP_SB_CQ_COALESCING_SUPPORTED 0x4UL
++ #define CREQ_QUERY_FUNC_RESP_SB_MEMORY_REGION_RO_SUPPORTED 0x8UL
++ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_MASK 0x30UL
++ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_SFT 4
++ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_PSN_TABLE (0x0UL << 4)
++ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_HOST_MSN_TABLE (0x1UL << 4)
++ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE (0x2UL << 4)
++ #define CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_LAST \
++ CREQ_QUERY_FUNC_RESP_SB_REQ_RETRANSMISSION_SUPPORT_IQM_MSN_TABLE
++ __le16 max_xp_qp_size;
++ __le16 create_qp_batch_size;
++ __le16 destroy_qp_batch_size;
++ __le16 reserved16;
++ __le64 reserved64;
+ };
+
+ /* cmdq_set_func_resources (size:448b/56B) */
+diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
+index 11a78ceae56891..950c133d4220e7 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
++++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
+@@ -153,8 +153,7 @@ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+ return total;
+ }
+
+-int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+- int buf_cnt, struct ib_umem *umem,
++int hns_roce_get_umem_bufs(dma_addr_t *bufs, int buf_cnt, struct ib_umem *umem,
+ unsigned int page_shift)
+ {
+ struct ib_block_iter biter;
+diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
+index 9b91731a620795..5e0d78f4e54548 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
+@@ -133,14 +133,12 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 mtts[MTT_MIN_COUNT] = {};
+- dma_addr_t dma_handle;
+ int ret;
+
+- ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
+- &dma_handle);
+- if (!ret) {
++ ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
++ if (ret) {
+ ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
+- return -EINVAL;
++ return ret;
+ }
+
+ /* Get CQC memory HEM(Hardware Entry Memory) table */
+@@ -157,7 +155,8 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
+ goto err_put;
+ }
+
+- ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
++ ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
++ hns_roce_get_mtr_ba(&hr_cq->mtr));
+ if (ret)
+ goto err_xa;
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
+index 21ef00fdb65631..03b6546f63cdc6 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_device.h
++++ b/drivers/infiniband/hw/hns/hns_roce_device.h
+@@ -892,8 +892,7 @@ struct hns_roce_hw {
+ int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mr *mr, int flags,
+ void *mb_buf);
+- int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+- struct hns_roce_mr *mr);
++ int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+ int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
+ void (*write_cqc)(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
+@@ -1129,8 +1128,13 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
+
+ /* hns roce hw need current block and next block addr from mtt */
+ #define MTT_MIN_COUNT 2
++static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
++{
++ return mtr->hem_cfg.root_ba;
++}
++
+ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+- u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
++ u32 offset, u64 *mtt_buf, int mtt_max);
+ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int page_shift, struct ib_udata *udata,
+@@ -1188,7 +1192,7 @@ struct hns_roce_buf *hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size,
+ int hns_roce_get_kmem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
+ int buf_cnt, struct hns_roce_buf *buf,
+ unsigned int page_shift);
+-int hns_roce_get_umem_bufs(struct hns_roce_dev *hr_dev, dma_addr_t *bufs,
++int hns_roce_get_umem_bufs(dma_addr_t *bufs,
+ int buf_cnt, struct ib_umem *umem,
+ unsigned int page_shift);
+
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
+index 0ab514c49d5e6e..51ab6041ca91bc 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
+@@ -986,6 +986,7 @@ struct hns_roce_hem_item {
+ size_t count; /* max ba numbers */
+ int start; /* start buf offset in this hem */
+ int end; /* end buf offset in this hem */
++ bool exist_bt;
+ };
+
+ /* All HEM items are linked in a tree structure */
+@@ -1014,6 +1015,7 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
+ }
+ }
+
++ hem->exist_bt = exist_bt;
+ hem->count = count;
+ hem->start = start;
+ hem->end = end;
+@@ -1024,34 +1026,32 @@ hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
+ }
+
+ static void hem_list_free_item(struct hns_roce_dev *hr_dev,
+- struct hns_roce_hem_item *hem, bool exist_bt)
++ struct hns_roce_hem_item *hem)
+ {
+- if (exist_bt)
++ if (hem->exist_bt)
+ dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
+ hem->addr, hem->dma_addr);
+ kfree(hem);
+ }
+
+ static void hem_list_free_all(struct hns_roce_dev *hr_dev,
+- struct list_head *head, bool exist_bt)
++ struct list_head *head)
+ {
+ struct hns_roce_hem_item *hem, *temp_hem;
+
+ list_for_each_entry_safe(hem, temp_hem, head, list) {
+ list_del(&hem->list);
+- hem_list_free_item(hr_dev, hem, exist_bt);
++ hem_list_free_item(hr_dev, hem);
+ }
+ }
+
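Editor's note: recording exist_bt in the item at allocation time lets hem_list_free_all() drop its caller-supplied flag; each item now remembers whether it owns a DMA buffer, so one free path serves every list. A kernel-style sketch of the ownership-flag pattern (struct item is illustrative, not a driver type):

    #include <linux/slab.h>

    struct item {
        void *buf;
        bool owns_buf;    /* set once at allocation, like exist_bt */
    };

    static void item_free(struct item *it)
    {
        if (it->owns_buf)
            kfree(it->buf);    /* dma_free_coherent() in the driver */
        kfree(it);
    }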
+-static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
+- u64 table_addr)
++static void hem_list_link_bt(void *base_addr, u64 table_addr)
+ {
+ *(u64 *)(base_addr) = table_addr;
+ }
+
+ /* assign L0 table address to hem from root bt */
+-static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
+- struct hns_roce_hem_item *hem, void *cpu_addr,
++static void hem_list_assign_bt(struct hns_roce_hem_item *hem, void *cpu_addr,
+ u64 phy_addr)
+ {
+ hem->addr = cpu_addr;
+@@ -1141,6 +1141,10 @@ int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
+
+ for (i = 0; i < region_cnt; i++) {
+ r = (struct hns_roce_buf_region *)&regions[i];
++		/* When r->hopnum == 0, the region should not occupy root_ba. */
++ if (!r->hopnum)
++ continue;
++
+ if (r->hopnum > 1) {
+ step = hem_list_calc_ba_range(r->hopnum, 1, unit);
+ if (step > 0)
+@@ -1222,8 +1226,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+ if (level > 1) {
+ pre = hem_ptrs[level - 1];
+ step = (cur->start - pre->start) / step * BA_BYTE_LEN;
+- hem_list_link_bt(hr_dev, pre->addr + step,
+- cur->dma_addr);
++ hem_list_link_bt(pre->addr + step, cur->dma_addr);
+ }
+ }
+
+@@ -1235,7 +1238,7 @@ static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
+
+ err_exit:
+ for (level = 1; level < hopnum; level++)
+- hem_list_free_all(hr_dev, &temp_list[level], true);
++ hem_list_free_all(hr_dev, &temp_list[level]);
+
+ return ret;
+ }
+@@ -1276,16 +1279,26 @@ static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ {
+ struct hns_roce_hem_item *hem;
+
++	/* This is on the has_mtt branch: if r->hopnum
++ * is 0, there is no root_ba to reuse for the
++ * region's fake hem, so a dma_alloc request is
++ * necessary here.
++ */
+ hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
+- r->count, false);
++ r->count, !r->hopnum);
+ if (!hem)
+ return -ENOMEM;
+
+- hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
++ /* The root_ba can be reused only when r->hopnum > 0. */
++ if (r->hopnum)
++ hem_list_assign_bt(hem, cpu_base, phy_base);
+ list_add(&hem->list, branch_head);
+ list_add(&hem->sibling, leaf_head);
+
+- return r->count;
++ /* If r->hopnum == 0, 0 is returned,
++ * so that the root_bt entry is not occupied.
++ */
++ return r->hopnum ? r->count : 0;
+ }
+
+ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+@@ -1304,7 +1317,7 @@ static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
+ /* if exist mid bt, link L1 to L0 */
+ list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
+ offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
+- hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
++ hem_list_link_bt(cpu_base + offset, hem->dma_addr);
+ total++;
+ }
+
+@@ -1329,7 +1342,7 @@ setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
+ return -ENOMEM;
+
+ total = 0;
+- for (i = 0; i < region_cnt && total < max_ba_num; i++) {
++ for (i = 0; i < region_cnt && total <= max_ba_num; i++) {
+ r = &regions[i];
+ if (!r->count)
+ continue;
+@@ -1395,9 +1408,9 @@ static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
+ region_cnt);
+ if (ret) {
+ for (i = 0; i < region_cnt; i++)
+- hem_list_free_all(hr_dev, &head.branch[i], false);
++ hem_list_free_all(hr_dev, &head.branch[i]);
+
+- hem_list_free_all(hr_dev, &head.root, true);
++ hem_list_free_all(hr_dev, &head.root);
+ }
+
+ return ret;
+@@ -1460,10 +1473,9 @@ void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
+
+ for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
+ for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
+- hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
+- j != 0);
++ hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j]);
+
+- hem_list_free_all(hr_dev, &hem_list->root_bt, true);
++ hem_list_free_all(hr_dev, &hem_list->root_bt);
+ INIT_LIST_HEAD(&hem_list->btm_bt);
+ hem_list->root_ba = 0;
+ }
+diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+index 2824d390ec3161..aded0a7f42838d 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
++++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+@@ -471,7 +471,7 @@ static inline int set_ud_wqe(struct hns_roce_qp *qp,
+ valid_num_sge = calc_wr_sge_num(wr, &msg_len);
+
+ ret = set_ud_opcode(ud_sq_wqe, wr);
+- if (WARN_ON(ret))
++ if (WARN_ON_ONCE(ret))
+ return ret;
+
+ ud_sq_wqe->msg_len = cpu_to_le32(msg_len);
+@@ -575,7 +575,7 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
+ rc_sq_wqe->msg_len = cpu_to_le32(msg_len);
+
+ ret = set_rc_opcode(hr_dev, rc_sq_wqe, wr);
+- if (WARN_ON(ret))
++ if (WARN_ON_ONCE(ret))
+ return ret;
+
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_SO,
+@@ -673,6 +673,10 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
+ #define HNS_ROCE_SL_SHIFT 2
+ struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
+
++ if (unlikely(qp->state == IB_QPS_ERR)) {
++ flush_cqe(hr_dev, qp);
++ return;
++ }
+ /* All kinds of DirectWQE have the same header field layout */
+ hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_FLAG);
+ hr_reg_write(rc_sq_wqe, RC_SEND_WQE_DB_SL_L, qp->sl);
+@@ -3181,21 +3185,22 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+ u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t pbl_ba;
+- int i, count;
++ int ret;
++ int i;
+
+- count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+- min_t(int, ARRAY_SIZE(pages), mr->npages),
+- &pbl_ba);
+- if (count < 1) {
+- ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
+- count);
+- return -ENOBUFS;
++ ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
++ min_t(int, ARRAY_SIZE(pages), mr->npages));
++ if (ret) {
++ ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret);
++ return ret;
+ }
+
+ /* Aligned to the hardware address access unit */
+- for (i = 0; i < count; i++)
++ for (i = 0; i < ARRAY_SIZE(pages); i++)
+ pages[i] >>= 6;
+
++ pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
++
+ mpt_entry->pbl_size = cpu_to_le32(mr->npages);
+ mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
+ hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
+@@ -3291,21 +3296,14 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
+ return ret;
+ }
+
+-static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+- void *mb_buf, struct hns_roce_mr *mr)
++static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+ {
+- struct ib_device *ibdev = &hr_dev->ib_dev;
++ dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
+ struct hns_roce_v2_mpt_entry *mpt_entry;
+- dma_addr_t pbl_ba = 0;
+
+ mpt_entry = mb_buf;
+ memset(mpt_entry, 0, sizeof(*mpt_entry));
+
+- if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
+- ibdev_err(ibdev, "failed to find frmr mtr.\n");
+- return -ENOBUFS;
+- }
+-
+ hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
+ hr_reg_write(mpt_entry, MPT_PD, mr->pd);
+
+@@ -4213,8 +4211,7 @@ static void set_access_flags(struct hns_roce_qp *hr_qp,
+ }
+
+ static void set_qpc_wqe_cnt(struct hns_roce_qp *hr_qp,
+- struct hns_roce_v2_qp_context *context,
+- struct hns_roce_v2_qp_context *qpc_mask)
++ struct hns_roce_v2_qp_context *context)
+ {
+ hr_reg_write(context, QPC_SGE_SHIFT,
+ to_hr_hem_entries_shift(hr_qp->sge.sge_cnt,
+@@ -4236,7 +4233,6 @@ static inline int get_pdn(struct ib_pd *ib_pd)
+ }
+
+ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
+- const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+ {
+@@ -4255,7 +4251,7 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
+
+ hr_reg_write(context, QPC_RQWS, ilog2(hr_qp->rq.max_gs));
+
+- set_qpc_wqe_cnt(hr_qp, context, qpc_mask);
++ set_qpc_wqe_cnt(hr_qp, context);
+
+ /* No VLAN need to set 0xFFF */
+ hr_reg_write(context, QPC_VLAN_ID, 0xfff);
+@@ -4296,7 +4292,6 @@ static void modify_qp_reset_to_init(struct ib_qp *ibqp,
+ }
+
+ static void modify_qp_init_to_init(struct ib_qp *ibqp,
+- const struct ib_qp_attr *attr,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+ {
+@@ -4333,17 +4328,20 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
+ {
+ u64 mtts[MTT_MIN_COUNT] = { 0 };
+ u64 wqe_sge_ba;
+- int count;
++ int ret;
+
+ /* Search qp buf's mtts */
+- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
+- MTT_MIN_COUNT, &wqe_sge_ba);
+- if (hr_qp->rq.wqe_cnt && count < 1) {
++ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
++ MTT_MIN_COUNT);
++ if (hr_qp->rq.wqe_cnt && ret) {
+ ibdev_err(&hr_dev->ib_dev,
+- "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
+- return -EINVAL;
++ "failed to find QP(0x%lx) RQ WQE buf, ret = %d.\n",
++ hr_qp->qpn, ret);
++ return ret;
+ }
+
++ wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr);
++
+ context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
+ qpc_mask->wqe_sge_ba = 0;
+
+@@ -4407,23 +4405,23 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ u64 sge_cur_blk = 0;
+ u64 sq_cur_blk = 0;
+- int count;
++ int ret;
+
+ /* search qp buf's mtts */
+- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
+- if (count < 1) {
+- ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
+- hr_qp->qpn);
+- return -EINVAL;
++ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset,
++ &sq_cur_blk, 1);
++ if (ret) {
++ ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n",
++ hr_qp->qpn, ret);
++ return ret;
+ }
+ if (hr_qp->sge.sge_cnt > 0) {
+- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+- hr_qp->sge.offset,
+- &sge_cur_blk, 1, NULL);
+- if (count < 1) {
+- ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
+- hr_qp->qpn);
+- return -EINVAL;
++ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
++ hr_qp->sge.offset, &sge_cur_blk, 1);
++ if (ret) {
++ ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n",
++ hr_qp->qpn, ret);
++ return ret;
+ }
+ }
+
+@@ -4614,8 +4612,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
+ return 0;
+ }
+
+-static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
+- const struct ib_qp_attr *attr, int attr_mask,
++static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, int attr_mask,
+ struct hns_roce_v2_qp_context *context,
+ struct hns_roce_v2_qp_context *qpc_mask)
+ {
+@@ -4984,15 +4981,14 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
+
+ if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
+ memset(qpc_mask, 0, hr_dev->caps.qpc_sz);
+- modify_qp_reset_to_init(ibqp, attr, context, qpc_mask);
++ modify_qp_reset_to_init(ibqp, context, qpc_mask);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
+- modify_qp_init_to_init(ibqp, attr, context, qpc_mask);
++ modify_qp_init_to_init(ibqp, context, qpc_mask);
+ } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+ ret = modify_qp_init_to_rtr(ibqp, attr, attr_mask, context,
+ qpc_mask, udata);
+ } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
+- ret = modify_qp_rtr_to_rts(ibqp, attr, attr_mask, context,
+- qpc_mask);
++ ret = modify_qp_rtr_to_rts(ibqp, attr_mask, context, qpc_mask);
+ }
+
+ return ret;
+@@ -5550,18 +5546,20 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
+ struct ib_device *ibdev = srq->ibsrq.device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ u64 mtts_idx[MTT_MIN_COUNT] = {};
+- dma_addr_t dma_handle_idx = 0;
++ dma_addr_t dma_handle_idx;
+ int ret;
+
+ /* Get physical address of idx que buf */
+ ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
+- ARRAY_SIZE(mtts_idx), &dma_handle_idx);
+- if (ret < 1) {
++ ARRAY_SIZE(mtts_idx));
++ if (ret) {
+ ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
+ ret);
+- return -ENOBUFS;
++ return ret;
+ }
+
++ dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr);
++
+ hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
+ to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
+
+@@ -5593,20 +5591,22 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_srq_context *ctx = mb_buf;
+ u64 mtts_wqe[MTT_MIN_COUNT] = {};
+- dma_addr_t dma_handle_wqe = 0;
++ dma_addr_t dma_handle_wqe;
+ int ret;
+
+ memset(ctx, 0, sizeof(*ctx));
+
+ /* Get the physical address of srq buf */
+ ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
+- ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
+- if (ret < 1) {
++ ARRAY_SIZE(mtts_wqe));
++ if (ret) {
+ ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
+ ret);
+- return -ENOBUFS;
++ return ret;
+ }
+
++ dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr);
++
+ hr_reg_write(ctx, SRQC_SRQ_ST, 1);
+ hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
+ srq->ibsrq.srq_type == IB_SRQT_XRC);
+@@ -6327,7 +6327,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
+ u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
+ struct hns_roce_eq_context *eqc;
+ u64 bt_ba = 0;
+- int count;
++ int ret;
+
+ eqc = mb_buf;
+ memset(eqc, 0, sizeof(struct hns_roce_eq_context));
+@@ -6335,13 +6335,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
+ init_eq_config(hr_dev, eq);
+
+ /* if not multi-hop, eqe buffer only use one trunk */
+- count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
+- &bt_ba);
+- if (count < 1) {
+- dev_err(hr_dev->dev, "failed to find EQE mtr\n");
+- return -ENOBUFS;
++ ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba,
++ ARRAY_SIZE(eqe_ba));
++ if (ret) {
++ dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret);
++ return ret;
+ }
+
++ bt_ba = hns_roce_get_mtr_ba(&eq->mtr);
++
+ hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
+ hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
+ hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
+index 7f29a55d378f02..408ef2a9614927 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
++++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
+@@ -154,7 +154,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
+ if (mr->type != MR_TYPE_FRMR)
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
+ else
+- ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
++ ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+ if (ret) {
+ dev_err(dev, "failed to write mtpt, ret = %d.\n", ret);
+ goto err_page;
+@@ -714,7 +714,7 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ return -ENOMEM;
+
+ if (mtr->umem)
+- npage = hns_roce_get_umem_bufs(hr_dev, pages, page_count,
++ npage = hns_roce_get_umem_bufs(pages, page_count,
+ mtr->umem, page_shift);
+ else
+ npage = hns_roce_get_kmem_bufs(hr_dev, pages, page_count,
+@@ -767,11 +767,6 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ for (i = 0, mapped_cnt = 0; i < mtr->hem_cfg.region_count &&
+ mapped_cnt < page_cnt; i++) {
+ r = &mtr->hem_cfg.region[i];
+- /* if hopnum is 0, no need to map pages in this region */
+- if (!r->hopnum) {
+- mapped_cnt += r->count;
+- continue;
+- }
+
+ if (r->offset + r->count > page_cnt) {
+ ret = -EINVAL;
+@@ -802,47 +797,53 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ return ret;
+ }
+
+-int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+- u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
++static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
++ u32 start_index, u64 *mtt_buf,
++ int mtt_cnt)
+ {
+- struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+- int mtt_count, left;
+- u32 start_index;
++ int mtt_count;
+ int total = 0;
+- __le64 *mtts;
+ u32 npage;
+ u64 addr;
+
+- if (!mtt_buf || mtt_max < 1)
+- goto done;
+-
+- /* no mtt memory in direct mode, so just return the buffer address */
+- if (cfg->is_direct) {
+- start_index = offset >> HNS_HW_PAGE_SHIFT;
+- for (mtt_count = 0; mtt_count < cfg->region_count &&
+- total < mtt_max; mtt_count++) {
+- npage = cfg->region[mtt_count].offset;
+- if (npage < start_index)
+- continue;
++ if (mtt_cnt > cfg->region_count)
++ return -EINVAL;
+
+- addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
+- mtt_buf[total] = addr;
++ for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
++ mtt_count++) {
++ npage = cfg->region[mtt_count].offset;
++ if (npage < start_index)
++ continue;
+
+- total++;
+- }
++ addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
++ mtt_buf[total] = addr;
+
+- goto done;
++ total++;
+ }
+
+- start_index = offset >> cfg->buf_pg_shift;
+- left = mtt_max;
++ if (!total)
++ return -ENOENT;
++
++ return 0;
++}
++
++static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev,
++ struct hns_roce_mtr *mtr, u32 start_index,
++ u64 *mtt_buf, int mtt_cnt)
++{
++ int left = mtt_cnt;
++ int total = 0;
++ int mtt_count;
++ __le64 *mtts;
++ u32 npage;
++
+ while (left > 0) {
+ mtt_count = 0;
+ mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
+ start_index + total,
+ &mtt_count);
+ if (!mtts || !mtt_count)
+- goto done;
++ break;
+
+ npage = min(mtt_count, left);
+ left -= npage;
+@@ -850,11 +851,33 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
+ }
+
+-done:
+- if (base_addr)
+- *base_addr = cfg->root_ba;
++ if (!total)
++ return -ENOENT;
++
++ return 0;
++}
+
+- return total;
++int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
++ u32 offset, u64 *mtt_buf, int mtt_max)
++{
++ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
++ u32 start_index;
++ int ret;
++
++ if (!mtt_buf || mtt_max < 1)
++ return -EINVAL;
++
++ /* no mtt memory in direct mode, so just return the buffer address */
++ if (cfg->is_direct) {
++ start_index = offset >> HNS_HW_PAGE_SHIFT;
++ ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
++ mtt_buf, mtt_max);
++ } else {
++ start_index = offset >> cfg->buf_pg_shift;
++ ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index,
++ mtt_buf, mtt_max);
++ }
++ return ret;
+ }
+
+ static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
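Editor's note: after this refactor hns_roce_mtr_find() returns 0 or -errno instead of an entry count, and the root base address moves to the hns_roce_get_mtr_ba() accessor, which is why every caller in this patch switches from "count < 1" checks to "if (ret)". A hypothetical call site, assuming the driver headers (hns_roce_device.h) are in scope:

    /* Errors propagate as -errno and the root BA comes from the new
     * accessor rather than an output parameter. config_buf is an
     * illustrative name.
     */
    static int config_buf(struct hns_roce_dev *hr_dev,
                          struct hns_roce_mtr *mtr)
    {
        u64 mtts[MTT_MIN_COUNT] = {};
        dma_addr_t ba;
        int ret;

        ret = hns_roce_mtr_find(hr_dev, mtr, 0, mtts, ARRAY_SIZE(mtts));
        if (ret)
            return ret;    /* -EINVAL or -ENOENT from the lookup */

        ba = hns_roce_get_mtr_ba(mtr);
        /* ... program mtts[] and ba into the hardware context ... */
        return 0;
    }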
+diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
+index 88a4777d29f8b8..97d79c8d5cd069 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
++++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
+@@ -1075,7 +1075,6 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
+ }
+
+ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
+- struct ib_pd *ib_pd,
+ struct ib_qp_init_attr *init_attr,
+ struct ib_udata *udata,
+ struct hns_roce_qp *hr_qp)
+@@ -1229,7 +1228,6 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ struct ib_device *ibdev = qp->device;
+ struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_qp *hr_qp = to_hr_qp(qp);
+- struct ib_pd *pd = qp->pd;
+ int ret;
+
+ ret = check_qp_type(hr_dev, init_attr->qp_type, !!udata);
+@@ -1244,7 +1242,7 @@ int hns_roce_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
+ hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
+ }
+
+- ret = hns_roce_create_qp_common(hr_dev, pd, init_attr, udata, hr_qp);
++ ret = hns_roce_create_qp_common(hr_dev, init_attr, udata, hr_qp);
+ if (ret)
+ ibdev_err(ibdev, "create QP type 0x%x failed(%d)\n",
+ init_attr->qp_type, ret);
+diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
+index 652508b660a060..80fcb1b0e8fdcf 100644
+--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
++++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
+@@ -249,7 +249,7 @@ static void free_srq_wqe_buf(struct hns_roce_dev *hr_dev,
+ hns_roce_mtr_destroy(hr_dev, &srq->buf_mtr);
+ }
+
+-static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
++static int alloc_srq_wrid(struct hns_roce_srq *srq)
+ {
+ srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
+ if (!srq->wrid)
+@@ -365,7 +365,7 @@ static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
+ goto err_idx;
+
+ if (!udata) {
+- ret = alloc_srq_wrid(hr_dev, srq);
++ ret = alloc_srq_wrid(srq);
+ if (ret)
+ goto err_wqe_buf;
+ }
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index c510484e024b1a..ada7dbf8eb1cf5 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -3372,7 +3372,8 @@ static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev)
+ list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list,
+ list) {
+ if (dev->sys_image_guid == mpi->sys_image_guid &&
+- (mlx5_core_native_port_num(mpi->mdev) - 1) == i) {
++ (mlx5_core_native_port_num(mpi->mdev) - 1) == i &&
++ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev)) {
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+ }
+
+@@ -4406,7 +4407,8 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
+
+ mutex_lock(&mlx5_ib_multiport_mutex);
+ list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) {
+- if (dev->sys_image_guid == mpi->sys_image_guid)
++ if (dev->sys_image_guid == mpi->sys_image_guid &&
++ mlx5_core_same_coredev_type(dev->mdev, mpi->mdev))
+ bound = mlx5_ib_bind_slave_port(dev, mpi);
+
+ if (bound) {
+diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+index 758a3d9c2844d1..84d1654148d764 100644
+--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
++++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+@@ -346,6 +346,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ struct rtrs_srv_mr *srv_mr;
+ bool need_inval = false;
+ enum ib_send_flags flags;
++ struct ib_sge list;
+ u32 imm;
+ int err;
+
+@@ -398,7 +399,6 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
+ imm = rtrs_to_io_rsp_imm(id->msg_id, errno, need_inval);
+ imm_wr.wr.next = NULL;
+ if (always_invalidate) {
+- struct ib_sge list;
+ struct rtrs_msg_rkey_rsp *msg;
+
+ srv_mr = &srv_path->mrs[id->msg_id];
+diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
+index 412196a7dad587..2c6c50348afd19 100644
+--- a/drivers/irqchip/irq-gic.c
++++ b/drivers/irqchip/irq-gic.c
+@@ -64,7 +64,7 @@ static void gic_check_cpu_features(void)
+
+ union gic_base {
+ void __iomem *common_base;
+- void __percpu * __iomem *percpu_base;
++ void __iomem * __percpu *percpu_base;
+ };
+
+ struct gic_chip_data {
+diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
+index a44d4b3e5beb22..82102a4c5d6883 100644
+--- a/drivers/mailbox/pcc.c
++++ b/drivers/mailbox/pcc.c
+@@ -91,6 +91,14 @@ struct pcc_chan_reg {
+ * @cmd_update: PCC register bundle for the command complete update register
+ * @error: PCC register bundle for the error status register
+ * @plat_irq: platform interrupt
++ * @type: PCC subspace type
++ * @plat_irq_flags: platform interrupt flags
++ * @chan_in_use: this flag is used just to check if the interrupt needs
++ * handling when it is shared. Since only one transfer can occur
++ * at a time and mailbox takes care of locking, this flag can be
++ *		accessed without a lock. Note: only subspace types that
++ *		support communication from OSPM to Platform (e.g. type 3)
++ *		use it; other types ignore it completely.
+ */
+ struct pcc_chan_info {
+ struct pcc_mbox_chan chan;
+@@ -100,12 +108,17 @@ struct pcc_chan_info {
+ struct pcc_chan_reg cmd_update;
+ struct pcc_chan_reg error;
+ int plat_irq;
++ u8 type;
++ unsigned int plat_irq_flags;
++ bool chan_in_use;
+ };
+
+ #define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
+ static struct pcc_chan_info *chan_info;
+ static int pcc_chan_count;
+
++static int pcc_send_data(struct mbox_chan *chan, void *data);
++
+ /*
+ * PCC can be used with perf critical drivers such as CPPC
+ * So it makes sense to locally cache the virtual address and
+@@ -221,6 +234,70 @@ static int pcc_map_interrupt(u32 interrupt, u32 flags)
+ return acpi_register_gsi(NULL, interrupt, trigger, polarity);
+ }
+
++static bool pcc_chan_plat_irq_can_be_shared(struct pcc_chan_info *pchan)
++{
++ return (pchan->plat_irq_flags & ACPI_PCCT_INTERRUPT_MODE) ==
++ ACPI_LEVEL_SENSITIVE;
++}
++
++static bool pcc_mbox_cmd_complete_check(struct pcc_chan_info *pchan)
++{
++ u64 val;
++ int ret;
++
++ ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
++ if (ret)
++ return false;
++
++ if (!pchan->cmd_complete.gas)
++ return true;
++
++ /*
++	 * Judge whether the channel should respond to the interrupt based
++	 * on the value of command complete.
++ */
++ val &= pchan->cmd_complete.status_mask;
++
++ /*
++	 * If this is a PCC slave subspace channel, command complete
++	 * bit 0 being clear indicates that the Platform is sending a
++	 * notification and OSPM needs to respond to process the command.
++ */
++ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
++ return !val;
++
++ return !!val;
++}
++
++static void check_and_ack(struct pcc_chan_info *pchan, struct mbox_chan *chan)
++{
++ struct acpi_pcct_ext_pcc_shared_memory pcc_hdr;
++
++ if (pchan->type != ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
++ return;
++ /* If the memory region has not been mapped, we cannot
++ * determine if we need to send the message, but we still
++ * need to set the cmd_update flag before returning.
++ */
++ if (pchan->chan.shmem == NULL) {
++ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
++ return;
++ }
++ memcpy_fromio(&pcc_hdr, pchan->chan.shmem,
++ sizeof(struct acpi_pcct_ext_pcc_shared_memory));
++ /*
++ * The PCC slave subspace channel needs to set the command complete bit
++ * after processing message. If the PCC_ACK_FLAG is set, it should also
++ * ring the doorbell.
++ *
++ * The PCC master subspace channel clears chan_in_use to free channel.
++ */
++ if (le32_to_cpup(&pcc_hdr.flags) & PCC_ACK_FLAG_MASK)
++ pcc_send_data(chan, NULL);
++ else
++ pcc_chan_reg_read_modify_write(&pchan->cmd_update);
++}
++
+ /**
+ * pcc_mbox_irq - PCC mailbox interrupt handler
+ * @irq: interrupt number
+@@ -236,16 +313,12 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
+ int ret;
+
+ pchan = chan->con_priv;
+-
+- ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
+- if (ret)
++ if (pchan->type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE &&
++ !pchan->chan_in_use)
+ return IRQ_NONE;
+
+- if (val) { /* Ensure GAS exists and value is non-zero */
+- val &= pchan->cmd_complete.status_mask;
+- if (!val)
+- return IRQ_NONE;
+- }
++ if (!pcc_mbox_cmd_complete_check(pchan))
++ return IRQ_NONE;
+
+ ret = pcc_chan_reg_read(&pchan->error, &val);
+ if (ret)
+@@ -262,6 +335,9 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
+
+ mbox_chan_received_data(chan, NULL);
+
++ check_and_ack(pchan, chan);
++ pchan->chan_in_use = false;
++
+ return IRQ_HANDLED;
+ }
+
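Editor's note: because the platform interrupt may now be requested with IRQF_SHARED, pcc_mbox_irq() above must prove the interrupt is its own before acting: chan_in_use filters channels that never rang the doorbell, and pcc_mbox_cmd_complete_check() reads the command-complete bit (with inverted polarity for slave subspaces). A skeleton of that shared-IRQ discipline; struct chan, chan_cmd_complete() and handle_completion() are illustrative stand-ins:

    #include <linux/interrupt.h>

    struct chan {
        bool in_use;
    };

    static bool chan_cmd_complete(struct chan *c);
    static void handle_completion(struct chan *c);

    /* Claim the interrupt only when this channel can prove it is the
     * source; otherwise return IRQ_NONE so other sharers get a turn.
     */
    static irqreturn_t shared_chan_irq(int irq, void *p)
    {
        struct chan *c = p;

        if (!c->in_use)              /* we never rang the doorbell */
            return IRQ_NONE;
        if (!chan_cmd_complete(c))   /* hardware is not done yet */
            return IRQ_NONE;

        handle_completion(c);
        c->in_use = false;
        return IRQ_HANDLED;
    }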
+@@ -311,14 +387,37 @@ EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
+ void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
+ {
+ struct mbox_chan *chan = pchan->mchan;
++ struct pcc_chan_info *pchan_info;
++ struct pcc_mbox_chan *pcc_mbox_chan;
+
+ if (!chan || !chan->cl)
+ return;
++ pchan_info = chan->con_priv;
++ pcc_mbox_chan = &pchan_info->chan;
++ if (pcc_mbox_chan->shmem) {
++ iounmap(pcc_mbox_chan->shmem);
++ pcc_mbox_chan->shmem = NULL;
++ }
+
+ mbox_free_channel(chan);
+ }
+ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
+
++int pcc_mbox_ioremap(struct mbox_chan *chan)
++{
++ struct pcc_chan_info *pchan_info;
++ struct pcc_mbox_chan *pcc_mbox_chan;
++
++ if (!chan || !chan->cl)
++ return -1;
++ pchan_info = chan->con_priv;
++ pcc_mbox_chan = &pchan_info->chan;
++ pcc_mbox_chan->shmem = ioremap(pcc_mbox_chan->shmem_base_addr,
++ pcc_mbox_chan->shmem_size);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(pcc_mbox_ioremap);
++
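Editor's note: pcc_mbox_ioremap() lets a client map the channel's shared memory after requesting it; the matching iounmap() now happens in pcc_mbox_free_channel(). A hedged usage sketch (error handling abbreviated; client_attach is an illustrative name, and the declaration is assumed to land in acpi/pcc.h elsewhere in this patch):

    #include <linux/err.h>
    #include <acpi/pcc.h>

    static int client_attach(struct mbox_client *cl, int subspace_id)
    {
        struct pcc_mbox_chan *pchan;

        pchan = pcc_mbox_request_channel(cl, subspace_id);
        if (IS_ERR(pchan))
            return PTR_ERR(pchan);

        if (pcc_mbox_ioremap(pchan->mchan)) {
            pcc_mbox_free_channel(pchan);
            return -ENXIO;
        }
        /* pchan->shmem is now valid for memcpy_fromio()/memcpy_toio() */
        return 0;
    }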
+ /**
+ * pcc_send_data - Called from Mailbox Controller code. Used
+ * here only to ring the channel doorbell. The PCC client
+@@ -340,7 +439,11 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
+ if (ret)
+ return ret;
+
+- return pcc_chan_reg_read_modify_write(&pchan->db);
++ ret = pcc_chan_reg_read_modify_write(&pchan->db);
++ if (!ret && pchan->plat_irq > 0)
++ pchan->chan_in_use = true;
++
++ return ret;
+ }
+
+ /**
+@@ -353,11 +456,14 @@ static int pcc_send_data(struct mbox_chan *chan, void *data)
+ static int pcc_startup(struct mbox_chan *chan)
+ {
+ struct pcc_chan_info *pchan = chan->con_priv;
++ unsigned long irqflags;
+ int rc;
+
+ if (pchan->plat_irq > 0) {
+- rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq, 0,
+- MBOX_IRQ_NAME, chan);
++ irqflags = pcc_chan_plat_irq_can_be_shared(pchan) ?
++ IRQF_SHARED | IRQF_ONESHOT : 0;
++ rc = devm_request_irq(chan->mbox->dev, pchan->plat_irq, pcc_mbox_irq,
++ irqflags, MBOX_IRQ_NAME, chan);
+ if (unlikely(rc)) {
+ dev_err(chan->mbox->dev, "failed to register PCC interrupt %d\n",
+ pchan->plat_irq);
+@@ -463,6 +569,7 @@ static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
+ pcct_ss->platform_interrupt);
+ return -EINVAL;
+ }
++ pchan->plat_irq_flags = pcct_ss->flags;
+
+ if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
+@@ -484,6 +591,12 @@ static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
+ "PLAT IRQ ACK");
+ }
+
++ if (pcc_chan_plat_irq_can_be_shared(pchan) &&
++ !pchan->plat_irq_ack.gas) {
++ pr_err("PCC subspace has level IRQ with no ACK register\n");
++ return -EINVAL;
++ }
++
+ return ret;
+ }
+
+@@ -698,6 +811,7 @@ static int pcc_mbox_probe(struct platform_device *pdev)
+
+ pcc_parse_subspace_shmem(pchan, pcct_entry);
+
++ pchan->type = pcct_entry->type;
+ pcct_entry = (struct acpi_subtable_header *)
+ ((unsigned long) pcct_entry + pcct_entry->length);
+ }
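
The new pcc_mbox_ioremap() export lets a client map the subspace's shared
memory after requesting the channel; check_and_ack() above depends on that
mapping (it skips the header read while chan.shmem is NULL). A minimal
client-side sketch, assuming a struct mbox_client 'cl' and a 'subspace_id'
have already been set up:

	struct pcc_mbox_chan *pchan;
	int ret;

	pchan = pcc_mbox_request_channel(&cl, subspace_id);
	if (IS_ERR(pchan))
		return PTR_ERR(pchan);

	/* Map shmem so the ext-PCC header can be read in the IRQ path. */
	ret = pcc_mbox_ioremap(pchan->mchan);
	if (ret) {
		pcc_mbox_free_channel(pchan);	/* also unmaps shmem if set */
		return ret;
	}
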
+diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
+index 5a3e933df63352..1b05890f99f4f4 100644
+--- a/drivers/media/usb/uvc/uvc_driver.c
++++ b/drivers/media/usb/uvc/uvc_driver.c
+@@ -2519,6 +2519,28 @@ static const struct usb_device_id uvc_ids[] = {
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
+ .driver_info = (kernel_ulong_t)&uvc_ctrl_power_line_limited },
++ /* Quanta ACER HD User Facing */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x0408,
++ .idProduct = 0x4033,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
++ .driver_info = (kernel_ulong_t)&(const struct uvc_device_info){
++ .uvc_version = 0x010a,
++ } },
++ /* Quanta ACER HD User Facing */
++ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
++ | USB_DEVICE_ID_MATCH_INT_INFO,
++ .idVendor = 0x0408,
++ .idProduct = 0x4035,
++ .bInterfaceClass = USB_CLASS_VIDEO,
++ .bInterfaceSubClass = 1,
++ .bInterfaceProtocol = UVC_PC_PROTOCOL_15,
++ .driver_info = (kernel_ulong_t)&(const struct uvc_device_info){
++ .uvc_version = 0x010a,
++ } },
+ /* LogiLink Wireless Webcam */
+ { .match_flags = USB_DEVICE_ID_MATCH_DEVICE
+ | USB_DEVICE_ID_MATCH_INT_INFO,
+diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
+index e113b99a3eab59..8716004fcf6c90 100644
+--- a/drivers/mmc/host/sdhci-msm.c
++++ b/drivers/mmc/host/sdhci-msm.c
+@@ -1867,20 +1867,20 @@ static int sdhci_msm_program_key(struct cqhci_host *cq_host,
+ struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host);
+ union cqhci_crypto_cap_entry cap;
+
++ if (!(cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE))
++ return qcom_ice_evict_key(msm_host->ice, slot);
++
+ /* Only AES-256-XTS has been tested so far. */
+ cap = cq_host->crypto_cap_array[cfg->crypto_cap_idx];
+ if (cap.algorithm_id != CQHCI_CRYPTO_ALG_AES_XTS ||
+ cap.key_size != CQHCI_CRYPTO_KEY_SIZE_256)
+ return -EINVAL;
+
+- if (cfg->config_enable & CQHCI_CRYPTO_CONFIGURATION_ENABLE)
+- return qcom_ice_program_key(msm_host->ice,
+- QCOM_ICE_CRYPTO_ALG_AES_XTS,
+- QCOM_ICE_CRYPTO_KEY_SIZE_256,
+- cfg->crypto_key,
+- cfg->data_unit_size, slot);
+- else
+- return qcom_ice_evict_key(msm_host->ice, slot);
++ return qcom_ice_program_key(msm_host->ice,
++ QCOM_ICE_CRYPTO_ALG_AES_XTS,
++ QCOM_ICE_CRYPTO_KEY_SIZE_256,
++ cfg->crypto_key,
++ cfg->data_unit_size, slot);
+ }
+
+ #else /* CONFIG_MMC_CRYPTO */
+diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
+index a7e8fcdf25768b..59134d117846d1 100644
+--- a/drivers/net/dsa/microchip/ksz9477.c
++++ b/drivers/net/dsa/microchip/ksz9477.c
+@@ -2,7 +2,7 @@
+ /*
+ * Microchip KSZ9477 switch driver main logic
+ *
+- * Copyright (C) 2017-2019 Microchip Technology Inc.
++ * Copyright (C) 2017-2024 Microchip Technology Inc.
+ */
+
+ #include <linux/kernel.h>
+@@ -916,26 +916,51 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
+ int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+ {
+ u32 secs = msecs / 1000;
+- u8 value;
+- u8 data;
++ u8 data, mult, value;
++ u32 max_val;
+ int ret;
+
+- value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
++#define MAX_TIMER_VAL ((1 << 8) - 1)
+
+- ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+- if (ret < 0)
+- return ret;
++ /* The aging timer comprises a 3-bit multiplier and an 8-bit second
++ * value. Neither of them can be zero. The maximum timer is then
++ * 7 * 255 = 1785 seconds.
++ */
++ if (!secs)
++ secs = 1;
+
+- data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
++ /* Return error if too large. */
++ else if (secs > 7 * MAX_TIMER_VAL)
++ return -EINVAL;
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ if (ret < 0)
+ return ret;
+
+- value &= ~SW_AGE_CNT_M;
+- value |= FIELD_PREP(SW_AGE_CNT_M, data);
++ /* Check whether the multiplier needs to be updated. */
++ mult = FIELD_GET(SW_AGE_CNT_M, value);
++ max_val = MAX_TIMER_VAL;
++ if (mult > 0) {
++ /* Try to reuse the multiplier already in the register, as the
++ * hardware default uses multiplier 4 and 75 seconds for
++ * 300 seconds.
++ */
++ max_val = DIV_ROUND_UP(secs, mult);
++ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
++ max_val = MAX_TIMER_VAL;
++ }
++
++ data = DIV_ROUND_UP(secs, max_val);
++ if (mult != data) {
++ value &= ~SW_AGE_CNT_M;
++ value |= FIELD_PREP(SW_AGE_CNT_M, data);
++ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
++ if (ret < 0)
++ return ret;
++ }
+
+- return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
++ value = DIV_ROUND_UP(secs, data);
++ return ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+ }
+
+ void ksz9477_port_queue_split(struct ksz_device *dev, int port)
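
Worked example of the split above: for the 300-second default with the
hardware's reset multiplier of 4, max_val = DIV_ROUND_UP(300, 4) = 75 and
75 * 4 == 300, so the multiplier is kept, REG_SW_LUE_CTRL_0 is not touched,
and REG_SW_LUE_CTRL_3 gets DIV_ROUND_UP(300, 4) = 75. A standalone sketch of
the same arithmetic (userspace, with DIV_ROUND_UP redefined locally and the
register I/O omitted):

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
	#define MAX_TIMER_VAL		((1 << 8) - 1)	/* 8-bit period */

	/* Mirror of the ksz9477 split: pick multiplier and period so that
	 * mult * value covers secs, preferring the current multiplier. */
	static void split(unsigned int secs, unsigned int cur_mult)
	{
		unsigned int max_val = MAX_TIMER_VAL, mult, value;

		if (cur_mult > 0) {
			max_val = DIV_ROUND_UP(secs, cur_mult);
			if (max_val > MAX_TIMER_VAL ||
			    max_val * cur_mult != secs)
				max_val = MAX_TIMER_VAL;
		}
		mult = DIV_ROUND_UP(secs, max_val);
		value = DIV_ROUND_UP(secs, mult);
		printf("secs=%u -> mult=%u value=%u (covers %u s)\n",
		       secs, mult, value, mult * value);
	}

	int main(void)
	{
		split(300, 4);	/* default: keeps mult=4, value=75 */
		split(1785, 4);	/* maximum: mult=7, value=255 */
		return 0;
	}
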
+diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
+index a2ef4b18349c41..d0886ed984c578 100644
+--- a/drivers/net/dsa/microchip/ksz9477_reg.h
++++ b/drivers/net/dsa/microchip/ksz9477_reg.h
+@@ -2,7 +2,7 @@
+ /*
+ * Microchip KSZ9477 register definitions
+ *
+- * Copyright (C) 2017-2018 Microchip Technology Inc.
++ * Copyright (C) 2017-2024 Microchip Technology Inc.
+ */
+
+ #ifndef __KSZ9477_REGS_H
+@@ -190,8 +190,6 @@
+ #define SW_VLAN_ENABLE BIT(7)
+ #define SW_DROP_INVALID_VID BIT(6)
+ #define SW_AGE_CNT_M GENMASK(5, 3)
+-#define SW_AGE_CNT_S 3
+-#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
+ #define SW_RESV_MCAST_ENABLE BIT(2)
+ #define SW_HASH_OPTION_M 0x03
+ #define SW_HASH_OPTION_CRC 1
+diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
+index b479a628b1ae56..dde37e61faa359 100644
+--- a/drivers/net/dsa/microchip/lan937x_main.c
++++ b/drivers/net/dsa/microchip/lan937x_main.c
+@@ -1,6 +1,6 @@
+ // SPDX-License-Identifier: GPL-2.0
+ /* Microchip LAN937X switch driver main logic
+- * Copyright (C) 2019-2022 Microchip Technology Inc.
++ * Copyright (C) 2019-2024 Microchip Technology Inc.
+ */
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+@@ -257,10 +257,66 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
+
+ int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+ {
+- u32 secs = msecs / 1000;
+- u32 value;
++ u8 data, mult, value8;
++ bool in_msec = false;
++ u32 max_val, value;
++ u32 secs = msecs;
+ int ret;
+
++#define MAX_TIMER_VAL ((1 << 20) - 1)
++
++ /* The aging timer comprises a 3-bit multiplier and a 20-bit second
++ * value. Neither of them can be zero. The maximum timer is then
++ * 7 * 1048575 = 7340025 seconds. As this value is too large for
++ * practical use, it can be interpreted as milliseconds, making the
++ * maximum timer 7340 seconds with finer control. This allows for a
++ * maximum of 122 minutes compared to 29 minutes in the KSZ9477 switch.
++ */
++ if (msecs % 1000)
++ in_msec = true;
++ else
++ secs /= 1000;
++ if (!secs)
++ secs = 1;
++
++ /* Return error if too large. */
++ else if (secs > 7 * MAX_TIMER_VAL)
++ return -EINVAL;
++
++ /* Configure how the timer value is interpreted. */
++ ret = ksz_rmw8(dev, REG_SW_LUE_CTRL_2, SW_AGE_CNT_IN_MICROSEC,
++ in_msec ? SW_AGE_CNT_IN_MICROSEC : 0);
++ if (ret < 0)
++ return ret;
++
++ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value8);
++ if (ret < 0)
++ return ret;
++
++ /* Check whether the multiplier needs to be updated. */
++ mult = FIELD_GET(SW_AGE_CNT_M, value8);
++ max_val = MAX_TIMER_VAL;
++ if (mult > 0) {
++ /* Try to reuse the multiplier already in the register, as the
++ * hardware default uses multiplier 4 and 75 seconds for
++ * 300 seconds.
++ */
++ max_val = DIV_ROUND_UP(secs, mult);
++ if (max_val > MAX_TIMER_VAL || max_val * mult != secs)
++ max_val = MAX_TIMER_VAL;
++ }
++
++ data = DIV_ROUND_UP(secs, max_val);
++ if (mult != data) {
++ value8 &= ~SW_AGE_CNT_M;
++ value8 |= FIELD_PREP(SW_AGE_CNT_M, data);
++ ret = ksz_write8(dev, REG_SW_LUE_CTRL_0, value8);
++ if (ret < 0)
++ return ret;
++ }
++
++ secs = DIV_ROUND_UP(secs, data);
++
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
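
Worked example for the millisecond path above: a requested ageing time of
2500 ms is not a whole number of seconds, so in_msec is set and secs stays
2500 (now counting milliseconds). With the register multiplier still at its
default of 4, max_val = DIV_ROUND_UP(2500, 4) = 625 and 625 * 4 == 2500, so
the multiplier is kept and the 20-bit period becomes DIV_ROUND_UP(2500, 4) =
625 ticks, i.e. 4 * 625 = 2500 ms, exactly as requested.
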
+diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
+index 45b606b6429f65..b3e536e7c68694 100644
+--- a/drivers/net/dsa/microchip/lan937x_reg.h
++++ b/drivers/net/dsa/microchip/lan937x_reg.h
+@@ -1,6 +1,6 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+ /* Microchip LAN937X switch register definitions
+- * Copyright (C) 2019-2021 Microchip Technology Inc.
++ * Copyright (C) 2019-2024 Microchip Technology Inc.
+ */
+ #ifndef __LAN937X_REG_H
+ #define __LAN937X_REG_H
+@@ -48,8 +48,7 @@
+
+ #define SW_VLAN_ENABLE BIT(7)
+ #define SW_DROP_INVALID_VID BIT(6)
+-#define SW_AGE_CNT_M 0x7
+-#define SW_AGE_CNT_S 3
++#define SW_AGE_CNT_M GENMASK(5, 3)
+ #define SW_RESV_MCAST_ENABLE BIT(2)
+
+ #define REG_SW_LUE_CTRL_1 0x0311
+@@ -62,6 +61,10 @@
+ #define SW_FAST_AGING BIT(1)
+ #define SW_LINK_AUTO_AGING BIT(0)
+
++#define REG_SW_LUE_CTRL_2 0x0312
++
++#define SW_AGE_CNT_IN_MICROSEC BIT(7)
++
+ #define REG_SW_AGE_PERIOD__1 0x0313
+ #define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index 49e890a7e04a37..23cc2d85994e42 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1967,7 +1967,11 @@ static int bcm_sysport_open(struct net_device *dev)
+ unsigned int i;
+ int ret;
+
+- clk_prepare_enable(priv->clk);
++ ret = clk_prepare_enable(priv->clk);
++ if (ret) {
++ netdev_err(dev, "could not enable priv clock\n");
++ return ret;
++ }
+
+ /* Reset UniMAC */
+ umac_reset(priv);
+@@ -2625,7 +2629,11 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+ goto err_deregister_notifier;
+ }
+
+- clk_prepare_enable(priv->clk);
++ ret = clk_prepare_enable(priv->clk);
++ if (ret) {
++ dev_err(&pdev->dev, "could not enable priv clock\n");
++ goto err_deregister_netdev;
++ }
+
+ priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+ dev_info(&pdev->dev,
+@@ -2639,6 +2647,8 @@ static int bcm_sysport_probe(struct platform_device *pdev)
+
+ return 0;
+
++err_deregister_netdev:
++ unregister_netdev(dev);
+ err_deregister_notifier:
+ unregister_netdevice_notifier(&priv->netdev_notifier);
+ err_deregister_fixed_link:
+@@ -2810,7 +2820,12 @@ static int __maybe_unused bcm_sysport_resume(struct device *d)
+ if (!netif_running(dev))
+ return 0;
+
+- clk_prepare_enable(priv->clk);
++ ret = clk_prepare_enable(priv->clk);
++ if (ret) {
++ netdev_err(dev, "could not enable priv clock\n");
++ return ret;
++ }
++
+ if (priv->wolopts)
+ clk_disable_unprepare(priv->wol_clk);
+
+diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
+index 5703240474e5b2..d70305654e7d07 100644
+--- a/drivers/net/ethernet/google/gve/gve_main.c
++++ b/drivers/net/ethernet/google/gve/gve_main.c
+@@ -1528,8 +1528,8 @@ static int gve_xsk_pool_enable(struct net_device *dev,
+ if (err)
+ return err;
+
+- /* If XDP prog is not installed, return */
+- if (!priv->xdp_prog)
++ /* If XDP prog is not installed or interface is down, return. */
++ if (!priv->xdp_prog || !netif_running(dev))
+ return 0;
+
+ rx = &priv->rx[qid];
+@@ -1574,21 +1574,16 @@ static int gve_xsk_pool_disable(struct net_device *dev,
+ if (qid >= priv->rx_cfg.num_queues)
+ return -EINVAL;
+
+- /* If XDP prog is not installed, unmap DMA and return */
+- if (!priv->xdp_prog)
+- goto done;
+-
+- tx_qid = gve_xdp_tx_queue_id(priv, qid);
+- if (!netif_running(dev)) {
+- priv->rx[qid].xsk_pool = NULL;
+- xdp_rxq_info_unreg(&priv->rx[qid].xsk_rxq);
+- priv->tx[tx_qid].xsk_pool = NULL;
++ /* If XDP prog is not installed or interface is down, unmap DMA and
++ * return.
++ */
++ if (!priv->xdp_prog || !netif_running(dev))
+ goto done;
+- }
+
+ napi_rx = &priv->ntfy_blocks[priv->rx[qid].ntfy_id].napi;
+ napi_disable(napi_rx); /* make sure current rx poll is done */
+
++ tx_qid = gve_xdp_tx_queue_id(priv, qid);
+ napi_tx = &priv->ntfy_blocks[priv->tx[tx_qid].ntfy_id].napi;
+ napi_disable(napi_tx); /* make sure current tx poll is done */
+
+@@ -1616,6 +1611,9 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+ struct gve_priv *priv = netdev_priv(dev);
+ int tx_queue_id = gve_xdp_tx_queue_id(priv, queue_id);
+
++ if (!gve_get_napi_enabled(priv))
++ return -ENETDOWN;
++
+ if (queue_id >= priv->rx_cfg.num_queues || !priv->xdp_prog)
+ return -EINVAL;
+
+@@ -1757,6 +1755,9 @@ static void gve_turndown(struct gve_priv *priv)
+
+ gve_clear_napi_enabled(priv);
+ gve_clear_report_stats(priv);
++
++	/* Make sure that all traffic has finished processing. */
++ synchronize_net();
+ }
+
+ static void gve_turnup(struct gve_priv *priv)
+diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
+index 2ae891a62875c7..29987624791a68 100644
+--- a/drivers/net/ethernet/google/gve/gve_tx.c
++++ b/drivers/net/ethernet/google/gve/gve_tx.c
+@@ -777,9 +777,12 @@ int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ struct gve_tx_ring *tx;
+ int i, err = 0, qid;
+
+- if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
++ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK) || !priv->xdp_prog)
+ return -EINVAL;
+
++ if (!gve_get_napi_enabled(priv))
++ return -ENETDOWN;
++
+ qid = gve_xdp_tx_queue_id(priv,
+ smp_processor_id() % priv->num_xdp_queues);
+
+diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
+index 3b129a1c338152..07e5051171a48a 100644
+--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
++++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
+@@ -2708,9 +2708,15 @@ static struct platform_device *port_platdev[3];
+
+ static void mv643xx_eth_shared_of_remove(void)
+ {
++ struct mv643xx_eth_platform_data *pd;
+ int n;
+
+ for (n = 0; n < 3; n++) {
++ if (!port_platdev[n])
++ continue;
++ pd = dev_get_platdata(&port_platdev[n]->dev);
++ if (pd)
++ of_node_put(pd->phy_node);
+ platform_device_del(port_platdev[n]);
+ port_platdev[n] = NULL;
+ }
+@@ -2773,8 +2779,10 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+ }
+
+ ppdev = platform_device_alloc(MV643XX_ETH_NAME, dev_num);
+- if (!ppdev)
+- return -ENOMEM;
++ if (!ppdev) {
++ ret = -ENOMEM;
++ goto put_err;
++ }
+ ppdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ ppdev->dev.of_node = pnp;
+
+@@ -2796,6 +2804,8 @@ static int mv643xx_eth_shared_of_add_port(struct platform_device *pdev,
+
+ port_err:
+ platform_device_put(ppdev);
++put_err:
++ of_node_put(ppd.phy_node);
+ return ret;
+ }
+
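
The leak fixed here follows the usual phandle refcounting rule: a node
returned by of_parse_phandle() carries an elevated refcount, and every path
that stops using it must drop that reference. A minimal sketch of the
pairing, with hypothetical example_* names:

	struct device_node *phy = of_parse_phandle(np, "phy-handle", 0);
	int ret;

	if (!phy)
		return -ENODEV;

	ret = example_attach(phy);	/* hypothetical consumer */
	if (ret) {
		of_node_put(phy);	/* error path drops the reference */
		return ret;
	}
	/* success: keep the reference, drop it at teardown as above */
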
+diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
+index 07720841a8d700..dd3d93720358bd 100644
+--- a/drivers/net/ethernet/marvell/sky2.c
++++ b/drivers/net/ethernet/marvell/sky2.c
+@@ -129,6 +129,7 @@ static const struct pci_device_id sky2_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436C) }, /* 88E8072 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4373) }, /* 88E8075 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4382) }, /* 88E8079 */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+index cc9bcc42003242..6ab02f3fc29123 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+@@ -339,9 +339,13 @@ static int mlx5e_macsec_init_sa_fs(struct macsec_context *ctx,
+ {
+ struct mlx5e_priv *priv = macsec_netdev_priv(ctx->netdev);
+ struct mlx5_macsec_fs *macsec_fs = priv->mdev->macsec_fs;
++ const struct macsec_tx_sc *tx_sc = &ctx->secy->tx_sc;
+ struct mlx5_macsec_rule_attrs rule_attrs;
+ union mlx5_macsec_rule *macsec_rule;
+
++ if (is_tx && tx_sc->encoding_sa != sa->assoc_num)
++ return 0;
++
+ rule_attrs.macsec_obj_id = sa->macsec_obj_id;
+ rule_attrs.sci = sa->sci;
+ rule_attrs.assoc_num = sa->assoc_num;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+index 13b5916b64e224..eed8fcde261384 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
+@@ -150,11 +150,11 @@ void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
+ unsigned long i;
+ int err;
+
+- xa_for_each(&esw->offloads.vport_reps, i, rep) {
+- rpriv = rep->rep_data[REP_ETH].priv;
+- if (!rpriv || !rpriv->netdev)
++ mlx5_esw_for_each_rep(esw, i, rep) {
++ if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
+ continue;
+
++ rpriv = rep->rep_data[REP_ETH].priv;
+ rhashtable_walk_enter(&rpriv->tc_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while ((flow = rhashtable_walk_next(&iter)) != NULL) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+index 9b771b572593b5..3e58e731b5697c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+@@ -713,6 +713,9 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
+ MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
+ (last) - 1)
+
++#define mlx5_esw_for_each_rep(esw, i, rep) \
++ xa_for_each(&((esw)->offloads.vport_reps), i, rep)
++
+ struct mlx5_eswitch *__must_check
+ mlx5_devlink_eswitch_get(struct devlink *devlink);
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+index 58529d1a98b37b..7eba3a5bb97cae 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+@@ -52,9 +52,6 @@
+ #include "lag/lag.h"
+ #include "en/tc/post_meter.h"
+
+-#define mlx5_esw_for_each_rep(esw, i, rep) \
+- xa_for_each(&((esw)->offloads.vport_reps), i, rep)
+-
+ /* There are two match-all miss flows, one for unicast dst mac and
+ * one for multicast.
+ */
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 991250f44c2ed1..474e63d02ba492 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -3478,6 +3478,7 @@ void mlx5_fs_core_free(struct mlx5_core_dev *dev)
+ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_flow_steering *steering;
++ char name[80];
+ int err = 0;
+
+ err = mlx5_init_fc_stats(dev);
+@@ -3502,10 +3503,12 @@ int mlx5_fs_core_alloc(struct mlx5_core_dev *dev)
+ else
+ steering->mode = MLX5_FLOW_STEERING_MODE_DMFS;
+
+- steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
++ snprintf(name, sizeof(name), "%s-mlx5_fs_fgs", dev_name(dev->device));
++ steering->fgs_cache = kmem_cache_create(name,
+ sizeof(struct mlx5_flow_group), 0,
+ 0, NULL);
+- steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
++ snprintf(name, sizeof(name), "%s-mlx5_fs_ftes", dev_name(dev->device));
++ steering->ftes_cache = kmem_cache_create(name, sizeof(struct fs_fte), 0,
+ 0, NULL);
+ if (!steering->ftes_cache || !steering->fgs_cache) {
+ err = -ENOMEM;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+index 6fa06ba2d34653..f57c84e5128bc7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+@@ -1067,7 +1067,6 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
+ int inlen, err, eqn;
+ void *cqc, *in;
+ __be64 *pas;
+- int vector;
+ u32 i;
+
+ cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+@@ -1096,8 +1095,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
+ if (!in)
+ goto err_cqwq;
+
+- vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
+- err = mlx5_comp_eqn_get(mdev, vector, &eqn);
++ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err) {
+ kvfree(in);
+ goto err_cqwq;
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+index dcd198104141f1..fa3fef2b74db0d 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+@@ -423,8 +423,7 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
+
+ parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
+- 0, 0, dev_net(to_dev), parms.link, tun->fwmark, 0,
+- 0);
++ 0, 0, tun->net, parms.link, tun->fwmark, 0, 0);
+
+ rt = ip_route_output_key(tun->net, &fl4);
+ if (IS_ERR(rt))
+diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
+index 54aa56c841334c..2f483531d95cb7 100644
+--- a/drivers/net/ethernet/renesas/rswitch.c
++++ b/drivers/net/ethernet/renesas/rswitch.c
+@@ -1632,8 +1632,9 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
+ if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
+ goto err_kfree;
+
+- gq->skbs[gq->cur] = skb;
+- gq->unmap_addrs[gq->cur] = dma_addr_orig;
++	/* Store the skb at the last descriptor to avoid freeing it before the hardware completes the send. */
++ gq->skbs[(gq->cur + nr_desc - 1) % gq->ring_size] = skb;
++ gq->unmap_addrs[(gq->cur + nr_desc - 1) % gq->ring_size] = dma_addr_orig;
+
+ dma_wmb();
+
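
Worked example of the new index: with ring_size = 1024, cur = 1022 and
nr_desc = 3, the skb is now stored at (1022 + 3 - 1) % 1024 = 0, the slot of
the descriptor the hardware completes last, rather than at slot 1022, the
first of the three. Previously, completion of that first descriptor could
free the skb while the remaining descriptors were still in flight.
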
+diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
+index 44bb57670340da..109d2aa34ae332 100644
+--- a/drivers/net/ethernet/sfc/tc_conntrack.c
++++ b/drivers/net/ethernet/sfc/tc_conntrack.c
+@@ -16,7 +16,7 @@ static int efx_tc_flow_block(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
+ static const struct rhashtable_params efx_tc_ct_zone_ht_params = {
+- .key_len = offsetof(struct efx_tc_ct_zone, linkage),
++ .key_len = sizeof_field(struct efx_tc_ct_zone, zone),
+ .key_offset = 0,
+ .head_offset = offsetof(struct efx_tc_ct_zone, linkage),
+ };
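
The old key_len of offsetof(struct efx_tc_ct_zone, linkage) made the hash
key span every byte before the linkage member, including padding and fields
that are not part of the lookup key, so logically equal keys could hash or
compare unequal. A sketch of the distinction on a hypothetical layout (not
the real efx struct):

	struct ct_zone {
		u16 zone;		/* the actual lookup key */
		/* 2 bytes of implicit padding */
		u32 refcnt;		/* not part of the key */
		struct rhash_head linkage;
	};

	static const struct rhashtable_params zone_ht_params = {
		.key_len = sizeof_field(struct ct_zone, zone),	/* 2 bytes */
		.key_offset = offsetof(struct ct_zone, zone),	/* 0 here */
		.head_offset = offsetof(struct ct_zone, linkage),
	};
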
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+index b4fdd40be63cb4..4d570efd9d4bbe 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+@@ -296,62 +296,80 @@ static int stmmac_mtl_setup(struct platform_device *pdev,
+ }
+
+ /**
+- * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
+- * @plat: driver data platform structure
+- * @np: device tree node
+- * @dev: device pointer
+- * Description:
+- * The mdio bus will be allocated in case of a phy transceiver is on board;
+- * it will be NULL if the fixed-link is configured.
+- * If there is the "snps,dwmac-mdio" sub-node the mdio will be allocated
+- * in any case (for DSA, mdio must be registered even if fixed-link).
+- * The table below sums the supported configurations:
+- * -------------------------------
+- * snps,phy-addr | Y
+- * -------------------------------
+- * phy-handle | Y
+- * -------------------------------
+- * fixed-link | N
+- * -------------------------------
+- * snps,dwmac-mdio |
+- * even if | Y
+- * fixed-link |
+- * -------------------------------
++ * stmmac_of_get_mdio() - Gets the MDIO bus from the devicetree.
++ * @np: devicetree node
++ *
++ * The MDIO bus will be searched for in the following ways:
++ * 1. The compatible is "snps,dwc-qos-ethernet-4.10" and a child node
++ *    named "mdio" exists
++ * 2. A child node with the "snps,dwmac-mdio" compatible is present
+ *
+- * It returns 0 in case of success otherwise -ENODEV.
++ * Return: The MDIO node if present, otherwise NULL
+ */
+-static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
+- struct device_node *np, struct device *dev)
++static struct device_node *stmmac_of_get_mdio(struct device_node *np)
+ {
+- bool mdio = !of_phy_is_fixed_link(np);
+ static const struct of_device_id need_mdio_ids[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10" },
+ {},
+ };
++ struct device_node *mdio_node = NULL;
+
+ if (of_match_node(need_mdio_ids, np)) {
+- plat->mdio_node = of_get_child_by_name(np, "mdio");
++ mdio_node = of_get_child_by_name(np, "mdio");
+ } else {
+ /**
+ * If snps,dwmac-mdio is passed from DT, always register
+ * the MDIO
+ */
+- for_each_child_of_node(np, plat->mdio_node) {
+- if (of_device_is_compatible(plat->mdio_node,
++ for_each_child_of_node(np, mdio_node) {
++ if (of_device_is_compatible(mdio_node,
+ "snps,dwmac-mdio"))
+ break;
+ }
+ }
+
+- if (plat->mdio_node) {
++ return mdio_node;
++}
++
++/**
++ * stmmac_mdio_setup() - Populate platform-related MDIO structures.
++ * @plat: driver data platform structure
++ * @np: devicetree node
++ * @dev: device pointer
++ *
++ * This searches the devicetree for MDIO information.
++ * If an MDIO node is found, it's assigned to plat->mdio_node and
++ * plat->mdio_bus_data is allocated.
++ * If no connection can be determined, only plat->mdio_bus_data is
++ * allocated, to indicate that a bus should be created and scanned
++ * for a phy. If it's determined that no MDIO bus is needed, both are left NULL.
++ *
++ * This expects that plat->phy_node has already been searched for.
++ *
++ * Return: 0 on success, errno otherwise.
++ */
++static int stmmac_mdio_setup(struct plat_stmmacenet_data *plat,
++ struct device_node *np, struct device *dev)
++{
++ bool legacy_mdio;
++
++ plat->mdio_node = stmmac_of_get_mdio(np);
++ if (plat->mdio_node)
+ dev_dbg(dev, "Found MDIO subnode\n");
+- mdio = true;
+- }
+
+- if (mdio) {
+- plat->mdio_bus_data =
+- devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
+- GFP_KERNEL);
++	/* Legacy devicetrees allowed for no MDIO bus description and expected
++	 * the bus to be scanned for devices. If there's no phy or fixed-link
++	 * described, assume this is the case since there must be something
++	 * connected to the MAC.
++ */
++ legacy_mdio = !of_phy_is_fixed_link(np) && !plat->phy_node;
++ if (legacy_mdio)
++ dev_info(dev, "Deprecated MDIO bus assumption used\n");
++
++ if (plat->mdio_node || legacy_mdio) {
++ plat->mdio_bus_data = devm_kzalloc(dev,
++ sizeof(*plat->mdio_bus_data),
++ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+@@ -455,10 +473,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
+ dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
+
+- /* To Configure PHY by using all device-tree supported properties */
+- rc = stmmac_dt_phy(plat, np, &pdev->dev);
+- if (rc)
+- return ERR_PTR(rc);
++ rc = stmmac_mdio_setup(plat, np, &pdev->dev);
++ if (rc) {
++ ret = ERR_PTR(rc);
++ goto error_put_phy;
++ }
+
+ of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
+
+@@ -547,8 +566,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+ GFP_KERNEL);
+ if (!dma_cfg) {
+- stmmac_remove_config_dt(pdev, plat);
+- return ERR_PTR(-ENOMEM);
++ ret = ERR_PTR(-ENOMEM);
++ goto error_put_mdio;
+ }
+ plat->dma_cfg = dma_cfg;
+
+@@ -576,8 +595,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+
+ rc = stmmac_mtl_setup(pdev, plat);
+ if (rc) {
+- stmmac_remove_config_dt(pdev, plat);
+- return ERR_PTR(rc);
++ ret = ERR_PTR(rc);
++ goto error_put_mdio;
+ }
+
+ /* clock setup */
+@@ -629,6 +648,10 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+ clk_disable_unprepare(plat->pclk);
+ error_pclk_get:
+ clk_disable_unprepare(plat->stmmac_clk);
++error_put_mdio:
++ of_node_put(plat->mdio_node);
++error_put_phy:
++ of_node_put(plat->phy_node);
+
+ return ret;
+ }
+@@ -637,16 +660,17 @@ static void devm_stmmac_remove_config_dt(void *data)
+ {
+ struct plat_stmmacenet_data *plat = data;
+
+- /* Platform data argument is unused */
+- stmmac_remove_config_dt(NULL, plat);
++ clk_disable_unprepare(plat->stmmac_clk);
++ clk_disable_unprepare(plat->pclk);
++ of_node_put(plat->mdio_node);
++ of_node_put(plat->phy_node);
+ }
+
+ /**
+ * devm_stmmac_probe_config_dt
+ * @pdev: platform_device structure
+ * @mac: MAC address to use
+- * Description: Devres variant of stmmac_probe_config_dt(). Does not require
+- * the user to call stmmac_remove_config_dt() at driver detach.
++ * Description: Devres variant of stmmac_probe_config_dt().
+ */
+ struct plat_stmmacenet_data *
+ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
+index 3025e9c189702b..f06cdec14ed7a1 100644
+--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
++++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
+@@ -290,6 +290,9 @@ static void icss_iep_enable_shadow_mode(struct icss_iep *iep)
+ for (cmp = IEP_MIN_CMP; cmp < IEP_MAX_CMP; cmp++) {
+ regmap_update_bits(iep->map, ICSS_IEP_CMP_STAT_REG,
+ IEP_CMP_STATUS(cmp), IEP_CMP_STATUS(cmp));
++
++ regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
++ IEP_CMP_CFG_CMP_EN(cmp), 0);
+ }
+
+ /* enable reset counter on CMP0 event */
+@@ -808,6 +811,11 @@ int icss_iep_exit(struct icss_iep *iep)
+ }
+ icss_iep_disable(iep);
+
++ if (iep->pps_enabled)
++ icss_iep_pps_enable(iep, false);
++ else if (iep->perout_enabled)
++ icss_iep_perout_enable(iep, NULL, false);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(icss_iep_exit);
+diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
+index 89775b6d0699a0..8e30df676ededb 100644
+--- a/drivers/net/usb/qmi_wwan.c
++++ b/drivers/net/usb/qmi_wwan.c
+@@ -1373,6 +1373,9 @@ static const struct usb_device_id products[] = {
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a0, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a4, 0)}, /* Telit FN920C04 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10a9, 0)}, /* Telit FN920C04 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c0, 0)}, /* Telit FE910C04 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c4, 0)}, /* Telit FE910C04 */
++ {QMI_QUIRK_SET_DTR(0x1bc7, 0x10c8, 0)}, /* Telit FE910C04 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+ {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
+ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
+diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
+index af6546572df26b..9a4f8e815412cb 100644
+--- a/drivers/net/wireless/ath/ath10k/bmi.c
++++ b/drivers/net/wireless/ath/ath10k/bmi.c
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2014,2016-2017 Qualcomm Atheros, Inc.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "bmi.h"
+diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
+index c27b8204718a6d..afae4a8027f833 100644
+--- a/drivers/net/wireless/ath/ath10k/ce.c
++++ b/drivers/net/wireless/ath/ath10k/ce.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "hif.h"
+diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
+index 81058be3598f15..c3a8b3496be2a5 100644
+--- a/drivers/net/wireless/ath/ath10k/core.c
++++ b/drivers/net/wireless/ath/ath10k/core.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/module.h>
+diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
+index 4b5239de401840..cb2359d2ee0b04 100644
+--- a/drivers/net/wireless/ath/ath10k/core.h
++++ b/drivers/net/wireless/ath/ath10k/core.h
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _CORE_H_
+diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
+index 2d1634a890dde3..bb3a276b7ed584 100644
+--- a/drivers/net/wireless/ath/ath10k/coredump.c
++++ b/drivers/net/wireless/ath/ath10k/coredump.c
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "coredump.h"
+diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
+index 437b9759f05d3d..e5ef0352e319c7 100644
+--- a/drivers/net/wireless/ath/ath10k/coredump.h
++++ b/drivers/net/wireless/ath/ath10k/coredump.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: ISC */
+ /*
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _COREDUMP_H_
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index fe89bc61e5317d..92ad0a04bcc738 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/module.h>
+diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+index 5598cf706daabc..0f6de862c3a9ba 100644
+--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
++++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "core.h"
+diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
+index 5bfeecb95fca23..a6e21ce90bad64 100644
+--- a/drivers/net/wireless/ath/ath10k/htc.c
++++ b/drivers/net/wireless/ath/ath10k/htc.c
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "core.h"
+diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
+index 7b24297146e72a..52f6dc6b81c5ee 100644
+--- a/drivers/net/wireless/ath/ath10k/htt.h
++++ b/drivers/net/wireless/ath/ath10k/htt.h
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _HTT_H_
+diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
+index 438b0caaceb79e..51855f23ea2664 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "core.h"
+diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
+index bd603feb795314..60425d22d70790 100644
+--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
++++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/etherdevice.h>
+diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
+index 6d32b43a4da65e..8fafe096adff53 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.c
++++ b/drivers/net/wireless/ath/ath10k/hw.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: ISC
+ /*
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/types.h>
+diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
+index 7ecdd0011cfa48..afd336282615c6 100644
+--- a/drivers/net/wireless/ath/ath10k/hw.h
++++ b/drivers/net/wireless/ath/ath10k/hw.h
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _HW_H_
+diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
+index d5e6e11f630b95..655fb5cdf01f86 100644
+--- a/drivers/net/wireless/ath/ath10k/mac.c
++++ b/drivers/net/wireless/ath/ath10k/mac.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "mac.h"
+diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
+index 23f36622193908..aaa240f3c08a9f 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.c
++++ b/drivers/net/wireless/ath/ath10k/pci.c
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/pci.h>
+diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
+index 480cd97ab739de..27bb4cf2dfea93 100644
+--- a/drivers/net/wireless/ath/ath10k/pci.h
++++ b/drivers/net/wireless/ath/ath10k/pci.h
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _PCI_H_
+diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
+index 52c1a3de8da60a..38e939f572a9ed 100644
+--- a/drivers/net/wireless/ath/ath10k/qmi.c
++++ b/drivers/net/wireless/ath/ath10k/qmi.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: ISC
+ /*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/completion.h>
+diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
+index 1c81e454f943fd..0e85c75d227836 100644
+--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
++++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: ISC
+ /*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/soc/qcom/qmi.h>
+diff --git a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
+index f0db991408dc26..9f311f3bc9e7f9 100644
+--- a/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
++++ b/drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.h
+@@ -1,6 +1,7 @@
+ /* SPDX-License-Identifier: ISC */
+ /*
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef WCN3990_QMI_SVC_V01_H
+diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
+index 777e53aa69dc86..564293df1e9acf 100644
+--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
++++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _RX_DESC_H_
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index 56fbcfb80bf886..850d999615a2c3 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012,2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
++ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/module.h>
+@@ -2647,9 +2648,9 @@ static void ath10k_sdio_remove(struct sdio_func *func)
+
+ netif_napi_del(&ar->napi);
+
+- ath10k_core_destroy(ar);
+-
+ destroy_workqueue(ar_sdio->workqueue);
++
++ ath10k_core_destroy(ar);
+ }
+
+ static const struct sdio_device_id ath10k_sdio_devices[] = {
+diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
+index cefd97323dfe50..31c8d7fbb0955b 100644
+--- a/drivers/net/wireless/ath/ath10k/thermal.c
++++ b/drivers/net/wireless/ath/ath10k/thermal.c
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: ISC
+ /*
+ * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/device.h>
+diff --git a/drivers/net/wireless/ath/ath10k/usb.h b/drivers/net/wireless/ath/ath10k/usb.h
+index 48e066ba816249..7e4cfbb673c9a8 100644
+--- a/drivers/net/wireless/ath/ath10k/usb.h
++++ b/drivers/net/wireless/ath/ath10k/usb.h
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2004-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
+ * Copyright (c) 2016-2017 Erik Stromdahl <erik.stromdahl@gmail.com>
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _USB_H_
+diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+index dbb48d70f2e93e..83a8f07a687f73 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h
++++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+ #ifndef _WMI_TLV_H
+ #define _WMI_TLV_H
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 1c21dbde77b84e..818aea99f85eb3 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include <linux/skbuff.h>
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
+index b112e88260931c..9146df98fceeee 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.h
++++ b/drivers/net/wireless/ath/ath10k/wmi.h
+@@ -3,6 +3,7 @@
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #ifndef _WMI_H_
+diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c
+index 20b9aa8ddf7d52..aa7b2e703f3d4b 100644
+--- a/drivers/net/wireless/ath/ath10k/wow.c
++++ b/drivers/net/wireless/ath/ath10k/wow.c
+@@ -2,6 +2,7 @@
+ /*
+ * Copyright (c) 2015-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
++ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+ #include "mac.h"
+diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
+index f90191a290c26a..713899735ccc5e 100644
+--- a/drivers/net/wireless/ath/ath12k/mac.c
++++ b/drivers/net/wireless/ath/ath12k/mac.c
+@@ -4945,7 +4945,7 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to queue management frame %d\n",
+ ret);
+- ieee80211_free_txskb(ar->hw, skb);
++ ieee80211_free_txskb(hw, skb);
+ }
+ return;
+ }
+@@ -4953,7 +4953,7 @@ static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
+ ret = ath12k_dp_tx(ar, arvif, skb);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
+- ieee80211_free_txskb(ar->hw, skb);
++ ieee80211_free_txskb(hw, skb);
+ }
+ }
+
+@@ -5496,7 +5496,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+ goto err_peer_del;
+
+ param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
+- param_value = ar->hw->wiphy->rts_threshold;
++ param_value = hw->wiphy->rts_threshold;
+ ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+@@ -6676,9 +6676,9 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+- ieee80211_iterate_stations_atomic(ar->hw,
+- ath12k_mac_disable_peer_fixed_rate,
+- arvif);
++ ieee80211_iterate_stations_mtx(hw,
++ ath12k_mac_disable_peer_fixed_rate,
++ arvif);
+ } else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+ &single_nss)) {
+ rate = WMI_FIXED_RATE_NONE;
+@@ -6722,16 +6722,16 @@ ath12k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
+ return -EINVAL;
+ }
+
+- ieee80211_iterate_stations_atomic(ar->hw,
+- ath12k_mac_disable_peer_fixed_rate,
+- arvif);
++ ieee80211_iterate_stations_mtx(hw,
++ ath12k_mac_disable_peer_fixed_rate,
++ arvif);
+
+ mutex_lock(&ar->conf_mutex);
+
+ arvif->bitrate_mask = *mask;
+- ieee80211_iterate_stations_atomic(ar->hw,
+- ath12k_mac_set_bitrate_mask_iter,
+- arvif);
++ ieee80211_iterate_stations_mtx(hw,
++ ath12k_mac_set_bitrate_mask_iter,
++ arvif);
+
+ mutex_unlock(&ar->conf_mutex);
+ }
+@@ -6767,7 +6767,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
+ ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
+ ar->pdev->pdev_id);
+ ar->state = ATH12K_STATE_ON;
+- ieee80211_wake_queues(ar->hw);
++ ieee80211_wake_queues(hw);
+
+ if (ab->is_reset) {
+ recovery_count = atomic_inc_return(&ab->recovery_count);
+diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
+index 32bdefeccc2453..837a3e1ec3a49c 100644
+--- a/drivers/net/wireless/ath/ath12k/reg.c
++++ b/drivers/net/wireless/ath/ath12k/reg.c
+@@ -28,11 +28,11 @@ static const struct ieee80211_regdomain ath12k_world_regd = {
+ }
+ };
+
+-static bool ath12k_regdom_changes(struct ath12k *ar, char *alpha2)
++static bool ath12k_regdom_changes(struct ieee80211_hw *hw, char *alpha2)
+ {
+ const struct ieee80211_regdomain *regd;
+
+- regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
++ regd = rcu_dereference_rtnl(hw->wiphy->regd);
+ /* This can happen during wiphy registration where the previous
+ * user request is received before we update the regd received
+ * from firmware.
+@@ -71,7 +71,7 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+ return;
+ }
+
+- if (!ath12k_regdom_changes(ar, request->alpha2)) {
++ if (!ath12k_regdom_changes(hw, request->alpha2)) {
+ ath12k_dbg(ar->ab, ATH12K_DBG_REG, "Country is already set\n");
+ return;
+ }
+diff --git a/drivers/net/wireless/realtek/rtw88/sdio.c b/drivers/net/wireless/realtek/rtw88/sdio.c
+index 0cae5746f540fa..5bd1ee81210d1d 100644
+--- a/drivers/net/wireless/realtek/rtw88/sdio.c
++++ b/drivers/net/wireless/realtek/rtw88/sdio.c
+@@ -1295,12 +1295,12 @@ static void rtw_sdio_deinit_tx(struct rtw_dev *rtwdev)
+ struct rtw_sdio *rtwsdio = (struct rtw_sdio *)rtwdev->priv;
+ int i;
+
+- for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
+- skb_queue_purge(&rtwsdio->tx_queue[i]);
+-
+ flush_workqueue(rtwsdio->txwq);
+ destroy_workqueue(rtwsdio->txwq);
+ kfree(rtwsdio->tx_handler_data);
++
++ for (i = 0; i < RTK_MAX_TX_QUEUE_NUM; i++)
++ ieee80211_purge_tx_queue(rtwdev->hw, &rtwsdio->tx_queue[i]);
+ }
+
+ int rtw_sdio_probe(struct sdio_func *sdio_func,
+diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
+index 04a64afcbf8a2d..8f1d653282b7ec 100644
+--- a/drivers/net/wireless/realtek/rtw88/usb.c
++++ b/drivers/net/wireless/realtek/rtw88/usb.c
+@@ -416,10 +416,11 @@ static void rtw_usb_tx_handler(struct work_struct *work)
+
+ static void rtw_usb_tx_queue_purge(struct rtw_usb *rtwusb)
+ {
++ struct rtw_dev *rtwdev = rtwusb->rtwdev;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rtwusb->tx_queue); i++)
+- skb_queue_purge(&rtwusb->tx_queue[i]);
++ ieee80211_purge_tx_queue(rtwdev->hw, &rtwusb->tx_queue[i]);
+ }
+
+ static void rtw_usb_write_port_complete(struct urb *urb)
+@@ -801,9 +802,9 @@ static void rtw_usb_deinit_tx(struct rtw_dev *rtwdev)
+ {
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+
+- rtw_usb_tx_queue_purge(rtwusb);
+ flush_workqueue(rtwusb->txwq);
+ destroy_workqueue(rtwusb->txwq);
++ rtw_usb_tx_queue_purge(rtwusb);
+ }
+
+ static int rtw_usb_intf_init(struct rtw_dev *rtwdev,
+diff --git a/drivers/net/wwan/iosm/iosm_ipc_mmio.c b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+index 63eb08c43c0517..6764c13530b9bd 100644
+--- a/drivers/net/wwan/iosm/iosm_ipc_mmio.c
++++ b/drivers/net/wwan/iosm/iosm_ipc_mmio.c
+@@ -104,7 +104,7 @@ struct iosm_mmio *ipc_mmio_init(void __iomem *mmio, struct device *dev)
+ break;
+
+ msleep(20);
+- } while (retries-- > 0);
++ } while (--retries > 0);
+
+ if (!retries) {
+ dev_err(ipc_mmio->dev, "invalid exec stage %X", stage);
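
The off-by-one here: with the post-decrement test retries-- > 0, an
exhausted loop leaves retries at -1, so the following if (!retries) never
reports the timeout (and the body runs one extra time); with --retries > 0,
exhaustion leaves retries == 0 and the error path fires. A standalone demo:

	#include <stdio.h>

	int main(void)
	{
		int retries;

		retries = 3;	/* pretend the stage never becomes valid */
		do { } while (retries-- > 0);
		printf("post-decrement: retries=%d, !retries=%d\n",
		       retries, !retries);	/* -1, 0: timeout missed */

		retries = 3;
		do { } while (--retries > 0);
		printf("pre-decrement:  retries=%d, !retries=%d\n",
		       retries, !retries);	/* 0, 1: timeout reported */
		return 0;
	}
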
+diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.c b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+index 80edb8e75a6ad7..64868df3640d13 100644
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.c
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.c
+@@ -97,14 +97,21 @@ void t7xx_fsm_broadcast_state(struct t7xx_fsm_ctl *ctl, enum md_state state)
+ fsm_state_notify(ctl->md, state);
+ }
+
++static void fsm_release_command(struct kref *ref)
++{
++ struct t7xx_fsm_command *cmd = container_of(ref, typeof(*cmd), refcnt);
++
++ kfree(cmd);
++}
++
+ static void fsm_finish_command(struct t7xx_fsm_ctl *ctl, struct t7xx_fsm_command *cmd, int result)
+ {
+ if (cmd->flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+- *cmd->ret = result;
+- complete_all(cmd->done);
++ cmd->result = result;
++ complete_all(&cmd->done);
+ }
+
+- kfree(cmd);
++ kref_put(&cmd->refcnt, fsm_release_command);
+ }
+
+ static void fsm_del_kf_event(struct t7xx_fsm_event *event)
+@@ -396,7 +403,6 @@ static int fsm_main_thread(void *data)
+
+ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id, unsigned int flag)
+ {
+- DECLARE_COMPLETION_ONSTACK(done);
+ struct t7xx_fsm_command *cmd;
+ unsigned long flags;
+ int ret;
+@@ -408,11 +414,13 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
+ INIT_LIST_HEAD(&cmd->entry);
+ cmd->cmd_id = cmd_id;
+ cmd->flag = flag;
++ kref_init(&cmd->refcnt);
+ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+- cmd->done = &done;
+- cmd->ret = &ret;
++ init_completion(&cmd->done);
++ kref_get(&cmd->refcnt);
+ }
+
++ kref_get(&cmd->refcnt);
+ spin_lock_irqsave(&ctl->command_lock, flags);
+ list_add_tail(&cmd->entry, &ctl->command_queue);
+ spin_unlock_irqrestore(&ctl->command_lock, flags);
+@@ -422,11 +430,11 @@ int t7xx_fsm_append_cmd(struct t7xx_fsm_ctl *ctl, enum t7xx_fsm_cmd_state cmd_id
+ if (flag & FSM_CMD_FLAG_WAIT_FOR_COMPLETION) {
+ unsigned long wait_ret;
+
+- wait_ret = wait_for_completion_timeout(&done,
++ wait_ret = wait_for_completion_timeout(&cmd->done,
+ msecs_to_jiffies(FSM_CMD_TIMEOUT_MS));
+- if (!wait_ret)
+- return -ETIMEDOUT;
+
++ ret = wait_ret ? cmd->result : -ETIMEDOUT;
++ kref_put(&cmd->refcnt, fsm_release_command);
+ return ret;
+ }
+
+diff --git a/drivers/net/wwan/t7xx/t7xx_state_monitor.h b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+index b6e76f3903c892..74f96fd2605e8e 100644
+--- a/drivers/net/wwan/t7xx/t7xx_state_monitor.h
++++ b/drivers/net/wwan/t7xx/t7xx_state_monitor.h
+@@ -109,8 +109,9 @@ struct t7xx_fsm_command {
+ struct list_head entry;
+ enum t7xx_fsm_cmd_state cmd_id;
+ unsigned int flag;
+- struct completion *done;
+- int *ret;
++ struct completion done;
++ int result;
++ struct kref refcnt;
+ };
+
+ struct t7xx_fsm_notifier {
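
Before this change the command held pointers to a completion and an int on
the issuer's stack; once a timed-out issuer returned, a late
fsm_finish_command() wrote through dangling pointers. Embedding the
completion and result in the command and refcounting it removes that
ordering requirement. A generic sketch of the pattern with hypothetical
msg_* names (it assumes references were taken with kref_init()/kref_get()
at submission, as the patch does):

	struct msg {
		struct completion done;
		int result;
		struct kref refcnt;
	};

	static void msg_release(struct kref *ref)
	{
		kfree(container_of(ref, struct msg, refcnt));
	}

	/* consumer side */
	static void msg_finish(struct msg *m, int result)
	{
		m->result = result;
		complete_all(&m->done);
		kref_put(&m->refcnt, msg_release);
	}

	/* producer side: safe even if the consumer finishes late */
	static int msg_wait(struct msg *m, unsigned long timeout_ms)
	{
		int ret = -ETIMEDOUT;

		if (wait_for_completion_timeout(&m->done,
						msecs_to_jiffies(timeout_ms)))
			ret = m->result;
		kref_put(&m->refcnt, msg_release);
		return ret;
	}
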
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 5b6a6bd4e6e800..4aad16390d4790 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1178,13 +1178,13 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
+ nvme_keep_alive_work_period(ctrl));
+ }
+
+-static void nvme_keep_alive_finish(struct request *rq,
+- blk_status_t status, struct nvme_ctrl *ctrl)
++static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
++ blk_status_t status)
+ {
+- unsigned long flags;
+- bool startka = false;
++ struct nvme_ctrl *ctrl = rq->end_io_data;
+ unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
+ unsigned long delay = nvme_keep_alive_work_period(ctrl);
++ enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
+
+ /*
+ * Subtract off the keepalive RTT so nvme_keep_alive_work runs
+@@ -1198,22 +1198,20 @@ static void nvme_keep_alive_finish(struct request *rq,
+ delay = 0;
+ }
+
++ blk_mq_free_request(rq);
++
+ if (status) {
+ dev_err(ctrl->device,
+ "failed nvme_keep_alive_end_io error=%d\n",
+ status);
+- return;
++ return RQ_END_IO_NONE;
+ }
+
+ ctrl->ka_last_check_time = jiffies;
+ ctrl->comp_seen = false;
+- spin_lock_irqsave(&ctrl->lock, flags);
+- if (ctrl->state == NVME_CTRL_LIVE ||
+- ctrl->state == NVME_CTRL_CONNECTING)
+- startka = true;
+- spin_unlock_irqrestore(&ctrl->lock, flags);
+- if (startka)
++ if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
++ return RQ_END_IO_NONE;
+ }
+
+ static void nvme_keep_alive_work(struct work_struct *work)
+@@ -1222,7 +1220,6 @@ static void nvme_keep_alive_work(struct work_struct *work)
+ struct nvme_ctrl, ka_work);
+ bool comp_seen = ctrl->comp_seen;
+ struct request *rq;
+- blk_status_t status;
+
+ ctrl->ka_last_check_time = jiffies;
+
+@@ -1245,9 +1242,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
+ nvme_init_request(rq, &ctrl->ka_cmd);
+
+ rq->timeout = ctrl->kato * HZ;
+- status = blk_execute_rq(rq, false);
+- nvme_keep_alive_finish(rq, status, ctrl);
+- blk_mq_free_request(rq);
++ rq->end_io = nvme_keep_alive_end_io;
++ rq->end_io_data = ctrl;
++ blk_execute_rq_nowait(rq, false);
+ }
+
+ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
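
The keep-alive now completes asynchronously: blk_execute_rq_nowait() returns
immediately and the end_io callback runs in completion context, frees the
request itself, and returns RQ_END_IO_NONE so blk-mq does not touch the
request again. A sketch of that callback contract, with hypothetical my_*
names:

	static enum rq_end_io_ret my_end_io(struct request *rq,
					    blk_status_t status)
	{
		struct my_ctrl *ctrl = rq->end_io_data;	/* read before free */

		blk_mq_free_request(rq);	/* the callback owns rq */
		my_handle_status(ctrl, status);	/* hypothetical follow-up */
		return RQ_END_IO_NONE;		/* blk-mq must not free rq */
	}
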
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index bddc068d58c7ea..e867ac859a878e 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -172,6 +172,11 @@ enum nvme_quirks {
+ * MSI (but not MSI-X) interrupts are broken and never fire.
+ */
+ NVME_QUIRK_BROKEN_MSI = (1 << 21),
++
++ /*
++ * Align dma pool segment size to 512 bytes
++ */
++ NVME_QUIRK_DMAPOOL_ALIGN_512 = (1 << 22),
+ };
+
+ /*
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index d525fa1229d791..52c8fd3d5c4796 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2653,15 +2653,20 @@ static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+
+ static int nvme_setup_prp_pools(struct nvme_dev *dev)
+ {
++ size_t small_align = 256;
++
+ dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
+ NVME_CTRL_PAGE_SIZE,
+ NVME_CTRL_PAGE_SIZE, 0);
+ if (!dev->prp_page_pool)
+ return -ENOMEM;
+
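++ /* O2 Micro (Steam Deck) parts need the small PRP pool aligned to 512 bytes */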
++ if (dev->ctrl.quirks & NVME_QUIRK_DMAPOOL_ALIGN_512)
++ small_align = 512;
++
+ /* Optimisation for I/Os between 4k and 128k */
+ dev->prp_small_pool = dma_pool_create("prp list 256", dev->dev,
+- 256, 256, 0);
++ 256, small_align, 0);
+ if (!dev->prp_small_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ return -ENOMEM;
+@@ -3403,7 +3408,7 @@ static const struct pci_device_id nvme_id_table[] = {
+ { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1217, 0x8760), /* O2 Micro 64GB Steam Deck */
+- .driver_data = NVME_QUIRK_QDEPTH_ONE },
++ .driver_data = NVME_QUIRK_DMAPOOL_ALIGN_512, },
+ { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_BOGUS_NID, },
+diff --git a/drivers/of/address.c b/drivers/of/address.c
+index dfd05cb2b2fcfa..34d880a1be0a5e 100644
+--- a/drivers/of/address.c
++++ b/drivers/of/address.c
+@@ -46,7 +46,7 @@ struct of_bus {
+ u64 (*map)(__be32 *addr, const __be32 *range,
+ int na, int ns, int pna);
+ int (*translate)(__be32 *addr, u64 offset, int na);
+- bool has_flags;
++ int flag_cells;
+ unsigned int (*get_flags)(const __be32 *addr);
+ };
+
+@@ -217,10 +217,6 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
+ return da - cp;
+ }
+
+-static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
+-{
+- return of_bus_default_translate(addr + 1, offset, na - 1);
+-}
+ #endif /* CONFIG_PCI */
+
+ /*
+@@ -344,11 +340,6 @@ static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
+ return da - cp;
+ }
+
+-static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
+-{
+- return of_bus_default_translate(addr + 1, offset, na - 1);
+-}
+-
+ static unsigned int of_bus_isa_get_flags(const __be32 *addr)
+ {
+ unsigned int flags = 0;
+@@ -379,8 +370,8 @@ static struct of_bus of_busses[] = {
+ .match = of_bus_pci_match,
+ .count_cells = of_bus_pci_count_cells,
+ .map = of_bus_pci_map,
+- .translate = of_bus_pci_translate,
+- .has_flags = true,
++ .translate = of_bus_default_flags_translate,
++ .flag_cells = 1,
+ .get_flags = of_bus_pci_get_flags,
+ },
+ #endif /* CONFIG_PCI */
+@@ -391,8 +382,8 @@ static struct of_bus of_busses[] = {
+ .match = of_bus_isa_match,
+ .count_cells = of_bus_isa_count_cells,
+ .map = of_bus_isa_map,
+- .translate = of_bus_isa_translate,
+- .has_flags = true,
++ .translate = of_bus_default_flags_translate,
++ .flag_cells = 1,
+ .get_flags = of_bus_isa_get_flags,
+ },
+ /* Default with flags cell */
+@@ -403,7 +394,7 @@ static struct of_bus of_busses[] = {
+ .count_cells = of_bus_default_count_cells,
+ .map = of_bus_default_flags_map,
+ .translate = of_bus_default_flags_translate,
+- .has_flags = true,
++ .flag_cells = 1,
+ .get_flags = of_bus_default_flags_get_flags,
+ },
+ /* Default */
+@@ -485,7 +476,8 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
+ }
+ if (ranges == NULL || rlen == 0) {
+ offset = of_read_number(addr, na);
+- memset(addr, 0, pna * 4);
++ /* set address to zero, pass flags through */
++ memset(addr + pbus->flag_cells, 0, (pna - pbus->flag_cells) * 4);
+ pr_debug("empty ranges; 1:1 translation\n");
+ goto finish;
+ }
+@@ -836,7 +828,7 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
+ int na = parser->na;
+ int ns = parser->ns;
+ int np = parser->pna + na + ns;
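++ /* number of leading cells holding bus flags (0 on the plain default bus) */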
+- int busflag_na = 0;
++ int busflag_na = parser->bus->flag_cells;
+
+ if (!range)
+ return NULL;
+@@ -846,10 +838,6 @@ struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
+
+ range->flags = parser->bus->get_flags(parser->range);
+
+- /* A extra cell for resource flags */
+- if (parser->bus->has_flags)
+- busflag_na = 1;
+-
+ range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
+
+ if (parser->dma)
+diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
+index 4551575e4e7d7d..fd97b6ee2a8d11 100644
+--- a/drivers/pinctrl/pinctrl-mcp23s08.c
++++ b/drivers/pinctrl/pinctrl-mcp23s08.c
+@@ -86,6 +86,7 @@ const struct regmap_config mcp23x08_regmap = {
+ .num_reg_defaults = ARRAY_SIZE(mcp23x08_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .max_register = MCP_OLAT,
++ .disable_locking = true, /* mcp->lock protects the regmap */
+ };
+ EXPORT_SYMBOL_GPL(mcp23x08_regmap);
+
+@@ -132,6 +133,7 @@ const struct regmap_config mcp23x17_regmap = {
+ .num_reg_defaults = ARRAY_SIZE(mcp23x17_defaults),
+ .cache_type = REGCACHE_FLAT,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
++ .disable_locking = true, /* mcp->lock protects the regmap */
+ };
+ EXPORT_SYMBOL_GPL(mcp23x17_regmap);
+
+@@ -228,7 +230,9 @@ static int mcp_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
++ mutex_lock(&mcp->lock);
+ ret = mcp_read(mcp, MCP_GPPU, &data);
++ mutex_unlock(&mcp->lock);
+ if (ret < 0)
+ return ret;
+ status = (data & BIT(pin)) ? 1 : 0;
+@@ -257,7 +261,9 @@ static int mcp_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
++ mutex_lock(&mcp->lock);
+ ret = mcp_set_bit(mcp, MCP_GPPU, pin, arg);
++ mutex_unlock(&mcp->lock);
+ break;
+ default:
+ dev_dbg(mcp->dev, "Invalid config param %04x\n", param);
+diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
+index a2ffe4157df10c..b8d77adc9ea145 100644
+--- a/drivers/platform/x86/mlx-platform.c
++++ b/drivers/platform/x86/mlx-platform.c
+@@ -6237,6 +6237,7 @@ mlxplat_pci_fpga_device_init(unsigned int device, const char *res_name, struct p
+ fail_pci_request_regions:
+ pci_disable_device(pci_dev);
+ fail_pci_enable_device:
++ pci_dev_put(pci_dev);
+ return err;
+ }
+
+@@ -6247,6 +6248,7 @@ mlxplat_pci_fpga_device_exit(struct pci_dev *pci_bridge,
+ iounmap(pci_bridge_addr);
+ pci_release_regions(pci_bridge);
+ pci_disable_device(pci_bridge);
++ pci_dev_put(pci_bridge);
+ }
+
+ static int
+diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
+index 6235721f2c1aec..fd6bf9e77afcb8 100644
+--- a/drivers/remoteproc/qcom_q6v5_pas.c
++++ b/drivers/remoteproc/qcom_q6v5_pas.c
+@@ -786,6 +786,23 @@ static const struct adsp_data adsp_resource_init = {
+ .ssctl_id = 0x14,
+ };
+
++static const struct adsp_data sa8775p_adsp_resource = {
++ .crash_reason_smem = 423,
++ .firmware_name = "adsp.mbn",
++ .pas_id = 1,
++ .minidump_id = 5,
++ .auto_boot = true,
++ .proxy_pd_names = (char*[]){
++ "lcx",
++ "lmx",
++ NULL
++ },
++ .load_state = "adsp",
++ .ssr_name = "lpass",
++ .sysmon_name = "adsp",
++ .ssctl_id = 0x14,
++};
++
+ static const struct adsp_data sdm845_adsp_resource_init = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+@@ -885,6 +902,42 @@ static const struct adsp_data cdsp_resource_init = {
+ .ssctl_id = 0x17,
+ };
+
++static const struct adsp_data sa8775p_cdsp0_resource = {
++ .crash_reason_smem = 601,
++ .firmware_name = "cdsp0.mbn",
++ .pas_id = 18,
++ .minidump_id = 7,
++ .auto_boot = true,
++ .proxy_pd_names = (char*[]){
++ "cx",
++ "mxc",
++ "nsp",
++ NULL
++ },
++ .load_state = "cdsp",
++ .ssr_name = "cdsp",
++ .sysmon_name = "cdsp",
++ .ssctl_id = 0x17,
++};
++
++static const struct adsp_data sa8775p_cdsp1_resource = {
++ .crash_reason_smem = 633,
++ .firmware_name = "cdsp1.mbn",
++ .pas_id = 30,
++ .minidump_id = 20,
++ .auto_boot = true,
++ .proxy_pd_names = (char*[]){
++ "cx",
++ "mxc",
++ "nsp",
++ NULL
++ },
++ .load_state = "nsp",
++ .ssr_name = "cdsp1",
++ .sysmon_name = "cdsp1",
++ .ssctl_id = 0x20,
++};
++
+ static const struct adsp_data sdm845_cdsp_resource_init = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+@@ -987,6 +1040,40 @@ static const struct adsp_data sm8350_cdsp_resource = {
+ .ssctl_id = 0x17,
+ };
+
++static const struct adsp_data sa8775p_gpdsp0_resource = {
++ .crash_reason_smem = 640,
++ .firmware_name = "gpdsp0.mbn",
++ .pas_id = 39,
++ .minidump_id = 21,
++ .auto_boot = true,
++ .proxy_pd_names = (char*[]){
++ "cx",
++ "mxc",
++ NULL
++ },
++ .load_state = "gpdsp0",
++ .ssr_name = "gpdsp0",
++ .sysmon_name = "gpdsp0",
++ .ssctl_id = 0x21,
++};
++
++static const struct adsp_data sa8775p_gpdsp1_resource = {
++ .crash_reason_smem = 641,
++ .firmware_name = "gpdsp1.mbn",
++ .pas_id = 40,
++ .minidump_id = 22,
++ .auto_boot = true,
++ .proxy_pd_names = (char*[]){
++ "cx",
++ "mxc",
++ NULL
++ },
++ .load_state = "gpdsp1",
++ .ssr_name = "gpdsp1",
++ .sysmon_name = "gpdsp1",
++ .ssctl_id = 0x22,
++};
++
+ static const struct adsp_data mpss_resource_init = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+@@ -1163,6 +1250,13 @@ static const struct of_device_id adsp_of_match[] = {
+ { .compatible = "qcom,qcs404-adsp-pas", .data = &adsp_resource_init },
+ { .compatible = "qcom,qcs404-cdsp-pas", .data = &cdsp_resource_init },
+ { .compatible = "qcom,qcs404-wcss-pas", .data = &wcss_resource_init },
++ { .compatible = "qcom,sa8775p-adsp-pas", .data = &sa8775p_adsp_resource},
++ { .compatible = "qcom,sa8775p-cdsp0-pas", .data = &sa8775p_cdsp0_resource},
++ { .compatible = "qcom,sa8775p-cdsp1-pas", .data = &sa8775p_cdsp1_resource},
++ { .compatible = "qcom,sa8775p-gpdsp0-pas", .data = &sa8775p_gpdsp0_resource},
++ { .compatible = "qcom,sa8775p-gpdsp1-pas", .data = &sa8775p_gpdsp1_resource},
++ { .compatible = "qcom,sar2130p-adsp-pas", .data = &sm8350_adsp_resource},
++ { .compatible = "qcom,sc7180-adsp-pas", .data = &sm8250_adsp_resource},
+ { .compatible = "qcom,sc7180-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sc7280-mpss-pas", .data = &mpss_resource_init},
+ { .compatible = "qcom,sc8180x-adsp-pas", .data = &sm8150_adsp_resource},
+diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
+index 9e73e9cbbcfc6c..1e4550156b735d 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas.h
++++ b/drivers/scsi/hisi_sas/hisi_sas.h
+@@ -343,7 +343,7 @@ struct hisi_sas_hw {
+ u8 reg_index, u8 reg_count, u8 *write_data);
+ void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba,
+ int delay_ms, int timeout_ms);
+- void (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
++ int (*debugfs_snapshot_regs)(struct hisi_hba *hisi_hba);
+ int complete_hdr_size;
+ const struct scsi_host_template *sht;
+ };
+@@ -451,7 +451,6 @@ struct hisi_hba {
+ const struct hisi_sas_hw *hw; /* Low level hw interface */
+ unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
+ struct work_struct rst_work;
+- struct work_struct debugfs_work;
+ u32 phy_state;
+ u32 intr_coal_ticks; /* Time of interrupt coalesce in us */
+ u32 intr_coal_count; /* Interrupt count to coalesce */
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
+index db9ae206974c21..f78c5f8a49ffac 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
+@@ -1579,7 +1579,7 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
+ return -EPERM;
+ }
+
+- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
++ if (hisi_sas_debugfs_enable)
+ hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
+
+ return 0;
+@@ -1967,8 +1967,19 @@ static bool hisi_sas_internal_abort_timeout(struct sas_task *task,
+ struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+ struct hisi_sas_internal_abort_data *timeout = data;
+
+- if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
+- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
++ if (hisi_sas_debugfs_enable) {
++ /*
++ * If the timeout occurs in a device-gone scenario, avoid a
++ * circular dependency like:
++ * hisi_sas_dev_gone() -> down() -> ... ->
++ * hisi_sas_internal_abort_timeout() -> down().
++ */
++ if (!timeout->rst_ha_timeout)
++ down(&hisi_hba->sem);
++ hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
++ if (!timeout->rst_ha_timeout)
++ up(&hisi_hba->sem);
++ }
+
+ if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+ pr_err("Internal abort: timeout %016llx\n",
+diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+index 4054659d48f74c..ff5f86867dbf06 100644
+--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
++++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+@@ -558,8 +558,7 @@ static int experimental_iopoll_q_cnt;
+ module_param(experimental_iopoll_q_cnt, int, 0444);
+ MODULE_PARM_DESC(experimental_iopoll_q_cnt, "number of queues to be used as poll mode, def=0");
+
+-static void debugfs_work_handler_v3_hw(struct work_struct *work);
+-static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
++static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba);
+
+ static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
+ {
+@@ -3397,7 +3396,6 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev)
+ hisi_hba = shost_priv(shost);
+
+ INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
+- INIT_WORK(&hisi_hba->debugfs_work, debugfs_work_handler_v3_hw);
+ hisi_hba->hw = &hisi_sas_v3_hw;
+ hisi_hba->pci_dev = pdev;
+ hisi_hba->dev = dev;
+@@ -3562,6 +3560,11 @@ debugfs_to_reg_name_v3_hw(int off, int base_off,
+ return NULL;
+ }
+
++static bool debugfs_dump_is_generated_v3_hw(void *p)
++{
++ return p ? true : false;
++}
++
+ static void debugfs_print_reg_v3_hw(u32 *regs_val, struct seq_file *s,
+ const struct hisi_sas_debugfs_reg *reg)
+ {
+@@ -3587,6 +3590,9 @@ static int debugfs_global_v3_hw_show(struct seq_file *s, void *p)
+ {
+ struct hisi_sas_debugfs_regs *global = s->private;
+
++ if (!debugfs_dump_is_generated_v3_hw(global->data))
++ return -EPERM;
++
+ debugfs_print_reg_v3_hw(global->data, s,
+ &debugfs_global_reg);
+
+@@ -3598,6 +3604,9 @@ static int debugfs_axi_v3_hw_show(struct seq_file *s, void *p)
+ {
+ struct hisi_sas_debugfs_regs *axi = s->private;
+
++ if (!debugfs_dump_is_generated_v3_hw(axi->data))
++ return -EPERM;
++
+ debugfs_print_reg_v3_hw(axi->data, s,
+ &debugfs_axi_reg);
+
+@@ -3609,6 +3618,9 @@ static int debugfs_ras_v3_hw_show(struct seq_file *s, void *p)
+ {
+ struct hisi_sas_debugfs_regs *ras = s->private;
+
++ if (!debugfs_dump_is_generated_v3_hw(ras->data))
++ return -EPERM;
++
+ debugfs_print_reg_v3_hw(ras->data, s,
+ &debugfs_ras_reg);
+
+@@ -3621,6 +3633,9 @@ static int debugfs_port_v3_hw_show(struct seq_file *s, void *p)
+ struct hisi_sas_debugfs_port *port = s->private;
+ const struct hisi_sas_debugfs_reg *reg_port = &debugfs_port_reg;
+
++ if (!debugfs_dump_is_generated_v3_hw(port->data))
++ return -EPERM;
++
+ debugfs_print_reg_v3_hw(port->data, s, reg_port);
+
+ return 0;
+@@ -3676,6 +3691,9 @@ static int debugfs_cq_v3_hw_show(struct seq_file *s, void *p)
+ struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
+ int slot;
+
++ if (!debugfs_dump_is_generated_v3_hw(debugfs_cq->complete_hdr))
++ return -EPERM;
++
+ for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+ debugfs_cq_show_slot_v3_hw(s, slot, debugfs_cq);
+
+@@ -3697,8 +3715,12 @@ static void debugfs_dq_show_slot_v3_hw(struct seq_file *s, int slot,
+
+ static int debugfs_dq_v3_hw_show(struct seq_file *s, void *p)
+ {
++ struct hisi_sas_debugfs_dq *debugfs_dq = s->private;
+ int slot;
+
++ if (!debugfs_dump_is_generated_v3_hw(debugfs_dq->hdr))
++ return -EPERM;
++
+ for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++)
+ debugfs_dq_show_slot_v3_hw(s, slot, s->private);
+
+@@ -3712,6 +3734,9 @@ static int debugfs_iost_v3_hw_show(struct seq_file *s, void *p)
+ struct hisi_sas_iost *iost = debugfs_iost->iost;
+ int i, max_command_entries = HISI_SAS_MAX_COMMANDS;
+
++ if (!debugfs_dump_is_generated_v3_hw(iost))
++ return -EPERM;
++
+ for (i = 0; i < max_command_entries; i++, iost++) {
+ __le64 *data = &iost->qw0;
+
+@@ -3731,6 +3756,9 @@ static int debugfs_iost_cache_v3_hw_show(struct seq_file *s, void *p)
+ int i, tab_idx;
+ __le64 *iost;
+
++ if (!debugfs_dump_is_generated_v3_hw(iost_cache))
++ return -EPERM;
++
+ for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
+ /*
+ * Data struct of IOST cache:
+@@ -3754,6 +3782,9 @@ static int debugfs_itct_v3_hw_show(struct seq_file *s, void *p)
+ struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
+ struct hisi_sas_itct *itct = debugfs_itct->itct;
+
++ if (!debugfs_dump_is_generated_v3_hw(itct))
++ return -EPERM;
++
+ for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
+ __le64 *data = &itct->qw0;
+
+@@ -3773,6 +3804,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
+ int i, tab_idx;
+ __le64 *itct;
+
++ if (!debugfs_dump_is_generated_v3_hw(itct_cache))
++ return -EPERM;
++
+ for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
+ /*
+ * Data struct of ITCT cache:
+@@ -3790,10 +3824,9 @@ static int debugfs_itct_cache_v3_hw_show(struct seq_file *s, void *p)
+ }
+ DEFINE_SHOW_ATTRIBUTE(debugfs_itct_cache_v3_hw);
+
+-static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
++static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba, int index)
+ {
+ u64 *debugfs_timestamp;
+- int dump_index = hisi_hba->debugfs_dump_index;
+ struct dentry *dump_dentry;
+ struct dentry *dentry;
+ char name[256];
+@@ -3801,17 +3834,17 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+ int c;
+ int d;
+
+- snprintf(name, 256, "%d", dump_index);
++ snprintf(name, 256, "%d", index);
+
+ dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);
+
+- debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];
++ debugfs_timestamp = &hisi_hba->debugfs_timestamp[index];
+
+ debugfs_create_u64("timestamp", 0400, dump_dentry,
+ debugfs_timestamp);
+
+ debugfs_create_file("global", 0400, dump_dentry,
+- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
++ &hisi_hba->debugfs_regs[index][DEBUGFS_GLOBAL],
+ &debugfs_global_v3_hw_fops);
+
+ /* Create port dir and files */
+@@ -3820,7 +3853,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+ snprintf(name, 256, "%d", p);
+
+ debugfs_create_file(name, 0400, dentry,
+- &hisi_hba->debugfs_port_reg[dump_index][p],
++ &hisi_hba->debugfs_port_reg[index][p],
+ &debugfs_port_v3_hw_fops);
+ }
+
+@@ -3830,7 +3863,7 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+ snprintf(name, 256, "%d", c);
+
+ debugfs_create_file(name, 0400, dentry,
+- &hisi_hba->debugfs_cq[dump_index][c],
++ &hisi_hba->debugfs_cq[index][c],
+ &debugfs_cq_v3_hw_fops);
+ }
+
+@@ -3840,66 +3873,35 @@ static void debugfs_create_files_v3_hw(struct hisi_hba *hisi_hba)
+ snprintf(name, 256, "%d", d);
+
+ debugfs_create_file(name, 0400, dentry,
+- &hisi_hba->debugfs_dq[dump_index][d],
++ &hisi_hba->debugfs_dq[index][d],
+ &debugfs_dq_v3_hw_fops);
+ }
+
+ debugfs_create_file("iost", 0400, dump_dentry,
+- &hisi_hba->debugfs_iost[dump_index],
++ &hisi_hba->debugfs_iost[index],
+ &debugfs_iost_v3_hw_fops);
+
+ debugfs_create_file("iost_cache", 0400, dump_dentry,
+- &hisi_hba->debugfs_iost_cache[dump_index],
++ &hisi_hba->debugfs_iost_cache[index],
+ &debugfs_iost_cache_v3_hw_fops);
+
+ debugfs_create_file("itct", 0400, dump_dentry,
+- &hisi_hba->debugfs_itct[dump_index],
++ &hisi_hba->debugfs_itct[index],
+ &debugfs_itct_v3_hw_fops);
+
+ debugfs_create_file("itct_cache", 0400, dump_dentry,
+- &hisi_hba->debugfs_itct_cache[dump_index],
++ &hisi_hba->debugfs_itct_cache[index],
+ &debugfs_itct_cache_v3_hw_fops);
+
+ debugfs_create_file("axi", 0400, dump_dentry,
+- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
++ &hisi_hba->debugfs_regs[index][DEBUGFS_AXI],
+ &debugfs_axi_v3_hw_fops);
+
+ debugfs_create_file("ras", 0400, dump_dentry,
+- &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
++ &hisi_hba->debugfs_regs[index][DEBUGFS_RAS],
+ &debugfs_ras_v3_hw_fops);
+ }
+
+-static void debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
+-{
+- int debugfs_dump_index = hisi_hba->debugfs_dump_index;
+- struct device *dev = hisi_hba->dev;
+- u64 timestamp = local_clock();
+-
+- if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
+- dev_warn(dev, "dump count exceeded!\n");
+- return;
+- }
+-
+- do_div(timestamp, NSEC_PER_MSEC);
+- hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
+-
+- debugfs_snapshot_prepare_v3_hw(hisi_hba);
+-
+- debugfs_snapshot_global_reg_v3_hw(hisi_hba);
+- debugfs_snapshot_port_reg_v3_hw(hisi_hba);
+- debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
+- debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
+- debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
+- debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
+- debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
+- debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
+-
+- debugfs_create_files_v3_hw(hisi_hba);
+-
+- debugfs_snapshot_restore_v3_hw(hisi_hba);
+- hisi_hba->debugfs_dump_index++;
+-}
+-
+ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+@@ -3907,9 +3909,6 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
+ struct hisi_hba *hisi_hba = file->f_inode->i_private;
+ char buf[8];
+
+- if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
+- return -EFAULT;
+-
+ if (count > 8)
+ return -EFAULT;
+
+@@ -3919,7 +3918,12 @@ static ssize_t debugfs_trigger_dump_v3_hw_write(struct file *file,
+ if (buf[0] != '1')
+ return -EFAULT;
+
+- queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);
++ down(&hisi_hba->sem);
++ if (debugfs_snapshot_regs_v3_hw(hisi_hba)) {
++ up(&hisi_hba->sem);
++ return -EFAULT;
++ }
++ up(&hisi_hba->sem);
+
+ return count;
+ }
+@@ -4670,36 +4674,40 @@ static void debugfs_fifo_init_v3_hw(struct hisi_hba *hisi_hba)
+ }
+ }
+
+-static void debugfs_work_handler_v3_hw(struct work_struct *work)
+-{
+- struct hisi_hba *hisi_hba =
+- container_of(work, struct hisi_hba, debugfs_work);
+-
+- debugfs_snapshot_regs_v3_hw(hisi_hba);
+-}
+-
+ static void debugfs_release_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
+ {
+ struct device *dev = hisi_hba->dev;
+ int i;
+
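++ /* NULL the stale pointers so the debugfs show() handlers return -EPERM */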
+ devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
++ hisi_hba->debugfs_iost_cache[dump_index].cache = NULL;
+ devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
++ hisi_hba->debugfs_itct_cache[dump_index].cache = NULL;
+ devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
++ hisi_hba->debugfs_iost[dump_index].iost = NULL;
+ devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);
++ hisi_hba->debugfs_itct[dump_index].itct = NULL;
+
+- for (i = 0; i < hisi_hba->queue_count; i++)
++ for (i = 0; i < hisi_hba->queue_count; i++) {
+ devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);
++ hisi_hba->debugfs_dq[dump_index][i].hdr = NULL;
++ }
+
+- for (i = 0; i < hisi_hba->queue_count; i++)
++ for (i = 0; i < hisi_hba->queue_count; i++) {
+ devm_kfree(dev,
+ hisi_hba->debugfs_cq[dump_index][i].complete_hdr);
++ hisi_hba->debugfs_cq[dump_index][i].complete_hdr = NULL;
++ }
+
+- for (i = 0; i < DEBUGFS_REGS_NUM; i++)
++ for (i = 0; i < DEBUGFS_REGS_NUM; i++) {
+ devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);
++ hisi_hba->debugfs_regs[dump_index][i].data = NULL;
++ }
+
+- for (i = 0; i < hisi_hba->n_phy; i++)
++ for (i = 0; i < hisi_hba->n_phy; i++) {
+ devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
++ hisi_hba->debugfs_port_reg[dump_index][i].data = NULL;
++ }
+ }
+
+ static const struct hisi_sas_debugfs_reg *debugfs_reg_array_v3_hw[DEBUGFS_REGS_NUM] = {
+@@ -4712,7 +4720,7 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
+ {
+ const struct hisi_sas_hw *hw = hisi_hba->hw;
+ struct device *dev = hisi_hba->dev;
+- int p, c, d, r, i;
++ int p, c, d, r;
+ size_t sz;
+
+ for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
+@@ -4792,11 +4800,46 @@ static int debugfs_alloc_v3_hw(struct hisi_hba *hisi_hba, int dump_index)
+
+ return 0;
+ fail:
+- for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
+- debugfs_release_v3_hw(hisi_hba, i);
++ debugfs_release_v3_hw(hisi_hba, dump_index);
+ return -ENOMEM;
+ }
+
++static int debugfs_snapshot_regs_v3_hw(struct hisi_hba *hisi_hba)
++{
++ int debugfs_dump_index = hisi_hba->debugfs_dump_index;
++ struct device *dev = hisi_hba->dev;
++ u64 timestamp = local_clock();
++
++ if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
++ dev_warn(dev, "dump count exceeded!\n");
++ return -EINVAL;
++ }
++
++ if (debugfs_alloc_v3_hw(hisi_hba, debugfs_dump_index)) {
++ dev_warn(dev, "failed to alloc memory\n");
++ return -ENOMEM;
++ }
++
++ do_div(timestamp, NSEC_PER_MSEC);
++ hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;
++
++ debugfs_snapshot_prepare_v3_hw(hisi_hba);
++
++ debugfs_snapshot_global_reg_v3_hw(hisi_hba);
++ debugfs_snapshot_port_reg_v3_hw(hisi_hba);
++ debugfs_snapshot_axi_reg_v3_hw(hisi_hba);
++ debugfs_snapshot_ras_reg_v3_hw(hisi_hba);
++ debugfs_snapshot_cq_reg_v3_hw(hisi_hba);
++ debugfs_snapshot_dq_reg_v3_hw(hisi_hba);
++ debugfs_snapshot_itct_reg_v3_hw(hisi_hba);
++ debugfs_snapshot_iost_reg_v3_hw(hisi_hba);
++
++ debugfs_snapshot_restore_v3_hw(hisi_hba);
++ hisi_hba->debugfs_dump_index++;
++
++ return 0;
++}
++
+ static void debugfs_phy_down_cnt_init_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ struct dentry *dir = debugfs_create_dir("phy_down_cnt",
+@@ -4874,6 +4917,17 @@ static void debugfs_bist_init_v3_hw(struct hisi_hba *hisi_hba)
+ hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
+ }
+
++static void debugfs_dump_init_v3_hw(struct hisi_hba *hisi_hba)
++{
++ int i;
++
++ hisi_hba->debugfs_dump_dentry =
++ debugfs_create_dir("dump", hisi_hba->debugfs_dir);
++
++ for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
++ debugfs_create_files_v3_hw(hisi_hba, i);
++}
++
+ static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ debugfs_remove_recursive(hisi_hba->debugfs_dir);
+@@ -4883,7 +4937,6 @@ static void debugfs_exit_v3_hw(struct hisi_hba *hisi_hba)
+ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ {
+ struct device *dev = hisi_hba->dev;
+- int i;
+
+ hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
+ hisi_sas_debugfs_dir);
+@@ -4895,19 +4948,10 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
+ /* create bist structures */
+ debugfs_bist_init_v3_hw(hisi_hba);
+
+- hisi_hba->debugfs_dump_dentry =
+- debugfs_create_dir("dump", hisi_hba->debugfs_dir);
++ debugfs_dump_init_v3_hw(hisi_hba);
+
+ debugfs_phy_down_cnt_init_v3_hw(hisi_hba);
+ debugfs_fifo_init_v3_hw(hisi_hba);
+-
+- for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
+- if (debugfs_alloc_v3_hw(hisi_hba, i)) {
+- debugfs_exit_v3_hw(hisi_hba);
+- dev_dbg(dev, "failed to init debugfs!\n");
+- break;
+- }
+- }
+ }
+
+ static int
+diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
+index 7f32619234696f..7880675a68dba6 100644
+--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
++++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
+@@ -8,11 +8,12 @@
+ */
+
+ #include "mpi3mr.h"
++#include <linux/idr.h>
+
+ /* global driver scope variables */
+ LIST_HEAD(mrioc_list);
+ DEFINE_SPINLOCK(mrioc_list_lock);
+-static int mrioc_ids;
++static DEFINE_IDA(mrioc_ida);
+ static int warn_non_secure_ctlr;
+ atomic64_t event_counter;
+
+@@ -5065,7 +5066,10 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ }
+
+ mrioc = shost_priv(shost);
+- mrioc->id = mrioc_ids++;
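++ /* mrioc->id is a u8, so cap the IDA allocation at U8_MAX */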
++ retval = ida_alloc_range(&mrioc_ida, 0, U8_MAX, GFP_KERNEL);
++ if (retval < 0)
++ goto id_alloc_failed;
++ mrioc->id = (u8)retval;
+ sprintf(mrioc->driver_name, "%s", MPI3MR_DRIVER_NAME);
+ sprintf(mrioc->name, "%s%d", mrioc->driver_name, mrioc->id);
+ INIT_LIST_HEAD(&mrioc->list);
+@@ -5215,9 +5219,11 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ resource_alloc_failed:
+ destroy_workqueue(mrioc->fwevt_worker_thread);
+ fwevtthread_failed:
++ ida_free(&mrioc_ida, mrioc->id);
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
++id_alloc_failed:
+ scsi_host_put(shost);
+ shost_failed:
+ return retval;
+@@ -5303,6 +5309,7 @@ static void mpi3mr_remove(struct pci_dev *pdev)
+ mrioc->sas_hba.num_phys = 0;
+ }
+
++ ida_free(&mrioc_ida, mrioc->id);
+ spin_lock(&mrioc_list_lock);
+ list_del(&mrioc->list);
+ spin_unlock(&mrioc_list_lock);
+@@ -5518,6 +5525,7 @@ static void __exit mpi3mr_exit(void)
+ &driver_attr_event_counter);
+ pci_unregister_driver(&mpi3mr_pci_driver);
+ sas_release_transport(mpi3mr_transport_template);
++ ida_destroy(&mrioc_ida);
+ }
+
+ module_init(mpi3mr_init);
+diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
+index 1ec6f9c82aef06..79f2bf5df19a6c 100644
+--- a/drivers/thunderbolt/nhi.c
++++ b/drivers/thunderbolt/nhi.c
+@@ -1524,6 +1524,18 @@ static struct pci_device_id nhi_ids[] = {
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1),
+ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI0),
++ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_LNL_NHI1),
++ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI0),
++ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_M_NHI1),
++ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI0),
++ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
++ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_PTL_P_NHI1),
++ .driver_data = (kernel_ulong_t)&icl_nhi_ops },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI) },
+
+diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h
+index 0f029ce758825e..16744f25a9a069 100644
+--- a/drivers/thunderbolt/nhi.h
++++ b/drivers/thunderbolt/nhi.h
+@@ -90,6 +90,12 @@ extern const struct tb_nhi_ops icl_nhi_ops;
+ #define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21
+ #define PCI_DEVICE_ID_INTEL_RPL_NHI0 0xa73e
+ #define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d
++#define PCI_DEVICE_ID_INTEL_LNL_NHI0 0xa833
++#define PCI_DEVICE_ID_INTEL_LNL_NHI1 0xa834
++#define PCI_DEVICE_ID_INTEL_PTL_M_NHI0 0xe333
++#define PCI_DEVICE_ID_INTEL_PTL_M_NHI1 0xe334
++#define PCI_DEVICE_ID_INTEL_PTL_P_NHI0 0xe433
++#define PCI_DEVICE_ID_INTEL_PTL_P_NHI1 0xe434
+
+ #define PCI_CLASS_SERIAL_USB_USB4 0x0c0340
+
+diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
+index 47becb363adacb..2ee8c5ebca7c3c 100644
+--- a/drivers/thunderbolt/retimer.c
++++ b/drivers/thunderbolt/retimer.c
+@@ -98,6 +98,7 @@ static int tb_retimer_nvm_add(struct tb_retimer *rt)
+
+ err_nvm:
+ dev_dbg(&rt->dev, "NVM upgrade disabled\n");
++ rt->no_nvm_upgrade = true;
+ if (!IS_ERR(nvm))
+ tb_nvm_free(nvm);
+
+@@ -177,8 +178,6 @@ static ssize_t nvm_authenticate_show(struct device *dev,
+
+ if (!rt->nvm)
+ ret = -EAGAIN;
+- else if (rt->no_nvm_upgrade)
+- ret = -EOPNOTSUPP;
+ else
+ ret = sysfs_emit(buf, "%#x\n", rt->auth_status);
+
+@@ -331,6 +330,19 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
+ }
+ static DEVICE_ATTR_RO(vendor);
+
++static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
++ int n)
++{
++ struct device *dev = kobj_to_dev(kobj);
++ struct tb_retimer *rt = tb_to_retimer(dev);
++
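++ /* hide the NVM attributes when this retimer does not support upgrades */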
++ if (attr == &dev_attr_nvm_authenticate.attr ||
++ attr == &dev_attr_nvm_version.attr)
++ return rt->no_nvm_upgrade ? 0 : attr->mode;
++
++ return attr->mode;
++}
++
+ static struct attribute *retimer_attrs[] = {
+ &dev_attr_device.attr,
+ &dev_attr_nvm_authenticate.attr,
+@@ -340,6 +352,7 @@ static struct attribute *retimer_attrs[] = {
+ };
+
+ static const struct attribute_group retimer_group = {
++ .is_visible = retimer_is_visible,
+ .attrs = retimer_attrs,
+ };
+
+diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
+index 2a38e1eb65466c..97437de52ef681 100644
+--- a/drivers/usb/chipidea/ci.h
++++ b/drivers/usb/chipidea/ci.h
+@@ -25,6 +25,7 @@
+ #define TD_PAGE_COUNT 5
+ #define CI_HDRC_PAGE_SIZE 4096ul /* page size for TD's */
+ #define ENDPT_MAX 32
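++/* request size cap for controllers flagged CI_HDRC_HAS_SHORT_PKT_LIMIT */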
++#define CI_MAX_REQ_SIZE (4 * CI_HDRC_PAGE_SIZE)
+ #define CI_MAX_BUF_SIZE (TD_PAGE_COUNT * CI_HDRC_PAGE_SIZE)
+
+ /******************************************************************************
+@@ -260,6 +261,7 @@ struct ci_hdrc {
+ bool b_sess_valid_event;
+ bool imx28_write_fix;
+ bool has_portsc_pec_bug;
++ bool has_short_pkt_limit;
+ bool supports_runtime_pm;
+ bool in_lpm;
+ bool wakeup_int;
+diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
+index e28bb2f2612dc6..477af457c1a1f0 100644
+--- a/drivers/usb/chipidea/ci_hdrc_imx.c
++++ b/drivers/usb/chipidea/ci_hdrc_imx.c
+@@ -334,6 +334,7 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
+ struct ci_hdrc_platform_data pdata = {
+ .name = dev_name(&pdev->dev),
+ .capoffset = DEF_CAPOFFSET,
++ .flags = CI_HDRC_HAS_SHORT_PKT_LIMIT,
+ .notify_event = ci_hdrc_imx_notify_event,
+ };
+ int ret;
+diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
+index ca71df4f32e4cc..c161a4ee529064 100644
+--- a/drivers/usb/chipidea/core.c
++++ b/drivers/usb/chipidea/core.c
+@@ -1076,6 +1076,8 @@ static int ci_hdrc_probe(struct platform_device *pdev)
+ CI_HDRC_SUPPORTS_RUNTIME_PM);
+ ci->has_portsc_pec_bug = !!(ci->platdata->flags &
+ CI_HDRC_HAS_PORTSC_PEC_MISSED);
++ ci->has_short_pkt_limit = !!(ci->platdata->flags &
++ CI_HDRC_HAS_SHORT_PKT_LIMIT);
+ platform_set_drvdata(pdev, ci);
+
+ ret = hw_device_init(ci, base);
+diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
+index f5490f2a5b6bca..647e98f4e35110 100644
+--- a/drivers/usb/chipidea/otg.c
++++ b/drivers/usb/chipidea/otg.c
+@@ -130,8 +130,11 @@ enum ci_role ci_otg_role(struct ci_hdrc *ci)
+
+ void ci_handle_vbus_change(struct ci_hdrc *ci)
+ {
+- if (!ci->is_otg)
++ if (!ci->is_otg) {
++ if (ci->platdata->flags & CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS)
++ usb_gadget_vbus_connect(&ci->gadget);
+ return;
++ }
+
+ if (hw_read_otgsc(ci, OTGSC_BSV) && !ci->vbus_active)
+ usb_gadget_vbus_connect(&ci->gadget);
+diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
+index 9f7d003e467b54..f2ae5f4c58283a 100644
+--- a/drivers/usb/chipidea/udc.c
++++ b/drivers/usb/chipidea/udc.c
+@@ -959,6 +959,12 @@ static int _ep_queue(struct usb_ep *ep, struct usb_request *req,
+ return -EMSGSIZE;
+ }
+
++ if (ci->has_short_pkt_limit &&
++ hwreq->req.length > CI_MAX_REQ_SIZE) {
++ dev_err(hwep->ci->dev, "request length too big (max 16KB)\n");
++ return -EMSGSIZE;
++ }
++
+ /* first nuke then test link, e.g. previous status has not sent */
+ if (!list_empty(&hwreq->queue)) {
+ dev_err(hwep->ci->dev, "request already in queue\n");
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index b118f4aab18984..d00bf714a7ccfb 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -899,6 +899,7 @@ struct dwc3_hwparams {
+ #define DWC3_MODE(n) ((n) & 0x7)
+
+ /* HWPARAMS1 */
++#define DWC3_SPRAM_TYPE(n) (((n) >> 23) & 1)
+ #define DWC3_NUM_INT(n) (((n) & (0x3f << 15)) >> 15)
+
+ /* HWPARAMS3 */
+@@ -909,6 +910,9 @@ struct dwc3_hwparams {
+ #define DWC3_NUM_IN_EPS(p) (((p)->hwparams3 & \
+ (DWC3_NUM_IN_EPS_MASK)) >> 18)
+
++/* HWPARAMS6 */
++#define DWC3_RAM0_DEPTH(n) (((n) & (0xffff0000)) >> 16)
++
+ /* HWPARAMS7 */
+ #define DWC3_RAM1_DEPTH(n) ((n) & 0xffff)
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index b560996bd4218a..656460c0c1dd7e 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -687,6 +687,44 @@ static int dwc3_gadget_calc_tx_fifo_size(struct dwc3 *dwc, int mult)
+ return fifo_size;
+ }
+
++/**
++ * dwc3_gadget_calc_ram_depth - calculates the ram depth for txfifo
++ * @dwc: pointer to the DWC3 context
++ */
++static int dwc3_gadget_calc_ram_depth(struct dwc3 *dwc)
++{
++ int ram_depth;
++ int fifo_0_start;
++ bool is_single_port_ram;
++
++ /* Check supporting RAM type by HW */
++ is_single_port_ram = DWC3_SPRAM_TYPE(dwc->hwparams.hwparams1);
++
++ /*
++ * If a single port RAM is utilized, then allocate TxFIFOs from
++ * RAM0; otherwise, allocate them from RAM1.
++ */
++ ram_depth = is_single_port_ram ? DWC3_RAM0_DEPTH(dwc->hwparams.hwparams6) :
++ DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
++
++ /*
++ * In a single port RAM configuration, the available RAM is shared
++ * between the RX and TX FIFOs. This means that the txfifo can begin
++ * at a non-zero address.
++ */
++ if (is_single_port_ram) {
++ u32 reg;
++
++ /* Check if TXFIFOs start at non-zero addr */
++ reg = dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0));
++ fifo_0_start = DWC3_GTXFIFOSIZ_TXFSTADDR(reg);
++
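++ /* TXFSTADDR sits in the upper 16 bits; shift down to fifo-depth units */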
++ ram_depth -= (fifo_0_start >> 16);
++ }
++
++ return ram_depth;
++}
++
+ /**
+ * dwc3_gadget_clear_tx_fifos - Clears txfifo allocation
+ * @dwc: pointer to the DWC3 context
+@@ -753,7 +791,7 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
+ {
+ struct dwc3 *dwc = dep->dwc;
+ int fifo_0_start;
+- int ram1_depth;
++ int ram_depth;
+ int fifo_size;
+ int min_depth;
+ int num_in_ep;
+@@ -773,7 +811,7 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
+ if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
+ return 0;
+
+- ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
++ ram_depth = dwc3_gadget_calc_ram_depth(dwc);
+
+ if ((dep->endpoint.maxburst > 1 &&
+ usb_endpoint_xfer_bulk(dep->endpoint.desc)) ||
+@@ -794,7 +832,7 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
+
+ /* Reserve at least one FIFO for the number of IN EPs */
+ min_depth = num_in_ep * (fifo + 1);
+- remaining = ram1_depth - min_depth - dwc->last_fifo_depth;
++ remaining = ram_depth - min_depth - dwc->last_fifo_depth;
+ remaining = max_t(int, 0, remaining);
+ /*
+ * We've already reserved 1 FIFO per EP, so check what we can fit in
+@@ -820,9 +858,9 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
+ dwc->last_fifo_depth += DWC31_GTXFIFOSIZ_TXFDEP(fifo_size);
+
+ /* Check fifo size allocation doesn't exceed available RAM size. */
+- if (dwc->last_fifo_depth >= ram1_depth) {
++ if (dwc->last_fifo_depth >= ram_depth) {
+ dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
+- dwc->last_fifo_depth, ram1_depth,
++ dwc->last_fifo_depth, ram_depth,
+ dep->endpoint.name, fifo_size);
+ if (DWC3_IP_IS(DWC3))
+ fifo_size = DWC3_GTXFIFOSIZ_TXFDEP(fifo_size);
+@@ -3078,7 +3116,7 @@ static int dwc3_gadget_check_config(struct usb_gadget *g)
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ struct usb_ep *ep;
+ int fifo_size = 0;
+- int ram1_depth;
++ int ram_depth;
+ int ep_num = 0;
+
+ if (!dwc->do_fifo_resize)
+@@ -3101,8 +3139,8 @@ static int dwc3_gadget_check_config(struct usb_gadget *g)
+ fifo_size += dwc->max_cfg_eps;
+
+ /* Check if we can fit a single fifo per endpoint */
+- ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
+- if (fifo_size > ram1_depth)
++ ram_depth = dwc3_gadget_calc_ram_depth(dwc);
++ if (fifo_size > ram_depth)
+ return -ENOMEM;
+
+ return 0;
+diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
+index 50f58801140045..0d628af5c3ba50 100644
+--- a/drivers/usb/host/xhci-ring.c
++++ b/drivers/usb/host/xhci-ring.c
+@@ -52,6 +52,7 @@
+ * endpoint rings; it generates events on the event ring for these.
+ */
+
++#include <linux/jiffies.h>
+ #include <linux/scatterlist.h>
+ #include <linux/slab.h>
+ #include <linux/dma-mapping.h>
+@@ -1091,6 +1092,19 @@ static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+ return 0;
+ }
+
++/*
++ * Erase queued TDs from transfer ring(s) and give back those the xHC didn't
++ * stop on. If necessary, queue commands to move the xHC off cancelled TDs it
++ * stopped on. Those will be given back later when the commands complete.
++ *
++ * Call under xhci->lock on a stopped endpoint.
++ */
++void xhci_process_cancelled_tds(struct xhci_virt_ep *ep)
++{
++ xhci_invalidate_cancelled_tds(ep);
++ xhci_giveback_invalidated_tds(ep);
++}
++
+ /*
+ * Returns the TD the endpoint ring halted on.
+ * Only call for non-running rings without streams.
+@@ -1180,9 +1194,35 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ break;
+ ep->ep_state &= ~EP_STOP_CMD_PENDING;
+ return;
++ case EP_STATE_STOPPED:
++ /*
++ * Per xHCI 4.6.9, Stop Endpoint command on a Stopped
++ * EP is a Context State Error, and EP stays Stopped.
++ *
++ * But maybe it failed on Halted, and somebody ran Reset
++ * Endpoint later. EP state is now Stopped and EP_HALTED is
++ * still set because the Reset EP handler will run after us.
++ */
++ if (ep->ep_state & EP_HALTED)
++ break;
++ /*
++ * On some HCs EP state remains Stopped for some tens of
++ * us to a few ms or more after a doorbell ring, and any
++ * new Stop Endpoint fails without aborting the restart.
++ * This handler may run quickly enough to still see this
++ * Stopped state, but it will soon change to Running.
++ *
++ * Assume this bug on unexpected Stop Endpoint failures.
++ * Keep retrying until the EP starts and stops again, on
++ * chips where this is known to help. Wait for 100ms.
++ */
++ if (time_is_before_jiffies(ep->stop_time + msecs_to_jiffies(100)))
++ break;
++ fallthrough;
+ case EP_STATE_RUNNING:
+ /* Race, HW handled stop ep cmd before ep was running */
+- xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
++ xhci_dbg(xhci, "Stop ep completion ctx error, ctx_state %d\n",
++ GET_EP_CTX_STATE(ep_ctx));
+
+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ if (!command) {
+diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
+index 3bd70e6ad64baf..70e6c240a5409f 100644
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -8,6 +8,7 @@
+ * Some code borrowed from the Linux EHCI driver.
+ */
+
++#include <linux/jiffies.h>
+ #include <linux/pci.h>
+ #include <linux/iommu.h>
+ #include <linux/iopoll.h>
+@@ -1737,15 +1738,27 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+ }
+ }
+
+- /* Queue a stop endpoint command, but only if this is
+- * the first cancellation to be handled.
+- */
+- if (!(ep->ep_state & EP_STOP_CMD_PENDING)) {
++ /* These completion handlers will sort out cancelled TDs for us */
++ if (ep->ep_state & (EP_STOP_CMD_PENDING | EP_HALTED | SET_DEQ_PENDING)) {
++ xhci_dbg(xhci, "Not queuing Stop Endpoint on slot %d ep %d in state 0x%x\n",
++ urb->dev->slot_id, ep_index, ep->ep_state);
++ goto done;
++ }
++
++ /* In this case no commands are pending but the endpoint is stopped */
++ if (ep->ep_state & EP_CLEARING_TT) {
++ /* and cancelled TDs can be given back right away */
++ xhci_dbg(xhci, "Invalidating TDs instantly on slot %d ep %d in state 0x%x\n",
++ urb->dev->slot_id, ep_index, ep->ep_state);
++ xhci_process_cancelled_tds(ep);
++ } else {
++ /* Otherwise, queue a new Stop Endpoint command */
+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ if (!command) {
+ ret = -ENOMEM;
+ goto done;
+ }
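++ /* record when the stop was queued to bound the Stopped-state retry window */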
++ ep->stop_time = jiffies;
+ ep->ep_state |= EP_STOP_CMD_PENDING;
+ xhci_queue_stop_endpoint(xhci, command, urb->dev->slot_id,
+ ep_index, 0);
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 4bbd12db7239ab..fddb3a90dae3df 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -717,6 +717,7 @@ struct xhci_virt_ep {
+ /* Bandwidth checking storage */
+ struct xhci_bw_info bw_info;
+ struct list_head bw_endpoint_list;
++ unsigned long stop_time;
+ /* Isoch Frame ID checking storage */
+ int next_frame_id;
+ /* Use new Isoch TRB layout needed for extended TBC support */
+@@ -1951,6 +1952,7 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
+ void xhci_cleanup_command_queue(struct xhci_hcd *xhci);
+ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring);
+ unsigned int count_trbs(u64 addr, u64 len);
++void xhci_process_cancelled_tds(struct xhci_virt_ep *ep);
+
+ /* xHCI roothub code */
+ void xhci_set_link_state(struct xhci_hcd *xhci, struct xhci_port *port,
+diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
+index f6fb5575d4f0ac..d6a3fd00c3a5c4 100644
+--- a/drivers/usb/typec/ucsi/ucsi.c
++++ b/drivers/usb/typec/ucsi/ucsi.c
+@@ -903,6 +903,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
+
+ trace_ucsi_connector_change(con->num, &con->status);
+
++ if (ucsi->ops->connector_status)
++ ucsi->ops->connector_status(con);
++
+ role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
+
+ if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
+@@ -1258,6 +1261,9 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
+ cap->driver_data = con;
+ cap->ops = &ucsi_ops;
+
++ if (ucsi->ops->update_connector)
++ ucsi->ops->update_connector(con);
++
+ ret = ucsi_register_port_psy(con);
+ if (ret)
+ goto out;
+@@ -1322,6 +1328,9 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
+ }
+ ret = 0; /* ucsi_send_command() returns length on success */
+
++ if (ucsi->ops->connector_status)
++ ucsi->ops->connector_status(con);
++
+ switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
+ case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
+diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
+index 42c60eba5fb6ee..921ef0e115cffc 100644
+--- a/drivers/usb/typec/ucsi/ucsi.h
++++ b/drivers/usb/typec/ucsi/ucsi.h
+@@ -15,6 +15,7 @@
+
+ struct ucsi;
+ struct ucsi_altmode;
++struct ucsi_connector;
+ struct dentry;
+
+ /* UCSI offsets (Bytes) */
+@@ -52,6 +53,8 @@ struct dentry;
+ * @sync_write: Blocking write operation
+ * @async_write: Non-blocking write operation
+ * @update_altmodes: Squashes duplicate DP altmodes
++ * @update_connector: Update connector capabilities before registering
++ * @connector_status: Updates connector status, called holding connector lock
+ *
+ * Read and write routines for UCSI interface. @sync_write must wait for the
+ * Command Completion Event from the PPM before returning, and @async_write must
+@@ -66,6 +69,8 @@ struct ucsi_operations {
+ const void *val, size_t val_len);
+ bool (*update_altmodes)(struct ucsi *ucsi, struct ucsi_altmode *orig,
+ struct ucsi_altmode *updated);
++ void (*update_connector)(struct ucsi_connector *con);
++ void (*connector_status)(struct ucsi_connector *con);
+ };
+
+ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops);
+diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
+index 94f2df02f06eeb..82a1081d44f1f0 100644
+--- a/drivers/usb/typec/ucsi/ucsi_glink.c
++++ b/drivers/usb/typec/ucsi/ucsi_glink.c
+@@ -186,10 +186,40 @@ static int pmic_glink_ucsi_sync_write(struct ucsi *__ucsi, unsigned int offset,
+ return ret;
+ }
+
++static void pmic_glink_ucsi_update_connector(struct ucsi_connector *con)
++{
++ struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(con->ucsi);
++
++ if (con->num > PMIC_GLINK_MAX_PORTS ||
++ !ucsi->port_orientation[con->num - 1])
++ return;
++
++ con->typec_cap.orientation_aware = true;
++}
++
++static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
++{
++ struct pmic_glink_ucsi *ucsi = ucsi_get_drvdata(con->ucsi);
++ int orientation;
++
++ if (con->num > PMIC_GLINK_MAX_PORTS ||
++ !ucsi->port_orientation[con->num - 1])
++ return;
++
++ orientation = gpiod_get_value(ucsi->port_orientation[con->num - 1]);
++ if (orientation >= 0) {
++ typec_switch_set(ucsi->port_switch[con->num - 1],
++ orientation ? TYPEC_ORIENTATION_REVERSE
++ : TYPEC_ORIENTATION_NORMAL);
++ }
++}
++
+ static const struct ucsi_operations pmic_glink_ucsi_ops = {
+ .read = pmic_glink_ucsi_read,
+ .sync_write = pmic_glink_ucsi_sync_write,
+- .async_write = pmic_glink_ucsi_async_write
++ .async_write = pmic_glink_ucsi_async_write,
++ .update_connector = pmic_glink_ucsi_update_connector,
++ .connector_status = pmic_glink_ucsi_connector_status,
+ };
+
+ static void pmic_glink_ucsi_read_ack(struct pmic_glink_ucsi *ucsi, const void *data, int len)
+@@ -228,20 +258,8 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
+ }
+
+ con_num = UCSI_CCI_CONNECTOR(cci);
+- if (con_num) {
+- if (con_num <= PMIC_GLINK_MAX_PORTS &&
+- ucsi->port_orientation[con_num - 1]) {
+- int orientation = gpiod_get_value(ucsi->port_orientation[con_num - 1]);
+-
+- if (orientation >= 0) {
+- typec_switch_set(ucsi->port_switch[con_num - 1],
+- orientation ? TYPEC_ORIENTATION_REVERSE
+- : TYPEC_ORIENTATION_NORMAL);
+- }
+- }
+-
++ if (con_num)
+ ucsi_connector_change(ucsi->ucsi, con_num);
+- }
+
+ if (ucsi->sync_pending &&
+ (cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))) {
+@@ -252,20 +270,6 @@ static void pmic_glink_ucsi_notify(struct work_struct *work)
+ static void pmic_glink_ucsi_register(struct work_struct *work)
+ {
+ struct pmic_glink_ucsi *ucsi = container_of(work, struct pmic_glink_ucsi, register_work);
+- int orientation;
+- int i;
+-
+- for (i = 0; i < PMIC_GLINK_MAX_PORTS; i++) {
+- if (!ucsi->port_orientation[i])
+- continue;
+- orientation = gpiod_get_value(ucsi->port_orientation[i]);
+-
+- if (orientation >= 0) {
+- typec_switch_set(ucsi->port_switch[i],
+- orientation ? TYPEC_ORIENTATION_REVERSE
+- : TYPEC_ORIENTATION_NORMAL);
+- }
+- }
+
+ ucsi_register(ucsi->ucsi);
+ }
+diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
+index 7bce093316c4d7..525a72d8d746e3 100644
+--- a/drivers/watchdog/rzg2l_wdt.c
++++ b/drivers/watchdog/rzg2l_wdt.c
+@@ -8,11 +8,11 @@
+ #include <linux/clk.h>
+ #include <linux/delay.h>
+ #include <linux/io.h>
+-#include <linux/iopoll.h>
+ #include <linux/kernel.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+ #include <linux/platform_device.h>
++#include <linux/pm_domain.h>
+ #include <linux/pm_runtime.h>
+ #include <linux/reset.h>
+ #include <linux/units.h>
+@@ -54,35 +54,11 @@ struct rzg2l_wdt_priv {
+ struct reset_control *rstc;
+ unsigned long osc_clk_rate;
+ unsigned long delay;
+- unsigned long minimum_assertion_period;
+ struct clk *pclk;
+ struct clk *osc_clk;
+ enum rz_wdt_type devtype;
+ };
+
+-static int rzg2l_wdt_reset(struct rzg2l_wdt_priv *priv)
+-{
+- int err, status;
+-
+- if (priv->devtype == WDT_RZV2M) {
+- /* WDT needs TYPE-B reset control */
+- err = reset_control_assert(priv->rstc);
+- if (err)
+- return err;
+- ndelay(priv->minimum_assertion_period);
+- err = reset_control_deassert(priv->rstc);
+- if (err)
+- return err;
+- err = read_poll_timeout(reset_control_status, status,
+- status != 1, 0, 1000, false,
+- priv->rstc);
+- } else {
+- err = reset_control_reset(priv->rstc);
+- }
+-
+- return err;
+-}
+-
+ static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
+ {
+ /* delay before changing a setting register */
+@@ -129,6 +105,12 @@ static int rzg2l_wdt_start(struct watchdog_device *wdev)
+ if (ret)
+ return ret;
+
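++ /* the reset line is left asserted while the watchdog is stopped */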
++ ret = reset_control_deassert(priv->rstc);
++ if (ret) {
++ pm_runtime_put(wdev->parent);
++ return ret;
++ }
++
+ /* Initialize time out */
+ rzg2l_wdt_init_timeout(wdev);
+
+@@ -146,7 +128,9 @@ static int rzg2l_wdt_stop(struct watchdog_device *wdev)
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ int ret;
+
+- rzg2l_wdt_reset(priv);
++ ret = reset_control_assert(priv->rstc);
++ if (ret)
++ return ret;
+
+ ret = pm_runtime_put(wdev->parent);
+ if (ret < 0)
+@@ -181,11 +165,30 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
+ unsigned long action, void *data)
+ {
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
++ int ret;
+
+- clk_prepare_enable(priv->pclk);
+- clk_prepare_enable(priv->osc_clk);
++ /*
++ * In case of RZ/G3S the watchdog device may be part of an IRQ safe power
++ * domain that is currently powered off. In this case we need to power
++ * it on before accessing registers. Along with this the clocks will be
++ * enabled. We don't undo the pm_runtime_resume_and_get() as the device
++ * needs to be on for the reboot to happen.
++ *
++ * For the rest of the SoCs, which don't register the watchdog in an
++ * IRQ-safe power domain, it is safe to call pm_runtime_resume_and_get()
++ * as the irq_safe_dev_in_sleep_domain() call in genpd_runtime_resume()
++ * returns a non-zero value and the genpd_lock() is avoided; thus, no
++ * invalid wait context is reported by lockdep.
++ */
++ ret = pm_runtime_resume_and_get(wdev->parent);
++ if (ret)
++ return ret;
+
+ if (priv->devtype == WDT_RZG2L) {
++ ret = reset_control_deassert(priv->rstc);
++ if (ret)
++ return ret;
++
+ /* Generate Reset (WDTRSTB) Signal on parity error */
+ rzg2l_wdt_write(priv, 0, PECR);
+
+@@ -193,7 +196,9 @@ static int rzg2l_wdt_restart(struct watchdog_device *wdev,
+ rzg2l_wdt_write(priv, PEEN_FORCE, PEEN);
+ } else {
+ /* RZ/V2M doesn't have parity error registers */
+- rzg2l_wdt_reset(priv);
++ ret = reset_control_reset(priv->rstc);
++ if (ret)
++ return ret;
+
+ wdev->timeout = 0;
+
+@@ -236,13 +241,11 @@ static const struct watchdog_ops rzg2l_wdt_ops = {
+ .restart = rzg2l_wdt_restart,
+ };
+
+-static void rzg2l_wdt_reset_assert_pm_disable(void *data)
++static void rzg2l_wdt_pm_disable(void *data)
+ {
+ struct watchdog_device *wdev = data;
+- struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ pm_runtime_disable(wdev->parent);
+- reset_control_assert(priv->rstc);
+ }
+
+ static int rzg2l_wdt_probe(struct platform_device *pdev)
+@@ -285,19 +288,9 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
+ "failed to get cpg reset");
+
+- ret = reset_control_deassert(priv->rstc);
+- if (ret)
+- return dev_err_probe(dev, ret, "failed to deassert");
+-
+ priv->devtype = (uintptr_t)of_device_get_match_data(dev);
+
+- if (priv->devtype == WDT_RZV2M) {
+- priv->minimum_assertion_period = RZV2M_A_NSEC +
+- 3 * F2CYCLE_NSEC(pclk_rate) + 5 *
+- max(F2CYCLE_NSEC(priv->osc_clk_rate),
+- F2CYCLE_NSEC(pclk_rate));
+- }
+-
++ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ priv->wdev.info = &rzg2l_wdt_ident;
+@@ -309,9 +302,7 @@ static int rzg2l_wdt_probe(struct platform_device *pdev)
+ priv->wdev.timeout = WDT_DEFAULT_TIMEOUT;
+
+ watchdog_set_drvdata(&priv->wdev, priv);
+- ret = devm_add_action_or_reset(&pdev->dev,
+- rzg2l_wdt_reset_assert_pm_disable,
+- &priv->wdev);
++ ret = devm_add_action_or_reset(&pdev->dev, rzg2l_wdt_pm_disable, &priv->wdev);
+ if (ret < 0)
+ return ret;
+
+diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
+index 25c902e7556d57..4b21ca49b6665d 100644
+--- a/fs/btrfs/ctree.c
++++ b/fs/btrfs/ctree.c
+@@ -526,13 +526,13 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
+ * bytes the allocator should try to find free next to the block it returns.
+ * This is just a hint and may be ignored by the allocator.
+ */
+-static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+- struct btrfs_root *root,
+- struct extent_buffer *buf,
+- struct extent_buffer *parent, int parent_slot,
+- struct extent_buffer **cow_ret,
+- u64 search_start, u64 empty_size,
+- enum btrfs_lock_nesting nest)
++int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root,
++ struct extent_buffer *buf,
++ struct extent_buffer *parent, int parent_slot,
++ struct extent_buffer **cow_ret,
++ u64 search_start, u64 empty_size,
++ enum btrfs_lock_nesting nest)
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ struct btrfs_disk_key disk_key;
+@@ -660,6 +660,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
+ return ret;
+ }
+ }
++
++ trace_btrfs_cow_block(root, buf, cow);
+ if (unlock_orig)
+ btrfs_tree_unlock(buf);
+ free_extent_buffer_stale(buf);
+@@ -699,7 +701,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
+ }
+
+ /*
+- * cows a single block, see __btrfs_cow_block for the real work.
++ * COWs a single block, see btrfs_force_cow_block() for the real work.
+ * This version of it has extra checks so that a block isn't COWed more than
+ * once per transaction, as long as it hasn't been written yet
+ */
+@@ -711,7 +713,6 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ {
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ u64 search_start;
+- int ret;
+
+ if (unlikely(test_bit(BTRFS_ROOT_DELETING, &root->state))) {
+ btrfs_abort_transaction(trans, -EUCLEAN);
+@@ -752,12 +753,8 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ * Also we don't care about the error, as it's handled internally.
+ */
+ btrfs_qgroup_trace_subtree_after_cow(trans, root, buf);
+- ret = __btrfs_cow_block(trans, root, buf, parent,
+- parent_slot, cow_ret, search_start, 0, nest);
+-
+- trace_btrfs_cow_block(root, buf, *cow_ret);
+-
+- return ret;
++ return btrfs_force_cow_block(trans, root, buf, parent, parent_slot,
++ cow_ret, search_start, 0, nest);
+ }
+ ALLOW_ERROR_INJECTION(btrfs_cow_block, ERRNO);
+
+@@ -904,11 +901,11 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
+ search_start = last_block;
+
+ btrfs_tree_lock(cur);
+- err = __btrfs_cow_block(trans, root, cur, parent, i,
+- &cur, search_start,
+- min(16 * blocksize,
+- (end_slot - i) * blocksize),
+- BTRFS_NESTING_COW);
++ err = btrfs_force_cow_block(trans, root, cur, parent, i,
++ &cur, search_start,
++ min(16 * blocksize,
++ (end_slot - i) * blocksize),
++ BTRFS_NESTING_COW);
+ if (err) {
+ btrfs_tree_unlock(cur);
+ free_extent_buffer(cur);
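
The ctree.c change is more than a rename: trace_btrfs_cow_block() now fires inside btrfs_force_cow_block(), while the original buffer is still locked, rather than in btrfs_cow_block() after the helper has already run free_extent_buffer_stale() on it. The hazard the move removes, in outline (hypothetical names):

    ret = cow_helper(trans, root, buf, ..., &cow);  /* may drop last ref to buf */
    trace_cow(root, buf, cow);                      /* use-after-free if tracing on */

With the tracepoint relocated before the unlock and release, the btrfs_realloc_node() path gains the event as well.
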
+diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
+index f7bb4c34b984b3..7df3ed2945b049 100644
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -538,6 +538,13 @@ int btrfs_cow_block(struct btrfs_trans_handle *trans,
+ struct extent_buffer *parent, int parent_slot,
+ struct extent_buffer **cow_ret,
+ enum btrfs_lock_nesting nest);
++int btrfs_force_cow_block(struct btrfs_trans_handle *trans,
++ struct btrfs_root *root,
++ struct extent_buffer *buf,
++ struct extent_buffer *parent, int parent_slot,
++ struct extent_buffer **cow_ret,
++ u64 search_start, u64 empty_size,
++ enum btrfs_lock_nesting nest);
+ int btrfs_copy_root(struct btrfs_trans_handle *trans,
+ struct btrfs_root *root,
+ struct extent_buffer *buf,
+diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
+index 8ec411eb9c9b0c..967c6b5dd0a434 100644
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -4323,6 +4323,15 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
+ * already the cleaner, but below we run all pending delayed iputs.
+ */
+ btrfs_flush_workqueue(fs_info->fixup_workers);
++ /*
++ * Similar case here, we have to wait for delalloc workers before we
++ * proceed below and stop the cleaner kthread, otherwise we trigger a
++ * use-after-free on the cleaner kthread task_struct when a delalloc
++ * worker running submit_compressed_extents() adds a delayed iput, which
++ * does a wake up on the cleaner kthread, which was already freed below
++ * when we call kthread_stop().
++ */
++ btrfs_flush_workqueue(fs_info->delalloc_workers);
+
+ /*
+ * After we parked the cleaner kthread, ordered extents may have
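
The close_ctree() hunk encodes a general shutdown-ordering rule: any worker pool that can still queue a wake-up against a kthread must be drained before that kthread is stopped and its task_struct freed. Reduced to the bare kernel primitives (a sketch; 'wq' and 'cleaner' stand in for the btrfs objects):

    flush_workqueue(wq);    /* no pending worker can touch the kthread afterwards */
    kthread_stop(cleaner);  /* safe: nothing holds a stale task_struct pointer */
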
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index d6767f728c079d..eb9319d856f2d8 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9972,7 +9972,7 @@ static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
+ */
+ WRITE_ONCE(priv->status, bbio->bio.bi_status);
+ }
+- if (!atomic_dec_return(&priv->pending))
++ if (atomic_dec_and_test(&priv->pending))
+ wake_up(&priv->wait);
+ bio_put(&bbio->bio);
+ }
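
The encoded-read endio change swaps !atomic_dec_return() for atomic_dec_and_test(). The two are equivalent here - both are true exactly when the counter reaches zero, and both imply full ordering - but _and_test() is the canonical "last dropper does the wake-up" idiom. A self-contained C11 restatement with a hypothetical drop_ref():

    #include <stdatomic.h>
    #include <stdbool.h>

    /* True only for the caller that dropped the final reference. */
    static bool drop_ref(atomic_int *pending)
    {
            /* fetch_sub returns the pre-decrement value, so "was 1"
             * means "is now 0" - the kernel's dec_and_test. */
            return atomic_fetch_sub(pending, 1) == 1;
    }
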
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 11289ce8a8cc81..dfa1b3c82b53ac 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -2713,12 +2713,11 @@ char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc, struct dentry *dentry,
+
+ if (pos < 0) {
+ /*
+- * A rename didn't occur, but somehow we didn't end up where
+- * we thought we would. Throw a warning and try again.
++ * The path is longer than PATH_MAX and this function
++ * cannot ever succeed. Creating paths that long is
++ * possible with Ceph, but Linux cannot use them.
+ */
+- pr_warn("build_path did not end path lookup where expected (pos = %d)\n",
+- pos);
+- goto retry;
++ return ERR_PTR(-ENAMETOOLONG);
+ }
+
+ *pbase = base;
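
The ceph hunk stops warning and retrying forever on paths longer than PATH_MAX and fails cleanly with ERR_PTR(-ENAMETOOLONG) instead. For readers outside kernel code: ERR_PTR encodes a negative errno in the pointer value itself, so one return slot carries both results. A userspace restatement mirroring include/linux/err.h:

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }
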
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index 3db01b933c3e8b..60455c84a93742 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -891,10 +891,13 @@ do { \
+ (raw_inode)->xtime = cpu_to_le32(clamp_t(int32_t, (ts).tv_sec, S32_MIN, S32_MAX)); \
+ } while (0)
+
+-#define EXT4_INODE_SET_XTIME(xtime, inode, raw_inode) \
+- EXT4_INODE_SET_XTIME_VAL(xtime, inode, raw_inode, (inode)->xtime)
++#define EXT4_INODE_SET_ATIME(inode, raw_inode) \
++ EXT4_INODE_SET_XTIME_VAL(i_atime, inode, raw_inode, inode_get_atime(inode))
+
+-#define EXT4_INODE_SET_CTIME(inode, raw_inode) \
++#define EXT4_INODE_SET_MTIME(inode, raw_inode) \
++ EXT4_INODE_SET_XTIME_VAL(i_mtime, inode, raw_inode, inode_get_mtime(inode))
++
++#define EXT4_INODE_SET_CTIME(inode, raw_inode) \
+ EXT4_INODE_SET_XTIME_VAL(i_ctime, inode, raw_inode, inode_get_ctime(inode))
+
+ #define EXT4_EINODE_SET_XTIME(xtime, einode, raw_inode) \
+@@ -910,9 +913,16 @@ do { \
+ .tv_sec = (signed)le32_to_cpu((raw_inode)->xtime) \
+ })
+
+-#define EXT4_INODE_GET_XTIME(xtime, inode, raw_inode) \
++#define EXT4_INODE_GET_ATIME(inode, raw_inode) \
++do { \
++ inode_set_atime_to_ts(inode, \
++ EXT4_INODE_GET_XTIME_VAL(i_atime, inode, raw_inode)); \
++} while (0)
++
++#define EXT4_INODE_GET_MTIME(inode, raw_inode) \
+ do { \
+- (inode)->xtime = EXT4_INODE_GET_XTIME_VAL(xtime, inode, raw_inode); \
++ inode_set_mtime_to_ts(inode, \
++ EXT4_INODE_GET_XTIME_VAL(i_mtime, inode, raw_inode)); \
+ } while (0)
+
+ #define EXT4_INODE_GET_CTIME(inode, raw_inode) \
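
The ext4.h hunk splits the generic EXT4_INODE_{GET,SET}_XTIME macros into dedicated ATIME/MTIME variants: with the VFS timestamp conversion in this series, i_atime and i_mtime are no longer addressed as plain inode fields, and every access goes through accessor helpers. Illustrative use of the accessors the new macros wrap (kernel-context fragment, not from the patch):

    struct timespec64 ts = inode_get_mtime(inode);   /* read mtime */
    inode_set_atime_to_ts(inode, ts);                /* atime = mtime */
    /* set mtime and ctime to "now" in one statement, as the ext4
     * call sites below do: */
    inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
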
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 5ea75af6ca2239..32218ac7f50fe2 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -4475,7 +4475,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+ int depth = 0;
+ struct ext4_map_blocks map;
+ unsigned int credits;
+- loff_t epos;
++ loff_t epos, old_size = i_size_read(inode);
+
+ BUG_ON(!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS));
+ map.m_lblk = offset;
+@@ -4532,7 +4532,13 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
+ if (epos > new_size)
+ epos = new_size;
+ if (ext4_update_inode_size(inode, epos) & 0x1)
+- inode->i_mtime = inode_get_ctime(inode);
++ inode_set_mtime_to_ts(inode,
++ inode_get_ctime(inode));
++ if (epos > old_size) {
++ pagecache_isize_extended(inode, old_size, epos);
++ ext4_zero_partial_blocks(handle, inode,
++ old_size, epos - old_size);
++ }
+ }
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+@@ -4670,7 +4676,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+
+ /* Now release the pages and zero block aligned part of pages */
+ truncate_pagecache_range(inode, start, end - 1);
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+
+ ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
+ flags);
+@@ -4695,7 +4701,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
+ goto out_mutex;
+ }
+
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ if (new_size)
+ ext4_update_inode_size(inode, new_size);
+ ret = ext4_mark_inode_dirty(handle, inode);
+@@ -5431,7 +5437,7 @@ static int ext4_collapse_range(struct file *file, loff_t offset, loff_t len)
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret = ext4_mark_inode_dirty(handle, inode);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
+
+@@ -5541,7 +5547,7 @@ static int ext4_insert_range(struct file *file, loff_t offset, loff_t len)
+ /* Expand file to avoid data loss if there is error while shifting */
+ inode->i_size += len;
+ EXT4_I(inode)->i_disksize += len;
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret = ext4_mark_inode_dirty(handle, inode);
+ if (ret)
+ goto out_stop;
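
The extents.c hunks close a stale-data window when fallocate paths grow i_size: each place that moves the size forward now calls pagecache_isize_extended(), so a mapped folio straddling the old EOF is write-protected and refaults at the new size, and also zeroes the newly exposed byte range. The recurring shape, as used above:

    if (new_size > old_size) {
            pagecache_isize_extended(inode, old_size, new_size);
            ext4_zero_partial_blocks(handle, inode, old_size,
                                     new_size - old_size);
    }
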
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index d4d0ad689d3c1c..52f2959d29e6e0 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1255,8 +1255,8 @@ struct inode *__ext4_new_inode(struct mnt_idmap *idmap,
+ inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
+ /* This is the optimal IO size (for stat), not the fs block size */
+ inode->i_blocks = 0;
+- inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+- ei->i_crtime = inode->i_mtime;
++ simple_inode_init_ts(inode);
++ ei->i_crtime = inode_get_mtime(inode);
+
+ memset(ei->i_data, 0, sizeof(ei->i_data));
+ ei->i_dir_start_lookup = 0;
+diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
+index cb65052ee3dec6..3f363276ddd360 100644
+--- a/fs/ext4/inline.c
++++ b/fs/ext4/inline.c
+@@ -1037,7 +1037,7 @@ static int ext4_add_dirent_to_inline(handle_t *handle,
+ * happen is that the times are slightly out of date
+ * and/or different from the directory change time.
+ */
+- dir->i_mtime = inode_set_ctime_current(dir);
++ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ ext4_update_dx_flag(dir);
+ inode_inc_iversion(dir);
+ return 1;
+@@ -2010,7 +2010,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
+ ext4_orphan_del(handle, inode);
+
+ if (err == 0) {
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ err = ext4_mark_inode_dirty(handle, inode);
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 18ec9106c5b09f..19d7bcf16ebb88 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1328,8 +1328,10 @@ static int ext4_write_end(struct file *file,
+ folio_unlock(folio);
+ folio_put(folio);
+
+- if (old_size < pos && !verity)
++ if (old_size < pos && !verity) {
+ pagecache_isize_extended(inode, old_size, pos);
++ ext4_zero_partial_blocks(handle, inode, old_size, pos - old_size);
++ }
+ /*
+ * Don't mark the inode dirty under folio lock. First, it unnecessarily
+ * makes the holding time of folio lock longer. Second, it forces lock
+@@ -1445,8 +1447,10 @@ static int ext4_journalled_write_end(struct file *file,
+ folio_unlock(folio);
+ folio_put(folio);
+
+- if (old_size < pos && !verity)
++ if (old_size < pos && !verity) {
+ pagecache_isize_extended(inode, old_size, pos);
++ ext4_zero_partial_blocks(handle, inode, old_size, pos - old_size);
++ }
+
+ if (size_changed) {
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+@@ -2971,7 +2975,8 @@ static int ext4_da_do_write_end(struct address_space *mapping,
+ struct inode *inode = mapping->host;
+ loff_t old_size = inode->i_size;
+ bool disksize_changed = false;
+- loff_t new_i_size;
++ loff_t new_i_size, zero_len = 0;
++ handle_t *handle;
+
+ if (unlikely(!folio_buffers(folio))) {
+ folio_unlock(folio);
+@@ -3015,18 +3020,21 @@ static int ext4_da_do_write_end(struct address_space *mapping,
+ folio_unlock(folio);
+ folio_put(folio);
+
+- if (old_size < pos)
++ if (pos > old_size) {
+ pagecache_isize_extended(inode, old_size, pos);
++ zero_len = pos - old_size;
++ }
+
+- if (disksize_changed) {
+- handle_t *handle;
++ if (!disksize_changed && !zero_len)
++ return copied;
+
+- handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
+- if (IS_ERR(handle))
+- return PTR_ERR(handle);
+- ext4_mark_inode_dirty(handle, inode);
+- ext4_journal_stop(handle);
+- }
++ handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++ if (zero_len)
++ ext4_zero_partial_blocks(handle, inode, old_size, zero_len);
++ ext4_mark_inode_dirty(handle, inode);
++ ext4_journal_stop(handle);
+
+ return copied;
+ }
+@@ -4055,7 +4063,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
+ if (IS_SYNC(inode))
+ ext4_handle_sync(handle);
+
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ ret2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(ret2))
+ ret = ret2;
+@@ -4215,7 +4223,7 @@ int ext4_truncate(struct inode *inode)
+ if (inode->i_nlink)
+ ext4_orphan_del(handle, inode);
+
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ err2 = ext4_mark_inode_dirty(handle, inode);
+ if (unlikely(err2 && !err))
+ err = err2;
+@@ -4319,8 +4327,8 @@ static int ext4_fill_raw_inode(struct inode *inode, struct ext4_inode *raw_inode
+ raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
+
+ EXT4_INODE_SET_CTIME(inode, raw_inode);
+- EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
+- EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
++ EXT4_INODE_SET_MTIME(inode, raw_inode);
++ EXT4_INODE_SET_ATIME(inode, raw_inode);
+ EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
+
+ raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
+@@ -4928,8 +4936,8 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ }
+
+ EXT4_INODE_GET_CTIME(inode, raw_inode);
+- EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
+- EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
++ EXT4_INODE_GET_ATIME(inode, raw_inode);
++ EXT4_INODE_GET_MTIME(inode, raw_inode);
+ EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
+
+ if (likely(!test_opt2(inode->i_sb, HURD_COMPAT))) {
+@@ -5054,8 +5062,8 @@ static void __ext4_update_other_inode_time(struct super_block *sb,
+
+ spin_lock(&ei->i_raw_lock);
+ EXT4_INODE_SET_CTIME(inode, raw_inode);
+- EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
+- EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
++ EXT4_INODE_SET_MTIME(inode, raw_inode);
++ EXT4_INODE_SET_ATIME(inode, raw_inode);
+ ext4_inode_csum_set(inode, raw_inode, ei);
+ spin_unlock(&ei->i_raw_lock);
+ trace_ext4_other_inode_update_time(inode, orig_ino);
+@@ -5437,6 +5445,14 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ }
+
+ if (attr->ia_size != inode->i_size) {
++ /* attach jbd2 jinode for EOF folio tail zeroing */
++ if (attr->ia_size & (inode->i_sb->s_blocksize - 1) ||
++ oldsize & (inode->i_sb->s_blocksize - 1)) {
++ error = ext4_inode_attach_jinode(inode);
++ if (error)
++ goto err_out;
++ }
++
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
+ if (IS_ERR(handle)) {
+ error = PTR_ERR(handle);
+@@ -5447,11 +5463,17 @@ int ext4_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ orphan = 1;
+ }
+ /*
+- * Update c/mtime on truncate up, ext4_truncate() will
+- * update c/mtime in shrink case below
++ * Update c/mtime and tail zero the EOF folio on
++ * truncate up. ext4_truncate() handles the shrink case
++ * below.
+ */
+- if (!shrink)
+- inode->i_mtime = inode_set_ctime_current(inode);
++ if (!shrink) {
++ inode_set_mtime_to_ts(inode,
++ inode_set_ctime_current(inode));
++ if (oldsize & (inode->i_sb->s_blocksize - 1))
++ ext4_block_truncate_page(handle,
++ inode->i_mapping, oldsize);
++ }
+
+ if (shrink)
+ ext4_fc_track_range(handle, inode,
+diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
+index 0bfe2ce589e224..4f931f80cb3489 100644
+--- a/fs/ext4/ioctl.c
++++ b/fs/ext4/ioctl.c
+@@ -312,13 +312,22 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
+ struct ext4_inode_info *ei1;
+ struct ext4_inode_info *ei2;
+ unsigned long tmp;
++ struct timespec64 ts1, ts2;
+
+ ei1 = EXT4_I(inode1);
+ ei2 = EXT4_I(inode2);
+
+ swap(inode1->i_version, inode2->i_version);
+- swap(inode1->i_atime, inode2->i_atime);
+- swap(inode1->i_mtime, inode2->i_mtime);
++
++ ts1 = inode_get_atime(inode1);
++ ts2 = inode_get_atime(inode2);
++ inode_set_atime_to_ts(inode1, ts2);
++ inode_set_atime_to_ts(inode2, ts1);
++
++ ts1 = inode_get_mtime(inode1);
++ ts2 = inode_get_mtime(inode2);
++ inode_set_mtime_to_ts(inode1, ts2);
++ inode_set_mtime_to_ts(inode2, ts1);
+
+ memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
+ tmp = ei1->i_flags & EXT4_FL_SHOULD_SWAP;
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 4de1f61bba76b3..96a048d3f51bf5 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2210,7 +2210,7 @@ static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
+ * happen is that the times are slightly out of date
+ * and/or different from the directory change time.
+ */
+- dir->i_mtime = inode_set_ctime_current(dir);
++ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ ext4_update_dx_flag(dir);
+ inode_inc_iversion(dir);
+ err2 = ext4_mark_inode_dirty(handle, dir);
+@@ -3248,7 +3248,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
+ * recovery. */
+ inode->i_size = 0;
+ ext4_orphan_add(handle, inode);
+- dir->i_mtime = inode_set_ctime_current(dir);
++ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ inode_set_ctime_current(inode);
+ retval = ext4_mark_inode_dirty(handle, inode);
+ if (retval)
+@@ -3323,7 +3323,7 @@ int __ext4_unlink(struct inode *dir, const struct qstr *d_name,
+ retval = ext4_delete_entry(handle, dir, de, bh);
+ if (retval)
+ goto out_handle;
+- dir->i_mtime = inode_set_ctime_current(dir);
++ inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+ ext4_update_dx_flag(dir);
+ retval = ext4_mark_inode_dirty(handle, dir);
+ if (retval)
+@@ -3691,7 +3691,7 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
+ if (ext4_has_feature_filetype(ent->dir->i_sb))
+ ent->de->file_type = file_type;
+ inode_inc_iversion(ent->dir);
+- ent->dir->i_mtime = inode_set_ctime_current(ent->dir);
++ inode_set_mtime_to_ts(ent->dir, inode_set_ctime_current(ent->dir));
+ retval = ext4_mark_inode_dirty(handle, ent->dir);
+ BUFFER_TRACE(ent->bh, "call ext4_handle_dirty_metadata");
+ if (!ent->inlined) {
+@@ -4006,7 +4006,7 @@ static int ext4_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ ext4_dec_count(new.inode);
+ inode_set_ctime_current(new.inode);
+ }
+- old.dir->i_mtime = inode_set_ctime_current(old.dir);
++ inode_set_mtime_to_ts(old.dir, inode_set_ctime_current(old.dir));
+ ext4_update_dx_flag(old.dir);
+ if (old.dir_bh) {
+ retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 2346ef071b2421..71ced0ada9a2e5 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -7180,7 +7180,7 @@ static int ext4_quota_off(struct super_block *sb, int type)
+ }
+ EXT4_I(inode)->i_flags &= ~(EXT4_NOATIME_FL | EXT4_IMMUTABLE_FL);
+ inode_set_flags(inode, 0, S_NOATIME | S_IMMUTABLE);
+- inode->i_mtime = inode_set_ctime_current(inode);
++ inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
+ err = ext4_mark_inode_dirty(handle, inode);
+ ext4_journal_stop(handle);
+ out_unlock:
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index f40785bc4e5549..df5ab1a75fc482 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -356,7 +356,7 @@ ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
+
+ static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
+ {
+- return ((u64) inode_get_ctime(ea_inode).tv_sec << 32) |
++ return ((u64) inode_get_ctime_sec(ea_inode) << 32) |
+ (u32) inode_peek_iversion_raw(ea_inode);
+ }
+
+@@ -368,12 +368,12 @@ static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
+
+ static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
+ {
+- return (u32)ea_inode->i_atime.tv_sec;
++ return (u32) inode_get_atime_sec(ea_inode);
+ }
+
+ static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
+ {
+- ea_inode->i_atime.tv_sec = hash;
++ inode_set_atime(ea_inode, hash, 0);
+ }
+
+ /*
+@@ -418,7 +418,7 @@ static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size)
+ return ret;
+ }
+
+-#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)
++#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode_get_mtime_sec(inode)))
+
+ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
+ u32 ea_inode_hash, struct inode **ea_inode)
+diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
+index 196755a34833d2..ae129044c52f42 100644
+--- a/fs/f2fs/file.c
++++ b/fs/f2fs/file.c
+@@ -1037,6 +1037,13 @@ int f2fs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ return err;
+ }
+
++ /*
++ * Wait for in-flight DIO; blocks should only be removed after
++ * I/O completion.
++ */
++ if (attr->ia_size < old_size)
++ inode_dio_wait(inode);
++
+ f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ filemap_invalidate_lock(inode->i_mapping);
+
+@@ -1873,6 +1880,12 @@ static long f2fs_fallocate(struct file *file, int mode,
+ if (ret)
+ goto out;
+
++ /*
++ * Wait for in-flight DIO; blocks should only be removed after I/O
++ * completion.
++ */
++ inode_dio_wait(inode);
++
+ if (mode & FALLOC_FL_PUNCH_HOLE) {
+ if (offset >= inode->i_size)
+ goto out;
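
Both f2fs hunks apply one rule: before freeing blocks via truncate or fallocate, drain in-flight direct I/O so a completing DIO cannot write into blocks that were just released and possibly reallocated. inode_dio_wait() sleeps until the inode's outstanding-DIO count reaches zero and is called with the inode locked so no new DIO can start; in outline:

    inode_lock(inode);
    if (new_size < i_size_read(inode))
            inode_dio_wait(inode);  /* drain DIO before removing blocks */
    /* ...invalidate the page cache, then free the blocks... */
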
+diff --git a/fs/ntfs3/attrib.c b/fs/ntfs3/attrib.c
+index fc6cea60044edf..e25989dd2c6bba 100644
+--- a/fs/ntfs3/attrib.c
++++ b/fs/ntfs3/attrib.c
+@@ -977,15 +977,17 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+ goto out;
+
+ /* Check for compressed frame. */
+- err = attr_is_frame_compressed(ni, attr, vcn >> NTFS_LZNT_CUNIT, &hint);
++ err = attr_is_frame_compressed(ni, attr_b, vcn >> NTFS_LZNT_CUNIT,
++ &hint, run);
+ if (err)
+ goto out;
+
+ if (hint) {
+ /* if frame is compressed - don't touch it. */
+ *lcn = COMPRESSED_LCN;
+- *len = hint;
+- err = -EOPNOTSUPP;
++ /* Length to the end of the frame. */
++ *len = NTFS_LZNT_CLUSTERS - (vcn & (NTFS_LZNT_CLUSTERS - 1));
++ err = 0;
+ goto out;
+ }
+
+@@ -1028,16 +1030,16 @@ int attr_data_get_block(struct ntfs_inode *ni, CLST vcn, CLST clen, CLST *lcn,
+
+ /* Check if 'vcn' and 'vcn0' in different attribute segments. */
+ if (vcn < svcn || evcn1 <= vcn) {
+- /* Load attribute for truncated vcn. */
+- attr = ni_find_attr(ni, attr_b, &le, ATTR_DATA, NULL, 0,
+- &vcn, &mi);
+- if (!attr) {
++ struct ATTRIB *attr2;
++ /* Load runs for truncated vcn. */
++ attr2 = ni_find_attr(ni, attr_b, &le_b, ATTR_DATA, NULL,
++ 0, &vcn, &mi);
++ if (!attr2) {
+ err = -EINVAL;
+ goto out;
+ }
+- svcn = le64_to_cpu(attr->nres.svcn);
+- evcn1 = le64_to_cpu(attr->nres.evcn) + 1;
+- err = attr_load_runs(attr, ni, run, NULL);
++ evcn1 = le64_to_cpu(attr2->nres.evcn) + 1;
++ err = attr_load_runs(attr2, ni, run, NULL);
+ if (err)
+ goto out;
+ }
+@@ -1530,15 +1532,18 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
+
+ /*
+ * attr_is_frame_compressed - Used to detect compressed frame.
++ *
++ * attr - base (primary) attribute segment.
++ * run - run to use, usually == &ni->file.run.
++ * Only base segments contain a valid 'attr->nres.c_unit'.
+ */
+ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
+- CLST frame, CLST *clst_data)
++ CLST frame, CLST *clst_data, struct runs_tree *run)
+ {
+ int err;
+ u32 clst_frame;
+ CLST clen, lcn, vcn, alen, slen, vcn_next;
+ size_t idx;
+- struct runs_tree *run;
+
+ *clst_data = 0;
+
+@@ -1550,7 +1555,6 @@ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
+
+ clst_frame = 1u << attr->nres.c_unit;
+ vcn = frame * clst_frame;
+- run = &ni->file.run;
+
+ if (!run_lookup_entry(run, vcn, &lcn, &clen, &idx)) {
+ err = attr_load_runs_vcn(ni, attr->type, attr_name(attr),
+@@ -1686,7 +1690,7 @@ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ if (err)
+ goto out;
+
+- err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data);
++ err = attr_is_frame_compressed(ni, attr_b, frame, &clst_data, run);
+ if (err)
+ goto out;
+
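
Two things happen in the attrib.c hunks. First, attr_is_frame_compressed() now takes the runs_tree explicitly, because only the base attribute segment carries a valid nres.c_unit; callers pass attr_b plus whichever run they are resolving. Second, hitting a compressed frame in attr_data_get_block() is no longer an -EOPNOTSUPP error but a successful COMPRESSED_LCN mapping whose length reaches the end of the frame. That length line is a power-of-two remainder trick; a self-contained restatement assuming the driver's 16-cluster LZNT frame:

    /* Clusters left to the end of an aligned power-of-two frame:
     * with a 16-cluster frame, vcn 21 -> 16 - (21 & 15) = 11. */
    #define FRAME_CLUSTERS 16u
    static unsigned int to_frame_end(unsigned int vcn)
    {
            return FRAME_CLUSTERS - (vcn & (FRAME_CLUSTERS - 1));
    }
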
+diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c
+index 12e03feb3074a0..3c876c468c2c47 100644
+--- a/fs/ntfs3/frecord.c
++++ b/fs/ntfs3/frecord.c
+@@ -1900,46 +1900,6 @@ enum REPARSE_SIGN ni_parse_reparse(struct ntfs_inode *ni, struct ATTRIB *attr,
+ return REPARSE_LINK;
+ }
+
+-/*
+- * fiemap_fill_next_extent_k - a copy of fiemap_fill_next_extent
+- * but it uses 'fe_k' instead of fieinfo->fi_extents_start
+- */
+-static int fiemap_fill_next_extent_k(struct fiemap_extent_info *fieinfo,
+- struct fiemap_extent *fe_k, u64 logical,
+- u64 phys, u64 len, u32 flags)
+-{
+- struct fiemap_extent extent;
+-
+- /* only count the extents */
+- if (fieinfo->fi_extents_max == 0) {
+- fieinfo->fi_extents_mapped++;
+- return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
+- }
+-
+- if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
+- return 1;
+-
+- if (flags & FIEMAP_EXTENT_DELALLOC)
+- flags |= FIEMAP_EXTENT_UNKNOWN;
+- if (flags & FIEMAP_EXTENT_DATA_ENCRYPTED)
+- flags |= FIEMAP_EXTENT_ENCODED;
+- if (flags & (FIEMAP_EXTENT_DATA_TAIL | FIEMAP_EXTENT_DATA_INLINE))
+- flags |= FIEMAP_EXTENT_NOT_ALIGNED;
+-
+- memset(&extent, 0, sizeof(extent));
+- extent.fe_logical = logical;
+- extent.fe_physical = phys;
+- extent.fe_length = len;
+- extent.fe_flags = flags;
+-
+- memcpy(fe_k + fieinfo->fi_extents_mapped, &extent, sizeof(extent));
+-
+- fieinfo->fi_extents_mapped++;
+- if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
+- return 1;
+- return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
+-}
+-
+ /*
+ * ni_fiemap - Helper for file_fiemap().
+ *
+@@ -1950,11 +1910,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ __u64 vbo, __u64 len)
+ {
+ int err = 0;
+- struct fiemap_extent *fe_k = NULL;
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ u8 cluster_bits = sbi->cluster_bits;
+- struct runs_tree *run;
+- struct rw_semaphore *run_lock;
++ struct runs_tree run;
+ struct ATTRIB *attr;
+ CLST vcn = vbo >> cluster_bits;
+ CLST lcn, clen;
+@@ -1965,13 +1923,11 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ u32 flags;
+ bool ok;
+
++ run_init(&run);
+ if (S_ISDIR(ni->vfs_inode.i_mode)) {
+- run = &ni->dir.alloc_run;
+ attr = ni_find_attr(ni, NULL, NULL, ATTR_ALLOC, I30_NAME,
+ ARRAY_SIZE(I30_NAME), NULL, NULL);
+- run_lock = &ni->dir.run_lock;
+ } else {
+- run = &ni->file.run;
+ attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0, NULL,
+ NULL);
+ if (!attr) {
+@@ -1986,7 +1942,6 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ "fiemap is not supported for compressed file (cp -r)");
+ goto out;
+ }
+- run_lock = &ni->file.run_lock;
+ }
+
+ if (!attr || !attr->non_res) {
+@@ -1998,51 +1953,33 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ goto out;
+ }
+
+- /*
+- * To avoid lock problems replace pointer to user memory by pointer to kernel memory.
+- */
+- fe_k = kmalloc_array(fieinfo->fi_extents_max,
+- sizeof(struct fiemap_extent),
+- GFP_NOFS | __GFP_ZERO);
+- if (!fe_k) {
+- err = -ENOMEM;
+- goto out;
+- }
+-
+ end = vbo + len;
+ alloc_size = le64_to_cpu(attr->nres.alloc_size);
+ if (end > alloc_size)
+ end = alloc_size;
+
+- down_read(run_lock);
+
+ while (vbo < end) {
+ if (idx == -1) {
+- ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
++ ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx);
+ } else {
+ CLST vcn_next = vcn;
+
+- ok = run_get_entry(run, ++idx, &vcn, &lcn, &clen) &&
++ ok = run_get_entry(&run, ++idx, &vcn, &lcn, &clen) &&
+ vcn == vcn_next;
+ if (!ok)
+ vcn = vcn_next;
+ }
+
+ if (!ok) {
+- up_read(run_lock);
+- down_write(run_lock);
+-
+ err = attr_load_runs_vcn(ni, attr->type,
+ attr_name(attr),
+- attr->name_len, run, vcn);
+-
+- up_write(run_lock);
+- down_read(run_lock);
++ attr->name_len, &run, vcn);
+
+ if (err)
+ break;
+
+- ok = run_lookup_entry(run, vcn, &lcn, &clen, &idx);
++ ok = run_lookup_entry(&run, vcn, &lcn, &clen, &idx);
+
+ if (!ok) {
+ err = -EINVAL;
+@@ -2067,8 +2004,9 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ } else if (is_attr_compressed(attr)) {
+ CLST clst_data;
+
+- err = attr_is_frame_compressed(
+- ni, attr, vcn >> attr->nres.c_unit, &clst_data);
++ err = attr_is_frame_compressed(ni, attr,
++ vcn >> attr->nres.c_unit,
++ &clst_data, &run);
+ if (err)
+ break;
+ if (clst_data < NTFS_LZNT_CLUSTERS)
+@@ -2097,8 +2035,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ if (vbo + dlen >= end)
+ flags |= FIEMAP_EXTENT_LAST;
+
+- err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo,
+- dlen, flags);
++ err = fiemap_fill_next_extent(fieinfo, vbo, lbo, dlen,
++ flags);
+
+ if (err < 0)
+ break;
+@@ -2119,8 +2057,7 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ if (vbo + bytes >= end)
+ flags |= FIEMAP_EXTENT_LAST;
+
+- err = fiemap_fill_next_extent_k(fieinfo, fe_k, vbo, lbo, bytes,
+- flags);
++ err = fiemap_fill_next_extent(fieinfo, vbo, lbo, bytes, flags);
+ if (err < 0)
+ break;
+ if (err == 1) {
+@@ -2131,19 +2068,8 @@ int ni_fiemap(struct ntfs_inode *ni, struct fiemap_extent_info *fieinfo,
+ vbo += bytes;
+ }
+
+- up_read(run_lock);
+-
+- /*
+- * Copy to user memory out of lock
+- */
+- if (copy_to_user(fieinfo->fi_extents_start, fe_k,
+- fieinfo->fi_extents_max *
+- sizeof(struct fiemap_extent))) {
+- err = -EFAULT;
+- }
+-
+ out:
+- kfree(fe_k);
++ run_close(&run);
+ return err;
+ }
+
+@@ -2674,7 +2600,8 @@ int ni_read_frame(struct ntfs_inode *ni, u64 frame_vbo, struct page **pages,
+ down_write(&ni->file.run_lock);
+ run_truncate_around(run, le64_to_cpu(attr->nres.svcn));
+ frame = frame_vbo >> (cluster_bits + NTFS_LZNT_CUNIT);
+- err = attr_is_frame_compressed(ni, attr, frame, &clst_data);
++ err = attr_is_frame_compressed(ni, attr, frame, &clst_data,
++ run);
+ up_write(&ni->file.run_lock);
+ if (err)
+ goto out1;
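
The ni_fiemap() rewrite deletes the fiemap_fill_next_extent_k() clone and its kmalloc'd shadow array. Those existed so that copy_to_user() never ran under the shared run_lock; the new code sidesteps the problem by loading mappings into a private, function-local runs_tree, letting the stock fiemap_fill_next_extent() copy extents to userspace with no lock held at all. The lifetime pattern (kernel-context sketch):

    struct runs_tree run;

    run_init(&run);   /* empty, privately owned map - no shared lock needed */
    /* ...attr_load_runs_vcn()/run_lookup_entry() populate it on demand... */
    run_close(&run);  /* frees whatever was loaded */
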
+diff --git a/fs/ntfs3/inode.c b/fs/ntfs3/inode.c
+index 52b80fd1591478..af7c0cbba74e3d 100644
+--- a/fs/ntfs3/inode.c
++++ b/fs/ntfs3/inode.c
+@@ -604,7 +604,8 @@ static noinline int ntfs_get_block_vbo(struct inode *inode, u64 vbo,
+
+ bytes = ((u64)len << cluster_bits) - off;
+
+- if (lcn == SPARSE_LCN) {
++ if (lcn >= sbi->used.bitmap.nbits) {
++ /* This case includes resident/compressed/sparse. */
+ if (!create) {
+ if (bh->b_size > bytes)
+ bh->b_size = bytes;
+diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
+index cfe9d3bf07f910..c98e6868bfbadb 100644
+--- a/fs/ntfs3/ntfs_fs.h
++++ b/fs/ntfs3/ntfs_fs.h
+@@ -446,7 +446,8 @@ int attr_wof_frame_info(struct ntfs_inode *ni, struct ATTRIB *attr,
+ struct runs_tree *run, u64 frame, u64 frames,
+ u8 frame_bits, u32 *ondisk_size, u64 *vbo_data);
+ int attr_is_frame_compressed(struct ntfs_inode *ni, struct ATTRIB *attr,
+- CLST frame, CLST *clst_data);
++ CLST frame, CLST *clst_data,
++ struct runs_tree *run);
+ int attr_allocate_frame(struct ntfs_inode *ni, CLST frame, size_t compr_size,
+ u64 new_valid);
+ int attr_collapse_range(struct ntfs_inode *ni, u64 vbo, u64 bytes);
+diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
+index dc9f76ab7e13c3..0dffd6a44d39dc 100644
+--- a/fs/ocfs2/quota_global.c
++++ b/fs/ocfs2/quota_global.c
+@@ -881,7 +881,7 @@ static int ocfs2_get_next_id(struct super_block *sb, struct kqid *qid)
+ int status = 0;
+
+ trace_ocfs2_get_next_id(from_kqid(&init_user_ns, *qid), type);
+- if (!sb_has_quota_loaded(sb, type)) {
++ if (!sb_has_quota_active(sb, type)) {
+ status = -ESRCH;
+ goto out;
+ }
+diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
+index 257f13cdd14c1f..4b4fa58cd32ff0 100644
+--- a/fs/ocfs2/quota_local.c
++++ b/fs/ocfs2/quota_local.c
+@@ -864,6 +864,7 @@ static int ocfs2_local_free_info(struct super_block *sb, int type)
+ brelse(oinfo->dqi_libh);
+ brelse(oinfo->dqi_lqi_bh);
+ kfree(oinfo);
++ info->dqi_priv = NULL;
+ return status;
+ }
+
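
The ocfs2 pair is defensive teardown. ocfs2_get_next_id() now demands that quota be genuinely active - sb_has_quota_loaded() stays true while quota is merely suspended - and ocfs2_local_free_info() clears dqi_priv after freeing it, so a late caller hits a deterministic NULL pointer instead of freed memory. The generic free-and-poison pattern (sketch, hypothetical 'ctx'):

    kfree(ctx->priv);
    ctx->priv = NULL;  /* a stale access becomes a clean NULL check or oops,
                          not a silent use-after-free */
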
+diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
+index 59571737e16771..b8640f36ebf8ab 100644
+--- a/fs/proc/task_mmu.c
++++ b/fs/proc/task_mmu.c
+@@ -1516,7 +1516,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
+ flags |= PM_FILE;
+
+ for (; addr != end; addr += PAGE_SIZE, idx++) {
+- unsigned long cur_flags = flags;
++ u64 cur_flags = flags;
+ pagemap_entry_t pme;
+
+ if (page && (flags & PM_PRESENT) &&
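
The task_mmu one-liner widens cur_flags from unsigned long to u64 because pagemap flags such as PM_SOFT_DIRTY occupy bits 55-63 of a pagemap entry; on a 32-bit kernel, unsigned long is 32 bits and silently drops them. A runnable userspace demonstration (bit 55 matches the pagemap ABI's soft-dirty flag):

    #include <stdint.h>
    #include <stdio.h>

    #define PM_SOFT_DIRTY (UINT64_C(1) << 55)

    int main(void)
    {
            uint64_t flags = PM_SOFT_DIRTY;
            unsigned long narrow = (unsigned long)flags; /* lossy if long is 32-bit */

            printf("soft-dirty survived: %s\n",
                   ((uint64_t)narrow & PM_SOFT_DIRTY) ? "yes" : "no (truncated)");
            return 0;
    }
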
+diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
+index f5b6df82e8570a..bff8d0dd74fe7d 100644
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -27,18 +27,18 @@
+ #include "cifs_unicode.h"
+
+ /* security id for everyone/world system group */
+-static const struct cifs_sid sid_everyone = {
++static const struct smb_sid sid_everyone = {
+ 1, 1, {0, 0, 0, 0, 0, 1}, {0} };
+ /* security id for Authenticated Users system group */
+-static const struct cifs_sid sid_authusers = {
++static const struct smb_sid sid_authusers = {
+ 1, 1, {0, 0, 0, 0, 0, 5}, {cpu_to_le32(11)} };
+
+ /* S-1-22-1 Unmapped Unix users */
+-static const struct cifs_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
++static const struct smb_sid sid_unix_users = {1, 1, {0, 0, 0, 0, 0, 22},
+ {cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+ /* S-1-22-2 Unmapped Unix groups */
+-static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
++static const struct smb_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
+ {cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+ /*
+@@ -48,17 +48,17 @@ static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22},
+ /* S-1-5-88 MS NFS and Apple style UID/GID/mode */
+
+ /* S-1-5-88-1 Unix uid */
+-static const struct cifs_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
++static const struct smb_sid sid_unix_NFS_users = { 1, 2, {0, 0, 0, 0, 0, 5},
+ {cpu_to_le32(88),
+ cpu_to_le32(1), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+ /* S-1-5-88-2 Unix gid */
+-static const struct cifs_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
++static const struct smb_sid sid_unix_NFS_groups = { 1, 2, {0, 0, 0, 0, 0, 5},
+ {cpu_to_le32(88),
+ cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+ /* S-1-5-88-3 Unix mode */
+-static const struct cifs_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
++static const struct smb_sid sid_unix_NFS_mode = { 1, 2, {0, 0, 0, 0, 0, 5},
+ {cpu_to_le32(88),
+ cpu_to_le32(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} };
+
+@@ -106,7 +106,7 @@ static struct key_type cifs_idmap_key_type = {
+ };
+
+ static char *
+-sid_to_key_str(struct cifs_sid *sidptr, unsigned int type)
++sid_to_key_str(struct smb_sid *sidptr, unsigned int type)
+ {
+ int i, len;
+ unsigned int saval;
+@@ -158,7 +158,7 @@ sid_to_key_str(struct cifs_sid *sidptr, unsigned int type)
+ * the same returns zero, if they do not match returns non-zero.
+ */
+ static int
+-compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
++compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid)
+ {
+ int i;
+ int num_subauth, num_sat, num_saw;
+@@ -204,11 +204,11 @@ compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid)
+ }
+
+ static bool
+-is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group)
++is_well_known_sid(const struct smb_sid *psid, uint32_t *puid, bool is_group)
+ {
+ int i;
+ int num_subauth;
+- const struct cifs_sid *pwell_known_sid;
++ const struct smb_sid *pwell_known_sid;
+
+ if (!psid || (puid == NULL))
+ return false;
+@@ -260,7 +260,7 @@ is_well_known_sid(const struct cifs_sid *psid, uint32_t *puid, bool is_group)
+ }
+
+ static __u16
+-cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
++cifs_copy_sid(struct smb_sid *dst, const struct smb_sid *src)
+ {
+ int i;
+ __u16 size = 1 + 1 + 6;
+@@ -277,11 +277,11 @@ cifs_copy_sid(struct cifs_sid *dst, const struct cifs_sid *src)
+ }
+
+ static int
+-id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid)
++id_to_sid(unsigned int cid, uint sidtype, struct smb_sid *ssid)
+ {
+ int rc;
+ struct key *sidkey;
+- struct cifs_sid *ksid;
++ struct smb_sid *ksid;
+ unsigned int ksid_size;
+ char desc[3 + 10 + 1]; /* 3 byte prefix + 10 bytes for value + NULL */
+ const struct cred *saved_cred;
+@@ -312,8 +312,8 @@ id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid)
+ * it could be.
+ */
+ ksid = sidkey->datalen <= sizeof(sidkey->payload) ?
+- (struct cifs_sid *)&sidkey->payload :
+- (struct cifs_sid *)sidkey->payload.data[0];
++ (struct smb_sid *)&sidkey->payload :
++ (struct smb_sid *)sidkey->payload.data[0];
+
+ ksid_size = CIFS_SID_BASE_SIZE + (ksid->num_subauth * sizeof(__le32));
+ if (ksid_size > sidkey->datalen) {
+@@ -336,7 +336,7 @@ id_to_sid(unsigned int cid, uint sidtype, struct cifs_sid *ssid)
+ }
+
+ int
+-sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
++sid_to_id(struct cifs_sb_info *cifs_sb, struct smb_sid *psid,
+ struct cifs_fattr *fattr, uint sidtype)
+ {
+ int rc = 0;
+@@ -515,43 +515,43 @@ exit_cifs_idmap(void)
+ }
+
+ /* copy ntsd, owner sid, and group sid from a security descriptor to another */
+-static __u32 copy_sec_desc(const struct cifs_ntsd *pntsd,
+- struct cifs_ntsd *pnntsd,
++static __u32 copy_sec_desc(const struct smb_ntsd *pntsd,
++ struct smb_ntsd *pnntsd,
+ __u32 sidsoffset,
+- struct cifs_sid *pownersid,
+- struct cifs_sid *pgrpsid)
++ struct smb_sid *pownersid,
++ struct smb_sid *pgrpsid)
+ {
+- struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
+- struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
++ struct smb_sid *owner_sid_ptr, *group_sid_ptr;
++ struct smb_sid *nowner_sid_ptr, *ngroup_sid_ptr;
+
+ /* copy security descriptor control portion */
+ pnntsd->revision = pntsd->revision;
+ pnntsd->type = pntsd->type;
+- pnntsd->dacloffset = cpu_to_le32(sizeof(struct cifs_ntsd));
++ pnntsd->dacloffset = cpu_to_le32(sizeof(struct smb_ntsd));
+ pnntsd->sacloffset = 0;
+ pnntsd->osidoffset = cpu_to_le32(sidsoffset);
+- pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct cifs_sid));
++ pnntsd->gsidoffset = cpu_to_le32(sidsoffset + sizeof(struct smb_sid));
+
+ /* copy owner sid */
+ if (pownersid)
+ owner_sid_ptr = pownersid;
+ else
+- owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++ owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->osidoffset));
+- nowner_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset);
++ nowner_sid_ptr = (struct smb_sid *)((char *)pnntsd + sidsoffset);
+ cifs_copy_sid(nowner_sid_ptr, owner_sid_ptr);
+
+ /* copy group sid */
+ if (pgrpsid)
+ group_sid_ptr = pgrpsid;
+ else
+- group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++ group_sid_ptr = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->gsidoffset));
+- ngroup_sid_ptr = (struct cifs_sid *)((char *)pnntsd + sidsoffset +
+- sizeof(struct cifs_sid));
++ ngroup_sid_ptr = (struct smb_sid *)((char *)pnntsd + sidsoffset +
++ sizeof(struct smb_sid));
+ cifs_copy_sid(ngroup_sid_ptr, group_sid_ptr);
+
+- return sidsoffset + (2 * sizeof(struct cifs_sid));
++ return sidsoffset + (2 * sizeof(struct smb_sid));
+ }
+
+
+@@ -666,7 +666,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
+ return;
+ }
+
+-static __u16 cifs_copy_ace(struct cifs_ace *dst, struct cifs_ace *src, struct cifs_sid *psid)
++static __u16 cifs_copy_ace(struct smb_ace *dst, struct smb_ace *src, struct smb_sid *psid)
+ {
+ __u16 size = 1 + 1 + 2 + 4;
+
+@@ -685,8 +685,8 @@ static __u16 cifs_copy_ace(struct cifs_ace *dst, struct cifs_ace *src, struct ci
+ return size;
+ }
+
+-static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
+- const struct cifs_sid *psid, __u64 nmode,
++static __u16 fill_ace_for_sid(struct smb_ace *pntace,
++ const struct smb_sid *psid, __u64 nmode,
+ umode_t bits, __u8 access_type,
+ bool allow_delete_child)
+ {
+@@ -723,7 +723,7 @@ static __u16 fill_ace_for_sid(struct cifs_ace *pntace,
+
+
+ #ifdef CONFIG_CIFS_DEBUG2
+-static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
++static void dump_ace(struct smb_ace *pace, char *end_of_acl)
+ {
+ int num_subauth;
+
+@@ -758,15 +758,15 @@ static void dump_ace(struct cifs_ace *pace, char *end_of_acl)
+ }
+ #endif
+
+-static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
+- struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
++static void parse_dacl(struct smb_acl *pdacl, char *end_of_acl,
++ struct smb_sid *pownersid, struct smb_sid *pgrpsid,
+ struct cifs_fattr *fattr, bool mode_from_special_sid)
+ {
+ int i;
+ int num_aces = 0;
+ int acl_size;
+ char *acl_base;
+- struct cifs_ace **ppace;
++ struct smb_ace **ppace;
+
+ /* BB need to add parm so we can store the SID BB */
+
+@@ -793,21 +793,21 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
+ fattr->cf_mode &= ~(0777);
+
+ acl_base = (char *)pdacl;
+- acl_size = sizeof(struct cifs_acl);
++ acl_size = sizeof(struct smb_acl);
+
+ num_aces = le32_to_cpu(pdacl->num_aces);
+ if (num_aces > 0) {
+ umode_t denied_mode = 0;
+
+- if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
++ if (num_aces > ULONG_MAX / sizeof(struct smb_ace *))
+ return;
+- ppace = kmalloc_array(num_aces, sizeof(struct cifs_ace *),
++ ppace = kmalloc_array(num_aces, sizeof(struct smb_ace *),
+ GFP_KERNEL);
+ if (!ppace)
+ return;
+
+ for (i = 0; i < num_aces; ++i) {
+- ppace[i] = (struct cifs_ace *) (acl_base + acl_size);
++ ppace[i] = (struct smb_ace *) (acl_base + acl_size);
+ #ifdef CONFIG_CIFS_DEBUG2
+ dump_ace(ppace[i], end_of_acl);
+ #endif
+@@ -849,7 +849,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
+
+ /* memcpy((void *)(&(cifscred->aces[i])),
+ (void *)ppace[i],
+- sizeof(struct cifs_ace)); */
++ sizeof(struct smb_ace)); */
+
+ acl_base = (char *)ppace[i];
+ acl_size = le16_to_cpu(ppace[i]->size);
+@@ -861,7 +861,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
+ return;
+ }
+
+-unsigned int setup_authusers_ACE(struct cifs_ace *pntace)
++unsigned int setup_authusers_ACE(struct smb_ace *pntace)
+ {
+ int i;
+ unsigned int ace_size = 20;
+@@ -885,12 +885,17 @@ unsigned int setup_authusers_ACE(struct cifs_ace *pntace)
+ * Fill in the special SID based on the mode. See
+ * https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx
+ */
+-unsigned int setup_special_mode_ACE(struct cifs_ace *pntace, __u64 nmode)
++unsigned int setup_special_mode_ACE(struct smb_ace *pntace,
++ bool posix,
++ __u64 nmode)
+ {
+ int i;
+ unsigned int ace_size = 28;
+
+- pntace->type = ACCESS_DENIED_ACE_TYPE;
++ if (posix)
++ pntace->type = ACCESS_ALLOWED_ACE_TYPE;
++ else
++ pntace->type = ACCESS_DENIED_ACE_TYPE;
+ pntace->flags = 0x0;
+ pntace->access_req = 0;
+ pntace->sid.num_subauth = 3;
+@@ -907,7 +912,7 @@ unsigned int setup_special_mode_ACE(struct cifs_ace *pntace, __u64 nmode)
+ return ace_size;
+ }
+
+-unsigned int setup_special_user_owner_ACE(struct cifs_ace *pntace)
++unsigned int setup_special_user_owner_ACE(struct smb_ace *pntace)
+ {
+ int i;
+ unsigned int ace_size = 28;
+@@ -930,10 +935,11 @@ unsigned int setup_special_user_owner_ACE(struct cifs_ace *pntace)
+ }
+
+ static void populate_new_aces(char *nacl_base,
+- struct cifs_sid *pownersid,
+- struct cifs_sid *pgrpsid,
++ struct smb_sid *pownersid,
++ struct smb_sid *pgrpsid,
+ __u64 *pnmode, u32 *pnum_aces, u16 *pnsize,
+- bool modefromsid)
++ bool modefromsid,
++ bool posix)
+ {
+ __u64 nmode;
+ u32 num_aces = 0;
+@@ -944,19 +950,21 @@ static void populate_new_aces(char *nacl_base,
+ __u64 deny_user_mode = 0;
+ __u64 deny_group_mode = 0;
+ bool sticky_set = false;
+- struct cifs_ace *pnntace = NULL;
++ struct smb_ace *pnntace = NULL;
+
+ nmode = *pnmode;
+ num_aces = *pnum_aces;
+ nsize = *pnsize;
+
+- if (modefromsid) {
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+- nsize += setup_special_mode_ACE(pnntace, nmode);
+- num_aces++;
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
+- nsize += setup_authusers_ACE(pnntace);
++ if (modefromsid || posix) {
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
++ nsize += setup_special_mode_ACE(pnntace, posix, nmode);
+ num_aces++;
++ if (modefromsid) {
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
++ nsize += setup_authusers_ACE(pnntace);
++ num_aces++;
++ }
+ goto set_size;
+ }
+
+@@ -967,7 +975,7 @@ static void populate_new_aces(char *nacl_base,
+ * updated in the inode.
+ */
+
+- if (!memcmp(pownersid, pgrpsid, sizeof(struct cifs_sid))) {
++ if (!memcmp(pownersid, pgrpsid, sizeof(struct smb_sid))) {
+ /*
+ * Case when owner and group SIDs are the same.
+ * Set the more restrictive of the two modes.
+@@ -992,7 +1000,7 @@ static void populate_new_aces(char *nacl_base,
+ sticky_set = true;
+
+ if (deny_user_mode) {
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pownersid, deny_user_mode,
+ 0700, ACCESS_DENIED, false);
+ num_aces++;
+@@ -1000,31 +1008,31 @@ static void populate_new_aces(char *nacl_base,
+
+ /* Group DENY ACE does not conflict with owner ALLOW ACE. Keep in preferred order*/
+ if (deny_group_mode && !(deny_group_mode & (user_mode >> 3))) {
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
+ 0070, ACCESS_DENIED, false);
+ num_aces++;
+ }
+
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pownersid, user_mode,
+ 0700, ACCESS_ALLOWED, true);
+ num_aces++;
+
+ /* Group DENY ACE conflicts with owner ALLOW ACE. So keep it after. */
+ if (deny_group_mode && (deny_group_mode & (user_mode >> 3))) {
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pgrpsid, deny_group_mode,
+ 0070, ACCESS_DENIED, false);
+ num_aces++;
+ }
+
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, pgrpsid, group_mode,
+ 0070, ACCESS_ALLOWED, !sticky_set);
+ num_aces++;
+
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
+ nsize += fill_ace_for_sid(pnntace, &sid_everyone, other_mode,
+ 0007, ACCESS_ALLOWED, !sticky_set);
+ num_aces++;
+@@ -1034,31 +1042,31 @@ static void populate_new_aces(char *nacl_base,
+ *pnsize = nsize;
+ }
+
+-static __u16 replace_sids_and_copy_aces(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
+- struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
+- struct cifs_sid *pnownersid, struct cifs_sid *pngrpsid)
++static __u16 replace_sids_and_copy_aces(struct smb_acl *pdacl, struct smb_acl *pndacl,
++ struct smb_sid *pownersid, struct smb_sid *pgrpsid,
++ struct smb_sid *pnownersid, struct smb_sid *pngrpsid)
+ {
+ int i;
+ u16 size = 0;
+- struct cifs_ace *pntace = NULL;
++ struct smb_ace *pntace = NULL;
+ char *acl_base = NULL;
+ u32 src_num_aces = 0;
+ u16 nsize = 0;
+- struct cifs_ace *pnntace = NULL;
++ struct smb_ace *pnntace = NULL;
+ char *nacl_base = NULL;
+ u16 ace_size = 0;
+
+ acl_base = (char *)pdacl;
+- size = sizeof(struct cifs_acl);
++ size = sizeof(struct smb_acl);
+ src_num_aces = le32_to_cpu(pdacl->num_aces);
+
+ nacl_base = (char *)pndacl;
+- nsize = sizeof(struct cifs_acl);
++ nsize = sizeof(struct smb_acl);
+
+ /* Go through all the ACEs */
+ for (i = 0; i < src_num_aces; ++i) {
+- pntace = (struct cifs_ace *) (acl_base + size);
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
++ pntace = (struct smb_ace *) (acl_base + size);
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
+
+ if (pnownersid && compare_sids(&pntace->sid, pownersid) == 0)
+ ace_size = cifs_copy_ace(pnntace, pntace, pnownersid);
+@@ -1074,48 +1082,48 @@ static __u16 replace_sids_and_copy_aces(struct cifs_acl *pdacl, struct cifs_acl
+ return nsize;
+ }
+
+-static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
+- struct cifs_sid *pownersid, struct cifs_sid *pgrpsid,
+- __u64 *pnmode, bool mode_from_sid)
++static int set_chmod_dacl(struct smb_acl *pdacl, struct smb_acl *pndacl,
++ struct smb_sid *pownersid, struct smb_sid *pgrpsid,
++ __u64 *pnmode, bool mode_from_sid, bool posix)
+ {
+ int i;
+ u16 size = 0;
+- struct cifs_ace *pntace = NULL;
++ struct smb_ace *pntace = NULL;
+ char *acl_base = NULL;
+ u32 src_num_aces = 0;
+ u16 nsize = 0;
+- struct cifs_ace *pnntace = NULL;
++ struct smb_ace *pnntace = NULL;
+ char *nacl_base = NULL;
+ u32 num_aces = 0;
+ bool new_aces_set = false;
+
+ /* Assuming that pndacl and pnmode are never NULL */
+ nacl_base = (char *)pndacl;
+- nsize = sizeof(struct cifs_acl);
++ nsize = sizeof(struct smb_acl);
+
+ /* If pdacl is NULL, we don't have a src. Simply populate new ACL. */
+- if (!pdacl) {
++ if (!pdacl || posix) {
+ populate_new_aces(nacl_base,
+ pownersid, pgrpsid,
+ pnmode, &num_aces, &nsize,
+- mode_from_sid);
++ mode_from_sid, posix);
+ goto finalize_dacl;
+ }
+
+ acl_base = (char *)pdacl;
+- size = sizeof(struct cifs_acl);
++ size = sizeof(struct smb_acl);
+ src_num_aces = le32_to_cpu(pdacl->num_aces);
+
+ /* Retain old ACEs which we can retain */
+ for (i = 0; i < src_num_aces; ++i) {
+- pntace = (struct cifs_ace *) (acl_base + size);
++ pntace = (struct smb_ace *) (acl_base + size);
+
+ if (!new_aces_set && (pntace->flags & INHERITED_ACE)) {
+ /* Place the new ACEs in between existing explicit and inherited */
+ populate_new_aces(nacl_base,
+ pownersid, pgrpsid,
+ pnmode, &num_aces, &nsize,
+- mode_from_sid);
++ mode_from_sid, posix);
+
+ new_aces_set = true;
+ }
+@@ -1130,7 +1138,7 @@ static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
+ }
+
+ /* update the pointer to the next ACE to populate*/
+- pnntace = (struct cifs_ace *) (nacl_base + nsize);
++ pnntace = (struct smb_ace *) (nacl_base + nsize);
+
+ nsize += cifs_copy_ace(pnntace, pntace, NULL);
+ num_aces++;
+@@ -1144,7 +1152,7 @@ static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
+ populate_new_aces(nacl_base,
+ pownersid, pgrpsid,
+ pnmode, &num_aces, &nsize,
+- mode_from_sid);
++ mode_from_sid, posix);
+
+ new_aces_set = true;
+ }
+@@ -1156,7 +1164,7 @@ static int set_chmod_dacl(struct cifs_acl *pdacl, struct cifs_acl *pndacl,
+ return 0;
+ }
+
+-static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
++static int parse_sid(struct smb_sid *psid, char *end_of_acl)
+ {
+ /* BB need to add parm so we can store the SID BB */
+
+@@ -1191,24 +1199,24 @@ static int parse_sid(struct cifs_sid *psid, char *end_of_acl)
+
+ /* Convert CIFS ACL to POSIX form */
+ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
+- struct cifs_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr,
++ struct smb_ntsd *pntsd, int acl_len, struct cifs_fattr *fattr,
+ bool get_mode_from_special_sid)
+ {
+ int rc = 0;
+- struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
+- struct cifs_acl *dacl_ptr; /* no need for SACL ptr */
++ struct smb_sid *owner_sid_ptr, *group_sid_ptr;
++ struct smb_acl *dacl_ptr; /* no need for SACL ptr */
+ char *end_of_acl = ((char *)pntsd) + acl_len;
+ __u32 dacloffset;
+
+ if (pntsd == NULL)
+ return -EIO;
+
+- owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++ owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->osidoffset));
+- group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++ group_sid_ptr = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->gsidoffset));
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+- dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
++ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ cifs_dbg(NOISY, "revision %d type 0x%x ooffset 0x%x goffset 0x%x sacloffset 0x%x dacloffset 0x%x\n",
+ pntsd->revision, pntsd->type, le32_to_cpu(pntsd->osidoffset),
+ le32_to_cpu(pntsd->gsidoffset),
+@@ -1249,38 +1257,38 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
+ }
+
+ /* Convert permission bits from mode to equivalent CIFS ACL */
+-static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
++static int build_sec_desc(struct smb_ntsd *pntsd, struct smb_ntsd *pnntsd,
+ __u32 secdesclen, __u32 *pnsecdesclen, __u64 *pnmode, kuid_t uid, kgid_t gid,
+- bool mode_from_sid, bool id_from_sid, int *aclflag)
++ bool mode_from_sid, bool id_from_sid, bool posix, int *aclflag)
+ {
+ int rc = 0;
+ __u32 dacloffset;
+ __u32 ndacloffset;
+ __u32 sidsoffset;
+- struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
+- struct cifs_sid *nowner_sid_ptr = NULL, *ngroup_sid_ptr = NULL;
+- struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */
+- struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
++ struct smb_sid *owner_sid_ptr, *group_sid_ptr;
++ struct smb_sid *nowner_sid_ptr = NULL, *ngroup_sid_ptr = NULL;
++ struct smb_acl *dacl_ptr = NULL; /* no need for SACL ptr */
++ struct smb_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
+ char *end_of_acl = ((char *)pntsd) + secdesclen;
+ u16 size = 0;
+
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+ if (dacloffset) {
+- dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
++ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ if (end_of_acl < (char *)dacl_ptr + le16_to_cpu(dacl_ptr->size)) {
+ cifs_dbg(VFS, "Server returned illegal ACL size\n");
+ return -EINVAL;
+ }
+ }
+
+- owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++ owner_sid_ptr = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->osidoffset));
+- group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
++ group_sid_ptr = (struct smb_sid *)((char *)pntsd +
+ le32_to_cpu(pntsd->gsidoffset));
+
+ if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
+- ndacloffset = sizeof(struct cifs_ntsd);
+- ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
++ ndacloffset = sizeof(struct smb_ntsd);
++ ndacl_ptr = (struct smb_acl *)((char *)pnntsd + ndacloffset);
+ ndacl_ptr->revision =
+ dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
+
+@@ -1288,7 +1296,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+ ndacl_ptr->num_aces = cpu_to_le32(0);
+
+ rc = set_chmod_dacl(dacl_ptr, ndacl_ptr, owner_sid_ptr, group_sid_ptr,
+- pnmode, mode_from_sid);
++ pnmode, mode_from_sid, posix);
+
+ sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
+ /* copy the non-dacl portion of secdesc */
+@@ -1297,15 +1305,15 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+
+ *aclflag |= CIFS_ACL_DACL;
+ } else {
+- ndacloffset = sizeof(struct cifs_ntsd);
+- ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
++ ndacloffset = sizeof(struct smb_ntsd);
++ ndacl_ptr = (struct smb_acl *)((char *)pnntsd + ndacloffset);
+ ndacl_ptr->revision =
+ dacloffset ? dacl_ptr->revision : cpu_to_le16(ACL_REVISION);
+ ndacl_ptr->num_aces = dacl_ptr ? dacl_ptr->num_aces : 0;
+
+ if (uid_valid(uid)) { /* chown */
+ uid_t id;
+- nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid),
++ nowner_sid_ptr = kzalloc(sizeof(struct smb_sid),
+ GFP_KERNEL);
+ if (!nowner_sid_ptr) {
+ rc = -ENOMEM;
+@@ -1334,7 +1342,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+ }
+ if (gid_valid(gid)) { /* chgrp */
+ gid_t id;
+- ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid),
++ ngroup_sid_ptr = kzalloc(sizeof(struct smb_sid),
+ GFP_KERNEL);
+ if (!ngroup_sid_ptr) {
+ rc = -ENOMEM;
+@@ -1385,11 +1393,11 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
+ }
+
+ #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
+-struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
++struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ const struct cifs_fid *cifsfid, u32 *pacllen,
+ u32 __maybe_unused unused)
+ {
+- struct cifs_ntsd *pntsd = NULL;
++ struct smb_ntsd *pntsd = NULL;
+ unsigned int xid;
+ int rc;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+@@ -1410,10 +1418,10 @@ struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ return pntsd;
+ }
+
+-static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
++static struct smb_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+ const char *path, u32 *pacllen)
+ {
+- struct cifs_ntsd *pntsd = NULL;
++ struct smb_ntsd *pntsd = NULL;
+ int oplock = 0;
+ unsigned int xid;
+ int rc;
+@@ -1454,11 +1462,11 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
+ }
+
+ /* Retrieve an ACL from the server */
+-struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
++struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
+ struct inode *inode, const char *path,
+ u32 *pacllen, u32 info)
+ {
+- struct cifs_ntsd *pntsd = NULL;
++ struct smb_ntsd *pntsd = NULL;
+ struct cifsFileInfo *open_file = NULL;
+
+ if (inode)
+@@ -1472,7 +1480,7 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
+ }
+
+ /* Set an ACL on the server */
+-int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
++int set_cifs_acl(struct smb_ntsd *pnntsd, __u32 acllen,
+ struct inode *inode, const char *path, int aclflag)
+ {
+ int oplock = 0;
+@@ -1528,7 +1536,7 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+ struct inode *inode, bool mode_from_special_sid,
+ const char *path, const struct cifs_fid *pfid)
+ {
+- struct cifs_ntsd *pntsd = NULL;
++ struct smb_ntsd *pntsd = NULL;
+ u32 acllen = 0;
+ int rc = 0;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+@@ -1580,13 +1588,14 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+ __u32 secdesclen = 0;
+ __u32 nsecdesclen = 0;
+ __u32 dacloffset = 0;
+- struct cifs_acl *dacl_ptr = NULL;
+- struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
+- struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
++ struct smb_acl *dacl_ptr = NULL;
++ struct smb_ntsd *pntsd = NULL; /* acl obtained from server */
++ struct smb_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+ struct smb_version_operations *ops;
+ bool mode_from_sid, id_from_sid;
++ bool posix = tlink_tcon(tlink)->posix_extensions;
+ const u32 info = 0;
+
+ if (IS_ERR(tlink))
+@@ -1622,21 +1631,22 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+ id_from_sid = false;
+
+ /* Potentially, five new ACEs can be added to the ACL for U,G,O mapping */
+- nsecdesclen = secdesclen;
+ if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
+- if (mode_from_sid)
+- nsecdesclen += 2 * sizeof(struct cifs_ace);
++ if (posix)
++ nsecdesclen = 1 * sizeof(struct smb_ace);
++ else if (mode_from_sid)
++ nsecdesclen = secdesclen + (2 * sizeof(struct smb_ace));
+ else /* cifsacl */
+- nsecdesclen += 5 * sizeof(struct cifs_ace);
++ nsecdesclen = secdesclen + (5 * sizeof(struct smb_ace));
+ } else { /* chown */
+ /* When ownership changes, the new owner's SID length could be different */
+- nsecdesclen = sizeof(struct cifs_ntsd) + (sizeof(struct cifs_sid) * 2);
++ nsecdesclen = sizeof(struct smb_ntsd) + (sizeof(struct smb_sid) * 2);
+ dacloffset = le32_to_cpu(pntsd->dacloffset);
+ if (dacloffset) {
+- dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
++ dacl_ptr = (struct smb_acl *)((char *)pntsd + dacloffset);
+ if (mode_from_sid)
+ nsecdesclen +=
+- le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct cifs_ace);
++ le32_to_cpu(dacl_ptr->num_aces) * sizeof(struct smb_ace);
+ else /* cifsacl */
+ nsecdesclen += le16_to_cpu(dacl_ptr->size);
+ }
+@@ -1657,7 +1667,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+ }
+
+ rc = build_sec_desc(pntsd, pnntsd, secdesclen, &nsecdesclen, pnmode, uid, gid,
+- mode_from_sid, id_from_sid, &aclflag);
++ mode_from_sid, id_from_sid, posix, &aclflag);
+
+ cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
+
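
The chmod sizing above distinguishes three schemes: with POSIX extensions a single ACE carries the whole mode in a freshly built descriptor, mode-from-SID reserves room for two more ACEs on top of the existing descriptor, and classic cifsacl reserves up to five for the U,G,O mapping. A hedged sketch of just that computation (names taken from the hunk; not a drop-in helper):

/* Sketch of the nsecdesclen rule for the chmod path above. */
static u32 chmod_secdesc_len(bool posix, bool mode_from_sid, u32 secdesclen)
{
	if (posix)			/* one ACE encodes the whole mode */
		return 1 * sizeof(struct smb_ace);
	if (mode_from_sid)		/* two special-SID ACEs appended */
		return secdesclen + 2 * sizeof(struct smb_ace);
	return secdesclen + 5 * sizeof(struct smb_ace);	/* U,G,O ACEs */
}
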
+diff --git a/fs/smb/client/cifsacl.h b/fs/smb/client/cifsacl.h
+index ccbfc754bd3c7f..cbaed8038e3654 100644
+--- a/fs/smb/client/cifsacl.h
++++ b/fs/smb/client/cifsacl.h
+@@ -33,9 +33,9 @@
+ * Security Descriptor length containing DACL with 3 ACEs (one each for
+ * owner, group and world).
+ */
+-#define DEFAULT_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + \
+- sizeof(struct cifs_acl) + \
+- (sizeof(struct cifs_ace) * 4))
++#define DEFAULT_SEC_DESC_LEN (sizeof(struct smb_ntsd) + \
++ sizeof(struct smb_acl) + \
++ (sizeof(struct smb_ace) * 4))
+
+ /*
+ * Maximum size of a string representation of a SID:
+@@ -55,7 +55,7 @@
+ #define SID_STRING_BASE_SIZE (2 + 3 + 15 + 1)
+ #define SID_STRING_SUBAUTH_SIZE (11) /* size of a single subauth string */
+
+-struct cifs_ntsd {
++struct smb_ntsd {
+ __le16 revision; /* revision level */
+ __le16 type;
+ __le32 osidoffset;
+@@ -64,17 +64,17 @@ struct cifs_ntsd {
+ __le32 dacloffset;
+ } __attribute__((packed));
+
+-struct cifs_sid {
++struct smb_sid {
+ __u8 revision; /* revision level */
+ __u8 num_subauth;
+ __u8 authority[NUM_AUTHS];
+ __le32 sub_auth[SID_MAX_SUB_AUTHORITIES]; /* sub_auth[num_subauth] */
+ } __attribute__((packed));
+
+-/* size of a struct cifs_sid, sans sub_auth array */
++/* size of a struct smb_sid, sans sub_auth array */
+ #define CIFS_SID_BASE_SIZE (1 + 1 + NUM_AUTHS)
+
+-struct cifs_acl {
++struct smb_acl {
+ __le16 revision; /* revision level */
+ __le16 size;
+ __le32 num_aces;
+@@ -111,12 +111,12 @@ struct cifs_acl {
+ #define SUCCESSFUL_ACCESS_ACE_FLAG 0x40
+ #define FAILED_ACCESS_ACE_FLAG 0x80
+
+-struct cifs_ace {
++struct smb_ace {
+ __u8 type; /* see above and MS-DTYP 2.4.4.1 */
+ __u8 flags;
+ __le16 size;
+ __le32 access_req;
+- struct cifs_sid sid; /* ie UUID of user or group who gets these perms */
++ struct smb_sid sid; /* ie UUID of user or group who gets these perms */
+ } __attribute__((packed));
+
+ /*
+@@ -194,6 +194,6 @@ struct owner_group_sids {
+ * Minimum security descriptor can be one without any SACL and DACL and can
+ * consist of revision, type, and two sids of minimum size for owner and group
+ */
+-#define MIN_SEC_DESC_LEN (sizeof(struct cifs_ntsd) + (2 * MIN_SID_LEN))
++#define MIN_SEC_DESC_LEN (sizeof(struct smb_ntsd) + (2 * MIN_SID_LEN))
+
+ #endif /* _CIFSACL_H */
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index 6ed0f2548232f9..bbb0ef18d7b8c8 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -2015,6 +2015,7 @@ exit_cifs(void)
+ destroy_workqueue(decrypt_wq);
+ destroy_workqueue(fileinfo_put_wq);
+ destroy_workqueue(serverclose_wq);
++ destroy_workqueue(cfid_put_wq);
+ destroy_workqueue(cifsiod_wq);
+ cifs_proc_clean();
+ }
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index 6b57b167a49d80..43b42eca6780cf 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -202,10 +202,10 @@ struct cifs_cred {
+ int gid;
+ int mode;
+ int cecount;
+- struct cifs_sid osid;
+- struct cifs_sid gsid;
++ struct smb_sid osid;
++ struct smb_sid gsid;
+ struct cifs_ntace *ntaces;
+- struct cifs_ace *aces;
++ struct smb_ace *aces;
+ };
+
+ struct cifs_open_info_data {
+@@ -231,8 +231,8 @@ struct cifs_open_info_data {
+ unsigned int eas_len;
+ } wsl;
+ char *symlink_target;
+- struct cifs_sid posix_owner;
+- struct cifs_sid posix_group;
++ struct smb_sid posix_owner;
++ struct smb_sid posix_group;
+ union {
+ struct smb2_file_all_info fi;
+ struct smb311_posix_qinfo posix_fi;
+@@ -539,12 +539,12 @@ struct smb_version_operations {
+ int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
+ const char *, const void *, const __u16,
+ const struct nls_table *, struct cifs_sb_info *);
+- struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
+- const char *, u32 *, u32);
+- struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *,
+- const struct cifs_fid *, u32 *, u32);
+- int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
+- int);
++ struct smb_ntsd * (*get_acl)(struct cifs_sb_info *cifssb, struct inode *ino,
++ const char *path, u32 *plen, u32 info);
++ struct smb_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *cifssb,
++ const struct cifs_fid *pfid, u32 *plen, u32 info);
++ int (*set_acl)(struct smb_ntsd *pntsd, __u32 len, struct inode *ino, const char *path,
++ int flag);
+ /* writepages retry size */
+ unsigned int (*wp_retry_size)(struct inode *);
+ /* get mtu credits */
+diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
+index 83692bf60007a0..a151ffffc6f38e 100644
+--- a/fs/smb/client/cifsproto.h
++++ b/fs/smb/client/cifsproto.h
+@@ -223,7 +223,7 @@ extern int cifs_set_file_info(struct inode *inode, struct iattr *attrs,
+ extern int cifs_rename_pending_delete(const char *full_path,
+ struct dentry *dentry,
+ const unsigned int xid);
+-extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
++extern int sid_to_id(struct cifs_sb_info *cifs_sb, struct smb_sid *psid,
+ struct cifs_fattr *fattr, uint sidtype);
+ extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
+ struct cifs_fattr *fattr, struct inode *inode,
+@@ -231,19 +231,21 @@ extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
+ const char *path, const struct cifs_fid *pfid);
+ extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
+ kuid_t uid, kgid_t gid);
+-extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
+- const char *, u32 *, u32);
+-extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
+- const struct cifs_fid *, u32 *, u32);
++extern struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifssb, struct inode *ino,
++ const char *path, u32 *plen, u32 info);
++extern struct smb_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifssb,
++ const struct cifs_fid *pfid, u32 *plen, u32 info);
+ extern struct posix_acl *cifs_get_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, int type);
+ extern int cifs_set_acl(struct mnt_idmap *idmap,
+ struct dentry *dentry, struct posix_acl *acl, int type);
+-extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
+- const char *, int);
+-extern unsigned int setup_authusers_ACE(struct cifs_ace *pace);
+-extern unsigned int setup_special_mode_ACE(struct cifs_ace *pace, __u64 nmode);
+-extern unsigned int setup_special_user_owner_ACE(struct cifs_ace *pace);
++extern int set_cifs_acl(struct smb_ntsd *pntsd, __u32 len, struct inode *ino,
++ const char *path, int flag);
++extern unsigned int setup_authusers_ACE(struct smb_ace *pace);
++extern unsigned int setup_special_mode_ACE(struct smb_ace *pace,
++ bool posix,
++ __u64 nmode);
++extern unsigned int setup_special_user_owner_ACE(struct smb_ace *pace);
+
+ extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
+ extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+@@ -568,9 +570,9 @@ extern int CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
+ const struct nls_table *nls_codepage,
+ struct cifs_sb_info *cifs_sb);
+ extern int CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon,
+- __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
++ __u16 fid, struct smb_ntsd **acl_inf, __u32 *buflen);
+ extern int CIFSSMBSetCIFSACL(const unsigned int, struct cifs_tcon *, __u16,
+- struct cifs_ntsd *, __u32, int);
++ struct smb_ntsd *pntsd, __u32 len, int aclflag);
+ extern int cifs_do_get_acl(const unsigned int xid, struct cifs_tcon *tcon,
+ const unsigned char *searchName,
+ struct posix_acl **acl, const int acl_type,
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index a34db419e46f7f..2f8745736dbb02 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -3385,7 +3385,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata,
+ /* Get Security Descriptor (by handle) from remote server for a file or dir */
+ int
+ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+- struct cifs_ntsd **acl_inf, __u32 *pbuflen)
++ struct smb_ntsd **acl_inf, __u32 *pbuflen)
+ {
+ int rc = 0;
+ int buf_type = 0;
+@@ -3455,7 +3455,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+
+ /* check if buffer is big enough for the acl
+ header followed by the smallest SID */
+- if ((*pbuflen < sizeof(struct cifs_ntsd) + 8) ||
++ if ((*pbuflen < sizeof(struct smb_ntsd) + 8) ||
+ (*pbuflen >= 64 * 1024)) {
+ cifs_dbg(VFS, "bad acl length %d\n", *pbuflen);
+ rc = -EINVAL;
+@@ -3475,7 +3475,7 @@ CIFSSMBGetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+
+ int
+ CIFSSMBSetCIFSACL(const unsigned int xid, struct cifs_tcon *tcon, __u16 fid,
+- struct cifs_ntsd *pntsd, __u32 acllen, int aclflag)
++ struct smb_ntsd *pntsd, __u32 acllen, int aclflag)
+ {
+ __u16 byte_count, param_count, data_count, param_offset, data_offset;
+ int rc = 0;
+diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
+index ce7e0aed8f7d2b..b3e59a7c71205f 100644
+--- a/fs/smb/client/inode.c
++++ b/fs/smb/client/inode.c
+@@ -3087,6 +3087,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ int rc = -EACCES;
+ __u32 dosattr = 0;
+ __u64 mode = NO_CHANGE_64;
++ bool posix = cifs_sb_master_tcon(cifs_sb)->posix_extensions;
+
+ xid = get_xid();
+
+@@ -3177,7 +3178,8 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
+ mode = attrs->ia_mode;
+ rc = 0;
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) ||
+- (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)) {
++ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) ||
++ posix) {
+ rc = id_mode_to_cifs_acl(inode, full_path, &mode,
+ INVALID_UID, INVALID_GID);
+ if (rc) {
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 2a292736c89a2a..e695df1dbb23b2 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -315,7 +315,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb311_posix_qinfo *) +
+ (PATH_MAX * 2) +
+- (sizeof(struct cifs_sid) * 2), 0, NULL);
++ (sizeof(struct smb_sid) * 2), 0, NULL);
+ } else {
+ rc = SMB2_query_info_init(tcon, server,
+ &rqst[num_rqst],
+@@ -325,7 +325,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ SMB2_O_INFO_FILE, 0,
+ sizeof(struct smb311_posix_qinfo *) +
+ (PATH_MAX * 2) +
+- (sizeof(struct cifs_sid) * 2), 0, NULL);
++ (sizeof(struct smb_sid) * 2), 0, NULL);
+ }
+ if (!rc && (!cfile || num_rqst > 1)) {
+ smb2_set_next_command(tcon, &rqst[num_rqst]);
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 6645f147d57c29..fc6d00344c50ea 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -3001,11 +3001,11 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
+ return rc;
+ }
+
+-static struct cifs_ntsd *
++static struct smb_ntsd *
+ get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ const struct cifs_fid *cifsfid, u32 *pacllen, u32 info)
+ {
+- struct cifs_ntsd *pntsd = NULL;
++ struct smb_ntsd *pntsd = NULL;
+ unsigned int xid;
+ int rc = -EOPNOTSUPP;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+@@ -3030,11 +3030,11 @@ get_smb2_acl_by_fid(struct cifs_sb_info *cifs_sb,
+
+ }
+
+-static struct cifs_ntsd *
++static struct smb_ntsd *
+ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ const char *path, u32 *pacllen, u32 info)
+ {
+- struct cifs_ntsd *pntsd = NULL;
++ struct smb_ntsd *pntsd = NULL;
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ unsigned int xid;
+ int rc;
+@@ -3097,7 +3097,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
+ }
+
+ static int
+-set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
++set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
+ struct inode *inode, const char *path, int aclflag)
+ {
+ u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+@@ -3155,12 +3155,12 @@ set_smb2_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
+ }
+
+ /* Retrieve an ACL from the server */
+-static struct cifs_ntsd *
++static struct smb_ntsd *
+ get_smb2_acl(struct cifs_sb_info *cifs_sb,
+ struct inode *inode, const char *path,
+ u32 *pacllen, u32 info)
+ {
+- struct cifs_ntsd *pntsd = NULL;
++ struct smb_ntsd *pntsd = NULL;
+ struct cifsFileInfo *open_file = NULL;
+
+ if (inode && !(info & SACL_SECINFO))
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 38b26468eb0c53..c012fbc2638ed5 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -2623,7 +2623,7 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
+ unsigned int group_offset = 0;
+ struct smb3_acl acl = {};
+
+- *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct cifs_ace) * 4), 8);
++ *len = round_up(sizeof(struct crt_sd_ctxt) + (sizeof(struct smb_ace) * 4), 8);
+
+ if (set_owner) {
+ /* sizeof(struct owner_group_sids) is already multiple of 8 so no need to round */
+@@ -2672,21 +2672,21 @@ create_sd_buf(umode_t mode, bool set_owner, unsigned int *len)
+ ptr += sizeof(struct smb3_acl);
+
+ /* create one ACE to hold the mode embedded in reserved special SID */
+- acelen = setup_special_mode_ACE((struct cifs_ace *)ptr, (__u64)mode);
++ acelen = setup_special_mode_ACE((struct smb_ace *)ptr, false, (__u64)mode);
+ ptr += acelen;
+ acl_size = acelen + sizeof(struct smb3_acl);
+ ace_count = 1;
+
+ if (set_owner) {
+ /* we do not need to reallocate buffer to add the two more ACEs. plenty of space */
+- acelen = setup_special_user_owner_ACE((struct cifs_ace *)ptr);
++ acelen = setup_special_user_owner_ACE((struct smb_ace *)ptr);
+ ptr += acelen;
+ acl_size += acelen;
+ ace_count += 1;
+ }
+
+ /* and one more ACE to allow access for authenticated users */
+- acelen = setup_authusers_ACE((struct cifs_ace *)ptr);
++ acelen = setup_authusers_ACE((struct smb_ace *)ptr);
+ ptr += acelen;
+ acl_size += acelen;
+ ace_count += 1;
+@@ -3915,7 +3915,7 @@ SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
+ {
+ size_t output_len = sizeof(struct smb311_posix_qinfo *) +
+- (sizeof(struct cifs_sid) * 2) + (PATH_MAX * 2);
++ (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
+ *plen = 0;
+
+ return query_info(xid, tcon, persistent_fid, volatile_fid,
+@@ -5626,7 +5626,7 @@ SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
+ int
+ SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid,
+- struct cifs_ntsd *pnntsd, int pacllen, int aclflag)
++ struct smb_ntsd *pnntsd, int pacllen, int aclflag)
+ {
+ return send_set_info(xid, tcon, persistent_fid, volatile_fid,
+ current->tgid, 0, SMB2_O_INFO_SECURITY, aclflag,
+diff --git a/fs/smb/client/smb2pdu.h b/fs/smb/client/smb2pdu.h
+index 5c458ab3b05a44..076d9e83e1a044 100644
+--- a/fs/smb/client/smb2pdu.h
++++ b/fs/smb/client/smb2pdu.h
+@@ -364,8 +364,8 @@ struct create_posix_rsp {
+ u32 nlink;
+ u32 reparse_tag;
+ u32 mode;
+- struct cifs_sid owner; /* var-sized on the wire */
+- struct cifs_sid group; /* var-sized on the wire */
++ struct smb_sid owner; /* var-sized on the wire */
++ struct smb_sid group; /* var-sized on the wire */
+ } __packed;
+
+ #define SMB2_QUERY_DIRECTORY_IOV_SIZE 2
+@@ -408,8 +408,8 @@ struct smb2_posix_info {
+ struct smb2_posix_info_parsed {
+ const struct smb2_posix_info *base;
+ size_t size;
+- struct cifs_sid owner;
+- struct cifs_sid group;
++ struct smb_sid owner;
++ struct smb_sid group;
+ int name_len;
+ const u8 *name;
+ };
+diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
+index 613667b46c5802..750e4e397b1393 100644
+--- a/fs/smb/client/smb2proto.h
++++ b/fs/smb/client/smb2proto.h
+@@ -37,8 +37,6 @@ extern struct mid_q_entry *smb2_setup_request(struct cifs_ses *ses,
+ struct smb_rqst *rqst);
+ extern struct mid_q_entry *smb2_setup_async_request(
+ struct TCP_Server_Info *server, struct smb_rqst *rqst);
+-extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+- __u64 ses_id);
+ extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+ __u64 ses_id, __u32 tid);
+ extern int smb2_calc_signature(struct smb_rqst *rqst,
+@@ -247,7 +245,7 @@ extern int SMB2_set_info_init(struct cifs_tcon *tcon,
+ extern void SMB2_set_info_free(struct smb_rqst *rqst);
+ extern int SMB2_set_acl(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid,
+- struct cifs_ntsd *pnntsd, int pacllen, int aclflag);
++ struct smb_ntsd *pnntsd, int pacllen, int aclflag);
+ extern int SMB2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid,
+ struct smb2_file_full_ea_info *buf, int len);
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 4ca04e62a993cf..4a43802375b3a3 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -74,7 +74,7 @@ smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
+
+
+ static
+-int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
++int smb3_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
+ {
+ struct cifs_chan *chan;
+ struct TCP_Server_Info *pserver;
+@@ -168,16 +168,41 @@ smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
+ return NULL;
+ }
+
+-struct cifs_ses *
+-smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
++static int smb2_get_sign_key(struct TCP_Server_Info *server,
++ __u64 ses_id, u8 *key)
+ {
+ struct cifs_ses *ses;
++ int rc = -ENOENT;
++
++ if (SERVER_IS_CHAN(server))
++ server = server->primary_server;
+
+ spin_lock(&cifs_tcp_ses_lock);
+- ses = smb2_find_smb_ses_unlocked(server, ses_id);
+- spin_unlock(&cifs_tcp_ses_lock);
++ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++ if (ses->Suid != ses_id)
++ continue;
+
+- return ses;
++ rc = 0;
++ spin_lock(&ses->ses_lock);
++ switch (ses->ses_status) {
++ case SES_EXITING: /* SMB2_LOGOFF */
++ case SES_GOOD:
++ if (likely(ses->auth_key.response)) {
++ memcpy(key, ses->auth_key.response,
++ SMB2_NTLMV2_SESSKEY_SIZE);
++ } else {
++ rc = -EIO;
++ }
++ break;
++ default:
++ rc = -EAGAIN;
++ break;
++ }
++ spin_unlock(&ses->ses_lock);
++ break;
++ }
++ spin_unlock(&cifs_tcp_ses_lock);
++ return rc;
+ }
+
+ static struct cifs_tcon *
+@@ -236,14 +261,16 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ unsigned char *sigptr = smb2_signature;
+ struct kvec *iov = rqst->rq_iov;
+ struct smb2_hdr *shdr = (struct smb2_hdr *)iov[0].iov_base;
+- struct cifs_ses *ses;
+ struct shash_desc *shash = NULL;
+ struct smb_rqst drqst;
++ __u64 sid = le64_to_cpu(shdr->SessionId);
++ u8 key[SMB2_NTLMV2_SESSKEY_SIZE];
+
+- ses = smb2_find_smb_ses(server, le64_to_cpu(shdr->SessionId));
+- if (unlikely(!ses)) {
+- cifs_server_dbg(VFS, "%s: Could not find session\n", __func__);
+- return -ENOENT;
++ rc = smb2_get_sign_key(server, sid, key);
++ if (unlikely(rc)) {
++ cifs_server_dbg(FYI, "%s: [sesid=0x%llx] couldn't find signing key: %d\n",
++ __func__, sid, rc);
++ return rc;
+ }
+
+ memset(smb2_signature, 0x0, SMB2_HMACSHA256_SIZE);
+@@ -260,8 +287,7 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ shash = server->secmech.hmacsha256;
+ }
+
+- rc = crypto_shash_setkey(shash->tfm, ses->auth_key.response,
+- SMB2_NTLMV2_SESSKEY_SIZE);
++ rc = crypto_shash_setkey(shash->tfm, key, sizeof(key));
+ if (rc) {
+ cifs_server_dbg(VFS,
+ "%s: Could not update with response\n",
+@@ -303,8 +329,6 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ out:
+ if (allocate_crypto)
+ cifs_free_hash(&shash);
+- if (ses)
+- cifs_put_smb_ses(ses);
+ return rc;
+ }
+
+@@ -570,7 +594,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server,
+ struct smb_rqst drqst;
+ u8 key[SMB3_SIGN_KEY_SIZE];
+
+- rc = smb2_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
++ rc = smb3_get_sign_key(le64_to_cpu(shdr->SessionId), server, key);
+ if (unlikely(rc)) {
+ cifs_server_dbg(FYI, "%s: Could not get signing key\n", __func__);
+ return rc;
+diff --git a/fs/smb/client/xattr.c b/fs/smb/client/xattr.c
+index c2bf829310bee2..e8696ad4da994e 100644
+--- a/fs/smb/client/xattr.c
++++ b/fs/smb/client/xattr.c
+@@ -162,7 +162,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
+ case XATTR_CIFS_ACL:
+ case XATTR_CIFS_NTSD:
+ case XATTR_CIFS_NTSD_FULL: {
+- struct cifs_ntsd *pacl;
++ struct smb_ntsd *pacl;
+
+ if (!value)
+ goto out;
+@@ -315,7 +315,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler,
+ * fetch owner and DACL otherwise
+ */
+ u32 acllen, extra_info;
+- struct cifs_ntsd *pacl;
++ struct smb_ntsd *pacl;
+
+ if (pTcon->ses->server->ops->get_acl == NULL)
+ goto out; /* rc already EOPNOTSUPP */
+diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
+index cd530b9a00caa3..2884ebdc0eda02 100644
+--- a/fs/smb/server/smb2pdu.c
++++ b/fs/smb/server/smb2pdu.c
+@@ -4225,6 +4225,7 @@ static bool __query_dir(struct dir_context *ctx, const char *name, int namlen,
+ /* dot and dotdot entries are already reserved */
+ if (!strcmp(".", name) || !strcmp("..", name))
+ return true;
++ d_info->num_scan++;
+ if (ksmbd_share_veto_filename(priv->work->tcon->share_conf, name))
+ return true;
+ if (!match_pattern(name, namlen, priv->search_pattern))
+@@ -4385,8 +4386,17 @@ int smb2_query_dir(struct ksmbd_work *work)
+ query_dir_private.info_level = req->FileInformationClass;
+ dir_fp->readdir_data.private = &query_dir_private;
+ set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
+-
++again:
++ d_info.num_scan = 0;
+ rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
++ /*
++ * num_entry can be 0 if the directory iteration stops before reaching
++ * the end of the directory and no scanned file matched the search
++ * pattern.
++ */
++ if (rc >= 0 && !d_info.num_entry && d_info.num_scan &&
++ d_info.out_buf_len > 0)
++ goto again;
+ /*
+ * req->OutputBufferLength is too small to contain even one entry.
+ * In this case, it immediately returns OutputBufferLength 0 to client.
+@@ -6007,15 +6017,13 @@ static int set_file_basic_info(struct ksmbd_file *fp,
+ attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+ }
+
+- attrs.ia_valid |= ATTR_CTIME;
+ if (file_info->ChangeTime)
+- attrs.ia_ctime = ksmbd_NTtimeToUnix(file_info->ChangeTime);
+- else
+- attrs.ia_ctime = inode_get_ctime(inode);
++ inode_set_ctime_to_ts(inode,
++ ksmbd_NTtimeToUnix(file_info->ChangeTime));
+
+ if (file_info->LastWriteTime) {
+ attrs.ia_mtime = ksmbd_NTtimeToUnix(file_info->LastWriteTime);
+- attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
++ attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET | ATTR_CTIME);
+ }
+
+ if (file_info->Attributes) {
+@@ -6057,8 +6065,6 @@ static int set_file_basic_info(struct ksmbd_file *fp,
+ return -EACCES;
+
+ inode_lock(inode);
+- inode_set_ctime_to_ts(inode, attrs.ia_ctime);
+- attrs.ia_valid &= ~ATTR_CTIME;
+ rc = notify_change(idmap, dentry, &attrs, NULL);
+ inode_unlock(inode);
+ }
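
num_scan is what lets the loop distinguish "directory exhausted" (num_scan == 0) from "a batch was scanned but every name was vetoed or failed the pattern match"; only the latter jumps back to the again label. The same control flow, restated as a hedged sketch with the names from the hunk:

/* Sketch of the retry condition introduced above. */
do {
	d_info.num_scan = 0;
	rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
	/* scanned some entries, emitted none, buffer still has room: retry */
} while (rc >= 0 && !d_info.num_entry && d_info.num_scan &&
	 d_info.out_buf_len > 0);
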
+diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
+index cb76f4b5bafe8c..06903024a2d88b 100644
+--- a/fs/smb/server/vfs.h
++++ b/fs/smb/server/vfs.h
+@@ -43,6 +43,7 @@ struct ksmbd_dir_info {
+ char *rptr;
+ int name_len;
+ int out_buf_len;
++ int num_scan;
+ int num_entry;
+ int data_count;
+ int last_entry_offset;
+diff --git a/fs/udf/namei.c b/fs/udf/namei.c
+index b3f57ad2b869ff..8ac73f41d6ebe6 100644
+--- a/fs/udf/namei.c
++++ b/fs/udf/namei.c
+@@ -770,7 +770,7 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ struct inode *old_inode = d_inode(old_dentry);
+ struct inode *new_inode = d_inode(new_dentry);
+ struct udf_fileident_iter oiter, niter, diriter;
+- bool has_diriter = false;
++ bool has_diriter = false, is_dir = false;
+ int retval;
+ struct kernel_lb_addr tloc;
+
+@@ -792,7 +792,20 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ retval = -ENOTEMPTY;
+ if (!empty_dir(new_inode))
+ goto out_oiter;
++ retval = -EFSCORRUPTED;
++ if (new_inode->i_nlink != 2)
++ goto out_oiter;
+ }
++ retval = -EFSCORRUPTED;
++ if (old_dir->i_nlink < 3)
++ goto out_oiter;
++ is_dir = true;
++ } else if (new_inode) {
++ retval = -EFSCORRUPTED;
++ if (new_inode->i_nlink < 1)
++ goto out_oiter;
++ }
++ if (is_dir && old_dir != new_dir) {
+ retval = udf_fiiter_find_entry(old_inode, &dotdot_name,
+ &diriter);
+ if (retval == -ENOENT) {
+@@ -880,7 +893,9 @@ static int udf_rename(struct mnt_idmap *idmap, struct inode *old_dir,
+ cpu_to_lelb(UDF_I(new_dir)->i_location);
+ udf_fiiter_write_fi(&diriter, NULL);
+ udf_fiiter_release(&diriter);
++ }
+
++ if (is_dir) {
+ inode_dec_link_count(old_dir);
+ if (new_inode)
+ inode_dec_link_count(new_inode);
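
The new checks encode the on-disk link-count invariants: a directory being moved implies old_dir->i_nlink >= 3 (its own ".", its entry in its parent, and the child's ".." back-reference), an empty directory being replaced must have exactly 2 links, and any other replaced target at least 1. Values below these bounds can only come from corruption, hence -EFSCORRUPTED before the decrements that would otherwise underflow. A hedged, hypothetical restatement of the bounds:

/* Hypothetical predicate restating the bounds checked above. */
static bool rename_nlink_sane(struct inode *old_dir, struct inode *new_inode,
			      bool moving_dir)
{
	if (moving_dir && old_dir->i_nlink < 3)
		return false;	/* child's ".." back-reference is missing */
	if (new_inode && S_ISDIR(new_inode->i_mode) && new_inode->i_nlink != 2)
		return false;	/* empty dir must be "." + parent entry */
	if (new_inode && !S_ISDIR(new_inode->i_mode) && new_inode->i_nlink < 1)
		return false;	/* live inode with no links */
	return true;
}
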
+diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h
+index 73e806fe7ce707..699c1a37b8e784 100644
+--- a/include/acpi/pcc.h
++++ b/include/acpi/pcc.h
+@@ -12,17 +12,33 @@
+ struct pcc_mbox_chan {
+ struct mbox_chan *mchan;
+ u64 shmem_base_addr;
++ void __iomem *shmem;
+ u64 shmem_size;
+ u32 latency;
+ u32 max_access_rate;
+ u16 min_turnaround_time;
+ };
+
++/* Generic Communications Channel Shared Memory Region */
++#define PCC_SIGNATURE 0x50434300
++/* Generic Communications Channel Command Field */
++#define PCC_CMD_GENERATE_DB_INTR BIT(15)
++/* Generic Communications Channel Status Field */
++#define PCC_STATUS_CMD_COMPLETE BIT(0)
++#define PCC_STATUS_SCI_DOORBELL BIT(1)
++#define PCC_STATUS_ERROR BIT(2)
++#define PCC_STATUS_PLATFORM_NOTIFY BIT(3)
++/* Initiator Responder Communications Channel Flags */
++#define PCC_CMD_COMPLETION_NOTIFY BIT(0)
++
+ #define MAX_PCC_SUBSPACES 256
++#define PCC_ACK_FLAG_MASK 0x1
++
+ #ifdef CONFIG_PCC
+ extern struct pcc_mbox_chan *
+ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id);
+ extern void pcc_mbox_free_channel(struct pcc_mbox_chan *chan);
++extern int pcc_mbox_ioremap(struct mbox_chan *chan);
+ #else
+ static inline struct pcc_mbox_chan *
+ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
+@@ -30,6 +46,10 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
+ return ERR_PTR(-ENODEV);
+ }
+ static inline void pcc_mbox_free_channel(struct pcc_mbox_chan *chan) { }
++static inline int pcc_mbox_ioremap(struct mbox_chan *chan)
++{
++ return 0;
++}
+ #endif
+
+ #endif /* _PCC_H */
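
pcc_mbox_ioremap() pairs with the new shmem field: after pcc_mbox_request_channel(), a client maps the subspace's shared memory once via pcc_mbox_ioremap(pchan->mchan) and can then poll the generic status word using the bit definitions above. A consumer-side sketch, under the assumption that the helper populates pchan->shmem from shmem_base_addr:

/* Hedged sketch; assumes pcc_mbox_ioremap() fills pchan->shmem. */
static bool pcc_cmd_complete(struct pcc_mbox_chan *pchan)
{
	struct acpi_pcct_shared_memory __iomem *shmem = pchan->shmem;

	return readw_relaxed(&shmem->status) & PCC_STATUS_CMD_COMPLETE;
}
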
+diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
+index 6cdc873ac907f5..aa5233b1eba970 100644
+--- a/include/clocksource/hyperv_timer.h
++++ b/include/clocksource/hyperv_timer.h
+@@ -38,6 +38,8 @@ extern void hv_remap_tsc_clocksource(void);
+ extern unsigned long hv_get_tsc_pfn(void);
+ extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
+
++extern void hv_adj_sched_clock_offset(u64 offset);
++
+ static __always_inline bool
+ hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
+ u64 *cur_tsc, u64 *time)
+diff --git a/include/crypto/internal/ecc.h b/include/crypto/internal/ecc.h
+index 4f6c1a68882fa1..c0b8be63cbde7c 100644
+--- a/include/crypto/internal/ecc.h
++++ b/include/crypto/internal/ecc.h
+@@ -56,6 +56,16 @@ static inline void ecc_swap_digits(const void *in, u64 *out, unsigned int ndigit
+ out[i] = get_unaligned_be64(&src[ndigits - 1 - i]);
+ }
+
++/**
++ * ecc_digits_from_bytes() - Create ndigits-sized digits array from byte array
++ * @in: Input byte array
++ * @nbytes Size of input byte array
++ * @out Output digits array
++ * @ndigits: Number of digits to create from byte array
++ */
++void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
++ u64 *out, unsigned int ndigits);
++
+ /**
+ * ecc_is_key_valid() - Validate a given ECDH private key
+ *
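
The body of ecc_digits_from_bytes() is outside this hunk, but given ecc_swap_digits() above, its job is to turn a big-endian byte string, possibly shorter than ndigits * 8 bytes, into little-endian u64 digits. A hedged sketch of that conversion (not the actual body; assumes nbytes <= ndigits * 8 and that ECC_MAX_BYTES from this header bounds the buffer):

/* Hedged sketch: left-pad to whole digits, then byte-swap with the
 * helper above. Assumes nbytes <= ndigits * 8. */
static void digits_from_bytes_sketch(const u8 *in, unsigned int nbytes,
				     u64 *out, unsigned int ndigits)
{
	u8 be[ECC_MAX_BYTES] = {};
	unsigned int pad = ndigits * sizeof(u64) - nbytes;

	memcpy(&be[pad], in, nbytes);	/* leading zero bytes */
	ecc_swap_digits(be, out, ndigits);
}
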
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index cb8e97665eaa59..92919d52f7e1b2 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -319,34 +319,12 @@ struct bpf_func_state {
+ struct bpf_stack_state *stack;
+ };
+
+-#define MAX_CALL_FRAMES 8
+-
+-/* instruction history flags, used in bpf_jmp_history_entry.flags field */
+-enum {
+- /* instruction references stack slot through PTR_TO_STACK register;
+- * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
+- * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
+- * 8 bytes per slot, so slot index (spi) is [0, 63])
+- */
+- INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
+-
+- INSN_F_SPI_MASK = 0x3f, /* 6 bits */
+- INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
+-
+- INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
+-};
+-
+-static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
+-static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
+-
+-struct bpf_jmp_history_entry {
++struct bpf_idx_pair {
++ u32 prev_idx;
+ u32 idx;
+- /* insn idx can't be bigger than 1 million */
+- u32 prev_idx : 22;
+- /* special flags, e.g., whether insn is doing register stack spill/load */
+- u32 flags : 10;
+ };
+
++#define MAX_CALL_FRAMES 8
+ /* Maximum number of register states that can exist at once */
+ #define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
+ struct bpf_verifier_state {
+@@ -429,7 +407,7 @@ struct bpf_verifier_state {
+ * For most states jmp_history_cnt is [0-3].
+ * For loops can go up to ~40.
+ */
+- struct bpf_jmp_history_entry *jmp_history;
++ struct bpf_idx_pair *jmp_history;
+ u32 jmp_history_cnt;
+ u32 dfs_depth;
+ u32 callback_unroll_depth;
+@@ -662,7 +640,6 @@ struct bpf_verifier_env {
+ int cur_stack;
+ } cfg;
+ struct backtrack_state bt;
+- struct bpf_jmp_history_entry *cur_hist_ent;
+ u32 pass_cnt; /* number of times do_check() was called */
+ u32 subprog_cnt;
+ /* number of instructions analyzed by the verifier */
+diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
+index 53f1a7a932b08c..64b8600eb8c0e4 100644
+--- a/include/linux/cleanup.h
++++ b/include/linux/cleanup.h
+@@ -92,26 +92,85 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+ * trivial wrapper around DEFINE_CLASS() above specifically
+ * for locks.
+ *
++ * DEFINE_GUARD_COND(name, ext, condlock)
++ * wrapper around EXTEND_CLASS above to add conditional lock
++ * variants to a base class, eg. mutex_trylock() or
++ * mutex_lock_interruptible().
++ *
+ * guard(name):
+- * an anonymous instance of the (guard) class
++ * an anonymous instance of the (guard) class, not recommended for
++ * conditional locks.
+ *
+ * scoped_guard (name, args...) { }:
+ * similar to CLASS(name, scope)(args), except the variable (with the
+ * explicit name 'scope') is declared in a for-loop such that its scope is
+ * bound to the next (compound) statement.
+ *
++ * for conditional locks the loop body is skipped when the lock is not
++ * acquired.
++ *
++ * scoped_cond_guard (name, fail, args...) { }:
++ * similar to scoped_guard(), except that it runs the 'fail' statement
++ * when the lock acquire fails.
++ *
++ * Only for conditional locks.
+ */
+
++#define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \
++static __maybe_unused const bool class_##_name##_is_conditional = _is_cond
++
+ #define DEFINE_GUARD(_name, _type, _lock, _unlock) \
+- DEFINE_CLASS(_name, _type, _unlock, ({ _lock; _T; }), _type _T)
++ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
++ DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \
++ static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \
++ { return (void *)(__force unsigned long)*_T; }
++
++#define DEFINE_GUARD_COND(_name, _ext, _condlock) \
++ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
++ EXTEND_CLASS(_name, _ext, \
++ ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \
++ class_##_name##_t _T) \
++ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
++ { return class_##_name##_lock_ptr(_T); }
+
+ #define guard(_name) \
+ CLASS(_name, __UNIQUE_ID(guard))
+
+-#define scoped_guard(_name, args...) \
+- for (CLASS(_name, scope)(args), \
+- *done = NULL; !done; done = (void *)1)
++#define __guard_ptr(_name) class_##_name##_lock_ptr
++#define __is_cond_ptr(_name) class_##_name##_is_conditional
+
++/*
++ * Helper macro for scoped_guard().
++ *
++ * Note that the "!__is_cond_ptr(_name)" part of the condition ensures that
++ * the compiler can be sure that, for unconditional locks, the body of the
++ * loop (caller-provided code glued to the else clause) cannot be skipped.
++ * That is needed because the other part - "__guard_ptr(_name)(&scope)" - is
++ * too hard for it to deduce (even though it could be proven true for
++ * unconditional locks).
++ */
++#define __scoped_guard(_name, _label, args...) \
++ for (CLASS(_name, scope)(args); \
++ __guard_ptr(_name)(&scope) || !__is_cond_ptr(_name); \
++ ({ goto _label; })) \
++ if (0) { \
++_label: \
++ break; \
++ } else
++
++#define scoped_guard(_name, args...) \
++ __scoped_guard(_name, __UNIQUE_ID(label), args)
++
++#define __scoped_cond_guard(_name, _fail, _label, args...) \
++ for (CLASS(_name, scope)(args); true; ({ goto _label; })) \
++ if (!__guard_ptr(_name)(&scope)) { \
++ BUILD_BUG_ON(!__is_cond_ptr(_name)); \
++ _fail; \
++_label: \
++ break; \
++ } else
++
++#define scoped_cond_guard(_name, _fail, args...) \
++ __scoped_cond_guard(_name, _fail, __UNIQUE_ID(label), args)
+ /*
+ * Additional helper macros for generating lock guards with types, either for
+ * locks that don't have a native type (eg. RCU, preempt) or those that need a
+@@ -119,6 +178,7 @@ static inline class_##_name##_t class_##_name##ext##_constructor(_init_args) \
+ *
+ * DEFINE_LOCK_GUARD_0(name, lock, unlock, ...)
+ * DEFINE_LOCK_GUARD_1(name, type, lock, unlock, ...)
++ * DEFINE_LOCK_GUARD_1_COND(name, ext, condlock)
+ *
+ * will result in the following type:
+ *
+@@ -140,6 +200,11 @@ typedef struct { \
+ static inline void class_##_name##_destructor(class_##_name##_t *_T) \
+ { \
+ if (_T->lock) { _unlock; } \
++} \
++ \
++static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \
++{ \
++ return (void *)(__force unsigned long)_T->lock; \
+ }
+
+
+@@ -161,11 +226,24 @@ static inline class_##_name##_t class_##_name##_constructor(void) \
+ }
+
+ #define DEFINE_LOCK_GUARD_1(_name, _type, _lock, _unlock, ...) \
++__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+ __DEFINE_UNLOCK_GUARD(_name, _type, _unlock, __VA_ARGS__) \
+ __DEFINE_LOCK_GUARD_1(_name, _type, _lock)
+
+ #define DEFINE_LOCK_GUARD_0(_name, _lock, _unlock, ...) \
++__DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
+ __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
+ __DEFINE_LOCK_GUARD_0(_name, _lock)
+
++#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
++ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
++ EXTEND_CLASS(_name, _ext, \
++ ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
++ if (_T->lock && !(_condlock)) _T->lock = NULL; \
++ _t; }), \
++ typeof_member(class_##_name##_t, lock) l) \
++ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
++ { return class_##_name##_lock_ptr(_T); }
++
++
+ #endif /* __LINUX_GUARDS_H */
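
The conditional classes slot into the existing spellings; the mutex guards extended later in this series (mutex_try, mutex_intr) make the two new forms concrete. A hedged usage sketch:

/* Hedged usage sketch for the conditional guard variants. */
static int frob(struct mutex *m, struct mutex *other)
{
	scoped_guard(mutex_try, m) {
		/* runs only if mutex_trylock(m) succeeded; auto-unlock */
	}

	scoped_cond_guard(mutex_intr, return -EINTR, other) {
		/* runs with 'other' held; -EINTR if interrupted */
	}
	return 0;
}
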
+diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
+index 3028af87716e29..430749a0f362aa 100644
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -585,13 +585,16 @@ static inline int vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci)
+ * __vlan_get_protocol_offset - get protocol EtherType.
+ * @skb: skbuff to query
+ * @type: first vlan protocol
++ * @mac_offset: MAC offset
+ * @depth: buffer to store length of eth and vlan tags in bytes
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+-static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+- int *depth)
++static inline __be16 __vlan_get_protocol_offset(const struct sk_buff *skb,
++ __be16 type,
++ int mac_offset,
++ int *depth)
+ {
+ unsigned int vlan_depth = skb->mac_len, parse_depth = VLAN_MAX_DEPTH;
+
+@@ -610,7 +613,8 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ do {
+ struct vlan_hdr vhdr, *vh;
+
+- vh = skb_header_pointer(skb, vlan_depth, sizeof(vhdr), &vhdr);
++ vh = skb_header_pointer(skb, mac_offset + vlan_depth,
++ sizeof(vhdr), &vhdr);
+ if (unlikely(!vh || !--parse_depth))
+ return 0;
+
+@@ -625,6 +629,12 @@ static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
+ return type;
+ }
+
++static inline __be16 __vlan_get_protocol(const struct sk_buff *skb, __be16 type,
++ int *depth)
++{
++ return __vlan_get_protocol_offset(skb, type, 0, depth);
++}
++
+ /**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
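
The _offset variant displaces every skb_header_pointer() lookup by mac_offset, so a caller whose Ethernet header does not sit at the usual parse position can still walk stacked VLAN tags; the old __vlan_get_protocol() is now just the zero-offset case. A hedged sketch:

/* Hedged sketch: EtherType of a frame whose MAC header is displaced by
 * @off bytes relative to the usual parse position. */
static __be16 ethertype_at(const struct sk_buff *skb, __be16 first_proto,
			   int off)
{
	int depth;

	return __vlan_get_protocol_offset(skb, first_proto, off, &depth);
}
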
+diff --git a/include/linux/memblock.h b/include/linux/memblock.h
+index ed57c23f80ac2b..ed64240041e857 100644
+--- a/include/linux/memblock.h
++++ b/include/linux/memblock.h
+@@ -122,6 +122,7 @@ unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
+ phys_addr_t base2, phys_addr_t size2);
+ bool memblock_overlaps_region(struct memblock_type *type,
+ phys_addr_t base, phys_addr_t size);
++bool memblock_validate_numa_coverage(unsigned long threshold_bytes);
+ int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
+ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
+ int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index ffb98bc43b2db2..38a8ff9c685cb8 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -1225,6 +1225,12 @@ static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
+ return dev->coredev_type == MLX5_COREDEV_VF;
+ }
+
++static inline bool mlx5_core_same_coredev_type(const struct mlx5_core_dev *dev1,
++ const struct mlx5_core_dev *dev2)
++{
++ return dev1->coredev_type == dev2->coredev_type;
++}
++
+ static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
+ {
+ return dev->caps.embedded_cpu;
+diff --git a/include/linux/mutex.h b/include/linux/mutex.h
+index 5b5630e58407a5..e1c323c7d75ba9 100644
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -248,6 +248,7 @@ extern void mutex_unlock(struct mutex *lock);
+ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
+
+ DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T))
+-DEFINE_FREE(mutex, struct mutex *, if (_T) mutex_unlock(_T))
++DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T))
++DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0)
+
+ #endif /* __LINUX_MUTEX_H */
+diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
+index 1dd530ce8b45b9..9c29689ff505e0 100644
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -203,11 +203,11 @@ extern void up_read(struct rw_semaphore *sem);
+ extern void up_write(struct rw_semaphore *sem);
+
+ DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
+-DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
+-
+-DEFINE_FREE(up_read, struct rw_semaphore *, if (_T) up_read(_T))
+-DEFINE_FREE(up_write, struct rw_semaphore *, if (_T) up_write(_T))
++DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T))
++DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0)
+
++DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T))
++DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T))
+
+ /*
+ * downgrade write lock to read lock
+diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
+index 515d7fcb9634b5..468d8c5eef4a08 100644
+--- a/include/linux/seq_buf.h
++++ b/include/linux/seq_buf.h
+@@ -14,19 +14,24 @@
+ * @buffer: pointer to the buffer
+ * @size: size of the buffer
+ * @len: the amount of data inside the buffer
+- * @readpos: The next position to read in the buffer.
+ */
+ struct seq_buf {
+ char *buffer;
+ size_t size;
+ size_t len;
+- loff_t readpos;
+ };
+
++#define DECLARE_SEQ_BUF(NAME, SIZE) \
++ struct seq_buf NAME = { \
++ .buffer = (char[SIZE]) { 0 }, \
++ .size = SIZE, \
++ }
++
+ static inline void seq_buf_clear(struct seq_buf *s)
+ {
+ s->len = 0;
+- s->readpos = 0;
++ if (s->size)
++ s->buffer[0] = '\0';
+ }
+
+ static inline void
+@@ -72,8 +77,8 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
+ }
+
+ /**
+- * seq_buf_terminate - Make sure buffer is nul terminated
+- * @s: the seq_buf descriptor to terminate.
++ * seq_buf_str - get %NUL-terminated C string from seq_buf
++ * @s: the seq_buf handle
+ *
+ * This makes sure that the buffer in @s is nul terminated and
+ * safe to read as a string.
+@@ -84,16 +89,20 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
+ *
+ * After this function is called, s->buffer is safe to use
+ * in string operations.
++ *
++ * Returns @s->buffer after making sure it is terminated.
+ */
+-static inline void seq_buf_terminate(struct seq_buf *s)
++static inline const char *seq_buf_str(struct seq_buf *s)
+ {
+ if (WARN_ON(s->size == 0))
+- return;
++ return "";
+
+ if (seq_buf_buffer_left(s))
+ s->buffer[s->len] = 0;
+ else
+ s->buffer[s->size - 1] = 0;
++
++ return s->buffer;
+ }
+
+ /**
+@@ -143,7 +152,7 @@ extern __printf(2, 0)
+ int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
+ extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
+ extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
+- int cnt);
++ size_t start, int cnt);
+ extern int seq_buf_puts(struct seq_buf *s, const char *str);
+ extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
+ extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
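
DECLARE_SEQ_BUF() backs a seq_buf with an anonymous on-stack array, and seq_buf_str() replaces the old terminate-then-read dance with a single accessor. A hedged usage sketch:

/* Hedged usage sketch for the new declaration and accessor. */
static void report_latency(int cpu, u64 ns)
{
	DECLARE_SEQ_BUF(sb, 64);	/* 64-byte buffer, zero-initialized */

	seq_buf_printf(&sb, "cpu%d: %llu ns", cpu, ns);
	pr_info("%s\n", seq_buf_str(&sb));	/* always NUL-terminated */
}
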
+diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
+index 31d3d747a9db78..ceb56b39c70f77 100644
+--- a/include/linux/spinlock.h
++++ b/include/linux/spinlock.h
+@@ -507,6 +507,8 @@ DEFINE_LOCK_GUARD_1(raw_spinlock, raw_spinlock_t,
+ raw_spin_lock(_T->lock),
+ raw_spin_unlock(_T->lock))
+
++DEFINE_LOCK_GUARD_1_COND(raw_spinlock, _try, raw_spin_trylock(_T->lock))
++
+ DEFINE_LOCK_GUARD_1(raw_spinlock_nested, raw_spinlock_t,
+ raw_spin_lock_nested(_T->lock, SINGLE_DEPTH_NESTING),
+ raw_spin_unlock(_T->lock))
+@@ -515,23 +517,36 @@ DEFINE_LOCK_GUARD_1(raw_spinlock_irq, raw_spinlock_t,
+ raw_spin_lock_irq(_T->lock),
+ raw_spin_unlock_irq(_T->lock))
+
++DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irq, _try, raw_spin_trylock_irq(_T->lock))
++
+ DEFINE_LOCK_GUARD_1(raw_spinlock_irqsave, raw_spinlock_t,
+ raw_spin_lock_irqsave(_T->lock, _T->flags),
+ raw_spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
++DEFINE_LOCK_GUARD_1_COND(raw_spinlock_irqsave, _try,
++ raw_spin_trylock_irqsave(_T->lock, _T->flags))
++
+ DEFINE_LOCK_GUARD_1(spinlock, spinlock_t,
+ spin_lock(_T->lock),
+ spin_unlock(_T->lock))
+
++DEFINE_LOCK_GUARD_1_COND(spinlock, _try, spin_trylock(_T->lock))
++
+ DEFINE_LOCK_GUARD_1(spinlock_irq, spinlock_t,
+ spin_lock_irq(_T->lock),
+ spin_unlock_irq(_T->lock))
+
++DEFINE_LOCK_GUARD_1_COND(spinlock_irq, _try,
++ spin_trylock_irq(_T->lock))
++
+ DEFINE_LOCK_GUARD_1(spinlock_irqsave, spinlock_t,
+ spin_lock_irqsave(_T->lock, _T->flags),
+ spin_unlock_irqrestore(_T->lock, _T->flags),
+ unsigned long flags)
+
++DEFINE_LOCK_GUARD_1_COND(spinlock_irqsave, _try,
++ spin_trylock_irqsave(_T->lock, _T->flags))
++
+ #undef __LINUX_INSIDE_SPINLOCK_H
+ #endif /* __LINUX_SPINLOCK_H */
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index 9df2524fff33ae..aa1bc417266208 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -279,7 +279,8 @@ struct trace_event_fields {
+ const char *name;
+ const int size;
+ const int align;
+- const int is_signed;
++ const unsigned int is_signed:1;
++ unsigned int needs_test:1;
+ const int filter_type;
+ const int len;
+ };
+@@ -331,6 +332,7 @@ enum {
+ TRACE_EVENT_FL_EPROBE_BIT,
+ TRACE_EVENT_FL_FPROBE_BIT,
+ TRACE_EVENT_FL_CUSTOM_BIT,
++ TRACE_EVENT_FL_TEST_STR_BIT,
+ };
+
+ /*
+@@ -348,6 +350,7 @@ enum {
+ * CUSTOM - Event is a custom event (to be attached to an existing tracepoint)
+ * This is set when the custom event has not been attached
+ * to a tracepoint yet, then it is cleared when it is.
++ * TEST_STR - The event has a "%s" that points to a string outside the event
+ */
+ enum {
+ TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
+@@ -361,6 +364,7 @@ enum {
+ TRACE_EVENT_FL_EPROBE = (1 << TRACE_EVENT_FL_EPROBE_BIT),
+ TRACE_EVENT_FL_FPROBE = (1 << TRACE_EVENT_FL_FPROBE_BIT),
+ TRACE_EVENT_FL_CUSTOM = (1 << TRACE_EVENT_FL_CUSTOM_BIT),
++ TRACE_EVENT_FL_TEST_STR = (1 << TRACE_EVENT_FL_TEST_STR_BIT),
+ };
+
+ #define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)
+diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
+index 6be92bf559fe7c..3691e0e76a1a20 100644
+--- a/include/linux/trace_seq.h
++++ b/include/linux/trace_seq.h
+@@ -14,6 +14,7 @@
+ struct trace_seq {
+ char buffer[PAGE_SIZE];
+ struct seq_buf seq;
++ size_t readpos;
+ int full;
+ };
+
+@@ -22,6 +23,7 @@ trace_seq_init(struct trace_seq *s)
+ {
+ seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
+ s->full = 0;
++ s->readpos = 0;
+ }
+
+ /**
+diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
+index 0b4f2d5faa080d..ebdfef124b2bc0 100644
+--- a/include/linux/usb/chipidea.h
++++ b/include/linux/usb/chipidea.h
+@@ -64,6 +64,8 @@ struct ci_hdrc_platform_data {
+ #define CI_HDRC_PMQOS BIT(15)
+ #define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
+ #define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17)
++#define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18)
++#define CI_HDRC_HAS_SHORT_PKT_LIMIT BIT(19)
+ enum usb_dr_mode dr_mode;
+ #define CI_HDRC_CONTROLLER_RESET_EVENT 0
+ #define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index e9214ccfde2d72..4fcee6b734b74c 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -800,7 +800,6 @@ struct hci_conn_params {
+ extern struct list_head hci_dev_list;
+ extern struct list_head hci_cb_list;
+ extern rwlock_t hci_dev_list_lock;
+-extern struct mutex hci_cb_list_lock;
+
+ #define hci_dev_set_flag(hdev, nr) set_bit((nr), (hdev)->dev_flags)
+ #define hci_dev_clear_flag(hdev, nr) clear_bit((nr), (hdev)->dev_flags)
+@@ -1949,24 +1948,47 @@ struct hci_cb {
+
+ char *name;
+
++ bool (*match) (struct hci_conn *conn);
+ void (*connect_cfm) (struct hci_conn *conn, __u8 status);
+ void (*disconn_cfm) (struct hci_conn *conn, __u8 status);
+ void (*security_cfm) (struct hci_conn *conn, __u8 status,
+- __u8 encrypt);
++ __u8 encrypt);
+ void (*key_change_cfm) (struct hci_conn *conn, __u8 status);
+ void (*role_switch_cfm) (struct hci_conn *conn, __u8 status, __u8 role);
+ };
+
++static inline void hci_cb_lookup(struct hci_conn *conn, struct list_head *list)
++{
++ struct hci_cb *cb, *cpy;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(cb, &hci_cb_list, list) {
++ if (cb->match && cb->match(conn)) {
++ cpy = kmalloc(sizeof(*cpy), GFP_ATOMIC);
++ if (!cpy)
++ break;
++
++ *cpy = *cb;
++ INIT_LIST_HEAD(&cpy->list);
++ list_add_rcu(&cpy->list, list);
++ }
++ }
++ rcu_read_unlock();
++}
++
+ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+ {
+- struct hci_cb *cb;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
++
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->connect_cfm)
+ cb->connect_cfm(conn, status);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->connect_cfm_cb)
+ conn->connect_cfm_cb(conn, status);
+@@ -1974,43 +1996,55 @@ static inline void hci_connect_cfm(struct hci_conn *conn, __u8 status)
+
+ static inline void hci_disconn_cfm(struct hci_conn *conn, __u8 reason)
+ {
+- struct hci_cb *cb;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
++
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->disconn_cfm)
+ cb->disconn_cfm(conn, reason);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->disconn_cfm_cb)
+ conn->disconn_cfm_cb(conn, reason);
+ }
+
+-static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
++static inline void hci_security_cfm(struct hci_conn *conn, __u8 status,
++ __u8 encrypt)
+ {
+- struct hci_cb *cb;
+- __u8 encrypt;
+-
+- if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
+- return;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
+
+- encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->security_cfm)
+ cb->security_cfm(conn, status, encrypt);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+
+ if (conn->security_cfm_cb)
+ conn->security_cfm_cb(conn, status);
+ }
+
++static inline void hci_auth_cfm(struct hci_conn *conn, __u8 status)
++{
++ __u8 encrypt;
++
++ if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
++ return;
++
++ encrypt = test_bit(HCI_CONN_ENCRYPT, &conn->flags) ? 0x01 : 0x00;
++
++ hci_security_cfm(conn, status, encrypt);
++}
++
+ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
+ {
+- struct hci_cb *cb;
+ __u8 encrypt;
+
+ if (conn->state == BT_CONFIG) {
+@@ -2037,40 +2071,38 @@ static inline void hci_encrypt_cfm(struct hci_conn *conn, __u8 status)
+ conn->sec_level = conn->pending_sec_level;
+ }
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
+- if (cb->security_cfm)
+- cb->security_cfm(conn, status, encrypt);
+- }
+- mutex_unlock(&hci_cb_list_lock);
+-
+- if (conn->security_cfm_cb)
+- conn->security_cfm_cb(conn, status);
++ hci_security_cfm(conn, status, encrypt);
+ }
+
+ static inline void hci_key_change_cfm(struct hci_conn *conn, __u8 status)
+ {
+- struct hci_cb *cb;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
++
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->key_change_cfm)
+ cb->key_change_cfm(conn, status);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+ }
+
+ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
+ __u8 role)
+ {
+- struct hci_cb *cb;
++ struct list_head list;
++ struct hci_cb *cb, *tmp;
++
++ INIT_LIST_HEAD(&list);
++ hci_cb_lookup(conn, &list);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_for_each_entry(cb, &hci_cb_list, list) {
++ list_for_each_entry_safe(cb, tmp, &list, list) {
+ if (cb->role_switch_cfm)
+ cb->role_switch_cfm(conn, status, role);
++ kfree(cb);
+ }
+- mutex_unlock(&hci_cb_list_lock);
+ }
+
+ static inline bool hci_bdaddr_is_rpa(bdaddr_t *bdaddr, u8 addr_type)
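
With hci_cb_list_lock gone, hci_cb_lookup() snapshots the matching callbacks under RCU into a private list and the dispatchers invoke the copies without any global lock; the new ->match() hook is how a profile limits which connections it sees. A hedged sketch of a registration that only wants ACL links (my_connect_cfm is a hypothetical handler defined elsewhere):

/* Hedged registration sketch using the new ->match() hook. */
static bool my_match(struct hci_conn *conn)
{
	return conn->type == ACL_LINK;	/* only classic ACL connections */
}

static struct hci_cb my_cb = {
	.name		= "my_profile",
	.match		= my_match,
	.connect_cfm	= my_connect_cfm,	/* hypothetical handler */
};

static int __init my_profile_init(void)
{
	return hci_register_cb(&my_cb);
}
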
+diff --git a/include/net/mac80211.h b/include/net/mac80211.h
+index 47ade676565dbc..835a58ce9ca57c 100644
+--- a/include/net/mac80211.h
++++ b/include/net/mac80211.h
+@@ -3039,6 +3039,19 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
+ */
+ void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
+
++/**
++ * ieee80211_purge_tx_queue - purge TX skb queue
++ * @hw: the hardware
++ * @skbs: the skbs
++ *
++ * Free a set of transmit skbs. Use this function when the device is about to
++ * stop but some transmit skbs without TX status are still queued.
++ * This function does not take the list lock and the caller must hold the
++ * relevant locks to use it.
++ */
++void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
++ struct sk_buff_head *skbs);
++
+ /**
+ * DOC: Hardware crypto acceleration
+ *
+@@ -6067,6 +6080,24 @@ void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
+ void (*iterator)(void *data,
+ struct ieee80211_sta *sta),
+ void *data);
++
++/**
++ * ieee80211_iterate_stations_mtx - iterate stations
++ *
++ * This function iterates over all stations associated with a given
++ * hardware that are currently uploaded to the driver and calls the callback
++ * function for them. This version can only be used while holding the wiphy
++ * mutex.
++ *
++ * @hw: the hardware struct of which the stations should be iterated over
++ * @iterator: the iterator function to call
++ * @data: first argument of the iterator function
++ */
++void ieee80211_iterate_stations_mtx(struct ieee80211_hw *hw,
++ void (*iterator)(void *data,
++ struct ieee80211_sta *sta),
++ void *data);
++
+ /**
+ * ieee80211_queue_work - add work onto the mac80211 workqueue
+ *
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index b5f9ee5810a347..8321915dddb284 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -721,15 +721,18 @@ struct nft_set_ext_tmpl {
+ /**
+ * struct nft_set_ext - set extensions
+ *
+- * @genmask: generation mask
++ * @genmask: generation mask, but also flags (see NFT_SET_ELEM_DEAD_BIT)
+ * @offset: offsets of individual extension types
+ * @data: beginning of extension data
++ *
++ * This structure must be aligned to word size, otherwise atomic bitops
++ * on genmask field can cause alignment failure on some archs.
+ */
+ struct nft_set_ext {
+ u8 genmask;
+ u8 offset[NFT_SET_EXT_NUM];
+ char data[];
+-};
++} __aligned(BITS_PER_LONG / 8);
+
+ static inline void nft_set_ext_prepare(struct nft_set_ext_tmpl *tmpl)
+ {
+diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
+index 58ee17f429a33a..02f327f05fd619 100644
+--- a/kernel/bpf/core.c
++++ b/kernel/bpf/core.c
+@@ -529,6 +529,8 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
+
+ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
+ {
++ int err;
++
+ /* Branch offsets can't overflow when program is shrinking, no need
+ * to call bpf_adj_branches(..., true) here
+ */
+@@ -536,7 +538,9 @@ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
+ sizeof(struct bpf_insn) * (prog->len - off - cnt));
+ prog->len -= cnt;
+
+- return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
++ err = bpf_adj_branches(prog, off, off + cnt, off, false);
++ WARN_ON_ONCE(err);
++ return err;
+ }
+
+ static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index a3c3c66ca04759..d6a4102312fadd 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1762,8 +1762,8 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+ int i, err;
+
+ dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
+- src->jmp_history_cnt, sizeof(*dst_state->jmp_history),
+- GFP_USER);
++ src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
++ GFP_USER);
+ if (!dst_state->jmp_history)
+ return -ENOMEM;
+ dst_state->jmp_history_cnt = src->jmp_history_cnt;
+@@ -3397,21 +3397,6 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
+ return __check_reg_arg(env, state->regs, regno, t);
+ }
+
+-static int insn_stack_access_flags(int frameno, int spi)
+-{
+- return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
+-}
+-
+-static int insn_stack_access_spi(int insn_flags)
+-{
+- return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
+-}
+-
+-static int insn_stack_access_frameno(int insn_flags)
+-{
+- return insn_flags & INSN_F_FRAMENO_MASK;
+-}
+-
+ static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
+ {
+ env->insn_aux_data[idx].jmp_point = true;
+@@ -3423,51 +3408,28 @@ static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
+ }
+
+ /* for any branch, call, exit record the history of jmps in the given state */
+-static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
+- int insn_flags)
++static int push_jmp_history(struct bpf_verifier_env *env,
++ struct bpf_verifier_state *cur)
+ {
+ u32 cnt = cur->jmp_history_cnt;
+- struct bpf_jmp_history_entry *p;
++ struct bpf_idx_pair *p;
+ size_t alloc_size;
+
+- /* combine instruction flags if we already recorded this instruction */
+- if (env->cur_hist_ent) {
+- /* atomic instructions push insn_flags twice, for READ and
+- * WRITE sides, but they should agree on stack slot
+- */
+- WARN_ONCE((env->cur_hist_ent->flags & insn_flags) &&
+- (env->cur_hist_ent->flags & insn_flags) != insn_flags,
+- "verifier insn history bug: insn_idx %d cur flags %x new flags %x\n",
+- env->insn_idx, env->cur_hist_ent->flags, insn_flags);
+- env->cur_hist_ent->flags |= insn_flags;
++ if (!is_jmp_point(env, env->insn_idx))
+ return 0;
+- }
+
+ cnt++;
+ alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
+ p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
+ if (!p)
+ return -ENOMEM;
++ p[cnt - 1].idx = env->insn_idx;
++ p[cnt - 1].prev_idx = env->prev_insn_idx;
+ cur->jmp_history = p;
+-
+- p = &cur->jmp_history[cnt - 1];
+- p->idx = env->insn_idx;
+- p->prev_idx = env->prev_insn_idx;
+- p->flags = insn_flags;
+ cur->jmp_history_cnt = cnt;
+- env->cur_hist_ent = p;
+-
+ return 0;
+ }
+
+-static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
+- u32 hist_end, int insn_idx)
+-{
+- if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx)
+- return &st->jmp_history[hist_end - 1];
+- return NULL;
+-}
+-
+ /* Backtrack one insn at a time. If idx is not at the top of recorded
+ * history then previous instruction came from straight line execution.
+ * Return -ENOENT if we exhausted all instructions within given state.
+@@ -3629,14 +3591,9 @@ static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
+ return bt->reg_masks[bt->frame] & (1 << reg);
+ }
+
+-static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
+-{
+- return bt->stack_masks[frame] & (1ull << slot);
+-}
+-
+ static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot)
+ {
+- return bt_is_frame_slot_set(bt, bt->frame, slot);
++ return bt->stack_masks[bt->frame] & (1ull << slot);
+ }
+
+ /* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
+@@ -3690,7 +3647,7 @@ static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
+ * - *was* processed previously during backtracking.
+ */
+ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+- struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
++ struct backtrack_state *bt)
+ {
+ const struct bpf_insn_cbs cbs = {
+ .cb_call = disasm_kfunc_name,
+@@ -3703,7 +3660,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ u8 mode = BPF_MODE(insn->code);
+ u32 dreg = insn->dst_reg;
+ u32 sreg = insn->src_reg;
+- u32 spi, i, fr;
++ u32 spi, i;
+
+ if (insn->code == 0)
+ return 0;
+@@ -3766,15 +3723,20 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ * by 'precise' mark in corresponding register of this state.
+ * No further tracking necessary.
+ */
+- if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
++ if (insn->src_reg != BPF_REG_FP)
+ return 0;
++
+ /* dreg = *(u64 *)[fp - off] was a fill from the stack.
+ * that [fp - off] slot contains scalar that needs to be
+ * tracked with precision
+ */
+- spi = insn_stack_access_spi(hist->flags);
+- fr = insn_stack_access_frameno(hist->flags);
+- bt_set_frame_slot(bt, fr, spi);
++ spi = (-insn->off - 1) / BPF_REG_SIZE;
++ if (spi >= 64) {
++ verbose(env, "BUG spi %d\n", spi);
++ WARN_ONCE(1, "verifier backtracking bug");
++ return -EFAULT;
++ }
++ bt_set_slot(bt, spi);
+ } else if (class == BPF_STX || class == BPF_ST) {
+ if (bt_is_reg_set(bt, dreg))
+ /* stx & st shouldn't be using _scalar_ dst_reg
+@@ -3783,13 +3745,17 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ */
+ return -ENOTSUPP;
+ /* scalars can only be spilled into stack */
+- if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
++ if (insn->dst_reg != BPF_REG_FP)
+ return 0;
+- spi = insn_stack_access_spi(hist->flags);
+- fr = insn_stack_access_frameno(hist->flags);
+- if (!bt_is_frame_slot_set(bt, fr, spi))
++ spi = (-insn->off - 1) / BPF_REG_SIZE;
++ if (spi >= 64) {
++ verbose(env, "BUG spi %d\n", spi);
++ WARN_ONCE(1, "verifier backtracking bug");
++ return -EFAULT;
++ }
++ if (!bt_is_slot_set(bt, spi))
+ return 0;
+- bt_clear_frame_slot(bt, fr, spi);
++ bt_clear_slot(bt, spi);
+ if (class == BPF_STX)
+ bt_set_reg(bt, sreg);
+ } else if (class == BPF_JMP || class == BPF_JMP32) {
+@@ -3833,14 +3799,10 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+- /* we are now tracking register spills correctly,
+- * so any instance of leftover slots is a bug
+- */
+- if (bt_stack_mask(bt) != 0) {
+- verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
+- WARN_ONCE(1, "verifier backtracking bug (subprog leftover stack slots)");
+- return -EFAULT;
+- }
++		/* we don't track register spills perfectly,
++		 * so fall back to force-precise instead of failing */
++ if (bt_stack_mask(bt) != 0)
++ return -ENOTSUPP;
+ /* propagate r1-r5 to the caller */
+ for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
+ if (bt_is_reg_set(bt, i)) {
+@@ -3865,11 +3827,8 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+- if (bt_stack_mask(bt) != 0) {
+- verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
+- WARN_ONCE(1, "verifier backtracking bug (callback leftover stack slots)");
+- return -EFAULT;
+- }
++ if (bt_stack_mask(bt) != 0)
++ return -ENOTSUPP;
+ /* clear r1-r5 in callback subprog's mask */
+ for (i = BPF_REG_1; i <= BPF_REG_5; i++)
+ bt_clear_reg(bt, i);
+@@ -4306,7 +4265,6 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ for (;;) {
+ DECLARE_BITMAP(mask, 64);
+ u32 history = st->jmp_history_cnt;
+- struct bpf_jmp_history_entry *hist;
+
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n",
+@@ -4370,8 +4328,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ err = 0;
+ skip_first = false;
+ } else {
+- hist = get_jmp_hist_entry(st, history, i);
+- err = backtrack_insn(env, i, subseq_idx, hist, bt);
++ err = backtrack_insn(env, i, subseq_idx, bt);
+ }
+ if (err == -ENOTSUPP) {
+ mark_all_scalars_precise(env, env->cur_state);
+@@ -4424,10 +4381,22 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
+ for_each_set_bit(i, mask, 64) {
+ if (i >= func->allocated_stack / BPF_REG_SIZE) {
+- verbose(env, "BUG backtracking (stack slot %d, total slots %d)\n",
+- i, func->allocated_stack / BPF_REG_SIZE);
+- WARN_ONCE(1, "verifier backtracking bug (stack slot out of bounds)");
+- return -EFAULT;
++ /* the sequence of instructions:
++ * 2: (bf) r3 = r10
++ * 3: (7b) *(u64 *)(r3 -8) = r0
++ * 4: (79) r4 = *(u64 *)(r10 -8)
++ * doesn't contain jmps. It's backtracked
++ * as a single block.
++			 * During backtracking insn 3 is not recognized as a
++			 * stack access, so at the end of backtracking the
++			 * stack slot fp-8 is still marked in stack_mask.
++			 * However, the parent state may not have accessed
++			 * fp-8 and it's "unallocated" stack space.
++			 * In such a case, fall back to conservative.
++ */
++ mark_all_scalars_precise(env, env->cur_state);
++ bt_reset(bt);
++ return 0;
+ }
+
+ if (!is_spilled_scalar_reg(&func->stack[i])) {
+@@ -4592,7 +4561,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+ struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
+ struct bpf_reg_state *reg = NULL;
+- int insn_flags = insn_stack_access_flags(state->frameno, spi);
++ u32 dst_reg = insn->dst_reg;
+
+ /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
+ * so it's aligned access and [off, off + size) are within stack limits
+@@ -4631,6 +4600,17 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ mark_stack_slot_scratched(env, spi);
+ if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
+ !register_is_null(reg) && env->bpf_capable) {
++ if (dst_reg != BPF_REG_FP) {
++		/* The backtracking logic can only recognize explicit
++		 * stack slot addresses like [fp - 8]. Other spills of a
++		 * scalar via a different register have to be conservative.
++		 * Backtrack from here and mark as precise all registers
++		 * that contributed to 'reg' being a constant.
++ */
++ err = mark_chain_precision(env, value_regno);
++ if (err)
++ return err;
++ }
+ save_register_state(state, spi, reg, size);
+ /* Break the relation on a narrowing spill. */
+ if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
+@@ -4642,7 +4622,6 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ __mark_reg_known(&fake_reg, insn->imm);
+ fake_reg.type = SCALAR_VALUE;
+ save_register_state(state, spi, &fake_reg, size);
+- insn_flags = 0; /* not a register spill */
+ } else if (reg && is_spillable_regtype(reg->type)) {
+ /* register containing pointer is being spilled into stack */
+ if (size != BPF_REG_SIZE) {
+@@ -4688,12 +4667,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+
+ /* Mark slots affected by this stack write. */
+ for (i = 0; i < size; i++)
+- state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type;
+- insn_flags = 0; /* not a register spill */
++ state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
++ type;
+ }
+-
+- if (insn_flags)
+- return push_jmp_history(env, env->cur_state, insn_flags);
+ return 0;
+ }
+
+@@ -4882,7 +4858,6 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
+ struct bpf_reg_state *reg;
+ u8 *stype, type;
+- int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
+
+ stype = reg_state->stack[spi].slot_type;
+ reg = &reg_state->stack[spi].spilled_ptr;
+@@ -4928,10 +4903,12 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ return -EACCES;
+ }
+ mark_reg_unknown(env, state->regs, dst_regno);
+- insn_flags = 0; /* not restoring original register state */
+ }
+ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
+- } else if (dst_regno >= 0) {
++ return 0;
++ }
++
++ if (dst_regno >= 0) {
+ /* restore register state from stack */
+ copy_register_state(&state->regs[dst_regno], reg);
+ /* mark reg as written since spilled pointer state likely
+@@ -4967,10 +4944,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ if (dst_regno >= 0)
+ mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
+- insn_flags = 0; /* we are not restoring spilled register */
+ }
+- if (insn_flags)
+- return push_jmp_history(env, env->cur_state, insn_flags);
+ return 0;
+ }
+
+@@ -7054,6 +7028,7 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
+ BPF_SIZE(insn->code), BPF_WRITE, -1, true, false);
+ if (err)
+ return err;
++
+ return 0;
+ }
+
+@@ -16802,8 +16777,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ * the precision needs to be propagated back in
+ * the current state.
+ */
+- if (is_jmp_point(env, env->insn_idx))
+- err = err ? : push_jmp_history(env, cur, 0);
++ err = err ? : push_jmp_history(env, cur);
+ err = err ? : propagate_precision(env, &sl->state);
+ if (err)
+ return err;
+@@ -17027,9 +17001,6 @@ static int do_check(struct bpf_verifier_env *env)
+ u8 class;
+ int err;
+
+- /* reset current history entry on each new instruction */
+- env->cur_hist_ent = NULL;
+-
+ env->prev_insn_idx = prev_insn_idx;
+ if (env->insn_idx >= insn_cnt) {
+ verbose(env, "invalid insn idx %d insn_cnt %d\n",
+@@ -17069,7 +17040,7 @@ static int do_check(struct bpf_verifier_env *env)
+ }
+
+ if (is_jmp_point(env, env->insn_idx)) {
+- err = push_jmp_history(env, state, 0);
++ err = push_jmp_history(env, state);
+ if (err)
+ return err;
+ }
+diff --git a/kernel/kcov.c b/kernel/kcov.c
+index 72d9aa6fb50c3e..097c8afa675578 100644
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -165,7 +165,7 @@ static void kcov_remote_area_put(struct kcov_remote_area *area,
+ * Unlike in_serving_softirq(), this function returns false when called during
+ * a hardirq or an NMI that happened in the softirq context.
+ */
+-static inline bool in_softirq_really(void)
++static __always_inline bool in_softirq_really(void)
+ {
+ return in_serving_softirq() && !in_hardirq() && !in_nmi();
+ }
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 228f7c07da7284..86606fb9e6bc6c 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4488,7 +4488,8 @@ int wake_up_state(struct task_struct *p, unsigned int state)
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ *
+- * __sched_fork() is basic setup used by init_idle() too:
++ * __sched_fork() is basic setup which is also used by sched_init() to
++ * initialize the boot CPU's idle task.
+ */
+ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+ {
+@@ -9257,8 +9258,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+- __sched_fork(0, idle);
+-
+ raw_spin_lock_irqsave(&idle->pi_lock, flags);
+ raw_spin_rq_lock(rq);
+
+@@ -9273,10 +9272,8 @@ void __init init_idle(struct task_struct *idle, int cpu)
+
+ #ifdef CONFIG_SMP
+ /*
+- * It's possible that init_idle() gets called multiple times on a task,
+- * in that case do_set_cpus_allowed() will not do the right thing.
+- *
+- * And since this is boot we can forgo the serialization.
++ * No validation and serialization required at boot time and for
++ * setting up the idle tasks of not yet online CPUs.
+ */
+ set_cpus_allowed_common(idle, &ac);
+ #endif
+@@ -10105,6 +10102,7 @@ void __init sched_init(void)
+ * but because we are the idle thread, we just pick up running again
+ * when this runqueue becomes "idle".
+ */
++ __sched_fork(0, current);
+ init_idle(current, smp_processor_id());
+
+ calc_load_update = jiffies + LOAD_FREQ;
+diff --git a/kernel/softirq.c b/kernel/softirq.c
+index bd9716d7bb6383..f24d80cf20bd35 100644
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -279,17 +279,24 @@ static inline void invoke_softirq(void)
+ wakeup_softirqd();
+ }
+
++#define SCHED_SOFTIRQ_MASK BIT(SCHED_SOFTIRQ)
++
+ /*
+ * flush_smp_call_function_queue() can raise a soft interrupt in a function
+- * call. On RT kernels this is undesired and the only known functionality
+- * in the block layer which does this is disabled on RT. If soft interrupts
+- * get raised which haven't been raised before the flush, warn so it can be
++ * call. On RT kernels this is undesired and the only known functionalities
++ * are in the block layer, which is disabled on RT, and in the scheduler for
++ * idle load balancing. If soft interrupts get raised which haven't been
++ * raised before the flush, warn if it is not a SCHED_SOFTIRQ so it can be
+ * investigated.
+ */
+ void do_softirq_post_smp_call_flush(unsigned int was_pending)
+ {
+- if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
++ unsigned int is_pending = local_softirq_pending();
++
++ if (unlikely(was_pending != is_pending)) {
++ WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
+ invoke_softirq();
++ }
+ }
+
+ #else /* CONFIG_PREEMPT_RT */
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 220903117c5139..9d9af60b238e27 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -1731,15 +1731,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
+ {
+ int len;
+
+- if (trace_seq_used(s) <= s->seq.readpos)
++ if (trace_seq_used(s) <= s->readpos)
+ return -EBUSY;
+
+- len = trace_seq_used(s) - s->seq.readpos;
++ len = trace_seq_used(s) - s->readpos;
+ if (cnt > len)
+ cnt = len;
+- memcpy(buf, s->buffer + s->seq.readpos, cnt);
++ memcpy(buf, s->buffer + s->readpos, cnt);
+
+- s->seq.readpos += cnt;
++ s->readpos += cnt;
+ return cnt;
+ }
+
+@@ -3760,17 +3760,12 @@ char *trace_iter_expand_format(struct trace_iterator *iter)
+ }
+
+ /* Returns true if the string is safe to dereference from an event */
+-static bool trace_safe_str(struct trace_iterator *iter, const char *str,
+- bool star, int len)
++static bool trace_safe_str(struct trace_iterator *iter, const char *str)
+ {
+ unsigned long addr = (unsigned long)str;
+ struct trace_event *trace_event;
+ struct trace_event_call *event;
+
+- /* Ignore strings with no length */
+- if (star && !len)
+- return true;
+-
+ /* OK if part of the event data */
+ if ((addr >= (unsigned long)iter->ent) &&
+ (addr < (unsigned long)iter->ent + iter->ent_size))
+@@ -3810,142 +3805,69 @@ static bool trace_safe_str(struct trace_iterator *iter, const char *str,
+ return false;
+ }
+
+-static const char *show_buffer(struct trace_seq *s)
+-{
+- struct seq_buf *seq = &s->seq;
+-
+- seq_buf_terminate(seq);
+-
+- return seq->buffer;
+-}
+-
+-static DEFINE_STATIC_KEY_FALSE(trace_no_verify);
+-
+-static int test_can_verify_check(const char *fmt, ...)
+-{
+- char buf[16];
+- va_list ap;
+- int ret;
+-
+- /*
+-	 * The verifier depends on vsnprintf() modifying the va_list
+-	 * passed to it, where it is passed by reference. Some architectures
+-	 * (like x86_32) pass it by value, which means that vsnprintf()
+- * does not modify the va_list passed to it, and the verifier
+- * would then need to be able to understand all the values that
+- * vsnprintf can use. If it is passed by value, then the verifier
+- * is disabled.
+- */
+- va_start(ap, fmt);
+- vsnprintf(buf, 16, "%d", ap);
+- ret = va_arg(ap, int);
+- va_end(ap);
+-
+- return ret;
+-}
+-
+-static void test_can_verify(void)
+-{
+- if (!test_can_verify_check("%d %d", 0, 1)) {
+- pr_info("trace event string verifier disabled\n");
+- static_branch_inc(&trace_no_verify);
+- }
+-}
+-
+ /**
+- * trace_check_vprintf - Check dereferenced strings while writing to the seq buffer
++ * ignore_event - Check dereferenced fields while writing to the seq buffer
+ * @iter: The iterator that holds the seq buffer and the event being printed
+- * @fmt: The format used to print the event
+- * @ap: The va_list holding the data to print from @fmt.
+ *
+- * This writes the data into the @iter->seq buffer using the data from
+- * @fmt and @ap. If the format has a %s, then the source of the string
+- * is examined to make sure it is safe to print, otherwise it will
+- * warn and print "[UNSAFE MEMORY]" in place of the dereferenced string
+- * pointer.
++ * At boot up, test_event_printk() will flag any event that dereferences
++ * a string with "%s" that does not exist in the ring buffer. It may still
++ * be valid, as the string may point to a static string in the kernel
++ * rodata that never gets freed. But if the string pointer is pointing
++ * to something that was allocated, there's a chance that it can be freed
++ * by the time the user reads the trace. This would cause a bad memory
++ * access by the kernel and possibly crash the system.
++ *
++ * This function will check if the event has any fields flagged as needing
++ * to be checked at runtime and perform those checks.
++ *
++ * If it is found that a field is unsafe, it will write into the @iter->seq
++ * a message stating what was found to be unsafe.
++ *
++ * Return: true if the event is unsafe and should be ignored,
++ *         false otherwise.
+ */
+-void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
+- va_list ap)
++bool ignore_event(struct trace_iterator *iter)
+ {
+- const char *p = fmt;
+- const char *str;
+- int i, j;
++ struct ftrace_event_field *field;
++ struct trace_event *trace_event;
++ struct trace_event_call *event;
++ struct list_head *head;
++ struct trace_seq *seq;
++ const void *ptr;
+
+- if (WARN_ON_ONCE(!fmt))
+- return;
++ trace_event = ftrace_find_event(iter->ent->type);
+
+- if (static_branch_unlikely(&trace_no_verify))
+- goto print;
++ seq = &iter->seq;
+
+- /* Don't bother checking when doing a ftrace_dump() */
+- if (iter->fmt == static_fmt_buf)
+- goto print;
++ if (!trace_event) {
++ trace_seq_printf(seq, "EVENT ID %d NOT FOUND?\n", iter->ent->type);
++ return true;
++ }
+
+- while (*p) {
+- bool star = false;
+- int len = 0;
+-
+- j = 0;
+-
+- /* We only care about %s and variants */
+- for (i = 0; p[i]; i++) {
+- if (i + 1 >= iter->fmt_size) {
+- /*
+- * If we can't expand the copy buffer,
+- * just print it.
+- */
+- if (!trace_iter_expand_format(iter))
+- goto print;
+- }
++ event = container_of(trace_event, struct trace_event_call, event);
++ if (!(event->flags & TRACE_EVENT_FL_TEST_STR))
++ return false;
+
+- if (p[i] == '\\' && p[i+1]) {
+- i++;
+- continue;
+- }
+- if (p[i] == '%') {
+- /* Need to test cases like %08.*s */
+- for (j = 1; p[i+j]; j++) {
+- if (isdigit(p[i+j]) ||
+- p[i+j] == '.')
+- continue;
+- if (p[i+j] == '*') {
+- star = true;
+- continue;
+- }
+- break;
+- }
+- if (p[i+j] == 's')
+- break;
+- star = false;
+- }
+- j = 0;
+- }
+- /* If no %s found then just print normally */
+- if (!p[i])
+- break;
++ head = trace_get_fields(event);
++ if (!head) {
++ trace_seq_printf(seq, "FIELDS FOR EVENT '%s' NOT FOUND?\n",
++ trace_event_name(event));
++ return true;
++ }
+
+- /* Copy up to the %s, and print that */
+- strncpy(iter->fmt, p, i);
+- iter->fmt[i] = '\0';
+- trace_seq_vprintf(&iter->seq, iter->fmt, ap);
++ /* Offsets are from the iter->ent that points to the raw event */
++ ptr = iter->ent;
+
+- /*
+- * If iter->seq is full, the above call no longer guarantees
+- * that ap is in sync with fmt processing, and further calls
+- * to va_arg() can return wrong positional arguments.
+- *
+- * Ensure that ap is no longer used in this case.
+- */
+- if (iter->seq.full) {
+- p = "";
+- break;
+- }
++ list_for_each_entry(field, head, link) {
++ const char *str;
++ bool good;
++
++ if (!field->needs_test)
++ continue;
+
+- if (star)
+- len = va_arg(ap, int);
++ str = *(const char **)(ptr + field->offset);
+
+- /* The ap now points to the string data of the %s */
+- str = va_arg(ap, const char *);
++ good = trace_safe_str(iter, str);
+
+ /*
+ * If you hit this warning, it is likely that the
+@@ -3956,45 +3878,14 @@ void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
+ * instead. See samples/trace_events/trace-events-sample.h
+ * for reference.
+ */
+- if (WARN_ONCE(!trace_safe_str(iter, str, star, len),
+- "fmt: '%s' current_buffer: '%s'",
+- fmt, show_buffer(&iter->seq))) {
+- int ret;
+-
+- /* Try to safely read the string */
+- if (star) {
+- if (len + 1 > iter->fmt_size)
+- len = iter->fmt_size - 1;
+- if (len < 0)
+- len = 0;
+- ret = copy_from_kernel_nofault(iter->fmt, str, len);
+- iter->fmt[len] = 0;
+- star = false;
+- } else {
+- ret = strncpy_from_kernel_nofault(iter->fmt, str,
+- iter->fmt_size);
+- }
+- if (ret < 0)
+- trace_seq_printf(&iter->seq, "(0x%px)", str);
+- else
+- trace_seq_printf(&iter->seq, "(0x%px:%s)",
+- str, iter->fmt);
+- str = "[UNSAFE-MEMORY]";
+- strcpy(iter->fmt, "%s");
+- } else {
+- strncpy(iter->fmt, p + i, j + 1);
+- iter->fmt[j+1] = '\0';
++ if (WARN_ONCE(!good, "event '%s' has unsafe pointer field '%s'",
++ trace_event_name(event), field->name)) {
++ trace_seq_printf(seq, "EVENT %s: HAS UNSAFE POINTER FIELD '%s'\n",
++ trace_event_name(event), field->name);
++ return true;
+ }
+- if (star)
+- trace_seq_printf(&iter->seq, iter->fmt, len, str);
+- else
+- trace_seq_printf(&iter->seq, iter->fmt, str);
+-
+- p += i + j + 1;
+ }
+- print:
+- if (*p)
+- trace_seq_vprintf(&iter->seq, p, ap);
++ return false;
+ }
+
+ const char *trace_event_format(struct trace_iterator *iter, const char *fmt)
+@@ -7011,7 +6902,7 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
+
+ /* Now copy what we have to the user */
+ sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
+- if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
++ if (iter->seq.readpos >= trace_seq_used(&iter->seq))
+ trace_seq_init(&iter->seq);
+
+ /*
+@@ -10539,8 +10430,6 @@ __init static int tracer_alloc_buffers(void)
+
+ register_snapshot_cmd();
+
+- test_can_verify();
+-
+ return 0;
+
+ out_free_pipe_cpumask:
+diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
+index 3db42bae73f8e0..e45756f1ac2b12 100644
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -644,9 +644,8 @@ void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
+
+ bool trace_is_tracepoint_string(const char *str);
+ const char *trace_event_format(struct trace_iterator *iter, const char *fmt);
+-void trace_check_vprintf(struct trace_iterator *iter, const char *fmt,
+- va_list ap) __printf(2, 0);
+ char *trace_iter_expand_format(struct trace_iterator *iter);
++bool ignore_event(struct trace_iterator *iter);
+
+ int trace_empty(struct trace_iterator *iter);
+
+@@ -1323,7 +1322,8 @@ struct ftrace_event_field {
+ int filter_type;
+ int offset;
+ int size;
+- int is_signed;
++ unsigned int is_signed:1;
++ unsigned int needs_test:1;
+ int len;
+ };
+
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 64cd856308e77c..9d22745cdea5aa 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -82,7 +82,7 @@ static int system_refcount_dec(struct event_subsystem *system)
+ }
+
+ static struct ftrace_event_field *
+-__find_event_field(struct list_head *head, char *name)
++__find_event_field(struct list_head *head, const char *name)
+ {
+ struct ftrace_event_field *field;
+
+@@ -114,7 +114,8 @@ trace_find_event_field(struct trace_event_call *call, char *name)
+
+ static int __trace_define_field(struct list_head *head, const char *type,
+ const char *name, int offset, int size,
+- int is_signed, int filter_type, int len)
++ int is_signed, int filter_type, int len,
++ int need_test)
+ {
+ struct ftrace_event_field *field;
+
+@@ -133,6 +134,7 @@ static int __trace_define_field(struct list_head *head, const char *type,
+ field->offset = offset;
+ field->size = size;
+ field->is_signed = is_signed;
++ field->needs_test = need_test;
+ field->len = len;
+
+ list_add(&field->link, head);
+@@ -151,13 +153,13 @@ int trace_define_field(struct trace_event_call *call, const char *type,
+
+ head = trace_get_fields(call);
+ return __trace_define_field(head, type, name, offset, size,
+- is_signed, filter_type, 0);
++ is_signed, filter_type, 0, 0);
+ }
+ EXPORT_SYMBOL_GPL(trace_define_field);
+
+ static int trace_define_field_ext(struct trace_event_call *call, const char *type,
+ const char *name, int offset, int size, int is_signed,
+- int filter_type, int len)
++ int filter_type, int len, int need_test)
+ {
+ struct list_head *head;
+
+@@ -166,13 +168,13 @@ static int trace_define_field_ext(struct trace_event_call *call, const char *typ
+
+ head = trace_get_fields(call);
+ return __trace_define_field(head, type, name, offset, size,
+- is_signed, filter_type, len);
++ is_signed, filter_type, len, need_test);
+ }
+
+ #define __generic_field(type, item, filter_type) \
+ ret = __trace_define_field(&ftrace_generic_fields, #type, \
+ #item, 0, 0, is_signed_type(type), \
+- filter_type, 0); \
++ filter_type, 0, 0); \
+ if (ret) \
+ return ret;
+
+@@ -181,7 +183,8 @@ static int trace_define_field_ext(struct trace_event_call *call, const char *typ
+ "common_" #item, \
+ offsetof(typeof(ent), item), \
+ sizeof(ent.item), \
+- is_signed_type(type), FILTER_OTHER, 0); \
++ is_signed_type(type), FILTER_OTHER, \
++ 0, 0); \
+ if (ret) \
+ return ret;
+
+@@ -332,6 +335,7 @@ static bool process_pointer(const char *fmt, int len, struct trace_event_call *c
+ /* Return true if the string is safe */
+ static bool process_string(const char *fmt, int len, struct trace_event_call *call)
+ {
++ struct trace_event_fields *field;
+ const char *r, *e, *s;
+
+ e = fmt + len;
+@@ -360,6 +364,18 @@ static bool process_string(const char *fmt, int len, struct trace_event_call *ca
+ s = r + 1;
+ } while (s < e);
+
++ /*
++ * Check for arrays. If the argument has: foo[REC->val]
++ * then it is very likely that foo is an array of strings
++ * that are safe to use.
++ */
++ r = strstr(s, "[");
++ if (r && r < e) {
++ r = strstr(r, "REC->");
++ if (r && r < e)
++ return true;
++ }
++
+ /*
+	 * If there are any strings in the argument, consider this arg OK as it
+ * could be: REC->field ? "foo" : "bar" and we don't want to get into
+@@ -372,8 +388,16 @@ static bool process_string(const char *fmt, int len, struct trace_event_call *ca
+ if (process_pointer(fmt, len, call))
+ return true;
+
+- /* Make sure the field is found, and consider it OK for now if it is */
+- return find_event_field(fmt, call) != NULL;
++ /* Make sure the field is found */
++ field = find_event_field(fmt, call);
++ if (!field)
++ return false;
++
++ /* Test this field's string before printing the event */
++ call->flags |= TRACE_EVENT_FL_TEST_STR;
++ field->needs_test = 1;
++
++ return true;
+ }
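/*
 * Editor's illustrative note, not part of the patch: the array heuristic
 * above accepts TP_printk() arguments of the shape (state_names is a
 * hypothetical static array of string literals indexed by a recorded
 * field, which stays valid for the lifetime of the kernel):
 *
 *	TP_printk("state=%s", state_names[REC->state])
 *
 * The two strstr() calls match the "[" followed by "REC->" and treat the
 * argument as safe without flagging the field for runtime testing.
 */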
+
+ /*
+@@ -2552,7 +2576,7 @@ event_define_fields(struct trace_event_call *call)
+ ret = trace_define_field_ext(call, field->type, field->name,
+ offset, field->size,
+ field->is_signed, field->filter_type,
+- field->len);
++ field->len, field->needs_test);
+ if (WARN_ON_ONCE(ret)) {
+ pr_err("error code is %d\n", ret);
+ break;
+diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
+index db575094c49825..2b948d35fb59ea 100644
+--- a/kernel/trace/trace_output.c
++++ b/kernel/trace/trace_output.c
+@@ -317,10 +317,14 @@ EXPORT_SYMBOL(trace_raw_output_prep);
+
+ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...)
+ {
++ struct trace_seq *s = &iter->seq;
+ va_list ap;
+
++ if (ignore_event(iter))
++ return;
++
+ va_start(ap, fmt);
+- trace_check_vprintf(iter, trace_event_format(iter, fmt), ap);
++ trace_seq_vprintf(s, trace_event_format(iter, fmt), ap);
+ va_end(ap);
+ }
+ EXPORT_SYMBOL(trace_event_printf);
+diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
+index bac06ee3b98b81..7be97229ddf860 100644
+--- a/kernel/trace/trace_seq.c
++++ b/kernel/trace/trace_seq.c
+@@ -370,8 +370,12 @@ EXPORT_SYMBOL_GPL(trace_seq_path);
+ */
+ int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)
+ {
++ int ret;
+ __trace_seq_init(s);
+- return seq_buf_to_user(&s->seq, ubuf, cnt);
++ ret = seq_buf_to_user(&s->seq, ubuf, s->readpos, cnt);
++ if (ret > 0)
++ s->readpos += ret;
++ return ret;
+ }
+ EXPORT_SYMBOL_GPL(trace_seq_to_user);
+
+diff --git a/lib/seq_buf.c b/lib/seq_buf.c
+index 45c450f423fa87..23518f77ea9c53 100644
+--- a/lib/seq_buf.c
++++ b/lib/seq_buf.c
+@@ -109,9 +109,7 @@ void seq_buf_do_printk(struct seq_buf *s, const char *lvl)
+ if (s->size == 0 || s->len == 0)
+ return;
+
+- seq_buf_terminate(s);
+-
+- start = s->buffer;
++ start = seq_buf_str(s);
+ while ((lf = strchr(start, '\n'))) {
+ int len = lf - start + 1;
+
+@@ -324,23 +322,24 @@ int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc)
+ * seq_buf_to_user - copy the sequence buffer to user space
+ * @s: seq_buf descriptor
+ * @ubuf: The userspace memory location to copy to
++ * @start: The first byte in the buffer to copy
+ * @cnt: The amount to copy
+ *
+ * Copies the sequence buffer into the userspace memory pointed to
+- * by @ubuf. It starts from the last read position (@s->readpos)
+- * and writes up to @cnt characters or till it reaches the end of
+- * the content in the buffer (@s->len), which ever comes first.
++ * by @ubuf. It starts from @start and writes up to @cnt characters
++ * or until it reaches the end of the content in the buffer (@s->len),
++ * whichever comes first.
+ *
+ * On success, it returns the (positive) number of bytes it copied.
+ *
+ * On failure it returns -EBUSY if all of the content in the
+ * sequence has been already read, which includes nothing in the
+- * sequence (@s->len == @s->readpos).
++ * sequence (@s->len == @start).
+ *
+ * Returns -EFAULT if the copy to userspace fails.
+ */
+-int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
++int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, size_t start, int cnt)
+ {
+ int len;
+ int ret;
+@@ -350,20 +349,17 @@ int seq_buf_to_user(struct seq_buf *s, char __user *ubuf, int cnt)
+
+ len = seq_buf_used(s);
+
+- if (len <= s->readpos)
++ if (len <= start)
+ return -EBUSY;
+
+- len -= s->readpos;
++ len -= start;
+ if (cnt > len)
+ cnt = len;
+- ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
++ ret = copy_to_user(ubuf, s->buffer + start, cnt);
+ if (ret == cnt)
+ return -EFAULT;
+
+- cnt -= ret;
+-
+- s->readpos += cnt;
+- return cnt;
++ return cnt - ret;
+ }
+
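/*
 * Editor's illustrative sketch, not part of the patch: with @start now an
 * argument, the read cursor lives with the caller, mirroring what
 * trace_seq_to_user() does with s->readpos. A minimal caller-side pattern
 * (example_drain and *pos are hypothetical):
 */
static int example_drain(struct seq_buf *s, char __user *ubuf, int cnt,
			 size_t *pos)
{
	int ret = seq_buf_to_user(s, ubuf, *pos, cnt);

	if (ret > 0)
		*pos += ret;	/* advance only by what was actually copied */
	return ret;
}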
+ /**
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 54c2c90d3abc9d..5811a11cc53a6d 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -368,7 +368,7 @@ static void print_unreferenced(struct seq_file *seq,
+
+ for (i = 0; i < nr_entries; i++) {
+ void *ptr = (void *)entries[i];
+- warn_or_seq_printf(seq, " [<%pK>] %pS\n", ptr, ptr);
++ warn_or_seq_printf(seq, " %pS\n", ptr);
+ }
+ }
+
+diff --git a/mm/memblock.c b/mm/memblock.c
+index d630f5c2bdb90e..87a2b4340ce4ea 100644
+--- a/mm/memblock.c
++++ b/mm/memblock.c
+@@ -735,6 +735,40 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
+ return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
+ }
+
++/**
++ * memblock_validate_numa_coverage - check if the amount of memory with
++ * no node ID assigned is less than a threshold
++ * @threshold_bytes: maximum amount of memory (in bytes) that may have
++ * no node ID assigned
++ *
++ * Buggy firmware may report memory that does not belong to any node.
++ * Check if the amount of such memory is below @threshold_bytes.
++ *
++ * Return: true on success, false on failure.
++ */
++bool __init_memblock memblock_validate_numa_coverage(unsigned long threshold_bytes)
++{
++ unsigned long nr_pages = 0;
++ unsigned long start_pfn, end_pfn, mem_size_mb;
++ int nid, i;
++
++	/* count pages with no node ID assigned */
++ for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
++ if (nid == NUMA_NO_NODE)
++ nr_pages += end_pfn - start_pfn;
++ }
++
++ if ((nr_pages << PAGE_SHIFT) > threshold_bytes) {
++ mem_size_mb = memblock_phys_mem_size() >> 20;
++ pr_err("NUMA: no nodes coverage for %luMB of %luMB RAM\n",
++ (nr_pages << PAGE_SHIFT) >> 20, mem_size_mb);
++ return false;
++ }
++
++ return true;
++}
++
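/*
 * Editor's illustrative sketch, not part of the patch: a typical caller
 * would be an arch's NUMA init path, rejecting the configuration when
 * firmware left too much memory without a node ID. The threshold is
 * illustrative only (SZ_1M comes from <linux/sizes.h>):
 */
static int __init example_numa_check(void)
{
	if (!memblock_validate_numa_coverage(SZ_1M))
		return -EINVAL;	/* reject this NUMA configuration */
	return 0;
}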
++
+ /**
+ * memblock_isolate_range - isolate given range into disjoint memblocks
+ * @type: memblock type to isolate range for
+diff --git a/mm/readahead.c b/mm/readahead.c
+index e9b11d928b0c48..f1595c032ce7e3 100644
+--- a/mm/readahead.c
++++ b/mm/readahead.c
+@@ -580,7 +580,11 @@ static void ondemand_readahead(struct readahead_control *ractl,
+ 1UL << order);
+ if (index == expected || index == (ra->start + ra->size)) {
+ ra->start += ra->size;
+- ra->size = get_next_ra_size(ra, max_pages);
++ /*
++ * In the case of MADV_HUGEPAGE, the actual size might exceed
++ * the readahead window.
++ */
++ ra->size = max(ra->size, get_next_ra_size(ra, max_pages));
+ ra->async_size = ra->size;
+ goto readit;
+ }
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index 3c91b86d59e935..49456b72575529 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -641,7 +641,14 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
+ if (can_reclaim_anon_pages(NULL, zone_to_nid(zone), NULL))
+ nr += zone_page_state_snapshot(zone, NR_ZONE_INACTIVE_ANON) +
+ zone_page_state_snapshot(zone, NR_ZONE_ACTIVE_ANON);
+-
++ /*
++ * If there are no reclaimable file-backed or anonymous pages,
++ * ensure zones with sufficient free pages are not skipped.
++ * This prevents zones like DMA32 from being ignored in reclaim
++ * scenarios where they can still help alleviate memory pressure.
++ */
++ if (nr == 0)
++ nr = zone_page_state_snapshot(zone, NR_FREE_PAGES);
+ return nr;
+ }
+
+diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
+index 6178ae8feafc0f..549ee9e87d6366 100644
+--- a/net/bluetooth/hci_conn.c
++++ b/net/bluetooth/hci_conn.c
+@@ -2178,13 +2178,9 @@ struct hci_conn *hci_bind_bis(struct hci_dev *hdev, bdaddr_t *dst,
+ conn->iso_qos.bcast.big);
+ if (parent && parent != conn) {
+ link = hci_conn_link(parent, conn);
+- if (!link) {
+- hci_conn_drop(conn);
+- return ERR_PTR(-ENOLINK);
+- }
+-
+- /* Link takes the refcount */
+ hci_conn_drop(conn);
++ if (!link)
++ return ERR_PTR(-ENOLINK);
+ }
+
+ return conn;
+@@ -2274,15 +2270,12 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst,
+ }
+
+ link = hci_conn_link(le, cis);
++ hci_conn_drop(cis);
+ if (!link) {
+ hci_conn_drop(le);
+- hci_conn_drop(cis);
+ return ERR_PTR(-ENOLINK);
+ }
+
+- /* Link takes the refcount */
+- hci_conn_drop(cis);
+-
+ cis->state = BT_CONNECT;
+
+ hci_le_create_cis_pending(hdev);
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 30519d47e8a695..f29fd326440115 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -58,7 +58,6 @@ DEFINE_RWLOCK(hci_dev_list_lock);
+
+ /* HCI callback list */
+ LIST_HEAD(hci_cb_list);
+-DEFINE_MUTEX(hci_cb_list_lock);
+
+ /* HCI ID Numbering */
+ static DEFINE_IDA(hci_index_ida);
+@@ -2957,9 +2956,7 @@ int hci_register_cb(struct hci_cb *cb)
+ {
+ BT_DBG("%p name %s", cb, cb->name);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_add_tail(&cb->list, &hci_cb_list);
+- mutex_unlock(&hci_cb_list_lock);
++ list_add_tail_rcu(&cb->list, &hci_cb_list);
+
+ return 0;
+ }
+@@ -2969,9 +2966,8 @@ int hci_unregister_cb(struct hci_cb *cb)
+ {
+ BT_DBG("%p name %s", cb, cb->name);
+
+- mutex_lock(&hci_cb_list_lock);
+- list_del(&cb->list);
+- mutex_unlock(&hci_cb_list_lock);
++ list_del_rcu(&cb->list);
++ synchronize_rcu();
+
+ return 0;
+ }
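/*
 * Editor's illustrative sketch, not part of the patch: with the callback
 * list now RCU-managed, dispatch can walk it locklessly, while the
 * synchronize_rcu() in hci_unregister_cb() guarantees no reader still
 * holds a removed entry. Simplified shape of a reader (mirroring the
 * hci_*_cfm() helpers after this series):
 */
static inline void example_connect_cfm(struct hci_conn *conn, u8 status)
{
	struct hci_cb *cb;

	rcu_read_lock();
	list_for_each_entry_rcu(cb, &hci_cb_list, list) {
		if (cb->match && !cb->match(conn))
			continue;	/* not interested in this link type */
		if (cb->connect_cfm)
			cb->connect_cfm(conn, status);
	}
	rcu_read_unlock();
}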
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index b94d202bf3745c..f165cafa3aa98b 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -1929,6 +1929,11 @@ int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ return lm;
+ }
+
++static bool iso_match(struct hci_conn *hcon)
++{
++ return hcon->type == ISO_LINK || hcon->type == LE_LINK;
++}
++
+ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status)
+ {
+ if (hcon->type != ISO_LINK) {
+@@ -2110,6 +2115,7 @@ void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+
+ static struct hci_cb iso_cb = {
+ .name = "ISO",
++ .match = iso_match,
+ .connect_cfm = iso_connect_cfm,
+ .disconn_cfm = iso_disconn_cfm,
+ };
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 93651c421767a0..acb148759bd049 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -7223,6 +7223,11 @@ static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
+ return NULL;
+ }
+
++static bool l2cap_match(struct hci_conn *hcon)
++{
++ return hcon->type == ACL_LINK || hcon->type == LE_LINK;
++}
++
+ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+ {
+ struct hci_dev *hdev = hcon->hdev;
+@@ -7230,9 +7235,6 @@ static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
+ struct l2cap_chan *pchan;
+ u8 dst_type;
+
+- if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+- return;
+-
+ BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
+
+ if (status) {
+@@ -7297,9 +7299,6 @@ int l2cap_disconn_ind(struct hci_conn *hcon)
+
+ static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
+ {
+- if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
+- return;
+-
+ BT_DBG("hcon %p reason %d", hcon, reason);
+
+ l2cap_conn_del(hcon, bt_to_errno(reason));
+@@ -7578,6 +7577,7 @@ void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+
+ static struct hci_cb l2cap_cb = {
+ .name = "L2CAP",
++ .match = l2cap_match,
+ .connect_cfm = l2cap_connect_cfm,
+ .disconn_cfm = l2cap_disconn_cfm,
+ .security_cfm = l2cap_security_cfm,
+diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
+index 1d34d849703329..9d46afb24caf07 100644
+--- a/net/bluetooth/rfcomm/core.c
++++ b/net/bluetooth/rfcomm/core.c
+@@ -2134,6 +2134,11 @@ static int rfcomm_run(void *unused)
+ return 0;
+ }
+
++static bool rfcomm_match(struct hci_conn *hcon)
++{
++ return hcon->type == ACL_LINK;
++}
++
+ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+ {
+ struct rfcomm_session *s;
+@@ -2180,6 +2185,7 @@ static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt)
+
+ static struct hci_cb rfcomm_cb = {
+ .name = "RFCOMM",
++ .match = rfcomm_match,
+ .security_cfm = rfcomm_security_cfm
+ };
+
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 64d4d57c7033a3..c4c36ff25fb202 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -1353,11 +1353,13 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
+ return lm;
+ }
+
+-static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
++static bool sco_match(struct hci_conn *hcon)
+ {
+- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+- return;
++ return hcon->type == SCO_LINK || hcon->type == ESCO_LINK;
++}
+
++static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
++{
+ BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status);
+
+ if (!status) {
+@@ -1372,9 +1374,6 @@ static void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
+
+ static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
+ {
+- if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK)
+- return;
+-
+ BT_DBG("hcon %p reason %d", hcon, reason);
+
+ sco_conn_del(hcon, bt_to_errno(reason));
+@@ -1400,6 +1399,7 @@ void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb)
+
+ static struct hci_cb sco_cb = {
+ .name = "SCO",
++ .match = sco_match,
+ .connect_cfm = sco_connect_cfm,
+ .disconn_cfm = sco_disconn_cfm,
+ };
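/*
 * Editor's note, not part of the patch: each protocol now declares the
 * link types it handles through .match (ISO/LE, ACL/LE, ACL, SCO/eSCO
 * above), so the per-callback "if (hcon->type != ...) return;" guards
 * become redundant and are dropped from the *_cfm() handlers.
 */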
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 4beb9acf2c1839..69da7b009f8b98 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3628,8 +3628,10 @@ int skb_csum_hwoffload_help(struct sk_buff *skb,
+
+ if (features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
+ if (vlan_get_protocol(skb) == htons(ETH_P_IPV6) &&
+- skb_network_header_len(skb) != sizeof(struct ipv6hdr))
++ skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
++ !ipv6_has_hopopt_jumbo(skb))
+ goto sw_checksum;
++
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ case offsetof(struct udphdr, check):
+diff --git a/net/core/sock.c b/net/core/sock.c
+index bc2a4e38dcea8e..84ba3f67bca97b 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1133,7 +1133,10 @@ int sk_setsockopt(struct sock *sk, int level, int optname,
+ sk->sk_reuse = (valbool ? SK_CAN_REUSE : SK_NO_REUSE);
+ break;
+ case SO_REUSEPORT:
+- sk->sk_reuseport = valbool;
++ if (valbool && !sk_is_inet(sk))
++ ret = -EOPNOTSUPP;
++ else
++ sk->sk_reuseport = valbool;
+ break;
+ case SO_TYPE:
+ case SO_PROTOCOL:
+diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
+index 72b2d68ef4da5f..dd1803bf9c5c63 100644
+--- a/net/ipv4/ip_tunnel.c
++++ b/net/ipv4/ip_tunnel.c
+@@ -43,6 +43,7 @@
+ #include <net/rtnetlink.h>
+ #include <net/udp.h>
+ #include <net/dst_metadata.h>
++#include <net/inet_dscp.h>
+
+ #if IS_ENABLED(CONFIG_IPV6)
+ #include <net/ipv6.h>
+@@ -102,10 +103,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ if (!ip_tunnel_key_match(&t->parms, flags, key))
+ continue;
+
+- if (t->parms.link == link)
++ if (READ_ONCE(t->parms.link) == link)
+ return t;
+- else
+- cand = t;
++ cand = t;
+ }
+
+ hlist_for_each_entry_rcu(t, head, hash_node) {
+@@ -117,9 +117,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ if (!ip_tunnel_key_match(&t->parms, flags, key))
+ continue;
+
+- if (t->parms.link == link)
++ if (READ_ONCE(t->parms.link) == link)
+ return t;
+- else if (!cand)
++ if (!cand)
+ cand = t;
+ }
+
+@@ -137,9 +137,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ if (!ip_tunnel_key_match(&t->parms, flags, key))
+ continue;
+
+- if (t->parms.link == link)
++ if (READ_ONCE(t->parms.link) == link)
+ return t;
+- else if (!cand)
++ if (!cand)
+ cand = t;
+ }
+
+@@ -150,9 +150,9 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
+ !(t->dev->flags & IFF_UP))
+ continue;
+
+- if (t->parms.link == link)
++ if (READ_ONCE(t->parms.link) == link)
+ return t;
+- else if (!cand)
++ if (!cand)
+ cand = t;
+ }
+
+@@ -221,7 +221,7 @@ static struct ip_tunnel *ip_tunnel_find(struct ip_tunnel_net *itn,
+ hlist_for_each_entry_rcu(t, head, hash_node) {
+ if (local == t->parms.iph.saddr &&
+ remote == t->parms.iph.daddr &&
+- link == t->parms.link &&
++ link == READ_ONCE(t->parms.link) &&
+ type == t->dev->type &&
+ ip_tunnel_key_match(&t->parms, flags, key))
+ break;
+@@ -294,7 +294,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
+
+ ip_tunnel_init_flow(&fl4, iph->protocol, iph->daddr,
+ iph->saddr, tunnel->parms.o_key,
+- RT_TOS(iph->tos), dev_net(dev),
++ iph->tos & INET_DSCP_MASK, tunnel->net,
+ tunnel->parms.link, tunnel->fwmark, 0, 0);
+ rt = ip_route_output_key(tunnel->net, &fl4);
+
+@@ -610,9 +610,9 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ tos = ipv6_get_dsfield((const struct ipv6hdr *)inner_iph);
+ }
+ ip_tunnel_init_flow(&fl4, proto, key->u.ipv4.dst, key->u.ipv4.src,
+- tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
+- dev_net(dev), 0, skb->mark, skb_get_hash(skb),
+- key->flow_flags);
++ tunnel_id_to_key32(key->tun_id),
++ tos & INET_DSCP_MASK, tunnel->net, 0, skb->mark,
++ skb_get_hash(skb), key->flow_flags);
+
+ if (!tunnel_hlen)
+ tunnel_hlen = ip_encap_hlen(&tun_info->encap);
+@@ -773,8 +773,8 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
+ }
+
+ ip_tunnel_init_flow(&fl4, protocol, dst, tnl_params->saddr,
+- tunnel->parms.o_key, RT_TOS(tos),
+- dev_net(dev), tunnel->parms.link,
++ tunnel->parms.o_key, tos & INET_DSCP_MASK,
++ tunnel->net, READ_ONCE(tunnel->parms.link),
+ tunnel->fwmark, skb_get_hash(skb), 0);
+
+ if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
+@@ -894,7 +894,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
+ if (t->parms.link != p->link || t->fwmark != fwmark) {
+ int mtu;
+
+- t->parms.link = p->link;
++ WRITE_ONCE(t->parms.link, p->link);
+ t->fwmark = fwmark;
+ mtu = ip_tunnel_bind_dev(dev);
+ if (set_mtu)
+@@ -1084,9 +1084,9 @@ EXPORT_SYMBOL(ip_tunnel_get_link_net);
+
+ int ip_tunnel_get_iflink(const struct net_device *dev)
+ {
+- struct ip_tunnel *tunnel = netdev_priv(dev);
++ const struct ip_tunnel *tunnel = netdev_priv(dev);
+
+- return tunnel->parms.link;
++ return READ_ONCE(tunnel->parms.link);
+ }
+ EXPORT_SYMBOL(ip_tunnel_get_iflink);
+
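/*
 * Editor's note, not part of the patch: parms.link is now read locklessly
 * (e.g. in ip_tunnel_lookup() and ip_tunnel_get_iflink() above) while
 * ip_tunnel_update() may change it concurrently, so both sides are
 * annotated to keep the data race benign and visible to KCSAN:
 *
 *	writer:	WRITE_ONCE(t->parms.link, p->link);
 *	reader:	link = READ_ONCE(t->parms.link);
 */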
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index fb053942dba2a1..f6a213bae5cccb 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -7192,6 +7192,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ if (unlikely(!inet_csk_reqsk_queue_hash_add(sk, req,
+ req->timeout))) {
+ reqsk_free(req);
++ dst_release(dst);
+ return 0;
+ }
+
+diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
+index 534a4498e280d7..fff09f5a796a75 100644
+--- a/net/ipv6/ila/ila_xlat.c
++++ b/net/ipv6/ila/ila_xlat.c
+@@ -200,6 +200,8 @@ static const struct nf_hook_ops ila_nf_hook_ops[] = {
+ },
+ };
+
++static DEFINE_MUTEX(ila_mutex);
++
+ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
+ {
+ struct ila_net *ilan = net_generic(net, ila_net_id);
+@@ -207,16 +209,20 @@ static int ila_add_mapping(struct net *net, struct ila_xlat_params *xp)
+ spinlock_t *lock = ila_get_lock(ilan, xp->ip.locator_match);
+ int err = 0, order;
+
+- if (!ilan->xlat.hooks_registered) {
++ if (!READ_ONCE(ilan->xlat.hooks_registered)) {
+ /* We defer registering net hooks in the namespace until the
+ * first mapping is added.
+ */
+- err = nf_register_net_hooks(net, ila_nf_hook_ops,
+- ARRAY_SIZE(ila_nf_hook_ops));
++ mutex_lock(&ila_mutex);
++ if (!ilan->xlat.hooks_registered) {
++ err = nf_register_net_hooks(net, ila_nf_hook_ops,
++ ARRAY_SIZE(ila_nf_hook_ops));
++ if (!err)
++ WRITE_ONCE(ilan->xlat.hooks_registered, true);
++ }
++ mutex_unlock(&ila_mutex);
+ if (err)
+ return err;
+-
+- ilan->xlat.hooks_registered = true;
+ }
+
+ ila = kzalloc(sizeof(*ila), GFP_KERNEL);
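/*
 * Editor's illustrative sketch, not part of the patch: the hunk above is
 * the classic double-checked locking shape. Generically (all names here
 * are hypothetical):
 */
static bool example_registered;
static DEFINE_MUTEX(example_mutex);

static int example_register_once(int (*do_register)(void))
{
	int err = 0;

	if (!READ_ONCE(example_registered)) {	/* unlocked fast path */
		mutex_lock(&example_mutex);
		if (!example_registered) {	/* re-check under the lock */
			err = do_register();
			if (!err)
				WRITE_ONCE(example_registered, true);
		}
		mutex_unlock(&example_mutex);
	}
	return err;
}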
+diff --git a/net/llc/llc_input.c b/net/llc/llc_input.c
+index 51bccfb00a9cd9..61b0159b2fbee6 100644
+--- a/net/llc/llc_input.c
++++ b/net/llc/llc_input.c
+@@ -124,8 +124,8 @@ static inline int llc_fixup_skb(struct sk_buff *skb)
+ if (unlikely(!pskb_may_pull(skb, llc_len)))
+ return 0;
+
+- skb->transport_header += llc_len;
+ skb_pull(skb, llc_len);
++ skb_reset_transport_header(skb);
+ if (skb->protocol == htons(ETH_P_802_2)) {
+ __be16 pdulen;
+ s32 data_size;
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index daea061d0fc136..04c876d78d3bf0 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -2057,8 +2057,6 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
+ u32 info_flags,
+ u32 ctrl_flags,
+ u64 *cookie);
+-void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+- struct sk_buff_head *skbs);
+ struct sk_buff *
+ ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, u32 info_flags);
+diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
+index 25223184d6e5b0..a5e7edd2f2d137 100644
+--- a/net/mac80211/mesh.c
++++ b/net/mac80211/mesh.c
+@@ -1173,14 +1173,14 @@ void ieee80211_mbss_info_change_notify(struct ieee80211_sub_if_data *sdata,
+ u64 changed)
+ {
+ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
+- unsigned long bits = changed;
++ unsigned long bits[] = { BITMAP_FROM_U64(changed) };
+ u32 bit;
+
+- if (!bits)
++ if (!changed)
+ return;
+
+ /* if we race with running work, worst case this work becomes a noop */
+- for_each_set_bit(bit, &bits, sizeof(changed) * BITS_PER_BYTE)
++ for_each_set_bit(bit, bits, sizeof(changed) * BITS_PER_BYTE)
+ set_bit(bit, ifmsh->mbss_changed);
+ set_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags);
+ wiphy_work_queue(sdata->local->hw.wiphy, &sdata->work);
+diff --git a/net/mac80211/status.c b/net/mac80211/status.c
+index 44d83da60aee44..9676ed15efecc8 100644
+--- a/net/mac80211/status.c
++++ b/net/mac80211/status.c
+@@ -1270,3 +1270,4 @@ void ieee80211_purge_tx_queue(struct ieee80211_hw *hw,
+ while ((skb = __skb_dequeue(skbs)))
+ ieee80211_free_txskb(hw, skb);
+ }
++EXPORT_SYMBOL(ieee80211_purge_tx_queue);
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index d682c32821a110..154b41af4157d0 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -827,7 +827,8 @@ static void __iterate_stations(struct ieee80211_local *local,
+ {
+ struct sta_info *sta;
+
+- list_for_each_entry_rcu(sta, &local->sta_list, list) {
++ list_for_each_entry_rcu(sta, &local->sta_list, list,
++ lockdep_is_held(&local->hw.wiphy->mtx)) {
+ if (!sta->uploaded)
+ continue;
+
+@@ -848,6 +849,19 @@ void ieee80211_iterate_stations_atomic(struct ieee80211_hw *hw,
+ }
+ EXPORT_SYMBOL_GPL(ieee80211_iterate_stations_atomic);
+
++void ieee80211_iterate_stations_mtx(struct ieee80211_hw *hw,
++ void (*iterator)(void *data,
++ struct ieee80211_sta *sta),
++ void *data)
++{
++ struct ieee80211_local *local = hw_to_local(hw);
++
++ lockdep_assert_wiphy(local->hw.wiphy);
++
++ __iterate_stations(local, iterator, data);
++}
++EXPORT_SYMBOL_GPL(ieee80211_iterate_stations_mtx);
++
+ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev)
+ {
+ struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+@@ -2572,6 +2586,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
+ WARN(1, "Hardware became unavailable upon resume. This could be a software issue prior to suspend or a hardware issue.\n");
+ else
+ WARN(1, "Hardware became unavailable during restart.\n");
++ ieee80211_wake_queues_by_reason(hw, IEEE80211_MAX_QUEUE_MAP,
++ IEEE80211_QUEUE_STOP_REASON_SUSPEND,
++ false);
+ ieee80211_handle_reconfig_failure(local);
+ return res;
+ }
+diff --git a/net/mctp/route.c b/net/mctp/route.c
+index c6a815df9d358c..d3c1f54386efc1 100644
+--- a/net/mctp/route.c
++++ b/net/mctp/route.c
+@@ -334,8 +334,13 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ msk = NULL;
+ rc = -EINVAL;
+
+- /* we may be receiving a locally-routed packet; drop source sk
+- * accounting
++ /* We may be receiving a locally-routed packet; drop source sk
++ * accounting.
++ *
++	 * From here, we will queue the skb either to a frag_queue or to a
++	 * receiving socket. When that succeeds, we clear the skb pointer;
++	 * a non-NULL skb on exit is otherwise unowned and hence gets
++	 * kfree_skb()-ed.
+ */
+ skb_orphan(skb);
+
+@@ -389,7 +394,9 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ * pending key.
+ */
+ if (flags & MCTP_HDR_FLAG_EOM) {
+- sock_queue_rcv_skb(&msk->sk, skb);
++ rc = sock_queue_rcv_skb(&msk->sk, skb);
++ if (!rc)
++ skb = NULL;
+ if (key) {
+ /* we've hit a pending reassembly; not much we
+ * can do but drop it
+@@ -398,7 +405,6 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ MCTP_TRACE_KEY_REPLIED);
+ key = NULL;
+ }
+- rc = 0;
+ goto out_unlock;
+ }
+
+@@ -425,8 +431,10 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ * this function.
+ */
+ rc = mctp_key_add(key, msk);
+- if (!rc)
++ if (!rc) {
+ trace_mctp_key_acquire(key);
++ skb = NULL;
++ }
+
+ /* we don't need to release key->lock on exit, so
+ * clean up here and suppress the unlock via
+@@ -444,6 +452,8 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ key = NULL;
+ } else {
+ rc = mctp_frag_queue(key, skb);
++ if (!rc)
++ skb = NULL;
+ }
+ }
+
+@@ -458,12 +468,19 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ else
+ rc = mctp_frag_queue(key, skb);
+
++ if (rc)
++ goto out_unlock;
++
++ /* we've queued; the queue owns the skb now */
++ skb = NULL;
++
+ /* end of message? deliver to socket, and we're done with
+ * the reassembly/response key
+ */
+- if (!rc && flags & MCTP_HDR_FLAG_EOM) {
+- sock_queue_rcv_skb(key->sk, key->reasm_head);
+- key->reasm_head = NULL;
++ if (flags & MCTP_HDR_FLAG_EOM) {
++ rc = sock_queue_rcv_skb(key->sk, key->reasm_head);
++ if (!rc)
++ key->reasm_head = NULL;
+ __mctp_key_done_in(key, net, f, MCTP_TRACE_KEY_REPLIED);
+ key = NULL;
+ }
+@@ -482,8 +499,7 @@ static int mctp_route_input(struct mctp_route *route, struct sk_buff *skb)
+ if (any_key)
+ mctp_key_unref(any_key);
+ out:
+- if (rc)
+- kfree_skb(skb);
++ kfree_skb(skb);
+ return rc;
+ }
+
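/*
 * Editor's illustrative sketch, not part of the patch: the ownership rule
 * described in the comment near the top of mctp_route_input(), reduced to
 * its shape. NULL the pointer on every successful handoff so the single
 * kfree_skb() at the exit only frees what nobody accepted
 * (kfree_skb(NULL) is a no-op):
 */
static int example_deliver(struct sock *sk, struct sk_buff *skb)
{
	int rc = sock_queue_rcv_skb(sk, skb);

	if (!rc)
		skb = NULL;	/* the socket queue owns it now */

	kfree_skb(skb);
	return rc;
}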
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 2ad9006a157aef..2e1539027e6d33 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -667,8 +667,15 @@ static bool mptcp_established_options_add_addr(struct sock *sk, struct sk_buff *
+ &echo, &drop_other_suboptions))
+ return false;
+
++ /*
++	 * Later on, mptcp_write_options() will enforce mutual exclusion with
++	 * DSS; bail out if such an option is set and we can't drop it.
++ */
+ if (drop_other_suboptions)
+ remaining += opt_size;
++ else if (opts->suboptions & OPTION_MPTCP_DSS)
++ return false;
++
+ len = mptcp_add_addr_len(opts->addr.family, echo, !!opts->addr.port);
+ if (remaining < len)
+ return false;
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index 01f6ce970918c5..07f3a9703312e5 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -528,13 +528,13 @@ static void mptcp_send_ack(struct mptcp_sock *msk)
+ mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
+ }
+
+-static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
++static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied)
+ {
+ bool slow;
+
+ slow = lock_sock_fast(ssk);
+ if (tcp_can_send_ack(ssk))
+- tcp_cleanup_rbuf(ssk, 1);
++ tcp_cleanup_rbuf(ssk, copied);
+ unlock_sock_fast(ssk, slow);
+ }
+
+@@ -551,7 +551,7 @@ static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
+ (ICSK_ACK_PUSHED2 | ICSK_ACK_PUSHED)));
+ }
+
+-static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
++static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied)
+ {
+ int old_space = READ_ONCE(msk->old_wspace);
+ struct mptcp_subflow_context *subflow;
+@@ -559,14 +559,14 @@ static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
+ int space = __mptcp_space(sk);
+ bool cleanup, rx_empty;
+
+- cleanup = (space > 0) && (space >= (old_space << 1));
+- rx_empty = !__mptcp_rmem(sk);
++ cleanup = (space > 0) && (space >= (old_space << 1)) && copied;
++ rx_empty = !__mptcp_rmem(sk) && copied;
+
+ mptcp_for_each_subflow(msk, subflow) {
+ struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
+
+ if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
+- mptcp_subflow_cleanup_rbuf(ssk);
++ mptcp_subflow_cleanup_rbuf(ssk, copied);
+ }
+ }
+
+@@ -1902,6 +1902,8 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+ goto out;
+ }
+
++static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
++
+ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+ struct msghdr *msg,
+ size_t len, int flags,
+@@ -1955,6 +1957,7 @@ static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
+ break;
+ }
+
++ mptcp_rcv_space_adjust(msk, copied);
+ return copied;
+ }
+
+@@ -2180,9 +2183,6 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+
+ copied += bytes_read;
+
+- /* be sure to advertise window change */
+- mptcp_cleanup_rbuf(msk);
+-
+ if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
+ continue;
+
+@@ -2231,7 +2231,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ }
+
+ pr_debug("block timeout %ld\n", timeo);
+- mptcp_rcv_space_adjust(msk, copied);
++ mptcp_cleanup_rbuf(msk, copied);
+ err = sk_wait_data(sk, &timeo, NULL);
+ if (err < 0) {
+ err = copied ? : err;
+@@ -2239,7 +2239,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ }
+ }
+
+- mptcp_rcv_space_adjust(msk, copied);
++ mptcp_cleanup_rbuf(msk, copied);
+
+ out_err:
+ if (cmsg_flags && copied >= 0) {
+diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
+index bd2b17b219ae90..0b270893ee14c5 100644
+--- a/net/netrom/nr_route.c
++++ b/net/netrom/nr_route.c
+@@ -754,6 +754,12 @@ int nr_route_frame(struct sk_buff *skb, ax25_cb *ax25)
+ int ret;
+ struct sk_buff *skbn;
+
++ /*
++ * Reject malformed packets early. Check that the packet contains at
++ * least 2 addresses and 1 more byte for the Time-To-Live field.
++ */
++ if (skb->len < 2 * sizeof(ax25_address) + 1)
++ return 0;
+
+ nr_src = (ax25_address *)(skb->data + 0);
+ nr_dest = (ax25_address *)(skb->data + 7);
+diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
+index 56e3ae3b6be93f..4abf7e9ac4f2f7 100644
+--- a/net/packet/af_packet.c
++++ b/net/packet/af_packet.c
+@@ -538,10 +538,8 @@ static void *packet_current_frame(struct packet_sock *po,
+ return packet_lookup_frame(po, rb, rb->head, status);
+ }
+
+-static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
++static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
+ {
+- u8 *skb_orig_data = skb->data;
+- int skb_orig_len = skb->len;
+ struct vlan_hdr vhdr, *vh;
+ unsigned int header_len;
+
+@@ -562,33 +560,21 @@ static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev)
+ else
+ return 0;
+
+- skb_push(skb, skb->data - skb_mac_header(skb));
+- vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr);
+- if (skb_orig_data != skb->data) {
+- skb->data = skb_orig_data;
+- skb->len = skb_orig_len;
+- }
++ vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
++ sizeof(vhdr), &vhdr);
+ if (unlikely(!vh))
+ return 0;
+
+ return ntohs(vh->h_vlan_TCI);
+ }
+
+-static __be16 vlan_get_protocol_dgram(struct sk_buff *skb)
++static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
+ {
+ __be16 proto = skb->protocol;
+
+- if (unlikely(eth_type_vlan(proto))) {
+- u8 *skb_orig_data = skb->data;
+- int skb_orig_len = skb->len;
+-
+- skb_push(skb, skb->data - skb_mac_header(skb));
+- proto = __vlan_get_protocol(skb, proto, NULL);
+- if (skb_orig_data != skb->data) {
+- skb->data = skb_orig_data;
+- skb->len = skb_orig_len;
+- }
+- }
++ if (unlikely(eth_type_vlan(proto)))
++ proto = __vlan_get_protocol_offset(skb, proto,
++ skb_mac_offset(skb), NULL);
+
+ return proto;
+ }
+diff --git a/net/sctp/associola.c b/net/sctp/associola.c
+index c45c192b787873..0b0794f164cf2e 100644
+--- a/net/sctp/associola.c
++++ b/net/sctp/associola.c
+@@ -137,7 +137,8 @@ static struct sctp_association *sctp_association_init(
+ = 5 * asoc->rto_max;
+
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
+- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
++ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
++ (unsigned long)sp->autoclose * HZ;
+
+ /* Initializes the timers */
+ for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
+diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
+index efbb4836ec668f..ea498eff1f2ae5 100644
+--- a/scripts/mod/file2alias.c
++++ b/scripts/mod/file2alias.c
+@@ -742,8 +742,8 @@ static void do_input(char *alias,
+
+ for (i = min / BITS_PER_LONG; i < max / BITS_PER_LONG + 1; i++)
+ arr[i] = TO_NATIVE(arr[i]);
+- for (i = min; i < max; i++)
+- if (arr[i / BITS_PER_LONG] & (1L << (i%BITS_PER_LONG)))
++ for (i = min; i <= max; i++)
++ if (arr[i / BITS_PER_LONG] & (1ULL << (i%BITS_PER_LONG)))
+ sprintf(alias + strlen(alias), "%X,*", i);
+ }
+
+diff --git a/scripts/sorttable.h b/scripts/sorttable.h
+index 7bd0184380d3b9..a7c5445baf0027 100644
+--- a/scripts/sorttable.h
++++ b/scripts/sorttable.h
+@@ -110,7 +110,7 @@ static inline unsigned long orc_ip(const int *ip)
+
+ static int orc_sort_cmp(const void *_a, const void *_b)
+ {
+- struct orc_entry *orc_a;
++ struct orc_entry *orc_a, *orc_b;
+ const int *a = g_orc_ip_table + *(int *)_a;
+ const int *b = g_orc_ip_table + *(int *)_b;
+ unsigned long a_val = orc_ip(a);
+@@ -128,6 +128,9 @@ static int orc_sort_cmp(const void *_a, const void *_b)
+ * whitelisted .o files which didn't get objtool generation.
+ */
+ orc_a = g_orc_table + (a - g_orc_ip_table);
++ orc_b = g_orc_table + (b - g_orc_ip_table);
++ if (orc_a->type == ORC_TYPE_UNDEFINED && orc_b->type == ORC_TYPE_UNDEFINED)
++ return 0;
+ return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
+ }
+
+diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
+index 379ac7b5c7098c..f5eead8af2e210 100644
+--- a/security/selinux/ss/services.c
++++ b/security/selinux/ss/services.c
+@@ -956,7 +956,10 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+ xpermd->driver))
+ return;
+ } else {
+- BUG();
++ pr_warn_once(
++ "SELinux: unknown extended permission (%u) will be ignored\n",
++ node->datum.u.xperms->specified);
++ return;
+ }
+
+ if (node->key.specified == AVTAB_XPERMS_ALLOWED) {
+@@ -993,7 +996,8 @@ void services_compute_xperms_decision(struct extended_perms_decision *xpermd,
+ node->datum.u.xperms->perms.p[i];
+ }
+ } else {
+- BUG();
++ pr_warn_once("SELinux: unknown specified key (%u)\n",
++ node->key.specified);
+ }
+ }
+
+diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
+index e3394919daa09a..51ee4c00a84310 100644
+--- a/sound/core/seq/oss/seq_oss_synth.c
++++ b/sound/core/seq/oss/seq_oss_synth.c
+@@ -66,6 +66,7 @@ static struct seq_oss_synth midi_synth_dev = {
+ };
+
+ static DEFINE_SPINLOCK(register_lock);
++static DEFINE_MUTEX(sysex_mutex);
+
+ /*
+ * prototypes
+@@ -497,6 +498,7 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
+ if (!info)
+ return -ENXIO;
+
++ guard(mutex)(&sysex_mutex);
+ sysex = info->sysex;
+ if (sysex == NULL) {
+ sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
+diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
+index e115fe18363495..8b7dfbc8e82075 100644
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -1280,10 +1280,16 @@ static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
+ if (client->type != client_info->type)
+ return -EINVAL;
+
+- /* check validity of midi_version field */
+- if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3) &&
+- client_info->midi_version > SNDRV_SEQ_CLIENT_UMP_MIDI_2_0)
+- return -EINVAL;
++ if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3)) {
++ /* check validity of midi_version field */
++ if (client_info->midi_version > SNDRV_SEQ_CLIENT_UMP_MIDI_2_0)
++ return -EINVAL;
++
++ /* check if UMP is supported in kernel */
++ if (!IS_ENABLED(CONFIG_SND_SEQ_UMP) &&
++ client_info->midi_version > 0)
++ return -EINVAL;
++ }
+
+ /* fill the info fields */
+ if (client_info->name[0])
+diff --git a/sound/core/ump.c b/sound/core/ump.c
+index 83856b2f88b89f..32d27e58416aa3 100644
+--- a/sound/core/ump.c
++++ b/sound/core/ump.c
+@@ -37,6 +37,7 @@ static int process_legacy_output(struct snd_ump_endpoint *ump,
+ u32 *buffer, int count);
+ static void process_legacy_input(struct snd_ump_endpoint *ump, const u32 *src,
+ int words);
++static void update_legacy_names(struct snd_ump_endpoint *ump);
+ #else
+ static inline int process_legacy_output(struct snd_ump_endpoint *ump,
+ u32 *buffer, int count)
+@@ -47,6 +48,9 @@ static inline void process_legacy_input(struct snd_ump_endpoint *ump,
+ const u32 *src, int words)
+ {
+ }
++static inline void update_legacy_names(struct snd_ump_endpoint *ump)
++{
++}
+ #endif
+
+ static const struct snd_rawmidi_global_ops snd_ump_rawmidi_ops = {
+@@ -850,6 +854,7 @@ static int ump_handle_fb_info_msg(struct snd_ump_endpoint *ump,
+ fill_fb_info(ump, &fb->info, buf);
+ if (ump->parsed) {
+ snd_ump_update_group_attrs(ump);
++ update_legacy_names(ump);
+ seq_notify_fb_change(ump, fb);
+ }
+ }
+@@ -882,6 +887,7 @@ static int ump_handle_fb_name_msg(struct snd_ump_endpoint *ump,
+ /* notify the FB name update to sequencer, too */
+ if (ret > 0 && ump->parsed) {
+ snd_ump_update_group_attrs(ump);
++ update_legacy_names(ump);
+ seq_notify_fb_change(ump, fb);
+ }
+ return ret;
+@@ -1076,13 +1082,13 @@ static int snd_ump_legacy_open(struct snd_rawmidi_substream *substream)
+ struct snd_ump_endpoint *ump = substream->rmidi->private_data;
+ int dir = substream->stream;
+ int group = ump->legacy_mapping[substream->number];
+- int err = 0;
++ int err;
+
+- mutex_lock(&ump->open_mutex);
+- if (ump->legacy_substreams[dir][group]) {
+- err = -EBUSY;
+- goto unlock;
+- }
++ guard(mutex)(&ump->open_mutex);
++ if (ump->legacy_substreams[dir][group])
++ return -EBUSY;
++ if (!ump->groups[group].active)
++ return -ENODEV;
+ if (dir == SNDRV_RAWMIDI_STREAM_OUTPUT) {
+ if (!ump->legacy_out_opens) {
+ err = snd_rawmidi_kernel_open(&ump->core, 0,
+@@ -1090,17 +1096,14 @@ static int snd_ump_legacy_open(struct snd_rawmidi_substream *substream)
+ SNDRV_RAWMIDI_LFLG_APPEND,
+ &ump->legacy_out_rfile);
+ if (err < 0)
+- goto unlock;
++ return err;
+ }
+ ump->legacy_out_opens++;
+ snd_ump_convert_reset(&ump->out_cvts[group]);
+ }
+- spin_lock_irq(&ump->legacy_locks[dir]);
++ guard(spinlock_irq)(&ump->legacy_locks[dir]);
+ ump->legacy_substreams[dir][group] = substream;
+- spin_unlock_irq(&ump->legacy_locks[dir]);
+- unlock:
+- mutex_unlock(&ump->open_mutex);
+- return err;
++ return 0;
+ }
+
+ static int snd_ump_legacy_close(struct snd_rawmidi_substream *substream)
+@@ -1109,15 +1112,13 @@ static int snd_ump_legacy_close(struct snd_rawmidi_substream *substream)
+ int dir = substream->stream;
+ int group = ump->legacy_mapping[substream->number];
+
+- mutex_lock(&ump->open_mutex);
+- spin_lock_irq(&ump->legacy_locks[dir]);
+- ump->legacy_substreams[dir][group] = NULL;
+- spin_unlock_irq(&ump->legacy_locks[dir]);
++ guard(mutex)(&ump->open_mutex);
++ scoped_guard(spinlock_irq, &ump->legacy_locks[dir])
++ ump->legacy_substreams[dir][group] = NULL;
+ if (dir == SNDRV_RAWMIDI_STREAM_OUTPUT) {
+ if (!--ump->legacy_out_opens)
+ snd_rawmidi_kernel_release(&ump->legacy_out_rfile);
+ }
+- mutex_unlock(&ump->open_mutex);
+ return 0;
+ }
+
+@@ -1169,12 +1170,11 @@ static int process_legacy_output(struct snd_ump_endpoint *ump,
+ const int dir = SNDRV_RAWMIDI_STREAM_OUTPUT;
+ unsigned char c;
+ int group, size = 0;
+- unsigned long flags;
+
+ if (!ump->out_cvts || !ump->legacy_out_opens)
+ return 0;
+
+- spin_lock_irqsave(&ump->legacy_locks[dir], flags);
++ guard(spinlock_irqsave)(&ump->legacy_locks[dir]);
+ for (group = 0; group < SNDRV_UMP_MAX_GROUPS; group++) {
+ substream = ump->legacy_substreams[dir][group];
+ if (!substream)
+@@ -1190,7 +1190,6 @@ static int process_legacy_output(struct snd_ump_endpoint *ump,
+ break;
+ }
+ }
+- spin_unlock_irqrestore(&ump->legacy_locks[dir], flags);
+ return size;
+ }
+
+@@ -1200,18 +1199,16 @@ static void process_legacy_input(struct snd_ump_endpoint *ump, const u32 *src,
+ struct snd_rawmidi_substream *substream;
+ unsigned char buf[16];
+ unsigned char group;
+- unsigned long flags;
+ const int dir = SNDRV_RAWMIDI_STREAM_INPUT;
+ int size;
+
+ size = snd_ump_convert_from_ump(src, buf, &group);
+ if (size <= 0)
+ return;
+- spin_lock_irqsave(&ump->legacy_locks[dir], flags);
++ guard(spinlock_irqsave)(&ump->legacy_locks[dir]);
+ substream = ump->legacy_substreams[dir][group];
+ if (substream)
+ snd_rawmidi_receive(substream, buf, size);
+- spin_unlock_irqrestore(&ump->legacy_locks[dir], flags);
+ }
+
+ /* Fill ump->legacy_mapping[] for groups to be used for legacy rawmidi */
+@@ -1254,11 +1251,20 @@ static void fill_substream_names(struct snd_ump_endpoint *ump,
+ name = ump->groups[idx].name;
+ if (!*name)
+ name = ump->info.name;
+- snprintf(s->name, sizeof(s->name), "Group %d (%.16s)",
+- idx + 1, name);
++ scnprintf(s->name, sizeof(s->name), "Group %d (%.16s)%s",
++ idx + 1, name,
++ ump->groups[idx].active ? "" : " [Inactive]");
+ }
+ }
+
++static void update_legacy_names(struct snd_ump_endpoint *ump)
++{
++ struct snd_rawmidi *rmidi = ump->legacy_rmidi;
++
++ fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_INPUT);
++ fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT);
++}
++
+ int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ char *id, int device)
+ {
+@@ -1295,10 +1301,7 @@ int snd_ump_attach_legacy_rawmidi(struct snd_ump_endpoint *ump,
+ rmidi->ops = &snd_ump_legacy_ops;
+ rmidi->private_data = ump;
+ ump->legacy_rmidi = rmidi;
+- if (input)
+- fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_INPUT);
+- if (output)
+- fill_substream_names(ump, rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT);
++ update_legacy_names(ump);
+
+ ump_dbg(ump, "Created a legacy rawmidi #%d (%s)\n", device, id);
+ return 0;
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 748a3c40966e97..27e48fdbbf3aa0 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -1134,7 +1134,6 @@ struct ca0132_spec {
+
+ struct hda_codec *codec;
+ struct delayed_work unsol_hp_work;
+- int quirk;
+
+ #ifdef ENABLE_TUNING_CONTROLS
+ long cur_ctl_vals[TUNING_CTLS_COUNT];
+@@ -1166,7 +1165,6 @@ struct ca0132_spec {
+ * CA0132 quirks table
+ */
+ enum {
+- QUIRK_NONE,
+ QUIRK_ALIENWARE,
+ QUIRK_ALIENWARE_M17XR4,
+ QUIRK_SBZ,
+@@ -1176,10 +1174,11 @@ enum {
+ QUIRK_R3D,
+ QUIRK_AE5,
+ QUIRK_AE7,
++ QUIRK_NONE = HDA_FIXUP_ID_NOT_SET,
+ };
+
+ #ifdef CONFIG_PCI
+-#define ca0132_quirk(spec) ((spec)->quirk)
++#define ca0132_quirk(spec) ((spec)->codec->fixup_id)
+ #define ca0132_use_pci_mmio(spec) ((spec)->use_pci_mmio)
+ #define ca0132_use_alt_functions(spec) ((spec)->use_alt_functions)
+ #define ca0132_use_alt_controls(spec) ((spec)->use_alt_controls)
+@@ -1293,7 +1292,7 @@ static const struct hda_pintbl ae7_pincfgs[] = {
+ {}
+ };
+
+-static const struct snd_pci_quirk ca0132_quirks[] = {
++static const struct hda_quirk ca0132_quirks[] = {
+ SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4),
+ SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
+ SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
+@@ -1316,6 +1315,19 @@ static const struct snd_pci_quirk ca0132_quirks[] = {
+ {}
+ };
+
++static const struct hda_model_fixup ca0132_quirk_models[] = {
++ { .id = QUIRK_ALIENWARE, .name = "alienware" },
++ { .id = QUIRK_ALIENWARE_M17XR4, .name = "alienware-m17xr4" },
++ { .id = QUIRK_SBZ, .name = "sbz" },
++ { .id = QUIRK_ZXR, .name = "zxr" },
++ { .id = QUIRK_ZXR_DBPRO, .name = "zxr-dbpro" },
++ { .id = QUIRK_R3DI, .name = "r3di" },
++ { .id = QUIRK_R3D, .name = "r3d" },
++ { .id = QUIRK_AE5, .name = "ae5" },
++ { .id = QUIRK_AE7, .name = "ae7" },
++ {}
++};
++
+ /* Output selection quirk info structures. */
+ #define MAX_QUIRK_MMIO_GPIO_SET_VALS 3
+ #define MAX_QUIRK_SCP_SET_VALS 2
+@@ -9962,17 +9974,15 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
+ */
+ static void sbz_detect_quirk(struct hda_codec *codec)
+ {
+- struct ca0132_spec *spec = codec->spec;
+-
+ switch (codec->core.subsystem_id) {
+ case 0x11020033:
+- spec->quirk = QUIRK_ZXR;
++ codec->fixup_id = QUIRK_ZXR;
+ break;
+ case 0x1102003f:
+- spec->quirk = QUIRK_ZXR_DBPRO;
++ codec->fixup_id = QUIRK_ZXR_DBPRO;
+ break;
+ default:
+- spec->quirk = QUIRK_SBZ;
++ codec->fixup_id = QUIRK_SBZ;
+ break;
+ }
+ }
+@@ -9981,7 +9991,6 @@ static int patch_ca0132(struct hda_codec *codec)
+ {
+ struct ca0132_spec *spec;
+ int err;
+- const struct snd_pci_quirk *quirk;
+
+ codec_dbg(codec, "patch_ca0132\n");
+
+@@ -9992,11 +10001,7 @@ static int patch_ca0132(struct hda_codec *codec)
+ spec->codec = codec;
+
+ /* Detect codec quirk */
+- quirk = snd_pci_quirk_lookup(codec->bus->pci, ca0132_quirks);
+- if (quirk)
+- spec->quirk = quirk->value;
+- else
+- spec->quirk = QUIRK_NONE;
++ snd_hda_pick_fixup(codec, ca0132_quirk_models, ca0132_quirks, NULL);
+ if (ca0132_quirk(spec) == QUIRK_SBZ)
+ sbz_detect_quirk(codec);
+
+@@ -10073,7 +10078,7 @@ static int patch_ca0132(struct hda_codec *codec)
+ spec->mem_base = pci_iomap(codec->bus->pci, 2, 0xC20);
+ if (spec->mem_base == NULL) {
+ codec_warn(codec, "pci_iomap failed! Setting quirk to QUIRK_NONE.");
+- spec->quirk = QUIRK_NONE;
++ codec->fixup_id = QUIRK_NONE;
+ }
+ }
+ #endif
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 29d7eb8c6bec3e..fc93af80f0bffe 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10443,6 +10443,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0xf111, 0x0001, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0xf111, 0x0006, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0xf111, 0x0009, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0xf111, 0x000c, "Framework Laptop", ALC295_FIXUP_FRAMEWORK_LAPTOP_MIC_NO_PRESENCE),
+
+ #if 0
+ /* Below is a quirk table taken from the old code.
+@@ -10631,6 +10632,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
+ {.id = ALC255_FIXUP_ACER_HEADPHONE_AND_MIC, .name = "alc255-acer-headphone-and-mic"},
+ {.id = ALC285_FIXUP_HP_GPIO_AMP_INIT, .name = "alc285-hp-amp-init"},
+ {.id = ALC236_FIXUP_LENOVO_INV_DMIC, .name = "alc236-fixup-lenovo-inv-mic"},
++ {.id = ALC2XX_FIXUP_HEADSET_MIC, .name = "alc2xx-fixup-headset-mic"},
+ {}
+ };
+ #define ALC225_STANDARD_PINS \
+diff --git a/sound/usb/format.c b/sound/usb/format.c
+index 3b45d0ee769389..3b3a5ea6fcbfc0 100644
+--- a/sound/usb/format.c
++++ b/sound/usb/format.c
+@@ -60,6 +60,8 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL;
+ /* flag potentially raw DSD capable altsettings */
+ fp->dsd_raw = true;
++ /* clear special format bit to avoid "unsupported format" msg below */
++ format &= ~UAC2_FORMAT_TYPE_I_RAW_DATA;
+ }
+
+ format <<= 1;
+@@ -71,8 +73,11 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
+ sample_width = as->bBitResolution;
+ sample_bytes = as->bSubslotSize;
+
+- if (format & UAC3_FORMAT_TYPE_I_RAW_DATA)
++ if (format & UAC3_FORMAT_TYPE_I_RAW_DATA) {
+ pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL;
++ /* clear special format bit to avoid "unsupported format" msg below */
++ format &= ~UAC3_FORMAT_TYPE_I_RAW_DATA;
++ }
+
+ format <<= 1;
+ break;
+diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c
+index 6eb7d93b358d99..20ac32635f1f50 100644
+--- a/sound/usb/mixer_us16x08.c
++++ b/sound/usb/mixer_us16x08.c
+@@ -687,7 +687,7 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol,
+ struct usb_mixer_elem_info *elem = kcontrol->private_data;
+ struct snd_usb_audio *chip = elem->head.mixer->chip;
+ struct snd_us16x08_meter_store *store = elem->private_data;
+- u8 meter_urb[64];
++ u8 meter_urb[64] = {0};
+
+ switch (kcontrol->private_value) {
+ case 0: {
+diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
+index 8eed8d9742fda9..ec81b47c41c9ea 100644
+--- a/sound/usb/quirks.c
++++ b/sound/usb/quirks.c
+@@ -2225,6 +2225,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
+ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x2522, 0x0007, /* LH Labs Geek Out HD Audio 1V5 */
+ QUIRK_FLAG_SET_IFACE_FIRST),
++ DEVICE_FLG(0x262a, 0x9302, /* ddHiFi TC44C */
++ QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x2708, 0x0002, /* Audient iD14 */
+ QUIRK_FLAG_IGNORE_CTL_ERROR),
+ DEVICE_FLG(0x2912, 0x30c8, /* Audioengine D1 */
+diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+index f87365f7599bf7..f61d623b1ce8df 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
++++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+@@ -541,24 +541,11 @@ static __u64 subprog_spill_reg_precise(void)
+
+ SEC("?raw_tp")
+ __success __log_level(2)
+-__msg("10: (0f) r1 += r7")
+-__msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
+-__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
+-__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
+-__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
+-__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=2 R6_w=1 R8_rw=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8_rw=P1")
+-__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
+-__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
+-__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
+-__msg("mark_precise: frame1: regs= stack= before 16: (79) r2 = *(u64 *)(r1 +0)")
+-__msg("mark_precise: frame1: regs= stack= before 15: (79) r0 = *(u64 *)(r10 -16)")
+-__msg("mark_precise: frame1: regs= stack= before 14: (7b) *(u64 *)(r10 -16) = r2")
+-__msg("mark_precise: frame1: regs= stack= before 13: (7b) *(u64 *)(r1 +0) = r2")
+-__msg("mark_precise: frame1: regs=r2 stack= before 6: (85) call pc+6")
+-__msg("mark_precise: frame0: regs=r2 stack= before 5: (bf) r2 = r6")
+-__msg("mark_precise: frame0: regs=r6 stack= before 4: (07) r1 += -8")
+-__msg("mark_precise: frame0: regs=r6 stack= before 3: (bf) r1 = r10")
+-__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
++/* precision backtracking can't currently handle stack access not through r10,
++ * so we won't be able to mark stack slot fp-8 as precise, and so will
++ * fall back to forcing all scalars precise
++ */
++__msg("mark_precise: frame0: falling back to forcing all scalars precise")
+ __naked int subprog_spill_into_parent_stack_slot_precise(void)
+ {
+ asm volatile (
+diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
+index 8a2ff81d835088..0d84dd1f38b6b0 100644
+--- a/tools/testing/selftests/bpf/verifier/precise.c
++++ b/tools/testing/selftests/bpf/verifier/precise.c
+@@ -140,11 +140,10 @@
+ .result = REJECT,
+ },
+ {
+- "precise: ST zero to stack insn is supported",
++ "precise: ST insn causing spi > allocated_stack",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
+- /* not a register spill, so we stop precision propagation for R4 here */
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, -8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_0, -1),
+@@ -158,11 +157,11 @@
+ mark_precise: frame0: last_idx 4 first_idx 2\
+ mark_precise: frame0: regs=r4 stack= before 4\
+ mark_precise: frame0: regs=r4 stack= before 3\
++ mark_precise: frame0: regs= stack=-8 before 2\
++ mark_precise: frame0: falling back to forcing all scalars precise\
++ force_precise: frame0: forcing r0 to be precise\
+ mark_precise: frame0: last_idx 5 first_idx 5\
+- mark_precise: frame0: parent state regs=r0 stack=:\
+- mark_precise: frame0: last_idx 4 first_idx 2\
+- mark_precise: frame0: regs=r0 stack= before 4\
+- 5: R0=-1 R4=0",
++ mark_precise: frame0: parent state regs= stack=:",
+ .result = VERBOSE_ACCEPT,
+ .retval = -1,
+ },
+@@ -170,8 +169,6 @@
+ "precise: STX insn causing spi > allocated_stack",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+- /* make later reg spill more interesting by having somewhat known scalar */
+- BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, -8),
+@@ -182,21 +179,18 @@
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = BPF_F_TEST_STATE_FREQ,
+- .errstr = "mark_precise: frame0: last_idx 7 first_idx 7\
++ .errstr = "mark_precise: frame0: last_idx 6 first_idx 6\
+ mark_precise: frame0: parent state regs=r4 stack=:\
+- mark_precise: frame0: last_idx 6 first_idx 4\
+- mark_precise: frame0: regs=r4 stack= before 6: (b7) r0 = -1\
+- mark_precise: frame0: regs=r4 stack= before 5: (79) r4 = *(u64 *)(r10 -8)\
+- mark_precise: frame0: regs= stack=-8 before 4: (7b) *(u64 *)(r3 -8) = r0\
+- mark_precise: frame0: parent state regs=r0 stack=:\
+- mark_precise: frame0: last_idx 3 first_idx 3\
+- mark_precise: frame0: regs=r0 stack= before 3: (55) if r3 != 0x7b goto pc+0\
+- mark_precise: frame0: regs=r0 stack= before 2: (bf) r3 = r10\
+- mark_precise: frame0: regs=r0 stack= before 1: (57) r0 &= 255\
+- mark_precise: frame0: parent state regs=r0 stack=:\
+- mark_precise: frame0: last_idx 0 first_idx 0\
+- mark_precise: frame0: regs=r0 stack= before 0: (85) call bpf_get_prandom_u32#7\
+- mark_precise: frame0: last_idx 7 first_idx 7\
++ mark_precise: frame0: last_idx 5 first_idx 3\
++ mark_precise: frame0: regs=r4 stack= before 5\
++ mark_precise: frame0: regs=r4 stack= before 4\
++ mark_precise: frame0: regs= stack=-8 before 3\
++ mark_precise: frame0: falling back to forcing all scalars precise\
++ force_precise: frame0: forcing r0 to be precise\
++ force_precise: frame0: forcing r0 to be precise\
++ force_precise: frame0: forcing r0 to be precise\
++ force_precise: frame0: forcing r0 to be precise\
++ mark_precise: frame0: last_idx 6 first_idx 6\
+ mark_precise: frame0: parent state regs= stack=:",
+ .result = VERBOSE_ACCEPT,
+ .retval = -1,