author     Mike Pagano <mpagano@gentoo.org>  2024-11-01 07:27:17 -0400
committer  Mike Pagano <mpagano@gentoo.org>  2024-11-01 07:27:17 -0400
commit     df796b55c38da6e965a072aab398cfa423360eea (patch)
tree       7581f0671847b6ec6b69fd498a8a8bca66341088
parent     Patches to fix compilation errors with "-Og" (diff)
download   linux-patches-df796b55c38da6e965a072aab398cfa423360eea.tar.gz
           linux-patches-df796b55c38da6e965a072aab398cfa423360eea.tar.bz2
           linux-patches-df796b55c38da6e965a072aab398cfa423360eea.zip
Linux patch 6.6.59
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README             |     4
-rw-r--r--  1058_linux-6.6.59.patch | 10051
2 files changed, 10055 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index f75b6477..d1178507 100644
--- a/0000_README
+++ b/0000_README
@@ -275,6 +275,10 @@ Patch: 1057_linux-6.6.58.patch
From: https://www.kernel.org
Desc: Linux 6.6.58
+Patch: 1058_linux-6.6.59.patch
+From: https://www.kernel.org
+Desc: Linux 6.6.59
+
Patch: 1510_fs-enable-link-security-restrictions-by-default.patch
From: http://sources.debian.net/src/linux/3.16.7-ckt4-3/debian/patches/debian/fs-enable-link-security-restrictions-by-default.patch
Desc: Enable link security restrictions by default.
diff --git a/1058_linux-6.6.59.patch b/1058_linux-6.6.59.patch
new file mode 100644
index 00000000..aa89d7df
--- /dev/null
+++ b/1058_linux-6.6.59.patch
@@ -0,0 +1,10051 @@
+diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml
+index 7735e08d35ba14..beef193aaaeba0 100644
+--- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml
++++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.yaml
+@@ -102,21 +102,21 @@ properties:
+ default: 2
+
+ interrupts:
+- anyOf:
+- - minItems: 1
+- items:
+- - description: TX interrupt
+- - description: RX interrupt
+- - items:
+- - description: common/combined interrupt
++ minItems: 1
++ maxItems: 2
+
+ interrupt-names:
+ oneOf:
+- - minItems: 1
++ - description: TX interrupt
++ const: tx
++ - description: RX interrupt
++ const: rx
++ - description: TX and RX interrupts
+ items:
+ - const: tx
+ - const: rx
+- - const: common
++ - description: Common/combined interrupt
++ const: common
+
+ fck_parent:
+ $ref: /schemas/types.yaml#/definitions/string
+diff --git a/Makefile b/Makefile
+index f80e78c7cf2006..8a55af189f369a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 6
+ PATCHLEVEL = 6
+-SUBLEVEL = 58
++SUBLEVEL = 59
+ EXTRAVERSION =
+ NAME = Pinguïn Aangedreven
+
+diff --git a/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts b/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts
+index 72d26d130efaa4..85f54fa595aa8f 100644
+--- a/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts
++++ b/arch/arm/boot/dts/broadcom/bcm2837-rpi-cm3-io3.dts
+@@ -77,7 +77,7 @@ &gpio {
+ };
+
+ &hdmi {
+- hpd-gpios = <&expgpio 1 GPIO_ACTIVE_LOW>;
++ hpd-gpios = <&expgpio 0 GPIO_ACTIVE_LOW>;
+ power-domains = <&power RPI_POWER_DOMAIN_HDMI>;
+ status = "okay";
+ };
+diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
+index 9a2d3723cd0fa9..11782860717fae 100644
+--- a/arch/arm64/Makefile
++++ b/arch/arm64/Makefile
+@@ -10,7 +10,7 @@
+ #
+ # Copyright (C) 1995-2001 by Russell King
+
+-LDFLAGS_vmlinux :=--no-undefined -X
++LDFLAGS_vmlinux :=--no-undefined -X --pic-veneer
+
+ ifeq ($(CONFIG_RELOCATABLE), y)
+ # Pass --no-apply-dynamic-relocs to restore pre-binutils-2.27 behaviour
+diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
+index 685cc436146a5a..18413d869cca1a 100644
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -777,6 +777,9 @@ static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
+ static int check_vcpu_requests(struct kvm_vcpu *vcpu)
+ {
+ if (kvm_request_pending(vcpu)) {
++ if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
++ return -EIO;
++
+ if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
+ kvm_vcpu_sleep(vcpu);
+
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index b233a64df2956a..370a1a7bd369dc 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -1708,7 +1708,7 @@ static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+ * one cache line.
+ */
+ if (kvm_has_mte(vcpu->kvm))
+- clidr |= 2 << CLIDR_TTYPE_SHIFT(loc);
++ clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);
+
+ __vcpu_sys_reg(vcpu, r->reg) = clidr;
+
+diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
+index a2b439ad387c80..b7306c588d9d34 100644
+--- a/arch/arm64/kvm/vgic/vgic-init.c
++++ b/arch/arm64/kvm/vgic/vgic-init.c
+@@ -494,10 +494,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
+ out:
+ mutex_unlock(&kvm->arch.config_lock);
+ out_slots:
+- mutex_unlock(&kvm->slots_lock);
+-
+ if (ret)
+- kvm_vgic_destroy(kvm);
++ kvm_vm_dead(kvm);
++
++ mutex_unlock(&kvm->slots_lock);
+
+ return ret;
+ }
+diff --git a/arch/loongarch/include/asm/bootinfo.h b/arch/loongarch/include/asm/bootinfo.h
+index c60796869b2b80..d2317fa6a766d6 100644
+--- a/arch/loongarch/include/asm/bootinfo.h
++++ b/arch/loongarch/include/asm/bootinfo.h
+@@ -24,6 +24,10 @@ struct loongson_board_info {
+ const char *board_vendor;
+ };
+
++/*
++ * The "core" of cores_per_node and cores_per_package stands for a
++ * logical core, which means in a SMT system it stands for a thread.
++ */
+ struct loongson_system_configuration {
+ int nr_cpus;
+ int nr_nodes;
+diff --git a/arch/loongarch/include/asm/kasan.h b/arch/loongarch/include/asm/kasan.h
+index cd6084f4e153fe..c6bce5fbff57b0 100644
+--- a/arch/loongarch/include/asm/kasan.h
++++ b/arch/loongarch/include/asm/kasan.h
+@@ -16,7 +16,7 @@
+ #define XRANGE_SHIFT (48)
+
+ /* Valid address length */
+-#define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
++#define XRANGE_SHADOW_SHIFT min(cpu_vabits, VA_BITS)
+ /* Used for taking out the valid address */
+ #define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
+ /* One segment whole address space size */
+diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
+index f2ff8b5d591e4f..6e58f65455c7ca 100644
+--- a/arch/loongarch/kernel/process.c
++++ b/arch/loongarch/kernel/process.c
+@@ -293,13 +293,15 @@ unsigned long stack_top(void)
+ {
+ unsigned long top = TASK_SIZE & PAGE_MASK;
+
+- /* Space for the VDSO & data page */
+- top -= PAGE_ALIGN(current->thread.vdso->size);
+- top -= VVAR_SIZE;
+-
+- /* Space to randomize the VDSO base */
+- if (current->flags & PF_RANDOMIZE)
+- top -= VDSO_RANDOMIZE_SIZE;
++ if (current->thread.vdso) {
++ /* Space for the VDSO & data page */
++ top -= PAGE_ALIGN(current->thread.vdso->size);
++ top -= VVAR_SIZE;
++
++ /* Space to randomize the VDSO base */
++ if (current->flags & PF_RANDOMIZE)
++ top -= VDSO_RANDOMIZE_SIZE;
++ }
+
+ return top;
+ }
+diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
+index 6748d7f3f22198..065f2db57c0992 100644
+--- a/arch/loongarch/kernel/setup.c
++++ b/arch/loongarch/kernel/setup.c
+@@ -55,6 +55,7 @@
+ #define SMBIOS_FREQHIGH_OFFSET 0x17
+ #define SMBIOS_FREQLOW_MASK 0xFF
+ #define SMBIOS_CORE_PACKAGE_OFFSET 0x23
++#define SMBIOS_THREAD_PACKAGE_OFFSET 0x25
+ #define LOONGSON_EFI_ENABLE (1 << 3)
+
+ #ifdef CONFIG_EFI
+@@ -129,7 +130,7 @@ static void __init parse_cpu_table(const struct dmi_header *dm)
+ cpu_clock_freq = freq_temp * 1000000;
+
+ loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
+- loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
++ loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_THREAD_PACKAGE_OFFSET);
+
+ pr_info("CpuClock = %llu\n", cpu_clock_freq);
+ }
+diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
+index aebfc3733a7607..d59052c03d9b7e 100644
+--- a/arch/loongarch/kernel/traps.c
++++ b/arch/loongarch/kernel/traps.c
+@@ -529,6 +529,9 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ #else
+ unsigned int *pc;
+
++ if (regs->csr_prmd & CSR_PRMD_PIE)
++ local_irq_enable();
++
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
+
+ /*
+@@ -553,6 +556,8 @@ asmlinkage void noinstr do_ale(struct pt_regs *regs)
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+ out:
++ if (regs->csr_prmd & CSR_PRMD_PIE)
++ local_irq_disable();
+ #endif
+ irqentry_exit(regs, state);
+ }
+diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
+index 2f041b5cea970e..26eeb397363193 100644
+--- a/arch/riscv/net/bpf_jit_comp64.c
++++ b/arch/riscv/net/bpf_jit_comp64.c
+@@ -555,8 +555,8 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
+ rv_lr_w(r0, 0, rd, 0, 0), ctx);
+ jmp_offset = ninsns_rvoff(8);
+ emit(rv_bne(RV_REG_T2, r0, jmp_offset >> 1), ctx);
+- emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 0) :
+- rv_sc_w(RV_REG_T3, rs, rd, 0, 0), ctx);
++ emit(is64 ? rv_sc_d(RV_REG_T3, rs, rd, 0, 1) :
++ rv_sc_w(RV_REG_T3, rs, rd, 0, 1), ctx);
+ jmp_offset = ninsns_rvoff(-6);
+ emit(rv_bne(RV_REG_T3, 0, jmp_offset >> 1), ctx);
+ emit(rv_fence(0x3, 0x3), ctx);
+diff --git a/arch/s390/include/asm/perf_event.h b/arch/s390/include/asm/perf_event.h
+index 9917e2717b2b42..66aff768f8151d 100644
+--- a/arch/s390/include/asm/perf_event.h
++++ b/arch/s390/include/asm/perf_event.h
+@@ -73,6 +73,7 @@ struct perf_sf_sde_regs {
+ #define SAMPLE_FREQ_MODE(hwc) (SAMPL_FLAGS(hwc) & PERF_CPUM_SF_FREQ_MODE)
+
+ #define perf_arch_fetch_caller_regs(regs, __ip) do { \
++ (regs)->psw.mask = 0; \
+ (regs)->psw.addr = (__ip); \
+ (regs)->gprs[15] = (unsigned long)__builtin_frame_address(0) - \
+ offsetof(struct stack_frame, back_chain); \
+diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
+index b9324ca2eb9403..b3961f1016ea0b 100644
+--- a/arch/s390/pci/pci_event.c
++++ b/arch/s390/pci/pci_event.c
+@@ -272,18 +272,19 @@ static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+ goto no_pdev;
+
+ switch (ccdf->pec) {
+- case 0x003a: /* Service Action or Error Recovery Successful */
++ case 0x002a: /* Error event concerns FMB */
++ case 0x002b:
++ case 0x002c:
++ break;
++ case 0x0040: /* Service Action or Error Recovery Failed */
++ case 0x003b:
++ zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
++ break;
++ default: /* PCI function left in the error state attempt to recover */
+ ers_res = zpci_event_attempt_error_recovery(pdev);
+ if (ers_res != PCI_ERS_RESULT_RECOVERED)
+ zpci_event_io_failure(pdev, pci_channel_io_perm_failure);
+ break;
+- default:
+- /*
+- * Mark as frozen not permanently failed because the device
+- * could be subsequently recovered by the platform.
+- */
+- zpci_event_io_failure(pdev, pci_channel_io_frozen);
+- break;
+ }
+ pci_dev_put(pdev);
+ no_pdev:
+diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
+index 82d12c93feabe6..05c82fd5d0f60b 100644
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2217,6 +2217,7 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
+ config ADDRESS_MASKING
+ bool "Linear Address Masking support"
+ depends on X86_64
++ depends on COMPILE_TEST || !CPU_MITIGATIONS # wait for LASS
+ help
+ Linear Address Masking (LAM) modifies the checking that is applied
+ to 64-bit linear addresses, allowing software to use of the
+diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+index b44c487727d456..a701e7921ea5c7 100644
+--- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
++++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c
+@@ -27,10 +27,10 @@
+ * hardware. The allocated bandwidth percentage is rounded to the next
+ * control step available on the hardware.
+ */
+-static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
++static bool bw_validate(char *buf, u32 *data, struct rdt_resource *r)
+ {
+- unsigned long bw;
+ int ret;
++ u32 bw;
+
+ /*
+ * Only linear delay values is supported for current Intel SKUs.
+@@ -40,16 +40,21 @@ static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
+ return false;
+ }
+
+- ret = kstrtoul(buf, 10, &bw);
++ ret = kstrtou32(buf, 10, &bw);
+ if (ret) {
+- rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
++ rdt_last_cmd_printf("Invalid MB value %s\n", buf);
+ return false;
+ }
+
+- if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
+- !is_mba_sc(r)) {
+- rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
+- r->membw.min_bw, r->default_ctrl);
++ /* Nothing else to do if software controller is enabled. */
++ if (is_mba_sc(r)) {
++ *data = bw;
++ return true;
++ }
++
++ if (bw < r->membw.min_bw || bw > r->default_ctrl) {
++ rdt_last_cmd_printf("MB value %u out of range [%d,%d]\n",
++ bw, r->membw.min_bw, r->default_ctrl);
+ return false;
+ }
+
+@@ -63,7 +68,7 @@ int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
+ struct resctrl_staged_config *cfg;
+ u32 closid = data->rdtgrp->closid;
+ struct rdt_resource *r = s->res;
+- unsigned long bw_val;
++ u32 bw_val;
+
+ cfg = &d->staged_config[s->conf_type];
+ if (cfg->have_new_ctrl) {
+diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
+index 60891b9ce25f61..acf22bd99efcd8 100644
+--- a/arch/x86/kvm/svm/nested.c
++++ b/arch/x86/kvm/svm/nested.c
+@@ -63,8 +63,12 @@ static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
+ u64 pdpte;
+ int ret;
+
++ /*
++ * Note, nCR3 is "assumed" to be 32-byte aligned, i.e. the CPU ignores
++ * nCR3[4:0] when loading PDPTEs from memory.
++ */
+ ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
+- offset_in_page(cr3) + index * 8, 8);
++ (cr3 & GENMASK(11, 5)) + index * 8, 8);
+ if (ret)
+ return 0;
+ return pdpte;
+diff --git a/drivers/accel/qaic/qaic_control.c b/drivers/accel/qaic/qaic_control.c
+index 388abd40024ba4..f3db3fa91dd52e 100644
+--- a/drivers/accel/qaic/qaic_control.c
++++ b/drivers/accel/qaic/qaic_control.c
+@@ -496,7 +496,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
+ nents = sgt->nents;
+ nents_dma = nents;
+ *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
+- for_each_sgtable_sg(sgt, sg, i) {
++ for_each_sgtable_dma_sg(sgt, sg, i) {
+ *size -= sizeof(*asp);
+ /* Save 1K for possible follow-up transactions. */
+ if (*size < SZ_1K) {
+diff --git a/drivers/accel/qaic/qaic_data.c b/drivers/accel/qaic/qaic_data.c
+index ed1a5af434f246..d2f8c70a77a5b4 100644
+--- a/drivers/accel/qaic/qaic_data.c
++++ b/drivers/accel/qaic/qaic_data.c
+@@ -177,7 +177,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
+ nents = 0;
+
+ size = size ? size : PAGE_SIZE;
+- for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
++ for_each_sgtable_dma_sg(sgt_in, sg, j) {
+ len = sg_dma_len(sg);
+
+ if (!len)
+@@ -214,7 +214,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
+
+ /* copy relevant sg node and fix page and length */
+ sgn = sgf;
+- for_each_sgtable_sg(sgt, sg, j) {
++ for_each_sgtable_dma_sg(sgt, sg, j) {
+ memcpy(sg, sgn, sizeof(*sg));
+ if (sgn == sgf) {
+ sg_dma_address(sg) += offf;
+@@ -294,7 +294,7 @@ static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
+ * fence.
+ */
+ dev_addr = req->dev_addr;
+- for_each_sgtable_sg(slice->sgt, sg, i) {
++ for_each_sgtable_dma_sg(slice->sgt, sg, i) {
+ slice->reqs[i].cmd = cmd;
+ slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
+ sg_dma_address(sg) : dev_addr);
+diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
+index 1e76a64cce0a91..906a7bfa448b31 100644
+--- a/drivers/acpi/button.c
++++ b/drivers/acpi/button.c
+@@ -130,6 +130,17 @@ static const struct dmi_system_id dmi_lid_quirks[] = {
+ },
+ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
+ },
++ {
++ /*
++ * Samsung galaxybook2 ,initial _LID device notification returns
++ * lid closed.
++ */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
++ DMI_MATCH(DMI_PRODUCT_NAME, "750XED"),
++ },
++ .driver_data = (void *)(long)ACPI_BUTTON_LID_INIT_OPEN,
++ },
+ {}
+ };
+
+diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
+index 7aced0b9bad7cc..5df417626fd109 100644
+--- a/drivers/acpi/cppc_acpi.c
++++ b/drivers/acpi/cppc_acpi.c
+@@ -39,6 +39,9 @@
+ #include <linux/rwsem.h>
+ #include <linux/wait.h>
+ #include <linux/topology.h>
++#include <linux/dmi.h>
++#include <linux/units.h>
++#include <asm/unaligned.h>
+
+ #include <acpi/cppc_acpi.h>
+
+@@ -1858,3 +1861,116 @@ unsigned int cppc_get_transition_latency(int cpu_num)
+ return latency_ns;
+ }
+ EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
++
++/* Minimum struct length needed for the DMI processor entry we want */
++#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
++
++/* Offset in the DMI processor structure for the max frequency */
++#define DMI_PROCESSOR_MAX_SPEED 0x14
++
++/* Callback function used to retrieve the max frequency from DMI */
++static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
++{
++ const u8 *dmi_data = (const u8 *)dm;
++ u16 *mhz = (u16 *)private;
++
++ if (dm->type == DMI_ENTRY_PROCESSOR &&
++ dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
++ u16 val = (u16)get_unaligned((const u16 *)
++ (dmi_data + DMI_PROCESSOR_MAX_SPEED));
++ *mhz = val > *mhz ? val : *mhz;
++ }
++}
++
++/* Look up the max frequency in DMI */
++static u64 cppc_get_dmi_max_khz(void)
++{
++ u16 mhz = 0;
++
++ dmi_walk(cppc_find_dmi_mhz, &mhz);
++
++ /*
++ * Real stupid fallback value, just in case there is no
++ * actual value set.
++ */
++ mhz = mhz ? mhz : 1;
++
++ return KHZ_PER_MHZ * mhz;
++}
++
++/*
++ * If CPPC lowest_freq and nominal_freq registers are exposed then we can
++ * use them to convert perf to freq and vice versa. The conversion is
++ * extrapolated as an affine function passing by the 2 points:
++ * - (Low perf, Low freq)
++ * - (Nominal perf, Nominal freq)
++ */
++unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
++{
++ s64 retval, offset = 0;
++ static u64 max_khz;
++ u64 mul, div;
++
++ if (caps->lowest_freq && caps->nominal_freq) {
++ /* Avoid special case when nominal_freq is equal to lowest_freq */
++ if (caps->lowest_freq == caps->nominal_freq) {
++ mul = caps->nominal_freq;
++ div = caps->nominal_perf;
++ } else {
++ mul = caps->nominal_freq - caps->lowest_freq;
++ div = caps->nominal_perf - caps->lowest_perf;
++ }
++ mul *= KHZ_PER_MHZ;
++ offset = caps->nominal_freq * KHZ_PER_MHZ -
++ div64_u64(caps->nominal_perf * mul, div);
++ } else {
++ if (!max_khz)
++ max_khz = cppc_get_dmi_max_khz();
++ mul = max_khz;
++ div = caps->highest_perf;
++ }
++
++ retval = offset + div64_u64(perf * mul, div);
++ if (retval >= 0)
++ return retval;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
++
++unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
++{
++ s64 retval, offset = 0;
++ static u64 max_khz;
++ u64 mul, div;
++
++ if (caps->lowest_freq && caps->nominal_freq) {
++ /* Avoid special case when nominal_freq is equal to lowest_freq */
++ if (caps->lowest_freq == caps->nominal_freq) {
++ mul = caps->nominal_perf;
++ div = caps->nominal_freq;
++ } else {
++ mul = caps->nominal_perf - caps->lowest_perf;
++ div = caps->nominal_freq - caps->lowest_freq;
++ }
++ /*
++ * We don't need to convert to kHz for computing offset and can
++ * directly use nominal_freq and lowest_freq as the div64_u64
++ * will remove the frequency unit.
++ */
++ offset = caps->nominal_perf -
++ div64_u64(caps->nominal_freq * mul, div);
++ /* But we need it for computing the perf level. */
++ div *= KHZ_PER_MHZ;
++ } else {
++ if (!max_khz)
++ max_khz = cppc_get_dmi_max_khz();
++ mul = caps->highest_perf;
++ div = max_khz;
++ }
++
++ retval = offset + div64_u64(freq * mul, div);
++ if (retval >= 0)
++ return retval;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cppc_khz_to_perf);
+diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
+index 7020584096bfaa..8b391f12853bb6 100644
+--- a/drivers/acpi/prmt.c
++++ b/drivers/acpi/prmt.c
+@@ -52,7 +52,7 @@ struct prm_context_buffer {
+ static LIST_HEAD(prm_module_list);
+
+ struct prm_handler_info {
+- guid_t guid;
++ efi_guid_t guid;
+ efi_status_t (__efiapi *handler_addr)(u64, void *);
+ u64 static_data_buffer_addr;
+ u64 acpi_param_buffer_addr;
+@@ -72,17 +72,21 @@ struct prm_module_info {
+ struct prm_handler_info handlers[];
+ };
+
+-static u64 efi_pa_va_lookup(u64 pa)
++static u64 efi_pa_va_lookup(efi_guid_t *guid, u64 pa)
+ {
+ efi_memory_desc_t *md;
+ u64 pa_offset = pa & ~PAGE_MASK;
+ u64 page = pa & PAGE_MASK;
+
+ for_each_efi_memory_desc(md) {
+- if (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)
++ if ((md->attribute & EFI_MEMORY_RUNTIME) &&
++ (md->phys_addr < pa && pa < md->phys_addr + PAGE_SIZE * md->num_pages)) {
+ return pa_offset + md->virt_addr + page - md->phys_addr;
++ }
+ }
+
++ pr_warn("Failed to find VA for GUID: %pUL, PA: 0x%llx", guid, pa);
++
+ return 0;
+ }
+
+@@ -148,9 +152,15 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
+ th = &tm->handlers[cur_handler];
+
+ guid_copy(&th->guid, (guid_t *)handler_info->handler_guid);
+- th->handler_addr = (void *)efi_pa_va_lookup(handler_info->handler_address);
+- th->static_data_buffer_addr = efi_pa_va_lookup(handler_info->static_data_buffer_address);
+- th->acpi_param_buffer_addr = efi_pa_va_lookup(handler_info->acpi_param_buffer_address);
++ th->handler_addr =
++ (void *)efi_pa_va_lookup(&th->guid, handler_info->handler_address);
++
++ th->static_data_buffer_addr =
++ efi_pa_va_lookup(&th->guid, handler_info->static_data_buffer_address);
++
++ th->acpi_param_buffer_addr =
++ efi_pa_va_lookup(&th->guid, handler_info->acpi_param_buffer_address);
++
+ } while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
+
+ return 0;
+@@ -253,6 +263,13 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
+ if (!handler || !module)
+ goto invalid_guid;
+
++ if (!handler->handler_addr ||
++ !handler->static_data_buffer_addr ||
++ !handler->acpi_param_buffer_addr) {
++ buffer->prm_status = PRM_HANDLER_ERROR;
++ return AE_OK;
++ }
++
+ ACPI_COPY_NAMESEG(context.signature, "PRMC");
+ context.revision = 0x0;
+ context.reserved = 0x0;
+diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
+index 95233b413c1ac5..d3d776d4fb5a74 100644
+--- a/drivers/acpi/resource.c
++++ b/drivers/acpi/resource.c
+@@ -498,6 +498,13 @@ static const struct dmi_system_id tongfang_gm_rg[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ },
+ },
++ {
++ /* LG Electronics 16T90SP */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "LG Electronics"),
++ DMI_MATCH(DMI_BOARD_NAME, "16T90SP"),
++ },
++ },
+ { }
+ };
+
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index a96566e1b2b84c..9cc02252218497 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -636,6 +636,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+ /* the scmd has an associated qc */
+ if (!(qc->flags & ATA_QCFLAG_EH)) {
+ /* which hasn't failed yet, timeout */
++ set_host_byte(scmd, DID_TIME_OUT);
+ qc->err_mask |= AC_ERR_TIMEOUT;
+ qc->flags |= ATA_QCFLAG_EH;
+ nr_timedout++;
+diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
+index 01f46caf1f88b7..54b80911f3e286 100644
+--- a/drivers/cdrom/cdrom.c
++++ b/drivers/cdrom/cdrom.c
+@@ -2313,7 +2313,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
+ return -EINVAL;
+
+ /* Prevent arg from speculatively bypassing the length check */
+- barrier_nospec();
++ arg = array_index_nospec(arg, cdi->capacity);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
+index f461f99eb040c6..8c16d67b98bfe4 100644
+--- a/drivers/cpufreq/amd-pstate.c
++++ b/drivers/cpufreq/amd-pstate.c
+@@ -1061,11 +1061,21 @@ static int amd_pstate_register_driver(int mode)
+ return -EINVAL;
+
+ cppc_state = mode;
++
++ ret = amd_pstate_enable(true);
++ if (ret) {
++ pr_err("failed to enable cppc during amd-pstate driver registration, return %d\n",
++ ret);
++ amd_pstate_driver_cleanup();
++ return ret;
++ }
++
+ ret = cpufreq_register_driver(current_pstate_driver);
+ if (ret) {
+ amd_pstate_driver_cleanup();
+ return ret;
+ }
++
+ return 0;
+ }
+
+diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
+index 1ba3943be8a3dd..15f1d41920a339 100644
+--- a/drivers/cpufreq/cppc_cpufreq.c
++++ b/drivers/cpufreq/cppc_cpufreq.c
+@@ -16,7 +16,6 @@
+ #include <linux/delay.h>
+ #include <linux/cpu.h>
+ #include <linux/cpufreq.h>
+-#include <linux/dmi.h>
+ #include <linux/irq_work.h>
+ #include <linux/kthread.h>
+ #include <linux/time.h>
+@@ -27,12 +26,6 @@
+
+ #include <acpi/cppc_acpi.h>
+
+-/* Minimum struct length needed for the DMI processor entry we want */
+-#define DMI_ENTRY_PROCESSOR_MIN_LENGTH 48
+-
+-/* Offset in the DMI processor structure for the max frequency */
+-#define DMI_PROCESSOR_MAX_SPEED 0x14
+-
+ /*
+ * This list contains information parsed from per CPU ACPI _CPC and _PSD
+ * structures: e.g. the highest and lowest supported performance, capabilities,
+@@ -291,97 +284,9 @@ static inline void cppc_freq_invariance_exit(void)
+ }
+ #endif /* CONFIG_ACPI_CPPC_CPUFREQ_FIE */
+
+-/* Callback function used to retrieve the max frequency from DMI */
+-static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
+-{
+- const u8 *dmi_data = (const u8 *)dm;
+- u16 *mhz = (u16 *)private;
+-
+- if (dm->type == DMI_ENTRY_PROCESSOR &&
+- dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
+- u16 val = (u16)get_unaligned((const u16 *)
+- (dmi_data + DMI_PROCESSOR_MAX_SPEED));
+- *mhz = val > *mhz ? val : *mhz;
+- }
+-}
+-
+-/* Look up the max frequency in DMI */
+-static u64 cppc_get_dmi_max_khz(void)
+-{
+- u16 mhz = 0;
+-
+- dmi_walk(cppc_find_dmi_mhz, &mhz);
+-
+- /*
+- * Real stupid fallback value, just in case there is no
+- * actual value set.
+- */
+- mhz = mhz ? mhz : 1;
+-
+- return (1000 * mhz);
+-}
+-
+-/*
+- * If CPPC lowest_freq and nominal_freq registers are exposed then we can
+- * use them to convert perf to freq and vice versa. The conversion is
+- * extrapolated as an affine function passing by the 2 points:
+- * - (Low perf, Low freq)
+- * - (Nominal perf, Nominal perf)
+- */
+-static unsigned int cppc_cpufreq_perf_to_khz(struct cppc_cpudata *cpu_data,
+- unsigned int perf)
+-{
+- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+- s64 retval, offset = 0;
+- static u64 max_khz;
+- u64 mul, div;
+-
+- if (caps->lowest_freq && caps->nominal_freq) {
+- mul = caps->nominal_freq - caps->lowest_freq;
+- div = caps->nominal_perf - caps->lowest_perf;
+- offset = caps->nominal_freq - div64_u64(caps->nominal_perf * mul, div);
+- } else {
+- if (!max_khz)
+- max_khz = cppc_get_dmi_max_khz();
+- mul = max_khz;
+- div = caps->highest_perf;
+- }
+-
+- retval = offset + div64_u64(perf * mul, div);
+- if (retval >= 0)
+- return retval;
+- return 0;
+-}
+-
+-static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
+- unsigned int freq)
+-{
+- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
+- s64 retval, offset = 0;
+- static u64 max_khz;
+- u64 mul, div;
+-
+- if (caps->lowest_freq && caps->nominal_freq) {
+- mul = caps->nominal_perf - caps->lowest_perf;
+- div = caps->nominal_freq - caps->lowest_freq;
+- offset = caps->nominal_perf - div64_u64(caps->nominal_freq * mul, div);
+- } else {
+- if (!max_khz)
+- max_khz = cppc_get_dmi_max_khz();
+- mul = caps->highest_perf;
+- div = max_khz;
+- }
+-
+- retval = offset + div64_u64(freq * mul, div);
+- if (retval >= 0)
+- return retval;
+- return 0;
+-}
+-
+ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+-
+ {
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ unsigned int cpu = policy->cpu;
+@@ -389,7 +294,7 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
+ u32 desired_perf;
+ int ret = 0;
+
+- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
++ desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
+ /* Return if it is exactly the same perf */
+ if (desired_perf == cpu_data->perf_ctrls.desired_perf)
+ return ret;
+@@ -417,7 +322,7 @@ static unsigned int cppc_cpufreq_fast_switch(struct cpufreq_policy *policy,
+ u32 desired_perf;
+ int ret;
+
+- desired_perf = cppc_cpufreq_khz_to_perf(cpu_data, target_freq);
++ desired_perf = cppc_khz_to_perf(&cpu_data->perf_caps, target_freq);
+ cpu_data->perf_ctrls.desired_perf = desired_perf;
+ ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+
+@@ -530,7 +435,7 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
+ min_step = min_cap / CPPC_EM_CAP_STEP;
+ max_step = max_cap / CPPC_EM_CAP_STEP;
+
+- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
++ perf_prev = cppc_khz_to_perf(perf_caps, *KHz);
+ step = perf_prev / perf_step;
+
+ if (step > max_step)
+@@ -550,8 +455,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
+ perf = step * perf_step;
+ }
+
+- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
+- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
++ *KHz = cppc_perf_to_khz(perf_caps, perf);
++ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
+ step_check = perf_check / perf_step;
+
+ /*
+@@ -561,8 +466,8 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
+ */
+ while ((*KHz == prev_freq) || (step_check != step)) {
+ perf++;
+- *KHz = cppc_cpufreq_perf_to_khz(cpu_data, perf);
+- perf_check = cppc_cpufreq_khz_to_perf(cpu_data, *KHz);
++ *KHz = cppc_perf_to_khz(perf_caps, perf);
++ perf_check = cppc_khz_to_perf(perf_caps, *KHz);
+ step_check = perf_check / perf_step;
+ }
+
+@@ -591,7 +496,7 @@ static int cppc_get_cpu_cost(struct device *cpu_dev, unsigned long KHz,
+ perf_caps = &cpu_data->perf_caps;
+ max_cap = arch_scale_cpu_capacity(cpu_dev->id);
+
+- perf_prev = cppc_cpufreq_khz_to_perf(cpu_data, KHz);
++ perf_prev = cppc_khz_to_perf(perf_caps, KHz);
+ perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+ step = perf_prev / perf_step;
+
+@@ -679,10 +584,6 @@ static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
+ goto free_mask;
+ }
+
+- /* Convert the lowest and nominal freq from MHz to KHz */
+- cpu_data->perf_caps.lowest_freq *= 1000;
+- cpu_data->perf_caps.nominal_freq *= 1000;
+-
+ list_add(&cpu_data->node, &cpu_data_list);
+
+ return cpu_data;
+@@ -724,20 +625,16 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ * Set min to lowest nonlinear perf to avoid any efficiency penalty (see
+ * Section 8.4.7.1.1.5 of ACPI 6.1 spec)
+ */
+- policy->min = cppc_cpufreq_perf_to_khz(cpu_data,
+- caps->lowest_nonlinear_perf);
+- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
+- caps->nominal_perf);
++ policy->min = cppc_perf_to_khz(caps, caps->lowest_nonlinear_perf);
++ policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
+
+ /*
+ * Set cpuinfo.min_freq to Lowest to make the full range of performance
+ * available if userspace wants to use any perf between lowest & lowest
+ * nonlinear perf
+ */
+- policy->cpuinfo.min_freq = cppc_cpufreq_perf_to_khz(cpu_data,
+- caps->lowest_perf);
+- policy->cpuinfo.max_freq = cppc_cpufreq_perf_to_khz(cpu_data,
+- caps->nominal_perf);
++ policy->cpuinfo.min_freq = cppc_perf_to_khz(caps, caps->lowest_perf);
++ policy->cpuinfo.max_freq = cppc_perf_to_khz(caps, caps->nominal_perf);
+
+ policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
+ policy->shared_type = cpu_data->shared_type;
+@@ -773,7 +670,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+ boost_supported = true;
+
+ /* Set policy->cur to max now. The governors will adjust later. */
+- policy->cur = cppc_cpufreq_perf_to_khz(cpu_data, caps->highest_perf);
++ policy->cur = cppc_perf_to_khz(caps, caps->highest_perf);
+ cpu_data->perf_ctrls.desired_perf = caps->highest_perf;
+
+ ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
+@@ -868,7 +765,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
+ delivered_perf = cppc_perf_from_fbctrs(cpu_data, &fb_ctrs_t0,
+ &fb_ctrs_t1);
+
+- return cppc_cpufreq_perf_to_khz(cpu_data, delivered_perf);
++ return cppc_perf_to_khz(&cpu_data->perf_caps, delivered_perf);
+ }
+
+ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
+@@ -883,11 +780,9 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
+ }
+
+ if (state)
+- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
+- caps->highest_perf);
++ policy->max = cppc_perf_to_khz(caps, caps->highest_perf);
+ else
+- policy->max = cppc_cpufreq_perf_to_khz(cpu_data,
+- caps->nominal_perf);
++ policy->max = cppc_perf_to_khz(caps, caps->nominal_perf);
+ policy->cpuinfo.max_freq = policy->max;
+
+ ret = freq_qos_update_request(policy->max_freq_req, policy->max);
+@@ -947,7 +842,7 @@ static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
+ if (ret < 0)
+ return -EIO;
+
+- return cppc_cpufreq_perf_to_khz(cpu_data, desired_perf);
++ return cppc_perf_to_khz(&cpu_data->perf_caps, desired_perf);
+ }
+
+ static void cppc_check_hisi_workaround(void)
+diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
+index 87383c05424bdf..3962683e2af9d4 100644
+--- a/drivers/firmware/arm_scmi/driver.c
++++ b/drivers/firmware/arm_scmi/driver.c
+@@ -2603,10 +2603,8 @@ static struct scmi_debug_info *scmi_debugfs_common_setup(struct scmi_info *info)
+ dbg->top_dentry = top_dentry;
+
+ if (devm_add_action_or_reset(info->dev,
+- scmi_debugfs_common_cleanup, dbg)) {
+- scmi_debugfs_common_cleanup(dbg);
++ scmi_debugfs_common_cleanup, dbg))
+ return NULL;
+- }
+
+ return dbg;
+ }
+diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
+index b8d470417e8f99..8e513f70b75d4c 100644
+--- a/drivers/firmware/arm_scmi/mailbox.c
++++ b/drivers/firmware/arm_scmi/mailbox.c
+@@ -23,6 +23,7 @@
+ * @chan_receiver: Optional Receiver mailbox unidirectional channel
+ * @cinfo: SCMI channel info
+ * @shmem: Transmit/Receive shared memory area
++ * @chan_lock: Lock that prevents multiple xfers from being queued
+ */
+ struct scmi_mailbox {
+ struct mbox_client cl;
+@@ -30,6 +31,7 @@ struct scmi_mailbox {
+ struct mbox_chan *chan_receiver;
+ struct scmi_chan_info *cinfo;
+ struct scmi_shared_mem __iomem *shmem;
++ struct mutex chan_lock;
+ };
+
+ #define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
+@@ -228,6 +230,7 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
+
+ cinfo->transport_info = smbox;
+ smbox->cinfo = cinfo;
++ mutex_init(&smbox->chan_lock);
+
+ return 0;
+ }
+@@ -255,13 +258,23 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+ int ret;
+
+- ret = mbox_send_message(smbox->chan, xfer);
++ /*
++ * The mailbox layer has its own queue. However the mailbox queue
++ * confuses the per message SCMI timeouts since the clock starts when
++ * the message is submitted into the mailbox queue. So when multiple
++ * messages are queued up the clock starts on all messages instead of
++ * only the one inflight.
++ */
++ mutex_lock(&smbox->chan_lock);
+
+- /* mbox_send_message returns non-negative value on success, so reset */
+- if (ret > 0)
+- ret = 0;
++ ret = mbox_send_message(smbox->chan, xfer);
++ /* mbox_send_message returns non-negative value on success */
++ if (ret < 0) {
++ mutex_unlock(&smbox->chan_lock);
++ return ret;
++ }
+
+- return ret;
++ return 0;
+ }
+
+ static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+@@ -269,13 +282,10 @@ static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
+ {
+ struct scmi_mailbox *smbox = cinfo->transport_info;
+
+- /*
+- * NOTE: we might prefer not to need the mailbox ticker to manage the
+- * transfer queueing since the protocol layer queues things by itself.
+- * Unfortunately, we have to kick the mailbox framework after we have
+- * received our message.
+- */
+ mbox_client_txdone(smbox->chan, ret);
++
++ /* Release channel */
++ mutex_unlock(&smbox->chan_lock);
+ }
+
+ static void mailbox_fetch_response(struct scmi_chan_info *cinfo,
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+index 2bca37044ad0fb..fac204d6e0ea2c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+@@ -147,6 +147,7 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+ struct acpi_buffer *params)
+ {
+ acpi_status status;
++ union acpi_object *obj;
+ union acpi_object atif_arg_elements[2];
+ struct acpi_object_list atif_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+@@ -169,16 +170,24 @@ static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+
+ status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+ &buffer);
++ obj = (union acpi_object *)buffer.pointer;
+
+- /* Fail only if calling the method fails and ATIF is supported */
++ /* Fail if calling the method fails and ATIF is supported */
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+ acpi_format_exception(status));
+- kfree(buffer.pointer);
++ kfree(obj);
+ return NULL;
+ }
+
+- return buffer.pointer;
++ if (obj->type != ACPI_TYPE_BUFFER) {
++ DRM_DEBUG_DRIVER("bad object returned from ATIF: %d\n",
++ obj->type);
++ kfree(obj);
++ return NULL;
++ }
++
++ return obj;
+ }
+
+ /**
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+index c5c55e132af21d..5e3abdd0805b6d 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+@@ -1053,8 +1053,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+
+ r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
+ AMDGPU_RING_PRIO_DEFAULT, NULL);
+- if (r)
++ if (r) {
++ amdgpu_mes_unlock(&adev->mes);
+ goto clean_up_memory;
++ }
+
+ amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);
+
+@@ -1087,7 +1089,6 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,
+ amdgpu_ring_fini(ring);
+ clean_up_memory:
+ kfree(ring);
+- amdgpu_mes_unlock(&adev->mes);
+ return r;
+ }
+
+diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+index 2b3d5183818aca..4887a360ead4f2 100644
+--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
++++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+@@ -841,6 +841,8 @@ bool is_psr_su_specific_panel(struct dc_link *link)
+ isPSRSUSupported = false;
+ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
+ isPSRSUSupported = false;
++ else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x01)
++ isPSRSUSupported = false;
+ else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
+ isPSRSUSupported = true;
+ }
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+index e238e4e8116caf..ad57368dc13f03 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+@@ -722,12 +722,13 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc)
+ _dpu_crtc_complete_flip(crtc);
+ }
+
+-static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
++static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+ {
+ struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+ struct drm_display_mode *adj_mode = &state->adjusted_mode;
+ u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
++ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ int i;
+
+ for (i = 0; i < cstate->num_mixers; i++) {
+@@ -738,7 +739,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ r->y2 = adj_mode->vdisplay;
+
+ trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
++
++ if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width)
++ return -E2BIG;
+ }
++
++ return 0;
+ }
+
+ static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
+@@ -814,7 +820,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+
+ DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);
+
+- _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
++ _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state);
+
+ /* encoder will trigger pending mask now */
+ drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
+@@ -1208,8 +1214,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ if (crtc_state->active_changed)
+ crtc_state->mode_changed = true;
+
+- if (cstate->num_mixers)
+- _dpu_crtc_setup_lm_bounds(crtc, crtc_state);
++ if (cstate->num_mixers) {
++ rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
++ if (rc)
++ return rc;
++ }
+
+ /* FIXME: move this to dpu_plane_atomic_check? */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+index 6262ec5e40204c..10c68de1bf22ca 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+@@ -1122,21 +1122,20 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+- if (!dpu_enc->hw_pp[i]) {
++ phys->hw_pp = dpu_enc->hw_pp[i];
++ if (!phys->hw_pp) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no pp block assigned at idx: %d\n", i);
+ return;
+ }
+
+- if (!hw_ctl[i]) {
++ phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
++ if (!phys->hw_ctl) {
+ DPU_ERROR_ENC(dpu_enc,
+ "no ctl block assigned at idx: %d\n", i);
+ return;
+ }
+
+- phys->hw_pp = dpu_enc->hw_pp[i];
+- phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
+-
+ phys->cached_mode = crtc_state->adjusted_mode;
+ if (phys->ops.atomic_mode_set)
+ phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
+diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+index daaf0e60475380..20c8b9af7a2199 100644
+--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
++++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+@@ -280,7 +280,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
+- if (phys_enc->hw_pp->merge_3d)
++ if (intf_cfg.mode_3d && phys_enc->hw_pp->merge_3d)
+ intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+index add72bbc28b176..4d55e3cf570f0b 100644
+--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
++++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+@@ -26,7 +26,7 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
+ end_addr = base_addr + aligned_len;
+
+ if (!(*reg))
+- *reg = kzalloc(len_padded, GFP_KERNEL);
++ *reg = kvzalloc(len_padded, GFP_KERNEL);
+
+ if (*reg)
+ dump_addr = *reg;
+@@ -48,20 +48,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
+ }
+ }
+
+-static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
+- struct drm_printer *p)
++static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len,
++ void __iomem *base_addr, struct drm_printer *p)
+ {
+ int i;
+- u32 *dump_addr = NULL;
+ void __iomem *addr;
+ u32 num_rows;
+
++ if (!dump_addr) {
++ drm_printf(p, "Registers not stored\n");
++ return;
++ }
++
+ addr = base_addr;
+ num_rows = len / REG_DUMP_ALIGN;
+
+- if (*reg)
+- dump_addr = *reg;
+-
+ for (i = 0; i < num_rows; i++) {
+ drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
+ (unsigned long)(addr - base_addr),
+@@ -89,7 +90,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)
+
+ list_for_each_entry_safe(block, tmp, &state->blocks, node) {
+ drm_printf(p, "====================%s================\n", block->name);
+- msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
++ msm_disp_state_print_regs(block->state, block->size, block->base_addr, p);
+ }
+
+ drm_printf(p, "===================dpu drm state================\n");
+@@ -161,7 +162,7 @@ void msm_disp_state_free(void *data)
+
+ list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
+ list_del(&block->node);
+- kfree(block->state);
++ kvfree(block->state);
+ kfree(block);
+ }
+
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index 77b805eacb1b18..f920329fe2e090 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -537,7 +537,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo
+
+ int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
+
+- return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
++ return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal);
+ }
+
+ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
+@@ -545,7 +545,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
+ {
+ unsigned long pclk_rate;
+
+- pclk_rate = mode->clock * 1000;
++ pclk_rate = mode->clock * 1000u;
+
+ if (dsc)
+ pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
+diff --git a/drivers/gpu/drm/vboxvideo/hgsmi_base.c b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+index 8c041d7ce4f1bd..87dccaecc3e57d 100644
+--- a/drivers/gpu/drm/vboxvideo/hgsmi_base.c
++++ b/drivers/gpu/drm/vboxvideo/hgsmi_base.c
+@@ -139,7 +139,15 @@ int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags,
+ flags |= VBOX_MOUSE_POINTER_VISIBLE;
+ }
+
+- p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA,
++ /*
++ * The 4 extra bytes come from switching struct vbva_mouse_pointer_shape
++ * from having a 4 bytes fixed array at the end to using a proper VLA
++ * at the end. These 4 extra bytes were not subtracted from sizeof(*p)
++ * before the switch to the VLA, so this way the behavior is unchanged.
++ * Chances are these 4 extra bytes are not necessary but they are kept
++ * to avoid regressions.
++ */
++ p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len + 4, HGSMI_CH_VBVA,
+ VBVA_MOUSE_POINTER_SHAPE);
+ if (!p)
+ return -ENOMEM;
+diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
+index f60d82504da02c..79ec8481de0e48 100644
+--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
++++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
+@@ -351,10 +351,8 @@ struct vbva_mouse_pointer_shape {
+ * Bytes in the gap between the AND and the XOR mask are undefined.
+ * XOR mask scanlines have no gap between them and size of XOR mask is:
+ * xor_len = width * 4 * height.
+- *
+- * Preallocate 4 bytes for accessing actual data as p->data.
+ */
+- u8 data[4];
++ u8 data[];
+ } __packed;
+
+ /* pointer is visible */
+diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+index b22ae25db4e17c..52ea0c50c852ce 100644
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+@@ -881,6 +881,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
+ struct drm_crtc_state *new_crtc_state;
+
+ conn_state = drm_atomic_get_connector_state(state, conn);
++
++ if (IS_ERR(conn_state))
++ return PTR_ERR(conn_state);
++
+ du = vmw_connector_to_stdu(conn);
+
+ if (!conn_state->crtc)
+diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
+index e90e2f01550ad3..04083b7395ab8c 100644
+--- a/drivers/iio/accel/bma400_core.c
++++ b/drivers/iio/accel/bma400_core.c
+@@ -1219,7 +1219,8 @@ static int bma400_activity_event_en(struct bma400_data *data,
+ static int bma400_tap_event_en(struct bma400_data *data,
+ enum iio_event_direction dir, int state)
+ {
+- unsigned int mask, field_value;
++ unsigned int mask;
++ unsigned int field_value = 0;
+ int ret;
+
+ /*
+diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
+index e46817cb5581cf..6dee3b686effdc 100644
+--- a/drivers/iio/adc/Kconfig
++++ b/drivers/iio/adc/Kconfig
+@@ -1335,6 +1335,8 @@ config TI_LMP92064
+ tristate "Texas Instruments LMP92064 ADC driver"
+ depends on SPI
+ select REGMAP_SPI
++ select IIO_BUFFER
++ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for the LMP92064 Precision Current and Voltage
+ sensor.
+diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
+index 71de6cc4a1584f..036763d3e84c6b 100644
+--- a/drivers/iio/frequency/Kconfig
++++ b/drivers/iio/frequency/Kconfig
+@@ -82,25 +82,26 @@ config ADMV1014
+ module will be called admv1014.
+
+ config ADMV4420
+- tristate "Analog Devices ADMV4420 K Band Downconverter"
+- depends on SPI
+- help
+- Say yes here to build support for Analog Devices K Band
+- Downconverter with integrated Fractional-N PLL and VCO.
++ tristate "Analog Devices ADMV4420 K Band Downconverter"
++ depends on SPI
++ select REGMAP_SPI
++ help
++ Say yes here to build support for Analog Devices K Band
++ Downconverter with integrated Fractional-N PLL and VCO.
+
+- To compile this driver as a module, choose M here: the
+- module will be called admv4420.
++ To compile this driver as a module, choose M here: the
++ module will be called admv4420.
+
+ config ADRF6780
+- tristate "Analog Devices ADRF6780 Microwave Upconverter"
+- depends on SPI
+- depends on COMMON_CLK
+- help
+- Say yes here to build support for Analog Devices ADRF6780
+- 5.9 GHz to 23.6 GHz, Wideband, Microwave Upconverter.
+-
+- To compile this driver as a module, choose M here: the
+- module will be called adrf6780.
++ tristate "Analog Devices ADRF6780 Microwave Upconverter"
++ depends on SPI
++ depends on COMMON_CLK
++ help
++ Say yes here to build support for Analog Devices ADRF6780
++ 5.9 GHz to 23.6 GHz, Wideband, Microwave Upconverter.
++
++ To compile this driver as a module, choose M here: the
++ module will be called adrf6780.
+
+ endmenu
+ endmenu
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index f253295795f0a4..fd78d678877c47 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -269,6 +269,8 @@ rdma_find_ndev_for_src_ip_rcu(struct net *net, const struct sockaddr *src_in)
+ break;
+ #endif
+ }
++ if (!ret && dev && is_vlan_dev(dev))
++ dev = vlan_dev_real_dev(dev);
+ return ret ? ERR_PTR(ret) : dev;
+ }
+
+diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
+index 93572405d6fae7..1e63f809174837 100644
+--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
++++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
+@@ -366,12 +366,12 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
+ goto done;
+ }
+ }
+- if (rdev->pacing.dbr_pacing)
++ if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ bnxt_re_copy_db_pacing_stats(rdev, stats);
+ }
+
+ done:
+- return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
++ return bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
+ }
+
+@@ -381,7 +381,7 @@ struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ int num_counters = 0;
+
+- if (bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
++ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ num_counters = BNXT_RE_NUM_EXT_COUNTERS;
+ else
+ num_counters = BNXT_RE_NUM_STD_COUNTERS;
+diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+index b4d3e7dfc939f6..f20da108fb2978 100644
+--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
++++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+@@ -400,6 +400,10 @@ static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
+ struct bnxt_re_fence_data *fence = &pd->fence;
+ struct ib_mr *ib_mr = &fence->mr->ib_mr;
+ struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
++ struct bnxt_re_dev *rdev = pd->rdev;
++
++ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
++ return;
+
+ memset(wqe, 0, sizeof(*wqe));
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
+@@ -454,6 +458,9 @@ static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
+ struct device *dev = &rdev->en_dev->pdev->dev;
+ struct bnxt_re_mr *mr = fence->mr;
+
++ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
++ return;
++
+ if (fence->mw) {
+ bnxt_re_dealloc_mw(fence->mw);
+ fence->mw = NULL;
+@@ -485,6 +492,9 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
+ struct ib_mw *mw;
+ int rc;
+
++ if (bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
++ return 0;
++
+ dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
+ DMA_BIDIRECTIONAL);
+ rc = dma_mapping_error(dev, dma_addr);
+@@ -1023,7 +1033,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
+ bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
+ /* Consider mapping PSN search memory only for RC QPs. */
+ if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
+- psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
++ psn_sz = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
+ psn_nume = (qplib_qp->wqe_mode == BNXT_QPLIB_WQE_MODE_STATIC) ?
+@@ -1234,7 +1244,7 @@ static void bnxt_re_adjust_gsi_rq_attr(struct bnxt_re_qp *qp)
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
++ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+ if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
+ qplqp->rq.max_sge = dev_attr->max_qp_sges;
+@@ -1301,7 +1311,7 @@ static void bnxt_re_adjust_gsi_sq_attr(struct bnxt_re_qp *qp,
+ qplqp = &qp->qplib_qp;
+ dev_attr = &rdev->dev_attr;
+
+- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
++ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ entries = bnxt_re_init_depth(init_attr->cap.max_send_wr + 1, uctx);
+ qplqp->sq.max_wqe = min_t(u32, entries,
+ dev_attr->max_qp_wqes + 1);
+@@ -1328,7 +1338,7 @@ static int bnxt_re_init_qp_type(struct bnxt_re_dev *rdev,
+ goto out;
+ }
+
+- if (bnxt_qplib_is_chip_gen_p5(chip_ctx) &&
++ if (bnxt_qplib_is_chip_gen_p5_p7(chip_ctx) &&
+ init_attr->qp_type == IB_QPT_GSI)
+ qptype = CMDQ_CREATE_QP_TYPE_GSI;
+ out:
+@@ -1527,7 +1537,7 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
+ goto fail;
+
+ if (qp_init_attr->qp_type == IB_QPT_GSI &&
+- !(bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))) {
++ !(bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))) {
+ rc = bnxt_re_create_gsi_qp(qp, pd, qp_init_attr);
+ if (rc == -ENODEV)
+ goto qp_destroy;
+@@ -2553,11 +2563,6 @@ static int bnxt_re_build_inv_wqe(const struct ib_send_wr *wr,
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_LOCAL_INV;
+ wqe->local_inv.inv_l_key = wr->ex.invalidate_rkey;
+
+- /* Need unconditional fence for local invalidate
+- * opcode to work as expected.
+- */
+- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+-
+ if (wr->send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+ if (wr->send_flags & IB_SEND_SOLICITED)
+@@ -2580,12 +2585,6 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
+ wqe->frmr.levels = qplib_frpl->hwq.level;
+ wqe->type = BNXT_QPLIB_SWQE_TYPE_REG_MR;
+
+- /* Need unconditional fence for reg_mr
+- * opcode to function as expected.
+- */
+-
+- wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
+-
+ if (wr->wr.send_flags & IB_SEND_SIGNALED)
+ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
+
+@@ -2716,6 +2715,18 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
+ return rc;
+ }
+
++static void bnxt_re_legacy_set_uc_fence(struct bnxt_qplib_swqe *wqe)
++{
++ /* Need unconditional fence for non-wire memory opcode
++ * to work as expected.
++ */
++ if (wqe->type == BNXT_QPLIB_SWQE_TYPE_LOCAL_INV ||
++ wqe->type == BNXT_QPLIB_SWQE_TYPE_FAST_REG_MR ||
++ wqe->type == BNXT_QPLIB_SWQE_TYPE_REG_MR ||
++ wqe->type == BNXT_QPLIB_SWQE_TYPE_BIND_MW)
++ wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
++}
++
+ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ const struct ib_send_wr **bad_wr)
+ {
+@@ -2795,8 +2806,11 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, const struct ib_send_wr *wr,
+ rc = -EINVAL;
+ goto bad;
+ }
+- if (!rc)
++ if (!rc) {
++ if (!bnxt_qplib_is_chip_gen_p5_p7(qp->rdev->chip_ctx))
++ bnxt_re_legacy_set_uc_fence(&wqe);
+ rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
++ }
+ bad:
+ if (rc) {
+ ibdev_err(&qp->rdev->ibdev,
+diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
+index 039801d93ed8aa..0373d0e9db6329 100644
+--- a/drivers/infiniband/hw/bnxt_re/main.c
++++ b/drivers/infiniband/hw/bnxt_re/main.c
+@@ -107,9 +107,14 @@ static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
+ dev_info(rdev_to_dev(rdev),
+ "Couldn't get DB bar size, Low latency framework is disabled\n");
+ /* set register offsets for both UC and WC */
+- res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
+- BNXT_QPLIB_DBR_PF_DB_OFFSET;
+- res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
++ if (bnxt_qplib_is_chip_gen_p7(cctx)) {
++ res->dpi_tbl.ucreg.offset = offset;
++ res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
++ } else {
++ res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
++ BNXT_QPLIB_DBR_PF_DB_OFFSET;
++ res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
++ }
+
+ /* If WC mapping is disabled by L2 driver then en_dev->l2_db_size
+ * is equal to the DB-Bar actual size. This indicates that L2
+@@ -128,7 +133,7 @@ static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
+ struct bnxt_qplib_chip_ctx *cctx;
+
+ cctx = rdev->chip_ctx;
+- cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
++ cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ mode : BNXT_QPLIB_WQE_MODE_STATIC;
+ if (bnxt_re_hwrm_qcaps(rdev))
+ dev_err(rdev_to_dev(rdev),
+@@ -176,8 +181,11 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
+
+ bnxt_re_set_db_offset(rdev);
+ rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
+- if (rc)
++ if (rc) {
++ kfree(rdev->chip_ctx);
++ rdev->chip_ctx = NULL;
+ return rc;
++ }
+
+ if (bnxt_qplib_determine_atomics(en_dev->pdev))
+ ibdev_info(&rdev->ibdev,
+@@ -215,7 +223,7 @@ static void bnxt_re_limit_pf_res(struct bnxt_re_dev *rdev)
+ ctx->srqc_count = min_t(u32, BNXT_RE_MAX_SRQC_COUNT,
+ attr->max_srq);
+ ctx->cq_count = min_t(u32, BNXT_RE_MAX_CQ_COUNT, attr->max_cq);
+- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
++ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ for (i = 0; i < MAX_TQM_ALLOC_REQ; i++)
+ rdev->qplib_ctx.tqm_ctx.qcount[i] =
+ rdev->dev_attr.tqm_alloc_reqs[i];
+@@ -264,7 +272,7 @@ static void bnxt_re_set_resource_limits(struct bnxt_re_dev *rdev)
+ memset(&rdev->qplib_ctx.vf_res, 0, sizeof(struct bnxt_qplib_vf_res));
+ bnxt_re_limit_pf_res(rdev);
+
+- num_vfs = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
++ num_vfs = bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx) ?
+ BNXT_RE_GEN_P5_MAX_VF : rdev->num_vfs;
+ if (num_vfs)
+ bnxt_re_limit_vf_res(&rdev->qplib_ctx, num_vfs);
+@@ -276,7 +284,7 @@ static void bnxt_re_vf_res_config(struct bnxt_re_dev *rdev)
+ if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
+ return;
+ rdev->num_vfs = pci_sriov_get_totalvfs(rdev->en_dev->pdev);
+- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) {
++ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+ bnxt_re_set_resource_limits(rdev);
+ bnxt_qplib_set_func_resources(&rdev->qplib_res, &rdev->rcfw,
+ &rdev->qplib_ctx);
+@@ -1067,16 +1075,6 @@ static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
+ return 0;
+ }
+
+-#define BNXT_RE_GEN_P5_PF_NQ_DB 0x10000
+-#define BNXT_RE_GEN_P5_VF_NQ_DB 0x4000
+-static u32 bnxt_re_get_nqdb_offset(struct bnxt_re_dev *rdev, u16 indx)
+-{
+- return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+- (rdev->is_virtfn ? BNXT_RE_GEN_P5_VF_NQ_DB :
+- BNXT_RE_GEN_P5_PF_NQ_DB) :
+- rdev->en_dev->msix_entries[indx].db_offset;
+-}
+-
+ static void bnxt_re_cleanup_res(struct bnxt_re_dev *rdev)
+ {
+ int i;
+@@ -1097,7 +1095,7 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
+ bnxt_qplib_init_res(&rdev->qplib_res);
+
+ for (i = 1; i < rdev->num_msix ; i++) {
+- db_offt = bnxt_re_get_nqdb_offset(rdev, i);
++ db_offt = rdev->en_dev->msix_entries[i].db_offset;
+ rc = bnxt_qplib_enable_nq(rdev->en_dev->pdev, &rdev->nq[i - 1],
+ i - 1, rdev->en_dev->msix_entries[i].vector,
+ db_offt, &bnxt_re_cqn_handler,
+@@ -1508,7 +1506,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
+ ibdev_err(&rdev->ibdev, "Failed to allocate CREQ: %#x\n", rc);
+ goto free_rcfw;
+ }
+- db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
++ db_offt = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].db_offset;
+ vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
+ rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
+ vid, db_offt,
+@@ -1536,7 +1534,7 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 wqe_mode)
+ bnxt_re_set_resource_limits(rdev);
+
+ rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
+- bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx));
++ bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
+ if (rc) {
+ ibdev_err(&rdev->ibdev,
+ "Failed to allocate QPLIB context: %#x\n", rc);
+@@ -1659,7 +1657,7 @@ static void bnxt_re_setup_cc(struct bnxt_re_dev *rdev, bool enable)
+ return;
+
+ /* Currently enabling only for GenP5 adapters */
+- if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
++ if (!bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
+ return;
+
+ if (enable) {
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+index 3b28878f62062f..4ee11cb4f2bd38 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+@@ -995,7 +995,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
+
+ /* SQ */
+ if (qp->type == CMDQ_CREATE_QP_TYPE_RC) {
+- psn_sz = bnxt_qplib_is_chip_gen_p5(res->cctx) ?
++ psn_sz = bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
+ sizeof(struct sq_psn_search_ext) :
+ sizeof(struct sq_psn_search);
+
+@@ -1649,7 +1649,7 @@ static void bnxt_qplib_fill_psn_search(struct bnxt_qplib_qp *qp,
+ flg_npsn = ((swq->next_psn << SQ_PSN_SEARCH_NEXT_PSN_SFT) &
+ SQ_PSN_SEARCH_NEXT_PSN_MASK);
+
+- if (bnxt_qplib_is_chip_gen_p5(qp->cctx)) {
++ if (bnxt_qplib_is_chip_gen_p5_p7(qp->cctx)) {
+ psns_ext->opcode_start_psn = cpu_to_le32(op_spsn);
+ psns_ext->flags_next_psn = cpu_to_le32(flg_npsn);
+ psns_ext->start_slot_idx = cpu_to_le16(swq->slot_idx);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+index a6f38d8f12efe2..56ddff96b5083b 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+@@ -169,7 +169,7 @@ struct bnxt_qplib_swqe {
+ };
+ u32 q_key;
+ u32 dst_qp;
+- u16 avid;
++ u32 avid;
+ } send;
+
+ /* Send Raw Ethernet and QP1 */
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+index 5680fe8b890ad1..7294221b3316cf 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+@@ -525,7 +525,7 @@ static int __bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+ /* failed with status */
+ dev_err(&rcfw->pdev->dev, "cmdq[%#x]=%#x status %#x\n",
+ cookie, opcode, evnt->status);
+- rc = -EFAULT;
++ rc = -EIO;
+ }
+
+ return rc;
+@@ -852,7 +852,7 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
+ */
+ if (is_virtfn)
+ goto skip_ctx_setup;
+- if (bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
++ if (bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
+ goto config_vf_res;
+
+ lvl = ctx->qpc_tbl.level;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+index ae2bde34e785b7..96ceec1e8199a6 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
+@@ -244,6 +244,8 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ sginfo.pgsize = npde * pg_size;
+ sginfo.npages = 1;
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], &sginfo);
++ if (rc)
++ goto fail;
+
+ /* Alloc PBL pages */
+ sginfo.npages = npbl;
+@@ -255,22 +257,9 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
+ dst_virt_ptr =
+ (dma_addr_t **)hwq->pbl[PBL_LVL_0].pg_arr;
+ src_phys_ptr = hwq->pbl[PBL_LVL_1].pg_map_arr;
+- if (hwq_attr->type == HWQ_TYPE_MR) {
+- /* For MR it is expected that we supply only 1 contigous
+- * page i.e only 1 entry in the PDL that will contain
+- * all the PBLs for the user supplied memory region
+- */
+- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+- i++)
+- dst_virt_ptr[0][i] = src_phys_ptr[i] |
+- flag;
+- } else {
+- for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count;
+- i++)
+- dst_virt_ptr[PTR_PG(i)][PTR_IDX(i)] =
+- src_phys_ptr[i] |
+- PTU_PDE_VALID;
+- }
++ for (i = 0; i < hwq->pbl[PBL_LVL_1].pg_count; i++)
++ dst_virt_ptr[0][i] = src_phys_ptr[i] | flag;
++
+ /* Alloc or init PTEs */
+ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_2],
+ hwq_attr->sginfo);
+@@ -805,7 +794,7 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
+ dpit = &res->dpi_tbl;
+ reg = &dpit->wcreg;
+
+- if (!bnxt_qplib_is_chip_gen_p5(res->cctx)) {
++ if (!bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ /* Offest should come from L2 driver */
+ dbr_offset = dev_attr->l2_db_size;
+ dpit->ucreg.offset = dbr_offset;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+index 534db462216ac9..f9e7aa3757cfb2 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
+@@ -44,6 +44,9 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
+ #define CHIP_NUM_57508 0x1750
+ #define CHIP_NUM_57504 0x1751
+ #define CHIP_NUM_57502 0x1752
++#define CHIP_NUM_58818 0xd818
++#define CHIP_NUM_57608 0x1760
+
+ struct bnxt_qplib_drv_modes {
+ u8 wqe_mode;
+@@ -296,6 +299,12 @@ struct bnxt_qplib_res {
+ struct bnxt_qplib_db_pacing_data *pacing_data;
+ };
+
++static inline bool bnxt_qplib_is_chip_gen_p7(struct bnxt_qplib_chip_ctx *cctx)
++{
++ return (cctx->chip_num == CHIP_NUM_58818 ||
++ cctx->chip_num == CHIP_NUM_57608);
++}
++
+ static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
+ {
+ return (cctx->chip_num == CHIP_NUM_57508 ||
+@@ -303,15 +312,20 @@ static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
+ cctx->chip_num == CHIP_NUM_57502);
+ }
+
++static inline bool bnxt_qplib_is_chip_gen_p5_p7(struct bnxt_qplib_chip_ctx *cctx)
++{
++ return bnxt_qplib_is_chip_gen_p5(cctx) || bnxt_qplib_is_chip_gen_p7(cctx);
++}
++
+ static inline u8 bnxt_qplib_get_hwq_type(struct bnxt_qplib_res *res)
+ {
+- return bnxt_qplib_is_chip_gen_p5(res->cctx) ?
++ return bnxt_qplib_is_chip_gen_p5_p7(res->cctx) ?
+ HWQ_TYPE_QUEUE : HWQ_TYPE_L2_CMPL;
+ }
+
+ static inline u8 bnxt_qplib_get_ring_type(struct bnxt_qplib_chip_ctx *cctx)
+ {
+- return bnxt_qplib_is_chip_gen_p5(cctx) ?
++ return bnxt_qplib_is_chip_gen_p5_p7(cctx) ?
+ RING_ALLOC_REQ_RING_TYPE_NQ :
+ RING_ALLOC_REQ_RING_TYPE_ROCE_CMPL;
+ }
+@@ -488,7 +502,7 @@ static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
+ u32 type;
+
+ type = arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
+- if (bnxt_qplib_is_chip_gen_p5(cctx))
++ if (bnxt_qplib_is_chip_gen_p5_p7(cctx))
+ bnxt_qplib_ring_db(info, type);
+ else
+ bnxt_qplib_ring_db32(info, arm);
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+index a27b6851516477..0b98577cd7082e 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+@@ -59,7 +59,7 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
+ {
+ u16 pcie_ctl2 = 0;
+
+- if (!bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx))
++ if (!bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx))
+ return false;
+
+ pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, &pcie_ctl2);
+@@ -133,10 +133,12 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ * reporting the max number
+ */
+ attr->max_qp_wqes -= BNXT_QPLIB_RESERVED_QP_WRS + 1;
+- attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5(rcfw->res->cctx) ?
++ attr->max_qp_sges = bnxt_qplib_is_chip_gen_p5_p7(rcfw->res->cctx) ?
+ 6 : sb->max_sge;
+ attr->max_cq = le32_to_cpu(sb->max_cq);
+ attr->max_cq_wqes = le32_to_cpu(sb->max_cqe);
++ if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
++ attr->max_cq_wqes = min_t(u32, BNXT_QPLIB_MAX_CQ_WQES, attr->max_cq_wqes);
+ attr->max_cq_sges = attr->max_qp_sges;
+ attr->max_mr = le32_to_cpu(sb->max_mr);
+ attr->max_mw = le32_to_cpu(sb->max_mw);
+@@ -151,9 +153,17 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
+ attr->max_srq_sges = sb->max_srq_sge;
+ attr->max_pkey = 1;
+ attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
+- attr->l2_db_size = (sb->l2_db_space_size + 1) *
+- (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
+- attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
++ if (!bnxt_qplib_is_chip_gen_p7(rcfw->res->cctx))
++ attr->l2_db_size = (sb->l2_db_space_size + 1) *
++ (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
++ /*
++ * Read the max gid supported by HW.
++	 * For each entry in the HW GID table, we consume 2
++	 * GID entries in the kernel GID table, so the max_gid reported
++	 * to the stack can be up to twice the value reported by the HW,
++	 * capped at 256 GIDs.
++ */
++ attr->max_sgid = le32_to_cpu(sb->max_gid);
++ attr->max_sgid = min_t(u32, BNXT_QPLIB_NUM_GIDS_SUPPORTED, 2 * attr->max_sgid);
+ attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
+
+ bnxt_qplib_query_version(rcfw, attr->fw_ver);
+@@ -934,7 +944,7 @@ int bnxt_qplib_modify_cc(struct bnxt_qplib_res *res,
+ req->inactivity_th = cpu_to_le16(cc_param->inact_th);
+
+ /* For chip gen P5 onwards fill extended cmd and header */
+- if (bnxt_qplib_is_chip_gen_p5(res->cctx)) {
++ if (bnxt_qplib_is_chip_gen_p5_p7(res->cctx)) {
+ struct roce_tlv *hdr;
+ u32 payload;
+ u32 chunks;
+diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+index d33c78b96217a8..755765e68eaab2 100644
+--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
++++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+@@ -55,6 +55,7 @@ struct bnxt_qplib_dev_attr {
+ u32 max_qp_wqes;
+ u32 max_qp_sges;
+ u32 max_cq;
++#define BNXT_QPLIB_MAX_CQ_WQES 0xfffff
+ u32 max_cq_wqes;
+ u32 max_cq_sges;
+ u32 max_mr;
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index b3757c6a0457a1..8d753e6e0c7190 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2086,7 +2086,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
+ err = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ if (iptype == 4)
+- pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
++ pdev = __ip_dev_find(&init_net, *(__be32 *)peer_ip, false);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ for_each_netdev(&init_net, pdev) {
+ if (ipv6_chk_addr(&init_net,
+@@ -2101,12 +2101,12 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
+ err = -ENODEV;
+ goto out;
+ }
++ if (is_vlan_dev(pdev))
++ pdev = vlan_dev_real_dev(pdev);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, pdev, rt_tos2priority(tos));
+- if (!ep->l2t) {
+- dev_put(pdev);
++ if (!ep->l2t)
+ goto out;
+- }
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = ((struct port_info *)netdev_priv(pdev))->smt_idx;
+@@ -2119,7 +2119,6 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
+- dev_put(pdev);
+ } else {
+ pdev = get_real_dev(n->dev);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
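++
++	/* Refcounting behind the hunk above: ip_dev_find() returns the
++	 * device with a reference held, while __ip_dev_find(net, addr,
++	 * false) does not take one, which is why the dev_put(pdev) calls
++	 * on this loopback path are dropped along with the lookup switch.
++	 * vlan_dev_real_dev() likewise returns the lower device without
++	 * taking a reference.
++	 */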
+diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
+index 42d1e977106696..1916daa8c33230 100644
+--- a/drivers/infiniband/hw/irdma/cm.c
++++ b/drivers/infiniband/hw/irdma/cm.c
+@@ -3630,7 +3630,7 @@ void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp)
+ /**
+ * irdma_accept - registered call for connection to be accepted
+ * @cm_id: cm information for passive connection
+- * @conn_param: accpet parameters
++ * @conn_param: accept parameters
+ */
+ int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ {
+diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
+index 45547bf281e312..4bebc34a2929b1 100644
+--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
++++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
+@@ -68,6 +68,8 @@ MODULE_LICENSE("Dual BSD/GPL");
+ static u64 srpt_service_guid;
+ static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
+ static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */
++static DEFINE_MUTEX(srpt_mc_mutex); /* Protects srpt_memory_caches. */
++static DEFINE_XARRAY(srpt_memory_caches); /* See also srpt_memory_cache_entry */
+
+ static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
+ module_param(srp_max_req_size, int, 0444);
+@@ -105,6 +107,63 @@ static void srpt_recv_done(struct ib_cq *cq, struct ib_wc *wc);
+ static void srpt_send_done(struct ib_cq *cq, struct ib_wc *wc);
+ static void srpt_process_wait_list(struct srpt_rdma_ch *ch);
+
++/* Type of the entries in srpt_memory_caches. */
++struct srpt_memory_cache_entry {
++ refcount_t ref;
++ struct kmem_cache *c;
++};
++
++static struct kmem_cache *srpt_cache_get(unsigned int object_size)
++{
++ struct srpt_memory_cache_entry *e;
++ char name[32];
++ void *res;
++
++ guard(mutex)(&srpt_mc_mutex);
++ e = xa_load(&srpt_memory_caches, object_size);
++ if (e) {
++ refcount_inc(&e->ref);
++ return e->c;
++ }
++ snprintf(name, sizeof(name), "srpt-%u", object_size);
++ e = kmalloc(sizeof(*e), GFP_KERNEL);
++ if (!e)
++ return NULL;
++ refcount_set(&e->ref, 1);
++ e->c = kmem_cache_create(name, object_size, /*align=*/512, 0, NULL);
++ if (!e->c)
++ goto free_entry;
++ res = xa_store(&srpt_memory_caches, object_size, e, GFP_KERNEL);
++ if (xa_is_err(res))
++ goto destroy_cache;
++ return e->c;
++
++destroy_cache:
++ kmem_cache_destroy(e->c);
++
++free_entry:
++ kfree(e);
++ return NULL;
++}
++
++static void srpt_cache_put(struct kmem_cache *c)
++{
++ struct srpt_memory_cache_entry *e = NULL;
++ unsigned long object_size;
++
++ guard(mutex)(&srpt_mc_mutex);
++ xa_for_each(&srpt_memory_caches, object_size, e)
++ if (e->c == c)
++ break;
++ if (WARN_ON_ONCE(!e))
++ return;
++ if (!refcount_dec_and_test(&e->ref))
++ return;
++ WARN_ON_ONCE(xa_erase(&srpt_memory_caches, object_size) != e);
++ kmem_cache_destroy(e->c);
++ kfree(e);
++}
++
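++/*
++ * Minimal usage sketch (illustrative, not part of the driver): callers
++ * pair srpt_cache_get() with srpt_cache_put() the way kmem_cache_create()
++ * used to be paired with kmem_cache_destroy(); requests for the same
++ * object_size now share one refcounted cache.
++ */
++#if 0	/* illustrative only, not compiled */
++static int example_alloc(unsigned int size)
++{
++	struct kmem_cache *c = srpt_cache_get(size);
++	void *buf;
++
++	if (!c)
++		return -ENOMEM;
++	buf = kmem_cache_alloc(c, GFP_KERNEL);
++	if (buf) {
++		/* ... use buf ... */
++		kmem_cache_free(c, buf);
++	}
++	srpt_cache_put(c);
++	return buf ? 0 : -ENOMEM;
++}
++#endif
++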
+ /*
+ * The only allowed channel state changes are those that change the channel
+ * state into a state with a higher numerical value. Hence the new > prev test.
+@@ -2119,13 +2178,13 @@ static void srpt_release_channel_work(struct work_struct *w)
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+- kmem_cache_destroy(ch->rsp_buf_cache);
++ srpt_cache_put(ch->rsp_buf_cache);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_recv_ring,
+ sdev, ch->rq_size,
+ ch->req_buf_cache, DMA_FROM_DEVICE);
+
+- kmem_cache_destroy(ch->req_buf_cache);
++ srpt_cache_put(ch->req_buf_cache);
+
+ kref_put(&ch->kref, srpt_free_ch);
+ }
+@@ -2245,8 +2304,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ INIT_LIST_HEAD(&ch->cmd_wait_list);
+ ch->max_rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+
+- ch->rsp_buf_cache = kmem_cache_create("srpt-rsp-buf", ch->max_rsp_size,
+- 512, 0, NULL);
++ ch->rsp_buf_cache = srpt_cache_get(ch->max_rsp_size);
+ if (!ch->rsp_buf_cache)
+ goto free_ch;
+
+@@ -2280,8 +2338,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ alignment_offset = round_up(imm_data_offset, 512) -
+ imm_data_offset;
+ req_sz = alignment_offset + imm_data_offset + srp_max_req_size;
+- ch->req_buf_cache = kmem_cache_create("srpt-req-buf", req_sz,
+- 512, 0, NULL);
++ ch->req_buf_cache = srpt_cache_get(req_sz);
+ if (!ch->req_buf_cache)
+ goto free_rsp_ring;
+
+@@ -2478,7 +2535,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ ch->req_buf_cache, DMA_FROM_DEVICE);
+
+ free_recv_cache:
+- kmem_cache_destroy(ch->req_buf_cache);
++ srpt_cache_put(ch->req_buf_cache);
+
+ free_rsp_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+@@ -2486,7 +2543,7 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
+ ch->rsp_buf_cache, DMA_TO_DEVICE);
+
+ free_rsp_cache:
+- kmem_cache_destroy(ch->rsp_buf_cache);
++ srpt_cache_put(ch->rsp_buf_cache);
+
+ free_ch:
+ if (rdma_cm_id)
+@@ -3055,7 +3112,7 @@ static void srpt_free_srq(struct srpt_device *sdev)
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, sdev->req_buf_cache,
+ DMA_FROM_DEVICE);
+- kmem_cache_destroy(sdev->req_buf_cache);
++ srpt_cache_put(sdev->req_buf_cache);
+ sdev->srq = NULL;
+ }
+
+@@ -3082,8 +3139,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
+ pr_debug("create SRQ #wr= %d max_allow=%d dev= %s\n", sdev->srq_size,
+ sdev->device->attrs.max_srq_wr, dev_name(&device->dev));
+
+- sdev->req_buf_cache = kmem_cache_create("srpt-srq-req-buf",
+- srp_max_req_size, 0, 0, NULL);
++ sdev->req_buf_cache = srpt_cache_get(srp_max_req_size);
+ if (!sdev->req_buf_cache)
+ goto free_srq;
+
+@@ -3105,7 +3161,7 @@ static int srpt_alloc_srq(struct srpt_device *sdev)
+ return 0;
+
+ free_cache:
+- kmem_cache_destroy(sdev->req_buf_cache);
++ srpt_cache_put(sdev->req_buf_cache);
+
+ free_srq:
+ ib_destroy_srq(srq);
+diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
+index ea4b921e5e1588..5a7836186fd414 100644
+--- a/drivers/irqchip/irq-renesas-rzg2l.c
++++ b/drivers/irqchip/irq-renesas-rzg2l.c
+@@ -8,6 +8,7 @@
+ */
+
+ #include <linux/bitfield.h>
++#include <linux/cleanup.h>
+ #include <linux/clk.h>
+ #include <linux/err.h>
+ #include <linux/io.h>
+@@ -18,6 +19,7 @@
+ #include <linux/pm_runtime.h>
+ #include <linux/reset.h>
+ #include <linux/spinlock.h>
++#include <linux/syscore_ops.h>
+
+ #define IRQC_IRQ_START 1
+ #define IRQC_IRQ_COUNT 8
+@@ -55,12 +57,30 @@
+ #define TINT_EXTRACT_HWIRQ(x) FIELD_GET(GENMASK(15, 0), (x))
+ #define TINT_EXTRACT_GPIOINT(x) FIELD_GET(GENMASK(31, 16), (x))
+
+-struct rzg2l_irqc_priv {
+- void __iomem *base;
+- struct irq_fwspec fwspec[IRQC_NUM_IRQ];
+- raw_spinlock_t lock;
++/**
++ * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume)
++ * @iitsr: IITSR register
++ * @titsr: TITSR registers
++ */
++struct rzg2l_irqc_reg_cache {
++ u32 iitsr;
++ u32 titsr[2];
+ };
+
++/**
++ * struct rzg2l_irqc_priv - IRQ controller private data structure
++ * @base: Controller's base address
++ * @fwspec: IRQ firmware specific data
++ * @lock: Lock to serialize access to hardware registers
++ * @cache: Registers cache for suspend/resume
++ */
++static struct rzg2l_irqc_priv {
++ void __iomem *base;
++ struct irq_fwspec fwspec[IRQC_NUM_IRQ];
++ raw_spinlock_t lock;
++ struct rzg2l_irqc_reg_cache cache;
++} *rzg2l_irqc_data;
++
+ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
+ {
+ return data->domain->host_data;
+@@ -276,6 +296,38 @@ static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+ }
+
++static int rzg2l_irqc_irq_suspend(void)
++{
++ struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
++ void __iomem *base = rzg2l_irqc_data->base;
++
++ cache->iitsr = readl_relaxed(base + IITSR);
++ for (u8 i = 0; i < 2; i++)
++ cache->titsr[i] = readl_relaxed(base + TITSR(i));
++
++ return 0;
++}
++
++static void rzg2l_irqc_irq_resume(void)
++{
++ struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
++ void __iomem *base = rzg2l_irqc_data->base;
++
++ /*
++ * Restore only interrupt type. TSSRx will be restored at the
++	 * request of the pin controller to avoid spurious interrupts due
++ * to invalid PIN states.
++ */
++ for (u8 i = 0; i < 2; i++)
++ writel_relaxed(cache->titsr[i], base + TITSR(i));
++ writel_relaxed(cache->iitsr, base + IITSR);
++}
++
++static struct syscore_ops rzg2l_irqc_syscore_ops = {
++ .suspend = rzg2l_irqc_irq_suspend,
++ .resume = rzg2l_irqc_irq_resume,
++};
++
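++/*
++ * Why the private data above became a single static pointer: syscore_ops
++ * callbacks take no arguments (int (*suspend)(void), void (*resume)(void)),
++ * so rzg2l_irqc_irq_suspend()/rzg2l_irqc_irq_resume() must reach the one
++ * IRQC instance without an irq_data or struct device handle.
++ */
++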
+ static const struct irq_chip irqc_chip = {
+ .name = "rzg2l-irqc",
+ .irq_eoi = rzg2l_irqc_eoi,
+@@ -357,13 +409,12 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
+
+ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
+ {
++ struct platform_device *pdev = of_find_device_by_node(node);
++ struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
+ struct irq_domain *irq_domain, *parent_domain;
+- struct platform_device *pdev;
+ struct reset_control *resetn;
+- struct rzg2l_irqc_priv *priv;
+ int ret;
+
+- pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return -ENODEV;
+
+@@ -373,15 +424,15 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
+ return -ENODEV;
+ }
+
+- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+- if (!priv)
++ rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
++ if (!rzg2l_irqc_data)
+ return -ENOMEM;
+
+- priv->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
+- if (IS_ERR(priv->base))
+- return PTR_ERR(priv->base);
++ rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
++ if (IS_ERR(rzg2l_irqc_data->base))
++ return PTR_ERR(rzg2l_irqc_data->base);
+
+- ret = rzg2l_irqc_parse_interrupts(priv, node);
++ ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
+ return ret;
+@@ -404,17 +455,30 @@ static int rzg2l_irqc_init(struct device_node *node, struct device_node *parent)
+ goto pm_disable;
+ }
+
+- raw_spin_lock_init(&priv->lock);
++ raw_spin_lock_init(&rzg2l_irqc_data->lock);
+
+ irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
+ node, &rzg2l_irqc_domain_ops,
+- priv);
++ rzg2l_irqc_data);
+ if (!irq_domain) {
+ dev_err(&pdev->dev, "failed to add irq domain\n");
+ ret = -ENOMEM;
+ goto pm_put;
+ }
+
++ register_syscore_ops(&rzg2l_irqc_syscore_ops);
++
++ /*
++ * Prevent the cleanup function from invoking put_device by assigning
++ * NULL to dev.
++ *
++ * make coccicheck will complain about missing put_device calls, but
++ * those are false positives, as dev will be automatically "put" via
++ * __free_put_device on the failing path.
++ * On the successful path we don't actually want to "put" dev.
++ */
++ dev = NULL;
++
+ return 0;
+
+ pm_put:
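++
++/*
++ * A minimal sketch of the scope-based cleanup pattern used above (see
++ * <linux/cleanup.h>; setup_hardware() here is a placeholder): a pointer
++ * declared with __free(put_device) has put_device() run on it when it
++ * goes out of scope, so every early return drops the reference, and
++ * assigning NULL on the success path is how the reference is kept.
++ */
++#if 0	/* illustrative only, not compiled */
++static int example_init(struct device_node *node)
++{
++	struct platform_device *pdev = of_find_device_by_node(node);
++	struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
++
++	if (!dev)
++		return -ENODEV;	/* nothing to put */
++	if (setup_hardware(dev))
++		return -EIO;	/* dev is "put" automatically here */
++	dev = NULL;		/* success: keep the reference */
++	return 0;
++}
++#endif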
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
+index 3877744193e2a0..062bcbe6255cff 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.c
++++ b/drivers/net/dsa/mv88e6xxx/chip.c
+@@ -6208,7 +6208,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
+ .invalid_port_mask = BIT(1) | BIT(2) | BIT(8),
+ .num_internal_phys = 5,
+ .internal_phys_offset = 3,
+- .max_vid = 4095,
++ .max_vid = 8191,
+ .max_sid = 63,
+ .port_base_addr = 0x0,
+ .phy_base_addr = 0x0,
+diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
+index f48a3c0ac7f968..f02518e93b60dc 100644
+--- a/drivers/net/dsa/mv88e6xxx/chip.h
++++ b/drivers/net/dsa/mv88e6xxx/chip.h
+@@ -206,6 +206,7 @@ struct mv88e6xxx_gpio_ops;
+ struct mv88e6xxx_avb_ops;
+ struct mv88e6xxx_ptp_ops;
+ struct mv88e6xxx_pcs_ops;
++struct mv88e6xxx_cc_coeffs;
+
+ struct mv88e6xxx_irq {
+ u16 masked;
+@@ -397,6 +398,7 @@ struct mv88e6xxx_chip {
+ struct cyclecounter tstamp_cc;
+ struct timecounter tstamp_tc;
+ struct delayed_work overflow_work;
++ const struct mv88e6xxx_cc_coeffs *cc_coeffs;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+@@ -719,10 +721,6 @@ struct mv88e6xxx_ptp_ops {
+ int arr1_sts_reg;
+ int dep_sts_reg;
+ u32 rx_filters;
+- u32 cc_shift;
+- u32 cc_mult;
+- u32 cc_mult_num;
+- u32 cc_mult_dem;
+ };
+
+ struct mv88e6xxx_pcs_ops {
+diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
+index 5394a8cf7bf1d4..04053fdc6489af 100644
+--- a/drivers/net/dsa/mv88e6xxx/port.c
++++ b/drivers/net/dsa/mv88e6xxx/port.c
+@@ -1713,6 +1713,7 @@ int mv88e6393x_port_set_policy(struct mv88e6xxx_chip *chip, int port,
+ ptr = shift / 8;
+ shift %= 8;
+ mask >>= ptr * 8;
++ ptr <<= 8;
+
+ err = mv88e6393x_port_policy_read(chip, port, ptr, &reg);
+ if (err)
+diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
+index ea17231dc34e3e..5980bb4ce43e0a 100644
+--- a/drivers/net/dsa/mv88e6xxx/ptp.c
++++ b/drivers/net/dsa/mv88e6xxx/ptp.c
+@@ -18,6 +18,13 @@
+
+ #define MV88E6XXX_MAX_ADJ_PPB 1000000
+
++struct mv88e6xxx_cc_coeffs {
++ u32 cc_shift;
++ u32 cc_mult;
++ u32 cc_mult_num;
++ u32 cc_mult_dem;
++};
++
+ /* Family MV88E6250:
+ * Raw timestamps are in units of 10-ns clock periods.
+ *
+@@ -25,22 +32,43 @@
+ * simplifies to
+ * clkadj = scaled_ppm * 2^7 / 5^5
+ */
+-#define MV88E6250_CC_SHIFT 28
+-#define MV88E6250_CC_MULT (10 << MV88E6250_CC_SHIFT)
+-#define MV88E6250_CC_MULT_NUM (1 << 7)
+-#define MV88E6250_CC_MULT_DEM 3125ULL
++#define MV88E6XXX_CC_10NS_SHIFT 28
++static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_10ns_coeffs = {
++ .cc_shift = MV88E6XXX_CC_10NS_SHIFT,
++ .cc_mult = 10 << MV88E6XXX_CC_10NS_SHIFT,
++ .cc_mult_num = 1 << 7,
++ .cc_mult_dem = 3125ULL,
++};
+
+-/* Other families:
++/* Other families except MV88E6393X in internal clock mode:
+ * Raw timestamps are in units of 8-ns clock periods.
+ *
+ * clkadj = scaled_ppm * 8*2^28 / (10^6 * 2^16)
+ * simplifies to
+ * clkadj = scaled_ppm * 2^9 / 5^6
+ */
+-#define MV88E6XXX_CC_SHIFT 28
+-#define MV88E6XXX_CC_MULT (8 << MV88E6XXX_CC_SHIFT)
+-#define MV88E6XXX_CC_MULT_NUM (1 << 9)
+-#define MV88E6XXX_CC_MULT_DEM 15625ULL
++#define MV88E6XXX_CC_8NS_SHIFT 28
++static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_8ns_coeffs = {
++ .cc_shift = MV88E6XXX_CC_8NS_SHIFT,
++ .cc_mult = 8 << MV88E6XXX_CC_8NS_SHIFT,
++ .cc_mult_num = 1 << 9,
++ .cc_mult_dem = 15625ULL
++};
++
++/* Family MV88E6393X using internal clock:
++ * Raw timestamps are in units of 4-ns clock periods.
++ *
++ * clkadj = scaled_ppm * 4*2^28 / (10^6 * 2^16)
++ * simplifies to
++ * clkadj = scaled_ppm * 2^8 / 5^6
++ */
++#define MV88E6XXX_CC_4NS_SHIFT 28
++static const struct mv88e6xxx_cc_coeffs mv88e6xxx_cc_4ns_coeffs = {
++ .cc_shift = MV88E6XXX_CC_4NS_SHIFT,
++ .cc_mult = 4 << MV88E6XXX_CC_4NS_SHIFT,
++ .cc_mult_num = 1 << 8,
++ .cc_mult_dem = 15625ULL
++};
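++
++/*
++ * Sanity check of the three derivations above (not upstream text): with a
++ * period of P ns,
++ *	clkadj = scaled_ppm * P * 2^28 / (10^6 * 2^16)
++ * and 10^6 * 2^16 = 2^22 * 5^6, so
++ *	P = 10: 10 * 2^28 / (2^22 * 5^6) = 2^7 / 5^5  -> num 1 << 7, dem 3125
++ *	P =  8:  8 * 2^28 / (2^22 * 5^6) = 2^9 / 5^6  -> num 1 << 9, dem 15625
++ *	P =  4:  4 * 2^28 / (2^22 * 5^6) = 2^8 / 5^6  -> num 1 << 8, dem 15625
++ * which matches the cc_mult_num/cc_mult_dem values in the structs above.
++ */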
+
+ #define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
+
+@@ -83,6 +111,33 @@ static int mv88e6352_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
+ return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
+ }
+
++static const struct mv88e6xxx_cc_coeffs *
++mv88e6xxx_cc_coeff_get(struct mv88e6xxx_chip *chip)
++{
++ u16 period_ps;
++ int err;
++
++ err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_CLOCK_PERIOD, &period_ps, 1);
++ if (err) {
++ dev_err(chip->dev, "failed to read cycle counter period: %d\n",
++ err);
++ return ERR_PTR(err);
++ }
++
++ switch (period_ps) {
++ case 4000:
++ return &mv88e6xxx_cc_4ns_coeffs;
++ case 8000:
++ return &mv88e6xxx_cc_8ns_coeffs;
++ case 10000:
++ return &mv88e6xxx_cc_10ns_coeffs;
++ default:
++ dev_err(chip->dev, "unexpected cycle counter period of %u ps\n",
++ period_ps);
++ return ERR_PTR(-ENODEV);
++ }
++}
++
+ static u64 mv88e6352_ptp_clock_read(const struct cyclecounter *cc)
+ {
+ struct mv88e6xxx_chip *chip = cc_to_chip(cc);
+@@ -200,7 +255,6 @@ static void mv88e6352_tai_event_work(struct work_struct *ugly)
+ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ {
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+- const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
+ int neg_adj = 0;
+ u32 diff, mult;
+ u64 adj;
+@@ -210,10 +264,10 @@ static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+ scaled_ppm = -scaled_ppm;
+ }
+
+- mult = ptp_ops->cc_mult;
+- adj = ptp_ops->cc_mult_num;
++ mult = chip->cc_coeffs->cc_mult;
++ adj = chip->cc_coeffs->cc_mult_num;
+ adj *= scaled_ppm;
+- diff = div_u64(adj, ptp_ops->cc_mult_dem);
++ diff = div_u64(adj, chip->cc_coeffs->cc_mult_dem);
+
+ mv88e6xxx_reg_lock(chip);
+
+@@ -360,10 +414,6 @@ const struct mv88e6xxx_ptp_ops mv88e6165_ptp_ops = {
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+- .cc_shift = MV88E6XXX_CC_SHIFT,
+- .cc_mult = MV88E6XXX_CC_MULT,
+- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+ };
+
+ const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
+@@ -387,10 +437,6 @@ const struct mv88e6xxx_ptp_ops mv88e6250_ptp_ops = {
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+- .cc_shift = MV88E6250_CC_SHIFT,
+- .cc_mult = MV88E6250_CC_MULT,
+- .cc_mult_num = MV88E6250_CC_MULT_NUM,
+- .cc_mult_dem = MV88E6250_CC_MULT_DEM,
+ };
+
+ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+@@ -414,10 +460,6 @@ const struct mv88e6xxx_ptp_ops mv88e6352_ptp_ops = {
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+- .cc_shift = MV88E6XXX_CC_SHIFT,
+- .cc_mult = MV88E6XXX_CC_MULT,
+- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+ };
+
+ const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
+@@ -442,10 +484,6 @@ const struct mv88e6xxx_ptp_ops mv88e6390_ptp_ops = {
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ),
+- .cc_shift = MV88E6XXX_CC_SHIFT,
+- .cc_mult = MV88E6XXX_CC_MULT,
+- .cc_mult_num = MV88E6XXX_CC_MULT_NUM,
+- .cc_mult_dem = MV88E6XXX_CC_MULT_DEM,
+ };
+
+ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+@@ -458,10 +496,10 @@ static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+ return 0;
+ }
+
+-/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
++/* With a 250MHz input clock, the 32-bit timestamp counter overflows in ~17.2
+ * seconds; this task forces periodic reads so that we don't miss any.
+ */
+-#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 16)
++#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 8)
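++/*
++ * Arithmetic behind the interval (a sanity check, not upstream text): a
++ * 32-bit counter of 4 ns periods wraps after 2^32 * 4 ns ~= 17.2 s
++ * (8 ns -> ~34.4 s, 10 ns -> ~42.9 s), so reading at least every 8 s
++ * stays within half a wrap for every supported clock period.
++ */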
+ static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
+ {
+ struct delayed_work *dw = to_delayed_work(work);
+@@ -480,11 +518,15 @@ int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
+ int i;
+
+ /* Set up the cycle counter */
++ chip->cc_coeffs = mv88e6xxx_cc_coeff_get(chip);
++ if (IS_ERR(chip->cc_coeffs))
++ return PTR_ERR(chip->cc_coeffs);
++
+ memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
+ chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read;
+ chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32);
+- chip->tstamp_cc.mult = ptp_ops->cc_mult;
+- chip->tstamp_cc.shift = ptp_ops->cc_shift;
++ chip->tstamp_cc.mult = chip->cc_coeffs->cc_mult;
++ chip->tstamp_cc.shift = chip->cc_coeffs->cc_shift;
+
+ timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
+index 597a02c75d527f..e624d31d20d891 100644
+--- a/drivers/net/ethernet/aeroflex/greth.c
++++ b/drivers/net/ethernet/aeroflex/greth.c
+@@ -484,7 +484,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+- goto out;
++ goto len_error;
+ }
+
+ /* Save skb pointer. */
+@@ -575,6 +575,7 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
+ map_error:
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create TX DMA mapping\n");
++len_error:
+ dev_kfree_skb(skb);
+ out:
+ return err;
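++
++/*
++ * The change above follows the ndo_start_xmit() contract: once a driver
++ * returns NETDEV_TX_OK it owns the skb, so every drop path must free it.
++ * A minimal sketch of the pattern (the same one the bcmasp, bcmsysport,
++ * be2net, sun3_82586 and axienet hunks later in this patch apply;
++ * foo_xmit() and FOO_MAX_FRAME are placeholders):
++ */
++#if 0	/* illustrative only, not compiled */
++static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++	if (unlikely(skb->len > FOO_MAX_FRAME)) {
++		dev->stats.tx_errors++;
++		dev_kfree_skb_any(skb);	/* free the skb we are dropping */
++		return NETDEV_TX_OK;	/* "consumed", even though dropped */
++	}
++	/* ... map and queue the frame ... */
++	return NETDEV_TX_OK;
++}
++#endif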
+diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+index 6bf149d6459418..f0647286c68b25 100644
+--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
++++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
+@@ -322,6 +322,7 @@ static netdev_tx_t bcmasp_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+ /* Rewind so we do not have a hole */
+ spb_index = intf->tx_spb_index;
++ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
+index bf1611cce974a9..49e890a7e04a37 100644
+--- a/drivers/net/ethernet/broadcom/bcmsysport.c
++++ b/drivers/net/ethernet/broadcom/bcmsysport.c
+@@ -1359,6 +1359,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+ skb->data, skb_len);
+ ret = NETDEV_TX_OK;
++ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index ad862ed7888ac4..9d425ece33fd2a 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -1381,10 +1381,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
+ be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
+
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
+- if (unlikely(!wrb_cnt)) {
+- dev_kfree_skb_any(skb);
+- goto drop;
+- }
++ if (unlikely(!wrb_cnt))
++ goto drop_skb;
+
+ /* if os2bmc is enabled and if the pkt is destined to bmc,
+ * enqueue the pkt a 2nd time with mgmt bit set.
+@@ -1393,7 +1391,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
+ BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
+ wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
+ if (unlikely(!wrb_cnt))
+- goto drop;
++ goto drop_skb;
+ else
+ skb_get(skb);
+ }
+@@ -1407,6 +1405,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
+ be_xmit_flush(adapter, txo);
+
+ return NETDEV_TX_OK;
++drop_skb:
++ dev_kfree_skb_any(skb);
+ drop:
+ tx_stats(txo)->tx_drv_drops++;
+ /* Flush the already enqueued tx requests */
+diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
+index 9767586b4eb329..11da139082e1bf 100644
+--- a/drivers/net/ethernet/freescale/fman/mac.c
++++ b/drivers/net/ethernet/freescale/fman/mac.c
+@@ -197,55 +197,67 @@ static int mac_probe(struct platform_device *_of_dev)
+ err = -EINVAL;
+ goto _return_of_node_put;
+ }
++ mac_dev->fman_dev = &of_dev->dev;
+
+ /* Get the FMan cell-index */
+ err = of_property_read_u32(dev_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %pOF\n", dev_node);
+ err = -EINVAL;
+- goto _return_of_node_put;
++ goto _return_dev_put;
+ }
+ /* cell-index 0 => FMan id 1 */
+ fman_id = (u8)(val + 1);
+
+- priv->fman = fman_bind(&of_dev->dev);
++ priv->fman = fman_bind(mac_dev->fman_dev);
+ if (!priv->fman) {
+ dev_err(dev, "fman_bind(%pOF) failed\n", dev_node);
+ err = -ENODEV;
+- goto _return_of_node_put;
++ goto _return_dev_put;
+ }
+
++ /* Two references have been taken in of_find_device_by_node()
++ * and fman_bind(). Release one of them here. The second one
++ * will be released in mac_remove().
++ */
++ put_device(mac_dev->fman_dev);
+ of_node_put(dev_node);
++ dev_node = NULL;
+
+ /* Get the address of the memory mapped registers */
+ mac_dev->res = platform_get_mem_or_io(_of_dev, 0);
+ if (!mac_dev->res) {
+ dev_err(dev, "could not get registers\n");
+- return -EINVAL;
++ err = -EINVAL;
++ goto _return_dev_put;
+ }
+
+ err = devm_request_resource(dev, fman_get_mem_region(priv->fman),
+ mac_dev->res);
+ if (err) {
+ dev_err_probe(dev, err, "could not request resource\n");
+- return err;
++ goto _return_dev_put;
+ }
+
+ mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start,
+ resource_size(mac_dev->res));
+ if (!mac_dev->vaddr) {
+ dev_err(dev, "devm_ioremap() failed\n");
+- return -EIO;
++ err = -EIO;
++ goto _return_dev_put;
+ }
+
+- if (!of_device_is_available(mac_node))
+- return -ENODEV;
++ if (!of_device_is_available(mac_node)) {
++ err = -ENODEV;
++ goto _return_dev_put;
++ }
+
+ /* Get the cell-index */
+ err = of_property_read_u32(mac_node, "cell-index", &val);
+ if (err) {
+ dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
+- return -EINVAL;
++ err = -EINVAL;
++ goto _return_dev_put;
+ }
+ priv->cell_index = (u8)val;
+
+@@ -259,22 +271,26 @@ static int mac_probe(struct platform_device *_of_dev)
+ if (unlikely(nph < 0)) {
+ dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
+ mac_node);
+- return nph;
++ err = nph;
++ goto _return_dev_put;
+ }
+
+ if (nph != ARRAY_SIZE(mac_dev->port)) {
+ dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
+ mac_node);
+- return -EINVAL;
++ err = -EINVAL;
++ goto _return_dev_put;
+ }
+
+- for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
++ /* PORT_NUM determines the size of the port array */
++ for (i = 0; i < PORT_NUM; i++) {
+ /* Find the port node */
+ dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
+ if (!dev_node) {
+ dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
+ mac_node);
+- return -EINVAL;
++ err = -EINVAL;
++ goto _return_dev_arr_put;
+ }
+
+ of_dev = of_find_device_by_node(dev_node);
+@@ -282,17 +298,24 @@ static int mac_probe(struct platform_device *_of_dev)
+ dev_err(dev, "of_find_device_by_node(%pOF) failed\n",
+ dev_node);
+ err = -EINVAL;
+- goto _return_of_node_put;
++ goto _return_dev_arr_put;
+ }
++ mac_dev->fman_port_devs[i] = &of_dev->dev;
+
+- mac_dev->port[i] = fman_port_bind(&of_dev->dev);
++ mac_dev->port[i] = fman_port_bind(mac_dev->fman_port_devs[i]);
+ if (!mac_dev->port[i]) {
+ dev_err(dev, "dev_get_drvdata(%pOF) failed\n",
+ dev_node);
+ err = -EINVAL;
+- goto _return_of_node_put;
++ goto _return_dev_arr_put;
+ }
++ /* Two references have been taken in of_find_device_by_node()
++ * and fman_port_bind(). Release one of them here. The second
++ * one will be released in mac_remove().
++ */
++ put_device(mac_dev->fman_port_devs[i]);
+ of_node_put(dev_node);
++ dev_node = NULL;
+ }
+
+ /* Get the PHY connection type */
+@@ -312,7 +335,7 @@ static int mac_probe(struct platform_device *_of_dev)
+
+ err = init(mac_dev, mac_node, &params);
+ if (err < 0)
+- return err;
++ goto _return_dev_arr_put;
+
+ if (!is_zero_ether_addr(mac_dev->addr))
+ dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);
+@@ -327,6 +350,12 @@ static int mac_probe(struct platform_device *_of_dev)
+
+ return err;
+
++_return_dev_arr_put:
++ /* mac_dev is kzalloc'ed */
++ for (i = 0; i < PORT_NUM; i++)
++ put_device(mac_dev->fman_port_devs[i]);
++_return_dev_put:
++ put_device(mac_dev->fman_dev);
+ _return_of_node_put:
+ of_node_put(dev_node);
+ return err;
+@@ -335,6 +364,11 @@ static int mac_probe(struct platform_device *_of_dev)
+ static void mac_remove(struct platform_device *pdev)
+ {
+ struct mac_device *mac_dev = platform_get_drvdata(pdev);
++ int i;
++
++ for (i = 0; i < PORT_NUM; i++)
++ put_device(mac_dev->fman_port_devs[i]);
++ put_device(mac_dev->fman_dev);
+
+ platform_device_unregister(mac_dev->priv->eth_dev);
+ }
+diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
+index fe747915cc7379..8b5b43d50f8efb 100644
+--- a/drivers/net/ethernet/freescale/fman/mac.h
++++ b/drivers/net/ethernet/freescale/fman/mac.h
+@@ -19,12 +19,13 @@
+ struct fman_mac;
+ struct mac_priv_s;
+
++#define PORT_NUM 2
+ struct mac_device {
+ void __iomem *vaddr;
+ struct device *dev;
+ struct resource *res;
+ u8 addr[ETH_ALEN];
+- struct fman_port *port[2];
++ struct fman_port *port[PORT_NUM];
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ phy_interface_t phy_if;
+@@ -52,6 +53,9 @@ struct mac_device {
+
+ struct fman_mac *fman_mac;
+ struct mac_priv_s *priv;
++
++ struct device *fman_dev;
++ struct device *fman_port_devs[PORT_NUM];
+ };
+
+ static inline struct mac_device
+diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
+index f2d4669c81cf29..58a3d28d938c32 100644
+--- a/drivers/net/ethernet/i825xx/sun3_82586.c
++++ b/drivers/net/ethernet/i825xx/sun3_82586.c
+@@ -1012,6 +1012,7 @@ sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+ if(skb->len > XMIT_BUFF_SIZE)
+ {
+ printk("%s: Sorry, max. framelength is %d bytes. The length of your frame is %d bytes.\n",dev->name,XMIT_BUFF_SIZE,skb->len);
++ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+index 3c43f807852850..c7f4e3c058b7fa 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+@@ -336,6 +336,51 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ return new_pkts;
+ }
+
++/**
++ * octep_oq_next_pkt() - Move to the next packet in Rx queue.
++ *
++ * @oq: Octeon Rx queue data structure.
++ * @buff_info: Current packet buffer info.
++ * @read_idx: Current packet index in the ring.
++ * @desc_used: Current packet descriptor number.
++ *
++ * Free the resources associated with a packet.
++ * Increment packet index in the ring and packet descriptor number.
++ */
++static void octep_oq_next_pkt(struct octep_oq *oq,
++ struct octep_rx_buffer *buff_info,
++ u32 *read_idx, u32 *desc_used)
++{
++ dma_unmap_page(oq->dev, oq->desc_ring[*read_idx].buffer_ptr,
++ PAGE_SIZE, DMA_FROM_DEVICE);
++ buff_info->page = NULL;
++ (*read_idx)++;
++ (*desc_used)++;
++ if (*read_idx == oq->max_count)
++ *read_idx = 0;
++}
++
++/**
++ * octep_oq_drop_rx() - Free the resources associated with a packet.
++ *
++ * @oq: Octeon Rx queue data structure.
++ * @buff_info: Current packet buffer info.
++ * @read_idx: Current packet index in the ring.
++ * @desc_used: Current packet descriptor number.
++ *
++ */
++static void octep_oq_drop_rx(struct octep_oq *oq,
++ struct octep_rx_buffer *buff_info,
++ u32 *read_idx, u32 *desc_used)
++{
++ int data_len = buff_info->len - oq->max_single_buffer_size;
++
++ while (data_len > 0) {
++ octep_oq_next_pkt(oq, buff_info, read_idx, desc_used);
++ data_len -= oq->buffer_size;
++	}
++}
++
+ /**
+ * __octep_oq_process_rx() - Process hardware Rx queue and push to stack.
+ *
+@@ -365,10 +410,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+ buff_info = (struct octep_rx_buffer *)&oq->buff_info[read_idx];
+- dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+- PAGE_SIZE, DMA_FROM_DEVICE);
+ resp_hw = page_address(buff_info->page);
+- buff_info->page = NULL;
+
+ /* Swap the length field that is in Big-Endian to CPU */
+ buff_info->len = be64_to_cpu(resp_hw->length);
+@@ -390,36 +432,33 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ */
+ data_offset = OCTEP_OQ_RESP_HW_SIZE;
+ }
++
++ octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
++
++ skb = build_skb((void *)resp_hw, PAGE_SIZE);
++ if (!skb) {
++ octep_oq_drop_rx(oq, buff_info,
++ &read_idx, &desc_used);
++ oq->stats.alloc_failures++;
++ continue;
++ }
++ skb_reserve(skb, data_offset);
++
+ rx_bytes += buff_info->len;
+
+ if (buff_info->len <= oq->max_single_buffer_size) {
+- skb = build_skb((void *)resp_hw, PAGE_SIZE);
+- skb_reserve(skb, data_offset);
+ skb_put(skb, buff_info->len);
+- read_idx++;
+- desc_used++;
+- if (read_idx == oq->max_count)
+- read_idx = 0;
+ } else {
+ struct skb_shared_info *shinfo;
+ u16 data_len;
+
+- skb = build_skb((void *)resp_hw, PAGE_SIZE);
+- skb_reserve(skb, data_offset);
+ /* Head fragment includes response header(s);
+ * subsequent fragments contains only data.
+ */
+ skb_put(skb, oq->max_single_buffer_size);
+- read_idx++;
+- desc_used++;
+- if (read_idx == oq->max_count)
+- read_idx = 0;
+-
+ shinfo = skb_shinfo(skb);
+ data_len = buff_info->len - oq->max_single_buffer_size;
+ while (data_len) {
+- dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
+- PAGE_SIZE, DMA_FROM_DEVICE);
+ buff_info = (struct octep_rx_buffer *)
+ &oq->buff_info[read_idx];
+ if (data_len < oq->buffer_size) {
+@@ -434,11 +473,8 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ buff_info->page, 0,
+ buff_info->len,
+ buff_info->len);
+- buff_info->page = NULL;
+- read_idx++;
+- desc_used++;
+- if (read_idx == oq->max_count)
+- read_idx = 0;
++
++ octep_oq_next_pkt(oq, buff_info, &read_idx, &desc_used);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+index 224a025283ca7d..29487518ca6724 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
++++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+@@ -2298,7 +2298,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+ if (!(cfg & BIT_ULL(12)))
+ continue;
+- bmap |= (1 << i);
++ bmap |= BIT_ULL(i);
+ cfg &= ~BIT_ULL(12);
+ rvu_write64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link), cfg);
+@@ -2319,7 +2319,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
+
+ /* Set NIX_AF_TL3_TL2_LINKX_CFG[ENA] for the TL3/TL2 queue */
+ for (i = 0; i < (rvu->hw->cgx_links + rvu->hw->lbk_links); i++) {
+- if (!(bmap & (1 << i)))
++ if (!(bmap & BIT_ULL(i)))
+ continue;
+ cfg = rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(tl2_tl3_link_schq, link));
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+index 48dc4ae87af092..80af0fc7101fdc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+@@ -1758,6 +1758,10 @@ static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool force
+ }
+ }
+
++#define MLX5_MAX_MANAGE_PAGES_CMD_ENT 1
++#define MLX5_CMD_MASK ((1UL << (cmd->vars.max_reg_cmds + \
++ MLX5_MAX_MANAGE_PAGES_CMD_ENT)) - 1)
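++/*
++ * Worked example (a sanity check, not upstream text): with log_sz = 5
++ * there are 32 command slots; max_reg_cmds = (1 << 5) - 1 = 31 of them
++ * serve regular commands and one is reserved for MANAGE_PAGES, so
++ * MLX5_CMD_MASK = (1UL << (31 + 1)) - 1 covers exactly bits 0..31.
++ */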
++
+ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+ {
+ struct mlx5_cmd *cmd = &dev->cmd;
+@@ -1769,7 +1773,7 @@ static void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
+ /* wait for pending handlers to complete */
+ mlx5_eq_synchronize_cmd_irq(dev);
+ spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
+- vector = ~dev->cmd.vars.bitmask & ((1ul << (1 << dev->cmd.vars.log_sz)) - 1);
++ vector = ~dev->cmd.vars.bitmask & MLX5_CMD_MASK;
+ if (!vector)
+ goto no_trig;
+
+@@ -2275,7 +2279,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev)
+
+ cmd->state = MLX5_CMDIF_STATE_DOWN;
+ cmd->vars.max_reg_cmds = (1 << cmd->vars.log_sz) - 1;
+- cmd->vars.bitmask = (1UL << cmd->vars.max_reg_cmds) - 1;
++ cmd->vars.bitmask = MLX5_CMD_MASK;
+
+ sema_init(&cmd->vars.sem, cmd->vars.max_reg_cmds);
+ sema_init(&cmd->vars.pages_sem, 1);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+index 40a6cb052a2da3..07a04195490920 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+@@ -1073,6 +1073,12 @@ int mlx5_comp_eqn_get(struct mlx5_core_dev *dev, u16 vecidx, int *eqn)
+ struct mlx5_eq_comp *eq;
+ int ret = 0;
+
++ if (vecidx >= table->max_comp_eqs) {
++ mlx5_core_dbg(dev, "Requested vector index %u should be less than %u",
++ vecidx, table->max_comp_eqs);
++ return -EINVAL;
++ }
++
+ mutex_lock(&table->comp_lock);
+ eq = xa_load(&table->comp_eqs, vecidx);
+ if (eq) {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+index 1789800faaeb62..f6022c135ec023 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+@@ -1489,7 +1489,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
+ }
+
+ if (err)
+- goto abort;
++ goto err_esw_enable;
+
+ esw->fdb_table.flags |= MLX5_ESW_FDB_CREATED;
+
+@@ -1503,7 +1503,8 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
+
+ return 0;
+
+-abort:
++err_esw_enable:
++ mlx5_eq_notifier_unregister(esw->dev, &esw->nb);
+ mlx5_esw_acls_ns_cleanup(esw);
+ return err;
+ }
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index b499d8ea6d216f..6856eb602f8260 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -4576,7 +4576,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
+ if ((status & 0xffff) == 0xffff || !(status & tp->irq_mask))
+ return IRQ_NONE;
+
+- if (unlikely(status & SYSErr)) {
++ /* At least RTL8168fp may unexpectedly set the SYSErr bit */
++ if (unlikely(status & SYSErr &&
++ tp->mac_version <= RTL_GIGA_MAC_VER_06)) {
+ rtl8169_pcierr_interrupt(tp->dev);
+ goto out;
+ }
+diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
+index c6897e6ea362d9..58fdc4f8dd4835 100644
+--- a/drivers/net/ethernet/renesas/ravb_main.c
++++ b/drivers/net/ethernet/renesas/ravb_main.c
+@@ -1673,20 +1673,19 @@ static int ravb_get_ts_info(struct net_device *ndev,
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *hw_info = priv->info;
+
+- info->so_timestamping =
+- SOF_TIMESTAMPING_TX_SOFTWARE |
+- SOF_TIMESTAMPING_RX_SOFTWARE |
+- SOF_TIMESTAMPING_SOFTWARE |
+- SOF_TIMESTAMPING_TX_HARDWARE |
+- SOF_TIMESTAMPING_RX_HARDWARE |
+- SOF_TIMESTAMPING_RAW_HARDWARE;
+- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+- info->rx_filters =
+- (1 << HWTSTAMP_FILTER_NONE) |
+- (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+- (1 << HWTSTAMP_FILTER_ALL);
+- if (hw_info->gptp || hw_info->ccc_gac)
++ if (hw_info->gptp || hw_info->ccc_gac) {
++ info->so_timestamping =
++ SOF_TIMESTAMPING_TX_SOFTWARE |
++ SOF_TIMESTAMPING_TX_HARDWARE |
++ SOF_TIMESTAMPING_RX_HARDWARE |
++ SOF_TIMESTAMPING_RAW_HARDWARE;
++ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
++ info->rx_filters =
++ (1 << HWTSTAMP_FILTER_NONE) |
++ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
++ (1 << HWTSTAMP_FILTER_ALL);
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
++ }
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+index e0f3cbd36852e3..e2d61a3a7712d3 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-tegra.c
+@@ -127,10 +127,12 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_AUX_RX_IDDQ;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_SLEEP;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ usleep_range(10, 20); /* 500ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CAL_EN;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+@@ -143,22 +145,30 @@ static int mgbe_uphy_lane_bringup_serdes_up(struct net_device *ndev, void *mgbe_
+ return err;
+ }
+
++ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_DATA_EN;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+- value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
++ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+- value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
++ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ usleep_range(10, 20); /* 50ns min delay needed as per HW design */
+ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+ value |= XPCS_WRAP_UPHY_RX_CONTROL_RX_PCS_PHY_RDY;
+ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
+
++ msleep(30); /* 30ms delay needed as per HW design */
++ value = readl(mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
++ value &= ~XPCS_WRAP_UPHY_RX_CONTROL_RX_CDR_RESET;
++ writel(value, mgbe->xpcs + XPCS_WRAP_UPHY_RX_CONTROL);
++
+ err = readl_poll_timeout(mgbe->xpcs + XPCS_WRAP_IRQ_STATUS, value,
+ value & XPCS_WRAP_IRQ_STATUS_PCS_LINK_STS,
+ 500, 500 * 2000);
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 62c10eb4f0adf1..9f779653ed6225 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -845,6 +845,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ if (net_ratelimit())
+ netdev_err(ndev, "TX DMA mapping error\n");
+ ndev->stats.tx_dropped++;
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
+@@ -865,6 +866,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+ ndev->stats.tx_dropped++;
+ axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
+ true, NULL, 0);
++ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ desc_set_phys_addr(lp, phys, cur_p);
+diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
+index 9d2d66a4aafd56..8698d2db3dc8e1 100644
+--- a/drivers/net/hyperv/netvsc_drv.c
++++ b/drivers/net/hyperv/netvsc_drv.c
+@@ -2795,6 +2795,31 @@ static struct hv_driver netvsc_drv = {
+ },
+ };
+
++/* Set the VF's namespace to be the same as the synthetic NIC's */
++static void netvsc_event_set_vf_ns(struct net_device *ndev)
++{
++ struct net_device_context *ndev_ctx = netdev_priv(ndev);
++ struct net_device *vf_netdev;
++ int ret;
++
++ vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
++ if (!vf_netdev)
++ return;
++
++ if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
++ ret = dev_change_net_namespace(vf_netdev, dev_net(ndev),
++ "eth%d");
++ if (ret)
++ netdev_err(vf_netdev,
++ "Cannot move to same namespace as %s: %d\n",
++ ndev->name, ret);
++ else
++ netdev_info(vf_netdev,
++ "Moved VF to namespace with: %s\n",
++ ndev->name);
++ }
++}
++
+ /*
+ * On Hyper-V, every VF interface is matched with a corresponding
+ * synthetic interface. The synthetic interface is presented first
+@@ -2807,6 +2832,11 @@ static int netvsc_netdev_event(struct notifier_block *this,
+ struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
+ int ret = 0;
+
++ if (event_dev->netdev_ops == &device_ops && event == NETDEV_REGISTER) {
++ netvsc_event_set_vf_ns(event_dev);
++ return NOTIFY_DONE;
++ }
++
+ ret = check_dev_is_matching_vf(event_dev);
+ if (ret != 0)
+ return NOTIFY_DONE;
+diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
+index 778fb77c5a9372..2ada8baf815b15 100644
+--- a/drivers/net/macsec.c
++++ b/drivers/net/macsec.c
+@@ -151,19 +151,6 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
+ return sa;
+ }
+
+-static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
+-{
+- struct macsec_rx_sa *sa = NULL;
+- int an;
+-
+- for (an = 0; an < MACSEC_NUM_AN; an++) {
+- sa = macsec_rxsa_get(rx_sc->sa[an]);
+- if (sa)
+- break;
+- }
+- return sa;
+-}
+-
+ static void free_rx_sc_rcu(struct rcu_head *head)
+ {
+ struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);
+@@ -1205,15 +1192,12 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ /* If validateFrames is Strict or the C bit in the
+ * SecTAG is set, discard
+ */
+- struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
+ if (hdr->tci_an & MACSEC_TCI_C ||
+ secy->validate_frames == MACSEC_VALIDATE_STRICT) {
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsNotUsingSA++;
+ u64_stats_update_end(&rxsc_stats->syncp);
+ DEV_STATS_INC(secy->netdev, rx_errors);
+- if (active_rx_sa)
+- this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
+ goto drop_nosa;
+ }
+
+@@ -1223,8 +1207,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
+ u64_stats_update_begin(&rxsc_stats->syncp);
+ rxsc_stats->stats.InPktsUnusedSA++;
+ u64_stats_update_end(&rxsc_stats->syncp);
+- if (active_rx_sa)
+- this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
+ goto deliver;
+ }
+
+diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
+index 92a7a36b93ac0c..3e0b61202f0c98 100644
+--- a/drivers/net/netdevsim/dev.c
++++ b/drivers/net/netdevsim/dev.c
+@@ -836,7 +836,8 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
+ nsim_dev = nsim_trap_data->nsim_dev;
+
+ if (!devl_trylock(priv_to_devlink(nsim_dev))) {
+- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw, 1);
++ queue_delayed_work(system_unbound_wq,
++ &nsim_dev->trap_data->trap_report_dw, 1);
+ return;
+ }
+
+@@ -848,11 +849,12 @@ static void nsim_dev_trap_report_work(struct work_struct *work)
+ continue;
+
+ nsim_dev_trap_report(nsim_dev_port);
++ cond_resched();
+ }
+ devl_unlock(priv_to_devlink(nsim_dev));
+-
+- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+- msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
++ queue_delayed_work(system_unbound_wq,
++ &nsim_dev->trap_data->trap_report_dw,
++ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+ }
+
+ static int nsim_dev_traps_init(struct devlink *devlink)
+@@ -907,8 +909,9 @@ static int nsim_dev_traps_init(struct devlink *devlink)
+
+ INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
+ nsim_dev_trap_report_work);
+- schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+- msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
++ queue_delayed_work(system_unbound_wq,
++ &nsim_dev->trap_data->trap_report_dw,
++ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+
+ return 0;
+
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index 29e1cbea6dc0c3..507726a08f82d1 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -40,8 +40,8 @@
+ /* Control Register 2 bits */
+ #define DP83822_FX_ENABLE BIT(14)
+
+-#define DP83822_HW_RESET BIT(15)
+-#define DP83822_SW_RESET BIT(14)
++#define DP83822_SW_RESET BIT(15)
++#define DP83822_DIG_RESTART BIT(14)
+
+ /* PHY STS bits */
+ #define DP83822_PHYSTS_DUPLEX BIT(2)
+diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
+index 40ce8abe699954..6019811920a441 100644
+--- a/drivers/net/plip/plip.c
++++ b/drivers/net/plip/plip.c
+@@ -815,7 +815,7 @@ plip_send_packet(struct net_device *dev, struct net_local *nl,
+ return HS_TIMEOUT;
+ }
+ }
+- break;
++ fallthrough;
+
+ case PLIP_PK_LENGTH_LSB:
+ if (plip_send(nibble_timeout, dev,
+diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
+index 60c58dd6d25311..9f66c47dc58bc7 100644
+--- a/drivers/net/usb/usbnet.c
++++ b/drivers/net/usb/usbnet.c
+@@ -1771,7 +1771,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ // can rename the link if it knows better.
+ if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
+ ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
+- (net->dev_addr [0] & 0x02) == 0))
++	     /* somebody touched it */
++ !is_zero_ether_addr(net->dev_addr)))
+ strscpy(net->name, "eth%d", sizeof(net->name));
+ /* WLAN devices should always be named "wlan%d" */
+ if ((dev->driver_info->flags & FLAG_WLAN) != 0)
+@@ -1874,6 +1875,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
+ * may trigger an error resubmitting itself and, worse,
+ * schedule a timer. So we kill it all just in case.
+ */
++ usbnet_mark_going_away(dev);
+ cancel_work_sync(&dev->kevent);
+ del_timer_sync(&dev->delay);
+ free_percpu(net->tstats);
+diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
+index a6c787454a1aeb..1341374a4588a0 100644
+--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
++++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
+@@ -148,7 +148,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
+ } else { /* XDP buffer from page pool */
+ page = virt_to_page(xdpf->data);
+ tbi->dma_addr = page_pool_get_dma_addr(page) +
+- VMXNET3_XDP_HEADROOM;
++ (xdpf->data - (void *)xdpf);
+ dma_sync_single_for_device(&adapter->pdev->dev,
+ tbi->dma_addr, buf_size,
+ DMA_TO_DEVICE);
+diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
+index 284ab1f56391aa..45ed3afc443df9 100644
+--- a/drivers/net/wwan/wwan_core.c
++++ b/drivers/net/wwan/wwan_core.c
+@@ -1031,7 +1031,7 @@ static const struct nla_policy wwan_rtnl_policy[IFLA_WWAN_MAX + 1] = {
+
+ static struct rtnl_link_ops wwan_rtnl_link_ops __read_mostly = {
+ .kind = "wwan",
+- .maxtype = __IFLA_WWAN_MAX,
++ .maxtype = IFLA_WWAN_MAX,
+ .alloc = wwan_rtnl_alloc,
+ .validate = wwan_rtnl_validate,
+ .newlink = wwan_rtnl_newlink,
+diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
+index 32b5cc76a0223c..b701969cf1c2aa 100644
+--- a/drivers/nvme/host/pci.c
++++ b/drivers/nvme/host/pci.c
+@@ -2469,17 +2469,29 @@ static unsigned int nvme_pci_nr_maps(struct nvme_dev *dev)
+ return 1;
+ }
+
+-static void nvme_pci_update_nr_queues(struct nvme_dev *dev)
++static bool nvme_pci_update_nr_queues(struct nvme_dev *dev)
+ {
+ if (!dev->ctrl.tagset) {
+ nvme_alloc_io_tag_set(&dev->ctrl, &dev->tagset, &nvme_mq_ops,
+ nvme_pci_nr_maps(dev), sizeof(struct nvme_iod));
+- return;
++ return true;
++ }
++
++ /* Give up if we are racing with nvme_dev_disable() */
++ if (!mutex_trylock(&dev->shutdown_lock))
++ return false;
++
++ /* Check if nvme_dev_disable() has been executed already */
++ if (!dev->online_queues) {
++ mutex_unlock(&dev->shutdown_lock);
++ return false;
+ }
+
+ blk_mq_update_nr_hw_queues(&dev->tagset, dev->online_queues - 1);
+ /* free previously allocated queues that are no longer usable */
+ nvme_free_queues(dev, dev->online_queues);
++ mutex_unlock(&dev->shutdown_lock);
++ return true;
+ }
+
+ static int nvme_pci_enable(struct nvme_dev *dev)
+@@ -2757,10 +2769,11 @@ static void nvme_reset_work(struct work_struct *work)
+ * controller around but remove all namespaces.
+ */
+ if (dev->online_queues > 1) {
++ nvme_dbbuf_set(dev);
+ nvme_unquiesce_io_queues(&dev->ctrl);
+ nvme_wait_freeze(&dev->ctrl);
+- nvme_pci_update_nr_queues(dev);
+- nvme_dbbuf_set(dev);
++ if (!nvme_pci_update_nr_queues(dev))
++ goto out;
+ nvme_unfreeze(&dev->ctrl);
+ } else {
+ dev_warn(dev->ctrl.device, "IO queues lost\n");
+diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
+index 502783a7adb118..24fd7ffadda952 100644
+--- a/drivers/platform/x86/dell/dell-wmi-base.c
++++ b/drivers/platform/x86/dell/dell-wmi-base.c
+@@ -264,6 +264,15 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = {
+ /*Speaker Mute*/
+ { KE_KEY, 0x109, { KEY_MUTE} },
+
++ /* S2Idle screen off */
++ { KE_IGNORE, 0x120, { KEY_RESERVED }},
++
++ /* Leaving S4 or S2Idle suspend */
++ { KE_IGNORE, 0x130, { KEY_RESERVED }},
++
++ /* Entering S2Idle suspend */
++ { KE_IGNORE, 0x140, { KEY_RESERVED }},
++
+ /* Mic mute */
+ { KE_KEY, 0x150, { KEY_MICMUTE } },
+
+diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+index b929b4f824205e..af49dd6b31ade7 100644
+--- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
++++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c
+@@ -521,6 +521,7 @@ static int __init sysman_init(void)
+ int ret = 0;
+
+ if (!dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Dell System", NULL) &&
++ !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "Alienware", NULL) &&
+ !dmi_find_device(DMI_DEV_TYPE_OEM_STRING, "www.dell.com", NULL)) {
+ pr_err("Unable to run on non-Dell system\n");
+ return -ENODEV;
+diff --git a/drivers/powercap/dtpm_devfreq.c b/drivers/powercap/dtpm_devfreq.c
+index 612c3b59dd5bef..0ca53db7a90eb3 100644
+--- a/drivers/powercap/dtpm_devfreq.c
++++ b/drivers/powercap/dtpm_devfreq.c
+@@ -166,7 +166,7 @@ static int __dtpm_devfreq_setup(struct devfreq *devfreq, struct dtpm *parent)
+ ret = dev_pm_qos_add_request(dev, &dtpm_devfreq->qos_req,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+- if (ret) {
++ if (ret < 0) {
+ pr_err("Failed to add QoS request: %d\n", ret);
+ goto out_dtpm_unregister;
+ }
+diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
+index 86590a7e29f6ae..dd041ee18ac9be 100644
+--- a/drivers/target/target_core_device.c
++++ b/drivers/target/target_core_device.c
+@@ -692,7 +692,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
+
+ dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
+ if (!dev->queues) {
+- dev->transport->free_device(dev);
++ hba->backend->ops->free_device(dev);
+ return NULL;
+ }
+
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index 22cc6cac0ba2b5..2e100b76914a06 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -2130,7 +2130,7 @@ static int tcmu_netlink_event_send(struct tcmu_dev *udev,
+ }
+
+ ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0,
+- TCMU_MCGRP_CONFIG, GFP_KERNEL);
++ TCMU_MCGRP_CONFIG);
+
+ /* Wait during an add as the listener may not be up yet */
+ if (ret == 0 ||
+diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
+index af851e4e8c8a76..8cbe19574bbcbc 100644
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -2106,6 +2106,11 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ {
+ u32 reg;
+
++ dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) &
++ DWC3_GUSB2PHYCFG_SUSPHY) ||
++ (dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0)) &
++ DWC3_GUSB3PIPECTL_SUSPHY);
++
+ switch (dwc->current_dr_role) {
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ if (pm_runtime_suspended(dwc->dev))
+@@ -2153,6 +2158,15 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
+ break;
+ }
+
++ if (!PMSG_IS_AUTO(msg)) {
++ /*
++ * TI AM62 platform requires SUSPHY to be
++ * enabled for system suspend to work.
++ */
++ if (!dwc->susphy_state)
++ dwc3_enable_susphy(dwc, true);
++ }
++
+ return 0;
+ }
+
+@@ -2215,6 +2229,11 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
+ break;
+ }
+
++ if (!PMSG_IS_AUTO(msg)) {
++ /* restore SUSPHY state to that before system suspend. */
++ dwc3_enable_susphy(dwc, dwc->susphy_state);
++ }
++
+ return 0;
+ }
+
+diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
+index 420753205fafa1..3325796f3cb451 100644
+--- a/drivers/usb/dwc3/core.h
++++ b/drivers/usb/dwc3/core.h
+@@ -1127,6 +1127,8 @@ struct dwc3_scratchpad_array {
+ * @sys_wakeup: set if the device may do system wakeup.
+ * @wakeup_configured: set if the device is configured for remote wakeup.
+ * @suspended: set to track suspend event due to U3/L2.
++ * @susphy_state: state of DWC3_GUSB2PHYCFG_SUSPHY + DWC3_GUSB3PIPECTL_SUSPHY
++ * before PM suspend.
+ * @imod_interval: set the interrupt moderation interval in 250ns
+ * increments or 0 to disable.
+ * @max_cfg_eps: current max number of IN eps used across all USB configs.
+@@ -1351,6 +1353,7 @@ struct dwc3 {
+ unsigned sys_wakeup:1;
+ unsigned wakeup_configured:1;
+ unsigned suspended:1;
++ unsigned susphy_state:1;
+
+ u16 imod_interval;
+
+diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
+index 0219cd79493a73..b3dc5f5164f42c 100644
+--- a/drivers/usb/gadget/function/f_uac2.c
++++ b/drivers/usb/gadget/function/f_uac2.c
+@@ -2042,7 +2042,7 @@ static ssize_t f_uac2_opts_##name##_show(struct config_item *item, \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+- result = snprintf(page, sizeof(opts->name), "%s", opts->name); \
++ result = scnprintf(page, sizeof(opts->name), "%s", opts->name); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+@@ -2052,7 +2052,7 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+ { \
+ struct f_uac2_opts *opts = to_f_uac2_opts(item); \
+- int ret = 0; \
++ int ret = len; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt) { \
+@@ -2060,8 +2060,11 @@ static ssize_t f_uac2_opts_##name##_store(struct config_item *item, \
+ goto end; \
+ } \
+ \
+- ret = snprintf(opts->name, min(sizeof(opts->name), len), \
+- "%s", page); \
++ if (len && page[len - 1] == '\n') \
++ len--; \
++ \
++ scnprintf(opts->name, min(sizeof(opts->name), len + 1), \
++ "%s", page); \
+ \
+ end: \
+ mutex_unlock(&opts->lock); \
+@@ -2178,7 +2181,7 @@ static struct usb_function_instance *afunc_alloc_inst(void)
+ opts->req_number = UAC2_DEF_REQ_NUM;
+ opts->fb_max = FBACK_FAST_MAX;
+
+- snprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
++ scnprintf(opts->function_name, sizeof(opts->function_name), "Source/Sink");
+
+ return &opts->func_inst;
+ }
+diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
+new file mode 100644
+index 00000000000000..9e94cebf4a56d1
+--- /dev/null
++++ b/drivers/usb/host/xhci-caps.h
+@@ -0,0 +1,85 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++/* hc_capbase bitmasks */
++/* bits 7:0 - how long is the Capabilities register */
++#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
++/* bits 31:16 */
++#define HC_VERSION(p) (((p) >> 16) & 0xffff)
++
++/* HCSPARAMS1 - hcs_params1 - bitmasks */
++/* bits 0:7, Max Device Slots */
++#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
++#define HCS_SLOTS_MASK 0xff
++/* bits 8:18, Max Interrupters */
++#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
++/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
++#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
++
++/* HCSPARAMS2 - hcs_params2 - bitmasks */
++/* bits 0:3, frames or uframes that SW needs to queue transactions
++ * ahead of the HW to meet periodic deadlines */
++#define HCS_IST(p) (((p) >> 0) & 0xf)
++/* bits 4:7, max number of Event Ring segments */
++#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
++/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
++/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
++/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
++#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
++
++/* HCSPARAMS3 - hcs_params3 - bitmasks */
++/* bits 0:7, Max U1 to U0 latency for the roothub ports */
++#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
++/* bits 16:31, Max U2 to U0 latency for the roothub ports */
++#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
++
++/* HCCPARAMS - hcc_params - bitmasks */
++/* true: HC can use 64-bit address pointers */
++#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
++/* true: HC can do bandwidth negotiation */
++#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
++/* true: HC uses 64-byte Device Context structures
++ * FIXME 64-byte context structures aren't supported yet.
++ */
++#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
++/* true: HC has port power switches */
++#define HCC_PPC(p) ((p) & (1 << 3))
++/* true: HC has port indicators */
++#define HCS_INDICATOR(p) ((p) & (1 << 4))
++/* true: HC has Light HC Reset Capability */
++#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
++/* true: HC supports latency tolerance messaging */
++#define HCC_LTC(p) ((p) & (1 << 6))
++/* true: no secondary Stream ID Support */
++#define HCC_NSS(p) ((p) & (1 << 7))
++/* true: HC supports Stopped - Short Packet */
++#define HCC_SPC(p) ((p) & (1 << 9))
++/* true: HC has Contiguous Frame ID Capability */
++#define HCC_CFC(p) ((p) & (1 << 11))
++/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
++#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
++/* Extended Capabilities pointer from PCI base - section 5.3.6 */
++#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
++
++#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
++
++/* db_off bitmask - bits 0:1 reserved */
++#define DBOFF_MASK (~0x3)
++
++/* run_regs_off bitmask - bits 0:4 reserved */
++#define RTSOFF_MASK (~0x1f)
++
++/* HCCPARAMS2 - hcc_params2 - bitmasks */
++/* true: HC supports U3 entry Capability */
++#define HCC2_U3C(p) ((p) & (1 << 0))
++/* true: HC supports Configure endpoint command Max exit latency too large */
++#define HCC2_CMC(p) ((p) & (1 << 1))
++/* true: HC supports Force Save context Capability */
++#define HCC2_FSC(p) ((p) & (1 << 2))
++/* true: HC supports Compliance Transition Capability */
++#define HCC2_CTC(p) ((p) & (1 << 3))
++/* true: HC supports Large ESIT payload Capability > 48k */
++#define HCC2_LEC(p) ((p) & (1 << 4))
++/* true: HC supports Configuration Information Capability */
++#define HCC2_CIC(p) ((p) & (1 << 5))
++/* true: HC supports Extended TBC Capability, Isoc burst count > 65535 */
++#define HCC2_ETC(p) ((p) & (1 << 6))
+diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
+index 51a7ab3ba0cac0..76170d7a7e7c35 100644
+--- a/drivers/usb/host/xhci-dbgcap.h
++++ b/drivers/usb/host/xhci-dbgcap.h
+@@ -108,7 +108,7 @@ struct dbc_port {
+ struct tasklet_struct push;
+
+ struct list_head write_pool;
+- struct kfifo write_fifo;
++ unsigned int tx_boundary;
+
+ bool registered;
+ };
+diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
+index b74e98e9439326..0266c2f5bc0d8e 100644
+--- a/drivers/usb/host/xhci-dbgtty.c
++++ b/drivers/usb/host/xhci-dbgtty.c
+@@ -25,16 +25,26 @@ static inline struct dbc_port *dbc_to_port(struct xhci_dbc *dbc)
+ }
+
+ static unsigned int
+-dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
++dbc_kfifo_to_req(struct dbc_port *port, char *packet)
+ {
+- unsigned int len;
+-
+- len = kfifo_len(&port->write_fifo);
+- if (len < size)
+- size = len;
+- if (size != 0)
+- size = kfifo_out(&port->write_fifo, packet, size);
+- return size;
++ unsigned int len;
++
++ len = kfifo_len(&port->port.xmit_fifo);
++
++ if (len == 0)
++ return 0;
++
++ len = min(len, DBC_MAX_PACKET);
++
++ if (port->tx_boundary)
++ len = min(port->tx_boundary, len);
++
++ len = kfifo_out(&port->port.xmit_fifo, packet, len);
++
++ if (port->tx_boundary)
++ port->tx_boundary -= len;
++
++ return len;
+ }
+
+ static int dbc_start_tx(struct dbc_port *port)
+@@ -49,7 +59,7 @@ static int dbc_start_tx(struct dbc_port *port)
+
+ while (!list_empty(pool)) {
+ req = list_entry(pool->next, struct dbc_request, list_pool);
+- len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
++ len = dbc_kfifo_to_req(port, req->buf);
+ if (len == 0)
+ break;
+ do_tty_wake = true;
+@@ -213,14 +223,32 @@ static ssize_t dbc_tty_write(struct tty_struct *tty, const u8 *buf,
+ {
+ struct dbc_port *port = tty->driver_data;
+ unsigned long flags;
++ unsigned int written = 0;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+- if (count)
+- count = kfifo_in(&port->write_fifo, buf, count);
+- dbc_start_tx(port);
++
++ /*
++	 * Treat each tty write as one USB transfer. Make sure writes are turned
++	 * into TRB requests having the same size boundaries as the tty writes.
++	 * Don't add data to the kfifo before the previous write is turned into TRBs.
++ */
++ if (port->tx_boundary) {
++ spin_unlock_irqrestore(&port->port_lock, flags);
++ return 0;
++ }
++
++ if (count) {
++ written = kfifo_in(&port->port.xmit_fifo, buf, count);
++
++ if (written == count)
++ port->tx_boundary = kfifo_len(&port->port.xmit_fifo);
++
++ dbc_start_tx(port);
++ }
++
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+- return count;
++ return written;
+ }
+
+ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
+@@ -230,7 +258,7 @@ static int dbc_tty_put_char(struct tty_struct *tty, u8 ch)
+ int status;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+- status = kfifo_put(&port->write_fifo, ch);
++ status = kfifo_put(&port->port.xmit_fifo, ch);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return status;
+@@ -253,7 +281,11 @@ static unsigned int dbc_tty_write_room(struct tty_struct *tty)
+ unsigned int room;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+- room = kfifo_avail(&port->write_fifo);
++ room = kfifo_avail(&port->port.xmit_fifo);
++
++ if (port->tx_boundary)
++ room = 0;
++
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return room;
+@@ -266,7 +298,7 @@ static unsigned int dbc_tty_chars_in_buffer(struct tty_struct *tty)
+ unsigned int chars;
+
+ spin_lock_irqsave(&port->port_lock, flags);
+- chars = kfifo_len(&port->write_fifo);
++ chars = kfifo_len(&port->port.xmit_fifo);
+ spin_unlock_irqrestore(&port->port_lock, flags);
+
+ return chars;
+@@ -424,7 +456,8 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
+ goto err_idr;
+ }
+
+- ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);
++ ret = kfifo_alloc(&port->port.xmit_fifo, DBC_WRITE_BUF_SIZE,
++ GFP_KERNEL);
+ if (ret)
+ goto err_exit_port;
+
+@@ -453,7 +486,7 @@ static int xhci_dbc_tty_register_device(struct xhci_dbc *dbc)
+ xhci_dbc_free_requests(&port->read_pool);
+ xhci_dbc_free_requests(&port->write_pool);
+ err_free_fifo:
+- kfifo_free(&port->write_fifo);
++ kfifo_free(&port->port.xmit_fifo);
+ err_exit_port:
+ idr_remove(&dbc_tty_minors, port->minor);
+ err_idr:
+@@ -478,7 +511,7 @@ static void xhci_dbc_tty_unregister_device(struct xhci_dbc *dbc)
+ idr_remove(&dbc_tty_minors, port->minor);
+ mutex_unlock(&dbc_tty_minors_lock);
+
+- kfifo_free(&port->write_fifo);
++ kfifo_free(&port->port.xmit_fifo);
+ xhci_dbc_free_requests(&port->read_pool);
+ xhci_dbc_free_requests(&port->read_queue);
+ xhci_dbc_free_requests(&port->write_pool);
+diff --git a/drivers/usb/host/xhci-port.h b/drivers/usb/host/xhci-port.h
+new file mode 100644
+index 00000000000000..f19efb966d180c
+--- /dev/null
++++ b/drivers/usb/host/xhci-port.h
+@@ -0,0 +1,176 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++
++/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
++/* true: device connected */
++#define PORT_CONNECT (1 << 0)
++/* true: port enabled */
++#define PORT_PE (1 << 1)
++/* bit 2 reserved and zeroed */
++/* true: port has an over-current condition */
++#define PORT_OC (1 << 3)
++/* true: port reset signaling asserted */
++#define PORT_RESET (1 << 4)
++/* Port Link State - bits 5:8
++ * A read gives the current link PM state of the port,
++ * a write with Link State Write Strobe set sets the link state.
++ */
++#define PORT_PLS_MASK (0xf << 5)
++#define XDEV_U0 (0x0 << 5)
++#define XDEV_U1 (0x1 << 5)
++#define XDEV_U2 (0x2 << 5)
++#define XDEV_U3 (0x3 << 5)
++#define XDEV_DISABLED (0x4 << 5)
++#define XDEV_RXDETECT (0x5 << 5)
++#define XDEV_INACTIVE (0x6 << 5)
++#define XDEV_POLLING (0x7 << 5)
++#define XDEV_RECOVERY (0x8 << 5)
++#define XDEV_HOT_RESET (0x9 << 5)
++#define XDEV_COMP_MODE (0xa << 5)
++#define XDEV_TEST_MODE (0xb << 5)
++#define XDEV_RESUME (0xf << 5)
++
++/* true: port has power (see HCC_PPC) */
++#define PORT_POWER (1 << 9)
++/* bits 10:13 indicate device speed:
++ * 0 - undefined speed - port hasn't been initialized by a reset yet
++ * 1 - full speed
++ * 2 - low speed
++ * 3 - high speed
++ * 4 - super speed
++ * 5-15 reserved
++ */
++#define DEV_SPEED_MASK (0xf << 10)
++#define XDEV_FS (0x1 << 10)
++#define XDEV_LS (0x2 << 10)
++#define XDEV_HS (0x3 << 10)
++#define XDEV_SS (0x4 << 10)
++#define XDEV_SSP (0x5 << 10)
++#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
++#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
++#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
++#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
++#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
++#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
++#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
++#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
++
++/* Bits 20:23 in the Slot Context are the speed for the device */
++#define SLOT_SPEED_FS (XDEV_FS << 10)
++#define SLOT_SPEED_LS (XDEV_LS << 10)
++#define SLOT_SPEED_HS (XDEV_HS << 10)
++#define SLOT_SPEED_SS (XDEV_SS << 10)
++#define SLOT_SPEED_SSP (XDEV_SSP << 10)
++/* Port Indicator Control */
++#define PORT_LED_OFF (0 << 14)
++#define PORT_LED_AMBER (1 << 14)
++#define PORT_LED_GREEN (2 << 14)
++#define PORT_LED_MASK (3 << 14)
++/* Port Link State Write Strobe - set this when changing link state */
++#define PORT_LINK_STROBE (1 << 16)
++/* true: connect status change */
++#define PORT_CSC (1 << 17)
++/* true: port enable change */
++#define PORT_PEC (1 << 18)
++/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
++ * into an enabled state, and the device into the default state. A "warm" reset
++ * also resets the link, forcing the device through the link training sequence.
++ * SW can also look at the Port Reset register to see when warm reset is done.
++ */
++#define PORT_WRC (1 << 19)
++/* true: over-current change */
++#define PORT_OCC (1 << 20)
++/* true: reset change - 1 to 0 transition of PORT_RESET */
++#define PORT_RC (1 << 21)
++/* port link status change - set on some port link state transitions:
++ * Transition Reason
++ * ------------------------------------------------------------------------------
++ * - U3 to Resume Wakeup signaling from a device
++ * - Resume to Recovery to U0 USB 3.0 device resume
++ * - Resume to U0 USB 2.0 device resume
++ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
++ * - U3 to U0 Software resume of USB 2.0 device complete
++ * - U2 to U0 L1 resume of USB 2.1 device complete
++ * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
++ * - U0 to disabled L1 entry error with USB 2.1 device
++ * - Any state to inactive Error on USB 3.0 port
++ */
++#define PORT_PLC (1 << 22)
++/* port configure error change - port failed to configure its link partner */
++#define PORT_CEC (1 << 23)
++#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
++ PORT_RC | PORT_PLC | PORT_CEC)
++
++
++/* Cold Attach Status - xHC can set this bit to report device attached during
++ * Sx state. Warm port reset should be performed to clear this bit and move port
++ * to connected state.
++ */
++#define PORT_CAS (1 << 24)
++/* wake on connect (enable) */
++#define PORT_WKCONN_E (1 << 25)
++/* wake on disconnect (enable) */
++#define PORT_WKDISC_E (1 << 26)
++/* wake on over-current (enable) */
++#define PORT_WKOC_E (1 << 27)
++/* bits 28:29 reserved */
++/* true: device is non-removable - for USB 3.0 roothub emulation */
++#define PORT_DEV_REMOVE (1 << 30)
++/* Initiate a warm port reset - complete when PORT_WRC is '1' */
++#define PORT_WR (1 << 31)
++
++/* We mark duplicate entries with -1 */
++#define DUPLICATE_ENTRY ((u8)(-1))
++
++/* Port Power Management Status and Control - port_power_base bitmasks */
++/* Inactivity timer value for transitions into U1, in microseconds.
++ * Timeout can be up to 127us. 0xFF means an infinite timeout.
++ */
++#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
++#define PORT_U1_TIMEOUT_MASK 0xff
++/* Inactivity timer value for transitions into U2 */
++#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
++#define PORT_U2_TIMEOUT_MASK (0xff << 8)
++/* Bits 24:31 for port testing */
++
++/* USB2 Protocol PORTSPMSC */
++#define PORT_L1S_MASK 7
++#define PORT_L1S_SUCCESS 1
++#define PORT_RWE (1 << 3)
++#define PORT_HIRD(p) (((p) & 0xf) << 4)
++#define PORT_HIRD_MASK (0xf << 4)
++#define PORT_L1DS_MASK (0xff << 8)
++#define PORT_L1DS(p) (((p) & 0xff) << 8)
++#define PORT_HLE (1 << 16)
++#define PORT_TEST_MODE_SHIFT 28
++
++/* USB3 Protocol PORTLI Port Link Information */
++#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
++#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
++
++/* USB2 Protocol PORTHLPMC */
++#define PORT_HIRDM(p)((p) & 3)
++#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
++#define PORT_BESLD(p)(((p) & 0xf) << 10)
++
++/* use 512 microseconds as USB2 LPM L1 default timeout. */
++#define XHCI_L1_TIMEOUT 512
++
++/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
++ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
++ * by other operating systems.
++ *
++ * XHCI 1.0 errata 8/14/12 Table 13 notes:
++ * "Software should choose xHC BESL/BESLD field values that do not violate a
++ * device's resume latency requirements,
++ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
++ * or not program values < '4' if BLC = '0' and a BESL device is attached.
++ */
++#define XHCI_DEFAULT_BESL 4
++
++/*
++ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3
++ * ports to complete link training. Usually link training completes much
++ * faster, so check status 10 times with a 36ms sleep in places where we
++ * need to wait for polling to complete.
++ */
++#define XHCI_PORT_POLLING_LFPS_TIME 36
+diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
+index 7754ed55d220b3..f2190d121233b2 100644
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -23,6 +23,9 @@
+ #include "xhci-ext-caps.h"
+ #include "pci-quirks.h"
+
++#include "xhci-port.h"
++#include "xhci-caps.h"
++
+ /* max buffer size for trace and debug messages */
+ #define XHCI_MSG_MAX 500
+
+@@ -63,90 +66,6 @@ struct xhci_cap_regs {
+ /* Reserved up to (CAPLENGTH - 0x1C) */
+ };
+
+-/* hc_capbase bitmasks */
+-/* bits 7:0 - how long is the Capabilities register */
+-#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
+-/* bits 31:16 */
+-#define HC_VERSION(p) (((p) >> 16) & 0xffff)
+-
+-/* HCSPARAMS1 - hcs_params1 - bitmasks */
+-/* bits 0:7, Max Device Slots */
+-#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
+-#define HCS_SLOTS_MASK 0xff
+-/* bits 8:18, Max Interrupters */
+-#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
+-/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+-#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
+-
+-/* HCSPARAMS2 - hcs_params2 - bitmasks */
+-/* bits 0:3, frames or uframes that SW needs to queue transactions
+- * ahead of the HW to meet periodic deadlines */
+-#define HCS_IST(p) (((p) >> 0) & 0xf)
+-/* bits 4:7, max number of Event Ring segments */
+-#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
+-/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+-/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+-/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
+-#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+-
+-/* HCSPARAMS3 - hcs_params3 - bitmasks */
+-/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+-#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
+-/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+-#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
+-
+-/* HCCPARAMS - hcc_params - bitmasks */
+-/* true: HC can use 64-bit address pointers */
+-#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
+-/* true: HC can do bandwidth negotiation */
+-#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
+-/* true: HC uses 64-byte Device Context structures
+- * FIXME 64-byte context structures aren't supported yet.
+- */
+-#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
+-/* true: HC has port power switches */
+-#define HCC_PPC(p) ((p) & (1 << 3))
+-/* true: HC has port indicators */
+-#define HCS_INDICATOR(p) ((p) & (1 << 4))
+-/* true: HC has Light HC Reset Capability */
+-#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
+-/* true: HC supports latency tolerance messaging */
+-#define HCC_LTC(p) ((p) & (1 << 6))
+-/* true: no secondary Stream ID Support */
+-#define HCC_NSS(p) ((p) & (1 << 7))
+-/* true: HC supports Stopped - Short Packet */
+-#define HCC_SPC(p) ((p) & (1 << 9))
+-/* true: HC has Contiguous Frame ID Capability */
+-#define HCC_CFC(p) ((p) & (1 << 11))
+-/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+-#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
+-/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+-#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+-
+-#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+-
+-/* db_off bitmask - bits 0:1 reserved */
+-#define DBOFF_MASK (~0x3)
+-
+-/* run_regs_off bitmask - bits 0:4 reserved */
+-#define RTSOFF_MASK (~0x1f)
+-
+-/* HCCPARAMS2 - hcc_params2 - bitmasks */
+-/* true: HC supports U3 entry Capability */
+-#define HCC2_U3C(p) ((p) & (1 << 0))
+-/* true: HC supports Configure endpoint command Max exit latency too large */
+-#define HCC2_CMC(p) ((p) & (1 << 1))
+-/* true: HC supports Force Save context Capability */
+-#define HCC2_FSC(p) ((p) & (1 << 2))
+-/* true: HC supports Compliance Transition Capability */
+-#define HCC2_CTC(p) ((p) & (1 << 3))
+-/* true: HC support Large ESIT payload Capability > 48k */
+-#define HCC2_LEC(p) ((p) & (1 << 4))
+-/* true: HC support Configuration Information Capability */
+-#define HCC2_CIC(p) ((p) & (1 << 5))
+-/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
+-#define HCC2_ETC(p) ((p) & (1 << 6))
+-
+ /* Number of registers per port */
+ #define NUM_PORT_REGS 4
+
+@@ -292,181 +211,6 @@ struct xhci_op_regs {
+ #define CONFIG_CIE (1 << 9)
+ /* bits 10:31 - reserved and should be preserved */
+
+-/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+-/* true: device connected */
+-#define PORT_CONNECT (1 << 0)
+-/* true: port enabled */
+-#define PORT_PE (1 << 1)
+-/* bit 2 reserved and zeroed */
+-/* true: port has an over-current condition */
+-#define PORT_OC (1 << 3)
+-/* true: port reset signaling asserted */
+-#define PORT_RESET (1 << 4)
+-/* Port Link State - bits 5:8
+- * A read gives the current link PM state of the port,
+- * a write with Link State Write Strobe set sets the link state.
+- */
+-#define PORT_PLS_MASK (0xf << 5)
+-#define XDEV_U0 (0x0 << 5)
+-#define XDEV_U1 (0x1 << 5)
+-#define XDEV_U2 (0x2 << 5)
+-#define XDEV_U3 (0x3 << 5)
+-#define XDEV_DISABLED (0x4 << 5)
+-#define XDEV_RXDETECT (0x5 << 5)
+-#define XDEV_INACTIVE (0x6 << 5)
+-#define XDEV_POLLING (0x7 << 5)
+-#define XDEV_RECOVERY (0x8 << 5)
+-#define XDEV_HOT_RESET (0x9 << 5)
+-#define XDEV_COMP_MODE (0xa << 5)
+-#define XDEV_TEST_MODE (0xb << 5)
+-#define XDEV_RESUME (0xf << 5)
+-
+-/* true: port has power (see HCC_PPC) */
+-#define PORT_POWER (1 << 9)
+-/* bits 10:13 indicate device speed:
+- * 0 - undefined speed - port hasn't be initialized by a reset yet
+- * 1 - full speed
+- * 2 - low speed
+- * 3 - high speed
+- * 4 - super speed
+- * 5-15 reserved
+- */
+-#define DEV_SPEED_MASK (0xf << 10)
+-#define XDEV_FS (0x1 << 10)
+-#define XDEV_LS (0x2 << 10)
+-#define XDEV_HS (0x3 << 10)
+-#define XDEV_SS (0x4 << 10)
+-#define XDEV_SSP (0x5 << 10)
+-#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
+-#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+-#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
+-#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+-#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+-#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+-#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
+-#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
+-
+-/* Bits 20:23 in the Slot Context are the speed for the device */
+-#define SLOT_SPEED_FS (XDEV_FS << 10)
+-#define SLOT_SPEED_LS (XDEV_LS << 10)
+-#define SLOT_SPEED_HS (XDEV_HS << 10)
+-#define SLOT_SPEED_SS (XDEV_SS << 10)
+-#define SLOT_SPEED_SSP (XDEV_SSP << 10)
+-/* Port Indicator Control */
+-#define PORT_LED_OFF (0 << 14)
+-#define PORT_LED_AMBER (1 << 14)
+-#define PORT_LED_GREEN (2 << 14)
+-#define PORT_LED_MASK (3 << 14)
+-/* Port Link State Write Strobe - set this when changing link state */
+-#define PORT_LINK_STROBE (1 << 16)
+-/* true: connect status change */
+-#define PORT_CSC (1 << 17)
+-/* true: port enable change */
+-#define PORT_PEC (1 << 18)
+-/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
+- * into an enabled state, and the device into the default state. A "warm" reset
+- * also resets the link, forcing the device through the link training sequence.
+- * SW can also look at the Port Reset register to see when warm reset is done.
+- */
+-#define PORT_WRC (1 << 19)
+-/* true: over-current change */
+-#define PORT_OCC (1 << 20)
+-/* true: reset change - 1 to 0 transition of PORT_RESET */
+-#define PORT_RC (1 << 21)
+-/* port link status change - set on some port link state transitions:
+- * Transition Reason
+- * ------------------------------------------------------------------------------
+- * - U3 to Resume Wakeup signaling from a device
+- * - Resume to Recovery to U0 USB 3.0 device resume
+- * - Resume to U0 USB 2.0 device resume
+- * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
+- * - U3 to U0 Software resume of USB 2.0 device complete
+- * - U2 to U0 L1 resume of USB 2.1 device complete
+- * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
+- * - U0 to disabled L1 entry error with USB 2.1 device
+- * - Any state to inactive Error on USB 3.0 port
+- */
+-#define PORT_PLC (1 << 22)
+-/* port configure error change - port failed to configure its link partner */
+-#define PORT_CEC (1 << 23)
+-#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+- PORT_RC | PORT_PLC | PORT_CEC)
+-
+-
+-/* Cold Attach Status - xHC can set this bit to report device attached during
+- * Sx state. Warm port reset should be perfomed to clear this bit and move port
+- * to connected state.
+- */
+-#define PORT_CAS (1 << 24)
+-/* wake on connect (enable) */
+-#define PORT_WKCONN_E (1 << 25)
+-/* wake on disconnect (enable) */
+-#define PORT_WKDISC_E (1 << 26)
+-/* wake on over-current (enable) */
+-#define PORT_WKOC_E (1 << 27)
+-/* bits 28:29 reserved */
+-/* true: device is non-removable - for USB 3.0 roothub emulation */
+-#define PORT_DEV_REMOVE (1 << 30)
+-/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+-#define PORT_WR (1 << 31)
+-
+-/* We mark duplicate entries with -1 */
+-#define DUPLICATE_ENTRY ((u8)(-1))
+-
+-/* Port Power Management Status and Control - port_power_base bitmasks */
+-/* Inactivity timer value for transitions into U1, in microseconds.
+- * Timeout can be up to 127us. 0xFF means an infinite timeout.
+- */
+-#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
+-#define PORT_U1_TIMEOUT_MASK 0xff
+-/* Inactivity timer value for transitions into U2 */
+-#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
+-#define PORT_U2_TIMEOUT_MASK (0xff << 8)
+-/* Bits 24:31 for port testing */
+-
+-/* USB2 Protocol PORTSPMSC */
+-#define PORT_L1S_MASK 7
+-#define PORT_L1S_SUCCESS 1
+-#define PORT_RWE (1 << 3)
+-#define PORT_HIRD(p) (((p) & 0xf) << 4)
+-#define PORT_HIRD_MASK (0xf << 4)
+-#define PORT_L1DS_MASK (0xff << 8)
+-#define PORT_L1DS(p) (((p) & 0xff) << 8)
+-#define PORT_HLE (1 << 16)
+-#define PORT_TEST_MODE_SHIFT 28
+-
+-/* USB3 Protocol PORTLI Port Link Information */
+-#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
+-#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
+-
+-/* USB2 Protocol PORTHLPMC */
+-#define PORT_HIRDM(p)((p) & 3)
+-#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
+-#define PORT_BESLD(p)(((p) & 0xf) << 10)
+-
+-/* use 512 microseconds as USB2 LPM L1 default timeout. */
+-#define XHCI_L1_TIMEOUT 512
+-
+-/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+- * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+- * by other operating systems.
+- *
+- * XHCI 1.0 errata 8/14/12 Table 13 notes:
+- * "Software should choose xHC BESL/BESLD field values that do not violate a
+- * device's resume latency requirements,
+- * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+- * or not program values < '4' if BLC = '0' and a BESL device is attached.
+- */
+-#define XHCI_DEFAULT_BESL 4
+-
+-/*
+- * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
+- * to complete link training. usually link trainig completes much faster
+- * so check status 10 times with 36ms sleep in places we need to wait for
+- * polling to complete.
+- */
+-#define XHCI_PORT_POLLING_LFPS_TIME 36
+-
+ /**
+ * struct xhci_intr_reg - Interrupt Register Set
+ * @irq_pending: IMAN - Interrupt Management Register. Used to enable
+diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
+index f92fc2acfcba04..79cad8d61dacdd 100644
+--- a/drivers/usb/typec/class.c
++++ b/drivers/usb/typec/class.c
+@@ -502,6 +502,7 @@ static void typec_altmode_release(struct device *dev)
+ typec_altmode_put_partner(alt);
+
+ altmode_id_remove(alt->adev.dev.parent, alt->id);
++ put_device(alt->adev.dev.parent);
+ kfree(alt);
+ }
+
+@@ -551,6 +552,8 @@ typec_register_altmode(struct device *parent,
+ alt->adev.dev.type = &typec_altmode_dev_type;
+ dev_set_name(&alt->adev.dev, "%s.%u", dev_name(parent), id);
+
++ get_device(alt->adev.dev.parent);
++
+ /* Link partners and plugs with the ports */
+ if (!is_port)
+ typec_altmode_set_partner(alt);
+diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
+index 4e999e1c14075d..434cf3d5f4cf18 100644
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -3794,6 +3794,8 @@ void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
+ spin_lock(&cache->lock);
+ if (cache->ro)
+ space_info->bytes_readonly += num_bytes;
++ else if (btrfs_is_zoned(cache->fs_info))
++ space_info->bytes_zone_unusable += num_bytes;
+ cache->reserved -= num_bytes;
+ space_info->bytes_reserved -= num_bytes;
+ space_info->max_extent_size = 0;
+diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
+index 9c07d5c3e5ad29..7066414be6ee8a 100644
+--- a/fs/btrfs/dir-item.c
++++ b/fs/btrfs/dir-item.c
+@@ -347,8 +347,8 @@ btrfs_search_dir_index_item(struct btrfs_root *root, struct btrfs_path *path,
+ return di;
+ }
+ /* Adjust return code if the key was not found in the next leaf. */
+- if (ret > 0)
+- ret = 0;
++ if (ret >= 0)
++ ret = -ENOENT;
+
+ return ERR_PTR(ret);
+ }
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index ee04185d8e0f58..ea19ea75674d2f 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4293,11 +4293,8 @@ static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
+ */
+ if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
+ di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
+- if (IS_ERR_OR_NULL(di)) {
+- if (!di)
+- ret = -ENOENT;
+- else
+- ret = PTR_ERR(di);
++ if (IS_ERR(di)) {
++ ret = PTR_ERR(di);
+ btrfs_abort_transaction(trans, ret);
+ goto out;
+ }
+diff --git a/fs/exec.c b/fs/exec.c
+index f49b352a60323a..7776209d98c10b 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -143,13 +143,11 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
+ goto out;
+
+ /*
+- * may_open() has already checked for this, so it should be
+- * impossible to trip now. But we need to be extra cautious
+- * and check again at the very end too.
++ * Check do_open_execat() for an explanation.
+ */
+ error = -EACCES;
+- if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
+- path_noexec(&file->f_path)))
++ if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
++ path_noexec(&file->f_path))
+ goto exit;
+
+ error = -ENOEXEC;
+@@ -925,23 +923,22 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags)
+
+ file = do_filp_open(fd, name, &open_exec_flags);
+ if (IS_ERR(file))
+- goto out;
++ return file;
+
+ /*
+- * may_open() has already checked for this, so it should be
+- * impossible to trip now. But we need to be extra cautious
+- * and check again at the very end too.
++ * In the past the regular type check was here. It moved to may_open() in
++ * 633fb6ac3980 ("exec: move S_ISREG() check earlier"). Since then it is
++ * an invariant that all non-regular files error out before we get here.
+ */
+ err = -EACCES;
+- if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
+- path_noexec(&file->f_path)))
++ if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode)) ||
++ path_noexec(&file->f_path))
+ goto exit;
+
+ err = deny_write_access(file);
+ if (err)
+ goto exit;
+
+-out:
+ return file;
+
+ exit:
+diff --git a/fs/jfs/jfs_dmap.c b/fs/jfs/jfs_dmap.c
+index 974ecf5e0d9522..3ab410059dc202 100644
+--- a/fs/jfs/jfs_dmap.c
++++ b/fs/jfs/jfs_dmap.c
+@@ -187,7 +187,7 @@ int dbMount(struct inode *ipbmap)
+ }
+
+ bmp->db_numag = le32_to_cpu(dbmp_le->dn_numag);
+- if (!bmp->db_numag || bmp->db_numag >= MAXAG) {
++ if (!bmp->db_numag || bmp->db_numag > MAXAG) {
+ err = -EINVAL;
+ goto err_release_metapage;
+ }
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index f16bbbfcf672c8..975dd74a7a4db4 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -8254,7 +8254,7 @@ nfs4_state_shutdown_net(struct net *net)
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ unregister_shrinker(&nn->nfsd_client_shrinker);
+- cancel_work(&nn->nfsd_shrinker_work);
++ cancel_work_sync(&nn->nfsd_shrinker_work);
+ cancel_delayed_work_sync(&nn->laundromat_work);
+ locks_end_grace(&nn->nfsd4_manager);
+
+diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
+index b4e54d079b7d03..36d29c183bb7fa 100644
+--- a/fs/nilfs2/page.c
++++ b/fs/nilfs2/page.c
+@@ -77,7 +77,8 @@ void nilfs_forget_buffer(struct buffer_head *bh)
+ const unsigned long clear_bits =
+ (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
+ BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+- BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));
++ BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
++ BIT(BH_Delay));
+
+ lock_buffer(bh);
+ set_mask_bits(&bh->b_state, clear_bits, 0);
+@@ -410,7 +411,8 @@ void nilfs_clear_dirty_page(struct page *page, bool silent)
+ const unsigned long clear_bits =
+ (BIT(BH_Uptodate) | BIT(BH_Dirty) | BIT(BH_Mapped) |
+ BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
+- BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected));
++ BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
++ BIT(BH_Delay));
+
+ bh = head = page_buffers(page);
+ do {
+diff --git a/fs/open.c b/fs/open.c
+index 59db720693f9a0..f9ac703ec1b2d3 100644
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -1461,6 +1461,8 @@ SYSCALL_DEFINE4(openat2, int, dfd, const char __user *, filename,
+
+ if (unlikely(usize < OPEN_HOW_SIZE_VER0))
+ return -EINVAL;
++ if (unlikely(usize > PAGE_SIZE))
++ return -E2BIG;
+
+ err = copy_struct_from_user(&tmp, sizeof(tmp), how, usize);
+ if (err)
+diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
+index 3bbac925d0766b..8d7484400fe8e1 100644
+--- a/fs/smb/client/fs_context.c
++++ b/fs/smb/client/fs_context.c
+@@ -918,8 +918,15 @@ static int smb3_reconfigure(struct fs_context *fc)
+ else {
+ kfree_sensitive(ses->password);
+ ses->password = kstrdup(ctx->password, GFP_KERNEL);
++ if (!ses->password)
++ return -ENOMEM;
+ kfree_sensitive(ses->password2);
+ ses->password2 = kstrdup(ctx->password2, GFP_KERNEL);
++ if (!ses->password2) {
++ kfree_sensitive(ses->password);
++ ses->password = NULL;
++ return -ENOMEM;
++ }
+ }
+ STEAL_STRING(cifs_sb, ctx, domainname);
+ STEAL_STRING(cifs_sb, ctx, nodename);
+diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
+index ad0e0de9a165d4..7429b96a6ae5ef 100644
+--- a/fs/smb/client/reparse.c
++++ b/fs/smb/client/reparse.c
+@@ -330,6 +330,18 @@ static int parse_reparse_posix(struct reparse_posix_data *buf,
+
+ switch ((type = le64_to_cpu(buf->InodeType))) {
+ case NFS_SPECFILE_LNK:
++ if (len == 0 || (len % 2)) {
++ cifs_dbg(VFS, "srv returned malformed nfs symlink buffer\n");
++ return -EIO;
++ }
++ /*
++		 * Check that the buffer does not contain a UTF-16 null codepoint,
++		 * because Linux cannot process a symlink with a null byte.
++ */
++ if (UniStrnlen((wchar_t *)buf->DataBuffer, len/2) != len/2) {
++ cifs_dbg(VFS, "srv returned null byte in nfs symlink target location\n");
++ return -EIO;
++ }
+ data->symlink_target = cifs_strndup_from_utf16(buf->DataBuffer,
+ len, true,
+ cifs_sb->local_nls);
+@@ -340,8 +352,19 @@ static int parse_reparse_posix(struct reparse_posix_data *buf,
+ break;
+ case NFS_SPECFILE_CHR:
+ case NFS_SPECFILE_BLK:
++ /* DataBuffer for block and char devices contains two 32-bit numbers */
++ if (len != 8) {
++ cifs_dbg(VFS, "srv returned malformed nfs buffer for type: 0x%llx\n", type);
++ return -EIO;
++ }
++ break;
+ case NFS_SPECFILE_FIFO:
+ case NFS_SPECFILE_SOCK:
++ /* DataBuffer for fifos and sockets is empty */
++ if (len != 0) {
++ cifs_dbg(VFS, "srv returned malformed nfs buffer for type: 0x%llx\n", type);
++ return -EIO;
++ }
+ break;
+ default:
+ cifs_dbg(VFS, "%s: unhandled inode type: 0x%llx\n",
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 450e3050324c6c..ab6e79be2c15dd 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -1122,7 +1122,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_fid fid;
+ unsigned int size[1];
+ void *data[1];
+- struct smb2_file_full_ea_info *ea = NULL;
++ struct smb2_file_full_ea_info *ea;
+ struct smb2_query_info_rsp *rsp;
+ int rc, used_len = 0;
+ int retries = 0, cur_sleep = 1;
+@@ -1143,6 +1143,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
+ if (!utf16_path)
+ return -ENOMEM;
+
++ ea = NULL;
+ resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER;
+ vars = kzalloc(sizeof(*vars), GFP_KERNEL);
+ if (!vars) {
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 83a03201bb8628..a86a3fbfb5a49c 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3300,6 +3300,15 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
+ return rc;
+
+ if (indatalen) {
++ unsigned int len;
++
++ if (WARN_ON_ONCE(smb3_encryption_required(tcon) &&
++ (check_add_overflow(total_len - 1,
++ ALIGN(indatalen, 8), &len) ||
++ len > MAX_CIFS_SMALL_BUFFER_SIZE))) {
++ cifs_small_buf_release(req);
++ return -EIO;
++ }
+ /*
+ * indatalen is usually small at a couple of bytes max, so
+ * just allocate through generic pool
+diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
+index bb471ec3640467..f5de2030e769a4 100644
+--- a/fs/udf/balloc.c
++++ b/fs/udf/balloc.c
+@@ -387,6 +387,7 @@ static void udf_table_free_blocks(struct super_block *sb,
+ struct extent_position oepos, epos;
+ int8_t etype;
+ struct udf_inode_info *iinfo;
++ int ret = 0;
+
+ mutex_lock(&sbi->s_alloc_mutex);
+ iinfo = UDF_I(table);
+@@ -400,8 +401,12 @@ static void udf_table_free_blocks(struct super_block *sb,
+ epos.block = oepos.block = iinfo->i_location;
+ epos.bh = oepos.bh = NULL;
+
+- while (count &&
+- (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
++ while (count) {
++ ret = udf_next_aext(table, &epos, &eloc, &elen, &etype, 1);
++ if (ret < 0)
++ goto error_return;
++ if (ret == 0)
++ break;
+ if (((eloc.logicalBlockNum +
+ (elen >> sb->s_blocksize_bits)) == start)) {
+ if ((0x3FFFFFFF - elen) <
+@@ -476,11 +481,8 @@ static void udf_table_free_blocks(struct super_block *sb,
+ adsize = sizeof(struct short_ad);
+ else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
+ adsize = sizeof(struct long_ad);
+- else {
+- brelse(oepos.bh);
+- brelse(epos.bh);
++ else
+ goto error_return;
+- }
+
+ if (epos.offset + (2 * adsize) > sb->s_blocksize) {
+ /* Steal a block from the extent being free'd */
+@@ -496,10 +498,10 @@ static void udf_table_free_blocks(struct super_block *sb,
+ __udf_add_aext(table, &epos, &eloc, elen, 1);
+ }
+
++error_return:
+ brelse(epos.bh);
+ brelse(oepos.bh);
+
+-error_return:
+ mutex_unlock(&sbi->s_alloc_mutex);
+ return;
+ }
+@@ -515,6 +517,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
+ struct extent_position epos;
+ int8_t etype = -1;
+ struct udf_inode_info *iinfo;
++ int ret = 0;
+
+ if (first_block >= sbi->s_partmaps[partition].s_partition_len)
+ return 0;
+@@ -533,11 +536,14 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
+ epos.bh = NULL;
+ eloc.logicalBlockNum = 0xFFFFFFFF;
+
+- while (first_block != eloc.logicalBlockNum &&
+- (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
++ while (first_block != eloc.logicalBlockNum) {
++ ret = udf_next_aext(table, &epos, &eloc, &elen, &etype, 1);
++ if (ret < 0)
++ goto err_out;
++ if (ret == 0)
++ break;
+ udf_debug("eloc=%u, elen=%u, first_block=%u\n",
+ eloc.logicalBlockNum, elen, first_block);
+- ; /* empty loop body */
+ }
+
+ if (first_block == eloc.logicalBlockNum) {
+@@ -556,6 +562,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
+ alloc_count = 0;
+ }
+
++err_out:
+ brelse(epos.bh);
+
+ if (alloc_count)
+@@ -577,6 +584,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
+ struct extent_position epos, goal_epos;
+ int8_t etype;
+ struct udf_inode_info *iinfo = UDF_I(table);
++ int ret = 0;
+
+ *err = -ENOSPC;
+
+@@ -600,8 +608,10 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
+ epos.block = iinfo->i_location;
+ epos.bh = goal_epos.bh = NULL;
+
+- while (spread &&
+- (etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1) {
++ while (spread) {
++ ret = udf_next_aext(table, &epos, &eloc, &elen, &etype, 1);
++ if (ret <= 0)
++ break;
+ if (goal >= eloc.logicalBlockNum) {
+ if (goal < eloc.logicalBlockNum +
+ (elen >> sb->s_blocksize_bits))
+@@ -629,9 +639,11 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
+
+ brelse(epos.bh);
+
+- if (spread == 0xFFFFFFFF) {
++ if (ret < 0 || spread == 0xFFFFFFFF) {
+ brelse(goal_epos.bh);
+ mutex_unlock(&sbi->s_alloc_mutex);
++ if (ret < 0)
++ *err = ret;
+ return 0;
+ }
+
+diff --git a/fs/udf/directory.c b/fs/udf/directory.c
+index 93153665eb3747..632453aa38934a 100644
+--- a/fs/udf/directory.c
++++ b/fs/udf/directory.c
+@@ -166,13 +166,19 @@ static struct buffer_head *udf_fiiter_bread_blk(struct udf_fileident_iter *iter)
+ */
+ static int udf_fiiter_advance_blk(struct udf_fileident_iter *iter)
+ {
++ int8_t etype = -1;
++ int err = 0;
++
+ iter->loffset++;
+ if (iter->loffset < DIV_ROUND_UP(iter->elen, 1<<iter->dir->i_blkbits))
+ return 0;
+
+ iter->loffset = 0;
+- if (udf_next_aext(iter->dir, &iter->epos, &iter->eloc, &iter->elen, 1)
+- != (EXT_RECORDED_ALLOCATED >> 30)) {
++ err = udf_next_aext(iter->dir, &iter->epos, &iter->eloc,
++ &iter->elen, &etype, 1);
++ if (err < 0)
++ return err;
++ else if (err == 0 || etype != (EXT_RECORDED_ALLOCATED >> 30)) {
+ if (iter->pos == iter->dir->i_size) {
+ iter->elen = 0;
+ return 0;
+@@ -240,6 +246,7 @@ int udf_fiiter_init(struct udf_fileident_iter *iter, struct inode *dir,
+ {
+ struct udf_inode_info *iinfo = UDF_I(dir);
+ int err = 0;
++ int8_t etype;
+
+ iter->dir = dir;
+ iter->bh[0] = iter->bh[1] = NULL;
+@@ -259,9 +266,9 @@ int udf_fiiter_init(struct udf_fileident_iter *iter, struct inode *dir,
+ goto out;
+ }
+
+- if (inode_bmap(dir, iter->pos >> dir->i_blkbits, &iter->epos,
+- &iter->eloc, &iter->elen, &iter->loffset) !=
+- (EXT_RECORDED_ALLOCATED >> 30)) {
++ err = inode_bmap(dir, iter->pos >> dir->i_blkbits, &iter->epos,
++ &iter->eloc, &iter->elen, &iter->loffset, &etype);
++ if (err <= 0 || etype != (EXT_RECORDED_ALLOCATED >> 30)) {
+ if (pos == dir->i_size)
+ return 0;
+ udf_err(dir->i_sb,
+@@ -457,6 +464,7 @@ int udf_fiiter_append_blk(struct udf_fileident_iter *iter)
+ sector_t block;
+ uint32_t old_elen = iter->elen;
+ int err;
++ int8_t etype;
+
+ if (WARN_ON_ONCE(iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB))
+ return -EINVAL;
+@@ -471,8 +479,9 @@ int udf_fiiter_append_blk(struct udf_fileident_iter *iter)
+ udf_fiiter_update_elen(iter, old_elen);
+ return err;
+ }
+- if (inode_bmap(iter->dir, block, &iter->epos, &iter->eloc, &iter->elen,
+- &iter->loffset) != (EXT_RECORDED_ALLOCATED >> 30)) {
++ err = inode_bmap(iter->dir, block, &iter->epos, &iter->eloc, &iter->elen,
++ &iter->loffset, &etype);
++ if (err <= 0 || etype != (EXT_RECORDED_ALLOCATED >> 30)) {
+ udf_err(iter->dir->i_sb,
+ "block %llu not allocated in directory (ino %lu)\n",
+ (unsigned long long)block, iter->dir->i_ino);
+diff --git a/fs/udf/inode.c b/fs/udf/inode.c
+index 8db07d1f56bc94..e98c198f85b964 100644
+--- a/fs/udf/inode.c
++++ b/fs/udf/inode.c
+@@ -408,7 +408,7 @@ struct udf_map_rq {
+
+ static int udf_map_block(struct inode *inode, struct udf_map_rq *map)
+ {
+- int err;
++ int ret;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+
+ if (WARN_ON_ONCE(iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB))
+@@ -420,18 +420,24 @@ static int udf_map_block(struct inode *inode, struct udf_map_rq *map)
+ uint32_t elen;
+ sector_t offset;
+ struct extent_position epos = {};
++ int8_t etype;
+
+ down_read(&iinfo->i_data_sem);
+- if (inode_bmap(inode, map->lblk, &epos, &eloc, &elen, &offset)
+- == (EXT_RECORDED_ALLOCATED >> 30)) {
++ ret = inode_bmap(inode, map->lblk, &epos, &eloc, &elen, &offset,
++ &etype);
++ if (ret < 0)
++ goto out_read;
++ if (ret > 0 && etype == (EXT_RECORDED_ALLOCATED >> 30)) {
+ map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc,
+ offset);
+ map->oflags |= UDF_BLK_MAPPED;
++ ret = 0;
+ }
++out_read:
+ up_read(&iinfo->i_data_sem);
+ brelse(epos.bh);
+
+- return 0;
++ return ret;
+ }
+
+ down_write(&iinfo->i_data_sem);
+@@ -442,9 +448,9 @@ static int udf_map_block(struct inode *inode, struct udf_map_rq *map)
+ if (((loff_t)map->lblk) << inode->i_blkbits >= iinfo->i_lenExtents)
+ udf_discard_prealloc(inode);
+ udf_clear_extent_cache(inode);
+- err = inode_getblk(inode, map);
++ ret = inode_getblk(inode, map);
+ up_write(&iinfo->i_data_sem);
+- return err;
++ return ret;
+ }
+
+ static int __udf_get_block(struct inode *inode, sector_t block,
+@@ -547,6 +553,7 @@ static int udf_do_extend_file(struct inode *inode,
+ } else {
+ struct kernel_lb_addr tmploc;
+ uint32_t tmplen;
++ int8_t tmptype;
+
+ udf_write_aext(inode, last_pos, &last_ext->extLocation,
+ last_ext->extLength, 1);
+@@ -556,8 +563,12 @@ static int udf_do_extend_file(struct inode *inode,
+ * more extents, we may need to enter possible following
+ * empty indirect extent.
+ */
+- if (new_block_bytes)
+- udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
++ if (new_block_bytes) {
++ err = udf_next_aext(inode, last_pos, &tmploc, &tmplen,
++ &tmptype, 0);
++ if (err < 0)
++ goto out_err;
++ }
+ }
+ iinfo->i_lenExtents += add;
+
+@@ -661,8 +672,10 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ */
+ udf_discard_prealloc(inode);
+
+- etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
+- within_last_ext = (etype != -1);
++ err = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset, &etype);
++ if (err < 0)
++ goto out;
++ within_last_ext = (err == 1);
+ /* We don't expect extents past EOF... */
+ WARN_ON_ONCE(within_last_ext &&
+ elen > ((loff_t)offset + 1) << inode->i_blkbits);
+@@ -676,8 +689,10 @@ static int udf_extend_file(struct inode *inode, loff_t newsize)
+ extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
+ } else {
+ epos.offset -= adsize;
+- etype = udf_next_aext(inode, &epos, &extent.extLocation,
+- &extent.extLength, 0);
++ err = udf_next_aext(inode, &epos, &extent.extLocation,
++ &extent.extLength, &etype, 0);
++ if (err <= 0)
++ goto out;
+ extent.extLength |= etype << 30;
+ }
+
+@@ -714,11 +729,11 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
+ loff_t lbcount = 0, b_off = 0;
+ udf_pblk_t newblocknum;
+ sector_t offset = 0;
+- int8_t etype;
++ int8_t etype, tmpetype;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
+ int lastblock = 0;
+- bool isBeyondEOF;
++ bool isBeyondEOF = false;
+ int ret = 0;
+
+ prev_epos.offset = udf_file_entry_alloc_offset(inode);
+@@ -750,9 +765,13 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
+ prev_epos.offset = cur_epos.offset;
+ cur_epos.offset = next_epos.offset;
+
+- etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 1);
+- if (etype == -1)
++ ret = udf_next_aext(inode, &next_epos, &eloc, &elen, &etype, 1);
++ if (ret < 0) {
++ goto out_free;
++ } else if (ret == 0) {
++ isBeyondEOF = true;
+ break;
++ }
+
+ c = !c;
+
+@@ -773,13 +792,17 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
+ * Move prev_epos and cur_epos into indirect extent if we are at
+ * the pointer to it
+ */
+- udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, 0);
+- udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, 0);
++ ret = udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, &tmpetype, 0);
++ if (ret < 0)
++ goto out_free;
++ ret = udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, &tmpetype, 0);
++ if (ret < 0)
++ goto out_free;
+
+ /* if the extent is allocated and recorded, return the block
+ if the extent is not a multiple of the blocksize, round up */
+
+- if (etype == (EXT_RECORDED_ALLOCATED >> 30)) {
++ if (!isBeyondEOF && etype == (EXT_RECORDED_ALLOCATED >> 30)) {
+ if (elen & (inode->i_sb->s_blocksize - 1)) {
+ elen = EXT_RECORDED_ALLOCATED |
+ ((elen + inode->i_sb->s_blocksize - 1) &
+@@ -795,10 +818,9 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
+ }
+
+ /* Are we beyond EOF and preallocated extent? */
+- if (etype == -1) {
++ if (isBeyondEOF) {
+ loff_t hole_len;
+
+- isBeyondEOF = true;
+ if (count) {
+ if (c)
+ laarr[0] = laarr[1];
+@@ -834,7 +856,6 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
+ endnum = c + 1;
+ lastblock = 1;
+ } else {
+- isBeyondEOF = false;
+ endnum = startnum = ((count > 2) ? 2 : count);
+
+ /* if the current extent is in position 0,
+@@ -848,15 +869,17 @@ static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
+
+ /* if the current block is located in an extent,
+ read the next extent */
+- etype = udf_next_aext(inode, &next_epos, &eloc, &elen, 0);
+- if (etype != -1) {
++ ret = udf_next_aext(inode, &next_epos, &eloc, &elen, &etype, 0);
++ if (ret > 0) {
+ laarr[c + 1].extLength = (etype << 30) | elen;
+ laarr[c + 1].extLocation = eloc;
+ count++;
+ startnum++;
+ endnum++;
+- } else
++ } else if (ret == 0)
+ lastblock = 1;
++ else
++ goto out_free;
+ }
+
+ /* if the current extent is not recorded but allocated, get the
+@@ -1174,6 +1197,7 @@ static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
+ int start = 0, i;
+ struct kernel_lb_addr tmploc;
+ uint32_t tmplen;
++ int8_t tmpetype;
+ int err;
+
+ if (startnum > endnum) {
+@@ -1191,14 +1215,19 @@ static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
+ */
+ if (err < 0)
+ return err;
+- udf_next_aext(inode, epos, &laarr[i].extLocation,
+- &laarr[i].extLength, 1);
++ err = udf_next_aext(inode, epos, &laarr[i].extLocation,
++ &laarr[i].extLength, &tmpetype, 1);
++ if (err < 0)
++ return err;
+ start++;
+ }
+ }
+
+ for (i = start; i < endnum; i++) {
+- udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
++ err = udf_next_aext(inode, epos, &tmploc, &tmplen, &tmpetype, 0);
++ if (err < 0)
++ return err;
++
+ udf_write_aext(inode, epos, &laarr[i].extLocation,
+ laarr[i].extLength, 1);
+ }
+@@ -1953,6 +1982,7 @@ int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
+ struct extent_position nepos;
+ struct kernel_lb_addr neloc;
+ int ver, adsize;
++ int err = 0;
+
+ if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ adsize = sizeof(struct short_ad);
+@@ -1997,10 +2027,12 @@ int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
+ if (epos->offset + adsize > sb->s_blocksize) {
+ struct kernel_lb_addr cp_loc;
+ uint32_t cp_len;
+- int cp_type;
++ int8_t cp_type;
+
+ epos->offset -= adsize;
+- cp_type = udf_current_aext(inode, epos, &cp_loc, &cp_len, 0);
++ err = udf_current_aext(inode, epos, &cp_loc, &cp_len, &cp_type, 0);
++ if (err <= 0)
++ goto err_out;
+ cp_len |= ((uint32_t)cp_type) << 30;
+
+ __udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
+@@ -2015,6 +2047,9 @@ int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
+ *epos = nepos;
+
+ return 0;
++err_out:
++ brelse(bh);
++ return err;
+ }
+
+ /*
+@@ -2160,21 +2195,30 @@ void udf_write_aext(struct inode *inode, struct extent_position *epos,
+ */
+ #define UDF_MAX_INDIR_EXTS 16
+
+-int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
+- struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
++/*
++ * Returns 1 on success, -errno on error, 0 on hit EOF.
++ */
++int udf_next_aext(struct inode *inode, struct extent_position *epos,
++ struct kernel_lb_addr *eloc, uint32_t *elen, int8_t *etype,
++ int inc)
+ {
+- int8_t etype;
+ unsigned int indirections = 0;
++ int ret = 0;
++ udf_pblk_t block;
+
+- while ((etype = udf_current_aext(inode, epos, eloc, elen, inc)) ==
+- (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
+- udf_pblk_t block;
++ while (1) {
++ ret = udf_current_aext(inode, epos, eloc, elen,
++ etype, inc);
++ if (ret <= 0)
++ return ret;
++ if (*etype != (EXT_NEXT_EXTENT_ALLOCDESCS >> 30))
++ return ret;
+
+ if (++indirections > UDF_MAX_INDIR_EXTS) {
+ udf_err(inode->i_sb,
+ "too many indirect extents in inode %lu\n",
+ inode->i_ino);
+- return -1;
++ return -EFSCORRUPTED;
+ }
+
+ epos->block = *eloc;
+@@ -2184,18 +2228,19 @@ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
+ epos->bh = sb_bread(inode->i_sb, block);
+ if (!epos->bh) {
+ udf_debug("reading block %u failed!\n", block);
+- return -1;
++ return -EIO;
+ }
+ }
+-
+- return etype;
+ }
+
+-int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
+- struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
++/*
++ * Returns 1 on success, -errno on error, 0 on hit EOF.
++ */
++int udf_current_aext(struct inode *inode, struct extent_position *epos,
++ struct kernel_lb_addr *eloc, uint32_t *elen, int8_t *etype,
++ int inc)
+ {
+ int alen;
+- int8_t etype;
+ uint8_t *ptr;
+ struct short_ad *sad;
+ struct long_ad *lad;
+@@ -2210,20 +2255,23 @@ int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
+ alen = udf_file_entry_alloc_offset(inode) +
+ iinfo->i_lenAlloc;
+ } else {
++ struct allocExtDesc *header =
++ (struct allocExtDesc *)epos->bh->b_data;
++
+ if (!epos->offset)
+ epos->offset = sizeof(struct allocExtDesc);
+ ptr = epos->bh->b_data + epos->offset;
+- alen = sizeof(struct allocExtDesc) +
+- le32_to_cpu(((struct allocExtDesc *)epos->bh->b_data)->
+- lengthAllocDescs);
++ if (check_add_overflow(sizeof(struct allocExtDesc),
++ le32_to_cpu(header->lengthAllocDescs), &alen))
++ return -1;
+ }
+
+ switch (iinfo->i_alloc_type) {
+ case ICBTAG_FLAG_AD_SHORT:
+ sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
+ if (!sad)
+- return -1;
+- etype = le32_to_cpu(sad->extLength) >> 30;
++ return 0;
++ *etype = le32_to_cpu(sad->extLength) >> 30;
+ eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
+ eloc->partitionReferenceNum =
+ iinfo->i_location.partitionReferenceNum;
+@@ -2232,17 +2280,17 @@ int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
+ case ICBTAG_FLAG_AD_LONG:
+ lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
+ if (!lad)
+- return -1;
+- etype = le32_to_cpu(lad->extLength) >> 30;
++ return 0;
++ *etype = le32_to_cpu(lad->extLength) >> 30;
+ *eloc = lelb_to_cpu(lad->extLocation);
+ *elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
+ break;
+ default:
+ udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
+- return -1;
++ return -EINVAL;
+ }
+
+- return etype;
++ return 1;
+ }
+
+ static int udf_insert_aext(struct inode *inode, struct extent_position epos,
+@@ -2251,20 +2299,24 @@ static int udf_insert_aext(struct inode *inode, struct extent_position epos,
+ struct kernel_lb_addr oeloc;
+ uint32_t oelen;
+ int8_t etype;
+- int err;
++ int ret;
+
+ if (epos.bh)
+ get_bh(epos.bh);
+
+- while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
++ while (1) {
++ ret = udf_next_aext(inode, &epos, &oeloc, &oelen, &etype, 0);
++ if (ret <= 0)
++ break;
+ udf_write_aext(inode, &epos, &neloc, nelen, 1);
+ neloc = oeloc;
+ nelen = (etype << 30) | oelen;
+ }
+- err = udf_add_aext(inode, &epos, &neloc, nelen, 1);
++ if (ret == 0)
++ ret = udf_add_aext(inode, &epos, &neloc, nelen, 1);
+ brelse(epos.bh);
+
+- return err;
++ return ret;
+ }
+
+ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
+@@ -2276,6 +2328,7 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
+ struct udf_inode_info *iinfo;
+ struct kernel_lb_addr eloc;
+ uint32_t elen;
++ int ret;
+
+ if (epos.bh) {
+ get_bh(epos.bh);
+@@ -2291,10 +2344,18 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
+ adsize = 0;
+
+ oepos = epos;
+- if (udf_next_aext(inode, &epos, &eloc, &elen, 1) == -1)
++ if (udf_next_aext(inode, &epos, &eloc, &elen, &etype, 1) <= 0)
+ return -1;
+
+- while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
++ while (1) {
++ ret = udf_next_aext(inode, &epos, &eloc, &elen, &etype, 1);
++ if (ret < 0) {
++ brelse(epos.bh);
++ brelse(oepos.bh);
++ return -1;
++ }
++ if (ret == 0)
++ break;
+ udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
+ if (oepos.bh != epos.bh) {
+ oepos.block = epos.block;
+@@ -2351,14 +2412,17 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
+ return (elen >> 30);
+ }
+
+-int8_t inode_bmap(struct inode *inode, sector_t block,
+- struct extent_position *pos, struct kernel_lb_addr *eloc,
+- uint32_t *elen, sector_t *offset)
++/*
++ * Returns 1 on success, -errno on error, 0 on hit EOF.
++ */
++int inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
++ struct kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset,
++ int8_t *etype)
+ {
+ unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
+ loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
+- int8_t etype;
+ struct udf_inode_info *iinfo;
++ int err = 0;
+
+ iinfo = UDF_I(inode);
+ if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
+@@ -2368,11 +2432,13 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
+ }
+ *elen = 0;
+ do {
+- etype = udf_next_aext(inode, pos, eloc, elen, 1);
+- if (etype == -1) {
+- *offset = (bcount - lbcount) >> blocksize_bits;
+- iinfo->i_lenExtents = lbcount;
+- return -1;
++ err = udf_next_aext(inode, pos, eloc, elen, etype, 1);
++ if (err <= 0) {
++ if (err == 0) {
++ *offset = (bcount - lbcount) >> blocksize_bits;
++ iinfo->i_lenExtents = lbcount;
++ }
++ return err;
+ }
+ lbcount += *elen;
+ } while (lbcount <= bcount);
+@@ -2380,5 +2446,5 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
+ udf_update_extent_cache(inode, lbcount - *elen, pos);
+ *offset = (bcount + *elen - lbcount) >> blocksize_bits;
+
+- return etype;
++ return 1;
+ }
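Across the fs/udf changes, the extent iterators move from returning the extent type directly (with -1 overloaded for both end-of-extents and I/O errors) to a tri-state convention: 1 on success, 0 on EOF, -errno on failure, with the type delivered through an int8_t *etype out-parameter. A minimal userspace sketch of consuming such an API; the iterator and all names are hypothetical:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical iterator following the patch's convention: returns 1 and
 * fills *etype/*elen on success, 0 at EOF, -errno on error. */
static int next_extent(int *cursor, int8_t *etype, uint32_t *elen)
{
    static const struct { int8_t etype; uint32_t elen; } exts[] = {
        { 0, 4096 }, { 1, 8192 },
    };

    if (*cursor < 0)
        return -EIO;            /* simulated read failure */
    if (*cursor >= 2)
        return 0;               /* clean EOF */
    *etype = exts[*cursor].etype;
    *elen = exts[(*cursor)++].elen;
    return 1;
}

int main(void)
{
    int cursor = 0, ret;
    int8_t etype;
    uint32_t elen;

    while (1) {
        ret = next_extent(&cursor, &etype, &elen);
        if (ret < 0)
            return 1;           /* error, now distinguishable from EOF */
        if (ret == 0)
            break;              /* EOF, stop cleanly */
        printf("etype=%d elen=%u\n", etype, elen);
    }
    return 0;
}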
+diff --git a/fs/udf/partition.c b/fs/udf/partition.c
+index af877991edc13a..2b85c9501bed89 100644
+--- a/fs/udf/partition.c
++++ b/fs/udf/partition.c
+@@ -282,9 +282,11 @@ static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
+ sector_t ext_offset;
+ struct extent_position epos = {};
+ uint32_t phyblock;
++ int8_t etype;
++ int err = 0;
+
+- if (inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset) !=
+- (EXT_RECORDED_ALLOCATED >> 30))
++ err = inode_bmap(inode, block, &epos, &eloc, &elen, &ext_offset, &etype);
++ if (err <= 0 || etype != (EXT_RECORDED_ALLOCATED >> 30))
+ phyblock = 0xFFFFFFFF;
+ else {
+ map = &UDF_SB(sb)->s_partmaps[partition];
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 3c78535f406b00..20dff9ed2471da 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -2454,13 +2454,14 @@ static unsigned int udf_count_free_table(struct super_block *sb,
+ uint32_t elen;
+ struct kernel_lb_addr eloc;
+ struct extent_position epos;
++ int8_t etype;
+
+ mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
+ epos.block = UDF_I(table)->i_location;
+ epos.offset = sizeof(struct unallocSpaceEntry);
+ epos.bh = NULL;
+
+- while (udf_next_aext(table, &epos, &eloc, &elen, 1) != -1)
++ while (udf_next_aext(table, &epos, &eloc, &elen, &etype, 1) > 0)
+ accum += (elen >> table->i_sb->s_blocksize_bits);
+
+ brelse(epos.bh);
+diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
+index a686c10fd709d1..4f33a4a4888613 100644
+--- a/fs/udf/truncate.c
++++ b/fs/udf/truncate.c
+@@ -69,6 +69,7 @@ void udf_truncate_tail_extent(struct inode *inode)
+ int8_t etype = -1, netype;
+ int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
++ int ret;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
+ inode->i_size == iinfo->i_lenExtents)
+@@ -85,7 +86,10 @@ void udf_truncate_tail_extent(struct inode *inode)
+ BUG();
+
+ /* Find the last extent in the file */
+- while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
++ while (1) {
++ ret = udf_next_aext(inode, &epos, &eloc, &elen, &netype, 1);
++ if (ret <= 0)
++ break;
+ etype = netype;
+ lbcount += elen;
+ if (lbcount > inode->i_size) {
+@@ -101,7 +105,8 @@ void udf_truncate_tail_extent(struct inode *inode)
+ epos.offset -= adsize;
+ extent_trunc(inode, &epos, &eloc, etype, elen, nelen);
+ epos.offset += adsize;
+- if (udf_next_aext(inode, &epos, &eloc, &elen, 1) != -1)
++ if (udf_next_aext(inode, &epos, &eloc, &elen,
++ &netype, 1) > 0)
+ udf_err(inode->i_sb,
+ "Extent after EOF in inode %u\n",
+ (unsigned)inode->i_ino);
+@@ -110,7 +115,8 @@ void udf_truncate_tail_extent(struct inode *inode)
+ }
+ /* This inode entry is in-memory only and thus we don't have to mark
+ * the inode dirty */
+- iinfo->i_lenExtents = inode->i_size;
++ if (ret == 0)
++ iinfo->i_lenExtents = inode->i_size;
+ brelse(epos.bh);
+ }
+
+@@ -124,6 +130,8 @@ void udf_discard_prealloc(struct inode *inode)
+ int8_t etype = -1;
+ struct udf_inode_info *iinfo = UDF_I(inode);
+ int bsize = i_blocksize(inode);
++ int8_t tmpetype = -1;
++ int ret;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
+ ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize))
+@@ -132,15 +140,23 @@ void udf_discard_prealloc(struct inode *inode)
+ epos.block = iinfo->i_location;
+
+ /* Find the last extent in the file */
+- while (udf_next_aext(inode, &epos, &eloc, &elen, 0) != -1) {
++ while (1) {
++ ret = udf_next_aext(inode, &epos, &eloc, &elen, &tmpetype, 0);
++ if (ret < 0)
++ goto out;
++ if (ret == 0)
++ break;
+ brelse(prev_epos.bh);
+ prev_epos = epos;
+ if (prev_epos.bh)
+ get_bh(prev_epos.bh);
+
+- etype = udf_next_aext(inode, &epos, &eloc, &elen, 1);
++ ret = udf_next_aext(inode, &epos, &eloc, &elen, &etype, 1);
++ if (ret < 0)
++ goto out;
+ lbcount += elen;
+ }
++
+ if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
+ lbcount -= elen;
+ udf_delete_aext(inode, prev_epos);
+@@ -150,6 +166,7 @@ void udf_discard_prealloc(struct inode *inode)
+ /* This inode entry is in-memory only and thus we don't have to mark
+ * the inode dirty */
+ iinfo->i_lenExtents = lbcount;
++out:
+ brelse(epos.bh);
+ brelse(prev_epos.bh);
+ }
+@@ -188,6 +205,7 @@ int udf_truncate_extents(struct inode *inode)
+ loff_t byte_offset;
+ int adsize;
+ struct udf_inode_info *iinfo = UDF_I(inode);
++ int ret = 0;
+
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
+ adsize = sizeof(struct short_ad);
+@@ -196,10 +214,12 @@ int udf_truncate_extents(struct inode *inode)
+ else
+ BUG();
+
+- etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
++ ret = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset, &etype);
++ if (ret < 0)
++ return ret;
+ byte_offset = (offset << sb->s_blocksize_bits) +
+ (inode->i_size & (sb->s_blocksize - 1));
+- if (etype == -1) {
++ if (ret == 0) {
+ /* We should extend the file? */
+ WARN_ON(byte_offset);
+ return 0;
+@@ -217,8 +237,8 @@ int udf_truncate_extents(struct inode *inode)
+ else
+ lenalloc -= sizeof(struct allocExtDesc);
+
+- while ((etype = udf_current_aext(inode, &epos, &eloc,
+- &elen, 0)) != -1) {
++ while ((ret = udf_current_aext(inode, &epos, &eloc,
++ &elen, &etype, 0)) > 0) {
+ if (etype == (EXT_NEXT_EXTENT_ALLOCDESCS >> 30)) {
+ udf_write_aext(inode, &epos, &neloc, nelen, 0);
+ if (indirect_ext_len) {
+@@ -253,6 +273,11 @@ int udf_truncate_extents(struct inode *inode)
+ }
+ }
+
++ if (ret < 0) {
++ brelse(epos.bh);
++ return ret;
++ }
++
+ if (indirect_ext_len) {
+ BUG_ON(!epos.bh);
+ udf_free_blocks(sb, NULL, &epos.block, 0, indirect_ext_len);
+diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
+index 88692512a46687..d159f20d61e89a 100644
+--- a/fs/udf/udfdecl.h
++++ b/fs/udf/udfdecl.h
+@@ -157,8 +157,9 @@ extern struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
+ extern int udf_setsize(struct inode *, loff_t);
+ extern void udf_evict_inode(struct inode *);
+ extern int udf_write_inode(struct inode *, struct writeback_control *wbc);
+-extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *,
+- struct kernel_lb_addr *, uint32_t *, sector_t *);
++extern int inode_bmap(struct inode *inode, sector_t block,
++ struct extent_position *pos, struct kernel_lb_addr *eloc,
++ uint32_t *elen, sector_t *offset, int8_t *etype);
+ int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
+ extern int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
+ struct extent_position *epos);
+@@ -169,10 +170,12 @@ extern int udf_add_aext(struct inode *, struct extent_position *,
+ extern void udf_write_aext(struct inode *, struct extent_position *,
+ struct kernel_lb_addr *, uint32_t, int);
+ extern int8_t udf_delete_aext(struct inode *, struct extent_position);
+-extern int8_t udf_next_aext(struct inode *, struct extent_position *,
+- struct kernel_lb_addr *, uint32_t *, int);
+-extern int8_t udf_current_aext(struct inode *, struct extent_position *,
+- struct kernel_lb_addr *, uint32_t *, int);
++extern int udf_next_aext(struct inode *inode, struct extent_position *epos,
++ struct kernel_lb_addr *eloc, uint32_t *elen,
++ int8_t *etype, int inc);
++extern int udf_current_aext(struct inode *inode, struct extent_position *epos,
++ struct kernel_lb_addr *eloc, uint32_t *elen,
++ int8_t *etype, int inc);
+ extern void udf_update_extra_perms(struct inode *inode, umode_t mode);
+
+ /* misc.c */
+diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
+index ec425d2834f869..e1720d93066695 100644
+--- a/include/acpi/cppc_acpi.h
++++ b/include/acpi/cppc_acpi.h
+@@ -147,6 +147,8 @@ extern int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls);
+ extern int cppc_set_enable(int cpu, bool enable);
+ extern int cppc_get_perf_caps(int cpu, struct cppc_perf_caps *caps);
+ extern bool cppc_perf_ctrs_in_pcc(void);
++extern unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf);
++extern unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq);
+ extern bool acpi_cpc_valid(void);
+ extern bool cppc_allow_fast_switch(void);
+ extern int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data);
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 1e05cc80e0485f..5a27fd533fabc8 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -616,6 +616,7 @@ enum bpf_type_flag {
+ */
+ PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS),
+
++ /* MEM can be uninitialized. */
+ MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS),
+
+ /* DYNPTR points to memory local to the bpf program. */
+@@ -681,6 +682,13 @@ enum bpf_type_flag {
+ */
+ MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS),
+
++ /* MEM is being written to, often combined with MEM_UNINIT. Non-presence
++ * of MEM_WRITE means that MEM is only being read. MEM_WRITE without the
++ * MEM_UNINIT means that memory needs to be initialized since it is also
++ * read.
++ */
++ MEM_WRITE = BIT(18 + BPF_BASE_TYPE_BITS),
++
+ __BPF_TYPE_FLAG_MAX,
+ __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1,
+ };
+@@ -738,10 +746,10 @@ enum bpf_arg_type {
+ ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET,
+ ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK,
+ ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID,
+- /* pointer to memory does not need to be initialized, helper function must fill
+- * all bytes or clear them in error case.
++ /* Pointer to memory does not need to be initialized, since helper function
++ * fills all bytes or clears them in error case.
+ */
+- ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | ARG_PTR_TO_MEM,
++ ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM,
+ /* Pointer to valid memory of size known at compile time. */
+ ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM,
+
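The new MEM_WRITE flag lets the verifier distinguish helper arguments that are written from those that are only read; MEM_UNINIT alone no longer implies a write. A standalone sketch of the decision this enables, modeled on the verifier hunks later in this patch; the flag values here are illustrative, not the kernel's actual bit positions:

#include <stdio.h>

enum {
    MEM_UNINIT = 1 << 7,    /* memory may be uninitialized on entry */
    MEM_WRITE  = 1 << 18,   /* helper writes to the memory */
};

enum access { ACC_READ, ACC_WRITE };

/* The check the patch introduces: access type for a MEM argument now
 * follows MEM_WRITE rather than being inferred from MEM_UNINIT. */
static enum access arg_access(unsigned int arg_type)
{
    return (arg_type & MEM_WRITE) ? ACC_WRITE : ACC_READ;
}

int main(void)
{
    unsigned int uninit_out = MEM_UNINIT | MEM_WRITE; /* e.g. an out-buffer */
    unsigned int read_only  = 0;                      /* plain input buffer */

    printf("uninit out-arg checked as: %s\n",
           arg_access(uninit_out) == ACC_WRITE ? "write" : "read");
    printf("input buffer checked as:  %s\n",
           arg_access(read_only) == ACC_WRITE ? "write" : "read");
    return 0;
}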
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index e4e24da16d2c39..b1fdb1554f2f9c 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -1080,15 +1080,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
+ local_irq_restore(flags);
+ }
+
+-static inline void count_memcg_page_event(struct page *page,
+- enum vm_event_item idx)
+-{
+- struct mem_cgroup *memcg = page_memcg(page);
+-
+- if (memcg)
+- count_memcg_events(memcg, idx, 1);
+-}
+-
+ static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
+ {
+@@ -1565,11 +1556,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
+ {
+ }
+
+-static inline void count_memcg_page_event(struct page *page,
+- int idx)
+-{
+-}
+-
+ static inline void count_memcg_folio_events(struct folio *folio,
+ enum vm_event_item idx, unsigned long nr)
+ {
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 8f5ac20b4c03d9..1576e7443eee50 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -3363,6 +3363,12 @@ static inline void netif_tx_wake_all_queues(struct net_device *dev)
+
+ static __always_inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
+ {
++ /* Paired with READ_ONCE() from dev_watchdog() */
++ WRITE_ONCE(dev_queue->trans_start, jiffies);
++
++ /* This barrier is paired with smp_mb() from dev_watchdog() */
++ smp_mb__before_atomic();
++
+ /* Must be an atomic op see netif_txq_try_stop() */
+ set_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state);
+ }
+@@ -3479,6 +3485,12 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ if (likely(dql_avail(&dev_queue->dql) >= 0))
+ return;
+
++ /* Paired with READ_ONCE() from dev_watchdog() */
++ WRITE_ONCE(dev_queue->trans_start, jiffies);
++
++ /* This barrier is paired with smp_mb() from dev_watchdog() */
++ smp_mb__before_atomic();
++
+ set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+
+ /*
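The netdevice.h hunks record trans_start before setting the stop bit, with smp_mb__before_atomic() pairing against an smp_mb() in dev_watchdog(), so the watchdog never observes a stopped queue together with a stale timestamp. A rough C11-atomics analogue of that publish/observe pairing; this is a userspace sketch, not the kernel primitives:

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

static _Atomic long trans_start;   /* analogue of dev_queue->trans_start */
static _Atomic int  stopped;       /* analogue of __QUEUE_STATE_DRV_XOFF */

/* Writer side: publish the timestamp strictly before the stop bit. */
static void stop_queue(void)
{
    atomic_store_explicit(&trans_start, (long)time(NULL),
                          memory_order_relaxed);   /* ~WRITE_ONCE() */
    atomic_thread_fence(memory_order_seq_cst);     /* ~smp_mb__before_atomic() */
    atomic_store_explicit(&stopped, 1, memory_order_relaxed);
}

/* Watchdog side: the paired fence means a visible stop bit implies a
 * visible, up-to-date timestamp. */
static void watchdog(void)
{
    if (atomic_load_explicit(&stopped, memory_order_relaxed)) {
        atomic_thread_fence(memory_order_seq_cst); /* ~smp_mb() */
        long start = atomic_load_explicit(&trans_start,
                                          memory_order_relaxed);
        printf("queue stopped since %ld\n", start);
    }
}

int main(void)
{
    stop_queue();
    watchdog();
    return 0;
}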
+diff --git a/include/linux/task_work.h b/include/linux/task_work.h
+index 26b8a47f41fcac..2964171856e00d 100644
+--- a/include/linux/task_work.h
++++ b/include/linux/task_work.h
+@@ -14,10 +14,14 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
+ }
+
+ enum task_work_notify_mode {
+- TWA_NONE,
++ TWA_NONE = 0,
+ TWA_RESUME,
+ TWA_SIGNAL,
+ TWA_SIGNAL_NO_IPI,
++ TWA_NMI_CURRENT,
++
++ TWA_FLAGS = 0xff00,
++ TWAF_NO_ALLOC = 0x0100,
+ };
+
+ static inline bool task_work_pending(struct task_struct *task)
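TWA_FLAGS carves the high byte of the notify argument out for modifier flags, so TWAF_NO_ALLOC can ride along with any notify mode and task_work_add() can split the two with masks. A small standalone sketch; the enum values are copied from the patched header, the caller is hypothetical:

#include <stdio.h>

enum notify_mode {
    TWA_NONE = 0,
    TWA_RESUME,
    TWA_SIGNAL,
    TWA_SIGNAL_NO_IPI,
    TWA_NMI_CURRENT,

    TWA_FLAGS     = 0xff00,  /* high byte reserved for modifiers */
    TWAF_NO_ALLOC = 0x0100,  /* don't allocate while recording KASAN stack */
};

static void add_work(int notify)
{
    int flags = notify & TWA_FLAGS;  /* split off the modifiers... */
    notify &= ~TWA_FLAGS;            /* ...leaving the plain mode */

    printf("mode=%d no_alloc=%d\n", notify, !!(flags & TWAF_NO_ALLOC));
}

int main(void)
{
    /* The kernel/sched/core.c hunk later in this patch queues work this
     * way under the rq lock, where page allocation is not allowed. */
    add_work(TWA_RESUME | TWAF_NO_ALLOC);
    return 0;
}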
+diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
+index cb8bd759e80057..9d799777c333c0 100644
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -765,7 +765,8 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
+ void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
+ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
+ u32 *fd_type, const char **buf,
+- u64 *probe_offset, u64 *probe_addr);
++ u64 *probe_offset, u64 *probe_addr,
++ unsigned long *missed);
+ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+ int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
+ #else
+@@ -805,7 +806,7 @@ static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
+ static inline int bpf_get_perf_event_info(const struct perf_event *event,
+ u32 *prog_id, u32 *fd_type,
+ const char **buf, u64 *probe_offset,
+- u64 *probe_addr)
++ u64 *probe_addr, unsigned long *missed)
+ {
+ return -EOPNOTSUPP;
+ }
+@@ -880,6 +881,7 @@ extern void perf_kprobe_destroy(struct perf_event *event);
+ extern int bpf_get_kprobe_info(const struct perf_event *event,
+ u32 *fd_type, const char **symbol,
+ u64 *probe_offset, u64 *probe_addr,
++ unsigned long *missed,
+ bool perf_type_tracepoint);
+ #endif
+ #ifdef CONFIG_UPROBE_EVENTS
+diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
+index e4a6831133f818..4763a47bf8c8a8 100644
+--- a/include/net/bluetooth/bluetooth.h
++++ b/include/net/bluetooth/bluetooth.h
+@@ -403,6 +403,7 @@ int bt_sock_register(int proto, const struct net_proto_family *ops);
+ void bt_sock_unregister(int proto);
+ void bt_sock_link(struct bt_sock_list *l, struct sock *s);
+ void bt_sock_unlink(struct bt_sock_list *l, struct sock *s);
++bool bt_sock_linked(struct bt_sock_list *l, struct sock *s);
+ struct sock *bt_sock_alloc(struct net *net, struct socket *sock,
+ struct proto *prot, int proto, gfp_t prio, int kern);
+ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+diff --git a/include/net/genetlink.h b/include/net/genetlink.h
+index c53244f2043704..e8c34aa4a640db 100644
+--- a/include/net/genetlink.h
++++ b/include/net/genetlink.h
+@@ -478,13 +478,12 @@ static inline int genlmsg_multicast(const struct genl_family *family,
+ * @skb: netlink message as socket buffer
+ * @portid: own netlink portid to avoid sending to yourself
+ * @group: offset of multicast group in groups array
+- * @flags: allocation flags
+ *
+ * This function must hold the RTNL or rcu_read_lock().
+ */
+ int genlmsg_multicast_allns(const struct genl_family *family,
+ struct sk_buff *skb, u32 portid,
+- unsigned int group, gfp_t flags);
++ unsigned int group);
+
+ /**
+ * genlmsg_unicast - unicast a netlink message
+diff --git a/include/net/sock.h b/include/net/sock.h
+index c3961050b8e394..e0be8bd9839602 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2826,6 +2826,11 @@ static inline bool sk_is_stream_unix(const struct sock *sk)
+ return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+ }
+
++static inline bool sk_is_vsock(const struct sock *sk)
++{
++ return sk->sk_family == AF_VSOCK;
++}
++
+ /**
+ * sk_eat_skb - Release a skb if it is no longer needed
+ * @sk: socket to eat this skb from
+diff --git a/include/net/xfrm.h b/include/net/xfrm.h
+index b280e7c4601160..93a9866ee481fa 100644
+--- a/include/net/xfrm.h
++++ b/include/net/xfrm.h
+@@ -342,20 +342,25 @@ struct xfrm_if_cb {
+ void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
+ void xfrm_if_unregister_cb(void);
+
++struct xfrm_dst_lookup_params {
++ struct net *net;
++ int tos;
++ int oif;
++ xfrm_address_t *saddr;
++ xfrm_address_t *daddr;
++ u32 mark;
++ __u8 ipproto;
++ union flowi_uli uli;
++};
++
+ struct net_device;
+ struct xfrm_type;
+ struct xfrm_dst;
+ struct xfrm_policy_afinfo {
+ struct dst_ops *dst_ops;
+- struct dst_entry *(*dst_lookup)(struct net *net,
+- int tos, int oif,
+- const xfrm_address_t *saddr,
+- const xfrm_address_t *daddr,
+- u32 mark);
+- int (*get_saddr)(struct net *net, int oif,
+- xfrm_address_t *saddr,
+- xfrm_address_t *daddr,
+- u32 mark);
++ struct dst_entry *(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
++ int (*get_saddr)(xfrm_address_t *saddr,
++ const struct xfrm_dst_lookup_params *params);
+ int (*fill_dst)(struct xfrm_dst *xdst,
+ struct net_device *dev,
+ const struct flowi *fl);
+@@ -1728,10 +1733,7 @@ static inline int xfrm_user_policy(struct sock *sk, int optname,
+ }
+ #endif
+
+-struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
+- const xfrm_address_t *saddr,
+- const xfrm_address_t *daddr,
+- int family, u32 mark);
++struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
+
+ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
+
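The xfrm lookup hooks trade a long positional argument list for a single struct xfrm_dst_lookup_params, which lets the series thread new inputs (ipproto, uli) through without touching every callback signature again. A generic before/after sketch of this parameter-object refactor; the types and names below are hypothetical:

#include <stdio.h>

/* Before: every new input means editing each caller and callback. */
int lookup_v1(int tos, int oif, unsigned int saddr, unsigned int daddr,
              unsigned int mark);

/* After: one parameter object; adding a field is source-compatible. */
struct lookup_params {
    int tos;
    int oif;
    unsigned int saddr, daddr;
    unsigned int mark;
    unsigned char ipproto;   /* the new input this series needs */
};

static int lookup_v2(const struct lookup_params *p)
{
    return p->oif ? 0 : -1;  /* placeholder body */
}

int main(void)
{
    struct lookup_params p = { .tos = 0, .oif = 2, .mark = 0, .ipproto = 17 };

    printf("lookup: %d\n", lookup_v2(&p));
    return 0;
}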
+diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
+index 6e2ef1d4b0028d..37f2443b3cdb0f 100644
+--- a/include/trace/events/huge_memory.h
++++ b/include/trace/events/huge_memory.h
+@@ -207,10 +207,10 @@ TRACE_EVENT(mm_khugepaged_scan_file,
+ );
+
+ TRACE_EVENT(mm_khugepaged_collapse_file,
+- TP_PROTO(struct mm_struct *mm, struct page *hpage, pgoff_t index,
+- bool is_shmem, unsigned long addr, struct file *file,
++ TP_PROTO(struct mm_struct *mm, struct folio *new_folio, pgoff_t index,
++ unsigned long addr, bool is_shmem, struct file *file,
+ int nr, int result),
+- TP_ARGS(mm, hpage, index, addr, is_shmem, file, nr, result),
++ TP_ARGS(mm, new_folio, index, addr, is_shmem, file, nr, result),
+ TP_STRUCT__entry(
+ __field(struct mm_struct *, mm)
+ __field(unsigned long, hpfn)
+@@ -224,7 +224,7 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
+
+ TP_fast_assign(
+ __entry->mm = mm;
+- __entry->hpfn = hpage ? page_to_pfn(hpage) : -1;
++ __entry->hpfn = new_folio ? folio_pfn(new_folio) : -1;
+ __entry->index = index;
+ __entry->addr = addr;
+ __entry->is_shmem = is_shmem;
+@@ -233,7 +233,7 @@ TRACE_EVENT(mm_khugepaged_collapse_file,
+ __entry->result = result;
+ ),
+
+- TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%ld, is_shmem=%d, filename=%s, nr=%d, result=%s",
++ TP_printk("mm=%p, hpage_pfn=0x%lx, index=%ld, addr=%lx, is_shmem=%d, filename=%s, nr=%d, result=%s",
+ __entry->mm,
+ __entry->hpfn,
+ __entry->index,
+diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
+index ba6e346c8d669a..431bc700bcfb93 100644
+--- a/include/uapi/linux/bpf.h
++++ b/include/uapi/linux/bpf.h
+@@ -5921,11 +5921,6 @@ enum {
+ BPF_F_MARK_ENFORCE = (1ULL << 6),
+ };
+
+-/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
+-enum {
+- BPF_F_INGRESS = (1ULL << 0),
+-};
+-
+ /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
+ enum {
+ BPF_F_TUNINFO_IPV6 = (1ULL << 0),
+@@ -6072,10 +6067,12 @@ enum {
+ BPF_F_BPRM_SECUREEXEC = (1ULL << 0),
+ };
+
+-/* Flags for bpf_redirect_map helper */
++/* Flags for bpf_redirect and bpf_redirect_map helpers */
+ enum {
+- BPF_F_BROADCAST = (1ULL << 3),
+- BPF_F_EXCLUDE_INGRESS = (1ULL << 4),
++ BPF_F_INGRESS = (1ULL << 0), /* used for skb path */
++ BPF_F_BROADCAST = (1ULL << 3), /* used for XDP path */
++ BPF_F_EXCLUDE_INGRESS = (1ULL << 4), /* used for XDP path */
++#define BPF_F_REDIRECT_FLAGS (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)
+ };
+
+ #define __bpf_md_ptr(type, name) \
+@@ -6559,20 +6556,27 @@ struct bpf_link_info {
+ __aligned_u64 file_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from file_name */
++ __u64 cookie;
+ } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
+ struct {
+ __aligned_u64 func_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from func_name */
+ __u64 addr;
++ __u64 missed;
++ __u64 cookie;
+ } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
+ struct {
+ __aligned_u64 tp_name; /* in/out */
+ __u32 name_len;
++ __u32 :32;
++ __u64 cookie;
+ } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
+ struct {
+ __u64 config;
+ __u32 type;
++ __u32 :32;
++ __u64 cookie;
+ } event; /* BPF_PERF_EVENT_EVENT */
+ };
+ } perf_event;
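With BPF_F_INGRESS moved into the shared enum, bpf_redirect() and bpf_redirect_map() draw from one flag space, and BPF_F_REDIRECT_FLAGS gives a single mask for rejecting unknown bits. A sketch of what such validation looks like; the actual validation site is outside this excerpt, so treat this as an assumption about usage:

#include <errno.h>
#include <stdio.h>

enum {
    BPF_F_INGRESS         = 1 << 0,  /* used for skb path */
    BPF_F_BROADCAST       = 1 << 3,  /* used for XDP path */
    BPF_F_EXCLUDE_INGRESS = 1 << 4,  /* used for XDP path */
};
#define BPF_F_REDIRECT_FLAGS \
    (BPF_F_INGRESS | BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS)

/* Reject any flag outside the shared redirect flag space. */
static long redirect(unsigned long long flags)
{
    if (flags & ~(unsigned long long)BPF_F_REDIRECT_FLAGS)
        return -EINVAL;
    return 0;
}

int main(void)
{
    printf("%ld\n", redirect(BPF_F_INGRESS));  /* 0 */
    printf("%ld\n", redirect(1ULL << 7));      /* -EINVAL */
    return 0;
}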
+diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
+index e0e4d4f490e87c..c8828016a66fdc 100644
+--- a/kernel/bpf/btf.c
++++ b/kernel/bpf/btf.c
+@@ -8435,6 +8435,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
+ if (!type) {
+ bpf_log(ctx->log, "relo #%u: bad type id %u\n",
+ relo_idx, relo->type_id);
++ kfree(specs);
+ return -EINVAL;
+ }
+
+diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
+index 69e78dc4bb18e8..96b0345f76c2ce 100644
+--- a/kernel/bpf/devmap.c
++++ b/kernel/bpf/devmap.c
+@@ -322,9 +322,11 @@ static int dev_map_hash_get_next_key(struct bpf_map *map, void *key,
+
+ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
+ struct xdp_frame **frames, int n,
+- struct net_device *dev)
++ struct net_device *tx_dev,
++ struct net_device *rx_dev)
+ {
+- struct xdp_txq_info txq = { .dev = dev };
++ struct xdp_txq_info txq = { .dev = tx_dev };
++ struct xdp_rxq_info rxq = { .dev = rx_dev };
+ struct xdp_buff xdp;
+ int i, nframes = 0;
+
+@@ -335,6 +337,7 @@ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
+
+ xdp_convert_frame_to_buff(xdpf, &xdp);
+ xdp.txq = &txq;
++ xdp.rxq = &rxq;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+@@ -349,7 +352,7 @@ static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
+ bpf_warn_invalid_xdp_action(NULL, xdp_prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+- trace_xdp_exception(dev, xdp_prog, act);
++ trace_xdp_exception(tx_dev, xdp_prog, act);
+ fallthrough;
+ case XDP_DROP:
+ xdp_return_frame_rx_napi(xdpf);
+@@ -377,7 +380,7 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
+ }
+
+ if (bq->xdp_prog) {
+- to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
++ to_send = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev, bq->dev_rx);
+ if (!to_send)
+ goto out;
+ }
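The devmap change wires up xdp.rxq alongside xdp.txq before running the second-stage program, so a program attached to a devmap entry can see the original ingress device. A hedged BPF-side sketch of what that enables; the program is hypothetical and needs libbpf headers to build:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Hypothetical devmap-attached program: with rxq populated by the
 * kernel-side change above, ingress_ifindex is meaningful here.
 * Before the fix, rxq was not set up on this path. */
SEC("xdp/devmap")
int xdp_devmap_log_ingress(struct xdp_md *ctx)
{
    bpf_printk("redirected frame arrived on ifindex %d",
               ctx->ingress_ifindex);
    return XDP_PASS;
}

char _license[] SEC("license") = "GPL";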
+diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
+index 3dba5bb294d8e4..41d62405c85214 100644
+--- a/kernel/bpf/helpers.c
++++ b/kernel/bpf/helpers.c
+@@ -110,7 +110,7 @@ const struct bpf_func_proto bpf_map_pop_elem_proto = {
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+- .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
++ .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
+ };
+
+ BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value)
+@@ -123,7 +123,7 @@ const struct bpf_func_proto bpf_map_peek_elem_proto = {
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_CONST_MAP_PTR,
+- .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT,
++ .arg2_type = ARG_PTR_TO_MAP_VALUE | MEM_UNINIT | MEM_WRITE,
+ };
+
+ BPF_CALL_3(bpf_map_lookup_percpu_elem, struct bpf_map *, map, void *, key, u32, cpu)
+@@ -538,7 +538,7 @@ const struct bpf_func_proto bpf_strtol_proto = {
+ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_ANYTHING,
+- .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
+ .arg4_size = sizeof(s64),
+ };
+
+@@ -568,7 +568,7 @@ const struct bpf_func_proto bpf_strtoul_proto = {
+ .arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_ANYTHING,
+- .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
+ .arg4_size = sizeof(u64),
+ };
+
+@@ -1607,7 +1607,7 @@ static const struct bpf_func_proto bpf_dynptr_from_mem_proto = {
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+- .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT,
++ .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_LOCAL | MEM_UNINIT | MEM_WRITE,
+ };
+
+ BPF_CALL_5(bpf_dynptr_read, void *, dst, u32, len, const struct bpf_dynptr_kern *, src,
+diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
+index 238d9b206bbdec..246559c3e93d0d 100644
+--- a/kernel/bpf/ringbuf.c
++++ b/kernel/bpf/ringbuf.c
+@@ -632,7 +632,7 @@ const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto = {
+ .arg1_type = ARG_CONST_MAP_PTR,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+- .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT,
++ .arg4_type = ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_RINGBUF | MEM_UNINIT | MEM_WRITE,
+ };
+
+ BPF_CALL_2(bpf_ringbuf_submit_dynptr, struct bpf_dynptr_kern *, ptr, u64, flags)
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index b1933d074f0519..8a1cadc1ff9dd9 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -3442,26 +3442,34 @@ static void bpf_perf_link_dealloc(struct bpf_link *link)
+ }
+
+ static int bpf_perf_link_fill_common(const struct perf_event *event,
+- char __user *uname, u32 ulen,
++ char __user *uname, u32 *ulenp,
+ u64 *probe_offset, u64 *probe_addr,
+- u32 *fd_type)
++ u32 *fd_type, unsigned long *missed)
+ {
+ const char *buf;
+- u32 prog_id;
++ u32 prog_id, ulen;
+ size_t len;
+ int err;
+
++ ulen = *ulenp;
+ if (!ulen ^ !uname)
+ return -EINVAL;
+
+ err = bpf_get_perf_event_info(event, &prog_id, fd_type, &buf,
+- probe_offset, probe_addr);
++ probe_offset, probe_addr, missed);
+ if (err)
+ return err;
++
++ if (buf) {
++ len = strlen(buf);
++ *ulenp = len + 1;
++ } else {
++ *ulenp = 1;
++ }
+ if (!uname)
+ return 0;
++
+ if (buf) {
+- len = strlen(buf);
+ err = bpf_copy_to_user(uname, buf, ulen, len);
+ if (err)
+ return err;
+@@ -3478,6 +3486,7 @@ static int bpf_perf_link_fill_common(const struct perf_event *event,
+ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
+ struct bpf_link_info *info)
+ {
++ unsigned long missed;
+ char __user *uname;
+ u64 addr, offset;
+ u32 ulen, type;
+@@ -3485,19 +3494,21 @@ static int bpf_perf_link_fill_kprobe(const struct perf_event *event,
+
+ uname = u64_to_user_ptr(info->perf_event.kprobe.func_name);
+ ulen = info->perf_event.kprobe.name_len;
+- err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
+- &type);
++ err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
++ &type, &missed);
+ if (err)
+ return err;
+ if (type == BPF_FD_TYPE_KRETPROBE)
+ info->perf_event.type = BPF_PERF_EVENT_KRETPROBE;
+ else
+ info->perf_event.type = BPF_PERF_EVENT_KPROBE;
+-
++ info->perf_event.kprobe.name_len = ulen;
+ info->perf_event.kprobe.offset = offset;
++ info->perf_event.kprobe.missed = missed;
+ if (!kallsyms_show_value(current_cred()))
+ addr = 0;
+ info->perf_event.kprobe.addr = addr;
++ info->perf_event.kprobe.cookie = event->bpf_cookie;
+ return 0;
+ }
+ #endif
+@@ -3513,8 +3524,8 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
+
+ uname = u64_to_user_ptr(info->perf_event.uprobe.file_name);
+ ulen = info->perf_event.uprobe.name_len;
+- err = bpf_perf_link_fill_common(event, uname, ulen, &offset, &addr,
+- &type);
++ err = bpf_perf_link_fill_common(event, uname, &ulen, &offset, &addr,
++ &type, NULL);
+ if (err)
+ return err;
+
+@@ -3522,7 +3533,9 @@ static int bpf_perf_link_fill_uprobe(const struct perf_event *event,
+ info->perf_event.type = BPF_PERF_EVENT_URETPROBE;
+ else
+ info->perf_event.type = BPF_PERF_EVENT_UPROBE;
++ info->perf_event.uprobe.name_len = ulen;
+ info->perf_event.uprobe.offset = offset;
++ info->perf_event.uprobe.cookie = event->bpf_cookie;
+ return 0;
+ }
+ #endif
+@@ -3546,11 +3559,18 @@ static int bpf_perf_link_fill_tracepoint(const struct perf_event *event,
+ {
+ char __user *uname;
+ u32 ulen;
++ int err;
+
+ uname = u64_to_user_ptr(info->perf_event.tracepoint.tp_name);
+ ulen = info->perf_event.tracepoint.name_len;
++ err = bpf_perf_link_fill_common(event, uname, &ulen, NULL, NULL, NULL, NULL);
++ if (err)
++ return err;
++
+ info->perf_event.type = BPF_PERF_EVENT_TRACEPOINT;
+- return bpf_perf_link_fill_common(event, uname, ulen, NULL, NULL, NULL);
++ info->perf_event.tracepoint.name_len = ulen;
++ info->perf_event.tracepoint.cookie = event->bpf_cookie;
++ return 0;
+ }
+
+ static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
+@@ -3558,6 +3578,7 @@ static int bpf_perf_link_fill_perf_event(const struct perf_event *event,
+ {
+ info->perf_event.event.type = event->attr.type;
+ info->perf_event.event.config = event->attr.config;
++ info->perf_event.event.cookie = event->bpf_cookie;
+ info->perf_event.type = BPF_PERF_EVENT_EVENT;
+ return 0;
+ }
+@@ -4897,7 +4918,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
+
+ err = bpf_get_perf_event_info(event, &prog_id, &fd_type,
+ &buf, &probe_offset,
+- &probe_addr);
++ &probe_addr, NULL);
+ if (!err)
+ err = bpf_task_fd_query_copy(attr, uattr, prog_id,
+ fd_type, buf,
+@@ -5668,7 +5689,7 @@ static const struct bpf_func_proto bpf_kallsyms_lookup_name_proto = {
+ .arg1_type = ARG_PTR_TO_MEM,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
+ .arg3_type = ARG_ANYTHING,
+- .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg4_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
+ .arg4_size = sizeof(u64),
+ };
+
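bpf_perf_link_fill_common() now reports the required buffer size back through *ulenp (string length plus NUL, or 1 for an empty name), which supports the usual two-call pattern where userspace first probes with no buffer. A userspace-style sketch of that pattern against a hypothetical fill function:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical fill: copies up to *ulenp bytes and writes back the size
 * the caller should have provided (strlen + 1). */
static int fill_name(const char *src, char *ubuf, unsigned int *ulenp)
{
    unsigned int need = (unsigned int)strlen(src) + 1;

    if (ubuf && *ulenp)
        snprintf(ubuf, *ulenp, "%s", src);
    *ulenp = need;
    return 0;
}

int main(void)
{
    unsigned int len = 0;
    char *buf;

    fill_name("kretprobe_example", NULL, &len);  /* 1st call: learn size */
    buf = malloc(len);
    if (!buf)
        return 1;
    fill_name("kretprobe_example", buf, &len);   /* 2nd call: fetch name */
    printf("%s (len=%u)\n", buf, len);
    free(buf);
    return 0;
}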
+diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c
+index c4ab9d6cdbe9c8..f7ef58090c7d0b 100644
+--- a/kernel/bpf/task_iter.c
++++ b/kernel/bpf/task_iter.c
+@@ -119,7 +119,7 @@ static struct task_struct *task_seq_get_next(struct bpf_iter_seq_task_common *co
+ rcu_read_lock();
+ pid = find_pid_ns(common->pid, common->ns);
+ if (pid) {
+- task = get_pid_task(pid, PIDTYPE_TGID);
++ task = get_pid_task(pid, PIDTYPE_PID);
+ *tid = common->pid;
+ }
+ rcu_read_unlock();
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 3032a464d31bbf..03b5797b8fca9e 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2799,10 +2799,16 @@ static struct btf *__find_kfunc_desc_btf(struct bpf_verifier_env *env,
+ b->module = mod;
+ b->offset = offset;
+
++ /* sort() reorders entries by value, so b may no longer point
++ * to the right entry after this
++ */
+ sort(tab->descs, tab->nr_descs, sizeof(tab->descs[0]),
+ kfunc_btf_cmp_by_off, NULL);
++ } else {
++ btf = b->btf;
+ }
+- return b->btf;
++
++ return btf;
+ }
+
+ void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab)
+@@ -6137,10 +6143,10 @@ static void coerce_reg_to_size_sx(struct bpf_reg_state *reg, int size)
+
+ /* both of s64_max/s64_min positive or negative */
+ if ((s64_max >= 0) == (s64_min >= 0)) {
+- reg->smin_value = reg->s32_min_value = s64_min;
+- reg->smax_value = reg->s32_max_value = s64_max;
+- reg->umin_value = reg->u32_min_value = s64_min;
+- reg->umax_value = reg->u32_max_value = s64_max;
++ reg->s32_min_value = reg->smin_value = s64_min;
++ reg->s32_max_value = reg->smax_value = s64_max;
++ reg->u32_min_value = reg->umin_value = s64_min;
++ reg->u32_max_value = reg->umax_value = s64_max;
+ reg->var_off = tnum_range(s64_min, s64_max);
+ return;
+ }
+@@ -7192,7 +7198,8 @@ static int check_stack_range_initialized(
+ }
+
+ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+- int access_size, bool zero_size_allowed,
++ int access_size, enum bpf_access_type access_type,
++ bool zero_size_allowed,
+ struct bpf_call_arg_meta *meta)
+ {
+ struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
+@@ -7204,7 +7211,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ return check_packet_access(env, regno, reg->off, access_size,
+ zero_size_allowed);
+ case PTR_TO_MAP_KEY:
+- if (meta && meta->raw_mode) {
++ if (access_type == BPF_WRITE) {
+ verbose(env, "R%d cannot write into %s\n", regno,
+ reg_type_str(env, reg->type));
+ return -EACCES;
+@@ -7212,15 +7219,13 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ return check_mem_region_access(env, regno, reg->off, access_size,
+ reg->map_ptr->key_size, false);
+ case PTR_TO_MAP_VALUE:
+- if (check_map_access_type(env, regno, reg->off, access_size,
+- meta && meta->raw_mode ? BPF_WRITE :
+- BPF_READ))
++ if (check_map_access_type(env, regno, reg->off, access_size, access_type))
+ return -EACCES;
+ return check_map_access(env, regno, reg->off, access_size,
+ zero_size_allowed, ACCESS_HELPER);
+ case PTR_TO_MEM:
+ if (type_is_rdonly_mem(reg->type)) {
+- if (meta && meta->raw_mode) {
++ if (access_type == BPF_WRITE) {
+ verbose(env, "R%d cannot write into %s\n", regno,
+ reg_type_str(env, reg->type));
+ return -EACCES;
+@@ -7231,7 +7236,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ zero_size_allowed);
+ case PTR_TO_BUF:
+ if (type_is_rdonly_mem(reg->type)) {
+- if (meta && meta->raw_mode) {
++ if (access_type == BPF_WRITE) {
+ verbose(env, "R%d cannot write into %s\n", regno,
+ reg_type_str(env, reg->type));
+ return -EACCES;
+@@ -7259,7 +7264,6 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ * Dynamically check it now.
+ */
+ if (!env->ops->convert_ctx_access) {
+- enum bpf_access_type atype = meta && meta->raw_mode ? BPF_WRITE : BPF_READ;
+ int offset = access_size - 1;
+
+ /* Allow zero-byte read from PTR_TO_CTX */
+@@ -7267,7 +7271,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+ return zero_size_allowed ? 0 : -EACCES;
+
+ return check_mem_access(env, env->insn_idx, regno, offset, BPF_B,
+- atype, -1, false, false);
++ access_type, -1, false, false);
+ }
+
+ fallthrough;
+@@ -7286,6 +7290,7 @@ static int check_helper_mem_access(struct bpf_verifier_env *env, int regno,
+
+ static int check_mem_size_reg(struct bpf_verifier_env *env,
+ struct bpf_reg_state *reg, u32 regno,
++ enum bpf_access_type access_type,
+ bool zero_size_allowed,
+ struct bpf_call_arg_meta *meta)
+ {
+@@ -7301,15 +7306,12 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
+ */
+ meta->msize_max_value = reg->umax_value;
+
+- /* The register is SCALAR_VALUE; the access check
+- * happens using its boundaries.
++ /* The register is SCALAR_VALUE; the access check happens using
++ * its boundaries. For unprivileged variable accesses, disable
++ * raw mode so that the program is required to initialize all
++ * the memory that the helper could just partially fill up.
+ */
+ if (!tnum_is_const(reg->var_off))
+- /* For unprivileged variable accesses, disable raw
+- * mode so that the program is required to
+- * initialize all the memory that the helper could
+- * just partially fill up.
+- */
+ meta = NULL;
+
+ if (reg->smin_value < 0) {
+@@ -7318,12 +7320,10 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
+ return -EACCES;
+ }
+
+- if (reg->umin_value == 0) {
+- err = check_helper_mem_access(env, regno - 1, 0,
+- zero_size_allowed,
+- meta);
+- if (err)
+- return err;
++ if (reg->umin_value == 0 && !zero_size_allowed) {
++ verbose(env, "R%d invalid zero-sized read: u64=[%lld,%lld]\n",
++ regno, reg->umin_value, reg->umax_value);
++ return -EACCES;
+ }
+
+ if (reg->umax_value >= BPF_MAX_VAR_SIZ) {
+@@ -7331,9 +7331,8 @@ static int check_mem_size_reg(struct bpf_verifier_env *env,
+ regno);
+ return -EACCES;
+ }
+- err = check_helper_mem_access(env, regno - 1,
+- reg->umax_value,
+- zero_size_allowed, meta);
++ err = check_helper_mem_access(env, regno - 1, reg->umax_value,
++ access_type, zero_size_allowed, meta);
+ if (!err)
+ err = mark_chain_precision(env, regno);
+ return err;
+@@ -7344,13 +7343,11 @@ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ {
+ bool may_be_null = type_may_be_null(reg->type);
+ struct bpf_reg_state saved_reg;
+- struct bpf_call_arg_meta meta;
+ int err;
+
+ if (register_is_null(reg))
+ return 0;
+
+- memset(&meta, 0, sizeof(meta));
+ /* Assuming that the register contains a value check if the memory
+ * access is safe. Temporarily save and restore the register's state as
+ * the conversion shouldn't be visible to a caller.
+@@ -7360,10 +7357,8 @@ int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
+ mark_ptr_not_null_reg(reg);
+ }
+
+- err = check_helper_mem_access(env, regno, mem_size, true, &meta);
+- /* Check access for BPF_WRITE */
+- meta.raw_mode = true;
+- err = err ?: check_helper_mem_access(env, regno, mem_size, true, &meta);
++ err = check_helper_mem_access(env, regno, mem_size, BPF_READ, true, NULL);
++ err = err ?: check_helper_mem_access(env, regno, mem_size, BPF_WRITE, true, NULL);
+
+ if (may_be_null)
+ *reg = saved_reg;
+@@ -7389,13 +7384,12 @@ static int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg
+ mark_ptr_not_null_reg(mem_reg);
+ }
+
+- err = check_mem_size_reg(env, reg, regno, true, &meta);
+- /* Check access for BPF_WRITE */
+- meta.raw_mode = true;
+- err = err ?: check_mem_size_reg(env, reg, regno, true, &meta);
++ err = check_mem_size_reg(env, reg, regno, BPF_READ, true, &meta);
++ err = err ?: check_mem_size_reg(env, reg, regno, BPF_WRITE, true, &meta);
+
+ if (may_be_null)
+ *mem_reg = saved_reg;
++
+ return err;
+ }
+
+@@ -8581,9 +8575,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ verbose(env, "invalid map_ptr to access map->key\n");
+ return -EACCES;
+ }
+- err = check_helper_mem_access(env, regno,
+- meta->map_ptr->key_size, false,
+- NULL);
++ err = check_helper_mem_access(env, regno, meta->map_ptr->key_size,
++ BPF_READ, false, NULL);
+ break;
+ case ARG_PTR_TO_MAP_VALUE:
+ if (type_may_be_null(arg_type) && register_is_null(reg))
+@@ -8598,9 +8591,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ return -EACCES;
+ }
+ meta->raw_mode = arg_type & MEM_UNINIT;
+- err = check_helper_mem_access(env, regno,
+- meta->map_ptr->value_size, false,
+- meta);
++ err = check_helper_mem_access(env, regno, meta->map_ptr->value_size,
++ arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ,
++ false, meta);
+ break;
+ case ARG_PTR_TO_PERCPU_BTF_ID:
+ if (!reg->btf_id) {
+@@ -8642,7 +8635,9 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ */
+ meta->raw_mode = arg_type & MEM_UNINIT;
+ if (arg_type & MEM_FIXED_SIZE) {
+- err = check_helper_mem_access(env, regno, fn->arg_size[arg], false, meta);
++ err = check_helper_mem_access(env, regno, fn->arg_size[arg],
++ arg_type & MEM_WRITE ? BPF_WRITE : BPF_READ,
++ false, meta);
+ if (err)
+ return err;
+ if (arg_type & MEM_ALIGNED)
+@@ -8650,10 +8645,16 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
+ }
+ break;
+ case ARG_CONST_SIZE:
+- err = check_mem_size_reg(env, reg, regno, false, meta);
++ err = check_mem_size_reg(env, reg, regno,
++ fn->arg_type[arg - 1] & MEM_WRITE ?
++ BPF_WRITE : BPF_READ,
++ false, meta);
+ break;
+ case ARG_CONST_SIZE_OR_ZERO:
+- err = check_mem_size_reg(env, reg, regno, true, meta);
++ err = check_mem_size_reg(env, reg, regno,
++ fn->arg_type[arg - 1] & MEM_WRITE ?
++ BPF_WRITE : BPF_READ,
++ true, meta);
+ break;
+ case ARG_PTR_TO_DYNPTR:
+ err = process_dynptr_func(env, regno, insn_idx, arg_type, 0);
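One subtle fix in the verifier hunks above reorders the chained assignments in coerce_reg_to_size_sx(): writing the 32-bit field first truncates the value that the 64-bit field then inherits. This standalone program demonstrates the hazard with plain integers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t v = 0x100000001LL;  /* does not fit in 32 bits */
    int64_t wide;
    int32_t narrow;

    /* Buggy order (as before the fix): the 32-bit lvalue truncates v,
     * and the 64-bit field inherits the truncated result (1). */
    wide = narrow = v;
    printf("buggy: wide=%lld narrow=%d\n", (long long)wide, narrow);

    /* Fixed order: the 64-bit field keeps the full value. */
    narrow = wide = v;
    printf("fixed: wide=%lld narrow=%d\n", (long long)wide, narrow);
    return 0;
}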
+diff --git a/kernel/sched/core.c b/kernel/sched/core.c
+index 9b406d9886541b..b6f922a20f83a5 100644
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -12050,7 +12050,9 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr)
+ return;
+ if (time_before(now, READ_ONCE(curr->mm->mm_cid_next_scan)))
+ return;
+- task_work_add(curr, work, TWA_RESUME);
++
++ /* No page allocation under rq lock */
++ task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);
+ }
+
+ void sched_mm_cid_exit_signals(struct task_struct *t)
+diff --git a/kernel/task_work.c b/kernel/task_work.c
+index 2134ac8057a94e..c969f1f26be58a 100644
+--- a/kernel/task_work.c
++++ b/kernel/task_work.c
+@@ -1,10 +1,20 @@
+ // SPDX-License-Identifier: GPL-2.0
++#include <linux/irq_work.h>
+ #include <linux/spinlock.h>
+ #include <linux/task_work.h>
+ #include <linux/resume_user_mode.h>
+
+ static struct callback_head work_exited; /* all we need is ->next == NULL */
+
++#ifdef CONFIG_IRQ_WORK
++static void task_work_set_notify_irq(struct irq_work *entry)
++{
++ test_and_set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
++}
++static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
++ IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
++#endif
++
+ /**
+ * task_work_add - ask the @task to execute @work->func()
+ * @task: the task which should run the callback
+@@ -12,7 +22,7 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
+ * @notify: how to notify the targeted task
+ *
+ * Queue @work for task_work_run() below and notify the @task if @notify
+- * is @TWA_RESUME, @TWA_SIGNAL, or @TWA_SIGNAL_NO_IPI.
++ * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI, or @TWA_NMI_CURRENT.
+ *
+ * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
+ * task and run the task_work, regardless of whether the task is currently
+@@ -24,6 +34,8 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
+ * kernel anyway.
+ * @TWA_RESUME work is run only when the task exits the kernel and returns to
+ * user mode, or before entering guest mode.
++ * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for the
++ * current @task and if the current context is NMI.
+ *
+ * Fails if the @task is exiting/exited and thus it can't process this @work.
+ * Otherwise @work->func() will be called when the @task goes through one of
+@@ -43,9 +55,27 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
+ enum task_work_notify_mode notify)
+ {
+ struct callback_head *head;
+-
+- /* record the work call stack in order to print it in KASAN reports */
+- kasan_record_aux_stack(work);
++ int flags = notify & TWA_FLAGS;
++
++ notify &= ~TWA_FLAGS;
++ if (notify == TWA_NMI_CURRENT) {
++ if (WARN_ON_ONCE(task != current))
++ return -EINVAL;
++ if (!IS_ENABLED(CONFIG_IRQ_WORK))
++ return -EINVAL;
++ } else {
++ /*
++ * Record the work call stack in order to print it in KASAN
++ * reports.
++ *
++ * Note that stack allocation can fail if the TWAF_NO_ALLOC flag
++ * is set and a new page is needed to expand the stack buffer.
++ */
++ if (flags & TWAF_NO_ALLOC)
++ kasan_record_aux_stack_noalloc(work);
++ else
++ kasan_record_aux_stack(work);
++ }
+
+ head = READ_ONCE(task->task_works);
+ do {
+@@ -66,6 +96,11 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
+ case TWA_SIGNAL_NO_IPI:
+ __set_notify_signal(task);
+ break;
++#ifdef CONFIG_IRQ_WORK
++ case TWA_NMI_CURRENT:
++ irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
++ break;
++#endif
+ default:
+ WARN_ON_ONCE(1);
+ break;
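Together with the sched/core.c hunk above, this backports TWA_NMI_CURRENT and the TWAF_NO_ALLOC flag: task_tick_mm_cid() runs under the rq lock, where kasan_record_aux_stack() must not allocate a page. A hedged usage sketch (the surrounding handler code is illustrative):

	/* From NMI context, only for the current task; notification is
	 * deferred through the per-CPU irq_work defined above.
	 */
	task_work_add(current, &twork, TWA_NMI_CURRENT);

	/* Under a lock where page allocation is unsafe (e.g. the rq lock):
	 * the KASAN aux stack is still recorded, just without allocating.
	 */
	task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC);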
+diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
+index 8127673bfc45e6..05e73d209aa87e 100644
+--- a/kernel/time/posix-clock.c
++++ b/kernel/time/posix-clock.c
+@@ -290,6 +290,9 @@ static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)
+ struct posix_clock_desc cd;
+ int err;
+
++ if (!timespec64_valid_strict(ts))
++ return -EINVAL;
++
+ err = get_clock_desc(id, &cd);
+ if (err)
+ return err;
+@@ -299,9 +302,6 @@ static int pc_clock_settime(clockid_t id, const struct timespec64 *ts)
+ goto out;
+ }
+
+- if (!timespec64_valid_strict(ts))
+- return -EINVAL;
+-
+ if (cd.clk->ops.clock_settime)
+ err = cd.clk->ops.clock_settime(cd.clk, ts);
+ else
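This posix-clock hunk fixes a reference leak as much as a validation bug: the old code ran timespec64_valid_strict() after get_clock_desc() and returned -EINVAL directly, skipping the put_clock_desc() in the out path. Checking the timespec before acquiring the descriptor removes the unbalanced exit. The resulting shape, as a sketch:

	if (!timespec64_valid_strict(ts))
		return -EINVAL;		/* nothing acquired yet, safe to return */

	err = get_clock_desc(id, &cd);	/* every exit below must put_clock_desc() */
	if (err)
		return err;
	...
	out:
	put_clock_desc(&cd);
	return err;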
+diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
+index eca858bde80470..9064f75de7e468 100644
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -1220,7 +1220,7 @@ static const struct bpf_func_proto bpf_get_func_arg_proto = {
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+- .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
+ .arg3_size = sizeof(u64),
+ };
+
+@@ -1237,7 +1237,7 @@ static const struct bpf_func_proto bpf_get_func_ret_proto = {
+ .func = get_func_ret,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+- .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg2_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_WRITE | MEM_ALIGNED,
+ .arg2_size = sizeof(u64),
+ };
+
+@@ -2217,8 +2217,6 @@ void perf_event_detach_bpf_prog(struct perf_event *event)
+
+ old_array = bpf_event_rcu_dereference(event->tp_event->prog_array);
+ ret = bpf_prog_array_copy(old_array, event->prog, NULL, 0, &new_array);
+- if (ret == -ENOENT)
+- goto unlock;
+ if (ret < 0) {
+ bpf_prog_array_delete_safe(old_array, event->prog);
+ } else {
+@@ -2389,7 +2387,8 @@ int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
+
+ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
+ u32 *fd_type, const char **buf,
+- u64 *probe_offset, u64 *probe_addr)
++ u64 *probe_offset, u64 *probe_addr,
++ unsigned long *missed)
+ {
+ bool is_tracepoint, is_syscall_tp;
+ struct bpf_prog *prog;
+@@ -2424,7 +2423,7 @@ int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
+ #ifdef CONFIG_KPROBE_EVENTS
+ if (flags & TRACE_EVENT_FL_KPROBE)
+ err = bpf_get_kprobe_info(event, fd_type, buf,
+- probe_offset, probe_addr,
++ probe_offset, probe_addr, missed,
+ event->attr.type == PERF_TYPE_TRACEPOINT);
+ #endif
+ #ifdef CONFIG_UPROBE_EVENTS
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 4f93d57cc02990..ecd869ed27670c 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -5757,6 +5757,7 @@ static const char readme_msg[] =
+ "\t $stack<index>, $stack, $retval, $comm,\n"
+ #endif
+ "\t +|-[u]<offset>(<fetcharg>), \\imm-value, \\\"imm-string\"\n"
++ "\t kernel return probes support: $retval, $arg<N>, $comm\n"
+ "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, char, string, symbol,\n"
+ "\t b<bit-width>@<bit-offset>/<container-size>, ustring,\n"
+ "\t symstr, <type>\\[<array-size>\\]\n"
+diff --git a/kernel/trace/trace_eprobe.c b/kernel/trace/trace_eprobe.c
+index 72714cbf475c78..31bb977670bdfb 100644
+--- a/kernel/trace/trace_eprobe.c
++++ b/kernel/trace/trace_eprobe.c
+@@ -220,7 +220,7 @@ static struct trace_eprobe *alloc_event_probe(const char *group,
+ if (!ep->event_system)
+ goto error;
+
+- ret = trace_probe_init(&ep->tp, this_event, group, false);
++ ret = trace_probe_init(&ep->tp, this_event, group, false, nargs);
+ if (ret < 0)
+ goto error;
+
+@@ -390,8 +390,8 @@ static int get_eprobe_size(struct trace_probe *tp, void *rec)
+
+ /* Note that we don't verify it, since the code does not come from user space */
+ static int
+-process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+- void *base)
++process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
++ void *dest, void *base)
+ {
+ unsigned long val;
+ int ret;
+@@ -438,7 +438,7 @@ __eprobe_trace_func(struct eprobe_data *edata, void *rec)
+ return;
+
+ entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
+- store_trace_args(&entry[1], &edata->ep->tp, rec, sizeof(*entry), dsize);
++ store_trace_args(&entry[1], &edata->ep->tp, rec, NULL, sizeof(*entry), dsize);
+
+ trace_event_buffer_commit(&fbuffer);
+ }
+@@ -915,6 +915,11 @@ static int __trace_eprobe_create(int argc, const char *argv[])
+ }
+ }
+
++ if (argc - 2 > MAX_TRACE_ARGS) {
++ ret = -E2BIG;
++ goto error;
++ }
++
+ mutex_lock(&event_mutex);
+ event_call = find_and_get_event(sys_name, sys_event);
+ ep = alloc_event_probe(group, event, event_call, argc - 2);
+@@ -940,7 +945,7 @@ static int __trace_eprobe_create(int argc, const char *argv[])
+
+ argc -= 2; argv += 2;
+ /* parse arguments */
+- for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
++ for (i = 0; i < argc; i++) {
+ trace_probe_log_set_index(i + 2);
+ ret = trace_eprobe_tp_update_arg(ep, argv, i);
+ if (ret)
+diff --git a/kernel/trace/trace_fprobe.c b/kernel/trace/trace_fprobe.c
+index 7d2ddbcfa377cf..f26bb8a90cb541 100644
+--- a/kernel/trace/trace_fprobe.c
++++ b/kernel/trace/trace_fprobe.c
+@@ -4,6 +4,7 @@
+ * Copyright (C) 2022 Google LLC.
+ */
+ #define pr_fmt(fmt) "trace_fprobe: " fmt
++#include <asm/ptrace.h>
+
+ #include <linux/fprobe.h>
+ #include <linux/module.h>
+@@ -129,8 +130,8 @@ static bool trace_fprobe_is_registered(struct trace_fprobe *tf)
+ * from user space.
+ */
+ static int
+-process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+- void *base)
++process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
++ void *dest, void *base)
+ {
+ struct pt_regs *regs = rec;
+ unsigned long val;
+@@ -152,6 +153,9 @@ process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+ case FETCH_OP_ARG:
+ val = regs_get_kernel_argument(regs, code->param);
+ break;
++ case FETCH_OP_EDATA:
++ val = *(unsigned long *)((unsigned long)edata + code->offset);
++ break;
+ #endif
+ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
+ code++;
+@@ -184,7 +188,7 @@ __fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+- dsize = __get_data_size(&tf->tp, regs);
++ dsize = __get_data_size(&tf->tp, regs, NULL);
+
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+ sizeof(*entry) + tf->tp.size + dsize);
+@@ -194,7 +198,7 @@ __fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ fbuffer.regs = regs;
+ entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
+ entry->ip = entry_ip;
+- store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
++ store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);
+
+ trace_event_buffer_commit(&fbuffer);
+ }
+@@ -210,11 +214,24 @@ fentry_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ }
+ NOKPROBE_SYMBOL(fentry_trace_func);
+
+-/* Kretprobe handler */
++/* function exit handler */
++static int trace_fprobe_entry_handler(struct fprobe *fp, unsigned long entry_ip,
++ unsigned long ret_ip, struct pt_regs *regs,
++ void *entry_data)
++{
++ struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
++
++ if (tf->tp.entry_arg)
++ store_trace_entry_data(entry_data, &tf->tp, regs);
++
++ return 0;
++}
++NOKPROBE_SYMBOL(trace_fprobe_entry_handler);
++
+ static nokprobe_inline void
+ __fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ unsigned long ret_ip, struct pt_regs *regs,
+- struct trace_event_file *trace_file)
++ void *entry_data, struct trace_event_file *trace_file)
+ {
+ struct fexit_trace_entry_head *entry;
+ struct trace_event_buffer fbuffer;
+@@ -227,7 +244,7 @@ __fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+- dsize = __get_data_size(&tf->tp, regs);
++ dsize = __get_data_size(&tf->tp, regs, entry_data);
+
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+ sizeof(*entry) + tf->tp.size + dsize);
+@@ -238,19 +255,19 @@ __fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ entry = fbuffer.entry = ring_buffer_event_data(fbuffer.event);
+ entry->func = entry_ip;
+ entry->ret_ip = ret_ip;
+- store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
++ store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);
+
+ trace_event_buffer_commit(&fbuffer);
+ }
+
+ static void
+ fexit_trace_func(struct trace_fprobe *tf, unsigned long entry_ip,
+- unsigned long ret_ip, struct pt_regs *regs)
++ unsigned long ret_ip, struct pt_regs *regs, void *entry_data)
+ {
+ struct event_file_link *link;
+
+ trace_probe_for_each_link_rcu(link, &tf->tp)
+- __fexit_trace_func(tf, entry_ip, ret_ip, regs, link->file);
++ __fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data, link->file);
+ }
+ NOKPROBE_SYMBOL(fexit_trace_func);
+
+@@ -269,7 +286,7 @@ static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ if (hlist_empty(head))
+ return 0;
+
+- dsize = __get_data_size(&tf->tp, regs);
++ dsize = __get_data_size(&tf->tp, regs, NULL);
+ __size = sizeof(*entry) + tf->tp.size + dsize;
+ size = ALIGN(__size + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
+@@ -280,7 +297,7 @@ static int fentry_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
+
+ entry->ip = entry_ip;
+ memset(&entry[1], 0, dsize);
+- store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
++ store_trace_args(&entry[1], &tf->tp, regs, NULL, sizeof(*entry), dsize);
+ perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+ head, NULL);
+ return 0;
+@@ -289,7 +306,8 @@ NOKPROBE_SYMBOL(fentry_perf_func);
+
+ static void
+ fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
+- unsigned long ret_ip, struct pt_regs *regs)
++ unsigned long ret_ip, struct pt_regs *regs,
++ void *entry_data)
+ {
+ struct trace_event_call *call = trace_probe_event_call(&tf->tp);
+ struct fexit_trace_entry_head *entry;
+@@ -301,7 +319,7 @@ fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
+ if (hlist_empty(head))
+ return;
+
+- dsize = __get_data_size(&tf->tp, regs);
++ dsize = __get_data_size(&tf->tp, regs, entry_data);
+ __size = sizeof(*entry) + tf->tp.size + dsize;
+ size = ALIGN(__size + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
+@@ -312,7 +330,7 @@ fexit_perf_func(struct trace_fprobe *tf, unsigned long entry_ip,
+
+ entry->func = entry_ip;
+ entry->ret_ip = ret_ip;
+- store_trace_args(&entry[1], &tf->tp, regs, sizeof(*entry), dsize);
++ store_trace_args(&entry[1], &tf->tp, regs, entry_data, sizeof(*entry), dsize);
+ perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+ head, NULL);
+ }
+@@ -343,10 +361,10 @@ static void fexit_dispatcher(struct fprobe *fp, unsigned long entry_ip,
+ struct trace_fprobe *tf = container_of(fp, struct trace_fprobe, fp);
+
+ if (trace_probe_test_flag(&tf->tp, TP_FLAG_TRACE))
+- fexit_trace_func(tf, entry_ip, ret_ip, regs);
++ fexit_trace_func(tf, entry_ip, ret_ip, regs, entry_data);
+ #ifdef CONFIG_PERF_EVENTS
+ if (trace_probe_test_flag(&tf->tp, TP_FLAG_PROFILE))
+- fexit_perf_func(tf, entry_ip, ret_ip, regs);
++ fexit_perf_func(tf, entry_ip, ret_ip, regs, entry_data);
+ #endif
+ }
+ NOKPROBE_SYMBOL(fexit_dispatcher);
+@@ -389,7 +407,7 @@ static struct trace_fprobe *alloc_trace_fprobe(const char *group,
+ tf->tpoint = tpoint;
+ tf->fp.nr_maxactive = maxactive;
+
+- ret = trace_probe_init(&tf->tp, event, group, false);
++ ret = trace_probe_init(&tf->tp, event, group, false, nargs);
+ if (ret < 0)
+ goto error;
+
+@@ -1085,6 +1103,10 @@ static int __trace_fprobe_create(int argc, const char *argv[])
+ argc = new_argc;
+ argv = new_argv;
+ }
++ if (argc > MAX_TRACE_ARGS) {
++ ret = -E2BIG;
++ goto out;
++ }
+
+ /* setup a probe */
+ tf = alloc_trace_fprobe(group, event, symbol, tpoint, maxactive,
+@@ -1101,7 +1123,7 @@ static int __trace_fprobe_create(int argc, const char *argv[])
+ (unsigned long)tf->tpoint->probestub);
+
+ /* parse arguments */
+- for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
++ for (i = 0; i < argc; i++) {
+ trace_probe_log_set_index(i + 2);
+ ctx.offset = 0;
+ ret = traceprobe_parse_probe_arg(&tf->tp, i, argv[i], &ctx);
+@@ -1109,6 +1131,11 @@ static int __trace_fprobe_create(int argc, const char *argv[])
+ goto error; /* This can be -ENOMEM */
+ }
+
++ if (is_return && tf->tp.entry_arg) {
++ tf->fp.entry_handler = trace_fprobe_entry_handler;
++ tf->fp.entry_data_size = traceprobe_get_entry_data_size(&tf->tp);
++ }
++
+ ret = traceprobe_set_print_fmt(&tf->tp,
+ is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL);
+ if (ret < 0)
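With trace_fprobe_entry_handler() wired up, a function-exit fprobe event can reference entry arguments: the handler snapshots them into the per-instance entry_data at function entry, and process_fetch_insn() replays them through FETCH_OP_EDATA at exit. A hedged usage example, assuming the standard fprobe dynamic-events syntax (myexit is an illustrative event name):

	echo 'f:myexit vfs_read%return $arg1 $retval' >> /sys/kernel/tracing/dynamic_events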
+diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
+index 47812aa16bb574..12d997bb3e789b 100644
+--- a/kernel/trace/trace_kprobe.c
++++ b/kernel/trace/trace_kprobe.c
+@@ -290,7 +290,7 @@ static struct trace_kprobe *alloc_trace_kprobe(const char *group,
+ INIT_HLIST_NODE(&tk->rp.kp.hlist);
+ INIT_LIST_HEAD(&tk->rp.kp.list);
+
+- ret = trace_probe_init(&tk->tp, event, group, false);
++ ret = trace_probe_init(&tk->tp, event, group, false, nargs);
+ if (ret < 0)
+ goto error;
+
+@@ -740,6 +740,9 @@ static unsigned int number_of_same_symbols(char *func_name)
+ return ctx.count;
+ }
+
++static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
++ struct pt_regs *regs);
++
+ static int __trace_kprobe_create(int argc, const char *argv[])
+ {
+ /*
+@@ -929,6 +932,10 @@ static int __trace_kprobe_create(int argc, const char *argv[])
+ argc = new_argc;
+ argv = new_argv;
+ }
++ if (argc > MAX_TRACE_ARGS) {
++ ret = -E2BIG;
++ goto out;
++ }
+
+ /* setup a probe */
+ tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
+@@ -941,13 +948,18 @@ static int __trace_kprobe_create(int argc, const char *argv[])
+ }
+
+ /* parse arguments */
+- for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
++ for (i = 0; i < argc; i++) {
+ trace_probe_log_set_index(i + 2);
+ ctx.offset = 0;
+ ret = traceprobe_parse_probe_arg(&tk->tp, i, argv[i], &ctx);
+ if (ret)
+ goto error; /* This can be -ENOMEM */
+ }
++ /* entry handler for kretprobe */
++ if (is_return && tk->tp.entry_arg) {
++ tk->rp.entry_handler = trace_kprobe_entry_handler;
++ tk->rp.data_size = traceprobe_get_entry_data_size(&tk->tp);
++ }
+
+ ptype = is_return ? PROBE_PRINT_RETURN : PROBE_PRINT_NORMAL;
+ ret = traceprobe_set_print_fmt(&tk->tp, ptype);
+@@ -1249,6 +1261,12 @@ static const struct file_operations kprobe_events_ops = {
+ .write = probes_write,
+ };
+
++static unsigned long trace_kprobe_missed(struct trace_kprobe *tk)
++{
++ return trace_kprobe_is_return(tk) ?
++ tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
++}
++
+ /* Probes profiling interfaces */
+ static int probes_profile_seq_show(struct seq_file *m, void *v)
+ {
+@@ -1260,8 +1278,7 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
+ return 0;
+
+ tk = to_trace_kprobe(ev);
+- nmissed = trace_kprobe_is_return(tk) ?
+- tk->rp.kp.nmissed + tk->rp.nmissed : tk->rp.kp.nmissed;
++ nmissed = trace_kprobe_missed(tk);
+ seq_printf(m, " %-44s %15lu %15lu\n",
+ trace_probe_name(&tk->tp),
+ trace_kprobe_nhit(tk),
+@@ -1298,8 +1315,8 @@ static const struct file_operations kprobe_profile_ops = {
+
+ /* Note that we don't verify it, since the code does not come from user space */
+ static int
+-process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+- void *base)
++process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
++ void *dest, void *base)
+ {
+ struct pt_regs *regs = rec;
+ unsigned long val;
+@@ -1324,6 +1341,9 @@ process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+ case FETCH_OP_ARG:
+ val = regs_get_kernel_argument(regs, code->param);
+ break;
++ case FETCH_OP_EDATA:
++ val = *(unsigned long *)((unsigned long)edata + code->offset);
++ break;
+ #endif
+ case FETCH_NOP_SYMBOL: /* Ignore a place holder */
+ code++;
+@@ -1354,7 +1374,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+- dsize = __get_data_size(&tk->tp, regs);
++ dsize = __get_data_size(&tk->tp, regs, NULL);
+
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+ sizeof(*entry) + tk->tp.size + dsize);
+@@ -1363,7 +1383,7 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
+
+ fbuffer.regs = regs;
+ entry->ip = (unsigned long)tk->rp.kp.addr;
+- store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
++ store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
+
+ trace_event_buffer_commit(&fbuffer);
+ }
+@@ -1379,6 +1399,31 @@ kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
+ NOKPROBE_SYMBOL(kprobe_trace_func);
+
+ /* Kretprobe handler */
++
++static int trace_kprobe_entry_handler(struct kretprobe_instance *ri,
++ struct pt_regs *regs)
++{
++ struct kretprobe *rp = get_kretprobe(ri);
++ struct trace_kprobe *tk;
++
++ /*
++ * There is a small chance that get_kretprobe(ri) returns NULL when
++ * the kretprobe is unregistered on another CPU between kretprobe's
++ * trampoline_handler and this function.
++ */
++ if (unlikely(!rp))
++ return -ENOENT;
++
++ tk = container_of(rp, struct trace_kprobe, rp);
++
++ /* store argument values into ri->data as entry data */
++ if (tk->tp.entry_arg)
++ store_trace_entry_data(ri->data, &tk->tp, regs);
++
++ return 0;
++}
++
++
+ static nokprobe_inline void
+ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
+ struct pt_regs *regs,
+@@ -1394,7 +1439,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+- dsize = __get_data_size(&tk->tp, regs);
++ dsize = __get_data_size(&tk->tp, regs, ri->data);
+
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file,
+ sizeof(*entry) + tk->tp.size + dsize);
+@@ -1404,7 +1449,7 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
+ fbuffer.regs = regs;
+ entry->func = (unsigned long)tk->rp.kp.addr;
+ entry->ret_ip = get_kretprobe_retaddr(ri);
+- store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
++ store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
+
+ trace_event_buffer_commit(&fbuffer);
+ }
+@@ -1552,7 +1597,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
+ if (hlist_empty(head))
+ return 0;
+
+- dsize = __get_data_size(&tk->tp, regs);
++ dsize = __get_data_size(&tk->tp, regs, NULL);
+ __size = sizeof(*entry) + tk->tp.size + dsize;
+ size = ALIGN(__size + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
+@@ -1563,7 +1608,7 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
+
+ entry->ip = (unsigned long)tk->rp.kp.addr;
+ memset(&entry[1], 0, dsize);
+- store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
++ store_trace_args(&entry[1], &tk->tp, regs, NULL, sizeof(*entry), dsize);
+ perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+ head, NULL);
+ return 0;
+@@ -1588,7 +1633,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
+ if (hlist_empty(head))
+ return;
+
+- dsize = __get_data_size(&tk->tp, regs);
++ dsize = __get_data_size(&tk->tp, regs, ri->data);
+ __size = sizeof(*entry) + tk->tp.size + dsize;
+ size = ALIGN(__size + sizeof(u32), sizeof(u64));
+ size -= sizeof(u32);
+@@ -1599,7 +1644,7 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
+
+ entry->func = (unsigned long)tk->rp.kp.addr;
+ entry->ret_ip = get_kretprobe_retaddr(ri);
+- store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
++ store_trace_args(&entry[1], &tk->tp, regs, ri->data, sizeof(*entry), dsize);
+ perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+ head, NULL);
+ }
+@@ -1607,7 +1652,8 @@ NOKPROBE_SYMBOL(kretprobe_perf_func);
+
+ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
+ const char **symbol, u64 *probe_offset,
+- u64 *probe_addr, bool perf_type_tracepoint)
++ u64 *probe_addr, unsigned long *missed,
++ bool perf_type_tracepoint)
+ {
+ const char *pevent = trace_event_name(event->tp_event);
+ const char *group = event->tp_event->class->system;
+@@ -1626,6 +1672,8 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
+ *probe_addr = kallsyms_show_value(current_cred()) ?
+ (unsigned long)tk->rp.kp.addr : 0;
+ *symbol = tk->symbol;
++ if (missed)
++ *missed = trace_kprobe_missed(tk);
+ return 0;
+ }
+ #endif /* CONFIG_PERF_EVENTS */
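The kretprobe side mirrors the fprobe change: when a return probe uses $argN, trace_kprobe_entry_handler() is installed to copy the arguments into ri->data at function entry, sized by traceprobe_get_entry_data_size(). A hedged tracefs example (event and symbol names are illustrative):

	echo 'r:myretprobe vfs_read $arg1 $retval' >> /sys/kernel/tracing/kprobe_events
	echo 1 > /sys/kernel/tracing/events/kprobes/myretprobe/enable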
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index ae162ba36a4803..8c73156a7eb94c 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -275,7 +275,7 @@ int traceprobe_parse_event_name(const char **pevent, const char **pgroup,
+ }
+ trace_probe_log_err(offset, NO_EVENT_NAME);
+ return -EINVAL;
+- } else if (len > MAX_EVENT_NAME_LEN) {
++ } else if (len >= MAX_EVENT_NAME_LEN) {
+ trace_probe_log_err(offset, EVENT_TOO_LONG);
+ return -EINVAL;
+ }
+@@ -598,6 +598,8 @@ static int parse_btf_field(char *fieldname, const struct btf_type *type,
+ return 0;
+ }
+
++static int __store_entry_arg(struct trace_probe *tp, int argnum);
++
+ static int parse_btf_arg(char *varname,
+ struct fetch_insn **pcode, struct fetch_insn *end,
+ struct traceprobe_parse_context *ctx)
+@@ -622,11 +624,7 @@ static int parse_btf_arg(char *varname,
+ return -EOPNOTSUPP;
+ }
+
+- if (ctx->flags & TPARG_FL_RETURN) {
+- if (strcmp(varname, "$retval") != 0) {
+- trace_probe_log_err(ctx->offset, NO_BTFARG);
+- return -ENOENT;
+- }
++ if (ctx->flags & TPARG_FL_RETURN && !strcmp(varname, "$retval")) {
+ code->op = FETCH_OP_RETVAL;
+ /* Check whether the function return type is not void */
+ if (query_btf_context(ctx) == 0) {
+@@ -658,11 +656,21 @@ static int parse_btf_arg(char *varname,
+ const char *name = btf_name_by_offset(ctx->btf, params[i].name_off);
+
+ if (name && !strcmp(name, varname)) {
+- code->op = FETCH_OP_ARG;
+- if (ctx->flags & TPARG_FL_TPOINT)
+- code->param = i + 1;
+- else
+- code->param = i;
++ if (tparg_is_function_entry(ctx->flags)) {
++ code->op = FETCH_OP_ARG;
++ if (ctx->flags & TPARG_FL_TPOINT)
++ code->param = i + 1;
++ else
++ code->param = i;
++ } else if (tparg_is_function_return(ctx->flags)) {
++ code->op = FETCH_OP_EDATA;
++ ret = __store_entry_arg(ctx->tp, i);
++ if (ret < 0) {
++ /* internal error */
++ return ret;
++ }
++ code->offset = ret;
++ }
+ tid = params[i].type;
+ goto found;
+ }
+@@ -759,6 +767,110 @@ static int check_prepare_btf_string_fetch(char *typename,
+
+ #endif
+
++#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
++
++static int __store_entry_arg(struct trace_probe *tp, int argnum)
++{
++ struct probe_entry_arg *earg = tp->entry_arg;
++ bool match = false;
++ int i, offset;
++
++ if (!earg) {
++ earg = kzalloc(sizeof(*tp->entry_arg), GFP_KERNEL);
++ if (!earg)
++ return -ENOMEM;
++ earg->size = 2 * tp->nr_args + 1;
++ earg->code = kcalloc(earg->size, sizeof(struct fetch_insn),
++ GFP_KERNEL);
++ if (!earg->code) {
++ kfree(earg);
++ return -ENOMEM;
++ }
++ /* Fill the code buffer with 'end' to simplify it */
++ for (i = 0; i < earg->size; i++)
++ earg->code[i].op = FETCH_OP_END;
++ tp->entry_arg = earg;
++ }
++
++ offset = 0;
++ for (i = 0; i < earg->size - 1; i++) {
++ switch (earg->code[i].op) {
++ case FETCH_OP_END:
++ earg->code[i].op = FETCH_OP_ARG;
++ earg->code[i].param = argnum;
++ earg->code[i + 1].op = FETCH_OP_ST_EDATA;
++ earg->code[i + 1].offset = offset;
++ return offset;
++ case FETCH_OP_ARG:
++ match = (earg->code[i].param == argnum);
++ break;
++ case FETCH_OP_ST_EDATA:
++ offset = earg->code[i].offset;
++ if (match)
++ return offset;
++ offset += sizeof(unsigned long);
++ break;
++ default:
++ break;
++ }
++ }
++ return -ENOSPC;
++}
++
++int traceprobe_get_entry_data_size(struct trace_probe *tp)
++{
++ struct probe_entry_arg *earg = tp->entry_arg;
++ int i, size = 0;
++
++ if (!earg)
++ return 0;
++
++ for (i = 0; i < earg->size; i++) {
++ switch (earg->code[i].op) {
++ case FETCH_OP_END:
++ goto out;
++ case FETCH_OP_ST_EDATA:
++ size = earg->code[i].offset + sizeof(unsigned long);
++ break;
++ default:
++ break;
++ }
++ }
++out:
++ return size;
++}
++
++void store_trace_entry_data(void *edata, struct trace_probe *tp, struct pt_regs *regs)
++{
++ struct probe_entry_arg *earg = tp->entry_arg;
++ unsigned long val = 0;
++ int i;
++
++ if (!earg)
++ return;
++
++ for (i = 0; i < earg->size; i++) {
++ struct fetch_insn *code = &earg->code[i];
++
++ switch (code->op) {
++ case FETCH_OP_ARG:
++ val = regs_get_kernel_argument(regs, code->param);
++ break;
++ case FETCH_OP_ST_EDATA:
++ *(unsigned long *)((unsigned long)edata + code->offset) = val;
++ break;
++ case FETCH_OP_END:
++ goto end;
++ default:
++ break;
++ }
++ }
++end:
++ return;
++}
++NOKPROBE_SYMBOL(store_trace_entry_data);
++#endif
++
+ #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long))
+
+ /* Parse $vars. @orig_arg points '$', which syncs to @ctx->offset */
+@@ -834,7 +946,7 @@ static int parse_probe_vars(char *orig_arg, const struct fetch_type *t,
+
+ #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
+ len = str_has_prefix(arg, "arg");
+- if (len && tparg_is_function_entry(ctx->flags)) {
++ if (len) {
+ ret = kstrtoul(arg + len, 10, &param);
+ if (ret)
+ goto inval;
+@@ -843,15 +955,29 @@ static int parse_probe_vars(char *orig_arg, const struct fetch_type *t,
+ err = TP_ERR_BAD_ARG_NUM;
+ goto inval;
+ }
++ param--; /* argN starts from 1, but internal arg[N] starts from 0 */
+
+- code->op = FETCH_OP_ARG;
+- code->param = (unsigned int)param - 1;
+- /*
+- * The tracepoint probe will probe a stub function, and the
+- * first parameter of the stub is a dummy and should be ignored.
+- */
+- if (ctx->flags & TPARG_FL_TPOINT)
+- code->param++;
++ if (tparg_is_function_entry(ctx->flags)) {
++ code->op = FETCH_OP_ARG;
++ code->param = (unsigned int)param;
++ /*
++ * The tracepoint probe will probe a stub function, and the
++ * first parameter of the stub is a dummy and should be ignored.
++ */
++ if (ctx->flags & TPARG_FL_TPOINT)
++ code->param++;
++ } else if (tparg_is_function_return(ctx->flags)) {
++ /* function entry argument access from return probe */
++ ret = __store_entry_arg(ctx->tp, param);
++ if (ret < 0) /* This can only be an internal error */
++ return ret;
++
++ code->op = FETCH_OP_EDATA;
++ code->offset = ret;
++ } else {
++ err = TP_ERR_NOFENTRY_ARGS;
++ goto inval;
++ }
+ return 0;
+ }
+ #endif
+@@ -1041,7 +1167,8 @@ parse_probe_arg(char *arg, const struct fetch_type *type,
+ break;
+ default:
+ if (isalpha(arg[0]) || arg[0] == '_') { /* BTF variable */
+- if (!tparg_is_function_entry(ctx->flags)) {
++ if (!tparg_is_function_entry(ctx->flags) &&
++ !tparg_is_function_return(ctx->flags)) {
+ trace_probe_log_err(ctx->offset, NOSUP_BTFARG);
+ return -EINVAL;
+ }
+@@ -1383,9 +1510,7 @@ int traceprobe_parse_probe_arg(struct trace_probe *tp, int i, const char *arg,
+ struct probe_arg *parg = &tp->args[i];
+ const char *body;
+
+- /* Increment count for freeing args in error case */
+- tp->nr_args++;
+-
++ ctx->tp = tp;
+ body = strchr(arg, '=');
+ if (body) {
+ if (body - arg > MAX_ARG_NAME_LEN) {
+@@ -1442,7 +1567,8 @@ static int argv_has_var_arg(int argc, const char *argv[], int *args_idx,
+ if (str_has_prefix(argv[i], "$arg")) {
+ trace_probe_log_set_index(i + 2);
+
+- if (!tparg_is_function_entry(ctx->flags)) {
++ if (!tparg_is_function_entry(ctx->flags) &&
++ !tparg_is_function_return(ctx->flags)) {
+ trace_probe_log_err(0, NOFENTRY_ARGS);
+ return -EINVAL;
+ }
+@@ -1765,12 +1891,18 @@ void trace_probe_cleanup(struct trace_probe *tp)
+ for (i = 0; i < tp->nr_args; i++)
+ traceprobe_free_probe_arg(&tp->args[i]);
+
++ if (tp->entry_arg) {
++ kfree(tp->entry_arg->code);
++ kfree(tp->entry_arg);
++ tp->entry_arg = NULL;
++ }
++
+ if (tp->event)
+ trace_probe_unlink(tp);
+ }
+
+ int trace_probe_init(struct trace_probe *tp, const char *event,
+- const char *group, bool alloc_filter)
++ const char *group, bool alloc_filter, int nargs)
+ {
+ struct trace_event_call *call;
+ size_t size = sizeof(struct trace_probe_event);
+@@ -1806,6 +1938,11 @@ int trace_probe_init(struct trace_probe *tp, const char *event,
+ goto error;
+ }
+
++ tp->nr_args = nargs;
++ /* Make sure pointers in args[] are NULL */
++ if (nargs)
++ memset(tp->args, 0, sizeof(tp->args[0]) * nargs);
++
+ return 0;
+
+ error:
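To make __store_entry_arg() concrete: it lazily builds a small fetch program in tp->entry_arg->code, appending one FETCH_OP_ARG/FETCH_OP_ST_EDATA pair per distinct argument and deduplicating repeats by returning the already-assigned offset. On a 64-bit kernel, a return probe referencing $arg2 and then $arg1 would end up with (a worked illustration, not literal patch output):

	code[0] = FETCH_OP_ARG,      .param  = 1	/* $arg2; params are 0-based internally */
	code[1] = FETCH_OP_ST_EDATA, .offset = 0
	code[2] = FETCH_OP_ARG,      .param  = 0	/* $arg1 */
	code[3] = FETCH_OP_ST_EDATA, .offset = 8
	code[4] = FETCH_OP_END

traceprobe_get_entry_data_size() then reports 16 bytes, which the kretprobe and fprobe registration paths pass on as the per-instance entry data size.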
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index c1877d0182691c..cef3a50628a3e4 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -92,6 +92,7 @@ enum fetch_op {
+ FETCH_OP_ARG, /* Function argument : .param */
+ FETCH_OP_FOFFS, /* File offset: .immediate */
+ FETCH_OP_DATA, /* Allocated data: .data */
++ FETCH_OP_EDATA, /* Entry data: .offset */
+ // Stage 2 (dereference) op
+ FETCH_OP_DEREF, /* Dereference: .offset */
+ FETCH_OP_UDEREF, /* User-space Dereference: .offset */
+@@ -102,6 +103,7 @@ enum fetch_op {
+ FETCH_OP_ST_STRING, /* String: .offset, .size */
+ FETCH_OP_ST_USTRING, /* User String: .offset, .size */
+ FETCH_OP_ST_SYMSTR, /* Kernel Symbol String: .offset, .size */
++ FETCH_OP_ST_EDATA, /* Store Entry Data: .offset */
+ // Stage 4 (modify) op
+ FETCH_OP_MOD_BF, /* Bitfield: .basesize, .lshift, .rshift */
+ // Stage 5 (loop) op
+@@ -232,6 +234,11 @@ struct probe_arg {
+ const struct fetch_type *type; /* Type of this argument */
+ };
+
++struct probe_entry_arg {
++ struct fetch_insn *code;
++ unsigned int size; /* The entry data size */
++};
++
+ struct trace_uprobe_filter {
+ rwlock_t rwlock;
+ int nr_systemwide;
+@@ -253,6 +260,7 @@ struct trace_probe {
+ struct trace_probe_event *event;
+ ssize_t size; /* trace entry size */
+ unsigned int nr_args;
++ struct probe_entry_arg *entry_arg; /* This is only for return probe */
+ struct probe_arg args[];
+ };
+
+@@ -338,7 +346,7 @@ static inline bool trace_probe_has_single_file(struct trace_probe *tp)
+ }
+
+ int trace_probe_init(struct trace_probe *tp, const char *event,
+- const char *group, bool alloc_filter);
++ const char *group, bool alloc_filter, int nargs);
+ void trace_probe_cleanup(struct trace_probe *tp);
+ int trace_probe_append(struct trace_probe *tp, struct trace_probe *to);
+ void trace_probe_unlink(struct trace_probe *tp);
+@@ -355,6 +363,18 @@ int trace_probe_create(const char *raw_command, int (*createfn)(int, const char
+ int trace_probe_print_args(struct trace_seq *s, struct probe_arg *args, int nr_args,
+ u8 *data, void *field);
+
++#ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
++int traceprobe_get_entry_data_size(struct trace_probe *tp);
++/* This is a runtime function to store entry data */
++void store_trace_entry_data(void *edata, struct trace_probe *tp, struct pt_regs *regs);
++#else /* !CONFIG_HAVE_FUNCTION_ARG_ACCESS_API */
++static inline int traceprobe_get_entry_data_size(struct trace_probe *tp)
++{
++ return 0;
++}
++#define store_trace_entry_data(edata, tp, regs) do { } while (0)
++#endif
++
+ #define trace_probe_for_each_link(pos, tp) \
+ list_for_each_entry(pos, &(tp)->event->files, list)
+ #define trace_probe_for_each_link_rcu(pos, tp) \
+@@ -381,6 +401,11 @@ static inline bool tparg_is_function_entry(unsigned int flags)
+ return (flags & TPARG_FL_LOC_MASK) == (TPARG_FL_KERNEL | TPARG_FL_FENTRY);
+ }
+
++static inline bool tparg_is_function_return(unsigned int flags)
++{
++ return (flags & TPARG_FL_LOC_MASK) == (TPARG_FL_KERNEL | TPARG_FL_RETURN);
++}
++
+ struct traceprobe_parse_context {
+ struct trace_event_call *event;
+ /* BTF related parameters */
+@@ -392,6 +417,7 @@ struct traceprobe_parse_context {
+ const struct btf_type *last_type; /* Saved type */
+ u32 last_bitoffs; /* Saved bitoffs */
+ u32 last_bitsize; /* Saved bitsize */
++ struct trace_probe *tp;
+ unsigned int flags;
+ int offset;
+ };
+@@ -506,7 +532,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ C(NO_BTFARG, "This variable is not found at this probe point"),\
+ C(NO_BTF_ENTRY, "No BTF entry for this probe point"), \
+ C(BAD_VAR_ARGS, "$arg* must be an independent parameter without name etc."),\
+- C(NOFENTRY_ARGS, "$arg* can be used only on function entry"), \
++ C(NOFENTRY_ARGS, "$arg* can be used only on function entry or exit"), \
+ C(DOUBLE_ARGS, "$arg* can be used only once in the parameters"), \
+ C(ARGS_2LONG, "$arg* failed because the argument list is too long"), \
+ C(ARGIDX_2BIG, "$argN index is too big"), \
+diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h
+index 3935b347f874bc..2caf0d2afb3228 100644
+--- a/kernel/trace/trace_probe_tmpl.h
++++ b/kernel/trace/trace_probe_tmpl.h
+@@ -54,7 +54,7 @@ fetch_apply_bitfield(struct fetch_insn *code, void *buf)
+ * If dest is NULL, don't store result and return required dynamic data size.
+ */
+ static int
+-process_fetch_insn(struct fetch_insn *code, void *rec,
++process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
+ void *dest, void *base);
+ static nokprobe_inline int fetch_store_strlen(unsigned long addr);
+ static nokprobe_inline int
+@@ -232,7 +232,7 @@ process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val,
+
+ /* Sum up total data length for dynamic arrays (strings) */
+ static nokprobe_inline int
+-__get_data_size(struct trace_probe *tp, struct pt_regs *regs)
++__get_data_size(struct trace_probe *tp, struct pt_regs *regs, void *edata)
+ {
+ struct probe_arg *arg;
+ int i, len, ret = 0;
+@@ -240,7 +240,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
+ for (i = 0; i < tp->nr_args; i++) {
+ arg = tp->args + i;
+ if (unlikely(arg->dynamic)) {
+- len = process_fetch_insn(arg->code, regs, NULL, NULL);
++ len = process_fetch_insn(arg->code, regs, edata, NULL, NULL);
+ if (len > 0)
+ ret += len;
+ }
+@@ -251,7 +251,7 @@ __get_data_size(struct trace_probe *tp, struct pt_regs *regs)
+
+ /* Store the value of each argument */
+ static nokprobe_inline void
+-store_trace_args(void *data, struct trace_probe *tp, void *rec,
++store_trace_args(void *data, struct trace_probe *tp, void *rec, void *edata,
+ int header_size, int maxlen)
+ {
+ struct probe_arg *arg;
+@@ -266,7 +266,7 @@ store_trace_args(void *data, struct trace_probe *tp, void *rec,
+ /* Point the dynamic data area if needed */
+ if (unlikely(arg->dynamic))
+ *dl = make_data_loc(maxlen, dyndata - base);
+- ret = process_fetch_insn(arg->code, rec, dl, base);
++ ret = process_fetch_insn(arg->code, rec, edata, dl, base);
+ if (arg->dynamic && likely(ret > 0)) {
+ dyndata += ret;
+ maxlen -= ret;
+diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
+index 99c051de412afa..3e7d92d2650bcc 100644
+--- a/kernel/trace/trace_uprobe.c
++++ b/kernel/trace/trace_uprobe.c
+@@ -211,8 +211,8 @@ static unsigned long translate_user_vaddr(unsigned long file_offset)
+
+ /* Note that we don't verify it, since the code does not come from user space */
+ static int
+-process_fetch_insn(struct fetch_insn *code, void *rec, void *dest,
+- void *base)
++process_fetch_insn(struct fetch_insn *code, void *rec, void *edata,
++ void *dest, void *base)
+ {
+ struct pt_regs *regs = rec;
+ unsigned long val;
+@@ -337,7 +337,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
+ if (!tu)
+ return ERR_PTR(-ENOMEM);
+
+- ret = trace_probe_init(&tu->tp, event, group, true);
++ ret = trace_probe_init(&tu->tp, event, group, true, nargs);
+ if (ret < 0)
+ goto error;
+
+@@ -556,6 +556,8 @@ static int __trace_uprobe_create(int argc, const char **argv)
+
+ if (argc < 2)
+ return -ECANCELED;
++ if (argc - 2 > MAX_TRACE_ARGS)
++ return -E2BIG;
+
+ if (argv[0][1] == ':')
+ event = &argv[0][2];
+@@ -681,7 +683,7 @@ static int __trace_uprobe_create(int argc, const char **argv)
+ tu->filename = filename;
+
+ /* parse arguments */
+- for (i = 0; i < argc && i < MAX_TRACE_ARGS; i++) {
++ for (i = 0; i < argc; i++) {
+ struct traceprobe_parse_context ctx = {
+ .flags = (is_return ? TPARG_FL_RETURN : 0) | TPARG_FL_USER,
+ };
+@@ -854,9 +856,11 @@ static const struct file_operations uprobe_profile_ops = {
+ struct uprobe_cpu_buffer {
+ struct mutex mutex;
+ void *buf;
++ int dsize;
+ };
+ static struct uprobe_cpu_buffer __percpu *uprobe_cpu_buffer;
+ static int uprobe_buffer_refcnt;
++#define MAX_UCB_BUFFER_SIZE PAGE_SIZE
+
+ static int uprobe_buffer_init(void)
+ {
+@@ -940,12 +944,41 @@ static struct uprobe_cpu_buffer *uprobe_buffer_get(void)
+
+ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
+ {
++ if (!ucb)
++ return;
+ mutex_unlock(&ucb->mutex);
+ }
+
++static struct uprobe_cpu_buffer *prepare_uprobe_buffer(struct trace_uprobe *tu,
++ struct pt_regs *regs,
++ struct uprobe_cpu_buffer **ucbp)
++{
++ struct uprobe_cpu_buffer *ucb;
++ int dsize, esize;
++
++ if (*ucbp)
++ return *ucbp;
++
++ esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
++ dsize = __get_data_size(&tu->tp, regs, NULL);
++
++ ucb = uprobe_buffer_get();
++ ucb->dsize = tu->tp.size + dsize;
++
++ if (WARN_ON_ONCE(ucb->dsize > MAX_UCB_BUFFER_SIZE)) {
++ ucb->dsize = MAX_UCB_BUFFER_SIZE;
++ dsize = MAX_UCB_BUFFER_SIZE - tu->tp.size;
++ }
++
++ store_trace_args(ucb->buf, &tu->tp, regs, NULL, esize, dsize);
++
++ *ucbp = ucb;
++ return ucb;
++}
++
+ static void __uprobe_trace_func(struct trace_uprobe *tu,
+ unsigned long func, struct pt_regs *regs,
+- struct uprobe_cpu_buffer *ucb, int dsize,
++ struct uprobe_cpu_buffer *ucb,
+ struct trace_event_file *trace_file)
+ {
+ struct uprobe_trace_entry_head *entry;
+@@ -956,14 +989,11 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
+
+ WARN_ON(call != trace_file->event_call);
+
+- if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
+- return;
+-
+ if (trace_trigger_soft_disabled(trace_file))
+ return;
+
+ esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+- size = esize + tu->tp.size + dsize;
++ size = esize + ucb->dsize;
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
+ if (!entry)
+ return;
+@@ -977,23 +1007,26 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
+ data = DATAOF_TRACE_ENTRY(entry, false);
+ }
+
+- memcpy(data, ucb->buf, tu->tp.size + dsize);
++ memcpy(data, ucb->buf, ucb->dsize);
+
+ trace_event_buffer_commit(&fbuffer);
+ }
+
+ /* uprobe handler */
+ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
+- struct uprobe_cpu_buffer *ucb, int dsize)
++ struct uprobe_cpu_buffer **ucbp)
+ {
+ struct event_file_link *link;
++ struct uprobe_cpu_buffer *ucb;
+
+ if (is_ret_probe(tu))
+ return 0;
+
++ ucb = prepare_uprobe_buffer(tu, regs, ucbp);
++
+ rcu_read_lock();
+ trace_probe_for_each_link_rcu(link, &tu->tp)
+- __uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
++ __uprobe_trace_func(tu, 0, regs, ucb, link->file);
+ rcu_read_unlock();
+
+ return 0;
+@@ -1001,13 +1034,16 @@ static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
+
+ static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
+ struct pt_regs *regs,
+- struct uprobe_cpu_buffer *ucb, int dsize)
++ struct uprobe_cpu_buffer **ucbp)
+ {
+ struct event_file_link *link;
++ struct uprobe_cpu_buffer *ucb;
++
++ ucb = prepare_uprobe_buffer(tu, regs, ucbp);
+
+ rcu_read_lock();
+ trace_probe_for_each_link_rcu(link, &tu->tp)
+- __uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
++ __uprobe_trace_func(tu, func, regs, ucb, link->file);
+ rcu_read_unlock();
+ }
+
+@@ -1335,10 +1371,11 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc,
+
+ static void __uprobe_perf_func(struct trace_uprobe *tu,
+ unsigned long func, struct pt_regs *regs,
+- struct uprobe_cpu_buffer *ucb, int dsize)
++ struct uprobe_cpu_buffer **ucbp)
+ {
+ struct trace_event_call *call = trace_probe_event_call(&tu->tp);
+ struct uprobe_trace_entry_head *entry;
++ struct uprobe_cpu_buffer *ucb;
+ struct hlist_head *head;
+ void *data;
+ int size, esize;
+@@ -1356,7 +1393,8 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
+
+ esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+
+- size = esize + tu->tp.size + dsize;
++ ucb = prepare_uprobe_buffer(tu, regs, ucbp);
++ size = esize + ucb->dsize;
+ size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
+ if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
+ return;
+@@ -1379,13 +1417,10 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
+ data = DATAOF_TRACE_ENTRY(entry, false);
+ }
+
+- memcpy(data, ucb->buf, tu->tp.size + dsize);
+-
+- if (size - esize > tu->tp.size + dsize) {
+- int len = tu->tp.size + dsize;
++ memcpy(data, ucb->buf, ucb->dsize);
+
+- memset(data + len, 0, size - esize - len);
+- }
++ if (size - esize > ucb->dsize)
++ memset(data + ucb->dsize, 0, size - esize - ucb->dsize);
+
+ perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
+ head, NULL);
+@@ -1395,21 +1430,21 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
+
+ /* uprobe profile handler */
+ static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
+- struct uprobe_cpu_buffer *ucb, int dsize)
++ struct uprobe_cpu_buffer **ucbp)
+ {
+ if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
+ return UPROBE_HANDLER_REMOVE;
+
+ if (!is_ret_probe(tu))
+- __uprobe_perf_func(tu, 0, regs, ucb, dsize);
++ __uprobe_perf_func(tu, 0, regs, ucbp);
+ return 0;
+ }
+
+ static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
+ struct pt_regs *regs,
+- struct uprobe_cpu_buffer *ucb, int dsize)
++ struct uprobe_cpu_buffer **ucbp)
+ {
+- __uprobe_perf_func(tu, func, regs, ucb, dsize);
++ __uprobe_perf_func(tu, func, regs, ucbp);
+ }
+
+ int bpf_get_uprobe_info(const struct perf_event *event, u32 *fd_type,
+@@ -1474,11 +1509,9 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+ {
+ struct trace_uprobe *tu;
+ struct uprobe_dispatch_data udd;
+- struct uprobe_cpu_buffer *ucb;
+- int dsize, esize;
++ struct uprobe_cpu_buffer *ucb = NULL;
+ int ret = 0;
+
+-
+ tu = container_of(con, struct trace_uprobe, consumer);
+ tu->nhit++;
+
+@@ -1490,18 +1523,12 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
+ if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+ return 0;
+
+- dsize = __get_data_size(&tu->tp, regs);
+- esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+-
+- ucb = uprobe_buffer_get();
+- store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
+-
+ if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+- ret |= uprobe_trace_func(tu, regs, ucb, dsize);
++ ret |= uprobe_trace_func(tu, regs, &ucb);
+
+ #ifdef CONFIG_PERF_EVENTS
+ if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+- ret |= uprobe_perf_func(tu, regs, ucb, dsize);
++ ret |= uprobe_perf_func(tu, regs, &ucb);
+ #endif
+ uprobe_buffer_put(ucb);
+ return ret;
+@@ -1512,8 +1539,7 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
+ {
+ struct trace_uprobe *tu;
+ struct uprobe_dispatch_data udd;
+- struct uprobe_cpu_buffer *ucb;
+- int dsize, esize;
++ struct uprobe_cpu_buffer *ucb = NULL;
+
+ tu = container_of(con, struct trace_uprobe, consumer);
+
+@@ -1525,18 +1551,12 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
+ if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+ return 0;
+
+- dsize = __get_data_size(&tu->tp, regs);
+- esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+-
+- ucb = uprobe_buffer_get();
+- store_trace_args(ucb->buf, &tu->tp, regs, esize, dsize);
+-
+ if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+- uretprobe_trace_func(tu, func, regs, ucb, dsize);
++ uretprobe_trace_func(tu, func, regs, &ucb);
+
+ #ifdef CONFIG_PERF_EVENTS
+ if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+- uretprobe_perf_func(tu, func, regs, ucb, dsize);
++ uretprobe_perf_func(tu, func, regs, &ucb);
+ #endif
+ uprobe_buffer_put(ucb);
+ return 0;
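The uprobe rework replaces eager buffer filling with a lazy, caller-owned slot: both dispatchers start with ucb = NULL, prepare_uprobe_buffer() populates it on first use (capping the stored size at MAX_UCB_BUFFER_SIZE), and uprobe_buffer_put() now tolerates NULL, so a dispatch with no active consumer skips the per-CPU buffer entirely. The pattern in isolation, as a sketch with hypothetical names:

	/* Lazy, idempotent acquisition through a caller-owned slot. */
	static struct res *prepare_res(struct res **slot)
	{
		if (*slot)
			return *slot;	/* already prepared for an earlier consumer */
		*slot = acquire_res();
		return *slot;
	}

	struct res *r = NULL;
	if (consumer_a_active)
		use_a(prepare_res(&r));
	if (consumer_b_active)
		use_b(prepare_res(&r));
	release_res(r);			/* must accept NULL */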
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index da5513cfc12588..f94c3e957b8298 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -2999,7 +2999,7 @@ config RUST_BUILD_ASSERT_ALLOW
+ bool "Allow unoptimized build-time assertions"
+ depends on RUST
+ help
+- Controls how are `build_error!` and `build_assert!` handled during build.
++ Controls how `build_error!` and `build_assert!` are handled during the build.
+
+ If calls to them exist in the binary, it may indicate a violated invariant
+ or that the optimizer failed to verify the invariant during compilation.
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 88433cc25d8a5a..a87cfe1d4b7beb 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -887,20 +887,6 @@ static int hpage_collapse_find_target_node(struct collapse_control *cc)
+ }
+ #endif
+
+-static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
+- nodemask_t *nmask)
+-{
+- *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
+- if (unlikely(!*hpage)) {
+- count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+- return false;
+- }
+-
+- folio_prep_large_rmappable((struct folio *)*hpage);
+- count_vm_event(THP_COLLAPSE_ALLOC);
+- return true;
+-}
+-
+ /*
+ * If mmap_lock temporarily dropped, revalidate vma
+ * before taking mmap_lock.
+@@ -1055,7 +1041,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm,
+ return result;
+ }
+
+-static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
++static int alloc_charge_folio(struct folio **foliop, struct mm_struct *mm,
+ struct collapse_control *cc)
+ {
+ gfp_t gfp = (cc->is_khugepaged ? alloc_hugepage_khugepaged_gfpmask() :
+@@ -1063,17 +1049,23 @@ static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
+ int node = hpage_collapse_find_target_node(cc);
+ struct folio *folio;
+
+- if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
++ folio = __folio_alloc(gfp, HPAGE_PMD_ORDER, node, &cc->alloc_nmask);
++ if (!folio) {
++ *foliop = NULL;
++ count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
+ return SCAN_ALLOC_HUGE_PAGE_FAIL;
++ }
+
+- folio = page_folio(*hpage);
++ count_vm_event(THP_COLLAPSE_ALLOC);
+ if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
+ folio_put(folio);
+- *hpage = NULL;
++ *foliop = NULL;
+ return SCAN_CGROUP_CHARGE_FAIL;
+ }
+- count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
+
++ count_memcg_folio_events(folio, THP_COLLAPSE_ALLOC, 1);
++
++ *foliop = folio;
+ return SCAN_SUCCEED;
+ }
+
+@@ -1085,6 +1077,7 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, _pmd;
+ pte_t *pte;
+ pgtable_t pgtable;
++ struct folio *folio;
+ struct page *hpage;
+ spinlock_t *pmd_ptl, *pte_ptl;
+ int result = SCAN_FAIL;
+@@ -1101,7 +1094,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
+ */
+ mmap_read_unlock(mm);
+
+- result = alloc_charge_hpage(&hpage, mm, cc);
++ result = alloc_charge_folio(&folio, mm, cc);
++ hpage = &folio->page;
+ if (result != SCAN_SUCCEED)
+ goto out_nolock;
+
+@@ -1205,12 +1199,11 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
+ goto out_up_write;
+
+ /*
+- * spin_lock() below is not the equivalent of smp_wmb(), but
+- * the smp_wmb() inside __SetPageUptodate() can be reused to
+- * avoid the copy_huge_page writes to become visible after
+- * the set_pmd_at() write.
++ * The smp_wmb() inside __folio_mark_uptodate() ensures the
++ * copy_huge_page writes become visible before the set_pmd_at()
++ * write.
+ */
+- __SetPageUptodate(hpage);
++ __folio_mark_uptodate(folio);
+ pgtable = pmd_pgtable(_pmd);
+
+ _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
+@@ -1218,8 +1211,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
+
+ spin_lock(pmd_ptl);
+ BUG_ON(!pmd_none(*pmd));
+- page_add_new_anon_rmap(hpage, vma, address);
+- lru_cache_add_inactive_or_unevictable(hpage, vma);
++ folio_add_new_anon_rmap(folio, vma, address);
++ folio_add_lru_vma(folio, vma);
+ pgtable_trans_huge_deposit(mm, pmd, pgtable);
+ set_pmd_at(mm, address, pmd, _pmd);
+ update_mmu_cache_pmd(vma, address, pmd);
+@@ -1790,29 +1783,27 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ struct collapse_control *cc)
+ {
+ struct address_space *mapping = file->f_mapping;
+- struct page *hpage;
+ struct page *page;
+- struct page *tmp;
+- struct folio *folio;
++ struct page *tmp, *dst;
++ struct folio *folio, *new_folio;
+ pgoff_t index = 0, end = start + HPAGE_PMD_NR;
+ LIST_HEAD(pagelist);
+ XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
+ int nr_none = 0, result = SCAN_SUCCEED;
+ bool is_shmem = shmem_file(file);
+- int nr = 0;
+
+ VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
+ VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
+
+- result = alloc_charge_hpage(&hpage, mm, cc);
++ result = alloc_charge_folio(&new_folio, mm, cc);
+ if (result != SCAN_SUCCEED)
+ goto out;
+
+- __SetPageLocked(hpage);
++ __folio_set_locked(new_folio);
+ if (is_shmem)
+- __SetPageSwapBacked(hpage);
+- hpage->index = start;
+- hpage->mapping = mapping;
++ __folio_set_swapbacked(new_folio);
++ new_folio->index = start;
++ new_folio->mapping = mapping;
+
+ /*
+ * Ensure we have slots for all the pages in the range. This is
+@@ -2045,20 +2036,24 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ * The old pages are locked, so they won't change anymore.
+ */
+ index = start;
++ dst = folio_page(new_folio, 0);
+ list_for_each_entry(page, &pagelist, lru) {
+ while (index < page->index) {
+- clear_highpage(hpage + (index % HPAGE_PMD_NR));
++ clear_highpage(dst);
+ index++;
++ dst++;
+ }
+- if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
++ if (copy_mc_highpage(dst, page) > 0) {
+ result = SCAN_COPY_MC;
+ goto rollback;
+ }
+ index++;
++ dst++;
+ }
+ while (index < end) {
+- clear_highpage(hpage + (index % HPAGE_PMD_NR));
++ clear_highpage(dst);
+ index++;
++ dst++;
+ }
+
+ if (nr_none) {
+@@ -2086,16 +2081,17 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ }
+
+ /*
+- * If userspace observed a missing page in a VMA with a MODE_MISSING
+- * userfaultfd, then it might expect a UFFD_EVENT_PAGEFAULT for that
+- * page. If so, we need to roll back to avoid suppressing such an
+- * event. Since wp/minor userfaultfds don't give userspace any
+- * guarantees that the kernel doesn't fill a missing page with a zero
+- * page, so they don't matter here.
++ * If userspace observed a missing page in a VMA with
++ * a MODE_MISSING userfaultfd, then it might expect a
++ * UFFD_EVENT_PAGEFAULT for that page. If so, we need to
++ * roll back to avoid suppressing such an event. Since
++ * wp/minor userfaultfds don't give userspace any
++ * guarantees that the kernel doesn't fill a missing
++ * page with a zero page, they don't matter here.
+ *
+- * Any userfaultfds registered after this point will not be able to
+- * observe any missing pages due to the previously inserted retry
+- * entries.
++ * Any userfaultfds registered after this point will
++ * not be able to observe any missing pages due to the
++ * previously inserted retry entries.
+ */
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, start, end) {
+ if (userfaultfd_missing(vma)) {
+@@ -2120,33 +2116,32 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ xas_lock_irq(&xas);
+ }
+
+- nr = thp_nr_pages(hpage);
+ if (is_shmem)
+- __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
++ __lruvec_stat_mod_folio(new_folio, NR_SHMEM_THPS, HPAGE_PMD_NR);
+ else
+- __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
++ __lruvec_stat_mod_folio(new_folio, NR_FILE_THPS, HPAGE_PMD_NR);
+
+ if (nr_none) {
+- __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
++ __lruvec_stat_mod_folio(new_folio, NR_FILE_PAGES, nr_none);
+ /* nr_none is always 0 for non-shmem. */
+- __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
++ __lruvec_stat_mod_folio(new_folio, NR_SHMEM, nr_none);
+ }
+
+ /*
+- * Mark hpage as uptodate before inserting it into the page cache so
+- * that it isn't mistaken for an fallocated but unwritten page.
++ * Mark new_folio as uptodate before inserting it into the
++ * page cache so that it isn't mistaken for a fallocated but
++ * unwritten page.
+ */
+- folio = page_folio(hpage);
+- folio_mark_uptodate(folio);
+- folio_ref_add(folio, HPAGE_PMD_NR - 1);
++ folio_mark_uptodate(new_folio);
++ folio_ref_add(new_folio, HPAGE_PMD_NR - 1);
+
+ if (is_shmem)
+- folio_mark_dirty(folio);
+- folio_add_lru(folio);
++ folio_mark_dirty(new_folio);
++ folio_add_lru(new_folio);
+
+ /* Join all the small entries into a single multi-index entry. */
+ xas_set_order(&xas, start, HPAGE_PMD_ORDER);
+- xas_store(&xas, hpage);
++ xas_store(&xas, new_folio);
+ WARN_ON_ONCE(xas_error(&xas));
+ xas_unlock_irq(&xas);
+
+@@ -2157,7 +2152,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ retract_page_tables(mapping, start);
+ if (cc && !cc->is_khugepaged)
+ result = SCAN_PTE_MAPPED_HUGEPAGE;
+- unlock_page(hpage);
++ folio_unlock(new_folio);
+
+ /*
+ * The collapse has succeeded, so free the old pages.
+@@ -2202,13 +2197,13 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
+ smp_mb();
+ }
+
+- hpage->mapping = NULL;
++ new_folio->mapping = NULL;
+
+- unlock_page(hpage);
+- put_page(hpage);
++ folio_unlock(new_folio);
++ folio_put(new_folio);
+ out:
+ VM_BUG_ON(!list_empty(&pagelist));
+- trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
++ trace_mm_khugepaged_collapse_file(mm, new_folio, index, addr, is_shmem, file, HPAGE_PMD_NR, result);
+ return result;
+ }
+
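The khugepaged hunks are a mechanical page-to-folio conversion around the new alloc_charge_folio(); the substantive mappings, all visible in the diff above, are roughly:

	__SetPageLocked(hpage)                  ->  __folio_set_locked(new_folio)
	__SetPageSwapBacked(hpage)              ->  __folio_set_swapbacked(new_folio)
	__SetPageUptodate(hpage)                ->  __folio_mark_uptodate(folio)
	page_add_new_anon_rmap() + lru_cache_add_inactive_or_unevictable()
	                                        ->  folio_add_new_anon_rmap() + folio_add_lru_vma()
	__mod_lruvec_page_state(hpage, ...)     ->  __lruvec_stat_mod_folio(new_folio, ...)

The one behavioural nuance is in collapse_file(): the destination pointer dst now walks forward from folio_page(new_folio, 0) instead of recomputing hpage + (index % HPAGE_PMD_NR) on every iteration.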
+diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
+index e39fba5565c5d4..0b4d0a8bd36141 100644
+--- a/net/bluetooth/af_bluetooth.c
++++ b/net/bluetooth/af_bluetooth.c
+@@ -185,6 +185,28 @@ void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk)
+ }
+ EXPORT_SYMBOL(bt_sock_unlink);
+
++bool bt_sock_linked(struct bt_sock_list *l, struct sock *s)
++{
++ struct sock *sk;
++
++ if (!l || !s)
++ return false;
++
++ read_lock(&l->lock);
++
++ sk_for_each(sk, &l->head) {
++ if (s == sk) {
++ read_unlock(&l->lock);
++ return true;
++ }
++ }
++
++ read_unlock(&l->lock);
++
++ return false;
++}
++EXPORT_SYMBOL(bt_sock_linked);
++
+ void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
+ {
+ const struct cred *old_cred;
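
A minimal sketch (not part of the patch) of how the new bt_sock_linked() helper is meant to be used, mirroring the iso and sco hunks below: validate that conn->sk is still on the protocol's socket list before taking a reference, so a timeout worker cannot sock_hold() a socket that was already unlinked and freed. The conn type and list name here are placeholders.

	static struct sock *example_sock_hold(struct example_conn *conn)
	{
		/* conn->sk may be stale if the socket was already unlinked */
		if (!conn || !bt_sock_linked(&example_sk_list, conn->sk))
			return NULL;

		sock_hold(conn->sk); /* safe: sk is still published on the list */
		return conn->sk;
	}
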
+diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
+index a660c428e2207c..38f542665f1962 100644
+--- a/net/bluetooth/bnep/core.c
++++ b/net/bluetooth/bnep/core.c
+@@ -745,8 +745,7 @@ static int __init bnep_init(void)
+ if (flt[0])
+ BT_INFO("BNEP filters: %s", flt);
+
+- bnep_sock_init();
+- return 0;
++ return bnep_sock_init();
+ }
+
+ static void __exit bnep_exit(void)
+diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
+index 9b365fb44fac6d..c2c80d6000836b 100644
+--- a/net/bluetooth/iso.c
++++ b/net/bluetooth/iso.c
+@@ -90,6 +90,16 @@ static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst,
+ #define ISO_CONN_TIMEOUT (HZ * 40)
+ #define ISO_DISCONN_TIMEOUT (HZ * 2)
+
++static struct sock *iso_sock_hold(struct iso_conn *conn)
++{
++ if (!conn || !bt_sock_linked(&iso_sk_list, conn->sk))
++ return NULL;
++
++ sock_hold(conn->sk);
++
++ return conn->sk;
++}
++
+ static void iso_sock_timeout(struct work_struct *work)
+ {
+ struct iso_conn *conn = container_of(work, struct iso_conn,
+@@ -97,9 +107,7 @@ static void iso_sock_timeout(struct work_struct *work)
+ struct sock *sk;
+
+ iso_conn_lock(conn);
+- sk = conn->sk;
+- if (sk)
+- sock_hold(sk);
++ sk = iso_sock_hold(conn);
+ iso_conn_unlock(conn);
+
+ if (!sk)
+@@ -217,9 +225,7 @@ static void iso_conn_del(struct hci_conn *hcon, int err)
+
+ /* Kill socket */
+ iso_conn_lock(conn);
+- sk = conn->sk;
+- if (sk)
+- sock_hold(sk);
++ sk = iso_sock_hold(conn);
+ iso_conn_unlock(conn);
+
+ if (sk) {
+diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
+index 3c3650902c8396..fb368540139a18 100644
+--- a/net/bluetooth/sco.c
++++ b/net/bluetooth/sco.c
+@@ -76,6 +76,16 @@ struct sco_pinfo {
+ #define SCO_CONN_TIMEOUT (HZ * 40)
+ #define SCO_DISCONN_TIMEOUT (HZ * 2)
+
++static struct sock *sco_sock_hold(struct sco_conn *conn)
++{
++ if (!conn || !bt_sock_linked(&sco_sk_list, conn->sk))
++ return NULL;
++
++ sock_hold(conn->sk);
++
++ return conn->sk;
++}
++
+ static void sco_sock_timeout(struct work_struct *work)
+ {
+ struct sco_conn *conn = container_of(work, struct sco_conn,
+@@ -87,9 +97,7 @@ static void sco_sock_timeout(struct work_struct *work)
+ sco_conn_unlock(conn);
+ return;
+ }
+- sk = conn->sk;
+- if (sk)
+- sock_hold(sk);
++ sk = sco_sock_hold(conn);
+ sco_conn_unlock(conn);
+
+ if (!sk)
+@@ -194,9 +202,7 @@ static void sco_conn_del(struct hci_conn *hcon, int err)
+
+ /* Kill socket */
+ sco_conn_lock(conn);
+- sk = conn->sk;
+- if (sk)
+- sock_hold(sk);
++ sk = sco_sock_hold(conn);
+ sco_conn_unlock(conn);
+
+ if (sk) {
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 8bfd46a070c167..a2467a7c01f9ed 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2423,9 +2423,9 @@ static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev,
+
+ /* Internal, non-exposed redirect flags. */
+ enum {
+- BPF_F_NEIGH = (1ULL << 1),
+- BPF_F_PEER = (1ULL << 2),
+- BPF_F_NEXTHOP = (1ULL << 3),
++ BPF_F_NEIGH = (1ULL << 16),
++ BPF_F_PEER = (1ULL << 17),
++ BPF_F_NEXTHOP = (1ULL << 18),
+ #define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
+ };
+
+@@ -2435,6 +2435,8 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
+ struct sk_buff *clone;
+ int ret;
+
++ BUILD_BUG_ON(BPF_F_REDIRECT_INTERNAL & BPF_F_REDIRECT_FLAGS);
++
+ if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
+ return -EINVAL;
+
+@@ -6221,24 +6223,16 @@ BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
+ {
+ int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
+ struct net_device *dev = skb->dev;
+- int skb_len, dev_len;
+- int mtu = 0;
+-
+- if (unlikely(flags & ~(BPF_MTU_CHK_SEGS))) {
+- ret = -EINVAL;
+- goto out;
+- }
++ int mtu, dev_len, skb_len;
+
+- if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len))) {
+- ret = -EINVAL;
+- goto out;
+- }
++ if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
++ return -EINVAL;
++ if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
++ return -EINVAL;
+
+ dev = __dev_via_ifindex(dev, ifindex);
+- if (unlikely(!dev)) {
+- ret = -ENODEV;
+- goto out;
+- }
++ if (unlikely(!dev))
++ return -ENODEV;
+
+ mtu = READ_ONCE(dev->mtu);
+ dev_len = mtu + dev->hard_header_len;
+@@ -6273,19 +6267,15 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
+ struct net_device *dev = xdp->rxq->dev;
+ int xdp_len = xdp->data_end - xdp->data;
+ int ret = BPF_MTU_CHK_RET_SUCCESS;
+- int mtu = 0, dev_len;
++ int mtu, dev_len;
+
+ /* XDP variant doesn't support multi-buffer segment check (yet) */
+- if (unlikely(flags)) {
+- ret = -EINVAL;
+- goto out;
+- }
++ if (unlikely(flags))
++ return -EINVAL;
+
+ dev = __dev_via_ifindex(dev, ifindex);
+- if (unlikely(!dev)) {
+- ret = -ENODEV;
+- goto out;
+- }
++ if (unlikely(!dev))
++ return -ENODEV;
+
+ mtu = READ_ONCE(dev->mtu);
+ dev_len = mtu + dev->hard_header_len;
+@@ -6297,7 +6287,7 @@ BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
+ xdp_len += len_diff; /* minus result pass check */
+ if (xdp_len > dev_len)
+ ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
+-out:
++
+ *mtu_len = mtu;
+ return ret;
+ }
+@@ -6308,7 +6298,7 @@ static const struct bpf_func_proto bpf_skb_check_mtu_proto = {
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+- .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_WRITE | MEM_ALIGNED,
+ .arg3_size = sizeof(u32),
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
+@@ -6320,7 +6310,7 @@ static const struct bpf_func_proto bpf_xdp_check_mtu_proto = {
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+- .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_UNINIT | MEM_ALIGNED,
++ .arg3_type = ARG_PTR_TO_FIXED_SIZE_MEM | MEM_WRITE | MEM_ALIGNED,
+ .arg3_size = sizeof(u32),
+ .arg4_type = ARG_ANYTHING,
+ .arg5_type = ARG_ANYTHING,
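
A note on the flag relayout above (illustrative, not part of the patch): under the old layout, BPF_F_NEXTHOP was (1ULL << 3), the same bit as the UAPI flag BPF_F_BROADCAST, so a user-supplied flag could alias internal state. Moving the internal markers to bits 16-18 keeps the two ranges disjoint, and the BUILD_BUG_ON() in the hunk enforces this at compile time. An equivalent standalone check, assuming it sits in net/core/filter.c where the internal enum is visible:

	static inline void redirect_flags_layout_check(void)
	{
		/* internal bits (16-18) must never alias UAPI bits (0-4) */
		BUILD_BUG_ON((BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP) &
			     (BPF_F_INGRESS | BPF_F_BROADCAST |
			      BPF_F_EXCLUDE_INGRESS));
	}
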
+diff --git a/net/core/sock_map.c b/net/core/sock_map.c
+index 2afac40bb83ca1..2da881a8e7983a 100644
+--- a/net/core/sock_map.c
++++ b/net/core/sock_map.c
+@@ -644,6 +644,8 @@ BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
+ sk = __sock_map_lookup_elem(map, key);
+ if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
+ return SK_DROP;
++ if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
++ return SK_DROP;
+
+ skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
+ return SK_PASS;
+@@ -672,6 +674,8 @@ BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg *, msg,
+ return SK_DROP;
+ if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
+ return SK_DROP;
++ if (sk_is_vsock(sk))
++ return SK_DROP;
+
+ msg->flags = flags;
+ msg->sk_redir = sk;
+@@ -1246,6 +1250,8 @@ BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
+ sk = __sock_hash_lookup_elem(map, key);
+ if (unlikely(!sk || !sock_map_redirect_allowed(sk)))
+ return SK_DROP;
++ if ((flags & BPF_F_INGRESS) && sk_is_vsock(sk))
++ return SK_DROP;
+
+ skb_bpf_set_redir(skb, sk, flags & BPF_F_INGRESS);
+ return SK_PASS;
+@@ -1274,6 +1280,8 @@ BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg *, msg,
+ return SK_DROP;
+ if (!(flags & BPF_F_INGRESS) && !sk_is_tcp(sk))
+ return SK_DROP;
++ if (sk_is_vsock(sk))
++ return SK_DROP;
+
+ msg->flags = flags;
+ msg->sk_redir = sk;
+diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
+index cb0c80328eebf3..4822f68edbf08b 100644
+--- a/net/ipv4/devinet.c
++++ b/net/ipv4/devinet.c
+@@ -283,17 +283,19 @@ static struct in_device *inetdev_init(struct net_device *dev)
+ /* Account for reference dev->ip_ptr (below) */
+ refcount_set(&in_dev->refcnt, 1);
+
+- err = devinet_sysctl_register(in_dev);
+- if (err) {
+- in_dev->dead = 1;
+- neigh_parms_release(&arp_tbl, in_dev->arp_parms);
+- in_dev_put(in_dev);
+- in_dev = NULL;
+- goto out;
++ if (dev != blackhole_netdev) {
++ err = devinet_sysctl_register(in_dev);
++ if (err) {
++ in_dev->dead = 1;
++ neigh_parms_release(&arp_tbl, in_dev->arp_parms);
++ in_dev_put(in_dev);
++ in_dev = NULL;
++ goto out;
++ }
++ ip_mc_init_dev(in_dev);
++ if (dev->flags & IFF_UP)
++ ip_mc_up(in_dev);
+ }
+- ip_mc_init_dev(in_dev);
+- if (dev->flags & IFF_UP)
+- ip_mc_up(in_dev);
+
+ /* we can receive as soon as ip_ptr is set -- do this last */
+ rcu_assign_pointer(dev->ip_ptr, in_dev);
+@@ -332,6 +334,19 @@ static void inetdev_destroy(struct in_device *in_dev)
+ in_dev_put(in_dev);
+ }
+
++static int __init inet_blackhole_dev_init(void)
++{
++ int err = 0;
++
++ rtnl_lock();
++ if (!inetdev_init(blackhole_netdev))
++ err = -ENOMEM;
++ rtnl_unlock();
++
++ return err;
++}
++late_initcall(inet_blackhole_dev_init);
++
+ int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
+ {
+ const struct in_ifaddr *ifa;
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index 8720f3840b6985..ca8cc0988b618c 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -980,21 +980,31 @@ static bool reqsk_queue_unlink(struct request_sock *req)
+ found = __sk_nulls_del_node_init_rcu(sk);
+ spin_unlock(lock);
+ }
+- if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
+- reqsk_put(req);
++
+ return found;
+ }
+
+-bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
++static bool __inet_csk_reqsk_queue_drop(struct sock *sk,
++ struct request_sock *req,
++ bool from_timer)
+ {
+ bool unlinked = reqsk_queue_unlink(req);
+
++ if (!from_timer && timer_delete_sync(&req->rsk_timer))
++ reqsk_put(req);
++
+ if (unlinked) {
+ reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+ reqsk_put(req);
+ }
++
+ return unlinked;
+ }
++
++bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
++{
++ return __inet_csk_reqsk_queue_drop(sk, req, false);
++}
+ EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
+
+ void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
+@@ -1087,7 +1097,7 @@ static void reqsk_timer_handler(struct timer_list *t)
+
+ if (!inet_ehash_insert(req_to_sk(nreq), req_to_sk(oreq), NULL)) {
+ /* delete timer */
+- inet_csk_reqsk_queue_drop(sk_listener, nreq);
++ __inet_csk_reqsk_queue_drop(sk_listener, nreq, true);
+ goto no_ownership;
+ }
+
+@@ -1113,7 +1123,8 @@ static void reqsk_timer_handler(struct timer_list *t)
+ }
+
+ drop:
+- inet_csk_reqsk_queue_drop_and_put(oreq->rsk_listener, oreq);
++ __inet_csk_reqsk_queue_drop(sk_listener, oreq, true);
++ reqsk_put(req);
+ }
+
+ static bool reqsk_queue_hash_req(struct request_sock *req,
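
Why __inet_csk_reqsk_queue_drop() grows a from_timer flag (a sketch, not part of the patch): timer_delete_sync() waits for a running timer callback to finish, so calling it from reqsk_timer_handler(), which is that very callback, would self-deadlock; the handler therefore passes from_timer == true to skip the cancel. Callers outside the timer keep the synchronous cancel so a still-pending timer reliably drops its reference:

	/* any context other than the rsk_timer callback itself */
	static void drop_req_from_outside(struct sock *sk,
					  struct request_sock *req)
	{
		/* waits for a concurrent rsk_timer callback, then puts the
		 * reference the pending timer held; cannot deadlock here
		 */
		__inet_csk_reqsk_queue_drop(sk, req, false);
	}
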
+diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
+index c33bca2c384154..63bbc7f6a5a883 100644
+--- a/net/ipv4/xfrm4_policy.c
++++ b/net/ipv4/xfrm4_policy.c
+@@ -17,47 +17,43 @@
+ #include <net/ip.h>
+ #include <net/l3mdev.h>
+
+-static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
+- int tos, int oif,
+- const xfrm_address_t *saddr,
+- const xfrm_address_t *daddr,
+- u32 mark)
++static struct dst_entry *__xfrm4_dst_lookup(struct flowi4 *fl4,
++ const struct xfrm_dst_lookup_params *params)
+ {
+ struct rtable *rt;
+
+ memset(fl4, 0, sizeof(*fl4));
+- fl4->daddr = daddr->a4;
+- fl4->flowi4_tos = tos;
+- fl4->flowi4_l3mdev = l3mdev_master_ifindex_by_index(net, oif);
+- fl4->flowi4_mark = mark;
+- if (saddr)
+- fl4->saddr = saddr->a4;
+-
+- rt = __ip_route_output_key(net, fl4);
++ fl4->daddr = params->daddr->a4;
++ fl4->flowi4_tos = params->tos;
++ fl4->flowi4_l3mdev = l3mdev_master_ifindex_by_index(params->net,
++ params->oif);
++ fl4->flowi4_mark = params->mark;
++ if (params->saddr)
++ fl4->saddr = params->saddr->a4;
++ fl4->flowi4_proto = params->ipproto;
++ fl4->uli = params->uli;
++
++ rt = __ip_route_output_key(params->net, fl4);
+ if (!IS_ERR(rt))
+ return &rt->dst;
+
+ return ERR_CAST(rt);
+ }
+
+-static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
+- const xfrm_address_t *saddr,
+- const xfrm_address_t *daddr,
+- u32 mark)
++static struct dst_entry *xfrm4_dst_lookup(const struct xfrm_dst_lookup_params *params)
+ {
+ struct flowi4 fl4;
+
+- return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr, mark);
++ return __xfrm4_dst_lookup(&fl4, params);
+ }
+
+-static int xfrm4_get_saddr(struct net *net, int oif,
+- xfrm_address_t *saddr, xfrm_address_t *daddr,
+- u32 mark)
++static int xfrm4_get_saddr(xfrm_address_t *saddr,
++ const struct xfrm_dst_lookup_params *params)
+ {
+ struct dst_entry *dst;
+ struct flowi4 fl4;
+
+- dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr, mark);
++ dst = __xfrm4_dst_lookup(&fl4, params);
+ if (IS_ERR(dst))
+ return -EHOSTUNREACH;
+
+diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
+index 444b0b4469a49e..4cd625af91e6ce 100644
+--- a/net/ipv6/xfrm6_policy.c
++++ b/net/ipv6/xfrm6_policy.c
+@@ -23,23 +23,24 @@
+ #include <net/ip6_route.h>
+ #include <net/l3mdev.h>
+
+-static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
+- const xfrm_address_t *saddr,
+- const xfrm_address_t *daddr,
+- u32 mark)
++static struct dst_entry *xfrm6_dst_lookup(const struct xfrm_dst_lookup_params *params)
+ {
+ struct flowi6 fl6;
+ struct dst_entry *dst;
+ int err;
+
+ memset(&fl6, 0, sizeof(fl6));
+- fl6.flowi6_l3mdev = l3mdev_master_ifindex_by_index(net, oif);
+- fl6.flowi6_mark = mark;
+- memcpy(&fl6.daddr, daddr, sizeof(fl6.daddr));
+- if (saddr)
+- memcpy(&fl6.saddr, saddr, sizeof(fl6.saddr));
++ fl6.flowi6_l3mdev = l3mdev_master_ifindex_by_index(params->net,
++ params->oif);
++ fl6.flowi6_mark = params->mark;
++ memcpy(&fl6.daddr, params->daddr, sizeof(fl6.daddr));
++ if (params->saddr)
++ memcpy(&fl6.saddr, params->saddr, sizeof(fl6.saddr));
+
+- dst = ip6_route_output(net, NULL, &fl6);
++ fl6.flowi6_proto = params->ipproto;
++ fl6.uli = params->uli;
++
++ dst = ip6_route_output(params->net, NULL, &fl6);
+
+ err = dst->error;
+ if (dst->error) {
+@@ -50,15 +51,14 @@ static struct dst_entry *xfrm6_dst_lookup(struct net *net, int tos, int oif,
+ return dst;
+ }
+
+-static int xfrm6_get_saddr(struct net *net, int oif,
+- xfrm_address_t *saddr, xfrm_address_t *daddr,
+- u32 mark)
++static int xfrm6_get_saddr(xfrm_address_t *saddr,
++ const struct xfrm_dst_lookup_params *params)
+ {
+ struct dst_entry *dst;
+ struct net_device *dev;
+ struct inet6_dev *idev;
+
+- dst = xfrm6_dst_lookup(net, 0, oif, NULL, daddr, mark);
++ dst = xfrm6_dst_lookup(params);
+ if (IS_ERR(dst))
+ return -EHOSTUNREACH;
+
+@@ -68,7 +68,8 @@ static int xfrm6_get_saddr(struct net *net, int oif,
+ return -EHOSTUNREACH;
+ }
+ dev = idev->dev;
+- ipv6_dev_get_saddr(dev_net(dev), dev, &daddr->in6, 0, &saddr->in6);
++ ipv6_dev_get_saddr(dev_net(dev), dev, &params->daddr->in6, 0,
++ &saddr->in6);
+ dst_release(dst);
+ return 0;
+ }
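
For reference, the shape of struct xfrm_dst_lookup_params implied by the callers in this patch; the authoritative definition lives in the include/net/xfrm.h hunk of the same upstream series, which is not shown here, so field order and exact types are inferred rather than verbatim:

	struct xfrm_dst_lookup_params {
		struct net *net;
		int tos;
		int oif;
		xfrm_address_t *saddr;
		xfrm_address_t *daddr;
		u32 mark;
		__u8 ipproto;
		union flowi_uli uli;	/* sport/dport for ESP-in-UDP/TCP */
	};
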
+diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
+index a901fd14fe3bfe..e27e00cb16c6b1 100644
+--- a/net/l2tp/l2tp_netlink.c
++++ b/net/l2tp/l2tp_netlink.c
+@@ -115,7 +115,7 @@ static int l2tp_tunnel_notify(struct genl_family *family,
+ NLM_F_ACK, tunnel, cmd);
+
+ if (ret >= 0) {
+- ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++ ret = genlmsg_multicast_allns(family, msg, 0, 0);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+@@ -143,7 +143,7 @@ static int l2tp_session_notify(struct genl_family *family,
+ NLM_F_ACK, session, cmd);
+
+ if (ret >= 0) {
+- ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
++ ret = genlmsg_multicast_allns(family, msg, 0, 0);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+diff --git a/net/netfilter/nf_bpf_link.c b/net/netfilter/nf_bpf_link.c
+index 0e4beae421f830..2aad0562a41351 100644
+--- a/net/netfilter/nf_bpf_link.c
++++ b/net/netfilter/nf_bpf_link.c
+@@ -23,6 +23,7 @@ static unsigned int nf_hook_run_bpf(void *bpf_prog, struct sk_buff *skb,
+ struct bpf_nf_link {
+ struct bpf_link link;
+ struct nf_hook_ops hook_ops;
++ netns_tracker ns_tracker;
+ struct net *net;
+ u32 dead;
+ const struct nf_defrag_hook *defrag_hook;
+@@ -120,6 +121,7 @@ static void bpf_nf_link_release(struct bpf_link *link)
+ if (!cmpxchg(&nf_link->dead, 0, 1)) {
+ nf_unregister_net_hook(nf_link->net, &nf_link->hook_ops);
+ bpf_nf_disable_defrag(nf_link);
++ put_net_track(nf_link->net, &nf_link->ns_tracker);
+ }
+ }
+
+@@ -150,11 +152,12 @@ static int bpf_nf_link_fill_link_info(const struct bpf_link *link,
+ struct bpf_link_info *info)
+ {
+ struct bpf_nf_link *nf_link = container_of(link, struct bpf_nf_link, link);
++ const struct nf_defrag_hook *hook = nf_link->defrag_hook;
+
+ info->netfilter.pf = nf_link->hook_ops.pf;
+ info->netfilter.hooknum = nf_link->hook_ops.hooknum;
+ info->netfilter.priority = nf_link->hook_ops.priority;
+- info->netfilter.flags = 0;
++ info->netfilter.flags = hook ? BPF_F_NETFILTER_IP_DEFRAG : 0;
+
+ return 0;
+ }
+@@ -257,6 +260,8 @@ int bpf_nf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
+ return err;
+ }
+
++ get_net_track(net, &link->ns_tracker, GFP_KERNEL);
++
+ return bpf_link_settle(&link_primer);
+ }
+
+diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
+index d80abd6ccaf8f7..6dcf4bc7e30b2a 100644
+--- a/net/netfilter/xt_NFLOG.c
++++ b/net/netfilter/xt_NFLOG.c
+@@ -79,7 +79,7 @@ static struct xt_target nflog_tg_reg[] __read_mostly = {
+ {
+ .name = "NFLOG",
+ .revision = 0,
+- .family = NFPROTO_IPV4,
++ .family = NFPROTO_IPV6,
+ .checkentry = nflog_tg_check,
+ .destroy = nflog_tg_destroy,
+ .target = nflog_tg,
+diff --git a/net/netfilter/xt_TRACE.c b/net/netfilter/xt_TRACE.c
+index f3fa4f11348cd8..a642ff09fc8e8c 100644
+--- a/net/netfilter/xt_TRACE.c
++++ b/net/netfilter/xt_TRACE.c
+@@ -49,6 +49,7 @@ static struct xt_target trace_tg_reg[] __read_mostly = {
+ .target = trace_tg,
+ .checkentry = trace_tg_check,
+ .destroy = trace_tg_destroy,
++ .me = THIS_MODULE,
+ },
+ #endif
+ };
+diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
+index f76fe04fc9a4e1..65b965ca40ea7e 100644
+--- a/net/netfilter/xt_mark.c
++++ b/net/netfilter/xt_mark.c
+@@ -62,7 +62,7 @@ static struct xt_target mark_tg_reg[] __read_mostly = {
+ {
+ .name = "MARK",
+ .revision = 2,
+- .family = NFPROTO_IPV4,
++ .family = NFPROTO_IPV6,
+ .target = mark_tg,
+ .targetsize = sizeof(struct xt_mark_tginfo2),
+ .me = THIS_MODULE,
+diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
+index d41c4a936ad0c9..d6eee5140c8be1 100644
+--- a/net/netlink/genetlink.c
++++ b/net/netlink/genetlink.c
+@@ -1355,15 +1355,11 @@ static int genl_ctrl_event(int event, const struct genl_family *family,
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+- if (!family->netnsok) {
++ if (!family->netnsok)
+ genlmsg_multicast_netns(&genl_ctrl, &init_net, msg, 0,
+ 0, GFP_KERNEL);
+- } else {
+- rcu_read_lock();
+- genlmsg_multicast_allns(&genl_ctrl, msg, 0,
+- 0, GFP_ATOMIC);
+- rcu_read_unlock();
+- }
++ else
++ genlmsg_multicast_allns(&genl_ctrl, msg, 0, 0);
+
+ return 0;
+ }
+@@ -1752,23 +1748,23 @@ static int __init genl_init(void)
+
+ core_initcall(genl_init);
+
+-static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+- gfp_t flags)
++static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group)
+ {
+ struct sk_buff *tmp;
+ struct net *net, *prev = NULL;
+ bool delivered = false;
+ int err;
+
++ rcu_read_lock();
+ for_each_net_rcu(net) {
+ if (prev) {
+- tmp = skb_clone(skb, flags);
++ tmp = skb_clone(skb, GFP_ATOMIC);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto error;
+ }
+ err = nlmsg_multicast(prev->genl_sock, tmp,
+- portid, group, flags);
++ portid, group, GFP_ATOMIC);
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+@@ -1777,27 +1773,31 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+
+ prev = net;
+ }
++ err = nlmsg_multicast(prev->genl_sock, skb, portid, group, GFP_ATOMIC);
++
++ rcu_read_unlock();
+
+- err = nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+ if (!err)
+ delivered = true;
+ else if (err != -ESRCH)
+ return err;
+ return delivered ? 0 : -ESRCH;
+ error:
++ rcu_read_unlock();
++
+ kfree_skb(skb);
+ return err;
+ }
+
+ int genlmsg_multicast_allns(const struct genl_family *family,
+ struct sk_buff *skb, u32 portid,
+- unsigned int group, gfp_t flags)
++ unsigned int group)
+ {
+ if (WARN_ON_ONCE(group >= family->n_mcgrps))
+ return -EINVAL;
+
+ group = family->mcgrp_offset + group;
+- return genlmsg_mcast(skb, portid, group, flags);
++ return genlmsg_mcast(skb, portid, group);
+ }
+ EXPORT_SYMBOL(genlmsg_multicast_allns);
+
+diff --git a/net/sched/act_api.c b/net/sched/act_api.c
+index 2d6d58e1b278a1..4572aa6e0273f8 100644
+--- a/net/sched/act_api.c
++++ b/net/sched/act_api.c
+@@ -1489,8 +1489,29 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
+ bool skip_sw = tc_skip_sw(fl_flags);
+ bool skip_hw = tc_skip_hw(fl_flags);
+
+- if (tc_act_bind(act->tcfa_flags))
++ if (tc_act_bind(act->tcfa_flags)) {
++ /* Action is created by classifier and is not
++ * standalone. Check that the user did not set
++ * any action flags different from the
++ * classifier flags, and inherit the flags from
++ * the classifier for the compatibility case
++ * where no flags were specified at all.
++ */
++ if ((tc_act_skip_sw(act->tcfa_flags) && !skip_sw) ||
++ (tc_act_skip_hw(act->tcfa_flags) && !skip_hw)) {
++ NL_SET_ERR_MSG(extack,
++ "Mismatch between action and filter offload flags");
++ err = -EINVAL;
++ goto err;
++ }
++ if (skip_sw)
++ act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_SW;
++ if (skip_hw)
++ act->tcfa_flags |= TCA_ACT_FLAGS_SKIP_HW;
+ continue;
++ }
++
++ /* Action is standalone */
+ if (skip_sw != tc_act_skip_sw(act->tcfa_flags) ||
+ skip_hw != tc_act_skip_hw(act->tcfa_flags)) {
+ NL_SET_ERR_MSG(extack,
+diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
+index 6ab9359c1706f1..b51af871a621ca 100644
+--- a/net/sched/sch_generic.c
++++ b/net/sched/sch_generic.c
+@@ -505,19 +505,28 @@ static void dev_watchdog(struct timer_list *t)
+ unsigned int timedout_ms = 0;
+ unsigned int i;
+ unsigned long trans_start;
++ unsigned long oldest_start = jiffies;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(dev, i);
++ if (!netif_xmit_stopped(txq))
++ continue;
++
++ /* Paired with WRITE_ONCE() + smp_mb...() in
++ * netdev_tx_sent_queue() and netif_tx_stop_queue().
++ */
++ smp_mb();
+ trans_start = READ_ONCE(txq->trans_start);
+- if (netif_xmit_stopped(txq) &&
+- time_after(jiffies, (trans_start +
+- dev->watchdog_timeo))) {
++
++ if (time_after(jiffies, trans_start + dev->watchdog_timeo)) {
+ timedout_ms = jiffies_to_msecs(jiffies - trans_start);
+ atomic_long_inc(&txq->trans_timeout);
+ break;
+ }
++ if (time_after(oldest_start, trans_start))
++ oldest_start = trans_start;
+ }
+
+ if (unlikely(timedout_ms)) {
+@@ -530,7 +539,7 @@ static void dev_watchdog(struct timer_list *t)
+ netif_unfreeze_queues(dev);
+ }
+ if (!mod_timer(&dev->watchdog_timer,
+- round_jiffies(jiffies +
++ round_jiffies(oldest_start +
+ dev->watchdog_timeo)))
+ release = false;
+ }
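
The ordering the dev_watchdog() hunk relies on, extracted into a standalone predicate (a sketch, not part of the patch); the writer side is netdev_tx_sent_queue()/netif_tx_stop_queue(), which publish trans_start and the stopped bit with WRITE_ONCE() plus a full barrier:

	static bool txq_timed_out(struct net_device *dev,
				  struct netdev_queue *txq)
	{
		unsigned long trans_start;

		if (!netif_xmit_stopped(txq))	/* load stopped bit first */
			return false;

		smp_mb();	/* order the stopped-bit load before... */
		trans_start = READ_ONCE(txq->trans_start);	/* ...this */

		return time_after(jiffies, trans_start + dev->watchdog_timeo);
	}

Without the read-side barrier, the watchdog can observe the stopped bit yet a stale trans_start and report a bogus timeout.
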
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 87090d67903621..951a87909c2974 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1988,7 +1988,8 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+
+ taprio_start_sched(sch, start, new_admin);
+
+- rcu_assign_pointer(q->admin_sched, new_admin);
++ admin = rcu_replace_pointer(q->admin_sched, new_admin,
++ lockdep_rtnl_is_held());
+ if (admin)
+ call_rcu(&admin->rcu, taprio_free_sched_cb);
+
+@@ -2396,9 +2397,6 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+ struct tc_mqprio_qopt opt = { 0 };
+ struct nlattr *nest, *sched_nest;
+
+- oper = rtnl_dereference(q->oper_sched);
+- admin = rtnl_dereference(q->admin_sched);
+-
+ mqprio_qopt_reconstruct(dev, &opt);
+
+ nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
+@@ -2419,18 +2417,23 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+ nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay))
+ goto options_error;
+
++ rcu_read_lock();
++
++ oper = rtnl_dereference(q->oper_sched);
++ admin = rtnl_dereference(q->admin_sched);
++
+ if (oper && taprio_dump_tc_entries(skb, q, oper))
+- goto options_error;
++ goto options_error_rcu;
+
+ if (oper && dump_schedule(skb, oper))
+- goto options_error;
++ goto options_error_rcu;
+
+ if (!admin)
+ goto done;
+
+ sched_nest = nla_nest_start_noflag(skb, TCA_TAPRIO_ATTR_ADMIN_SCHED);
+ if (!sched_nest)
+- goto options_error;
++ goto options_error_rcu;
+
+ if (dump_schedule(skb, admin))
+ goto admin_error;
+@@ -2438,11 +2441,15 @@ static int taprio_dump(struct Qdisc *sch, struct sk_buff *skb)
+ nla_nest_end(skb, sched_nest);
+
+ done:
++ rcu_read_unlock();
+ return nla_nest_end(skb, nest);
+
+ admin_error:
+ nla_nest_cancel(skb, sched_nest);
+
++options_error_rcu:
++ rcu_read_unlock();
++
+ options_error:
+ nla_nest_cancel(skb, nest);
+
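
rcu_replace_pointer() in the taprio_change() hunk is shorthand for the following open-coded pair, plus a lockdep assertion that the update-side lock (RTNL here, via lockdep_rtnl_is_held()) is held. Sketch only; the types are taken from the file:

	static struct sched_gate_list *
	admin_replace_sketch(struct taprio_sched *q,
			     struct sched_gate_list *new_admin)
	{
		struct sched_gate_list *old;

		old = rtnl_dereference(q->admin_sched);	/* update side */
		rcu_assign_pointer(q->admin_sched, new_admin);
		return old;	/* freed via call_rcu() once readers drain */
	}
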
+diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
+index 306b536fa89e98..284cec1e20ec11 100644
+--- a/net/smc/smc_pnet.c
++++ b/net/smc/smc_pnet.c
+@@ -753,7 +753,7 @@ static int smc_pnet_add_pnetid(struct net *net, u8 *pnetid)
+
+ write_lock(&sn->pnetids_ndev.lock);
+ list_for_each_entry(pi, &sn->pnetids_ndev.list, list) {
+- if (smc_pnet_match(pnetid, pe->pnetid)) {
++ if (smc_pnet_match(pnetid, pi->pnetid)) {
+ refcount_inc(&pi->refcnt);
+ kfree(pe);
+ goto unlock;
+diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c
+index 0021065a600a03..994c0cd4fddbf1 100644
+--- a/net/smc/smc_wr.c
++++ b/net/smc/smc_wr.c
+@@ -648,8 +648,10 @@ void smc_wr_free_link(struct smc_link *lnk)
+ smc_wr_tx_wait_no_pending_sends(lnk);
+ percpu_ref_kill(&lnk->wr_reg_refs);
+ wait_for_completion(&lnk->reg_ref_comp);
++ percpu_ref_exit(&lnk->wr_reg_refs);
+ percpu_ref_kill(&lnk->wr_tx_refs);
+ wait_for_completion(&lnk->tx_ref_comp);
++ percpu_ref_exit(&lnk->wr_tx_refs);
+
+ if (lnk->wr_rx_dma_addr) {
+ ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
+@@ -912,11 +914,13 @@ int smc_wr_create_link(struct smc_link *lnk)
+ init_waitqueue_head(&lnk->wr_reg_wait);
+ rc = percpu_ref_init(&lnk->wr_reg_refs, smcr_wr_reg_refs_free, 0, GFP_KERNEL);
+ if (rc)
+- goto dma_unmap;
++ goto cancel_ref;
+ init_completion(&lnk->reg_ref_comp);
+ init_waitqueue_head(&lnk->wr_rx_empty_wait);
+ return rc;
+
++cancel_ref:
++ percpu_ref_exit(&lnk->wr_tx_refs);
+ dma_unmap:
+ if (lnk->wr_rx_v2_dma_addr) {
+ ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
+diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
+index e87fd9480acdac..78b5f4f8808b92 100644
+--- a/net/vmw_vsock/virtio_transport_common.c
++++ b/net/vmw_vsock/virtio_transport_common.c
+@@ -1508,6 +1508,7 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
+ {
+ struct virtio_vsock_sock *vvs = vsk->trans;
+ struct sock *sk = sk_vsock(vsk);
++ struct virtio_vsock_hdr *hdr;
+ struct sk_buff *skb;
+ int off = 0;
+ int err;
+@@ -1517,10 +1518,19 @@ int virtio_transport_read_skb(struct vsock_sock *vsk, skb_read_actor_t recv_acto
+ * works for types other than dgrams.
+ */
+ skb = __skb_recv_datagram(sk, &vvs->rx_queue, MSG_DONTWAIT, &off, &err);
++ if (!skb) {
++ spin_unlock_bh(&vvs->rx_lock);
++ return err;
++ }
++
++ hdr = virtio_vsock_hdr(skb);
++ if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
++ vvs->msg_count--;
++
++ virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len));
+ spin_unlock_bh(&vvs->rx_lock);
+
+- if (!skb)
+- return err;
++ virtio_transport_send_credit_update(vsk);
+
+ return recv_actor(sk, skb);
+ }
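
The accounting added to virtio_transport_read_skb(), factored into a helper for clarity (a sketch; the helper name is illustrative, and in the hunk itself rx_lock is already held around the dequeue): every dequeued skb must return its bytes to the receive credit, drop msg_count on an end-of-message boundary, and then, outside the lock, advertise the freed space so the peer's transmit side does not stall.

	static void read_skb_return_credit(struct virtio_vsock_sock *vvs,
					   struct vsock_sock *vsk,
					   struct sk_buff *skb)
	{
		struct virtio_vsock_hdr *hdr = virtio_vsock_hdr(skb);

		/* rx_lock protects the credit counters and msg_count */
		spin_lock_bh(&vvs->rx_lock);
		if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM)
			vvs->msg_count--;
		virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len));
		spin_unlock_bh(&vvs->rx_lock);

		/* outside the lock: tell the peer its buffer space is back */
		virtio_transport_send_credit_update(vsk);
	}
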
+diff --git a/net/vmw_vsock/vsock_bpf.c b/net/vmw_vsock/vsock_bpf.c
+index c42c5cc18f3241..4aa6e74ec2957b 100644
+--- a/net/vmw_vsock/vsock_bpf.c
++++ b/net/vmw_vsock/vsock_bpf.c
+@@ -114,14 +114,6 @@ static int vsock_bpf_recvmsg(struct sock *sk, struct msghdr *msg,
+ return copied;
+ }
+
+-/* Copy of original proto with updated sock_map methods */
+-static struct proto vsock_bpf_prot = {
+- .close = sock_map_close,
+- .recvmsg = vsock_bpf_recvmsg,
+- .sock_is_readable = sk_msg_is_readable,
+- .unhash = sock_map_unhash,
+-};
+-
+ static void vsock_bpf_rebuild_protos(struct proto *prot, const struct proto *base)
+ {
+ *prot = *base;
+diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
+index 9e74f249cb45f1..79790730366985 100644
+--- a/net/wireless/nl80211.c
++++ b/net/wireless/nl80211.c
+@@ -17905,10 +17905,8 @@ void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
+
+ genlmsg_end(msg, hdr);
+
+- rcu_read_lock();
+ genlmsg_multicast_allns(&nl80211_fam, msg, 0,
+- NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
+- rcu_read_unlock();
++ NL80211_MCGRP_REGULATORY);
+
+ return;
+
+@@ -18605,10 +18603,8 @@ void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
+
+ genlmsg_end(msg, hdr);
+
+- rcu_read_lock();
+ genlmsg_multicast_allns(&nl80211_fam, msg, 0,
+- NL80211_MCGRP_REGULATORY, GFP_ATOMIC);
+- rcu_read_unlock();
++ NL80211_MCGRP_REGULATORY);
+
+ return;
+
+diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
+index 6346690d5c699d..04dc0c8a837076 100644
+--- a/net/xfrm/xfrm_device.c
++++ b/net/xfrm/xfrm_device.c
+@@ -263,6 +263,8 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
+
+ dev = dev_get_by_index(net, xuo->ifindex);
+ if (!dev) {
++ struct xfrm_dst_lookup_params params;
++
+ if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
+ saddr = &x->props.saddr;
+ daddr = &x->id.daddr;
+@@ -271,9 +273,12 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
+ daddr = &x->props.saddr;
+ }
+
+- dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
+- x->props.family,
+- xfrm_smark_get(0, x));
++ memset(&params, 0, sizeof(params));
++ params.net = net;
++ params.saddr = saddr;
++ params.daddr = daddr;
++ params.mark = xfrm_smark_get(0, x);
++ dst = __xfrm_dst_lookup(x->props.family, &params);
+ if (IS_ERR(dst))
+ return (is_packet_offload) ? -EINVAL : 0;
+
+diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
+index b699cc2ec35ac3..d788baffbf104e 100644
+--- a/net/xfrm/xfrm_policy.c
++++ b/net/xfrm/xfrm_policy.c
+@@ -251,10 +251,8 @@ static const struct xfrm_if_cb *xfrm_if_get_cb(void)
+ return rcu_dereference(xfrm_if_cb);
+ }
+
+-struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
+- const xfrm_address_t *saddr,
+- const xfrm_address_t *daddr,
+- int family, u32 mark)
++struct dst_entry *__xfrm_dst_lookup(int family,
++ const struct xfrm_dst_lookup_params *params)
+ {
+ const struct xfrm_policy_afinfo *afinfo;
+ struct dst_entry *dst;
+@@ -263,7 +261,7 @@ struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
+ if (unlikely(afinfo == NULL))
+ return ERR_PTR(-EAFNOSUPPORT);
+
+- dst = afinfo->dst_lookup(net, tos, oif, saddr, daddr, mark);
++ dst = afinfo->dst_lookup(params);
+
+ rcu_read_unlock();
+
+@@ -277,6 +275,7 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
+ xfrm_address_t *prev_daddr,
+ int family, u32 mark)
+ {
++ struct xfrm_dst_lookup_params params;
+ struct net *net = xs_net(x);
+ xfrm_address_t *saddr = &x->props.saddr;
+ xfrm_address_t *daddr = &x->id.daddr;
+@@ -291,7 +290,29 @@ static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state *x,
+ daddr = x->coaddr;
+ }
+
+- dst = __xfrm_dst_lookup(net, tos, oif, saddr, daddr, family, mark);
++ params.net = net;
++ params.saddr = saddr;
++ params.daddr = daddr;
++ params.tos = tos;
++ params.oif = oif;
++ params.mark = mark;
++ params.ipproto = x->id.proto;
++ if (x->encap) {
++ switch (x->encap->encap_type) {
++ case UDP_ENCAP_ESPINUDP:
++ params.ipproto = IPPROTO_UDP;
++ params.uli.ports.sport = x->encap->encap_sport;
++ params.uli.ports.dport = x->encap->encap_dport;
++ break;
++ case TCP_ENCAP_ESPINTCP:
++ params.ipproto = IPPROTO_TCP;
++ params.uli.ports.sport = x->encap->encap_sport;
++ params.uli.ports.dport = x->encap->encap_dport;
++ break;
++ }
++ }
++
++ dst = __xfrm_dst_lookup(family, &params);
+
+ if (!IS_ERR(dst)) {
+ if (prev_saddr != saddr)
+@@ -2424,15 +2445,15 @@ int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
+ }
+
+ static int
+-xfrm_get_saddr(struct net *net, int oif, xfrm_address_t *local,
+- xfrm_address_t *remote, unsigned short family, u32 mark)
++xfrm_get_saddr(unsigned short family, xfrm_address_t *saddr,
++ const struct xfrm_dst_lookup_params *params)
+ {
+ int err;
+ const struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
+
+ if (unlikely(afinfo == NULL))
+ return -EINVAL;
+- err = afinfo->get_saddr(net, oif, local, remote, mark);
++ err = afinfo->get_saddr(saddr, params);
+ rcu_read_unlock();
+ return err;
+ }
+@@ -2461,9 +2482,14 @@ xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl,
+ remote = &tmpl->id.daddr;
+ local = &tmpl->saddr;
+ if (xfrm_addr_any(local, tmpl->encap_family)) {
+- error = xfrm_get_saddr(net, fl->flowi_oif,
+- &tmp, remote,
+- tmpl->encap_family, 0);
++ struct xfrm_dst_lookup_params params;
++
++ memset(&params, 0, sizeof(params));
++ params.net = net;
++ params.oif = fl->flowi_oif;
++ params.daddr = remote;
++ error = xfrm_get_saddr(tmpl->encap_family, &tmp,
++ &params);
+ if (error)
+ goto fail;
+ local = &tmp;
+diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
+index 979f23cded401a..1d91b42e799710 100644
+--- a/net/xfrm/xfrm_user.c
++++ b/net/xfrm/xfrm_user.c
+@@ -176,6 +176,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
+ struct netlink_ext_ack *extack)
+ {
+ int err;
++ u16 family = p->sel.family;
+
+ err = -EINVAL;
+ switch (p->family) {
+@@ -196,7 +197,10 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
+ goto out;
+ }
+
+- switch (p->sel.family) {
++ if (!family && !(p->flags & XFRM_STATE_AF_UNSPEC))
++ family = p->family;
++
++ switch (family) {
+ case AF_UNSPEC:
+ break;
+
+@@ -995,7 +999,9 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
+ if (!nla)
+ return -EMSGSIZE;
+ ap = nla_data(nla);
+- memcpy(ap, auth, sizeof(struct xfrm_algo_auth));
++ strscpy_pad(ap->alg_name, auth->alg_name, sizeof(ap->alg_name));
++ ap->alg_key_len = auth->alg_key_len;
++ ap->alg_trunc_len = auth->alg_trunc_len;
+ if (redact_secret && auth->alg_key_len)
+ memset(ap->alg_key, 0, (auth->alg_key_len + 7) / 8);
+ else
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index 2c23a5a2860860..54bc18e8164b32 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -582,11 +582,18 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+
+ {
+- struct selinux_fs_info *fsi = file_inode(file)->i_sb->s_fs_info;
++ struct selinux_fs_info *fsi;
+ struct selinux_load_state load_state;
+ ssize_t length;
+ void *data = NULL;
+
++ /* no partial writes */
++ if (*ppos)
++ return -EINVAL;
++ /* no empty policies */
++ if (!count)
++ return -EINVAL;
++
+ mutex_lock(&selinux_state.policy_mutex);
+
+ length = avc_has_perm(current_sid(), SECINITSID_SECURITY,
+@@ -594,26 +601,22 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
+ if (length)
+ goto out;
+
+- /* No partial writes. */
+- length = -EINVAL;
+- if (*ppos != 0)
+- goto out;
+-
+- length = -ENOMEM;
+ data = vmalloc(count);
+- if (!data)
++ if (!data) {
++ length = -ENOMEM;
+ goto out;
+-
+- length = -EFAULT;
+- if (copy_from_user(data, buf, count) != 0)
++ }
++ if (copy_from_user(data, buf, count) != 0) {
++ length = -EFAULT;
+ goto out;
++ }
+
+ length = security_load_policy(data, count, &load_state);
+ if (length) {
+ pr_warn_ratelimited("SELinux: failed to load policy\n");
+ goto out;
+ }
+-
++ fsi = file_inode(file)->i_sb->s_fs_info;
+ length = sel_make_policy_nodes(fsi, load_state.policy);
+ if (length) {
+ pr_warn_ratelimited("SELinux: failed to initialize selinuxfs\n");
+@@ -622,13 +625,12 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
+ }
+
+ selinux_policy_commit(&load_state);
+-
+ length = count;
+-
+ audit_log(audit_context(), GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
+ "auid=%u ses=%u lsm=selinux res=1",
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
+ audit_get_sessionid(current));
++
+ out:
+ mutex_unlock(&selinux_state.policy_mutex);
+ vfree(data);
+diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
+index 5f0f8d9c08d1e7..8c6254ff143eb4 100644
+--- a/sound/firewire/amdtp-stream.c
++++ b/sound/firewire/amdtp-stream.c
+@@ -172,6 +172,9 @@ static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
+ step = max(step, amdtp_syt_intervals[i]);
+ }
+
++ if (step == 0)
++ return -EINVAL;
++
+ t.min = roundup(s->min, step);
+ t.max = rounddown(s->max, step);
+ t.integer = 1;
+diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
+index 9698ebe3fbc2e7..233b03bb4f9869 100644
+--- a/sound/pci/hda/Kconfig
++++ b/sound/pci/hda/Kconfig
+@@ -173,7 +173,7 @@ config SND_HDA_SCODEC_TAS2781_I2C
+ depends on SND_SOC
+ select SND_SOC_TAS2781_COMLIB
+ select SND_SOC_TAS2781_FMWLIB
+- select CRC32_SARWATE
++ select CRC32
+ help
+ Say Y or M here to include TAS2781 I2C HD-audio side codec support
+ in snd-hda-intel driver, such as ALC287.
+diff --git a/sound/pci/hda/patch_cs8409.c b/sound/pci/hda/patch_cs8409.c
+index e41316e2e98338..892223d9e64aba 100644
+--- a/sound/pci/hda/patch_cs8409.c
++++ b/sound/pci/hda/patch_cs8409.c
+@@ -1411,8 +1411,9 @@ void dolphin_fixups(struct hda_codec *codec, const struct hda_fixup *fix, int ac
+ kctrl = snd_hda_gen_add_kctl(&spec->gen, "Line Out Playback Volume",
+ &cs42l42_dac_volume_mixer);
+ /* Update Line Out kcontrol template */
+- kctrl->private_value = HDA_COMPOSE_AMP_VAL_OFS(DOLPHIN_HP_PIN_NID, 3, CS8409_CODEC1,
+- HDA_OUTPUT, CS42L42_VOL_DAC) | HDA_AMP_VAL_MIN_MUTE;
++ if (kctrl)
++ kctrl->private_value = HDA_COMPOSE_AMP_VAL_OFS(DOLPHIN_HP_PIN_NID, 3, CS8409_CODEC1,
++ HDA_OUTPUT, CS42L42_VOL_DAC) | HDA_AMP_VAL_MIN_MUTE;
+ cs8409_enable_ur(codec, 0);
+ snd_hda_codec_set_name(codec, "CS8409/CS42L42");
+ break;
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 07e1547fff2e51..9be5a5c509f09a 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -3857,20 +3857,18 @@ static void alc_default_init(struct hda_codec *codec)
+
+ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+
+- if (hp_pin_sense)
++ if (hp_pin_sense) {
+ msleep(2);
+
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-
+- if (hp_pin_sense)
+- msleep(85);
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
++ msleep(75);
+
+- if (hp_pin_sense)
+- msleep(100);
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE);
++ msleep(75);
++ }
+ }
+
+ static void alc_default_shutup(struct hda_codec *codec)
+@@ -3886,22 +3884,20 @@ static void alc_default_shutup(struct hda_codec *codec)
+
+ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+
+- if (hp_pin_sense)
++ if (hp_pin_sense) {
+ msleep(2);
+
+- snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+-
+- if (hp_pin_sense)
+- msleep(85);
+-
+- if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+- if (hp_pin_sense)
+- msleep(100);
++ msleep(75);
+
++ if (!spec->no_shutup_pins)
++ snd_hda_codec_write(codec, hp_pin, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
++
++ msleep(75);
++ }
+ alc_auto_setup_eapd(codec, false);
+ alc_shutup_pins(codec);
+ }
+@@ -7362,6 +7358,7 @@ enum {
+ ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+ ALC256_FIXUP_ASUS_HEADSET_MIC,
+ ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
++ ALC255_FIXUP_PREDATOR_SUBWOOFER,
+ ALC299_FIXUP_PREDATOR_SPK,
+ ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
+ ALC289_FIXUP_DELL_SPK1,
+@@ -8709,6 +8706,13 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ },
++ [ALC255_FIXUP_PREDATOR_SUBWOOFER] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ { 0x17, 0x90170151 }, /* use as internal speaker (LFE) */
++ { 0x1b, 0x90170152 } /* use as internal speaker (back) */
++ }
++ },
+ [ALC299_FIXUP_PREDATOR_SPK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -9682,6 +9686,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1166, "Acer Veriton N4640G", ALC269_FIXUP_LIFEBOOK),
+ SND_PCI_QUIRK(0x1025, 0x1167, "Acer Veriton N6640G", ALC269_FIXUP_LIFEBOOK),
++ SND_PCI_QUIRK(0x1025, 0x1177, "Acer Predator G9-593", ALC255_FIXUP_PREDATOR_SUBWOOFER),
++ SND_PCI_QUIRK(0x1025, 0x1178, "Acer Predator G9-593", ALC255_FIXUP_PREDATOR_SUBWOOFER),
+ SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
+ SND_PCI_QUIRK(0x1025, 0x1247, "Acer vCopperbox", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS),
+ SND_PCI_QUIRK(0x1025, 0x1248, "Acer Veriton N4660G", ALC269VC_FIXUP_ACER_MIC_NO_PRESENCE),
+diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c
+index 248e3bcbf386b0..76f5d926d1eac1 100644
+--- a/sound/soc/amd/yc/acp6x-mach.c
++++ b/sound/soc/amd/yc/acp6x-mach.c
+@@ -444,6 +444,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
+ DMI_MATCH(DMI_BOARD_NAME, "8A3E"),
+ }
+ },
++ {
++ .driver_data = &acp6x_card,
++ .matches = {
++ DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
++ DMI_MATCH(DMI_BOARD_NAME, "8A7F"),
++ }
++ },
+ {
+ .driver_data = &acp6x_card,
+ .matches = {
+diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c
+index 29197d34ec099d..7e9f0ab9141245 100644
+--- a/sound/soc/codecs/lpass-rx-macro.c
++++ b/sound/soc/codecs/lpass-rx-macro.c
+@@ -909,7 +909,7 @@ static const struct reg_default rx_defaults[] = {
+ { CDC_RX_BCL_VBAT_PK_EST2, 0x01 },
+ { CDC_RX_BCL_VBAT_PK_EST3, 0x40 },
+ { CDC_RX_BCL_VBAT_RF_PROC1, 0x2A },
+- { CDC_RX_BCL_VBAT_RF_PROC1, 0x00 },
++ { CDC_RX_BCL_VBAT_RF_PROC2, 0x00 },
+ { CDC_RX_BCL_VBAT_TAC1, 0x00 },
+ { CDC_RX_BCL_VBAT_TAC2, 0x18 },
+ { CDC_RX_BCL_VBAT_TAC3, 0x18 },
+diff --git a/sound/soc/codecs/max98388.c b/sound/soc/codecs/max98388.c
+index cde5e85946cb88..87386404129d92 100644
+--- a/sound/soc/codecs/max98388.c
++++ b/sound/soc/codecs/max98388.c
+@@ -764,6 +764,7 @@ static int max98388_dai_tdm_slot(struct snd_soc_dai *dai,
+ addr = MAX98388_R2044_PCM_TX_CTRL1 + (cnt / 8);
+ bits = cnt % 8;
+ regmap_update_bits(max98388->regmap, addr, bits, bits);
++ slot_found++;
+ if (slot_found >= MAX_NUM_CH)
+ break;
+ }
+diff --git a/sound/soc/fsl/fsl_micfil.c b/sound/soc/fsl/fsl_micfil.c
+index 0d37edb70261cb..9407179af5d574 100644
+--- a/sound/soc/fsl/fsl_micfil.c
++++ b/sound/soc/fsl/fsl_micfil.c
+@@ -67,6 +67,7 @@ struct fsl_micfil_soc_data {
+ bool imx;
+ bool use_edma;
+ bool use_verid;
++ bool volume_sx;
+ u64 formats;
+ };
+
+@@ -76,6 +77,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx8mm = {
+ .fifo_depth = 8,
+ .dataline = 0xf,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
++ .volume_sx = true,
+ };
+
+ static struct fsl_micfil_soc_data fsl_micfil_imx8mp = {
+@@ -84,6 +86,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx8mp = {
+ .fifo_depth = 32,
+ .dataline = 0xf,
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
++ .volume_sx = false,
+ };
+
+ static struct fsl_micfil_soc_data fsl_micfil_imx93 = {
+@@ -94,6 +97,7 @@ static struct fsl_micfil_soc_data fsl_micfil_imx93 = {
+ .formats = SNDRV_PCM_FMTBIT_S32_LE,
+ .use_edma = true,
+ .use_verid = true,
++ .volume_sx = false,
+ };
+
+ static const struct of_device_id fsl_micfil_dt_ids[] = {
+@@ -317,7 +321,26 @@ static int hwvad_detected(struct snd_kcontrol *kcontrol,
+ return 0;
+ }
+
+-static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
++static const struct snd_kcontrol_new fsl_micfil_volume_controls[] = {
++ SOC_SINGLE_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
++ MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0, gain_tlv),
++ SOC_SINGLE_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
++ MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0, gain_tlv),
++ SOC_SINGLE_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
++ MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0, gain_tlv),
++ SOC_SINGLE_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
++ MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0, gain_tlv),
++ SOC_SINGLE_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
++ MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0, gain_tlv),
++ SOC_SINGLE_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
++ MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0, gain_tlv),
++ SOC_SINGLE_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
++ MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0, gain_tlv),
++ SOC_SINGLE_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
++ MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0, gain_tlv),
++};
++
++static const struct snd_kcontrol_new fsl_micfil_volume_sx_controls[] = {
+ SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
+@@ -334,6 +357,9 @@ static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
+ MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
+ SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
+ MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
++};
++
++static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
+ SOC_ENUM_EXT("MICFIL Quality Select",
+ fsl_micfil_quality_enum,
+ micfil_quality_get, micfil_quality_set),
+@@ -801,6 +827,20 @@ static int fsl_micfil_dai_probe(struct snd_soc_dai *cpu_dai)
+ return 0;
+ }
+
++static int fsl_micfil_component_probe(struct snd_soc_component *component)
++{
++ struct fsl_micfil *micfil = snd_soc_component_get_drvdata(component);
++
++ if (micfil->soc->volume_sx)
++ snd_soc_add_component_controls(component, fsl_micfil_volume_sx_controls,
++ ARRAY_SIZE(fsl_micfil_volume_sx_controls));
++ else
++ snd_soc_add_component_controls(component, fsl_micfil_volume_controls,
++ ARRAY_SIZE(fsl_micfil_volume_controls));
++
++ return 0;
++}
++
+ static const struct snd_soc_dai_ops fsl_micfil_dai_ops = {
+ .probe = fsl_micfil_dai_probe,
+ .startup = fsl_micfil_startup,
+@@ -821,6 +861,7 @@ static struct snd_soc_dai_driver fsl_micfil_dai = {
+
+ static const struct snd_soc_component_driver fsl_micfil_component = {
+ .name = "fsl-micfil-dai",
++ .probe = fsl_micfil_component_probe,
+ .controls = fsl_micfil_snd_controls,
+ .num_controls = ARRAY_SIZE(fsl_micfil_snd_controls),
+ .legacy_dai_naming = 1,
+diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
+index 3d202398c5411b..aa15f56ca139d2 100644
+--- a/sound/soc/fsl/fsl_sai.c
++++ b/sound/soc/fsl/fsl_sai.c
+@@ -604,6 +604,9 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+
+ val_cr4 |= FSL_SAI_CR4_FRSZ(slots);
+
++ /* Set to avoid channel swap */
++ val_cr4 |= FSL_SAI_CR4_FCONT;
++
+ /* Set to output mode to avoid tri-stated data pins */
+ if (tx)
+ val_cr4 |= FSL_SAI_CR4_CHMOD;
+@@ -690,7 +693,7 @@ static int fsl_sai_hw_params(struct snd_pcm_substream *substream,
+
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx, ofs),
+ FSL_SAI_CR4_SYWD_MASK | FSL_SAI_CR4_FRSZ_MASK |
+- FSL_SAI_CR4_CHMOD_MASK,
++ FSL_SAI_CR4_CHMOD_MASK | FSL_SAI_CR4_FCONT_MASK,
+ val_cr4);
+ regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx, ofs),
+ FSL_SAI_CR5_WNW_MASK | FSL_SAI_CR5_W0W_MASK |
+diff --git a/sound/soc/fsl/fsl_sai.h b/sound/soc/fsl/fsl_sai.h
+index 550df87b6a068c..eba465c2387b6e 100644
+--- a/sound/soc/fsl/fsl_sai.h
++++ b/sound/soc/fsl/fsl_sai.h
+@@ -137,6 +137,7 @@
+
+ /* SAI Transmit and Receive Configuration 4 Register */
+
++#define FSL_SAI_CR4_FCONT_MASK BIT(28)
+ #define FSL_SAI_CR4_FCONT BIT(28)
+ #define FSL_SAI_CR4_FCOMB_SHIFT BIT(26)
+ #define FSL_SAI_CR4_FCOMB_SOFT BIT(27)
+diff --git a/sound/soc/loongson/loongson_card.c b/sound/soc/loongson/loongson_card.c
+index 8cc54aedd00242..010e959d4c69a8 100644
+--- a/sound/soc/loongson/loongson_card.c
++++ b/sound/soc/loongson/loongson_card.c
+@@ -137,6 +137,7 @@ static int loongson_card_parse_of(struct loongson_card_data *data)
+ dev_err(dev, "getting cpu dlc error (%d)\n", ret);
+ goto err;
+ }
++ loongson_dai_links[i].platforms->of_node = loongson_dai_links[i].cpus->of_node;
+
+ ret = snd_soc_of_get_dlc(codec, NULL, loongson_dai_links[i].codecs, 0);
+ if (ret < 0) {
+diff --git a/sound/soc/qcom/lpass-cpu.c b/sound/soc/qcom/lpass-cpu.c
+index 73b42d9ee24471..e587455dc40a0e 100644
+--- a/sound/soc/qcom/lpass-cpu.c
++++ b/sound/soc/qcom/lpass-cpu.c
+@@ -1246,6 +1246,8 @@ int asoc_qcom_lpass_cpu_platform_probe(struct platform_device *pdev)
+ /* Allocation for i2sctl regmap fields */
+ drvdata->i2sctl = devm_kzalloc(&pdev->dev, sizeof(struct lpaif_i2sctl),
+ GFP_KERNEL);
++ if (!drvdata->i2sctl)
++ return -ENOMEM;
+
+ /* Initialize bitfields for dai I2SCTL register */
+ ret = lpass_cpu_init_i2sctl_bitfields(dev, drvdata->i2sctl,
+diff --git a/sound/soc/qcom/sm8250.c b/sound/soc/qcom/sm8250.c
+index 6558bf2e14e83d..9eb8ae0196d91f 100644
+--- a/sound/soc/qcom/sm8250.c
++++ b/sound/soc/qcom/sm8250.c
+@@ -153,6 +153,7 @@ static int sm8250_platform_probe(struct platform_device *pdev)
+
+ static const struct of_device_id snd_sm8250_dt_match[] = {
+ {.compatible = "qcom,sm8250-sndcard"},
++ {.compatible = "qcom,qrb4210-rb2-sndcard"},
+ {.compatible = "qcom,qrb5165-rb5-sndcard"},
+ {}
+ };
+diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
+index 1bd7114c472a8b..98c7be340a536d 100644
+--- a/sound/soc/sh/rcar/core.c
++++ b/sound/soc/sh/rcar/core.c
+@@ -1297,7 +1297,9 @@ static int rsnd_dai_of_node(struct rsnd_priv *priv, int *is_graph)
+ if (!of_node_name_eq(ports, "ports") &&
+ !of_node_name_eq(ports, "port"))
+ continue;
+- priv->component_dais[i] = of_graph_get_endpoint_count(ports);
++ priv->component_dais[i] =
++ of_graph_get_endpoint_count(of_node_name_eq(ports, "ports") ?
++ ports : np);
+ nr += priv->component_dais[i];
+ i++;
+ if (i >= RSND_MAX_COMPONENT) {
+@@ -1510,7 +1512,8 @@ static int rsnd_dai_probe(struct rsnd_priv *priv)
+ if (!of_node_name_eq(ports, "ports") &&
+ !of_node_name_eq(ports, "port"))
+ continue;
+- for_each_endpoint_of_node(ports, dai_np) {
++ for_each_endpoint_of_node(of_node_name_eq(ports, "ports") ?
++ ports : np, dai_np) {
+ __rsnd_dai_probe(priv, dai_np, dai_np, 0, dai_i);
+ if (rsnd_is_gen3(priv) || rsnd_is_gen4(priv)) {
+ rdai = rsnd_rdai_get(priv, dai_i);
+diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
+index ba6e346c8d669a..977ec094bc2a6c 100644
+--- a/tools/include/uapi/linux/bpf.h
++++ b/tools/include/uapi/linux/bpf.h
+@@ -6559,20 +6559,27 @@ struct bpf_link_info {
+ __aligned_u64 file_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from file_name */
++ __u64 cookie;
+ } uprobe; /* BPF_PERF_EVENT_UPROBE, BPF_PERF_EVENT_URETPROBE */
+ struct {
+ __aligned_u64 func_name; /* in/out */
+ __u32 name_len;
+ __u32 offset; /* offset from func_name */
+ __u64 addr;
++ __u64 missed;
++ __u64 cookie;
+ } kprobe; /* BPF_PERF_EVENT_KPROBE, BPF_PERF_EVENT_KRETPROBE */
+ struct {
+ __aligned_u64 tp_name; /* in/out */
+ __u32 name_len;
++ __u32 :32;
++ __u64 cookie;
+ } tracepoint; /* BPF_PERF_EVENT_TRACEPOINT */
+ struct {
+ __u64 config;
+ __u32 type;
++ __u32 :32;
++ __u64 cookie;
+ } event; /* BPF_PERF_EVENT_EVENT */
+ };
+ } perf_event;
+diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
+index ab364e95a9b23e..f5a3a84fac955b 100644
+--- a/tools/testing/selftests/bpf/Makefile
++++ b/tools/testing/selftests/bpf/Makefile
+@@ -200,7 +200,7 @@ $(OUTPUT)/%:%.c
+ ifeq ($(SRCARCH),x86)
+ LLD := lld
+ else
+-LLD := ld
++LLD := $(shell command -v $(LD))
+ endif
+
+ # Filter out -static for liburandom_read.so and its dependent targets so that static builds
+diff --git a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+index 9d768e08371495..5b0c6a04cdbfe0 100644
+--- a/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
++++ b/tools/testing/selftests/bpf/prog_tests/fill_link_info.c
+@@ -30,6 +30,8 @@ static noinline void uprobe_func(void)
+ asm volatile ("");
+ }
+
++#define PERF_EVENT_COOKIE 0xdeadbeef
++
+ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long addr,
+ ssize_t offset, ssize_t entry_offset)
+ {
+@@ -61,8 +63,11 @@ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long add
+ ASSERT_EQ(info.perf_event.kprobe.addr, addr + entry_offset,
+ "kprobe_addr");
+
++ ASSERT_EQ(info.perf_event.kprobe.cookie, PERF_EVENT_COOKIE, "kprobe_cookie");
++
++ ASSERT_EQ(info.perf_event.kprobe.name_len, strlen(KPROBE_FUNC) + 1,
++ "name_len");
+ if (!info.perf_event.kprobe.func_name) {
+- ASSERT_EQ(info.perf_event.kprobe.name_len, 0, "name_len");
+ info.perf_event.kprobe.func_name = ptr_to_u64(&buf);
+ info.perf_event.kprobe.name_len = sizeof(buf);
+ goto again;
+@@ -73,13 +78,16 @@ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long add
+ ASSERT_EQ(err, 0, "cmp_kprobe_func_name");
+ break;
+ case BPF_PERF_EVENT_TRACEPOINT:
++ ASSERT_EQ(info.perf_event.tracepoint.name_len, strlen(TP_NAME) + 1,
++ "name_len");
+ if (!info.perf_event.tracepoint.tp_name) {
+- ASSERT_EQ(info.perf_event.tracepoint.name_len, 0, "name_len");
+ info.perf_event.tracepoint.tp_name = ptr_to_u64(&buf);
+ info.perf_event.tracepoint.name_len = sizeof(buf);
+ goto again;
+ }
+
++ ASSERT_EQ(info.perf_event.tracepoint.cookie, PERF_EVENT_COOKIE, "tracepoint_cookie");
++
+ err = strncmp(u64_to_ptr(info.perf_event.tracepoint.tp_name), TP_NAME,
+ strlen(TP_NAME));
+ ASSERT_EQ(err, 0, "cmp_tp_name");
+@@ -88,13 +96,16 @@ static int verify_perf_link_info(int fd, enum bpf_perf_event_type type, long add
+ case BPF_PERF_EVENT_URETPROBE:
+ ASSERT_EQ(info.perf_event.uprobe.offset, offset, "uprobe_offset");
+
++ ASSERT_EQ(info.perf_event.uprobe.name_len, strlen(UPROBE_FILE) + 1,
++ "name_len");
+ if (!info.perf_event.uprobe.file_name) {
+- ASSERT_EQ(info.perf_event.uprobe.name_len, 0, "name_len");
+ info.perf_event.uprobe.file_name = ptr_to_u64(&buf);
+ info.perf_event.uprobe.name_len = sizeof(buf);
+ goto again;
+ }
+
++ ASSERT_EQ(info.perf_event.uprobe.cookie, PERF_EVENT_COOKIE, "uprobe_cookie");
++
+ err = strncmp(u64_to_ptr(info.perf_event.uprobe.file_name), UPROBE_FILE,
+ strlen(UPROBE_FILE));
+ ASSERT_EQ(err, 0, "cmp_file_name");
+@@ -138,16 +149,17 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
+ DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
+ .attach_mode = PROBE_ATTACH_MODE_LINK,
+ .retprobe = type == BPF_PERF_EVENT_KRETPROBE,
++ .bpf_cookie = PERF_EVENT_COOKIE,
+ );
+ ssize_t entry_offset = 0;
++ struct bpf_link *link;
+ int link_fd, err;
+
+- skel->links.kprobe_run = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run,
+- KPROBE_FUNC, &opts);
+- if (!ASSERT_OK_PTR(skel->links.kprobe_run, "attach_kprobe"))
++ link = bpf_program__attach_kprobe_opts(skel->progs.kprobe_run, KPROBE_FUNC, &opts);
++ if (!ASSERT_OK_PTR(link, "attach_kprobe"))
+ return;
+
+- link_fd = bpf_link__fd(skel->links.kprobe_run);
++ link_fd = bpf_link__fd(link);
+ if (!invalid) {
+ /* See also arch_adjust_kprobe_addr(). */
+ if (skel->kconfig->CONFIG_X86_KERNEL_IBT)
+@@ -157,39 +169,48 @@ static void test_kprobe_fill_link_info(struct test_fill_link_info *skel,
+ } else {
+ kprobe_fill_invalid_user_buffer(link_fd);
+ }
+- bpf_link__detach(skel->links.kprobe_run);
++ bpf_link__destroy(link);
+ }
+
+ static void test_tp_fill_link_info(struct test_fill_link_info *skel)
+ {
++ DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts,
++ .bpf_cookie = PERF_EVENT_COOKIE,
++ );
++ struct bpf_link *link;
+ int link_fd, err;
+
+- skel->links.tp_run = bpf_program__attach_tracepoint(skel->progs.tp_run, TP_CAT, TP_NAME);
+- if (!ASSERT_OK_PTR(skel->links.tp_run, "attach_tp"))
++ link = bpf_program__attach_tracepoint_opts(skel->progs.tp_run, TP_CAT, TP_NAME, &opts);
++ if (!ASSERT_OK_PTR(link, "attach_tp"))
+ return;
+
+- link_fd = bpf_link__fd(skel->links.tp_run);
++ link_fd = bpf_link__fd(link);
+ err = verify_perf_link_info(link_fd, BPF_PERF_EVENT_TRACEPOINT, 0, 0, 0);
+ ASSERT_OK(err, "verify_perf_link_info");
+- bpf_link__detach(skel->links.tp_run);
++ bpf_link__destroy(link);
+ }
+
+ static void test_uprobe_fill_link_info(struct test_fill_link_info *skel,
+ enum bpf_perf_event_type type)
+ {
++ DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts,
++ .retprobe = type == BPF_PERF_EVENT_URETPROBE,
++ .bpf_cookie = PERF_EVENT_COOKIE,
++ );
++ struct bpf_link *link;
+ int link_fd, err;
+
+- skel->links.uprobe_run = bpf_program__attach_uprobe(skel->progs.uprobe_run,
+- type == BPF_PERF_EVENT_URETPROBE,
+- 0, /* self pid */
+- UPROBE_FILE, uprobe_offset);
+- if (!ASSERT_OK_PTR(skel->links.uprobe_run, "attach_uprobe"))
++ link = bpf_program__attach_uprobe_opts(skel->progs.uprobe_run,
++ 0, /* self pid */
++ UPROBE_FILE, uprobe_offset,
++ &opts);
++ if (!ASSERT_OK_PTR(link, "attach_uprobe"))
+ return;
+
+- link_fd = bpf_link__fd(skel->links.uprobe_run);
++ link_fd = bpf_link__fd(link);
+ err = verify_perf_link_info(link_fd, type, 0, uprobe_offset, 0);
+ ASSERT_OK(err, "verify_perf_link_info");
+- bpf_link__detach(skel->links.uprobe_run);
++ bpf_link__destroy(link);
+ }
+
+ static int verify_kmulti_link_info(int fd, bool retprobe)
+@@ -278,24 +299,24 @@ static void test_kprobe_multi_fill_link_info(struct test_fill_link_info *skel,
+ bool retprobe, bool invalid)
+ {
+ LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
++ struct bpf_link *link;
+ int link_fd, err;
+
+ opts.syms = kmulti_syms;
+ opts.cnt = KMULTI_CNT;
+ opts.retprobe = retprobe;
+- skel->links.kmulti_run = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run,
+- NULL, &opts);
+- if (!ASSERT_OK_PTR(skel->links.kmulti_run, "attach_kprobe_multi"))
++ link = bpf_program__attach_kprobe_multi_opts(skel->progs.kmulti_run, NULL, &opts);
++ if (!ASSERT_OK_PTR(link, "attach_kprobe_multi"))
+ return;
+
+- link_fd = bpf_link__fd(skel->links.kmulti_run);
++ link_fd = bpf_link__fd(link);
+ if (!invalid) {
+ err = verify_kmulti_link_info(link_fd, retprobe);
+ ASSERT_OK(err, "verify_kmulti_link_info");
+ } else {
+ verify_kmulti_invalid_user_buffer(link_fd);
+ }
+- bpf_link__detach(skel->links.kmulti_run);
++ bpf_link__destroy(link);
+ }
+
+ void test_fill_link_info(void)
+diff --git a/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c
+index 692216c0ad3d45..3e8340c2408f37 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c
++++ b/tools/testing/selftests/bpf/progs/verifier_helper_value_access.c
+@@ -91,7 +91,7 @@ l0_%=: exit; \
+
+ SEC("tracepoint")
+ __description("helper access to map: empty range")
+-__failure __msg("invalid access to map value, value_size=48 off=0 size=0")
++__failure __msg("R2 invalid zero-sized read")
+ __naked void access_to_map_empty_range(void)
+ {
+ asm volatile (" \
+@@ -221,7 +221,7 @@ l0_%=: exit; \
+
+ SEC("tracepoint")
+ __description("helper access to adjusted map (via const imm): empty range")
+-__failure __msg("invalid access to map value, value_size=48 off=4 size=0")
++__failure __msg("R2 invalid zero-sized read")
+ __naked void via_const_imm_empty_range(void)
+ {
+ asm volatile (" \
+@@ -386,7 +386,7 @@ l0_%=: exit; \
+
+ SEC("tracepoint")
+ __description("helper access to adjusted map (via const reg): empty range")
+-__failure __msg("R1 min value is outside of the allowed memory range")
++__failure __msg("R2 invalid zero-sized read")
+ __naked void via_const_reg_empty_range(void)
+ {
+ asm volatile (" \
+@@ -556,7 +556,7 @@ l0_%=: exit; \
+
+ SEC("tracepoint")
+ __description("helper access to adjusted map (via variable): empty range")
+-__failure __msg("R1 min value is outside of the allowed memory range")
++__failure __msg("R2 invalid zero-sized read")
+ __naked void map_via_variable_empty_range(void)
+ {
+ asm volatile (" \
+diff --git a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
+index f67390224a9cf9..7cc83acac7271d 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
++++ b/tools/testing/selftests/bpf/progs/verifier_raw_stack.c
+@@ -64,7 +64,7 @@ __naked void load_bytes_negative_len_2(void)
+
+ SEC("tc")
+ __description("raw_stack: skb_load_bytes, zero len")
+-__failure __msg("invalid zero-sized read")
++__failure __msg("R4 invalid zero-sized read: u64=[0,0]")
+ __naked void skb_load_bytes_zero_len(void)
+ {
+ asm volatile (" \
+diff --git a/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc
+index 20e42c030095b0..61877d16645112 100644
+--- a/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc
++++ b/tools/testing/selftests/ftrace/test.d/dynevent/fprobe_syntax_errors.tc
+@@ -34,7 +34,9 @@ check_error 'f vfs_read ^$stack10000' # BAD_STACK_NUM
+
+ check_error 'f vfs_read ^$arg10000' # BAD_ARG_NUM
+
++if !grep -q 'kernel return probes support:' README; then
+ check_error 'f vfs_read $retval ^$arg1' # BAD_VAR
++fi
+ check_error 'f vfs_read ^$none_var' # BAD_VAR
+ check_error 'f vfs_read ^'$REG # BAD_VAR
+
+@@ -99,7 +101,9 @@ if grep -q "<argname>" README; then
+ check_error 'f vfs_read args=^$arg*' # BAD_VAR_ARGS
+ check_error 'f vfs_read +0(^$arg*)' # BAD_VAR_ARGS
+ check_error 'f vfs_read $arg* ^$arg*' # DOUBLE_ARGS
++if !grep -q 'kernel return probes support:' README; then
+ check_error 'f vfs_read%return ^$arg*' # NOFENTRY_ARGS
++fi
+ check_error 'f vfs_read ^hoge' # NO_BTFARG
+ check_error 'f kfree ^$arg10' # NO_BTFARG (exceed the number of parameters)
+ check_error 'f kfree%return ^$retval' # NO_RETVAL
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
+index 65fbb26fd58c10..a16c6a6f6055cf 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_syntax_errors.tc
+@@ -108,7 +108,9 @@ if grep -q "<argname>" README; then
+ check_error 'p vfs_read args=^$arg*' # BAD_VAR_ARGS
+ check_error 'p vfs_read +0(^$arg*)' # BAD_VAR_ARGS
+ check_error 'p vfs_read $arg* ^$arg*' # DOUBLE_ARGS
++if !grep -q 'kernel return probes support:' README; then
+ check_error 'r vfs_read ^$arg*' # NOFENTRY_ARGS
++fi
+ check_error 'p vfs_read+8 ^$arg*' # NOFENTRY_ARGS
+ check_error 'p vfs_read ^hoge' # NO_BTFARG
+ check_error 'p kfree ^$arg10' # NO_BTFARG (exceed the number of parameters)