-rw-r--r--  0000_README              |    4
-rw-r--r--  1009_linux-5.15.10.patch | 1480
2 files changed, 1484 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index d936a301..bce570a9 100644
--- a/0000_README
+++ b/0000_README
@@ -79,6 +79,10 @@ Patch: 1008_linux-5.15.9.patch
 From: http://www.kernel.org
 Desc: Linux 5.15.9
 
+Patch: 1009_linux-5.15.10.patch
+From: http://www.kernel.org
+Desc: Linux 5.15.10
+
 Patch: 1500_XATTR_USER_PREFIX.patch
 From: https://bugs.gentoo.org/show_bug.cgi?id=470644
 Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1009_linux-5.15.10.patch b/1009_linux-5.15.10.patch
new file mode 100644
index 00000000..53cd4f05
--- /dev/null
+++ b/1009_linux-5.15.10.patch
@@ -0,0 +1,1480 @@
+diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
+index 877183cf42787..1ef849dc74d7e 100644
+--- a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
++++ b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
+@@ -79,6 +79,8 @@ properties:
+ 
+         properties:
+           data-lanes:
++            description:
++              Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lines.
+             items:
+               minItems: 1
+               maxItems: 4
+@@ -91,18 +93,6 @@ properties:
+     required:
+       - data-lanes
+ 
+-    allOf:
+-      - if:
+-          properties:
+-            compatible:
+-              contains:
+-                const: fsl,imx7-mipi-csi2
+-        then:
+-          properties:
+-            data-lanes:
+-              items:
+-                maxItems: 2
+-
+   port@1:
+     $ref: /schemas/graph.yaml#/properties/port
+     description:
+diff --git a/Makefile b/Makefile
+index e6d2ea920a1d1..d5e266291e5db 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 15
+-SUBLEVEL = 9
++SUBLEVEL = 10
+ EXTRAVERSION =
+ NAME = Trick or Treat
+ 
+diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
+index a0e78a6027be0..c75e84489f57b 100644
+--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
++++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
+@@ -416,6 +416,12 @@ static inline bool __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
+  */
+ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
+ {
++	/*
++	 * Save PSTATE early so that we can evaluate the vcpu mode
++	 * early on.
++	 */
++	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
++
+ 	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
+ 		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
+ 
+diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+index de7e14c862e6c..7ecca8b078519 100644
+--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
++++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+@@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
+ static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
+ {
+ 	ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
+-	ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
++	/*
++	 * Guest PSTATE gets saved at guest fixup time in all
++	 * cases. We still need to handle the nVHE host side here.
++	 */
++	if (!has_vhe() && ctxt->__hyp_running_vcpu)
++		ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
+ 
+ 	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
+ 		ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
+diff --git a/arch/s390/lib/test_unwind.c b/arch/s390/lib/test_unwind.c
+index ecf327d743a03..c0635cf787e31 100644
+--- a/arch/s390/lib/test_unwind.c
++++ b/arch/s390/lib/test_unwind.c
+@@ -171,10 +171,11 @@ static noinline int unwindme_func4(struct unwindme *u)
+ 		}
+ 
+ 		/*
+-		 * trigger specification exception
++		 * Trigger operation exception; use insn notation to bypass
++		 * llvm's integrated assembler sanity checks.
+ 		 */
+ 		asm volatile(
+-			" mvcl %%r1,%%r1\n"
++			" .insn e,0x0000\n"	/* illegal opcode */
+ 			"0: nopr %%r7\n"
+ 			EX_TABLE(0b, 0b)
+ 			:);
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index dfc72a1f6500d..c00ae30fde89e 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -2429,7 +2429,7 @@ static int loop_control_remove(int idx)
+ 	int ret;
+ 
+ 	if (idx < 0) {
+-		pr_warn("deleting an unspecified loop device is not supported.\n");
++		pr_warn_once("deleting an unspecified loop device is not supported.\n");
+ 		return -EINVAL;
+ 	}
+ 
+diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
+index ed3c4c42fc23b..d68d05d5d3838 100644
+--- a/drivers/char/agp/parisc-agp.c
++++ b/drivers/char/agp/parisc-agp.c
+@@ -281,7 +281,7 @@ agp_ioc_init(void __iomem *ioc_regs)
+ 	return 0;
+ }
+ 
+-static int
++static int __init
+ lba_find_capability(int cap)
+ {
+ 	struct _parisc_agp_info *info = &parisc_agp_info;
+@@ -366,7 +366,7 @@ fail:
+ 	return error;
+ }
+ 
+-static int
++static int __init
+ find_quicksilver(struct device *dev, void *data)
+ {
+ 	struct parisc_device **lba = data;
+@@ -378,7 +378,7 @@ find_quicksilver(struct device *dev, void *data)
+ 	return 0;
+ }
+ 
+-static int
++static int __init
+ parisc_agp_init(void)
+ {
+ 	extern struct sba_device *sba_list;
+diff --git a/drivers/clk/qcom/gcc-sm6125.c b/drivers/clk/qcom/gcc-sm6125.c
+index 543cfab7561f9..431b55bb0d2f7 100644
+--- a/drivers/clk/qcom/gcc-sm6125.c
++++ b/drivers/clk/qcom/gcc-sm6125.c
+@@ -1121,7 +1121,7 @@ static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ 		.name = "gcc_sdcc1_apps_clk_src",
+ 		.parent_data = gcc_parent_data_1,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_1),
+-		.ops = &clk_rcg2_ops,
++		.ops = &clk_rcg2_floor_ops,
+ 	},
+ };
+ 
+@@ -1143,7 +1143,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ 		.name = "gcc_sdcc1_ice_core_clk_src",
+ 		.parent_data = gcc_parent_data_0,
+ 		.num_parents = ARRAY_SIZE(gcc_parent_data_0),
+-		.ops = &clk_rcg2_floor_ops,
++		.ops = &clk_rcg2_ops,
+ 	},
+ };
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+index cdf46bd0d8d5b..ab36cce59d2e4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -1393,7 +1393,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
+ 	struct sg_table *sg = NULL;
+ 	uint64_t user_addr = 0;
+ 	struct amdgpu_bo *bo;
+-	struct drm_gem_object *gobj;
++	struct drm_gem_object *gobj = NULL;
+ 	u32 domain, alloc_domain;
+ 	u64 alloc_flags;
+ 	int ret;
+@@ -1503,14 +1503,16 @@ allocate_init_user_pages_failed:
+ 	remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
+ 	drm_vma_node_revoke(&gobj->vma_node, drm_priv);
+ err_node_allow:
+-	drm_gem_object_put(gobj);
+ 	/* Don't unreserve system mem limit twice */
+ 	goto err_reserve_limit;
+ err_bo_create:
+ 	unreserve_mem_limit(adev, size, alloc_domain, !!sg);
+ err_reserve_limit:
+ 	mutex_destroy(&(*mem)->lock);
+-	kfree(*mem);
++	if (gobj)
++		drm_gem_object_put(gobj);
++	else
++		kfree(*mem);
+ err:
+ 	if (sg) {
+ 		sg_free_table(sg);
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+index 96ca42bcfdbf9..1545884dc703e 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+@@ -3854,7 +3854,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
+ 	/* disable all interrupts */
+ 	amdgpu_irq_disable_all(adev);
+ 	if (adev->mode_info.mode_config_initialized){
+-		if (!amdgpu_device_has_dc_support(adev))
++		if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
+ 			drm_helper_force_disable_all(adev_to_drm(adev));
+ 		else
+ 			drm_atomic_helper_shutdown(adev_to_drm(adev));
+@@ -5130,7 +5130,7 @@ skip_hw_reset:
+ 			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
+ 		}
+ 
+-		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
++		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
+ 			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
+ 		}
+ 
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+index ce982afeff913..ac9a8cd21c4b6 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+@@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle)
+ 	int i = 0;
+ 
+ 	for (i = 0; i < adev->mode_info.num_crtc; i++)
+-		if (adev->mode_info.crtcs[i])
+-			hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
++		if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
++			hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);
+ 
+ 	kfree(adev->mode_info.bios_hardcoded_edid);
+ 	kfree(adev->amdgpu_vkms_output);
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+index 179080329af89..5a674235ae41a 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+@@ -1565,7 +1565,6 @@ retry_flush_work:
+ static void svm_range_restore_work(struct work_struct *work)
+ {
+ 	struct delayed_work *dwork = to_delayed_work(work);
+-	struct amdkfd_process_info *process_info;
+ 	struct svm_range_list *svms;
+ 	struct svm_range *prange;
+ 	struct kfd_process *p;
+@@ -1585,12 +1584,10 @@ static void svm_range_restore_work(struct work_struct *work)
+ 	 * the lifetime of this thread, kfd_process and mm will be valid.
+ 	 */
+ 	p = container_of(svms, struct kfd_process, svms);
+-	process_info = p->kgd_process_info;
+ 	mm = p->mm;
+ 	if (!mm)
+ 		return;
+ 
+-	mutex_lock(&process_info->lock);
+ 	svm_range_list_lock_and_flush_work(svms, mm);
+ 	mutex_lock(&svms->lock);
+ 
+@@ -1643,7 +1640,6 @@ static void svm_range_restore_work(struct work_struct *work)
+ out_reschedule:
+ 	mutex_unlock(&svms->lock);
+ 	mmap_write_unlock(mm);
+-	mutex_unlock(&process_info->lock);
+ 
+ 	/* If validation failed, reschedule another attempt */
+ 	if (evicted_ranges) {
+@@ -2974,7 +2970,6 @@ static int
+ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
+ 		   uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
+ {
+-	struct amdkfd_process_info *process_info = p->kgd_process_info;
+ 	struct mm_struct *mm = current->mm;
+ 	struct list_head update_list;
+ 	struct list_head insert_list;
+@@ -2993,8 +2988,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
+ 
+ 	svms = &p->svms;
+ 
+-	mutex_lock(&process_info->lock);
+-
+ 	svm_range_list_lock_and_flush_work(svms, mm);
+ 
+ 	if (!svm_range_is_valid(mm, start, size)) {
+@@ -3070,8 +3063,6 @@ out_unlock_range:
+ 	mutex_unlock(&svms->lock);
+ 	mmap_read_unlock(mm);
+ out:
+-	mutex_unlock(&process_info->lock);
+-
+ 	pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
+ 		 &p->svms, start, start + size - 1, r);
+ 
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+index cce062adc4391..8a441a22c46ec 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+@@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+ 			ret = -EINVAL;
+ 			goto cleanup;
+ 		}
++
++		if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
++		    (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
++			DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
++			ret = -EINVAL;
++			goto cleanup;
++		}
++
+ 	}
+ 
+ #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+index a60396d5be445..e94546187cf15 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+@@ -1623,6 +1623,10 @@ bool dc_is_stream_unchanged(
+ 	if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
+ 		return false;
+ 
++	// Only Have Audio left to check whether it is same or not. This is a corner case for Tiled sinks
++	if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
++		return false;
++
+ 	return true;
+ }
+ 
+diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+index 267a880811d65..723074aae5b63 100644
+--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+@@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
+ {
+ 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ 	struct msm_gpu *gpu = &adreno_gpu->base;
+-	u32 gpu_scid, cntl1_regval = 0;
++	u32 cntl1_regval = 0;
+ 
+ 	if (IS_ERR(a6xx_gpu->llc_mmio))
+ 		return;
+ 
+ 	if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
+-		gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
++		u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+ 
+ 		gpu_scid &= 0x1f;
+ 		cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
+ 			       (gpu_scid << 15) | (gpu_scid << 20);
++
++		/* On A660, the SCID programming for UCHE traffic is done in
++		 * A6XX_GBIF_SCACHE_CNTL0[14:10]
++		 */
++		if (adreno_is_a660_family(adreno_gpu))
++			gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
++				(1 << 8), (gpu_scid << 10) | (1 << 8));
+ 	}
+ 
+ 	/*
+@@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
+ 	}
+ 
+ 	gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
+-
+-	/* On A660, the SCID programming for UCHE traffic is done in
+-	 * A6XX_GBIF_SCACHE_CNTL0[14:10]
+-	 */
+-	if (adreno_is_a660_family(adreno_gpu))
+-		gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+-			(1 << 8), (gpu_scid << 10) | (1 << 8));
+ }
+ 
+ static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
+diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
+index eb40d8413bca9..6d36f63c33388 100644
+--- a/drivers/gpu/drm/msm/dp/dp_aux.c
++++ b/drivers/gpu/drm/msm/dp/dp_aux.c
+@@ -33,6 +33,7 @@ struct dp_aux_private {
+ 	bool read;
+ 	bool no_send_addr;
+ 	bool no_send_stop;
++	bool initted;
+ 	u32 offset;
+ 	u32 segment;
+ 
+@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+ 	}
+ 
+ 	mutex_lock(&aux->mutex);
++	if (!aux->initted) {
++		ret = -EIO;
++		goto exit;
++	}
+ 
+ 	dp_aux_update_offset_and_segment(aux, msg);
+ 	dp_aux_transfer_helper(aux, msg, true);
+@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
+ 	}
+ 
+ 	aux->cmd_busy = false;
++
++exit:
+ 	mutex_unlock(&aux->mutex);
+ 
+ 	return ret;
+@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
+ 
+ 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ 
++	mutex_lock(&aux->mutex);
++
+ 	dp_catalog_aux_enable(aux->catalog, true);
+ 	aux->retry_cnt = 0;
++	aux->initted = true;
++
++	mutex_unlock(&aux->mutex);
+ }
+ 
+ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
+@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
+ 
+ 	aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ 
++	mutex_lock(&aux->mutex);
++
++	aux->initted = false;
+ 	dp_catalog_aux_enable(aux->catalog, false);
++
++	mutex_unlock(&aux->mutex);
+ }
+ 
+ int dp_aux_register(struct drm_dp_aux *dp_aux)
+diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
+index ea641151e77e7..dc85974c78975 100644
+--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
++++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
+@@ -1696,6 +1696,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
+ 	if (!prop) {
+ 		DRM_DEV_DEBUG(dev,
+ 			"failed to find data lane mapping, using default\n");
++		/* Set the number of date lanes to 4 by default. */
++		msm_host->num_data_lanes = 4;
+ 		return 0;
+ 	}
+ 
+diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
+index a38f23be497d8..d9aef97eb93ad 100644
+--- a/drivers/gpu/drm/msm/msm_gem_submit.c
++++ b/drivers/gpu/drm/msm/msm_gem_submit.c
+@@ -780,6 +780,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ 			args->nr_cmds);
+ 	if (IS_ERR(submit)) {
+ 		ret = PTR_ERR(submit);
++		submit = NULL;
+ 		goto out_unlock;
+ 	}
+ 
+diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c
+index 731d5117f9f10..14389fd7afb89 100644
+--- a/drivers/hwmon/corsair-psu.c
++++ b/drivers/hwmon/corsair-psu.c
+@@ -729,7 +729,7 @@ static int corsairpsu_probe(struct hid_device *hdev, const struct hid_device_id
+ 	corsairpsu_check_cmd_support(priv);
+ 
+ 	priv->hwmon_dev = hwmon_device_register_with_info(&hdev->dev, "corsairpsu", priv,
+-							   &corsairpsu_chip_info, 0);
++							   &corsairpsu_chip_info, NULL);
+ 
+ 	if (IS_ERR(priv->hwmon_dev)) {
+ 		ret = PTR_ERR(priv->hwmon_dev);
+diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
+index 819ab4ee517e1..02ddb237f69af 100644
+--- a/drivers/i2c/busses/i2c-rk3x.c
++++ b/drivers/i2c/busses/i2c-rk3x.c
+@@ -423,8 +423,8 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
+ 	if (!(ipd & REG_INT_MBRF))
+ 		return;
+ 
+-	/* ack interrupt */
+-	i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
++	/* ack interrupt (read also produces a spurious START flag, clear it too) */
++	i2c_writel(i2c, REG_INT_MBRF | REG_INT_START, REG_IPD);
+ 
+ 	/* Can only handle a maximum of 32 bytes at a time */
+ 	if (len > 32)
+diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
+index 7b2474e6876f4..5cb21d7da05b6 100644
+--- a/drivers/i2c/busses/i2c-virtio.c
++++ b/drivers/i2c/busses/i2c-virtio.c
+@@ -22,24 +22,24 @@
+ /**
+  * struct virtio_i2c - virtio I2C data
+  * @vdev: virtio device for this controller
+- * @completion: completion of virtio I2C message
+  * @adap: I2C adapter for this controller
+  * @vq: the virtio virtqueue for communication
+  */
+ struct virtio_i2c {
+ 	struct virtio_device *vdev;
+-	struct completion completion;
+ 	struct i2c_adapter adap;
+ 	struct virtqueue *vq;
+ };
+ 
+ /**
+  * struct virtio_i2c_req - the virtio I2C request structure
++ * @completion: completion of virtio I2C message
+  * @out_hdr: the OUT header of the virtio I2C message
+  * @buf: the buffer into which data is read, or from which it's written
+  * @in_hdr: the IN header of the virtio I2C message
+  */
+ struct virtio_i2c_req {
++	struct completion completion;
+ 	struct virtio_i2c_out_hdr out_hdr ____cacheline_aligned;
+ 	uint8_t *buf ____cacheline_aligned;
+ 	struct virtio_i2c_in_hdr in_hdr ____cacheline_aligned;
+@@ -47,9 +47,11 @@ struct virtio_i2c_req {
+ 
+ static void virtio_i2c_msg_done(struct virtqueue *vq)
+ {
+-	struct virtio_i2c *vi = vq->vdev->priv;
++	struct virtio_i2c_req *req;
++	unsigned int len;
+ 
+-	complete(&vi->completion);
++	while ((req = virtqueue_get_buf(vq, &len)))
++		complete(&req->completion);
+ }
+ 
+ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
+@@ -62,6 +64,8 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
+ 	for (i = 0; i < num; i++) {
+ 		int outcnt = 0, incnt = 0;
+ 
++		init_completion(&reqs[i].completion);
++
+ 		/*
+ 		 * We don't support 0 length messages and so filter out
+ 		 * 0 length transfers by using i2c_adapter_quirks.
+@@ -108,21 +112,15 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq,
+ 				    struct virtio_i2c_req *reqs,
+ 				    struct i2c_msg *msgs, int num)
+ {
+-	struct virtio_i2c_req *req;
+ 	bool failed = false;
+-	unsigned int len;
+ 	int i, j = 0;
+ 
+ 	for (i = 0; i < num; i++) {
+-		/* Detach the ith request from the vq */
+-		req = virtqueue_get_buf(vq, &len);
++		struct virtio_i2c_req *req = &reqs[i];
+ 
+-		/*
+-		 * Condition req == &reqs[i] should always meet since we have
+-		 * total num requests in the vq. reqs[i] can never be NULL here.
+-		 */
+-		if (!failed && (WARN_ON(req != &reqs[i]) ||
+-				req->in_hdr.status != VIRTIO_I2C_MSG_OK))
++		wait_for_completion(&req->completion);
++
++		if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK)
+ 			failed = true;
+ 
+ 		i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed);
+@@ -158,12 +156,8 @@ static int virtio_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+ 	 * remote here to clear the virtqueue, so we can try another set of
+ 	 * messages later on.
+ 	 */
+-
+-	reinit_completion(&vi->completion);
+ 	virtqueue_kick(vq);
+ 
+-	wait_for_completion(&vi->completion);
+-
+ 	count = virtio_i2c_complete_reqs(vq, reqs, msgs, count);
+ 
+ err_free:
+@@ -211,8 +205,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
+ 	vdev->priv = vi;
+ 	vi->vdev = vdev;
+ 
+-	init_completion(&vi->completion);
+-
+ 	ret = virtio_i2c_setup_vqs(vi);
+ 	if (ret)
+ 		return ret;
+diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
+index 7de525a5ccf8c..aa119441eb45c 100644
+--- a/drivers/infiniband/hw/irdma/hw.c
++++ b/drivers/infiniband/hw/irdma/hw.c
+@@ -60,6 +60,8 @@ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
+ {
+ 	struct irdma_cq *cq = iwcq->back_cq;
+ 
++	if (!cq->user_mode)
++		cq->armed = false;
+ 	if (cq->ibcq.comp_handler)
+ 		cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+ }
+@@ -146,6 +148,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
+ 		qp->flush_code = FLUSH_PROT_ERR;
+ 		break;
+ 	case IRDMA_AE_AMP_BAD_QP:
++	case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
+ 		qp->flush_code = FLUSH_LOC_QP_OP_ERR;
+ 		break;
+ 	case IRDMA_AE_AMP_BAD_STAG_KEY:
+@@ -156,7 +159,6 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
+ 	case IRDMA_AE_PRIV_OPERATION_DENIED:
+ 	case IRDMA_AE_IB_INVALID_REQUEST:
+ 	case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
+-	case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ 		qp->flush_code = FLUSH_REM_ACCESS_ERR;
+ 		qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
+ 		break;
+@@ -184,6 +186,9 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
+ 	case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
+ 		qp->flush_code = FLUSH_MW_BIND_ERR;
+ 		break;
++	case IRDMA_AE_IB_REMOTE_OP_ERROR:
++		qp->flush_code = FLUSH_REM_OP_ERR;
++		break;
+ 	default:
+ 		qp->flush_code = FLUSH_FATAL_ERR;
+ 		break;
+diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
+index b678fe712447e..8b215f3cee891 100644
+--- a/drivers/infiniband/hw/irdma/main.h
++++ b/drivers/infiniband/hw/irdma/main.h
+@@ -541,6 +541,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
+ 		    void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
+ 		    void *cb_param);
+ void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
++bool irdma_cq_empty(struct irdma_cq *iwcq);
+ int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
+ 			 void *ptr);
+ int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
+diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
+index aeeb1c310965d..fed49da770f3b 100644
+--- a/drivers/infiniband/hw/irdma/pble.c
++++ b/drivers/infiniband/hw/irdma/pble.c
+@@ -25,8 +25,7 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+ 		list_del(&chunk->list);
+ 		if (chunk->type == PBLE_SD_PAGED)
+ 			irdma_pble_free_paged_mem(chunk);
+-		if (chunk->bitmapbuf)
+-			kfree(chunk->bitmapmem.va);
++		bitmap_free(chunk->bitmapbuf);
+ 		kfree(chunk->chunkmem.va);
+ 	}
+ }
+@@ -283,7 +282,6 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+ 		  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
+ 		  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
+ 	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
+-	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ 	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
+ 			     sd_entry->u.pd_table.pd_page_addr.pa :
+ 			     sd_entry->u.bp.addr.pa;
+@@ -295,12 +293,12 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
+ 		goto error;
+ 	}
+ 
++	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
+ 	sd_entry->valid = true;
+ 	return 0;
+ 
+ error:
+-	if (chunk->bitmapbuf)
+-		kfree(chunk->bitmapmem.va);
++	bitmap_free(chunk->bitmapbuf);
+ 	kfree(chunk->chunkmem.va);
+ 
+ 	return ret_code;
+diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
+index e1b3b8118a2ca..aa20827dcc9de 100644
+--- a/drivers/infiniband/hw/irdma/pble.h
++++ b/drivers/infiniband/hw/irdma/pble.h
+@@ -78,7 +78,6 @@ struct irdma_chunk {
+ 	u32 pg_cnt;
+ 	enum irdma_alloc_type type;
+ 	struct irdma_sc_dev *dev;
+-	struct irdma_virt_mem bitmapmem;
+ 	struct irdma_virt_mem chunkmem;
+ };
+ 
+diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
+index ac91ea5296db9..feebfe6bf31ad 100644
+--- a/drivers/infiniband/hw/irdma/utils.c
++++ b/drivers/infiniband/hw/irdma/utils.c
+@@ -2284,15 +2284,10 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
+ 
+ 	sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
+ 
+-	pchunk->bitmapmem.size = sizeofbitmap >> 3;
+-	pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
+-
+-	if (!pchunk->bitmapmem.va)
++	pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
++	if (!pchunk->bitmapbuf)
+ 		return IRDMA_ERR_NO_MEMORY;
+ 
+-	pchunk->bitmapbuf = pchunk->bitmapmem.va;
+-	bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
+-
+ 	pchunk->sizeofbitmap = sizeofbitmap;
+ 	/* each pble is 8 bytes hence shift by 3 */
+ 	pprm->total_pble_alloc += pchunk->size >> 3;
+@@ -2536,3 +2531,18 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
+ 	ibevent.element.qp = &iwqp->ibqp;
+ 	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
+ }
++
++bool irdma_cq_empty(struct irdma_cq *iwcq)
++{
++	struct irdma_cq_uk *ukcq;
++	u64 qword3;
++	__le64 *cqe;
++	u8 polarity;
++
++	ukcq = &iwcq->sc_cq.cq_uk;
++	cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
++	get_64bit_val(cqe, 24, &qword3);
++	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
++
++	return polarity != ukcq->polarity;
++}
+diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
+index 102dc9342f2a2..8bbc4620a97a2 100644
+--- a/drivers/infiniband/hw/irdma/verbs.c
++++ b/drivers/infiniband/hw/irdma/verbs.c
+@@ -3604,18 +3604,31 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
+ 	struct irdma_cq *iwcq;
+ 	struct irdma_cq_uk *ukcq;
+ 	unsigned long flags;
+-	enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
++	enum irdma_cmpl_notify cq_notify;
++	bool promo_event = false;
++	int ret = 0;
+ 
++	cq_notify = notify_flags == IB_CQ_SOLICITED ?
++		    IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
+ 	iwcq = to_iwcq(ibcq);
+ 	ukcq = &iwcq->sc_cq.cq_uk;
+-	if (notify_flags == IB_CQ_SOLICITED)
+-		cq_notify = IRDMA_CQ_COMPL_SOLICITED;
+ 
+ 	spin_lock_irqsave(&iwcq->lock, flags);
+-	irdma_uk_cq_request_notification(ukcq, cq_notify);
++	/* Only promote to arm the CQ for any event if the last arm event was solicited. */
++	if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
++		promo_event = true;
++
++	if (!iwcq->armed || promo_event) {
++		iwcq->armed = true;
++		iwcq->last_notify = cq_notify;
++		irdma_uk_cq_request_notification(ukcq, cq_notify);
++	}
++
++	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
++		ret = 1;
+ 	spin_unlock_irqrestore(&iwcq->lock, flags);
+ 
+-	return 0;
++	return ret;
+ }
+ 
+ static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
+diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
+index 5c244cd321a3a..d0fdef8d09ead 100644
+--- a/drivers/infiniband/hw/irdma/verbs.h
++++ b/drivers/infiniband/hw/irdma/verbs.h
+@@ -110,6 +110,8 @@ struct irdma_cq {
+ 	u16 cq_size;
+ 	u16 cq_num;
+ 	bool user_mode;
++	bool armed;
++	enum irdma_cmpl_notify last_notify;
+ 	u32 polled_cmpls;
+ 	u32 cq_mem_size;
+ 	struct irdma_dma_mem kmem;
+diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+index bf20a388eabe1..6204ae2caef58 100644
+--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
++++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
+@@ -641,7 +641,6 @@ struct mlx5_ib_mr {
+ 
+ 	/* User MR data */
+ 	struct mlx5_cache_ent *cache_ent;
+-	struct ib_umem *umem;
+ 
+ 	/* This is zero'd when the MR is allocated */
+ 	union {
+@@ -653,7 +652,7 @@ struct mlx5_ib_mr {
+ 		struct list_head list;
+ 	};
+ 
+-	/* Used only by kernel MRs (umem == NULL) */
++	/* Used only by kernel MRs */
+ 	struct {
+ 		void *descs;
+ 		void *descs_alloc;
+@@ -675,8 +674,9 @@ struct mlx5_ib_mr {
+ 		int data_length;
+ 	};
+ 
+-	/* Used only by User MRs (umem != NULL) */
++	/* Used only by User MRs */
+ 	struct {
++		struct ib_umem *umem;
+ 		unsigned int page_shift;
+ 		/* Current access_flags */
+ 		int access_flags;
+diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
+index 22e2f4d79743d..69b2ce4c292ae 100644
+--- a/drivers/infiniband/hw/mlx5/mr.c
++++ b/drivers/infiniband/hw/mlx5/mr.c
+@@ -1911,19 +1911,18 @@ err:
+ 	return ret;
+ }
+ 
+-static void
+-mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
++static void mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
+ {
+-	if (!mr->umem && mr->descs) {
+-		struct ib_device *device = mr->ibmr.device;
+-		int size = mr->max_descs * mr->desc_size;
+-		struct mlx5_ib_dev *dev = to_mdev(device);
++	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
++	int size = mr->max_descs * mr->desc_size;
+ 
+-		dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
+-				 DMA_TO_DEVICE);
+-		kfree(mr->descs_alloc);
+-		mr->descs = NULL;
+-	}
++	if (!mr->descs)
++		return;
++
++	dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
++			 DMA_TO_DEVICE);
++	kfree(mr->descs_alloc);
++	mr->descs = NULL;
+ }
+ 
+ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+@@ -1999,7 +1998,8 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
+ 	if (mr->cache_ent) {
+ 		mlx5_mr_cache_free(dev, mr);
+ 	} else {
+-		mlx5_free_priv_descs(mr);
++		if (!udata)
++			mlx5_free_priv_descs(mr);
+ 		kfree(mr);
+ 	}
+ 	return 0;
+@@ -2086,7 +2086,6 @@ static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
+ 	if (err)
+ 		goto err_free_in;
+ 
+-	mr->umem = NULL;
+ 	kfree(in);
+ 
+ 	return mr;
+@@ -2213,7 +2212,6 @@ static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
+ 	}
+ 
+ 	mr->ibmr.device = pd->device;
+-	mr->umem = NULL;
+ 
+ 	switch (mr_type) {
+ 	case IB_MR_TYPE_MEM_REG:
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 1ab6af7ddb254..ed326d82725cd 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -367,6 +367,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
+ 
+ err2:
+ 	rxe_queue_cleanup(qp->sq.queue);
++	qp->sq.queue = NULL;
+ err1:
+ 	qp->pd = NULL;
+ 	qp->rcq = NULL;
+diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
+index 3d6c6e8805207..a130320de4128 100644
+--- a/drivers/mtd/nand/raw/nand_base.c
++++ b/drivers/mtd/nand/raw/nand_base.c
+@@ -926,7 +926,7 @@ int nand_choose_best_sdr_timings(struct nand_chip *chip,
+ 				 struct nand_sdr_timings *spec_timings)
+ {
+ 	const struct nand_controller_ops *ops = chip->controller->ops;
+-	int best_mode = 0, mode, ret;
++	int best_mode = 0, mode, ret = -EOPNOTSUPP;
+ 
+ 	iface->type = NAND_SDR_IFACE;
+ 
+@@ -977,7 +977,7 @@ int nand_choose_best_nvddr_timings(struct nand_chip *chip,
+ 				   struct nand_nvddr_timings *spec_timings)
+ {
+ 	const struct nand_controller_ops *ops = chip->controller->ops;
+-	int best_mode = 0, mode, ret;
++	int best_mode = 0, mode, ret = -EOPNOTSUPP;
+ 
+ 	iface->type = NAND_NVDDR_IFACE;
+ 
+@@ -1837,7 +1837,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
+ 		NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ 		NAND_OP_ADDR(2, addrs, 0),
+ 		NAND_OP_CMD(NAND_CMD_ERASE2,
+-			    NAND_COMMON_TIMING_MS(conf, tWB_max)),
++			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
+ 		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tBERS_max),
+ 				 0),
+ 	};
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+index 8c223beeb6b8a..a78e8f00cf71b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+@@ -1569,6 +1569,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
+ 		ice_vc_set_default_allowlist(vf);
+ 
+ 		ice_vf_fdir_exit(vf);
++		ice_vf_fdir_init(vf);
+ 		/* clean VF control VSI when resetting VFs since it should be
+ 		 * setup only when VF creates its first FDIR rule.
+ 		 */
+@@ -1695,6 +1696,7 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
+ 	}
+ 
+ 	ice_vf_fdir_exit(vf);
++	ice_vf_fdir_init(vf);
+ 	/* clean VF control VSI when resetting VF since it should be setup
+ 	 * only when VF creates its first FDIR rule.
+ 	 */
+diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+index ef518b1040f72..fde521b1eecad 100644
+--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
++++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+@@ -663,7 +663,7 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
+ 	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_T, SPEED_1000,
+ 				       ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ 	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_CX_SGMII, SPEED_1000,
+-				       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
++				       ETHTOOL_LINK_MODE_1000baseX_Full_BIT);
+ 	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_1000BASE_KX, SPEED_1000,
+ 				       ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ 	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_T, SPEED_10000,
+@@ -675,9 +675,9 @@ void __init mlx4_en_init_ptys2ethtool_map(void)
+ 	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_KR, SPEED_10000,
+ 				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ 	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_CR, SPEED_10000,
+-				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
++				       ETHTOOL_LINK_MODE_10000baseCR_Full_BIT);
+ 	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_10GBASE_SR, SPEED_10000,
+-				       ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
++				       ETHTOOL_LINK_MODE_10000baseSR_Full_BIT);
+ 	MLX4_BUILD_PTYS2ETHTOOL_CONFIG(MLX4_20GBASE_KR2, SPEED_20000,
+ 				       ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT,
+ 				       ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
+diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
+index 8799854bacb29..5b0215b7c1761 100644
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -3261,7 +3261,7 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
+ 
+ #ifdef CONFIG_PCI_MSI
+ 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
+-		int i, nvec;
++		int i, nvec, nvec_allocated;
+ 
+ 		nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+ 			1 : adapter->num_tx_queues;
+@@ -3274,14 +3274,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
+ 		for (i = 0; i < nvec; i++)
+ 			adapter->intr.msix_entries[i].entry = i;
+ 
+-		nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
+-		if (nvec < 0)
++		nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
++		if (nvec_allocated < 0)
+ 			goto msix_err;
+ 
+ 		/* If we cannot allocate one MSIx vector per queue
+ 		 * then limit the number of rx queues to 1
+ 		 */
+-		if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
++		if (nvec_allocated == VMXNET3_LINUX_MIN_MSIX_VECT &&
++		    nvec != VMXNET3_LINUX_MIN_MSIX_VECT) {
+ 			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
+ 			    || adapter->num_rx_queues != 1) {
+ 				adapter->share_intr = VMXNET3_INTR_TXSHARE;
+@@ -3291,14 +3292,14 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
+ 			}
+ 		}
+ 
+-		adapter->intr.num_intrs = nvec;
++		adapter->intr.num_intrs = nvec_allocated;
+ 		return;
+ 
+ msix_err:
+ 		/* If we cannot allocate MSIx vectors use only one rx queue */
+ 		dev_info(&adapter->pdev->dev,
+ 			 "Failed to enable MSI-X, error %d. "
+-			 "Limiting #rx queues to 1, try MSI.\n", nvec);
++			 "Limiting #rx queues to 1, try MSI.\n", nvec_allocated);
+ 
+ 		adapter->intr.type = VMXNET3_IT_MSI;
+ 	}
+diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
+index b72d7b9b45ea9..81e062009d271 100644
+--- a/drivers/staging/most/dim2/dim2.c
++++ b/drivers/staging/most/dim2/dim2.c
+@@ -726,6 +726,23 @@ static int get_dim2_clk_speed(const char *clock_speed, u8 *val)
+ 	return -EINVAL;
+ }
+ 
++static void dim2_release(struct device *d)
++{
++	struct dim2_hdm *dev = container_of(d, struct dim2_hdm, dev);
++	unsigned long flags;
++
++	kthread_stop(dev->netinfo_task);
++
++	spin_lock_irqsave(&dim_lock, flags);
++	dim_shutdown();
++	spin_unlock_irqrestore(&dim_lock, flags);
++
++	if (dev->disable_platform)
++		dev->disable_platform(to_platform_device(d->parent));
++
++	kfree(dev);
++}
++
+ /*
+  * dim2_probe - dim2 probe handler
+  * @pdev: platform device structure
+@@ -746,7 +763,7 @@ static int dim2_probe(struct platform_device *pdev)
+ 
+ 	enum { MLB_INT_IDX, AHB0_INT_IDX };
+ 
+-	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
++	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ 	if (!dev)
+ 		return -ENOMEM;
+ 
+@@ -758,25 +775,27 @@ static int dim2_probe(struct platform_device *pdev)
+ 			   "microchip,clock-speed", &clock_speed);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "missing dt property clock-speed\n");
+-		return ret;
++		goto err_free_dev;
+ 	}
+ 
+ 	ret = get_dim2_clk_speed(clock_speed, &dev->clk_speed);
+ 	if (ret) {
+ 		dev_err(&pdev->dev, "bad dt property clock-speed\n");
+-		return ret;
++		goto err_free_dev;
+ 	}
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+-	if (IS_ERR(dev->io_base))
+-		return PTR_ERR(dev->io_base);
++	if (IS_ERR(dev->io_base)) {
++		ret = PTR_ERR(dev->io_base);
++		goto err_free_dev;
++	}
+ 
+ 	of_id = of_match_node(dim2_of_match, pdev->dev.of_node);
+ 	pdata = of_id->data;
+ 	ret = pdata && pdata->enable ? pdata->enable(pdev) : 0;
+ 	if (ret)
+-		return ret;
++		goto err_free_dev;
+ 
+ 	dev->disable_platform = pdata ? pdata->disable : NULL;
+ 
+@@ -867,24 +886,19 @@ static int dim2_probe(struct platform_device *pdev)
+ 	dev->most_iface.request_netinfo = request_netinfo;
+ 	dev->most_iface.driver_dev = &pdev->dev;
+ 	dev->most_iface.dev = &dev->dev;
+-	dev->dev.init_name = "dim2_state";
++	dev->dev.init_name = dev->name;
+ 	dev->dev.parent = &pdev->dev;
++	dev->dev.release = dim2_release;
+ 
+-	ret = most_register_interface(&dev->most_iface);
+-	if (ret) {
+-		dev_err(&pdev->dev, "failed to register MOST interface\n");
+-		goto err_stop_thread;
+-	}
+-
+-	return 0;
++	return most_register_interface(&dev->most_iface);
+ 
+-err_stop_thread:
+-	kthread_stop(dev->netinfo_task);
+ err_shutdown_dim:
+ 	dim_shutdown();
+ err_disable_platform:
+ 	if (dev->disable_platform)
+ 		dev->disable_platform(pdev);
++err_free_dev:
++	kfree(dev);
+ 
+ 	return ret;
+ }
+@@ -898,17 +912,8 @@ err_disable_platform:
+ static int dim2_remove(struct platform_device *pdev)
+ {
+ 	struct dim2_hdm *dev = platform_get_drvdata(pdev);
+-	unsigned long flags;
+ 
+ 	most_deregister_interface(&dev->most_iface);
+-	kthread_stop(dev->netinfo_task);
+-
+-	spin_lock_irqsave(&dim_lock, flags);
+-	dim_shutdown();
+-	spin_unlock_irqrestore(&dim_lock, flags);
+-
+-	if (dev->disable_platform)
+-		dev->disable_platform(pdev);
+ 
+ 	return 0;
+ }
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index b1e7190ae4836..ac5112def40d1 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -2625,6 +2625,7 @@ OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
+ OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
++OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
+ EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
+ EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
+ 
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index d9b977c0f38dc..2798fbe8d0018 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -738,11 +738,19 @@ static int fuse_symlink(struct user_namespace *mnt_userns, struct inode *dir,
+ 	return create_new_entry(fm, &args, dir, entry, S_IFLNK);
+ }
+ 
++void fuse_flush_time_update(struct inode *inode)
++{
++	int err = sync_inode_metadata(inode, 1);
++
++	mapping_set_error(inode->i_mapping, err);
++}
++
+ void fuse_update_ctime(struct inode *inode)
+ {
+ 	if (!IS_NOCMTIME(inode)) {
+ 		inode->i_ctime = current_time(inode);
+ 		mark_inode_dirty_sync(inode);
++		fuse_flush_time_update(inode);
+ 	}
+ }
+ 
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index 11404f8c21c75..5c5ed58d91a73 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1848,6 +1848,17 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
+ 	struct fuse_file *ff;
+ 	int err;
+ 
++	/*
++	 * Inode is always written before the last reference is dropped and
++	 * hence this should not be reached from reclaim.
++	 *
++	 * Writing back the inode from reclaim can deadlock if the request
++	 * processing itself needs an allocation. Allocations triggering
++	 * reclaim while serving a request can't be prevented, because it can
++	 * involve any number of unrelated userspace processes.
++	 */
++	WARN_ON(wbc->for_reclaim);
++
+ 	ff = __fuse_write_file_get(fi);
+ 	err = fuse_flush_times(inode, ff);
+ 	if (ff)
+@@ -3002,6 +3013,8 @@ out:
+ 	if (lock_inode)
+ 		inode_unlock(inode);
+ 
++	fuse_flush_time_update(inode);
++
+ 	return err;
+ }
+ 
+@@ -3111,6 +3124,8 @@ out:
+ 	inode_unlock(inode_out);
+ 	file_accessed(file_in);
+ 
++	fuse_flush_time_update(inode_out);
++
+ 	return err;
+ }
+ 
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index f55f9f94b1a4f..a59e36c7deaea 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -1148,6 +1148,7 @@ int fuse_allow_current_process(struct fuse_conn *fc);
+ 
+ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id);
+ 
++void fuse_flush_time_update(struct inode *inode);
+ void fuse_update_ctime(struct inode *inode);
+ 
+ int fuse_update_attributes(struct inode *inode, struct file *file);
+diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
+index 12d49a1914e84..2f999d38c9b4a 100644
+--- a/fs/fuse/inode.c
++++ b/fs/fuse/inode.c
+@@ -118,6 +118,9 @@ static void fuse_evict_inode(struct inode *inode)
+ {
+ 	struct fuse_inode *fi = get_fuse_inode(inode);
+ 
++	/* Will write inode on close/munmap and in all other dirtiers */
++	WARN_ON(inode->i_state & I_DIRTY_INODE);
++
+ 	truncate_inode_pages_final(&inode->i_data);
+ 	clear_inode(inode);
+ 	if (inode->i_sb->s_flags & SB_ACTIVE) {
+diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
+index 994ec22d40402..4b54529f8176b 100644
+--- a/fs/netfs/read_helper.c
++++ b/fs/netfs/read_helper.c
+@@ -354,16 +354,11 @@ static void netfs_rreq_write_to_cache_work(struct work_struct *work)
+ 	netfs_rreq_do_write_to_cache(rreq);
+ }
+ 
+-static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
+-				      bool was_async)
++static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq)
+ {
+-	if (was_async) {
+-		rreq->work.func = netfs_rreq_write_to_cache_work;
+-		if (!queue_work(system_unbound_wq, &rreq->work))
+-			BUG();
+-	} else {
+-		netfs_rreq_do_write_to_cache(rreq);
+-	}
++	rreq->work.func = netfs_rreq_write_to_cache_work;
++	if (!queue_work(system_unbound_wq, &rreq->work))
++		BUG();
+ }
+ 
+ /*
+@@ -560,7 +555,7 @@ again:
+ 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+ 
+ 	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags))
+-		return netfs_rreq_write_to_cache(rreq, was_async);
++		return netfs_rreq_write_to_cache(rreq);
+ 
+ 	netfs_rreq_completed(rreq, was_async);
+ }
+diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
+index 39bb56d2dcbef..9628b55718468 100644
+--- a/kernel/trace/tracing_map.c
++++ b/kernel/trace/tracing_map.c
+@@ -15,6 +15,7 @@
+ #include <linux/jhash.h>
+ #include <linux/slab.h>
+ #include <linux/sort.h>
++#include <linux/kmemleak.h>
+ 
+ #include "tracing_map.h"
+ #include "trace.h"
+@@ -307,6 +308,7 @@ static void tracing_map_array_free(struct tracing_map_array *a)
+ 	for (i = 0; i < a->n_pages; i++) {
+ 		if (!a->pages[i])
+ 			break;
++		kmemleak_free(a->pages[i]);
+ 		free_page((unsigned long)a->pages[i]);
+ 	}
+ 
+@@ -342,6 +344,7 @@ static struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
+ 		a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);
+ 		if (!a->pages[i])
+ 			goto free;
++		kmemleak_alloc(a->pages[i], PAGE_SIZE, 1, GFP_KERNEL);
+ 	}
+  out:
+ 	return a;
+diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
+index f7fea3a7c5e64..62a67fdc344cd 100644
+--- a/net/ipv4/inet_connection_sock.c
++++ b/net/ipv4/inet_connection_sock.c
+@@ -721,7 +721,7 @@ static struct request_sock *inet_reqsk_clone(struct request_sock *req,
+ 
+ 	sk_node_init(&nreq_sk->sk_node);
+ 	nreq_sk->sk_tx_queue_mapping = req_sk->sk_tx_queue_mapping;
+-#ifdef CONFIG_XPS
++#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
+ 	nreq_sk->sk_rx_queue_mapping = req_sk->sk_rx_queue_mapping;
+ #endif
+ 	nreq_sk->sk_incoming_cpu = req_sk->sk_incoming_cpu;
+diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
+index ada47e59647a0..81ba8e51e01ff 100644
+--- a/net/netlink/af_netlink.c
++++ b/net/netlink/af_netlink.c
+@@ -1871,6 +1871,11 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
+ 	if (msg->msg_flags & MSG_OOB)
+ 		return -EOPNOTSUPP;
+ 
++	if (len == 0) {
++		pr_warn_once("Zero length message leads to an empty skb\n");
++		return -ENODATA;
++	}
++
+ 	err = scm_send(sock, msg, &scm, true);
+ 	if (err < 0)
+ 		return err;
+diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
+index 082085c25a8e4..8048a3dcc5f8b 100644
+--- a/net/nfc/netlink.c
++++ b/net/nfc/netlink.c
+@@ -636,8 +636,10 @@ static int nfc_genl_dump_devices_done(struct netlink_callback *cb)
+ {
+ 	struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
+ 
+-	nfc_device_iter_exit(iter);
+-	kfree(iter);
++	if (iter) {
++		nfc_device_iter_exit(iter);
++		kfree(iter);
++	}
+ 
+ 	return 0;
+ }
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index 90e9263ac0bd7..21fec82489bd7 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -335,7 +335,10 @@ enum {
+ 					((pci)->device == 0x0c0c) || \
+ 					((pci)->device == 0x0d0c) || \
+ 					((pci)->device == 0x160c) || \
+-					((pci)->device == 0x490d))
++					((pci)->device == 0x490d) || \
++					((pci)->device == 0x4f90) || \
++					((pci)->device == 0x4f91) || \
++					((pci)->device == 0x4f92))
+ 
+ #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+ 
+@@ -2472,6 +2475,13 @@ static const struct pci_device_id azx_ids[] = {
+ 	/* DG1 */
+ 	{ PCI_DEVICE(0x8086, 0x490d),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++	/* DG2 */
++	{ PCI_DEVICE(0x8086, 0x4f90),
++	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++	{ PCI_DEVICE(0x8086, 0x4f91),
++	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
++	{ PCI_DEVICE(0x8086, 0x4f92),
++	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ 	/* Alderlake-S */
+ 	{ PCI_DEVICE(0x8086, 0x7ad0),
+ 	  .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 65d2c55399195..415701bd10ac8 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -4380,10 +4380,11 @@ HDA_CODEC_ENTRY(0x8086280f, "Icelake HDMI", patch_i915_icl_hdmi),
+ HDA_CODEC_ENTRY(0x80862812, "Tigerlake HDMI", patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862814, "DG1 HDMI", patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862815, "Alderlake HDMI", patch_i915_tgl_hdmi),
+-HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862816, "Rocketlake HDMI", patch_i915_tgl_hdmi),
++HDA_CODEC_ENTRY(0x80862819, "DG2 HDMI", patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x8086281a, "Jasperlake HDMI", patch_i915_icl_hdmi),
+ HDA_CODEC_ENTRY(0x8086281b, "Elkhartlake HDMI", patch_i915_icl_hdmi),
++HDA_CODEC_ENTRY(0x8086281c, "Alderlake-P HDMI", patch_i915_tgl_hdmi),
+ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI", patch_generic_hdmi),
+ HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI", patch_i915_byt_hdmi),
+ HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI", patch_i915_byt_hdmi),
+diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
+index 6ad191e731fc9..d454f5a7af936 100644
+--- a/tools/perf/builtin-inject.c
++++ b/tools/perf/builtin-inject.c
+@@ -819,7 +819,7 @@ static int __cmd_inject(struct perf_inject *inject)
+ 		inject->tool.ordered_events = true;
+ 		inject->tool.ordering_requires_timestamps = true;
+ 		/* Allow space in the header for new attributes */
+-		output_data_offset = 4096;
++		output_data_offset = roundup(8192 + session->header.data_offset, 4096);
+ 		if (inject->strip)
+ 			strip_init(inject);
+ 	}
+diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h
+deleted file mode 100644
+index 186a5551ddb9d..0000000000000
+--- a/tools/perf/util/bpf_skel/bperf.h
++++ /dev/null
+@@ -1,14 +0,0 @@
+-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+-// Copyright (c) 2021 Facebook
+-
+-#ifndef __BPERF_STAT_H
+-#define __BPERF_STAT_H
+-
+-typedef struct {
+-	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+-	__uint(key_size, sizeof(__u32));
+-	__uint(value_size, sizeof(struct bpf_perf_event_value));
+-	__uint(max_entries, 1);
+-} reading_map;
+-
+-#endif /* __BPERF_STAT_H */
+diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
+index b8fa3cb2da230..6d2ea67b161ac 100644
+--- a/tools/perf/util/bpf_skel/bperf_follower.bpf.c
++++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
+@@ -4,11 +4,21 @@
+ #include <linux/perf_event.h>
+ #include <bpf/bpf_helpers.h>
+ #include <bpf/bpf_tracing.h>
+-#include "bperf.h"
+ #include "bperf_u.h"
+ 
+-reading_map diff_readings SEC(".maps");
+-reading_map accum_readings SEC(".maps");
++struct {
++	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
++	__uint(key_size, sizeof(__u32));
++	__uint(value_size, sizeof(struct bpf_perf_event_value));
++	__uint(max_entries, 1);
++} diff_readings SEC(".maps");
++
++struct {
++	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
++	__uint(key_size, sizeof(__u32));
++	__uint(value_size, sizeof(struct bpf_perf_event_value));
++	__uint(max_entries, 1);
++} accum_readings SEC(".maps");
+ 
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_HASH);
+diff --git a/tools/perf/util/bpf_skel/bperf_leader.bpf.c b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
+index 4f70d1459e86c..d82e1633a2e0a 100644
+--- a/tools/perf/util/bpf_skel/bperf_leader.bpf.c
++++ b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
+@@ -4,7 +4,6 @@
+ #include <linux/perf_event.h>
+ #include <bpf/bpf_helpers.h>
+ #include <bpf/bpf_tracing.h>
+-#include "bperf.h"
+ 
+ struct {
+ 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+@@ -13,8 +12,19 @@ struct {
+ 	__uint(map_flags, BPF_F_PRESERVE_ELEMS);
+ } events SEC(".maps");
+ 
+-reading_map prev_readings SEC(".maps");
+-reading_map diff_readings SEC(".maps");
++struct {
++	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
++	__uint(key_size, sizeof(__u32));
++	__uint(value_size, sizeof(struct bpf_perf_event_value));
++	__uint(max_entries, 1);
++} prev_readings SEC(".maps");
++
++struct {
++	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
++	__uint(key_size, sizeof(__u32));
++	__uint(value_size, sizeof(struct bpf_perf_event_value));
++	__uint(max_entries, 1);
++} diff_readings SEC(".maps");
+ 
+ SEC("raw_tp/sched_switch")
+ int BPF_PROG(on_switch)