author     Mike Pagano <mpagano@gentoo.org>  2019-12-13 07:39:32 -0500
committer  Mike Pagano <mpagano@gentoo.org>  2019-12-13 07:39:32 -0500
commit     f0c91086a01b8aebce1a268d2545b694d3e52c39 (patch)
tree       87c4d5e7f882acec31b0e885891e08bb58c47fad
parent     Linux patch 5.4.2 (diff)
download   linux-patches-f0c91086a01b8aebce1a268d2545b694d3e52c39.tar.gz
           linux-patches-f0c91086a01b8aebce1a268d2545b694d3e52c39.tar.bz2
           linux-patches-f0c91086a01b8aebce1a268d2545b694d3e52c39.zip
Linux patch 5.4.3 and add missing entries in README  (5.4-4)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README               12
-rw-r--r--  1002_linux-5.4.3.patch  3239
2 files changed, 3251 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 60b41a8e..b9a78f51 100644
--- a/0000_README
+++ b/0000_README
@@ -43,6 +43,18 @@ EXPERIMENTAL
Individual Patch Descriptions:
--------------------------------------------------------------------------
+Patch: 1000_linux-5.4.1.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.1
+
+Patch: 1001_linux-5.4.2.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.2
+
+Patch: 1002_linux-5.4.3.patch
+From: http://www.kernel.org
+Desc: Linux 5.4.3
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1002_linux-5.4.3.patch b/1002_linux-5.4.3.patch
new file mode 100644
index 00000000..3c6aab73
--- /dev/null
+++ b/1002_linux-5.4.3.patch
@@ -0,0 +1,3239 @@
+diff --git a/Makefile b/Makefile
+index e67f2e95b71d..07998b60d56c 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 4
+-SUBLEVEL = 2
++SUBLEVEL = 3
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+index a76f620f7f35..a5f8752f607b 100644
+--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
++++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+@@ -18,8 +18,8 @@
+
+ / {
+ compatible = "samsung,exynos5433";
+- #address-cells = <1>;
+- #size-cells = <1>;
++ #address-cells = <2>;
++ #size-cells = <2>;
+
+ interrupt-parent = <&gic>;
+
+@@ -311,7 +311,7 @@
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+- ranges;
++ ranges = <0x0 0x0 0x0 0x18000000>;
+
+ chipid@10000000 {
+ compatible = "samsung,exynos4210-chipid";
+diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
+index bcb9d8cee267..0821489a874d 100644
+--- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
++++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
+@@ -12,8 +12,8 @@
+ / {
+ compatible = "samsung,exynos7";
+ interrupt-parent = <&gic>;
+- #address-cells = <1>;
+- #size-cells = <1>;
++ #address-cells = <2>;
++ #size-cells = <2>;
+
+ aliases {
+ pinctrl0 = &pinctrl_alive;
+@@ -98,7 +98,7 @@
+ compatible = "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+- ranges;
++ ranges = <0 0 0 0x18000000>;
+
+ chipid@10000000 {
+ compatible = "samsung,exynos4210-chipid";
+diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+index 4c38426a6969..02909a48dfcd 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
+@@ -309,9 +309,8 @@
+ regulator-name = "VDD_12V";
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
+- gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>;
++ gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_HIGH>;
+ regulator-boot-on;
+- enable-active-low;
+ };
+ };
+ };
+diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+index a7dc319214a4..b0095072bc28 100644
+--- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
++++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
+@@ -1612,7 +1612,7 @@
+ regulator-name = "VDD_HDMI_5V0";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+- gpio = <&exp1 12 GPIO_ACTIVE_LOW>;
++ gpio = <&exp1 12 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ vin-supply = <&vdd_5v0_sys>;
+ };
+diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
+index 127712b0b970..32fc8061aa76 100644
+--- a/arch/arm64/include/asm/uaccess.h
++++ b/arch/arm64/include/asm/uaccess.h
+@@ -62,8 +62,13 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
+ {
+ unsigned long ret, limit = current_thread_info()->addr_limit;
+
++ /*
++ * Asynchronous I/O running in a kernel thread does not have the
++ * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
++ * the user address before checking.
++ */
+ if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
+- test_thread_flag(TIF_TAGGED_ADDR))
++ (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
+ addr = untagged_addr(addr);
+
+ __chk_user_ptr(addr);
+diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
+index a3f9c665bb5b..baa740815b3c 100644
+--- a/arch/powerpc/kvm/book3s_xive.c
++++ b/arch/powerpc/kvm/book3s_xive.c
+@@ -2005,6 +2005,10 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
+
+ pr_devel("Creating xive for partition\n");
+
++ /* Already there ? */
++ if (kvm->arch.xive)
++ return -EEXIST;
++
+ xive = kvmppc_xive_get_device(kvm, type);
+ if (!xive)
+ return -ENOMEM;
+@@ -2014,12 +2018,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
+ xive->kvm = kvm;
+ mutex_init(&xive->lock);
+
+- /* Already there ? */
+- if (kvm->arch.xive)
+- ret = -EEXIST;
+- else
+- kvm->arch.xive = xive;
+-
+ /* We use the default queue size set by the host */
+ xive->q_order = xive_native_default_eq_shift();
+ if (xive->q_order < PAGE_SHIFT)
+@@ -2039,6 +2037,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
+ if (ret)
+ return ret;
+
++ kvm->arch.xive = xive;
+ return 0;
+ }
+
+diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
+index 78b906ffa0d2..5a3373e06e60 100644
+--- a/arch/powerpc/kvm/book3s_xive_native.c
++++ b/arch/powerpc/kvm/book3s_xive_native.c
+@@ -50,6 +50,24 @@ static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
+ }
+ }
+
++static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
++ u8 prio, __be32 *qpage,
++ u32 order, bool can_escalate)
++{
++ int rc;
++ __be32 *qpage_prev = q->qpage;
++
++ rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
++ can_escalate);
++ if (rc)
++ return rc;
++
++ if (qpage_prev)
++ put_page(virt_to_page(qpage_prev));
++
++ return rc;
++}
++
+ void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
+ {
+ struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
+@@ -582,19 +600,14 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
+ q->guest_qaddr = 0;
+ q->guest_qshift = 0;
+
+- rc = xive_native_configure_queue(xc->vp_id, q, priority,
+- NULL, 0, true);
++ rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
++ NULL, 0, true);
+ if (rc) {
+ pr_err("Failed to reset queue %d for VCPU %d: %d\n",
+ priority, xc->server_num, rc);
+ return rc;
+ }
+
+- if (q->qpage) {
+- put_page(virt_to_page(q->qpage));
+- q->qpage = NULL;
+- }
+-
+ return 0;
+ }
+
+@@ -624,12 +637,6 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ gfn = gpa_to_gfn(kvm_eq.qaddr);
+- page = gfn_to_page(kvm, gfn);
+- if (is_error_page(page)) {
+- srcu_read_unlock(&kvm->srcu, srcu_idx);
+- pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
+- return -EINVAL;
+- }
+
+ page_size = kvm_host_page_size(kvm, gfn);
+ if (1ull << kvm_eq.qshift > page_size) {
+@@ -638,6 +645,13 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
+ return -EINVAL;
+ }
+
++ page = gfn_to_page(kvm, gfn);
++ if (is_error_page(page)) {
++ srcu_read_unlock(&kvm->srcu, srcu_idx);
++ pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
++ return -EINVAL;
++ }
++
+ qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+
+@@ -653,8 +667,8 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
+ * OPAL level because the use of END ESBs is not supported by
+ * Linux.
+ */
+- rc = xive_native_configure_queue(xc->vp_id, q, priority,
+- (__be32 *) qaddr, kvm_eq.qshift, true);
++ rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
++ (__be32 *) qaddr, kvm_eq.qshift, true);
+ if (rc) {
+ pr_err("Failed to configure queue %d for VCPU %d: %d\n",
+ priority, xc->server_num, rc);
+@@ -1081,7 +1095,6 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
+ dev->private = xive;
+ xive->dev = dev;
+ xive->kvm = kvm;
+- kvm->arch.xive = xive;
+ mutex_init(&xive->mapping_lock);
+ mutex_init(&xive->lock);
+
+@@ -1102,6 +1115,7 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
+ if (ret)
+ return ret;
+
++ kvm->arch.xive = xive;
+ return 0;
+ }
+
+diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
+index 688911051b44..f4afa301954a 100644
+--- a/arch/sparc/include/asm/io_64.h
++++ b/arch/sparc/include/asm/io_64.h
+@@ -407,6 +407,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
+ }
+
+ #define ioremap_nocache(X,Y) ioremap((X),(Y))
++#define ioremap_uc(X,Y) ioremap((X),(Y))
+ #define ioremap_wc(X,Y) ioremap((X),(Y))
+ #define ioremap_wt(X,Y) ioremap((X),(Y))
+
+diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
+index f68c0c753c38..53dbcca9af09 100644
+--- a/arch/x86/kvm/cpuid.c
++++ b/arch/x86/kvm/cpuid.c
+@@ -504,7 +504,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
+
+ r = -E2BIG;
+
+- if (*nent >= maxnent)
++ if (WARN_ON(*nent >= maxnent))
+ goto out;
+
+ do_host_cpuid(entry, function, 0);
+@@ -810,6 +810,9 @@ out:
+ static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
+ int *nent, int maxnent, unsigned int type)
+ {
++ if (*nent >= maxnent)
++ return -E2BIG;
++
+ if (type == KVM_GET_EMULATED_CPUID)
+ return __do_cpuid_func_emulated(entry, func, nent, maxnent);
+
+diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
+index 0e7c9301fe86..d0523741fb03 100644
+--- a/arch/x86/kvm/vmx/nested.c
++++ b/arch/x86/kvm/vmx/nested.c
+@@ -2418,6 +2418,16 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
+ entry_failure_code))
+ return -EINVAL;
+
++ /*
++ * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
++ * on nested VM-Exit, which can occur without actually running L2 and
++ * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with
++ * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
++ * transition to HLT instead of running L2.
++ */
++ if (enable_ept)
++ vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
++
+ /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
+ if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
+ is_pae_paging(vcpu)) {
+diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
+index 04a8212704c1..f09a213fd5cb 100644
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -2995,6 +2995,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
+ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+ {
+ struct kvm *kvm = vcpu->kvm;
++ bool update_guest_cr3 = true;
+ unsigned long guest_cr3;
+ u64 eptp;
+
+@@ -3011,15 +3012,18 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+ spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
+ }
+
+- if (enable_unrestricted_guest || is_paging(vcpu) ||
+- is_guest_mode(vcpu))
++ /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */
++ if (is_guest_mode(vcpu))
++ update_guest_cr3 = false;
++ else if (enable_unrestricted_guest || is_paging(vcpu))
+ guest_cr3 = kvm_read_cr3(vcpu);
+ else
+ guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
+ ept_load_pdptrs(vcpu);
+ }
+
+- vmcs_writel(GUEST_CR3, guest_cr3);
++ if (update_guest_cr3)
++ vmcs_writel(GUEST_CR3, guest_cr3);
+ }
+
+ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 5d530521f11d..8d82ec0482fc 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -300,13 +300,14 @@ int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
+ struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
+ int err;
+
+- if (((value ^ smsr->values[slot].curr) & mask) == 0)
++ value = (value & mask) | (smsr->values[slot].host & ~mask);
++ if (value == smsr->values[slot].curr)
+ return 0;
+- smsr->values[slot].curr = value;
+ err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
+ if (err)
+ return 1;
+
++ smsr->values[slot].curr = value;
+ if (!smsr->registered) {
+ smsr->urn.on_user_return = kvm_on_user_return;
+ user_return_notifier_register(&smsr->urn);
+@@ -1327,10 +1328,15 @@ static u64 kvm_get_arch_capabilities(void)
+ * If TSX is disabled on the system, guests are also mitigated against
+ * TAA and clear CPU buffer mitigation is not required for guests.
+ */
+- if (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM) &&
+- (data & ARCH_CAP_TSX_CTRL_MSR))
++ if (!boot_cpu_has(X86_FEATURE_RTM))
++ data &= ~ARCH_CAP_TAA_NO;
++ else if (!boot_cpu_has_bug(X86_BUG_TAA))
++ data |= ARCH_CAP_TAA_NO;
++ else if (data & ARCH_CAP_TSX_CTRL_MSR)
+ data &= ~ARCH_CAP_MDS_NO;
+
++ /* KVM does not emulate MSR_IA32_TSX_CTRL. */
++ data &= ~ARCH_CAP_TSX_CTRL_MSR;
+ return data;
+ }
+
+@@ -4421,6 +4427,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ case KVM_SET_NESTED_STATE: {
+ struct kvm_nested_state __user *user_kvm_nested_state = argp;
+ struct kvm_nested_state kvm_state;
++ int idx;
+
+ r = -EINVAL;
+ if (!kvm_x86_ops->set_nested_state)
+@@ -4444,7 +4451,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
+ && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
+ break;
+
++ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
++ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+ break;
+ }
+ case KVM_GET_SUPPORTED_HV_CPUID: {
+diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
+index 9ceacd1156db..304d31d8cbbc 100644
+--- a/arch/x86/mm/fault.c
++++ b/arch/x86/mm/fault.c
+@@ -197,7 +197,7 @@ void vmalloc_sync_all(void)
+ return;
+
+ for (address = VMALLOC_START & PMD_MASK;
+- address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
++ address >= TASK_SIZE_MAX && address < VMALLOC_END;
+ address += PMD_SIZE) {
+ struct page *page;
+
+diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
+index 527e69b12002..e723559c386a 100644
+--- a/arch/x86/pci/fixup.c
++++ b/arch/x86/pci/fixup.c
+@@ -588,6 +588,17 @@ static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
+ }
+ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
+
++/*
++ * Device [1022:7914]
++ * When in D0, PME# doesn't get asserted when plugging USB 2.0 device.
++ */
++static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
++{
++ dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
++ dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
++
+ /*
+ * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
+ *
+diff --git a/crypto/af_alg.c b/crypto/af_alg.c
+index 879cf23f7489..0dceaabc6321 100644
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -1043,7 +1043,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
+ af_alg_free_resources(areq);
+ sock_put(sk);
+
+- iocb->ki_complete(iocb, err ? err : resultlen, 0);
++ iocb->ki_complete(iocb, err ? err : (int)resultlen, 0);
+ }
+ EXPORT_SYMBOL_GPL(af_alg_async_cb);
+
+diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
+index 910e0b46012e..b785c476de67 100644
+--- a/crypto/crypto_user_base.c
++++ b/crypto/crypto_user_base.c
+@@ -213,8 +213,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+ drop_alg:
+ crypto_mod_put(alg);
+
+- if (err)
++ if (err) {
++ kfree_skb(skb);
+ return err;
++ }
+
+ return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
+ }
+diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
+index 8bad88413de1..1be95432fa23 100644
+--- a/crypto/crypto_user_stat.c
++++ b/crypto/crypto_user_stat.c
+@@ -328,8 +328,10 @@ int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
+ drop_alg:
+ crypto_mod_put(alg);
+
+- if (err)
++ if (err) {
++ kfree_skb(skb);
+ return err;
++ }
+
+ return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
+ }
+diff --git a/crypto/ecc.c b/crypto/ecc.c
+index dfe114bc0c4a..8ee787723c5c 100644
+--- a/crypto/ecc.c
++++ b/crypto/ecc.c
+@@ -1284,10 +1284,11 @@ EXPORT_SYMBOL(ecc_point_mult_shamir);
+ static inline void ecc_swap_digits(const u64 *in, u64 *out,
+ unsigned int ndigits)
+ {
++ const __be64 *src = (__force __be64 *)in;
+ int i;
+
+ for (i = 0; i < ndigits; i++)
+- out[i] = __swab64(in[ndigits - 1 - i]);
++ out[i] = be64_to_cpu(src[ndigits - 1 - i]);
+ }
+
+ static int __ecc_is_key_valid(const struct ecc_curve *curve,
+diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
+index eb76a823fbb2..7067d5542a82 100644
+--- a/drivers/android/binder_alloc.c
++++ b/drivers/android/binder_alloc.c
+@@ -277,8 +277,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ return 0;
+
+ free_range:
+- for (page_addr = end - PAGE_SIZE; page_addr >= start;
+- page_addr -= PAGE_SIZE) {
++ for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
+ bool ret;
+ size_t index;
+
+@@ -291,6 +290,8 @@ free_range:
+ WARN_ON(!ret);
+
+ trace_binder_free_lru_end(alloc, index);
++ if (page_addr == start)
++ break;
+ continue;
+
+ err_vm_insert_page_failed:
+@@ -298,7 +299,8 @@ err_vm_insert_page_failed:
+ page->page_ptr = NULL;
+ err_alloc_page_failed:
+ err_page_ptr_cleared:
+- ;
++ if (page_addr == start)
++ break;
+ }
+ err_no_vma:
+ if (mm) {
+@@ -681,17 +683,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
+ struct binder_buffer *buffer;
+
+ mutex_lock(&binder_alloc_mmap_lock);
+- if (alloc->buffer) {
++ if (alloc->buffer_size) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
++ alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
++ SZ_4M);
++ mutex_unlock(&binder_alloc_mmap_lock);
+
+ alloc->buffer = (void __user *)vma->vm_start;
+- mutex_unlock(&binder_alloc_mmap_lock);
+
+- alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+- SZ_4M);
+ alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
+ sizeof(alloc->pages[0]),
+ GFP_KERNEL);
+@@ -722,8 +724,9 @@ err_alloc_buf_struct_failed:
+ kfree(alloc->pages);
+ alloc->pages = NULL;
+ err_alloc_pages_failed:
+- mutex_lock(&binder_alloc_mmap_lock);
+ alloc->buffer = NULL;
++ mutex_lock(&binder_alloc_mmap_lock);
++ alloc->buffer_size = 0;
+ err_already_mapped:
+ mutex_unlock(&binder_alloc_mmap_lock);
+ binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
+@@ -841,14 +844,20 @@ void binder_alloc_print_pages(struct seq_file *m,
+ int free = 0;
+
+ mutex_lock(&alloc->mutex);
+- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+- page = &alloc->pages[i];
+- if (!page->page_ptr)
+- free++;
+- else if (list_empty(&page->lru))
+- active++;
+- else
+- lru++;
++ /*
++ * Make sure the binder_alloc is fully initialized, otherwise we might
++ * read inconsistent state.
++ */
++ if (binder_alloc_get_vma(alloc) != NULL) {
++ for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
++ page = &alloc->pages[i];
++ if (!page->page_ptr)
++ free++;
++ else if (list_empty(&page->lru))
++ active++;
++ else
++ lru++;
++ }
+ }
+ mutex_unlock(&alloc->mutex);
+ seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
+diff --git a/drivers/char/lp.c b/drivers/char/lp.c
+index 7c9269e3477a..bd95aba1f9fe 100644
+--- a/drivers/char/lp.c
++++ b/drivers/char/lp.c
+@@ -713,6 +713,10 @@ static int lp_set_timeout64(unsigned int minor, void __user *arg)
+ if (copy_from_user(karg, arg, sizeof(karg)))
+ return -EFAULT;
+
++ /* sparc64 suseconds_t is 32-bit only */
++ if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
++ karg[1] >>= 32;
++
+ return lp_set_timeout(minor, karg[0], karg[1]);
+ }
+
+diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
+index 35db14cf3102..85a6efd6b68f 100644
+--- a/drivers/cpufreq/imx-cpufreq-dt.c
++++ b/drivers/cpufreq/imx-cpufreq-dt.c
+@@ -44,19 +44,19 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
+ mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
+
+ /*
+- * Early samples without fuses written report "0 0" which means
+- * consumer segment and minimum speed grading.
+- *
+- * According to datasheet minimum speed grading is not supported for
+- * consumer parts so clamp to 1 to avoid warning for "no OPPs"
++ * Early samples without fuses written report "0 0" which may NOT
++ * match any OPP defined in DT. So clamp to minimum OPP defined in
++ * DT to avoid warning for "no OPPs".
+ *
+ * Applies to i.MX8M series SoCs.
+ */
+- if (mkt_segment == 0 && speed_grade == 0 && (
+- of_machine_is_compatible("fsl,imx8mm") ||
+- of_machine_is_compatible("fsl,imx8mn") ||
+- of_machine_is_compatible("fsl,imx8mq")))
+- speed_grade = 1;
++ if (mkt_segment == 0 && speed_grade == 0) {
++ if (of_machine_is_compatible("fsl,imx8mm") ||
++ of_machine_is_compatible("fsl,imx8mq"))
++ speed_grade = 1;
++ if (of_machine_is_compatible("fsl,imx8mn"))
++ speed_grade = 0xb;
++ }
+
+ supported_hw[0] = BIT(speed_grade);
+ supported_hw[1] = BIT(mkt_segment);
+diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
+index de5e9352e920..7d6b695c4ab3 100644
+--- a/drivers/crypto/amcc/crypto4xx_core.c
++++ b/drivers/crypto/amcc/crypto4xx_core.c
+@@ -365,12 +365,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
+ dma_alloc_coherent(dev->core_dev->device,
+ PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
+ &dev->scatter_buffer_pa, GFP_ATOMIC);
+- if (!dev->scatter_buffer_va) {
+- dma_free_coherent(dev->core_dev->device,
+- sizeof(struct ce_sd) * PPC4XX_NUM_SD,
+- dev->sdr, dev->sdr_pa);
++ if (!dev->scatter_buffer_va)
+ return -ENOMEM;
+- }
+
+ for (i = 0; i < PPC4XX_NUM_SD; i++) {
+ dev->sdr[i].ptr = dev->scatter_buffer_pa +
+diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
+index 026f193556f9..00920a2b95ce 100644
+--- a/drivers/crypto/atmel-aes.c
++++ b/drivers/crypto/atmel-aes.c
+@@ -490,6 +490,29 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
+ static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
+ #endif
+
++static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
++{
++ struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
++ struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
++ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++
++ if (req->nbytes < ivsize)
++ return;
++
++ if (rctx->mode & AES_FLAGS_ENCRYPT) {
++ scatterwalk_map_and_copy(req->info, req->dst,
++ req->nbytes - ivsize, ivsize, 0);
++ } else {
++ if (req->src == req->dst)
++ memcpy(req->info, rctx->lastc, ivsize);
++ else
++ scatterwalk_map_and_copy(req->info, req->src,
++ req->nbytes - ivsize,
++ ivsize, 0);
++ }
++}
++
+ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
+ {
+ #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
+@@ -500,26 +523,8 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
+ clk_disable(dd->iclk);
+ dd->flags &= ~AES_FLAGS_BUSY;
+
+- if (!dd->ctx->is_aead) {
+- struct ablkcipher_request *req =
+- ablkcipher_request_cast(dd->areq);
+- struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
+- struct crypto_ablkcipher *ablkcipher =
+- crypto_ablkcipher_reqtfm(req);
+- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+-
+- if (rctx->mode & AES_FLAGS_ENCRYPT) {
+- scatterwalk_map_and_copy(req->info, req->dst,
+- req->nbytes - ivsize, ivsize, 0);
+- } else {
+- if (req->src == req->dst) {
+- memcpy(req->info, rctx->lastc, ivsize);
+- } else {
+- scatterwalk_map_and_copy(req->info, req->src,
+- req->nbytes - ivsize, ivsize, 0);
+- }
+- }
+- }
++ if (!dd->ctx->is_aead)
++ atmel_aes_set_iv_as_last_ciphertext_block(dd);
+
+ if (dd->is_async)
+ dd->areq->complete(dd->areq, err);
+@@ -1125,10 +1130,12 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
+ rctx->mode = mode;
+
+ if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
+- int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
++ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+
+- scatterwalk_map_and_copy(rctx->lastc, req->src,
+- (req->nbytes - ivsize), ivsize, 0);
++ if (req->nbytes >= ivsize)
++ scatterwalk_map_and_copy(rctx->lastc, req->src,
++ req->nbytes - ivsize,
++ ivsize, 0);
+ }
+
+ return atmel_aes_handle_queue(dd, &req->base);
+diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
+index a54f9367a580..0770a83bf1a5 100644
+--- a/drivers/crypto/ccp/ccp-dmaengine.c
++++ b/drivers/crypto/ccp/ccp-dmaengine.c
+@@ -342,6 +342,7 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
+ desc->tx_desc.flags = flags;
+ desc->tx_desc.tx_submit = ccp_tx_submit;
+ desc->ccp = chan->ccp;
++ INIT_LIST_HEAD(&desc->entry);
+ INIT_LIST_HEAD(&desc->pending);
+ INIT_LIST_HEAD(&desc->active);
+ desc->status = DMA_IN_PROGRESS;
+diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
+index d81a1297cb9e..940485112d15 100644
+--- a/drivers/crypto/geode-aes.c
++++ b/drivers/crypto/geode-aes.c
+@@ -10,6 +10,7 @@
+ #include <linux/spinlock.h>
+ #include <crypto/algapi.h>
+ #include <crypto/aes.h>
++#include <crypto/skcipher.h>
+
+ #include <linux/io.h>
+ #include <linux/delay.h>
+@@ -166,13 +167,15 @@ static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
+ /*
+ * The requested key size is not supported by HW, do a fallback
+ */
+- op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+- op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
++ crypto_sync_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK);
++ crypto_sync_skcipher_set_flags(op->fallback.blk,
++ tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
+
+- ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
++ ret = crypto_sync_skcipher_setkey(op->fallback.blk, key, len);
+ if (ret) {
+ tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+- tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
++ tfm->crt_flags |= crypto_sync_skcipher_get_flags(op->fallback.blk) &
++ CRYPTO_TFM_RES_MASK;
+ }
+ return ret;
+ }
+@@ -181,33 +184,28 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- unsigned int ret;
+- struct crypto_blkcipher *tfm;
+ struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
++ SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+
+- tfm = desc->tfm;
+- desc->tfm = op->fallback.blk;
+-
+- ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
++ skcipher_request_set_sync_tfm(req, op->fallback.blk);
++ skcipher_request_set_callback(req, 0, NULL, NULL);
++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+
+- desc->tfm = tfm;
+- return ret;
++ return crypto_skcipher_decrypt(req);
+ }
++
+ static int fallback_blk_enc(struct blkcipher_desc *desc,
+ struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+ {
+- unsigned int ret;
+- struct crypto_blkcipher *tfm;
+ struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
++ SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
+
+- tfm = desc->tfm;
+- desc->tfm = op->fallback.blk;
+-
+- ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
++ skcipher_request_set_sync_tfm(req, op->fallback.blk);
++ skcipher_request_set_callback(req, 0, NULL, NULL);
++ skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
+
+- desc->tfm = tfm;
+- return ret;
++ return crypto_skcipher_encrypt(req);
+ }
+
+ static void
+@@ -307,6 +305,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+@@ -339,6 +340,9 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+@@ -366,9 +370,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
+ const char *name = crypto_tfm_alg_name(tfm);
+ struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+- op->fallback.blk = crypto_alloc_blkcipher(name, 0,
+- CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+-
++ op->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
++ CRYPTO_ALG_NEED_FALLBACK);
+ if (IS_ERR(op->fallback.blk)) {
+ printk(KERN_ERR "Error allocating fallback algo %s\n", name);
+ return PTR_ERR(op->fallback.blk);
+@@ -381,7 +384,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
+ {
+ struct geode_aes_op *op = crypto_tfm_ctx(tfm);
+
+- crypto_free_blkcipher(op->fallback.blk);
++ crypto_free_sync_skcipher(op->fallback.blk);
+ op->fallback.blk = NULL;
+ }
+
+@@ -420,6 +423,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_dec(desc, dst, src, nbytes);
+
+@@ -450,6 +456,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
+ struct blkcipher_walk walk;
+ int err, ret;
+
++ if (nbytes % AES_BLOCK_SIZE)
++ return -EINVAL;
++
+ if (unlikely(op->keylen != AES_KEYSIZE_128))
+ return fallback_blk_enc(desc, dst, src, nbytes);
+
+diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
+index 5c6e131a8f9d..f8a86898ac22 100644
+--- a/drivers/crypto/geode-aes.h
++++ b/drivers/crypto/geode-aes.h
+@@ -60,7 +60,7 @@ struct geode_aes_op {
+ u8 *iv;
+
+ union {
+- struct crypto_blkcipher *blk;
++ struct crypto_sync_skcipher *blk;
+ struct crypto_cipher *cip;
+ } fallback;
+ u32 keylen;
+diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
+index 0bb62857ffb2..f6f6a688c009 100644
+--- a/drivers/edac/ghes_edac.c
++++ b/drivers/edac/ghes_edac.c
+@@ -26,9 +26,18 @@ struct ghes_edac_pvt {
+ char msg[80];
+ };
+
+-static atomic_t ghes_init = ATOMIC_INIT(0);
++static refcount_t ghes_refcount = REFCOUNT_INIT(0);
++
++/*
++ * Access to ghes_pvt must be protected by ghes_lock. The spinlock
++ * also provides the necessary (implicit) memory barrier for the SMP
++ * case to make the pointer visible on another CPU.
++ */
+ static struct ghes_edac_pvt *ghes_pvt;
+
++/* GHES registration mutex */
++static DEFINE_MUTEX(ghes_reg_mutex);
++
+ /*
+ * Sync with other, potentially concurrent callers of
+ * ghes_edac_report_mem_error(). We don't know what the
+@@ -79,9 +88,8 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
+ (*num_dimm)++;
+ }
+
+-static int get_dimm_smbios_index(u16 handle)
++static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle)
+ {
+- struct mem_ctl_info *mci = ghes_pvt->mci;
+ int i;
+
+ for (i = 0; i < mci->tot_dimms; i++) {
+@@ -198,14 +206,11 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
+ enum hw_event_mc_err_type type;
+ struct edac_raw_error_desc *e;
+ struct mem_ctl_info *mci;
+- struct ghes_edac_pvt *pvt = ghes_pvt;
++ struct ghes_edac_pvt *pvt;
+ unsigned long flags;
+ char *p;
+ u8 grain_bits;
+
+- if (!pvt)
+- return;
+-
+ /*
+ * We can do the locking below because GHES defers error processing
+ * from NMI to IRQ context. Whenever that changes, we'd at least
+@@ -216,6 +221,10 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
+
+ spin_lock_irqsave(&ghes_lock, flags);
+
++ pvt = ghes_pvt;
++ if (!pvt)
++ goto unlock;
++
+ mci = pvt->mci;
+ e = &mci->error_desc;
+
+@@ -348,7 +357,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
+ p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
+ mem_err->mem_dev_handle);
+
+- index = get_dimm_smbios_index(mem_err->mem_dev_handle);
++ index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
+ if (index >= 0) {
+ e->top_layer = index;
+ e->enable_per_layer_report = true;
+@@ -443,6 +452,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
+ grain_bits, e->syndrome, pvt->detail_location);
+
+ edac_raw_mc_handle_error(type, mci, e);
++
++unlock:
+ spin_unlock_irqrestore(&ghes_lock, flags);
+ }
+
+@@ -457,10 +468,12 @@ static struct acpi_platform_list plat_list[] = {
+ int ghes_edac_register(struct ghes *ghes, struct device *dev)
+ {
+ bool fake = false;
+- int rc, num_dimm = 0;
++ int rc = 0, num_dimm = 0;
+ struct mem_ctl_info *mci;
++ struct ghes_edac_pvt *pvt;
+ struct edac_mc_layer layers[1];
+ struct ghes_edac_dimm_fill dimm_fill;
++ unsigned long flags;
+ int idx = -1;
+
+ if (IS_ENABLED(CONFIG_X86)) {
+@@ -472,11 +485,14 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
+ idx = 0;
+ }
+
++ /* finish another registration/unregistration instance first */
++ mutex_lock(&ghes_reg_mutex);
++
+ /*
+ * We have only one logical memory controller to which all DIMMs belong.
+ */
+- if (atomic_inc_return(&ghes_init) > 1)
+- return 0;
++ if (refcount_inc_not_zero(&ghes_refcount))
++ goto unlock;
+
+ /* Get the number of DIMMs */
+ dmi_walk(ghes_edac_count_dimms, &num_dimm);
+@@ -494,12 +510,13 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
+ mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
+ if (!mci) {
+ pr_info("Can't allocate memory for EDAC data\n");
+- return -ENOMEM;
++ rc = -ENOMEM;
++ goto unlock;
+ }
+
+- ghes_pvt = mci->pvt_info;
+- ghes_pvt->ghes = ghes;
+- ghes_pvt->mci = mci;
++ pvt = mci->pvt_info;
++ pvt->ghes = ghes;
++ pvt->mci = mci;
+
+ mci->pdev = dev;
+ mci->mtype_cap = MEM_FLAG_EMPTY;
+@@ -541,23 +558,48 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
+ if (rc < 0) {
+ pr_info("Can't register at EDAC core\n");
+ edac_mc_free(mci);
+- return -ENODEV;
++ rc = -ENODEV;
++ goto unlock;
+ }
+- return 0;
++
++ spin_lock_irqsave(&ghes_lock, flags);
++ ghes_pvt = pvt;
++ spin_unlock_irqrestore(&ghes_lock, flags);
++
++ /* only increment on success */
++ refcount_inc(&ghes_refcount);
++
++unlock:
++ mutex_unlock(&ghes_reg_mutex);
++
++ return rc;
+ }
+
+ void ghes_edac_unregister(struct ghes *ghes)
+ {
+ struct mem_ctl_info *mci;
++ unsigned long flags;
+
+- if (!ghes_pvt)
+- return;
++ mutex_lock(&ghes_reg_mutex);
+
+- if (atomic_dec_return(&ghes_init))
+- return;
++ if (!refcount_dec_and_test(&ghes_refcount))
++ goto unlock;
+
+- mci = ghes_pvt->mci;
++ /*
++ * Wait for the irq handler being finished.
++ */
++ spin_lock_irqsave(&ghes_lock, flags);
++ mci = ghes_pvt ? ghes_pvt->mci : NULL;
+ ghes_pvt = NULL;
+- edac_mc_del_mc(mci->pdev);
+- edac_mc_free(mci);
++ spin_unlock_irqrestore(&ghes_lock, flags);
++
++ if (!mci)
++ goto unlock;
++
++ mci = edac_mc_del_mc(mci->pdev);
++ if (mci)
++ edac_mc_free(mci);
++
++unlock:
++ mutex_unlock(&ghes_reg_mutex);
+ }
+diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
+index 8230dac01a89..3a4126dc2520 100644
+--- a/drivers/gpu/drm/drm_damage_helper.c
++++ b/drivers/gpu/drm/drm_damage_helper.c
+@@ -212,8 +212,14 @@ retry:
+ drm_for_each_plane(plane, fb->dev) {
+ struct drm_plane_state *plane_state;
+
+- if (plane->state->fb != fb)
++ ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
++ if (ret)
++ goto out;
++
++ if (plane->state->fb != fb) {
++ drm_modeset_unlock(&plane->mutex);
+ continue;
++ }
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
+index 2a77823b8e9a..e66c38332df4 100644
+--- a/drivers/gpu/drm/i810/i810_dma.c
++++ b/drivers/gpu/drm/i810/i810_dma.c
+@@ -728,7 +728,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev,
+ if (nbox > I810_NR_SAREA_CLIPRECTS)
+ nbox = I810_NR_SAREA_CLIPRECTS;
+
+- if (used > 4 * 1024)
++ if (used < 0 || used > 4 * 1024)
+ used = 0;
+
+ if (sarea_priv->dirty)
+@@ -1048,7 +1048,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in
+ if (u != I810_BUF_CLIENT)
+ DRM_DEBUG("MC found buffer that isn't mine!\n");
+
+- if (used > 4 * 1024)
++ if (used < 0 || used > 4 * 1024)
+ used = 0;
+
+ sarea_priv->dirty = 0x7f;
+diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
+index 9a09eba53182..5649887d2b90 100644
+--- a/drivers/gpu/drm/mcde/mcde_drv.c
++++ b/drivers/gpu/drm/mcde/mcde_drv.c
+@@ -484,7 +484,8 @@ static int mcde_probe(struct platform_device *pdev)
+ }
+ if (!match) {
+ dev_err(dev, "no matching components\n");
+- return -ENODEV;
++ ret = -ENODEV;
++ goto clk_disable;
+ }
+ if (IS_ERR(match)) {
+ dev_err(dev, "could not create component match\n");
+diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
+index 6be879578140..1c74381a4fc9 100644
+--- a/drivers/gpu/drm/msm/msm_debugfs.c
++++ b/drivers/gpu/drm/msm/msm_debugfs.c
+@@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+- int ret;
+-
+- ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+- if (ret)
+- return ret;
+
++ mutex_lock(&show_priv->dev->struct_mutex);
+ gpu->funcs->gpu_state_put(show_priv->state);
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+index 219c10eb752c..ee44640edeb5 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
+@@ -652,10 +652,13 @@ static ssize_t cyc_threshold_store(struct device *dev,
+
+ if (kstrtoul(buf, 16, &val))
+ return -EINVAL;
++
++ /* mask off max threshold before checking min value */
++ val &= ETM_CYC_THRESHOLD_MASK;
+ if (val < drvdata->ccitmin)
+ return -EINVAL;
+
+- config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
++ config->ccctlr = val;
+ return size;
+ }
+ static DEVICE_ATTR_RW(cyc_threshold);
+@@ -686,14 +689,16 @@ static ssize_t bb_ctrl_store(struct device *dev,
+ return -EINVAL;
+ if (!drvdata->nr_addr_cmp)
+ return -EINVAL;
++
+ /*
+- * Bit[7:0] selects which address range comparator is used for
+- * branch broadcast control.
++ * Bit[8] controls include(1) / exclude(0), bits[0-7] select
++ * individual range comparators. If include then at least 1
++ * range must be selected.
+ */
+- if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
++ if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
+ return -EINVAL;
+
+- config->bb_ctrl = val;
++ config->bb_ctrl = val & GENMASK(8, 0);
+ return size;
+ }
+ static DEVICE_ATTR_RW(bb_ctrl);
+@@ -1324,8 +1329,8 @@ static ssize_t seq_event_store(struct device *dev,
+
+ spin_lock(&drvdata->spinlock);
+ idx = config->seq_idx;
+- /* RST, bits[7:0] */
+- config->seq_ctrl[idx] = val & 0xFF;
++ /* Seq control has two masks B[15:8] F[7:0] */
++ config->seq_ctrl[idx] = val & 0xFFFF;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+ }
+@@ -1580,7 +1585,7 @@ static ssize_t res_ctrl_store(struct device *dev,
+ if (idx % 2 != 0)
+ /* PAIRINV, bit[21] */
+ val &= ~BIT(21);
+- config->res_ctrl[idx] = val;
++ config->res_ctrl[idx] = val & GENMASK(21, 0);
+ spin_unlock(&drvdata->spinlock);
+ return size;
+ }
+diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
+index 3926be78036e..568b21eb6ea1 100644
+--- a/drivers/infiniband/hw/qib/qib_sysfs.c
++++ b/drivers/infiniband/hw/qib/qib_sysfs.c
+@@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj,
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_kobj);
+
++ if (!pattr->show)
++ return -EIO;
++
+ return pattr->show(ppd, buf);
+ }
+
+@@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
+ struct qib_pportdata *ppd =
+ container_of(kobj, struct qib_pportdata, pport_kobj);
+
++ if (!pattr->store)
++ return -EIO;
++
+ return pattr->store(ppd, buf, len);
+ }
+
+diff --git a/drivers/input/joystick/psxpad-spi.c b/drivers/input/joystick/psxpad-spi.c
+index 7eee1b0e360f..99a6052500ca 100644
+--- a/drivers/input/joystick/psxpad-spi.c
++++ b/drivers/input/joystick/psxpad-spi.c
+@@ -292,7 +292,7 @@ static int psxpad_spi_probe(struct spi_device *spi)
+ if (!pad)
+ return -ENOMEM;
+
+- pdev = input_allocate_polled_device();
++ pdev = devm_input_allocate_polled_device(&spi->dev);
+ if (!pdev) {
+ dev_err(&spi->dev, "failed to allocate input device\n");
+ return -ENOMEM;
+diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
+index 56fae3472114..1ae6f8bba9ae 100644
+--- a/drivers/input/mouse/synaptics.c
++++ b/drivers/input/mouse/synaptics.c
+@@ -172,6 +172,7 @@ static const char * const smbus_pnp_ids[] = {
+ "LEN0071", /* T480 */
+ "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
+ "LEN0073", /* X1 Carbon G5 (Elantech) */
++ "LEN0091", /* X1 Carbon 6 */
+ "LEN0092", /* X1 Carbon 6 */
+ "LEN0093", /* T480 */
+ "LEN0096", /* X280 */
+diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
+index a4cabf52740c..74f7c6f214ff 100644
+--- a/drivers/input/rmi4/rmi_f34v7.c
++++ b/drivers/input/rmi4/rmi_f34v7.c
+@@ -1189,6 +1189,9 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
+ {
+ int ret;
+
++ f34->fn->rmi_dev->driver->set_irq_bits(f34->fn->rmi_dev,
++ f34->fn->irq_mask);
++
+ rmi_f34v7_read_queries_bl_version(f34);
+
+ f34->v7.image = fw->data;
+diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
+index 2407ea43de59..b313c579914f 100644
+--- a/drivers/input/rmi4/rmi_smbus.c
++++ b/drivers/input/rmi4/rmi_smbus.c
+@@ -163,7 +163,6 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ /* prepare to write next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
+- rmiaddr += SMB_MAX_COUNT;
+ }
+ exit:
+ mutex_unlock(&rmi_smb->page_mutex);
+@@ -215,7 +214,6 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
+ /* prepare to read next block of bytes */
+ cur_len -= SMB_MAX_COUNT;
+ databuff += SMB_MAX_COUNT;
+- rmiaddr += SMB_MAX_COUNT;
+ }
+
+ retval = 0;
+diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
+index fb43aa708660..0403102e807e 100644
+--- a/drivers/input/touchscreen/goodix.c
++++ b/drivers/input/touchscreen/goodix.c
+@@ -128,6 +128,15 @@ static const unsigned long goodix_irq_flags[] = {
+ */
+ static const struct dmi_system_id rotated_screen[] = {
+ #if defined(CONFIG_DMI) && defined(CONFIG_X86)
++ {
++ .ident = "Teclast X89",
++ .matches = {
++ /* tPAD is too generic, also match on bios date */
++ DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
++ DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
++ DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
++ },
++ },
+ {
+ .ident = "WinBook TW100",
+ .matches = {
+diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
+index 4c5ba35d48d4..834b35dc3b13 100644
+--- a/drivers/mailbox/tegra-hsp.c
++++ b/drivers/mailbox/tegra-hsp.c
+@@ -657,7 +657,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
+ hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
+ hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
+
+- err = platform_get_irq_byname(pdev, "doorbell");
++ err = platform_get_irq_byname_optional(pdev, "doorbell");
+ if (err >= 0)
+ hsp->doorbell_irq = err;
+
+@@ -677,7 +677,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
+ if (!name)
+ return -ENOMEM;
+
+- err = platform_get_irq_byname(pdev, name);
++ err = platform_get_irq_byname_optional(pdev, name);
+ if (err >= 0) {
+ hsp->shared_irqs[i] = err;
+ count++;
+diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
+index 1e772287b1c8..aa88bdeb9978 100644
+--- a/drivers/md/raid0.c
++++ b/drivers/md/raid0.c
+@@ -615,7 +615,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
+ tmp_dev = map_sector(mddev, zone, sector, &sector);
+ break;
+ default:
+- WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
++ WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
+ bio_io_error(bio);
+ return true;
+ }
+diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
+index 13da4c5c7d17..7741151606ef 100644
+--- a/drivers/media/rc/rc-main.c
++++ b/drivers/media/rc/rc-main.c
+@@ -1773,6 +1773,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
+ set_bit(MSC_SCAN, dev->input_dev->mscbit);
+
+ /* Pointer/mouse events */
++ set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit);
+ set_bit(EV_REL, dev->input_dev->evbit);
+ set_bit(REL_X, dev->input_dev->relbit);
+ set_bit(REL_Y, dev->input_dev->relbit);
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index 0a9f42e5fedf..2e57122f02fb 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -617,6 +617,7 @@ err_free_chan:
+ sl->tty = NULL;
+ tty->disc_data = NULL;
+ clear_bit(SLF_INUSE, &sl->flags);
++ slc_free_netdev(sl->dev);
+ free_netdev(sl->dev);
+
+ err_exit:
+diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
+index 04aac3bb54ef..81e942f713e6 100644
+--- a/drivers/net/can/usb/ucan.c
++++ b/drivers/net/can/usb/ucan.c
+@@ -792,7 +792,7 @@ resubmit:
+ up);
+
+ usb_anchor_urb(urb, &up->rx_urbs);
+- ret = usb_submit_urb(urb, GFP_KERNEL);
++ ret = usb_submit_urb(urb, GFP_ATOMIC);
+
+ if (ret < 0) {
+ netdev_err(up->netdev,
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
+index a9657ae6d782..d14e55e3c9da 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.c
++++ b/drivers/net/wireless/marvell/mwifiex/main.c
+@@ -631,6 +631,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
+
+ mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
+ mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
++ adapter->is_up = true;
+ goto done;
+
+ err_add_intf:
+@@ -1469,6 +1470,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
+ mwifiex_deauthenticate(priv, NULL);
+
+ mwifiex_uninit_sw(adapter);
++ adapter->is_up = false;
+
+ if (adapter->if_ops.down_dev)
+ adapter->if_ops.down_dev(adapter);
+@@ -1730,7 +1732,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
+ if (!adapter)
+ return 0;
+
+- mwifiex_uninit_sw(adapter);
++ if (adapter->is_up)
++ mwifiex_uninit_sw(adapter);
+
+ if (adapter->irq_wakeup >= 0)
+ device_init_wakeup(adapter->dev, false);
+diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
+index 095837fba300..547ff3c578ee 100644
+--- a/drivers/net/wireless/marvell/mwifiex/main.h
++++ b/drivers/net/wireless/marvell/mwifiex/main.h
+@@ -1017,6 +1017,7 @@ struct mwifiex_adapter {
+
+ /* For synchronizing FW initialization with device lifecycle. */
+ struct completion *fw_done;
++ bool is_up;
+
+ bool ext_scan;
+ u8 fw_api_ver;
+diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
+index 24c041dad9f6..fec38b6e86ff 100644
+--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
++++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
+@@ -444,6 +444,9 @@ static int mwifiex_sdio_suspend(struct device *dev)
+ return 0;
+ }
+
++ if (!adapter->is_up)
++ return -EBUSY;
++
+ mwifiex_enable_wake(adapter);
+
+ /* Enable the Host Sleep */
+@@ -2220,22 +2223,30 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
+ struct sdio_func *func = card->func;
+ int ret;
+
++ /* Prepare the adapter for the reset. */
+ mwifiex_shutdown_sw(adapter);
++ clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
++ clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
+
+- /* power cycle the adapter */
++ /* Run a HW reset of the SDIO interface. */
+ sdio_claim_host(func);
+- mmc_hw_reset(func->card->host);
++ ret = mmc_hw_reset(func->card->host);
+ sdio_release_host(func);
+
+- /* Previous save_adapter won't be valid after this. We will cancel
+- * pending work requests.
+- */
+- clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
+- clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
+-
+- ret = mwifiex_reinit_sw(adapter);
+- if (ret)
+- dev_err(&func->dev, "reinit failed: %d\n", ret);
++ switch (ret) {
++ case 1:
++ dev_dbg(&func->dev, "SDIO HW reset asynchronous\n");
++ complete_all(adapter->fw_done);
++ break;
++ case 0:
++ ret = mwifiex_reinit_sw(adapter);
++ if (ret)
++ dev_err(&func->dev, "reinit failed: %d\n", ret);
++ break;
++ default:
++ dev_err(&func->dev, "SDIO HW reset failed: %d\n", ret);
++ break;
++ }
+ }
+
+ /* This function read/write firmware */
+diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+index 6c7f26ef6476..9cc8a335d519 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
++++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+@@ -1756,6 +1756,7 @@ static int rsi_send_beacon(struct rsi_common *common)
+ skb_pull(skb, (64 - dword_align_bytes));
+ if (rsi_prepare_beacon(common, skb)) {
+ rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n");
++ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+ skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb);
+diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
+index 7aa0517ff2f3..3c82de5f9417 100644
+--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
++++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
+@@ -155,7 +155,7 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
+ err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
+ offset_mask |= CMDQ_WRITE_ENABLE_MASK;
+ }
+- err |= cmdq_pkt_write(pkt, value, subsys, offset_mask);
++ err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);
+
+ return err;
+ }
+diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
+index acf318e7330c..ba8eff41b746 100644
+--- a/drivers/spi/spi-atmel.c
++++ b/drivers/spi/spi-atmel.c
+@@ -1183,10 +1183,8 @@ static int atmel_spi_setup(struct spi_device *spi)
+ as = spi_master_get_devdata(spi->master);
+
+ /* see notes above re chipselect */
+- if (!atmel_spi_is_v2(as)
+- && spi->chip_select == 0
+- && (spi->mode & SPI_CS_HIGH)) {
+- dev_dbg(&spi->dev, "setup: can't be active-high\n");
++ if (!as->use_cs_gpios && (spi->mode & SPI_CS_HIGH)) {
++ dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
+ return -EINVAL;
+ }
+
+diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
+index c02e24c01136..63c9f7edaf6c 100644
+--- a/drivers/spi/spi-fsl-qspi.c
++++ b/drivers/spi/spi-fsl-qspi.c
+@@ -63,6 +63,11 @@
+ #define QUADSPI_IPCR 0x08
+ #define QUADSPI_IPCR_SEQID(x) ((x) << 24)
+
++#define QUADSPI_FLSHCR 0x0c
++#define QUADSPI_FLSHCR_TCSS_MASK GENMASK(3, 0)
++#define QUADSPI_FLSHCR_TCSH_MASK GENMASK(11, 8)
++#define QUADSPI_FLSHCR_TDH_MASK GENMASK(17, 16)
++
+ #define QUADSPI_BUF3CR 0x1c
+ #define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
+ #define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
+@@ -95,6 +100,9 @@
+ #define QUADSPI_FR 0x160
+ #define QUADSPI_FR_TFF_MASK BIT(0)
+
++#define QUADSPI_RSER 0x164
++#define QUADSPI_RSER_TFIE BIT(0)
++
+ #define QUADSPI_SPTRCLR 0x16c
+ #define QUADSPI_SPTRCLR_IPPTRC BIT(8)
+ #define QUADSPI_SPTRCLR_BFPTRC BIT(0)
+@@ -112,9 +120,6 @@
+ #define QUADSPI_LCKER_LOCK BIT(0)
+ #define QUADSPI_LCKER_UNLOCK BIT(1)
+
+-#define QUADSPI_RSER 0x164
+-#define QUADSPI_RSER_TFIE BIT(0)
+-
+ #define QUADSPI_LUT_BASE 0x310
+ #define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
+ #define QUADSPI_LUT_REG(idx) \
+@@ -181,6 +186,12 @@
+ */
+ #define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
+
++/*
++ * Controller uses TDH bits in register QUADSPI_FLSHCR.
++ * They need to be set in accordance with the DDR/SDR mode.
++ */
++#define QUADSPI_QUIRK_USE_TDH_SETTING BIT(5)
++
+ struct fsl_qspi_devtype_data {
+ unsigned int rxfifo;
+ unsigned int txfifo;
+@@ -209,7 +220,8 @@ static const struct fsl_qspi_devtype_data imx7d_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+- .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
++ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
++ QUADSPI_QUIRK_USE_TDH_SETTING,
+ .little_endian = true,
+ };
+
+@@ -217,7 +229,8 @@ static const struct fsl_qspi_devtype_data imx6ul_data = {
+ .rxfifo = SZ_128,
+ .txfifo = SZ_512,
+ .ahb_buf_size = SZ_1K,
+- .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
++ .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
++ QUADSPI_QUIRK_USE_TDH_SETTING,
+ .little_endian = true,
+ };
+
+@@ -275,6 +288,11 @@ static inline int needs_amba_base_offset(struct fsl_qspi *q)
+ return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
+ }
+
++static inline int needs_tdh_setting(struct fsl_qspi *q)
++{
++ return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
++}
++
+ /*
+ * An IC bug makes it necessary to rearrange the 32-bit data.
+ * Later chips, such as IMX6SLX, have fixed this bug.
+@@ -710,6 +728,16 @@ static int fsl_qspi_default_setup(struct fsl_qspi *q)
+ qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
+ base + QUADSPI_MCR);
+
++ /*
++ * Previous boot stages (BootROM, bootloader) might have used DDR
++ * mode and did not clear the TDH bits. As we currently use SDR mode
++ * only, clear the TDH bits if necessary.
++ */
++ if (needs_tdh_setting(q))
++ qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
++ ~QUADSPI_FLSHCR_TDH_MASK,
++ base + QUADSPI_FLSHCR);
++
+ reg = qspi_readl(q, base + QUADSPI_SMPR);
+ qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
+ | QUADSPI_SMPR_FSPHS_MASK
+diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
+index 9ac6f9fe13cf..4e726929bb4f 100644
+--- a/drivers/spi/spi-stm32-qspi.c
++++ b/drivers/spi/spi-stm32-qspi.c
+@@ -528,7 +528,6 @@ static void stm32_qspi_release(struct stm32_qspi *qspi)
+ stm32_qspi_dma_free(qspi);
+ mutex_destroy(&qspi->lock);
+ clk_disable_unprepare(qspi->clk);
+- spi_master_put(qspi->ctrl);
+ }
+
+ static int stm32_qspi_probe(struct platform_device *pdev)
+@@ -626,6 +625,8 @@ static int stm32_qspi_probe(struct platform_device *pdev)
+
+ err:
+ stm32_qspi_release(qspi);
++ spi_master_put(qspi->ctrl);
++
+ return ret;
+ }
+
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index f9502dbbb5c1..26b91ee0855d 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1711,15 +1711,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
+ spi->mode |= SPI_3WIRE;
+ if (of_property_read_bool(nc, "spi-lsb-first"))
+ spi->mode |= SPI_LSB_FIRST;
+-
+- /*
+- * For descriptors associated with the device, polarity inversion is
+- * handled in the gpiolib, so all chip selects are "active high" in
+- * the logical sense, the gpiolib will invert the line if need be.
+- */
+- if (ctlr->use_gpio_descriptors)
+- spi->mode |= SPI_CS_HIGH;
+- else if (of_property_read_bool(nc, "spi-cs-high"))
++ if (of_property_read_bool(nc, "spi-cs-high"))
+ spi->mode |= SPI_CS_HIGH;
+
+ /* Device DUAL/QUAD mode */
+@@ -1783,6 +1775,15 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
+ }
+ spi->chip_select = value;
+
++ /*
++ * For descriptors associated with the device, polarity inversion is
++ * handled in the gpiolib, so all gpio chip selects are "active high"
++ * in the logical sense, the gpiolib will invert the line if need be.
++ */
++ if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
++ ctlr->cs_gpiods[spi->chip_select])
++ spi->mode |= SPI_CS_HIGH;
++
+ /* Device speed */
+ rc = of_property_read_u32(nc, "spi-max-frequency", &value);
+ if (rc) {
+diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
+index a8a864b40913..042220d86d33 100644
+--- a/drivers/staging/octeon/octeon-ethernet.h
++++ b/drivers/staging/octeon/octeon-ethernet.h
+@@ -14,7 +14,7 @@
+ #include <linux/of.h>
+ #include <linux/phy.h>
+
+-#ifdef CONFIG_MIPS
++#ifdef CONFIG_CAVIUM_OCTEON_SOC
+
+ #include <asm/octeon/octeon.h>
+
+diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
+index b78ce9eaab85..ae014265064a 100644
+--- a/drivers/staging/octeon/octeon-stubs.h
++++ b/drivers/staging/octeon/octeon-stubs.h
+@@ -1,5 +1,8 @@
+ #define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
+-#define XKPHYS_TO_PHYS(p) (p)
++
++#ifndef XKPHYS_TO_PHYS
++# define XKPHYS_TO_PHYS(p) (p)
++#endif
+
+ #define OCTEON_IRQ_WORKQ0 0
+ #define OCTEON_IRQ_RML 0
+diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
+index d4481cc8958f..c28271817e43 100644
+--- a/drivers/thermal/thermal_core.c
++++ b/drivers/thermal/thermal_core.c
+@@ -304,7 +304,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
+ &tz->poll_queue,
+ msecs_to_jiffies(delay));
+ else
+- cancel_delayed_work_sync(&tz->poll_queue);
++ cancel_delayed_work(&tz->poll_queue);
+ }
+
+ static void monitor_thermal_zone(struct thermal_zone_device *tz)
+@@ -1414,7 +1414,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
+
+ mutex_unlock(&thermal_list_lock);
+
+- thermal_zone_device_set_polling(tz, 0);
++ cancel_delayed_work_sync(&tz->poll_queue);
+
+ thermal_set_governor(tz, NULL);
+
+diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
+index 1c72fdc2dd37..51a7d3b19b39 100644
+--- a/drivers/tty/serial/8250/8250_dw.c
++++ b/drivers/tty/serial/8250/8250_dw.c
+@@ -386,10 +386,10 @@ static int dw8250_probe(struct platform_device *pdev)
+ {
+ struct uart_8250_port uart = {}, *up = &uart;
+ struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+- int irq = platform_get_irq(pdev, 0);
+ struct uart_port *p = &up->port;
+ struct device *dev = &pdev->dev;
+ struct dw8250_data *data;
++ int irq;
+ int err;
+ u32 val;
+
+@@ -398,11 +398,9 @@ static int dw8250_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
+- if (irq < 0) {
+- if (irq != -EPROBE_DEFER)
+- dev_err(dev, "cannot get irq\n");
++ irq = platform_get_irq(pdev, 0);
++ if (irq < 0)
+ return irq;
+- }
+
+ spin_lock_init(&p->lock);
+ p->mapbase = regs->start;
+diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
+index b411ba4eb5e9..4d067f515f74 100644
+--- a/drivers/tty/serial/8250/8250_mtk.c
++++ b/drivers/tty/serial/8250/8250_mtk.c
+@@ -544,7 +544,7 @@ static int mtk8250_probe(struct platform_device *pdev)
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+- data->rx_wakeup_irq = platform_get_irq(pdev, 1);
++ data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1);
+
+ return 0;
+ }
+diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
+index 6adbadd6a56a..8a01d034f9d1 100644
+--- a/drivers/tty/serial/8250/8250_pci.c
++++ b/drivers/tty/serial/8250/8250_pci.c
+@@ -745,16 +745,8 @@ static int pci_ni8430_init(struct pci_dev *dev)
+ }
+
+ /* UART Port Control Register */
+-#define NI16550_PCR_OFFSET 0x0f
+-#define NI16550_PCR_RS422 0x00
+-#define NI16550_PCR_ECHO_RS485 0x01
+-#define NI16550_PCR_DTR_RS485 0x02
+-#define NI16550_PCR_AUTO_RS485 0x03
+-#define NI16550_PCR_WIRE_MODE_MASK 0x03
+-#define NI16550_PCR_TXVR_ENABLE_BIT BIT(3)
+-#define NI16550_PCR_RS485_TERMINATION_BIT BIT(6)
+-#define NI16550_ACR_DTR_AUTO_DTR (0x2 << 3)
+-#define NI16550_ACR_DTR_MANUAL_DTR (0x0 << 3)
++#define NI8430_PORTCON 0x0f
++#define NI8430_PORTCON_TXVR_ENABLE (1 << 3)
+
+ static int
+ pci_ni8430_setup(struct serial_private *priv,
+@@ -776,117 +768,14 @@ pci_ni8430_setup(struct serial_private *priv,
+ return -ENOMEM;
+
+ /* enable the transceiver */
+- writeb(readb(p + offset + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
+- p + offset + NI16550_PCR_OFFSET);
++ writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
++ p + offset + NI8430_PORTCON);
+
+ iounmap(p);
+
+ return setup_port(priv, port, bar, offset, board->reg_shift);
+ }
+
+-static int pci_ni8431_config_rs485(struct uart_port *port,
+- struct serial_rs485 *rs485)
+-{
+- u8 pcr, acr;
+- struct uart_8250_port *up;
+-
+- up = container_of(port, struct uart_8250_port, port);
+- acr = up->acr;
+- pcr = port->serial_in(port, NI16550_PCR_OFFSET);
+- pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
+-
+- if (rs485->flags & SER_RS485_ENABLED) {
+- /* RS-485 */
+- if ((rs485->flags & SER_RS485_RX_DURING_TX) &&
+- (rs485->flags & SER_RS485_RTS_ON_SEND)) {
+- dev_dbg(port->dev, "Invalid 2-wire mode\n");
+- return -EINVAL;
+- }
+-
+- if (rs485->flags & SER_RS485_RX_DURING_TX) {
+- /* Echo */
+- dev_vdbg(port->dev, "2-wire DTR with echo\n");
+- pcr |= NI16550_PCR_ECHO_RS485;
+- acr |= NI16550_ACR_DTR_MANUAL_DTR;
+- } else {
+- /* Auto or DTR */
+- if (rs485->flags & SER_RS485_RTS_ON_SEND) {
+- /* Auto */
+- dev_vdbg(port->dev, "2-wire Auto\n");
+- pcr |= NI16550_PCR_AUTO_RS485;
+- acr |= NI16550_ACR_DTR_AUTO_DTR;
+- } else {
+- /* DTR-controlled */
+- /* No Echo */
+- dev_vdbg(port->dev, "2-wire DTR no echo\n");
+- pcr |= NI16550_PCR_DTR_RS485;
+- acr |= NI16550_ACR_DTR_MANUAL_DTR;
+- }
+- }
+- } else {
+- /* RS-422 */
+- dev_vdbg(port->dev, "4-wire\n");
+- pcr |= NI16550_PCR_RS422;
+- acr |= NI16550_ACR_DTR_MANUAL_DTR;
+- }
+-
+- dev_dbg(port->dev, "write pcr: 0x%08x\n", pcr);
+- port->serial_out(port, NI16550_PCR_OFFSET, pcr);
+-
+- up->acr = acr;
+- port->serial_out(port, UART_SCR, UART_ACR);
+- port->serial_out(port, UART_ICR, up->acr);
+-
+- /* Update the cache. */
+- port->rs485 = *rs485;
+-
+- return 0;
+-}
+-
+-static int pci_ni8431_setup(struct serial_private *priv,
+- const struct pciserial_board *board,
+- struct uart_8250_port *uart, int idx)
+-{
+- u8 pcr, acr;
+- struct pci_dev *dev = priv->dev;
+- void __iomem *addr;
+- unsigned int bar, offset = board->first_offset;
+-
+- if (idx >= board->num_ports)
+- return 1;
+-
+- bar = FL_GET_BASE(board->flags);
+- offset += idx * board->uart_offset;
+-
+- addr = pci_ioremap_bar(dev, bar);
+- if (!addr)
+- return -ENOMEM;
+-
+- /* enable the transceiver */
+- writeb(readb(addr + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
+- addr + NI16550_PCR_OFFSET);
+-
+- pcr = readb(addr + NI16550_PCR_OFFSET);
+- pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
+-
+- /* set wire mode to default RS-422 */
+- pcr |= NI16550_PCR_RS422;
+- acr = NI16550_ACR_DTR_MANUAL_DTR;
+-
+- /* write port configuration to register */
+- writeb(pcr, addr + NI16550_PCR_OFFSET);
+-
+- /* access and write to UART acr register */
+- writeb(UART_ACR, addr + UART_SCR);
+- writeb(acr, addr + UART_ICR);
+-
+- uart->port.rs485_config = &pci_ni8431_config_rs485;
+-
+- iounmap(addr);
+-
+- return setup_port(priv, uart, bar, offset, board->reg_shift);
+-}
+-
+ static int pci_netmos_9900_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_8250_port *port, int idx)
+@@ -2023,15 +1912,6 @@ pci_moxa_setup(struct serial_private *priv,
+ #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
+ #define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
+
+-#define PCIE_DEVICE_ID_NI_PXIE8430_2328 0x74C2
+-#define PCIE_DEVICE_ID_NI_PXIE8430_23216 0x74C1
+-#define PCI_DEVICE_ID_NI_PXI8431_4852 0x7081
+-#define PCI_DEVICE_ID_NI_PXI8431_4854 0x70DE
+-#define PCI_DEVICE_ID_NI_PXI8431_4858 0x70E3
+-#define PCI_DEVICE_ID_NI_PXI8433_4852 0x70E9
+-#define PCI_DEVICE_ID_NI_PXI8433_4854 0x70ED
+-#define PCIE_DEVICE_ID_NI_PXIE8431_4858 0x74C4
+-#define PCIE_DEVICE_ID_NI_PXIE8431_48516 0x74C3
+
+ #define PCI_DEVICE_ID_MOXA_CP102E 0x1024
+ #define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
+@@ -2269,87 +2149,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
+ .setup = pci_ni8430_setup,
+ .exit = pci_ni8430_exit,
+ },
+- {
+- .vendor = PCI_VENDOR_ID_NI,
+- .device = PCIE_DEVICE_ID_NI_PXIE8430_2328,
+- .subvendor = PCI_ANY_ID,
+- .subdevice = PCI_ANY_ID,
+- .init = pci_ni8430_init,
+- .setup = pci_ni8430_setup,
+- .exit = pci_ni8430_exit,
+- },
+- {
+- .vendor = PCI_VENDOR_ID_NI,
+- .device = PCIE_DEVICE_ID_NI_PXIE8430_23216,
+- .subvendor = PCI_ANY_ID,
+- .subdevice = PCI_ANY_ID,
+- .init = pci_ni8430_init,
+- .setup = pci_ni8430_setup,
+- .exit = pci_ni8430_exit,
+- },
+- {
+- .vendor = PCI_VENDOR_ID_NI,
+- .device = PCI_DEVICE_ID_NI_PXI8431_4852,
+- .subvendor = PCI_ANY_ID,
+- .subdevice = PCI_ANY_ID,
+- .init = pci_ni8430_init,
+- .setup = pci_ni8431_setup,
+- .exit = pci_ni8430_exit,
+- },
+- {
+- .vendor = PCI_VENDOR_ID_NI,
+- .device = PCI_DEVICE_ID_NI_PXI8431_4854,
+- .subvendor = PCI_ANY_ID,
+- .subdevice = PCI_ANY_ID,
+- .init = pci_ni8430_init,
+- .setup = pci_ni8431_setup,
+- .exit = pci_ni8430_exit,
+- },
+- {
+- .vendor = PCI_VENDOR_ID_NI,
+- .device = PCI_DEVICE_ID_NI_PXI8431_4858,
+- .subvendor = PCI_ANY_ID,
+- .subdevice = PCI_ANY_ID,
+- .init = pci_ni8430_init,
+- .setup = pci_ni8431_setup,
+- .exit = pci_ni8430_exit,
+- },
+- {
+- .vendor = PCI_VENDOR_ID_NI,
+- .device = PCI_DEVICE_ID_NI_PXI8433_4852,
+- .subvendor = PCI_ANY_ID,
+- .subdevice = PCI_ANY_ID,
+- .init = pci_ni8430_init,
+- .setup = pci_ni8431_setup,
+- .exit = pci_ni8430_exit,
+- },
+- {
+- .vendor = PCI_VENDOR_ID_NI,
+- .device = PCI_DEVICE_ID_NI_PXI8433_4854,
+- .subvendor = PCI_ANY_ID,
+- .subdevice = PCI_ANY_ID,
+- .init = pci_ni8430_init,
+- .setup = pci_ni8431_setup,
+- .exit = pci_ni8430_exit,
+- },
+- {
+- .vendor = PCI_VENDOR_ID_NI,
+- .device = PCIE_DEVICE_ID_NI_PXIE8431_4858,
+- .subvendor = PCI_ANY_ID,
+- .subdevice = PCI_ANY_ID,
+- .init = pci_ni8430_init,
+- .setup = pci_ni8431_setup,
+- .exit = pci_ni8430_exit,
+- },
+- {
+- .vendor = PCI_VENDOR_ID_NI,
+- .device = PCIE_DEVICE_ID_NI_PXIE8431_48516,
+- .subvendor = PCI_ANY_ID,
+- .subdevice = PCI_ANY_ID,
+- .init = pci_ni8430_init,
+- .setup = pci_ni8431_setup,
+- .exit = pci_ni8430_exit,
+- },
+ /* Quatech */
+ {
+ .vendor = PCI_VENDOR_ID_QUATECH,
+@@ -3106,13 +2905,6 @@ enum pci_board_num_t {
+ pbn_ni8430_4,
+ pbn_ni8430_8,
+ pbn_ni8430_16,
+- pbn_ni8430_pxie_8,
+- pbn_ni8430_pxie_16,
+- pbn_ni8431_2,
+- pbn_ni8431_4,
+- pbn_ni8431_8,
+- pbn_ni8431_pxie_8,
+- pbn_ni8431_pxie_16,
+ pbn_ADDIDATA_PCIe_1_3906250,
+ pbn_ADDIDATA_PCIe_2_3906250,
+ pbn_ADDIDATA_PCIe_4_3906250,
+@@ -3765,55 +3557,6 @@ static struct pciserial_board pci_boards[] = {
+ .uart_offset = 0x10,
+ .first_offset = 0x800,
+ },
+- [pbn_ni8430_pxie_16] = {
+- .flags = FL_BASE0,
+- .num_ports = 16,
+- .base_baud = 3125000,
+- .uart_offset = 0x10,
+- .first_offset = 0x800,
+- },
+- [pbn_ni8430_pxie_8] = {
+- .flags = FL_BASE0,
+- .num_ports = 8,
+- .base_baud = 3125000,
+- .uart_offset = 0x10,
+- .first_offset = 0x800,
+- },
+- [pbn_ni8431_8] = {
+- .flags = FL_BASE0,
+- .num_ports = 8,
+- .base_baud = 3686400,
+- .uart_offset = 0x10,
+- .first_offset = 0x800,
+- },
+- [pbn_ni8431_4] = {
+- .flags = FL_BASE0,
+- .num_ports = 4,
+- .base_baud = 3686400,
+- .uart_offset = 0x10,
+- .first_offset = 0x800,
+- },
+- [pbn_ni8431_2] = {
+- .flags = FL_BASE0,
+- .num_ports = 2,
+- .base_baud = 3686400,
+- .uart_offset = 0x10,
+- .first_offset = 0x800,
+- },
+- [pbn_ni8431_pxie_16] = {
+- .flags = FL_BASE0,
+- .num_ports = 16,
+- .base_baud = 3125000,
+- .uart_offset = 0x10,
+- .first_offset = 0x800,
+- },
+- [pbn_ni8431_pxie_8] = {
+- .flags = FL_BASE0,
+- .num_ports = 8,
+- .base_baud = 3125000,
+- .uart_offset = 0x10,
+- .first_offset = 0x800,
+- },
+ /*
+ * ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com>
+ */
+@@ -5567,33 +5310,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
+ { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_ni8430_4 },
+- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_2328,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_ni8430_pxie_8 },
+- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_23216,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_ni8430_pxie_16 },
+- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4852,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_ni8431_2 },
+- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4854,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_ni8431_4 },
+- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4858,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_ni8431_8 },
+- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_4858,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_ni8431_pxie_8 },
+- { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_48516,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_ni8431_pxie_16 },
+- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4852,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_ni8431_2 },
+- { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4854,
+- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+- pbn_ni8431_4 },
+
+ /*
+ * MOXA
+diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
+index 3a7d1a66f79c..b0b689546395 100644
+--- a/drivers/tty/serial/amba-pl011.c
++++ b/drivers/tty/serial/amba-pl011.c
+@@ -813,10 +813,8 @@ __acquires(&uap->port.lock)
+ if (!uap->using_tx_dma)
+ return;
+
+- /* Avoid deadlock with the DMA engine callback */
+- spin_unlock(&uap->port.lock);
+- dmaengine_terminate_all(uap->dmatx.chan);
+- spin_lock(&uap->port.lock);
++ dmaengine_terminate_async(uap->dmatx.chan);
++
+ if (uap->dmatx.queued) {
+ dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
+ DMA_TO_DEVICE);
+diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
+index 537896c4d887..3f64b08f50ef 100644
+--- a/drivers/tty/serial/fsl_lpuart.c
++++ b/drivers/tty/serial/fsl_lpuart.c
+@@ -437,8 +437,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
+ }
+
+ sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
+- sport->dma_tx_nents,
+- DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
++ ret, DMA_MEM_TO_DEV,
++ DMA_PREP_INTERRUPT);
+ if (!sport->dma_tx_desc) {
+ dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ dev_err(dev, "Cannot prepare TX slave DMA!\n");
+diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
+index ffefd218761e..31033d517e82 100644
+--- a/drivers/tty/serial/ifx6x60.c
++++ b/drivers/tty/serial/ifx6x60.c
+@@ -1230,6 +1230,9 @@ static int ifx_spi_spi_remove(struct spi_device *spi)
+ struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
+ /* stop activity */
+ tasklet_kill(&ifx_dev->io_work_tasklet);
++
++ pm_runtime_disable(&spi->dev);
++
+ /* free irq */
+ free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
+ free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev);
+diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
+index 3657a24913fc..00964b6e4ac1 100644
+--- a/drivers/tty/serial/msm_serial.c
++++ b/drivers/tty/serial/msm_serial.c
+@@ -980,6 +980,7 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
+ static void msm_reset(struct uart_port *port)
+ {
+ struct msm_port *msm_port = UART_TO_MSM(port);
++ unsigned int mr;
+
+ /* reset everything */
+ msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
+@@ -987,7 +988,10 @@ static void msm_reset(struct uart_port *port)
+ msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
+ msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
+- msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
++ msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
++ mr = msm_read(port, UART_MR1);
++ mr &= ~UART_MR1_RX_RDY_CTL;
++ msm_write(port, mr, UART_MR1);
+
+ /* Disable DM modes */
+ if (msm_port->is_uartdm)
+diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
+index c4a414a46c7f..b0a6eb106edb 100644
+--- a/drivers/tty/serial/serial_core.c
++++ b/drivers/tty/serial/serial_core.c
+@@ -1111,7 +1111,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
+ if (!uport)
+ goto out;
+
+- if (uport->type != PORT_UNKNOWN)
++ if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl)
+ uport->ops->break_ctl(uport, break_state);
+ ret = 0;
+ out:
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index df90747ee3a8..2f72514d63ed 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -240,8 +240,8 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
+ * cleared by the sequence [read SR - read DR].
+ */
+ if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
+- stm32_clr_bits(port, ofs->icr, USART_ICR_ORECF |
+- USART_ICR_PECF | USART_ICR_FECF);
++ writel_relaxed(sr & USART_SR_ERR_MASK,
++ port->membase + ofs->icr);
+
+ c = stm32_get_char(port, &sr, &stm32_port->last_res);
+ port->icount.rx++;
+@@ -435,7 +435,7 @@ static void stm32_transmit_chars(struct uart_port *port)
+ if (ofs->icr == UNDEF_REG)
+ stm32_clr_bits(port, ofs->isr, USART_SR_TC);
+ else
+- stm32_set_bits(port, ofs->icr, USART_ICR_TCCF);
++ writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
+
+ if (stm32_port->tx_ch)
+ stm32_transmit_chars_dma(port);
+diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
+index 515fc095e3b4..15d33fa0c925 100644
+--- a/drivers/tty/vt/keyboard.c
++++ b/drivers/tty/vt/keyboard.c
+@@ -1491,7 +1491,7 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type,
+
+ if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev))
+ kbd_rawcode(value);
+- if (event_type == EV_KEY)
++ if (event_type == EV_KEY && event_code <= KEY_MAX)
+ kbd_keycode(event_code, value, HW_RAW(handle->dev));
+
+ spin_unlock(&kbd_event_lock);
+diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
+index 1f042346e722..778f83ea2249 100644
+--- a/drivers/tty/vt/vc_screen.c
++++ b/drivers/tty/vt/vc_screen.c
+@@ -456,6 +456,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+ size_t ret;
+ char *con_buf;
+
++ if (use_unicode(inode))
++ return -EOPNOTSUPP;
++
+ con_buf = (char *) __get_free_page(GFP_KERNEL);
+ if (!con_buf)
+ return -ENOMEM;
+diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
+index 65f634ec7fc2..bb1e2e1d0076 100644
+--- a/drivers/usb/gadget/function/u_serial.c
++++ b/drivers/usb/gadget/function/u_serial.c
+@@ -1239,8 +1239,10 @@ int gserial_alloc_line(unsigned char *line_num)
+ __func__, port_num, PTR_ERR(tty_dev));
+
+ ret = PTR_ERR(tty_dev);
++ mutex_lock(&ports[port_num].lock);
+ port = ports[port_num].port;
+ ports[port_num].port = NULL;
++ mutex_unlock(&ports[port_num].lock);
+ gserial_free_port(port);
+ goto err;
+ }
+diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
+index 4ec0906bf12c..7e00960651fa 100644
+--- a/drivers/watchdog/aspeed_wdt.c
++++ b/drivers/watchdog/aspeed_wdt.c
+@@ -258,11 +258,6 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
+ if (IS_ERR(wdt->base))
+ return PTR_ERR(wdt->base);
+
+- /*
+- * The ast2400 wdt can run at PCLK, or 1MHz. The ast2500 only
+- * runs at 1MHz. We chose to always run at 1MHz, as there's no
+- * good reason to have a faster watchdog counter.
+- */
+ wdt->wdd.info = &aspeed_wdt_info;
+ wdt->wdd.ops = &aspeed_wdt_ops;
+ wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
+@@ -278,7 +273,16 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
+ return -EINVAL;
+ config = ofdid->data;
+
+- wdt->ctrl = WDT_CTRL_1MHZ_CLK;
++ /*
++ * On clock rates:
++ * - ast2400 wdt can run at PCLK, or 1MHz
++ * - ast2500 only runs at 1MHz, hard coding bit 4 to 1
++ * - ast2600 always runs at 1MHz
++ *
++ * Set the ast2400 to run at 1MHz as it simplifies the driver.
++ */
++ if (of_device_is_compatible(np, "aspeed,ast2400-wdt"))
++ wdt->ctrl = WDT_CTRL_1MHZ_CLK;
+
+ /*
+ * Control reset on a per-device basis to ensure the
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 9c073dbdc1b0..d612468ee66b 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1403,11 +1403,7 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
+ "resized disk %s\n",
+ bdev->bd_disk ? bdev->bd_disk->disk_name : "");
+ }
+-
+- if (!bdev->bd_disk)
+- return;
+- if (disk_part_scan_enabled(bdev->bd_disk))
+- bdev->bd_invalidated = 1;
++ bdev->bd_invalidated = 1;
+ }
+
+ /**
+@@ -1512,6 +1508,19 @@ EXPORT_SYMBOL(bd_set_size);
+
+ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
+
++static void bdev_disk_changed(struct block_device *bdev, bool invalidate)
++{
++ if (disk_part_scan_enabled(bdev->bd_disk)) {
++ if (invalidate)
++ invalidate_partitions(bdev->bd_disk, bdev);
++ else
++ rescan_partitions(bdev->bd_disk, bdev);
++ } else {
++ check_disk_size_change(bdev->bd_disk, bdev, !invalidate);
++ bdev->bd_invalidated = 0;
++ }
++}
++
+ /*
+ * bd_mutex locking:
+ *
+@@ -1594,12 +1603,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ * The latter is necessary to prevent ghost
+ * partitions on a removed medium.
+ */
+- if (bdev->bd_invalidated) {
+- if (!ret)
+- rescan_partitions(disk, bdev);
+- else if (ret == -ENOMEDIUM)
+- invalidate_partitions(disk, bdev);
+- }
++ if (bdev->bd_invalidated &&
++ (!ret || ret == -ENOMEDIUM))
++ bdev_disk_changed(bdev, ret == -ENOMEDIUM);
+
+ if (ret)
+ goto out_clear;
+@@ -1632,12 +1638,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
+ if (bdev->bd_disk->fops->open)
+ ret = bdev->bd_disk->fops->open(bdev, mode);
+ /* the same as first opener case, read comment there */
+- if (bdev->bd_invalidated) {
+- if (!ret)
+- rescan_partitions(bdev->bd_disk, bdev);
+- else if (ret == -ENOMEDIUM)
+- invalidate_partitions(bdev->bd_disk, bdev);
+- }
++ if (bdev->bd_invalidated &&
++ (!ret || ret == -ENOMEDIUM))
++ bdev_disk_changed(bdev, ret == -ENOMEDIUM);
+ if (ret)
+ goto out_unlock_bdev;
+ }
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index fa7b0fa72bb3..a3b6be80f8a9 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -313,9 +313,6 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ INIT_LIST_HEAD(&fdlocks->locks);
+ fdlocks->cfile = cfile;
+ cfile->llist = fdlocks;
+- cifs_down_write(&cinode->lock_sem);
+- list_add(&fdlocks->llist, &cinode->llist);
+- up_write(&cinode->lock_sem);
+
+ cfile->count = 1;
+ cfile->pid = current->tgid;
+@@ -339,6 +336,10 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
+ oplock = 0;
+ }
+
++ cifs_down_write(&cinode->lock_sem);
++ list_add(&fdlocks->llist, &cinode->llist);
++ up_write(&cinode->lock_sem);
++
+ spin_lock(&tcon->open_file_lock);
+ if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
+ oplock = fid->pending_open->oplock;
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index e311f58dc1c8..449d1584ff72 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -673,10 +673,10 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each(tmp, &server->smb_ses_list) {
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
++
+ list_for_each(tmp1, &ses->tcon_list) {
+ tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+
+- cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
+ spin_lock(&tcon->open_file_lock);
+ list_for_each(tmp2, &tcon->openFileList) {
+ cfile = list_entry(tmp2, struct cifsFileInfo,
+@@ -688,6 +688,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ continue;
+
+ cifs_dbg(FYI, "file id match, oplock break\n");
++ cifs_stats_inc(
++ &tcon->stats.cifs_stats.num_oplock_brks);
+ cinode = CIFS_I(d_inode(cfile->dentry));
+ spin_lock(&cfile->file_info_lock);
+ if (!CIFS_CACHE_WRITE(cinode) &&
+@@ -720,9 +722,6 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ return true;
+ }
+ spin_unlock(&tcon->open_file_lock);
+- spin_unlock(&cifs_tcp_ses_lock);
+- cifs_dbg(FYI, "No matching file for oplock break\n");
+- return true;
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
+index 54d638f9ba1c..ee190119f45c 100644
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -248,7 +248,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
+ kfree(forget);
+ if (ret == -ENOMEM)
+ goto out;
+- if (ret || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
++ if (ret || fuse_invalid_attr(&outarg.attr) ||
++ (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
+ goto invalid;
+
+ forget_all_cached_acls(inode);
+@@ -319,6 +320,12 @@ int fuse_valid_type(int m)
+ S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
+ }
+
++bool fuse_invalid_attr(struct fuse_attr *attr)
++{
++ return !fuse_valid_type(attr->mode) ||
++ attr->size > LLONG_MAX;
++}
++
+ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
+ struct fuse_entry_out *outarg, struct inode **inode)
+ {
+@@ -350,7 +357,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
+ err = -EIO;
+ if (!outarg->nodeid)
+ goto out_put_forget;
+- if (!fuse_valid_type(outarg->attr.mode))
++ if (fuse_invalid_attr(&outarg->attr))
+ goto out_put_forget;
+
+ *inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
+@@ -475,7 +482,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
+ goto out_free_ff;
+
+ err = -EIO;
+- if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
++ if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
++ fuse_invalid_attr(&outentry.attr))
+ goto out_free_ff;
+
+ ff->fh = outopen.fh;
+@@ -583,7 +591,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
+ goto out_put_forget_req;
+
+ err = -EIO;
+- if (invalid_nodeid(outarg.nodeid))
++ if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
+ goto out_put_forget_req;
+
+ if ((outarg.attr.mode ^ mode) & S_IFMT)
+@@ -862,7 +870,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
+
+ spin_lock(&fi->lock);
+ fi->attr_version = atomic64_inc_return(&fc->attr_version);
+- inc_nlink(inode);
++ if (likely(inode->i_nlink < UINT_MAX))
++ inc_nlink(inode);
+ spin_unlock(&fi->lock);
+ fuse_invalidate_attr(inode);
+ fuse_update_ctime(inode);
+@@ -942,7 +951,8 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
+ args.out_args[0].value = &outarg;
+ err = fuse_simple_request(fc, &args);
+ if (!err) {
+- if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
++ if (fuse_invalid_attr(&outarg.attr) ||
++ (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
+ make_bad_inode(inode);
+ err = -EIO;
+ } else {
+@@ -1563,7 +1573,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
+ goto error;
+ }
+
+- if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
++ if (fuse_invalid_attr(&outarg.attr) ||
++ (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
+ make_bad_inode(inode);
+ err = -EIO;
+ goto error;
+diff --git a/fs/fuse/file.c b/fs/fuse/file.c
+index db48a5cf8620..a63d779eac10 100644
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -713,8 +713,10 @@ static ssize_t fuse_async_req_send(struct fuse_conn *fc,
+
+ ia->ap.args.end = fuse_aio_complete_req;
+ err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL);
++ if (err)
++ fuse_aio_complete_req(fc, &ia->ap.args, err);
+
+- return err ?: num_bytes;
++ return num_bytes;
+ }
+
+ static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
+@@ -1096,6 +1098,8 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
+ ia->write.in.flags = fuse_write_flags(iocb);
+
+ err = fuse_simple_request(fc, &ap->args);
++ if (!err && ia->write.out.size > count)
++ err = -EIO;
+
+ offset = ap->descs[0].offset;
+ count = ia->write.out.size;
+diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
+index d148188cfca4..aa75e2305b75 100644
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -989,6 +989,8 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc);
+ */
+ int fuse_valid_type(int m);
+
++bool fuse_invalid_attr(struct fuse_attr *attr);
++
+ /**
+ * Is current process allowed to perform filesystem operation?
+ */
+diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
+index 5c38b9d84c6e..6a40f75a0d25 100644
+--- a/fs/fuse/readdir.c
++++ b/fs/fuse/readdir.c
+@@ -184,7 +184,7 @@ static int fuse_direntplus_link(struct file *file,
+
+ if (invalid_nodeid(o->nodeid))
+ return -EIO;
+- if (!fuse_valid_type(o->attr.mode))
++ if (fuse_invalid_attr(&o->attr))
+ return -EIO;
+
+ fc = get_fuse_conn(dir);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index cbe8dabb6479..a340147387ec 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -70,6 +70,7 @@
+ #include <linux/nospec.h>
+ #include <linux/sizes.h>
+ #include <linux/hugetlb.h>
++#include <linux/highmem.h>
+
+ #include <uapi/linux/io_uring.h>
+
+@@ -1351,9 +1352,19 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
+ return -EAGAIN;
+
+ while (iov_iter_count(iter)) {
+- struct iovec iovec = iov_iter_iovec(iter);
++ struct iovec iovec;
+ ssize_t nr;
+
++ if (!iov_iter_is_bvec(iter)) {
++ iovec = iov_iter_iovec(iter);
++ } else {
++ /* fixed buffers import bvec */
++ iovec.iov_base = kmap(iter->bvec->bv_page)
++ + iter->iov_offset;
++ iovec.iov_len = min(iter->count,
++ iter->bvec->bv_len - iter->iov_offset);
++ }
++
+ if (rw == READ) {
+ nr = file->f_op->read(file, iovec.iov_base,
+ iovec.iov_len, &kiocb->ki_pos);
+@@ -1362,6 +1373,9 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
+ iovec.iov_len, &kiocb->ki_pos);
+ }
+
++ if (iov_iter_is_bvec(iter))
++ kunmap(iter->bvec->bv_page);
++
+ if (nr < 0) {
+ if (!ret)
+ ret = nr;
+@@ -1654,6 +1668,8 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
+ ret = fn(sock, msg, flags);
+ if (force_nonblock && ret == -EAGAIN)
+ return ret;
++ if (ret == -ERESTARTSYS)
++ ret = -EINTR;
+ }
+
+ io_cqring_add_event(req->ctx, sqe->user_data, ret);
+@@ -2023,7 +2039,7 @@ add:
+ }
+
+ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
+- const struct io_uring_sqe *sqe)
++ struct sqe_submit *s)
+ {
+ struct io_uring_sqe *sqe_copy;
+
+@@ -2041,7 +2057,8 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ return 0;
+ }
+
+- memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
++ memcpy(&req->submit, s, sizeof(*s));
++ memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
+ req->submit.sqe = sqe_copy;
+
+ INIT_WORK(&req->work, io_sq_wq_submit_work);
+@@ -2409,7 +2426,7 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ {
+ int ret;
+
+- ret = io_req_defer(ctx, req, s->sqe);
++ ret = io_req_defer(ctx, req, s);
+ if (ret) {
+ if (ret != -EIOCBQUEUED) {
+ io_free_req(req);
+@@ -2436,7 +2453,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ * list.
+ */
+ req->flags |= REQ_F_IO_DRAIN;
+- ret = io_req_defer(ctx, req, s->sqe);
++ ret = io_req_defer(ctx, req, s);
+ if (ret) {
+ if (ret != -EIOCBQUEUED) {
+ io_free_req(req);
+diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
+index 1fc28c2da279..fd46ec83cb04 100644
+--- a/fs/iomap/direct-io.c
++++ b/fs/iomap/direct-io.c
+@@ -497,8 +497,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
+ }
+ pos += ret;
+
+- if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
++ if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
++ /*
++ * We only report that we've read data up to i_size.
++ * Revert iter to a state corresponding to that as
++ * some callers (such as splice code) rely on it.
++ */
++ iov_iter_revert(iter, pos - dio->i_size);
+ break;
++ }
+ } while ((count = iov_iter_count(iter)) > 0);
+ blk_finish_plug(&plug);
+
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index 6ebae6bbe6a5..7d4af6cea2a6 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -622,7 +622,6 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
+ {
+ struct kernfs_node *kn;
+ u32 gen;
+- int cursor;
+ int ret;
+
+ name = kstrdup_const(name, GFP_KERNEL);
+@@ -635,11 +634,11 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
+
+ idr_preload(GFP_KERNEL);
+ spin_lock(&kernfs_idr_lock);
+- cursor = idr_get_cursor(&root->ino_idr);
+ ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
+- if (ret >= 0 && ret < cursor)
++ if (ret >= 0 && ret < root->last_ino)
+ root->next_generation++;
+ gen = root->next_generation;
++ root->last_ino = ret;
+ spin_unlock(&kernfs_idr_lock);
+ idr_preload_end();
+ if (ret < 0)
+diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
+index 4e3e77b76411..38c0aeda500e 100644
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1077,7 +1077,8 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
+ goto out;
+
+ status = nfsd4_clone_file_range(src->nf_file, clone->cl_src_pos,
+- dst->nf_file, clone->cl_dst_pos, clone->cl_count);
++ dst->nf_file, clone->cl_dst_pos, clone->cl_count,
++ EX_ISSYNC(cstate->current_fh.fh_export));
+
+ nfsd_file_put(dst);
+ nfsd_file_put(src);
+diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
+index fdf7ed4bd5dd..e8bee8ff30c5 100644
+--- a/fs/nfsd/nfssvc.c
++++ b/fs/nfsd/nfssvc.c
+@@ -95,12 +95,11 @@ static const struct svc_version *nfsd_acl_version[] = {
+
+ #define NFSD_ACL_MINVERS 2
+ #define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
+-static const struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];
+
+ static struct svc_program nfsd_acl_program = {
+ .pg_prog = NFS_ACL_PROGRAM,
+ .pg_nvers = NFSD_ACL_NRVERS,
+- .pg_vers = nfsd_acl_versions,
++ .pg_vers = nfsd_acl_version,
+ .pg_name = "nfsacl",
+ .pg_class = "nfsd",
+ .pg_stats = &nfsd_acl_svcstats,
+diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
+index bd0a385df3fc..cf423fea0c6f 100644
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -525,7 +525,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
+ #endif
+
+ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
+- u64 dst_pos, u64 count)
++ u64 dst_pos, u64 count, bool sync)
+ {
+ loff_t cloned;
+
+@@ -534,6 +534,12 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
+ return nfserrno(cloned);
+ if (count && cloned != count)
+ return nfserrno(-EINVAL);
++ if (sync) {
++ loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
++ int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
++ if (status < 0)
++ return nfserrno(status);
++ }
+ return 0;
+ }
+
+diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
+index a13fd9d7e1f5..cc110a10bfe8 100644
+--- a/fs/nfsd/vfs.h
++++ b/fs/nfsd/vfs.h
+@@ -56,7 +56,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
+ __be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
+ struct file *, loff_t, loff_t, int);
+ __be32 nfsd4_clone_file_range(struct file *, u64, struct file *,
+- u64, u64);
++ u64, u64, bool);
+ #endif /* CONFIG_NFSD_V4 */
+ __be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
+ char *name, int len, struct iattr *attrs,
+diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
+index 603fbc4e2f70..10e6049c0ba9 100644
+--- a/include/linux/jbd2.h
++++ b/include/linux/jbd2.h
+@@ -1582,7 +1582,7 @@ static inline int jbd2_space_needed(journal_t *journal)
+ static inline unsigned long jbd2_log_space_left(journal_t *journal)
+ {
+ /* Allow for rounding errors */
+- unsigned long free = journal->j_free - 32;
++ long free = journal->j_free - 32;
+
+ if (journal->j_committing_transaction) {
+ unsigned long committing = atomic_read(&journal->
+@@ -1591,7 +1591,7 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
+ /* Transaction + control blocks */
+ free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
+ }
+- return free;
++ return max_t(long, free, 0);
+ }
+
+ /*
+diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
+index 936b61bd504e..f797ccc650e7 100644
+--- a/include/linux/kernfs.h
++++ b/include/linux/kernfs.h
+@@ -187,6 +187,7 @@ struct kernfs_root {
+
+ /* private fields, do not use outside kernfs proper */
+ struct idr ino_idr;
++ u32 last_ino;
+ u32 next_generation;
+ struct kernfs_syscall_ops *syscall_ops;
+
+diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
+index 3247a3dc7934..b06b75776a32 100644
+--- a/include/linux/miscdevice.h
++++ b/include/linux/miscdevice.h
+@@ -57,6 +57,7 @@
+ #define UHID_MINOR 239
+ #define USERIO_MINOR 240
+ #define VHOST_VSOCK_MINOR 241
++#define RFKILL_MINOR 242
+ #define MISC_DYNAMIC_MINOR 255
+
+ struct device;
+diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
+index b260c5fd2337..e05b95e83d5a 100644
+--- a/include/sound/hdaudio.h
++++ b/include/sound/hdaudio.h
+@@ -493,6 +493,7 @@ struct hdac_stream {
+ bool prepared:1;
+ bool no_period_wakeup:1;
+ bool locked:1;
++ bool stripe:1; /* apply stripe control */
+
+ /* timestamp */
+ unsigned long start_wallclk; /* start + minimum wallclk */
+diff --git a/kernel/time/time.c b/kernel/time/time.c
+index 5c54ca632d08..83f403e7a15c 100644
+--- a/kernel/time/time.c
++++ b/kernel/time/time.c
+@@ -881,7 +881,8 @@ int get_timespec64(struct timespec64 *ts,
+ ts->tv_sec = kts.tv_sec;
+
+ /* Zero out the padding for 32 bit systems or in compat mode */
+- if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall())
++ if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) ||
++ in_compat_syscall()))
+ kts.tv_nsec &= 0xFFFFFFFFUL;
+
+ ts->tv_nsec = kts.tv_nsec;
+diff --git a/net/rfkill/core.c b/net/rfkill/core.c
+index f9b08a6d8dbe..0bf9bf1ceb8f 100644
+--- a/net/rfkill/core.c
++++ b/net/rfkill/core.c
+@@ -1316,10 +1316,12 @@ static const struct file_operations rfkill_fops = {
+ .llseek = no_llseek,
+ };
+
++#define RFKILL_NAME "rfkill"
++
+ static struct miscdevice rfkill_miscdev = {
+- .name = "rfkill",
+ .fops = &rfkill_fops,
+- .minor = MISC_DYNAMIC_MINOR,
++ .name = RFKILL_NAME,
++ .minor = RFKILL_MINOR,
+ };
+
+ static int __init rfkill_init(void)
+@@ -1371,3 +1373,6 @@ static void __exit rfkill_exit(void)
+ class_unregister(&rfkill_class);
+ }
+ module_exit(rfkill_exit);
++
++MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
++MODULE_ALIAS("devname:" RFKILL_NAME);
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 360afe153193..987c4b1f0b17 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -260,7 +260,7 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
+ rpc_reset_waitqueue_priority(queue);
+ queue->qlen = 0;
+ queue->timer_list.expires = 0;
+- INIT_DEFERRABLE_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
++ INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
+ INIT_LIST_HEAD(&queue->timer_list.list);
+ rpc_assign_waitqueue_name(queue, qname);
+ }
+diff --git a/sound/core/oss/linear.c b/sound/core/oss/linear.c
+index 2045697f449d..797d838a2f9e 100644
+--- a/sound/core/oss/linear.c
++++ b/sound/core/oss/linear.c
+@@ -107,6 +107,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin,
+ }
+ }
+ #endif
++ if (frames > dst_channels[0].frames)
++ frames = dst_channels[0].frames;
+ convert(plugin, src_channels, dst_channels, frames);
+ return frames;
+ }
+diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
+index 7915564bd394..3788906421a7 100644
+--- a/sound/core/oss/mulaw.c
++++ b/sound/core/oss/mulaw.c
+@@ -269,6 +269,8 @@ static snd_pcm_sframes_t mulaw_transfer(struct snd_pcm_plugin *plugin,
+ }
+ }
+ #endif
++ if (frames > dst_channels[0].frames)
++ frames = dst_channels[0].frames;
+ data = (struct mulaw_priv *)plugin->extra_data;
+ data->func(plugin, src_channels, dst_channels, frames);
+ return frames;
+diff --git a/sound/core/oss/route.c b/sound/core/oss/route.c
+index c8171f5783c8..72dea04197ef 100644
+--- a/sound/core/oss/route.c
++++ b/sound/core/oss/route.c
+@@ -57,6 +57,8 @@ static snd_pcm_sframes_t route_transfer(struct snd_pcm_plugin *plugin,
+ return -ENXIO;
+ if (frames == 0)
+ return 0;
++ if (frames > dst_channels[0].frames)
++ frames = dst_channels[0].frames;
+
+ nsrcs = plugin->src_format.channels;
+ ndsts = plugin->dst_format.channels;
+diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
+index d8fe7ff0cd58..f9707fb05efe 100644
+--- a/sound/hda/hdac_stream.c
++++ b/sound/hda/hdac_stream.c
+@@ -96,12 +96,14 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start)
+ 1 << azx_dev->index,
+ 1 << azx_dev->index);
+ /* set stripe control */
+- if (azx_dev->substream)
+- stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
+- else
+- stripe_ctl = 0;
+- snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
+- stripe_ctl);
++ if (azx_dev->stripe) {
++ if (azx_dev->substream)
++ stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
++ else
++ stripe_ctl = 0;
++ snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
++ stripe_ctl);
++ }
+ /* set DMA start and interrupt mask */
+ snd_hdac_stream_updateb(azx_dev, SD_CTL,
+ 0, SD_CTL_DMA_START | SD_INT_MASK);
+@@ -118,7 +120,10 @@ void snd_hdac_stream_clear(struct hdac_stream *azx_dev)
+ snd_hdac_stream_updateb(azx_dev, SD_CTL,
+ SD_CTL_DMA_START | SD_INT_MASK, 0);
+ snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
+- snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
++ if (azx_dev->stripe) {
++ snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
++ azx_dev->stripe = 0;
++ }
+ azx_dev->running = false;
+ }
+ EXPORT_SYMBOL_GPL(snd_hdac_stream_clear);
+diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
+index 8272b50b8349..6a8564566375 100644
+--- a/sound/pci/hda/hda_bind.c
++++ b/sound/pci/hda/hda_bind.c
+@@ -43,6 +43,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
+ {
+ struct hda_codec *codec = container_of(dev, struct hda_codec, core);
+
++ /* ignore unsol events during shutdown */
++ if (codec->bus->shutdown)
++ return;
++
+ if (codec->patch_ops.unsol_event)
+ codec->patch_ops.unsol_event(codec, ev);
+ }
+diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
+index c52419376c74..86a416cdeb29 100644
+--- a/sound/pci/hda/hda_intel.c
++++ b/sound/pci/hda/hda_intel.c
+@@ -1382,8 +1382,11 @@ static int azx_free(struct azx *chip)
+ static int azx_dev_disconnect(struct snd_device *device)
+ {
+ struct azx *chip = device->device_data;
++ struct hdac_bus *bus = azx_bus(chip);
+
+ chip->bus.shutdown = 1;
++ cancel_work_sync(&bus->unsol_work);
++
+ return 0;
+ }
+
+diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
+index 968d3caab6ac..90aa0f400a57 100644
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -910,6 +910,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
+ SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
++ SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index d14f6684737d..4dafc864d765 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -32,6 +32,7 @@
+ #include <sound/hda_codec.h>
+ #include "hda_local.h"
+ #include "hda_jack.h"
++#include "hda_controller.h"
+
+ static bool static_hdmi_pcm;
+ module_param(static_hdmi_pcm, bool, 0644);
+@@ -1240,6 +1241,10 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ per_pin->cvt_nid = per_cvt->cvt_nid;
+ hinfo->nid = per_cvt->cvt_nid;
+
++ /* flip stripe flag for the assigned stream if supported */
++ if (get_wcaps(codec, per_cvt->cvt_nid) & AC_WCAP_STRIPE)
++ azx_stream(get_azx_dev(substream))->stripe = 1;
++
+ snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id);
+ snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 80f66ba85f87..ed3e314b5233 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -367,9 +367,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ case 0x10ec0215:
+ case 0x10ec0233:
+ case 0x10ec0235:
+- case 0x10ec0236:
+ case 0x10ec0255:
+- case 0x10ec0256:
+ case 0x10ec0257:
+ case 0x10ec0282:
+ case 0x10ec0283:
+@@ -381,6 +379,11 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
+ case 0x10ec0300:
+ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
+ break;
++ case 0x10ec0236:
++ case 0x10ec0256:
++ alc_write_coef_idx(codec, 0x36, 0x5757);
++ alc_update_coef_idx(codec, 0x10, 1<<9, 0);
++ break;
+ case 0x10ec0275:
+ alc_update_coef_idx(codec, 0xe, 0, 1<<0);
+ break;
+@@ -5892,6 +5895,7 @@ enum {
+ ALC299_FIXUP_PREDATOR_SPK,
+ ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
+ ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
++ ALC294_FIXUP_ASUS_INTSPK_GPIO,
+ };
+
+ static const struct hda_fixup alc269_fixups[] = {
+@@ -6982,6 +6986,13 @@ static const struct hda_fixup alc269_fixups[] = {
+ .chained = true,
+ .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+ },
++ [ALC294_FIXUP_ASUS_INTSPK_GPIO] = {
++ .type = HDA_FIXUP_FUNC,
++ /* The GPIO must be pulled to initialize the AMP */
++ .v.func = alc_fixup_gpio4,
++ .chained = true,
++ .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -7141,7 +7152,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
+ SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
+ SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
+- SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
++ SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO),
+ SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
+ SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
+@@ -7248,6 +7259,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+ SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
+ SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
++ SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
+
+ #if 0
+@@ -8455,6 +8467,8 @@ static void alc662_fixup_aspire_ethos_hp(struct hda_codec *codec,
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ snd_hda_jack_detect_enable_callback(codec, 0x1b,
+ alc662_aspire_ethos_mute_speakers);
++ /* subwoofer needs an extra GPIO setting to become audible */
++ alc_setup_gpio(codec, 0x02);
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ /* Make sure to start in a correct state, i.e. if
+@@ -8537,7 +8551,6 @@ enum {
+ ALC662_FIXUP_USI_HEADSET_MODE,
+ ALC662_FIXUP_LENOVO_MULTI_CODECS,
+ ALC669_FIXUP_ACER_ASPIRE_ETHOS,
+- ALC669_FIXUP_ACER_ASPIRE_ETHOS_SUBWOOFER,
+ ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET,
+ };
+
+@@ -8869,18 +8882,6 @@ static const struct hda_fixup alc662_fixups[] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc662_fixup_aspire_ethos_hp,
+ },
+- [ALC669_FIXUP_ACER_ASPIRE_ETHOS_SUBWOOFER] = {
+- .type = HDA_FIXUP_VERBS,
+- /* subwoofer needs an extra GPIO setting to become audible */
+- .v.verbs = (const struct hda_verb[]) {
+- {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
+- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
+- {0x01, AC_VERB_SET_GPIO_DATA, 0x00},
+- { }
+- },
+- .chained = true,
+- .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET
+- },
+ [ALC669_FIXUP_ACER_ASPIRE_ETHOS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+@@ -8890,7 +8891,7 @@ static const struct hda_fixup alc662_fixups[] = {
+ { }
+ },
+ .chained = true,
+- .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_SUBWOOFER
++ .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET
+ },
+ };
+
+diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
+index 67be8d31afab..6dba8b728d23 100644
+--- a/tools/perf/builtin-script.c
++++ b/tools/perf/builtin-script.c
+@@ -1084,7 +1084,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
+ insn++;
+ }
+ }
+- if (off != (unsigned)len)
++ if (off != end - start)
+ printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
+ }
+
+diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
+index 61b3911d91e6..4b28c9d08d5a 100755
+--- a/tools/perf/scripts/python/exported-sql-viewer.py
++++ b/tools/perf/scripts/python/exported-sql-viewer.py
+@@ -625,7 +625,7 @@ class CallGraphRootItem(CallGraphLevelItemBase):
+ self.query_done = True
+ if_has_calls = ""
+ if IsSelectable(glb.db, "comms", columns = "has_calls"):
+- if_has_calls = " WHERE has_calls = TRUE"
++ if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
+ while query.next():
+@@ -905,7 +905,7 @@ class CallTreeRootItem(CallGraphLevelItemBase):
+ self.query_done = True
+ if_has_calls = ""
+ if IsSelectable(glb.db, "comms", columns = "has_calls"):
+- if_has_calls = " WHERE has_calls = TRUE"
++ if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
+ query = QSqlQuery(glb.db)
+ QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
+ while query.next():
+@@ -3509,6 +3509,12 @@ class DBRef():
+ def __init__(self, is_sqlite3, dbname):
+ self.is_sqlite3 = is_sqlite3
+ self.dbname = dbname
++ self.TRUE = "TRUE"
++ self.FALSE = "FALSE"
++ # SQLite prior to version 3.23 does not support TRUE and FALSE
++ if self.is_sqlite3:
++ self.TRUE = "1"
++ self.FALSE = "0"
+
+ def Open(self, connection_name):
+ dbname = self.dbname
+diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
+index 4cdbae6f4e61..612f6757015d 100644
+--- a/tools/testing/selftests/Makefile
++++ b/tools/testing/selftests/Makefile
+@@ -213,7 +213,7 @@ ifdef INSTALL_PATH
+ @# included in the generated runlist.
+ for TARGET in $(TARGETS); do \
+ BUILD_TARGET=$$BUILD/$$TARGET; \
+- [ ! -d $$INSTALL_PATH/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
++ [ ! -d $(INSTALL_PATH)/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
+ echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
+ echo "cd $$TARGET" >> $(ALL_SCRIPT); \
+ echo -n "run_many" >> $(ALL_SCRIPT); \
+diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
+index 8d69f007dd0c..b3a97dcaa30d 100644
+--- a/virt/kvm/arm/vgic/vgic-v3.c
++++ b/virt/kvm/arm/vgic/vgic-v3.c
+@@ -363,8 +363,8 @@ retry:
+ int vgic_v3_save_pending_tables(struct kvm *kvm)
+ {
+ struct vgic_dist *dist = &kvm->arch.vgic;
+- int last_byte_offset = -1;
+ struct vgic_irq *irq;
++ gpa_t last_ptr = ~(gpa_t)0;
+ int ret;
+ u8 val;
+
+@@ -384,11 +384,11 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
+ bit_nr = irq->intid % BITS_PER_BYTE;
+ ptr = pendbase + byte_offset;
+
+- if (byte_offset != last_byte_offset) {
++ if (ptr != last_ptr) {
+ ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
+ if (ret)
+ return ret;
+- last_byte_offset = byte_offset;
++ last_ptr = ptr;
+ }
+
+ stored = val & (1U << bit_nr);