author | Mike Pagano <mpagano@gentoo.org> | 2021-03-20 10:39:19 -0400
committer | Mike Pagano <mpagano@gentoo.org> | 2021-03-20 10:39:19 -0400
commit | 3592f5060532b13ec1f86a5de9834f1354471c0d (patch)
tree | 2b7be7ff64d542b27612f1477785716d5310c8e9
parent | Support for the BMQ(BitMap Queue) Scheduler via experimental use flag (diff)
download | linux-patches-3592f5060532b13ec1f86a5de9834f1354471c0d.tar.gz, linux-patches-3592f5060532b13ec1f86a5de9834f1354471c0d.tar.bz2, linux-patches-3592f5060532b13ec1f86a5de9834f1354471c0d.zip
Linux patch 5.11.8 (5.11-10)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4
-rw-r--r-- | 1007_linux-5.11.8.patch | 1700
2 files changed, 1704 insertions(+), 0 deletions(-)
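For orientation: the numbered patches tracked in 0000_README are plain -p1 diffs applied in sequence on top of the upstream sources, and the Makefile hunk below bumps SUBLEVEL from 7 to 8, so this patch is meant for a tree that already carries the earlier patches up through 1006_linux-5.11.7.patch. A minimal sketch of applying it by hand follows; the directory name and relative paths are assumptions for illustration, not part of the commit:

    # Sketch only: apply the incremental 5.11.7 -> 5.11.8 patch by hand.
    # Assumes ./linux-5.11 already has the earlier numbered patches applied.
    cd linux-5.11
    patch -p1 --dry-run < ../1007_linux-5.11.8.patch   # verify every hunk applies cleanly first
    patch -p1 < ../1007_linux-5.11.8.patch             # apply; Makefile SUBLEVEL changes from 7 to 8

In normal use the gentoo-sources ebuild applies these patches automatically when the kernel sources are installed through Portage, so the manual steps above matter only when testing the patch or applying it outside that workflow.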
diff --git a/0000_README b/0000_README index 25a30f86..54163b1f 100644 --- a/0000_README +++ b/0000_README @@ -71,6 +71,10 @@ Patch: 1006_linux-5.11.7.patch From: http://www.kernel.org Desc: Linux 5.11.7 +Patch: 1007_linux-5.11.8.patch +From: http://www.kernel.org +Desc: Linux 5.11.8 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1007_linux-5.11.8.patch b/1007_linux-5.11.8.patch new file mode 100644 index 00000000..62c2850e --- /dev/null +++ b/1007_linux-5.11.8.patch @@ -0,0 +1,1700 @@ +diff --git a/Makefile b/Makefile +index 6ba32b82c4802..d8a39ece170dd 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,7 +1,7 @@ + # SPDX-License-Identifier: GPL-2.0 + VERSION = 5 + PATCHLEVEL = 11 +-SUBLEVEL = 7 ++SUBLEVEL = 8 + EXTRAVERSION = + NAME = 💕 Valentine's Day Edition 💕 + +diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h +index a7f5a1bbc8aca..f97e4a4595618 100644 +--- a/arch/arm64/include/asm/el2_setup.h ++++ b/arch/arm64/include/asm/el2_setup.h +@@ -111,7 +111,7 @@ + .endm + + /* Virtual CPU ID registers */ +-.macro __init_el2_nvhe_idregs ++.macro __init_el2_idregs + mrs x0, midr_el1 + mrs x1, mpidr_el1 + msr vpidr_el2, x0 +@@ -163,6 +163,7 @@ + __init_el2_stage2 + __init_el2_gicv3 + __init_el2_hstr ++ __init_el2_idregs + + /* + * When VHE is not in use, early init of EL2 needs to be done here. +@@ -171,7 +172,6 @@ + * will be done via the _EL1 system register aliases in __cpu_setup. + */ + .ifeqs "\mode", "nvhe" +- __init_el2_nvhe_idregs + __init_el2_nvhe_cptr + __init_el2_nvhe_sve + __init_el2_nvhe_prepare_eret +diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S +index d1436c37008b4..57aef3f5a81e2 100644 +--- a/arch/x86/crypto/aesni-intel_asm.S ++++ b/arch/x86/crypto/aesni-intel_asm.S +@@ -2715,25 +2715,18 @@ SYM_FUNC_END(aesni_ctr_enc) + pxor CTR, IV; + + /* +- * void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *dst, +- * const u8 *src, bool enc, le128 *iv) ++ * void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, ++ * const u8 *src, unsigned int len, le128 *iv) + */ +-SYM_FUNC_START(aesni_xts_crypt8) ++SYM_FUNC_START(aesni_xts_encrypt) + FRAME_BEGIN +- testb %cl, %cl +- movl $0, %ecx +- movl $240, %r10d +- leaq _aesni_enc4, %r11 +- leaq _aesni_dec4, %rax +- cmovel %r10d, %ecx +- cmoveq %rax, %r11 + + movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK + movups (IVP), IV + + mov 480(KEYP), KLEN +- addq %rcx, KEYP + ++.Lxts_enc_loop4: + movdqa IV, STATE1 + movdqu 0x00(INP), INC + pxor INC, STATE1 +@@ -2757,71 +2750,103 @@ SYM_FUNC_START(aesni_xts_crypt8) + pxor INC, STATE4 + movdqu IV, 0x30(OUTP) + +- CALL_NOSPEC r11 ++ call _aesni_enc4 + + movdqu 0x00(OUTP), INC + pxor INC, STATE1 + movdqu STATE1, 0x00(OUTP) + +- _aesni_gf128mul_x_ble() +- movdqa IV, STATE1 +- movdqu 0x40(INP), INC +- pxor INC, STATE1 +- movdqu IV, 0x40(OUTP) +- + movdqu 0x10(OUTP), INC + pxor INC, STATE2 + movdqu STATE2, 0x10(OUTP) + +- _aesni_gf128mul_x_ble() +- movdqa IV, STATE2 +- movdqu 0x50(INP), INC +- pxor INC, STATE2 +- movdqu IV, 0x50(OUTP) +- + movdqu 0x20(OUTP), INC + pxor INC, STATE3 + movdqu STATE3, 0x20(OUTP) + +- _aesni_gf128mul_x_ble() +- movdqa IV, STATE3 +- movdqu 0x60(INP), INC +- pxor INC, STATE3 +- movdqu IV, 0x60(OUTP) +- + movdqu 0x30(OUTP), INC + pxor INC, STATE4 + movdqu STATE4, 0x30(OUTP) + + _aesni_gf128mul_x_ble() +- movdqa IV, STATE4 +- movdqu 0x70(INP), INC +- pxor INC, STATE4 +- movdqu IV, 0x70(OUTP) + +- 
_aesni_gf128mul_x_ble() ++ add $64, INP ++ add $64, OUTP ++ sub $64, LEN ++ ja .Lxts_enc_loop4 ++ + movups IV, (IVP) + +- CALL_NOSPEC r11 ++ FRAME_END ++ ret ++SYM_FUNC_END(aesni_xts_encrypt) ++ ++/* ++ * void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, ++ * const u8 *src, unsigned int len, le128 *iv) ++ */ ++SYM_FUNC_START(aesni_xts_decrypt) ++ FRAME_BEGIN ++ ++ movdqa .Lgf128mul_x_ble_mask, GF128MUL_MASK ++ movups (IVP), IV ++ ++ mov 480(KEYP), KLEN ++ add $240, KEYP + +- movdqu 0x40(OUTP), INC ++.Lxts_dec_loop4: ++ movdqa IV, STATE1 ++ movdqu 0x00(INP), INC + pxor INC, STATE1 +- movdqu STATE1, 0x40(OUTP) ++ movdqu IV, 0x00(OUTP) + +- movdqu 0x50(OUTP), INC ++ _aesni_gf128mul_x_ble() ++ movdqa IV, STATE2 ++ movdqu 0x10(INP), INC ++ pxor INC, STATE2 ++ movdqu IV, 0x10(OUTP) ++ ++ _aesni_gf128mul_x_ble() ++ movdqa IV, STATE3 ++ movdqu 0x20(INP), INC ++ pxor INC, STATE3 ++ movdqu IV, 0x20(OUTP) ++ ++ _aesni_gf128mul_x_ble() ++ movdqa IV, STATE4 ++ movdqu 0x30(INP), INC ++ pxor INC, STATE4 ++ movdqu IV, 0x30(OUTP) ++ ++ call _aesni_dec4 ++ ++ movdqu 0x00(OUTP), INC ++ pxor INC, STATE1 ++ movdqu STATE1, 0x00(OUTP) ++ ++ movdqu 0x10(OUTP), INC + pxor INC, STATE2 +- movdqu STATE2, 0x50(OUTP) ++ movdqu STATE2, 0x10(OUTP) + +- movdqu 0x60(OUTP), INC ++ movdqu 0x20(OUTP), INC + pxor INC, STATE3 +- movdqu STATE3, 0x60(OUTP) ++ movdqu STATE3, 0x20(OUTP) + +- movdqu 0x70(OUTP), INC ++ movdqu 0x30(OUTP), INC + pxor INC, STATE4 +- movdqu STATE4, 0x70(OUTP) ++ movdqu STATE4, 0x30(OUTP) ++ ++ _aesni_gf128mul_x_ble() ++ ++ add $64, INP ++ add $64, OUTP ++ sub $64, LEN ++ ja .Lxts_dec_loop4 ++ ++ movups IV, (IVP) + + FRAME_END + ret +-SYM_FUNC_END(aesni_xts_crypt8) ++SYM_FUNC_END(aesni_xts_decrypt) + + #endif +diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c +index f9a1d98e75349..be891fdf8d174 100644 +--- a/arch/x86/crypto/aesni-intel_glue.c ++++ b/arch/x86/crypto/aesni-intel_glue.c +@@ -97,6 +97,12 @@ asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out, + #define AVX_GEN2_OPTSIZE 640 + #define AVX_GEN4_OPTSIZE 4096 + ++asmlinkage void aesni_xts_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, ++ const u8 *in, unsigned int len, u8 *iv); ++ ++asmlinkage void aesni_xts_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, ++ const u8 *in, unsigned int len, u8 *iv); ++ + #ifdef CONFIG_X86_64 + + static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, +@@ -104,9 +110,6 @@ static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, + asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, + const u8 *in, unsigned int len, u8 *iv); + +-asmlinkage void aesni_xts_crypt8(const struct crypto_aes_ctx *ctx, u8 *out, +- const u8 *in, bool enc, le128 *iv); +- + /* asmlinkage void aesni_gcm_enc() + * void *ctx, AES Key schedule. Starts on a 16 byte boundary. + * struct gcm_context_data. May be uninitialized. 
+@@ -547,14 +550,14 @@ static void aesni_xts_dec(const void *ctx, u8 *dst, const u8 *src, le128 *iv) + glue_xts_crypt_128bit_one(ctx, dst, src, iv, aesni_dec); + } + +-static void aesni_xts_enc8(const void *ctx, u8 *dst, const u8 *src, le128 *iv) ++static void aesni_xts_enc32(const void *ctx, u8 *dst, const u8 *src, le128 *iv) + { +- aesni_xts_crypt8(ctx, dst, src, true, iv); ++ aesni_xts_encrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv); + } + +-static void aesni_xts_dec8(const void *ctx, u8 *dst, const u8 *src, le128 *iv) ++static void aesni_xts_dec32(const void *ctx, u8 *dst, const u8 *src, le128 *iv) + { +- aesni_xts_crypt8(ctx, dst, src, false, iv); ++ aesni_xts_decrypt(ctx, dst, src, 32 * AES_BLOCK_SIZE, (u8 *)iv); + } + + static const struct common_glue_ctx aesni_enc_xts = { +@@ -562,8 +565,8 @@ static const struct common_glue_ctx aesni_enc_xts = { + .fpu_blocks_limit = 1, + + .funcs = { { +- .num_blocks = 8, +- .fn_u = { .xts = aesni_xts_enc8 } ++ .num_blocks = 32, ++ .fn_u = { .xts = aesni_xts_enc32 } + }, { + .num_blocks = 1, + .fn_u = { .xts = aesni_xts_enc } +@@ -575,8 +578,8 @@ static const struct common_glue_ctx aesni_dec_xts = { + .fpu_blocks_limit = 1, + + .funcs = { { +- .num_blocks = 8, +- .fn_u = { .xts = aesni_xts_dec8 } ++ .num_blocks = 32, ++ .fn_u = { .xts = aesni_xts_dec32 } + }, { + .num_blocks = 1, + .fn_u = { .xts = aesni_xts_dec } +diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h +index bfc6389edc28a..cf101b73a3603 100644 +--- a/arch/x86/kvm/mmu/mmu_internal.h ++++ b/arch/x86/kvm/mmu/mmu_internal.h +@@ -76,12 +76,15 @@ static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep) + static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu) + { + /* +- * When using the EPT page-modification log, the GPAs in the log +- * would come from L2 rather than L1. Therefore, we need to rely +- * on write protection to record dirty pages. This also bypasses +- * PML, since writes now result in a vmexit. ++ * When using the EPT page-modification log, the GPAs in the CPU dirty ++ * log would come from L2 rather than L1. Therefore, we need to rely ++ * on write protection to record dirty pages, which bypasses PML, since ++ * writes now result in a vmexit. Note, the check on CPU dirty logging ++ * being enabled is mandatory as the bits used to denote WP-only SPTEs ++ * are reserved for NPT w/ PAE (32-bit KVM). 
+ */ +- return vcpu->arch.mmu == &vcpu->arch.guest_mmu; ++ return vcpu->arch.mmu == &vcpu->arch.guest_mmu && ++ kvm_x86_ops.cpu_dirty_log_size; + } + + bool is_nx_huge_page_enabled(void); +diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c +index 5492b66a81532..31f8aa2c40ed8 100644 +--- a/drivers/infiniband/ulp/srp/ib_srp.c ++++ b/drivers/infiniband/ulp/srp/ib_srp.c +@@ -3628,7 +3628,7 @@ static ssize_t srp_create_target(struct device *dev, + struct srp_rdma_ch *ch; + struct srp_device *srp_dev = host->srp_dev; + struct ib_device *ibdev = srp_dev->dev; +- int ret, node_idx, node, cpu, i; ++ int ret, i, ch_idx; + unsigned int max_sectors_per_mr, mr_per_cmd = 0; + bool multich = false; + uint32_t max_iu_len; +@@ -3753,81 +3753,61 @@ static ssize_t srp_create_target(struct device *dev, + goto out; + + ret = -ENOMEM; +- if (target->ch_count == 0) ++ if (target->ch_count == 0) { + target->ch_count = +- max_t(unsigned int, num_online_nodes(), +- min(ch_count ?: +- min(4 * num_online_nodes(), +- ibdev->num_comp_vectors), +- num_online_cpus())); ++ min(ch_count ?: ++ max(4 * num_online_nodes(), ++ ibdev->num_comp_vectors), ++ num_online_cpus()); ++ } ++ + target->ch = kcalloc(target->ch_count, sizeof(*target->ch), + GFP_KERNEL); + if (!target->ch) + goto out; + +- node_idx = 0; +- for_each_online_node(node) { +- const int ch_start = (node_idx * target->ch_count / +- num_online_nodes()); +- const int ch_end = ((node_idx + 1) * target->ch_count / +- num_online_nodes()); +- const int cv_start = node_idx * ibdev->num_comp_vectors / +- num_online_nodes(); +- const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors / +- num_online_nodes(); +- int cpu_idx = 0; +- +- for_each_online_cpu(cpu) { +- if (cpu_to_node(cpu) != node) +- continue; +- if (ch_start + cpu_idx >= ch_end) +- continue; +- ch = &target->ch[ch_start + cpu_idx]; +- ch->target = target; +- ch->comp_vector = cv_start == cv_end ? 
cv_start : +- cv_start + cpu_idx % (cv_end - cv_start); +- spin_lock_init(&ch->lock); +- INIT_LIST_HEAD(&ch->free_tx); +- ret = srp_new_cm_id(ch); +- if (ret) +- goto err_disconnect; ++ for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) { ++ ch = &target->ch[ch_idx]; ++ ch->target = target; ++ ch->comp_vector = ch_idx % ibdev->num_comp_vectors; ++ spin_lock_init(&ch->lock); ++ INIT_LIST_HEAD(&ch->free_tx); ++ ret = srp_new_cm_id(ch); ++ if (ret) ++ goto err_disconnect; + +- ret = srp_create_ch_ib(ch); +- if (ret) +- goto err_disconnect; ++ ret = srp_create_ch_ib(ch); ++ if (ret) ++ goto err_disconnect; + +- ret = srp_alloc_req_data(ch); +- if (ret) +- goto err_disconnect; ++ ret = srp_alloc_req_data(ch); ++ if (ret) ++ goto err_disconnect; + +- ret = srp_connect_ch(ch, max_iu_len, multich); +- if (ret) { +- char dst[64]; +- +- if (target->using_rdma_cm) +- snprintf(dst, sizeof(dst), "%pIS", +- &target->rdma_cm.dst); +- else +- snprintf(dst, sizeof(dst), "%pI6", +- target->ib_cm.orig_dgid.raw); +- shost_printk(KERN_ERR, target->scsi_host, +- PFX "Connection %d/%d to %s failed\n", +- ch_start + cpu_idx, +- target->ch_count, dst); +- if (node_idx == 0 && cpu_idx == 0) { +- goto free_ch; +- } else { +- srp_free_ch_ib(target, ch); +- srp_free_req_data(target, ch); +- target->ch_count = ch - target->ch; +- goto connected; +- } +- } ++ ret = srp_connect_ch(ch, max_iu_len, multich); ++ if (ret) { ++ char dst[64]; + +- multich = true; +- cpu_idx++; ++ if (target->using_rdma_cm) ++ snprintf(dst, sizeof(dst), "%pIS", ++ &target->rdma_cm.dst); ++ else ++ snprintf(dst, sizeof(dst), "%pI6", ++ target->ib_cm.orig_dgid.raw); ++ shost_printk(KERN_ERR, target->scsi_host, ++ PFX "Connection %d/%d to %s failed\n", ++ ch_idx, ++ target->ch_count, dst); ++ if (ch_idx == 0) { ++ goto free_ch; ++ } else { ++ srp_free_ch_ib(target, ch); ++ srp_free_req_data(target, ch); ++ target->ch_count = ch - target->ch; ++ goto connected; ++ } + } +- node_idx++; ++ multich = true; + } + + connected: +diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c +index 95c7fa171e35a..f504b6858ed29 100644 +--- a/drivers/net/dsa/b53/b53_common.c ++++ b/drivers/net/dsa/b53/b53_common.c +@@ -510,6 +510,19 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port) + } + EXPORT_SYMBOL(b53_imp_vlan_setup); + ++static void b53_port_set_learning(struct b53_device *dev, int port, ++ bool learning) ++{ ++ u16 reg; ++ ++ b53_read16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, ®); ++ if (learning) ++ reg &= ~BIT(port); ++ else ++ reg |= BIT(port); ++ b53_write16(dev, B53_CTRL_PAGE, B53_DIS_LEARNING, reg); ++} ++ + int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) + { + struct b53_device *dev = ds->priv; +@@ -523,6 +536,7 @@ int b53_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) + cpu_port = dsa_to_port(ds, port)->cpu_dp->index; + + b53_br_egress_floods(ds, port, true, true); ++ b53_port_set_learning(dev, port, false); + + if (dev->ops->irq_enable) + ret = dev->ops->irq_enable(dev, port); +@@ -656,6 +670,7 @@ static void b53_enable_cpu_port(struct b53_device *dev, int port) + b53_brcm_hdr_setup(dev->ds, port); + + b53_br_egress_floods(dev->ds, port, true, true); ++ b53_port_set_learning(dev, port, false); + } + + static void b53_enable_mib(struct b53_device *dev) +@@ -1839,6 +1854,8 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br) + b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); + dev->ports[port].vlan_ctl_mask = pvlan; + ++ 
b53_port_set_learning(dev, port, true); ++ + return 0; + } + EXPORT_SYMBOL(b53_br_join); +@@ -1886,6 +1903,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) + vl->untag |= BIT(port) | BIT(cpu_port); + b53_set_vlan_entry(dev, pvid, vl); + } ++ b53_port_set_learning(dev, port, false); + } + EXPORT_SYMBOL(b53_br_leave); + +diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h +index c90985c294a2e..b2c539a421545 100644 +--- a/drivers/net/dsa/b53/b53_regs.h ++++ b/drivers/net/dsa/b53/b53_regs.h +@@ -115,6 +115,7 @@ + #define B53_UC_FLOOD_MASK 0x32 + #define B53_MC_FLOOD_MASK 0x34 + #define B53_IPMC_FLOOD_MASK 0x36 ++#define B53_DIS_LEARNING 0x3c + + /* + * Override Ports 0-7 State on devices with xMII interfaces (8 bit) +diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c +index 445226720ff29..edb0a1027b38f 100644 +--- a/drivers/net/dsa/bcm_sf2.c ++++ b/drivers/net/dsa/bcm_sf2.c +@@ -222,23 +222,10 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port, + reg &= ~P_TXQ_PSM_VDD(port); + core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL); + +- /* Enable learning */ +- reg = core_readl(priv, CORE_DIS_LEARN); +- reg &= ~BIT(port); +- core_writel(priv, reg, CORE_DIS_LEARN); +- + /* Enable Broadcom tags for that port if requested */ +- if (priv->brcm_tag_mask & BIT(port)) { ++ if (priv->brcm_tag_mask & BIT(port)) + b53_brcm_hdr_setup(ds, port); + +- /* Disable learning on ASP port */ +- if (port == 7) { +- reg = core_readl(priv, CORE_DIS_LEARN); +- reg |= BIT(port); +- core_writel(priv, reg, CORE_DIS_LEARN); +- } +- } +- + /* Configure Traffic Class to QoS mapping, allow each priority to map + * to a different queue number + */ +diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c +index cb29421d745aa..d38109cc3a011 100644 +--- a/drivers/regulator/pca9450-regulator.c ++++ b/drivers/regulator/pca9450-regulator.c +@@ -5,6 +5,7 @@ + */ + + #include <linux/err.h> ++#include <linux/gpio/consumer.h> + #include <linux/i2c.h> + #include <linux/interrupt.h> + #include <linux/kernel.h> +@@ -32,6 +33,7 @@ struct pca9450_regulator_desc { + struct pca9450 { + struct device *dev; + struct regmap *regmap; ++ struct gpio_desc *sd_vsel_gpio; + enum pca9450_chip_type type; + unsigned int rcnt; + int irq; +@@ -795,6 +797,34 @@ static int pca9450_i2c_probe(struct i2c_client *i2c, + return ret; + } + ++ /* Clear PRESET_EN bit in BUCK123_DVS to use DVS registers */ ++ ret = regmap_clear_bits(pca9450->regmap, PCA9450_REG_BUCK123_DVS, ++ BUCK123_PRESET_EN); ++ if (ret) { ++ dev_err(&i2c->dev, "Failed to clear PRESET_EN bit: %d\n", ret); ++ return ret; ++ } ++ ++ /* Set reset behavior on assertion of WDOG_B signal */ ++ ret = regmap_update_bits(pca9450->regmap, PCA9450_REG_RESET_CTRL, ++ WDOG_B_CFG_MASK, WDOG_B_CFG_COLD_LDO12); ++ if (ret) { ++ dev_err(&i2c->dev, "Failed to set WDOG_B reset behavior\n"); ++ return ret; ++ } ++ ++ /* ++ * The driver uses the LDO5CTRL_H register to control the LDO5 regulator. ++ * This is only valid if the SD_VSEL input of the PMIC is high. Let's ++ * check if the pin is available as GPIO and set it to high. ++ */ ++ pca9450->sd_vsel_gpio = gpiod_get_optional(pca9450->dev, "sd-vsel", GPIOD_OUT_HIGH); ++ ++ if (IS_ERR(pca9450->sd_vsel_gpio)) { ++ dev_err(&i2c->dev, "Failed to get SD_VSEL GPIO\n"); ++ return ret; ++ } ++ + dev_info(&i2c->dev, "%s probed.\n", + type == PCA9450_TYPE_PCA9450A ? 
"pca9450a" : "pca9450bc"); + +diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h +index 7c4b8cb93f9fd..103dfc2fa62ee 100644 +--- a/fs/fuse/fuse_i.h ++++ b/fs/fuse/fuse_i.h +@@ -863,6 +863,7 @@ static inline u64 fuse_get_attr_version(struct fuse_conn *fc) + + static inline void fuse_make_bad(struct inode *inode) + { ++ remove_inode_hash(inode); + set_bit(FUSE_I_BAD, &get_fuse_inode(inode)->state); + } + +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c +index 61fce59cb4d38..f2c6bbe5cdb81 100644 +--- a/fs/gfs2/ops_fstype.c ++++ b/fs/gfs2/ops_fstype.c +@@ -1084,6 +1084,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) + int silent = fc->sb_flags & SB_SILENT; + struct gfs2_sbd *sdp; + struct gfs2_holder mount_gh; ++ struct gfs2_holder freeze_gh; + int error; + + sdp = init_sbd(sb); +@@ -1195,25 +1196,18 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc) + goto fail_per_node; + } + +- if (sb_rdonly(sb)) { +- struct gfs2_holder freeze_gh; ++ error = gfs2_freeze_lock(sdp, &freeze_gh, 0); ++ if (error) ++ goto fail_per_node; + +- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, +- LM_FLAG_NOEXP | GL_EXACT, +- &freeze_gh); +- if (error) { +- fs_err(sdp, "can't make FS RO: %d\n", error); +- goto fail_per_node; +- } +- gfs2_glock_dq_uninit(&freeze_gh); +- } else { ++ if (!sb_rdonly(sb)) + error = gfs2_make_fs_rw(sdp); +- if (error) { +- fs_err(sdp, "can't make FS RW: %d\n", error); +- goto fail_per_node; +- } +- } + ++ gfs2_freeze_unlock(&freeze_gh); ++ if (error) { ++ fs_err(sdp, "can't make FS RW: %d\n", error); ++ goto fail_per_node; ++ } + gfs2_glock_dq_uninit(&mount_gh); + gfs2_online_uevent(sdp); + return 0; +@@ -1514,6 +1508,12 @@ static int gfs2_reconfigure(struct fs_context *fc) + fc->sb_flags |= SB_RDONLY; + + if ((sb->s_flags ^ fc->sb_flags) & SB_RDONLY) { ++ struct gfs2_holder freeze_gh; ++ ++ error = gfs2_freeze_lock(sdp, &freeze_gh, 0); ++ if (error) ++ return -EINVAL; ++ + if (fc->sb_flags & SB_RDONLY) { + error = gfs2_make_fs_ro(sdp); + if (error) +@@ -1523,6 +1523,7 @@ static int gfs2_reconfigure(struct fs_context *fc) + if (error) + errorfc(fc, "unable to remount read-write"); + } ++ gfs2_freeze_unlock(&freeze_gh); + } + sdp->sd_args = *newargs; + +diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c +index a3c1911862f01..8f9c6480a5df4 100644 +--- a/fs/gfs2/recovery.c ++++ b/fs/gfs2/recovery.c +@@ -470,9 +470,7 @@ void gfs2_recover_func(struct work_struct *work) + + /* Acquire a shared hold on the freeze lock */ + +- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, +- LM_FLAG_NOEXP | LM_FLAG_PRIORITY | +- GL_EXACT, &thaw_gh); ++ error = gfs2_freeze_lock(sdp, &thaw_gh, LM_FLAG_PRIORITY); + if (error) + goto fail_gunlock_ji; + +@@ -524,7 +522,7 @@ void gfs2_recover_func(struct work_struct *work) + clean_journal(jd, &head); + up_read(&sdp->sd_log_flush_lock); + +- gfs2_glock_dq_uninit(&thaw_gh); ++ gfs2_freeze_unlock(&thaw_gh); + t_rep = ktime_get(); + fs_info(sdp, "jid=%u: Journal replayed in %lldms [jlck:%lldms, " + "jhead:%lldms, tlck:%lldms, replay:%lldms]\n", +@@ -546,7 +544,7 @@ void gfs2_recover_func(struct work_struct *work) + goto done; + + fail_gunlock_thaw: +- gfs2_glock_dq_uninit(&thaw_gh); ++ gfs2_freeze_unlock(&thaw_gh); + fail_gunlock_ji: + if (jlocked) { + gfs2_glock_dq_uninit(&ji_gh); +diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c +index 2f56acc41c049..754ea2a137b4f 100644 +--- a/fs/gfs2/super.c ++++ b/fs/gfs2/super.c +@@ -165,7 +165,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) + { + 
struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); + struct gfs2_glock *j_gl = ip->i_gl; +- struct gfs2_holder freeze_gh; + struct gfs2_log_header_host head; + int error; + +@@ -173,12 +172,6 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) + if (error) + return error; + +- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, +- LM_FLAG_NOEXP | GL_EXACT, +- &freeze_gh); +- if (error) +- goto fail_threads; +- + j_gl->gl_ops->go_inval(j_gl, DIO_METADATA); + if (gfs2_withdrawn(sdp)) { + error = -EIO; +@@ -205,13 +198,9 @@ int gfs2_make_fs_rw(struct gfs2_sbd *sdp) + + set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); + +- gfs2_glock_dq_uninit(&freeze_gh); +- + return 0; + + fail: +- gfs2_glock_dq_uninit(&freeze_gh); +-fail_threads: + if (sdp->sd_quotad_process) + kthread_stop(sdp->sd_quotad_process); + sdp->sd_quotad_process = NULL; +@@ -452,7 +441,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp) + } + + if (error) +- gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); ++ gfs2_freeze_unlock(&sdp->sd_freeze_gh); + + out: + while (!list_empty(&list)) { +@@ -609,30 +598,9 @@ out: + + int gfs2_make_fs_ro(struct gfs2_sbd *sdp) + { +- struct gfs2_holder freeze_gh; + int error = 0; + int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); + +- gfs2_holder_mark_uninitialized(&freeze_gh); +- if (sdp->sd_freeze_gl && +- !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) { +- if (!log_write_allowed) { +- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, +- LM_ST_SHARED, LM_FLAG_TRY | +- LM_FLAG_NOEXP | GL_EXACT, +- &freeze_gh); +- if (error == GLR_TRYFAILED) +- error = 0; +- } else { +- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, +- LM_ST_SHARED, +- LM_FLAG_NOEXP | GL_EXACT, +- &freeze_gh); +- if (error && !gfs2_withdrawn(sdp)) +- return error; +- } +- } +- + gfs2_flush_delete_work(sdp); + if (!log_write_allowed && current == sdp->sd_quotad_process) + fs_warn(sdp, "The quotad daemon is withdrawing.\n"); +@@ -661,9 +629,6 @@ int gfs2_make_fs_ro(struct gfs2_sbd *sdp) + atomic_read(&sdp->sd_reserving_log) == 0, + HZ * 5); + } +- if (gfs2_holder_initialized(&freeze_gh)) +- gfs2_glock_dq_uninit(&freeze_gh); +- + gfs2_quota_cleanup(sdp); + + if (!log_write_allowed) +@@ -772,10 +737,8 @@ void gfs2_freeze_func(struct work_struct *work) + struct super_block *sb = sdp->sd_vfs; + + atomic_inc(&sb->s_active); +- error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, +- LM_FLAG_NOEXP | GL_EXACT, &freeze_gh); ++ error = gfs2_freeze_lock(sdp, &freeze_gh, 0); + if (error) { +- fs_info(sdp, "GFS2: couldn't get freeze lock : %d\n", error); + gfs2_assert_withdraw(sdp, 0); + } else { + atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN); +@@ -785,7 +748,7 @@ void gfs2_freeze_func(struct work_struct *work) + error); + gfs2_assert_withdraw(sdp, 0); + } +- gfs2_glock_dq_uninit(&freeze_gh); ++ gfs2_freeze_unlock(&freeze_gh); + } + deactivate_super(sb); + clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags); +@@ -853,7 +816,7 @@ static int gfs2_unfreeze(struct super_block *sb) + return 0; + } + +- gfs2_glock_dq_uninit(&sdp->sd_freeze_gh); ++ gfs2_freeze_unlock(&sdp->sd_freeze_gh); + mutex_unlock(&sdp->sd_freeze_mutex); + return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE); + } +diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c +index 574bea29f21ee..dc4985429cf2d 100644 +--- a/fs/gfs2/util.c ++++ b/fs/gfs2/util.c +@@ -91,19 +91,50 @@ out_unlock: + return error; + } + ++/** ++ * gfs2_freeze_lock - hold the freeze glock ++ * @sdp: the superblock ++ * @freeze_gh: pointer to the requested holder ++ * 
@caller_flags: any additional flags needed by the caller ++ */ ++int gfs2_freeze_lock(struct gfs2_sbd *sdp, struct gfs2_holder *freeze_gh, ++ int caller_flags) ++{ ++ int flags = LM_FLAG_NOEXP | GL_EXACT | caller_flags; ++ int error; ++ ++ error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, flags, ++ freeze_gh); ++ if (error && error != GLR_TRYFAILED) ++ fs_err(sdp, "can't lock the freeze lock: %d\n", error); ++ return error; ++} ++ ++void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh) ++{ ++ if (gfs2_holder_initialized(freeze_gh)) ++ gfs2_glock_dq_uninit(freeze_gh); ++} ++ + static void signal_our_withdraw(struct gfs2_sbd *sdp) + { + struct gfs2_glock *live_gl = sdp->sd_live_gh.gh_gl; +- struct inode *inode = sdp->sd_jdesc->jd_inode; +- struct gfs2_inode *ip = GFS2_I(inode); +- struct gfs2_glock *i_gl = ip->i_gl; +- u64 no_formal_ino = ip->i_no_formal_ino; ++ struct inode *inode; ++ struct gfs2_inode *ip; ++ struct gfs2_glock *i_gl; ++ u64 no_formal_ino; ++ int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); + int ret = 0; + int tries; + +- if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) ++ if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) || !sdp->sd_jdesc) + return; + ++ inode = sdp->sd_jdesc->jd_inode; ++ ip = GFS2_I(inode); ++ i_gl = ip->i_gl; ++ no_formal_ino = ip->i_no_formal_ino; ++ + /* Prevent any glock dq until withdraw recovery is complete */ + set_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags); + /* +@@ -118,8 +149,21 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp) + * therefore we need to clear SDF_JOURNAL_LIVE manually. + */ + clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags); +- if (!sb_rdonly(sdp->sd_vfs)) +- ret = gfs2_make_fs_ro(sdp); ++ if (!sb_rdonly(sdp->sd_vfs)) { ++ struct gfs2_holder freeze_gh; ++ ++ gfs2_holder_mark_uninitialized(&freeze_gh); ++ if (sdp->sd_freeze_gl && ++ !gfs2_glock_is_locked_by_me(sdp->sd_freeze_gl)) { ++ ret = gfs2_freeze_lock(sdp, &freeze_gh, ++ log_write_allowed ? 
0 : LM_FLAG_TRY); ++ if (ret == GLR_TRYFAILED) ++ ret = 0; ++ } ++ if (!ret) ++ ret = gfs2_make_fs_ro(sdp); ++ gfs2_freeze_unlock(&freeze_gh); ++ } + + if (sdp->sd_lockstruct.ls_ops->lm_lock == NULL) { /* lock_nolock */ + if (!ret) +diff --git a/fs/gfs2/util.h b/fs/gfs2/util.h +index a4443dd8a94b9..69e1a0ae5a4dc 100644 +--- a/fs/gfs2/util.h ++++ b/fs/gfs2/util.h +@@ -149,6 +149,9 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function, + + extern int check_journal_clean(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, + bool verbose); ++extern int gfs2_freeze_lock(struct gfs2_sbd *sdp, ++ struct gfs2_holder *freeze_gh, int caller_flags); ++extern void gfs2_freeze_unlock(struct gfs2_holder *freeze_gh); + + #define gfs2_io_error(sdp) \ + gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__) +diff --git a/fs/io_uring.c b/fs/io_uring.c +index 00ef0b90d1491..262fd4cfd3ad5 100644 +--- a/fs/io_uring.c ++++ b/fs/io_uring.c +@@ -1823,18 +1823,22 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, + return all_flushed; + } + +-static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, ++static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force, + struct task_struct *tsk, + struct files_struct *files) + { ++ bool ret = true; ++ + if (test_bit(0, &ctx->cq_check_overflow)) { + /* iopoll syncs against uring_lock, not completion_lock */ + if (ctx->flags & IORING_SETUP_IOPOLL) + mutex_lock(&ctx->uring_lock); +- __io_cqring_overflow_flush(ctx, force, tsk, files); ++ ret = __io_cqring_overflow_flush(ctx, force, tsk, files); + if (ctx->flags & IORING_SETUP_IOPOLL) + mutex_unlock(&ctx->uring_lock); + } ++ ++ return ret; + } + + static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) +@@ -2717,6 +2721,13 @@ static bool io_rw_reissue(struct io_kiocb *req, long res) + return false; + if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker()) + return false; ++ /* ++ * If ref is dying, we might be running poll reap from the exit work. ++ * Don't attempt to reissue from that path, just let it fail with ++ * -EAGAIN. ++ */ ++ if (percpu_ref_is_dying(&req->ctx->refs)) ++ return false; + + lockdep_assert_held(&req->ctx->uring_lock); + +@@ -3507,7 +3518,6 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, + else + kiocb->ki_flags |= IOCB_NOWAIT; + +- + /* If the file doesn't support async, just async punt */ + no_async = force_nonblock && !io_file_supports_async(req->file, READ); + if (no_async) +@@ -3519,9 +3529,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, + + ret = io_iter_do_read(req, iter); + +- if (!ret) { +- goto done; +- } else if (ret == -EIOCBQUEUED) { ++ if (ret == -EIOCBQUEUED) { + ret = 0; + goto out_free; + } else if (ret == -EAGAIN) { +@@ -3535,7 +3543,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, + iov_iter_revert(iter, io_size - iov_iter_count(iter)); + ret = 0; + goto copy_iov; +- } else if (ret < 0) { ++ } else if (ret <= 0) { + /* make sure -ERESTARTSYS -> -EINTR is done */ + goto done; + } +@@ -3579,6 +3587,7 @@ retry: + goto out_free; + } else if (ret > 0 && ret < io_size) { + /* we got some bytes, but not all. retry. 
*/ ++ kiocb->ki_flags &= ~IOCB_WAITQ; + goto retry; + } + done: +@@ -7201,6 +7210,25 @@ static int io_run_task_work_sig(void) + return -EINTR; + } + ++/* when returns >0, the caller should retry */ ++static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, ++ struct io_wait_queue *iowq, ++ signed long *timeout) ++{ ++ int ret; ++ ++ /* make sure we run task_work before checking for signals */ ++ ret = io_run_task_work_sig(); ++ if (ret || io_should_wake(iowq)) ++ return ret; ++ /* let the caller flush overflows, retry */ ++ if (test_bit(0, &ctx->cq_check_overflow)) ++ return 1; ++ ++ *timeout = schedule_timeout(*timeout); ++ return !*timeout ? -ETIME : 1; ++} ++ + /* + * Wait until events become available, if we don't already have some. The + * application must reap them itself, as they reside on the shared cq ring. +@@ -7219,9 +7247,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, + .to_wait = min_events, + }; + struct io_rings *rings = ctx->rings; +- struct timespec64 ts; +- signed long timeout = 0; +- int ret = 0; ++ signed long timeout = MAX_SCHEDULE_TIMEOUT; ++ int ret; + + do { + io_cqring_overflow_flush(ctx, false, NULL, NULL); +@@ -7245,6 +7272,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, + } + + if (uts) { ++ struct timespec64 ts; ++ + if (get_timespec64(&ts, uts)) + return -EFAULT; + timeout = timespec64_to_jiffies(&ts); +@@ -7253,34 +7282,17 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, + iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts); + trace_io_uring_cqring_wait(ctx, min_events); + do { +- io_cqring_overflow_flush(ctx, false, NULL, NULL); +- prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, +- TASK_INTERRUPTIBLE); +- /* make sure we run task_work before checking for signals */ +- ret = io_run_task_work_sig(); +- if (ret > 0) { +- finish_wait(&ctx->wait, &iowq.wq); +- continue; +- } +- else if (ret < 0) ++ /* if we can't even flush overflow, don't wait for more */ ++ if (!io_cqring_overflow_flush(ctx, false, NULL, NULL)) { ++ ret = -EBUSY; + break; +- if (io_should_wake(&iowq)) +- break; +- if (test_bit(0, &ctx->cq_check_overflow)) { +- finish_wait(&ctx->wait, &iowq.wq); +- continue; + } +- if (uts) { +- timeout = schedule_timeout(timeout); +- if (timeout == 0) { +- ret = -ETIME; +- break; +- } +- } else { +- schedule(); +- } +- } while (1); +- finish_wait(&ctx->wait, &iowq.wq); ++ prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, ++ TASK_INTERRUPTIBLE); ++ ret = io_cqring_wait_schedule(ctx, &iowq, &timeout); ++ finish_wait(&ctx->wait, &iowq.wq); ++ cond_resched(); ++ } while (ret > 0); + + restore_saved_sigmask_unless(ret == -EINTR); + +diff --git a/fs/locks.c b/fs/locks.c +index 99ca97e81b7a9..6125d2de39b8b 100644 +--- a/fs/locks.c ++++ b/fs/locks.c +@@ -1808,9 +1808,6 @@ check_conflicting_open(struct file *filp, const long arg, int flags) + + if (flags & FL_LAYOUT) + return 0; +- if (flags & FL_DELEG) +- /* We leave these checks to the caller. */ +- return 0; + + if (arg == F_RDLCK) + return inode_is_open_for_write(inode) ? 
-EAGAIN : 0; +diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c +index 1d2cd6a88f61d..cf8b91b1ed373 100644 +--- a/fs/nfsd/nfs4state.c ++++ b/fs/nfsd/nfs4state.c +@@ -4945,31 +4945,6 @@ static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, + return fl; + } + +-static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, +- struct nfs4_file *fp) +-{ +- struct nfs4_clnt_odstate *co; +- struct file *f = fp->fi_deleg_file->nf_file; +- struct inode *ino = locks_inode(f); +- int writes = atomic_read(&ino->i_writecount); +- +- if (fp->fi_fds[O_WRONLY]) +- writes--; +- if (fp->fi_fds[O_RDWR]) +- writes--; +- if (writes > 0) +- return -EAGAIN; +- spin_lock(&fp->fi_lock); +- list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { +- if (co->co_client != clp) { +- spin_unlock(&fp->fi_lock); +- return -EAGAIN; +- } +- } +- spin_unlock(&fp->fi_lock); +- return 0; +-} +- + static struct nfs4_delegation * + nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, + struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) +@@ -4989,12 +4964,9 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, + + nf = find_readable_file(fp); + if (!nf) { +- /* +- * We probably could attempt another open and get a read +- * delegation, but for now, don't bother until the +- * client actually sends us one. +- */ +- return ERR_PTR(-EAGAIN); ++ /* We should always have a readable file here */ ++ WARN_ON_ONCE(1); ++ return ERR_PTR(-EBADF); + } + spin_lock(&state_lock); + spin_lock(&fp->fi_lock); +@@ -5024,19 +4996,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, + if (!fl) + goto out_clnt_odstate; + +- status = nfsd4_check_conflicting_opens(clp, fp); +- if (status) { +- locks_free_lock(fl); +- goto out_clnt_odstate; +- } + status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); + if (fl) + locks_free_lock(fl); + if (status) + goto out_clnt_odstate; +- status = nfsd4_check_conflicting_opens(clp, fp); +- if (status) +- goto out_clnt_odstate; + + spin_lock(&state_lock); + spin_lock(&fp->fi_lock); +@@ -5118,6 +5082,17 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, + goto out_no_deleg; + if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) + goto out_no_deleg; ++ /* ++ * Also, if the file was opened for write or ++ * create, there's a good chance the client's ++ * about to write to it, resulting in an ++ * immediate recall (since we don't support ++ * write delegations): ++ */ ++ if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) ++ goto out_no_deleg; ++ if (open->op_create == NFS4_OPEN_CREATE) ++ goto out_no_deleg; + break; + default: + goto out_no_deleg; +diff --git a/include/linux/regulator/pca9450.h b/include/linux/regulator/pca9450.h +index 1bbd3014f9067..71902f41c9199 100644 +--- a/include/linux/regulator/pca9450.h ++++ b/include/linux/regulator/pca9450.h +@@ -147,6 +147,9 @@ enum { + #define BUCK6_FPWM 0x04 + #define BUCK6_ENMODE_MASK 0x03 + ++/* PCA9450_REG_BUCK123_PRESET_EN bit */ ++#define BUCK123_PRESET_EN 0x80 ++ + /* PCA9450_BUCK1OUT_DVS0 bits */ + #define BUCK1OUT_DVS0_MASK 0x7F + #define BUCK1OUT_DVS0_DEFAULT 0x14 +@@ -216,4 +219,11 @@ enum { + #define IRQ_THERM_105 0x02 + #define IRQ_THERM_125 0x01 + ++/* PCA9450_REG_RESET_CTRL bits */ ++#define WDOG_B_CFG_MASK 0xC0 ++#define WDOG_B_CFG_NONE 0x00 ++#define WDOG_B_CFG_WARM 0x40 ++#define WDOG_B_CFG_COLD_LDO12 0x80 ++#define WDOG_B_CFG_COLD 0xC0 ++ + #endif /* __LINUX_REG_PCA9450_H__ */ +diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c +index 
33683eafea90e..ab23dfb9df1b1 100644 +--- a/kernel/bpf/verifier.c ++++ b/kernel/bpf/verifier.c +@@ -5389,10 +5389,14 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, + { + bool mask_to_left = (opcode == BPF_ADD && off_is_neg) || + (opcode == BPF_SUB && !off_is_neg); +- u32 off; ++ u32 off, max; + + switch (ptr_reg->type) { + case PTR_TO_STACK: ++ /* Offset 0 is out-of-bounds, but acceptable start for the ++ * left direction, see BPF_REG_FP. ++ */ ++ max = MAX_BPF_STACK + mask_to_left; + /* Indirect variable offset stack access is prohibited in + * unprivileged mode so it's not handled here. + */ +@@ -5400,16 +5404,17 @@ static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg, + if (mask_to_left) + *ptr_limit = MAX_BPF_STACK + off; + else +- *ptr_limit = -off; +- return 0; ++ *ptr_limit = -off - 1; ++ return *ptr_limit >= max ? -ERANGE : 0; + case PTR_TO_MAP_VALUE: ++ max = ptr_reg->map_ptr->value_size; + if (mask_to_left) { + *ptr_limit = ptr_reg->umax_value + ptr_reg->off; + } else { + off = ptr_reg->smin_value + ptr_reg->off; +- *ptr_limit = ptr_reg->map_ptr->value_size - off; ++ *ptr_limit = ptr_reg->map_ptr->value_size - off - 1; + } +- return 0; ++ return *ptr_limit >= max ? -ERANGE : 0; + default: + return -EINVAL; + } +@@ -5462,6 +5467,7 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, + u32 alu_state, alu_limit; + struct bpf_reg_state tmp; + bool ret; ++ int err; + + if (can_skip_alu_sanitation(env, insn)) + return 0; +@@ -5477,10 +5483,13 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env, + alu_state |= ptr_is_dst_reg ? + BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST; + +- if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg)) +- return 0; +- if (update_alu_sanitation_state(aux, alu_state, alu_limit)) +- return -EACCES; ++ err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg); ++ if (err < 0) ++ return err; ++ ++ err = update_alu_sanitation_state(aux, alu_state, alu_limit); ++ if (err < 0) ++ return err; + do_sim: + /* Simulate and find potential out-of-bounds access under + * speculative execution from truncation as a result of +@@ -5596,7 +5605,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, + case BPF_ADD: + ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); + if (ret < 0) { +- verbose(env, "R%d tried to add from different maps or paths\n", dst); ++ verbose(env, "R%d tried to add from different maps, paths, or prohibited types\n", dst); + return ret; + } + /* We can take a fixed offset as long as it doesn't overflow +@@ -5651,7 +5660,7 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, + case BPF_SUB: + ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0); + if (ret < 0) { +- verbose(env, "R%d tried to sub from different maps or paths\n", dst); ++ verbose(env, "R%d tried to sub from different maps, paths, or prohibited types\n", dst); + return ret; + } + if (dst_reg == off_reg) { +@@ -11079,7 +11088,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env) + off_reg = issrc ? 
insn->src_reg : insn->dst_reg; + if (isneg) + *patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1); +- *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1); ++ *patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit); + *patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg); + *patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg); + *patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0); +diff --git a/net/mptcp/pm.c b/net/mptcp/pm.c +index da2ed576f2899..1c01c3bcbf5aa 100644 +--- a/net/mptcp/pm.c ++++ b/net/mptcp/pm.c +@@ -20,6 +20,8 @@ int mptcp_pm_announce_addr(struct mptcp_sock *msk, + + pr_debug("msk=%p, local_id=%d", msk, addr->id); + ++ lockdep_assert_held(&msk->pm.lock); ++ + if (add_addr) { + pr_warn("addr_signal error, add_addr=%d", add_addr); + return -EINVAL; +@@ -188,8 +190,7 @@ void mptcp_pm_add_addr_received(struct mptcp_sock *msk, + + void mptcp_pm_add_addr_send_ack(struct mptcp_sock *msk) + { +- if (!mptcp_pm_should_add_signal_ipv6(msk) && +- !mptcp_pm_should_add_signal_port(msk)) ++ if (!mptcp_pm_should_add_signal(msk)) + return; + + mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_SEND_ACK); +diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c +index a6d983d80576a..71c41b9488619 100644 +--- a/net/mptcp/pm_netlink.c ++++ b/net/mptcp/pm_netlink.c +@@ -134,6 +134,8 @@ select_local_address(const struct pm_nl_pernet *pernet, + { + struct mptcp_pm_addr_entry *entry, *ret = NULL; + ++ msk_owned_by_me(msk); ++ + rcu_read_lock(); + __mptcp_flush_join_list(msk); + list_for_each_entry_rcu(entry, &pernet->local_addr_list, list) { +@@ -191,6 +193,8 @@ lookup_anno_list_by_saddr(struct mptcp_sock *msk, + { + struct mptcp_pm_add_entry *entry; + ++ lockdep_assert_held(&msk->pm.lock); ++ + list_for_each_entry(entry, &msk->pm.anno_list, list) { + if (addresses_equal(&entry->addr, addr, false)) + return entry; +@@ -266,6 +270,8 @@ static bool mptcp_pm_alloc_anno_list(struct mptcp_sock *msk, + struct sock *sk = (struct sock *)msk; + struct net *net = sock_net(sk); + ++ lockdep_assert_held(&msk->pm.lock); ++ + if (lookup_anno_list_by_saddr(msk, &entry->addr)) + return false; + +@@ -408,8 +414,10 @@ void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk) + { + struct mptcp_subflow_context *subflow; + +- if (!mptcp_pm_should_add_signal_ipv6(msk) && +- !mptcp_pm_should_add_signal_port(msk)) ++ msk_owned_by_me(msk); ++ lockdep_assert_held(&msk->pm.lock); ++ ++ if (!mptcp_pm_should_add_signal(msk)) + return; + + __mptcp_flush_join_list(msk); +@@ -419,10 +427,9 @@ void mptcp_pm_nl_add_addr_send_ack(struct mptcp_sock *msk) + u8 add_addr; + + spin_unlock_bh(&msk->pm.lock); +- if (mptcp_pm_should_add_signal_ipv6(msk)) +- pr_debug("send ack for add_addr6"); +- if (mptcp_pm_should_add_signal_port(msk)) +- pr_debug("send ack for add_addr_port"); ++ pr_debug("send ack for add_addr%s%s", ++ mptcp_pm_should_add_signal_ipv6(msk) ? " [ipv6]" : "", ++ mptcp_pm_should_add_signal_port(msk) ? 
" [port]" : ""); + + lock_sock(ssk); + tcp_send_ack(ssk); +@@ -445,6 +452,8 @@ void mptcp_pm_nl_rm_addr_received(struct mptcp_sock *msk) + + pr_debug("address rm_id %d", msk->pm.rm_id); + ++ msk_owned_by_me(msk); ++ + if (!msk->pm.rm_id) + return; + +@@ -480,6 +489,8 @@ void mptcp_pm_nl_rm_subflow_received(struct mptcp_sock *msk, u8 rm_id) + + pr_debug("subflow rm_id %d", rm_id); + ++ msk_owned_by_me(msk); ++ + if (!rm_id) + return; + +diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c +index 056846eb2e5bd..7345df40385ab 100644 +--- a/net/mptcp/protocol.c ++++ b/net/mptcp/protocol.c +@@ -2100,6 +2100,14 @@ static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk) + return backup; + } + ++static void mptcp_dispose_initial_subflow(struct mptcp_sock *msk) ++{ ++ if (msk->subflow) { ++ iput(SOCK_INODE(msk->subflow)); ++ msk->subflow = NULL; ++ } ++} ++ + /* subflow sockets can be either outgoing (connect) or incoming + * (accept). + * +@@ -2144,6 +2152,9 @@ void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, + + if (ssk == msk->last_snd) + msk->last_snd = NULL; ++ ++ if (msk->subflow && ssk == msk->subflow->sk) ++ mptcp_dispose_initial_subflow(msk); + } + + static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) +@@ -2186,6 +2197,8 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk) + { + struct mptcp_subflow_context *subflow, *tmp; + ++ might_sleep(); ++ + list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) { + struct sock *ssk = mptcp_subflow_tcp_sock(subflow); + +@@ -2529,11 +2542,7 @@ static void __mptcp_destroy_sock(struct sock *sk) + + pr_debug("msk=%p", msk); + +- /* dispose the ancillatory tcp socket, if any */ +- if (msk->subflow) { +- iput(SOCK_INODE(msk->subflow)); +- msk->subflow = NULL; +- } ++ might_sleep(); + + /* be sure to always acquire the join list lock, to sync vs + * mptcp_finish_join(). 
+@@ -2559,6 +2568,7 @@ static void __mptcp_destroy_sock(struct sock *sk) + sk_stream_kill_queues(sk); + xfrm_sk_free_policy(sk); + sk_refcnt_debug_release(sk); ++ mptcp_dispose_initial_subflow(msk); + sock_put(sk); + } + +diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h +index 18fef4273bdc6..c374345ad1349 100644 +--- a/net/mptcp/protocol.h ++++ b/net/mptcp/protocol.h +@@ -286,6 +286,11 @@ struct mptcp_sock { + #define mptcp_for_each_subflow(__msk, __subflow) \ + list_for_each_entry(__subflow, &((__msk)->conn_list), node) + ++static inline void msk_owned_by_me(const struct mptcp_sock *msk) ++{ ++ sock_owned_by_me((const struct sock *)msk); ++} ++ + static inline struct mptcp_sock *mptcp_sk(const struct sock *sk) + { + return (struct mptcp_sock *)sk; +diff --git a/tools/testing/selftests/bpf/verifier/bounds_deduction.c b/tools/testing/selftests/bpf/verifier/bounds_deduction.c +index 1fd07a4f27ac2..c162498a64fc6 100644 +--- a/tools/testing/selftests/bpf/verifier/bounds_deduction.c ++++ b/tools/testing/selftests/bpf/verifier/bounds_deduction.c +@@ -6,8 +6,9 @@ + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 2", +@@ -20,6 +21,8 @@ + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, ++ .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", ++ .result_unpriv = REJECT, + .result = ACCEPT, + .retval = 1, + }, +@@ -31,8 +34,9 @@ + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 4", +@@ -45,6 +49,8 @@ + BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0), + BPF_EXIT_INSN(), + }, ++ .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", ++ .result_unpriv = REJECT, + .result = ACCEPT, + }, + { +@@ -55,8 +61,9 @@ + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 6", +@@ -67,8 +74,9 @@ + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 7", +@@ -80,8 +88,9 @@ + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R1 tried to sub from different maps, paths, or prohibited types", + .errstr = "dereference of modified ctx ptr", ++ .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, + }, + { +@@ -94,8 +103,9 @@ + offsetof(struct __sk_buff, mark)), + BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", + .errstr = "dereference of modified ctx ptr", ++ .result = REJECT, + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, + }, + { +@@ -106,8 +116,9 @@ + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + 
BPF_EXIT_INSN(), + }, +- .result = REJECT, ++ .errstr_unpriv = "R0 tried to sub from different maps, paths, or prohibited types", + .errstr = "R0 tried to subtract pointer from scalar", ++ .result = REJECT, + }, + { + "check deducing bounds from const, 10", +@@ -119,6 +130,6 @@ + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), + BPF_EXIT_INSN(), + }, +- .result = REJECT, + .errstr = "math between ctx pointer and register with unbounded min value is not allowed", ++ .result = REJECT, + }, +diff --git a/tools/testing/selftests/bpf/verifier/map_ptr.c b/tools/testing/selftests/bpf/verifier/map_ptr.c +index b117bdd3806d8..6f610cfddae53 100644 +--- a/tools/testing/selftests/bpf/verifier/map_ptr.c ++++ b/tools/testing/selftests/bpf/verifier/map_ptr.c +@@ -75,6 +75,8 @@ + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 4 }, ++ .result_unpriv = REJECT, ++ .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", + .result = ACCEPT, + }, + { +@@ -91,5 +93,7 @@ + BPF_EXIT_INSN(), + }, + .fixup_map_hash_16b = { 4 }, ++ .result_unpriv = REJECT, ++ .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", + .result = ACCEPT, + }, +diff --git a/tools/testing/selftests/bpf/verifier/unpriv.c b/tools/testing/selftests/bpf/verifier/unpriv.c +index a3fe0fbaed41a..2df9871b169d4 100644 +--- a/tools/testing/selftests/bpf/verifier/unpriv.c ++++ b/tools/testing/selftests/bpf/verifier/unpriv.c +@@ -496,7 +496,7 @@ + .result = ACCEPT, + }, + { +- "unpriv: adding of fp", ++ "unpriv: adding of fp, reg", + .insns = { + BPF_MOV64_IMM(BPF_REG_0, 0), + BPF_MOV64_IMM(BPF_REG_1, 0), +@@ -504,6 +504,19 @@ + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), + BPF_EXIT_INSN(), + }, ++ .errstr_unpriv = "R1 tried to add from different maps, paths, or prohibited types", ++ .result_unpriv = REJECT, ++ .result = ACCEPT, ++}, ++{ ++ "unpriv: adding of fp, imm", ++ .insns = { ++ BPF_MOV64_IMM(BPF_REG_0, 0), ++ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0), ++ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8), ++ BPF_EXIT_INSN(), ++ }, + .errstr_unpriv = "R1 stack pointer arithmetic goes out of range", + .result_unpriv = REJECT, + .result = ACCEPT, +diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +index ed4e76b246499..feb91266db39a 100644 +--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c ++++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +@@ -169,7 +169,7 @@ + .fixup_map_array_48b = { 1 }, + .result = ACCEPT, + .result_unpriv = REJECT, +- .errstr_unpriv = "R2 tried to add from different maps or paths", ++ .errstr_unpriv = "R2 tried to add from different maps, paths, or prohibited types", + .retval = 0, + }, + { +@@ -516,6 +516,27 @@ + .result = ACCEPT, + .retval = 0xabcdef12, + }, ++{ ++ "map access: value_ptr += N, value_ptr -= N known scalar", ++ .insns = { ++ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), ++ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), ++ BPF_LD_MAP_FD(BPF_REG_1, 0), ++ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), ++ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), ++ BPF_MOV32_IMM(BPF_REG_1, 0x12345678), ++ BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), ++ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2), ++ BPF_MOV64_IMM(BPF_REG_1, 2), ++ BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1), ++ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0), ++ BPF_EXIT_INSN(), ++ }, ++ .fixup_map_array_48b = { 3 }, ++ .result = ACCEPT, ++ 
.retval = 0x12345678, ++}, + { + "map access: unknown scalar += value_ptr, 1", + .insns = { |