author    Mike Pagano <mpagano@gentoo.org>  2020-04-13 08:21:08 -0400
committer Mike Pagano <mpagano@gentoo.org>  2020-04-13 08:21:08 -0400
commit    323c1062fe0439f87dd5f634f23f2e360b8c4b03 (patch)
tree      0fde40af362698d00fd607b17b0e09ccbc9f5dd9
parent    Bump ZSTD Patchset to V5 (diff)
download  linux-patches-5.6-7.tar.gz
          linux-patches-5.6-7.tar.bz2
          linux-patches-5.6-7.zip

Linux patch 5.6.4 (5.6-7)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--  0000_README            |    4
-rw-r--r--  1003_linux-5.6.4.patch | 1810
2 files changed, 1814 insertions(+), 0 deletions(-)
diff --git a/0000_README b/0000_README
index 458ce4b4..4f1ee49e 100644
--- a/0000_README
+++ b/0000_README
@@ -55,6 +55,10 @@ Patch: 1002_linux-5.6.3.patch
From: http://www.kernel.org
Desc: Linux 5.6.3
+Patch: 1003_linux-5.6.4.patch
+From: http://www.kernel.org
+Desc: Linux 5.6.4
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1003_linux-5.6.4.patch b/1003_linux-5.6.4.patch
new file mode 100644
index 00000000..9246d83a
--- /dev/null
+++ b/1003_linux-5.6.4.patch
@@ -0,0 +1,1810 @@
+diff --git a/Makefile b/Makefile
+index 41aafb394d25..0a7e41471838 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 5
+ PATCHLEVEL = 6
+-SUBLEVEL = 3
++SUBLEVEL = 4
+ EXTRAVERSION =
+ NAME = Kleptomaniac Octopus
+
+diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
+index 237ee0c4169f..612ed3c6d581 100644
+--- a/arch/s390/include/asm/lowcore.h
++++ b/arch/s390/include/asm/lowcore.h
+@@ -141,7 +141,9 @@ struct lowcore {
+
+ /* br %r1 trampoline */
+ __u16 br_r1_trampoline; /* 0x0400 */
+- __u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */
++ __u32 return_lpswe; /* 0x0402 */
++ __u32 return_mcck_lpswe; /* 0x0406 */
++ __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */
+
+ /*
+ * 0xe00 contains the address of the IPL Parameter Information
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index aadb3d0e2adc..8e7fb3954dc1 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -161,6 +161,7 @@ typedef struct thread_struct thread_struct;
+ #define INIT_THREAD { \
+ .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
+ .fpu.regs = (void *) init_task.thread.fpu.fprs, \
++ .last_break = 1, \
+ }
+
+ /*
+diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
+index b241ddb67caf..534f212753d6 100644
+--- a/arch/s390/include/asm/setup.h
++++ b/arch/s390/include/asm/setup.h
+@@ -8,6 +8,7 @@
+
+ #include <linux/bits.h>
+ #include <uapi/asm/setup.h>
++#include <linux/build_bug.h>
+
+ #define EP_OFFSET 0x10008
+ #define EP_STRING "S390EP"
+@@ -162,6 +163,12 @@ static inline unsigned long kaslr_offset(void)
+ return __kaslr_offset;
+ }
+
++static inline u32 gen_lpswe(unsigned long addr)
++{
++ BUILD_BUG_ON(addr > 0xfff);
++ return 0xb2b20000 | addr;
++}
++
+ #else /* __ASSEMBLY__ */
+
+ #define IPL_DEVICE (IPL_DEVICE_OFFSET)
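+
The gen_lpswe() helper added above assembles a complete LPSWE instruction as a u32: the S-format opcode 0xb2b2 sits in the high halfword, and base register 0 plus a 12-bit displacement sit in the low halfword, which is why offsets above 0xfff trip the BUILD_BUG_ON(). A minimal user-space sketch of the same encoding (the 0x200 offset is illustrative, not the real __LC_RETURN_PSW):

    /* assert() stands in for the kernel's compile-time BUILD_BUG_ON(). */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t gen_lpswe(unsigned long addr)
    {
            assert(addr <= 0xfff);       /* displacement field is 12 bits */
            return 0xb2b20000u | (uint32_t)addr;
    }

    int main(void)
    {
            printf("lpswe 0x200 -> 0x%08x\n", gen_lpswe(0x200)); /* 0xb2b20200 */
            return 0;
    }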
+diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
+index ce33406cfe83..e80f0e6f5972 100644
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -124,6 +124,8 @@ int main(void)
+ OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
+ OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
+ OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
++ OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
++ OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
+ OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
+ OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
+ OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
+diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
+index 9205add8481d..3ae64914bd14 100644
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -115,26 +115,29 @@ _LPP_OFFSET = __LC_LPP
+
+ .macro SWITCH_ASYNC savearea,timer
+ tmhh %r8,0x0001 # interrupting from user ?
+- jnz 1f
++ jnz 2f
+ lgr %r14,%r9
++ cghi %r14,__LC_RETURN_LPSWE
++ je 0f
+ slg %r14,BASED(.Lcritical_start)
+ clg %r14,BASED(.Lcritical_length)
+- jhe 0f
++ jhe 1f
++0:
+ lghi %r11,\savearea # inside critical section, do cleanup
+ brasl %r14,cleanup_critical
+ tmhh %r8,0x0001 # retest problem state after cleanup
+- jnz 1f
+-0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
++ jnz 2f
++1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
+ slgr %r14,%r15
+ srag %r14,%r14,STACK_SHIFT
+- jnz 2f
++ jnz 3f
+ CHECK_STACK \savearea
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+- j 3f
+-1: UPDATE_VTIME %r14,%r15,\timer
++ j 4f
++2: UPDATE_VTIME %r14,%r15,\timer
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+-2: lg %r15,__LC_ASYNC_STACK # load async stack
+-3: la %r11,STACK_FRAME_OVERHEAD(%r15)
++3: lg %r15,__LC_ASYNC_STACK # load async stack
++4: la %r11,STACK_FRAME_OVERHEAD(%r15)
+ .endm
+
+ .macro UPDATE_VTIME w1,w2,enter_timer
+@@ -401,7 +404,7 @@ ENTRY(system_call)
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ lmg %r11,%r15,__PT_R11(%r11)
+- lpswe __LC_RETURN_PSW
++ b __LC_RETURN_LPSWE(%r0)
+ .Lsysc_done:
+
+ #
+@@ -608,43 +611,50 @@ ENTRY(pgm_check_handler)
+ BPOFF
+ stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ lg %r10,__LC_LAST_BREAK
+- lg %r12,__LC_CURRENT
++ srag %r11,%r10,12
++ jnz 0f
++ /* if __LC_LAST_BREAK is < 4096, it contains one of
++ * the lpswe addresses in lowcore. Set it to 1 (initial state)
++ * to prevent leaking that address to userspace.
++ */
++ lghi %r10,1
++0: lg %r12,__LC_CURRENT
+ lghi %r11,0
+ larl %r13,cleanup_critical
+ lmg %r8,%r9,__LC_PGM_OLD_PSW
+ tmhh %r8,0x0001 # test problem state bit
+- jnz 2f # -> fault in user space
++ jnz 3f # -> fault in user space
+ #if IS_ENABLED(CONFIG_KVM)
+ # cleanup critical section for program checks in sie64a
+ lgr %r14,%r9
+ slg %r14,BASED(.Lsie_critical_start)
+ clg %r14,BASED(.Lsie_critical_length)
+- jhe 0f
++ jhe 1f
+ lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
+ ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ larl %r9,sie_exit # skip forward to sie_exit
+ lghi %r11,_PIF_GUEST_FAULT
+ #endif
+-0: tmhh %r8,0x4000 # PER bit set in old PSW ?
+- jnz 1f # -> enabled, can't be a double fault
++1: tmhh %r8,0x4000 # PER bit set in old PSW ?
++ jnz 2f # -> enabled, can't be a double fault
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+ jnz .Lpgm_svcper # -> single stepped svc
+-1: CHECK_STACK __LC_SAVE_AREA_SYNC
++2: CHECK_STACK __LC_SAVE_AREA_SYNC
+ aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+- # CHECK_VMAP_STACK branches to stack_overflow or 4f
+- CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
+-2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
++ # CHECK_VMAP_STACK branches to stack_overflow or 5f
++ CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
++3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ lg %r15,__LC_KERNEL_STACK
+ lgr %r14,%r12
+ aghi %r14,__TASK_thread # pointer to thread_struct
+ lghi %r13,__LC_PGM_TDB
+ tm __LC_PGM_ILC+2,0x02 # check for transaction abort
+- jz 3f
++ jz 4f
+ mvc __THREAD_trap_tdb(256,%r14),0(%r13)
+-3: stg %r10,__THREAD_last_break(%r14)
+-4: lgr %r13,%r11
++4: stg %r10,__THREAD_last_break(%r14)
++5: lgr %r13,%r11
+ la %r11,STACK_FRAME_OVERHEAD(%r15)
+ stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+@@ -663,14 +673,14 @@ ENTRY(pgm_check_handler)
+ stg %r13,__PT_FLAGS(%r11)
+ stg %r10,__PT_ARGS(%r11)
+ tm __LC_PGM_ILC+3,0x80 # check for per exception
+- jz 5f
++ jz 6f
+ tmhh %r8,0x0001 # kernel per event ?
+ jz .Lpgm_kprobe
+ oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
+ mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
+ mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
+ mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
+-5: REENABLE_IRQS
++6: REENABLE_IRQS
+ xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
+ larl %r1,pgm_check_table
+ llgh %r10,__PT_INT_CODE+2(%r11)
+@@ -775,7 +785,7 @@ ENTRY(io_int_handler)
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ .Lio_exit_kernel:
+ lmg %r11,%r15,__PT_R11(%r11)
+- lpswe __LC_RETURN_PSW
++ b __LC_RETURN_LPSWE(%r0)
+ .Lio_done:
+
+ #
+@@ -1214,7 +1224,7 @@ ENTRY(mcck_int_handler)
+ stpt __LC_EXIT_TIMER
+ mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+ 0: lmg %r11,%r15,__PT_R11(%r11)
+- lpswe __LC_RETURN_MCCK_PSW
++ b __LC_RETURN_MCCK_LPSWE
+
+ .Lmcck_panic:
+ lg %r15,__LC_NODAT_STACK
+@@ -1271,6 +1281,8 @@ ENDPROC(stack_overflow)
+ #endif
+
+ ENTRY(cleanup_critical)
++ cghi %r9,__LC_RETURN_LPSWE
++ je .Lcleanup_lpswe
+ #if IS_ENABLED(CONFIG_KVM)
+ clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
+ jl 0f
+@@ -1424,6 +1436,7 @@ ENDPROC(cleanup_critical)
+ mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
+ mvc 0(64,%r11),__PT_R8(%r9)
+ lmg %r0,%r7,__PT_R0(%r9)
++.Lcleanup_lpswe:
+ 1: lmg %r8,%r9,__LC_RETURN_PSW
+ BR_EX %r14,%r11
+ .Lcleanup_sysc_restore_insn:
+diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
+index 6ccef5f29761..eb6e23ad15a2 100644
+--- a/arch/s390/kernel/process.c
++++ b/arch/s390/kernel/process.c
+@@ -106,6 +106,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
+ p->thread.system_timer = 0;
+ p->thread.hardirq_timer = 0;
+ p->thread.softirq_timer = 0;
++ p->thread.last_break = 1;
+
+ frame->sf.back_chain = 0;
+ /* new return point is ret_from_fork */
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index b2c2f75860e8..6f8efeaf220d 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -73,6 +73,7 @@
+ #include <asm/nospec-branch.h>
+ #include <asm/mem_detect.h>
+ #include <asm/uv.h>
++#include <asm/asm-offsets.h>
+ #include "entry.h"
+
+ /*
+@@ -450,6 +451,8 @@ static void __init setup_lowcore_dat_off(void)
+ lc->spinlock_index = 0;
+ arch_spin_lock_setup(0);
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
++ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
++ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+
+ set_prefix((u32)(unsigned long) lc);
+ lowcore_ptr[0] = lc;
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index a08bd2522dd9..f87d4e14269c 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -212,6 +212,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
+ lc->spinlock_lockval = arch_spin_lockval(cpu);
+ lc->spinlock_index = 0;
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
++ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
++ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
+ if (nmi_alloc_per_cpu(lc))
+ goto out_async;
+ if (vdso_alloc_per_cpu(lc))
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index b403fa14847d..f810930aff42 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -415,6 +415,10 @@ void __init vmem_map_init(void)
+ SET_MEMORY_RO | SET_MEMORY_X);
+ __set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
+ SET_MEMORY_RO | SET_MEMORY_X);
++
++ /* we need lowcore executable for our LPSWE instructions */
++ set_memory_x(0, 1);
++
+ pr_info("Write protected kernel read-only data: %luk\n",
+ (unsigned long)(__end_rodata - _stext) >> 10);
+ }
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index d92088dec6c3..d4bd9b961726 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -3023,6 +3023,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)
+
+ static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
+ {
++ /*
++ * blk_mq_map_queues() and multiple .map_queues() implementations
++ * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
++ * number of hardware queues.
++ */
++ if (set->nr_maps == 1)
++ set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
++
+ if (set->ops->map_queues && !is_kdump_kernel()) {
+ int i;
+
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index e5f95922bc21..ce49cbfa941b 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -1012,6 +1012,10 @@ static bool acpi_s2idle_wake(void)
+ if (acpi_any_fixed_event_status_set())
+ return true;
+
++ /* Check wakeups from drivers sharing the SCI. */
++ if (acpi_check_wakeup_handlers())
++ return true;
++
+ /*
+ * If there are no EC events to process and at least one of the
+ * other enabled GPEs is active, the wakeup is regarded as a
+diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
+index 41675d24a9bc..3d90480ce1b1 100644
+--- a/drivers/acpi/sleep.h
++++ b/drivers/acpi/sleep.h
+@@ -2,6 +2,7 @@
+
+ extern void acpi_enable_wakeup_devices(u8 sleep_state);
+ extern void acpi_disable_wakeup_devices(u8 sleep_state);
++extern bool acpi_check_wakeup_handlers(void);
+
+ extern struct list_head acpi_wakeup_device_list;
+ extern struct mutex acpi_device_lock;
+diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
+index 9614126bf56e..90c40f992e13 100644
+--- a/drivers/acpi/wakeup.c
++++ b/drivers/acpi/wakeup.c
+@@ -12,6 +12,15 @@
+ #include "internal.h"
+ #include "sleep.h"
+
++struct acpi_wakeup_handler {
++ struct list_head list_node;
++ bool (*wakeup)(void *context);
++ void *context;
++};
++
++static LIST_HEAD(acpi_wakeup_handler_head);
++static DEFINE_MUTEX(acpi_wakeup_handler_mutex);
++
+ /*
+ * We didn't lock acpi_device_lock in the file, because it invokes oops in
+ * suspend/resume and isn't really required as this is called in S-state. At
+@@ -96,3 +105,75 @@ int __init acpi_wakeup_device_init(void)
+ mutex_unlock(&acpi_device_lock);
+ return 0;
+ }
++
++/**
++ * acpi_register_wakeup_handler - Register wakeup handler
++ * @wake_irq: The IRQ through which the device may receive wakeups
++ * @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup
++ * @context: Context to pass to the handler when calling it
++ *
++ * Drivers which may share an IRQ with the SCI can use this to register
++ * a handler which returns true when the device they are managing wants
++ * to trigger a wakeup.
++ */
++int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context),
++ void *context)
++{
++ struct acpi_wakeup_handler *handler;
++
++ /*
++ * If the device is not sharing its IRQ with the SCI, there is no
++ * need to register the handler.
++ */
++ if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq)
++ return 0;
++
++ handler = kmalloc(sizeof(*handler), GFP_KERNEL);
++ if (!handler)
++ return -ENOMEM;
++
++ handler->wakeup = wakeup;
++ handler->context = context;
++
++ mutex_lock(&acpi_wakeup_handler_mutex);
++ list_add(&handler->list_node, &acpi_wakeup_handler_head);
++ mutex_unlock(&acpi_wakeup_handler_mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler);
++
++/**
++ * acpi_unregister_wakeup_handler - Unregister wakeup handler
++ * @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler()
++ * @context: Context passed to acpi_register_wakeup_handler()
++ */
++void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context),
++ void *context)
++{
++ struct acpi_wakeup_handler *handler;
++
++ mutex_lock(&acpi_wakeup_handler_mutex);
++ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
++ if (handler->wakeup == wakeup && handler->context == context) {
++ list_del(&handler->list_node);
++ kfree(handler);
++ break;
++ }
++ }
++ mutex_unlock(&acpi_wakeup_handler_mutex);
++}
++EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler);
++
++bool acpi_check_wakeup_handlers(void)
++{
++ struct acpi_wakeup_handler *handler;
++
++ /* No need to lock, nothing else is running when we're called. */
++ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
++ if (handler->wakeup(handler->context))
++ return true;
++ }
++
++ return false;
++}
+diff --git a/drivers/base/core.c b/drivers/base/core.c
+index dbb0f9130f42..d32a3aefff32 100644
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -523,9 +523,13 @@ static void device_link_add_missing_supplier_links(void)
+
+ mutex_lock(&wfs_lock);
+ list_for_each_entry_safe(dev, tmp, &wait_for_suppliers,
+- links.needs_suppliers)
+- if (!fwnode_call_int_op(dev->fwnode, add_links, dev))
++ links.needs_suppliers) {
++ int ret = fwnode_call_int_op(dev->fwnode, add_links, dev);
++ if (!ret)
+ list_del_init(&dev->links.needs_suppliers);
++ else if (ret != -ENODEV)
++ dev->links.need_for_probe = false;
++ }
+ mutex_unlock(&wfs_lock);
+ }
+
+diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
+index 30cf00f8e9a0..0576801944fd 100644
+--- a/drivers/char/hw_random/imx-rngc.c
++++ b/drivers/char/hw_random/imx-rngc.c
+@@ -105,8 +105,10 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
+ return -ETIMEDOUT;
+ }
+
+- if (rngc->err_reg != 0)
++ if (rngc->err_reg != 0) {
++ imx_rngc_irq_mask_clear(rngc);
+ return -EIO;
++ }
+
+ return 0;
+ }
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index c7f9584de2c8..a6b77a850ddd 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -2149,11 +2149,11 @@ struct batched_entropy {
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
+- * number is either as good as RDRAND or as good as /dev/urandom, with the
+- * goal of being quite fast and not depleting entropy. In order to ensure
++ * number is good as /dev/urandom, but there is no backtrack protection, with
++ * the goal of being quite fast and not depleting entropy. In order to ensure
+ * that the randomness provided by this function is okay, the function
+- * wait_for_random_bytes() should be called and return 0 at least once
+- * at any point prior.
++ * wait_for_random_bytes() should be called and return 0 at least once at any
++ * point prior.
+ */
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+@@ -2166,15 +2166,6 @@ u64 get_random_u64(void)
+ struct batched_entropy *batch;
+ static void *previous;
+
+-#if BITS_PER_LONG == 64
+- if (arch_get_random_long((unsigned long *)&ret))
+- return ret;
+-#else
+- if (arch_get_random_long((unsigned long *)&ret) &&
+- arch_get_random_long((unsigned long *)&ret + 1))
+- return ret;
+-#endif
+-
+ warn_unseeded_randomness(&previous);
+
+ batch = raw_cpu_ptr(&batched_entropy_u64);
+@@ -2199,9 +2190,6 @@ u32 get_random_u32(void)
+ struct batched_entropy *batch;
+ static void *previous;
+
+- if (arch_get_random_int(&ret))
+- return ret;
+-
+ warn_unseeded_randomness(&previous);
+
+ batch = raw_cpu_ptr(&batched_entropy_u32);
+diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
+index 2dec3a02ab9f..ff972cf30712 100644
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2968,6 +2968,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
+ err2:
+ kfree(route->path_rec);
+ route->path_rec = NULL;
++ route->num_paths = 0;
+ err1:
+ kfree(work);
+ return ret;
+@@ -4790,6 +4791,19 @@ static int __init cma_init(void)
+ {
+ int ret;
+
++ /*
++ * There is a rare lock ordering dependency in cma_netdev_callback()
++ * that only happens when bonding is enabled. Teach lockdep that rtnl
++ * must never be nested under lock so it can find these without having
++ * to test with bonding.
++ */
++ if (IS_ENABLED(CONFIG_LOCKDEP)) {
++ rtnl_lock();
++ mutex_lock(&lock);
++ mutex_unlock(&lock);
++ rtnl_unlock();
++ }
++
+ cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
+ if (!cma_wq)
+ return -ENOMEM;
+diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
+index 0274e9b704be..f4f79f1292b9 100644
+--- a/drivers/infiniband/core/ucma.c
++++ b/drivers/infiniband/core/ucma.c
+@@ -91,6 +91,7 @@ struct ucma_context {
+
+ struct ucma_file *file;
+ struct rdma_cm_id *cm_id;
++ struct mutex mutex;
+ u64 uid;
+
+ struct list_head list;
+@@ -216,6 +217,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
+ init_completion(&ctx->comp);
+ INIT_LIST_HEAD(&ctx->mc_list);
+ ctx->file = file;
++ mutex_init(&ctx->mutex);
+
+ if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
+ goto error;
+@@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
+ }
+
+ events_reported = ctx->events_reported;
++ mutex_destroy(&ctx->mutex);
+ kfree(ctx);
+ return events_reported;
+ }
+@@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
++ mutex_unlock(&ctx->mutex);
++
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
+ (struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ memset(&resp, 0, sizeof resp);
+ addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
+ memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
+@@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
+ ucma_copy_iw_route(&resp, &ctx->cm_id->route);
+
+ out:
++ mutex_unlock(&ctx->mutex);
+ if (copy_to_user(u64_to_user_ptr(cmd.response),
+ &resp, sizeof(resp)))
+ ret = -EFAULT;
+@@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_file *file,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ switch (cmd.option) {
+ case RDMA_USER_CM_QUERY_ADDR:
+ ret = ucma_query_addr(ctx, response, out_len);
+@@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_file *file,
+ ret = -ENOSYS;
+ break;
+ }
++ mutex_unlock(&ctx->mutex);
+
+ ucma_put_ctx(ctx);
+ return ret;
+@@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
+ return PTR_ERR(ctx);
+
+ ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
++ mutex_lock(&ctx->mutex);
+ ret = rdma_connect(ctx->cm_id, &conn_param);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
+
+ ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
+ cmd.backlog : max_backlog;
++ mutex_lock(&ctx->mutex);
+ ret = rdma_listen(ctx->cm_id, ctx->backlog);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
+ if (cmd.conn_param.valid) {
+ ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+ mutex_lock(&file->mut);
++ mutex_lock(&ctx->mutex);
+ ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
++ mutex_unlock(&ctx->mutex);
+ if (!ret)
+ ctx->uid = cmd.uid;
+ mutex_unlock(&file->mut);
+- } else
++ } else {
++ mutex_lock(&ctx->mutex);
+ ret = __rdma_accept(ctx->cm_id, NULL, NULL);
+-
++ mutex_unlock(&ctx->mutex);
++ }
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ ret = rdma_disconnect(ctx->cm_id);
++ mutex_unlock(&ctx->mutex);
+ ucma_put_ctx(ctx);
+ return ret;
+ }
+@@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
+ resp.qp_attr_mask = 0;
+ memset(&qp_attr, 0, sizeof qp_attr);
+ qp_attr.qp_state = cmd.qp_state;
++ mutex_lock(&ctx->mutex);
+ ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
++ mutex_unlock(&ctx->mutex);
+ if (ret)
+ goto out;
+
+@@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
+ struct sa_path_rec opa;
+
+ sa_convert_path_ib_to_opa(&opa, &sa_path);
++ mutex_lock(&ctx->mutex);
+ ret = rdma_set_ib_path(ctx->cm_id, &opa);
++ mutex_unlock(&ctx->mutex);
+ } else {
++ mutex_lock(&ctx->mutex);
+ ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
++ mutex_unlock(&ctx->mutex);
+ }
+ if (ret)
+ return ret;
+@@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,
+
+ switch (level) {
+ case RDMA_OPTION_ID:
++ mutex_lock(&ctx->mutex);
+ ret = ucma_set_option_id(ctx, optname, optval, optlen);
++ mutex_unlock(&ctx->mutex);
+ break;
+ case RDMA_OPTION_IB:
+ ret = ucma_set_option_ib(ctx, optname, optval, optlen);
+@@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
++ mutex_lock(&ctx->mutex);
+ if (ctx->cm_id->device)
+ ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
++ mutex_unlock(&ctx->mutex);
+
+ ucma_put_ctx(ctx);
+ return ret;
+@@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
+ mc->join_state = join_state;
+ mc->uid = cmd->uid;
+ memcpy(&mc->addr, addr, cmd->addr_size);
++ mutex_lock(&ctx->mutex);
+ ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
+ join_state, mc);
++ mutex_unlock(&ctx->mutex);
+ if (ret)
+ goto err2;
+
+@@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
+ goto out;
+ }
+
++ mutex_lock(&mc->ctx->mutex);
+ rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
++ mutex_unlock(&mc->ctx->mutex);
++
+ mutex_lock(&mc->ctx->file->mut);
+ ucma_cleanup_mc_events(mc);
+ list_del(&mc->list);
+diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
+index 90f62c4bddba..074ec71772d2 100644
+--- a/drivers/infiniband/hw/hfi1/sysfs.c
++++ b/drivers/infiniband/hw/hfi1/sysfs.c
+@@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+ dd_dev_err(dd,
+ "Skipping sc2vl sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail;
++ /*
++ * Based on the documentation for kobject_init_and_add(), the
++ * caller should call kobject_put even if this call fails.
++ */
++ goto bail_sc2vl;
+ }
+ kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);
+
+@@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+ dd_dev_err(dd,
+ "Skipping sl2sc sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail_sc2vl;
++ goto bail_sl2sc;
+ }
+ kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);
+
+@@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+ dd_dev_err(dd,
+ "Skipping vl2mtu sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail_sl2sc;
++ goto bail_vl2mtu;
+ }
+ kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);
+
+@@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
+ dd_dev_err(dd,
+ "Skipping Congestion Control sysfs info, (err %d) port %u\n",
+ ret, port_num);
+- goto bail_vl2mtu;
++ goto bail_cc;
+ }
+
+ kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
+@@ -742,7 +746,6 @@ bail_sl2sc:
+ kobject_put(&ppd->sl2sc_kobj);
+ bail_sc2vl:
+ kobject_put(&ppd->sc2vl_kobj);
+-bail:
+ return ret;
+ }
+
+@@ -853,8 +856,13 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)
+
+ return 0;
+ bail:
+- for (i = 0; i < dd->num_sdma; i++)
+- kobject_del(&dd->per_sdma[i].kobj);
++ /*
++ * The function kobject_put() will call kobject_del() if the kobject
++ * has been added successfully. The sysfs files created under the
++ * kobject directory will also be removed during the process.
++ */
++ for (; i >= 0; i--)
++ kobject_put(&dd->per_sdma[i].kobj);
+
+ return ret;
+ }
+@@ -867,6 +875,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
+ struct hfi1_pportdata *ppd;
+ int i;
+
++ /* Unwind operations in hfi1_verbs_register_sysfs() */
++ for (i = 0; i < dd->num_sdma; i++)
++ kobject_put(&dd->per_sdma[i].kobj);
++
+ for (i = 0; i < dd->num_pports; i++) {
+ ppd = &dd->pport[i];
+
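+
The relabeled error paths above follow the kobject rule quoted in the new comments: once kobject_init_and_add() has been called, the kobject holds a reference that must be dropped with kobject_put() even when the call itself fails, and kobject_put() in turn performs the kobject_del() for entries that were added successfully. A minimal sketch of the pattern (the example_* names are hypothetical, not hfi1 code):

    #include <linux/kobject.h>
    #include <linux/slab.h>

    static void example_release(struct kobject *kobj)
    {
            kfree(kobj);    /* runs when the last reference is dropped */
    }

    static struct kobj_type example_ktype = {
            .release = example_release,
    };

    static int example_add_kobj(struct kobject *parent)
    {
            struct kobject *kobj;
            int ret;

            kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
            if (!kobj)
                    return -ENOMEM;

            ret = kobject_init_and_add(kobj, &example_ktype, parent, "example");
            if (ret) {
                    /* Wrong: kfree(kobj).  Right: drop the reference instead. */
                    kobject_put(kobj);
                    return ret;
            }
            kobject_uevent(kobj, KOBJ_ADD);
            return 0;
    }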
+diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
+index ffa7c2100edb..1279aeabf651 100644
+--- a/drivers/infiniband/hw/mlx5/main.c
++++ b/drivers/infiniband/hw/mlx5/main.c
+@@ -1192,12 +1192,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_GRE;
+- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+- MLX5_FLEX_PROTO_CW_MPLS_GRE)
++ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
+- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
+- MLX5_FLEX_PROTO_CW_MPLS_UDP)
++ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
+ resp.tunnel_offloads_caps |=
+ MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
+ }
+diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
+index c5651a96b196..559e5fd3bad8 100644
+--- a/drivers/infiniband/sw/siw/siw_cm.c
++++ b/drivers/infiniband/sw/siw/siw_cm.c
+@@ -1769,14 +1769,23 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
+ return 0;
+ }
+
+-static int siw_listen_address(struct iw_cm_id *id, int backlog,
+- struct sockaddr *laddr, int addr_family)
++/*
++ * siw_create_listen - Create resources for a listener's IWCM ID @id
++ *
++ * Starts listen on the socket address id->local_addr.
++ *
++ */
++int siw_create_listen(struct iw_cm_id *id, int backlog)
+ {
+ struct socket *s;
+ struct siw_cep *cep = NULL;
+ struct siw_device *sdev = to_siw_dev(id->device);
++ int addr_family = id->local_addr.ss_family;
+ int rv = 0, s_val;
+
++ if (addr_family != AF_INET && addr_family != AF_INET6)
++ return -EAFNOSUPPORT;
++
+ rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
+ if (rv < 0)
+ return rv;
+@@ -1791,9 +1800,25 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
+ siw_dbg(id->device, "setsockopt error: %d\n", rv);
+ goto error;
+ }
+- rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
+- sizeof(struct sockaddr_in) :
+- sizeof(struct sockaddr_in6));
++ if (addr_family == AF_INET) {
++ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
++
++ /* For wildcard addr, limit binding to current device only */
++ if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
++ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
++
++ rv = s->ops->bind(s, (struct sockaddr *)laddr,
++ sizeof(struct sockaddr_in));
++ } else {
++ struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
++
++ /* For wildcard addr, limit binding to current device only */
++ if (ipv6_addr_any(&laddr->sin6_addr))
++ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
++
++ rv = s->ops->bind(s, (struct sockaddr *)laddr,
++ sizeof(struct sockaddr_in6));
++ }
+ if (rv) {
+ siw_dbg(id->device, "socket bind error: %d\n", rv);
+ goto error;
+@@ -1852,7 +1877,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
+ list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
+ cep->state = SIW_EPSTATE_LISTENING;
+
+- siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
++ siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);
+
+ return 0;
+
+@@ -1910,106 +1935,6 @@ static void siw_drop_listeners(struct iw_cm_id *id)
+ }
+ }
+
+-/*
+- * siw_create_listen - Create resources for a listener's IWCM ID @id
+- *
+- * Listens on the socket address id->local_addr.
+- *
+- * If the listener's @id provides a specific local IP address, at most one
+- * listening socket is created and associated with @id.
+- *
+- * If the listener's @id provides the wildcard (zero) local IP address,
+- * a separate listen is performed for each local IP address of the device
+- * by creating a listening socket and binding to that local IP address.
+- *
+- */
+-int siw_create_listen(struct iw_cm_id *id, int backlog)
+-{
+- struct net_device *dev = to_siw_dev(id->device)->netdev;
+- int rv = 0, listeners = 0;
+-
+- siw_dbg(id->device, "backlog %d\n", backlog);
+-
+- /*
+- * For each attached address of the interface, create a
+- * listening socket, if id->local_addr is the wildcard
+- * IP address or matches the IP address.
+- */
+- if (id->local_addr.ss_family == AF_INET) {
+- struct in_device *in_dev = in_dev_get(dev);
+- struct sockaddr_in s_laddr;
+- const struct in_ifaddr *ifa;
+-
+- if (!in_dev) {
+- rv = -ENODEV;
+- goto out;
+- }
+- memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
+-
+- siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
+-
+- rtnl_lock();
+- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
+- if (ipv4_is_zeronet(s_laddr.sin_addr.s_addr) ||
+- s_laddr.sin_addr.s_addr == ifa->ifa_address) {
+- s_laddr.sin_addr.s_addr = ifa->ifa_address;
+-
+- rv = siw_listen_address(id, backlog,
+- (struct sockaddr *)&s_laddr,
+- AF_INET);
+- if (!rv)
+- listeners++;
+- }
+- }
+- rtnl_unlock();
+- in_dev_put(in_dev);
+- } else if (id->local_addr.ss_family == AF_INET6) {
+- struct inet6_dev *in6_dev = in6_dev_get(dev);
+- struct inet6_ifaddr *ifp;
+- struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr);
+-
+- if (!in6_dev) {
+- rv = -ENODEV;
+- goto out;
+- }
+- siw_dbg(id->device, "laddr %pISp\n", &s_laddr);
+-
+- rtnl_lock();
+- list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
+- if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
+- continue;
+- if (ipv6_addr_any(&s_laddr->sin6_addr) ||
+- ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
+- struct sockaddr_in6 bind_addr = {
+- .sin6_family = AF_INET6,
+- .sin6_port = s_laddr->sin6_port,
+- .sin6_flowinfo = 0,
+- .sin6_addr = ifp->addr,
+- .sin6_scope_id = dev->ifindex };
+-
+- rv = siw_listen_address(id, backlog,
+- (struct sockaddr *)&bind_addr,
+- AF_INET6);
+- if (!rv)
+- listeners++;
+- }
+- }
+- rtnl_unlock();
+- in6_dev_put(in6_dev);
+- } else {
+- rv = -EAFNOSUPPORT;
+- }
+-out:
+- if (listeners)
+- rv = 0;
+- else if (!rv)
+- rv = -EINVAL;
+-
+- siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
+-
+- return rv;
+-}
+-
+ int siw_destroy_listen(struct iw_cm_id *id)
+ {
+ if (!id->provider_data) {
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index 426820ab9afe..b486250923c5 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -39,6 +39,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+ return victim;
+ }
+
++static inline void return_unused_peb(struct ubi_device *ubi,
++ struct ubi_wl_entry *e)
++{
++ wl_tree_add(e, &ubi->free);
++ ubi->free_count++;
++}
++
+ /**
+ * return_unused_pool_pebs - returns unused PEB to the free tree.
+ * @ubi: UBI device description object
+@@ -52,8 +59,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+- wl_tree_add(e, &ubi->free);
+- ubi->free_count++;
++ return_unused_peb(ubi, e);
+ }
+ }
+
+@@ -361,6 +367,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
+ return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
++ if (ubi->fm_anchor) {
++ return_unused_peb(ubi, ubi->fm_anchor);
++ ubi->fm_anchor = NULL;
++ }
++
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ kfree(ubi->fm->e[i]);
+diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
+index a3664281a33f..4dfa459ef5c7 100644
+--- a/drivers/net/can/slcan.c
++++ b/drivers/net/can/slcan.c
+@@ -148,7 +148,7 @@ static void slc_bump(struct slcan *sl)
+ u32 tmpid;
+ char *cmd = sl->rbuff;
+
+- cf.can_id = 0;
++ memset(&cf, 0, sizeof(cf));
+
+ switch (*cmd) {
+ case 'r':
+@@ -187,8 +187,6 @@ static void slc_bump(struct slcan *sl)
+ else
+ return;
+
+- *(u64 *) (&cf.data) = 0; /* clear payload */
+-
+ /* RTR frames may have a dlc > 0 but they never have any data bytes */
+ if (!(cf.can_id & CAN_RTR_FLAG)) {
+ for (i = 0; i < cf.can_dlc; i++) {
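+
The memset() above replaces two spot-fixes (clearing can_id and the payload separately) because the frame struct also contains a length byte and padding/reserved bytes; anything not cleared explicitly leaks kernel stack contents into the frame. A toy illustration of the bug class (this layout is simplified, not the real struct can_frame):

    #include <string.h>

    struct frame_like {
            unsigned int id;
            unsigned char len;
            unsigned char pad, res0, res1;  /* padding/reserved bytes */
            unsigned char data[8];
    };

    void build_buggy(struct frame_like *f)
    {
            f->id = 0;                      /* pad/res* keep stack garbage */
            memset(f->data, 0, sizeof(f->data));
    }

    void build_fixed(struct frame_like *f)
    {
            memset(f, 0, sizeof(*f));       /* every byte, padding included */
    }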
+diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
+index b0f5280a83cb..e93c81c4062e 100644
+--- a/drivers/net/dsa/bcm_sf2.c
++++ b/drivers/net/dsa/bcm_sf2.c
+@@ -472,7 +472,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
+ priv->slave_mii_bus->parent = ds->dev->parent;
+ priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
+
+- err = of_mdiobus_register(priv->slave_mii_bus, dn);
++ err = mdiobus_register(priv->slave_mii_bus);
+ if (err && dn)
+ of_node_put(dn);
+
+@@ -1069,6 +1069,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
+ const struct bcm_sf2_of_data *data;
+ struct b53_platform_data *pdata;
+ struct dsa_switch_ops *ops;
++ struct device_node *ports;
+ struct bcm_sf2_priv *priv;
+ struct b53_device *dev;
+ struct dsa_switch *ds;
+@@ -1136,7 +1137,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
+ set_bit(0, priv->cfp.used);
+ set_bit(0, priv->cfp.unique);
+
+- bcm_sf2_identify_ports(priv, dn->child);
++ ports = of_find_node_by_name(dn, "ports");
++ if (ports) {
++ bcm_sf2_identify_ports(priv, ports);
++ of_node_put(ports);
++ }
+
+ priv->irq0 = irq_of_parse_and_map(dn, 0);
+ priv->irq1 = irq_of_parse_and_map(dn, 1);
+diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
+index 7cbd1bd4c5a6..9b0de2852c69 100644
+--- a/drivers/net/dsa/mt7530.c
++++ b/drivers/net/dsa/mt7530.c
+@@ -1356,6 +1356,9 @@ mt7530_setup(struct dsa_switch *ds)
+ continue;
+
+ phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
++ if (!phy_node)
++ continue;
++
+ if (phy_node->parent == priv->dev->of_node->parent) {
+ ret = of_get_phy_mode(mac_np, &interface);
+ if (ret && ret != -ENODEV)
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+index 97f90edbc068..b0bdf7233f0c 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+@@ -3138,7 +3138,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+ return ret;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+- pi->xact_addr_filt = ret;
+ return 0;
+ }
+
+@@ -6682,6 +6681,10 @@ static void shutdown_one(struct pci_dev *pdev)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ cxgb_close(adapter->port[i]);
+
++ rtnl_lock();
++ cxgb4_mqprio_stop_offload(adapter);
++ rtnl_unlock();
++
+ if (is_uld(adapter)) {
+ detach_ulds(adapter);
+ t4_uld_clean_up(adapter);
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+index ec3eb45ee3b4..e6af4906d674 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+@@ -301,6 +301,7 @@ static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
+ cxgb4_clear_msix_aff(eorxq->msix->vec,
+ eorxq->msix->aff_mask);
+ free_irq(eorxq->msix->vec, &eorxq->rspq);
++ cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
+ }
+
+ free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
+@@ -611,6 +612,28 @@ out:
+ return ret;
+ }
+
++void cxgb4_mqprio_stop_offload(struct adapter *adap)
++{
++ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
++ struct net_device *dev;
++ u8 i;
++
++ if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
++ return;
++
++ for_each_port(adap, i) {
++ dev = adap->port[i];
++ if (!dev)
++ continue;
++
++ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i];
++ if (!tc_port_mqprio->mqprio.qopt.num_tc)
++ continue;
++
++ cxgb4_mqprio_disable_offload(dev);
++ }
++}
++
+ int cxgb4_init_tc_mqprio(struct adapter *adap)
+ {
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
+diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+index c532f1ef8451..ff8794132b22 100644
+--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
++++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+@@ -38,6 +38,7 @@ struct cxgb4_tc_mqprio {
+
+ int cxgb4_setup_tc_mqprio(struct net_device *dev,
+ struct tc_mqprio_qopt_offload *mqprio);
++void cxgb4_mqprio_stop_offload(struct adapter *adap);
+ int cxgb4_init_tc_mqprio(struct adapter *adap);
+ void cxgb4_cleanup_tc_mqprio(struct adapter *adap);
+ #endif /* __CXGB4_TC_MQPRIO_H__ */
+diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+index b607919c8ad0..498de6ef6870 100644
+--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+@@ -123,9 +123,12 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
+ u8 prio = act->vlan.prio;
+ u16 vid = act->vlan.vid;
+
+- return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+- act->id, vid,
+- proto, prio, extack);
++ err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
++ act->id, vid,
++ proto, prio, extack);
++ if (err)
++ return err;
++ break;
+ }
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
+diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
+index 791d99b9e1cf..6b633e9d76da 100644
+--- a/drivers/net/ethernet/realtek/r8169_main.c
++++ b/drivers/net/ethernet/realtek/r8169_main.c
+@@ -5549,12 +5549,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+
+ netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
+
+- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
+- NETIF_F_HW_VLAN_CTAG_RX;
+- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
+- NETIF_F_HW_VLAN_CTAG_RX;
++ dev->features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
++ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
++ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
++ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+ NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+@@ -5572,25 +5570,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (rtl_chip_supports_csum_v2(tp)) {
+- dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+- dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
++ dev->hw_features |= NETIF_F_IPV6_CSUM;
++ dev->features |= NETIF_F_IPV6_CSUM;
++ }
++
++ /* There has been a number of reports that using SG/TSO results in
++ * tx timeouts. However for a lot of people SG/TSO works fine.
++ * Therefore disable both features by default, but allow users to
++ * enable them. Use at own risk!
++ */
++ if (rtl_chip_supports_csum_v2(tp)) {
++ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+ dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
+ dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
+ } else {
++ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
+ dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
+ dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
+ }
+
+- /* RTL8168e-vl and one RTL8168c variant are known to have a
+- * HW issue with TSO.
+- */
+- if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+- tp->mac_version == RTL_GIGA_MAC_VER_22) {
+- dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+- dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+- dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+- }
+-
+ dev->hw_features |= NETIF_F_RXALL;
+ dev->hw_features |= NETIF_F_RXFCS;
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+index 542784300620..efc6ec1b8027 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
++++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+@@ -207,7 +207,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
+ reg++;
+ }
+
+- while (reg <= perfect_addr_number) {
++ while (reg < perfect_addr_number) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
+index 481cf48c9b9e..31f731e6df72 100644
+--- a/drivers/net/phy/at803x.c
++++ b/drivers/net/phy/at803x.c
+@@ -425,8 +425,8 @@ static int at803x_parse_dt(struct phy_device *phydev)
+ */
+ if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) ||
+ at803x_match_phy_id(phydev, ATH8035_PHY_ID)) {
+- priv->clk_25m_reg &= ~AT8035_CLK_OUT_MASK;
+- priv->clk_25m_mask &= ~AT8035_CLK_OUT_MASK;
++ priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
++ priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
+ }
+ }
+
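+
The sign flip above is the whole fix: the intent is to restrict clk_25m_reg and clk_25m_mask to the narrower clock-out field that the ATH8030/ATH8035 support, so the field bits must be kept with "& MASK" rather than discarded with "& ~MASK". A toy sketch of the difference (the mask value is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    #define CLK_OUT_MASK 0x18U   /* hypothetical narrow bit-field */

    int main(void)
    {
            uint32_t reg = 0xff;

            printf("buggy  reg & ~MASK = 0x%02x\n", reg & ~CLK_OUT_MASK);
            printf("fixed  reg &  MASK = 0x%02x\n", reg & CLK_OUT_MASK);
            return 0;
    }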
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 63dedec0433d..51b64f087717 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -25,6 +25,7 @@
+ #include <linux/micrel_phy.h>
+ #include <linux/of.h>
+ #include <linux/clk.h>
++#include <linux/delay.h>
+
+ /* Operation Mode Strap Override */
+ #define MII_KSZPHY_OMSO 0x16
+@@ -902,6 +903,12 @@ static int kszphy_resume(struct phy_device *phydev)
+
+ genphy_resume(phydev);
+
++ /* After switching from power-down to normal mode, an internal global
++ * reset is automatically generated. Wait a minimum of 1 ms before
++ * read/write access to the PHY registers.
++ */
++ usleep_range(1000, 2000);
++
+ ret = kszphy_config_reset(phydev);
+ if (ret)
+ return ret;
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 650c937ed56b..9de9b7d8aedd 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1715,8 +1715,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+ alloc_frag->offset += buflen;
+ }
+ err = tun_xdp_act(tun, xdp_prog, &xdp, act);
+- if (err < 0)
+- goto err_xdp;
++ if (err < 0) {
++ if (act == XDP_REDIRECT || act == XDP_TX)
++ put_page(alloc_frag->page);
++ goto out;
++ }
++
+ if (err == XDP_REDIRECT)
+ xdp_do_flush();
+ if (err != XDP_PASS)
+@@ -1730,8 +1734,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+
+ return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
+
+-err_xdp:
+- put_page(alloc_frag->page);
+ out:
+ rcu_read_unlock();
+ local_bh_enable();
+diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
+index f14e2c5f9da5..55f088f535e2 100644
+--- a/drivers/platform/x86/intel_int0002_vgpio.c
++++ b/drivers/platform/x86/intel_int0002_vgpio.c
+@@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq, void *data)
+ return IRQ_HANDLED;
+ }
+
++static bool int0002_check_wake(void *data)
++{
++ u32 gpe_sts_reg;
++
++ gpe_sts_reg = inl(GPE0A_STS_PORT);
++ return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
++}
++
+ static struct irq_chip int0002_byt_irqchip = {
+ .name = DRV_NAME,
+ .irq_ack = int0002_irq_ack,
+@@ -220,6 +228,7 @@ static int int0002_probe(struct platform_device *pdev)
+ return ret;
+ }
+
++ acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
+ device_init_wakeup(dev, true);
+ return 0;
+ }
+@@ -227,6 +236,7 @@ static int int0002_probe(struct platform_device *pdev)
+ static int int0002_remove(struct platform_device *pdev)
+ {
+ device_init_wakeup(&pdev->dev, false);
++ acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
+ return 0;
+ }
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 1e00bf2d65a2..a83aeccafae3 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -1521,7 +1521,7 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
+ for (i = 0; i < req->num_trbs; i++) {
+ struct dwc3_trb *trb;
+
+- trb = req->trb + i;
++ trb = &dep->trb_pool[dep->trb_dequeue];
+ trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_deq(dep);
+ }
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index bb6ae995c2e5..5eb3fc90f9f6 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -1283,6 +1283,9 @@ finished:
+ if (!con_is_bound(&fb_con))
+ fbcon_exit();
+
++ if (vc->vc_num == logo_shown)
++ logo_shown = FBCON_LOGO_CANSHOW;
++
+ return;
+ }
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 3affd96a98ba..bdcffd78fbb9 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -5607,7 +5607,7 @@ static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
+ struct io_file_put {
+ struct llist_node llist;
+ struct file *file;
+- struct completion *done;
++ bool free_pfile;
+ };
+
+ static void io_ring_file_ref_flush(struct fixed_file_data *data)
+@@ -5618,9 +5618,7 @@ static void io_ring_file_ref_flush(struct fixed_file_data *data)
+ while ((node = llist_del_all(&data->put_llist)) != NULL) {
+ llist_for_each_entry_safe(pfile, tmp, node, llist) {
+ io_ring_file_put(data->ctx, pfile->file);
+- if (pfile->done)
+- complete(pfile->done);
+- else
++ if (pfile->free_pfile)
+ kfree(pfile);
+ }
+ }
+@@ -5820,7 +5818,6 @@ static bool io_queue_file_removal(struct fixed_file_data *data,
+ struct file *file)
+ {
+ struct io_file_put *pfile, pfile_stack;
+- DECLARE_COMPLETION_ONSTACK(done);
+
+ /*
+ * If we fail allocating the struct we need for doing async reomval
+@@ -5829,15 +5826,15 @@ static bool io_queue_file_removal(struct fixed_file_data *data,
+ pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
+ if (!pfile) {
+ pfile = &pfile_stack;
+- pfile->done = &done;
+- }
++ pfile->free_pfile = false;
++ } else
++ pfile->free_pfile = true;
+
+ pfile->file = file;
+ llist_add(&pfile->llist, &data->put_llist);
+
+ if (pfile == &pfile_stack) {
+ percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
+- wait_for_completion(&done);
+ flush_work(&data->ref_work);
+ return false;
+ }
+diff --git a/include/linux/acpi.h b/include/linux/acpi.h
+index 0f24d701fbdc..efac0f9c01a2 100644
+--- a/include/linux/acpi.h
++++ b/include/linux/acpi.h
+@@ -488,6 +488,11 @@ void __init acpi_nvs_nosave_s3(void);
+ void __init acpi_sleep_no_blacklist(void);
+ #endif /* CONFIG_PM_SLEEP */
+
++int acpi_register_wakeup_handler(
++ int wake_irq, bool (*wakeup)(void *context), void *context);
++void acpi_unregister_wakeup_handler(
++ bool (*wakeup)(void *context), void *context);
++
+ struct acpi_osc_context {
+ char *uuid_str; /* UUID string */
+ int rev;
+diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
+index bfdf41537cf1..e5a3e26cad01 100644
+--- a/include/linux/mlx5/mlx5_ifc.h
++++ b/include/linux/mlx5/mlx5_ifc.h
+@@ -875,7 +875,11 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
+ u8 swp_csum[0x1];
+ u8 swp_lso[0x1];
+ u8 cqe_checksum_full[0x1];
+- u8 reserved_at_24[0x5];
++ u8 tunnel_stateless_geneve_tx[0x1];
++ u8 tunnel_stateless_mpls_over_udp[0x1];
++ u8 tunnel_stateless_mpls_over_gre[0x1];
++ u8 tunnel_stateless_vxlan_gpe[0x1];
++ u8 tunnel_stateless_ipv4_over_vxlan[0x1];
+ u8 tunnel_stateless_ip_over_ip[0x1];
+ u8 reserved_at_2a[0x6];
+ u8 max_vxlan_udp_ports[0x8];
+diff --git a/mm/slub.c b/mm/slub.c
+index 6589b41d5a60..3b17e774831a 100644
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -259,7 +259,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
+ * freepointer to be restored incorrectly.
+ */
+ return (void *)((unsigned long)ptr ^ s->random ^
+- (unsigned long)kasan_reset_tag((void *)ptr_addr));
++ swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
+ #else
+ return ptr;
+ #endif
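+
The swab() above addresses a weakness in CONFIG_SLAB_FREELIST_HARDENED: ptr and ptr_addr both point into the same slab and therefore share most of their high bits, so ptr ^ ptr_addr cancels them out and an attacker who can read one obfuscated freelist entry learns most of s->random. Byte-swapping ptr_addr before the XOR breaks that correlation. A user-space sketch of the encoding (values are illustrative; assumes 64-bit unsigned long):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned long swab_ul(unsigned long x)   /* stand-in for swab() */
    {
            return __builtin_bswap64(x);
    }

    static void *freelist_ptr(unsigned long random, void *ptr,
                              unsigned long ptr_addr)
    {
            return (void *)((unsigned long)ptr ^ random ^ swab_ul(ptr_addr));
    }

    int main(void)
    {
            unsigned long random = 0x1234567890abcdefUL;  /* per-cache cookie */
            void *next = (void *)0xffff888012345678UL;    /* next free object */
            unsigned long addr = 0xffff888012345640UL;    /* slot holding it */

            printf("stored: %p\n", freelist_ptr(random, next, addr));
            return 0;
    }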
+diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
+index 0c7d31c6c18c..a58584949a95 100644
+--- a/net/bluetooth/rfcomm/tty.c
++++ b/net/bluetooth/rfcomm/tty.c
+@@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
+ dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
+ if (IS_ERR(dlc))
+ return PTR_ERR(dlc);
+- else if (dlc) {
+- rfcomm_dlc_put(dlc);
++ if (dlc)
+ return -EBUSY;
+- }
+ dlc = rfcomm_dlc_alloc(GFP_KERNEL);
+ if (!dlc)
+ return -ENOMEM;
+diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
+index 46d614b611db..2a8175de8578 100644
+--- a/net/ipv6/addrconf.c
++++ b/net/ipv6/addrconf.c
+@@ -3296,6 +3296,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
+ if (netif_is_l3_master(idev->dev))
+ return;
+
++ /* no link local addresses on devices flagged as slaves */
++ if (idev->dev->flags & IFF_SLAVE)
++ return;
++
+ ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
+
+ switch (idev->cnf.addr_gen_mode) {
+diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
+index 9904299424a1..61e95029c18f 100644
+--- a/net/sched/cls_tcindex.c
++++ b/net/sched/cls_tcindex.c
+@@ -11,6 +11,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/errno.h>
+ #include <linux/slab.h>
++#include <linux/refcount.h>
+ #include <net/act_api.h>
+ #include <net/netlink.h>
+ #include <net/pkt_cls.h>
+@@ -26,9 +27,12 @@
+ #define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */
+
+
++struct tcindex_data;
++
+ struct tcindex_filter_result {
+ struct tcf_exts exts;
+ struct tcf_result res;
++ struct tcindex_data *p;
+ struct rcu_work rwork;
+ };
+
+@@ -49,6 +53,7 @@ struct tcindex_data {
+ u32 hash; /* hash table size; 0 if undefined */
+ u32 alloc_hash; /* allocated size */
+ u32 fall_through; /* 0: only classify if explicit match */
++ refcount_t refcnt; /* a temporary refcnt for perfect hash */
+ struct rcu_work rwork;
+ };
+
+@@ -57,6 +62,20 @@ static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
+ return tcf_exts_has_actions(&r->exts) || r->res.classid;
+ }
+
++static void tcindex_data_get(struct tcindex_data *p)
++{
++ refcount_inc(&p->refcnt);
++}
++
++static void tcindex_data_put(struct tcindex_data *p)
++{
++ if (refcount_dec_and_test(&p->refcnt)) {
++ kfree(p->perfect);
++ kfree(p->h);
++ kfree(p);
++ }
++}
++
+ static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
+ u16 key)
+ {
+@@ -132,6 +151,7 @@ static int tcindex_init(struct tcf_proto *tp)
+ p->mask = 0xffff;
+ p->hash = DEFAULT_HASH_SIZE;
+ p->fall_through = 1;
++ refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
+
+ rcu_assign_pointer(tp->root, p);
+ return 0;
+@@ -141,6 +161,7 @@ static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
+ {
+ tcf_exts_destroy(&r->exts);
+ tcf_exts_put_net(&r->exts);
++ tcindex_data_put(r->p);
+ }
+
+ static void tcindex_destroy_rexts_work(struct work_struct *work)
+@@ -212,6 +233,8 @@ found:
+ else
+ __tcindex_destroy_fexts(f);
+ } else {
++ tcindex_data_get(p);
++
+ if (tcf_exts_get_net(&r->exts))
+ tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
+ else
+@@ -228,9 +251,7 @@ static void tcindex_destroy_work(struct work_struct *work)
+ struct tcindex_data,
+ rwork);
+
+- kfree(p->perfect);
+- kfree(p->h);
+- kfree(p);
++ tcindex_data_put(p);
+ }
+
+ static inline int
+@@ -248,9 +269,11 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
+ };
+
+ static int tcindex_filter_result_init(struct tcindex_filter_result *r,
++ struct tcindex_data *p,
+ struct net *net)
+ {
+ memset(r, 0, sizeof(*r));
++ r->p = p;
+ return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
+ TCA_TCINDEX_POLICE);
+ }
+@@ -290,6 +313,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
+ TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
+ if (err < 0)
+ goto errout;
++ cp->perfect[i].p = cp;
+ }
+
+ return 0;
+@@ -334,6 +358,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ cp->alloc_hash = p->alloc_hash;
+ cp->fall_through = p->fall_through;
+ cp->tp = tp;
++ refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */
+
+ if (tb[TCA_TCINDEX_HASH])
+ cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
+@@ -366,7 +391,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ }
+ cp->h = p->h;
+
+- err = tcindex_filter_result_init(&new_filter_result, net);
++ err = tcindex_filter_result_init(&new_filter_result, cp, net);
+ if (err < 0)
+ goto errout_alloc;
+ if (old_r)
+@@ -434,7 +459,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ goto errout_alloc;
+ f->key = handle;
+ f->next = NULL;
+- err = tcindex_filter_result_init(&f->result, net);
++ err = tcindex_filter_result_init(&f->result, cp, net);
+ if (err < 0) {
+ kfree(f);
+ goto errout_alloc;
+@@ -447,7 +472,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
+ }
+
+ if (old_r && old_r != r) {
+- err = tcindex_filter_result_init(old_r, net);
++ err = tcindex_filter_result_init(old_r, cp, net);
+ if (err < 0) {
+ kfree(f);
+ goto errout_alloc;
+@@ -571,6 +596,14 @@ static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
+ for (i = 0; i < p->hash; i++) {
+ struct tcindex_filter_result *r = p->perfect + i;
+
++ /* tcf_queue_work() does not guarantee the ordering we
++ * want, so we have to take this refcnt temporarily to
++ * ensure 'p' is freed after all tcindex_filter_result
++ * here. Imperfect hash does not need this, because it
++ * uses linked lists rather than an array.
++ */
++ tcindex_data_get(p);
++
+ tcf_unbind_filter(tp, &r->res);
+ if (tcf_exts_get_net(&r->exts))
+ tcf_queue_work(&r->rwork,
+diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
+index be52886a5edb..fb2233ca9103 100644
+--- a/sound/soc/codecs/tas2562.c
++++ b/sound/soc/codecs/tas2562.c
+@@ -409,7 +409,7 @@ static const struct snd_kcontrol_new vsense_switch =
+ 1, 1);
+
+ static const struct snd_kcontrol_new tas2562_snd_controls[] = {
+- SOC_SINGLE_TLV("Amp Gain Volume", TAS2562_PB_CFG1, 0, 0x1c, 0,
++ SOC_SINGLE_TLV("Amp Gain Volume", TAS2562_PB_CFG1, 1, 0x1c, 0,
+ tas2562_dac_tlv),
+ };
+
+diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
+index 9d5405881209..434737b2b2b2 100644
+--- a/sound/soc/jz4740/jz4740-i2s.c
++++ b/sound/soc/jz4740/jz4740-i2s.c
+@@ -83,7 +83,7 @@
+ #define JZ_AIC_I2S_STATUS_BUSY BIT(2)
+
+ #define JZ_AIC_CLK_DIV_MASK 0xf
+-#define I2SDIV_DV_SHIFT 8
++#define I2SDIV_DV_SHIFT 0
+ #define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
+ #define I2SDIV_IDV_SHIFT 8
+ #define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
+diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
+index 8cb504d30384..5ef1c15e88ad 100644
+--- a/tools/accounting/getdelays.c
++++ b/tools/accounting/getdelays.c
+@@ -136,7 +136,7 @@ static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
+ msg.g.version = 0x1;
+ na = (struct nlattr *) GENLMSG_DATA(&msg);
+ na->nla_type = nla_type;
+- na->nla_len = nla_len + 1 + NLA_HDRLEN;
++ na->nla_len = nla_len + NLA_HDRLEN;
+ memcpy(NLA_DATA(na), nla_data, nla_len);
+ msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
+