author | Mike Pagano <mpagano@gentoo.org> | 2018-12-01 10:04:28 -0500
---|---|---
committer | Mike Pagano <mpagano@gentoo.org> | 2018-12-01 10:04:28 -0500
commit | 626f2c8ce77608ff669ec8d0e1ee7571f3f28c56 (patch)
tree | d7ec9d3068189460a1941bf4c430d7690964ae25
parent | proj/linux-patches: Linux patch 4.9.141 (diff)
download | linux-patches-626f2c8ce77608ff669ec8d0e1ee7571f3f28c56.tar.gz linux-patches-626f2c8ce77608ff669ec8d0e1ee7571f3f28c56.tar.bz2 linux-patches-626f2c8ce77608ff669ec8d0e1ee7571f3f28c56.zip
proj/linux-patches: Linux patch 4.9.142
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r-- | 0000_README | 4
-rw-r--r-- | 1141_linux-4.9.142.patch | 3926
2 files changed, 3930 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 2838e5fb..1aec0750 100644 --- a/0000_README +++ b/0000_README @@ -607,6 +607,10 @@ Patch: 1140_linux-4.9.141.patch From: http://www.kernel.org Desc: Linux 4.9.141 +Patch: 1141_linux-4.9.142.patch +From: http://www.kernel.org +Desc: Linux 4.9.142 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1141_linux-4.9.142.patch b/1141_linux-4.9.142.patch new file mode 100644 index 00000000..2958496b --- /dev/null +++ b/1141_linux-4.9.142.patch @@ -0,0 +1,3926 @@ +diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt +index 35e17f748ca7..af5859b2d0f9 100644 +--- a/Documentation/sysctl/fs.txt ++++ b/Documentation/sysctl/fs.txt +@@ -34,7 +34,9 @@ Currently, these files are in /proc/sys/fs: + - overflowgid + - pipe-user-pages-hard + - pipe-user-pages-soft ++- protected_fifos + - protected_hardlinks ++- protected_regular + - protected_symlinks + - suid_dumpable + - super-max +@@ -182,6 +184,24 @@ applied. + + ============================================================== + ++protected_fifos: ++ ++The intent of this protection is to avoid unintentional writes to ++an attacker-controlled FIFO, where a program expected to create a regular ++file. ++ ++When set to "0", writing to FIFOs is unrestricted. ++ ++When set to "1" don't allow O_CREAT open on FIFOs that we don't own ++in world writable sticky directories, unless they are owned by the ++owner of the directory. ++ ++When set to "2" it also applies to group writable sticky directories. ++ ++This protection is based on the restrictions in Openwall. ++ ++============================================================== ++ + protected_hardlinks: + + A long-standing class of security issues is the hardlink-based +@@ -202,6 +222,22 @@ This protection is based on the restrictions in Openwall and grsecurity. + + ============================================================== + ++protected_regular: ++ ++This protection is similar to protected_fifos, but it ++avoids writes to an attacker-controlled regular file, where a program ++expected to create one. ++ ++When set to "0", writing to regular files is unrestricted. ++ ++When set to "1" don't allow O_CREAT open on regular files that we ++don't own in world writable sticky directories, unless they are ++owned by the owner of the directory. ++ ++When set to "2" it also applies to group writable sticky directories. 
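
The documentation hunk above introduces the fs.protected_fifos and fs.protected_regular sysctls but, being a raw stable patch, carries no usage example. The sketch below is not part of the patch; it assumes a kernel carrying this backport, root privileges for the procfs writes, and a purely illustrative /tmp/example-output path. It sets both knobs to their strictest value, "2", and shows the O_CREAT | O_EXCL open pattern that these restrictions are meant to back up.

```c
/*
 * Illustrative userspace sketch (not from the patch): enable the new
 * fs.protected_fifos / fs.protected_regular restrictions via procfs and
 * demonstrate the exclusive-create pattern they complement.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_sysctl(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	int fd;

	/* "2" also covers group-writable sticky directories (see doc text). */
	if (write_sysctl("/proc/sys/fs/protected_fifos", "2") ||
	    write_sysctl("/proc/sys/fs/protected_regular", "2"))
		perror("writing sysctls (root and a patched kernel required)");

	/*
	 * Independently of the sysctls, O_CREAT | O_EXCL refuses to reuse a
	 * pre-existing (possibly attacker-planted) FIFO or regular file in a
	 * shared sticky directory such as /tmp; open() fails with EEXIST.
	 */
	fd = open("/tmp/example-output", O_WRONLY | O_CREAT | O_EXCL, 0600);
	if (fd < 0)
		perror("open");
	else
		close(fd);

	return 0;
}
```

In practice these values are normally set persistently via /etc/sysctl.d rather than from code; the program form here is only illustrative.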
++ ++============================================================== ++ + protected_symlinks: + + A long-standing class of security issues is the symlink-based +diff --git a/MAINTAINERS b/MAINTAINERS +index 63cefa62324c..4f559f5b3a89 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -11469,6 +11469,7 @@ F: arch/alpha/kernel/srm_env.c + + STABLE BRANCH + M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> ++M: Sasha Levin <sashal@kernel.org> + L: stable@vger.kernel.org + S: Supported + F: Documentation/stable_kernel_rules.txt +diff --git a/Makefile b/Makefile +index 8eba73521a7f..72ed8ff90329 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 141 ++SUBLEVEL = 142 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile +index 92110c2c6c59..ee94597773fa 100644 +--- a/arch/arm64/Makefile ++++ b/arch/arm64/Makefile +@@ -10,7 +10,7 @@ + # + # Copyright (C) 1995-2001 by Russell King + +-LDFLAGS_vmlinux :=-p --no-undefined -X ++LDFLAGS_vmlinux :=--no-undefined -X + CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET) + GZFLAGS :=-9 + +diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h +index f6fda8482f60..3b10f532c28e 100644 +--- a/arch/powerpc/include/asm/io.h ++++ b/arch/powerpc/include/asm/io.h +@@ -333,19 +333,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, + * their hooks, a bitfield is reserved for use by the platform near the + * top of MMIO addresses (not PIO, those have to cope the hard way). + * +- * This bit field is 12 bits and is at the top of the IO virtual +- * addresses PCI_IO_INDIRECT_TOKEN_MASK. ++ * The highest address in the kernel virtual space are: + * +- * The kernel virtual space is thus: ++ * d0003fffffffffff # with Hash MMU ++ * c00fffffffffffff # with Radix MMU + * +- * 0xD000000000000000 : vmalloc +- * 0xD000080000000000 : PCI PHB IO space +- * 0xD000080080000000 : ioremap +- * 0xD0000fffffffffff : end of ioremap region +- * +- * Since the top 4 bits are reserved as the region ID, we use thus +- * the next 12 bits and keep 4 bits available for the future if the +- * virtual address space is ever to be extended. ++ * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits ++ * that can be used for the field. + * + * The direct IO mapping operations will then mask off those bits + * before doing the actual access, though that only happen when +@@ -357,8 +351,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, + */ + + #ifdef CONFIG_PPC_INDIRECT_MMIO +-#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul +-#define PCI_IO_IND_TOKEN_SHIFT 48 ++#define PCI_IO_IND_TOKEN_SHIFT 52 ++#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT) + #define PCI_FIX_ADDR(addr) \ + ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) + #define PCI_GET_ADDR_TOKEN(addr) \ +diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h +index 2e0e67ef3544..e8cedf32345a 100644 +--- a/arch/powerpc/kvm/trace.h ++++ b/arch/powerpc/kvm/trace.h +@@ -5,8 +5,6 @@ + + #undef TRACE_SYSTEM + #define TRACE_SYSTEM kvm +-#define TRACE_INCLUDE_PATH . +-#define TRACE_INCLUDE_FILE trace + + /* + * Tracepoint for guest mode entry. +@@ -119,4 +117,10 @@ TRACE_EVENT(kvm_check_requests, + #endif /* _TRACE_KVM_H */ + + /* This part must be outside protection */ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++ ++#define TRACE_INCLUDE_PATH . 
++#define TRACE_INCLUDE_FILE trace ++ + #include <trace/define_trace.h> +diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h +index 7ec534d1db9f..7eadbf449a1f 100644 +--- a/arch/powerpc/kvm/trace_booke.h ++++ b/arch/powerpc/kvm/trace_booke.h +@@ -5,8 +5,6 @@ + + #undef TRACE_SYSTEM + #define TRACE_SYSTEM kvm_booke +-#define TRACE_INCLUDE_PATH . +-#define TRACE_INCLUDE_FILE trace_booke + + #define kvm_trace_symbol_exit \ + {0, "CRITICAL"}, \ +@@ -217,4 +215,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio, + #endif + + /* This part must be outside protection */ ++ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++ ++#define TRACE_INCLUDE_PATH . ++#define TRACE_INCLUDE_FILE trace_booke ++ + #include <trace/define_trace.h> +diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h +index fb21990c0fb4..d9a21a7bd5c9 100644 +--- a/arch/powerpc/kvm/trace_hv.h ++++ b/arch/powerpc/kvm/trace_hv.h +@@ -8,8 +8,6 @@ + + #undef TRACE_SYSTEM + #define TRACE_SYSTEM kvm_hv +-#define TRACE_INCLUDE_PATH . +-#define TRACE_INCLUDE_FILE trace_hv + + #define kvm_trace_symbol_hcall \ + {H_REMOVE, "H_REMOVE"}, \ +@@ -496,4 +494,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit, + #endif /* _TRACE_KVM_HV_H */ + + /* This part must be outside protection */ ++ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++ ++#define TRACE_INCLUDE_PATH . ++#define TRACE_INCLUDE_FILE trace_hv ++ + #include <trace/define_trace.h> +diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h +index d44f324184fb..e8e2b9ad4ac6 100644 +--- a/arch/powerpc/kvm/trace_pr.h ++++ b/arch/powerpc/kvm/trace_pr.h +@@ -7,8 +7,6 @@ + + #undef TRACE_SYSTEM + #define TRACE_SYSTEM kvm_pr +-#define TRACE_INCLUDE_PATH . +-#define TRACE_INCLUDE_FILE trace_pr + + TRACE_EVENT(kvm_book3s_reenter, + TP_PROTO(int r, struct kvm_vcpu *vcpu), +@@ -271,4 +269,11 @@ TRACE_EVENT(kvm_unmap_hva, + #endif /* _TRACE_KVM_H */ + + /* This part must be outside protection */ ++ ++#undef TRACE_INCLUDE_PATH ++#undef TRACE_INCLUDE_FILE ++ ++#define TRACE_INCLUDE_PATH . ++#define TRACE_INCLUDE_FILE trace_pr ++ + #include <trace/define_trace.h> +diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c +index 6cff96e0d77b..0ef83c274019 100644 +--- a/arch/powerpc/mm/numa.c ++++ b/arch/powerpc/mm/numa.c +@@ -1289,7 +1289,7 @@ static long vphn_get_associativity(unsigned long cpu, + + switch (rc) { + case H_FUNCTION: +- printk(KERN_INFO ++ printk_once(KERN_INFO + "VPHN is not supported. 
Disabling polling...\n"); + stop_topology_update(); + break; +diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c +index cb2cd04e6698..b6c85b760305 100644 +--- a/arch/s390/mm/gmap.c ++++ b/arch/s390/mm/gmap.c +@@ -686,6 +686,8 @@ void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to) + vmaddr |= gaddr & ~PMD_MASK; + /* Find vma in the parent mm */ + vma = find_vma(gmap->mm, vmaddr); ++ if (!vma) ++ continue; + size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK)); + zap_page_range(vma, vmaddr, size, NULL); + } +diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c +index a3dcc12bef4a..8c700069060b 100644 +--- a/arch/x86/events/intel/uncore_snb.c ++++ b/arch/x86/events/intel/uncore_snb.c +@@ -14,6 +14,25 @@ + #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 + #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f + #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f ++#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c ++#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 ++#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 ++#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f ++#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f ++#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc ++#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0 ++#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10 ++#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4 ++#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f ++#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f ++#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2 ++#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30 ++#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18 ++#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6 ++#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31 ++#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33 ++#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca ++#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32 + + /* SNB event control */ + #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff +@@ -631,7 +650,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = { + PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), + .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), + }, +- ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 
PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, ++ { /* IMC */ ++ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC), ++ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), ++ }, + { /* end: all zeroes */ }, + }; + +@@ -680,6 +774,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { + IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ + IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ + IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ ++ IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */ ++ IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */ ++ IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */ ++ IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */ ++ IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */ ++ IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */ ++ IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */ ++ IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */ ++ IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */ ++ IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */ ++ IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */ ++ IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */ ++ IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */ ++ IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */ ++ IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */ ++ IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */ ++ IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */ ++ IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */ ++ IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */ + { /* end marker */ } + }; + +diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c +index a321d7d849c6..326b9ba4518e 100644 +--- 
a/drivers/block/floppy.c ++++ b/drivers/block/floppy.c +@@ -3823,10 +3823,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive) + bio.bi_end_io = floppy_rb0_cb; + bio_set_op_attrs(&bio, REQ_OP_READ, 0); + ++ init_completion(&cbdata.complete); ++ + submit_bio(&bio); + process_fd_request(); + +- init_completion(&cbdata.complete); + wait_for_completion(&cbdata.complete); + + __free_page(page); +diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c +index ef1fa8145419..fa86946d12aa 100644 +--- a/drivers/cpufreq/imx6q-cpufreq.c ++++ b/drivers/cpufreq/imx6q-cpufreq.c +@@ -130,8 +130,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) + /* Ensure the arm clock divider is what we expect */ + ret = clk_set_rate(arm_clk, new_freq * 1000); + if (ret) { ++ int ret1; ++ + dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); +- regulator_set_voltage_tol(arm_reg, volt_old, 0); ++ ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0); ++ if (ret1) ++ dev_warn(cpu_dev, ++ "failed to restore vddarm voltage: %d\n", ret1); + return ret; + } + +diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c +index 1d1c9693ebfb..8ee91777abce 100644 +--- a/drivers/firmware/efi/arm-init.c ++++ b/drivers/firmware/efi/arm-init.c +@@ -256,6 +256,10 @@ void __init efi_init(void) + (params.mmap & ~PAGE_MASK))); + + init_screen_info(); ++ ++ /* ARM does not permit early mappings to persist across paging_init() */ ++ if (IS_ENABLED(CONFIG_ARM)) ++ efi_memmap_unmap(); + } + + static int __init register_gop_device(void) +diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c +index 4d788e0debfe..069c5a4479e6 100644 +--- a/drivers/firmware/efi/arm-runtime.c ++++ b/drivers/firmware/efi/arm-runtime.c +@@ -118,7 +118,7 @@ static int __init arm_enable_runtime_services(void) + { + u64 mapsize; + +- if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) { ++ if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); + return 0; + } +diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c +index 78686443cb37..3fd2b450c649 100644 +--- a/drivers/firmware/efi/memmap.c ++++ b/drivers/firmware/efi/memmap.c +@@ -117,6 +117,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data) + + void __init efi_memmap_unmap(void) + { ++ if (!efi_enabled(EFI_MEMMAP)) ++ return; ++ + if (!efi.memmap.late) { + unsigned long size; + +diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c +index 2ec402ae14de..9e2fe12c2858 100644 +--- a/drivers/gpio/gpiolib.c ++++ b/drivers/gpio/gpiolib.c +@@ -1153,7 +1153,7 @@ int gpiochip_add_data(struct gpio_chip *chip, void *data) + gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); + if (!gdev->descs) { + status = -ENOMEM; +- goto err_free_gdev; ++ goto err_free_ida; + } + + if (chip->ngpio == 0) { +@@ -1285,8 +1285,9 @@ err_free_label: + kfree(gdev->label); + err_free_descs: + kfree(gdev->descs); +-err_free_gdev: ++err_free_ida: + ida_simple_remove(&gpio_ida, gdev->id); ++err_free_gdev: + /* failures here can mean systems won't boot... 
*/ + pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__, + gdev->base, gdev->base + gdev->ngpio - 1, +diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c +index f54afd2113a9..736133f5c5a9 100644 +--- a/drivers/gpu/drm/ast/ast_drv.c ++++ b/drivers/gpu/drm/ast/ast_drv.c +@@ -60,8 +60,29 @@ static const struct pci_device_id pciidlist[] = { + + MODULE_DEVICE_TABLE(pci, pciidlist); + ++static void ast_kick_out_firmware_fb(struct pci_dev *pdev) ++{ ++ struct apertures_struct *ap; ++ bool primary = false; ++ ++ ap = alloc_apertures(1); ++ if (!ap) ++ return; ++ ++ ap->ranges[0].base = pci_resource_start(pdev, 0); ++ ap->ranges[0].size = pci_resource_len(pdev, 0); ++ ++#ifdef CONFIG_X86 ++ primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; ++#endif ++ drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary); ++ kfree(ap); ++} ++ + static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + { ++ ast_kick_out_firmware_fb(pdev); ++ + return drm_get_pci_dev(pdev, ent, &driver); + } + +diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c +index 5957c3e659fe..57205016b04a 100644 +--- a/drivers/gpu/drm/ast/ast_mode.c ++++ b/drivers/gpu/drm/ast/ast_mode.c +@@ -545,6 +545,7 @@ static int ast_crtc_do_set_base(struct drm_crtc *crtc, + } + ast_bo_unreserve(bo); + ++ ast_set_offset_reg(crtc); + ast_set_start_address_crt1(crtc, (u32)gpu_addr); + + return 0; +@@ -1235,7 +1236,7 @@ static int ast_cursor_move(struct drm_crtc *crtc, + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); + + /* dummy write to fire HWC */ +- ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xCB, 0xFF, 0x00); ++ ast_show_cursor(crtc); + + return 0; + } +diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c +index d28c4cf7c1ee..dc92dc41ef93 100644 +--- a/drivers/infiniband/core/verbs.c ++++ b/drivers/infiniband/core/verbs.c +@@ -1522,7 +1522,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) + */ + if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) { + if (attr.qp_state >= IB_QPS_INIT) { +- if (qp->device->get_link_layer(qp->device, attr.port_num) != ++ if (rdma_port_get_link_layer(qp->device, attr.port_num) != + IB_LINK_LAYER_INFINIBAND) + return true; + goto lid_check; +@@ -1531,7 +1531,7 @@ static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid) + + /* Can't get a quick answer, iterate over all ports */ + for (port = 0; port < qp->device->phys_port_cnt; port++) +- if (qp->device->get_link_layer(qp->device, port) != ++ if (rdma_port_get_link_layer(qp->device, port) != + IB_LINK_LAYER_INFINIBAND) + num_eth_ports++; + +diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c +index 018a41562704..619475c7d761 100644 +--- a/drivers/infiniband/hw/hfi1/user_sdma.c ++++ b/drivers/infiniband/hw/hfi1/user_sdma.c +@@ -148,11 +148,8 @@ MODULE_PARM_DESC(sdma_comp_size, "Size of User SDMA completion ring. 
Default: 12 + #define TXREQ_FLAGS_REQ_LAST_PKT BIT(0) + + /* SDMA request flag bits */ +-#define SDMA_REQ_FOR_THREAD 1 +-#define SDMA_REQ_SEND_DONE 2 +-#define SDMA_REQ_HAVE_AHG 3 +-#define SDMA_REQ_HAS_ERROR 4 +-#define SDMA_REQ_DONE_ERROR 5 ++#define SDMA_REQ_HAVE_AHG 1 ++#define SDMA_REQ_HAS_ERROR 2 + + #define SDMA_PKT_Q_INACTIVE BIT(0) + #define SDMA_PKT_Q_ACTIVE BIT(1) +@@ -252,8 +249,6 @@ struct user_sdma_request { + u64 seqsubmitted; + struct list_head txps; + unsigned long flags; +- /* status of the last txreq completed */ +- int status; + }; + + /* +@@ -546,7 +541,6 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, + struct sdma_req_info info; + struct user_sdma_request *req; + u8 opcode, sc, vl; +- int req_queued = 0; + u16 dlid; + u32 selector; + +@@ -611,11 +605,13 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, + req->data_iovs = req_iovcnt(info.ctrl) - 1; /* subtract header vector */ + req->pq = pq; + req->cq = cq; +- req->status = -1; + INIT_LIST_HEAD(&req->txps); + + memcpy(&req->info, &info, sizeof(info)); + ++ /* The request is initialized, count it */ ++ atomic_inc(&pq->n_reqs); ++ + if (req_opcode(info.ctrl) == EXPECTED) { + /* expected must have a TID info and at least one data vector */ + if (req->data_iovs < 2) { +@@ -704,7 +700,7 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, + memcpy(&req->iovs[i].iov, iovec + idx++, sizeof(struct iovec)); + ret = pin_vector_pages(req, &req->iovs[i]); + if (ret) { +- req->status = ret; ++ req->data_iovs = i; + goto free_req; + } + req->data_len += req->iovs[i].iov.iov_len; +@@ -772,14 +768,10 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, + } + + set_comp_state(pq, cq, info.comp_idx, QUEUED, 0); +- atomic_inc(&pq->n_reqs); +- req_queued = 1; + /* Send the first N packets in the request to buy us some time */ + ret = user_sdma_send_pkts(req, pcount); +- if (unlikely(ret < 0 && ret != -EBUSY)) { +- req->status = ret; ++ if (unlikely(ret < 0 && ret != -EBUSY)) + goto free_req; +- } + + /* + * It is possible that the SDMA engine would have processed all the +@@ -796,17 +788,11 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, + * request have been submitted to the SDMA engine. However, it + * will not wait for send completions. + */ +- while (!test_bit(SDMA_REQ_SEND_DONE, &req->flags)) { ++ while (req->seqsubmitted != req->info.npkts) { + ret = user_sdma_send_pkts(req, pcount); + if (ret < 0) { +- if (ret != -EBUSY) { +- req->status = ret; +- set_bit(SDMA_REQ_DONE_ERROR, &req->flags); +- if (ACCESS_ONCE(req->seqcomp) == +- req->seqsubmitted - 1) +- goto free_req; +- return ret; +- } ++ if (ret != -EBUSY) ++ goto free_req; + wait_event_interruptible_timeout( + pq->busy.wait_dma, + (pq->state == SDMA_PKT_Q_ACTIVE), +@@ -817,10 +803,19 @@ int hfi1_user_sdma_process_request(struct file *fp, struct iovec *iovec, + *count += idx; + return 0; + free_req: +- user_sdma_free_request(req, true); +- if (req_queued) ++ /* ++ * If the submitted seqsubmitted == npkts, the completion routine ++ * controls the final state. If sequbmitted < npkts, wait for any ++ * outstanding packets to finish before cleaning up. 
++ */ ++ if (req->seqsubmitted < req->info.npkts) { ++ if (req->seqsubmitted) ++ wait_event(pq->busy.wait_dma, ++ (req->seqcomp == req->seqsubmitted - 1)); ++ user_sdma_free_request(req, true); + pq_update(pq); +- set_comp_state(pq, cq, info.comp_idx, ERROR, req->status); ++ set_comp_state(pq, cq, info.comp_idx, ERROR, ret); ++ } + return ret; + } + +@@ -903,10 +898,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) + pq = req->pq; + + /* If tx completion has reported an error, we are done. */ +- if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) { +- set_bit(SDMA_REQ_DONE_ERROR, &req->flags); ++ if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) + return -EFAULT; +- } + + /* + * Check if we might have sent the entire request already +@@ -929,10 +922,8 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) + * with errors. If so, we are not going to process any + * more packets from this request. + */ +- if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) { +- set_bit(SDMA_REQ_DONE_ERROR, &req->flags); ++ if (test_bit(SDMA_REQ_HAS_ERROR, &req->flags)) + return -EFAULT; +- } + + tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL); + if (!tx) +@@ -1090,7 +1081,6 @@ dosend: + ret = sdma_send_txlist(req->sde, &pq->busy, &req->txps, &count); + req->seqsubmitted += count; + if (req->seqsubmitted == req->info.npkts) { +- set_bit(SDMA_REQ_SEND_DONE, &req->flags); + /* + * The txreq has already been submitted to the HW queue + * so we can free the AHG entry now. Corruption will not +@@ -1489,11 +1479,15 @@ static int set_txreq_header_ahg(struct user_sdma_request *req, + return diff; + } + +-/* +- * SDMA tx request completion callback. Called when the SDMA progress +- * state machine gets notification that the SDMA descriptors for this +- * tx request have been processed by the DMA engine. Called in +- * interrupt context. ++/** ++ * user_sdma_txreq_cb() - SDMA tx request completion callback. ++ * @txreq: valid sdma tx request ++ * @status: success/failure of request ++ * ++ * Called when the SDMA progress state machine gets notification that ++ * the SDMA descriptors for this tx request have been processed by the ++ * DMA engine. Called in interrupt context. ++ * Only do work on completed sequences. 
+ */ + static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) + { +@@ -1502,7 +1496,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) + struct user_sdma_request *req; + struct hfi1_user_sdma_pkt_q *pq; + struct hfi1_user_sdma_comp_q *cq; +- u16 idx; ++ enum hfi1_sdma_comp_state state = COMPLETE; + + if (!tx->req) + return; +@@ -1515,31 +1509,19 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) + SDMA_DBG(req, "SDMA completion with error %d", + status); + set_bit(SDMA_REQ_HAS_ERROR, &req->flags); ++ state = ERROR; + } + + req->seqcomp = tx->seqnum; + kmem_cache_free(pq->txreq_cache, tx); +- tx = NULL; +- +- idx = req->info.comp_idx; +- if (req->status == -1 && status == SDMA_TXREQ_S_OK) { +- if (req->seqcomp == req->info.npkts - 1) { +- req->status = 0; +- user_sdma_free_request(req, false); +- pq_update(pq); +- set_comp_state(pq, cq, idx, COMPLETE, 0); +- } +- } else { +- if (status != SDMA_TXREQ_S_OK) +- req->status = status; +- if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) && +- (test_bit(SDMA_REQ_SEND_DONE, &req->flags) || +- test_bit(SDMA_REQ_DONE_ERROR, &req->flags))) { +- user_sdma_free_request(req, false); +- pq_update(pq); +- set_comp_state(pq, cq, idx, ERROR, req->status); +- } +- } ++ ++ /* sequence isn't complete? We are done */ ++ if (req->seqcomp != req->info.npkts - 1) ++ return; ++ ++ user_sdma_free_request(req, false); ++ set_comp_state(pq, cq, req->info.comp_idx, state, status); ++ pq_update(pq); + } + + static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq) +@@ -1572,6 +1554,8 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin) + if (!node) + continue; + ++ req->iovs[i].node = NULL; ++ + if (unpin) + hfi1_mmu_rb_remove(req->pq->handler, + &node->rb); +diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c +index f397a5b6910f..2e52015634f9 100644 +--- a/drivers/input/joystick/xpad.c ++++ b/drivers/input/joystick/xpad.c +@@ -89,8 +89,10 @@ + + #define XPAD_PKT_LEN 64 + +-/* xbox d-pads should map to buttons, as is required for DDR pads +- but we map them to axes when possible to simplify things */ ++/* ++ * xbox d-pads should map to buttons, as is required for DDR pads ++ * but we map them to axes when possible to simplify things ++ */ + #define MAP_DPAD_TO_BUTTONS (1 << 0) + #define MAP_TRIGGERS_TO_BUTTONS (1 << 1) + #define MAP_STICKS_TO_NULL (1 << 2) +@@ -126,45 +128,77 @@ static const struct xpad_device { + u8 mapping; + u8 xtype; + } xpad_device[] = { ++ { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 }, ++ { 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX }, ++ { 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX }, ++ { 0x044f, 0x0f07, "Thrustmaster, Inc. 
Controller", 0, XTYPE_XBOX }, ++ { 0x044f, 0x0f10, "Thrustmaster Modena GT Wheel", 0, XTYPE_XBOX }, ++ { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, + { 0x045e, 0x0202, "Microsoft X-Box pad v1 (US)", 0, XTYPE_XBOX }, + { 0x045e, 0x0285, "Microsoft X-Box pad (Japan)", 0, XTYPE_XBOX }, + { 0x045e, 0x0287, "Microsoft Xbox Controller S", 0, XTYPE_XBOX }, ++ { 0x045e, 0x0288, "Microsoft Xbox Controller S v2", 0, XTYPE_XBOX }, + { 0x045e, 0x0289, "Microsoft X-Box pad v2 (US)", 0, XTYPE_XBOX }, + { 0x045e, 0x028e, "Microsoft X-Box 360 pad", 0, XTYPE_XBOX360 }, ++ { 0x045e, 0x028f, "Microsoft X-Box 360 pad v2", 0, XTYPE_XBOX360 }, ++ { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, + { 0x045e, 0x02d1, "Microsoft X-Box One pad", 0, XTYPE_XBOXONE }, + { 0x045e, 0x02dd, "Microsoft X-Box One pad (Firmware 2015)", 0, XTYPE_XBOXONE }, + { 0x045e, 0x02e3, "Microsoft X-Box One Elite pad", 0, XTYPE_XBOXONE }, +- { 0x045e, 0x0291, "Xbox 360 Wireless Receiver (XBOX)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, ++ { 0x045e, 0x02ea, "Microsoft X-Box One S pad", 0, XTYPE_XBOXONE }, + { 0x045e, 0x0719, "Xbox 360 Wireless Receiver", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360W }, +- { 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX }, +- { 0x044f, 0xb326, "Thrustmaster Gamepad GP XID", 0, XTYPE_XBOX360 }, + { 0x046d, 0xc21d, "Logitech Gamepad F310", 0, XTYPE_XBOX360 }, + { 0x046d, 0xc21e, "Logitech Gamepad F510", 0, XTYPE_XBOX360 }, + { 0x046d, 0xc21f, "Logitech Gamepad F710", 0, XTYPE_XBOX360 }, + { 0x046d, 0xc242, "Logitech Chillstream Controller", 0, XTYPE_XBOX360 }, + { 0x046d, 0xca84, "Logitech Xbox Cordless Controller", 0, XTYPE_XBOX }, + { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX }, ++ { 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX }, ++ { 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 }, ++ { 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 }, + { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX }, + { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX }, ++ { 0x05fe, 0x3030, "Chic Controller", 0, XTYPE_XBOX }, ++ { 0x05fe, 0x3031, "Chic Controller", 0, XTYPE_XBOX }, ++ { 0x062a, 0x0020, "Logic3 Xbox GamePad", 0, XTYPE_XBOX }, ++ { 0x062a, 0x0033, "Competition Pro Steering Wheel", 0, XTYPE_XBOX }, ++ { 0x06a3, 0x0200, "Saitek Racing Wheel", 0, XTYPE_XBOX }, ++ { 0x06a3, 0x0201, "Saitek Adrenalin", 0, XTYPE_XBOX }, ++ { 0x06a3, 0xf51a, "Saitek P3600", 0, XTYPE_XBOX360 }, ++ { 0x0738, 0x4506, "Mad Catz 4506 Wireless Controller", 0, XTYPE_XBOX }, + { 0x0738, 0x4516, "Mad Catz Control Pad", 0, XTYPE_XBOX }, ++ { 0x0738, 0x4520, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX }, + { 0x0738, 0x4522, "Mad Catz LumiCON", 0, XTYPE_XBOX }, + { 0x0738, 0x4526, "Mad Catz Control Pad Pro", 0, XTYPE_XBOX }, ++ { 0x0738, 0x4530, "Mad Catz Universal MC2 Racing Wheel and Pedals", 0, XTYPE_XBOX }, + { 0x0738, 0x4536, "Mad Catz MicroCON", 0, XTYPE_XBOX }, + { 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX }, ++ { 0x0738, 0x4586, "Mad Catz MicroCon Wireless Controller", 0, XTYPE_XBOX }, ++ { 0x0738, 0x4588, "Mad Catz Blaster", 0, XTYPE_XBOX }, ++ { 0x0738, 0x45ff, "Mad Catz Beat Pad (w/ Handle)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x0738, 0x4718, "Mad Catz Street 
Fighter IV FightStick SE", 0, XTYPE_XBOX360 }, + { 0x0738, 0x4726, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x0738, 0x4736, "Mad Catz MicroCon Gamepad", 0, XTYPE_XBOX360 }, + { 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0738, 0x4740, "Mad Catz Beat Pad", 0, XTYPE_XBOX360 }, ++ { 0x0738, 0x4743, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, ++ { 0x0738, 0x4758, "Mad Catz Arcade Game Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0738, 0x4a01, "Mad Catz FightStick TE 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, + { 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, ++ { 0x0738, 0x9871, "Mad Catz Portable Drum", 0, XTYPE_XBOX360 }, + { 0x0738, 0xb726, "Mad Catz Xbox controller - MW2", 0, XTYPE_XBOX360 }, ++ { 0x0738, 0xb738, "Mad Catz MVC2TE Stick 2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 }, + { 0x0738, 0xcb02, "Saitek Cyborg Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0738, 0xcb03, "Saitek P3200 Rumble Pad - PC/Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x0738, 0xcb29, "Saitek Aviator Stick AV8R02", 0, XTYPE_XBOX360 }, + { 0x0738, 0xf738, "Super SFIV FightStick TE S", 0, XTYPE_XBOX360 }, ++ { 0x07ff, 0xffff, "Mad Catz GamePad", 0, XTYPE_XBOX360 }, ++ { 0x0c12, 0x0005, "Intec wireless", 0, XTYPE_XBOX }, ++ { 0x0c12, 0x8801, "Nyko Xbox Controller", 0, XTYPE_XBOX }, + { 0x0c12, 0x8802, "Zeroplus Xbox Controller", 0, XTYPE_XBOX }, + { 0x0c12, 0x8809, "RedOctane Xbox Dance Pad", DANCEPAD_MAP_CONFIG, XTYPE_XBOX }, + { 0x0c12, 0x880a, "Pelican Eclipse PL-2023", 0, XTYPE_XBOX }, +@@ -172,35 +206,66 @@ static const struct xpad_device { + { 0x0c12, 0x9902, "HAMA VibraX - *FAULTY HARDWARE*", 0, XTYPE_XBOX }, + { 0x0d2f, 0x0002, "Andamiro Pump It Up pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x0e4c, 0x1097, "Radica Gamester Controller", 0, XTYPE_XBOX }, ++ { 0x0e4c, 0x1103, "Radica Gamester Reflex", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX }, + { 0x0e4c, 0x2390, "Radica Games Jtech Controller", 0, XTYPE_XBOX }, ++ { 0x0e4c, 0x3510, "Radica Gamester", 0, XTYPE_XBOX }, + { 0x0e6f, 0x0003, "Logic3 Freebird wireless Controller", 0, XTYPE_XBOX }, + { 0x0e6f, 0x0005, "Eclipse wireless Controller", 0, XTYPE_XBOX }, + { 0x0e6f, 0x0006, "Edge wireless Controller", 0, XTYPE_XBOX }, ++ { 0x0e6f, 0x0008, "After Glow Pro Controller", 0, XTYPE_XBOX }, + { 0x0e6f, 0x0105, "HSM3 Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0e6f, 0x0113, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x011f, "Rock Candy Gamepad Wired Controller", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x0131, "PDP EA Sports Controller", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x0133, "Xbox 360 Wired Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0139, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x013a, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x0147, "PDP Marvel Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x015c, "PDP Xbox One Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x0161, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x0162, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x0163, "PDP Xbox One Controller", 0, 
XTYPE_XBOXONE }, ++ { 0x0e6f, 0x0164, "PDP Battlefield One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x0165, "PDP Titanfall 2", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0201, "Pelican PL-3601 'TSZ' Wired Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, + { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, +- { 0x0e6f, 0x0146, "Rock Candy Wired Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, ++ { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x0413, "Afterglow AX.1 Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0x0501, "PDP Xbox 360 Controller", 0, XTYPE_XBOX360 }, ++ { 0x0e6f, 0xf900, "PDP Afterglow AX.1", 0, XTYPE_XBOX360 }, + { 0x0e8f, 0x0201, "SmartJoy Frag Xpad/PS2 adaptor", 0, XTYPE_XBOX }, + { 0x0e8f, 0x3008, "Generic xbox control (dealextreme)", 0, XTYPE_XBOX }, + { 0x0f0d, 0x000a, "Hori Co. DOA4 FightStick", 0, XTYPE_XBOX360 }, ++ { 0x0f0d, 0x000c, "Hori PadEX Turbo", 0, XTYPE_XBOX360 }, + { 0x0f0d, 0x000d, "Hori Fighting Stick EX2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x0f0d, 0x0016, "Hori Real Arcade Pro.EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x0f0d, 0x001b, "Hori Real Arcade Pro VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x0f0d, 0x0063, "Hori Real Arcade Pro Hayabusa (USA) Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, + { 0x0f0d, 0x0067, "HORIPAD ONE", 0, XTYPE_XBOXONE }, ++ { 0x0f0d, 0x0078, "Hori Real Arcade Pro V Kai Xbox One", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, ++ { 0x0f30, 0x010b, "Philips Recoil", 0, XTYPE_XBOX }, + { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX }, + { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX }, + { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX }, ++ { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 }, + { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 }, ++ { 0x12ab, 0x0303, "Mortal Kombat Klassic FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x12ab, 0x8809, "Xbox DDR dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, + { 0x1430, 0x4748, "RedOctane Guitar Hero X-plorer", 0, XTYPE_XBOX360 }, + { 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX }, ++ { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 }, + { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 }, + { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 }, ++ { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE }, + { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE }, + { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 }, + { 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, +@@ -208,27 +273,67 @@ static const struct xpad_device { + { 0x162e, 0xbeef, "Joytech Neo-Se Take2", 0, XTYPE_XBOX360 }, + { 0x1689, 0xfd00, "Razer Onza Tournament Edition", 0, XTYPE_XBOX360 }, + { 0x1689, 
0xfd01, "Razer Onza Classic Edition", 0, XTYPE_XBOX360 }, +- { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE }, +- { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, ++ { 0x1689, 0xfe00, "Razer Sabertooth", 0, XTYPE_XBOX360 }, + { 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 }, + { 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0x0130, "Ion Drum Rocker", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf018, "Mad Catz Street Fighter IV SE Fighting Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf019, "Mad Catz Brawlstick for Xbox 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf021, "Mad Cats Ghost Recon FS GamePad", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf023, "MLG Pro Circuit Controller (Xbox)", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf025, "Mad Catz Call Of Duty", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf027, "Mad Catz FPS Pro", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf028, "Street Fighter IV FightPad", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf02e, "Mad Catz Fightpad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf030, "Mad Catz Xbox 360 MC2 MicroCon Racing Wheel", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf036, "Mad Catz MicroCon GamePad Pro", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf038, "Street Fighter IV FightStick TE", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf039, "Mad Catz MvC2 TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf03a, "Mad Catz SFxT Fightstick Pro", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf03d, "Street Fighter IV Arcade Stick TE - Chun Li", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf03e, "Mad Catz MLG FightStick TE", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf03f, "Mad Catz FightStick SoulCaliber", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf042, "Mad Catz FightStick TES+", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf080, "Mad Catz FightStick TE2", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf501, "HoriPad EX2 Turbo", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf502, "Hori Real Arcade Pro.VX SA", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf503, "Hori Fighting Stick VX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf504, "Hori Real Arcade Pro. 
EX", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf505, "Hori Fighting Stick EX2B", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf506, "Hori Real Arcade Pro.EX Premium VLX", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf900, "Harmonix Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf901, "Gamestop Xbox 360 Controller", 0, XTYPE_XBOX360 }, + { 0x1bad, 0xf903, "Tron Xbox 360 controller", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf904, "PDP Versus Fighting Pad", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xf906, "MortalKombat FightStick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, ++ { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x530a, "Xbox 360 Pro EX Controller", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x531a, "PowerA Pro Ex", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5397, "FUS1ON Tournament Controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x541a, "PowerA Xbox One Mini Wired Controller", 0, XTYPE_XBOXONE }, ++ { 0x24c6, 0x542a, "Xbox ONE spectra", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x543a, "PowerA Xbox One wired controller", 0, XTYPE_XBOXONE }, + { 0x24c6, 0x5500, "Hori XBOX 360 EX 2 with Turbo", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5501, "Hori Real Arcade Pro VX-SA", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5502, "Hori Fighting Stick VX Alt", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5503, "Hori Fighting Edge", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, + { 0x24c6, 0x5506, "Hori SOULCALIBUR V Stick", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x550d, "Hori GEM Xbox controller", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x550e, "Hori Real Arcade Pro V Kai 360", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, ++ { 0x24c6, 0x551a, "PowerA FUSION Pro Controller", 0, XTYPE_XBOXONE }, ++ { 0x24c6, 0x561a, "PowerA FUSION Controller", 0, XTYPE_XBOXONE }, ++ { 0x24c6, 0x5b00, "ThrustMaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5b02, "Thrustmaster, Inc. GPX Controller", 0, XTYPE_XBOX360 }, + { 0x24c6, 0x5b03, "Thrustmaster Ferrari 458 Racing Wheel", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0x5d04, "Razer Sabertooth", 0, XTYPE_XBOX360 }, ++ { 0x24c6, 0xfafe, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, ++ { 0x3767, 0x0101, "Fanatec Speedster 3 Forceshock Wheel", 0, XTYPE_XBOX }, + { 0xffff, 0xffff, "Chinese-made Xbox Controller", 0, XTYPE_XBOX }, + { 0x0000, 0x0000, "Generic X-Box pad", 0, XTYPE_UNKNOWN } + }; +@@ -289,15 +394,15 @@ static const signed short xpad_abs_triggers[] = { + * match against vendor id as well. Wired Xbox 360 devices have protocol 1, + * wireless controllers have protocol 129. + */ +-#define XPAD_XBOX360_VENDOR_PROTOCOL(vend,pr) \ ++#define XPAD_XBOX360_VENDOR_PROTOCOL(vend, pr) \ + .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, \ + .idVendor = (vend), \ + .bInterfaceClass = USB_CLASS_VENDOR_SPEC, \ + .bInterfaceSubClass = 93, \ + .bInterfaceProtocol = (pr) + #define XPAD_XBOX360_VENDOR(vend) \ +- { XPAD_XBOX360_VENDOR_PROTOCOL(vend,1) }, \ +- { XPAD_XBOX360_VENDOR_PROTOCOL(vend,129) } ++ { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 1) }, \ ++ { XPAD_XBOX360_VENDOR_PROTOCOL((vend), 129) } + + /* The Xbox One controller uses subclass 71 and protocol 208. 
*/ + #define XPAD_XBOXONE_VENDOR_PROTOCOL(vend, pr) \ +@@ -307,37 +412,138 @@ static const signed short xpad_abs_triggers[] = { + .bInterfaceSubClass = 71, \ + .bInterfaceProtocol = (pr) + #define XPAD_XBOXONE_VENDOR(vend) \ +- { XPAD_XBOXONE_VENDOR_PROTOCOL(vend, 208) } ++ { XPAD_XBOXONE_VENDOR_PROTOCOL((vend), 208) } + +-static struct usb_device_id xpad_table[] = { ++static const struct usb_device_id xpad_table[] = { + { USB_INTERFACE_INFO('X', 'B', 0) }, /* X-Box USB-IF not approved class */ ++ XPAD_XBOX360_VENDOR(0x0079), /* GPD Win 2 Controller */ + XPAD_XBOX360_VENDOR(0x044f), /* Thrustmaster X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x045e), /* Microsoft X-Box 360 controllers */ + XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft X-Box One controllers */ + XPAD_XBOX360_VENDOR(0x046d), /* Logitech X-Box 360 style controllers */ ++ XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */ ++ XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */ + XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz X-Box 360 controllers */ + { USB_DEVICE(0x0738, 0x4540) }, /* Mad Catz Beat Pad */ + XPAD_XBOXONE_VENDOR(0x0738), /* Mad Catz FightStick TE 2 */ ++ XPAD_XBOX360_VENDOR(0x07ff), /* Mad Catz GamePad */ + XPAD_XBOX360_VENDOR(0x0e6f), /* 0x0e6f X-Box 360 controllers */ + XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */ ++ XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ ++ XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ ++ XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */ + XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */ + XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x146b), /* BigBen Interactive Controllers */ +- XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ +- XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */ +- XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */ +- XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ +- XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ +- XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */ + XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */ + XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */ + XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ ++ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ ++ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ ++ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ ++ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ + { } + }; + + MODULE_DEVICE_TABLE(usb, xpad_table); + ++struct xboxone_init_packet { ++ u16 idVendor; ++ u16 idProduct; ++ const u8 *data; ++ u8 len; ++}; ++ ++#define XBOXONE_INIT_PKT(_vid, _pid, _data) \ ++ { \ ++ .idVendor = (_vid), \ ++ .idProduct = (_pid), \ ++ .data = (_data), \ ++ .len = ARRAY_SIZE(_data), \ ++ } ++ ++ ++/* ++ * This packet is required for all Xbox One pads with 2015 ++ * or later firmware installed (or present from the factory). ++ */ ++static const u8 xboxone_fw2015_init[] = { ++ 0x05, 0x20, 0x00, 0x01, 0x00 ++}; ++ ++/* ++ * This packet is required for the Titanfall 2 Xbox One pads ++ * (0x0e6f:0x0165) to finish initialization and for Hori pads ++ * (0x0f0d:0x0067) to make the analog sticks work. ++ */ ++static const u8 xboxone_hori_init[] = { ++ 0x01, 0x20, 0x00, 0x09, 0x00, 0x04, 0x20, 0x3a, ++ 0x00, 0x00, 0x00, 0x80, 0x00 ++}; ++ ++/* ++ * This packet is required for some of the PDP pads to start ++ * sending input reports. These pads include: (0x0e6f:0x02ab), ++ * (0x0e6f:0x02a4). 
++ */ ++static const u8 xboxone_pdp_init1[] = { ++ 0x0a, 0x20, 0x00, 0x03, 0x00, 0x01, 0x14 ++}; ++ ++/* ++ * This packet is required for some of the PDP pads to start ++ * sending input reports. These pads include: (0x0e6f:0x02ab), ++ * (0x0e6f:0x02a4). ++ */ ++static const u8 xboxone_pdp_init2[] = { ++ 0x06, 0x20, 0x00, 0x02, 0x01, 0x00 ++}; ++ ++/* ++ * A specific rumble packet is required for some PowerA pads to start ++ * sending input reports. One of those pads is (0x24c6:0x543a). ++ */ ++static const u8 xboxone_rumblebegin_init[] = { ++ 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00, ++ 0x1D, 0x1D, 0xFF, 0x00, 0x00 ++}; ++ ++/* ++ * A rumble packet with zero FF intensity will immediately ++ * terminate the rumbling required to init PowerA pads. ++ * This should happen fast enough that the motors don't ++ * spin up to enough speed to actually vibrate the gamepad. ++ */ ++static const u8 xboxone_rumbleend_init[] = { ++ 0x09, 0x00, 0x00, 0x09, 0x00, 0x0F, 0x00, 0x00, ++ 0x00, 0x00, 0x00, 0x00, 0x00 ++}; ++ ++/* ++ * This specifies the selection of init packets that a gamepad ++ * will be sent on init *and* the order in which they will be ++ * sent. The correct sequence number will be added when the ++ * packet is going to be sent. ++ */ ++static const struct xboxone_init_packet xboxone_init_packets[] = { ++ XBOXONE_INIT_PKT(0x0e6f, 0x0165, xboxone_hori_init), ++ XBOXONE_INIT_PKT(0x0f0d, 0x0067, xboxone_hori_init), ++ XBOXONE_INIT_PKT(0x0000, 0x0000, xboxone_fw2015_init), ++ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init1), ++ XBOXONE_INIT_PKT(0x0e6f, 0x02ab, xboxone_pdp_init2), ++ XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init1), ++ XBOXONE_INIT_PKT(0x0e6f, 0x02a4, xboxone_pdp_init2), ++ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init1), ++ XBOXONE_INIT_PKT(0x0e6f, 0x02a6, xboxone_pdp_init2), ++ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumblebegin_init), ++ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumblebegin_init), ++ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumblebegin_init), ++ XBOXONE_INIT_PKT(0x24c6, 0x541a, xboxone_rumbleend_init), ++ XBOXONE_INIT_PKT(0x24c6, 0x542a, xboxone_rumbleend_init), ++ XBOXONE_INIT_PKT(0x24c6, 0x543a, xboxone_rumbleend_init), ++}; ++ + struct xpad_output_packet { + u8 data[XPAD_PKT_LEN]; + u8 len; +@@ -374,6 +580,7 @@ struct usb_xpad { + + struct xpad_output_packet out_packets[XPAD_NUM_OUT_PACKETS]; + int last_out_packet; ++ int init_seq; + + #if defined(CONFIG_JOYSTICK_XPAD_LEDS) + struct xpad_led *led; +@@ -390,6 +597,7 @@ struct usb_xpad { + + static int xpad_init_input(struct usb_xpad *xpad); + static void xpad_deinit_input(struct usb_xpad *xpad); ++static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num); + + /* + * xpad_process_packet +@@ -609,14 +817,36 @@ static void xpad360w_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned cha + } + + /* +- * xpadone_process_buttons ++ * xpadone_process_packet ++ * ++ * Completes a request by converting the data into events for the ++ * input subsystem. This version is for the Xbox One controller. + * +- * Process a button update packet from an Xbox one controller. 
++ * The report format was gleaned from ++ * https://github.com/kylelemons/xbox/blob/master/xbox.go + */ +-static void xpadone_process_buttons(struct usb_xpad *xpad, +- struct input_dev *dev, +- unsigned char *data) ++static void xpadone_process_packet(struct usb_xpad *xpad, u16 cmd, unsigned char *data) + { ++ struct input_dev *dev = xpad->dev; ++ ++ /* the xbox button has its own special report */ ++ if (data[0] == 0X07) { ++ /* ++ * The Xbox One S controller requires these reports to be ++ * acked otherwise it continues sending them forever and ++ * won't report further mode button events. ++ */ ++ if (data[1] == 0x30) ++ xpadone_ack_mode_report(xpad, data[2]); ++ ++ input_report_key(dev, BTN_MODE, data[4] & 0x01); ++ input_sync(dev); ++ return; ++ } ++ /* check invalid packet */ ++ else if (data[0] != 0X20) ++ return; ++ + /* menu/view buttons */ + input_report_key(dev, BTN_START, data[4] & 0x04); + input_report_key(dev, BTN_SELECT, data[4] & 0x08); +@@ -679,34 +909,6 @@ static void xpadone_process_buttons(struct usb_xpad *xpad, + input_sync(dev); + } + +-/* +- * xpadone_process_packet +- * +- * Completes a request by converting the data into events for the +- * input subsystem. This version is for the Xbox One controller. +- * +- * The report format was gleaned from +- * https://github.com/kylelemons/xbox/blob/master/xbox.go +- */ +- +-static void xpadone_process_packet(struct usb_xpad *xpad, +- u16 cmd, unsigned char *data) +-{ +- struct input_dev *dev = xpad->dev; +- +- switch (data[0]) { +- case 0x20: +- xpadone_process_buttons(xpad, dev, data); +- break; +- +- case 0x07: +- /* the xbox button has its own special report */ +- input_report_key(dev, BTN_MODE, data[4] & 0x01); +- input_sync(dev); +- break; +- } +-} +- + static void xpad_irq_in(struct urb *urb) + { + struct usb_xpad *xpad = urb->context; +@@ -753,12 +955,48 @@ exit: + __func__, retval); + } + ++/* Callers must hold xpad->odata_lock spinlock */ ++static bool xpad_prepare_next_init_packet(struct usb_xpad *xpad) ++{ ++ const struct xboxone_init_packet *init_packet; ++ ++ if (xpad->xtype != XTYPE_XBOXONE) ++ return false; ++ ++ /* Perform initialization sequence for Xbox One pads that require it */ ++ while (xpad->init_seq < ARRAY_SIZE(xboxone_init_packets)) { ++ init_packet = &xboxone_init_packets[xpad->init_seq++]; ++ ++ if (init_packet->idVendor != 0 && ++ init_packet->idVendor != xpad->dev->id.vendor) ++ continue; ++ ++ if (init_packet->idProduct != 0 && ++ init_packet->idProduct != xpad->dev->id.product) ++ continue; ++ ++ /* This packet applies to our device, so prepare to send it */ ++ memcpy(xpad->odata, init_packet->data, init_packet->len); ++ xpad->irq_out->transfer_buffer_length = init_packet->len; ++ ++ /* Update packet with current sequence number */ ++ xpad->odata[2] = xpad->odata_serial++; ++ return true; ++ } ++ ++ return false; ++} ++ + /* Callers must hold xpad->odata_lock spinlock */ + static bool xpad_prepare_next_out_packet(struct usb_xpad *xpad) + { + struct xpad_output_packet *pkt, *packet = NULL; + int i; + ++ /* We may have init packets to send before we can send user commands */ ++ if (xpad_prepare_next_init_packet(xpad)) ++ return true; ++ + for (i = 0; i < XPAD_NUM_OUT_PACKETS; i++) { + if (++xpad->last_out_packet >= XPAD_NUM_OUT_PACKETS) + xpad->last_out_packet = 0; +@@ -851,10 +1089,9 @@ static void xpad_irq_out(struct urb *urb) + spin_unlock_irqrestore(&xpad->odata_lock, flags); + } + +-static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) ++static int 
xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad, ++ struct usb_endpoint_descriptor *ep_irq_out) + { +- struct usb_endpoint_descriptor *ep_irq_out; +- int ep_irq_out_idx; + int error; + + if (xpad->xtype == XTYPE_UNKNOWN) +@@ -864,23 +1101,17 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) + + xpad->odata = usb_alloc_coherent(xpad->udev, XPAD_PKT_LEN, + GFP_KERNEL, &xpad->odata_dma); +- if (!xpad->odata) { +- error = -ENOMEM; +- goto fail1; +- } ++ if (!xpad->odata) ++ return -ENOMEM; + + spin_lock_init(&xpad->odata_lock); + + xpad->irq_out = usb_alloc_urb(0, GFP_KERNEL); + if (!xpad->irq_out) { + error = -ENOMEM; +- goto fail2; ++ goto err_free_coherent; + } + +- /* Xbox One controller has in/out endpoints swapped. */ +- ep_irq_out_idx = xpad->xtype == XTYPE_XBOXONE ? 0 : 1; +- ep_irq_out = &intf->cur_altsetting->endpoint[ep_irq_out_idx].desc; +- + usb_fill_int_urb(xpad->irq_out, xpad->udev, + usb_sndintpipe(xpad->udev, ep_irq_out->bEndpointAddress), + xpad->odata, XPAD_PKT_LEN, +@@ -890,8 +1121,9 @@ static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad) + + return 0; + +- fail2: usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma); +- fail1: return error; ++err_free_coherent: ++ usb_free_coherent(xpad->udev, XPAD_PKT_LEN, xpad->odata, xpad->odata_dma); ++ return error; + } + + static void xpad_stop_output(struct usb_xpad *xpad) +@@ -950,24 +1182,17 @@ static int xpad_inquiry_pad_presence(struct usb_xpad *xpad) + + static int xpad_start_xbox_one(struct usb_xpad *xpad) + { +- struct xpad_output_packet *packet = +- &xpad->out_packets[XPAD_OUT_CMD_IDX]; + unsigned long flags; + int retval; + + spin_lock_irqsave(&xpad->odata_lock, flags); + +- /* Xbox one controller needs to be initialized. */ +- packet->data[0] = 0x05; +- packet->data[1] = 0x20; +- packet->data[2] = xpad->odata_serial++; /* packet serial */ +- packet->data[3] = 0x01; /* rumble bit enable? */ +- packet->data[4] = 0x00; +- packet->len = 5; +- packet->pending = true; +- +- /* Reset the sequence so we send out start packet first */ +- xpad->last_out_packet = -1; ++ /* ++ * Begin the init sequence by attempting to send a packet. ++ * We will cycle through the init packet sequence before ++ * sending any packets from the output ring. 
++ */ ++ xpad->init_seq = 0; + retval = xpad_try_sending_next_out_packet(xpad); + + spin_unlock_irqrestore(&xpad->odata_lock, flags); +@@ -975,6 +1200,30 @@ static int xpad_start_xbox_one(struct usb_xpad *xpad) + return retval; + } + ++static void xpadone_ack_mode_report(struct usb_xpad *xpad, u8 seq_num) ++{ ++ unsigned long flags; ++ struct xpad_output_packet *packet = ++ &xpad->out_packets[XPAD_OUT_CMD_IDX]; ++ static const u8 mode_report_ack[] = { ++ 0x01, 0x20, 0x00, 0x09, 0x00, 0x07, 0x20, 0x02, ++ 0x00, 0x00, 0x00, 0x00, 0x00 ++ }; ++ ++ spin_lock_irqsave(&xpad->odata_lock, flags); ++ ++ packet->len = sizeof(mode_report_ack); ++ memcpy(packet->data, mode_report_ack, packet->len); ++ packet->data[2] = seq_num; ++ packet->pending = true; ++ ++ /* Reset the sequence so we send out the ack now */ ++ xpad->last_out_packet = -1; ++ xpad_try_sending_next_out_packet(xpad); ++ ++ spin_unlock_irqrestore(&xpad->odata_lock, flags); ++} ++ + #ifdef CONFIG_JOYSTICK_XPAD_FF + static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect *effect) + { +@@ -1046,9 +1295,9 @@ static int xpad_play_effect(struct input_dev *dev, void *data, struct ff_effect + packet->data[7] = 0x00; + packet->data[8] = strong / 512; /* left actuator */ + packet->data[9] = weak / 512; /* right actuator */ +- packet->data[10] = 0xFF; +- packet->data[11] = 0x00; +- packet->data[12] = 0x00; ++ packet->data[10] = 0xFF; /* on period */ ++ packet->data[11] = 0x00; /* off period */ ++ packet->data[12] = 0xFF; /* repeat count */ + packet->len = 13; + packet->pending = true; + break; +@@ -1199,6 +1448,7 @@ static int xpad_led_probe(struct usb_xpad *xpad) + led_cdev = &led->led_cdev; + led_cdev->name = led->name; + led_cdev->brightness_set = xpad_led_set; ++ led_cdev->flags = LED_CORE_SUSPENDRESUME; + + error = led_classdev_register(&xpad->udev->dev, led_cdev); + if (error) +@@ -1333,7 +1583,6 @@ static void xpad_close(struct input_dev *dev) + static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs) + { + struct usb_xpad *xpad = input_get_drvdata(input_dev); +- set_bit(abs, input_dev->absbit); + + switch (abs) { + case ABS_X: +@@ -1353,6 +1602,9 @@ static void xpad_set_up_abs(struct input_dev *input_dev, signed short abs) + case ABS_HAT0Y: /* the d-pad (only if dpad is mapped to axes */ + input_set_abs_params(input_dev, abs, -1, 1, 0, 0); + break; ++ default: ++ input_set_abs_params(input_dev, abs, 0, 0, 0, 0); ++ break; + } + } + +@@ -1393,10 +1645,7 @@ static int xpad_init_input(struct usb_xpad *xpad) + input_dev->close = xpad_close; + } + +- __set_bit(EV_KEY, input_dev->evbit); +- + if (!(xpad->mapping & MAP_STICKS_TO_NULL)) { +- __set_bit(EV_ABS, input_dev->evbit); + /* set up axes */ + for (i = 0; xpad_abs[i] >= 0; i++) + xpad_set_up_abs(input_dev, xpad_abs[i]); +@@ -1404,21 +1653,22 @@ static int xpad_init_input(struct usb_xpad *xpad) + + /* set up standard buttons */ + for (i = 0; xpad_common_btn[i] >= 0; i++) +- __set_bit(xpad_common_btn[i], input_dev->keybit); ++ input_set_capability(input_dev, EV_KEY, xpad_common_btn[i]); + + /* set up model-specific ones */ + if (xpad->xtype == XTYPE_XBOX360 || xpad->xtype == XTYPE_XBOX360W || + xpad->xtype == XTYPE_XBOXONE) { + for (i = 0; xpad360_btn[i] >= 0; i++) +- __set_bit(xpad360_btn[i], input_dev->keybit); ++ input_set_capability(input_dev, EV_KEY, xpad360_btn[i]); + } else { + for (i = 0; xpad_btn[i] >= 0; i++) +- __set_bit(xpad_btn[i], input_dev->keybit); ++ input_set_capability(input_dev, EV_KEY, xpad_btn[i]); + } + + if (xpad->mapping & 
MAP_DPAD_TO_BUTTONS) { + for (i = 0; xpad_btn_pad[i] >= 0; i++) +- __set_bit(xpad_btn_pad[i], input_dev->keybit); ++ input_set_capability(input_dev, EV_KEY, ++ xpad_btn_pad[i]); + } + + /* +@@ -1435,7 +1685,8 @@ static int xpad_init_input(struct usb_xpad *xpad) + + if (xpad->mapping & MAP_TRIGGERS_TO_BUTTONS) { + for (i = 0; xpad_btn_triggers[i] >= 0; i++) +- __set_bit(xpad_btn_triggers[i], input_dev->keybit); ++ input_set_capability(input_dev, EV_KEY, ++ xpad_btn_triggers[i]); + } else { + for (i = 0; xpad_abs_triggers[i] >= 0; i++) + xpad_set_up_abs(input_dev, xpad_abs_triggers[i]); +@@ -1469,8 +1720,7 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id + { + struct usb_device *udev = interface_to_usbdev(intf); + struct usb_xpad *xpad; +- struct usb_endpoint_descriptor *ep_irq_in; +- int ep_irq_in_idx; ++ struct usb_endpoint_descriptor *ep_irq_in, *ep_irq_out; + int i, error; + + if (intf->cur_altsetting->desc.bNumEndpoints != 2) +@@ -1540,13 +1790,28 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id + goto err_free_in_urb; + } + +- error = xpad_init_output(intf, xpad); +- if (error) ++ ep_irq_in = ep_irq_out = NULL; ++ ++ for (i = 0; i < 2; i++) { ++ struct usb_endpoint_descriptor *ep = ++ &intf->cur_altsetting->endpoint[i].desc; ++ ++ if (usb_endpoint_xfer_int(ep)) { ++ if (usb_endpoint_dir_in(ep)) ++ ep_irq_in = ep; ++ else ++ ep_irq_out = ep; ++ } ++ } ++ ++ if (!ep_irq_in || !ep_irq_out) { ++ error = -ENODEV; + goto err_free_in_urb; ++ } + +- /* Xbox One controller has in/out endpoints swapped. */ +- ep_irq_in_idx = xpad->xtype == XTYPE_XBOXONE ? 1 : 0; +- ep_irq_in = &intf->cur_altsetting->endpoint[ep_irq_in_idx].desc; ++ error = xpad_init_output(intf, xpad, ep_irq_out); ++ if (error) ++ goto err_free_in_urb; + + usb_fill_int_urb(xpad->irq_in, udev, + usb_rcvintpipe(udev, ep_irq_in->bEndpointAddress), +@@ -1663,8 +1928,16 @@ static int xpad_resume(struct usb_interface *intf) + retval = xpad360w_start_input(xpad); + } else { + mutex_lock(&input->mutex); +- if (input->users) ++ if (input->users) { + retval = xpad_start_input(xpad); ++ } else if (xpad->xtype == XTYPE_XBOXONE) { ++ /* ++ * Even if there are no users, we'll send Xbox One pads ++ * the startup sequence so they don't sit there and ++ * blink until somebody opens the input device again. ++ */ ++ retval = xpad_start_xbox_one(xpad); ++ } + mutex_unlock(&input->mutex); + } + +diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c +index 8d6208c0b400..ff3d9fc0f1b3 100644 +--- a/drivers/net/can/dev.c ++++ b/drivers/net/can/dev.c +@@ -453,6 +453,34 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + } + EXPORT_SYMBOL_GPL(can_put_echo_skb); + ++struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr) ++{ ++ struct can_priv *priv = netdev_priv(dev); ++ struct sk_buff *skb = priv->echo_skb[idx]; ++ struct canfd_frame *cf; ++ ++ if (idx >= priv->echo_skb_max) { ++ netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n", ++ __func__, idx, priv->echo_skb_max); ++ return NULL; ++ } ++ ++ if (!skb) { ++ netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n", ++ __func__, idx); ++ return NULL; ++ } ++ ++ /* Using "struct canfd_frame::len" for the frame ++ * length is supported on both CAN and CANFD frames. 
++ */ ++ cf = (struct canfd_frame *)skb->data; ++ *len_ptr = cf->len; ++ priv->echo_skb[idx] = NULL; ++ ++ return skb; ++} ++ + /* + * Get the skb from the stack and loop it back locally + * +@@ -462,22 +490,16 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb); + */ + unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx) + { +- struct can_priv *priv = netdev_priv(dev); +- +- BUG_ON(idx >= priv->echo_skb_max); +- +- if (priv->echo_skb[idx]) { +- struct sk_buff *skb = priv->echo_skb[idx]; +- struct can_frame *cf = (struct can_frame *)skb->data; +- u8 dlc = cf->can_dlc; ++ struct sk_buff *skb; ++ u8 len; + +- netif_rx(priv->echo_skb[idx]); +- priv->echo_skb[idx] = NULL; ++ skb = __can_get_echo_skb(dev, idx, &len); ++ if (!skb) ++ return 0; + +- return dlc; +- } ++ netif_rx(skb); + +- return 0; ++ return len; + } + EXPORT_SYMBOL_GPL(can_get_echo_skb); + +diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c +index 3b9e1a5dce82..9bd90a7c4d40 100644 +--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c ++++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c +@@ -483,7 +483,7 @@ static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv) + if (!compat) + return -ENOMEM; + +- priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); ++ priv->mdio_dn = of_get_compatible_child(dn, compat); + kfree(compat); + if (!priv->mdio_dn) { + dev_err(kdev, "unable to find MDIO bus node\n"); +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 0cbcd3f77341..6b4e38105b72 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -31,6 +31,7 @@ + #include <linux/mdio.h> + #include <net/ip6_checksum.h> + #include <linux/microchipphy.h> ++#include <linux/of_net.h> + #include "lan78xx.h" + + #define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>" +@@ -1644,34 +1645,31 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev) + addr[5] = (addr_hi >> 8) & 0xFF; + + if (!is_valid_ether_addr(addr)) { +- /* reading mac address from EEPROM or OTP */ +- if ((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, +- addr) == 0) || +- (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ETH_ALEN, +- addr) == 0)) { +- if (is_valid_ether_addr(addr)) { +- /* eeprom values are valid so use them */ +- netif_dbg(dev, ifup, dev->net, +- "MAC address read from EEPROM"); +- } else { +- /* generate random MAC */ +- random_ether_addr(addr); +- netif_dbg(dev, ifup, dev->net, +- "MAC address set to random addr"); +- } +- +- addr_lo = addr[0] | (addr[1] << 8) | +- (addr[2] << 16) | (addr[3] << 24); +- addr_hi = addr[4] | (addr[5] << 8); +- +- ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); +- ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi); ++ if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) { ++ /* valid address present in Device Tree */ ++ netif_dbg(dev, ifup, dev->net, ++ "MAC address read from Device Tree"); ++ } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ++ ETH_ALEN, addr) == 0) || ++ (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET, ++ ETH_ALEN, addr) == 0)) && ++ is_valid_ether_addr(addr)) { ++ /* eeprom values are valid so use them */ ++ netif_dbg(dev, ifup, dev->net, ++ "MAC address read from EEPROM"); + } else { + /* generate random MAC */ + random_ether_addr(addr); + netif_dbg(dev, ifup, dev->net, + "MAC address set to random addr"); + } ++ ++ addr_lo = addr[0] | (addr[1] << 8) | ++ (addr[2] << 16) | (addr[3] << 24); ++ addr_hi = addr[4] | (addr[5] << 8); ++ ++ ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo); ++ ret = 
lan78xx_write_reg(dev, RX_ADDRH, addr_hi); + } + + ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo); +diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c +index 5fe6841b8889..fb632a454fc2 100644 +--- a/drivers/net/wireless/ath/ath10k/mac.c ++++ b/drivers/net/wireless/ath/ath10k/mac.c +@@ -4967,7 +4967,9 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, + } + + ar->free_vdev_map &= ~(1LL << arvif->vdev_id); ++ spin_lock_bh(&ar->data_lock); + list_add(&arvif->list, &ar->arvifs); ++ spin_unlock_bh(&ar->data_lock); + + /* It makes no sense to have firmware do keepalives. mac80211 already + * takes care of this with idle connection polling. +@@ -5118,7 +5120,9 @@ err_peer_delete: + err_vdev_delete: + ath10k_wmi_vdev_delete(ar, arvif->vdev_id); + ar->free_vdev_map |= 1LL << arvif->vdev_id; ++ spin_lock_bh(&ar->data_lock); + list_del(&arvif->list); ++ spin_unlock_bh(&ar->data_lock); + + err: + if (arvif->beacon_buf) { +@@ -5164,7 +5168,9 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw, + arvif->vdev_id, ret); + + ar->free_vdev_map |= 1LL << arvif->vdev_id; ++ spin_lock_bh(&ar->data_lock); + list_del(&arvif->list); ++ spin_unlock_bh(&ar->data_lock); + + if (arvif->vdev_type == WMI_VDEV_TYPE_AP || + arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { +diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +index c221597e2519..530f52120972 100644 +--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c ++++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +@@ -5990,7 +5990,8 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg, + * for subsequent chanspecs. + */ + channel->flags = IEEE80211_CHAN_NO_HT40 | +- IEEE80211_CHAN_NO_80MHZ; ++ IEEE80211_CHAN_NO_80MHZ | ++ IEEE80211_CHAN_NO_160MHZ; + ch.bw = BRCMU_CHAN_BW_20; + cfg->d11inf.encchspec(&ch); + chaninfo = ch.chspec; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +index 0bffade1ea5b..92557cd31a39 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +@@ -327,8 +327,12 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, + goto out; + } + +- if (changed) +- *changed = (resp->status == MCC_RESP_NEW_CHAN_PROFILE); ++ if (changed) { ++ u32 status = le32_to_cpu(resp->status); ++ ++ *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || ++ status == MCC_RESP_ILLEGAL); ++ } + + regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, + __le32_to_cpu(resp->n_channels), +@@ -3976,10 +3980,6 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, + sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); + } + +- if (!fw_has_capa(&mvm->fw->ucode_capa, +- IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) +- return; +- + /* if beacon filtering isn't on mac80211 does it anyway */ + if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) + return; +diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +index eade099b6dbf..e51aca87b4b0 100644 +--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c ++++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +@@ -739,9 +739,8 @@ iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2, + } + + IWL_DEBUG_LAR(mvm, +- "MCC response status: 0x%x. 
new MCC: 0x%x ('%c%c') change: %d n_chans: %d\n", +- status, mcc, mcc >> 8, mcc & 0xff, +- !!(status == MCC_RESP_NEW_CHAN_PROFILE), n_channels); ++ "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n", ++ status, mcc, mcc >> 8, mcc & 0xff, n_channels); + + exit: + iwl_free_resp(&cmd); +diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +index 48d51be11f9b..4da3541471e6 100644 +--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c ++++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c +@@ -1209,6 +1209,12 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, + priv->adapter->curr_iface_comb.p2p_intf--; + priv->adapter->curr_iface_comb.sta_intf++; + dev->ieee80211_ptr->iftype = type; ++ if (mwifiex_deinit_priv_params(priv)) ++ return -1; ++ if (mwifiex_init_new_priv_params(priv, dev, type)) ++ return -1; ++ if (mwifiex_sta_init_cmd(priv, false, false)) ++ return -1; + break; + case NL80211_IFTYPE_ADHOC: + if (mwifiex_cfg80211_deinit_p2p(priv)) +@@ -3079,8 +3085,10 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) + + mwifiex_stop_net_dev_queue(priv->netdev, adapter); + +- skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) ++ skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) { ++ skb_unlink(skb, &priv->bypass_txq); + mwifiex_write_data_complete(priv->adapter, skb, 0, -1); ++ } + + if (netif_carrier_ok(priv->netdev)) + netif_carrier_off(priv->netdev); +diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c +index 1fdb86cd4734..cb681b265b10 100644 +--- a/drivers/net/wireless/marvell/mwifiex/pcie.c ++++ b/drivers/net/wireless/marvell/mwifiex/pcie.c +@@ -101,7 +101,6 @@ static int mwifiex_pcie_suspend(struct device *dev) + { + struct mwifiex_adapter *adapter; + struct pcie_service_card *card; +- int hs_actived; + struct pci_dev *pdev = to_pci_dev(dev); + + if (pdev) { +@@ -117,7 +116,15 @@ static int mwifiex_pcie_suspend(struct device *dev) + + adapter = card->adapter; + +- hs_actived = mwifiex_enable_hs(adapter); ++ /* Enable the Host Sleep */ ++ if (!mwifiex_enable_hs(adapter)) { ++ mwifiex_dbg(adapter, ERROR, ++ "cmd: failed to suspend\n"); ++ adapter->hs_enabling = false; ++ return -EFAULT; ++ } ++ ++ flush_workqueue(adapter->workqueue); + + /* Indicate device suspended */ + adapter->is_suspended = true; +@@ -1676,9 +1683,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) + + if (!adapter->curr_cmd) { + if (adapter->ps_state == PS_STATE_SLEEP_CFM) { +- mwifiex_process_sleep_confirm_resp(adapter, skb->data, +- skb->len); +- mwifiex_pcie_enable_host_int(adapter); + if (mwifiex_write_reg(adapter, + PCIE_CPU_INT_EVENT, + CPU_INTR_SLEEP_CFM_DONE)) { +@@ -1691,6 +1695,9 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter) + while (reg->sleep_cookie && (count++ < 10) && + mwifiex_pcie_ok_to_access_hw(adapter)) + usleep_range(50, 60); ++ mwifiex_pcie_enable_host_int(adapter); ++ mwifiex_process_sleep_confirm_resp(adapter, skb->data, ++ skb->len); + } else { + mwifiex_dbg(adapter, ERROR, + "There is no command but got cmdrsp\n"); +@@ -2329,6 +2336,8 @@ static int mwifiex_process_pcie_int(struct mwifiex_adapter *adapter) + ret = mwifiex_pcie_process_cmd_complete(adapter); + if (ret) + return ret; ++ if (adapter->hs_activated) ++ return ret; + } + + if (card->msi_enable) { +diff --git a/drivers/net/wireless/marvell/mwifiex/wmm.c b/drivers/net/wireless/marvell/mwifiex/wmm.c +index 
0eb246502e1d..dea2fe671dfe 100644 +--- a/drivers/net/wireless/marvell/mwifiex/wmm.c ++++ b/drivers/net/wireless/marvell/mwifiex/wmm.c +@@ -503,8 +503,10 @@ mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv, + struct mwifiex_adapter *adapter = priv->adapter; + struct sk_buff *skb, *tmp; + +- skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) ++ skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) { ++ skb_unlink(skb, &ra_list->skb_head); + mwifiex_write_data_complete(adapter, skb, 0, -1); ++ } + } + + /* +@@ -600,11 +602,15 @@ mwifiex_clean_txrx(struct mwifiex_private *priv) + priv->adapter->if_ops.clean_pcie_ring(priv->adapter); + spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags); + +- skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) ++ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) { ++ skb_unlink(skb, &priv->tdls_txq); + mwifiex_write_data_complete(priv->adapter, skb, 0, -1); ++ } + +- skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) ++ skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) { ++ skb_unlink(skb, &priv->bypass_txq); + mwifiex_write_data_complete(priv->adapter, skb, 0, -1); ++ } + atomic_set(&priv->adapter->bypass_tx_pending, 0); + + idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL); +diff --git a/drivers/net/wireless/st/cw1200/wsm.c b/drivers/net/wireless/st/cw1200/wsm.c +index ed93bf3474ec..be4c22e0d902 100644 +--- a/drivers/net/wireless/st/cw1200/wsm.c ++++ b/drivers/net/wireless/st/cw1200/wsm.c +@@ -1805,16 +1805,18 @@ static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size) + { + size_t pos = buf->data - buf->begin; + size_t size = pos + extra_size; ++ u8 *tmp; + + size = round_up(size, FWLOAD_BLOCK_SIZE); + +- buf->begin = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA); +- if (buf->begin) { +- buf->data = &buf->begin[pos]; +- buf->end = &buf->begin[size]; +- return 0; +- } else { +- buf->end = buf->data = buf->begin; ++ tmp = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA); ++ if (!tmp) { ++ wsm_buf_deinit(buf); + return -ENOMEM; + } ++ ++ buf->begin = tmp; ++ buf->data = &buf->begin[pos]; ++ buf->end = &buf->begin[size]; ++ return 0; + } +diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c +index 6c0c301611c4..1b11ded79c4f 100644 +--- a/drivers/nfc/nfcmrvl/uart.c ++++ b/drivers/nfc/nfcmrvl/uart.c +@@ -73,10 +73,9 @@ static int nfcmrvl_uart_parse_dt(struct device_node *node, + struct device_node *matched_node; + int ret; + +- matched_node = of_find_compatible_node(node, NULL, "marvell,nfc-uart"); ++ matched_node = of_get_compatible_child(node, "marvell,nfc-uart"); + if (!matched_node) { +- matched_node = of_find_compatible_node(node, NULL, +- "mrvl,nfc-uart"); ++ matched_node = of_get_compatible_child(node, "mrvl,nfc-uart"); + if (!matched_node) + return -ENODEV; + } +diff --git a/drivers/of/base.c b/drivers/of/base.c +index 466b285cef3e..f366af135d5b 100644 +--- a/drivers/of/base.c ++++ b/drivers/of/base.c +@@ -738,6 +738,31 @@ struct device_node *of_get_next_available_child(const struct device_node *node, + } + EXPORT_SYMBOL(of_get_next_available_child); + ++/** ++ * of_get_compatible_child - Find compatible child node ++ * @parent: parent node ++ * @compatible: compatible string ++ * ++ * Lookup child node whose compatible property contains the given compatible ++ * string. ++ * ++ * Returns a node pointer with refcount incremented, use of_node_put() on it ++ * when done; or NULL if not found. 
++ */ ++struct device_node *of_get_compatible_child(const struct device_node *parent, ++ const char *compatible) ++{ ++ struct device_node *child; ++ ++ for_each_child_of_node(parent, child) { ++ if (of_device_is_compatible(child, compatible)) ++ break; ++ } ++ ++ return child; ++} ++EXPORT_SYMBOL(of_get_compatible_child); ++ + /** + * of_get_child_by_name - Find the child node by name for a given parent + * @node: parent node +diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c +index 9443c9d408c6..df61a71420b1 100644 +--- a/drivers/pinctrl/meson/pinctrl-meson.c ++++ b/drivers/pinctrl/meson/pinctrl-meson.c +@@ -275,7 +275,7 @@ static int meson_pinconf_set(struct pinctrl_dev *pcdev, unsigned int pin, + dev_dbg(pc->dev, "pin %u: disable bias\n", pin); + + meson_calc_reg_and_bit(bank, pin, REG_PULL, ®, &bit); +- ret = regmap_update_bits(pc->reg_pull, reg, ++ ret = regmap_update_bits(pc->reg_pullen, reg, + BIT(bit), 0); + if (ret) + return ret; +diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c +index 2bfdf638b673..8a3667e761dd 100644 +--- a/drivers/rtc/rtc-pcf2127.c ++++ b/drivers/rtc/rtc-pcf2127.c +@@ -237,6 +237,9 @@ static int pcf2127_i2c_gather_write(void *context, + memcpy(buf + 1, val, val_size); + + ret = i2c_master_send(client, buf, val_size + 1); ++ ++ kfree(buf); ++ + if (ret != val_size + 1) + return ret < 0 ? ret : -EIO; + +diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h +index 5bb2316f60bf..54deeb754db5 100644 +--- a/drivers/scsi/ufs/ufs.h ++++ b/drivers/scsi/ufs/ufs.h +@@ -46,6 +46,7 @@ + #define QUERY_DESC_HDR_SIZE 2 + #define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \ + (sizeof(struct utp_upiu_header))) ++#define RESPONSE_UPIU_SENSE_DATA_LENGTH 18 + + #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\ + cpu_to_be32((byte3 << 24) | (byte2 << 16) |\ +@@ -410,7 +411,7 @@ struct utp_cmd_rsp { + __be32 residual_transfer_count; + __be32 reserved[4]; + __be16 sense_data_len; +- u8 sense_data[18]; ++ u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH]; + }; + + /** +diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c +index d15eaa466c59..52b546fb509b 100644 +--- a/drivers/scsi/ufs/ufshcd-pci.c ++++ b/drivers/scsi/ufs/ufshcd-pci.c +@@ -104,6 +104,7 @@ static void ufshcd_pci_remove(struct pci_dev *pdev) + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + ufshcd_remove(hba); ++ ufshcd_dealloc_host(hba); + } + + /** +@@ -147,6 +148,7 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) + err = ufshcd_init(hba, mmio_base, pdev->irq); + if (err) { + dev_err(&pdev->dev, "Initialization failed\n"); ++ ufshcd_dealloc_host(hba); + return err; + } + +diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c +index db53f38da864..a72a4ba78125 100644 +--- a/drivers/scsi/ufs/ufshcd-pltfrm.c ++++ b/drivers/scsi/ufs/ufshcd-pltfrm.c +@@ -163,7 +163,7 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, + if (ret) { + dev_err(dev, "%s: unable to find %s err %d\n", + __func__, prop_name, ret); +- goto out_free; ++ goto out; + } + + vreg->min_uA = 0; +@@ -185,9 +185,6 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name, + + goto out; + +-out_free: +- devm_kfree(dev, vreg); +- vreg = NULL; + out: + if (!ret) + *out_vreg = vreg; +diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c +index f857086ce2fa..5cfd56f08ffb 100644 +--- a/drivers/scsi/ufs/ufshcd.c ++++ b/drivers/scsi/ufs/ufshcd.c +@@ 
-672,6 +672,21 @@ int ufshcd_hold(struct ufs_hba *hba, bool async) + start: + switch (hba->clk_gating.state) { + case CLKS_ON: ++ /* ++ * Wait for the ungate work to complete if in progress. ++ * Though the clocks may be in ON state, the link could ++ * still be in hibner8 state if hibern8 is allowed ++ * during clock gating. ++ * Make sure we exit hibern8 state also in addition to ++ * clocks being ON. ++ */ ++ if (ufshcd_can_hibern8_during_gating(hba) && ++ ufshcd_is_link_hibern8(hba)) { ++ spin_unlock_irqrestore(hba->host->host_lock, flags); ++ flush_work(&hba->clk_gating.ungate_work); ++ spin_lock_irqsave(hba->host->host_lock, flags); ++ goto start; ++ } + break; + case REQ_CLKS_OFF: + if (cancel_delayed_work(&hba->clk_gating.gate_work)) { +@@ -901,10 +916,14 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) + int len; + if (lrbp->sense_buffer && + ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { ++ int len_to_copy; ++ + len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); ++ len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len); ++ + memcpy(lrbp->sense_buffer, + lrbp->ucd_rsp_ptr->sr.sense_data, +- min_t(int, len, SCSI_SENSE_BUFFERSIZE)); ++ min_t(int, len_to_copy, SCSI_SENSE_BUFFERSIZE)); + } + } + +@@ -6373,7 +6392,10 @@ EXPORT_SYMBOL(ufshcd_system_suspend); + + int ufshcd_system_resume(struct ufs_hba *hba) + { +- if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev)) ++ if (!hba) ++ return -EINVAL; ++ ++ if (!hba->is_powered || pm_runtime_suspended(hba->dev)) + /* + * Let the runtime resume take care of resuming + * if runtime suspended. +@@ -6394,7 +6416,10 @@ EXPORT_SYMBOL(ufshcd_system_resume); + */ + int ufshcd_runtime_suspend(struct ufs_hba *hba) + { +- if (!hba || !hba->is_powered) ++ if (!hba) ++ return -EINVAL; ++ ++ if (!hba->is_powered) + return 0; + + return ufshcd_suspend(hba, UFS_RUNTIME_PM); +@@ -6424,10 +6449,13 @@ EXPORT_SYMBOL(ufshcd_runtime_suspend); + */ + int ufshcd_runtime_resume(struct ufs_hba *hba) + { +- if (!hba || !hba->is_powered) ++ if (!hba) ++ return -EINVAL; ++ ++ if (!hba->is_powered) + return 0; +- else +- return ufshcd_resume(hba, UFS_RUNTIME_PM); ++ ++ return ufshcd_resume(hba, UFS_RUNTIME_PM); + } + EXPORT_SYMBOL(ufshcd_runtime_resume); + +@@ -6479,8 +6507,6 @@ void ufshcd_remove(struct ufs_hba *hba) + ufshcd_disable_intr(hba, hba->intr_mask); + ufshcd_hba_stop(hba, true); + +- scsi_host_put(hba->host); +- + ufshcd_exit_clk_gating(hba); + if (ufshcd_is_clkscaling_enabled(hba)) + devfreq_remove_device(hba->devfreq); +@@ -6605,15 +6631,47 @@ static int ufshcd_devfreq_target(struct device *dev, + { + int err = 0; + struct ufs_hba *hba = dev_get_drvdata(dev); ++ bool release_clk_hold = false; ++ unsigned long irq_flags; + + if (!ufshcd_is_clkscaling_enabled(hba)) + return -EINVAL; + ++ spin_lock_irqsave(hba->host->host_lock, irq_flags); ++ if (ufshcd_eh_in_progress(hba)) { ++ spin_unlock_irqrestore(hba->host->host_lock, irq_flags); ++ return 0; ++ } ++ ++ if (ufshcd_is_clkgating_allowed(hba) && ++ (hba->clk_gating.state != CLKS_ON)) { ++ if (cancel_delayed_work(&hba->clk_gating.gate_work)) { ++ /* hold the vote until the scaling work is completed */ ++ hba->clk_gating.active_reqs++; ++ release_clk_hold = true; ++ hba->clk_gating.state = CLKS_ON; ++ } else { ++ /* ++ * Clock gating work seems to be running in parallel ++ * hence skip scaling work to avoid deadlock between ++ * current scaling work and gating work. 
++ */ ++ spin_unlock_irqrestore(hba->host->host_lock, irq_flags); ++ return 0; ++ } ++ } ++ spin_unlock_irqrestore(hba->host->host_lock, irq_flags); ++ + if (*freq == UINT_MAX) + err = ufshcd_scale_clks(hba, true); + else if (*freq == 0) + err = ufshcd_scale_clks(hba, false); + ++ spin_lock_irqsave(hba->host->host_lock, irq_flags); ++ if (release_clk_hold) ++ __ufshcd_release(hba); ++ spin_unlock_irqrestore(hba->host->host_lock, irq_flags); ++ + return err; + } + +@@ -6816,7 +6874,6 @@ exit_gating: + ufshcd_exit_clk_gating(hba); + out_disable: + hba->is_irq_enabled = false; +- scsi_host_put(host); + ufshcd_hba_exit(hba); + out_error: + return err; +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c +index 0475f9685a41..904fc9c37fde 100644 +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -154,17 +154,28 @@ static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i) + return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; + } + ++/* If we are not echoing the data, perhaps this is a secret so erase it */ ++static void zero_buffer(struct tty_struct *tty, u8 *buffer, int size) ++{ ++ bool icanon = !!L_ICANON(tty); ++ bool no_echo = !L_ECHO(tty); ++ ++ if (icanon && no_echo) ++ memset(buffer, 0x00, size); ++} ++ + static int tty_copy_to_user(struct tty_struct *tty, void __user *to, + size_t tail, size_t n) + { + struct n_tty_data *ldata = tty->disc_data; + size_t size = N_TTY_BUF_SIZE - tail; +- const void *from = read_buf_addr(ldata, tail); ++ void *from = read_buf_addr(ldata, tail); + int uncopied; + + if (n > size) { + tty_audit_add_data(tty, from, size); + uncopied = copy_to_user(to, from, size); ++ zero_buffer(tty, from, size - uncopied); + if (uncopied) + return uncopied; + to += size; +@@ -173,7 +184,9 @@ static int tty_copy_to_user(struct tty_struct *tty, void __user *to, + } + + tty_audit_add_data(tty, from, n); +- return copy_to_user(to, from, n); ++ uncopied = copy_to_user(to, from, n); ++ zero_buffer(tty, from, n - uncopied); ++ return uncopied; + } + + /** +@@ -1962,11 +1975,12 @@ static int copy_from_read_buf(struct tty_struct *tty, + n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail); + n = min(*nr, n); + if (n) { +- const unsigned char *from = read_buf_addr(ldata, tail); ++ unsigned char *from = read_buf_addr(ldata, tail); + retval = copy_to_user(*b, from, n); + n -= retval; + is_eof = n == 1 && *from == EOF_CHAR(tty); + tty_audit_add_data(tty, from, n); ++ zero_buffer(tty, from, n); + smp_store_release(&ldata->read_tail, ldata->read_tail + n); + /* Turn single EOF into zero-length read */ + if (L_EXTPROC(tty) && ldata->icanon && is_eof && +diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c +index e99f1c5b1df6..41b9a7ccce08 100644 +--- a/drivers/tty/tty_buffer.c ++++ b/drivers/tty/tty_buffer.c +@@ -458,6 +458,8 @@ int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p, + if (count && ld->ops->receive_buf) + ld->ops->receive_buf(ld->tty, p, f, count); + } ++ if (count > 0) ++ memset(p, 0, count); + return count; + } + EXPORT_SYMBOL_GPL(tty_ldisc_receive_buf); +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 7aee55244b4a..851f5a553de2 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -2809,7 +2809,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1, + USB_PORT_FEAT_C_BH_PORT_RESET); + usb_clear_port_feature(hub->hdev, port1, + USB_PORT_FEAT_C_PORT_LINK_STATE); +- usb_clear_port_feature(hub->hdev, port1, ++ ++ if (udev) ++ usb_clear_port_feature(hub->hdev, port1, + 
USB_PORT_FEAT_C_CONNECTION); + + /* +diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c +index 53b26e978d90..1e91b803ee4e 100644 +--- a/drivers/usb/dwc3/core.c ++++ b/drivers/usb/dwc3/core.c +@@ -1145,6 +1145,7 @@ static int dwc3_probe(struct platform_device *pdev) + + err5: + dwc3_event_buffers_cleanup(dwc); ++ dwc3_ulpi_exit(dwc); + + err4: + dwc3_free_scratch_buffers(dwc); +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index 0f09ab5399f4..00d10660ff14 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -768,7 +768,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, + status |= USB_PORT_STAT_SUSPEND; + } + if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME && +- !DEV_SUPERSPEED_ANY(raw_port_status)) { ++ !DEV_SUPERSPEED_ANY(raw_port_status) && hcd->speed < HCD_USB3) { + if ((raw_port_status & PORT_RESET) || + !(raw_port_status & PORT_PE)) + return 0xffffffff; +@@ -814,7 +814,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, + time_left = wait_for_completion_timeout( + &bus_state->rexit_done[wIndex], + msecs_to_jiffies( +- XHCI_MAX_REXIT_TIMEOUT)); ++ XHCI_MAX_REXIT_TIMEOUT_MS)); + spin_lock_irqsave(&xhci->lock, flags); + + if (time_left) { +@@ -828,7 +828,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, + } else { + int port_status = readl(port_array[wIndex]); + xhci_warn(xhci, "Port resume took longer than %i msec, port status = 0x%x\n", +- XHCI_MAX_REXIT_TIMEOUT, ++ XHCI_MAX_REXIT_TIMEOUT_MS, + port_status); + status |= USB_PORT_STAT_SUSPEND; + clear_bit(wIndex, &bus_state->rexit_ports); +@@ -1322,13 +1322,16 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + __le32 __iomem **port_array; + struct xhci_bus_state *bus_state; + unsigned long flags; ++ u32 portsc_buf[USB_MAXCHILDREN]; ++ bool wake_enabled; + + max_ports = xhci_get_ports(hcd, &port_array); + bus_state = &xhci->bus_state[hcd_index(hcd)]; ++ wake_enabled = hcd->self.root_hub->do_remote_wakeup; + + spin_lock_irqsave(&xhci->lock, flags); + +- if (hcd->self.root_hub->do_remote_wakeup) { ++ if (wake_enabled) { + if (bus_state->resuming_ports || /* USB2 */ + bus_state->port_remote_wakeup) { /* USB3 */ + spin_unlock_irqrestore(&xhci->lock, flags); +@@ -1336,26 +1339,36 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + return -EBUSY; + } + } +- +- port_index = max_ports; ++ /* ++ * Prepare ports for suspend, but don't write anything before all ports ++ * are checked and we know bus suspend can proceed ++ */ + bus_state->bus_suspended = 0; ++ port_index = max_ports; + while (port_index--) { +- /* suspend the port if the port is not suspended */ + u32 t1, t2; +- int slot_id; + + t1 = readl(port_array[port_index]); + t2 = xhci_port_state_to_neutral(t1); ++ portsc_buf[port_index] = 0; + +- if ((t1 & PORT_PE) && !(t1 & PORT_PLS_MASK)) { +- xhci_dbg(xhci, "port %d not suspended\n", port_index); +- slot_id = xhci_find_slot_id_by_port(hcd, xhci, +- port_index + 1); +- if (slot_id) { ++ /* Bail out if a USB3 port has a new device in link training */ ++ if ((t1 & PORT_PLS_MASK) == XDEV_POLLING) { ++ bus_state->bus_suspended = 0; ++ spin_unlock_irqrestore(&xhci->lock, flags); ++ xhci_dbg(xhci, "Bus suspend bailout, port in polling\n"); ++ return -EBUSY; ++ } ++ ++ /* suspend ports in U0, or bail out for new connect changes */ ++ if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) { ++ if ((t1 & PORT_CSC) && wake_enabled) { ++ bus_state->bus_suspended = 0; + spin_unlock_irqrestore(&xhci->lock, flags); +- xhci_stop_device(xhci, slot_id, 1); +- 
spin_lock_irqsave(&xhci->lock, flags); ++ xhci_dbg(xhci, "Bus suspend bailout, port connect change\n"); ++ return -EBUSY; + } ++ xhci_dbg(xhci, "port %d not suspended\n", port_index); + t2 &= ~PORT_PLS_MASK; + t2 |= PORT_LINK_STROBE | XDEV_U3; + set_bit(port_index, &bus_state->bus_suspended); +@@ -1364,7 +1377,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + * including the USB 3.0 roothub, but only if CONFIG_PM + * is enabled, so also enable remote wake here. + */ +- if (hcd->self.root_hub->do_remote_wakeup) { ++ if (wake_enabled) { + if (t1 & PORT_CONNECT) { + t2 |= PORT_WKOC_E | PORT_WKDISC_E; + t2 &= ~PORT_WKCONN_E; +@@ -1377,7 +1390,26 @@ int xhci_bus_suspend(struct usb_hcd *hcd) + + t1 = xhci_port_state_to_neutral(t1); + if (t1 != t2) +- writel(t2, port_array[port_index]); ++ portsc_buf[port_index] = t2; ++ } ++ ++ /* write port settings, stopping and suspending ports if needed */ ++ port_index = max_ports; ++ while (port_index--) { ++ if (!portsc_buf[port_index]) ++ continue; ++ if (test_bit(port_index, &bus_state->bus_suspended)) { ++ int slot_id; ++ ++ slot_id = xhci_find_slot_id_by_port(hcd, xhci, ++ port_index + 1); ++ if (slot_id) { ++ spin_unlock_irqrestore(&xhci->lock, flags); ++ xhci_stop_device(xhci, slot_id, 1); ++ spin_lock_irqsave(&xhci->lock, flags); ++ } ++ } ++ writel(portsc_buf[port_index], port_array[port_index]); + } + hcd->state = HC_STATE_SUSPENDED; + bus_state->next_statechange = jiffies + msecs_to_jiffies(10); +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 89a14d5f6ad8..f4e34a75d413 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1676,7 +1676,7 @@ static void handle_port_status(struct xhci_hcd *xhci, + * RExit to a disconnect state). If so, let the the driver know it's + * out of the RExit state. + */ +- if (!DEV_SUPERSPEED_ANY(temp) && ++ if (!DEV_SUPERSPEED_ANY(temp) && hcd->speed < HCD_USB3 && + test_and_clear_bit(faked_port_index, + &bus_state->rexit_ports)) { + complete(&bus_state->rexit_done[faked_port_index]); +diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h +index b9181281aa9e..e679fec9ce3a 100644 +--- a/drivers/usb/host/xhci.h ++++ b/drivers/usb/host/xhci.h +@@ -1509,7 +1509,7 @@ struct xhci_bus_state { + * It can take up to 20 ms to transition from RExit to U0 on the + * Intel Lynx Point LP xHCI host. 
+ */ +-#define XHCI_MAX_REXIT_TIMEOUT (20 * 1000) ++#define XHCI_MAX_REXIT_TIMEOUT_MS 20 + + static inline unsigned int hcd_index(struct usb_hcd *hcd) + { +diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c +index b0405d6aac85..48db9a9f13f9 100644 +--- a/fs/9p/vfs_dir.c ++++ b/fs/9p/vfs_dir.c +@@ -76,15 +76,6 @@ static inline int dt_type(struct p9_wstat *mistat) + return rettype; + } + +-static void p9stat_init(struct p9_wstat *stbuf) +-{ +- stbuf->name = NULL; +- stbuf->uid = NULL; +- stbuf->gid = NULL; +- stbuf->muid = NULL; +- stbuf->extension = NULL; +-} +- + /** + * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir + * @filp: opened file structure +@@ -145,12 +136,10 @@ static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) + rdir->tail = n; + } + while (rdir->head < rdir->tail) { +- p9stat_init(&st); + err = p9stat_read(fid->clnt, rdir->buf + rdir->head, + rdir->tail - rdir->head, &st); + if (err) { + p9_debug(P9_DEBUG_VFS, "returned %d\n", err); +- p9stat_free(&st); + return -EIO; + } + reclen = st.size+2; +diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c +index 1e5c896f6b79..0acb83efedea 100644 +--- a/fs/bfs/inode.c ++++ b/fs/bfs/inode.c +@@ -350,7 +350,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) + + s->s_magic = BFS_MAGIC; + +- if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) { ++ if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end) || ++ le32_to_cpu(bfs_sb->s_start) < BFS_BSIZE) { + printf("Superblock is corrupted\n"); + goto out1; + } +@@ -359,9 +360,11 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent) + sizeof(struct bfs_inode) + + BFS_ROOT_INO - 1; + imap_len = (info->si_lasti / 8) + 1; +- info->si_imap = kzalloc(imap_len, GFP_KERNEL); +- if (!info->si_imap) ++ info->si_imap = kzalloc(imap_len, GFP_KERNEL | __GFP_NOWARN); ++ if (!info->si_imap) { ++ printf("Cannot allocate %u bytes\n", imap_len); + goto out1; ++ } + for (i = 0; i < BFS_ROOT_INO; i++) + set_bit(i, info->si_imap); + +diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c +index 6d7f66816319..84e5ac061b17 100644 +--- a/fs/gfs2/ops_fstype.c ++++ b/fs/gfs2/ops_fstype.c +@@ -71,13 +71,13 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) + if (!sdp) + return NULL; + +- sb->s_fs_info = sdp; + sdp->sd_vfs = sb; + sdp->sd_lkstats = alloc_percpu(struct gfs2_pcpu_lkstats); + if (!sdp->sd_lkstats) { + kfree(sdp); + return NULL; + } ++ sb->s_fs_info = sdp; + + set_bit(SDF_NOJOURNALID, &sdp->sd_flags); + gfs2_tune_init(&sdp->sd_tune); +diff --git a/fs/namei.c b/fs/namei.c +index 85ac38b99065..eb4626bad88a 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -892,6 +892,8 @@ static inline void put_link(struct nameidata *nd) + + int sysctl_protected_symlinks __read_mostly = 0; + int sysctl_protected_hardlinks __read_mostly = 0; ++int sysctl_protected_fifos __read_mostly; ++int sysctl_protected_regular __read_mostly; + + /** + * may_follow_link - Check symlink following for unsafe situations +@@ -1005,6 +1007,45 @@ static int may_linkat(struct path *link) + return -EPERM; + } + ++/** ++ * may_create_in_sticky - Check whether an O_CREAT open in a sticky directory ++ * should be allowed, or not, on files that already ++ * exist. 
++ * @dir: the sticky parent directory ++ * @inode: the inode of the file to open ++ * ++ * Block an O_CREAT open of a FIFO (or a regular file) when: ++ * - sysctl_protected_fifos (or sysctl_protected_regular) is enabled ++ * - the file already exists ++ * - we are in a sticky directory ++ * - we don't own the file ++ * - the owner of the directory doesn't own the file ++ * - the directory is world writable ++ * If the sysctl_protected_fifos (or sysctl_protected_regular) is set to 2 ++ * the directory doesn't have to be world writable: being group writable will ++ * be enough. ++ * ++ * Returns 0 if the open is allowed, -ve on error. ++ */ ++static int may_create_in_sticky(struct dentry * const dir, ++ struct inode * const inode) ++{ ++ if ((!sysctl_protected_fifos && S_ISFIFO(inode->i_mode)) || ++ (!sysctl_protected_regular && S_ISREG(inode->i_mode)) || ++ likely(!(dir->d_inode->i_mode & S_ISVTX)) || ++ uid_eq(inode->i_uid, dir->d_inode->i_uid) || ++ uid_eq(current_fsuid(), inode->i_uid)) ++ return 0; ++ ++ if (likely(dir->d_inode->i_mode & 0002) || ++ (dir->d_inode->i_mode & 0020 && ++ ((sysctl_protected_fifos >= 2 && S_ISFIFO(inode->i_mode)) || ++ (sysctl_protected_regular >= 2 && S_ISREG(inode->i_mode))))) { ++ return -EACCES; ++ } ++ return 0; ++} ++ + static __always_inline + const char *get_link(struct nameidata *nd) + { +@@ -3356,9 +3397,15 @@ finish_open: + if (error) + return error; + audit_inode(nd->name, nd->path.dentry, 0); +- error = -EISDIR; +- if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) +- goto out; ++ if (open_flag & O_CREAT) { ++ error = -EISDIR; ++ if (d_is_dir(nd->path.dentry)) ++ goto out; ++ error = may_create_in_sticky(dir, ++ d_backing_inode(nd->path.dentry)); ++ if (unlikely(error)) ++ goto out; ++ } + error = -ENOTDIR; + if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry)) + goto out; +diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h +index 5f5270941ba0..f7178f44825b 100644 +--- a/include/linux/can/dev.h ++++ b/include/linux/can/dev.h +@@ -154,6 +154,7 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, + + void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, + unsigned int idx); ++struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr); + unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); + void can_free_echo_skb(struct net_device *dev, unsigned int idx); + +diff --git a/include/linux/fs.h b/include/linux/fs.h +index e9867aff53d8..bcad2b963296 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -69,6 +69,8 @@ extern struct inodes_stat_t inodes_stat; + extern int leases_enable, lease_break_time; + extern int sysctl_protected_symlinks; + extern int sysctl_protected_hardlinks; ++extern int sysctl_protected_fifos; ++extern int sysctl_protected_regular; + + struct buffer_head; + typedef int (get_block_t)(struct inode *inode, sector_t iblock, +diff --git a/include/linux/integrity.h b/include/linux/integrity.h +index c2d6082a1a4c..858d3f4a2241 100644 +--- a/include/linux/integrity.h ++++ b/include/linux/integrity.h +@@ -14,6 +14,7 @@ + + enum integrity_status { + INTEGRITY_PASS = 0, ++ INTEGRITY_PASS_IMMUTABLE, + INTEGRITY_FAIL, + INTEGRITY_NOLABEL, + INTEGRITY_NOXATTRS, +diff --git a/include/linux/of.h b/include/linux/of.h +index 299aeb192727..a19cc85b9373 100644 +--- a/include/linux/of.h ++++ b/include/linux/of.h +@@ -275,6 +275,8 @@ extern struct device_node *of_get_next_child(const struct device_node *node, + extern struct 
device_node *of_get_next_available_child( + const struct device_node *node, struct device_node *prev); + ++extern struct device_node *of_get_compatible_child(const struct device_node *parent, ++ const char *compatible); + extern struct device_node *of_get_child_by_name(const struct device_node *node, + const char *name); + +@@ -606,6 +608,12 @@ static inline bool of_have_populated_dt(void) + return false; + } + ++static inline struct device_node *of_get_compatible_child(const struct device_node *parent, ++ const char *compatible) ++{ ++ return NULL; ++} ++ + static inline struct device_node *of_get_child_by_name( + const struct device_node *node, + const char *name) +diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h +index a3d90b9da18d..407874535fd3 100644 +--- a/include/linux/pfn_t.h ++++ b/include/linux/pfn_t.h +@@ -9,7 +9,7 @@ + * PFN_DEV - pfn is not covered by system memmap by default + * PFN_MAP - pfn has a dynamic page mapping established by a device driver + */ +-#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) ++#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) + #define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) + #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) + #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) +diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c +index 77777d918676..cc892a9e109d 100644 +--- a/kernel/debug/kdb/kdb_io.c ++++ b/kernel/debug/kdb/kdb_io.c +@@ -215,7 +215,7 @@ static char *kdb_read(char *buffer, size_t bufsize) + int count; + int i; + int diag, dtab_count; +- int key; ++ int key, buf_size, ret; + + + diag = kdbgetintenv("DTABCOUNT", &dtab_count); +@@ -335,9 +335,8 @@ poll_again: + else + p_tmp = tmpbuffer; + len = strlen(p_tmp); +- count = kallsyms_symbol_complete(p_tmp, +- sizeof(tmpbuffer) - +- (p_tmp - tmpbuffer)); ++ buf_size = sizeof(tmpbuffer) - (p_tmp - tmpbuffer); ++ count = kallsyms_symbol_complete(p_tmp, buf_size); + if (tab == 2 && count > 0) { + kdb_printf("\n%d symbols are found.", count); + if (count > dtab_count) { +@@ -349,9 +348,13 @@ poll_again: + } + kdb_printf("\n"); + for (i = 0; i < count; i++) { +- if (WARN_ON(!kallsyms_symbol_next(p_tmp, i))) ++ ret = kallsyms_symbol_next(p_tmp, i, buf_size); ++ if (WARN_ON(!ret)) + break; +- kdb_printf("%s ", p_tmp); ++ if (ret != -E2BIG) ++ kdb_printf("%s ", p_tmp); ++ else ++ kdb_printf("%s... ", p_tmp); + *(p_tmp + len) = '\0'; + } + if (i >= dtab_count) +diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h +index 75014d7f4568..533e04e75a9c 100644 +--- a/kernel/debug/kdb/kdb_private.h ++++ b/kernel/debug/kdb/kdb_private.h +@@ -83,7 +83,7 @@ typedef struct __ksymtab { + unsigned long sym_start; + unsigned long sym_end; + } kdb_symtab_t; +-extern int kallsyms_symbol_next(char *prefix_name, int flag); ++extern int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size); + extern int kallsyms_symbol_complete(char *prefix_name, int max_len); + + /* Exported Symbols for kernel loadable modules to use. */ +diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c +index d35cc2d3a4cc..2aed4a33521b 100644 +--- a/kernel/debug/kdb/kdb_support.c ++++ b/kernel/debug/kdb/kdb_support.c +@@ -221,11 +221,13 @@ int kallsyms_symbol_complete(char *prefix_name, int max_len) + * Parameters: + * prefix_name prefix of a symbol name to lookup + * flag 0 means search from the head, 1 means continue search. 
++ * buf_size maximum length that can be written to prefix_name ++ * buffer + * Returns: + * 1 if a symbol matches the given prefix. + * 0 if no string found + */ +-int kallsyms_symbol_next(char *prefix_name, int flag) ++int kallsyms_symbol_next(char *prefix_name, int flag, int buf_size) + { + int prefix_len = strlen(prefix_name); + static loff_t pos; +@@ -235,10 +237,8 @@ int kallsyms_symbol_next(char *prefix_name, int flag) + pos = 0; + + while ((name = kdb_walk_kallsyms(&pos))) { +- if (strncmp(name, prefix_name, prefix_len) == 0) { +- strncpy(prefix_name, name, strlen(name)+1); +- return 1; +- } ++ if (!strncmp(name, prefix_name, prefix_len)) ++ return strscpy(prefix_name, name, buf_size); + } + return 0; + } +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index 917be221438b..6b3fff6a6437 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -4087,8 +4087,8 @@ static int __sched_setscheduler(struct task_struct *p, + int queue_flags = DEQUEUE_SAVE | DEQUEUE_MOVE; + struct rq *rq; + +- /* may grab non-irq protected spin_locks */ +- BUG_ON(in_interrupt()); ++ /* The pi code expects interrupts enabled */ ++ BUG_ON(pi && in_interrupt()); + recheck: + /* double check policy once rq lock held */ + if (policy < 0) { +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 7df6be31be36..23f658d311c0 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -1794,6 +1794,24 @@ static struct ctl_table fs_table[] = { + .extra1 = &zero, + .extra2 = &one, + }, ++ { ++ .procname = "protected_fifos", ++ .data = &sysctl_protected_fifos, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &two, ++ }, ++ { ++ .procname = "protected_regular", ++ .data = &sysctl_protected_regular, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &two, ++ }, + { + .procname = "suid_dumpable", + .data = &suid_dumpable, +diff --git a/mm/shmem.c b/mm/shmem.c +index 4b5cca167baf..358a92be43eb 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -2414,9 +2414,7 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence) + inode_lock(inode); + /* We're holding i_mutex so we can access i_size directly */ + +- if (offset < 0) +- offset = -EINVAL; +- else if (offset >= inode->i_size) ++ if (offset < 0 || offset >= inode->i_size) + offset = -ENXIO; + else { + start = offset >> PAGE_SHIFT; +diff --git a/mm/slab.c b/mm/slab.c +index c59844dbd034..263dcda6897b 100644 +--- a/mm/slab.c ++++ b/mm/slab.c +@@ -3690,6 +3690,8 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) + struct kmem_cache *cachep; + void *ret; + ++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) ++ return NULL; + cachep = kmalloc_slab(size, flags); + if (unlikely(ZERO_OR_NULL_PTR(cachep))) + return cachep; +@@ -3725,6 +3727,8 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, + struct kmem_cache *cachep; + void *ret; + ++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) ++ return NULL; + cachep = kmalloc_slab(size, flags); + if (unlikely(ZERO_OR_NULL_PTR(cachep))) + return cachep; +diff --git a/mm/slab_common.c b/mm/slab_common.c +index 622f6b6ae844..13f1926f8fcd 100644 +--- a/mm/slab_common.c ++++ b/mm/slab_common.c +@@ -883,18 +883,18 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) + { + int index; + +- if (unlikely(size > KMALLOC_MAX_SIZE)) { +- WARN_ON_ONCE(!(flags & __GFP_NOWARN)); +- return NULL; +- } +- + if (size <= 192) { + if (!size) + return 
ZERO_SIZE_PTR; + + index = size_index[size_index_elem(size)]; +- } else ++ } else { ++ if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) { ++ WARN_ON(1); ++ return NULL; ++ } + index = fls(size - 1); ++ } + + #ifdef CONFIG_ZONE_DMA + if (unlikely((flags & GFP_DMA))) +diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h +index 3bfec472734a..78916c510d9a 100644 +--- a/net/ieee802154/6lowpan/6lowpan_i.h ++++ b/net/ieee802154/6lowpan/6lowpan_i.h +@@ -19,8 +19,8 @@ typedef unsigned __bitwise__ lowpan_rx_result; + struct frag_lowpan_compare_key { + u16 tag; + u16 d_size; +- const struct ieee802154_addr src; +- const struct ieee802154_addr dst; ++ struct ieee802154_addr src; ++ struct ieee802154_addr dst; + }; + + /* Equivalent of ipv4 struct ipq +diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c +index 6fca75581e13..aab1e2dfdfca 100644 +--- a/net/ieee802154/6lowpan/reassembly.c ++++ b/net/ieee802154/6lowpan/reassembly.c +@@ -74,14 +74,14 @@ fq_find(struct net *net, const struct lowpan_802154_cb *cb, + { + struct netns_ieee802154_lowpan *ieee802154_lowpan = + net_ieee802154_lowpan(net); +- struct frag_lowpan_compare_key key = { +- .tag = cb->d_tag, +- .d_size = cb->d_size, +- .src = *src, +- .dst = *dst, +- }; ++ struct frag_lowpan_compare_key key = {}; + struct inet_frag_queue *q; + ++ key.tag = cb->d_tag; ++ key.d_size = cb->d_size; ++ key.src = *src; ++ key.dst = *dst; ++ + q = inet_frag_find(&ieee802154_lowpan->frags, &key); + if (!q) + return NULL; +@@ -371,7 +371,7 @@ int lowpan_frag_rcv(struct sk_buff *skb, u8 frag_type) + struct lowpan_frag_queue *fq; + struct net *net = dev_net(skb->dev); + struct lowpan_802154_cb *cb = lowpan_802154_cb(skb); +- struct ieee802154_hdr hdr; ++ struct ieee802154_hdr hdr = {}; + int err; + + if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c +index 85aae8c84aeb..789e66b0187a 100644 +--- a/net/llc/af_llc.c ++++ b/net/llc/af_llc.c +@@ -726,7 +726,6 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + struct sk_buff *skb = NULL; + struct sock *sk = sock->sk; + struct llc_sock *llc = llc_sk(sk); +- unsigned long cpu_flags; + size_t copied = 0; + u32 peek_seq = 0; + u32 *seq, skb_len; +@@ -851,9 +850,8 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + goto copy_uaddr; + + if (!(flags & MSG_PEEK)) { +- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); +- sk_eat_skb(sk, skb); +- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); ++ skb_unlink(skb, &sk->sk_receive_queue); ++ kfree_skb(skb); + *seq = 0; + } + +@@ -874,9 +872,8 @@ copy_uaddr: + llc_cmsg_rcv(msg, skb); + + if (!(flags & MSG_PEEK)) { +- spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags); +- sk_eat_skb(sk, skb); +- spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags); ++ skb_unlink(skb, &sk->sk_receive_queue); ++ kfree_skb(skb); + *seq = 0; + } + +diff --git a/net/sctp/associola.c b/net/sctp/associola.c +index 738c55e994c4..7e127cde1ccc 100644 +--- a/net/sctp/associola.c ++++ b/net/sctp/associola.c +@@ -488,8 +488,9 @@ void sctp_assoc_set_primary(struct sctp_association *asoc, + void sctp_assoc_rm_peer(struct sctp_association *asoc, + struct sctp_transport *peer) + { +- struct list_head *pos; +- struct sctp_transport *transport; ++ struct sctp_transport *transport; ++ struct list_head *pos; ++ struct sctp_chunk *ch; + + pr_debug("%s: association:%p addr:%pISpc\n", + __func__, asoc, 
&peer->ipaddr.sa); +@@ -547,7 +548,6 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, + */ + if (!list_empty(&peer->transmitted)) { + struct sctp_transport *active = asoc->peer.active_path; +- struct sctp_chunk *ch; + + /* Reset the transport of each chunk on this list */ + list_for_each_entry(ch, &peer->transmitted, +@@ -569,6 +569,10 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, + sctp_transport_hold(active); + } + ++ list_for_each_entry(ch, &asoc->outqueue.out_chunk_list, list) ++ if (ch->transport == peer) ++ ch->transport = NULL; ++ + asoc->peer.transport_count--; + + sctp_transport_free(peer); +diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c +index f1df9837f1ac..1ac08dcbf85d 100644 +--- a/net/sunrpc/auth_generic.c ++++ b/net/sunrpc/auth_generic.c +@@ -281,13 +281,7 @@ static bool generic_key_to_expire(struct rpc_cred *cred) + { + struct auth_cred *acred = &container_of(cred, struct generic_cred, + gc_base)->acred; +- bool ret; +- +- get_rpccred(cred); +- ret = test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags); +- put_rpccred(cred); +- +- return ret; ++ return test_bit(RPC_CRED_KEY_EXPIRE_SOON, &acred->ac_flags); + } + + static const struct rpc_credops generic_credops = { +diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h +index f5f12727771a..2ff02459fcfd 100644 +--- a/security/integrity/evm/evm.h ++++ b/security/integrity/evm/evm.h +@@ -48,7 +48,7 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, + size_t req_xattr_value_len, char *digest); + int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name, + const char *req_xattr_value, +- size_t req_xattr_value_len, char *digest); ++ size_t req_xattr_value_len, char type, char *digest); + int evm_init_hmac(struct inode *inode, const struct xattr *xattr, + char *hmac_val); + int evm_init_secfs(void); +diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c +index 6fcbd8e99baf..c783fefa558a 100644 +--- a/security/integrity/evm/evm_crypto.c ++++ b/security/integrity/evm/evm_crypto.c +@@ -139,7 +139,7 @@ out: + * protection.) + */ + static void hmac_add_misc(struct shash_desc *desc, struct inode *inode, +- char *digest) ++ char type, char *digest) + { + struct h_misc { + unsigned long ino; +@@ -150,13 +150,27 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode, + } hmac_misc; + + memset(&hmac_misc, 0, sizeof(hmac_misc)); +- hmac_misc.ino = inode->i_ino; +- hmac_misc.generation = inode->i_generation; +- hmac_misc.uid = from_kuid(inode->i_sb->s_user_ns, inode->i_uid); +- hmac_misc.gid = from_kgid(inode->i_sb->s_user_ns, inode->i_gid); ++ /* Don't include the inode or generation number in portable ++ * signatures ++ */ ++ if (type != EVM_XATTR_PORTABLE_DIGSIG) { ++ hmac_misc.ino = inode->i_ino; ++ hmac_misc.generation = inode->i_generation; ++ } ++ /* The hmac uid and gid must be encoded in the initial user ++ * namespace (not the filesystems user namespace) as encoding ++ * them in the filesystems user namespace allows an attack ++ * where first they are written in an unprivileged fuse mount ++ * of a filesystem and then the system is tricked to mount the ++ * filesystem for real on next boot and trust it because ++ * everything is signed. 
++ */ ++ hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid); ++ hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid); + hmac_misc.mode = inode->i_mode; + crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc)); +- if (evm_hmac_attrs & EVM_ATTR_FSUUID) ++ if ((evm_hmac_attrs & EVM_ATTR_FSUUID) && ++ type != EVM_XATTR_PORTABLE_DIGSIG) + crypto_shash_update(desc, inode->i_sb->s_uuid, + sizeof(inode->i_sb->s_uuid)); + crypto_shash_final(desc, digest); +@@ -182,6 +196,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, + char *xattr_value = NULL; + int error; + int size; ++ bool ima_present = false; + + if (!(inode->i_opflags & IOP_XATTR)) + return -EOPNOTSUPP; +@@ -192,11 +207,18 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, + + error = -ENODATA; + for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) { ++ bool is_ima = false; ++ ++ if (strcmp(*xattrname, XATTR_NAME_IMA) == 0) ++ is_ima = true; ++ + if ((req_xattr_name && req_xattr_value) + && !strcmp(*xattrname, req_xattr_name)) { + error = 0; + crypto_shash_update(desc, (const u8 *)req_xattr_value, + req_xattr_value_len); ++ if (is_ima) ++ ima_present = true; + continue; + } + size = vfs_getxattr_alloc(dentry, *xattrname, +@@ -211,9 +233,14 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry, + error = 0; + xattr_size = size; + crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size); ++ if (is_ima) ++ ima_present = true; + } +- hmac_add_misc(desc, inode, digest); ++ hmac_add_misc(desc, inode, type, digest); + ++ /* Portable EVM signatures must include an IMA hash */ ++ if (type == EVM_XATTR_PORTABLE_DIGSIG && !ima_present) ++ return -EPERM; + out: + kfree(xattr_value); + kfree(desc); +@@ -225,17 +252,45 @@ int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, + char *digest) + { + return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value, +- req_xattr_value_len, EVM_XATTR_HMAC, digest); ++ req_xattr_value_len, EVM_XATTR_HMAC, digest); + } + + int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name, + const char *req_xattr_value, size_t req_xattr_value_len, +- char *digest) ++ char type, char *digest) + { + return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value, +- req_xattr_value_len, IMA_XATTR_DIGEST, digest); ++ req_xattr_value_len, type, digest); ++} ++ ++static int evm_is_immutable(struct dentry *dentry, struct inode *inode) ++{ ++ const struct evm_ima_xattr_data *xattr_data = NULL; ++ struct integrity_iint_cache *iint; ++ int rc = 0; ++ ++ iint = integrity_iint_find(inode); ++ if (iint && (iint->flags & EVM_IMMUTABLE_DIGSIG)) ++ return 1; ++ ++ /* Do this the hard way */ ++ rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0, ++ GFP_NOFS); ++ if (rc <= 0) { ++ if (rc == -ENODATA) ++ return 0; ++ return rc; ++ } ++ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG) ++ rc = 1; ++ else ++ rc = 0; ++ ++ kfree(xattr_data); ++ return rc; + } + ++ + /* + * Calculate the hmac and update security.evm xattr + * +@@ -248,6 +303,16 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name, + struct evm_ima_xattr_data xattr_data; + int rc = 0; + ++ /* ++ * Don't permit any transformation of the EVM xattr if the signature ++ * is of an immutable type ++ */ ++ rc = evm_is_immutable(dentry, inode); ++ if (rc < 0) ++ return rc; ++ if (rc) ++ return -EPERM; ++ + rc = evm_calc_hmac(dentry, xattr_name, xattr_value, + xattr_value_len, xattr_data.digest); + if (rc == 0) { +@@ -273,7 +338,7 @@ int 
evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr, + } + + crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len); +- hmac_add_misc(desc, inode, hmac_val); ++ hmac_add_misc(desc, inode, EVM_XATTR_HMAC, hmac_val); + kfree(desc); + return 0; + } +diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c +index ba8615576d4d..976b8dce6496 100644 +--- a/security/integrity/evm/evm_main.c ++++ b/security/integrity/evm/evm_main.c +@@ -29,7 +29,7 @@ + int evm_initialized; + + static char *integrity_status_msg[] = { +- "pass", "fail", "no_label", "no_xattrs", "unknown" ++ "pass", "pass_immutable", "fail", "no_label", "no_xattrs", "unknown" + }; + char *evm_hmac = "hmac(sha1)"; + char *evm_hash = "sha1"; +@@ -118,7 +118,8 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, + enum integrity_status evm_status = INTEGRITY_PASS; + int rc, xattr_len; + +- if (iint && iint->evm_status == INTEGRITY_PASS) ++ if (iint && (iint->evm_status == INTEGRITY_PASS || ++ iint->evm_status == INTEGRITY_PASS_IMMUTABLE)) + return iint->evm_status; + + /* if status is not PASS, try to check again - against -ENOMEM */ +@@ -155,22 +156,26 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry, + rc = -EINVAL; + break; + case EVM_IMA_XATTR_DIGSIG: ++ case EVM_XATTR_PORTABLE_DIGSIG: + rc = evm_calc_hash(dentry, xattr_name, xattr_value, +- xattr_value_len, calc.digest); ++ xattr_value_len, xattr_data->type, ++ calc.digest); + if (rc) + break; + rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM, + (const char *)xattr_data, xattr_len, + calc.digest, sizeof(calc.digest)); + if (!rc) { +- /* Replace RSA with HMAC if not mounted readonly and +- * not immutable +- */ +- if (!IS_RDONLY(d_backing_inode(dentry)) && +- !IS_IMMUTABLE(d_backing_inode(dentry))) ++ if (xattr_data->type == EVM_XATTR_PORTABLE_DIGSIG) { ++ if (iint) ++ iint->flags |= EVM_IMMUTABLE_DIGSIG; ++ evm_status = INTEGRITY_PASS_IMMUTABLE; ++ } else if (!IS_RDONLY(d_backing_inode(dentry)) && ++ !IS_IMMUTABLE(d_backing_inode(dentry))) { + evm_update_evmxattr(dentry, xattr_name, + xattr_value, + xattr_value_len); ++ } + } + break; + default: +@@ -271,7 +276,7 @@ static enum integrity_status evm_verify_current_integrity(struct dentry *dentry) + * affect security.evm. An interesting side affect of writing posix xattr + * acls is their modifying of the i_mode, which is included in security.evm. + * For posix xattr acls only, permit security.evm, even if it currently +- * doesn't exist, to be updated. ++ * doesn't exist, to be updated unless the EVM signature is immutable. + */ + static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name, + const void *xattr_value, size_t xattr_value_len) +@@ -339,7 +344,8 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name, + if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) { + if (!xattr_value_len) + return -EINVAL; +- if (xattr_data->type != EVM_IMA_XATTR_DIGSIG) ++ if (xattr_data->type != EVM_IMA_XATTR_DIGSIG && ++ xattr_data->type != EVM_XATTR_PORTABLE_DIGSIG) + return -EPERM; + } + return evm_protect_xattr(dentry, xattr_name, xattr_value, +@@ -416,6 +422,9 @@ void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name) + /** + * evm_inode_setattr - prevent updating an invalid EVM extended attribute + * @dentry: pointer to the affected dentry ++ * ++ * Permit update of file attributes when files have a valid EVM signature, ++ * except in the case of them having an immutable portable signature. 
+ */ + int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) + { +diff --git a/security/integrity/iint.c b/security/integrity/iint.c +index c710d22042f9..7ea39b19e8ad 100644 +--- a/security/integrity/iint.c ++++ b/security/integrity/iint.c +@@ -74,6 +74,7 @@ static void iint_free(struct integrity_iint_cache *iint) + iint->ima_hash = NULL; + iint->version = 0; + iint->flags = 0UL; ++ iint->atomic_flags = 0UL; + iint->ima_file_status = INTEGRITY_UNKNOWN; + iint->ima_mmap_status = INTEGRITY_UNKNOWN; + iint->ima_bprm_status = INTEGRITY_UNKNOWN; +@@ -155,12 +156,14 @@ static void init_once(void *foo) + memset(iint, 0, sizeof(*iint)); + iint->version = 0; + iint->flags = 0UL; ++ iint->atomic_flags = 0; + iint->ima_file_status = INTEGRITY_UNKNOWN; + iint->ima_mmap_status = INTEGRITY_UNKNOWN; + iint->ima_bprm_status = INTEGRITY_UNKNOWN; + iint->ima_read_status = INTEGRITY_UNKNOWN; + iint->evm_status = INTEGRITY_UNKNOWN; + iint->measured_pcrs = 0; ++ mutex_init(&iint->mutex); + } + + static int __init integrity_iintcache_init(void) +diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c +index d01a52f8f708..3b43057bf949 100644 +--- a/security/integrity/ima/ima_api.c ++++ b/security/integrity/ima/ima_api.c +@@ -198,42 +198,59 @@ int ima_collect_measurement(struct integrity_iint_cache *iint, + struct inode *inode = file_inode(file); + const char *filename = file->f_path.dentry->d_name.name; + int result = 0; ++ int length; ++ void *tmpbuf; ++ u64 i_version; + struct { + struct ima_digest_data hdr; + char digest[IMA_MAX_DIGEST_SIZE]; + } hash; + +- if (!(iint->flags & IMA_COLLECTED)) { +- u64 i_version = file_inode(file)->i_version; ++ if (iint->flags & IMA_COLLECTED) ++ goto out; + +- if (file->f_flags & O_DIRECT) { +- audit_cause = "failed(directio)"; +- result = -EACCES; +- goto out; +- } ++ /* ++ * Dectecting file change is based on i_version. On filesystems ++ * which do not support i_version, support is limited to an initial ++ * measurement/appraisal/audit. ++ */ ++ i_version = file_inode(file)->i_version; ++ hash.hdr.algo = algo; + +- hash.hdr.algo = algo; +- +- result = (!buf) ? ima_calc_file_hash(file, &hash.hdr) : +- ima_calc_buffer_hash(buf, size, &hash.hdr); +- if (!result) { +- int length = sizeof(hash.hdr) + hash.hdr.length; +- void *tmpbuf = krealloc(iint->ima_hash, length, +- GFP_NOFS); +- if (tmpbuf) { +- iint->ima_hash = tmpbuf; +- memcpy(iint->ima_hash, &hash, length); +- iint->version = i_version; +- iint->flags |= IMA_COLLECTED; +- } else +- result = -ENOMEM; +- } ++ /* Initialize hash digest to 0's in case of failure */ ++ memset(&hash.digest, 0, sizeof(hash.digest)); ++ ++ if (buf) ++ result = ima_calc_buffer_hash(buf, size, &hash.hdr); ++ else ++ result = ima_calc_file_hash(file, &hash.hdr); ++ ++ if (result && result != -EBADF && result != -EINVAL) ++ goto out; ++ ++ length = sizeof(hash.hdr) + hash.hdr.length; ++ tmpbuf = krealloc(iint->ima_hash, length, GFP_NOFS); ++ if (!tmpbuf) { ++ result = -ENOMEM; ++ goto out; + } ++ ++ iint->ima_hash = tmpbuf; ++ memcpy(iint->ima_hash, &hash, length); ++ iint->version = i_version; ++ ++ /* Possibly temporary failure due to type of read (eg. 
O_DIRECT) */ ++ if (!result) ++ iint->flags |= IMA_COLLECTED; + out: +- if (result) ++ if (result) { ++ if (file->f_flags & O_DIRECT) ++ audit_cause = "failed(directio)"; ++ + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, + filename, "collect_data", audit_cause, + result, 0); ++ } + return result; + } + +@@ -277,7 +294,7 @@ void ima_store_measurement(struct integrity_iint_cache *iint, + } + + result = ima_store_template(entry, violation, inode, filename, pcr); +- if (!result || result == -EEXIST) { ++ if ((!result || result == -EEXIST) && !(file->f_flags & O_DIRECT)) { + iint->flags |= IMA_MEASURED; + iint->measured_pcrs |= (0x1 << pcr); + } +diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c +index 1e6f23f77f15..af55c31754a4 100644 +--- a/security/integrity/ima/ima_appraise.c ++++ b/security/integrity/ima/ima_appraise.c +@@ -214,7 +214,9 @@ int ima_appraise_measurement(enum ima_hooks func, + } + + status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint); +- if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) { ++ if ((status != INTEGRITY_PASS) && ++ (status != INTEGRITY_PASS_IMMUTABLE) && ++ (status != INTEGRITY_UNKNOWN)) { + if ((status == INTEGRITY_NOLABEL) + || (status == INTEGRITY_NOXATTRS)) + cause = "missing-HMAC"; +@@ -232,6 +234,7 @@ int ima_appraise_measurement(enum ima_hooks func, + status = INTEGRITY_FAIL; + break; + } ++ clear_bit(IMA_DIGSIG, &iint->atomic_flags); + if (xattr_len - sizeof(xattr_value->type) - hash_start >= + iint->ima_hash->length) + /* xattr length may be longer. md5 hash in previous +@@ -250,7 +253,7 @@ int ima_appraise_measurement(enum ima_hooks func, + status = INTEGRITY_PASS; + break; + case EVM_IMA_XATTR_DIGSIG: +- iint->flags |= IMA_DIGSIG; ++ set_bit(IMA_DIGSIG, &iint->atomic_flags); + rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA, + (const char *)xattr_value, rc, + iint->ima_hash->digest, +@@ -301,7 +304,7 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) + int rc = 0; + + /* do not collect and update hash for digital signatures */ +- if (iint->flags & IMA_DIGSIG) ++ if (test_bit(IMA_DIGSIG, &iint->atomic_flags)) + return; + + if (iint->ima_file_status != INTEGRITY_PASS) +@@ -311,7 +314,9 @@ void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) + if (rc < 0) + return; + ++ inode_lock(file_inode(file)); + ima_fix_xattr(dentry, iint); ++ inode_unlock(file_inode(file)); + } + + /** +@@ -334,16 +339,14 @@ void ima_inode_post_setattr(struct dentry *dentry) + return; + + must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR); ++ if (!must_appraise) ++ __vfs_removexattr(dentry, XATTR_NAME_IMA); + iint = integrity_iint_find(inode); + if (iint) { +- iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED | +- IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK | +- IMA_ACTION_RULE_FLAGS); +- if (must_appraise) +- iint->flags |= IMA_APPRAISE; ++ set_bit(IMA_CHANGE_ATTR, &iint->atomic_flags); ++ if (!must_appraise) ++ clear_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); + } +- if (!must_appraise) +- __vfs_removexattr(dentry, XATTR_NAME_IMA); + } + + /* +@@ -372,12 +375,12 @@ static void ima_reset_appraise_flags(struct inode *inode, int digsig) + iint = integrity_iint_find(inode); + if (!iint) + return; +- +- iint->flags &= ~IMA_DONE_MASK; + iint->measured_pcrs = 0; ++ set_bit(IMA_CHANGE_XATTR, &iint->atomic_flags); + if (digsig) +- iint->flags |= IMA_DIGSIG; +- return; ++ set_bit(IMA_DIGSIG, &iint->atomic_flags); ++ else ++ clear_bit(IMA_DIGSIG, 
&iint->atomic_flags); + } + + int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, +diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c +index 93f09173cc49..20e66291ca99 100644 +--- a/security/integrity/ima/ima_crypto.c ++++ b/security/integrity/ima/ima_crypto.c +@@ -443,6 +443,16 @@ int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) + loff_t i_size; + int rc; + ++ /* ++ * For consistency, fail file's opened with the O_DIRECT flag on ++ * filesystems mounted with/without DAX option. ++ */ ++ if (file->f_flags & O_DIRECT) { ++ hash->length = hash_digest_size[ima_hash_algo]; ++ hash->algo = ima_hash_algo; ++ return -EINVAL; ++ } ++ + i_size = i_size_read(file_inode(file)); + + if (ima_ahash_minsize && i_size >= ima_ahash_minsize) { +diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c +index 9652541c4d43..ea1e629a5d4c 100644 +--- a/security/integrity/ima/ima_main.c ++++ b/security/integrity/ima/ima_main.c +@@ -99,10 +99,13 @@ static void ima_rdwr_violation_check(struct file *file, + if (!iint) + iint = integrity_iint_find(inode); + /* IMA_MEASURE is set from reader side */ +- if (iint && (iint->flags & IMA_MEASURE)) ++ if (iint && test_bit(IMA_MUST_MEASURE, ++ &iint->atomic_flags)) + send_tomtou = true; + } + } else { ++ if (must_measure) ++ set_bit(IMA_MUST_MEASURE, &iint->atomic_flags); + if ((atomic_read(&inode->i_writecount) > 0) && must_measure) + send_writers = true; + } +@@ -124,21 +127,24 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint, + struct inode *inode, struct file *file) + { + fmode_t mode = file->f_mode; ++ bool update; + + if (!(mode & FMODE_WRITE)) + return; + +- inode_lock(inode); ++ mutex_lock(&iint->mutex); + if (atomic_read(&inode->i_writecount) == 1) { ++ update = test_and_clear_bit(IMA_UPDATE_XATTR, ++ &iint->atomic_flags); + if ((iint->version != inode->i_version) || + (iint->flags & IMA_NEW_FILE)) { + iint->flags &= ~(IMA_DONE_MASK | IMA_NEW_FILE); + iint->measured_pcrs = 0; +- if (iint->flags & IMA_APPRAISE) ++ if (update) + ima_update_xattr(iint, file); + } + } +- inode_unlock(inode); ++ mutex_unlock(&iint->mutex); + } + + /** +@@ -171,7 +177,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size, + char *pathbuf = NULL; + char filename[NAME_MAX]; + const char *pathname = NULL; +- int rc = -ENOMEM, action, must_appraise; ++ int rc = 0, action, must_appraise = 0; + int pcr = CONFIG_IMA_MEASURE_PCR_IDX; + struct evm_ima_xattr_data *xattr_value = NULL; + int xattr_len = 0; +@@ -202,17 +208,31 @@ static int process_measurement(struct file *file, char *buf, loff_t size, + if (action) { + iint = integrity_inode_get(inode); + if (!iint) +- goto out; ++ rc = -ENOMEM; + } + +- if (violation_check) { ++ if (!rc && violation_check) + ima_rdwr_violation_check(file, iint, action & IMA_MEASURE, + &pathbuf, &pathname); +- if (!action) { +- rc = 0; +- goto out_free; +- } +- } ++ ++ inode_unlock(inode); ++ ++ if (rc) ++ goto out; ++ if (!action) ++ goto out; ++ ++ mutex_lock(&iint->mutex); ++ ++ if (test_and_clear_bit(IMA_CHANGE_ATTR, &iint->atomic_flags)) ++ /* reset appraisal flags if ima_inode_post_setattr was called */ ++ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED | ++ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK | ++ IMA_ACTION_FLAGS); ++ ++ if (test_and_clear_bit(IMA_CHANGE_XATTR, &iint->atomic_flags)) ++ /* reset all flags if ima_inode_setxattr was called */ ++ iint->flags &= ~IMA_DONE_MASK; + + /* Determine if already 
appraised/measured based on bitmask + * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED, +@@ -230,7 +250,7 @@ static int process_measurement(struct file *file, char *buf, loff_t size, + if (!action) { + if (must_appraise) + rc = ima_get_cache_status(iint, func); +- goto out_digsig; ++ goto out_locked; + } + + template_desc = ima_template_desc_current(); +@@ -242,11 +262,8 @@ static int process_measurement(struct file *file, char *buf, loff_t size, + hash_algo = ima_get_hash_algo(xattr_value, xattr_len); + + rc = ima_collect_measurement(iint, file, buf, size, hash_algo); +- if (rc != 0) { +- if (file->f_flags & O_DIRECT) +- rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES; +- goto out_digsig; +- } ++ if (rc != 0 && rc != -EBADF && rc != -EINVAL) ++ goto out_locked; + + if (!pathbuf) /* ima_rdwr_violation possibly pre-fetched */ + pathname = ima_d_path(&file->f_path, &pathbuf, filename); +@@ -254,24 +271,32 @@ static int process_measurement(struct file *file, char *buf, loff_t size, + if (action & IMA_MEASURE) + ima_store_measurement(iint, file, pathname, + xattr_value, xattr_len, pcr); +- if (action & IMA_APPRAISE_SUBMASK) ++ if (rc == 0 && (action & IMA_APPRAISE_SUBMASK)) { ++ inode_lock(inode); + rc = ima_appraise_measurement(func, iint, file, pathname, + xattr_value, xattr_len, opened); ++ inode_unlock(inode); ++ } + if (action & IMA_AUDIT) + ima_audit_measurement(iint, pathname); + +-out_digsig: +- if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG) && ++ if ((file->f_flags & O_DIRECT) && (iint->flags & IMA_PERMIT_DIRECTIO)) ++ rc = 0; ++out_locked: ++ if ((mask & MAY_WRITE) && test_bit(IMA_DIGSIG, &iint->atomic_flags) && + !(iint->flags & IMA_NEW_FILE)) + rc = -EACCES; ++ mutex_unlock(&iint->mutex); + kfree(xattr_value); +-out_free: ++out: + if (pathbuf) + __putname(pathbuf); +-out: +- inode_unlock(inode); +- if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE)) +- return -EACCES; ++ if (must_appraise) { ++ if (rc && (ima_appraise & IMA_APPRAISE_ENFORCE)) ++ return -EACCES; ++ if (file->f_mode & FMODE_WRITE) ++ set_bit(IMA_UPDATE_XATTR, &iint->atomic_flags); ++ } + return 0; + } + +diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h +index 24520b4ef3b0..2f7e236b931c 100644 +--- a/security/integrity/integrity.h ++++ b/security/integrity/integrity.h +@@ -29,10 +29,10 @@ + /* iint cache flags */ + #define IMA_ACTION_FLAGS 0xff000000 + #define IMA_ACTION_RULE_FLAGS 0x06000000 +-#define IMA_DIGSIG 0x01000000 +-#define IMA_DIGSIG_REQUIRED 0x02000000 +-#define IMA_PERMIT_DIRECTIO 0x04000000 +-#define IMA_NEW_FILE 0x08000000 ++#define IMA_DIGSIG_REQUIRED 0x01000000 ++#define IMA_PERMIT_DIRECTIO 0x02000000 ++#define IMA_NEW_FILE 0x04000000 ++#define EVM_IMMUTABLE_DIGSIG 0x08000000 + + #define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \ + IMA_APPRAISE_SUBMASK) +@@ -53,11 +53,19 @@ + #define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \ + IMA_BPRM_APPRAISED | IMA_READ_APPRAISED) + ++/* iint cache atomic_flags */ ++#define IMA_CHANGE_XATTR 0 ++#define IMA_UPDATE_XATTR 1 ++#define IMA_CHANGE_ATTR 2 ++#define IMA_DIGSIG 3 ++#define IMA_MUST_MEASURE 4 ++ + enum evm_ima_xattr_type { + IMA_XATTR_DIGEST = 0x01, + EVM_XATTR_HMAC, + EVM_IMA_XATTR_DIGSIG, + IMA_XATTR_DIGEST_NG, ++ EVM_XATTR_PORTABLE_DIGSIG, + IMA_XATTR_LAST + }; + +@@ -100,10 +108,12 @@ struct signature_v2_hdr { + /* integrity data associated with an inode */ + struct integrity_iint_cache { + struct rb_node rb_node; /* rooted in 
integrity_iint_tree */ ++ struct mutex mutex; /* protects: version, flags, digest */ + struct inode *inode; /* back pointer to inode in question */ + u64 version; /* track inode changes */ + unsigned long flags; + unsigned long measured_pcrs; ++ unsigned long atomic_flags; + enum integrity_status ima_file_status:4; + enum integrity_status ima_mmap_status:4; + enum integrity_status ima_bprm_status:4; +diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c +index d719db4219cd..175e4dce58df 100644 +--- a/security/selinux/ss/policydb.c ++++ b/security/selinux/ss/policydb.c +@@ -1097,7 +1097,7 @@ static int str_read(char **strp, gfp_t flags, void *fp, u32 len) + if ((len == 0) || (len == (u32)-1)) + return -EINVAL; + +- str = kmalloc(len + 1, flags); ++ str = kmalloc(len + 1, flags | __GFP_NOWARN); + if (!str) + return -ENOMEM; + +diff --git a/tools/power/cpupower/bench/Makefile b/tools/power/cpupower/bench/Makefile +index 3e59f1aa3947..8a285bca8e6c 100644 +--- a/tools/power/cpupower/bench/Makefile ++++ b/tools/power/cpupower/bench/Makefile +@@ -8,7 +8,7 @@ endif + ifeq ($(strip $(STATIC)),true) + LIBS = -L../ -L$(OUTPUT) -lm + OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o \ +- $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/sysfs.o ++ $(OUTPUT)../lib/cpufreq.o $(OUTPUT)../lib/cpupower.o + else + LIBS = -L../ -L$(OUTPUT) -lm -lcpupower + OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o +diff --git a/tools/power/cpupower/lib/cpufreq.c b/tools/power/cpupower/lib/cpufreq.c +index 1b993fe1ce23..0c0f3e3f0d80 100644 +--- a/tools/power/cpupower/lib/cpufreq.c ++++ b/tools/power/cpupower/lib/cpufreq.c +@@ -28,7 +28,7 @@ static unsigned int sysfs_cpufreq_read_file(unsigned int cpu, const char *fname, + + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/cpufreq/%s", + cpu, fname); +- return sysfs_read_file(path, buf, buflen); ++ return cpupower_read_sysfs(path, buf, buflen); + } + + /* helper function to write a new value to a /sys file */ +diff --git a/tools/power/cpupower/lib/cpuidle.c b/tools/power/cpupower/lib/cpuidle.c +index 9bd4c7655fdb..852d25462388 100644 +--- a/tools/power/cpupower/lib/cpuidle.c ++++ b/tools/power/cpupower/lib/cpuidle.c +@@ -319,7 +319,7 @@ static unsigned int sysfs_cpuidle_read_file(const char *fname, char *buf, + + snprintf(path, sizeof(path), PATH_TO_CPU "cpuidle/%s", fname); + +- return sysfs_read_file(path, buf, buflen); ++ return cpupower_read_sysfs(path, buf, buflen); + } + + +diff --git a/tools/power/cpupower/lib/cpupower.c b/tools/power/cpupower/lib/cpupower.c +index 9c395ec924de..9711d628b0f4 100644 +--- a/tools/power/cpupower/lib/cpupower.c ++++ b/tools/power/cpupower/lib/cpupower.c +@@ -15,7 +15,7 @@ + #include "cpupower.h" + #include "cpupower_intern.h" + +-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen) ++unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen) + { + int fd; + ssize_t numread; +@@ -95,7 +95,7 @@ static int sysfs_topology_read_file(unsigned int cpu, const char *fname, int *re + + snprintf(path, sizeof(path), PATH_TO_CPU "cpu%u/topology/%s", + cpu, fname); +- if (sysfs_read_file(path, linebuf, MAX_LINE_LEN) == 0) ++ if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0) + return -1; + *result = strtol(linebuf, &endp, 0); + if (endp == linebuf || errno == ERANGE) +diff --git a/tools/power/cpupower/lib/cpupower_intern.h b/tools/power/cpupower/lib/cpupower_intern.h +index f8ec4009621c..433fa8619679 100644 +--- 
a/tools/power/cpupower/lib/cpupower_intern.h
++++ b/tools/power/cpupower/lib/cpupower_intern.h
+@@ -2,4 +2,4 @@
+ #define MAX_LINE_LEN 4096
+ #define SYSFS_PATH_MAX 255
+
+-unsigned int sysfs_read_file(const char *path, char *buf, size_t buflen);
++unsigned int cpupower_read_sysfs(const char *path, char *buf, size_t buflen);
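
The closing hunks rename the cpupower library's internal sysfs reader from sysfs_read_file() to cpupower_read_sysfs() and point the static bench build at ../lib/cpupower.o instead of ../lib/sysfs.o, so cpufreq.c, cpuidle.c and cpupower.c all go through the prefixed helper. As a rough, hedged illustration of the kind of helper involved, the standalone C sketch below reads a sysfs attribute into a caller-supplied buffer and returns the number of bytes read (0 on any error). The function name demo_read_sysfs and the cpufreq path used in main() are placeholders chosen for this example; this is not the code shipped in tools/power/cpupower.

/* Illustrative sketch only: open a sysfs attribute, read at most buflen-1
 * bytes, NUL-terminate, and return the byte count (0 on any error).
 * demo_read_sysfs() and the path below are assumptions for demonstration,
 * not the actual tools/power/cpupower implementation. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static unsigned int demo_read_sysfs(const char *path, char *buf, size_t buflen)
{
	ssize_t numread;
	int fd;

	if (buflen == 0)
		return 0;

	fd = open(path, O_RDONLY);
	if (fd == -1)
		return 0;

	numread = read(fd, buf, buflen - 1);
	close(fd);
	if (numread < 1)
		return 0;

	buf[numread] = '\0';
	return (unsigned int) numread;
}

int main(void)
{
	char line[256];

	/* Example attribute; present on most Linux systems with cpufreq enabled. */
	if (demo_read_sysfs("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq",
			    line, sizeof(line)))
		printf("cpu0 current frequency: %s", line);
	return 0;
}

Returning 0 for both open() and read() failures mirrors how the call sites in the patch treat the helper (for example, sysfs_topology_read_file bails out when cpupower_read_sysfs() returns 0), at the cost of not distinguishing why the read failed.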