author	Mike Pagano <mpagano@gentoo.org>	2019-08-09 13:34:19 -0400
committer	Mike Pagano <mpagano@gentoo.org>	2019-08-09 13:34:19 -0400
commit	fcd2c7c7ef7fa5514960a5210ef967ab25d31730 (patch)
tree	b03481413716cd51136876791a1309d1b1b38236
parent	Linux patch 4.14.137 (diff)
download	linux-patches-fcd2c7c7ef7fa5514960a5210ef967ab25d31730.tar.gz
	linux-patches-fcd2c7c7ef7fa5514960a5210ef967ab25d31730.tar.bz2
	linux-patches-fcd2c7c7ef7fa5514960a5210ef967ab25d31730.zip
Linux patch 4.14.138 (4.14-147)
Signed-off-by: Mike Pagano <mpagano@gentoo.org>
-rw-r--r--	0000_README	|    4
-rw-r--r--	1137_linux-4.14.138.patch	| 1356
2 files changed, 1360 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 5aa7458d..2b98c17c 100644
--- a/0000_README
+++ b/0000_README
@@ -591,6 +591,10 @@ Patch: 1136_linux-4.14.137.patch
From: https://www.kernel.org
Desc: Linux 4.14.137
+Patch: 1137_linux-4.14.138.patch
+From: https://www.kernel.org
+Desc: Linux 4.14.138
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1137_linux-4.14.138.patch b/1137_linux-4.14.138.patch
new file mode 100644
index 00000000..6fa2c7d1
--- /dev/null
+++ b/1137_linux-4.14.138.patch
@@ -0,0 +1,1356 @@
+diff --git a/Makefile b/Makefile
+index ff604059b6a8..82ae13348266 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,7 +1,7 @@
+ # SPDX-License-Identifier: GPL-2.0
+ VERSION = 4
+ PATCHLEVEL = 14
+-SUBLEVEL = 137
++SUBLEVEL = 138
+ EXTRAVERSION =
+ NAME = Petit Gorille
+
+diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+index a7883676f675..b144a6a5d352 100644
+--- a/arch/arm/boot/dts/logicpd-som-lv.dtsi
++++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi
+@@ -115,10 +115,14 @@
+ };
+
+ &i2c2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&i2c2_pins>;
+ clock-frequency = <400000>;
+ };
+
+ &i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&i2c3_pins>;
+ clock-frequency = <400000>;
+ };
+
+@@ -241,6 +245,18 @@
+ OMAP3_WKUP_IOPAD(0x2a0c, PIN_OUTPUT | MUX_MODE4) /* sys_boot1.gpio_3 */
+ >;
+ };
++ i2c2_pins: pinmux_i2c2_pins {
++ pinctrl-single,pins = <
++ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
++ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
++ >;
++ };
++ i2c3_pins: pinmux_i2c3_pins {
++ pinctrl-single,pins = <
++ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
++ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
++ >;
++ };
+ };
+
+ &omap3_pmx_core2 {
+diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+index cf22b35f0a28..fe4cbdc72359 100644
+--- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
++++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
+@@ -121,10 +121,14 @@
+ };
+
+ &i2c2 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&i2c2_pins>;
+ clock-frequency = <400000>;
+ };
+
+ &i2c3 {
++ pinctrl-names = "default";
++ pinctrl-0 = <&i2c3_pins>;
+ clock-frequency = <400000>;
+ at24@50 {
+ compatible = "atmel,24c64";
+@@ -219,6 +223,18 @@
+ OMAP3_CORE1_IOPAD(0x21bc, PIN_INPUT | MUX_MODE0) /* i2c1_sda.i2c1_sda */
+ >;
+ };
++ i2c2_pins: pinmux_i2c2_pins {
++ pinctrl-single,pins = <
++ OMAP3_CORE1_IOPAD(0x21be, PIN_INPUT | MUX_MODE0) /* i2c2_scl */
++ OMAP3_CORE1_IOPAD(0x21c0, PIN_INPUT | MUX_MODE0) /* i2c2_sda */
++ >;
++ };
++ i2c3_pins: pinmux_i2c3_pins {
++ pinctrl-single,pins = <
++ OMAP3_CORE1_IOPAD(0x21c2, PIN_INPUT | MUX_MODE0) /* i2c3_scl */
++ OMAP3_CORE1_IOPAD(0x21c4, PIN_INPUT | MUX_MODE0) /* i2c3_sda */
++ >;
++ };
+ };
+
+ &uart2 {
+diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
+index c5bc80a03515..5048c7a55eef 100644
+--- a/arch/arm64/include/asm/cpufeature.h
++++ b/arch/arm64/include/asm/cpufeature.h
+@@ -44,9 +44,10 @@
+ */
+
+ enum ftr_type {
+- FTR_EXACT, /* Use a predefined safe value */
+- FTR_LOWER_SAFE, /* Smaller value is safe */
+- FTR_HIGHER_SAFE,/* Bigger value is safe */
++ FTR_EXACT, /* Use a predefined safe value */
++ FTR_LOWER_SAFE, /* Smaller value is safe */
++ FTR_HIGHER_SAFE, /* Bigger value is safe */
++ FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
+ };
+
+ #define FTR_STRICT true /* SANITY check strict matching required */
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 29b5b72b7877..3312d46fa29e 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -178,8 +178,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1), /* DIC */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
+- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0), /* CWG */
++ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0), /* ERG */
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
+ /*
+ * Linux can handle differing I-cache policies. Userspace JITs will
+@@ -411,6 +411,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
+ case FTR_LOWER_SAFE:
+ ret = new < cur ? new : cur;
+ break;
++ case FTR_HIGHER_OR_ZERO_SAFE:
++ if (!cur || !new)
++ break;
++ /* Fallthrough */
+ case FTR_HIGHER_SAFE:
+ ret = new > cur ? new : cur;
+ break;
+diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
+index fc72b763fdd7..2b29598791e8 100644
+--- a/drivers/atm/iphase.c
++++ b/drivers/atm/iphase.c
+@@ -63,6 +63,7 @@
+ #include <asm/byteorder.h>
+ #include <linux/vmalloc.h>
+ #include <linux/jiffies.h>
++#include <linux/nospec.h>
+ #include "iphase.h"
+ #include "suni.h"
+ #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
+@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
+ }
+ if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
+ board = ia_cmds.status;
+- if ((board < 0) || (board > iadev_count))
+- board = 0;
++
++ if ((board < 0) || (board > iadev_count))
++ board = 0;
++ board = array_index_nospec(board, iadev_count + 1);
++
+ iadev = ia_dev[board];
+ switch (ia_cmds.cmd) {
+ case MEMDUMP:
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index 28ae3dc57103..1e2e6e58256a 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -537,6 +537,7 @@
+ #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
+ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+ #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
++#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
+
+ #define USB_VENDOR_ID_HUION 0x256c
+ #define USB_DEVICE_ID_HUION_TABLET 0x006e
+diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
+index e10eda031b01..7b5c6bd92d56 100644
+--- a/drivers/hid/usbhid/hid-quirks.c
++++ b/drivers/hid/usbhid/hid-quirks.c
+@@ -100,6 +100,7 @@ static const struct hid_blacklist {
+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A, HID_QUIRK_ALWAYS_POLL },
++ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
+diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
+index c2fb08bba296..60e2d4cf1fe3 100644
+--- a/drivers/hid/wacom_wac.c
++++ b/drivers/hid/wacom_wac.c
+@@ -537,14 +537,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
+ */
+ buttons = (data[4] << 1) | (data[3] & 0x01);
+ } else if (features->type == CINTIQ_COMPANION_2) {
+- /* d-pad right -> data[4] & 0x10
+- * d-pad up -> data[4] & 0x20
+- * d-pad left -> data[4] & 0x40
+- * d-pad down -> data[4] & 0x80
+- * d-pad center -> data[3] & 0x01
++ /* d-pad right -> data[2] & 0x10
++ * d-pad up -> data[2] & 0x20
++ * d-pad left -> data[2] & 0x40
++ * d-pad down -> data[2] & 0x80
++ * d-pad center -> data[1] & 0x01
+ */
+ buttons = ((data[2] >> 4) << 7) |
+- ((data[1] & 0x04) << 6) |
++ ((data[1] & 0x04) << 4) |
+ ((data[2] & 0x0F) << 2) |
+ (data[1] & 0x03);
+ } else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 40475ebf3a61..aadaa9e84eee 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -794,14 +794,13 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
+ struct net_device *dev;
+
+ union {
+- struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
+
+
+- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+- rdma_gid2ip(&dgid_addr._sockaddr, dgid);
++ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
++ rdma_gid2ip((struct sockaddr *)&dgid_addr, dgid);
+
+ memset(&dev_addr, 0, sizeof(dev_addr));
+ if (if_index)
+@@ -810,8 +809,9 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid,
+
+ ctx.addr = &dev_addr;
+ init_completion(&ctx.comp);
+- ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
+- &dev_addr, 1000, resolve_cb, &ctx);
++ ret = rdma_resolve_ip(&self, (struct sockaddr *)&sgid_addr,
++ (struct sockaddr *)&dgid_addr, &dev_addr, 1000,
++ resolve_cb, &ctx);
+ if (ret)
+ return ret;
+
+@@ -841,16 +841,15 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
+ int ret = 0;
+ struct rdma_dev_addr dev_addr;
+ union {
+- struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } gid_addr;
+
+- rdma_gid2ip(&gid_addr._sockaddr, sgid);
++ rdma_gid2ip((struct sockaddr *)&gid_addr, sgid);
+
+ memset(&dev_addr, 0, sizeof(dev_addr));
+ dev_addr.net = &init_net;
+- ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
++ ret = rdma_translate_ip((struct sockaddr *)&gid_addr, &dev_addr, vlan_id);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
+index b81d2597f563..50068b0a91fa 100644
+--- a/drivers/infiniband/core/sa_query.c
++++ b/drivers/infiniband/core/sa_query.c
+@@ -1263,7 +1263,6 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
+ &init_net
+ };
+ union {
+- struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
+@@ -1271,12 +1270,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
+ if (!device->get_netdev)
+ return -EOPNOTSUPP;
+
+- rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
+- rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
++ rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
++ rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);
+
+ /* validate the route */
+- ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
+- &dgid_addr._sockaddr, &dev_addr);
++ ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
++ (struct sockaddr *)&dgid_addr,
++ &dev_addr);
+ if (ret)
+ return ret;
+
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+index d0249e463338..ca29a6b76291 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+@@ -83,7 +83,6 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
+ struct iphdr ipv4;
+ const struct ib_global_route *ib_grh;
+ union {
+- struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
+@@ -133,9 +132,9 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
+ ipv4.tot_len = htons(0);
+ ipv4.ttl = ib_grh->hop_limit;
+ ipv4.protocol = nxthdr;
+- rdma_gid2ip(&sgid_addr._sockaddr, sgid);
++ rdma_gid2ip((struct sockaddr *)&sgid_addr, sgid);
+ ipv4.saddr = sgid_addr._sockaddr_in.sin_addr.s_addr;
+- rdma_gid2ip(&dgid_addr._sockaddr, &ib_grh->dgid);
++ rdma_gid2ip((struct sockaddr*)&dgid_addr, &ib_grh->dgid);
+ ipv4.daddr = dgid_addr._sockaddr_in.sin_addr.s_addr;
+ memcpy((u8 *)ah->av + eth_sz, &ipv4, sizeof(struct iphdr));
+ } else {
+diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+index 65b166cc7437..1ba296aeabca 100644
+--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
++++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+@@ -2508,7 +2508,6 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
+ u32 vlan_id = 0xFFFF;
+ u8 mac_addr[6], hdr_type;
+ union {
+- struct sockaddr _sockaddr;
+ struct sockaddr_in _sockaddr_in;
+ struct sockaddr_in6 _sockaddr_in6;
+ } sgid_addr, dgid_addr;
+@@ -2556,8 +2555,8 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
+
+ hdr_type = ib_gid_to_network_type(sgid_attr.gid_type, &sgid);
+ if (hdr_type == RDMA_NETWORK_IPV4) {
+- rdma_gid2ip(&sgid_addr._sockaddr, &sgid);
+- rdma_gid2ip(&dgid_addr._sockaddr, &grh->dgid);
++ rdma_gid2ip((struct sockaddr *)&sgid_addr, &sgid);
++ rdma_gid2ip((struct sockaddr *)&dgid_addr, &grh->dgid);
+ memcpy(&cmd->params.dgid[0],
+ &dgid_addr._sockaddr_in.sin_addr.s_addr, 4);
+ memcpy(&cmd->params.sgid[0],
+diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+index 64828d1438ab..17b825f73c52 100644
+--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+@@ -1934,7 +1934,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
+ }
+
+ /* select a non-FCoE queue */
+- return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
++ return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp));
+ }
+
+ void bnx2x_set_num_queues(struct bnx2x *bp)
+diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
+index e9aa8080a67a..d1eede2625ca 100644
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -6952,6 +6952,7 @@ log_error:
+ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+ {
+ struct mvpp2_port *port = netdev_priv(dev);
++ bool running = netif_running(dev);
+ int err;
+
+ if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
+@@ -6960,40 +6961,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+ mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+ }
+
+- if (!netif_running(dev)) {
+- err = mvpp2_bm_update_mtu(dev, mtu);
+- if (!err) {
+- port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+- return 0;
+- }
+-
+- /* Reconfigure BM to the original MTU */
+- err = mvpp2_bm_update_mtu(dev, dev->mtu);
+- if (err)
+- goto log_error;
+- }
+-
+- mvpp2_stop_dev(port);
++ if (running)
++ mvpp2_stop_dev(port);
+
+ err = mvpp2_bm_update_mtu(dev, mtu);
+- if (!err) {
++ if (err) {
++ netdev_err(dev, "failed to change MTU\n");
++ /* Reconfigure BM to the original MTU */
++ mvpp2_bm_update_mtu(dev, dev->mtu);
++ } else {
+ port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+- goto out_start;
+ }
+
+- /* Reconfigure BM to the original MTU */
+- err = mvpp2_bm_update_mtu(dev, dev->mtu);
+- if (err)
+- goto log_error;
+-
+-out_start:
+- mvpp2_start_dev(port);
+- mvpp2_egress_enable(port);
+- mvpp2_ingress_enable(port);
++ if (running) {
++ mvpp2_start_dev(port);
++ mvpp2_egress_enable(port);
++ mvpp2_ingress_enable(port);
++ }
+
+- return 0;
+-log_error:
+- netdev_err(dev, "failed to change MTU\n");
+ return err;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+index 07fda3984e10..bc8de24c56de 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+@@ -307,7 +307,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
+ struct mlx5_interface *intf;
+
+ mutex_lock(&mlx5_intf_mutex);
+- list_for_each_entry(intf, &intf_list, list)
++ list_for_each_entry_reverse(intf, &intf_list, list)
+ mlx5_remove_device(intf, priv);
+ list_del(&priv->dev_list);
+ mutex_unlock(&mlx5_intf_mutex);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+index 47003ea4ed65..5103b82fe6c5 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+@@ -473,13 +473,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
+ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+ {
+ struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
+- u64 bytes, packets, lastuse = 0;
+ struct mlx5e_tc_flow *flow;
+ struct mlx5e_encap_entry *e;
+ struct mlx5_fc *counter;
+ struct neigh_table *tbl;
+ bool neigh_used = false;
+ struct neighbour *n;
++ u64 lastuse;
+
+ if (m_neigh->family == AF_INET)
+ tbl = &arp_tbl;
+@@ -496,7 +496,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
+ list_for_each_entry(flow, &e->flows, encap) {
+ if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
+ counter = mlx5_flow_rule_counter(flow->rule);
+- mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
++ lastuse = mlx5_fc_query_lastuse(counter);
+ if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
+ neigh_used = true;
+ break;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+index 89d1f8650033..966ba3f29ed7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+@@ -312,6 +312,11 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
+ }
+ }
+
++u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
++{
++ return counter->cache.lastuse;
++}
++
+ void mlx5_fc_query_cached(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse)
+ {
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index 5bfc961e53c9..5b13c2ba1059 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -203,6 +203,8 @@ static int phylink_parse_fixedlink(struct phylink *pl, struct device_node *np)
+ __ETHTOOL_LINK_MODE_MASK_NBITS, true);
+ linkmode_zero(pl->supported);
+ phylink_set(pl->supported, MII);
++ phylink_set(pl->supported, Pause);
++ phylink_set(pl->supported, Asym_Pause);
+ if (s) {
+ __set_bit(s->bit, pl->supported);
+ } else {
+diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
+index c37ef5287caa..fa7121dcab67 100644
+--- a/drivers/net/ppp/pppoe.c
++++ b/drivers/net/ppp/pppoe.c
+@@ -1137,6 +1137,9 @@ static const struct proto_ops pppoe_ops = {
+ .recvmsg = pppoe_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = pppox_compat_ioctl,
++#endif
+ };
+
+ static const struct pppox_proto pppoe_proto = {
+diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
+index c0599b3b23c0..9128e42e33e7 100644
+--- a/drivers/net/ppp/pppox.c
++++ b/drivers/net/ppp/pppox.c
+@@ -22,6 +22,7 @@
+ #include <linux/string.h>
+ #include <linux/module.h>
+ #include <linux/kernel.h>
++#include <linux/compat.h>
+ #include <linux/errno.h>
+ #include <linux/netdevice.h>
+ #include <linux/net.h>
+@@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+
+ EXPORT_SYMBOL(pppox_ioctl);
+
++#ifdef CONFIG_COMPAT
++int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
++{
++ if (cmd == PPPOEIOCSFWD32)
++ cmd = PPPOEIOCSFWD;
++
++ return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
++}
++
++EXPORT_SYMBOL(pppox_compat_ioctl);
++#endif
++
+ static int pppox_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+ {
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 68b274b3e448..51d769901397 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -636,6 +636,9 @@ static const struct proto_ops pptp_ops = {
+ .recvmsg = sock_no_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = pppox_compat_ioctl,
++#endif
+ };
+
+ static const struct pppox_proto pppox_pptp_proto = {
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 84a33c81b9b7..7e197ba8abe4 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -1350,6 +1350,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
+
+ skb_reserve(skb, pad - delta);
+ skb_put(skb, len + delta);
++ skb_set_owner_w(skb, tfile->socket.sk);
+ get_page(alloc_frag->page);
+ alloc_frag->offset += buflen;
+
+diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
+index e65d027b91fa..529be35ac178 100644
+--- a/drivers/nfc/nfcmrvl/main.c
++++ b/drivers/nfc/nfcmrvl/main.c
+@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
+ /* Reset possible fault of previous session */
+ clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
+
+- if (priv->config.reset_n_io) {
++ if (gpio_is_valid(priv->config.reset_n_io)) {
+ nfc_info(priv->dev, "reset the chip\n");
+ gpio_set_value(priv->config.reset_n_io, 0);
+ usleep_range(5000, 10000);
+@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
+
+ void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
+ {
+- if (priv->config.reset_n_io)
++ if (gpio_is_valid(priv->config.reset_n_io))
+ gpio_set_value(priv->config.reset_n_io, 0);
+ }
+
+diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
+index 9a22056e8d9e..e5a622ce4b95 100644
+--- a/drivers/nfc/nfcmrvl/uart.c
++++ b/drivers/nfc/nfcmrvl/uart.c
+@@ -26,7 +26,7 @@
+ static unsigned int hci_muxed;
+ static unsigned int flow_control;
+ static unsigned int break_control;
+-static unsigned int reset_n_io;
++static int reset_n_io = -EINVAL;
+
+ /*
+ ** NFCMRVL NCI OPS
+@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
+ module_param(hci_muxed, uint, 0);
+ MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
+
+-module_param(reset_n_io, uint, 0);
++module_param(reset_n_io, int, 0);
+ MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
+diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
+index bd35eab652be..deb953290f8f 100644
+--- a/drivers/nfc/nfcmrvl/usb.c
++++ b/drivers/nfc/nfcmrvl/usb.c
+@@ -304,6 +304,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
+
+ /* No configuration for USB */
+ memset(&config, 0, sizeof(config));
++ config.reset_n_io = -EINVAL;
+
+ nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
+
+diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
+index bd61bf4e2da2..d95ba1a07ba3 100644
+--- a/drivers/scsi/fcoe/fcoe_ctlr.c
++++ b/drivers/scsi/fcoe/fcoe_ctlr.c
+@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
+ */
+ static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
+ {
+- return (struct fcoe_rport *)(rdata + 1);
++ return container_of(rdata, struct fcoe_rport, rdata);
+ }
+
+ /**
+@@ -2283,7 +2283,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
+ */
+ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
+ struct sk_buff *skb,
+- struct fc_rport_priv *rdata)
++ struct fcoe_rport *frport)
+ {
+ struct fip_header *fiph;
+ struct fip_desc *desc = NULL;
+@@ -2291,16 +2291,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
+ struct fip_wwn_desc *wwn = NULL;
+ struct fip_vn_desc *vn = NULL;
+ struct fip_size_desc *size = NULL;
+- struct fcoe_rport *frport;
+ size_t rlen;
+ size_t dlen;
+ u32 desc_mask = 0;
+ u32 dtype;
+ u8 sub;
+
+- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
+- frport = fcoe_ctlr_rport(rdata);
+-
+ fiph = (struct fip_header *)skb->data;
+ frport->flags = ntohs(fiph->fip_flags);
+
+@@ -2363,15 +2359,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
+ if (dlen != sizeof(struct fip_wwn_desc))
+ goto len_err;
+ wwn = (struct fip_wwn_desc *)desc;
+- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
++ frport->rdata.ids.node_name =
++ get_unaligned_be64(&wwn->fd_wwn);
+ break;
+ case FIP_DT_VN_ID:
+ if (dlen != sizeof(struct fip_vn_desc))
+ goto len_err;
+ vn = (struct fip_vn_desc *)desc;
+ memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
+- rdata->ids.port_id = ntoh24(vn->fd_fc_id);
+- rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
++ frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
++ frport->rdata.ids.port_name =
++ get_unaligned_be64(&vn->fd_wwpn);
+ break;
+ case FIP_DT_FC4F:
+ if (dlen != sizeof(struct fip_fc4_feat))
+@@ -2752,10 +2750,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+ {
+ struct fip_header *fiph;
+ enum fip_vn2vn_subcode sub;
+- struct {
+- struct fc_rport_priv rdata;
+- struct fcoe_rport frport;
+- } buf;
++ struct fcoe_rport frport = { };
+ int rc, vlan_id = 0;
+
+ fiph = (struct fip_header *)skb->data;
+@@ -2771,7 +2766,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+ goto drop;
+ }
+
+- rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
++ rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
+ if (rc) {
+ LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
+ goto drop;
+@@ -2780,19 +2775,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+ mutex_lock(&fip->ctlr_mutex);
+ switch (sub) {
+ case FIP_SC_VN_PROBE_REQ:
+- fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
++ fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
+ break;
+ case FIP_SC_VN_PROBE_REP:
+- fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
++ fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
+ break;
+ case FIP_SC_VN_CLAIM_NOTIFY:
+- fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
++ fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
+ break;
+ case FIP_SC_VN_CLAIM_REP:
+- fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
++ fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
+ break;
+ case FIP_SC_VN_BEACON:
+- fcoe_ctlr_vn_beacon(fip, &buf.rdata);
++ fcoe_ctlr_vn_beacon(fip, &frport.rdata);
+ break;
+ default:
+ LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
+@@ -2816,22 +2811,18 @@ drop:
+ */
+ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
+ struct sk_buff *skb,
+- struct fc_rport_priv *rdata)
++ struct fcoe_rport *frport)
+ {
+ struct fip_header *fiph;
+ struct fip_desc *desc = NULL;
+ struct fip_mac_desc *macd = NULL;
+ struct fip_wwn_desc *wwn = NULL;
+- struct fcoe_rport *frport;
+ size_t rlen;
+ size_t dlen;
+ u32 desc_mask = 0;
+ u32 dtype;
+ u8 sub;
+
+- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
+- frport = fcoe_ctlr_rport(rdata);
+-
+ fiph = (struct fip_header *)skb->data;
+ frport->flags = ntohs(fiph->fip_flags);
+
+@@ -2885,7 +2876,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
+ if (dlen != sizeof(struct fip_wwn_desc))
+ goto len_err;
+ wwn = (struct fip_wwn_desc *)desc;
+- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
++ frport->rdata.ids.node_name =
++ get_unaligned_be64(&wwn->fd_wwn);
+ break;
+ default:
+ LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
+@@ -2996,22 +2988,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
+ {
+ struct fip_header *fiph;
+ enum fip_vlan_subcode sub;
+- struct {
+- struct fc_rport_priv rdata;
+- struct fcoe_rport frport;
+- } buf;
++ struct fcoe_rport frport = { };
+ int rc;
+
+ fiph = (struct fip_header *)skb->data;
+ sub = fiph->fip_subcode;
+- rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
++ rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
+ if (rc) {
+ LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
+ goto drop;
+ }
+ mutex_lock(&fip->ctlr_mutex);
+ if (sub == FIP_SC_VL_REQ)
+- fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
++ fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata);
+ mutex_unlock(&fip->ctlr_mutex);
+
+ drop:
+diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
+index 31d31aad3de1..0e964ce75406 100644
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -142,12 +142,15 @@ EXPORT_SYMBOL(fc_rport_lookup);
+ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
+ {
+ struct fc_rport_priv *rdata;
++ size_t rport_priv_size = sizeof(*rdata);
+
+ rdata = fc_rport_lookup(lport, port_id);
+ if (rdata)
+ return rdata;
+
+- rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
++ if (lport->rport_priv_size > 0)
++ rport_priv_size = lport->rport_priv_size;
++ rdata = kzalloc(rport_priv_size, GFP_KERNEL);
+ if (!rdata)
+ return NULL;
+
+diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
+index 25abf2d1732a..eab27d41ba83 100644
+--- a/drivers/spi/spi-bcm2835.c
++++ b/drivers/spi/spi-bcm2835.c
+@@ -554,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
+ bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
+
+ /* handle all the 3-wire mode */
+- if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
++ if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
++ tfr->rx_buf != master->dummy_rx)
+ cs |= BCM2835_SPI_CS_REN;
+ else
+ cs &= ~BCM2835_SPI_CS_REN;
+diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
+index bd5d91e119ca..ea52b98b39fa 100644
+--- a/fs/compat_ioctl.c
++++ b/fs/compat_ioctl.c
+@@ -1032,9 +1032,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
+ COMPATIBLE_IOCTL(PPPIOCATTCHAN)
+ COMPATIBLE_IOCTL(PPPIOCGCHAN)
+ COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
+-/* PPPOX */
+-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
+-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
+ /* Big A */
+ /* sparc only */
+ /* Big Q for sound/OSS */
+diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
+index eb396f71285f..8d4b92185a09 100644
+--- a/include/linux/cgroup-defs.h
++++ b/include/linux/cgroup-defs.h
+@@ -201,6 +201,7 @@ struct css_set {
+ */
+ struct list_head tasks;
+ struct list_head mg_tasks;
++ struct list_head dying_tasks;
+
+ /* all css_task_iters currently walking this cset */
+ struct list_head task_iters;
+diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
+index ef4e4ce42642..0e21619f1c03 100644
+--- a/include/linux/cgroup.h
++++ b/include/linux/cgroup.h
+@@ -42,6 +42,9 @@
+ /* walk all threaded css_sets in the domain */
+ #define CSS_TASK_ITER_THREADED (1U << 1)
+
++/* internal flags */
++#define CSS_TASK_ITER_SKIPPED (1U << 16)
++
+ /* a css_task_iter should be treated as an opaque object */
+ struct css_task_iter {
+ struct cgroup_subsys *ss;
+@@ -56,6 +59,7 @@ struct css_task_iter {
+ struct list_head *task_pos;
+ struct list_head *tasks_head;
+ struct list_head *mg_tasks_head;
++ struct list_head *dying_tasks_head;
+
+ struct css_set *cur_cset;
+ struct css_set *cur_dcset;
+diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
+index ba7a9b0c7c57..24e9b360da65 100644
+--- a/include/linux/if_pppox.h
++++ b/include/linux/if_pppox.h
+@@ -84,6 +84,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
+ extern void unregister_pppox_proto(int proto_num);
+ extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
+ extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
++extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
++
++#define PPPOEIOCSFWD32 _IOW(0xB1 ,0, compat_size_t)
+
+ /* PPPoX socket states */
+ enum {
+diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
+index b25e7baa273e..dfe626ad818a 100644
+--- a/include/linux/mlx5/fs.h
++++ b/include/linux/mlx5/fs.h
+@@ -164,6 +164,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
+ struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
+ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
+ void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
++u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
+ void mlx5_fc_query_cached(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse);
+ int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn);
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 0b477a1e1177..7994e569644e 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -1688,6 +1688,23 @@ static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unli
+ tcp_sk(sk)->highest_sack = NULL;
+ }
+
++static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
++{
++ struct sk_buff *skb = tcp_write_queue_head(sk);
++
++ if (skb == tcp_send_head(sk))
++ skb = NULL;
++
++ return skb;
++}
++
++static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
++{
++ struct sk_buff *skb = tcp_send_head(sk);
++
++ return skb ? tcp_write_queue_prev(sk, skb) : tcp_write_queue_tail(sk);
++}
++
+ static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
+ {
+ __skb_queue_tail(&sk->sk_write_queue, skb);
+diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
+index a4e41444f5fe..282358843659 100644
+--- a/include/scsi/libfcoe.h
++++ b/include/scsi/libfcoe.h
+@@ -241,6 +241,7 @@ struct fcoe_fcf {
+ * @vn_mac: VN_Node assigned MAC address for data
+ */
+ struct fcoe_rport {
++ struct fc_rport_priv rdata;
+ unsigned long time;
+ u16 fcoe_len;
+ u16 flags;
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index d30a51da94e2..2c57030f54aa 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -204,7 +204,8 @@ static struct cftype cgroup_base_files[];
+
+ static int cgroup_apply_control(struct cgroup *cgrp);
+ static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
+-static void css_task_iter_advance(struct css_task_iter *it);
++static void css_task_iter_skip(struct css_task_iter *it,
++ struct task_struct *task);
+ static int cgroup_destroy_locked(struct cgroup *cgrp);
+ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ struct cgroup_subsys *ss);
+@@ -642,6 +643,7 @@ struct css_set init_css_set = {
+ .dom_cset = &init_css_set,
+ .tasks = LIST_HEAD_INIT(init_css_set.tasks),
+ .mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
++ .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
+ .task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
+ .threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
+ .cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
+@@ -737,6 +739,21 @@ static void css_set_update_populated(struct css_set *cset, bool populated)
+ cgroup_update_populated(link->cgrp, populated);
+ }
+
++/*
++ * @task is leaving, advance task iterators which are pointing to it so
++ * that they can resume at the next position. Advancing an iterator might
++ * remove it from the list, use safe walk. See css_task_iter_skip() for
++ * details.
++ */
++static void css_set_skip_task_iters(struct css_set *cset,
++ struct task_struct *task)
++{
++ struct css_task_iter *it, *pos;
++
++ list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
++ css_task_iter_skip(it, task);
++}
++
+ /**
+ * css_set_move_task - move a task from one css_set to another
+ * @task: task being moved
+@@ -762,22 +779,9 @@ static void css_set_move_task(struct task_struct *task,
+ css_set_update_populated(to_cset, true);
+
+ if (from_cset) {
+- struct css_task_iter *it, *pos;
+-
+ WARN_ON_ONCE(list_empty(&task->cg_list));
+
+- /*
+- * @task is leaving, advance task iterators which are
+- * pointing to it so that they can resume at the next
+- * position. Advancing an iterator might remove it from
+- * the list, use safe walk. See css_task_iter_advance*()
+- * for details.
+- */
+- list_for_each_entry_safe(it, pos, &from_cset->task_iters,
+- iters_node)
+- if (it->task_pos == &task->cg_list)
+- css_task_iter_advance(it);
+-
++ css_set_skip_task_iters(from_cset, task);
+ list_del_init(&task->cg_list);
+ if (!css_set_populated(from_cset))
+ css_set_update_populated(from_cset, false);
+@@ -1104,6 +1108,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
+ cset->dom_cset = cset;
+ INIT_LIST_HEAD(&cset->tasks);
+ INIT_LIST_HEAD(&cset->mg_tasks);
++ INIT_LIST_HEAD(&cset->dying_tasks);
+ INIT_LIST_HEAD(&cset->task_iters);
+ INIT_LIST_HEAD(&cset->threaded_csets);
+ INIT_HLIST_NODE(&cset->hlist);
+@@ -4043,15 +4048,18 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
+ it->task_pos = NULL;
+ return;
+ }
+- } while (!css_set_populated(cset));
++ } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
+
+ if (!list_empty(&cset->tasks))
+ it->task_pos = cset->tasks.next;
+- else
++ else if (!list_empty(&cset->mg_tasks))
+ it->task_pos = cset->mg_tasks.next;
++ else
++ it->task_pos = cset->dying_tasks.next;
+
+ it->tasks_head = &cset->tasks;
+ it->mg_tasks_head = &cset->mg_tasks;
++ it->dying_tasks_head = &cset->dying_tasks;
+
+ /*
+ * We don't keep css_sets locked across iteration steps and thus
+@@ -4077,9 +4085,20 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
+ list_add(&it->iters_node, &cset->task_iters);
+ }
+
++static void css_task_iter_skip(struct css_task_iter *it,
++ struct task_struct *task)
++{
++ lockdep_assert_held(&css_set_lock);
++
++ if (it->task_pos == &task->cg_list) {
++ it->task_pos = it->task_pos->next;
++ it->flags |= CSS_TASK_ITER_SKIPPED;
++ }
++}
++
+ static void css_task_iter_advance(struct css_task_iter *it)
+ {
+- struct list_head *next;
++ struct task_struct *task;
+
+ lockdep_assert_held(&css_set_lock);
+ repeat:
+@@ -4089,25 +4108,40 @@ repeat:
+ * consumed first and then ->mg_tasks. After ->mg_tasks,
+ * we move onto the next cset.
+ */
+- next = it->task_pos->next;
+-
+- if (next == it->tasks_head)
+- next = it->mg_tasks_head->next;
++ if (it->flags & CSS_TASK_ITER_SKIPPED)
++ it->flags &= ~CSS_TASK_ITER_SKIPPED;
++ else
++ it->task_pos = it->task_pos->next;
+
+- if (next == it->mg_tasks_head)
++ if (it->task_pos == it->tasks_head)
++ it->task_pos = it->mg_tasks_head->next;
++ if (it->task_pos == it->mg_tasks_head)
++ it->task_pos = it->dying_tasks_head->next;
++ if (it->task_pos == it->dying_tasks_head)
+ css_task_iter_advance_css_set(it);
+- else
+- it->task_pos = next;
+ } else {
+ /* called from start, proceed to the first cset */
+ css_task_iter_advance_css_set(it);
+ }
+
+- /* if PROCS, skip over tasks which aren't group leaders */
+- if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
+- !thread_group_leader(list_entry(it->task_pos, struct task_struct,
+- cg_list)))
+- goto repeat;
++ if (!it->task_pos)
++ return;
++
++ task = list_entry(it->task_pos, struct task_struct, cg_list);
++
++ if (it->flags & CSS_TASK_ITER_PROCS) {
++ /* if PROCS, skip over tasks which aren't group leaders */
++ if (!thread_group_leader(task))
++ goto repeat;
++
++ /* and dying leaders w/o live member threads */
++ if (!atomic_read(&task->signal->live))
++ goto repeat;
++ } else {
++ /* skip all dying ones */
++ if (task->flags & PF_EXITING)
++ goto repeat;
++ }
+ }
+
+ /**
+@@ -4163,6 +4197,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
+
+ spin_lock_irq(&css_set_lock);
+
++ /* @it may be half-advanced by skips, finish advancing */
++ if (it->flags & CSS_TASK_ITER_SKIPPED)
++ css_task_iter_advance(it);
++
+ if (it->task_pos) {
+ it->cur_task = list_entry(it->task_pos, struct task_struct,
+ cg_list);
+@@ -5540,6 +5578,7 @@ void cgroup_exit(struct task_struct *tsk)
+ if (!list_empty(&tsk->cg_list)) {
+ spin_lock_irq(&css_set_lock);
+ css_set_move_task(tsk, cset, NULL, false);
++ list_add_tail(&tsk->cg_list, &cset->dying_tasks);
+ cset->nr_tasks--;
+ spin_unlock_irq(&css_set_lock);
+ } else {
+@@ -5560,6 +5599,13 @@ void cgroup_release(struct task_struct *task)
+ do_each_subsys_mask(ss, ssid, have_release_callback) {
+ ss->release(task);
+ } while_each_subsys_mask();
++
++ if (use_task_css_set_links) {
++ spin_lock_irq(&css_set_lock);
++ css_set_skip_task_iters(task_css_set(task), task);
++ list_del_init(&task->cg_list);
++ spin_unlock_irq(&css_set_lock);
++ }
+ }
+
+ void cgroup_free(struct task_struct *task)
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 95ce231ff5e2..15437cfdcd70 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -193,6 +193,7 @@ repeat:
+ rcu_read_unlock();
+
+ proc_flush_task(p);
++ cgroup_release(p);
+
+ write_lock_irq(&tasklist_lock);
+ ptrace_release_task(p);
+@@ -218,7 +219,6 @@ repeat:
+ }
+
+ write_unlock_irq(&tasklist_lock);
+- cgroup_release(p);
+ release_thread(p);
+ call_rcu(&p->rcu, delayed_put_task_struct);
+
+diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
+index 5cd83145c7d8..b24782d53474 100644
+--- a/net/bridge/br_multicast.c
++++ b/net/bridge/br_multicast.c
+@@ -1593,6 +1593,9 @@ br_multicast_leave_group(struct net_bridge *br,
+ if (!br_port_group_equal(p, port, src))
+ continue;
+
++ if (p->flags & MDB_PG_FLAGS_PERMANENT)
++ break;
++
+ rcu_assign_pointer(*pp, p->next);
+ hlist_del_init(&p->mglist);
+ del_timer(&p->timer);
+diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
+index 9b8a53568b0f..e24a74884768 100644
+--- a/net/bridge/br_vlan.c
++++ b/net/bridge/br_vlan.c
+@@ -636,6 +636,11 @@ void br_vlan_flush(struct net_bridge *br)
+
+ ASSERT_RTNL();
+
++ /* delete auto-added default pvid local fdb before flushing vlans
++ * otherwise it will be leaked on bridge device init failure
++ */
++ br_fdb_delete_by_port(br, NULL, 0, 1);
++
+ vg = br_vlan_group(br);
+ __vlan_flush(vg);
+ RCU_INIT_POINTER(br->vlgrp, NULL);
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 08c0e7613ef6..f79b513e80dc 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -8652,6 +8652,8 @@ static void __net_exit default_device_exit(struct net *net)
+
+ /* Push remaining network devices to init_net */
+ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
++ if (__dev_get_by_name(&init_net, fb_name))
++ snprintf(fb_name, IFNAMSIZ, "dev%%d");
+ err = dev_change_net_namespace(dev, &init_net, fb_name);
+ if (err) {
+ pr_emerg("%s: failed to move %s to init_net: %d\n",
+diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
+index a5960b9b6741..a99086bf26ea 100644
+--- a/net/ipv4/tcp_output.c
++++ b/net/ipv4/tcp_output.c
+@@ -1264,6 +1264,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct sk_buff *buff;
+ int nsize, old_factor;
++ long limit;
+ int nlen;
+ u8 flags;
+
+@@ -1274,7 +1275,15 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
+ if (nsize < 0)
+ nsize = 0;
+
+- if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf + 0x20000)) {
++ /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
++ * We need some allowance to not penalize applications setting small
++ * SO_SNDBUF values.
++ * Also allow first and last skb in retransmit queue to be split.
++ */
++ limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
++ if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
++ skb != tcp_rtx_queue_head(sk) &&
++ skb != tcp_rtx_queue_tail(sk))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
+ return -ENOMEM;
+ }
+diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
+index f71c7915ff0e..067fc78cc529 100644
+--- a/net/ipv6/ip6_tunnel.c
++++ b/net/ipv6/ip6_tunnel.c
+@@ -1280,12 +1280,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
++ dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+
+ if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
+ return -1;
+
+- dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
+-
+ skb_set_inner_ipproto(skb, IPPROTO_IPIP);
+
+ err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+@@ -1371,12 +1370,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
+ }
+
+ fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
++ dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+
+ if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
+ return -1;
+
+- dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
+-
+ skb_set_inner_ipproto(skb, IPPROTO_IPV6);
+
+ err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
+diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
+index 8bef35aa8786..a7fcf48e9087 100644
+--- a/net/l2tp/l2tp_ppp.c
++++ b/net/l2tp/l2tp_ppp.c
+@@ -1793,6 +1793,9 @@ static const struct proto_ops pppol2tp_ops = {
+ .recvmsg = pppol2tp_recvmsg,
+ .mmap = sock_no_mmap,
+ .ioctl = pppox_ioctl,
++#ifdef CONFIG_COMPAT
++ .compat_ioctl = pppox_compat_ioctl,
++#endif
+ };
+
+ static const struct pppox_proto pppol2tp_proto = {
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 31de26c99023..16a403d17f44 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -459,6 +459,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ int ret = 0;
+ int err;
+
++ if (!nla)
++ return -EINVAL;
++
+ err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy, NULL);
+ if (err < 0)
+ return err;
+diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
+index c518a1efcb9d..b22e5cde6059 100644
+--- a/net/sched/sch_codel.c
++++ b/net/sched/sch_codel.c
+@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
+ struct Qdisc *sch = ctx;
+ struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+
+- if (skb)
++ if (skb) {
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+-
+- prefetch(&skb->end); /* we'll need skb_shinfo() */
++ prefetch(&skb->end); /* we'll need skb_shinfo() */
++ }
+ return skb;
+ }
+
+diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
+index 41954ed7ff51..ad4dcc663c6d 100644
+--- a/net/tipc/netlink_compat.c
++++ b/net/tipc/netlink_compat.c
+@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
+ int rep_type;
+ int rep_size;
+ int req_type;
++ int req_size;
+ struct net *net;
+ struct sk_buff *rep;
+ struct tlv_desc *req;
+@@ -252,7 +253,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
+ int err;
+ struct sk_buff *arg;
+
+- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
++ if (msg->req_type && (!msg->req_size ||
++ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
+ return -EINVAL;
+
+ msg->rep = tipc_tlv_alloc(msg->rep_size);
+@@ -345,7 +347,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
+ {
+ int err;
+
+- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
++ if (msg->req_type && (!msg->req_size ||
++ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
+ return -EINVAL;
+
+ err = __tipc_nl_compat_doit(cmd, msg);
+@@ -1267,8 +1270,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
+ goto send;
+ }
+
+- len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+- if (!len || !TLV_OK(msg.req, len)) {
++ msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
++ if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
+ msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
+ err = -EOPNOTSUPP;
+ goto send;
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index 7d748e272572..5422543faff8 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -165,6 +165,8 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
+ "__reiserfs_panic",
+ "lbug_with_loc",
+ "fortify_panic",
++ "machine_real_restart",
++ "rewind_stack_do_exit",
+ };
+
+ if (func->bind == STB_WEAK)