-rw-r--r-- | 0000_README              |    4
-rw-r--r-- | 1111_linux-4.9.112.patch | 1895
2 files changed, 1899 insertions, 0 deletions
diff --git a/0000_README b/0000_README index 59417ea1..f268044f 100644 --- a/0000_README +++ b/0000_README @@ -487,6 +487,10 @@ Patch: 1110_linux-4.9.111.patch From: http://www.kernel.org Desc: Linux 4.9.111 +Patch: 1111_linux-4.9.112.patch +From: http://www.kernel.org +Desc: Linux 4.9.112 + Patch: 1500_XATTR_USER_PREFIX.patch From: https://bugs.gentoo.org/show_bug.cgi?id=470644 Desc: Support for namespace user.pax.* on tmpfs. diff --git a/1111_linux-4.9.112.patch b/1111_linux-4.9.112.patch new file mode 100644 index 00000000..f3dc288c --- /dev/null +++ b/1111_linux-4.9.112.patch @@ -0,0 +1,1895 @@ +diff --git a/Makefile b/Makefile +index b10646531fcd..c4544293db10 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 4 + PATCHLEVEL = 9 +-SUBLEVEL = 111 ++SUBLEVEL = 112 + EXTRAVERSION = + NAME = Roaring Lionus + +diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi +index e9a5d0b8c7b0..908b269a016b 100644 +--- a/arch/arm/boot/dts/imx6q.dtsi ++++ b/arch/arm/boot/dts/imx6q.dtsi +@@ -96,7 +96,7 @@ + clocks = <&clks IMX6Q_CLK_ECSPI5>, + <&clks IMX6Q_CLK_ECSPI5>; + clock-names = "ipg", "per"; +- dmas = <&sdma 11 7 1>, <&sdma 12 7 2>; ++ dmas = <&sdma 11 8 1>, <&sdma 12 8 2>; + dma-names = "rx", "tx"; + status = "disabled"; + }; +diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S +index a4fd00064c80..771cfd2e1e6d 100644 +--- a/arch/s390/kernel/entry.S ++++ b/arch/s390/kernel/entry.S +@@ -1187,7 +1187,7 @@ cleanup_critical: + jl 0f + clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end + jl .Lcleanup_load_fpu_regs +-0: BR_EX %r14 ++0: BR_EX %r14,%r11 + + .align 8 + .Lcleanup_table: +@@ -1217,7 +1217,7 @@ cleanup_critical: + ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE + lctlg %c1,%c1,__LC_USER_ASCE # load primary asce + larl %r9,sie_exit # skip forward to sie_exit +- BR_EX %r14 ++ BR_EX %r14,%r11 + #endif + + .Lcleanup_system_call: +diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c +index b0fd028b2eee..7a4279d8a902 100644 +--- a/arch/x86/kernel/cpu/common.c ++++ b/arch/x86/kernel/cpu/common.c +@@ -848,6 +848,13 @@ void get_cpu_cap(struct cpuinfo_x86 *c) + + init_scattered_cpuid_features(c); + init_speculation_control(c); ++ ++ /* ++ * Clear/Set all flags overridden by options, after probe. ++ * This needs to happen each time we re-probe, which may happen ++ * several times during CPU initialization. ++ */ ++ apply_forced_caps(c); + } + + static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) +diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c +index 91c48cdfe81f..516be613bd41 100644 +--- a/arch/x86/kernel/kprobes/core.c ++++ b/arch/x86/kernel/kprobes/core.c +@@ -414,25 +414,38 @@ void free_insn_page(void *page) + module_memfree(page); + } + ++/* Prepare reljump right after instruction to boost */ ++static void prepare_boost(struct kprobe *p, int length) ++{ ++ if (can_boost(p->ainsn.insn, p->addr) && ++ MAX_INSN_SIZE - length >= RELATIVEJUMP_SIZE) { ++ /* ++ * These instructions can be executed directly if it ++ * jumps back to correct address. 
++ */ ++ synthesize_reljump(p->ainsn.insn + length, p->addr + length); ++ p->ainsn.boostable = 1; ++ } else { ++ p->ainsn.boostable = -1; ++ } ++} ++ + static int arch_copy_kprobe(struct kprobe *p) + { +- int ret; ++ int len; + + set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1); + + /* Copy an instruction with recovering if other optprobe modifies it.*/ +- ret = __copy_instruction(p->ainsn.insn, p->addr); +- if (!ret) ++ len = __copy_instruction(p->ainsn.insn, p->addr); ++ if (!len) + return -EINVAL; + + /* + * __copy_instruction can modify the displacement of the instruction, + * but it doesn't affect boostable check. + */ +- if (can_boost(p->ainsn.insn, p->addr)) +- p->ainsn.boostable = 0; +- else +- p->ainsn.boostable = -1; ++ prepare_boost(p, len); + + set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1); + +@@ -897,21 +910,6 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs, + break; + } + +- if (p->ainsn.boostable == 0) { +- if ((regs->ip > copy_ip) && +- (regs->ip - copy_ip) + 5 < MAX_INSN_SIZE) { +- /* +- * These instructions can be executed directly if it +- * jumps back to correct address. +- */ +- synthesize_reljump((void *)regs->ip, +- (void *)orig_ip + (regs->ip - copy_ip)); +- p->ainsn.boostable = 1; +- } else { +- p->ainsn.boostable = -1; +- } +- } +- + regs->ip += orig_ip - copy_ip; + + no_change: +diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c +index a11540e51f62..8eca26ef6471 100644 +--- a/arch/x86/xen/smp.c ++++ b/arch/x86/xen/smp.c +@@ -28,6 +28,7 @@ + #include <xen/interface/vcpu.h> + #include <xen/interface/xenpmu.h> + ++#include <asm/spec-ctrl.h> + #include <asm/xen/interface.h> + #include <asm/xen/hypercall.h> + +@@ -87,6 +88,8 @@ static void cpu_bringup(void) + cpu_data(cpu).x86_max_cores = 1; + set_cpu_sibling_map(cpu); + ++ speculative_store_bypass_ht_init(); ++ + xen_setup_cpu_clockevents(); + + notify_cpu_starting(cpu); +@@ -375,6 +378,8 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) + } + set_cpu_sibling_map(0); + ++ speculative_store_bypass_ht_init(); ++ + xen_pmu_init(0); + + if (xen_smp_intr_init(0)) +diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c +index a7c5b79371a7..23ee46a0c78c 100644 +--- a/drivers/base/power/opp/core.c ++++ b/drivers/base/power/opp/core.c +@@ -651,7 +651,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) + rcu_read_unlock(); + + /* Scaling up? Scale voltage before frequency */ +- if (freq > old_freq) { ++ if (freq >= old_freq) { + ret = _set_opp_voltage(dev, reg, u_volt, u_volt_min, + u_volt_max); + if (ret) +diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c +index c6755c9a0aea..51c233c4e058 100644 +--- a/drivers/block/drbd/drbd_worker.c ++++ b/drivers/block/drbd/drbd_worker.c +@@ -269,8 +269,8 @@ void drbd_request_endio(struct bio *bio) + what = COMPLETED_OK; + } + +- bio_put(req->private_bio); + req->private_bio = ERR_PTR(bio->bi_error); ++ bio_put(bio); + + /* not req_mod(), we need irqsave here! 
*/ + spin_lock_irqsave(&device->resource->req_lock, flags); +diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c +index 67ea2ce03a23..39d0fdcb17d2 100644 +--- a/drivers/gpu/drm/udl/udl_fb.c ++++ b/drivers/gpu/drm/udl/udl_fb.c +@@ -136,7 +136,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, + + if (cmd > (char *) urb->transfer_buffer) { + /* Send partial buffer remaining before exiting */ +- int len = cmd - (char *) urb->transfer_buffer; ++ int len; ++ if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length) ++ *cmd++ = 0xAF; ++ len = cmd - (char *) urb->transfer_buffer; + ret = udl_submit_urb(dev, urb, len); + bytes_sent += len; + } else +diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c +index 917dcb978c2c..9259a2f8bf3a 100644 +--- a/drivers/gpu/drm/udl/udl_transfer.c ++++ b/drivers/gpu/drm/udl/udl_transfer.c +@@ -152,11 +152,11 @@ static void udl_compress_hline16( + raw_pixels_count_byte = cmd++; /* we'll know this later */ + raw_pixel_start = pixel; + +- cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1, +- min((int)(pixel_end - pixel) / bpp, +- (int)(cmd_buffer_end - cmd) / 2))) * bpp; ++ cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL, ++ (unsigned long)(pixel_end - pixel) / bpp, ++ (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp; + +- prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp); ++ prefetch_range((void *) pixel, cmd_pixel_end - pixel); + pixel_val16 = get_pixel_val16(pixel, bpp); + + while (pixel < cmd_pixel_end) { +@@ -192,6 +192,9 @@ static void udl_compress_hline16( + if (pixel > raw_pixel_start) { + /* finalize last RAW span */ + *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF; ++ } else { ++ /* undo unused byte */ ++ cmd--; + } + + *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF; +diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c +index acfb522a432a..29423691c105 100644 +--- a/drivers/hid/hid-debug.c ++++ b/drivers/hid/hid-debug.c +@@ -1152,6 +1152,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, + goto out; + if (list->tail > list->head) { + len = list->tail - list->head; ++ if (len > count) ++ len = count; + + if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { + ret = -EFAULT; +@@ -1161,6 +1163,8 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, + list->head += len; + } else { + len = HID_DEBUG_BUFSIZE - list->head; ++ if (len > count) ++ len = count; + + if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { + ret = -EFAULT; +@@ -1168,7 +1172,9 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, + } + list->head = 0; + ret += len; +- goto copy_rest; ++ count -= len; ++ if (count > 0) ++ goto copy_rest; + } + + } +diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c +index 2548c5dbdc75..00bce002b357 100644 +--- a/drivers/hid/i2c-hid/i2c-hid.c ++++ b/drivers/hid/i2c-hid/i2c-hid.c +@@ -477,7 +477,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid) + return; + } + +- if ((ret_size > size) || (ret_size <= 2)) { ++ if ((ret_size > size) || (ret_size < 2)) { + dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n", + __func__, size, ret_size); + return; +diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c +index 700145b15088..b59b15d4caa9 100644 +--- a/drivers/hid/usbhid/hiddev.c ++++ b/drivers/hid/usbhid/hiddev.c +@@ -35,6 +35,7 @@ + #include 
<linux/hiddev.h> + #include <linux/compat.h> + #include <linux/vmalloc.h> ++#include <linux/nospec.h> + #include "usbhid.h" + + #ifdef CONFIG_USB_DYNAMIC_MINORS +@@ -478,10 +479,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, + + if (uref->field_index >= report->maxfield) + goto inval; ++ uref->field_index = array_index_nospec(uref->field_index, ++ report->maxfield); + + field = report->field[uref->field_index]; + if (uref->usage_index >= field->maxusage) + goto inval; ++ uref->usage_index = array_index_nospec(uref->usage_index, ++ field->maxusage); + + uref->usage_code = field->usage[uref->usage_index].hid; + +@@ -508,6 +513,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd, + + if (uref->field_index >= report->maxfield) + goto inval; ++ uref->field_index = array_index_nospec(uref->field_index, ++ report->maxfield); + + field = report->field[uref->field_index]; + +@@ -761,6 +768,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + + if (finfo.field_index >= report->maxfield) + break; ++ finfo.field_index = array_index_nospec(finfo.field_index, ++ report->maxfield); + + field = report->field[finfo.field_index]; + memset(&finfo, 0, sizeof(finfo)); +@@ -801,6 +810,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) + + if (cinfo.index >= hid->maxcollection) + break; ++ cinfo.index = array_index_nospec(cinfo.index, ++ hid->maxcollection); + + cinfo.type = hid->collection[cinfo.index].type; + cinfo.usage = hid->collection[cinfo.index].usage; +diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c +index 726615e54f2a..c7592fe30e6e 100644 +--- a/drivers/i2c/busses/i2c-rcar.c ++++ b/drivers/i2c/busses/i2c-rcar.c +@@ -700,6 +700,8 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, + + pm_runtime_get_sync(dev); + ++ rcar_i2c_init(priv); ++ + ret = rcar_i2c_bus_barrier(priv); + if (ret < 0) + goto out; +@@ -857,8 +859,6 @@ static int rcar_i2c_probe(struct platform_device *pdev) + if (ret < 0) + goto out_pm_put; + +- rcar_i2c_init(priv); +- + /* Don't suspend when multi-master to keep arbitration working */ + if (of_property_read_bool(dev->of_node, "multi-master")) + priv->flags |= ID_P_PM_BLOCKED; +diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c +index 148b313c6471..d30b3b908621 100644 +--- a/drivers/infiniband/hw/hfi1/chip.c ++++ b/drivers/infiniband/hw/hfi1/chip.c +@@ -6717,7 +6717,7 @@ static void rxe_kernel_unfreeze(struct hfi1_devdata *dd) + for (i = 0; i < dd->n_krcv_queues; i++) { + rcvmask = HFI1_RCVCTRL_CTXT_ENB; + /* HFI1_RCVCTRL_TAILUPD_[ENB|DIS] needs to be set explicitly */ +- rcvmask |= HFI1_CAP_KGET_MASK(dd->rcd[i]->flags, DMA_RTAIL) ? ++ rcvmask |= dd->rcd[i]->rcvhdrtail_kvaddr ? 
+ HFI1_RCVCTRL_TAILUPD_ENB : HFI1_RCVCTRL_TAILUPD_DIS; + hfi1_rcvctrl(dd, rcvmask, i); + } +@@ -8211,7 +8211,7 @@ static inline int check_packet_present(struct hfi1_ctxtdata *rcd) + u32 tail; + int present; + +- if (!HFI1_CAP_IS_KSET(DMA_RTAIL)) ++ if (!rcd->rcvhdrtail_kvaddr) + present = (rcd->seq_cnt == + rhf_rcv_seq(rhf_to_cpu(get_rhf_addr(rcd)))); + else /* is RDMA rtail */ +@@ -11550,7 +11550,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) + /* reset the tail and hdr addresses, and sequence count */ + write_kctxt_csr(dd, ctxt, RCV_HDR_ADDR, + rcd->rcvhdrq_dma); +- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) ++ if (rcd->rcvhdrtail_kvaddr) + write_kctxt_csr(dd, ctxt, RCV_HDR_TAIL_ADDR, + rcd->rcvhdrqtailaddr_dma); + rcd->seq_cnt = 1; +@@ -11630,7 +11630,7 @@ void hfi1_rcvctrl(struct hfi1_devdata *dd, unsigned int op, int ctxt) + rcvctrl |= RCV_CTXT_CTRL_INTR_AVAIL_SMASK; + if (op & HFI1_RCVCTRL_INTRAVAIL_DIS) + rcvctrl &= ~RCV_CTXT_CTRL_INTR_AVAIL_SMASK; +- if (op & HFI1_RCVCTRL_TAILUPD_ENB && rcd->rcvhdrqtailaddr_dma) ++ if ((op & HFI1_RCVCTRL_TAILUPD_ENB) && rcd->rcvhdrtail_kvaddr) + rcvctrl |= RCV_CTXT_CTRL_TAIL_UPD_SMASK; + if (op & HFI1_RCVCTRL_TAILUPD_DIS) { + /* See comment on RcvCtxtCtrl.TailUpd above */ +diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c +index bb729764a799..d612f9d94083 100644 +--- a/drivers/infiniband/hw/hfi1/file_ops.c ++++ b/drivers/infiniband/hw/hfi1/file_ops.c +@@ -609,7 +609,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) + ret = -EINVAL; + goto done; + } +- if (flags & VM_WRITE) { ++ if ((flags & VM_WRITE) || !uctxt->rcvhdrtail_kvaddr) { + ret = -EPERM; + goto done; + } +diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c +index c81c44525dd5..9dc8cf096e2e 100644 +--- a/drivers/infiniband/hw/hfi1/init.c ++++ b/drivers/infiniband/hw/hfi1/init.c +@@ -1618,7 +1618,6 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) + u64 reg; + + if (!rcd->rcvhdrq) { +- dma_addr_t dma_hdrqtail; + gfp_t gfp_flags; + + /* +@@ -1641,13 +1640,13 @@ int hfi1_create_rcvhdrq(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) + goto bail; + } + +- if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL)) { ++ if (HFI1_CAP_KGET_MASK(rcd->flags, DMA_RTAIL) || ++ HFI1_CAP_UGET_MASK(rcd->flags, DMA_RTAIL)) { + rcd->rcvhdrtail_kvaddr = dma_zalloc_coherent( +- &dd->pcidev->dev, PAGE_SIZE, &dma_hdrqtail, +- gfp_flags); ++ &dd->pcidev->dev, PAGE_SIZE, ++ &rcd->rcvhdrqtailaddr_dma, gfp_flags); + if (!rcd->rcvhdrtail_kvaddr) + goto bail_free; +- rcd->rcvhdrqtailaddr_dma = dma_hdrqtail; + } + + rcd->rcvhdrq_size = amt; +diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c +index 35fd57fdeba9..c837defb5e4d 100644 +--- a/drivers/md/dm-bufio.c ++++ b/drivers/md/dm-bufio.c +@@ -819,12 +819,14 @@ enum new_flag { + static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf) + { + struct dm_buffer *b; ++ bool tried_noio_alloc = false; + + /* + * dm-bufio is resistant to allocation failures (it just keeps + * one buffer reserved in cases all the allocations fail). + * So set flags to not try too hard: +- * GFP_NOIO: don't recurse into the I/O layer ++ * GFP_NOWAIT: don't wait; if we need to sleep we'll release our ++ * mutex and wait ourselves. 
+ * __GFP_NORETRY: don't retry and rather return failure + * __GFP_NOMEMALLOC: don't use emergency reserves + * __GFP_NOWARN: don't print a warning in case of failure +@@ -834,7 +836,7 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client + */ + while (1) { + if (dm_bufio_cache_size_latch != 1) { +- b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); ++ b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); + if (b) + return b; + } +@@ -842,6 +844,15 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client + if (nf == NF_PREFETCH) + return NULL; + ++ if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) { ++ dm_bufio_unlock(c); ++ b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN); ++ dm_bufio_lock(c); ++ if (b) ++ return b; ++ tried_noio_alloc = true; ++ } ++ + if (!list_empty(&c->reserved_buffers)) { + b = list_entry(c->reserved_buffers.next, + struct dm_buffer, lru_list); +@@ -1587,19 +1598,11 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) + static unsigned long + dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) + { +- struct dm_bufio_client *c; +- unsigned long count; +- unsigned long retain_target; +- +- c = container_of(shrink, struct dm_bufio_client, shrinker); +- if (sc->gfp_mask & __GFP_FS) +- dm_bufio_lock(c); +- else if (!dm_bufio_trylock(c)) +- return 0; ++ struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); ++ unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) + ++ READ_ONCE(c->n_buffers[LIST_DIRTY]); ++ unsigned long retain_target = get_retain_buffers(c); + +- count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY]; +- retain_target = get_retain_buffers(c); +- dm_bufio_unlock(c); + return (count < retain_target) ? 
0 : (count - retain_target); + } + +diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c +index d558ed3e59c6..cc5666050282 100644 +--- a/drivers/media/i2c/cx25840/cx25840-core.c ++++ b/drivers/media/i2c/cx25840/cx25840-core.c +@@ -467,8 +467,13 @@ static void cx23885_initialize(struct i2c_client *client) + { + DEFINE_WAIT(wait); + struct cx25840_state *state = to_state(i2c_get_clientdata(client)); ++ u32 clk_freq = 0; + struct workqueue_struct *q; + ++ /* cx23885 sets hostdata to clk_freq pointer */ ++ if (v4l2_get_subdev_hostdata(&state->sd)) ++ clk_freq = *((u32 *)v4l2_get_subdev_hostdata(&state->sd)); ++ + /* + * Come out of digital power down + * The CX23888, at least, needs this, otherwise registers aside from +@@ -504,8 +509,13 @@ static void cx23885_initialize(struct i2c_client *client) + * 50.0 MHz * (0xb + 0xe8ba26/0x2000000)/4 = 5 * 28.636363 MHz + * 572.73 MHz before post divide + */ +- /* HVR1850 or 50MHz xtal */ +- cx25840_write(client, 0x2, 0x71); ++ if (clk_freq == 25000000) { ++ /* 888/ImpactVCBe or 25Mhz xtal */ ++ ; /* nothing to do */ ++ } else { ++ /* HVR1850 or 50MHz xtal */ ++ cx25840_write(client, 0x2, 0x71); ++ } + cx25840_write4(client, 0x11c, 0x01d1744c); + cx25840_write4(client, 0x118, 0x00000416); + cx25840_write4(client, 0x404, 0x0010253e); +@@ -548,9 +558,15 @@ static void cx23885_initialize(struct i2c_client *client) + /* HVR1850 */ + switch (state->id) { + case CX23888_AV: +- /* 888/HVR1250 specific */ +- cx25840_write4(client, 0x10c, 0x13333333); +- cx25840_write4(client, 0x108, 0x00000515); ++ if (clk_freq == 25000000) { ++ /* 888/ImpactVCBe or 25MHz xtal */ ++ cx25840_write4(client, 0x10c, 0x01b6db7b); ++ cx25840_write4(client, 0x108, 0x00000512); ++ } else { ++ /* 888/HVR1250 or 50MHz xtal */ ++ cx25840_write4(client, 0x10c, 0x13333333); ++ cx25840_write4(client, 0x108, 0x00000515); ++ } + break; + default: + cx25840_write4(client, 0x10c, 0x002be2c9); +@@ -580,7 +596,7 @@ static void cx23885_initialize(struct i2c_client *client) + * 368.64 MHz before post divide + * 122.88 MHz / 0xa = 12.288 MHz + */ +- /* HVR1850 or 50MHz xtal */ ++ /* HVR1850 or 50MHz xtal or 25MHz xtal */ + cx25840_write4(client, 0x114, 0x017dbf48); + cx25840_write4(client, 0x110, 0x000a030e); + break; +diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c +index 33d025e42793..de35a2a362f9 100644 +--- a/drivers/mtd/chips/cfi_cmdset_0002.c ++++ b/drivers/mtd/chips/cfi_cmdset_0002.c +@@ -42,7 +42,7 @@ + #define AMD_BOOTLOC_BUG + #define FORCE_WORD_WRITE 0 + +-#define MAX_WORD_RETRIES 3 ++#define MAX_RETRIES 3 + + #define SST49LF004B 0x0060 + #define SST49LF040B 0x0050 +@@ -1643,7 +1643,7 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, + map_write( map, CMD(0xF0), chip->start ); + /* FIXME - should have reset delay before continuing */ + +- if (++retry_cnt <= MAX_WORD_RETRIES) ++ if (++retry_cnt <= MAX_RETRIES) + goto retry; + + ret = -EIO; +@@ -2102,7 +2102,7 @@ static int do_panic_write_oneword(struct map_info *map, struct flchip *chip, + map_write(map, CMD(0xF0), chip->start); + /* FIXME - should have reset delay before continuing */ + +- if (++retry_cnt <= MAX_WORD_RETRIES) ++ if (++retry_cnt <= MAX_RETRIES) + goto retry; + + ret = -EIO; +@@ -2237,6 +2237,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) + unsigned long int adr; + DECLARE_WAITQUEUE(wait, current); + int ret = 0; ++ int retry_cnt = 0; + + adr = cfi->addr_unlock1; + +@@ -2254,6 
+2255,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) + ENABLE_VPP(map); + xip_disable(map, chip, adr); + ++ retry: + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); +@@ -2290,12 +2292,13 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) + chip->erase_suspended = 0; + } + +- if (chip_ready(map, adr)) ++ if (chip_good(map, adr, map_word_ff(map))) + break; + + if (time_after(jiffies, timeo)) { + printk(KERN_WARNING "MTD %s(): software timeout\n", + __func__ ); ++ ret = -EIO; + break; + } + +@@ -2303,12 +2306,15 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip) + UDELAY(map, chip, adr, 1000000/HZ); + } + /* Did we succeed? */ +- if (!chip_good(map, adr, map_word_ff(map))) { ++ if (ret) { + /* reset on all failures. */ + map_write( map, CMD(0xF0), chip->start ); + /* FIXME - should have reset delay before continuing */ + +- ret = -EIO; ++ if (++retry_cnt <= MAX_RETRIES) { ++ ret = 0; ++ goto retry; ++ } + } + + chip->state = FL_READY; +@@ -2327,6 +2333,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, + unsigned long timeo = jiffies + HZ; + DECLARE_WAITQUEUE(wait, current); + int ret = 0; ++ int retry_cnt = 0; + + adr += chip->start; + +@@ -2344,6 +2351,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, + ENABLE_VPP(map); + xip_disable(map, chip, adr); + ++ retry: + cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL); + cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL); +@@ -2380,7 +2388,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, + chip->erase_suspended = 0; + } + +- if (chip_ready(map, adr)) { ++ if (chip_good(map, adr, map_word_ff(map))) { + xip_enable(map, chip, adr); + break; + } +@@ -2389,6 +2397,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, + xip_enable(map, chip, adr); + printk(KERN_WARNING "MTD %s(): software timeout\n", + __func__ ); ++ ret = -EIO; + break; + } + +@@ -2396,12 +2405,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, + UDELAY(map, chip, adr, 1000000/HZ); + } + /* Did we succeed? */ +- if (!chip_good(map, adr, map_word_ff(map))) { ++ if (ret) { + /* reset on all failures. 
*/ + map_write( map, CMD(0xF0), chip->start ); + /* FIXME - should have reset delay before continuing */ + +- ret = -EIO; ++ if (++retry_cnt <= MAX_RETRIES) { ++ ret = 0; ++ goto retry; ++ } + } + + chip->state = FL_READY; +diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c +index 0c84ee80e5b6..5c44eb57885b 100644 +--- a/drivers/mtd/nand/mxc_nand.c ++++ b/drivers/mtd/nand/mxc_nand.c +@@ -48,7 +48,7 @@ + #define NFC_V1_V2_CONFIG (host->regs + 0x0a) + #define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c) + #define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e) +-#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10) ++#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10) + #define NFC_V1_V2_WRPROT (host->regs + 0x12) + #define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14) + #define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16) +@@ -1121,6 +1121,9 @@ static void preset_v2(struct mtd_info *mtd) + writew(config1, NFC_V1_V2_CONFIG1); + /* preset operation */ + ++ /* spare area size in 16-bit half-words */ ++ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA); ++ + /* Unlock the internal RAM Buffer */ + writew(0x2, NFC_V1_V2_CONFIG); + +diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +index d50350c7adc4..22a5916e477e 100644 +--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c ++++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +@@ -4187,10 +4187,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev, + if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) && + !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) + return -EINVAL; +- if (!info->linking) +- break; +- if (netdev_has_any_upper_dev(upper_dev)) +- return -EINVAL; + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; +@@ -4566,6 +4562,8 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev, + return -EINVAL; + if (!info->linking) + break; ++ if (netdev_has_any_upper_dev(upper_dev)) ++ return -EINVAL; + /* We can't have multiple VLAN interfaces configured on + * the same port and being members in the same bridge. 
+ */ +diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c +index 2032a6de026b..707190d3ada0 100644 +--- a/drivers/net/phy/micrel.c ++++ b/drivers/net/phy/micrel.c +@@ -801,9 +801,6 @@ static struct phy_driver ksphy_driver[] = { + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, +- .get_sset_count = kszphy_get_sset_count, +- .get_strings = kszphy_get_strings, +- .get_stats = kszphy_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { +@@ -948,9 +945,6 @@ static struct phy_driver ksphy_driver[] = { + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, +- .get_sset_count = kszphy_get_sset_count, +- .get_strings = kszphy_get_strings, +- .get_stats = kszphy_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { +@@ -960,6 +954,7 @@ static struct phy_driver ksphy_driver[] = { + .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ksz9021_type, ++ .probe = kszphy_probe, + .config_init = ksz9021_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, +@@ -979,6 +974,7 @@ static struct phy_driver ksphy_driver[] = { + .features = (PHY_GBIT_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .driver_data = &ksz9021_type, ++ .probe = kszphy_probe, + .config_init = ksz9031_config_init, + .config_aneg = genphy_config_aneg, + .read_status = ksz9031_read_status, +@@ -998,9 +994,6 @@ static struct phy_driver ksphy_driver[] = { + .config_init = kszphy_config_init, + .config_aneg = ksz8873mll_config_aneg, + .read_status = ksz8873mll_read_status, +- .get_sset_count = kszphy_get_sset_count, +- .get_strings = kszphy_get_strings, +- .get_stats = kszphy_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { +@@ -1012,9 +1005,6 @@ static struct phy_driver ksphy_driver[] = { + .config_init = kszphy_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, +- .get_sset_count = kszphy_get_sset_count, +- .get_strings = kszphy_get_strings, +- .get_stats = kszphy_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, + }, { +@@ -1026,9 +1016,6 @@ static struct phy_driver ksphy_driver[] = { + .config_init = kszphy_config_init, + .config_aneg = ksz8873mll_config_aneg, + .read_status = ksz8873mll_read_status, +- .get_sset_count = kszphy_get_sset_count, +- .get_strings = kszphy_get_strings, +- .get_stats = kszphy_get_stats, + .suspend = genphy_suspend, + .resume = genphy_resume, + } }; +diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c +index 0f81d739f9e9..2065a0f9dca6 100644 +--- a/drivers/scsi/sg.c ++++ b/drivers/scsi/sg.c +@@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */ + #include <linux/atomic.h> + #include <linux/ratelimit.h> + #include <linux/uio.h> ++#include <linux/cred.h> /* for sg_check_file_access() */ + + #include "scsi.h" + #include <scsi/scsi_dbg.h> +@@ -210,6 +211,33 @@ static void sg_device_destroy(struct kref *kref); + sdev_prefix_printk(prefix, (sdp)->device, \ + (sdp)->disk->disk_name, fmt, ##a) + ++/* ++ * The SCSI interfaces that use read() and write() as an asynchronous variant of ++ * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways ++ * to trigger read() and write() calls from various contexts with elevated ++ * privileges. This can lead to kernel memory corruption (e.g. 
if these ++ * interfaces are called through splice()) and privilege escalation inside ++ * userspace (e.g. if a process with access to such a device passes a file ++ * descriptor to a SUID binary as stdin/stdout/stderr). ++ * ++ * This function provides protection for the legacy API by restricting the ++ * calling context. ++ */ ++static int sg_check_file_access(struct file *filp, const char *caller) ++{ ++ if (filp->f_cred != current_real_cred()) { ++ pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n", ++ caller, task_tgid_vnr(current), current->comm); ++ return -EPERM; ++ } ++ if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { ++ pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n", ++ caller, task_tgid_vnr(current), current->comm); ++ return -EACCES; ++ } ++ return 0; ++} ++ + static int sg_allow_access(struct file *filp, unsigned char *cmd) + { + struct sg_fd *sfp = filp->private_data; +@@ -394,6 +422,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) + struct sg_header *old_hdr = NULL; + int retval = 0; + ++ /* ++ * This could cause a response to be stranded. Close the associated ++ * file descriptor to free up any resources being held. ++ */ ++ retval = sg_check_file_access(filp, __func__); ++ if (retval) ++ return retval; ++ + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, +@@ -581,9 +617,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) + struct sg_header old_hdr; + sg_io_hdr_t *hp; + unsigned char cmnd[SG_MAX_CDB_SIZE]; ++ int retval; + +- if (unlikely(segment_eq(get_fs(), KERNEL_DS))) +- return -EINVAL; ++ retval = sg_check_file_access(filp, __func__); ++ if (retval) ++ return retval; + + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; +diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c +index 4e5c0f17f579..c2a7cb95725b 100644 +--- a/drivers/staging/android/ion/ion_heap.c ++++ b/drivers/staging/android/ion/ion_heap.c +@@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap, + struct page **tmp = pages; + + if (!pages) +- return NULL; ++ return ERR_PTR(-ENOMEM); + + if (buffer->flags & ION_FLAG_CACHED) + pgprot = PAGE_KERNEL; +diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c +index 802f51e46405..171960568356 100644 +--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c ++++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c +@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev, + /* Make sure D/A update mode is direct update */ + outb(0, dev->iobase + DAQP_AUX_REG); + +- for (i = 0; i > insn->n; i++) { ++ for (i = 0; i < insn->n; i++) { + unsigned int val = data[i]; + int ret; + +diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c +index 1c70541a1467..0475f9685a41 100644 +--- a/drivers/tty/n_tty.c ++++ b/drivers/tty/n_tty.c +@@ -126,6 +126,8 @@ struct n_tty_data { + struct mutex output_lock; + }; + ++#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1)) ++ + static inline size_t read_cnt(struct n_tty_data *ldata) + { + return ldata->read_head - ldata->read_tail; +@@ -143,6 +145,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i) + + static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i) + { ++ smp_rmb(); /* 
Matches smp_wmb() in add_echo_byte(). */ + return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)]; + } + +@@ -318,9 +321,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata) + static void reset_buffer_flags(struct n_tty_data *ldata) + { + ldata->read_head = ldata->canon_head = ldata->read_tail = 0; +- ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0; + ldata->commit_head = 0; +- ldata->echo_mark = 0; + ldata->line_start = 0; + + ldata->erasing = 0; +@@ -619,12 +620,19 @@ static size_t __process_echoes(struct tty_struct *tty) + old_space = space = tty_write_room(tty); + + tail = ldata->echo_tail; +- while (ldata->echo_commit != tail) { ++ while (MASK(ldata->echo_commit) != MASK(tail)) { + c = echo_buf(ldata, tail); + if (c == ECHO_OP_START) { + unsigned char op; + int no_space_left = 0; + ++ /* ++ * Since add_echo_byte() is called without holding ++ * output_lock, we might see only portion of multi-byte ++ * operation. ++ */ ++ if (MASK(ldata->echo_commit) == MASK(tail + 1)) ++ goto not_yet_stored; + /* + * If the buffer byte is the start of a multi-byte + * operation, get the next byte, which is either the +@@ -636,6 +644,8 @@ static size_t __process_echoes(struct tty_struct *tty) + unsigned int num_chars, num_bs; + + case ECHO_OP_ERASE_TAB: ++ if (MASK(ldata->echo_commit) == MASK(tail + 2)) ++ goto not_yet_stored; + num_chars = echo_buf(ldata, tail + 2); + + /* +@@ -730,7 +740,8 @@ static size_t __process_echoes(struct tty_struct *tty) + /* If the echo buffer is nearly full (so that the possibility exists + * of echo overrun before the next commit), then discard enough + * data at the tail to prevent a subsequent overrun */ +- while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { ++ while (ldata->echo_commit > tail && ++ ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { + if (echo_buf(ldata, tail) == ECHO_OP_START) { + if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) + tail += 3; +@@ -740,6 +751,7 @@ static size_t __process_echoes(struct tty_struct *tty) + tail++; + } + ++ not_yet_stored: + ldata->echo_tail = tail; + return old_space - space; + } +@@ -750,6 +762,7 @@ static void commit_echoes(struct tty_struct *tty) + size_t nr, old, echoed; + size_t head; + ++ mutex_lock(&ldata->output_lock); + head = ldata->echo_head; + ldata->echo_mark = head; + old = ldata->echo_commit - ldata->echo_tail; +@@ -758,10 +771,12 @@ static void commit_echoes(struct tty_struct *tty) + * is over the threshold (and try again each time another + * block is accumulated) */ + nr = head - ldata->echo_tail; +- if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK)) ++ if (nr < ECHO_COMMIT_WATERMARK || ++ (nr % ECHO_BLOCK > old % ECHO_BLOCK)) { ++ mutex_unlock(&ldata->output_lock); + return; ++ } + +- mutex_lock(&ldata->output_lock); + ldata->echo_commit = head; + echoed = __process_echoes(tty); + mutex_unlock(&ldata->output_lock); +@@ -812,7 +827,9 @@ static void flush_echoes(struct tty_struct *tty) + + static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata) + { +- *echo_buf_addr(ldata, ldata->echo_head++) = c; ++ *echo_buf_addr(ldata, ldata->echo_head) = c; ++ smp_wmb(); /* Matches smp_rmb() in echo_buf(). 
*/ ++ ldata->echo_head++; + } + + /** +@@ -980,14 +997,15 @@ static void eraser(unsigned char c, struct tty_struct *tty) + } + + seen_alnums = 0; +- while (ldata->read_head != ldata->canon_head) { ++ while (MASK(ldata->read_head) != MASK(ldata->canon_head)) { + head = ldata->read_head; + + /* erase a single possibly multibyte character */ + do { + head--; + c = read_buf(ldata, head); +- } while (is_continuation(c, tty) && head != ldata->canon_head); ++ } while (is_continuation(c, tty) && ++ MASK(head) != MASK(ldata->canon_head)); + + /* do not partially erase */ + if (is_continuation(c, tty)) +@@ -1029,7 +1047,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) + * This info is used to go back the correct + * number of columns. + */ +- while (tail != ldata->canon_head) { ++ while (MASK(tail) != MASK(ldata->canon_head)) { + tail--; + c = read_buf(ldata, tail); + if (c == '\t') { +@@ -1304,7 +1322,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c) + finish_erasing(ldata); + echo_char(c, tty); + echo_char_raw('\n', ldata); +- while (tail != ldata->read_head) { ++ while (MASK(tail) != MASK(ldata->read_head)) { + echo_char(read_buf(ldata, tail), tty); + tail++; + } +@@ -1880,30 +1898,21 @@ static int n_tty_open(struct tty_struct *tty) + struct n_tty_data *ldata; + + /* Currently a malloc failure here can panic */ +- ldata = vmalloc(sizeof(*ldata)); ++ ldata = vzalloc(sizeof(*ldata)); + if (!ldata) +- goto err; ++ return -ENOMEM; + + ldata->overrun_time = jiffies; + mutex_init(&ldata->atomic_read_lock); + mutex_init(&ldata->output_lock); + + tty->disc_data = ldata; +- reset_buffer_flags(tty->disc_data); +- ldata->column = 0; +- ldata->canon_column = 0; +- ldata->num_overrun = 0; +- ldata->no_room = 0; +- ldata->lnext = 0; + tty->closing = 0; + /* indicate buffer work may resume */ + clear_bit(TTY_LDISC_HALTED, &tty->flags); + n_tty_set_termios(tty, NULL); + tty_unthrottle(tty); +- + return 0; +-err: +- return -ENOMEM; + } + + static inline int input_available_p(struct tty_struct *tty, int poll) +@@ -2413,7 +2422,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata) + tail = ldata->read_tail; + nr = head - tail; + /* Skip EOF-chars.. 
*/ +- while (head != tail) { ++ while (MASK(head) != MASK(tail)) { + if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) && + read_buf(ldata, tail) == __DISABLED_CHAR) + nr--; +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 9e1ac58e269e..9d3e413f48c6 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -785,7 +785,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ + if (!*vc->vc_uni_pagedir_loc) + con_set_default_unimap(vc); + +- vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); ++ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL); + if (!vc->vc_screenbuf) + goto err_free; + +@@ -872,7 +872,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc, + + if (new_screen_size > (4 << 20)) + return -EINVAL; +- newscreen = kmalloc(new_screen_size, GFP_USER); ++ newscreen = kzalloc(new_screen_size, GFP_USER); + if (!newscreen) + return -ENOMEM; + +diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c +index fe22ac7c760a..08bef18372ea 100644 +--- a/drivers/usb/class/cdc-acm.c ++++ b/drivers/usb/class/cdc-acm.c +@@ -1712,6 +1712,9 @@ static const struct usb_device_id acm_ids[] = { + { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */ + .driver_info = SINGLE_RX_URB, + }, ++ { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */ ++ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ ++ }, + { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */ + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ + }, +diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c +index 13754353251f..9669184fb1fe 100644 +--- a/drivers/usb/dwc2/hcd_queue.c ++++ b/drivers/usb/dwc2/hcd_queue.c +@@ -479,7 +479,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg, + /* Get the map and adjust if this is a multi_tt hub */ + map = qh->dwc_tt->periodic_bitmaps; + if (qh->dwc_tt->usb_tt->multi) +- map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport; ++ map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1); + + return map; + } +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index 46b4dea7a0ec..6f2c77a7c08e 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -92,6 +92,9 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ + { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ + { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */ ++ { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */ ++ { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */ ++ { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */ + { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ + { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ + { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ +@@ -109,6 +112,9 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ + { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */ + { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ ++ { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */ ++ { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */ ++ { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */ + { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */ + { 
USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ + { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ +@@ -121,7 +127,9 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */ + { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ + { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */ ++ { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */ + { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */ ++ { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */ + { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ + { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ + { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ +@@ -131,17 +139,23 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ ++ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */ ++ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */ + { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ + { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ + { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ ++ { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */ + { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ + { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */ + { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */ + { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ ++ { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */ + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ + { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ ++ { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */ ++ { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */ + { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ + { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ + { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ +diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c +index d57222894892..8407b07428a6 100644 +--- a/fs/cifs/cifssmb.c ++++ b/fs/cifs/cifssmb.c +@@ -150,8 +150,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) + * greater than cifs socket timeout which is 7 seconds + */ + while (server->tcpStatus == CifsNeedReconnect) { +- wait_event_interruptible_timeout(server->response_q, +- (server->tcpStatus != CifsNeedReconnect), 10 * HZ); ++ rc = wait_event_interruptible_timeout(server->response_q, ++ (server->tcpStatus != CifsNeedReconnect), ++ 10 * HZ); ++ if (rc < 0) { ++ cifs_dbg(FYI, "%s: aborting reconnect due to a received" ++ " signal by the process\n", __func__); ++ return -ERESTARTSYS; ++ } + + /* are we still trying to reconnect? 
*/ + if (server->tcpStatus != CifsNeedReconnect) +diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c +index e0214334769b..4ded64b8b43b 100644 +--- a/fs/cifs/smb2pdu.c ++++ b/fs/cifs/smb2pdu.c +@@ -155,7 +155,7 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ , + static int + smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) + { +- int rc = 0; ++ int rc; + struct nls_table *nls_codepage; + struct cifs_ses *ses; + struct TCP_Server_Info *server; +@@ -166,10 +166,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) + * for those three - in the calling routine. + */ + if (tcon == NULL) +- return rc; ++ return 0; + + if (smb2_command == SMB2_TREE_CONNECT) +- return rc; ++ return 0; + + if (tcon->tidStatus == CifsExiting) { + /* +@@ -212,8 +212,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) + return -EAGAIN; + } + +- wait_event_interruptible_timeout(server->response_q, +- (server->tcpStatus != CifsNeedReconnect), 10 * HZ); ++ rc = wait_event_interruptible_timeout(server->response_q, ++ (server->tcpStatus != CifsNeedReconnect), ++ 10 * HZ); ++ if (rc < 0) { ++ cifs_dbg(FYI, "%s: aborting reconnect due to a received" ++ " signal by the process\n", __func__); ++ return -ERESTARTSYS; ++ } + + /* are we still trying to reconnect? */ + if (server->tcpStatus != CifsNeedReconnect) +@@ -231,7 +237,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) + } + + if (!tcon->ses->need_reconnect && !tcon->need_reconnect) +- return rc; ++ return 0; + + nls_codepage = load_nls_default(); + +diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c +index 6776f4aa3d12..ad13f07cf0d3 100644 +--- a/fs/ext4/balloc.c ++++ b/fs/ext4/balloc.c +@@ -183,7 +183,6 @@ static int ext4_init_block_bitmap(struct super_block *sb, + unsigned int bit, bit_max; + struct ext4_sb_info *sbi = EXT4_SB(sb); + ext4_fsblk_t start, tmp; +- int flex_bg = 0; + struct ext4_group_info *grp; + + J_ASSERT_BH(bh, buffer_locked(bh)); +@@ -216,22 +215,19 @@ static int ext4_init_block_bitmap(struct super_block *sb, + + start = ext4_group_first_block_no(sb, block_group); + +- if (ext4_has_feature_flex_bg(sb)) +- flex_bg = 1; +- + /* Set bits for block and inode bitmaps, and inode table */ + tmp = ext4_block_bitmap(sb, gdp); +- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) ++ if (ext4_block_in_group(sb, tmp, block_group)) + ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); + + tmp = ext4_inode_bitmap(sb, gdp); +- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) ++ if (ext4_block_in_group(sb, tmp, block_group)) + ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); + + tmp = ext4_inode_table(sb, gdp); + for (; tmp < ext4_inode_table(sb, gdp) + + sbi->s_itb_per_group; tmp++) { +- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group)) ++ if (ext4_block_in_group(sb, tmp, block_group)) + ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data); + } + +@@ -454,7 +450,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) + goto verify; + } + ext4_lock_group(sb, block_group); +- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { ++ if (ext4_has_group_desc_csum(sb) && ++ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { ++ if (block_group == 0) { ++ ext4_unlock_group(sb, block_group); ++ unlock_buffer(bh); ++ ext4_error(sb, "Block bitmap for bg 0 marked " ++ "uninitialized"); ++ err = -EFSCORRUPTED; ++ goto out; ++ } + err = ext4_init_block_bitmap(sb, bh, block_group, desc); + set_bitmap_uptodate(bh); + 
set_buffer_uptodate(bh); +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index a8a750f59621..43e27d8ec770 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -1542,11 +1542,6 @@ static inline struct timespec ext4_current_time(struct inode *inode) + static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino) + { + return ino == EXT4_ROOT_INO || +- ino == EXT4_USR_QUOTA_INO || +- ino == EXT4_GRP_QUOTA_INO || +- ino == EXT4_BOOT_LOADER_INO || +- ino == EXT4_JOURNAL_INO || +- ino == EXT4_RESIZE_INO || + (ino >= EXT4_FIRST_INO(sb) && + ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)); + } +diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h +index 8ecf84b8f5a1..a284fb28944b 100644 +--- a/fs/ext4/ext4_extents.h ++++ b/fs/ext4/ext4_extents.h +@@ -103,6 +103,7 @@ struct ext4_extent_header { + }; + + #define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) ++#define EXT4_MAX_EXTENT_DEPTH 5 + + #define EXT4_EXTENT_TAIL_OFFSET(hdr) \ + (sizeof(struct ext4_extent_header) + \ +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 63c702b4b24c..106a5bb3ae68 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -881,6 +881,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block, + + eh = ext_inode_hdr(inode); + depth = ext_depth(inode); ++ if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) { ++ EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d", ++ depth); ++ ret = -EFSCORRUPTED; ++ goto err; ++ } + + if (path) { + ext4_ext_drop_refs(path); +diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c +index dcf63daefee0..460866b2166d 100644 +--- a/fs/ext4/ialloc.c ++++ b/fs/ext4/ialloc.c +@@ -152,7 +152,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group) + } + + ext4_lock_group(sb, block_group); +- if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) { ++ if (ext4_has_group_desc_csum(sb) && ++ (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) { ++ if (block_group == 0) { ++ ext4_unlock_group(sb, block_group); ++ unlock_buffer(bh); ++ ext4_error(sb, "Inode bitmap for bg 0 marked " ++ "uninitialized"); ++ err = -EFSCORRUPTED; ++ goto out; ++ } + memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); + ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), + sb->s_blocksize * 8, bh->b_data); +@@ -926,7 +935,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, + + /* recheck and clear flag under lock if we still need to */ + ext4_lock_group(sb, group); +- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { ++ if (ext4_has_group_desc_csum(sb) && ++ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); + ext4_free_group_clusters_set(sb, gdp, + ext4_free_clusters_after_init(sb, group, gdp)); +diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c +index 73cbc01ef5ad..e6ac24de119d 100644 +--- a/fs/ext4/inline.c ++++ b/fs/ext4/inline.c +@@ -434,6 +434,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle, + + memset((void *)ext4_raw_inode(&is.iloc)->i_block, + 0, EXT4_MIN_INLINE_DATA_SIZE); ++ memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE); + + if (ext4_has_feature_extents(inode->i_sb)) { + if (S_ISDIR(inode->i_mode) || +diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c +index 7c025ee1276f..5c4c9af4aaf4 100644 +--- a/fs/ext4/inode.c ++++ b/fs/ext4/inode.c +@@ -377,9 +377,9 @@ static int __check_block_validity(struct inode *inode, const char *func, + if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, + map->m_len)) { + ext4_error_inode(inode, func, line, 
map->m_pblk, +- "lblock %lu mapped to illegal pblock " ++ "lblock %lu mapped to illegal pblock %llu " + "(length %d)", (unsigned long) map->m_lblk, +- map->m_len); ++ map->m_pblk, map->m_len); + return -EFSCORRUPTED; + } + return 0; +@@ -4242,7 +4242,8 @@ static int __ext4_get_inode_loc(struct inode *inode, + int inodes_per_block, inode_offset; + + iloc->bh = NULL; +- if (!ext4_valid_inum(sb, inode->i_ino)) ++ if (inode->i_ino < EXT4_ROOT_INO || ++ inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count)) + return -EFSCORRUPTED; + + iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb); +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 14bd37041e1a..53e1890660a2 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -2444,7 +2444,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, + * initialize bb_free to be able to skip + * empty groups without initialization + */ +- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { ++ if (ext4_has_group_desc_csum(sb) && ++ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { + meta_group_info[i]->bb_free = + ext4_free_clusters_after_init(sb, group, desc); + } else { +@@ -2969,7 +2970,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, + #endif + ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, + ac->ac_b_ex.fe_len); +- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { ++ if (ext4_has_group_desc_csum(sb) && ++ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { + gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); + ext4_free_group_clusters_set(sb, gdp, + ext4_free_clusters_after_init(sb, +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index bfb83d76d128..41ef83471ea5 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -2231,6 +2231,7 @@ static int ext4_check_descriptors(struct super_block *sb, + struct ext4_sb_info *sbi = EXT4_SB(sb); + ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block); + ext4_fsblk_t last_block; ++ ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0) + 1; + ext4_fsblk_t block_bitmap; + ext4_fsblk_t inode_bitmap; + ext4_fsblk_t inode_table; +@@ -2263,6 +2264,14 @@ static int ext4_check_descriptors(struct super_block *sb, + if (!(sb->s_flags & MS_RDONLY)) + return 0; + } ++ if (block_bitmap >= sb_block + 1 && ++ block_bitmap <= last_bg_block) { ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " ++ "Block bitmap for group %u overlaps " ++ "block group descriptors", i); ++ if (!(sb->s_flags & MS_RDONLY)) ++ return 0; ++ } + if (block_bitmap < first_block || block_bitmap > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Block bitmap for group %u not in group " +@@ -2277,6 +2286,14 @@ static int ext4_check_descriptors(struct super_block *sb, + if (!(sb->s_flags & MS_RDONLY)) + return 0; + } ++ if (inode_bitmap >= sb_block + 1 && ++ inode_bitmap <= last_bg_block) { ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " ++ "Inode bitmap for group %u overlaps " ++ "block group descriptors", i); ++ if (!(sb->s_flags & MS_RDONLY)) ++ return 0; ++ } + if (inode_bitmap < first_block || inode_bitmap > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " + "Inode bitmap for group %u not in group " +@@ -2291,6 +2308,14 @@ static int ext4_check_descriptors(struct super_block *sb, + if (!(sb->s_flags & MS_RDONLY)) + return 0; + } ++ if (inode_table >= sb_block + 1 && ++ inode_table <= last_bg_block) { ++ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " ++ "Inode table for group %u 
overlaps " ++ "block group descriptors", i); ++ if (!(sb->s_flags & MS_RDONLY)) ++ return 0; ++ } + if (inode_table < first_block || + inode_table + sbi->s_itb_per_group - 1 > last_block) { + ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " +@@ -2998,13 +3023,22 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb) + ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count; + struct ext4_group_desc *gdp = NULL; + ++ if (!ext4_has_group_desc_csum(sb)) ++ return ngroups; ++ + for (group = 0; group < ngroups; group++) { + gdp = ext4_get_group_desc(sb, group, NULL); + if (!gdp) + continue; + +- if (!(gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED))) ++ if (gdp->bg_flags & cpu_to_le16(EXT4_BG_INODE_ZEROED)) ++ continue; ++ if (group != 0) + break; ++ ext4_error(sb, "Inode table for bg 0 marked as " ++ "needing zeroing"); ++ if (sb->s_flags & MS_RDONLY) ++ return ngroups; + } + + return group; +@@ -3622,6 +3656,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + le32_to_cpu(es->s_log_block_size)); + goto failed_mount; + } ++ if (le32_to_cpu(es->s_log_cluster_size) > ++ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { ++ ext4_msg(sb, KERN_ERR, ++ "Invalid log cluster size: %u", ++ le32_to_cpu(es->s_log_cluster_size)); ++ goto failed_mount; ++ } + + if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) { + ext4_msg(sb, KERN_ERR, +@@ -3679,6 +3720,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + } else { + sbi->s_inode_size = le16_to_cpu(es->s_inode_size); + sbi->s_first_ino = le32_to_cpu(es->s_first_ino); ++ if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) { ++ ext4_msg(sb, KERN_ERR, "invalid first ino: %u", ++ sbi->s_first_ino); ++ goto failed_mount; ++ } + if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) || + (!is_power_of_2(sbi->s_inode_size)) || + (sbi->s_inode_size > blocksize)) { +@@ -3755,13 +3801,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + "block size (%d)", clustersize, blocksize); + goto failed_mount; + } +- if (le32_to_cpu(es->s_log_cluster_size) > +- (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { +- ext4_msg(sb, KERN_ERR, +- "Invalid log cluster size: %u", +- le32_to_cpu(es->s_log_cluster_size)); +- goto failed_mount; +- } + sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - + le32_to_cpu(es->s_log_block_size); + sbi->s_clusters_per_group = +@@ -3782,10 +3821,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + } + } else { + if (clustersize != blocksize) { +- ext4_warning(sb, "fragment/cluster size (%d) != " +- "block size (%d)", clustersize, +- blocksize); +- clustersize = blocksize; ++ ext4_msg(sb, KERN_ERR, ++ "fragment/cluster size (%d) != " ++ "block size (%d)", clustersize, blocksize); ++ goto failed_mount; + } + if (sbi->s_blocks_per_group > blocksize * 8) { + ext4_msg(sb, KERN_ERR, +@@ -3839,6 +3878,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + ext4_blocks_count(es)); + goto failed_mount; + } ++ if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) && ++ (sbi->s_cluster_ratio == 1)) { ++ ext4_msg(sb, KERN_WARNING, "bad geometry: first data " ++ "block is 0 with a 1k block and cluster size"); ++ goto failed_mount; ++ } ++ + blocks_count = (ext4_blocks_count(es) - + le32_to_cpu(es->s_first_data_block) + + EXT4_BLOCKS_PER_GROUP(sb) - 1); +@@ -3874,6 +3920,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) + ret 
+ goto failed_mount;
+ }
++ if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
++ le32_to_cpu(es->s_inodes_count)) {
++ ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
++ le32_to_cpu(es->s_inodes_count),
++ ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
++ ret = -EINVAL;
++ goto failed_mount;
++ }
+
+ bgl_lock_init(sbi->s_blockgroup_lock);
+
+@@ -4575,6 +4629,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
+
+ if (!sbh || block_device_ejected(sb))
+ return error;
++
++ /*
++ * The superblock bh should be mapped, but it might not be if the
++ * device was hot-removed. Not much we can do but fail the I/O.
++ */
++ if (!buffer_mapped(sbh))
++ return error;
++
+ /*
+ * If the file system is mounted read-only, don't update the
+ * superblock write time. This avoids updating the superblock
+diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
+index 9e9e0936138b..b320c1ba7fdc 100644
+--- a/fs/jbd2/transaction.c
++++ b/fs/jbd2/transaction.c
+@@ -1353,6 +1353,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ if (jh->b_transaction == transaction &&
+ jh->b_jlist != BJ_Metadata) {
+ jbd_lock_bh_state(bh);
++ if (jh->b_transaction == transaction &&
++ jh->b_jlist != BJ_Metadata)
++ pr_err("JBD2: assertion failure: h_type=%u "
++ "h_line_no=%u block_no=%llu jlist=%u\n",
++ handle->h_type, handle->h_line_no,
++ (unsigned long long) bh->b_blocknr,
++ jh->b_jlist);
+ J_ASSERT_JH(jh, jh->b_transaction != transaction ||
+ jh->b_jlist == BJ_Metadata);
+ jbd_unlock_bh_state(bh);
+@@ -1372,11 +1379,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
+ * of the transaction. This needs to be done
+ * once a transaction -bzzz
+ */
+- jh->b_modified = 1;
+ if (handle->h_buffer_credits <= 0) {
+ ret = -ENOSPC;
+ goto out_unlock_bh;
+ }
++ jh->b_modified = 1;
+ handle->h_buffer_credits--;
+ }
+
+diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
+index a17cb1d8415c..01e71812e174 100644
+--- a/kernel/trace/trace_functions_graph.c
++++ b/kernel/trace/trace_functions_graph.c
+@@ -830,6 +830,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
+ struct ftrace_graph_ret *graph_ret;
+ struct ftrace_graph_ent *call;
+ unsigned long long duration;
++ int cpu = iter->cpu;
+ int i;
+
+ graph_ret = &ret_entry->ret;
+@@ -838,7 +839,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
+
+ if (data) {
+ struct fgraph_cpu_data *cpu_data;
+- int cpu = iter->cpu;
+
+ cpu_data = per_cpu_ptr(data->cpu_data, cpu);
+
+@@ -868,6 +868,9 @@ print_graph_entry_leaf(struct trace_iterator *iter,
+
+ trace_seq_printf(s, "%ps();\n", (void *)call->func);
+
++ print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
++ cpu, iter->ent->pid, flags);
++
+ return trace_handle_return(s);
+ }
+
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 6ff65c405243..f9e735537c37 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -2171,6 +2171,7 @@ static void __init gather_bootmem_prealloc(void)
+ */
+ if (hstate_is_gigantic(h))
+ adjust_managed_page_count(page, 1 << h->order);
++ cond_resched();
+ }
+ }
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 94018ea5f935..28240ce475d6 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -3642,7 +3642,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+ * orientated.
+ */
+ if (!(alloc_flags & ALLOC_CPUSET) || (alloc_flags & ALLOC_NO_WATERMARKS)) {
+- ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
+ ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+ ac->high_zoneidx, ac->nodemask);
+ }
+diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
+index d476b7950adf..a88dab33cdf6 100644
+--- a/net/ipv4/fib_semantics.c
++++ b/net/ipv4/fib_semantics.c
+@@ -980,7 +980,7 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
+ return -EINVAL;
+ } else {
+ if (nla_len(nla) != sizeof(u32))
+- return false;
++ return -EINVAL;
+ val = nla_get_u32(nla);
+ }
+ if (type == RTAX_ADVMSS && val > 65535 - 40)
+diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
+index ae0485d776f4..fc7ca1e46908 100644
+--- a/net/ipv6/sit.c
++++ b/net/ipv6/sit.c
+@@ -659,7 +659,6 @@ static int ipip6_rcv(struct sk_buff *skb)
+ if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6),
+ !net_eq(tunnel->net, dev_net(tunnel->dev))))
+ goto out;
+- iph = ip_hdr(skb);
+
+ err = IP_ECN_decapsulate(iph, skb);
+ if (unlikely(err)) {
+diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
+index ffb9e8ada899..e02fed784cd0 100644
+--- a/net/netfilter/nf_log.c
++++ b/net/netfilter/nf_log.c
+@@ -444,14 +444,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
+ rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
+ mutex_unlock(&nf_log_mutex);
+ } else {
++ struct ctl_table tmp = *table;
++
++ tmp.data = buf;
+ mutex_lock(&nf_log_mutex);
+ logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
+ if (!logger)
+- table->data = "NONE";
++ strlcpy(buf, "NONE", sizeof(buf));
+ else
+- table->data = logger->name;
+- r = proc_dostring(table, write, buffer, lenp, ppos);
++ strlcpy(buf, logger->name, sizeof(buf));
+ mutex_unlock(&nf_log_mutex);
++ r = proc_dostring(&tmp, write, buffer, lenp, ppos);
+ }
+
+ return r;
+diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
+index 0dd5c695482f..9d593ecd8e87 100644
+--- a/net/netfilter/nf_tables_core.c
++++ b/net/netfilter/nf_tables_core.c
+@@ -185,7 +185,8 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv)
+
+ switch (regs.verdict.code) {
+ case NFT_JUMP:
+- BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
++ if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
++ return NF_DROP;
+ jumpstack[stackptr].chain = chain;
+ jumpstack[stackptr].rule = rule;
+ jumpstack[stackptr].rulenum = rulenum;
+diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
+index 179219845dfc..63774307a751 100644
+--- a/scripts/Kbuild.include
++++ b/scripts/Kbuild.include
+@@ -8,6 +8,7 @@ squote := '
+ empty :=
+ space := $(empty) $(empty)
+ space_escape := _-_SPACE_-_
++pound := \#
+
+ ###
+ # Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
+@@ -241,11 +242,11 @@ endif
+
+ # Replace >$< with >$$< to preserve $ when reloading the .cmd file
+ # (needed for make)
+-# Replace >#< with >\#< to avoid starting a comment in the .cmd file
++# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file
+ # (needed for make)
+ # Replace >'< with >'\''< to be able to enclose the whole string in '...'
+ # (needed for the shell)
+-make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
++make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))))
+
+ # Find any prerequisites that is newer than target or that does not exist.
+ # PHONY targets skipped in both cases.
+diff --git a/tools/build/Build.include b/tools/build/Build.include
+index 1dcb95e76f70..b8165545ddf6 100644
+--- a/tools/build/Build.include
++++ b/tools/build/Build.include
+@@ -12,6 +12,7 @@
+ # Convenient variables
+ comma := ,
+ squote := '
++pound := \#
+
+ ###
+ # Name of target with a '.' as filename prefix. foo/bar.o => foo/.bar.o
+@@ -43,11 +44,11 @@ echo-cmd = $(if $($(quiet)cmd_$(1)),\
+ ###
+ # Replace >$< with >$$< to preserve $ when reloading the .cmd file
+ # (needed for make)
+-# Replace >#< with >\#< to avoid starting a comment in the .cmd file
++# Replace >#< with >$(pound)< to avoid starting a comment in the .cmd file
+ # (needed for make)
+ # Replace >'< with >'\''< to be able to enclose the whole string in '...'
+ # (needed for the shell)
+-make-cmd = $(call escsq,$(subst \#,\\\#,$(subst $$,$$$$,$(cmd_$(1)))))
++make-cmd = $(call escsq,$(subst $(pound),$$(pound),$(subst $$,$$$$,$(cmd_$(1)))))
+
+ ###
+ # Find any prerequisites that is newer than target or that does not exist.
+diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
+index e6acc281dd37..8ae824dbfca3 100644
+--- a/tools/objtool/Makefile
++++ b/tools/objtool/Makefile
+@@ -35,7 +35,7 @@ CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
+ LDFLAGS += -lelf $(LIBSUBCMD)
+
+ # Allow old libelf to be used:
+-elfshdr := $(shell echo '\#include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
++elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
+ CFLAGS += $(if $(elfshdr),,-DLIBELF_USE_DEPRECATED)
+
+ AWK = awk
+diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
+index 19edc1a7a232..7ea4438b801d 100644
+--- a/tools/scripts/Makefile.include
++++ b/tools/scripts/Makefile.include
+@@ -92,3 +92,5 @@ ifneq ($(silent),1)
+ QUIET_INSTALL = @printf ' INSTALL %s\n' $1;
+ endif
+ endif
++
++pound := \#