-rw-r--r--  0000_README                4
-rw-r--r--  1001_linux-4.12.2.patch  383
2 files changed, 387 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 4e20aba9..5a67d310 100644
--- a/0000_README
+++ b/0000_README
@@ -47,6 +47,10 @@ Patch: 1000_linux-4.12.1.patch
From: http://www.kernel.org
Desc: Linux 4.12.1
+Patch: 1001_linux-4.12.2.patch
+From: http://www.kernel.org
+Desc: Linux 4.12.2
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1001_linux-4.12.2.patch b/1001_linux-4.12.2.patch
new file mode 100644
index 00000000..181b8878
--- /dev/null
+++ b/1001_linux-4.12.2.patch
@@ -0,0 +1,383 @@
+diff --git a/Makefile b/Makefile
+index 1286f8cc7b5b..7c81bbba2943 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 12
+-SUBLEVEL = 1
++SUBLEVEL = 2
+ EXTRAVERSION =
+ NAME = Fearless Coyote
+
+diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
+index 0b1ff4c1c14e..fffb2794dd89 100644
+--- a/arch/x86/include/asm/pat.h
++++ b/arch/x86/include/asm/pat.h
+@@ -7,6 +7,7 @@
+ bool pat_enabled(void);
+ void pat_disable(const char *reason);
+ extern void pat_init(void);
++extern void init_cache_modes(void);
+
+ extern int reserve_memtype(u64 start, u64 end,
+ enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
+diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
+index f81823695014..36646f19d40b 100644
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -1076,6 +1076,13 @@ void __init setup_arch(char **cmdline_p)
+ max_possible_pfn = max_pfn;
+
+ /*
++ * This call is required when the CPU does not support PAT. If
++ * mtrr_bp_init() invoked it already via pat_init() the call has no
++ * effect.
++ */
++ init_cache_modes();
++
++ /*
+ * Define random base addresses for memory sections after max_pfn is
+ * defined and before each memory section base is used.
+ */
+diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
+index 9b78685b66e6..45979502f64b 100644
+--- a/arch/x86/mm/pat.c
++++ b/arch/x86/mm/pat.c
+@@ -37,14 +37,14 @@
+ #undef pr_fmt
+ #define pr_fmt(fmt) "" fmt
+
+-static bool boot_cpu_done;
+-
+-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
+-static void init_cache_modes(void);
++static bool __read_mostly boot_cpu_done;
++static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
++static bool __read_mostly pat_initialized;
++static bool __read_mostly init_cm_done;
+
+ void pat_disable(const char *reason)
+ {
+- if (!__pat_enabled)
++ if (pat_disabled)
+ return;
+
+ if (boot_cpu_done) {
+@@ -52,10 +52,8 @@ void pat_disable(const char *reason)
+ return;
+ }
+
+- __pat_enabled = 0;
++ pat_disabled = true;
+ pr_info("x86/PAT: %s\n", reason);
+-
+- init_cache_modes();
+ }
+
+ static int __init nopat(char *str)
+@@ -67,7 +65,7 @@ early_param("nopat", nopat);
+
+ bool pat_enabled(void)
+ {
+- return !!__pat_enabled;
++ return pat_initialized;
+ }
+ EXPORT_SYMBOL_GPL(pat_enabled);
+
+@@ -205,6 +203,8 @@ static void __init_cache_modes(u64 pat)
+ update_cache_mode_entry(i, cache);
+ }
+ pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
++
++ init_cm_done = true;
+ }
+
+ #define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
+@@ -225,6 +225,7 @@ static void pat_bsp_init(u64 pat)
+ }
+
+ wrmsrl(MSR_IA32_CR_PAT, pat);
++ pat_initialized = true;
+
+ __init_cache_modes(pat);
+ }
+@@ -242,10 +243,9 @@ static void pat_ap_init(u64 pat)
+ wrmsrl(MSR_IA32_CR_PAT, pat);
+ }
+
+-static void init_cache_modes(void)
++void init_cache_modes(void)
+ {
+ u64 pat = 0;
+- static int init_cm_done;
+
+ if (init_cm_done)
+ return;
+@@ -287,8 +287,6 @@ static void init_cache_modes(void)
+ }
+
+ __init_cache_modes(pat);
+-
+- init_cm_done = 1;
+ }
+
+ /**
+@@ -306,10 +304,8 @@ void pat_init(void)
+ u64 pat;
+ struct cpuinfo_x86 *c = &boot_cpu_data;
+
+- if (!pat_enabled()) {
+- init_cache_modes();
++ if (pat_disabled)
+ return;
+- }
+
+ if ((c->x86_vendor == X86_VENDOR_INTEL) &&
+ (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
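
A note on the shape this PAT rework lands on: the single __pat_enabled flag is split into separate "disabled" and "initialized" states, and init_cache_modes() becomes an externally visible, idempotent setup step, so setup_arch() can call it unconditionally. A minimal userspace sketch of that pattern, with hypothetical names standing in for the kernel ones:

#include <stdbool.h>
#include <stdio.h>

static bool feature_disabled;    /* policy: nopat / !CONFIG_X86_PAT     */
static bool feature_initialized; /* fact: the hardware was programmed   */
static bool modes_done;          /* fact: the cache-mode table is built */

/* Idempotent, like the now-exported init_cache_modes(): a second call
 * simply returns, so several boot paths may invoke it safely. */
static void init_modes(void)
{
	if (modes_done)
		return;
	/* ... derive the mode table from defaults or hardware state ... */
	modes_done = true;
}

static void feature_init(void)
{
	if (feature_disabled)
		return;           /* mirrors pat_init(): no table work here */
	/* ... program the hardware ... */
	feature_initialized = true;
	init_modes();
}

int main(void)
{
	feature_init(); /* may or may not run, depending on policy */
	init_modes();   /* the setup_arch()-style catch-all: no-op if done */
	printf("initialized=%d modes_done=%d\n",
	       feature_initialized, modes_done);
	return 0;
}
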
+diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
+index 8baab4307f7b..7830d304dff6 100644
+--- a/crypto/rsa-pkcs1pad.c
++++ b/crypto/rsa-pkcs1pad.c
+@@ -496,7 +496,7 @@ static int pkcs1pad_verify_complete(struct akcipher_request *req, int err)
+ goto done;
+ pos++;
+
+- if (memcmp(out_buf + pos, digest_info->data, digest_info->size))
++ if (crypto_memneq(out_buf + pos, digest_info->data, digest_info->size))
+ goto done;
+
+ pos += digest_info->size;
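
The memcmp()-to-crypto_memneq() change matters because memcmp() returns at the first mismatching byte, so its running time tells an attacker how long a matching prefix was. A constant-time comparison inspects every byte regardless. A userspace sketch of the idea (the kernel's real interface is crypto_memneq()):

#include <stddef.h>

/* Returns nonzero iff the buffers differ, taking the same time for
 * every input: differences are OR-accumulated instead of triggering
 * an early return. */
static int ct_memneq(const void *a, const void *b, size_t n)
{
	const unsigned char *pa = a, *pb = b;
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= pa[i] ^ pb[i];

	return diff != 0;
}
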
+diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
+index 398807d1b77e..4ecf92e3b404 100644
+--- a/drivers/crypto/caam/caamalg.c
++++ b/drivers/crypto/caam/caamalg.c
+@@ -1475,8 +1475,7 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+ struct device *jrdev = ctx->jrdev;
+- gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+- CRYPTO_TFM_REQ_MAY_SLEEP)) ?
++ gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+ GFP_KERNEL : GFP_ATOMIC;
+ int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
+ struct ablkcipher_edesc *edesc;
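
The caam hunk narrows the sleeping-allocation test: CRYPTO_TFM_REQ_MAY_BACKLOG only says the request may be queued, not that the submitter can sleep, so only CRYPTO_TFM_REQ_MAY_SLEEP should select GFP_KERNEL. A sketch of the decision with mocked flag values (the real constants live in the kernel crypto headers):

#include <stdio.h>

#define REQ_MAY_SLEEP   0x1	/* mock of CRYPTO_TFM_REQ_MAY_SLEEP   */
#define REQ_MAY_BACKLOG 0x2	/* mock of CRYPTO_TFM_REQ_MAY_BACKLOG */

enum gfp { GFP_ATOMIC, GFP_KERNEL };

static enum gfp alloc_ctx(unsigned int req_flags)
{
	/* Sleep only when the submitter explicitly allowed it. */
	return (req_flags & REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
}

int main(void)
{
	/* A backloggable request from atomic context must stay atomic. */
	printf("%s\n", alloc_ctx(REQ_MAY_BACKLOG) == GFP_ATOMIC ? "ok" : "bug");
	return 0;
}
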
+diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
+index f191c2a75732..4ed485a99c68 100644
+--- a/drivers/staging/comedi/comedi_fops.c
++++ b/drivers/staging/comedi/comedi_fops.c
+@@ -2915,6 +2915,7 @@ static int __init comedi_init(void)
+ dev = comedi_alloc_board_minor(NULL);
+ if (IS_ERR(dev)) {
+ comedi_cleanup_board_minors();
++ class_destroy(comedi_class);
+ cdev_del(&comedi_cdev);
+ unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
+ COMEDI_NUM_MINORS);
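
The comedi fix adds the class_destroy() rung that was missing from this failure path. The underlying pattern is the usual reverse-order unwind ladder; here is a self-contained sketch with malloc() standing in for the hypothetical resources:

#include <stdlib.h>

static int init_sketch(void)
{
	void *region = NULL, *class = NULL, *cdev = NULL;

	region = malloc(16);		/* register_chrdev_region() stand-in */
	if (!region)
		goto out;
	class = malloc(16);		/* class_create() stand-in */
	if (!class)
		goto out_region;
	cdev = malloc(16);		/* cdev_add() stand-in */
	if (!cdev)
		goto out_class;		/* without this rung, class leaks */

	return 0;			/* success: resources stay live */

out_class:
	free(class);
out_region:
	free(region);
out:
	return -1;
}

int main(void)
{
	return init_sketch() ? 1 : 0;
}
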
+diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
+index 028f54b453d0..b7afef056659 100644
+--- a/drivers/staging/vt6656/main_usb.c
++++ b/drivers/staging/vt6656/main_usb.c
+@@ -513,6 +513,9 @@ static int vnt_start(struct ieee80211_hw *hw)
+ goto free_all;
+ }
+
++ if (vnt_key_init_table(priv))
++ goto free_all;
++
+ priv->int_interval = 1; /* bInterval is set to 1 */
+
+ vnt_int_start_interrupt(priv);
+diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
+index bbefddd92bfe..92606b1e55bd 100644
+--- a/drivers/tty/serial/imx.c
++++ b/drivers/tty/serial/imx.c
+@@ -1340,29 +1340,13 @@ static int imx_startup(struct uart_port *port)
+ imx_enable_ms(&sport->port);
+
+ /*
+- * If the serial port is opened for reading start RX DMA immediately
+- * instead of waiting for RX FIFO interrupts. In our iMX53 the average
+- * delay for the first reception dropped from approximately 35000
+- * microseconds to 1000 microseconds.
++ * Start RX DMA immediately instead of waiting for RX FIFO interrupts.
++ * In our iMX53 the average delay for the first reception dropped from
++ * approximately 35000 microseconds to 1000 microseconds.
+ */
+ if (sport->dma_is_enabled) {
+- struct tty_struct *tty = sport->port.state->port.tty;
+- struct tty_file_private *file_priv;
+- int readcnt = 0;
+-
+- spin_lock(&tty->files_lock);
+-
+- if (!list_empty(&tty->tty_files))
+- list_for_each_entry(file_priv, &tty->tty_files, list)
+- if (!(file_priv->file->f_flags & O_WRONLY))
+- readcnt++;
+-
+- spin_unlock(&tty->files_lock);
+-
+- if (readcnt > 0) {
+- imx_disable_rx_int(sport);
+- start_rx_dma(sport);
+- }
++ imx_disable_rx_int(sport);
++ start_rx_dma(sport);
+ }
+
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
+index d74dc5f81a04..48c7a7d55ed3 100644
+--- a/fs/ext4/sysfs.c
++++ b/fs/ext4/sysfs.c
+@@ -100,7 +100,7 @@ static ssize_t reserved_clusters_store(struct ext4_attr *a,
+ int ret;
+
+ ret = kstrtoull(skip_spaces(buf), 0, &val);
+- if (!ret || val >= clusters)
++ if (ret || val >= clusters)
+ return -EINVAL;
+
+ atomic64_set(&sbi->s_resv_clusters, val);
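
The ext4 change fixes an inverted test: kstrtoull() returns 0 on success and a negative errno on failure, so "if (!ret || ...)" rejected every well-formed value and let parse failures through. The corrected shape, rendered as a userspace sketch around strtoull():

#include <errno.h>
#include <stdlib.h>

/* Parse an unsigned value and bounds-check it, mirroring the fixed
 * reserved_clusters_store(): reject on parse error OR out-of-range. */
static int parse_reserved(const char *buf, unsigned long long limit,
			  unsigned long long *out)
{
	char *end;
	unsigned long long val;

	errno = 0;
	val = strtoull(buf, &end, 0);
	if (errno || end == buf || val >= limit)
		return -EINVAL;

	*out = val;
	return 0;
}
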
+diff --git a/fs/proc/internal.h b/fs/proc/internal.h
+index c5ae09b6c726..18694598bebf 100644
+--- a/fs/proc/internal.h
++++ b/fs/proc/internal.h
+@@ -67,7 +67,7 @@ struct proc_inode {
+ struct proc_dir_entry *pde;
+ struct ctl_table_header *sysctl;
+ struct ctl_table *sysctl_entry;
+- struct list_head sysctl_inodes;
++ struct hlist_node sysctl_inodes;
+ const struct proc_ns_operations *ns_ops;
+ struct inode vfs_inode;
+ };
+diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
+index 67985a7233c2..9bf06e2b1284 100644
+--- a/fs/proc/proc_sysctl.c
++++ b/fs/proc/proc_sysctl.c
+@@ -191,7 +191,7 @@ static void init_header(struct ctl_table_header *head,
+ head->set = set;
+ head->parent = NULL;
+ head->node = node;
+- INIT_LIST_HEAD(&head->inodes);
++ INIT_HLIST_HEAD(&head->inodes);
+ if (node) {
+ struct ctl_table *entry;
+ for (entry = table; entry->procname; entry++, node++)
+@@ -261,25 +261,42 @@ static void unuse_table(struct ctl_table_header *p)
+ complete(p->unregistering);
+ }
+
+-/* called under sysctl_lock */
+ static void proc_sys_prune_dcache(struct ctl_table_header *head)
+ {
+- struct inode *inode, *prev = NULL;
++ struct inode *inode;
+ struct proc_inode *ei;
++ struct hlist_node *node;
++ struct super_block *sb;
+
+ rcu_read_lock();
+- list_for_each_entry_rcu(ei, &head->inodes, sysctl_inodes) {
+- inode = igrab(&ei->vfs_inode);
+- if (inode) {
+- rcu_read_unlock();
+- iput(prev);
+- prev = inode;
+- d_prune_aliases(inode);
++ for (;;) {
++ node = hlist_first_rcu(&head->inodes);
++ if (!node)
++ break;
++ ei = hlist_entry(node, struct proc_inode, sysctl_inodes);
++ spin_lock(&sysctl_lock);
++ hlist_del_init_rcu(&ei->sysctl_inodes);
++ spin_unlock(&sysctl_lock);
++
++ inode = &ei->vfs_inode;
++ sb = inode->i_sb;
++ if (!atomic_inc_not_zero(&sb->s_active))
++ continue;
++ inode = igrab(inode);
++ rcu_read_unlock();
++ if (unlikely(!inode)) {
++ deactivate_super(sb);
+ rcu_read_lock();
++ continue;
+ }
++
++ d_prune_aliases(inode);
++ iput(inode);
++ deactivate_super(sb);
++
++ rcu_read_lock();
+ }
+ rcu_read_unlock();
+- iput(prev);
+ }
+
+ /* called under sysctl_lock, will reacquire if has to wait */
+@@ -461,7 +478,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
+ }
+ ei->sysctl = head;
+ ei->sysctl_entry = table;
+- list_add_rcu(&ei->sysctl_inodes, &head->inodes);
++ hlist_add_head_rcu(&ei->sysctl_inodes, &head->inodes);
+ head->count++;
+ spin_unlock(&sysctl_lock);
+
+@@ -489,7 +506,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
+ void proc_sys_evict_inode(struct inode *inode, struct ctl_table_header *head)
+ {
+ spin_lock(&sysctl_lock);
+- list_del_rcu(&PROC_I(inode)->sysctl_inodes);
++ hlist_del_init_rcu(&PROC_I(inode)->sysctl_inodes);
+ if (!--head->count)
+ kfree_rcu(head, rcu);
+ spin_unlock(&sysctl_lock);
+diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
+index 80d07816def0..1c04a26bfd2f 100644
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -143,7 +143,7 @@ struct ctl_table_header
+ struct ctl_table_set *set;
+ struct ctl_dir *parent;
+ struct ctl_node *node;
+- struct list_head inodes; /* head for proc_inode->sysctl_inodes */
++ struct hlist_head inodes; /* head for proc_inode->sysctl_inodes */
+ };
+
+ struct ctl_dir {
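
Two properties of hlist make this conversion attractive for the sysctl inode tracking: the head is a single pointer (one word per ctl_table_header instead of two), and a del-init style removal leaves the node "unhashed", so the prune loop can pop entries off the head under sysctl_lock while the evict path can later issue the same delete harmlessly. A minimal non-RCU rendition of that delete-is-idempotent behavior:

#include <assert.h>
#include <stddef.h>

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };  /* one pointer */

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	n->next = h->first;
	if (h->first)
		h->first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* Unlink and mark unhashed; calling this twice is safe, which is what
 * lets both the prune path and the evict path delete the same node. */
static void hlist_del_init(struct hlist_node *n)
{
	if (!n->pprev)
		return;			/* already unhashed: no-op */
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;
	n->pprev = NULL;
}

int main(void)
{
	struct hlist_head head = { NULL };
	struct hlist_node a;

	hlist_add_head(&a, &head);
	hlist_del_init(&a);		/* prune path */
	hlist_del_init(&a);		/* evict path: harmless second delete */
	assert(head.first == NULL);
	return 0;
}
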
+diff --git a/ipc/mqueue.c b/ipc/mqueue.c
+index e8d41ff57241..a6ced9e07e1c 100644
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -1253,8 +1253,10 @@ SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
+
+ timeo = MAX_SCHEDULE_TIMEOUT;
+ ret = netlink_attachskb(sock, nc, &timeo, NULL);
+- if (ret == 1)
++ if (ret == 1) {
++ sock = NULL;
+ goto retry;
++ }
+ if (ret) {
+ sock = NULL;
+ nc = NULL;
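
The mq_notify fix is an ownership rule: when netlink_attachskb() returns 1 it has already dropped the socket reference it was handed, so the caller must forget its pointer before looping, or the later error path releases the socket a second time. Userspace sketch with free() standing in for the reference drop (attach_retry() is a hypothetical stand-in, not a kernel API):

#include <stdlib.h>

/* Consumes the resource and asks the caller to retry, the way
 * netlink_attachskb() consumes the sock reference when returning 1. */
static int attach_retry(char *resource)
{
	free(resource);		/* reference dropped inside the callee */
	return 1;
}

int main(void)
{
	char *sock = malloc(32);
	if (!sock)
		return 1;

	if (attach_retry(sock) == 1)
		sock = NULL;	/* the fix: our pointer is now stale */

	free(sock);		/* error-path cleanup; free(NULL) is a no-op */
	return 0;
}
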
+diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
+index c65f7989f850..20819df98125 100644
+--- a/kernel/locking/rwsem-spinlock.c
++++ b/kernel/locking/rwsem-spinlock.c
+@@ -231,8 +231,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
+
+ out_nolock:
+ list_del(&waiter.list);
+- if (!list_empty(&sem->wait_list))
+- __rwsem_do_wake(sem, 1);
++ if (!list_empty(&sem->wait_list) && sem->count >= 0)
++ __rwsem_do_wake(sem, 0);
+ raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
+
+ return -EINTR;
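
The rwsem-spinlock fix guards the interrupted-writer exit path: remaining waiters may only be woken when nobody holds the lock for writing (in this implementation count is -1 while write-locked), and the aborting writer should hand off to readers rather than pretend to pass write ownership. A single-threaded simulation of the guard, heavily simplified from the real semaphore:

#include <stdio.h>

struct sim_rwsem {
	int count;	/* 0 free, >0 readers active, -1 writer holds it */
	int nwaiting;	/* queued reader waiters (simplified) */
};

/* Stand-in for __rwsem_do_wake(sem, 0): grant the lock to readers. */
static void wake_readers(struct sim_rwsem *sem)
{
	sem->count += sem->nwaiting;
	sem->nwaiting = 0;
}

/* A queued writer got a signal and bails out with -EINTR. */
static void abort_write_wait(struct sim_rwsem *sem)
{
	/* Pre-fix code woke waiters unconditionally; with count == -1 a
	 * woken reader would run alongside the still-active writer. */
	if (sem->nwaiting > 0 && sem->count >= 0)
		wake_readers(sem);
}

int main(void)
{
	struct sim_rwsem sem = { .count = -1, .nwaiting = 2 };

	abort_write_wait(&sem);	/* writer still active: nobody woken */
	printf("count=%d (still write-locked)\n", sem.count);

	sem.count = 0;		/* writer released the lock */
	abort_write_wait(&sem);	/* now the queued readers proceed */
	printf("count=%d (readers running)\n", sem.count);
	return 0;
}
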