author    Mike Pagano <mpagano@gentoo.org>  2017-09-27 12:38:38 -0400
committer Mike Pagano <mpagano@gentoo.org>  2017-09-27 12:38:38 -0400
commit    e3e2cb00d3cd51ee0e0f642dd67f6fe06bb79801 (patch)
tree      527a7892f7120689e0734618e896c226727f9228
parent    Linux patch 4.9.51 (diff)
download  linux-patches-e3e2cb00d3cd51ee0e0f642dd67f6fe06bb79801.tar.gz
          linux-patches-e3e2cb00d3cd51ee0e0f642dd67f6fe06bb79801.tar.bz2
          linux-patches-e3e2cb00d3cd51ee0e0f642dd67f6fe06bb79801.zip
Linux patch 4.9.52 (4.9-54)
-rw-r--r--  0000_README             |    4
-rw-r--r--  1051_linux-4.9.52.patch | 3985
2 files changed, 3989 insertions, 0 deletions
diff --git a/0000_README b/0000_README
index 54efac88..2ae097d8 100644
--- a/0000_README
+++ b/0000_README
@@ -247,6 +247,10 @@ Patch: 1050_linux-4.9.51.patch
From: http://www.kernel.org
Desc: Linux 4.9.51
+Patch: 1051_linux-4.9.52.patch
+From: http://www.kernel.org
+Desc: Linux 4.9.52
+
Patch: 1500_XATTR_USER_PREFIX.patch
From: https://bugs.gentoo.org/show_bug.cgi?id=470644
Desc: Support for namespace user.pax.* on tmpfs.
diff --git a/1051_linux-4.9.52.patch b/1051_linux-4.9.52.patch
new file mode 100644
index 00000000..6e49cca8
--- /dev/null
+++ b/1051_linux-4.9.52.patch
@@ -0,0 +1,3985 @@
+diff --git a/Makefile b/Makefile
+index b48aebbe187f..c53de1e38c6a 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 4
+ PATCHLEVEL = 9
+-SUBLEVEL = 51
++SUBLEVEL = 52
+ EXTRAVERSION =
+ NAME = Roaring Lionus
+
+diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
+index 1eea99beecc3..85d9ea4a0acc 100644
+--- a/arch/arc/kernel/entry.S
++++ b/arch/arc/kernel/entry.S
+@@ -92,6 +92,12 @@ ENTRY(EV_MachineCheck)
+ lr r0, [efa]
+ mov r1, sp
+
++ ; hardware auto-disables MMU, re-enable it to allow kernel vaddr
++ ; access for say stack unwinding of modules for crash dumps
++ lr r3, [ARC_REG_PID]
++ or r3, r3, MMU_ENABLE
++ sr r3, [ARC_REG_PID]
++
+ lsr r3, r2, 8
+ bmsk r3, r3, 7
+ brne r3, ECR_C_MCHK_DUP_TLB, 1f
+diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
+index bdb295e09160..a4dc881da277 100644
+--- a/arch/arc/mm/tlb.c
++++ b/arch/arc/mm/tlb.c
+@@ -896,9 +896,6 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
+
+ local_irq_save(flags);
+
+- /* re-enable the MMU */
+- write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+-
+ /* loop thru all sets of TLB */
+ for (set = 0; set < mmu->sets; set++) {
+
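+The ARC hunks above move the MMU re-enable out of the C TLB-overlap handler
+and into the machine-check entry path, so kernel virtual addresses stay
+accessible (e.g. for unwinding module stacks in crash dumps) for every
+machine-check cause, not just duplicate-TLB errors. A minimal C sketch of
+what the new lr/or/sr assembly sequence does — it mirrors the exact line
+removed from tlb.c:
+
+	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));
+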
+diff --git a/arch/mips/math-emu/dp_fmax.c b/arch/mips/math-emu/dp_fmax.c
+index fd71b8daaaf2..5bec64f2884e 100644
+--- a/arch/mips/math-emu/dp_fmax.c
++++ b/arch/mips/math-emu/dp_fmax.c
+@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+ return ys ? x : y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754dp_zero(1);
++ return ieee754dp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y)
+ else if (xs < ys)
+ return x;
+
+- /* Compare exponent */
+- if (xe > ye)
+- return x;
+- else if (xe < ye)
+- return y;
++ /* Signs of inputs are equal, let's compare exponents */
++ if (xs == 0) {
++ /* Inputs are both positive */
++ if (xe > ye)
++ return x;
++ else if (xe < ye)
++ return y;
++ } else {
++ /* Inputs are both negative */
++ if (xe > ye)
++ return y;
++ else if (xe < ye)
++ return x;
++ }
+
+- /* Compare mantissa */
++ /* Signs and exponents of inputs are equal, let's compare mantissas */
++ if (xs == 0) {
++ /* Inputs are both positive, with equal signs and exponents */
++ if (xm <= ym)
++ return y;
++ return x;
++ }
++ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+- return y;
+- return x;
++ return x;
++ return y;
+ }
+
+ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,6 +202,9 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ /*
+ * Infinity and zero handling
+ */
++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++ return ieee754dp_inf(xs & ys);
++
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+@@ -171,7 +212,6 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+@@ -180,9 +220,7 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754dp_zero(1);
++ return ieee754dp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y)
+ return y;
+
+ /* Compare mantissa */
+- if (xm <= ym)
++ if (xm < ym)
+ return y;
+- return x;
++ else if (xm > ym)
++ return x;
++ else if (xs == 0)
++ return x;
++ return y;
+ }
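+The fmax change above (and the mirror-image fmin change below) makes the
+exponent and mantissa comparisons sign-aware: for two negative inputs the
+smaller magnitude is the larger value, so the old sign-blind comparison
+picked the wrong operand. A sketch of the corrected decision, assuming the
+sign/exponent/mantissa fields are already unpacked as the kernel's
+COMPXDP/COMPYDP macros do:
+
+	/* Illustrative only: nonzero when x is the maximum, given equal
+	 * signs (xs == ys) and finite inputs. */
+	static int fmax_is_x(int xs, int xe, int ye, u64 xm, u64 ym)
+	{
+		if (xe != ye)
+			return xs ? (xe < ye) : (xe > ye);
+		if (xm != ym)
+			return xs ? (xm < ym) : (xm > ym);
+		return 1;	/* equal values: either operand is correct */
+	}
+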
+diff --git a/arch/mips/math-emu/dp_fmin.c b/arch/mips/math-emu/dp_fmin.c
+index c1072b0dfb95..a287b23818d8 100644
+--- a/arch/mips/math-emu/dp_fmin.c
++++ b/arch/mips/math-emu/dp_fmin.c
+@@ -47,14 +47,26 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+ return ys ? y : x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754dp_zero(1);
++ return ieee754dp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y)
+ else if (xs < ys)
+ return y;
+
+- /* Compare exponent */
+- if (xe > ye)
+- return y;
+- else if (xe < ye)
+- return x;
++ /* Signs of inputs are the same, let's compare exponents */
++ if (xs == 0) {
++ /* Inputs are both positive */
++ if (xe > ye)
++ return y;
++ else if (xe < ye)
++ return x;
++ } else {
++ /* Inputs are both negative */
++ if (xe > ye)
++ return x;
++ else if (xe < ye)
++ return y;
++ }
+
+- /* Compare mantissa */
++ /* Signs and exponents of inputs are equal, let's compare mantissas */
++ if (xs == 0) {
++ /* Inputs are both positive, with equal signs and exponents */
++ if (xm <= ym)
++ return x;
++ return y;
++ }
++ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+- return x;
+- return y;
++ return y;
++ return x;
+ }
+
+ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+@@ -147,14 +173,26 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754dp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,25 +202,25 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+ /*
+ * Infinity and zero handling
+ */
++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++ return ieee754dp_inf(xs | ys);
++
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+- return x;
++ return y;
+
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+- return y;
++ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754dp_zero(1);
++ return ieee754dp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y)
+ return x;
+
+ /* Compare mantissa */
+- if (xm <= ym)
++ if (xm < ym)
++ return x;
++ else if (xm > ym)
++ return y;
++ else if (xs == 1)
+ return x;
+ return y;
+ }
+diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
+index 4a2d03c72959..e0d9be5fbf4c 100644
+--- a/arch/mips/math-emu/dp_maddf.c
++++ b/arch/mips/math-emu/dp_maddf.c
+@@ -14,22 +14,45 @@
+
+ #include "ieee754dp.h"
+
+-enum maddf_flags {
+- maddf_negate_product = 1 << 0,
+-};
++
++/* 128 bits shift right logical with rounding. */
++void srl128(u64 *hptr, u64 *lptr, int count)
++{
++ u64 low;
++
++ if (count >= 128) {
++ *lptr = *hptr != 0 || *lptr != 0;
++ *hptr = 0;
++ } else if (count >= 64) {
++ if (count == 64) {
++ *lptr = *hptr | (*lptr != 0);
++ } else {
++ low = *lptr;
++ *lptr = *hptr >> (count - 64);
++ *lptr |= (*hptr << (128 - count)) != 0 || low != 0;
++ }
++ *hptr = 0;
++ } else {
++ low = *lptr;
++ *lptr = low >> count | *hptr << (64 - count);
++ *lptr |= (low << (64 - count)) != 0;
++ *hptr = *hptr >> count;
++ }
++}
+
+ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y, enum maddf_flags flags)
+ {
+ int re;
+ int rs;
+- u64 rm;
+ unsigned lxm;
+ unsigned hxm;
+ unsigned lym;
+ unsigned hym;
+ u64 lrm;
+ u64 hrm;
++ u64 lzm;
++ u64 hzm;
+ u64 t;
+ u64 at;
+ int s;
+@@ -48,52 +71,34 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+
+ ieee754_clearcx();
+
+- switch (zc) {
+- case IEEE754_CLASS_SNAN:
+- ieee754_setcx(IEEE754_INVALID_OPERATION);
++ /*
++ * Handle the cases when at least one of x, y or z is a NaN.
++ * Order of precedence is sNaN, qNaN and z, x, y.
++ */
++ if (zc == IEEE754_CLASS_SNAN)
+ return ieee754dp_nanxcpt(z);
+- case IEEE754_CLASS_DNORM:
+- DPDNORMZ;
+- /* QNAN is handled separately below */
+- }
+-
+- switch (CLPAIR(xc, yc)) {
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+- return ieee754dp_nanxcpt(y);
+-
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
++ if (xc == IEEE754_CLASS_SNAN)
+ return ieee754dp_nanxcpt(x);
+-
+- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
++ if (yc == IEEE754_CLASS_SNAN)
++ return ieee754dp_nanxcpt(y);
++ if (zc == IEEE754_CLASS_QNAN)
++ return z;
++ if (xc == IEEE754_CLASS_QNAN)
++ return x;
++ if (yc == IEEE754_CLASS_QNAN)
+ return y;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+- return x;
++ if (zc == IEEE754_CLASS_DNORM)
++ DPDNORMZ;
++ /* ZERO z cases are handled separately below */
+
++ switch (CLPAIR(xc, yc)) {
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754dp_indef();
+
+@@ -102,9 +107,27 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+- return ieee754dp_inf(xs ^ ys);
++ if ((zc == IEEE754_CLASS_INF) &&
++ ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) ||
++ ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) {
++ /*
++ * Cases of addition of infinities with opposite signs
++ * or subtraction of infinities with same signs.
++ */
++ ieee754_setcx(IEEE754_INVALID_OPERATION);
++ return ieee754dp_indef();
++ }
++ /*
++ * z is here either not an infinity, or an infinity having the
++ * same sign as product (x*y) (in case of MADDF.D instruction)
++ * or product -(x*y) (in MSUBF.D case). The result must be an
++ * infinity, and its sign is determined only by the value of
++ * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y.
++ */
++ if (flags & MADDF_NEGATE_PRODUCT)
++ return ieee754dp_inf(1 ^ (xs ^ ys));
++ else
++ return ieee754dp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+@@ -113,32 +136,42 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+- /* Multiplication is 0 so just return z */
++ if (zc == IEEE754_CLASS_ZERO) {
++ /* Handle cases +0 + (-0) and similar ones. */
++ if ((!(flags & MADDF_NEGATE_PRODUCT)
++ && (zs == (xs ^ ys))) ||
++ ((flags & MADDF_NEGATE_PRODUCT)
++ && (zs != (xs ^ ys))))
++ /*
++ * Cases of addition of zeros of equal signs
++ * or subtraction of zeroes of opposite signs.
++ * The sign of the resulting zero is in any
++ * such case determined only by the sign of z.
++ */
++ return z;
++
++ return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
++ }
++ /* x*y is here 0, and z is not 0, so just return z */
+ return z;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+- else if (zc == IEEE754_CLASS_INF)
++ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+- else if (zc == IEEE754_CLASS_INF)
++ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+- else if (zc == IEEE754_CLASS_INF)
++ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ /* fall through to real computations */
+ }
+@@ -157,7 +190,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+
+ re = xe + ye;
+ rs = xs ^ ys;
+- if (flags & maddf_negate_product)
++ if (flags & MADDF_NEGATE_PRODUCT)
+ rs ^= 1;
+
+ /* shunt to top of word */
+@@ -165,7 +198,7 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ ym <<= 64 - (DP_FBITS + 1);
+
+ /*
+- * Multiply 64 bits xm, ym to give high 64 bits rm with stickness.
++ * Multiply 64 bits xm and ym to give 128 bits result in hrm:lrm.
+ */
+
+ /* 32 * 32 => 64 */
+@@ -195,78 +228,110 @@ static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+
+ hrm = hrm + (t >> 32);
+
+- rm = hrm | (lrm != 0);
+-
+- /*
+- * Sticky shift down to normal rounding precision.
+- */
+- if ((s64) rm < 0) {
+- rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
+- ((rm << (DP_FBITS + 1 + 3)) != 0);
++ /* Put explicit bit at bit 126 if necessary */
++ if ((int64_t)hrm < 0) {
++ lrm = (hrm << 63) | (lrm >> 1);
++ hrm = hrm >> 1;
+ re++;
+- } else {
+- rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
+- ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
+ }
+- assert(rm & (DP_HIDDEN_BIT << 3));
+
+- /* And now the addition */
+- assert(zm & DP_HIDDEN_BIT);
++ assert(hrm & (1 << 62));
+
+- /*
+- * Provide guard,round and stick bit space.
+- */
+- zm <<= 3;
++ if (zc == IEEE754_CLASS_ZERO) {
++ /*
++ * Move explicit bit from bit 126 to bit 55 since the
++ * ieee754dp_format code expects the mantissa to be
++ * 56 bits wide (53 + 3 rounding bits).
++ */
++ srl128(&hrm, &lrm, (126 - 55));
++ return ieee754dp_format(rs, re, lrm);
++ }
++
++ /* Move explicit bit from bit 52 to bit 126 */
++ lzm = 0;
++ hzm = zm << 10;
++ assert(hzm & (1 << 62));
+
++ /* Make the exponents the same */
+ if (ze > re) {
+ /*
+ * Have to shift y fraction right to align.
+ */
+ s = ze - re;
+- rm = XDPSRS(rm, s);
++ srl128(&hrm, &lrm, s);
+ re += s;
+ } else if (re > ze) {
+ /*
+ * Have to shift x fraction right to align.
+ */
+ s = re - ze;
+- zm = XDPSRS(zm, s);
++ srl128(&hzm, &lzm, s);
+ ze += s;
+ }
+ assert(ze == re);
+ assert(ze <= DP_EMAX);
+
++ /* Do the addition */
+ if (zs == rs) {
+ /*
+- * Generate 28 bit result of adding two 27 bit numbers
+- * leaving result in xm, xs and xe.
++ * Generate 128 bit result by adding two 127 bit numbers
++ * leaving result in hzm:lzm, zs and ze.
+ */
+- zm = zm + rm;
+-
+- if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
+- zm = XDPSRS1(zm);
++ hzm = hzm + hrm + (lzm > (lzm + lrm));
++ lzm = lzm + lrm;
++ if ((int64_t)hzm < 0) { /* carry out */
++ srl128(&hzm, &lzm, 1);
+ ze++;
+ }
+ } else {
+- if (zm >= rm) {
+- zm = zm - rm;
++ if (hzm > hrm || (hzm == hrm && lzm >= lrm)) {
++ hzm = hzm - hrm - (lzm < lrm);
++ lzm = lzm - lrm;
+ } else {
+- zm = rm - zm;
++ hzm = hrm - hzm - (lrm < lzm);
++ lzm = lrm - lzm;
+ zs = rs;
+ }
+- if (zm == 0)
++ if (lzm == 0 && hzm == 0)
+ return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ /*
+- * Normalize to rounding precision.
++ * Put explicit bit at bit 126 if necessary.
+ */
+- while ((zm >> (DP_FBITS + 3)) == 0) {
+- zm <<= 1;
+- ze--;
++ if (hzm == 0) {
++ /* left shift by 63 or 64 bits */
++ if ((int64_t)lzm < 0) {
++ /* MSB of lzm is the explicit bit */
++ hzm = lzm >> 1;
++ lzm = lzm << 63;
++ ze -= 63;
++ } else {
++ hzm = lzm;
++ lzm = 0;
++ ze -= 64;
++ }
++ }
++
++ t = 0;
++ while ((hzm >> (62 - t)) == 0)
++ t++;
++
++ assert(t <= 62);
++ if (t) {
++ hzm = hzm << t | lzm >> (64 - t);
++ lzm = lzm << t;
++ ze -= t;
+ }
+ }
+
+- return ieee754dp_format(zs, ze, zm);
++ /*
++ * Move explicit bit from bit 126 to bit 55 since the
++ * ieee754dp_format code expects the mantissa to be
++ * 56 bits wide (53 + 3 rounding bits).
++ */
++ srl128(&hzm, &lzm, (126 - 55));
++
++ return ieee754dp_format(zs, ze, lzm);
+ }
+
+ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+@@ -278,5 +343,5 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y)
+ {
+- return _dp_maddf(z, x, y, maddf_negate_product);
++ return _dp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+ }
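+Two things change in _dp_maddf above: infinity and zero inputs now honor
+MADDF_NEGATE_PRODUCT (so MSUBF.D gets correctly signed infinities and
+zeros), and the intermediate product is kept as the full 128-bit pair
+hrm:lrm instead of being rounded early. The new srl128() helper shifts that
+pair right while ORing every shifted-out bit into bit 0, preserving the
+sticky information ieee754dp_format needs for correct rounding. A usage
+sketch with hypothetical values:
+
+	u64 hi = 0x4000000000000000ULL, lo = 1;
+
+	srl128(&hi, &lo, 71);
+	/* Now hi == 0 and lo == (0x4000000000000000ULL >> 7) | 1: the
+	 * original low bit was shifted out, so it sticks in bit 0. */
+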
+diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
+index 8bc2f6963324..dd2071f430e0 100644
+--- a/arch/mips/math-emu/ieee754int.h
++++ b/arch/mips/math-emu/ieee754int.h
+@@ -26,6 +26,10 @@
+
+ #define CLPAIR(x, y) ((x)*6+(y))
+
++enum maddf_flags {
++ MADDF_NEGATE_PRODUCT = 1 << 0,
++};
++
+ static inline void ieee754_clearcx(void)
+ {
+ ieee754_csr.cx = 0;
+diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
+index 8476067075fe..0f63e4202cff 100644
+--- a/arch/mips/math-emu/ieee754sp.h
++++ b/arch/mips/math-emu/ieee754sp.h
+@@ -45,6 +45,10 @@ static inline int ieee754sp_finite(union ieee754sp x)
+ return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS;
+ }
+
++/* 64 bit right shift with rounding */
++#define XSPSRS64(v, rs) \
++ (((rs) >= 64) ? ((v) != 0) : ((v) >> (rs)) | ((v) << (64-(rs)) != 0))
++
+ /* 3bit extended single precision sticky right shift */
+ #define XSPSRS(v, rs) \
+ ((rs > (SP_FBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0))
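+XSPSRS64() above is the 64-bit counterpart of the existing XSPSRS() sticky
+shift: bits shifted out of the low end are collapsed into bit 0, and a shift
+count of 64 or more reduces the whole value to a 0/1 sticky flag. For
+example:
+
+	u64 v = 0x8000000000000001ULL;
+	u64 r = XSPSRS64(v, 4);	/* == (v >> 4) | 1: the shifted-out 1 sticks */
+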
+diff --git a/arch/mips/math-emu/sp_fmax.c b/arch/mips/math-emu/sp_fmax.c
+index 4d000844e48e..74a5a00d2f22 100644
+--- a/arch/mips/math-emu/sp_fmax.c
++++ b/arch/mips/math-emu/sp_fmax.c
+@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+ return ys ? x : y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754sp_zero(1);
++ return ieee754sp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y)
+ else if (xs < ys)
+ return x;
+
+- /* Compare exponent */
+- if (xe > ye)
+- return x;
+- else if (xe < ye)
+- return y;
++ /* Signs of inputs are equal, let's compare exponents */
++ if (xs == 0) {
++ /* Inputs are both positive */
++ if (xe > ye)
++ return x;
++ else if (xe < ye)
++ return y;
++ } else {
++ /* Inputs are both negative */
++ if (xe > ye)
++ return y;
++ else if (xe < ye)
++ return x;
++ }
+
+- /* Compare mantissa */
++ /* Signs and exponents of inputs are equal, let's compare mantissas */
++ if (xs == 0) {
++ /* Inputs are both positive, with equal signs and exponents */
++ if (xm <= ym)
++ return y;
++ return x;
++ }
++ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+- return y;
+- return x;
++ return x;
++ return y;
+ }
+
+ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,6 +202,9 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ /*
+ * Infinity and zero handling
+ */
++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++ return ieee754sp_inf(xs & ys);
++
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+@@ -171,7 +212,6 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+@@ -180,9 +220,7 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754sp_zero(1);
++ return ieee754sp_zero(xs & ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y)
+ return y;
+
+ /* Compare mantissa */
+- if (xm <= ym)
++ if (xm < ym)
+ return y;
+- return x;
++ else if (xm > ym)
++ return x;
++ else if (xs == 0)
++ return x;
++ return y;
+ }
+diff --git a/arch/mips/math-emu/sp_fmin.c b/arch/mips/math-emu/sp_fmin.c
+index 4eb1bb9e9dec..c51385f46b09 100644
+--- a/arch/mips/math-emu/sp_fmin.c
++++ b/arch/mips/math-emu/sp_fmin.c
+@@ -47,14 +47,26 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -80,9 +92,7 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+ return ys ? y : x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754sp_zero(1);
++ return ieee754sp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+@@ -106,16 +116,32 @@ union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y)
+ else if (xs < ys)
+ return y;
+
+- /* Compare exponent */
+- if (xe > ye)
+- return y;
+- else if (xe < ye)
+- return x;
++ /* Signs of inputs are the same, let's compare exponents */
++ if (xs == 0) {
++ /* Inputs are both positive */
++ if (xe > ye)
++ return y;
++ else if (xe < ye)
++ return x;
++ } else {
++ /* Inputs are both negative */
++ if (xe > ye)
++ return x;
++ else if (xe < ye)
++ return y;
++ }
+
+- /* Compare mantissa */
++ /* Signs and exponents of inputs are equal, let's compare mantissas */
++ if (xs == 0) {
++ /* Inputs are both positive, with equal signs and exponents */
++ if (xm <= ym)
++ return x;
++ return y;
++ }
++ /* Inputs are both negative, with equal signs and exponents */
+ if (xm <= ym)
+- return x;
+- return y;
++ return y;
++ return x;
+ }
+
+ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+@@ -147,14 +173,26 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ return ieee754sp_nanxcpt(x);
+
+- /* numbers are preferred to NaNs */
++ /*
++ * Quiet NaN handling
++ */
++
++ /*
++ * The case of both inputs quiet NaNs
++ */
++ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
++ return x;
++
++ /*
++ * The cases of exactly one input quiet NaN (numbers
++ * are here preferred as returned values to NaNs)
++ */
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+@@ -164,25 +202,25 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+ /*
+ * Infinity and zero handling
+ */
++ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
++ return ieee754sp_inf(xs | ys);
++
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+- return x;
++ return y;
+
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+- return y;
++ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+- if (xs == ys)
+- return x;
+- return ieee754sp_zero(1);
++ return ieee754sp_zero(xs | ys);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+@@ -207,7 +245,11 @@ union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y)
+ return x;
+
+ /* Compare mantissa */
+- if (xm <= ym)
++ if (xm < ym)
++ return x;
++ else if (xm > ym)
++ return y;
++ else if (xs == 1)
+ return x;
+ return y;
+ }
+diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
+index a8cd8b4f235e..7195fe785d81 100644
+--- a/arch/mips/math-emu/sp_maddf.c
++++ b/arch/mips/math-emu/sp_maddf.c
+@@ -14,9 +14,6 @@
+
+ #include "ieee754sp.h"
+
+-enum maddf_flags {
+- maddf_negate_product = 1 << 0,
+-};
+
+ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y, enum maddf_flags flags)
+@@ -24,14 +21,8 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ int re;
+ int rs;
+ unsigned rm;
+- unsigned short lxm;
+- unsigned short hxm;
+- unsigned short lym;
+- unsigned short hym;
+- unsigned lrm;
+- unsigned hrm;
+- unsigned t;
+- unsigned at;
++ uint64_t rm64;
++ uint64_t zm64;
+ int s;
+
+ COMPXSP;
+@@ -48,51 +39,35 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+
+ ieee754_clearcx();
+
+- switch (zc) {
+- case IEEE754_CLASS_SNAN:
+- ieee754_setcx(IEEE754_INVALID_OPERATION);
++ /*
++ * Handle the cases when at least one of x, y or z is a NaN.
++ * Order of precedence is sNaN, qNaN and z, x, y.
++ */
++ if (zc == IEEE754_CLASS_SNAN)
+ return ieee754sp_nanxcpt(z);
+- case IEEE754_CLASS_DNORM:
+- SPDNORMZ;
+- /* QNAN is handled separately below */
+- }
+-
+- switch (CLPAIR(xc, yc)) {
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
++ if (xc == IEEE754_CLASS_SNAN)
++ return ieee754sp_nanxcpt(x);
++ if (yc == IEEE754_CLASS_SNAN)
+ return ieee754sp_nanxcpt(y);
++ if (zc == IEEE754_CLASS_QNAN)
++ return z;
++ if (xc == IEEE754_CLASS_QNAN)
++ return x;
++ if (yc == IEEE754_CLASS_QNAN)
++ return y;
+
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+- return ieee754sp_nanxcpt(x);
++ if (zc == IEEE754_CLASS_DNORM)
++ SPDNORMZ;
++ /* ZERO z cases are handled separately below */
+
+- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+- return y;
++ switch (CLPAIR(xc, yc)) {
+
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+- return x;
+
+ /*
+ * Infinity handling
+ */
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+ ieee754_setcx(IEEE754_INVALID_OPERATION);
+ return ieee754sp_indef();
+
+@@ -101,9 +76,27 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+- return ieee754sp_inf(xs ^ ys);
++ if ((zc == IEEE754_CLASS_INF) &&
++ ((!(flags & MADDF_NEGATE_PRODUCT) && (zs != (xs ^ ys))) ||
++ ((flags & MADDF_NEGATE_PRODUCT) && (zs == (xs ^ ys))))) {
++ /*
++ * Cases of addition of infinities with opposite signs
++ * or subtraction of infinities with same signs.
++ */
++ ieee754_setcx(IEEE754_INVALID_OPERATION);
++ return ieee754sp_indef();
++ }
++ /*
++ * z is here either not an infinity, or an infinity having the
++ * same sign as product (x*y) (in case of MADDF.D instruction)
++ * or product -(x*y) (in MSUBF.D case). The result must be an
++ * infinity, and its sign is determined only by the value of
++ * (flags & MADDF_NEGATE_PRODUCT) and the signs of x and y.
++ */
++ if (flags & MADDF_NEGATE_PRODUCT)
++ return ieee754sp_inf(1 ^ (xs ^ ys));
++ else
++ return ieee754sp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+@@ -112,32 +105,42 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+- /* Multiplication is 0 so just return z */
++ if (zc == IEEE754_CLASS_ZERO) {
++ /* Handle cases +0 + (-0) and similar ones. */
++ if ((!(flags & MADDF_NEGATE_PRODUCT)
++ && (zs == (xs ^ ys))) ||
++ ((flags & MADDF_NEGATE_PRODUCT)
++ && (zs != (xs ^ ys))))
++ /*
++ * Cases of addition of zeros of equal signs
++ * or subtraction of zeroes of opposite signs.
++ * The sign of the resulting zero is in any
++ * such case determined only by the sign of z.
++ */
++ return z;
++
++ return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
++ }
++ /* x*y is here 0, and z is not 0, so just return z */
+ return z;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+- else if (zc == IEEE754_CLASS_INF)
++ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+- else if (zc == IEEE754_CLASS_INF)
++ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+- if (zc == IEEE754_CLASS_QNAN)
+- return z;
+- else if (zc == IEEE754_CLASS_INF)
++ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ /* fall through to real computations */
+ }
+@@ -158,108 +161,93 @@ static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+
+ re = xe + ye;
+ rs = xs ^ ys;
+- if (flags & maddf_negate_product)
++ if (flags & MADDF_NEGATE_PRODUCT)
+ rs ^= 1;
+
+- /* shunt to top of word */
+- xm <<= 32 - (SP_FBITS + 1);
+- ym <<= 32 - (SP_FBITS + 1);
+-
+- /*
+- * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
+- */
+- lxm = xm & 0xffff;
+- hxm = xm >> 16;
+- lym = ym & 0xffff;
+- hym = ym >> 16;
+-
+- lrm = lxm * lym; /* 16 * 16 => 32 */
+- hrm = hxm * hym; /* 16 * 16 => 32 */
++ /* Multiple 24 bit xm and ym to give 48 bit results */
++ rm64 = (uint64_t)xm * ym;
+
+- t = lxm * hym; /* 16 * 16 => 32 */
+- at = lrm + (t << 16);
+- hrm += at < lrm;
+- lrm = at;
+- hrm = hrm + (t >> 16);
++ /* Shunt to top of word */
++ rm64 = rm64 << 16;
+
+- t = hxm * lym; /* 16 * 16 => 32 */
+- at = lrm + (t << 16);
+- hrm += at < lrm;
+- lrm = at;
+- hrm = hrm + (t >> 16);
+-
+- rm = hrm | (lrm != 0);
+-
+- /*
+- * Sticky shift down to normal rounding precision.
+- */
+- if ((int) rm < 0) {
+- rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
+- ((rm << (SP_FBITS + 1 + 3)) != 0);
++ /* Put explicit bit at bit 62 if necessary */
++ if ((int64_t) rm64 < 0) {
++ rm64 = rm64 >> 1;
+ re++;
+- } else {
+- rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
+- ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
+ }
+- assert(rm & (SP_HIDDEN_BIT << 3));
+
+- /* And now the addition */
++ assert(rm64 & (1 << 62));
+
+- assert(zm & SP_HIDDEN_BIT);
++ if (zc == IEEE754_CLASS_ZERO) {
++ /*
++ * Move explicit bit from bit 62 to bit 26 since the
++ * ieee754sp_format code expects the mantissa to be
++ * 27 bits wide (24 + 3 rounding bits).
++ */
++ rm = XSPSRS64(rm64, (62 - 26));
++ return ieee754sp_format(rs, re, rm);
++ }
+
+- /*
+- * Provide guard,round and stick bit space.
+- */
+- zm <<= 3;
++ /* Move explicit bit from bit 23 to bit 62 */
++ zm64 = (uint64_t)zm << (62 - 23);
++ assert(zm64 & (1 << 62));
+
++ /* Make the exponents the same */
+ if (ze > re) {
+ /*
+ * Have to shift r fraction right to align.
+ */
+ s = ze - re;
+- rm = XSPSRS(rm, s);
++ rm64 = XSPSRS64(rm64, s);
+ re += s;
+ } else if (re > ze) {
+ /*
+ * Have to shift z fraction right to align.
+ */
+ s = re - ze;
+- zm = XSPSRS(zm, s);
++ zm64 = XSPSRS64(zm64, s);
+ ze += s;
+ }
+ assert(ze == re);
+ assert(ze <= SP_EMAX);
+
++ /* Do the addition */
+ if (zs == rs) {
+ /*
+- * Generate 28 bit result of adding two 27 bit numbers
+- * leaving result in zm, zs and ze.
++ * Generate 64 bit result by adding two 63 bit numbers
++ * leaving result in zm64, zs and ze.
+ */
+- zm = zm + rm;
+-
+- if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
+- zm = XSPSRS1(zm);
++ zm64 = zm64 + rm64;
++ if ((int64_t)zm64 < 0) { /* carry out */
++ zm64 = XSPSRS1(zm64);
+ ze++;
+ }
+ } else {
+- if (zm >= rm) {
+- zm = zm - rm;
++ if (zm64 >= rm64) {
++ zm64 = zm64 - rm64;
+ } else {
+- zm = rm - zm;
++ zm64 = rm64 - zm64;
+ zs = rs;
+ }
+- if (zm == 0)
++ if (zm64 == 0)
+ return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
+
+ /*
+- * Normalize in extended single precision
++ * Put explicit bit at bit 62 if necessary.
+ */
+- while ((zm >> (SP_MBITS + 3)) == 0) {
+- zm <<= 1;
++ while ((zm64 >> 62) == 0) {
++ zm64 <<= 1;
+ ze--;
+ }
+-
+ }
++
++ /*
++ * Move explicit bit from bit 62 to bit 26 since the
++ * ieee754sp_format code expects the mantissa to be
++ * 27 bits wide (24 + 3 rounding bits).
++ */
++ zm = XSPSRS64(zm64, (62 - 26));
++
+ return ieee754sp_format(zs, ze, zm);
+ }
+
+@@ -272,5 +260,5 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y)
+ {
+- return _sp_maddf(z, x, y, maddf_negate_product);
++ return _sp_maddf(z, x, y, MADDF_NEGATE_PRODUCT);
+ }
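+The single-precision rewrite gets away without 128-bit arithmetic because
+two 24-bit significands multiply exactly inside a u64 — (2^24 - 1)^2 is
+below 2^48:
+
+	uint32_t xm = (1u << 24) - 1, ym = (1u << 24) - 1; /* max significands */
+	uint64_t rm64 = (uint64_t)xm * ym;	/* at most 48 bits, exact */
+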
+diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
+index b2da7c8baed7..292458b694fb 100644
+--- a/arch/powerpc/kernel/align.c
++++ b/arch/powerpc/kernel/align.c
+@@ -235,6 +235,28 @@ static int emulate_dcbz(struct pt_regs *regs, unsigned char __user *addr)
+
+ #define SWIZ_PTR(p) ((unsigned char __user *)((p) ^ swiz))
+
++#define __get_user_or_set_dar(_regs, _dest, _addr) \
++ ({ \
++ int rc = 0; \
++ typeof(_addr) __addr = (_addr); \
++ if (__get_user_inatomic(_dest, __addr)) { \
++ _regs->dar = (unsigned long)__addr; \
++ rc = -EFAULT; \
++ } \
++ rc; \
++ })
++
++#define __put_user_or_set_dar(_regs, _src, _addr) \
++ ({ \
++ int rc = 0; \
++ typeof(_addr) __addr = (_addr); \
++ if (__put_user_inatomic(_src, __addr)) { \
++ _regs->dar = (unsigned long)__addr; \
++ rc = -EFAULT; \
++ } \
++ rc; \
++ })
++
+ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ unsigned int reg, unsigned int nb,
+ unsigned int flags, unsigned int instr,
+@@ -263,9 +285,10 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ } else {
+ unsigned long pc = regs->nip ^ (swiz & 4);
+
+- if (__get_user_inatomic(instr,
+- (unsigned int __user *)pc))
++ if (__get_user_or_set_dar(regs, instr,
++ (unsigned int __user *)pc))
+ return -EFAULT;
++
+ if (swiz == 0 && (flags & SW))
+ instr = cpu_to_le32(instr);
+ nb = (instr >> 11) & 0x1f;
+@@ -309,31 +332,31 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ ((nb0 + 3) / 4) * sizeof(unsigned long));
+
+ for (i = 0; i < nb; ++i, ++p)
+- if (__get_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__get_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ if (nb0 > 0) {
+ rptr = &regs->gpr[0];
+ addr += nb;
+ for (i = 0; i < nb0; ++i, ++p)
+- if (__get_user_inatomic(REG_BYTE(rptr,
+- i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__get_user_or_set_dar(regs,
++ REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ }
+
+ } else {
+ for (i = 0; i < nb; ++i, ++p)
+- if (__put_user_inatomic(REG_BYTE(rptr, i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__put_user_or_set_dar(regs, REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ if (nb0 > 0) {
+ rptr = &regs->gpr[0];
+ addr += nb;
+ for (i = 0; i < nb0; ++i, ++p)
+- if (__put_user_inatomic(REG_BYTE(rptr,
+- i ^ bswiz),
+- SWIZ_PTR(p)))
++ if (__put_user_or_set_dar(regs,
++ REG_BYTE(rptr, i ^ bswiz),
++ SWIZ_PTR(p)))
+ return -EFAULT;
+ }
+ }
+@@ -345,29 +368,32 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
+ * Only POWER6 has these instructions, and it does true little-endian,
+ * so we don't need the address swizzling.
+ */
+-static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
+- unsigned int flags)
++static int emulate_fp_pair(struct pt_regs *regs, unsigned char __user *addr,
++ unsigned int reg, unsigned int flags)
+ {
+ char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+ char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+- int i, ret, sw = 0;
++ int i, sw = 0;
+
+ if (reg & 1)
+ return 0; /* invalid form: FRS/FRT must be even */
+ if (flags & SW)
+ sw = 7;
+- ret = 0;
++
+ for (i = 0; i < 8; ++i) {
+ if (!(flags & ST)) {
+- ret |= __get_user(ptr0[i^sw], addr + i);
+- ret |= __get_user(ptr1[i^sw], addr + i + 8);
++ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ } else {
+- ret |= __put_user(ptr0[i^sw], addr + i);
+- ret |= __put_user(ptr1[i^sw], addr + i + 8);
++ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ }
+ }
+- if (ret)
+- return -EFAULT;
++
+ return 1; /* exception handled and fixed up */
+ }
+
+@@ -377,24 +403,27 @@ static int emulate_lq_stq(struct pt_regs *regs, unsigned char __user *addr,
+ {
+ char *ptr0 = (char *)&regs->gpr[reg];
+ char *ptr1 = (char *)&regs->gpr[reg+1];
+- int i, ret, sw = 0;
++ int i, sw = 0;
+
+ if (reg & 1)
+ return 0; /* invalid form: GPR must be even */
+ if (flags & SW)
+ sw = 7;
+- ret = 0;
++
+ for (i = 0; i < 8; ++i) {
+ if (!(flags & ST)) {
+- ret |= __get_user(ptr0[i^sw], addr + i);
+- ret |= __get_user(ptr1[i^sw], addr + i + 8);
++ if (__get_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__get_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ } else {
+- ret |= __put_user(ptr0[i^sw], addr + i);
+- ret |= __put_user(ptr1[i^sw], addr + i + 8);
++ if (__put_user_or_set_dar(regs, ptr0[i^sw], addr + i))
++ return -EFAULT;
++ if (__put_user_or_set_dar(regs, ptr1[i^sw], addr + i + 8))
++ return -EFAULT;
+ }
+ }
+- if (ret)
+- return -EFAULT;
++
+ return 1; /* exception handled and fixed up */
+ }
+ #endif /* CONFIG_PPC64 */
+@@ -687,9 +716,14 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
+ for (j = 0; j < length; j += elsize) {
+ for (i = 0; i < elsize; ++i) {
+ if (flags & ST)
+- ret |= __put_user(ptr[i^sw], addr + i);
++ ret = __put_user_or_set_dar(regs, ptr[i^sw],
++ addr + i);
+ else
+- ret |= __get_user(ptr[i^sw], addr + i);
++ ret = __get_user_or_set_dar(regs, ptr[i^sw],
++ addr + i);
++
++ if (ret)
++ return ret;
+ }
+ ptr += elsize;
+ #ifdef __LITTLE_ENDIAN__
+@@ -739,7 +773,7 @@ int fix_alignment(struct pt_regs *regs)
+ unsigned int dsisr;
+ unsigned char __user *addr;
+ unsigned long p, swiz;
+- int ret, i;
++ int i;
+ union data {
+ u64 ll;
+ double dd;
+@@ -936,7 +970,7 @@ int fix_alignment(struct pt_regs *regs)
+ if (flags & F) {
+ /* Special case for 16-byte FP loads and stores */
+ PPC_WARN_ALIGNMENT(fp_pair, regs);
+- return emulate_fp_pair(addr, reg, flags);
++ return emulate_fp_pair(regs, addr, reg, flags);
+ } else {
+ #ifdef CONFIG_PPC64
+ /* Special case for 16-byte loads and stores */
+@@ -966,15 +1000,12 @@ int fix_alignment(struct pt_regs *regs)
+ }
+
+ data.ll = 0;
+- ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+- ret |= __get_user_inatomic(data.v[start + i],
+- SWIZ_PTR(p++));
+-
+- if (unlikely(ret))
+- return -EFAULT;
++ if (__get_user_or_set_dar(regs, data.v[start + i],
++ SWIZ_PTR(p++)))
++ return -EFAULT;
+
+ } else if (flags & F) {
+ data.ll = current->thread.TS_FPR(reg);
+@@ -1046,15 +1077,13 @@ int fix_alignment(struct pt_regs *regs)
+ break;
+ }
+
+- ret = 0;
+ p = (unsigned long)addr;
+
+ for (i = 0; i < nb; i++)
+- ret |= __put_user_inatomic(data.v[start + i],
+- SWIZ_PTR(p++));
++ if (__put_user_or_set_dar(regs, data.v[start + i],
++ SWIZ_PTR(p++)))
++ return -EFAULT;
+
+- if (unlikely(ret))
+- return -EFAULT;
+ } else if (flags & F)
+ current->thread.TS_FPR(reg) = data.ll;
+ else
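+Both new powerpc macros follow the same GNU C statement-expression pattern:
+do the inatomic user access and, on failure, record the faulting address in
+regs->dar before yielding -EFAULT, so the alignment handler reports the
+address that actually faulted. The pattern in miniature (hypothetical names,
+not the kernel macros):
+
+	#define access_or_record(fail_expr, addr, fault_out)	\
+	({							\
+		int __rc = 0;					\
+		if (fail_expr) {				\
+			(fault_out) = (unsigned long)(addr);	\
+			__rc = -EFAULT;				\
+		}						\
+		__rc;						\
+	})
+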
+diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
+index bea785d7f853..af85d6b12028 100644
+--- a/arch/s390/include/asm/mmu.h
++++ b/arch/s390/include/asm/mmu.h
+@@ -5,6 +5,7 @@
+ #include <linux/errno.h>
+
+ typedef struct {
++ spinlock_t lock;
+ cpumask_t cpu_attach_mask;
+ atomic_t flush_count;
+ unsigned int flush_mm;
+@@ -25,6 +26,7 @@ typedef struct {
+ } mm_context_t;
+
+ #define INIT_MM_CONTEXT(name) \
++ .context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock), \
+ .context.pgtable_lock = \
+ __SPIN_LOCK_UNLOCKED(name.context.pgtable_lock), \
+ .context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 515fea5a3fc4..f65a708ac395 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -15,6 +15,7 @@
+ static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+ {
++ spin_lock_init(&mm->context.lock);
+ spin_lock_init(&mm->context.pgtable_lock);
+ INIT_LIST_HEAD(&mm->context.pgtable_list);
+ spin_lock_init(&mm->context.gmap_lock);
+@@ -93,7 +94,6 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ if (prev == next)
+ return;
+ cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
+- cpumask_set_cpu(cpu, mm_cpumask(next));
+ /* Clear old ASCE by loading the kernel ASCE. */
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ __ctl_load(S390_lowcore.kernel_asce, 7, 7);
+@@ -111,9 +111,8 @@ static inline void finish_arch_post_lock_switch(void)
+ preempt_disable();
+ while (atomic_read(&mm->context.flush_count))
+ cpu_relax();
+-
+- if (mm->context.flush_mm)
+- __tlb_flush_mm(mm);
++ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
++ __tlb_flush_mm_lazy(mm);
+ preempt_enable();
+ }
+ set_fs(current->thread.mm_segment);
+@@ -126,6 +125,7 @@ static inline void activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+ {
+ switch_mm(prev, next, current);
++ cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
+ set_user_asce(next);
+ }
+
+diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
+index 39846100682a..eed927aeb08f 100644
+--- a/arch/s390/include/asm/tlbflush.h
++++ b/arch/s390/include/asm/tlbflush.h
+@@ -43,23 +43,6 @@ static inline void __tlb_flush_global(void)
+ * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
+ * this implicates multiple ASCEs!).
+ */
+-static inline void __tlb_flush_full(struct mm_struct *mm)
+-{
+- preempt_disable();
+- atomic_inc(&mm->context.flush_count);
+- if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+- /* Local TLB flush */
+- __tlb_flush_local();
+- } else {
+- /* Global TLB flush */
+- __tlb_flush_global();
+- /* Reset TLB flush mask */
+- cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+- }
+- atomic_dec(&mm->context.flush_count);
+- preempt_enable();
+-}
+-
+ static inline void __tlb_flush_mm(struct mm_struct *mm)
+ {
+ unsigned long gmap_asce;
+@@ -71,16 +54,18 @@ static inline void __tlb_flush_mm(struct mm_struct *mm)
+ */
+ preempt_disable();
+ atomic_inc(&mm->context.flush_count);
++ /* Reset TLB flush mask */
++ cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
++ barrier();
+ gmap_asce = READ_ONCE(mm->context.gmap_asce);
+ if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
+ if (gmap_asce)
+ __tlb_flush_idte(gmap_asce);
+ __tlb_flush_idte(mm->context.asce);
+ } else {
+- __tlb_flush_full(mm);
++ /* Global TLB flush */
++ __tlb_flush_global();
+ }
+- /* Reset TLB flush mask */
+- cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
+ atomic_dec(&mm->context.flush_count);
+ preempt_enable();
+ }
+@@ -94,7 +79,6 @@ static inline void __tlb_flush_kernel(void)
+ }
+ #else
+ #define __tlb_flush_global() __tlb_flush_local()
+-#define __tlb_flush_full(mm) __tlb_flush_local()
+
+ /*
+ * Flush TLB entries for a specific ASCE on all CPUs.
+@@ -112,10 +96,12 @@ static inline void __tlb_flush_kernel(void)
+
+ static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
+ {
++ spin_lock(&mm->context.lock);
+ if (mm->context.flush_mm) {
+- __tlb_flush_mm(mm);
+ mm->context.flush_mm = 0;
++ __tlb_flush_mm(mm);
+ }
++ spin_unlock(&mm->context.lock);
+ }
+
+ /*
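+The s390 hunks close a race in the lazy TLB flush: context.flush_mm is now
+tested, cleared, and acted on under the new context.lock, and a CPU re-adds
+itself to mm_cpumask() only after waiting out any concurrent flusher. The
+locked test-and-clear pattern, in miniature (hypothetical names, a sketch
+rather than the kernel code):
+
+	spin_lock(&ctx->lock);
+	if (ctx->flush_pending) {
+		ctx->flush_pending = 0;	/* clear first, then flush, */
+		do_flush(ctx);		/* exactly as the patched code orders it */
+	}
+	spin_unlock(&ctx->lock);
+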
+diff --git a/block/blk-core.c b/block/blk-core.c
+index d1f2801ce836..95379fc83805 100644
+--- a/block/blk-core.c
++++ b/block/blk-core.c
+@@ -233,7 +233,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
+ **/
+ void blk_start_queue(struct request_queue *q)
+ {
+- WARN_ON(!irqs_disabled());
++ WARN_ON(!in_interrupt() && !irqs_disabled());
+
+ queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+ __blk_run_queue(q);
+diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
+index 45af0fe00f33..aaf2f810d170 100644
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -143,8 +143,10 @@ static int skcipher_alloc_sgl(struct sock *sk)
+ sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
+ sgl->cur = 0;
+
+- if (sg)
++ if (sg) {
+ sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
++ sg_unmark_end(sg + (MAX_SGL_ENTS - 1));
++ }
+
+ list_add_tail(&sgl->list, &ctx->tsgl);
+ }
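+sg_init_table() marks the last usable entry of each scatterlist table as the
+list end; once a further table is chained on, that stale end mark would stop
+iteration early, so the fix clears it with sg_unmark_end(). The idiom,
+assuming a previous table a with N data entries plus one chain slot, and a
+new table b:
+
+	sg_chain(a, N + 1, b);		/* entry N becomes a chain link */
+	sg_unmark_end(&a[N - 1]);	/* last data entry must not read as "end" */
+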
+diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
+index 3822eae102db..6f78cea75103 100644
+--- a/drivers/block/skd_main.c
++++ b/drivers/block/skd_main.c
+@@ -2163,6 +2163,9 @@ static void skd_send_fitmsg(struct skd_device *skdev,
+ */
+ qcmd |= FIT_QCMD_MSGSIZE_64;
+
++ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
++ smp_wmb();
++
+ SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+
+ }
+@@ -2209,6 +2212,9 @@ static void skd_send_special_fitmsg(struct skd_device *skdev,
+ qcmd = skspcl->mb_dma_address;
+ qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
+
++ /* Make sure skd_msg_buf is written before the doorbell is triggered. */
++ smp_wmb();
++
+ SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
+ }
+
+@@ -4622,15 +4628,16 @@ static void skd_free_disk(struct skd_device *skdev)
+ {
+ struct gendisk *disk = skdev->disk;
+
+- if (disk != NULL) {
+- struct request_queue *q = disk->queue;
++ if (disk && (disk->flags & GENHD_FL_UP))
++ del_gendisk(disk);
+
+- if (disk->flags & GENHD_FL_UP)
+- del_gendisk(disk);
+- if (q)
+- blk_cleanup_queue(q);
+- put_disk(disk);
++ if (skdev->queue) {
++ blk_cleanup_queue(skdev->queue);
++ skdev->queue = NULL;
++ disk->queue = NULL;
+ }
++
++ put_disk(disk);
+ skdev->disk = NULL;
+ }
+
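+The skd fix is the classic publish-then-doorbell ordering requirement: the
+FIT message buffer must be visible to the device before the queued-command
+register write tells it to look. In miniature (hypothetical names for the
+buffer and registers):
+
+	msg->opcode = OP_SUBMIT;	/* fill the device-visible buffer */
+	smp_wmb();			/* order buffer writes before the doorbell */
+	writeq(qcmd, mmio_base + FIT_Q_COMMAND);
+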
+diff --git a/drivers/crypto/ccp/ccp-crypto-aes-xts.c b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+index 58a4244b4752..3f26a415ef44 100644
+--- a/drivers/crypto/ccp/ccp-crypto-aes-xts.c
++++ b/drivers/crypto/ccp/ccp-crypto-aes-xts.c
+@@ -1,8 +1,9 @@
+ /*
+ * AMD Cryptographic Coprocessor (CCP) AES XTS crypto API support
+ *
+- * Copyright (C) 2013 Advanced Micro Devices, Inc.
++ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
++ * Author: Gary R Hook <gary.hook@amd.com>
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+@@ -164,6 +165,7 @@ static int ccp_aes_xts_crypt(struct ablkcipher_request *req,
+ memset(&rctx->cmd, 0, sizeof(rctx->cmd));
+ INIT_LIST_HEAD(&rctx->cmd.entry);
+ rctx->cmd.engine = CCP_ENGINE_XTS_AES_128;
++ rctx->cmd.u.xts.type = CCP_AES_TYPE_128;
+ rctx->cmd.u.xts.action = (encrypt) ? CCP_AES_ACTION_ENCRYPT
+ : CCP_AES_ACTION_DECRYPT;
+ rctx->cmd.u.xts.unit_size = unit_size;
+diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
+index 2c0ce5f605b3..17b19a68e269 100644
+--- a/drivers/crypto/ccp/ccp-dev-v5.c
++++ b/drivers/crypto/ccp/ccp-dev-v5.c
+@@ -131,6 +131,7 @@ union ccp_function {
+ #define CCP_AES_MODE(p) ((p)->aes.mode)
+ #define CCP_AES_TYPE(p) ((p)->aes.type)
+ #define CCP_XTS_SIZE(p) ((p)->aes_xts.size)
++#define CCP_XTS_TYPE(p) ((p)->aes_xts.type)
+ #define CCP_XTS_ENCRYPT(p) ((p)->aes_xts.encrypt)
+ #define CCP_SHA_TYPE(p) ((p)->sha.type)
+ #define CCP_RSA_SIZE(p) ((p)->rsa.size)
+@@ -318,6 +319,7 @@ static int ccp5_perform_xts_aes(struct ccp_op *op)
+ CCP5_CMD_PROT(&desc) = 0;
+
+ function.raw = 0;
++ CCP_XTS_TYPE(&function) = op->u.xts.type;
+ CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
+ CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
+ CCP5_CMD_FUNCTION(&desc) = function.raw;
+diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h
+index 8ac7ae17e1f4..e23c36c7691c 100644
+--- a/drivers/crypto/ccp/ccp-dev.h
++++ b/drivers/crypto/ccp/ccp-dev.h
+@@ -187,6 +187,7 @@
+ #define CCP_AES_CTX_SB_COUNT 1
+
+ #define CCP_XTS_AES_KEY_SB_COUNT 1
++#define CCP5_XTS_AES_KEY_SB_COUNT 2
+ #define CCP_XTS_AES_CTX_SB_COUNT 1
+
+ #define CCP_SHA_SB_COUNT 1
+@@ -472,6 +473,7 @@ struct ccp_aes_op {
+ };
+
+ struct ccp_xts_aes_op {
++ enum ccp_aes_type type;
+ enum ccp_aes_action action;
+ enum ccp_xts_aes_unit_size unit_size;
+ };
+diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
+index 50fae4442801..64deb006c3be 100644
+--- a/drivers/crypto/ccp/ccp-ops.c
++++ b/drivers/crypto/ccp/ccp-ops.c
+@@ -779,6 +779,8 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ struct ccp_op op;
+ unsigned int unit_size, dm_offset;
+ bool in_place = false;
++ unsigned int sb_count;
++ enum ccp_aes_type aestype;
+ int ret;
+
+ switch (xts->unit_size) {
+@@ -802,7 +804,9 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ return -EINVAL;
+ }
+
+- if (xts->key_len != AES_KEYSIZE_128)
++ if (xts->key_len == AES_KEYSIZE_128)
++ aestype = CCP_AES_TYPE_128;
++ else
+ return -EINVAL;
+
+ if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
+@@ -824,23 +828,44 @@ static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
+ op.sb_key = cmd_q->sb_key;
+ op.sb_ctx = cmd_q->sb_ctx;
+ op.init = 1;
++ op.u.xts.type = aestype;
+ op.u.xts.action = xts->action;
+ op.u.xts.unit_size = xts->unit_size;
+
+- /* All supported key sizes fit in a single (32-byte) SB entry
+- * and must be in little endian format. Use the 256-bit byte
+- * swap passthru option to convert from big endian to little
+- * endian.
++ /* A version 3 device only supports a 128-bit key, which fits into a
++ * single SB entry. A version 5 device uses a 512-bit vector, so it
++ * needs two SB entries.
+ */
++ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0))
++ sb_count = CCP_XTS_AES_KEY_SB_COUNT;
++ else
++ sb_count = CCP5_XTS_AES_KEY_SB_COUNT;
+ ret = ccp_init_dm_workarea(&key, cmd_q,
+- CCP_XTS_AES_KEY_SB_COUNT * CCP_SB_BYTES,
++ sb_count * CCP_SB_BYTES,
+ DMA_TO_DEVICE);
+ if (ret)
+ return ret;
+
+- dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
+- ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
+- ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
++ if (cmd_q->ccp->vdata->version == CCP_VERSION(3, 0)) {
++ /* All supported key sizes must be in little endian format.
++ * Use the 256-bit byte swap passthru option to convert from
++ * big endian to little endian.
++ */
++ dm_offset = CCP_SB_BYTES - AES_KEYSIZE_128;
++ ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
++ ccp_set_dm_area(&key, 0, xts->key, xts->key_len, xts->key_len);
++ } else {
++ /* Version 5 CCPs use a 512-bit space for the key: each portion
++ * occupies 256 bits, or one entire slot, and is zero-padded.
++ */
++ unsigned int pad;
++
++ dm_offset = CCP_SB_BYTES;
++ pad = dm_offset - xts->key_len;
++ ccp_set_dm_area(&key, pad, xts->key, 0, xts->key_len);
++ ccp_set_dm_area(&key, dm_offset + pad, xts->key, xts->key_len,
++ xts->key_len);
++ }
+ ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+ CCP_PASSTHRU_BYTESWAP_256BIT);
+ if (ret) {
+diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
+index 7309c0824887..a2449d77af07 100644
+--- a/drivers/devfreq/devfreq.c
++++ b/drivers/devfreq/devfreq.c
+@@ -574,7 +574,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ err = device_register(&devfreq->dev);
+ if (err) {
+ mutex_unlock(&devfreq->lock);
+- goto err_out;
++ goto err_dev;
+ }
+
+ devfreq->trans_table = devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
+@@ -618,6 +618,9 @@ struct devfreq *devfreq_add_device(struct device *dev,
+ mutex_unlock(&devfreq_list_lock);
+
+ device_unregister(&devfreq->dev);
++err_dev:
++ if (devfreq)
++ kfree(devfreq);
+ err_out:
+ return ERR_PTR(err);
+ }
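
The devfreq fix above restores the usual ordered-label unwind: a device_register() failure now jumps to a label that frees the devfreq structure instead of leaking it. A hypothetical, self-contained sketch of the idiom, where each failure jumps to the label that releases exactly what has been acquired so far:

    #include <errno.h>
    #include <stdlib.h>

    struct thing { int *a; int *b; };

    static int thing_create(struct thing **out)
    {
        int err;
        struct thing *t = calloc(1, sizeof(*t));

        if (!t) { err = -ENOMEM; goto err_out; }

        t->a = calloc(1, sizeof(*t->a));
        if (!t->a) { err = -ENOMEM; goto err_thing; }

        t->b = calloc(1, sizeof(*t->b));
        if (!t->b) { err = -ENOMEM; goto err_a; }

        *out = t;
        return 0;

    err_a:
        free(t->a);
    err_thing:
        free(t);   /* the step the fix adds: free the object itself */
    err_out:
        return err;
    }
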
+diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
+index c3b21865443e..1feec34ca9dd 100644
+--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
++++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
+@@ -47,6 +47,13 @@ static void sun4i_drv_disable_vblank(struct drm_device *drm, unsigned int pipe)
+ sun4i_tcon_enable_vblank(tcon, false);
+ }
+
++static void sun4i_drv_lastclose(struct drm_device *dev)
++{
++ struct sun4i_drv *drv = dev->dev_private;
++
++ drm_fbdev_cma_restore_mode(drv->fbdev);
++}
++
+ static const struct file_operations sun4i_drv_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+@@ -65,6 +72,7 @@ static struct drm_driver sun4i_drv_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_ATOMIC,
+
+ /* Generic Operations */
++ .lastclose = sun4i_drv_lastclose,
+ .fops = &sun4i_drv_fops,
+ .name = "sun4i-drm",
+ .desc = "Allwinner sun4i Display Engine",
+diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
+index 63e82f8e8308..fb4ce0394ac7 100644
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -446,15 +446,10 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
+
+ ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
+ if (ret < 0)
+- goto put;
++ return ret;
+
+ rt = (struct rt6_info *)dst;
+- if (ipv6_addr_any(&fl6.saddr)) {
+- ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
+- &fl6.daddr, 0, &fl6.saddr);
+- if (ret)
+- goto put;
+-
++ if (ipv6_addr_any(&src_in->sin6_addr)) {
+ src_in->sin6_family = AF_INET6;
+ src_in->sin6_addr = fl6.saddr;
+ }
+@@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
+
+ *pdst = dst;
+ return 0;
+-put:
+- dst_release(dst);
+- return ret;
+ }
+ #else
+ static int addr6_resolve(struct sockaddr_in6 *src_in,
+diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
+index 4bd5b5caa243..613074e963bb 100644
+--- a/drivers/infiniband/hw/hfi1/rc.c
++++ b/drivers/infiniband/hw/hfi1/rc.c
+@@ -551,7 +551,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
+ case IB_WR_RDMA_WRITE:
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+- /* FALLTHROUGH */
++ goto no_flow_control;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+@@ -559,6 +559,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
++no_flow_control:
+ put_ib_reth_vaddr(
+ wqe->rdma_wr.remote_addr,
+ &ohdr->u.rc.reth);
+diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
+index f3fe787c9426..c1523f9a3c12 100644
+--- a/drivers/infiniband/hw/qib/qib_rc.c
++++ b/drivers/infiniband/hw/qib/qib_rc.c
+@@ -357,7 +357,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
+ case IB_WR_RDMA_WRITE:
+ if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
+ qp->s_lsn++;
+- /* FALLTHROUGH */
++ goto no_flow_control;
+ case IB_WR_RDMA_WRITE_WITH_IMM:
+ /* If no credit, return. */
+ if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
+@@ -365,7 +365,7 @@ int qib_make_rc_req(struct rvt_qp *qp, unsigned long *flags)
+ qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
+ goto bail;
+ }
+-
++no_flow_control:
+ ohdr->u.rc.reth.vaddr =
+ cpu_to_be64(wqe->rdma_wr.remote_addr);
+ ohdr->u.rc.reth.rkey =
+diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
+index 5be14ad29d46..dbf09836ff30 100644
+--- a/drivers/input/serio/i8042-x86ia64io.h
++++ b/drivers/input/serio/i8042-x86ia64io.h
+@@ -904,6 +904,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ },
+ },
++ {
++ /* Gigabyte P57 - Elantech touchpad */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "P57"),
++ },
++ },
+ {
+ /* Schenker XMG C504 - Elantech touchpad */
+ .matches = {
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index c3ea03c9a1a8..02619cabda8b 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -333,6 +333,7 @@ struct cached_dev {
+ /* Limit number of writeback bios in flight */
+ struct semaphore in_flight;
+ struct task_struct *writeback_thread;
++ struct workqueue_struct *writeback_write_wq;
+
+ struct keybuf writeback_keys;
+
+diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
+index a37c1776f2e3..e0f1c6d534fe 100644
+--- a/drivers/md/bcache/request.c
++++ b/drivers/md/bcache/request.c
+@@ -196,12 +196,12 @@ static void bch_data_insert_start(struct closure *cl)
+ struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
+ struct bio *bio = op->bio, *n;
+
+- if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
+- wake_up_gc(op->c);
+-
+ if (op->bypass)
+ return bch_data_invalidate(cl);
+
++ if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
++ wake_up_gc(op->c);
++
+ /*
+ * Journal writes are marked REQ_PREFLUSH; if the original write was a
+ * flush, it'll wait on the journal write.
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 66669c8f4161..f4557f558b24 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1025,7 +1025,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
+ }
+
+ if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
+- bch_sectors_dirty_init(dc);
++ bch_sectors_dirty_init(&dc->disk);
+ atomic_set(&dc->has_dirty, 1);
+ atomic_inc(&dc->count);
+ bch_writeback_queue(dc);
+@@ -1058,6 +1058,8 @@ static void cached_dev_free(struct closure *cl)
+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread))
+ kthread_stop(dc->writeback_thread);
++ if (dc->writeback_write_wq)
++ destroy_workqueue(dc->writeback_write_wq);
+
+ mutex_lock(&bch_register_lock);
+
+@@ -1229,6 +1231,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
+ goto err;
+
+ bcache_device_attach(d, c, u - c->uuids);
++ bch_sectors_dirty_init(d);
+ bch_flash_dev_request_init(d);
+ add_disk(d->disk);
+
+@@ -1967,6 +1970,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
+ else
+ err = "device busy";
+ mutex_unlock(&bch_register_lock);
++ if (!IS_ERR(bdev))
++ bdput(bdev);
+ if (attr == &ksysfs_register_quiet)
+ goto out;
+ }
+diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
+index b3ff57d61dde..4fbb5532f24c 100644
+--- a/drivers/md/bcache/sysfs.c
++++ b/drivers/md/bcache/sysfs.c
+@@ -191,7 +191,7 @@ STORE(__cached_dev)
+ {
+ struct cached_dev *dc = container_of(kobj, struct cached_dev,
+ disk.kobj);
+- unsigned v = size;
++ ssize_t v = size;
+ struct cache_set *c;
+ struct kobj_uevent_env *env;
+
+@@ -226,7 +226,7 @@ STORE(__cached_dev)
+ bch_cached_dev_run(dc);
+
+ if (attr == &sysfs_cache_mode) {
+- ssize_t v = bch_read_string_list(buf, bch_cache_modes + 1);
++ v = bch_read_string_list(buf, bch_cache_modes + 1);
+
+ if (v < 0)
+ return v;
+diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
+index dde6172f3f10..eb70f6894f05 100644
+--- a/drivers/md/bcache/util.c
++++ b/drivers/md/bcache/util.c
+@@ -73,24 +73,44 @@ STRTO_H(strtouint, unsigned int)
+ STRTO_H(strtoll, long long)
+ STRTO_H(strtoull, unsigned long long)
+
++/**
++ * bch_hprint() - format @v as a human-readable string for sysfs
++ *
++ * @buf: the (at least 8 byte) buffer to format the result into
++ * @v: signed 64-bit integer
++ *
++ * Returns the number of bytes used by the formatted string.
++ */
+ ssize_t bch_hprint(char *buf, int64_t v)
+ {
+ static const char units[] = "?kMGTPEZY";
+- char dec[4] = "";
+- int u, t = 0;
+-
+- for (u = 0; v >= 1024 || v <= -1024; u++) {
+- t = v & ~(~0 << 10);
+- v >>= 10;
+- }
+-
+- if (!u)
+- return sprintf(buf, "%llu", v);
+-
+- if (v < 100 && v > -100)
+- snprintf(dec, sizeof(dec), ".%i", t / 100);
+-
+- return sprintf(buf, "%lli%s%c", v, dec, units[u]);
++ int u = 0, t;
++
++ uint64_t q;
++
++ if (v < 0)
++ q = -v;
++ else
++ q = v;
++
++ /* For as long as the number is more than 3 digits, but at least
++ * once, shift right / divide by 1024. Keep the remainder for
++ * a digit after the decimal point.
++ */
++ do {
++ u++;
++
++ t = q & ~(~0 << 10);
++ q >>= 10;
++ } while (q >= 1000);
++
++ if (v < 0)
++ /* '-', up to 3 digits, '.', 1 digit, 1 character, null;
++ * yields 8 bytes.
++ */
++ return sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]);
++ else
++ return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]);
+ }
+
+ ssize_t bch_snprint_string_list(char *buf, size_t size, const char * const list[],
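
The bch_hprint() rework above keeps at most three integer digits plus one decimal digit: it divides by 1024 until the quotient drops below 1000 and carries the last remainder into the fractional digit, so for example -1536 formats as "-1.5k" (1536 = 1*1024 + 512, and 512*10/1024 = 5). A userspace port to demonstrate, assuming nothing beyond the hunk itself:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static int hprint(char *buf, int64_t v)
    {
        static const char units[] = "?kMGTPEZY";
        int u = 0, t;
        uint64_t q = v < 0 ? -(uint64_t)v : (uint64_t)v;

        do {
            u++;
            t = (int)(q & 1023);   /* remainder, i.e. q % 1024 */
            q >>= 10;              /* q /= 1024 */
        } while (q >= 1000);

        return sprintf(buf, "%s%" PRIu64 ".%d%c",
                       v < 0 ? "-" : "", q, t * 10 / 1024, units[u]);
    }

    int main(void)
    {
        char buf[16];

        hprint(buf, -1536);   printf("%s\n", buf); /* -1.5k */
        hprint(buf, 1048576); printf("%s\n", buf); /* 1.0M  */
        return 0;
    }
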
+diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
+index e51644e503a5..4ce2b19fe120 100644
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -20,7 +20,8 @@
+ static void __update_writeback_rate(struct cached_dev *dc)
+ {
+ struct cache_set *c = dc->disk.c;
+- uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size;
++ uint64_t cache_sectors = c->nbuckets * c->sb.bucket_size -
++ bcache_flash_devs_sectors_dirty(c);
+ uint64_t cache_dirty_target =
+ div_u64(cache_sectors * dc->writeback_percent, 100);
+
+@@ -186,7 +187,7 @@ static void write_dirty(struct closure *cl)
+
+ closure_bio_submit(&io->bio, cl);
+
+- continue_at(cl, write_dirty_finish, system_wq);
++ continue_at(cl, write_dirty_finish, io->dc->writeback_write_wq);
+ }
+
+ static void read_dirty_endio(struct bio *bio)
+@@ -206,7 +207,7 @@ static void read_dirty_submit(struct closure *cl)
+
+ closure_bio_submit(&io->bio, cl);
+
+- continue_at(cl, write_dirty, system_wq);
++ continue_at(cl, write_dirty, io->dc->writeback_write_wq);
+ }
+
+ static void read_dirty(struct cached_dev *dc)
+@@ -482,17 +483,17 @@ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+ return MAP_CONTINUE;
+ }
+
+-void bch_sectors_dirty_init(struct cached_dev *dc)
++void bch_sectors_dirty_init(struct bcache_device *d)
+ {
+ struct sectors_dirty_init op;
+
+ bch_btree_op_init(&op.op, -1);
+- op.inode = dc->disk.id;
++ op.inode = d->id;
+
+- bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
++ bch_btree_map_keys(&op.op, d->c, &KEY(op.inode, 0, 0),
+ sectors_dirty_init_fn, 0);
+
+- dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
++ d->sectors_dirty_last = bcache_dev_sectors_dirty(d);
+ }
+
+ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+@@ -516,6 +517,11 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+
+ int bch_cached_dev_writeback_start(struct cached_dev *dc)
+ {
++ dc->writeback_write_wq = alloc_workqueue("bcache_writeback_wq",
++ WQ_MEM_RECLAIM, 0);
++ if (!dc->writeback_write_wq)
++ return -ENOMEM;
++
+ dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
+ "bcache_writeback");
+ if (IS_ERR(dc->writeback_thread))
+diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
+index 301eaf565167..cdf8d253209e 100644
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -14,6 +14,25 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
+ return ret;
+ }
+
++static inline uint64_t bcache_flash_devs_sectors_dirty(struct cache_set *c)
++{
++ uint64_t i, ret = 0;
++
++ mutex_lock(&bch_register_lock);
++
++ for (i = 0; i < c->nr_uuids; i++) {
++ struct bcache_device *d = c->devices[i];
++
++ if (!d || !UUID_FLASH_ONLY(&c->uuids[i]))
++ continue;
++ ret += bcache_dev_sectors_dirty(d);
++ }
++
++ mutex_unlock(&bch_register_lock);
++
++ return ret;
++}
++
+ static inline unsigned offset_to_stripe(struct bcache_device *d,
+ uint64_t offset)
+ {
+@@ -85,7 +104,7 @@ static inline void bch_writeback_add(struct cached_dev *dc)
+
+ void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
+
+-void bch_sectors_dirty_init(struct cached_dev *dc);
++void bch_sectors_dirty_init(struct bcache_device *);
+ void bch_cached_dev_writeback_init(struct cached_dev *);
+ int bch_cached_dev_writeback_start(struct cached_dev *);
+
+diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
+index 2d826927a3bf..fb02c3979bf4 100644
+--- a/drivers/md/bitmap.c
++++ b/drivers/md/bitmap.c
+@@ -1992,6 +1992,11 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
+ long pages;
+ struct bitmap_page *new_bp;
+
++ if (bitmap->storage.file && !init) {
++ pr_info("md: cannot resize file-based bitmap\n");
++ return -EINVAL;
++ }
++
+ if (chunksize == 0) {
+ /* If there is enough space, leave the chunk size unchanged,
+ * else increase by factor of two until there is enough space.
+diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
+index c2ee6e39fd0c..20397aba6849 100644
+--- a/drivers/media/usb/uvc/uvc_ctrl.c
++++ b/drivers/media/usb/uvc/uvc_ctrl.c
+@@ -2002,6 +2002,13 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
+ goto done;
+ }
+
++ /* Validate the user-provided bit-size and offset */
++ if (mapping->size > 32 ||
++ mapping->offset + mapping->size > ctrl->info.size * 8) {
++ ret = -EINVAL;
++ goto done;
++ }
++
+ list_for_each_entry(map, &ctrl->info.mappings, list) {
+ if (mapping->id == map->id) {
+ uvc_trace(UVC_TRACE_CONTROL, "Can't add mapping '%s', "
+diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+index bacecbd68a6d..dc51dd86377d 100644
+--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
++++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+@@ -773,7 +773,8 @@ static int put_v4l2_event32(struct v4l2_event *kp, struct v4l2_event32 __user *u
+ copy_to_user(&up->u, &kp->u, sizeof(kp->u)) ||
+ put_user(kp->pending, &up->pending) ||
+ put_user(kp->sequence, &up->sequence) ||
+- compat_put_timespec(&kp->timestamp, &up->timestamp) ||
++ put_user(kp->timestamp.tv_sec, &up->timestamp.tv_sec) ||
++ put_user(kp->timestamp.tv_nsec, &up->timestamp.tv_nsec) ||
+ put_user(kp->id, &up->id) ||
+ copy_to_user(up->reserved, kp->reserved, 8 * sizeof(__u32)))
+ return -EFAULT;
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index b57fc6d6e28a..d08dfc8b9ba9 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -586,6 +586,14 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+ PCI_EXP_SLTSTA_DLLSC);
++
++ /*
++ * If we've already reported a power fault, don't report it again
++ * until we've done something to handle it.
++ */
++ if (ctrl->power_fault_detected)
++ events &= ~PCI_EXP_SLTSTA_PFD;
++
+ if (!events)
+ return IRQ_NONE;
+
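
The pciehp change above gives the power-fault bit report-once semantics: the condition keeps re-asserting in the status register, so once it has been reported it is masked out of the event set until the handler clears ctrl->power_fault_detected. A small sketch of the pattern with hypothetical names:

    #include <stdbool.h>
    #include <stdint.h>

    #define EVT_POWER_FAULT 0x01u

    struct slot_ctrl {
        bool power_fault_reported;
    };

    static uint32_t filter_events(struct slot_ctrl *s, uint32_t status)
    {
        /* Suppress repeats of a still-asserted condition until the
         * handler has acted on it and cleared the flag. */
        if (s->power_fault_reported)
            status &= ~EVT_POWER_FAULT;
        else if (status & EVT_POWER_FAULT)
            s->power_fault_reported = true;

        return status;
    }
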
+diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
+index de0ea474fb73..e5824c7b7b6b 100644
+--- a/drivers/pci/hotplug/shpchp_hpc.c
++++ b/drivers/pci/hotplug/shpchp_hpc.c
+@@ -1062,6 +1062,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
+ if (rc) {
+ ctrl_info(ctrl, "Can't get msi for the hotplug controller\n");
+ ctrl_info(ctrl, "Use INTx for the hotplug controller\n");
++ } else {
++ pci_set_master(pdev);
+ }
+
+ rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED,
+diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
+index c9a146948192..a5b7bd3c9bac 100644
+--- a/drivers/pinctrl/pinctrl-amd.c
++++ b/drivers/pinctrl/pinctrl-amd.c
+@@ -32,6 +32,7 @@
+ #include <linux/pinctrl/pinconf.h>
+ #include <linux/pinctrl/pinconf-generic.h>
+
++#include "core.h"
+ #include "pinctrl-utils.h"
+ #include "pinctrl-amd.h"
+
+@@ -712,6 +713,69 @@ static const struct pinconf_ops amd_pinconf_ops = {
+ .pin_config_group_set = amd_pinconf_group_set,
+ };
+
++#ifdef CONFIG_PM_SLEEP
++static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
++{
++ const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
++
++ if (!pd)
++ return false;
++
++ /*
++ * Only save and restore the pin if it is actually in use by the
++ * kernel (or by userspace).
++ */
++ if (pd->mux_owner || pd->gpio_owner ||
++ gpiochip_line_is_irq(&gpio_dev->gc, pin))
++ return true;
++
++ return false;
++}
++
++int amd_gpio_suspend(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
++ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++ int i;
++
++ for (i = 0; i < desc->npins; i++) {
++ int pin = desc->pins[i].number;
++
++ if (!amd_gpio_should_save(gpio_dev, pin))
++ continue;
++
++ gpio_dev->saved_regs[i] = readl(gpio_dev->base + pin*4);
++ }
++
++ return 0;
++}
++
++int amd_gpio_resume(struct device *dev)
++{
++ struct platform_device *pdev = to_platform_device(dev);
++ struct amd_gpio *gpio_dev = platform_get_drvdata(pdev);
++ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
++ int i;
++
++ for (i = 0; i < desc->npins; i++) {
++ int pin = desc->pins[i].number;
++
++ if (!amd_gpio_should_save(gpio_dev, pin))
++ continue;
++
++ writel(gpio_dev->saved_regs[i], gpio_dev->base + pin*4);
++ }
++
++ return 0;
++}
++
++static const struct dev_pm_ops amd_gpio_pm_ops = {
++ SET_LATE_SYSTEM_SLEEP_PM_OPS(amd_gpio_suspend,
++ amd_gpio_resume)
++};
++#endif
++
+ static struct pinctrl_desc amd_pinctrl_desc = {
+ .pins = kerncz_pins,
+ .npins = ARRAY_SIZE(kerncz_pins),
+@@ -751,6 +815,14 @@ static int amd_gpio_probe(struct platform_device *pdev)
+ return -EINVAL;
+ }
+
++#ifdef CONFIG_PM_SLEEP
++ gpio_dev->saved_regs = devm_kcalloc(&pdev->dev, amd_pinctrl_desc.npins,
++ sizeof(*gpio_dev->saved_regs),
++ GFP_KERNEL);
++ if (!gpio_dev->saved_regs)
++ return -ENOMEM;
++#endif
++
+ gpio_dev->pdev = pdev;
+ gpio_dev->gc.direction_input = amd_gpio_direction_input;
+ gpio_dev->gc.direction_output = amd_gpio_direction_output;
+@@ -839,6 +911,9 @@ static struct platform_driver amd_gpio_driver = {
+ .driver = {
+ .name = "amd_gpio",
+ .acpi_match_table = ACPI_PTR(amd_gpio_acpi_match),
++#ifdef CONFIG_PM_SLEEP
++ .pm = &amd_gpio_pm_ops,
++#endif
+ },
+ .probe = amd_gpio_probe,
+ .remove = amd_gpio_remove,
+diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
+index 7bfea47dbb47..e8bbb20779d0 100644
+--- a/drivers/pinctrl/pinctrl-amd.h
++++ b/drivers/pinctrl/pinctrl-amd.h
+@@ -95,6 +95,7 @@ struct amd_gpio {
+ struct gpio_chip gc;
+ struct resource *res;
+ struct platform_device *pdev;
++ u32 *saved_regs;
+ };
+
+ /* KERNCZ configuration*/
+diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
+index d5bf36ec8a75..34367d172961 100644
+--- a/drivers/s390/scsi/zfcp_dbf.c
++++ b/drivers/s390/scsi/zfcp_dbf.c
+@@ -3,7 +3,7 @@
+ *
+ * Debug traces for zfcp.
+ *
+- * Copyright IBM Corp. 2002, 2016
++ * Copyright IBM Corp. 2002, 2017
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -447,6 +447,7 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+ struct fc_ct_hdr *reqh = sg_virt(ct_els->req);
+ struct fc_ns_gid_ft *reqn = (struct fc_ns_gid_ft *)(reqh + 1);
+ struct scatterlist *resp_entry = ct_els->resp;
++ struct fc_ct_hdr *resph;
+ struct fc_gpn_ft_resp *acc;
+ int max_entries, x, last = 0;
+
+@@ -473,6 +474,13 @@ static u16 zfcp_dbf_san_res_cap_len_if_gpn_ft(char *tag,
+ return len; /* not GPN_FT response so do not cap */
+
+ acc = sg_virt(resp_entry);
++
++ /* cap all CT responses except accepts to at least the CT header */
++ resph = (struct fc_ct_hdr *)acc;
++ if ((ct_els->status) ||
++ (resph->ct_cmd != cpu_to_be16(FC_FS_ACC)))
++ return max(FC_CT_HDR_LEN, ZFCP_DBF_SAN_MAX_PAYLOAD);
++
+ max_entries = (reqh->ct_mr_size * 4 / sizeof(struct fc_gpn_ft_resp))
+ + 1 /* zfcp_fc_scan_ports: bytes correct, entries off-by-one
+ * to account for header as 1st pseudo "entry" */;
+@@ -555,8 +563,8 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+ rec->scsi_retries = sc->retries;
+ rec->scsi_allowed = sc->allowed;
+ rec->scsi_id = sc->device->id;
+- /* struct zfcp_dbf_scsi needs to be updated to handle 64bit LUNs */
+ rec->scsi_lun = (u32)sc->device->lun;
++ rec->scsi_lun_64_hi = (u32)(sc->device->lun >> 32);
+ rec->host_scribble = (unsigned long)sc->host_scribble;
+
+ memcpy(rec->scsi_opcode, sc->cmnd,
+@@ -564,19 +572,32 @@ void zfcp_dbf_scsi(char *tag, int level, struct scsi_cmnd *sc,
+
+ if (fsf) {
+ rec->fsf_req_id = fsf->req_id;
++ rec->pl_len = FCP_RESP_WITH_EXT;
+ fcp_rsp = (struct fcp_resp_with_ext *)
+ &(fsf->qtcb->bottom.io.fcp_rsp);
++ /* mandatory parts of FCP_RSP IU in this SCSI record */
+ memcpy(&rec->fcp_rsp, fcp_rsp, FCP_RESP_WITH_EXT);
+ if (fcp_rsp->resp.fr_flags & FCP_RSP_LEN_VAL) {
+ fcp_rsp_info = (struct fcp_resp_rsp_info *) &fcp_rsp[1];
+ rec->fcp_rsp_info = fcp_rsp_info->rsp_code;
++ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_rsp_len);
+ }
+ if (fcp_rsp->resp.fr_flags & FCP_SNS_LEN_VAL) {
+- rec->pl_len = min((u16)SCSI_SENSE_BUFFERSIZE,
+- (u16)ZFCP_DBF_PAY_MAX_REC);
+- zfcp_dbf_pl_write(dbf, sc->sense_buffer, rec->pl_len,
+- "fcp_sns", fsf->req_id);
++ rec->pl_len += be32_to_cpu(fcp_rsp->ext.fr_sns_len);
+ }
++ /* complete FCP_RSP IU in associated PAYload record
++ * but only if there are optional parts
++ */
++ if (fcp_rsp->resp.fr_flags != 0)
++ zfcp_dbf_pl_write(
++ dbf, fcp_rsp,
++ /* at least one full PAY record
++ * but not beyond hardware response field
++ */
++ min_t(u16, max_t(u16, rec->pl_len,
++ ZFCP_DBF_PAY_MAX_REC),
++ FSF_FCP_RSP_SIZE),
++ "fcp_riu", fsf->req_id);
+ }
+
+ debug_event(dbf->scsi, level, rec, sizeof(*rec));
+diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
+index db186d44cfaf..b60667c145fd 100644
+--- a/drivers/s390/scsi/zfcp_dbf.h
++++ b/drivers/s390/scsi/zfcp_dbf.h
+@@ -2,7 +2,7 @@
+ * zfcp device driver
+ * debug feature declarations
+ *
+- * Copyright IBM Corp. 2008, 2016
++ * Copyright IBM Corp. 2008, 2017
+ */
+
+ #ifndef ZFCP_DBF_H
+@@ -204,7 +204,7 @@ enum zfcp_dbf_scsi_id {
+ * @id: unique number of recovery record type
+ * @tag: identifier string specifying the location of initiation
+ * @scsi_id: scsi device id
+- * @scsi_lun: scsi device logical unit number
++ * @scsi_lun: scsi device logical unit number, low part of 64 bit, old 32 bit
+ * @scsi_result: scsi result
+ * @scsi_retries: current retry number of scsi request
+ * @scsi_allowed: allowed retries
+@@ -214,6 +214,7 @@ enum zfcp_dbf_scsi_id {
+ * @host_scribble: LLD specific data attached to SCSI request
+ * @pl_len: length of payload stored as zfcp_dbf_pay
+ * @fsf_rsp: response for fsf request
++ * @scsi_lun_64_hi: scsi device logical unit number, high part of 64 bit
+ */
+ struct zfcp_dbf_scsi {
+ u8 id;
+@@ -230,6 +231,7 @@ struct zfcp_dbf_scsi {
+ u64 host_scribble;
+ u16 pl_len;
+ struct fcp_resp_with_ext fcp_rsp;
++ u32 scsi_lun_64_hi;
+ } __packed;
+
+ /**
+@@ -323,7 +325,11 @@ void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+ {
+ struct fsf_qtcb *qtcb = req->qtcb;
+
+- if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
++ if (unlikely(req->status & (ZFCP_STATUS_FSFREQ_DISMISSED |
++ ZFCP_STATUS_FSFREQ_ERROR))) {
++ zfcp_dbf_hba_fsf_resp("fs_rerr", 3, req);
++
++ } else if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+ (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+ zfcp_dbf_hba_fsf_resp("fs_perr", 1, req);
+
+@@ -401,7 +407,8 @@ void zfcp_dbf_scsi_abort(char *tag, struct scsi_cmnd *scmd,
+ * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
+ */
+ static inline
+-void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
++void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag,
++ struct zfcp_fsf_req *fsf_req)
+ {
+ char tmp_tag[ZFCP_DBF_TAG_LEN];
+
+@@ -411,7 +418,7 @@ void zfcp_dbf_scsi_devreset(char *tag, struct scsi_cmnd *scmnd, u8 flag)
+ memcpy(tmp_tag, "lr_", 3);
+
+ memcpy(&tmp_tag[3], tag, 4);
+- _zfcp_dbf_scsi(tmp_tag, 1, scmnd, NULL);
++ _zfcp_dbf_scsi(tmp_tag, 1, scmnd, fsf_req);
+ }
+
+ /**
+diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
+index df2b541c8287..a2275825186f 100644
+--- a/drivers/s390/scsi/zfcp_fc.h
++++ b/drivers/s390/scsi/zfcp_fc.h
+@@ -4,7 +4,7 @@
+ * Fibre Channel related definitions and inline functions for the zfcp
+ * device driver
+ *
+- * Copyright IBM Corp. 2009
++ * Copyright IBM Corp. 2009, 2017
+ */
+
+ #ifndef ZFCP_FC_H
+@@ -279,6 +279,10 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
+ !(rsp_flags & FCP_SNS_LEN_VAL) &&
+ fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
+ set_host_byte(scsi, DID_ERROR);
++ } else if (unlikely(rsp_flags & FCP_RESID_OVER)) {
++ /* FCP_DL was not sufficient for SCSI data length */
++ if (fcp_rsp->resp.fr_status == SAM_STAT_GOOD)
++ set_host_byte(scsi, DID_ERROR);
+ }
+ }
+
+diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
+index 27ff38f839fc..1964391db904 100644
+--- a/drivers/s390/scsi/zfcp_fsf.c
++++ b/drivers/s390/scsi/zfcp_fsf.c
+@@ -928,8 +928,8 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
+
+ switch (header->fsf_status) {
+ case FSF_GOOD:
+- zfcp_dbf_san_res("fsscth2", req);
+ ct->status = 0;
++ zfcp_dbf_san_res("fsscth2", req);
+ break;
+ case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+ zfcp_fsf_class_not_supp(req);
+@@ -1109,8 +1109,8 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
+
+ switch (header->fsf_status) {
+ case FSF_GOOD:
+- zfcp_dbf_san_res("fsselh1", req);
+ send_els->status = 0;
++ zfcp_dbf_san_res("fsselh1", req);
+ break;
+ case FSF_SERVICE_CLASS_NOT_SUPPORTED:
+ zfcp_fsf_class_not_supp(req);
+@@ -2258,7 +2258,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
+ fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd;
+ zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0);
+
+- if (scsi_prot_sg_count(scsi_cmnd)) {
++ if ((scsi_get_prot_op(scsi_cmnd) != SCSI_PROT_NORMAL) &&
++ scsi_prot_sg_count(scsi_cmnd)) {
+ zfcp_qdio_set_data_div(qdio, &req->qdio_req,
+ scsi_prot_sg_count(scsi_cmnd));
+ retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req,
+diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
+index 07ffdbb5107f..9bd9b9a29dfc 100644
+--- a/drivers/s390/scsi/zfcp_scsi.c
++++ b/drivers/s390/scsi/zfcp_scsi.c
+@@ -3,7 +3,7 @@
+ *
+ * Interface to Linux SCSI midlayer.
+ *
+- * Copyright IBM Corp. 2002, 2016
++ * Copyright IBM Corp. 2002, 2017
+ */
+
+ #define KMSG_COMPONENT "zfcp"
+@@ -273,25 +273,29 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
+
+ zfcp_erp_wait(adapter);
+ ret = fc_block_scsi_eh(scpnt);
+- if (ret)
++ if (ret) {
++ zfcp_dbf_scsi_devreset("fiof", scpnt, tm_flags, NULL);
+ return ret;
++ }
+
+ if (!(atomic_read(&adapter->status) &
+ ZFCP_STATUS_COMMON_RUNNING)) {
+- zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
++ zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags, NULL);
+ return SUCCESS;
+ }
+ }
+- if (!fsf_req)
++ if (!fsf_req) {
++ zfcp_dbf_scsi_devreset("reqf", scpnt, tm_flags, NULL);
+ return FAILED;
++ }
+
+ wait_for_completion(&fsf_req->completion);
+
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
+- zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
++ zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags, fsf_req);
+ retval = FAILED;
+ } else {
+- zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
++ zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags, fsf_req);
+ zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
+ }
+
+diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
+index d8b1fbd4c8aa..35cbd36f8d3b 100644
+--- a/drivers/scsi/megaraid/megaraid_sas_base.c
++++ b/drivers/scsi/megaraid/megaraid_sas_base.c
+@@ -1901,9 +1901,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc
+ if (cmd_fusion->sync_cmd_idx != (u32)ULONG_MAX) {
+ cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+ if (cmd_mfi->sync_cmd &&
+- cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)
++ (cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT)) {
++ cmd_mfi->frame->hdr.cmd_status =
++ MFI_STAT_WRONG_STATE;
+ megasas_complete_cmd(instance,
+ cmd_mfi, DID_OK);
++ }
+ }
+ }
+ } else {
+@@ -5290,7 +5293,8 @@ static int megasas_init_fw(struct megasas_instance *instance)
+ instance->throttlequeuedepth =
+ MEGASAS_THROTTLE_QUEUE_DEPTH;
+
+- if (resetwaittime > MEGASAS_RESET_WAIT_TIME)
++ if ((resetwaittime < 1) ||
++ (resetwaittime > MEGASAS_RESET_WAIT_TIME))
+ resetwaittime = MEGASAS_RESET_WAIT_TIME;
+
+ if ((scmd_timeout < 10) || (scmd_timeout > MEGASAS_DEFAULT_CMD_TIMEOUT))
+@@ -5459,6 +5463,14 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num,
+ prev_aen.word =
+ le32_to_cpu(instance->aen_cmd->frame->dcmd.mbox.w[1]);
+
++ if ((curr_aen.members.class < MFI_EVT_CLASS_DEBUG) ||
++ (curr_aen.members.class > MFI_EVT_CLASS_DEAD)) {
++ dev_info(&instance->pdev->dev,
++ "%s %d out of range class %d send by application\n",
++ __func__, __LINE__, curr_aen.members.class);
++ return 0;
++ }
++
+ /*
+ * A class whose enum value is smaller is inclusive of all
+ * higher values. If a PROGRESS (= -1) was previously
+diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
+index 8c4641b518b5..9a34afcb1c4c 100644
+--- a/drivers/scsi/qla2xxx/qla_attr.c
++++ b/drivers/scsi/qla2xxx/qla_attr.c
+@@ -318,6 +318,8 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ return -EINVAL;
+ if (start > ha->optrom_size)
+ return -EINVAL;
++ if (size > ha->optrom_size - start)
++ size = ha->optrom_size - start;
+
+ mutex_lock(&ha->optrom_mutex);
+ switch (val) {
+@@ -343,8 +345,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ }
+
+ ha->optrom_region_start = start;
+- ha->optrom_region_size = start + size > ha->optrom_size ?
+- ha->optrom_size - start : size;
++ ha->optrom_region_size = start + size;
+
+ ha->optrom_state = QLA_SREADING;
+ ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+@@ -417,8 +418,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
+ }
+
+ ha->optrom_region_start = start;
+- ha->optrom_region_size = start + size > ha->optrom_size ?
+- ha->optrom_size - start : size;
++ ha->optrom_region_size = start + size;
+
+ ha->optrom_state = QLA_SWRITING;
+ ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
+index 3dfb54abc874..f8ae70476b3a 100644
+--- a/drivers/scsi/qla2xxx/qla_mid.c
++++ b/drivers/scsi/qla2xxx/qla_mid.c
+@@ -74,7 +74,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
+ * ensures no active vp_list traversal while the vport is removed
+ * from the queue)
+ */
+- wait_event_timeout(vha->vref_waitq, atomic_read(&vha->vref_count),
++ wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+ 10*HZ);
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
+index 9236a13d5d2a..02dfbc1373e3 100644
+--- a/drivers/scsi/sg.c
++++ b/drivers/scsi/sg.c
+@@ -122,7 +122,7 @@ struct sg_device; /* forward declarations */
+ struct sg_fd;
+
+ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
+- struct sg_request *nextrp; /* NULL -> tail request (slist) */
++ struct list_head entry; /* list entry */
+ struct sg_fd *parentfp; /* NULL -> not in use */
+ Sg_scatter_hold data; /* hold buffer, perhaps scatter list */
+ sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */
+@@ -146,8 +146,7 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
+ int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
+ int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
+ Sg_scatter_hold reserve; /* buffer held for this file descriptor */
+- unsigned save_scat_len; /* original length of trunc. scat. element */
+- Sg_request *headrp; /* head of request slist, NULL->empty */
++ struct list_head rq_list; /* head of request list */
+ struct fasync_struct *async_qp; /* used by asynchronous notification */
+ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */
+ char low_dma; /* as in parent but possibly overridden to 1 */
+@@ -829,6 +828,39 @@ static int max_sectors_bytes(struct request_queue *q)
+ return max_sectors << 9;
+ }
+
++static void
++sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo)
++{
++ Sg_request *srp;
++ int val;
++ unsigned int ms;
++
++ val = 0;
++ list_for_each_entry(srp, &sfp->rq_list, entry) {
++ if (val >= SG_MAX_QUEUE)
++ break;
++ rinfo[val].req_state = srp->done + 1;
++ rinfo[val].problem =
++ srp->header.masked_status &
++ srp->header.host_status &
++ srp->header.driver_status;
++ if (srp->done)
++ rinfo[val].duration =
++ srp->header.duration;
++ else {
++ ms = jiffies_to_msecs(jiffies);
++ rinfo[val].duration =
++ (ms > srp->header.duration) ?
++ (ms - srp->header.duration) : 0;
++ }
++ rinfo[val].orphan = srp->orphan;
++ rinfo[val].sg_io_owned = srp->sg_io_owned;
++ rinfo[val].pack_id = srp->header.pack_id;
++ rinfo[val].usr_ptr = srp->header.usr_ptr;
++ val++;
++ }
++}
++
+ static long
+ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ {
+@@ -941,7 +973,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ if (!access_ok(VERIFY_WRITE, ip, sizeof (int)))
+ return -EFAULT;
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
++ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if ((1 == srp->done) && (!srp->sg_io_owned)) {
+ read_unlock_irqrestore(&sfp->rq_list_lock,
+ iflags);
+@@ -954,7 +986,8 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ return 0;
+ case SG_GET_NUM_WAITING:
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+- for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) {
++ val = 0;
++ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ if ((1 == srp->done) && (!srp->sg_io_owned))
+ ++val;
+ }
+@@ -1022,42 +1055,15 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
+ return -EFAULT;
+ else {
+ sg_req_info_t *rinfo;
+- unsigned int ms;
+
+- rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
+- GFP_KERNEL);
++ rinfo = kzalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE,
++ GFP_KERNEL);
+ if (!rinfo)
+ return -ENOMEM;
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+- for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE;
+- ++val, srp = srp ? srp->nextrp : srp) {
+- memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
+- if (srp) {
+- rinfo[val].req_state = srp->done + 1;
+- rinfo[val].problem =
+- srp->header.masked_status &
+- srp->header.host_status &
+- srp->header.driver_status;
+- if (srp->done)
+- rinfo[val].duration =
+- srp->header.duration;
+- else {
+- ms = jiffies_to_msecs(jiffies);
+- rinfo[val].duration =
+- (ms > srp->header.duration) ?
+- (ms - srp->header.duration) : 0;
+- }
+- rinfo[val].orphan = srp->orphan;
+- rinfo[val].sg_io_owned =
+- srp->sg_io_owned;
+- rinfo[val].pack_id =
+- srp->header.pack_id;
+- rinfo[val].usr_ptr =
+- srp->header.usr_ptr;
+- }
+- }
++ sg_fill_request_table(sfp, rinfo);
+ read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+- result = __copy_to_user(p, rinfo,
++ result = __copy_to_user(p, rinfo,
+ SZ_SG_REQ_INFO * SG_MAX_QUEUE);
+ result = result ? -EFAULT : 0;
+ kfree(rinfo);
+@@ -1163,7 +1169,7 @@ sg_poll(struct file *filp, poll_table * wait)
+ return POLLERR;
+ poll_wait(filp, &sfp->read_wait, wait);
+ read_lock_irqsave(&sfp->rq_list_lock, iflags);
+- for (srp = sfp->headrp; srp; srp = srp->nextrp) {
++ list_for_each_entry(srp, &sfp->rq_list, entry) {
+ /* if any read waiting, flag it */
+ if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
+ res = POLLIN | POLLRDNORM;
+@@ -2049,7 +2055,6 @@ sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
+ req_schp->pages = NULL;
+ req_schp->page_order = 0;
+ req_schp->sglist_len = 0;
+- sfp->save_scat_len = 0;
+ srp->res_used = 0;
+ /* Called without mutex lock to avoid deadlock */
+ sfp->res_in_use = 0;
+@@ -2062,7 +2067,7 @@ sg_get_rq_mark(Sg_fd * sfp, int pack_id)
+ unsigned long iflags;
+
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+- for (resp = sfp->headrp; resp; resp = resp->nextrp) {
++ list_for_each_entry(resp, &sfp->rq_list, entry) {
+ /* look for requests that are ready + not SG_IO owned */
+ if ((1 == resp->done) && (!resp->sg_io_owned) &&
+ ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
+@@ -2080,70 +2085,45 @@ sg_add_request(Sg_fd * sfp)
+ {
+ int k;
+ unsigned long iflags;
+- Sg_request *resp;
+ Sg_request *rp = sfp->req_arr;
+
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+- resp = sfp->headrp;
+- if (!resp) {
+- memset(rp, 0, sizeof (Sg_request));
+- rp->parentfp = sfp;
+- resp = rp;
+- sfp->headrp = resp;
+- } else {
+- if (0 == sfp->cmd_q)
+- resp = NULL; /* command queuing disallowed */
+- else {
+- for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
+- if (!rp->parentfp)
+- break;
+- }
+- if (k < SG_MAX_QUEUE) {
+- memset(rp, 0, sizeof (Sg_request));
+- rp->parentfp = sfp;
+- while (resp->nextrp)
+- resp = resp->nextrp;
+- resp->nextrp = rp;
+- resp = rp;
+- } else
+- resp = NULL;
++ if (!list_empty(&sfp->rq_list)) {
++ if (!sfp->cmd_q)
++ goto out_unlock;
++
++ for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) {
++ if (!rp->parentfp)
++ break;
+ }
++ if (k >= SG_MAX_QUEUE)
++ goto out_unlock;
+ }
+- if (resp) {
+- resp->nextrp = NULL;
+- resp->header.duration = jiffies_to_msecs(jiffies);
+- }
++ memset(rp, 0, sizeof (Sg_request));
++ rp->parentfp = sfp;
++ rp->header.duration = jiffies_to_msecs(jiffies);
++ list_add_tail(&rp->entry, &sfp->rq_list);
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+- return resp;
++ return rp;
++out_unlock:
++ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
++ return NULL;
+ }
+
+ /* Return of 1 for found; 0 for not found */
+ static int
+ sg_remove_request(Sg_fd * sfp, Sg_request * srp)
+ {
+- Sg_request *prev_rp;
+- Sg_request *rp;
+ unsigned long iflags;
+ int res = 0;
+
+- if ((!sfp) || (!srp) || (!sfp->headrp))
++ if (!sfp || !srp || list_empty(&sfp->rq_list))
+ return res;
+ write_lock_irqsave(&sfp->rq_list_lock, iflags);
+- prev_rp = sfp->headrp;
+- if (srp == prev_rp) {
+- sfp->headrp = prev_rp->nextrp;
+- prev_rp->parentfp = NULL;
++ if (!list_empty(&srp->entry)) {
++ list_del(&srp->entry);
++ srp->parentfp = NULL;
+ res = 1;
+- } else {
+- while ((rp = prev_rp->nextrp)) {
+- if (srp == rp) {
+- prev_rp->nextrp = rp->nextrp;
+- rp->parentfp = NULL;
+- res = 1;
+- break;
+- }
+- prev_rp = rp;
+- }
+ }
+ write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
+ return res;
+@@ -2162,7 +2142,7 @@ sg_add_sfp(Sg_device * sdp)
+
+ init_waitqueue_head(&sfp->read_wait);
+ rwlock_init(&sfp->rq_list_lock);
+-
++ INIT_LIST_HEAD(&sfp->rq_list);
+ kref_init(&sfp->f_ref);
+ mutex_init(&sfp->f_mutex);
+ sfp->timeout = SG_DEFAULT_TIMEOUT;
+@@ -2203,10 +2183,13 @@ sg_remove_sfp_usercontext(struct work_struct *work)
+ {
+ struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work);
+ struct sg_device *sdp = sfp->parentdp;
++ Sg_request *srp;
+
+ /* Cleanup any responses which were never read(). */
+- while (sfp->headrp)
+- sg_finish_rem_req(sfp->headrp);
++ while (!list_empty(&sfp->rq_list)) {
++ srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
++ sg_finish_rem_req(srp);
++ }
+
+ if (sfp->reserve.bufflen > 0) {
+ SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp,
+@@ -2609,7 +2592,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
+ /* must be called while holding sg_index_lock */
+ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ {
+- int k, m, new_interface, blen, usg;
++ int k, new_interface, blen, usg;
+ Sg_request *srp;
+ Sg_fd *fp;
+ const sg_io_hdr_t *hp;
+@@ -2629,13 +2612,11 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n",
+ (int) fp->cmd_q, (int) fp->force_packid,
+ (int) fp->keep_orphan);
+- for (m = 0, srp = fp->headrp;
+- srp != NULL;
+- ++m, srp = srp->nextrp) {
++ list_for_each_entry(srp, &fp->rq_list, entry) {
+ hp = &srp->header;
+ new_interface = (hp->interface_id == '\0') ? 0 : 1;
+ if (srp->res_used) {
+- if (new_interface &&
++ if (new_interface &&
+ (SG_FLAG_MMAP_IO & hp->flags))
+ cp = " mmap>> ";
+ else
+@@ -2666,7 +2647,7 @@ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
+ seq_printf(s, "ms sgat=%d op=0x%02x\n", usg,
+ (int) srp->data.cmd_opcode);
+ }
+- if (0 == m)
++ if (list_empty(&fp->rq_list))
+ seq_puts(s, " No requests active\n");
+ read_unlock(&fp->rq_list_lock);
+ }
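
The sg rework above replaces a hand-rolled singly linked ->nextrp chain with the kernel's intrusive struct list_head, which makes insertion, removal, and iteration uniform and unlink O(1). A self-contained userspace rendition of the idiom (GNU C, using typeof as the kernel headers do):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = n->prev = n;
    }

    #define list_for_each_entry(pos, head, member)                      \
        for (pos = container_of((head)->next, typeof(*pos), member);    \
             &pos->member != (head);                                    \
             pos = container_of(pos->member.next, typeof(*pos), member))

    struct req { int id; struct list_head entry; };

    int main(void)
    {
        struct list_head rq_list;
        struct req a = { .id = 1 }, b = { .id = 2 };
        struct req *r;

        INIT_LIST_HEAD(&rq_list);
        list_add_tail(&a.entry, &rq_list);
        list_add_tail(&b.entry, &rq_list);
        list_del(&a.entry);

        list_for_each_entry(r, &rq_list, entry)
            printf("req %d\n", r->id);   /* prints: req 2 */
        return 0;
    }
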
+diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
+index c5ab1b0037fc..2bf96d33428a 100644
+--- a/drivers/scsi/storvsc_drv.c
++++ b/drivers/scsi/storvsc_drv.c
+@@ -1559,6 +1559,8 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
+ ret = storvsc_do_io(dev, cmd_request);
+
+ if (ret == -EAGAIN) {
++ if (payload_sz > sizeof(cmd_request->mpb))
++ kfree(payload);
+ /* no more space */
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ }
+diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
+index aa80dc94ddc2..c220c2c0893f 100644
+--- a/drivers/tty/tty_buffer.c
++++ b/drivers/tty/tty_buffer.c
+@@ -361,6 +361,32 @@ int tty_insert_flip_string_flags(struct tty_port *port,
+ }
+ EXPORT_SYMBOL(tty_insert_flip_string_flags);
+
++/**
++ * __tty_insert_flip_char - Add one character to the tty buffer
++ * @port: tty port
++ * @ch: character
++ * @flag: flag byte
++ *
++ * Queue a single byte to the tty buffering, with an optional flag.
++ * This is the slow path of tty_insert_flip_char.
++ */
++int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
++{
++ struct tty_buffer *tb;
++ int flags = (flag == TTY_NORMAL) ? TTYB_NORMAL : 0;
++
++ if (!__tty_buffer_request_room(port, 1, flags))
++ return 0;
++
++ tb = port->buf.tail;
++ if (~tb->flags & TTYB_NORMAL)
++ *flag_buf_ptr(tb, tb->used) = flag;
++ *char_buf_ptr(tb, tb->used++) = ch;
++
++ return 1;
++}
++EXPORT_SYMBOL(__tty_insert_flip_char);
++
+ /**
+ * tty_schedule_flip - push characters to ldisc
+ * @port: tty port to push from
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 5fa9ba1de429..f72535e1898f 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -2334,6 +2334,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ unsigned int s_flags = sb->s_flags;
+ int nr_orphans = 0, nr_truncates = 0;
+ #ifdef CONFIG_QUOTA
++ int quota_update = 0;
+ int i;
+ #endif
+ if (!es->s_last_orphan) {
+@@ -2372,14 +2373,32 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ #ifdef CONFIG_QUOTA
+ /* Needed for iput() to work correctly and not trash data */
+ sb->s_flags |= MS_ACTIVE;
+- /* Turn on quotas so that they are updated correctly */
++
++ /*
++ * Turn on quotas which were not enabled for read-only mounts if
++ * filesystem has quota feature, so that they are updated correctly.
++ */
++ if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
++ int ret = ext4_enable_quotas(sb);
++
++ if (!ret)
++ quota_update = 1;
++ else
++ ext4_msg(sb, KERN_ERR,
++ "Cannot turn on quotas: error %d", ret);
++ }
++
++ /* Turn on journaled quotas used for the old quota style */
+ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+ if (EXT4_SB(sb)->s_qf_names[i]) {
+ int ret = ext4_quota_on_mount(sb, i);
+- if (ret < 0)
++
++ if (!ret)
++ quota_update = 1;
++ else
+ ext4_msg(sb, KERN_ERR,
+ "Cannot turn on journaled "
+- "quota: error %d", ret);
++ "quota: type %d: error %d", i, ret);
+ }
+ }
+ #endif
+@@ -2438,10 +2457,12 @@ static void ext4_orphan_cleanup(struct super_block *sb,
+ ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
+ PLURAL(nr_truncates));
+ #ifdef CONFIG_QUOTA
+- /* Turn quotas off */
+- for (i = 0; i < EXT4_MAXQUOTAS; i++) {
+- if (sb_dqopt(sb)->files[i])
+- dquot_quota_off(sb, i);
++ /* Turn off quotas if they were enabled for orphan cleanup */
++ if (quota_update) {
++ for (i = 0; i < EXT4_MAXQUOTAS; i++) {
++ if (sb_dqopt(sb)->files[i])
++ dquot_quota_off(sb, i);
++ }
+ }
+ #endif
+ sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+@@ -5365,6 +5386,9 @@ static int ext4_enable_quotas(struct super_block *sb)
+ DQUOT_USAGE_ENABLED |
+ (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
+ if (err) {
++ for (type--; type >= 0; type--)
++ dquot_quota_off(sb, type);
++
+ ext4_warning(sb,
+ "Failed to enable quota tracking "
+ "(type=%d, err=%d). Please run "
+diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
+index 0a2115084c3f..582bfee40345 100644
+--- a/fs/nfs/callback.c
++++ b/fs/nfs/callback.c
+@@ -75,7 +75,10 @@ nfs4_callback_svc(void *vrqstp)
+
+ set_freezable();
+
+- while (!kthread_should_stop()) {
++ while (!kthread_freezable_should_stop(NULL)) {
++
++ if (signal_pending(current))
++ flush_signals(current);
+ /*
+ * Listen for a request on the socket
+ */
+@@ -84,6 +87,8 @@ nfs4_callback_svc(void *vrqstp)
+ continue;
+ svc_process(rqstp);
+ }
++ svc_exit_thread(rqstp);
++ module_put_and_exit(0);
+ return 0;
+ }
+
+@@ -102,9 +107,10 @@ nfs41_callback_svc(void *vrqstp)
+
+ set_freezable();
+
+- while (!kthread_should_stop()) {
+- if (try_to_freeze())
+- continue;
++ while (!kthread_freezable_should_stop(NULL)) {
++
++ if (signal_pending(current))
++ flush_signals(current);
+
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
+ spin_lock_bh(&serv->sv_cb_lock);
+@@ -120,11 +126,13 @@ nfs41_callback_svc(void *vrqstp)
+ error);
+ } else {
+ spin_unlock_bh(&serv->sv_cb_lock);
+- schedule();
++ if (!kthread_should_stop())
++ schedule();
+ finish_wait(&serv->sv_cb_waitq, &wq);
+ }
+- flush_signals(current);
+ }
++ svc_exit_thread(rqstp);
++ module_put_and_exit(0);
+ return 0;
+ }
+
+@@ -220,14 +228,14 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv,
+ static struct svc_serv_ops nfs40_cb_sv_ops = {
+ .svo_function = nfs4_callback_svc,
+ .svo_enqueue_xprt = svc_xprt_do_enqueue,
+- .svo_setup = svc_set_num_threads,
++ .svo_setup = svc_set_num_threads_sync,
+ .svo_module = THIS_MODULE,
+ };
+ #if defined(CONFIG_NFS_V4_1)
+ static struct svc_serv_ops nfs41_cb_sv_ops = {
+ .svo_function = nfs41_callback_svc,
+ .svo_enqueue_xprt = svc_xprt_do_enqueue,
+- .svo_setup = svc_set_num_threads,
++ .svo_setup = svc_set_num_threads_sync,
+ .svo_module = THIS_MODULE,
+ };
+
+diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c
+index 7a3754488312..9409aac232f7 100644
+--- a/fs/orangefs/acl.c
++++ b/fs/orangefs/acl.c
+@@ -61,9 +61,9 @@ struct posix_acl *orangefs_get_acl(struct inode *inode, int type)
+ return acl;
+ }
+
+-int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++static int __orangefs_set_acl(struct inode *inode, struct posix_acl *acl,
++ int type)
+ {
+- struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+ int error = 0;
+ void *value = NULL;
+ size_t size = 0;
+@@ -72,22 +72,6 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ name = XATTR_NAME_POSIX_ACL_ACCESS;
+- if (acl) {
+- umode_t mode;
+-
+- error = posix_acl_update_mode(inode, &mode, &acl);
+- if (error) {
+- gossip_err("%s: posix_acl_update_mode err: %d\n",
+- __func__,
+- error);
+- return error;
+- }
+-
+- if (inode->i_mode != mode)
+- SetModeFlag(orangefs_inode);
+- inode->i_mode = mode;
+- mark_inode_dirty_sync(inode);
+- }
+ break;
+ case ACL_TYPE_DEFAULT:
+ name = XATTR_NAME_POSIX_ACL_DEFAULT;
+@@ -132,6 +116,29 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
+ return error;
+ }
+
++int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type)
++{
++ int error;
++
++ if (type == ACL_TYPE_ACCESS && acl) {
++ umode_t mode;
++
++ error = posix_acl_update_mode(inode, &mode, &acl);
++ if (error) {
++ gossip_err("%s: posix_acl_update_mode err: %d\n",
++ __func__,
++ error);
++ return error;
++ }
++
++ if (inode->i_mode != mode)
++ SetModeFlag(ORANGEFS_I(inode));
++ inode->i_mode = mode;
++ mark_inode_dirty_sync(inode);
++ }
++ return __orangefs_set_acl(inode, acl, type);
++}
++
+ int orangefs_init_acl(struct inode *inode, struct inode *dir)
+ {
+ struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode);
+@@ -146,13 +153,14 @@ int orangefs_init_acl(struct inode *inode, struct inode *dir)
+ return error;
+
+ if (default_acl) {
+- error = orangefs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
++ error = __orangefs_set_acl(inode, default_acl,
++ ACL_TYPE_DEFAULT);
+ posix_acl_release(default_acl);
+ }
+
+ if (acl) {
+ if (!error)
+- error = orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
++ error = __orangefs_set_acl(inode, acl, ACL_TYPE_ACCESS);
+ posix_acl_release(acl);
+ }
+
+diff --git a/include/linux/ccp.h b/include/linux/ccp.h
+index edc5d04b9632..1cfe5ef3060b 100644
+--- a/include/linux/ccp.h
++++ b/include/linux/ccp.h
+@@ -1,7 +1,7 @@
+ /*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+- * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
++ * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ * Author: Gary R Hook <gary.hook@amd.com>
+@@ -222,6 +222,7 @@ enum ccp_xts_aes_unit_size {
+ * AES operation the new IV overwrites the old IV.
+ */
+ struct ccp_xts_aes_engine {
++ enum ccp_aes_type type;
+ enum ccp_aes_action action;
+ enum ccp_xts_aes_unit_size unit_size;
+
+diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
+index 7321ae933867..102c84dcc11a 100644
+--- a/include/linux/sunrpc/svc.h
++++ b/include/linux/sunrpc/svc.h
+@@ -470,6 +470,7 @@ void svc_pool_map_put(void);
+ struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
+ struct svc_serv_ops *);
+ int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
++int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
+ int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
+ void svc_destroy(struct svc_serv *);
+ void svc_shutdown_net(struct svc_serv *, struct net *);
+diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
+index c28dd523f96e..d43837f2ce3a 100644
+--- a/include/linux/tty_flip.h
++++ b/include/linux/tty_flip.h
+@@ -12,6 +12,7 @@ extern int tty_prepare_flip_string(struct tty_port *port,
+ unsigned char **chars, size_t size);
+ extern void tty_flip_buffer_push(struct tty_port *port);
+ void tty_schedule_flip(struct tty_port *port);
++int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
+
+ static inline int tty_insert_flip_char(struct tty_port *port,
+ unsigned char ch, char flag)
+@@ -26,7 +27,7 @@ static inline int tty_insert_flip_char(struct tty_port *port,
+ *char_buf_ptr(tb, tb->used++) = ch;
+ return 1;
+ }
+- return tty_insert_flip_string_flags(port, &ch, &flag, 1);
++ return __tty_insert_flip_char(port, ch, flag);
+ }
+
+ static inline int tty_insert_flip_string(struct tty_port *port,
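
The tty_flip change above is a classic fast-path split: the inline header function handles the common case where the tail buffer has room, and the new out-of-line __tty_insert_flip_char() takes over only when the buffer must be grown, keeping the inline body small. A hypothetical sketch of the shape:

    #include <stddef.h>

    static char fifo[64];
    static size_t used;

    /* Out-of-line slow path: the real code requests a new buffer
     * here; this stand-in simply reports failure. */
    static int push_char_slow(char c)
    {
        (void)c;
        return 0;
    }

    static inline int push_char(char c)
    {
        if (used < sizeof(fifo)) {   /* fast path: room available */
            fifo[used++] = c;
            return 1;
        }
        return push_char_slow(c);    /* rare path, out of line */
    }
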
+diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
+index 6e432ed7d0fe..53ed8ae5de1c 100644
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2747,13 +2747,14 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+
+ if (!command || !ftrace_enabled) {
+ /*
+- * If these are per_cpu ops, they still need their
+- * per_cpu field freed. Since, function tracing is
++ * If these are dynamic or per_cpu ops, they still
++ * need their data freed. Since, function tracing is
+ * not currently active, we can just free them
+ * without synchronizing all CPUs.
+ */
+- if (ops->flags & FTRACE_OPS_FL_PER_CPU)
+- per_cpu_ops_free(ops);
++ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU))
++ goto free_ops;
++
+ return 0;
+ }
+
+@@ -2808,6 +2809,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
+ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_PER_CPU)) {
+ schedule_on_each_cpu(ftrace_sync);
+
++ free_ops:
+ arch_ftrace_trampoline_free(ops);
+
+ if (ops->flags & FTRACE_OPS_FL_PER_CPU)
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 7379f735a9f4..f95bf81529f5 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2369,11 +2369,17 @@ static char *get_trace_buf(void)
+ if (!buffer || buffer->nesting >= 4)
+ return NULL;
+
+- return &buffer->buffer[buffer->nesting++][0];
++ buffer->nesting++;
++
++ /* Interrupts must see nesting incremented before we use the buffer */
++ barrier();
++ return &buffer->buffer[buffer->nesting - 1][0];
+ }
+
+ static void put_trace_buf(void)
+ {
++ /* Don't let the decrement of nesting leak before this */
++ barrier();
+ this_cpu_dec(trace_percpu_buffer->nesting);
+ }
+
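The discipline in the two hunks above is general: bump the nesting counter, force the compiler to commit the increment before the buffer is touched (so an interrupt cannot be handed the same slot), and mirror the barrier on release so the decrement cannot be hoisted above the last use. A self-contained user-space sketch of the same pattern, using a thread-local counter and GCC's empty-asm compiler barrier; every name in it (trace_buf, get_buf, put_buf) is invented for the example:

/* Stand-alone sketch of the nesting-counter discipline.
 * Compile with: gcc -O2 demo.c */
#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

#define NESTING_MAX 4
#define BUF_SZ      256

static __thread struct {
	int  nesting;
	char buffer[NESTING_MAX][BUF_SZ];
} trace_buf;

static char *get_buf(void)
{
	if (trace_buf.nesting >= NESTING_MAX)
		return NULL;
	trace_buf.nesting++;
	/* An interrupt (signal handler here) must observe the increment
	 * before we start writing, or it would reuse the same slot. */
	barrier();
	return trace_buf.buffer[trace_buf.nesting - 1];
}

static void put_buf(void)
{
	/* Don't let the decrement be hoisted above the last buffer use. */
	barrier();
	trace_buf.nesting--;
}

int main(void)
{
	char *b = get_buf();
	if (b) {
		snprintf(b, BUF_SZ, "nesting level %d", trace_buf.nesting);
		puts(b);
		put_buf();
	}
	return 0;
}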
+@@ -5658,7 +5664,7 @@ static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
+ tracing_reset_online_cpus(&tr->trace_buffer);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+- if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
++ if (tr->max_buffer.buffer)
+ ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
+ tracing_reset_online_cpus(&tr->max_buffer);
+ #endif
+diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
+index b0f86ea77881..ca70d11b8aa7 100644
+--- a/kernel/trace/trace_selftest.c
++++ b/kernel/trace/trace_selftest.c
+@@ -272,7 +272,7 @@ static int trace_selftest_ops(struct trace_array *tr, int cnt)
+ goto out_free;
+ if (cnt > 1) {
+ if (trace_selftest_test_global_cnt == 0)
+- goto out;
++ goto out_free;
+ }
+ if (trace_selftest_test_dyn_cnt == 0)
+ goto out_free;
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 2abf8d5f0ad4..7064aae8ded7 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2100,13 +2100,25 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
+ continue;
+
+ /*
+- * It should never happen but changes to locking could
+- * inadvertently allow a per-cpu drain to add pages
+- * to MIGRATE_HIGHATOMIC while unreserving so be safe
+- * and watch for underflows.
++ * In the page freeing path, the migratetype change is racy
++ * so we can encounter several free pages in a pageblock
++ * in this loop although we changed the pageblock type
++ * from highatomic to ac->migratetype. So we should
++ * adjust the count only once.
+ */
+- zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
+- zone->nr_reserved_highatomic);
++ if (get_pageblock_migratetype(page) ==
++ MIGRATE_HIGHATOMIC) {
++ /*
++ * It should never happen but changes to
++ * locking could inadvertently allow a per-cpu
++ * drain to add pages to MIGRATE_HIGHATOMIC
++ * while unreserving so be safe and watch for
++ * underflows.
++ */
++ zone->nr_reserved_highatomic -= min(
++ pageblock_nr_pages,
++ zone->nr_reserved_highatomic);
++ }
+
+ /*
+ * Convert to ac->migratetype and avoid the normal
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 6bd150882ba4..ed9ce7c63252 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -95,19 +95,26 @@ static struct conntrack_gc_work conntrack_gc_work;
+
+ void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
+ {
++ /* 1) Acquire the lock */
+ spin_lock(lock);
+- while (unlikely(nf_conntrack_locks_all)) {
+- spin_unlock(lock);
+
+- /*
+- * Order the 'nf_conntrack_locks_all' load vs. the
+- * spin_unlock_wait() loads below, to ensure
+- * that 'nf_conntrack_locks_all_lock' is indeed held:
+- */
+- smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
+- spin_unlock_wait(&nf_conntrack_locks_all_lock);
+- spin_lock(lock);
+- }
++ /* 2) read nf_conntrack_locks_all, with ACQUIRE semantics
++ * It pairs with the smp_store_release() in nf_conntrack_all_unlock()
++ */
++ if (likely(smp_load_acquire(&nf_conntrack_locks_all) == false))
++ return;
++
++ /* fast path failed, unlock */
++ spin_unlock(lock);
++
++ /* Slow path 1) get global lock */
++ spin_lock(&nf_conntrack_locks_all_lock);
++
++ /* Slow path 2) get the lock we want */
++ spin_lock(lock);
++
++ /* Slow path 3) release the global lock */
++ spin_unlock(&nf_conntrack_locks_all_lock);
+ }
+ EXPORT_SYMBOL_GPL(nf_conntrack_lock);
+
+@@ -148,28 +155,27 @@ static void nf_conntrack_all_lock(void)
+ int i;
+
+ spin_lock(&nf_conntrack_locks_all_lock);
+- nf_conntrack_locks_all = true;
+
+- /*
+- * Order the above store of 'nf_conntrack_locks_all' against
+- * the spin_unlock_wait() loads below, such that if
+- * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
+- * we must observe nf_conntrack_locks[] held:
+- */
+- smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
++ nf_conntrack_locks_all = true;
+
+ for (i = 0; i < CONNTRACK_LOCKS; i++) {
+- spin_unlock_wait(&nf_conntrack_locks[i]);
++ spin_lock(&nf_conntrack_locks[i]);
++
++ /* This spin_unlock provides the "release" to ensure that
++ * nf_conntrack_locks_all==true is visible to everyone that
++ * acquired spin_lock(&nf_conntrack_locks[]).
++ */
++ spin_unlock(&nf_conntrack_locks[i]);
+ }
+ }
+
+ static void nf_conntrack_all_unlock(void)
+ {
+- /*
+- * All prior stores must be complete before we clear
++ /* All prior stores must be complete before we clear
+ * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
+ * might observe the false value but not the entire
+- * critical section:
++ * critical section.
++ * It pairs with the smp_load_acquire() in nf_conntrack_lock()
+ */
+ smp_store_release(&nf_conntrack_locks_all, false);
+ spin_unlock(&nf_conntrack_locks_all_lock);
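The rewritten pair above implements a common local/global lock scheme without the deprecated spin_unlock_wait(): bucket lockers re-check the global flag with acquire semantics after taking their lock, while the global locker sets the flag and then takes and drops every bucket lock so the store becomes visible to anyone already inside a bucket. A compact user-space analogue under those assumptions, using pthreads and C11 atomics; all names are invented for the sketch, which builds as a translation unit with gcc -O2 -pthread -c:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

#define LOCKS 16

static pthread_mutex_t bucket_lock[LOCKS] =
	{ [0 ... LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER };
static pthread_mutex_t all_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool locks_all;

static void lock_bucket(int i)
{
	pthread_mutex_lock(&bucket_lock[i]);
	/* Pairs with the release store in all_lock_release(). */
	if (!atomic_load_explicit(&locks_all, memory_order_acquire))
		return;
	/* Slow path: queue behind the global holder, then retake. */
	pthread_mutex_unlock(&bucket_lock[i]);
	pthread_mutex_lock(&all_lock);
	pthread_mutex_lock(&bucket_lock[i]);
	pthread_mutex_unlock(&all_lock);
}

static void unlock_bucket(int i)
{
	pthread_mutex_unlock(&bucket_lock[i]);
}

static void all_lock_acquire(void)
{
	pthread_mutex_lock(&all_lock);
	atomic_store_explicit(&locks_all, true, memory_order_relaxed);
	/* Lock/unlock each bucket: current holders finish first, and the
	 * unlock publishes locks_all == true to later bucket lockers. */
	for (int i = 0; i < LOCKS; i++) {
		pthread_mutex_lock(&bucket_lock[i]);
		pthread_mutex_unlock(&bucket_lock[i]);
	}
}

static void all_lock_release(void)
{
	atomic_store_explicit(&locks_all, false, memory_order_release);
	pthread_mutex_unlock(&all_lock);
}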
+diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
+index 75f290bddca1..272c34551979 100644
+--- a/net/sunrpc/svc.c
++++ b/net/sunrpc/svc.c
+@@ -702,59 +702,32 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
+ return task;
+ }
+
+-/*
+- * Create or destroy enough new threads to make the number
+- * of threads the given number. If `pool' is non-NULL, applies
+- * only to threads in that pool, otherwise round-robins between
+- * all pools. Caller must ensure that mutual exclusion between this and
+- * server startup or shutdown.
+- *
+- * Destroying threads relies on the service threads filling in
+- * rqstp->rq_task, which only the nfs ones do. Assumes the serv
+- * has been created using svc_create_pooled().
+- *
+- * Based on code that used to be in nfsd_svc() but tweaked
+- * to be pool-aware.
+- */
+-int
+-svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++/* create new threads */
++static int
++svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+ {
+ struct svc_rqst *rqstp;
+ struct task_struct *task;
+ struct svc_pool *chosen_pool;
+- int error = 0;
+ unsigned int state = serv->sv_nrthreads-1;
+ int node;
+
+- if (pool == NULL) {
+- /* The -1 assumes caller has done a svc_get() */
+- nrservs -= (serv->sv_nrthreads-1);
+- } else {
+- spin_lock_bh(&pool->sp_lock);
+- nrservs -= pool->sp_nrthreads;
+- spin_unlock_bh(&pool->sp_lock);
+- }
+-
+- /* create new threads */
+- while (nrservs > 0) {
++ do {
+ nrservs--;
+ chosen_pool = choose_pool(serv, pool, &state);
+
+ node = svc_pool_map_get_node(chosen_pool->sp_id);
+ rqstp = svc_prepare_thread(serv, chosen_pool, node);
+- if (IS_ERR(rqstp)) {
+- error = PTR_ERR(rqstp);
+- break;
+- }
++ if (IS_ERR(rqstp))
++ return PTR_ERR(rqstp);
+
+ __module_get(serv->sv_ops->svo_module);
+ task = kthread_create_on_node(serv->sv_ops->svo_function, rqstp,
+ node, "%s", serv->sv_name);
+ if (IS_ERR(task)) {
+- error = PTR_ERR(task);
+ module_put(serv->sv_ops->svo_module);
+ svc_exit_thread(rqstp);
+- break;
++ return PTR_ERR(task);
+ }
+
+ rqstp->rq_task = task;
+@@ -763,18 +736,103 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
+
+ svc_sock_update_bufs(serv);
+ wake_up_process(task);
+- }
++ } while (nrservs > 0);
++
++ return 0;
++}
++
++
++/* destroy old threads */
++static int
++svc_signal_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++{
++ struct task_struct *task;
++ unsigned int state = serv->sv_nrthreads-1;
++
+ /* destroy old threads */
+- while (nrservs < 0 &&
+- (task = choose_victim(serv, pool, &state)) != NULL) {
++ do {
++ task = choose_victim(serv, pool, &state);
++ if (task == NULL)
++ break;
+ send_sig(SIGINT, task, 1);
+ nrservs++;
++ } while (nrservs < 0);
++
++ return 0;
++}
++
++/*
++ * Create or destroy enough threads to make the number
++ * of threads the given number. If `pool' is non-NULL, applies
++ * only to threads in that pool, otherwise round-robins between
++ * all pools. Caller must ensure mutual exclusion between this and
++ * server startup or shutdown.
++ *
++ * Destroying threads relies on the service threads filling in
++ * rqstp->rq_task, which only the nfs ones do. Assumes the serv
++ * has been created using svc_create_pooled().
++ *
++ * Based on code that used to be in nfsd_svc() but tweaked
++ * to be pool-aware.
++ */
++int
++svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++{
++ if (pool == NULL) {
++ /* The -1 assumes caller has done a svc_get() */
++ nrservs -= (serv->sv_nrthreads-1);
++ } else {
++ spin_lock_bh(&pool->sp_lock);
++ nrservs -= pool->sp_nrthreads;
++ spin_unlock_bh(&pool->sp_lock);
+ }
+
+- return error;
++ if (nrservs > 0)
++ return svc_start_kthreads(serv, pool, nrservs);
++ if (nrservs < 0)
++ return svc_signal_kthreads(serv, pool, nrservs);
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(svc_set_num_threads);
+
++/* destroy old threads */
++static int
++svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++{
++ struct task_struct *task;
++ unsigned int state = serv->sv_nrthreads-1;
++
++ /* destroy old threads */
++ do {
++ task = choose_victim(serv, pool, &state);
++ if (task == NULL)
++ break;
++ kthread_stop(task);
++ nrservs++;
++ } while (nrservs < 0);
++ return 0;
++}
++
++int
++svc_set_num_threads_sync(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
++{
++ if (pool == NULL) {
++ /* The -1 assumes caller has done a svc_get() */
++ nrservs -= (serv->sv_nrthreads-1);
++ } else {
++ spin_lock_bh(&pool->sp_lock);
++ nrservs -= pool->sp_nrthreads;
++ spin_unlock_bh(&pool->sp_lock);
++ }
++
++ if (nrservs > 0)
++ return svc_start_kthreads(serv, pool, nrservs);
++ if (nrservs < 0)
++ return svc_stop_kthreads(serv, pool, nrservs);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(svc_set_num_threads_sync);
++
+ /*
+ * Called from a server thread as it's exiting. Caller must hold the "service
+ * mutex" for the service.