Re: [PATCH 3.2 000/107] 3.2.72-rc1 review

From: Ben Hutchings
Date: Thu Oct 08 2015 - 21:17:46 EST


This is the combined diff for 3.2.72-rc1 relative to 3.2.71.

Ben.

--
Ben Hutchings
If the facts do not conform to your theory, they must be disposed of.

diff --git a/Makefile b/Makefile
index 9d5fea7..ebb597f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
-SUBLEVEL = 71
-EXTRAVERSION =
+SUBLEVEL = 72
+EXTRAVERSION = -rc1
NAME = Saber-toothed Squirrel

# *DOCUMENTATION*
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 362c7ca..4a93374 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -53,6 +53,14 @@ endif

comma = ,

+#
+# The Scalar Replacement of Aggregates (SRA) optimization pass in GCC 4.9 and
+# later may result in code being generated that handles signed short and signed
+# char struct members incorrectly. So disable it.
+# (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65932)
+#
+KBUILD_CFLAGS += $(call cc-option,-fno-ipa-sra)
+
# This selects which instruction set is used.
# Note that GCC does not numerically define an architecture version
# macro, but instead defines a whole series of macros which makes
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 9e617bd..c1d9c77 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -486,12 +486,23 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
*/
thumb = handler & 1;

+#if __LINUX_ARM_ARCH__ >= 6
+ /*
+ * Clear the If-Then Thumb-2 execution state. ARM spec
+ * requires this to be all 000s in ARM mode. Snapdragon
+ * S4/Krait misbehaves on a Thumb=>ARM signal transition
+ * without this.
+ *
+ * We must do this whenever we are running on a Thumb-2
+ * capable CPU, which includes ARMv6T2. However, we elect
+ * to do this whenever we're on an ARMv6 or later CPU for
+ * simplicity.
+ */
+ cpsr &= ~PSR_IT_MASK;
+#endif
+
if (thumb) {
cpsr |= PSR_T_BIT;
-#if __LINUX_ARM_ARCH__ >= 7
- /* clear the If-Then Thumb-2 execution state */
- cpsr &= ~PSR_IT_MASK;
-#endif
} else
cpsr &= ~PSR_T_BIT;
}
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index b2202a6..95bcedb 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -153,8 +153,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
+#ifdef CONFIG_SMP
+ /*
+ * For SMP, multiple CPUs can race, so we need to do
+ * this atomically.
+ */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+ unsigned long page_global = _PAGE_GLOBAL;
+ unsigned long tmp;
+
+ __asm__ __volatile__ (
+ " .set push\n"
+ " .set noreorder\n"
+ "1: " LL_INSN " %[tmp], %[buddy]\n"
+ " bnez %[tmp], 2f\n"
+ " or %[tmp], %[tmp], %[global]\n"
+ " " SC_INSN " %[tmp], %[buddy]\n"
+ " beqz %[tmp], 1b\n"
+ " nop\n"
+ "2:\n"
+ " .set pop"
+ : [buddy] "+m" (buddy->pte),
+ [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
}
#endif
}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 802e616..c7e2684 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
unsigned int real_len;
- cpumask_t mask;
+ cpumask_t allowed, mask;
int retval;
struct task_struct *p;

@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
if (retval)
goto out_unlock;

- cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+ cpumask_and(&mask, &allowed, cpu_active_mask);

out_unlock:
read_unlock(&tasklist_lock);
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index c0b1aff..88934b3 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -336,8 +336,8 @@ void do_cpu_irq_mask(struct pt_regs *regs)
struct pt_regs *old_regs;
unsigned long eirr_val;
int irq, cpu = smp_processor_id();
-#ifdef CONFIG_SMP
struct irq_desc *desc;
+#ifdef CONFIG_SMP
cpumask_t dest;
#endif

@@ -350,8 +350,12 @@ void do_cpu_irq_mask(struct pt_regs *regs)
goto set_out;
irq = eirr_to_irq(eirr_val);

-#ifdef CONFIG_SMP
+ /* Filter out spurious interrupts, mostly from serial port at bootup */
desc = irq_to_desc(irq);
+ if (unlikely(!desc->action))
+ goto set_out;
+
+#ifdef CONFIG_SMP
cpumask_copy(&dest, desc->irq_data.affinity);
if (irqd_is_per_cpu(&desc->irq_data) &&
!cpu_isset(smp_processor_id(), dest)) {
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 85bb66d..15a05ca 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -130,6 +130,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

if (WARN_ON(!phb))
return;
@@ -137,9 +138,10 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- pnv_put_msi(phb, virq_to_hw(entry->irq));
irq_dispose_mapping(entry->irq);
+ pnv_put_msi(phb, hwirq);
}
}
#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index e5c344d3..8ebbdc6 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -106,15 +106,16 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
struct fsl_msi *msi_data;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 38e6238..e873616 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -74,6 +74,7 @@ static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);

@@ -81,10 +82,10 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), ALLOC_CHUNK);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
}

return;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 9a7aa0e..dfc3486 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -124,15 +124,16 @@ static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type)
static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
{
struct msi_desc *entry;
+ irq_hw_number_t hwirq;

list_for_each_entry(entry, &pdev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;

+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}

return;
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 1c2d7af..4aae9c8 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -114,16 +114,17 @@ void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
+ irq_hw_number_t hwirq;

dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");

list_for_each_entry(entry, &dev->msi_list, list) {
if (entry->irq == NO_IRQ)
continue;
+ hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&msi_data->bitmap,
- virq_to_hw(entry->irq), 1);
irq_dispose_mapping(entry->irq);
+ msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}

diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 9fdd05d5..8831a40 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -52,6 +52,19 @@ typedef struct
__u32 gprs_high[NUM_GPRS];
} rt_sigframe32;

+static inline void sigset_to_sigset32(unsigned long *set64,
+ compat_sigset_word *set32)
+{
+ set32[0] = (compat_sigset_word) set64[0];
+ set32[1] = (compat_sigset_word)(set64[0] >> 32);
+}
+
+static inline void sigset32_to_sigset(compat_sigset_word *set32,
+ unsigned long *set64)
+{
+ set64[0] = (unsigned long) set32[0] | ((unsigned long) set32[1] << 32);
+}
+
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err;
@@ -361,12 +374,14 @@ asmlinkage long sys32_sigreturn(void)
{
struct pt_regs *regs = task_pt_regs(current);
sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;

if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
- if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
+ if (__copy_from_user(&cset.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
sigdelsetmask(&set, ~_BLOCKABLE);
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->sregs))
@@ -383,6 +398,7 @@ asmlinkage long sys32_rt_sigreturn(void)
{
struct pt_regs *regs = task_pt_regs(current);
rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
+ compat_sigset_t cset;
sigset_t set;
stack_t st;
__u32 ss_sp;
@@ -391,8 +407,9 @@ asmlinkage long sys32_rt_sigreturn(void)

if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
- if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ if (__copy_from_user(&cset, &frame->uc.uc_sigmask, sizeof(cset)))
goto badframe;
+ sigset32_to_sigset(cset.sig, set.sig);
sigdelsetmask(&set, ~_BLOCKABLE);
set_current_blocked(&set);
if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
@@ -464,13 +481,16 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs * regs)
{
sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(sigframe32));
+ compat_sigset_t cset;
+
if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
goto give_sigsegv;

if (frame == (void __user *) -1UL)
goto give_sigsegv;

- if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
+ sigset_to_sigset32(set->sig, cset.sig);
+ if (__copy_to_user(&frame->sc.oldmask, &cset.sig, _SIGMASK_COPY_SIZE32))
goto give_sigsegv;

if (save_sigregs32(regs, &frame->sregs))
@@ -524,6 +544,7 @@ give_sigsegv:
static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs * regs)
{
+ compat_sigset_t cset;
int err = 0;
rt_sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(rt_sigframe32));
if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
@@ -536,6 +557,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
goto give_sigsegv;

/* Create the ucontext. */
+ sigset_to_sigset32(set->sig, cset.sig);
err |= __put_user(UC_EXTENDED, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
@@ -544,7 +566,7 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
err |= save_sigregs_gprs_high(regs, frame->gprs_high);
- err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ err |= __copy_to_user(&frame->uc.uc_sigmask, &cset, sizeof(cset));
if (err)
goto give_sigsegv;

diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 39ca301..3a8c2af 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,18 +28,12 @@
* Must preserve %o5 between VISEntryHalf and VISExitHalf */

#define VISEntryHalf \
- rd %fprs, %o5; \
- andcc %o5, FPRS_FEF, %g0; \
- be,pt %icc, 297f; \
- sethi %hi(298f), %g7; \
- sethi %hi(VISenterhalf), %g1; \
- jmpl %g1 + %lo(VISenterhalf), %g0; \
- or %g7, %lo(298f), %g7; \
- clr %o5; \
-297: wr %o5, FPRS_FEF, %fprs; \
-298:
+ VISEntry

#define VISExitHalf \
+ VISExit
+
+#define VISExitHalfFast \
wr %o5, 0, %fprs;

#ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9..a063d84 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3

stx %g3, [%g6 + TI_GSR]
2: add %g6, %g1, %g3
- cmp %o5, FPRS_DU
- be,pn %icc, 6f
- sll %g1, 3, %g1
+ mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
+ sll %g1, 3, %g1
stb %o5, [%g3 + TI_FPSAVED]
rd %gsr, %g2
add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
.align 32
80: jmpl %g7 + %g0, %g0
nop
-
-6: ldub [%g3 + TI_FPSAVED], %o5
- or %o5, FPRS_DU, %o5
- add %g6, TI_FPREGS+0x80, %g2
- stb %o5, [%g3 + TI_FPSAVED]
-
- sll %g1, 5, %g1
- add %g6, TI_FPREGS+0xc0, %g3
- wr %g0, FPRS_FEF, %fprs
- membar #Sync
- stda %f32, [%g2 + %g1] ASI_BLK_P
- stda %f48, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 80f
- nop
-
- .align 32
-80: jmpl %g7 + %g0, %g0
- nop
-
- .align 32
-VISenterhalf:
- ldub [%g6 + TI_FPDEPTH], %g1
- brnz,a,pn %g1, 1f
- cmp %g1, 1
- stb %g0, [%g6 + TI_FPSAVED]
- stx %fsr, [%g6 + TI_XFSR]
- clr %o5
- jmpl %g7 + %g0, %g0
- wr %g0, FPRS_FEF, %fprs
-
-1: bne,pn %icc, 2f
- srl %g1, 1, %g1
- ba,pt %xcc, vis1
- sub %g7, 8, %g7
-2: addcc %g6, %g1, %g3
- sll %g1, 3, %g1
- andn %o5, FPRS_DU, %g2
- stb %g2, [%g3 + TI_FPSAVED]
-
- rd %gsr, %g2
- add %g6, %g1, %g3
- stx %g2, [%g3 + TI_GSR]
- add %g6, %g1, %g2
- stx %fsr, [%g2 + TI_XFSR]
- sll %g1, 5, %g1
-3: andcc %o5, FPRS_DL, %g0
- be,pn %icc, 4f
- add %g6, TI_FPREGS, %g2
-
- add %g6, TI_FPREGS+0x40, %g3
- membar #Sync
- stda %f0, [%g2 + %g1] ASI_BLK_P
- stda %f16, [%g3 + %g1] ASI_BLK_P
- membar #Sync
- ba,pt %xcc, 4f
- nop
-
- .align 32
-4: and %o5, FPRS_DU, %o5
- jmpl %g7 + %g0, %g0
- wr %o5, FPRS_FEF, %fprs
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index f781251..4827b23 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -291,6 +291,7 @@ static struct ahash_alg ghash_async_alg = {
.cra_name = "ghash",
.cra_driver_name = "ghash-clmulni",
.cra_priority = 400,
+ .cra_ctxsize = sizeof(struct ghash_async_ctx),
.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC,
.cra_blocksize = GHASH_BLOCK_SIZE,
.cra_type = &crypto_ahash_type,
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 3225868..382ce8a 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -277,21 +277,6 @@ static inline void clear_LDT(void)
set_ldt(NULL, 0);
}

-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
- set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
- preempt_disable();
- load_LDT_nolock(pc);
- preempt_enable();
-}
-
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 5f55e69..926f672 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
* we put the segment information here.
*/
typedef struct {
- void *ldt;
- int size;
+ struct ldt_struct *ldt;

#ifdef CONFIG_X86_64
/* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 6902152..ce4ea94 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -16,6 +16,51 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
#endif /* !CONFIG_PARAVIRT */

/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+ /*
+ * Xen requires page-aligned LDTs with special permissions. This is
+ * needed to prevent us from installing evil descriptors such as
+ * call gates. On native, we could merge the ldt_struct and LDT
+ * allocations, but it's not worth trying to optimize.
+ */
+ struct desc_struct *entries;
+ int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+ struct ldt_struct *ldt;
+
+ /* smp_read_barrier_depends synchronizes with barrier in install_ldt */
+ ldt = ACCESS_ONCE(mm->context.ldt);
+ smp_read_barrier_depends();
+
+ /*
+ * Any change to mm->context.ldt is followed by an IPI to all
+ * CPUs with the mm active. The LDT will not be freed until
+ * after the IPI is handled by all such CPUs. This means that,
+ * if the ldt_struct changes before we return, the values we see
+ * will be safe, and the new values will be loaded before we run
+ * any user code.
+ *
+ * NB: don't try to convert this to use RCU without extreme care.
+ * We would still need IRQs off, because we don't want to change
+ * the local LDT after an IPI loaded a newer value than the one
+ * that we can see.
+ */
+
+ if (unlikely(ldt))
+ set_ldt(ldt->entries, ldt->size);
+ else
+ clear_LDT();
+
+ DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
+/*
* Used for LDT copy/destruction.
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -52,7 +97,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* load the LDT, if the LDT is different:
*/
if (unlikely(prev->context.ldt != next->context.ldt))
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
#ifdef CONFIG_SMP
else {
@@ -65,7 +110,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
* to make sure to use no freed page tables.
*/
load_cr3(next->pgd);
- load_LDT_nolock(&next->context);
+ load_mm_ldt(next);
}
}
#endif
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 5538b13..3d48aa4 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -159,6 +159,7 @@
/* C1E active bits in int pending message */
#define K8_INTP_C1E_ACTIVE_MASK 0x18000000
#define MSR_K8_TSEG_ADDR 0xc0010112
+#define MSR_K8_TSEG_MASK 0xc0010113
#define K8_MTRRFIXRANGE_DRAM_ENABLE 0x00040000 /* MtrrFixDramEn bit */
#define K8_MTRRFIXRANGE_DRAM_MODIFY 0x00080000 /* MtrrFixDramModEn bit */
#define K8_MTRR_RDMEM_WRMEM_MASK 0x18181818 /* Mask: RdMem|WrMem */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 6284d6d..0cbdebf 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1225,7 +1225,7 @@ void __cpuinit cpu_init(void)
load_sp0(t, &current->thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);

clear_all_debug_regs();
dbg_restore_debug_regs();
@@ -1273,7 +1273,7 @@ void __cpuinit cpu_init(void)
load_sp0(t, thread);
set_tss_desc(cpu, t);
load_TR_desc();
- load_LDT(&init_mm.context);
+ load_mm_ldt(&init_mm);

t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 8d15c69..f6daf3c 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -1504,7 +1504,18 @@ END(error_exit)
/* runs on exception stack */
ENTRY(nmi)
INTR_FRAME
+ /*
+ * Fix up the exception frame if we're on Xen.
+ * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
+ * one value to the stack on native, so it may clobber the rdx
+ * scratch slot, but it won't clobber any of the important
+ * slots past it.
+ *
+ * Xen is a different story, because the Xen frame itself overlaps
+ * the "NMI executing" variable.
+ */
PARAVIRT_ADJUST_EXCEPTION_FRAME
+
pushq_cfi $-1
subq $ORIG_RAX-R15, %rsp
CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index 0a8e65e..1dd3230 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/smp.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

@@ -21,82 +22,87 @@
#include <asm/mmu_context.h>
#include <asm/syscalls.h>

-#ifdef CONFIG_SMP
+/* context.lock is held for us, so we don't need any locking. */
static void flush_ldt(void *current_mm)
{
- if (current->active_mm == current_mm)
- load_LDT(&current->active_mm->context);
+ mm_context_t *pc;
+
+ if (current->active_mm != current_mm)
+ return;
+
+ pc = &current->active_mm->context;
+ set_ldt(pc->ldt->entries, pc->ldt->size);
}
-#endif

-static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
+/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
+static struct ldt_struct *alloc_ldt_struct(int size)
{
- void *oldldt, *newldt;
- int oldsize;
-
- if (mincount <= pc->size)
- return 0;
- oldsize = pc->size;
- mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) &
- (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1));
- if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE)
- newldt = vmalloc(mincount * LDT_ENTRY_SIZE);
+ struct ldt_struct *new_ldt;
+ int alloc_size;
+
+ if (size > LDT_ENTRIES)
+ return NULL;
+
+ new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
+ if (!new_ldt)
+ return NULL;
+
+ BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
+ alloc_size = size * LDT_ENTRY_SIZE;
+
+ /*
+ * Xen is very picky: it requires a page-aligned LDT that has no
+ * trailing nonzero bytes in any page that contains LDT descriptors.
+ * Keep it simple: zero the whole allocation and never allocate less
+ * than PAGE_SIZE.
+ */
+ if (alloc_size > PAGE_SIZE)
+ new_ldt->entries = vzalloc(alloc_size);
else
- newldt = (void *)__get_free_page(GFP_KERNEL);
-
- if (!newldt)
- return -ENOMEM;
+ new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);

- if (oldsize)
- memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE);
- oldldt = pc->ldt;
- memset(newldt + oldsize * LDT_ENTRY_SIZE, 0,
- (mincount - oldsize) * LDT_ENTRY_SIZE);
+ if (!new_ldt->entries) {
+ kfree(new_ldt);
+ return NULL;
+ }

- paravirt_alloc_ldt(newldt, mincount);
+ new_ldt->size = size;
+ return new_ldt;
+}

-#ifdef CONFIG_X86_64
- /* CHECKME: Do we really need this ? */
- wmb();
-#endif
- pc->ldt = newldt;
- wmb();
- pc->size = mincount;
- wmb();
-
- if (reload) {
-#ifdef CONFIG_SMP
- preempt_disable();
- load_LDT(pc);
- if (!cpumask_equal(mm_cpumask(current->mm),
- cpumask_of(smp_processor_id())))
- smp_call_function(flush_ldt, current->mm, 1);
- preempt_enable();
-#else
- load_LDT(pc);
-#endif
- }
- if (oldsize) {
- paravirt_free_ldt(oldldt, oldsize);
- if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(oldldt);
- else
- put_page(virt_to_page(oldldt));
- }
- return 0;
+/* After calling this, the LDT is immutable. */
+static void finalize_ldt_struct(struct ldt_struct *ldt)
+{
+ paravirt_alloc_ldt(ldt->entries, ldt->size);
}

-static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
+/* context.lock is held */
+static void install_ldt(struct mm_struct *current_mm,
+ struct ldt_struct *ldt)
{
- int err = alloc_ldt(new, old->size, 0);
- int i;
+ /* Synchronizes with smp_read_barrier_depends in load_mm_ldt. */
+ barrier();
+ ACCESS_ONCE(current_mm->context.ldt) = ldt;
+
+ /* Activate the LDT for all CPUs using current_mm. */
+ smp_call_function_many(mm_cpumask(current_mm), flush_ldt, current_mm,
+ true);
+ local_irq_disable();
+ flush_ldt(current_mm);
+ local_irq_enable();
+}

- if (err < 0)
- return err;
+static void free_ldt_struct(struct ldt_struct *ldt)
+{
+ if (likely(!ldt))
+ return;

- for (i = 0; i < old->size; i++)
- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
- return 0;
+ paravirt_free_ldt(ldt->entries, ldt->size);
+ if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
+ vfree(ldt->entries);
+ else
+ kfree(ldt->entries);
+ kfree(ldt);
}

/*
@@ -105,17 +111,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
*/
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
+ struct ldt_struct *new_ldt;
struct mm_struct *old_mm;
int retval = 0;

mutex_init(&mm->context.lock);
- mm->context.size = 0;
old_mm = current->mm;
- if (old_mm && old_mm->context.size > 0) {
- mutex_lock(&old_mm->context.lock);
- retval = copy_ldt(&mm->context, &old_mm->context);
- mutex_unlock(&old_mm->context.lock);
+ if (!old_mm) {
+ mm->context.ldt = NULL;
+ return 0;
+ }
+
+ mutex_lock(&old_mm->context.lock);
+ if (!old_mm->context.ldt) {
+ mm->context.ldt = NULL;
+ goto out_unlock;
}
+
+ new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
+ if (!new_ldt) {
+ retval = -ENOMEM;
+ goto out_unlock;
+ }
+
+ memcpy(new_ldt->entries, old_mm->context.ldt->entries,
+ new_ldt->size * LDT_ENTRY_SIZE);
+ finalize_ldt_struct(new_ldt);
+
+ mm->context.ldt = new_ldt;
+
+out_unlock:
+ mutex_unlock(&old_mm->context.lock);
return retval;
}

@@ -126,53 +152,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
*/
void destroy_context(struct mm_struct *mm)
{
- if (mm->context.size) {
-#ifdef CONFIG_X86_32
- /* CHECKME: Can this ever happen ? */
- if (mm == current->active_mm)
- clear_LDT();
-#endif
- paravirt_free_ldt(mm->context.ldt, mm->context.size);
- if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
- vfree(mm->context.ldt);
- else
- put_page(virt_to_page(mm->context.ldt));
- mm->context.size = 0;
- }
+ free_ldt_struct(mm->context.ldt);
+ mm->context.ldt = NULL;
}

static int read_ldt(void __user *ptr, unsigned long bytecount)
{
- int err;
+ int retval;
unsigned long size;
struct mm_struct *mm = current->mm;

- if (!mm->context.size)
- return 0;
+ mutex_lock(&mm->context.lock);
+
+ if (!mm->context.ldt) {
+ retval = 0;
+ goto out_unlock;
+ }
+
if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;

- mutex_lock(&mm->context.lock);
- size = mm->context.size * LDT_ENTRY_SIZE;
+ size = mm->context.ldt->size * LDT_ENTRY_SIZE;
if (size > bytecount)
size = bytecount;

- err = 0;
- if (copy_to_user(ptr, mm->context.ldt, size))
- err = -EFAULT;
- mutex_unlock(&mm->context.lock);
- if (err < 0)
- goto error_return;
+ if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
+ retval = -EFAULT;
+ goto out_unlock;
+ }
+
if (size != bytecount) {
- /* zero-fill the rest */
- if (clear_user(ptr + size, bytecount - size) != 0) {
- err = -EFAULT;
- goto error_return;
+ /* Zero-fill the rest and pretend we read bytecount bytes. */
+ if (clear_user(ptr + size, bytecount - size)) {
+ retval = -EFAULT;
+ goto out_unlock;
}
}
- return bytecount;
-error_return:
- return err;
+ retval = bytecount;
+
+out_unlock:
+ mutex_unlock(&mm->context.lock);
+ return retval;
}

static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -196,6 +216,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
struct desc_struct ldt;
int error;
struct user_desc ldt_info;
+ int oldsize, newsize;
+ struct ldt_struct *new_ldt, *old_ldt;

error = -EINVAL;
if (bytecount != sizeof(ldt_info))
@@ -214,34 +236,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
goto out;
}

- mutex_lock(&mm->context.lock);
- if (ldt_info.entry_number >= mm->context.size) {
- error = alloc_ldt(&current->mm->context,
- ldt_info.entry_number + 1, 1);
- if (error < 0)
- goto out_unlock;
- }
-
- /* Allow LDTs to be cleared by the user. */
- if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
- if (oldmode || LDT_empty(&ldt_info)) {
- memset(&ldt, 0, sizeof(ldt));
- goto install;
+ if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
+ LDT_empty(&ldt_info)) {
+ /* The user wants to clear the entry. */
+ memset(&ldt, 0, sizeof(ldt));
+ } else {
+ if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
+ error = -EINVAL;
+ goto out;
}
+
+ fill_ldt(&ldt, &ldt_info);
+ if (oldmode)
+ ldt.avl = 0;
}

- if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
- error = -EINVAL;
+ mutex_lock(&mm->context.lock);
+
+ old_ldt = mm->context.ldt;
+ oldsize = old_ldt ? old_ldt->size : 0;
+ newsize = max((int)(ldt_info.entry_number + 1), oldsize);
+
+ error = -ENOMEM;
+ new_ldt = alloc_ldt_struct(newsize);
+ if (!new_ldt)
goto out_unlock;
- }

- fill_ldt(&ldt, &ldt_info);
- if (oldmode)
- ldt.avl = 0;
+ if (old_ldt)
+ memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
+ new_ldt->entries[ldt_info.entry_number] = ldt;
+ finalize_ldt_struct(new_ldt);

- /* Install the new entry ... */
-install:
- write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
+ install_ldt(mm, new_ldt);
+ free_ldt_struct(old_ldt);
error = 0;

out_unlock:
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 84c938f..af5b675 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -38,10 +38,18 @@
#include <asm/tlbflush.h>
#include <asm/timer.h>

-/* nop stub */
-void _paravirt_nop(void)
-{
-}
+/*
+ * nop stub, which must not clobber anything *including the stack* to
+ * avoid confusing the entry prologues.
+ */
+extern void _paravirt_nop(void);
+asm (".pushsection .entry.text, \"ax\"\n"
+ ".global _paravirt_nop\n"
+ "_paravirt_nop:\n\t"
+ "ret\n\t"
+ ".size _paravirt_nop, . - _paravirt_nop\n\t"
+ ".type _paravirt_nop, @function\n\t"
+ ".popsection");

/* identity function, which can be inlined */
u32 _paravirt_ident_32(u32 x)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index e361095..7e94abd 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -218,11 +218,11 @@ void __show_regs(struct pt_regs *regs, int all)
void release_thread(struct task_struct *dead_task)
{
if (dead_task->mm) {
- if (dead_task->mm->context.size) {
+ if (dead_task->mm->context.ldt) {
printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
dead_task->comm,
dead_task->mm->context.ldt,
- dead_task->mm->context.size);
+ dead_task->mm->context.ldt->size);
BUG();
}
}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index d4f278e..bfe6a14 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
#include <linux/mm.h>
#include <linux/ptrace.h>
#include <asm/desc.h>
+#include <asm/mmu_context.h>

unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
struct desc_struct *desc;
unsigned long base;

- seg &= ~7UL;
+ seg >>= 3;

mutex_lock(&child->mm->context.lock);
- if (unlikely((seg >> 3) >= child->mm->context.size))
+ if (unlikely(!child->mm->context.ldt ||
+ seg >= child->mm->context.ldt->size))
addr = -1L; /* bogus selector, access would fault */
else {
- desc = child->mm->context.ldt + seg;
+ desc = &child->mm->context.ldt->entries[seg];
base = get_desc_base(desc);

/* 16-bit code segment? */
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 9f3706e..e8177b1 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -18,6 +18,7 @@
#include <asm/hypervisor.h>
#include <asm/nmi.h>
#include <asm/x86_init.h>
+#include <asm/geode.h>

unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);
@@ -802,15 +803,17 @@ EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static void __init check_system_tsc_reliable(void)
{
-#ifdef CONFIG_MGEODE_LX
- /* RTSC counts during suspend */
+#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
+ if (is_geode_lx()) {
+ /* RTSC counts during suspend */
#define RTSC_SUSP 0x100
- unsigned long res_low, res_high;
+ unsigned long res_low, res_high;

- rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
- /* Geode_LX - the OLPC CPU has a very reliable TSC */
- if (res_low & RTSC_SUSP)
- tsc_clocksource_reliable = 1;
+ rdmsr_safe(MSR_GEODE_BUSCONT_CONF0, &res_low, &res_high);
+ /* Geode_LX - the OLPC CPU has a very reliable TSC */
+ if (res_low & RTSC_SUSP)
+ tsc_clocksource_reliable = 1;
+ }
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4a949c7..cac7b2b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -326,12 +326,6 @@ static u64 __get_spte_lockless(u64 *sptep)
{
return ACCESS_ONCE(*sptep);
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- /* It is valid if the spte is zapped. */
- return spte == 0ull;
-}
#else
union split_spte {
struct {
@@ -436,23 +430,6 @@ retry:

return spte.spte;
}
-
-static bool __check_direct_spte_mmio_pf(u64 spte)
-{
- union split_spte sspte = (union split_spte)spte;
- u32 high_mmio_mask = shadow_mmio_mask >> 32;
-
- /* It is valid if the spte is zapped. */
- if (spte == 0ull)
- return true;
-
- /* It is valid if the spte is being zapped. */
- if (sspte.spte_low == 0ull &&
- (sspte.spte_high & high_mmio_mask) == high_mmio_mask)
- return true;
-
- return false;
-}
#endif

static bool spte_has_volatile_bits(u64 spte)
@@ -2895,21 +2872,6 @@ static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
return vcpu_match_mmio_gva(vcpu, addr);
}

-
-/*
- * On direct hosts, the last spte is only allows two states
- * for mmio page fault:
- * - It is the mmio spte
- * - It is zapped or it is being zapped.
- *
- * This function completely checks the spte when the last spte
- * is not the mmio spte.
- */
-static bool check_direct_spte_mmio_pf(u64 spte)
-{
- return __check_direct_spte_mmio_pf(spte);
-}
-
static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
{
struct kvm_shadow_walk_iterator iterator;
@@ -2951,13 +2913,6 @@ int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct)
}

/*
- * It's ok if the gva is remapped by other cpus on shadow guest,
- * it's a BUG if the gfn is not a mmio page.
- */
- if (direct && !check_direct_spte_mmio_pf(spte))
- return -1;
-
- /*
* If the page table is zapped by other cpus, let CPU fault again on
* the address.
*/
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index bb179cc..0e3289b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1885,6 +1885,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
case MSR_K8_SYSCFG:
+ case MSR_K8_TSEG_ADDR:
+ case MSR_K8_TSEG_MASK:
case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
case MSR_P6_PERFCTR0:
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index 7718541..dab1f8b 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -28,7 +28,6 @@
#include <linux/regset.h>

#include <asm/uaccess.h>
-#include <asm/desc.h>
#include <asm/user.h>
#include <asm/i387.h>

@@ -184,7 +183,7 @@ void math_emulate(struct math_emu_info *info)
math_abort(FPU_info, SIGILL);
}

- code_descriptor = LDT_DESCRIPTOR(FPU_CS);
+ code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
if (SEG_D_SIZE(code_descriptor)) {
/* The above test may be wrong, the book is not clear */
/* Segmented 32 bit protected mode */
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 2c61441..d342fce 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
#include <linux/kernel.h>
#include <linux/mm.h>

-/* s is always from a cpu register, and the cpu does bounds checking
- * during register load --> no further bounds checks needed */
-#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3])
+#include <asm/desc.h>
+#include <asm/mmu_context.h>
+
+static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
+{
+ static struct desc_struct zero_desc;
+ struct desc_struct ret = zero_desc;
+
+#ifdef CONFIG_MODIFY_LDT_SYSCALL
+ seg >>= 3;
+ mutex_lock(&current->mm->context.lock);
+ if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
+ ret = current->mm->context.ldt->entries[seg];
+ mutex_unlock(&current->mm->context.lock);
+#endif
+ return ret;
+}
+
#define SEG_D_SIZE(x) ((x).b & (3 << 21))
#define SEG_G_BIT(x) ((x).b & (1 << 23))
#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99..8300db7 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
#include <linux/stddef.h>

#include <asm/uaccess.h>
-#include <asm/desc.h>

#include "fpu_system.h"
#include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
addr->selector = PM_REG_(segment);
}

- descriptor = LDT_DESCRIPTOR(PM_REG_(segment));
+ descriptor = FPU_get_ldt_descriptor(addr->selector);
base_address = SEG_BASE_ADDR(descriptor);
address = base_address + offset;
limit = base_address
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 43c9f6a..bc21909 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -21,6 +21,7 @@
#include <asm/xcr.h>
#include <asm/suspend.h>
#include <asm/debugreg.h>
+#include <asm/mmu_context.h>

#ifdef CONFIG_X86_32
static struct saved_context saved_context;
@@ -147,7 +148,7 @@ static void fix_processor_context(void)
syscall_init(); /* This sets MSR_*STAR and related */
#endif
load_TR_desc(); /* This does ltr */
- load_LDT(&current->active_mm->context); /* This does lldt */
+ load_mm_ldt(current->active_mm); /* This does lldt */
}

/**
diff --git a/drivers/auxdisplay/ks0108.c b/drivers/auxdisplay/ks0108.c
index 5b93852..0d75285 100644
--- a/drivers/auxdisplay/ks0108.c
+++ b/drivers/auxdisplay/ks0108.c
@@ -139,6 +139,7 @@ static int __init ks0108_init(void)

ks0108_pardevice = parport_register_device(ks0108_parport, KS0108_NAME,
NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL);
+ parport_put_port(ks0108_parport);
if (ks0108_pardevice == NULL) {
printk(KERN_ERR KS0108_NAME ": ERROR: "
"parport didn't register new device\n");
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 65cd748..cc956a4 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -254,10 +254,10 @@ void * devres_get(struct device *dev, void *new_res,
if (!dr) {
add_dr(dev, &new_dr->node);
dr = new_dr;
- new_dr = NULL;
+ new_res = NULL;
}
spin_unlock_irqrestore(&dev->devres_lock, flags);
- devres_free(new_dr);
+ devres_free(new_res);

return dr->data;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 7a24895..ba8f361 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -311,9 +311,7 @@ int platform_device_add(struct platform_device *pdev)
failed:
while (--i >= 0) {
struct resource *r = &pdev->resource[i];
- unsigned long type = resource_type(r);
-
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ if (r->parent)
release_resource(r);
}

@@ -338,9 +336,7 @@ void platform_device_del(struct platform_device *pdev)

for (i = 0; i < pdev->num_resources; i++) {
struct resource *r = &pdev->resource[i];
- unsigned long type = resource_type(r);
-
- if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+ if (r->parent)
release_resource(r);
}
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 4c20c5b..8e9a8f0 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -914,7 +914,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
crypt->mode |= NPE_OP_NOT_IN_PLACE;
/* This was never tested by Intel
* for more than one dst buffer, I think. */
- BUG_ON(req->dst->length < nbytes);
req_ctx->dst = NULL;
if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
flags, DMA_FROM_DEVICE))
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a0b69ae0..9b9f447 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -950,13 +950,13 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
u32 old_write = obj->base.write_domain;


+ obj->dirty = 1; /* be paranoid */
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

i915_gem_object_move_to_active(obj, ring, seqno);
if (obj->base.write_domain) {
- obj->dirty = 1;
obj->pending_gpu_write = true;
list_move_tail(&obj->gpu_write_list,
&ring->gpu_write_list);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 21e689d..87a677e 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3399,6 +3399,14 @@ void radeon_combios_asic_init(struct drm_device *dev)
rdev->pdev->subsystem_device == 0x30ae)
return;

+ /* quirk for rs4xx HP Compaq dc5750 Small Form Factor to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS480 &&
+ rdev->pdev->subsystem_vendor == 0x103c &&
+ rdev->pdev->subsystem_device == 0x280a)
+ return;
+
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 683cede1..6303fc8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -82,6 +82,11 @@ void radeon_connector_hotplug(struct drm_connector *connector)
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
} else if (radeon_dp_needs_link_train(radeon_connector)) {
+ /* Don't try to start link training before we
+ * have the dpcd */
+ if (!radeon_dp_getdpcd(radeon_connector))
+ return;
+
/* set it to OFF so that drm_helper_connector_dpms()
* won't return immediately since the current state
* is ON at this point.
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 5bcb2af..228af18 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -69,7 +69,7 @@
*/

struct ib_uverbs_device {
- struct kref ref;
+ atomic_t refcount;
int num_comp_vectors;
struct completion comp;
struct device *dev;
@@ -78,6 +78,7 @@ struct ib_uverbs_device {
struct cdev cdev;
struct rb_root xrcd_tree;
struct mutex xrcd_tree_mutex;
+ struct kobject kobj;
};

struct ib_uverbs_event_file {
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index a8445b8..3be21aa 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1979,6 +1979,12 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
next->send_flags = user_wr->send_flags;

if (is_ud) {
+ if (next->opcode != IB_WR_SEND &&
+ next->opcode != IB_WR_SEND_WITH_IMM) {
+ ret = -EINVAL;
+ goto out_put;
+ }
+
next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
file->ucontext);
if (!next->wr.ud.ah) {
@@ -2015,9 +2021,11 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
user_wr->wr.atomic.compare_add;
next->wr.atomic.swap = user_wr->wr.atomic.swap;
next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
+ case IB_WR_SEND:
break;
default:
- break;
+ ret = -EINVAL;
+ goto out_put;
}
}

diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 9379b97..f07c6e3 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -117,14 +117,18 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device);

-static void ib_uverbs_release_dev(struct kref *ref)
+static void ib_uverbs_release_dev(struct kobject *kobj)
{
struct ib_uverbs_device *dev =
- container_of(ref, struct ib_uverbs_device, ref);
+ container_of(kobj, struct ib_uverbs_device, kobj);

- complete(&dev->comp);
+ kfree(dev);
}

+static struct kobj_type ib_uverbs_dev_ktype = {
+ .release = ib_uverbs_release_dev,
+};
+
static void ib_uverbs_release_event_file(struct kref *ref)
{
struct ib_uverbs_event_file *file =
@@ -273,13 +277,19 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
return context->device->dealloc_ucontext(context);
}

+static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
+{
+ complete(&dev->comp);
+}
+
static void ib_uverbs_release_file(struct kref *ref)
{
struct ib_uverbs_file *file =
container_of(ref, struct ib_uverbs_file, ref);

module_put(file->device->ib_dev->owner);
- kref_put(&file->device->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&file->device->refcount))
+ ib_uverbs_comp_dev(file->device);

kfree(file);
}
@@ -621,9 +631,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
int ret;

dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
- if (dev)
- kref_get(&dev->ref);
- else
+ if (!atomic_inc_not_zero(&dev->refcount))
return -ENXIO;

if (!try_module_get(dev->ib_dev->owner)) {
@@ -644,6 +652,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
mutex_init(&file->mutex);

filp->private_data = file;
+ kobject_get(&dev->kobj);

return nonseekable_open(inode, filp);

@@ -651,13 +660,16 @@ err_module:
module_put(dev->ib_dev->owner);

err:
- kref_put(&dev->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&dev->refcount))
+ ib_uverbs_comp_dev(dev);
+
return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
struct ib_uverbs_file *file = filp->private_data;
+ struct ib_uverbs_device *dev = file->device;

ib_uverbs_cleanup_ucontext(file, file->ucontext);

@@ -665,6 +677,7 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
kref_put(&file->async_file->ref, ib_uverbs_release_event_file);

kref_put(&file->ref, ib_uverbs_release_file);
+ kobject_put(&dev->kobj);

return 0;
}
@@ -760,10 +773,11 @@ static void ib_uverbs_add_one(struct ib_device *device)
if (!uverbs_dev)
return;

- kref_init(&uverbs_dev->ref);
+ atomic_set(&uverbs_dev->refcount, 1);
init_completion(&uverbs_dev->comp);
uverbs_dev->xrcd_tree = RB_ROOT;
mutex_init(&uverbs_dev->xrcd_tree_mutex);
+ kobject_init(&uverbs_dev->kobj, &ib_uverbs_dev_ktype);

spin_lock(&map_lock);
devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -790,6 +804,7 @@ static void ib_uverbs_add_one(struct ib_device *device)
cdev_init(&uverbs_dev->cdev, NULL);
uverbs_dev->cdev.owner = THIS_MODULE;
uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
+ uverbs_dev->cdev.kobj.parent = &uverbs_dev->kobj;
kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
if (cdev_add(&uverbs_dev->cdev, base, 1))
goto err_cdev;
@@ -820,9 +835,10 @@ err_cdev:
clear_bit(devnum, overflow_map);

err:
- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&uverbs_dev->refcount))
+ ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
- kfree(uverbs_dev);
+ kobject_put(&uverbs_dev->kobj);
return;
}

@@ -842,9 +858,10 @@ static void ib_uverbs_remove_one(struct ib_device *device)
else
clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);

- kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
+ if (atomic_dec_and_test(&uverbs_dev->refcount))
+ ib_uverbs_comp_dev(uverbs_dev);
wait_for_completion(&uverbs_dev->comp);
- kfree(uverbs_dev);
+ kobject_put(&uverbs_dev->kobj);
}

static char *uverbs_devnode(struct device *dev, mode_t *mode)
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 4b8f9c4..5426ccf 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -169,9 +169,13 @@ int mlx4_ib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
enum rdma_link_layer ll;

memset(ah_attr, 0, sizeof *ah_attr);
- ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
ah_attr->port_num = be32_to_cpu(ah->av.ib.port_pd) >> 24;
ll = rdma_port_get_link_layer(ibah->device, ah_attr->port_num);
+ if (ll == IB_LINK_LAYER_ETHERNET)
+ ah_attr->sl = be32_to_cpu(ah->av.eth.sl_tclass_flowlabel) >> 29;
+ else
+ ah_attr->sl = be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 28;
+
ah_attr->dlid = ll == IB_LINK_LAYER_INFINIBAND ? be16_to_cpu(ah->av.ib.dlid) : 0;
if (ah->av.ib.stat_rate)
ah_attr->static_rate = ah->av.ib.stat_rate - MLX4_STAT_RATE_OFFSET;
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 4c2b079..c0b72a6 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1421,6 +1421,10 @@ extern struct mutex qib_mutex;
qib_get_unit_name((dd)->unit), ##__VA_ARGS__); \
} while (0)

+#define qib_dev_warn(dd, fmt, ...) \
+ dev_warn(&(dd)->pcidev->dev, "%s: " fmt, \
+ qib_get_unit_name((dd)->unit), ##__VA_ARGS__)
+
#define qib_dev_porterr(dd, port, fmt, ...) \
do { \
dev_err(&(dd)->pcidev->dev, "%s: IB%u:%u " fmt, \
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 8fd19a4..ca6e6cf 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -69,6 +69,10 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr)
* unrestricted LKEY.
*/
rkt->gen++;
+ /*
+ * bits are capped in qib_verbs.c to insure enough bits
+ * for generation number
+ */
mr->lkey = (r << (32 - ib_qib_lkey_table_size)) |
((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen)
<< 8);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index a894762..c51a6f9 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -40,6 +40,7 @@
#include <linux/rculist.h>
#include <linux/mm.h>
#include <linux/random.h>
+#include <linux/vmalloc.h>

#include "qib.h"
#include "qib_common.h"
@@ -2035,10 +2036,16 @@ int qib_register_ib_device(struct qib_devdata *dd)
* the LKEY). The remaining bits act as a generation number or tag.
*/
spin_lock_init(&dev->lk_table.lock);
+ /* insure generation is at least 4 bits see keys.c */
+ if (ib_qib_lkey_table_size > MAX_LKEY_TABLE_BITS) {
+ qib_dev_warn(dd, "lkey bits %u too large, reduced to %u\n",
+ ib_qib_lkey_table_size, MAX_LKEY_TABLE_BITS);
+ ib_qib_lkey_table_size = MAX_LKEY_TABLE_BITS;
+ }
dev->lk_table.max = 1 << ib_qib_lkey_table_size;
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
dev->lk_table.table = (struct qib_mregion **)
- __get_free_pages(GFP_KERNEL, get_order(lk_tab_size));
+ vmalloc(lk_tab_size);
if (dev->lk_table.table == NULL) {
ret = -ENOMEM;
goto err_lk;
@@ -2208,7 +2215,7 @@ err_tx:
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
err_hdrs:
- free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
err_lk:
kfree(dev->qp_table);
err_qpt:
@@ -2262,7 +2269,6 @@ void qib_unregister_ib_device(struct qib_devdata *dd)
sizeof(struct qib_pio_header),
dev->pio_hdrs, dev->pio_hdrs_phys);
lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table);
- free_pages((unsigned long) dev->lk_table.table,
- get_order(lk_tab_size));
+ vfree(dev->lk_table.table);
kfree(dev->qp_table);
}
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 0c19ef0..66f7f62 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -622,6 +622,8 @@ struct qib_qpn_table {
struct qpn_map map[QPNMAP_ENTRIES];
};

+#define MAX_LKEY_TABLE_BITS 23
+
struct qib_lkey_table {
spinlock_t lock; /* protect changes in this struct */
u32 next; /* next unused index (speeds search) */
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 4cf2534..f55a3cf 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -126,19 +126,14 @@ static int evdev_flush(struct file *file, fl_owner_t id)
{
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
- int retval;

- retval = mutex_lock_interruptible(&evdev->mutex);
- if (retval)
- return retval;
+ mutex_lock(&evdev->mutex);

- if (!evdev->exist)
- retval = -ENODEV;
- else
- retval = input_flush_device(&evdev->handle, file);
+ if (evdev->exist)
+ input_flush_device(&evdev->handle, file);

mutex_unlock(&evdev->mutex);
- return retval;
+ return 0;
}

static void evdev_free(struct device *dev)
diff --git a/drivers/macintosh/windfarm_core.c b/drivers/macintosh/windfarm_core.c
index ce88979..004fa10 100644
--- a/drivers/macintosh/windfarm_core.c
+++ b/drivers/macintosh/windfarm_core.c
@@ -421,7 +421,7 @@ int wf_unregister_client(struct notifier_block *nb)
{
mutex_lock(&wf_lock);
blocking_notifier_chain_unregister(&wf_client_list, nb);
- wf_client_count++;
+ wf_client_count--;
if (wf_client_count == 0)
wf_stop_thread();
mutex_unlock(&wf_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index ea8a181..d7e9242 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5384,9 +5384,9 @@ static int get_bitmap_file(struct mddev * mddev, void __user * arg)
int err = -ENOMEM;

if (md_allow_write(mddev))
- file = kmalloc(sizeof(*file), GFP_NOIO);
+ file = kzalloc(sizeof(*file), GFP_NOIO);
else
- file = kmalloc(sizeof(*file), GFP_KERNEL);
+ file = kzalloc(sizeof(*file), GFP_KERNEL);

if (!file)
goto out;
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index acba54e..f8458ca 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -134,4 +134,10 @@ int lower_bound(struct btree_node *n, uint64_t key);

extern struct dm_block_validator btree_node_validator;

+/*
+ * Value type for upper levels of multi-level btrees.
+ */
+extern void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt);
+
#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 6e79c11..294eb5b 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}

-static struct dm_btree_value_type le64_type = {
- .context = NULL,
- .size = sizeof(__le64),
- .inc = NULL,
- .dec = NULL,
- .equal = NULL
-};
-
int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
uint64_t *keys, dm_block_t *new_root)
{
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
int index = 0, r = 0;
struct shadow_spine spine;
struct btree_node *n;
+ struct dm_btree_value_type le64_vt;

+ init_le64_type(info->tm, &le64_vt);
init_shadow_spine(&spine, info);
for (level = 0; level < info->levels; level++) {
r = remove_raw(&spine, info,
(level == last_level ?
- &info->value_type : &le64_type),
+ &info->value_type : &le64_vt),
root, keys[level], (unsigned *)&index);
if (r < 0)
break;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 2f0805c..f6cb762 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -242,3 +242,40 @@ int shadow_root(struct shadow_spine *s)
{
return s->root;
}
+
+static void le64_inc(void *context, void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_inc(tm, le64_to_cpu(v_le));
+}
+
+static void le64_dec(void *context, void *value_le)
+{
+ struct dm_transaction_manager *tm = context;
+ __le64 v_le;
+
+ memcpy(&v_le, value_le, sizeof(v_le));
+ dm_tm_dec(tm, le64_to_cpu(v_le));
+}
+
+static int le64_equal(void *context, void *value1_le, void *value2_le)
+{
+ __le64 v1_le, v2_le;
+
+ memcpy(&v1_le, value1_le, sizeof(v1_le));
+ memcpy(&v2_le, value2_le, sizeof(v2_le));
+ return v1_le == v2_le;
+}
+
+void init_le64_type(struct dm_transaction_manager *tm,
+ struct dm_btree_value_type *vt)
+{
+ vt->context = tm;
+ vt->size = sizeof(__le64);
+ vt->inc = le64_inc;
+ vt->dec = le64_dec;
+ vt->equal = le64_equal;
+}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 18f37e0..62f297a 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -647,12 +647,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
struct btree_node *n;
struct dm_btree_value_type le64_type;

- le64_type.context = NULL;
- le64_type.size = sizeof(__le64);
- le64_type.inc = NULL;
- le64_type.dec = NULL;
- le64_type.equal = NULL;
-
+ init_le64_type(info->tm, &le64_type);
init_shadow_spine(&spine, info);

for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 6e7b002..a5f284d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1208,6 +1208,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r1conf *conf = mddev->private;
+ unsigned long flags;

/*
* If it is not operational, then we have already marked it as dead
@@ -1227,14 +1228,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
return;
}
set_bit(Blocked, &rdev->flags);
+ spin_lock_irqsave(&conf->device_lock, flags);
if (test_and_clear_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded++;
set_bit(Faulty, &rdev->flags);
- spin_unlock_irqrestore(&conf->device_lock, flags);
} else
set_bit(Faulty, &rdev->flags);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
/*
* if recovery is running, make sure it aborts.
*/
@@ -1292,7 +1292,10 @@ static int raid1_spare_active(struct mddev *mddev)
* Find all failed disks within the RAID1 configuration
* and mark them readable.
* Called under mddev lock, so rcu protection not needed.
+ * device_lock used to avoid races with raid1_end_read_request
+ * which expects 'In_sync' flags and ->degraded to be consistent.
*/
+ spin_lock_irqsave(&conf->device_lock, flags);
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
if (rdev
@@ -1302,7 +1305,6 @@ static int raid1_spare_active(struct mddev *mddev)
sysfs_notify_dirent_safe(rdev->sysfs_state);
}
}
- spin_lock_irqsave(&conf->device_lock, flags);
mddev->degraded -= count;
spin_unlock_irqrestore(&conf->device_lock, flags);
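
Aside: the raid1.c change above widens the device_lock section so that the
In_sync/Faulty flag transition and the ->degraded counter are updated
together. A minimal standalone sketch of that invariant, using plain
pthreads and made-up names (illustrative only, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int in_sync = 1;		/* stands in for the In_sync rdev flag */
static int degraded;		/* stands in for mddev->degraded */

static void mark_failed(void)
{
	pthread_mutex_lock(&lock);
	if (in_sync) {
		/* flag and counter change in one critical section, so a
		 * reader holding the lock never sees them disagree */
		in_sync = 0;
		degraded++;
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	mark_failed();
	printf("in_sync=%d degraded=%d\n", in_sync, degraded);
	return 0;
}
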

diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index a47ba33..2be51c8 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -945,9 +945,6 @@ static int rc_dev_uevent(struct device *device, struct kobj_uevent_env *env)
{
struct rc_dev *dev = to_rc_dev(device);

- if (!dev || !dev->input_dev)
- return -ENODEV;
-
if (dev->rc_map.name)
ADD_HOTPLUG_VAR("NAME=%s", dev->rc_map.name);
if (dev->driver_name)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5af2a8f..9145834c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -861,6 +861,23 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
}
}

+static struct slave *bond_get_old_active(struct bonding *bond,
+ struct slave *new_active)
+{
+ struct slave *slave;
+ int i;
+
+ bond_for_each_slave(bond, slave, i) {
+ if (slave == new_active)
+ continue;
+
+ if (!compare_ether_addr(bond->dev->dev_addr, slave->dev->dev_addr))
+ return slave;
+ }
+
+ return NULL;
+}
+
/*
* bond_do_fail_over_mac
*
@@ -898,6 +915,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);

+ if (!old_active)
+ old_active = bond_get_old_active(bond, new_active);
+
if (old_active) {
memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
memcpy(saddr.sa_data, old_active->dev->dev_addr,
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f13a673..715e5c4 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -981,9 +981,9 @@ static int virtnet_probe(struct virtio_device *vdev)
/* Do we support "hardware" checksums? */
if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
- dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
if (csum)
- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;

if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index ec347d2..e813eff 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -313,6 +313,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+ {RTL_USB_DEVICE(0x0846, 0x9043, rtl92cu_hal_cfg)}, /*NG WNA1000Mv2*/
+ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 53a613f..39016a0 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -604,10 +604,10 @@ struct device_node *of_find_matching_node_by_address(struct device_node *from,
struct resource res;

while (dn) {
- if (of_address_to_resource(dn, 0, &res))
- continue;
- if (res.start == base_address)
+ if (!of_address_to_resource(dn, 0, &res) &&
+ res.start == base_address)
return dn;
+
dn = of_find_matching_node(dn, matches);
}
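
Aside: the of_find_matching_node_by_address() hunk above fixes a cursor
bug - hitting "continue" on an of_address_to_resource() failure never
advanced dn, so the walk could spin forever on a node whose address cannot
be resolved. A standalone sketch of the pattern, with made-up types
(illustrative only, not kernel code):

#include <stdio.h>

struct node {
	int has_addr;		/* can this node's address be resolved? */
	unsigned long addr;
	struct node *next;
};

static struct node *find_by_address(struct node *dn, unsigned long base)
{
	while (dn) {
		/* buggy form: if (!dn->has_addr) continue;  <- never advances */
		if (dn->has_addr && dn->addr == base)
			return dn;
		dn = dn->next;	/* always advance, even on a lookup failure */
	}
	return NULL;
}

int main(void)
{
	struct node c = { 1, 0x3000, NULL };
	struct node b = { 0, 0, &c };	/* unresolvable node no longer hangs the walk */
	struct node a = { 1, 0x1000, &b };

	printf("found: %s\n", find_by_address(&a, 0x3000) ? "yes" : "no");
	return 0;
}
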

diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index fdaa42a..fac01f8 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -355,6 +355,56 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
.release = pci_vpd_pci22_release,
};

+static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
+ void *arg)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ ssize_t ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_read_vpd(tdev, pos, count, arg);
+ pci_dev_put(tdev);
+ return ret;
+}
+
+static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
+ const void *arg)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ ssize_t ret;
+
+ if (!tdev)
+ return -ENODEV;
+
+ ret = pci_write_vpd(tdev, pos, count, arg);
+ pci_dev_put(tdev);
+ return ret;
+}
+
+static const struct pci_vpd_ops pci_vpd_f0_ops = {
+ .read = pci_vpd_f0_read,
+ .write = pci_vpd_f0_write,
+ .release = pci_vpd_pci22_release,
+};
+
+static int pci_vpd_f0_dev_check(struct pci_dev *dev)
+{
+ struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ int ret = 0;
+
+ if (!tdev)
+ return -ENODEV;
+ if (!tdev->vpd || !tdev->multifunction ||
+ dev->class != tdev->class || dev->vendor != tdev->vendor ||
+ dev->device != tdev->device)
+ ret = -ENODEV;
+
+ pci_dev_put(tdev);
+ return ret;
+}
+
int pci_vpd_pci22_init(struct pci_dev *dev)
{
struct pci_vpd_pci22 *vpd;
@@ -363,12 +413,21 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
if (!cap)
return -ENODEV;
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
+ int ret = pci_vpd_f0_dev_check(dev);
+
+ if (ret)
+ return ret;
+ }
vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
if (!vpd)
return -ENOMEM;

vpd->base.len = PCI_VPD_PCI22_SIZE;
- vpd->base.ops = &pci_vpd_pci22_ops;
+ if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
+ vpd->base.ops = &pci_vpd_f0_ops;
+ else
+ vpd->base.ops = &pci_vpd_pci22_ops;
mutex_init(&vpd->lock);
vpd->cap = cap;
vpd->busy = false;
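
Aside: the new VPD quirk is meant to route reads and writes through
function 0 of the same device. Note that pci_vpd_f0_read() passes
PCI_SLOT(dev->devfn) directly as the devfn argument of pci_get_slot();
that matches function 0 of the same slot only when the slot number is 0
(the common PCIe endpoint case), and the fully explicit form would be
PCI_DEVFN(PCI_SLOT(dev->devfn), 0). A standalone sketch of the devfn
arithmetic, with the macros reproduced here on the assumption of the usual
5-bit slot / 3-bit function split:

#include <stdio.h>

#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)
#define PCI_DEVFN(slot, func)	((((slot) & 0x1f) << 3) | ((func) & 0x07))

int main(void)
{
	unsigned int devfn = PCI_DEVFN(3, 2);			/* device at 03.2 */
	unsigned int f0    = PCI_DEVFN(PCI_SLOT(devfn), 0);	/* its function 0: 03.0 */

	printf("%02x.%u -> VPD access redirected to %02x.%u\n",
	       PCI_SLOT(devfn), PCI_FUNC(devfn),
	       PCI_SLOT(f0), PCI_FUNC(f0));

	/* passing the bare slot number as a devfn decodes differently: */
	printf("bare slot %u decodes as %02x.%u\n",
	       PCI_SLOT(devfn),
	       PCI_SLOT(PCI_SLOT(devfn)), PCI_FUNC(PCI_SLOT(devfn)));
	return 0;
}
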
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 481b184..93be760 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1941,6 +1941,15 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);

+static void quirk_f0_vpd_link(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) != PCI_CLASS_NETWORK_ETHERNET ||
+ !dev->multifunction || !PCI_FUNC(dev->devfn))
+ return;
+ dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_f0_vpd_link);
+
static void __devinit quirk_e100_interrupt(struct pci_dev *dev)
{
u16 command, pmcsr;
@@ -2875,8 +2884,9 @@ static void __devinit fixup_ti816x_class(struct pci_dev* dev)
{
/* TI 816x devices do not have class code set when in PCIe boot mode */
if (dev->class == PCI_CLASS_NOT_DEFINED) {
- dev_info(&dev->dev, "Setting PCI class for 816x PCIe device\n");
- dev->class = PCI_CLASS_MULTIMEDIA_VIDEO;
+ dev->class = PCI_CLASS_MULTIMEDIA_VIDEO << 8;
+ dev_info(&dev->dev, "PCI class overridden (%#08x -> %#08x)\n",
+ PCI_CLASS_NOT_DEFINED, dev->class);
}
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_TI, 0xb800, fixup_ti816x_class);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 221875e..735b324 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1029,11 +1029,26 @@ restart:
fc_fcp_pkt_hold(fsp);
spin_unlock_irqrestore(&si->scsi_queue_lock, flags);

- if (!fc_fcp_lock_pkt(fsp)) {
+ spin_lock_bh(&fsp->scsi_pkt_lock);
+ if (!(fsp->state & FC_SRB_COMPL)) {
+ fsp->state |= FC_SRB_COMPL;
+ /*
+ * TODO: dropping scsi_pkt_lock and then reacquiring it
+ * around fc_fcp_cleanup_cmd() is required, since
+ * fc_fcp_cleanup_cmd() calls into fc_seq_set_resp(),
+ * which gives up the CPU via schedule(). Perhaps that
+ * sleeping code should be removed instead of unlocking
+ * here, to avoid a scheduling-while-atomic bug.
+ */
+ spin_unlock_bh(&fsp->scsi_pkt_lock);
+
fc_fcp_cleanup_cmd(fsp, error);
+
+ spin_lock_bh(&fsp->scsi_pkt_lock);
fc_io_compl(fsp);
- fc_fcp_unlock_pkt(fsp);
}
+ spin_unlock_bh(&fsp->scsi_pkt_lock);

fc_fcp_pkt_release(fsp);
spin_lock_irqsave(&si->scsi_queue_lock, flags);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 2794a30..8771c03 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2906,10 +2906,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- unsigned long flags;

del_timer_sync(&conn->transport_timer);

+ mutex_lock(&session->eh_mutex);
spin_lock_bh(&session->lock);
conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
if (session->leadconn == conn) {
@@ -2921,28 +2921,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
}
spin_unlock_bh(&session->lock);

- /*
- * Block until all in-progress commands for this connection
- * time out or fail.
- */
- for (;;) {
- spin_lock_irqsave(session->host->host_lock, flags);
- if (!session->host->host_busy) { /* OK for ERL == 0 */
- spin_unlock_irqrestore(session->host->host_lock, flags);
- break;
- }
- spin_unlock_irqrestore(session->host->host_lock, flags);
- msleep_interruptible(500);
- iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
- "host_busy %d host_failed %d\n",
- session->host->host_busy,
- session->host->host_failed);
- /*
- * force eh_abort() to unblock
- */
- wake_up(&conn->ehwait);
- }
-
/* flush queued up work because we free the connection below */
iscsi_suspend_tx(conn);

@@ -2955,6 +2933,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
if (session->leadconn == conn)
session->leadconn = NULL;
spin_unlock_bh(&session->lock);
+ mutex_unlock(&session->eh_mutex);

iscsi_destroy_conn(cls_conn);
}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dc25bee..2ecc2d6 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -799,6 +799,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
if (!(sccr1_reg & SSCR1_TIE))
mask &= ~SSSR_TFS;

+ /* Ignore RX timeout interrupt if it is disabled */
+ if (!(sccr1_reg & SSCR1_TINTE))
+ mask &= ~SSSR_TINT;
+
if (!(status & mask))
return IRQ_NONE;

diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 59fb984..7c33cbb 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -979,7 +979,7 @@ static int iscsit_handle_scsi_cmd(
if (cmd->targ_xfer_tag == 0xFFFFFFFF)
cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
spin_unlock_bh(&conn->sess->ttt_lock);
- } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
+ } else
cmd->targ_xfer_tag = 0xFFFFFFFF;
cmd->cmd_sn = hdr->cmdsn;
cmd->exp_stat_sn = hdr->exp_statsn;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 6993961..c3cf95e 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -668,11 +668,8 @@ int target_report_luns(struct se_task *se_task)
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
- if (!se_sess) {
- int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
- lun_count = 1;
+ if (!se_sess)
goto done;
- }

spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -699,6 +696,14 @@ int target_report_luns(struct se_task *se_task)
* See SPC3 r07, page 159.
*/
done:
+ /*
+ * If no LUNs are accessible, report virtual LUN 0.
+ */
+ if (lun_count == 0) {
+ int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
+ lun_count = 1;
+ }
+
lun_count *= 8;
buf[0] = ((lun_count >> 24) & 0xff);
buf[1] = ((lun_count >> 16) & 0xff);
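
Aside: for context on the encoding this hunk feeds into, REPORT LUNS
returns a LUN LIST LENGTH of 8 bytes per LUN entry, stored big-endian in
bytes 0-3 of the parameter data, so with the fix an otherwise empty list
still encodes the single virtual LUN 0. A standalone sketch of that
arithmetic (illustrative only):

#include <stdio.h>

int main(void)
{
	unsigned char buf[8] = { 0 };
	unsigned int lun_count = 1;		/* at least virtual LUN 0 */
	unsigned int len = lun_count * 8;	/* LUN LIST LENGTH in bytes */

	buf[0] = (len >> 24) & 0xff;
	buf[1] = (len >> 16) & 0xff;
	buf[2] = (len >> 8) & 0xff;
	buf[3] = len & 0xff;

	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}
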
diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250_pnp.c
index a2f2365..add2096 100644
--- a/drivers/tty/serial/8250_pnp.c
+++ b/drivers/tty/serial/8250_pnp.c
@@ -42,6 +42,12 @@ static const struct pnp_device_id pnp_dev_table[] = {
{ "AEI1240", 0 },
/* Rockwell 56K ACF II Fax+Data+Voice Modem */
{ "AKY1021", 0 /*SPCI_FL_NO_SHIRQ*/ },
+ /*
+ * ALi Fast Infrared Controller
+ * The native driver (ali-ircc) is broken, so claim the
+ * port here so it can at least be used with irtty-sir.
+ */
+ { "ALI5123", 0 },
/* AZT3005 PnP SOUND DEVICE */
{ "AZT4001", 0 },
/* Best Data Products Inc. Smart One 336F PnP Modem */
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 0276db3..478d71b 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -114,7 +114,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bmAttributes = 16;
} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
- desc->bmAttributes > 2) {
+ USB_SS_MULT(desc->bmAttributes) > 3) {
dev_warn(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 3\n", desc->bmAttributes + 1,
@@ -123,7 +123,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
}

if (usb_endpoint_xfer_isoc(&ep->desc))
- max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+ max_tx = (desc->bMaxBurst + 1) *
+ (USB_SS_MULT(desc->bmAttributes)) *
usb_endpoint_maxp(&ep->desc);
else if (usb_endpoint_xfer_int(&ep->desc))
max_tx = usb_endpoint_maxp(&ep->desc) *
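
Aside: a quick worked example of the isochronous bandwidth computation
changed above, assuming USB_SS_MULT(p) expands to (1 + ((p) & 0x3)) as in
ch9.h. Only bits 1:0 of the SuperSpeed companion descriptor's bmAttributes
carry the isoc Mult value, which is why the raw field must be masked
rather than used as a multiplier directly:

#include <stdio.h>

#define USB_SS_MULT(p)	(1 + ((p) & 0x3))	/* assumed ch9.h definition */

int main(void)
{
	unsigned int bMaxBurst = 2;		/* 3 packets per burst */
	unsigned int bmAttributes = 0x02;	/* Mult field = 2 -> 3 bursts */
	unsigned int maxp = 1024;		/* wMaxPacketSize */

	/* bytes per service interval for the isoc endpoint */
	unsigned int max_tx = (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp;

	printf("max_tx = %u\n", max_tx);	/* 3 * 3 * 1024 = 9216 */
	return 0;
}
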
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 9aa1cbb..9716d61 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1052,7 +1052,7 @@ static void set_feature(struct m66592 *m66592, struct usb_ctrlrequest *ctrl)
tmp = m66592_read(m66592, M66592_INTSTS0) &
M66592_CTSQ;
udelay(1);
- } while (tmp != M66592_CS_IDST || timeout-- > 0);
+ } while (tmp != M66592_CS_IDST && timeout-- > 0);

if (tmp == M66592_CS_IDST)
m66592_bset(m66592,
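
Aside: the one-character m66592-udc.c change above deserves spelling out -
with "||" the do/while only terminates once the state matches *and* the
retry budget is exhausted, so a hung controller made it spin forever; with
"&&" it stops on whichever comes first. A standalone sketch of the
corrected pattern, with a made-up register read (illustrative only, not
the driver code):

#include <stdio.h>

static int reg_state = 1;				/* pretend status register */

static int read_ctsq(void) { return reg_state; }	/* stands in for M66592_CTSQ */

static int wait_for_idle(int timeout)
{
	int tmp;

	do {
		tmp = read_ctsq();
		/* a real driver would udelay(1) between polls */
	} while (tmp != 0 /* idle */ && timeout-- > 0);

	return tmp == 0;
}

int main(void)
{
	/* hardware never reaches idle: the loop still ends after the budget */
	printf("reached idle: %d\n", wait_for_idle(3000));
	return 0;
}
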
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index da487fd..cf995d4 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -272,6 +272,10 @@ static void ehci_fsl_usb_setup(struct ehci_hcd *ehci)
out_be32(non_ehci + FSL_SOC_USB_SNOOP2, 0x80000000 | SNOOP_SIZE_2GB);
#endif

+ /* Deal with USB erratum A-005275 */
+ if (pdata->has_fsl_erratum_a005275 == 1)
+ ehci->has_fsl_hs_errata = 1;
+
if ((pdata->operating_mode == FSL_USB2_DR_HOST) ||
(pdata->operating_mode == FSL_USB2_DR_OTG))
ehci_fsl_setup_phy(ehci, pdata->phy_mode, 0);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 4527b90..313a47d 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -1049,6 +1049,13 @@ static int ehci_hub_control (
*/
ehci->reset_done [wIndex] = jiffies
+ msecs_to_jiffies (50);
+
+ /*
+ * Force full-speed connect for FSL high-speed
+ * erratum; disable HS Chirp by setting PFSC bit
+ */
+ if (ehci_has_fsl_hs_errata(ehci))
+ temp |= (1 << PORTSC_FSL_PFSC);
}
ehci_writel(ehci, temp, status_reg);
break;
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
index 14ced00..ddaaead 100644
--- a/drivers/usb/host/ehci-sysfs.c
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -29,7 +29,7 @@ static ssize_t show_companion(struct device *dev,
int count = PAGE_SIZE;
char *ptr = buf;

- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
nports = HCS_N_PORTS(ehci->hcs_params);

for (index = 0; index < nports; ++index) {
@@ -54,7 +54,7 @@ static ssize_t store_companion(struct device *dev,
struct ehci_hcd *ehci;
int portnum, new_owner;

- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
new_owner = PORT_OWNER; /* Owned by companion */
if (sscanf(buf, "%d", &portnum) != 1)
return -EINVAL;
@@ -85,7 +85,7 @@ static ssize_t show_uframe_periodic_max(struct device *dev,
struct ehci_hcd *ehci;
int n;

- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max);
return n;
}
@@ -102,7 +102,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
unsigned long flags;
ssize_t ret;

- ehci = hcd_to_ehci(bus_to_hcd(dev_get_drvdata(dev)));
+ ehci = hcd_to_ehci(dev_get_drvdata(dev));
if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
return -EINVAL;

@@ -167,6 +167,9 @@ static inline int create_sysfs_files(struct ehci_hcd *ehci)
struct device *controller = ehci_to_hcd(ehci)->self.controller;
int i = 0;

+ if (dev_get_drvdata(controller) != ehci_to_hcd(ehci))
+ return 0;
+
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
i = device_create_file(controller, &dev_attr_companion);
@@ -182,6 +185,9 @@ static inline void remove_sysfs_files(struct ehci_hcd *ehci)
{
struct device *controller = ehci_to_hcd(ehci)->self.controller;

+ if (dev_get_drvdata(controller) != ehci_to_hcd(ehci))
+ return;
+
/* with integrated TT there is no companion! */
if (!ehci_is_TDI(ehci))
device_remove_file(controller, &dev_attr_companion);
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index b65912d..bd8adbb 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -136,6 +136,7 @@ struct ehci_hcd { /* one per controller */
/* SILICON QUIRKS */
unsigned no_selective_suspend:1;
unsigned has_fsl_port_bug:1; /* FreeScale */
+ unsigned has_fsl_hs_errata:1; /* Freescale HS quirk */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
unsigned big_endian_capbase:1;
@@ -612,6 +613,17 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
#define ehci_has_fsl_portno_bug(e) (0)
#endif

+#define PORTSC_FSL_PFSC 24 /* Port Force Full-Speed Connect */
+
+#if defined(CONFIG_PPC_85xx)
+/* Some Freescale processors have an erratum (USB A-005275) in which
+ * incoming packets get corrupted in HS mode
+ */
+#define ehci_has_fsl_hs_errata(e) ((e)->has_fsl_hs_errata)
+#else
+#define ehci_has_fsl_hs_errata(e) (0)
+#endif
+
/*
* While most USB host controllers implement their registers in
* little-endian format, a minority (celleb companion chip) implement
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 5a42cf0..ac19ee5 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -166,6 +166,10 @@ static int __devinit fsl_usb2_mph_dr_of_probe(struct platform_device *ofdev)

prop = of_get_property(np, "phy_type", NULL);
pdata->phy_mode = determine_usb_phy(prop);
+ if (of_get_property(np, "fsl,usb-erratum-a005275", NULL))
+ pdata->has_fsl_erratum_a005275 = 1;
+ else
+ pdata->has_fsl_erratum_a005275 = 0;

for (i = 0; i < ARRAY_SIZE(dev_data->drivers); i++) {
if (!dev_data->drivers[i])
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index d5d2af5..ab023b1 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1403,10 +1403,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
* use Event Data TRBs, and we don't chain in a link TRB on short
* transfers, we're basically dividing by 1.
*
- * xHCI 1.0 specification indicates that the Average TRB Length should
- * be set to 8 for control endpoints.
+ * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
+ * should be set to 8 for control endpoints.
*/
- if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
else
ep_ctx->tx_info |=
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a47e29a..dc8e5a8 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -85,7 +85,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
return 0;
/* offset in TRBs */
segment_offset = trb - seg->trbs;
- if (segment_offset > TRBS_PER_SEGMENT)
+ if (segment_offset >= TRBS_PER_SEGMENT)
return 0;
return seg->dma + (segment_offset * sizeof(*trb));
}
@@ -350,6 +350,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
ret = handshake(xhci, &xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
if (ret < 0) {
+ /* we are about to kill xhci, give it one more chance */
+ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ &xhci->op_regs->cmd_ring);
+ udelay(1000);
+ ret = handshake(xhci, &xhci->op_regs->cmd_ring,
+ CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+ if (ret == 0)
+ return 0;
+
xhci_err(xhci, "Stopped the command ring failed, "
"maybe the host is dead\n");
xhci->xhc_state |= XHCI_STATE_DYING;
@@ -3423,8 +3432,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (start_cycle == 0)
field |= 0x1;

- /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
- if (xhci->hci_version == 0x100) {
+ /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+ if (xhci->hci_version >= 0x100) {
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_TX_TYPE(TRB_DATA_IN);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 950a8cc..2f51dec 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -141,7 +141,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~XHCI_STATE_HALTED;
+ xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
return ret;
}

diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8257d3b..4ffaa9d 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -629,6 +629,10 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2WI_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX3_PID) },
/*
* ELV devices:
*/
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index e4a57bb..7d11642 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -568,6 +568,14 @@
*/
#define FTDI_SYNAPSE_SS200_PID 0x9090 /* SS200 - SNAP Stick 200 */

+/*
+ * CustomWare / ShipModul NMEA multiplexers product ids (FTDI_VID)
+ */
+#define FTDI_CUSTOMWARE_MINIPLEX_PID 0xfd48 /* MiniPlex first generation NMEA Multiplexer */
+#define FTDI_CUSTOMWARE_MINIPLEX2_PID 0xfd49 /* MiniPlex-USB and MiniPlex-2 series */
+#define FTDI_CUSTOMWARE_MINIPLEX2WI_PID 0xfd4a /* MiniPlex-2Wi */
+#define FTDI_CUSTOMWARE_MINIPLEX3_PID 0xfd4b /* MiniPlex-3 series */
+

/********************************/
/** third-party VID/PID combos **/
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 64ea95d..7e6d2ec 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -276,6 +276,10 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_MF622 0x0001
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
+#define ZTE_PRODUCT_ZM8620_X 0x0396
+#define ZTE_PRODUCT_ME3620_MBIM 0x0426
+#define ZTE_PRODUCT_ME3620_X 0x1432
+#define ZTE_PRODUCT_ME3620_L 0x1433
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
#define ZTE_PRODUCT_AC8710 0xfff1
#define ZTE_PRODUCT_AC2726 0xfff5
@@ -547,6 +551,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
.sendsetup = BIT(1) | BIT(2) | BIT(3),
};

+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+ .reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+ .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+ .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};
@@ -1578,6 +1594,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+ .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+ .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index db9e54a..f34c0dd 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -303,6 +303,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
},
+ { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
/* AT&T Direct IP LTE modems */
{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
.driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5481809..75670b5 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -143,6 +143,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
static int whiteheat_firmware_attach(struct usb_serial *serial);

/* function prototypes for the Connect Tech WhiteHEAT serial converter */
+static int whiteheat_probe(struct usb_serial *serial,
+ const struct usb_device_id *id);
static int whiteheat_attach(struct usb_serial *serial);
static void whiteheat_release(struct usb_serial *serial);
static int whiteheat_open(struct tty_struct *tty,
@@ -188,6 +190,7 @@ static struct usb_serial_driver whiteheat_device = {
.usb_driver = &whiteheat_driver,
.id_table = id_table_std,
.num_ports = 4,
+ .probe = whiteheat_probe,
.attach = whiteheat_attach,
.release = whiteheat_release,
.open = whiteheat_open,
@@ -387,6 +390,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
/*****************************************************************************
* Connect Tech's White Heat serial driver functions
*****************************************************************************/
+
+static int whiteheat_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ size_t num_bulk_in = 0;
+ size_t num_bulk_out = 0;
+ size_t min_num_bulk;
+ unsigned int i;
+
+ iface_desc = serial->interface->cur_altsetting;
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+ endpoint = &iface_desc->endpoint[i].desc;
+ if (usb_endpoint_is_bulk_in(endpoint))
+ ++num_bulk_in;
+ if (usb_endpoint_is_bulk_out(endpoint))
+ ++num_bulk_out;
+ }
+
+ min_num_bulk = COMMAND_PORT + 1;
+ if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
+ return -ENODEV;
+
+ return 0;
+}
+
static int whiteheat_attach(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 080b186..01d1779 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -493,11 +493,15 @@ static int gntdev_release(struct inode *inode, struct file *flip)

pr_debug("priv %p\n", priv);

+ spin_lock(&priv->lock);
while (!list_empty(&priv->maps)) {
map = list_entry(priv->maps.next, struct grant_map, next);
list_del(&map->next);
+ spin_unlock(&priv->lock);
gntdev_put_map(map);
+ spin_lock(&priv->lock);
}
+ spin_unlock(&priv->lock);

if (use_ptemod)
mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 05937a8..9a837a8 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2444,7 +2444,8 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
bio_end_io_t end_io_func,
int mirror_num,
unsigned long prev_bio_flags,
- unsigned long bio_flags)
+ unsigned long bio_flags,
+ bool force_bio_submit)
{
int ret = 0;
struct bio *bio;
@@ -2463,6 +2464,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
sector;

if (prev_bio_flags != bio_flags || !contig ||
+ force_bio_submit ||
(tree->ops && tree->ops->merge_bio_hook &&
tree->ops->merge_bio_hook(page, offset, page_size, bio,
bio_flags)) ||
@@ -2519,7 +2521,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
struct page *page,
get_extent_t *get_extent,
struct bio **bio, int mirror_num,
- unsigned long *bio_flags)
+ unsigned long *bio_flags,
+ u64 *prev_em_start)
{
struct inode *inode = page->mapping->host;
u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
@@ -2575,6 +2578,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
}
}
while (cur <= end) {
+ bool force_bio_submit = false;
+
if (cur >= last_byte) {
char *userpage;
struct extent_state *cached = NULL;
@@ -2621,6 +2626,49 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
block_start = em->block_start;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
block_start = EXTENT_MAP_HOLE;
+
+ /*
+ * If we have a file range that points to a compressed extent
+ * and it's followed by a consecutive file range that points to
+ * to the same compressed extent (possibly with a different
+ * offset and/or length, so it either points to the whole extent
+ * or only part of it), we must make sure we do not submit a
+ * single bio to populate the pages for the 2 ranges because
+ * this makes the compressed extent read zero out the pages
+ * belonging to the 2nd range. Imagine the following scenario:
+ *
+ * File layout
+ * [0 - 8K] [8K - 24K]
+ * | |
+ * | |
+ * points to extent X, points to extent X,
+ * offset 4K, length of 8K offset 0, length 16K
+ *
+ * [extent X, compressed length = 4K uncompressed length = 16K]
+ *
+ * If the bio to read the compressed extent covers both ranges,
+ * it will decompress extent X into the pages belonging to the
+ * first range and then it will stop, zeroing out the remaining
+ * pages that belong to the other range that points to extent X.
+ * So here we make sure we submit 2 bios, one for the first
+ * range and another one for the second range. Both will target
+ * the same physical extent from disk, but we can't currently
+ * make the compressed bio endio callback populate the pages
+ * for both ranges because each compressed bio is tightly
+ * coupled with a single extent map, and each range can have
+ * an extent map with a different offset value relative to the
+ * uncompressed data of our extent and different lengths. This
+ * is a corner case, so we prioritize correctness over optimal
+ * behavior (submitting 2 bios for the same extent).
+ */
+ if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) &&
+ prev_em_start && *prev_em_start != (u64)-1 &&
+ *prev_em_start != em->orig_start)
+ force_bio_submit = true;
+
+ if (prev_em_start)
+ *prev_em_start = em->orig_start;
+
free_extent_map(em);
em = NULL;

@@ -2675,7 +2723,8 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
bdev, bio, pnr,
end_bio_extent_readpage, mirror_num,
*bio_flags,
- this_bio_flag);
+ this_bio_flag,
+ force_bio_submit);
nr++;
*bio_flags = this_bio_flag;
}
@@ -2701,7 +2750,7 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
int ret;

ret = __extent_read_full_page(tree, page, get_extent, &bio, mirror_num,
- &bio_flags);
+ &bio_flags, NULL);
if (bio)
ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
return ret;
@@ -2960,7 +3009,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
sector, iosize, pg_offset,
bdev, &epd->bio, max_nr,
end_bio_extent_writepage,
- 0, 0, 0);
+ 0, 0, 0, false);
if (ret)
SetPageError(page);
}
@@ -3219,6 +3268,7 @@ int extent_readpages(struct extent_io_tree *tree,
struct bio *bio = NULL;
unsigned page_idx;
unsigned long bio_flags = 0;
+ u64 prev_em_start = (u64)-1;

for (page_idx = 0; page_idx < nr_pages; page_idx++) {
struct page *page = list_entry(pages->prev, struct page, lru);
@@ -3228,7 +3278,8 @@ int extent_readpages(struct extent_io_tree *tree,
if (!add_to_page_cache_lru(page, mapping,
page->index, GFP_NOFS)) {
__extent_read_full_page(tree, page, get_extent,
- &bio, 0, &bio_flags);
+ &bio, 0, &bio_flags,
+ &prev_em_start);
}
page_cache_release(page);
}
@@ -3998,6 +4049,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
unsigned long num_pages;
struct bio *bio = NULL;
unsigned long bio_flags = 0;
+ u64 prev_em_start = (u64)-1;

if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
return 0;
@@ -4053,7 +4105,8 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
ClearPageError(page);
err = __extent_read_full_page(tree, page,
get_extent, &bio,
- mirror_num, &bio_flags);
+ mirror_num, &bio_flags,
+ &prev_em_start);
if (err)
ret = err;
} else {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 622d322..cb10cb9 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3518,7 +3518,8 @@ void btrfs_evict_inode(struct inode *inode)
goto no_delete;
}
/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
- btrfs_wait_ordered_range(inode, 0, (u64)-1);
+ if (!special_file(inode->i_mode))
+ btrfs_wait_ordered_range(inode, 0, (u64)-1);

if (root->fs_info->log_root_recovering) {
BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan));
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 3c981db..8bfafe5 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -361,8 +361,10 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
if (opt->flags & CEPH_OPT_NOCRC)
seq_puts(m, ",nocrc");

- if (opt->name)
- seq_printf(m, ",name=%s", opt->name);
+ if (opt->name) {
+ seq_puts(m, ",name=");
+ seq_escape(m, opt->name, ", \t\n\\");
+ }
if (opt->key)
seq_puts(m, ",secret=<hidden>");

@@ -405,7 +407,7 @@ static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
- seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
+ seq_show_option(m, "snapdirname", fsopt->snapdir_name);
return 0;
}
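
Aside: several filesystem hunks below (cifs, ext4, gfs2, hfs, hfsplus,
hostfs, ocfs2) make the same change as the ceph one above - show_options
values that may contain commas or whitespace go through seq_show_option()
or seq_escape() instead of a raw seq_printf(), so a crafted value can no
longer masquerade as an extra mount option in /proc/mounts. A standalone
sketch of the difference, using hand-rolled escaping rather than the
kernel helpers (illustrative only):

#include <stdio.h>

static void show_naive(const char *name, const char *value)
{
	printf(",%s=%s", name, value);
}

static void show_escaped(const char *name, const char *value)
{
	printf(",%s=", name);
	for (; *value; value++) {
		if (*value == ',' || *value == ' ' || *value == '\t' ||
		    *value == '\n' || *value == '\\')
			printf("\\%03o", (unsigned char)*value);
		else
			putchar(*value);
	}
}

int main(void)
{
	const char *evil = "client,noexec";	/* attacker-chosen name= value */

	show_naive("name", evil);	/* parses as two separate options */
	putchar('\n');
	show_escaped("name", evil);	/* one clearly delimited option */
	putchar('\n');
	return 0;
}
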

diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index b4675bd..af95386 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -388,6 +388,48 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
return 0;
}

+/* The server provided AV pairs/target info in the type 2 challenge
+ * packet, and we have stored that blob in the SMB session.
+ * We parse it here to find the server-supplied timestamp to use
+ * for NTLMv2 authentication (falling back to the local current
+ * time in case of failure).
+ */
+static __le64
+find_timestamp(struct cifs_ses *ses)
+{
+ unsigned int attrsize;
+ unsigned int type;
+ unsigned int onesize = sizeof(struct ntlmssp2_name);
+ unsigned char *blobptr;
+ unsigned char *blobend;
+ struct ntlmssp2_name *attrptr;
+
+ if (!ses->auth_key.len || !ses->auth_key.response)
+ return 0;
+
+ blobptr = ses->auth_key.response;
+ blobend = blobptr + ses->auth_key.len;
+
+ while (blobptr + onesize < blobend) {
+ attrptr = (struct ntlmssp2_name *) blobptr;
+ type = le16_to_cpu(attrptr->type);
+ if (type == NTLMSSP_AV_EOL)
+ break;
+ blobptr += 2; /* advance attr type */
+ attrsize = le16_to_cpu(attrptr->length);
+ blobptr += 2; /* advance attr size */
+ if (blobptr + attrsize > blobend)
+ break;
+ if (type == NTLMSSP_AV_TIMESTAMP) {
+ if (attrsize == sizeof(u64))
+ return *((__le64 *)blobptr);
+ }
+ blobptr += attrsize; /* advance attr value */
+ }
+
+ return cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+}
+
static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
const struct nls_table *nls_cp)
{
@@ -544,6 +586,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
struct ntlmv2_resp *buf;
char ntlmv2_hash[16];
unsigned char *tiblob = NULL; /* target info blob */
+ __le64 rsp_timestamp;

if (ses->server->secType == RawNTLMSSP) {
if (!ses->domainName) {
@@ -561,6 +604,12 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
}
}

+ /* Must be within 5 minutes of the server (or in range +/-2h
+ * in case of Mac OS X), so simply carry over server timestamp
+ * (as Windows 7 does)
+ */
+ rsp_timestamp = find_timestamp(ses);
+
baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
tilen = ses->auth_key.len;
tiblob = ses->auth_key.response;
@@ -578,7 +627,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
buf->blob_signature = cpu_to_le32(0x00000101);
buf->reserved = 0;
- buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ buf->time = rsp_timestamp;
+
get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
buf->reserved2 = 0;
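
Aside: the find_timestamp() helper added above walks the NTLMSSP
target-info blob, which is a flat TLV list - a little-endian 2-byte
attribute type, a 2-byte length, then the value, terminated by an AV_EOL
(type 0) entry; the timestamp attribute is type 7 per MS-NLMP. A
standalone sketch of that layout with made-up data (illustrative only,
not the kernel structures):

#include <stdio.h>
#include <stdint.h>

static uint16_t get_le16(const unsigned char *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static int find_attr(const unsigned char *blob, size_t len, unsigned wanted,
		     const unsigned char **val, uint16_t *vlen)
{
	const unsigned char *p = blob, *end = blob + len;

	while (p + 4 <= end) {
		uint16_t type = get_le16(p);
		uint16_t size = get_le16(p + 2);

		if (type == 0)			/* AV_EOL ends the list */
			break;
		p += 4;
		if (p + size > end)
			break;
		if (type == wanted) {
			*val = p;
			*vlen = size;
			return 1;
		}
		p += size;
	}
	return 0;
}

int main(void)
{
	/* one timestamp attribute (type 7, length 8) followed by AV_EOL */
	unsigned char blob[] = {
		0x07, 0x00, 0x08, 0x00, 1, 2, 3, 4, 5, 6, 7, 8,
		0x00, 0x00, 0x00, 0x00,
	};
	const unsigned char *val;
	uint16_t vlen;

	if (find_attr(blob, sizeof(blob), 7, &val, &vlen))
		printf("timestamp attribute found, %u bytes\n", vlen);
	return 0;
}
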

diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 25bb97f..d955b8e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -381,10 +381,10 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
seq_printf(s, ",multiuser");
else if (tcon->ses->user_name)
- seq_printf(s, ",username=%s", tcon->ses->user_name);
+ seq_show_option(s, "username", tcon->ses->user_name);

if (tcon->ses->domainName)
- seq_printf(s, ",domain=%s", tcon->ses->domainName);
+ seq_show_option(s, "domain", tcon->ses->domainName);

if (srcaddr->sa_family != AF_UNSPEC) {
struct sockaddr_in *saddr4;
diff --git a/fs/dcache.c b/fs/dcache.c
index 8a35300..46265f5 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2452,6 +2452,8 @@ static int prepend_path(const struct path *path,
{
struct dentry *dentry = path->dentry;
struct vfsmount *vfsmnt = path->mnt;
+ char *orig_buffer = *buffer;
+ int orig_len = *buflen;
bool slash = false;
int error = 0;

@@ -2459,6 +2461,14 @@ static int prepend_path(const struct path *path,
struct dentry * parent;

if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+ /* Escaped? */
+ if (dentry != vfsmnt->mnt_root) {
+ *buffer = orig_buffer;
+ *buflen = orig_len;
+ slash = false;
+ error = 3;
+ goto global_root;
+ }
/* Global root? */
if (vfsmnt->mnt_parent == vfsmnt) {
goto global_root;
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 534c1d4..eba8f1d 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -55,26 +55,26 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)

lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
- if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
- goto out;
- if (nd) {
- dentry_save = nd->path.dentry;
- vfsmount_save = nd->path.mnt;
- nd->path.dentry = lower_dentry;
- nd->path.mnt = lower_mnt;
- }
- rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
- if (nd) {
- nd->path.dentry = dentry_save;
- nd->path.mnt = vfsmount_save;
+ if (lower_dentry->d_op && lower_dentry->d_op->d_revalidate) {
+ if (nd) {
+ dentry_save = nd->path.dentry;
+ vfsmount_save = nd->path.mnt;
+ nd->path.dentry = lower_dentry;
+ nd->path.mnt = lower_mnt;
+ }
+ rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
+ if (nd) {
+ nd->path.dentry = dentry_save;
+ nd->path.mnt = vfsmount_save;
+ }
}
if (dentry->d_inode) {
- struct inode *lower_inode =
- ecryptfs_inode_to_lower(dentry->d_inode);
+ struct inode *inode = dentry->d_inode;

- fsstack_copy_attr_all(dentry->d_inode, lower_inode);
+ fsstack_copy_attr_all(inode, ecryptfs_inode_to_lower(inode));
+ if (!inode->i_nlink)
+ return 0;
}
-out:
return rc;
}

diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index be4db0e..e2cf43b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1018,10 +1018,10 @@ static inline void ext4_show_quota_options(struct seq_file *seq,
}

if (sbi->s_qf_names[USRQUOTA])
- seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
+ seq_show_option(seq, "usrjquota", sbi->s_qf_names[USRQUOTA]);

if (sbi->s_qf_names[GRPQUOTA])
- seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
+ seq_show_option(seq, "grpjquota", sbi->s_qf_names[GRPQUOTA]);

if (test_opt(sb, USRQUOTA))
seq_puts(seq, ",usrquota");
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 71e4209..be2ece5 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1298,11 +1298,11 @@ static int gfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
if (is_ancestor(mnt->mnt_root, sdp->sd_master_dir))
seq_printf(s, ",meta");
if (args->ar_lockproto[0])
- seq_printf(s, ",lockproto=%s", args->ar_lockproto);
+ seq_show_option(s, "lockproto", args->ar_lockproto);
if (args->ar_locktable[0])
- seq_printf(s, ",locktable=%s", args->ar_locktable);
+ seq_show_option(s, "locktable", args->ar_locktable);
if (args->ar_hostdata[0])
- seq_printf(s, ",hostdata=%s", args->ar_hostdata);
+ seq_show_option(s, "hostdata", args->ar_hostdata);
if (args->ar_spectator)
seq_printf(s, ",spectator");
if (args->ar_localflocks)
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index cdb41a1..8daea16 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -287,7 +287,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
page_cache_release(page);
goto fail;
}
- page_cache_release(page);
node->page[i] = page;
}

@@ -397,11 +396,11 @@ node_error:

void hfs_bnode_free(struct hfs_bnode *node)
{
- //int i;
+ int i;

- //for (i = 0; i < node->tree->pages_per_bnode; i++)
- // if (node->page[i])
- // page_cache_release(node->page[i]);
+ for (i = 0; i < node->tree->pages_per_bnode; i++)
+ if (node->page[i])
+ page_cache_release(node->page[i]);
kfree(node);
}

diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
index 92fb358..db240c5 100644
--- a/fs/hfs/brec.c
+++ b/fs/hfs/brec.c
@@ -132,13 +132,16 @@ skip:
hfs_bnode_write(node, entry, data_off + key_len, entry_len);
hfs_bnode_dump(node);

- if (new_node) {
- /* update parent key if we inserted a key
- * at the start of the first node
- */
- if (!rec && new_node != node)
- hfs_brec_update_parent(fd);
+ /*
+ * update parent key if we inserted a key
+ * at the start of the node and it is not the new node
+ */
+ if (!rec && new_node != node) {
+ hfs_bnode_read_key(node, fd->search_key, data_off + size);
+ hfs_brec_update_parent(fd);
+ }

+ if (new_node) {
hfs_bnode_put(fd->bnode);
if (!new_node->parent) {
hfs_btree_inc_height(tree);
@@ -167,9 +170,6 @@ skip:
goto again;
}

- if (!rec)
- hfs_brec_update_parent(fd);
-
return 0;
}

@@ -366,6 +366,8 @@ again:
if (IS_ERR(parent))
return PTR_ERR(parent);
__hfs_brec_find(parent, fd);
+ if (fd->record < 0)
+ return -ENOENT;
hfs_bnode_dump(parent);
rec = fd->record;

diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index 1b55f70..cac813d 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -138,9 +138,9 @@ static int hfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
struct hfs_sb_info *sbi = HFS_SB(mnt->mnt_sb);

if (sbi->s_creator != cpu_to_be32(0x3f3f3f3f))
- seq_printf(seq, ",creator=%.4s", (char *)&sbi->s_creator);
+ seq_show_option_n(seq, "creator", (char *)&sbi->s_creator, 4);
if (sbi->s_type != cpu_to_be32(0x3f3f3f3f))
- seq_printf(seq, ",type=%.4s", (char *)&sbi->s_type);
+ seq_show_option_n(seq, "type", (char *)&sbi->s_type, 4);
seq_printf(seq, ",uid=%u,gid=%u", sbi->s_uid, sbi->s_gid);
if (sbi->s_file_umask != 0133)
seq_printf(seq, ",file_umask=%o", sbi->s_file_umask);
diff --git a/fs/hfsplus/bnode.c b/fs/hfsplus/bnode.c
index 1c42cc5..a1e9109 100644
--- a/fs/hfsplus/bnode.c
+++ b/fs/hfsplus/bnode.c
@@ -454,7 +454,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
page_cache_release(page);
goto fail;
}
- page_cache_release(page);
node->page[i] = page;
}

@@ -566,13 +565,11 @@ node_error:

void hfs_bnode_free(struct hfs_bnode *node)
{
-#if 0
int i;

for (i = 0; i < node->tree->pages_per_bnode; i++)
if (node->page[i])
page_cache_release(node->page[i]);
-#endif
kfree(node);
}

diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index bb62a5882..c8d6b4f 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -211,9 +211,9 @@ int hfsplus_show_options(struct seq_file *seq, struct vfsmount *mnt)
struct hfsplus_sb_info *sbi = HFSPLUS_SB(mnt->mnt_sb);

if (sbi->creator != HFSPLUS_DEF_CR_TYPE)
- seq_printf(seq, ",creator=%.4s", (char *)&sbi->creator);
+ seq_show_option_n(seq, "creator", (char *)&sbi->creator, 4);
if (sbi->type != HFSPLUS_DEF_CR_TYPE)
- seq_printf(seq, ",type=%.4s", (char *)&sbi->type);
+ seq_show_option_n(seq, "type", (char *)&sbi->type, 4);
seq_printf(seq, ",umask=%o,uid=%u,gid=%u", sbi->umask,
sbi->uid, sbi->gid);
if (sbi->part >= 0)
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 2f72da5..104e4d9 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -265,7 +265,7 @@ static int hostfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
size_t offset = strlen(root_ino) + 1;

if (strlen(root_path) > offset)
- seq_printf(seq, ",%s", root_path + offset);
+ seq_show_option(seq, root_path + offset, NULL);

return 0;
}
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 39c7059..e84393f 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -509,14 +509,15 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
* journal_clean_one_cp_list
*
* Find all the written-back checkpoint buffers in the given list and
- * release them.
+ * release them. If 'destroy' is set, clean all buffers unconditionally.
*
* Called with the journal locked.
* Called with j_list_lock held.
* Returns number of buffers reaped (for debug)
*/

-static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
+static int journal_clean_one_cp_list(struct journal_head *jh, bool destroy,
+ int *released)
{
struct journal_head *last_jh;
struct journal_head *next_jh = jh;
@@ -532,7 +533,10 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
next_jh = jh->b_cpnext;
/* Use trylock because of the ranking */
if (jbd_trylock_bh_state(jh2bh(jh))) {
- ret = __try_to_free_cp_buf(jh);
+ if (!destroy)
+ ret = __try_to_free_cp_buf(jh);
+ else
+ ret = __jbd2_journal_remove_checkpoint(jh) + 1;
if (ret) {
freed++;
if (ret == 2) {
@@ -558,13 +562,14 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
* journal_clean_checkpoint_list
*
* Find all the written-back checkpoint buffers in the journal and release them.
+ * If 'destroy' is set, release all buffers unconditionally.
*
* Called with the journal locked.
* Called with j_list_lock held.
* Returns number of buffers reaped (for debug)
*/

-int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
+int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy)
{
transaction_t *transaction, *last_transaction, *next_transaction;
int ret = 0;
@@ -580,7 +585,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
transaction = next_transaction;
next_transaction = transaction->t_cpnext;
ret += journal_clean_one_cp_list(transaction->
- t_checkpoint_list, &released);
+ t_checkpoint_list, destroy, &released);
/*
* This function only frees up some memory if possible so we
* dont have an obligation to finish processing. Bail out if
@@ -596,7 +601,7 @@ int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
* we can possibly see not yet submitted buffers on io_list
*/
ret += journal_clean_one_cp_list(transaction->
- t_checkpoint_io_list, &released);
+ t_checkpoint_io_list, destroy, &released);
if (need_resched())
goto out;
} while (transaction != last_transaction);
@@ -605,6 +610,28 @@ out:
}

/*
+ * Remove buffers from all checkpoint lists as journal is aborted and we just
+ * need to free memory
+ */
+void jbd2_journal_destroy_checkpoint(journal_t *journal)
+{
+ /*
+ * We loop because __jbd2_journal_clean_checkpoint_list() may abort
+ * early when it needs to reschedule.
+ */
+ while (1) {
+ spin_lock(&journal->j_list_lock);
+ if (!journal->j_checkpoint_transactions) {
+ spin_unlock(&journal->j_list_lock);
+ break;
+ }
+ __jbd2_journal_clean_checkpoint_list(journal, true);
+ spin_unlock(&journal->j_list_lock);
+ cond_resched();
+ }
+}
+
+/*
* journal_remove_checkpoint: called after a buffer has been committed
* to disk (either by being write-back flushed to disk, or being
* committed to the log).
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index bccb605..45ae6ec 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -340,6 +340,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
/* Do we need to erase the effects of a prior jbd2_journal_flush? */
if (journal->j_flags & JBD2_FLUSHED) {
jbd_debug(3, "super block updated\n");
+ mutex_lock(&journal->j_checkpoint_mutex);
/*
* We hold j_checkpoint_mutex so tail cannot change under us.
* We don't need any special data guarantees for writing sb
@@ -350,6 +351,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
journal->j_tail_sequence,
journal->j_tail,
WRITE_SYNC);
+ mutex_unlock(&journal->j_checkpoint_mutex);
} else {
jbd_debug(3, "superblock not updated\n");
}
@@ -433,7 +435,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
* frees some memory
*/
spin_lock(&journal->j_list_lock);
- __jbd2_journal_clean_checkpoint_list(journal);
+ __jbd2_journal_clean_checkpoint_list(journal, false);
spin_unlock(&journal->j_list_lock);

jbd_debug(3, "JBD2: commit phase 1\n");
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 9532dac..7b7607e 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1242,6 +1242,8 @@ static int journal_reset(journal_t *journal)
journal->j_errno);
journal->j_flags |= JBD2_FLUSHED;
} else {
+ /* Lock here to make assertions happy... */
+ mutex_lock(&journal->j_checkpoint_mutex);
/*
* Update log tail information. We use WRITE_FUA since new
* transaction will start reusing journal space and so we
@@ -1252,6 +1254,7 @@ static int journal_reset(journal_t *journal)
journal->j_tail_sequence,
journal->j_tail,
WRITE_FUA);
+ mutex_unlock(&journal->j_checkpoint_mutex);
}
return jbd2_journal_start_thread(journal);
}
@@ -1314,6 +1317,7 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
journal_superblock_t *sb = journal->j_superblock;
int ret;

+ BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
tail_block, tail_tid);

@@ -1344,6 +1348,7 @@ static void jbd2_mark_journal_empty(journal_t *journal)
{
journal_superblock_t *sb = journal->j_superblock;

+ BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
read_lock(&journal->j_state_lock);
jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
journal->j_tail_sequence);
@@ -1566,8 +1571,17 @@ int jbd2_journal_destroy(journal_t *journal)
while (journal->j_checkpoint_transactions != NULL) {
spin_unlock(&journal->j_list_lock);
mutex_lock(&journal->j_checkpoint_mutex);
- jbd2_log_do_checkpoint(journal);
+ err = jbd2_log_do_checkpoint(journal);
mutex_unlock(&journal->j_checkpoint_mutex);
+ /*
+ * If checkpointing failed, just free the buffers to avoid
+ * looping forever
+ */
+ if (err) {
+ jbd2_journal_destroy_checkpoint(journal);
+ spin_lock(&journal->j_list_lock);
+ break;
+ }
spin_lock(&journal->j_list_lock);
}

@@ -1577,9 +1591,11 @@ int jbd2_journal_destroy(journal_t *journal)
spin_unlock(&journal->j_list_lock);

if (journal->j_sb_buffer) {
- if (!is_journal_aborted(journal))
+ if (!is_journal_aborted(journal)) {
+ mutex_lock(&journal->j_checkpoint_mutex);
jbd2_mark_journal_empty(journal);
- else
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ } else
err = -EIO;
brelse(journal->j_sb_buffer);
}
@@ -1828,10 +1844,13 @@ int jbd2_journal_flush(journal_t *journal)
if (is_journal_aborted(journal))
return -EIO;

+ mutex_lock(&journal->j_checkpoint_mutex);
if (!err) {
err = jbd2_cleanup_journal_tail(journal);
- if (err < 0)
+ if (err < 0) {
+ mutex_unlock(&journal->j_checkpoint_mutex);
goto out;
+ }
err = 0;
}

@@ -1841,6 +1860,7 @@ int jbd2_journal_flush(journal_t *journal)
* commits of data to the journal will restore the current
* s_start value. */
jbd2_mark_journal_empty(journal);
+ mutex_unlock(&journal->j_checkpoint_mutex);
write_lock(&journal->j_state_lock);
J_ASSERT(!journal->j_running_transaction);
J_ASSERT(!journal->j_committing_transaction);
@@ -1882,8 +1902,12 @@ int jbd2_journal_wipe(journal_t *journal, int write)
write ? "Clearing" : "Ignoring");

err = jbd2_journal_skip_recovery(journal);
- if (write)
+ if (write) {
+ /* Lock to make assertions happy... */
+ mutex_lock(&journal->j_checkpoint_mutex);
jbd2_mark_journal_empty(journal);
+ mutex_unlock(&journal->j_checkpoint_mutex);
+ }

no_recovery:
return err;
diff --git a/fs/namei.c b/fs/namei.c
index c8b13a9..2c22655 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -398,6 +398,24 @@ void path_put(struct path *path)
}
EXPORT_SYMBOL(path_put);

+/**
+ * path_connected - Verify that a path->dentry is below path->mnt.mnt_root
+ * @path: path to verify
+ *
+ * Rename can sometimes move a file or directory outside of a bind
+ * mount; path_connected() allows those cases to be detected.
+ */
+static bool path_connected(const struct path *path)
+{
+ struct vfsmount *mnt = path->mnt;
+
+ /* Only bind mounts can have disconnected paths */
+ if (mnt->mnt_root == mnt->mnt_sb->s_root)
+ return true;
+
+ return is_subdir(path->dentry, mnt->mnt_root);
+}
+
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
@@ -933,6 +951,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
goto failed;
nd->path.dentry = parent;
nd->seq = seq;
+ if (unlikely(!path_connected(&nd->path)))
+ goto failed;
break;
}
if (!follow_up_rcu(&nd->path))
@@ -1027,7 +1047,7 @@ static void follow_mount(struct path *path)
}
}

-static void follow_dotdot(struct nameidata *nd)
+static int follow_dotdot(struct nameidata *nd)
{
if (!nd->root.mnt)
set_root(nd);
@@ -1043,6 +1063,10 @@ static void follow_dotdot(struct nameidata *nd)
/* rare case of legitimate dget_parent()... */
nd->path.dentry = dget_parent(nd->path.dentry);
dput(old);
+ if (unlikely(!path_connected(&nd->path))) {
+ path_put(&nd->path);
+ return -ENOENT;
+ }
break;
}
if (!follow_up(&nd->path))
@@ -1050,6 +1074,7 @@ static void follow_dotdot(struct nameidata *nd)
}
follow_mount(&nd->path);
nd->inode = nd->path.dentry->d_inode;
+ return 0;
}

/*
@@ -1241,7 +1266,7 @@ static inline int handle_dots(struct nameidata *nd, int type)
if (follow_dotdot_rcu(nd))
return -ECHILD;
} else
- follow_dotdot(nd);
+ return follow_dotdot(nd);
}
return 0;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e83786f..609a951 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1793,7 +1793,7 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
if (server->caps & NFS_CAP_POSIX_LOCK)
set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);

- if (opendata->o_arg.open_flags & O_EXCL) {
+ if ((opendata->o_arg.open_flags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) {
nfs4_exclusive_attrset(opendata, sattr);

nfs_fattr_init(opendata->o_res.f_attr);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 7ba6ac1..8e48ba5 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1411,6 +1411,7 @@ int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
int found, ret;
int set_maybe;
int dispatch_assert = 0;
+ int dispatched = 0;

if (!dlm_grab(dlm))
return DLM_MASTER_RESP_NO;
@@ -1617,13 +1618,16 @@ send_response:
mlog(ML_ERROR, "failed to dispatch assert master work\n");
response = DLM_MASTER_RESP_ERROR;
dlm_lockres_put(res);
+ } else {
+ dispatched = 1;
}
} else {
if (res)
dlm_lockres_put(res);
}

- dlm_put(dlm);
+ if (!dispatched)
+ dlm_put(dlm);
return response;
}

@@ -2041,7 +2045,6 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,


/* queue up work for dlm_assert_master_worker */
- dlm_grab(dlm); /* get an extra ref for the work item */
dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL);
item->u.am.lockres = res; /* already have a ref */
/* can optionally ignore node numbers higher than this node */
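
The new dispatched flag above (and the matching one in dlmrecovery.c below)
changes who drops the reference taken at the top of the handler: when the
assert-master work item is queued, the worker inherits that reference and puts
it when it finishes; only when nothing was queued does the handler put it
itself. A rough userspace sketch of that ownership handoff, names invented:

#include <stdbool.h>
#include <stdio.h>

struct ctx {
	int refcount;
};

static void ctx_get(struct ctx *c) { c->refcount++; }
static void ctx_put(struct ctx *c)
{
	if (--c->refcount == 0)
		printf("ctx freed\n");
}

static void worker(struct ctx *c)
{
	/* ... deferred work runs here ... */
	ctx_put(c);			/* drops the reference it inherited */
}

static void handler(struct ctx *c, bool can_queue)
{
	bool dispatched = false;

	ctx_get(c);			/* reference held while the handler runs */
	if (can_queue) {
		dispatched = true;	/* worker now owns the reference */
		worker(c);		/* stand-in for queueing the work item */
	}
	if (!dispatched)
		ctx_put(c);		/* nothing queued: drop it ourselves */
}

int main(void)
{
	struct ctx c = { .refcount = 1 };

	handler(&c, true);
	handler(&c, false);
	ctx_put(&c);
	return 0;
}
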
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index d15b071..0e5013e 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1689,6 +1689,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
unsigned int hash;
int master = DLM_LOCK_RES_OWNER_UNKNOWN;
u32 flags = DLM_ASSERT_MASTER_REQUERY;
+ int dispatched = 0;

if (!dlm_grab(dlm)) {
/* since the domain has gone away on this
@@ -1710,6 +1711,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
mlog_errno(-ENOMEM);
/* retry!? */
BUG();
+ } else {
+ dispatched = 1;
}
} else /* put.. in case we are not the master */
dlm_lockres_put(res);
@@ -1717,7 +1720,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
}
spin_unlock(&dlm->spinlock);

- dlm_put(dlm);
+ if (!dispatched)
+ dlm_put(dlm);
return master;
}

diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 231eab2..b5e457c 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3968,9 +3968,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
osb->dc_work_sequence = osb->dc_wake_sequence;

processed = osb->blocked_lock_count;
- while (processed) {
- BUG_ON(list_empty(&osb->blocked_lock_list));
-
+ /*
+ * blocked lock processing in this loop might call iput which can
+ * remove items off osb->blocked_lock_list. Downconvert up to
+ * 'processed' number of locks, but stop short if we had some
+ * removed in ocfs2_mark_lockres_freeing when downconverting.
+ */
+ while (processed && !list_empty(&osb->blocked_lock_list)) {
lockres = list_entry(osb->blocked_lock_list.next,
struct ocfs2_lock_res, l_blocked_list);
list_del_init(&lockres->l_blocked_list);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 4994f8b..5fe6b1e 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1583,8 +1583,8 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt)
seq_printf(s, ",localflocks,");

if (osb->osb_cluster_stack[0])
- seq_printf(s, ",cluster_stack=%.*s", OCFS2_STACK_LABEL_LEN,
- osb->osb_cluster_stack);
+ seq_show_option_n(s, "cluster_stack", osb->osb_cluster_stack,
+ OCFS2_STACK_LABEL_LEN);
if (opts & OCFS2_MOUNT_USRQUOTA)
seq_printf(s, ",usrquota");
if (opts & OCFS2_MOUNT_GRPQUOTA)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index d1bd6a9..de404f2 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -11,6 +11,7 @@
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
+#include <linux/security.h>

#include <asm/elf.h>
#include <asm/uaccess.h>
@@ -606,6 +607,7 @@ const struct file_operations proc_clear_refs_operations = {
struct pagemapread {
int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
u64 *buffer;
+ bool show_pfn;
};

#define PM_ENTRY_BYTES sizeof(u64)
@@ -654,14 +656,14 @@ static u64 swap_pte_to_pagemap_entry(pte_t pte)
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}

-static u64 pte_to_pagemap_entry(pte_t pte)
+static u64 pte_to_pagemap_entry(struct pagemapread *pm, pte_t pte)
{
u64 pme = 0;
if (is_swap_pte(pte))
pme = PM_PFRAME(swap_pte_to_pagemap_entry(pte))
| PM_PSHIFT(PAGE_SHIFT) | PM_SWAP;
else if (pte_present(pte))
- pme = PM_PFRAME(pte_pfn(pte))
+ pme = (pm->show_pfn ? PM_PFRAME(pte_pfn(pte)) : 0)
| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
return pme;
}
@@ -693,7 +695,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (vma && (vma->vm_start <= addr) &&
!is_vm_hugetlb_page(vma)) {
pte = pte_offset_map(pmd, addr);
- pfn = pte_to_pagemap_entry(*pte);
+ pfn = pte_to_pagemap_entry(pm, *pte);
/* unmap before userspace copy */
pte_unmap(pte);
}
@@ -708,11 +710,11 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
}

#ifdef CONFIG_HUGETLB_PAGE
-static u64 huge_pte_to_pagemap_entry(pte_t pte, int offset)
+static u64 huge_pte_to_pagemap_entry(struct pagemapread *pm, pte_t pte, int offset)
{
u64 pme = 0;
if (pte_present(pte))
- pme = PM_PFRAME(pte_pfn(pte) + offset)
+ pme = (pm->show_pfn ? PM_PFRAME(pte_pfn(pte) + offset) : 0)
| PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT;
return pme;
}
@@ -728,7 +730,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,

for (; addr != end; addr += PAGE_SIZE) {
int offset = (addr & ~hmask) >> PAGE_SHIFT;
- pfn = huge_pte_to_pagemap_entry(*pte, offset);
+ pfn = huge_pte_to_pagemap_entry(pm, *pte, offset);
err = add_to_pagemap(addr, pfn, pm);
if (err)
return err;
@@ -792,6 +794,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!count)
goto out_task;

+ /* do not disclose physical addresses: attack vector */
+ pm.show_pfn = !security_capable(&init_user_ns, file->f_cred,
+ CAP_SYS_ADMIN);
+
pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
ret = -ENOMEM;
@@ -864,19 +870,9 @@ out:
return ret;
}

-static int pagemap_open(struct inode *inode, struct file *file)
-{
- /* do not disclose physical addresses to unprivileged
- userspace (closes a rowhammer attack vector) */
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- return 0;
-}
-
const struct file_operations proc_pagemap_operations = {
.llseek = mem_lseek, /* borrow this */
.read = pagemap_read,
- .open = pagemap_open,
};
#endif /* CONFIG_PROC_PAGE_MONITOR */
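
The effect of the pagemap change can be checked from user space: the file stays
readable, but without CAP_SYS_ADMIN the PFN bits now read back as zero. A
minimal reader, assuming the documented layout (one u64 per page, PFN in bits
0-54, present bit 63); error handling kept to a bare minimum:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	static char page[1];		/* a page of our own to look up */
	uintptr_t vaddr = (uintptr_t)page;
	uint64_t entry = 0;
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	page[0] = 1;			/* touch it so it is present */
	if (pread(fd, &entry, sizeof(entry), (vaddr / psize) * 8) != sizeof(entry))
		return 1;
	close(fd);
	/* unprivileged after this change: present=1 but pfn=0 */
	printf("present=%d pfn=%#llx\n", (int)(entry >> 63),
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	return 0;
}
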

diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index b367581..c2b06d4 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -365,7 +365,8 @@ xfs_end_bio(
xfs_ioend_t *ioend = bio->bi_private;

ASSERT(atomic_read(&bio->bi_cnt) >= 1);
- ioend->io_error = test_bit(BIO_UPTODATE, &bio->bi_flags) ? 0 : error;
+ if (!ioend->io_error && !test_bit(BIO_UPTODATE, &bio->bi_flags))
+ ioend->io_error = error;

/* Toss bio and pass work off to an xfsdatad thread */
bio->bi_private = NULL;
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index 9c7d22f..c782906 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -111,8 +111,15 @@ typedef struct xfs_attr_leaf_name_remote {
typedef struct xfs_attr_leafblock {
xfs_attr_leaf_hdr_t hdr; /* constant-structure header block */
xfs_attr_leaf_entry_t entries[1]; /* sorted on key, not name */
- xfs_attr_leaf_name_local_t namelist; /* grows from bottom of buf */
- xfs_attr_leaf_name_remote_t valuelist; /* grows from bottom of buf */
+ /*
+ * The rest of the block contains the following structures after the
+ * leaf entries, growing from the bottom up. The variables are never
+ * referenced and defining them can actually make gcc optimize away
+ * accesses to the 'entries' array above index 0, so don't do that.
+ *
+ * xfs_attr_leaf_name_local_t namelist;
+ * xfs_attr_leaf_name_remote_t valuelist;
+ */
} xfs_attr_leafblock_t;

/*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 8a89949..90ccd1c 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -533,9 +533,9 @@ xfs_showargs(
seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);

if (mp->m_logname)
- seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
+ seq_show_option(m, MNTOPT_LOGDEV, mp->m_logname);
if (mp->m_rtname)
- seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
+ seq_show_option(m, MNTOPT_RTDEV, mp->m_rtname);

if (mp->m_dalign > 0)
seq_printf(m, "," MNTOPT_SUNIT "=%d",
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h
index fffdf00..2fe0d22 100644
--- a/include/linux/fsl_devices.h
+++ b/include/linux/fsl_devices.h
@@ -83,6 +83,7 @@ struct fsl_usb2_platform_data {

unsigned suspended:1;
unsigned already_suspended:1;
+ unsigned has_fsl_erratum_a005275:1;

/* register save area for suspend/resume */
u32 pm_command;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 4920c55..fbfd0c4 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -980,8 +980,9 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
extern void jbd2_journal_commit_transaction(journal_t *);

/* Checkpoint list management */
-int __jbd2_journal_clean_checkpoint_list(journal_t *journal);
+int __jbd2_journal_clean_checkpoint_list(journal_t *journal, bool destroy);
int __jbd2_journal_remove_checkpoint(struct journal_head *);
+void jbd2_journal_destroy_checkpoint(journal_t *journal);
void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);


diff --git a/include/linux/pci.h b/include/linux/pci.h
index fe76a74..f0c4495 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -176,6 +176,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
/* Provide indication device is assigned by a Virtual Machine Manager */
PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
+ /* Get VPD from function 0 VPD */
+ PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
};

enum pci_irq_reroute_variant {
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 0b69a46..8803d6e 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -122,6 +122,41 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int);
int seq_open_private(struct file *, const struct seq_operations *, int);
int seq_release_private(struct inode *, struct file *);

+/**
+ * seq_show_options - display mount options with appropriate escapes.
+ * @m: the seq_file handle
+ * @name: the mount option name
+ * @value: the mount option name's value, can be NULL
+ */
+static inline void seq_show_option(struct seq_file *m, const char *name,
+ const char *value)
+{
+ seq_putc(m, ',');
+ seq_escape(m, name, ",= \t\n\\");
+ if (value) {
+ seq_putc(m, '=');
+ seq_escape(m, value, ", \t\n\\");
+ }
+}
+
+/**
+ * seq_show_option_n - display mount options with appropriate escapes
+ * where @value must be a specific length.
+ * @m: the seq_file handle
+ * @name: the mount option name
+ * @value: the mount option name's value, cannot be NULL
+ * @length: the length of @value to display
+ *
+ * This is a macro since this uses "length" to define the size of the
+ * stack buffer.
+ */
+#define seq_show_option_n(m, name, value, length) { \
+ char val_buf[length + 1]; \
+ strncpy(val_buf, value, length); \
+ val_buf[length] = '\0'; \
+ seq_show_option(m, name, val_buf); \
+}
+
#define SEQ_START_TOKEN ((void *)1)

/*
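
The point of the two seq_show_option helpers (used by the ocfs2, xfs, cgroup
and selinux hunks in this diff) is that option values containing delimiter
characters can no longer smuggle extra options into the seq_file output. A
simplified userspace stand-in (not the seq_file code) showing the same
escaping idea:

#include <stdio.h>
#include <string.h>

static void escape_puts(FILE *out, const char *s, const char *esc)
{
	for (; *s; s++) {
		if (strchr(esc, *s))
			fputc('\\', out);	/* backslash-escape delimiters */
		fputc(*s, out);
	}
}

static void show_option(FILE *out, const char *name, const char *value)
{
	fputc(',', out);
	escape_puts(out, name, ",= \t\n\\");
	if (value) {
		fputc('=', out);
		escape_puts(out, value, ", \t\n\\");
	}
}

int main(void)
{
	/* a hostile value cannot inject a second option */
	show_option(stdout, "name", "evil,release_agent=/tmp/x");
	fputc('\n', stdout);
	return 0;
}
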
diff --git a/include/net/ip.h b/include/net/ip.h
index 1ee535b..2c7c5a9 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -138,6 +138,7 @@ static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
}

/* datagram.c */
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
extern int ip4_datagram_connect(struct sock *sk,
struct sockaddr *uaddr, int addr_len);

diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5735a0f..3907358 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -217,7 +217,7 @@ extern void inet6_rt_notify(int event, struct rt6_info *rt,
struct nl_info *info);

extern void fib6_run_gc(unsigned long expires,
- struct net *net);
+ struct net *net, bool force);

extern void fib6_gc_cleanup(void);

diff --git a/ipc/msg.c b/ipc/msg.c
index 25f1a61..391e3e0 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -198,6 +198,15 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
return retval;
}

+ msq->q_stime = msq->q_rtime = 0;
+ msq->q_ctime = get_seconds();
+ msq->q_cbytes = msq->q_qnum = 0;
+ msq->q_qbytes = ns->msg_ctlmnb;
+ msq->q_lspid = msq->q_lrpid = 0;
+ INIT_LIST_HEAD(&msq->q_messages);
+ INIT_LIST_HEAD(&msq->q_receivers);
+ INIT_LIST_HEAD(&msq->q_senders);
+
/*
* ipc_addid() locks msq
*/
@@ -208,15 +217,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
return id;
}

- msq->q_stime = msq->q_rtime = 0;
- msq->q_ctime = get_seconds();
- msq->q_cbytes = msq->q_qnum = 0;
- msq->q_qbytes = ns->msg_ctlmnb;
- msq->q_lspid = msq->q_lrpid = 0;
- INIT_LIST_HEAD(&msq->q_messages);
- INIT_LIST_HEAD(&msq->q_receivers);
- INIT_LIST_HEAD(&msq->q_senders);
-
msg_unlock(msq);

return msq->q_perm.id;
diff --git a/ipc/sem.c b/ipc/sem.c
index 5215a81..b31c3ef 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -314,14 +314,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
return retval;
}

- id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
- if (id < 0) {
- security_sem_free(sma);
- ipc_rcu_putref(sma);
- return id;
- }
- ns->used_sems += nsems;
-
sma->sem_base = (struct sem *) &sma[1];

for (i = 0; i < nsems; i++)
@@ -332,6 +324,15 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
+
+ id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
+ if (id < 0) {
+ security_sem_free(sma);
+ ipc_rcu_putref(sma);
+ return id;
+ }
+ ns->used_sems += nsems;
+
sem_unlock(sma);

return sma->sem_perm.id;
@@ -1606,16 +1607,27 @@ void exit_sem(struct task_struct *tsk)
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
struct sem_undo, list_proc);
- if (&un->list_proc == &ulp->list_proc)
- semid = -1;
- else
- semid = un->semid;
+ if (&un->list_proc == &ulp->list_proc) {
+ /*
+ * We must wait for freeary() before freeing this ulp,
+ * in case we raced with last sem_undo. There is a small
+ * possibility where we exit while freeary() didn't
+ * finish unlocking sem_undo_list.
+ */
+ spin_unlock_wait(&ulp->lock);
+ rcu_read_unlock();
+ break;
+ }
+ spin_lock(&ulp->lock);
+ semid = un->semid;
+ spin_unlock(&ulp->lock);
rcu_read_unlock();

+ /* exit_sem raced with IPC_RMID, nothing to do */
if (semid == -1)
- break;
+ continue;

- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
+ sma = sem_lock_check(tsk->nsproxy->ipc_ns, semid);

/* exit_sem raced with IPC_RMID, nothing to do */
if (IS_ERR(sma))
diff --git a/ipc/shm.c b/ipc/shm.c
index 326a20b..16b1f9e 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -498,12 +498,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
if (IS_ERR(file))
goto no_file;

- id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
- if (id < 0) {
- error = id;
- goto no_id;
- }
-
shp->shm_cprid = task_tgid_vnr(current);
shp->shm_lprid = 0;
shp->shm_atim = shp->shm_dtim = 0;
@@ -512,6 +506,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
shp->shm_nattch = 0;
shp->shm_file = file;
shp->shm_creator = current;
+
+ id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+ if (id < 0) {
+ error = id;
+ goto no_id;
+ }
+
/*
* shmid gets reported as "inode#" in /proc/pid/maps.
* proc-ps tools use this. Changing this will break them.
diff --git a/ipc/util.c b/ipc/util.c
index 75261a3..e4c9377 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -264,6 +264,10 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
rcu_read_lock();
spin_lock(&new->lock);

+ current_euid_egid(&euid, &egid);
+ new->cuid = new->uid = euid;
+ new->gid = new->cgid = egid;
+
err = idr_get_new(&ids->ipcs_idr, new, &id);
if (err) {
spin_unlock(&new->lock);
@@ -273,10 +277,6 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)

ids->in_use++;

- current_euid_egid(&euid, &egid);
- new->cuid = new->uid = euid;
- new->gid = new->cgid = egid;
-
new->seq = ids->seq++;
if(ids->seq > ids->seq_max)
ids->seq = 0;
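
All four ipc/ hunks enforce the same rule: finish initialising the object,
including its ownership fields, before ipc_addid() publishes it in the idr,
because lookups can find it the instant it becomes visible. A toy userspace
illustration of initialise-then-publish, using a pthread mutex and invented
names:

#include <pthread.h>
#include <stdio.h>

struct obj {
	int id;
	int owner_uid;		/* must be valid before the object is visible */
};

static struct obj *table[16];
static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

static int publish(struct obj *o)
{
	int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < 16; i++) {
		if (!table[i]) {
			o->id = i;
			table[i] = o;	/* visible to other threads from here */
			break;
		}
	}
	pthread_mutex_unlock(&table_lock);
	return i < 16 ? i : -1;
}

int main(void)
{
	static struct obj o;

	o.owner_uid = 1000;		/* initialise first ... */
	printf("id=%d\n", publish(&o));	/* ... publish second */
	return 0;
}
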
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index eafb6dd..ec64a18 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1053,15 +1053,16 @@ static int cgroup_show_options(struct seq_file *seq, struct vfsmount *vfs)

mutex_lock(&cgroup_mutex);
for_each_subsys(root, ss)
- seq_printf(seq, ",%s", ss->name);
+ seq_show_option(seq, ss->name, NULL);
if (test_bit(ROOT_NOPREFIX, &root->flags))
seq_puts(seq, ",noprefix");
if (strlen(root->release_agent_path))
- seq_printf(seq, ",release_agent=%s", root->release_agent_path);
+ seq_show_option(seq, "release_agent",
+ root->release_agent_path);
if (clone_children(&root->top_cgroup))
seq_puts(seq, ",clone_children");
if (strlen(root->name))
- seq_printf(seq, ",name=%s", root->name);
+ seq_show_option(seq, "name", root->name);
mutex_unlock(&cgroup_mutex);
return 0;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4277095..f475286 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3942,12 +3942,20 @@ static const struct file_operations perf_fops = {
* to user-space before waking everybody up.
*/

+static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
+{
+ /* only the parent has fasync state */
+ if (event->parent)
+ event = event->parent;
+ return &event->fasync;
+}
+
void perf_event_wakeup(struct perf_event *event)
{
ring_buffer_wakeup(event);

if (event->pending_kill) {
- kill_fasync(&event->fasync, SIGIO, event->pending_kill);
+ kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
@@ -4924,7 +4932,7 @@ static int __perf_event_overflow(struct perf_event *event,
else
perf_event_output(event, data, regs);

- if (event->fasync && event->pending_kill) {
+ if (*perf_event_fasync(event) && event->pending_kill) {
event->pending_wakeup = 1;
irq_work_queue(&event->pending);
}
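
perf_event_fasync() centralises the rule that inherited child events carry no
fasync state of their own and must signal through the parent event user space
actually owns. The shape of that accessor in miniature, with toy types:

#include <stdio.h>

struct event {
	struct event *parent;	/* NULL for the original, user-visible event */
	int fasync_state;
};

static int *event_fasync(struct event *e)
{
	if (e->parent)		/* children delegate to the parent's state */
		e = e->parent;
	return &e->fasync_state;
}

int main(void)
{
	struct event parent = { NULL, 0 };
	struct event child = { &parent, 0 };

	*event_fasync(&child) = 42;
	printf("parent sees %d\n", parent.fasync_state);	/* 42 */
	return 0;
}
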
diff --git a/net/Kconfig b/net/Kconfig
index a073148..946e17a 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -250,7 +250,7 @@ menu "Network testing"

config NET_PKTGEN
tristate "Packet Generator (USE WITH CAUTION)"
- depends on PROC_FS
+ depends on INET && PROC_FS
---help---
This module will inject preconfigured packets, at a configurable
rate, out of a given interface. It is used for network interface
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 6f54d0a..a496313 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -128,6 +128,35 @@ out_noerr:
goto out;
}

+static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+
+ if (skb->peeked)
+ return skb;
+
+ /* We have to unshare an skb before modifying it. */
+ if (!skb_shared(skb))
+ goto done;
+
+ nskb = skb_clone(skb, GFP_ATOMIC);
+ if (!nskb)
+ return ERR_PTR(-ENOMEM);
+
+ skb->prev->next = nskb;
+ skb->next->prev = nskb;
+ nskb->prev = skb->prev;
+ nskb->next = skb->next;
+
+ consume_skb(skb);
+ skb = nskb;
+
+done:
+ skb->peeked = 1;
+
+ return skb;
+}
+
/**
* __skb_recv_datagram - Receive a datagram skbuff
* @sk: socket
@@ -160,7 +189,9 @@ out_noerr:
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *err)
{
+ struct sk_buff_head *queue = &sk->sk_receive_queue;
struct sk_buff *skb;
+ unsigned long cpu_flags;
long timeo;
/*
* Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -179,15 +210,17 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
* Look at current nfs client by the way...
* However, this function was correct in any case. 8)
*/
- unsigned long cpu_flags;
- struct sk_buff_head *queue = &sk->sk_receive_queue;
-
spin_lock_irqsave(&queue->lock, cpu_flags);
skb = skb_peek(queue);
if (skb) {
*peeked = skb->peeked;
if (flags & MSG_PEEK) {
- skb->peeked = 1;
+
+ skb = skb_set_peeked(skb);
+ error = PTR_ERR(skb);
+ if (IS_ERR(skb))
+ goto unlock_err;
+
atomic_inc(&skb->users);
} else
__skb_unlink(skb, queue);
@@ -206,6 +239,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,

return NULL;

+unlock_err:
+ spin_unlock_irqrestore(&queue->lock, cpu_flags);
no_packet:
*err = error;
return NULL;
@@ -656,7 +691,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
if (likely(!sum)) {
if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
netdev_rx_csum_fault(skb->dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (!skb_shared(skb))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
return sum;
}
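
skb_set_peeked() applies the usual unshare-before-modify rule: a shared skb
must not have its peeked flag flipped in place, so a private clone is spliced
into the queue first. A small userspace sketch of the same clone-if-shared
pattern, toy types only and no locking:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	int refcount;
	int peeked;
};

static struct buf *buf_set_peeked(struct buf *b)
{
	struct buf *nb;

	if (b->peeked || b->refcount == 1) {
		b->peeked = 1;		/* sole owner: safe to modify in place */
		return b;
	}
	nb = malloc(sizeof(*nb));	/* shared: take a private copy first */
	if (!nb)
		return NULL;
	memcpy(nb, b, sizeof(*nb));
	nb->refcount = 1;
	b->refcount--;			/* drop our share of the original */
	nb->peeked = 1;
	return nb;
}

int main(void)
{
	struct buf shared = { .refcount = 2, .peeked = 0 };
	struct buf *mine = buf_set_peeked(&shared);

	if (!mine)
		return 1;
	/* the other holder of 'shared' never sees our flag */
	printf("original peeked=%d, clone peeked=%d\n", shared.peeked, mine->peeked);
	if (mine != &shared)
		free(mine);
	return 0;
}
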
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 6af54f2..c7caf3e 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -594,15 +594,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
{
int idx = 0;
struct fib_rule *rule;
+ int err = 0;

rcu_read_lock();
list_for_each_entry_rcu(rule, &ops->rules_list, list) {
if (idx < cb->args[1])
goto skip;

- if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq, RTM_NEWRULE,
- NLM_F_MULTI, ops) < 0)
+ err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, RTM_NEWRULE,
+ NLM_F_MULTI, ops);
+ if (err < 0)
break;
skip:
idx++;
@@ -611,7 +613,7 @@ skip:
cb->args[1] = idx;
rules_ops_put(ops);

- return skb->len;
+ return err;
}

static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
@@ -627,7 +629,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
if (ops == NULL)
return -EAFNOSUPPORT;

- return dump_rules(skb, cb, ops);
+ dump_rules(skb, cb, ops);
+
+ return skb->len;
}

rcu_read_lock();
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9dd65a9..7879b2f 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3481,8 +3481,10 @@ static int pktgen_thread_worker(void *arg)
pktgen_rem_thread(t);

/* Wait for kthread_stop */
- while (!kthread_should_stop()) {
+ for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (kthread_should_stop())
+ break;
schedule();
}
__set_current_state(TASK_RUNNING);
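
The pktgen loop change closes a missed-wakeup window: the thread must mark
itself as about to sleep before re-testing kthread_should_stop(), otherwise a
stop request arriving in between is lost and the thread sleeps forever. The
same discipline in userspace terms is "test the predicate under the lock, then
wait"; a pthread sketch (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int should_stop;

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!should_stop)		/* checked while holding the lock */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	pthread_mutex_lock(&lock);
	should_stop = 1;		/* cannot race with the check above */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);
	printf("worker stopped\n");
	return 0;
}
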
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index ec07510..ffb8b6e 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -20,7 +20,7 @@
#include <net/route.h>
#include <net/tcp_states.h>

-int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)

sk_dst_reset(sk);

- lock_sock(sk);
-
oif = sk->sk_bound_dev_if;
saddr = inet->inet_saddr;
if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -81,7 +79,17 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sk_dst_set(sk, &rt->dst);
err = 0;
out:
- release_sock(sk);
return err;
}
+EXPORT_SYMBOL(__ip4_datagram_connect);
+
+int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ int res;
+
+ lock_sock(sk);
+ res = __ip4_datagram_connect(sk, uaddr, addr_len);
+ release_sock(sk);
+ return res;
+}
EXPORT_SYMBOL(ip4_datagram_connect);
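
The datagram_connect split follows a common pattern: the body moves into an
unlocked __helper so the IPv6 path, which already takes the socket lock once,
can reuse it without double-locking, while the exported entry point keeps the
lock/unlock pair. A toy version of the shape, with invented names and a
pthread mutex standing in for the socket lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

static int __do_connect(int addr)
{
	/* caller must hold sock_lock */
	printf("connecting to %d\n", addr);
	return 0;
}

static int do_connect(int addr)
{
	int res;

	pthread_mutex_lock(&sock_lock);
	res = __do_connect(addr);
	pthread_mutex_unlock(&sock_lock);
	return res;
}

static int do_connect_v6_mapped(int addr)
{
	int res;

	pthread_mutex_lock(&sock_lock);	/* the v6 path takes the lock once ... */
	res = __do_connect(addr);	/* ... and reuses the unlocked core */
	pthread_mutex_unlock(&sock_lock);
	return res;
}

int main(void)
{
	do_connect(4);
	do_connect_v6_mapped(6);
	return 0;
}
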
diff --git a/net/ipv6/Makefile b/net/ipv6/Makefile
index 4b20d56..8b25fbb 100644
--- a/net/ipv6/Makefile
+++ b/net/ipv6/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
obj-$(CONFIG_IPV6_SIT) += sit.o
obj-$(CONFIG_IPV6_TUNNEL) += ip6_tunnel.o

-obj-y += addrconf_core.o exthdrs_core.o output_core.o
+obj-y += addrconf_core.o exthdrs_core.o
+obj-$(CONFIG_INET) += output_core.o

obj-$(subst m,y,$(CONFIG_IPV6)) += inet6_hashtables.o
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3afdd78..006867d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4321,6 +4321,21 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
return ret;
}

+static
+int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct inet6_dev *idev = ctl->extra1;
+ int min_mtu = IPV6_MIN_MTU;
+ struct ctl_table lctl;
+
+ lctl = *ctl;
+ lctl.extra1 = &min_mtu;
+ lctl.extra2 = idev ? &idev->dev->mtu : NULL;
+
+ return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
+}
+
static void dev_disable_change(struct inet6_dev *idev)
{
if (!idev || !idev->dev)
@@ -4421,7 +4436,7 @@ static struct addrconf_sysctl_table
.data = &ipv6_devconf.mtu6,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = addrconf_sysctl_mtu,
},
{
.procname = "accept_ra",
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 33719b7..67f3632 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -38,7 +38,7 @@ static inline int ipv6_mapped_addr_any(const struct in6_addr *a)
return (ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0));
}

-int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
struct inet_sock *inet = inet_sk(sk);
@@ -54,7 +54,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
if (usin->sin6_family == AF_INET) {
if (__ipv6_only_sock(sk))
return -EAFNOSUPPORT;
- err = ip4_datagram_connect(sk, uaddr, addr_len);
+ err = __ip4_datagram_connect(sk, uaddr, addr_len);
goto ipv4_connected;
}

@@ -97,9 +97,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
sin.sin_addr.s_addr = daddr->s6_addr32[3];
sin.sin_port = usin->sin6_port;

- err = ip4_datagram_connect(sk,
- (struct sockaddr*) &sin,
- sizeof(sin));
+ err = __ip4_datagram_connect(sk,
+ (struct sockaddr *) &sin,
+ sizeof(sin));

ipv4_connected:
if (err)
@@ -203,6 +203,16 @@ out:
return err;
}

+int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+{
+ int res;
+
+ lock_sock(sk);
+ res = __ip6_datagram_connect(sk, uaddr, addr_len);
+ release_sock(sk);
+ return res;
+}
+
void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
__be16 port, u32 info, u8 *payload)
{
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ad62afc..785e62d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1473,27 +1473,28 @@ static int fib6_age(struct rt6_info *rt, void *arg)

static DEFINE_SPINLOCK(fib6_gc_lock);

-void fib6_run_gc(unsigned long expires, struct net *net)
+void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
- if (expires != ~0UL) {
+ unsigned long now;
+
+ if (force) {
spin_lock_bh(&fib6_gc_lock);
- gc_args.timeout = expires ? (int)expires :
- net->ipv6.sysctl.ip6_rt_gc_interval;
- } else {
- if (!spin_trylock_bh(&fib6_gc_lock)) {
- mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
- return;
- }
- gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
+ } else if (!spin_trylock_bh(&fib6_gc_lock)) {
+ mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
+ return;
}
+ gc_args.timeout = expires ? (int)expires :
+ net->ipv6.sysctl.ip6_rt_gc_interval;

gc_args.more = icmp6_dst_gc();

fib6_clean_all(net, fib6_age, 0, NULL);
+ now = jiffies;
+ net->ipv6.ip6_rt_last_gc = now;

if (gc_args.more)
mod_timer(&net->ipv6.ip6_fib_timer,
- round_jiffies(jiffies
+ round_jiffies(now
+ net->ipv6.sysctl.ip6_rt_gc_interval));
else
del_timer(&net->ipv6.ip6_fib_timer);
@@ -1502,7 +1503,7 @@ void fib6_run_gc(unsigned long expires, struct net *net)

static void fib6_gc_timer_cb(unsigned long arg)
{
- fib6_run_gc(0, (struct net *)arg);
+ fib6_run_gc(0, (struct net *)arg, true);
}

static int __net_init fib6_net_init(struct net *net)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index f96c96f..99ee86d 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -550,7 +550,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)

if (it->cache == &mrt->mfc6_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
- else if (it->cache == mrt->mfc6_cache_array)
+ else if (it->cache == &mrt->mfc6_cache_array[it->ct])
read_unlock(&mrt_lock);
}

diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 884d45f..39836da 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1743,11 +1743,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_CHANGEADDR:
neigh_changeaddr(&nd_tbl, dev);
- fib6_run_gc(~0UL, net);
+ fib6_run_gc(0, net, false);
break;
case NETDEV_DOWN:
neigh_ifdown(&nd_tbl, dev);
- fib6_run_gc(~0UL, net);
+ fib6_run_gc(0, net, false);
break;
case NETDEV_NOTIFY_PEERS:
ndisc_send_unsol_na(dev);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index d89d1a6..3a8776d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1159,7 +1159,6 @@ static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),

static int ip6_dst_gc(struct dst_ops *ops)
{
- unsigned long now = jiffies;
struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
@@ -1169,13 +1168,12 @@ static int ip6_dst_gc(struct dst_ops *ops)
int entries;

entries = dst_entries_get_fast(ops);
- if (time_after(rt_last_gc + rt_min_interval, now) &&
+ if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
entries <= rt_max_size)
goto out;

net->ipv6.ip6_rt_gc_expire++;
- fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
- net->ipv6.ip6_rt_last_gc = now;
+ fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
entries = dst_entries_get_slow(ops);
if (entries < ops->gc_thresh)
net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
@@ -2726,7 +2724,7 @@ int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
net = (struct net *)ctl->extra1;
delay = net->ipv6.sysctl.flush_delay;
proc_dointvec(ctl, write, buffer, lenp, ppos);
- fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+ fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}

diff --git a/net/key/af_key.c b/net/key/af_key.c
index dc8d7ef..8636f10 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -220,7 +220,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
#define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4
-static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
+static int pfkey_broadcast(struct sk_buff *skb,
int broadcast_flags, struct sock *one_sk,
struct net *net)
{
@@ -246,7 +246,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
* socket.
*/
if (pfk->promisc)
- pfkey_broadcast_one(skb, &skb2, allocation, sk);
+ pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);

/* the exact target will be processed later */
if (sk == one_sk)
@@ -261,7 +261,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
continue;
}

- err2 = pfkey_broadcast_one(skb, &skb2, allocation, sk);
+ err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk);

/* Error is cleared after successful sending to at least one
* registered KM */
@@ -271,7 +271,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
rcu_read_unlock();

if (one_sk != NULL)
- err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
+ err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);

kfree_skb(skb2);
kfree_skb(skb);
@@ -294,7 +294,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
hdr->sadb_msg_errno = rc;
- pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = NULL;
}
@@ -335,7 +335,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
sizeof(uint64_t));

- pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));

return 0;
}
@@ -1361,7 +1361,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_

xfrm_state_put(x);

- pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
+ pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);

return 0;
}
@@ -1449,7 +1449,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->pid;

- pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
+ pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));

return 0;
}
@@ -1566,7 +1566,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));

return 0;
}
@@ -1667,7 +1667,7 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
return -ENOBUFS;
}

- pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk, sock_net(sk));
+ pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));

return 0;
}
@@ -1686,7 +1686,7 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));

- return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
+ return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
}

static int key_notify_sa_flush(const struct km_event *c)
@@ -1707,7 +1707,7 @@ static int key_notify_sa_flush(const struct km_event *c)
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;

- pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);

return 0;
}
@@ -1768,7 +1768,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_pid;

if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;

@@ -1829,7 +1829,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
new_hdr->sadb_msg_errno = 0;
}

- pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
+ pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
return 0;
}

@@ -2160,7 +2160,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->pid;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
+ pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
return 0;

}
@@ -2386,7 +2386,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
+ pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
err = 0;

out:
@@ -2639,7 +2639,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
out_hdr->sadb_msg_pid = pfk->dump.msg_pid;

if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;

@@ -2690,7 +2690,7 @@ static int key_notify_policy_flush(const struct km_event *c)
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
return 0;

}
@@ -2756,7 +2756,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
void *ext_hdrs[SADB_EXT_MAX];
int err;

- pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
+ pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));

memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@ -2962,7 +2962,7 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
out_hdr->sadb_msg_seq = 0;
out_hdr->sadb_msg_pid = 0;

- pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+ pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
return 0;
}

@@ -3134,7 +3134,7 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
xfrm_ctx->ctx_len);
}

- return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
}

static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@ -3332,7 +3332,7 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
n_port->sadb_x_nat_t_port_port = sport;
n_port->sadb_x_nat_t_port_reserved = 0;

- return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
}

#ifdef CONFIG_NET_KEY_MIGRATE
@@ -3524,7 +3524,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
}

/* broadcast migrate message to sockets */
- pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
+ pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);

return 0;

diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 65df296..91826b6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -278,9 +278,6 @@ ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx)
if (tx->sdata->vif.type == NL80211_IFTYPE_WDS)
return TX_CONTINUE;

- if (tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
- return TX_CONTINUE;
-
if (tx->flags & IEEE80211_TX_PS_BUFFERED)
return TX_CONTINUE;

diff --git a/net/rds/connection.c b/net/rds/connection.c
index 9e07c75..be3eecd 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -178,6 +178,12 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
}
}

+ if (trans == NULL) {
+ kmem_cache_free(rds_conn_slab, conn);
+ conn = ERR_PTR(-ENODEV);
+ goto out;
+ }
+
conn->c_trans = trans;

ret = trans->conn_alloc(conn, gfp);
diff --git a/net/rds/info.c b/net/rds/info.c
index f1c016c..a4adb39 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,

/* check for all kinds of wrapping and the like */
start = (unsigned long)optval;
- if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) {
+ if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
ret = -EINVAL;
goto out;
}
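
The rds_info_getsockopt() change swaps a wrap-around test for one that cannot
overflow in the first place: instead of hoping "len + PAGE_SIZE - 1" wraps
below len, it bounds len against INT_MAX - PAGE_SIZE + 1, rejecting the same
oversized values without relying on overflow behaviour. A standalone sketch
with a toy page size:

#include <limits.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096

static int len_ok(int len)
{
	/* reject lengths that would overflow when rounded up to a page */
	if (len < 0 || len > INT_MAX - TOY_PAGE_SIZE + 1)
		return 0;
	return 1;
}

int main(void)
{
	int samples[] = { 0, 4096, INT_MAX - 4095, INT_MAX - 4094, INT_MAX };
	unsigned i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("len=%d ok=%d\n", samples[i], len_ok(samples[i]));
	return 0;
}
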
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 6d56eec..c3b8549 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -587,9 +587,7 @@ out:
return err;
no_route:
kfree_skb(nskb);
-
- if (asoc)
- IP_INC_STATS(&init_net, IPSTATS_MIB_OUTNOROUTES);
+ IP_INC_STATS(&init_net, IPSTATS_MIB_OUTNOROUTES);

/* FIXME: Returning the 'err' will effect all the associations
* associated with a socket, although only one of the paths of the
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 76388b0..581c06a 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -681,7 +681,7 @@ static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds,
* outstanding data and rely on the retransmission limit be reached
* to shutdown the association.
*/
- if (t->asoc->state != SCTP_STATE_SHUTDOWN_PENDING)
+ if (t->asoc->state < SCTP_STATE_SHUTDOWN_PENDING)
t->asoc->overall_error_count = 0;

/* Clear the hb_sent flag to signal that we had a good
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index d77a4f0..3a82fec 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -811,6 +811,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
{
struct socket *sock = transport->sock;
struct sock *sk = transport->inet;
+ struct rpc_xprt *xprt = &transport->xprt;

if (sk == NULL)
return;
@@ -824,6 +825,7 @@ static void xs_reset_transport(struct sock_xprt *transport)
sk->sk_user_data = NULL;

xs_restore_old_callbacks(transport, sk);
+ xprt_clear_connected(xprt);
write_unlock_bh(&sk->sk_callback_lock);

sk->sk_no_check = 0;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 058941e..580ecf2 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1541,6 +1541,8 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
u32 new_ref = new_tport->ref;
struct tipc_msg *msg = buf_msg(buf);

+ security_sk_clone(sock->sk, new_sock->sk);
+
lock_sock(new_sk);

/*
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 3346f42..4a19a7f 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -125,7 +125,7 @@ my $ksource = $ARGV[0];
my $kconfig = $ARGV[1];
my $lsmod_file = $ENV{'LSMOD'};

-my @makefiles = `find $ksource -name Makefile 2>/dev/null`;
+my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
chomp @makefiles;

my %depends;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 0cd7097a..c279f2f 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1011,7 +1011,7 @@ static void selinux_write_opts(struct seq_file *m,
seq_puts(m, prefix);
if (has_comma)
seq_putc(m, '\"');
- seq_puts(m, opts->mnt_opts[i]);
+ seq_escape(m, opts->mnt_opts[i], "\"\n\\");
if (has_comma)
seq_putc(m, '\"');
}
diff --git a/sound/arm/Kconfig b/sound/arm/Kconfig
index 885683a..e040621 100644
--- a/sound/arm/Kconfig
+++ b/sound/arm/Kconfig
@@ -9,6 +9,14 @@ menuconfig SND_ARM
Drivers that are implemented on ASoC can be found in
"ALSA for SoC audio support" section.

+config SND_PXA2XX_LIB
+ tristate
+ select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
+ select SND_DMAENGINE_PCM
+
+config SND_PXA2XX_LIB_AC97
+ bool
+
if SND_ARM

config SND_ARMAACI
@@ -21,13 +29,6 @@ config SND_PXA2XX_PCM
tristate
select SND_PCM

-config SND_PXA2XX_LIB
- tristate
- select SND_AC97_CODEC if SND_PXA2XX_LIB_AC97
-
-config SND_PXA2XX_LIB_AC97
- bool
-
config SND_PXA2XX_AC97
tristate "AC97 driver for the Intel PXA2xx chip"
depends on ARCH_PXA
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index a0f7d3c..23deb67 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -1,7 +1,6 @@
config SND_PXA2XX_SOC
tristate "SoC Audio for the Intel PXA2xx chip"
depends on ARCH_PXA
- select SND_ARM
select SND_PXA2XX_LIB
help
Say Y or M if you want to add support for codecs attached to
@@ -15,7 +14,6 @@ config SND_PXA2XX_AC97
config SND_PXA2XX_SOC_AC97
tristate
select AC97_BUS
- select SND_ARM
select SND_PXA2XX_LIB_AC97
select SND_SOC_AC97_BUS

diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index e45d2b1..6c871c0 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -739,10 +739,10 @@ $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<

$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default $<

$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+ $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default $<

$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 2cd88c1..7a75ecb 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -796,25 +796,19 @@ static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp)
static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp)
{
ssize_t ret;
- u32 nr;
+ u32 nr[2];

ret = read(fd, &nr, sizeof(nr));
if (ret != (ssize_t)sizeof(nr))
- nr = -1; /* interpreted as error */
+ nr[0] = nr[1] = -1; /* interpreted as error */

- if (ph->needs_swap)
- nr = bswap_32(nr);
-
- fprintf(fp, "# nrcpus online : %u\n", nr);
-
- ret = read(fd, &nr, sizeof(nr));
- if (ret != (ssize_t)sizeof(nr))
- nr = -1; /* interpreted as error */
-
- if (ph->needs_swap)
- nr = bswap_32(nr);
+ if (ph->needs_swap) {
+ nr[0] = bswap_32(nr[0]);
+ nr[1] = bswap_32(nr[1]);
+ }

- fprintf(fp, "# nrcpus avail : %u\n", nr);
+ fprintf(fp, "# nrcpus online : %u\n", nr[1]);
+ fprintf(fp, "# nrcpus avail : %u\n", nr[0]);
}

static void print_version(struct perf_header *ph, int fd, FILE *fp)
