Re: Linux 3.5.7

From: Greg KH
Date: Fri Oct 12 2012 - 17:18:16 EST



diff --git a/Makefile b/Makefile
index 75c7683..b1f275b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 5
-SUBLEVEL = 6
+SUBLEVEL = 7
EXTRAVERSION =
NAME = Saber-toothed Squirrel

diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 153d3fc..668d4c7 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -28,6 +28,7 @@
#include <linux/tty.h>
#include <linux/console.h>
#include <linux/slab.h>
+#include <linux/rcupdate.h>

#include <asm/reg.h>
#include <asm/uaccess.h>
@@ -54,8 +55,10 @@ cpu_idle(void)
/* FIXME -- EV6 and LCA45 know how to power down
the CPU. */

+ rcu_idle_enter();
while (!need_resched())
cpu_relax();
+ rcu_idle_exit();
schedule();
}
}
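
The rcu_idle_enter()/rcu_idle_exit() hunks that follow (alpha, cris, frv, h8300, ia64, m32r, m68k, mn10300, parisc, score, xtensa) all apply the same pattern. The sketch below is an illustrative, generic idle loop in the usual 3.x shape, not taken from any one architecture and not buildable outside a kernel tree: RCU has to be told when a CPU enters its idle polling loop, otherwise the idle CPU stalls grace periods.

/* Illustrative only -- generic 3.x-style idle loop; the arch-specific
 * wait/halt code replaces cpu_relax(). */
#include <linux/rcupdate.h>
#include <linux/sched.h>

void example_cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		rcu_idle_enter();	/* CPU enters RCU's extended quiescent state */
		while (!need_resched())
			cpu_relax();	/* arch-specific halt/wait goes here */
		rcu_idle_exit();	/* tell RCU the CPU is non-idle again */
		schedule_preempt_disabled();
	}
}
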
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index c334a23..fce38a6 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -8,6 +8,7 @@
#define _ASM_ARM_SYSCALL_H

#include <linux/err.h>
+#include <linux/sched.h>

extern const unsigned long sys_call_table[];

diff --git a/arch/cris/kernel/process.c b/arch/cris/kernel/process.c
index 66fd017..7f65be6 100644
--- a/arch/cris/kernel/process.c
+++ b/arch/cris/kernel/process.c
@@ -25,6 +25,7 @@
#include <linux/elfcore.h>
#include <linux/mqueue.h>
#include <linux/reboot.h>
+#include <linux/rcupdate.h>

//#define DEBUG

@@ -74,6 +75,7 @@ void cpu_idle (void)
{
/* endless idle loop with no priority at all */
while (1) {
+ rcu_idle_enter();
while (!need_resched()) {
void (*idle)(void);
/*
@@ -86,6 +88,7 @@ void cpu_idle (void)
idle = default_idle;
idle();
}
+ rcu_idle_exit();
schedule_preempt_disabled();
}
}
diff --git a/arch/frv/kernel/process.c b/arch/frv/kernel/process.c
index ff95f50..2eb7fa5 100644
--- a/arch/frv/kernel/process.c
+++ b/arch/frv/kernel/process.c
@@ -25,6 +25,7 @@
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
+#include <linux/rcupdate.h>

#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
@@ -69,12 +70,14 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
+ rcu_idle_enter();
while (!need_resched()) {
check_pgt_cache();

if (!frv_dma_inprogress && idle)
idle();
}
+ rcu_idle_exit();

schedule_preempt_disabled();
}
diff --git a/arch/h8300/kernel/process.c b/arch/h8300/kernel/process.c
index 0e9c315..f153ed1 100644
--- a/arch/h8300/kernel/process.c
+++ b/arch/h8300/kernel/process.c
@@ -36,6 +36,7 @@
#include <linux/reboot.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/rcupdate.h>

#include <asm/uaccess.h>
#include <asm/traps.h>
@@ -78,8 +79,10 @@ void (*idle)(void) = default_idle;
void cpu_idle(void)
{
while (1) {
+ rcu_idle_enter();
while (!need_resched())
idle();
+ rcu_idle_exit();
schedule_preempt_disabled();
}
}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index dd6fc14..3e316ec 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -29,6 +29,7 @@
#include <linux/kdebug.h>
#include <linux/utsname.h>
#include <linux/tracehook.h>
+#include <linux/rcupdate.h>

#include <asm/cpu.h>
#include <asm/delay.h>
@@ -279,6 +280,7 @@ cpu_idle (void)

/* endless idle loop with no priority at all */
while (1) {
+ rcu_idle_enter();
if (can_do_pal_halt) {
current_thread_info()->status &= ~TS_POLLING;
/*
@@ -309,6 +311,7 @@ cpu_idle (void)
normal_xtp();
#endif
}
+ rcu_idle_exit();
schedule_preempt_disabled();
check_pgt_cache();
if (cpu_is_offline(cpu))
diff --git a/arch/m32r/kernel/process.c b/arch/m32r/kernel/process.c
index 3a4a32b..384e63f 100644
--- a/arch/m32r/kernel/process.c
+++ b/arch/m32r/kernel/process.c
@@ -26,6 +26,7 @@
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/hardirq.h>
+#include <linux/rcupdate.h>

#include <asm/io.h>
#include <asm/uaccess.h>
@@ -82,6 +83,7 @@ void cpu_idle (void)
{
/* endless idle loop with no priority at all */
while (1) {
+ rcu_idle_enter();
while (!need_resched()) {
void (*idle)(void) = pm_idle;

@@ -90,6 +92,7 @@ void cpu_idle (void)

idle();
}
+ rcu_idle_exit();
schedule_preempt_disabled();
}
}
diff --git a/arch/m68k/kernel/process.c b/arch/m68k/kernel/process.c
index c488e3c..ac2892e 100644
--- a/arch/m68k/kernel/process.c
+++ b/arch/m68k/kernel/process.c
@@ -25,6 +25,7 @@
#include <linux/reboot.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
+#include <linux/rcupdate.h>

#include <asm/uaccess.h>
#include <asm/traps.h>
@@ -75,8 +76,10 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
+ rcu_idle_enter();
while (!need_resched())
idle();
+ rcu_idle_exit();
schedule_preempt_disabled();
}
}
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 764e37a..654b1ad 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -225,7 +225,7 @@ KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
LDFLAGS += -m $(ld-emul)

ifdef CONFIG_MIPS
-CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \
+CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
ifdef CONFIG_64BIT
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index b91ad3e..d272857 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -189,7 +189,7 @@ static void __init ar934x_clocks_init(void)
AR934X_PLL_CPU_CONFIG_NFRAC_MASK;

cpu_pll = nint * ath79_ref_clk.rate / ref_div;
- cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 6));
+ cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 6));
cpu_pll /= (1 << out_div);

pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
@@ -203,7 +203,7 @@ static void __init ar934x_clocks_init(void)
AR934X_PLL_DDR_CONFIG_NFRAC_MASK;

ddr_pll = nint * ath79_ref_clk.rate / ref_div;
- ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 10));
+ ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (1 << 10));
ddr_pll /= (1 << out_div);

clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
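
The ath79 hunk above is a pure arithmetic fix: NFRAC is a binary fraction, so it must be scaled by 2^width, i.e. (1 << width), while (2 << width) halves its contribution. The standalone sketch below works the formula through once; the 6-bit width and the sample register values are assumptions for illustration, not taken from the patch.

/* Standalone sketch, assuming the fractional fields are 6 bits (CPU)
 * and 10 bits (DDR) wide, as the shifts in the hunk imply:
 *
 *   f = (ref / ref_div) * (nint + nfrac / 2^width)
 */
#include <stdio.h>

static unsigned long pll_rate(unsigned long ref, unsigned int ref_div,
			      unsigned int nint, unsigned int nfrac,
			      unsigned int frac_width, unsigned int out_div)
{
	unsigned long pll = nint * ref / ref_div;

	pll += nfrac * ref / (ref_div * (1UL << frac_width));	/* 1 << width, not 2 << width */
	return pll >> out_div;
}

int main(void)
{
	/* made-up example: 40 MHz reference, nint=14, nfrac=32 (i.e. 0.5), out_div=0 */
	printf("%lu\n", pll_rate(40000000UL, 1, 14, 32, 6, 0));	/* prints 580000000 */
	return 0;
}
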
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index fdaf65e..c6136cb 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -104,7 +104,7 @@ obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o

obj-$(CONFIG_OF) += prom.o

-CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)

obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o

diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
index 33188b6..a3d0fef 100644
--- a/arch/mn10300/Makefile
+++ b/arch/mn10300/Makefile
@@ -26,7 +26,7 @@ CHECKFLAGS +=
PROCESSOR := unset
UNIT := unset

-KBUILD_CFLAGS += -mam33 -mmem-funcs -DCPU=AM33
+KBUILD_CFLAGS += -mam33 -DCPU=AM33 $(call cc-option,-mmem-funcs,)
KBUILD_AFLAGS += -mam33 -DCPU=AM33

ifeq ($(CONFIG_MN10300_CURRENT_IN_E2),y)
diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c
index 7dab0cd..e9cceba 100644
--- a/arch/mn10300/kernel/process.c
+++ b/arch/mn10300/kernel/process.c
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/rcupdate.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
@@ -107,6 +108,7 @@ void cpu_idle(void)
{
/* endless idle loop with no priority at all */
for (;;) {
+ rcu_idle_enter();
while (!need_resched()) {
void (*idle)(void);

@@ -121,6 +123,7 @@ void cpu_idle(void)
}
idle();
}
+ rcu_idle_exit();

schedule_preempt_disabled();
}
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index d4b94b3..c54a4db 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -48,6 +48,7 @@
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
+#include <linux/rcupdate.h>

#include <asm/io.h>
#include <asm/asm-offsets.h>
@@ -69,8 +70,10 @@ void cpu_idle(void)

/* endless idle loop with no priority at all */
while (1) {
+ rcu_idle_enter();
while (!need_resched())
barrier();
+ rcu_idle_exit();
schedule_preempt_disabled();
check_pgt_cache();
}
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index ac39e6a..2974edd 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -181,6 +181,14 @@ static inline int pci_device_from_OF_node(struct device_node *np,
#if defined(CONFIG_EEH)
static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
{
+ /*
+ * For those OF nodes whose parent isn't PCI bridge, they
+ * don't have PCI_DN actually. So we have to skip them for
+ * any EEH operations.
+ */
+ if (!dn || !PCI_DN(dn))
+ return NULL;
+
return PCI_DN(dn)->edev;
}
#endif
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index ecd394c..9a0b5f5 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1029,7 +1029,7 @@ static void eeh_add_device_early(struct device_node *dn)
{
struct pci_controller *phb;

- if (!dn || !of_node_to_eeh_dev(dn))
+ if (!of_node_to_eeh_dev(dn))
return;
phb = of_node_to_eeh_dev(dn)->phb;

diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
index 2707023..637970c 100644
--- a/arch/score/kernel/process.c
+++ b/arch/score/kernel/process.c
@@ -27,6 +27,7 @@
#include <linux/reboot.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
+#include <linux/rcupdate.h>

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
@@ -50,9 +51,10 @@ void __noreturn cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
+ rcu_idle_enter();
while (!need_resched())
barrier();
-
+ rcu_idle_exit();
schedule_preempt_disabled();
}
}
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 1f25214..d3d8198 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -85,7 +85,7 @@ endif
ifdef CONFIG_X86_X32
x32_ld_ok := $(call try-run,\
/bin/echo -e '1: .quad 1b' | \
- $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" - && \
+ $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" - && \
$(OBJCOPY) -O elf32-x86-64 "$$TMP" "$$TMPO" && \
$(LD) -m elf32_x86_64 "$$TMPO" -o "$$TMP",y,n)
ifeq ($(x32_ld_ok),y)
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index e398bb5..8a84501 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -28,6 +28,9 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
$(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \
$(obj)/piggy.o

+$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
+$(obj)/efi_stub_$(BITS).o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
+
ifeq ($(CONFIG_EFI_STUB), y)
VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
endif
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 49afb3f..c3520d7 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)

static inline int pmd_large(pmd_t pte)
{
- return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
- (_PAGE_PSE | _PAGE_PRESENT);
+ return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte)

static inline int pmd_present(pmd_t pmd)
{
- return pmd_flags(pmd) & _PAGE_PRESENT;
+ /*
+ * Checking for _PAGE_PSE is needed too because
+ * split_huge_page will temporarily clear the present bit (but
+ * the _PAGE_PSE flag will remain set at all times while the
+ * _PAGE_PRESENT bit is clear).
+ */
+ return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

static inline int pmd_none(pmd_t pmd)
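
The pmd_present() comment above carries the whole argument: while split_huge_page() is running, the present bit is temporarily clear but _PAGE_PSE stays set, so a present-bit-only check misclassifies a huge pmd in mid-split. The standalone sketch below just walks that case through both checks; the flag values are placeholders chosen for illustration, not quoted from the x86 headers.

#include <stdio.h>

/* Placeholder flag values for illustration only. */
#define PG_PRESENT	0x001UL
#define PG_PSE		0x080UL
#define PG_PROTNONE	0x100UL

int main(void)
{
	unsigned long mid_split = PG_PSE;	/* present cleared during split, PSE still set */

	printf("old check: %d\n", !!(mid_split & PG_PRESENT));		/* 0 - wrongly "absent" */
	printf("new check: %d\n",
	       !!(mid_split & (PG_PRESENT | PG_PROTNONE | PG_PSE)));	/* 1 - still present */
	return 0;
}
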
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 86c8704..61593fd 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -615,10 +615,6 @@ static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
-static void vmx_set_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg);
-static void vmx_get_segment(struct kvm_vcpu *vcpu,
- struct kvm_segment *var, int seg);

static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -2767,7 +2763,6 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
{
unsigned long flags;
struct vcpu_vmx *vmx = to_vmx(vcpu);
- struct kvm_segment var;

if (enable_unrestricted_guest)
return;
@@ -2811,23 +2806,20 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
if (emulate_invalid_guest_state)
goto continue_rmode;

- vmx_get_segment(vcpu, &var, VCPU_SREG_SS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_SS);
-
- vmx_get_segment(vcpu, &var, VCPU_SREG_CS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_CS);
-
- vmx_get_segment(vcpu, &var, VCPU_SREG_ES);
- vmx_set_segment(vcpu, &var, VCPU_SREG_ES);
+ vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
+ vmcs_write32(GUEST_SS_LIMIT, 0xffff);
+ vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);

- vmx_get_segment(vcpu, &var, VCPU_SREG_DS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_DS);
+ vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
+ vmcs_write32(GUEST_CS_LIMIT, 0xffff);
+ if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
+ vmcs_writel(GUEST_CS_BASE, 0xf0000);
+ vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);

- vmx_get_segment(vcpu, &var, VCPU_SREG_GS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_GS);
-
- vmx_get_segment(vcpu, &var, VCPU_SREG_FS);
- vmx_set_segment(vcpu, &var, VCPU_SREG_FS);
+ fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
+ fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
+ fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
+ fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);

continue_rmode:
kvm_mmu_reset_context(vcpu);
@@ -3230,44 +3222,6 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,

vmcs_write32(sf->ar_bytes, ar);
__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
-
- /*
- * Fix segments for real mode guest in hosts that don't have
- * "unrestricted_mode" or it was disabled.
- * This is done to allow migration of the guests from hosts with
- * unrestricted guest like Westmere to older host that don't have
- * unrestricted guest like Nehelem.
- */
- if (!enable_unrestricted_guest && vmx->rmode.vm86_active) {
- switch (seg) {
- case VCPU_SREG_CS:
- vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
- vmcs_write32(GUEST_CS_LIMIT, 0xffff);
- if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
- vmcs_writel(GUEST_CS_BASE, 0xf0000);
- vmcs_write16(GUEST_CS_SELECTOR,
- vmcs_readl(GUEST_CS_BASE) >> 4);
- break;
- case VCPU_SREG_ES:
- fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
- break;
- case VCPU_SREG_DS:
- fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
- break;
- case VCPU_SREG_GS:
- fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
- break;
- case VCPU_SREG_FS:
- fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
- break;
- case VCPU_SREG_SS:
- vmcs_write16(GUEST_SS_SELECTOR,
- vmcs_readl(GUEST_SS_BASE) >> 4);
- vmcs_write32(GUEST_SS_LIMIT, 0xffff);
- vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
- break;
- }
- }
}

static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 92660eda..f55a4ce 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -890,6 +890,7 @@ void __init efi_enter_virtual_mode(void)
*
* Call EFI services through wrapper functions.
*/
+ efi.runtime_version = efi_systab.fw_revision;
efi.get_time = virt_efi_get_time;
efi.set_time = virt_efi_set_time;
efi.get_wakeup_time = virt_efi_get_wakeup_time;
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 2c8d6a3..bc44311 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -31,6 +31,7 @@
#include <linux/mqueue.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/rcupdate.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -110,8 +111,10 @@ void cpu_idle(void)

/* endless idle loop with no priority at all */
while (1) {
+ rcu_idle_enter();
while (!need_resched())
platform_idle();
+ rcu_idle_exit();
schedule_preempt_disabled();
}
}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 92107ee..54a5732 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -990,8 +990,6 @@ static int __init acpi_bus_init(void)
status = acpi_ec_ecdt_probe();
/* Ignore result. Not having an ECDT is not fatal. */

- acpi_bus_osc_support();
-
status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
if (ACPI_FAILURE(status)) {
printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
@@ -999,6 +997,12 @@ static int __init acpi_bus_init(void)
}

/*
+ * _OSC method may exist in module level code,
+ * so it must be run after ACPI_FULL_INITIALIZATION
+ */
+ acpi_bus_osc_support();
+
+ /*
* _PDC control method may load dynamic SSDT tables,
* and we need to install the table handler before that.
*/
diff --git a/drivers/base/core.c b/drivers/base/core.c
index c598122..b18138a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1895,8 +1895,8 @@ int __dev_printk(const char *level, const struct device *dev,
"DEVICE=+%s:%s", subsys, dev_name(dev));
}
skip:
- if (level[2])
- level_extra = &level[2]; /* skip past KERN_SOH "L" */
+ if (level[3])
+ level_extra = &level[3]; /* skip past "<L>" */

return printk_emit(0, level[1] - '0',
dictlen ? dict : NULL, dictlen,
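
The __dev_printk() change above hinges on the shape of the level string in this kernel: the three-character "<L>" form ('<', digit, '>'), so any caller-supplied extra text starts at index 3 and level[1] - '0' recovers the numeric level. A minimal standalone sketch of that indexing, with a made-up level string:

#include <stdio.h>

int main(void)
{
	const char *level = "<7>";	/* "<L>" style prefix; 7 is just an example */

	printf("numeric level = %d\n", level[1] - '0');		/* 7 */
	printf("extra text    = \"%s\"\n", &level[3]);		/* "" - nothing after "<7>" */
	return 0;
}
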
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 742fcbe..9d54dce 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -994,7 +994,7 @@ int dpm_suspend_end(pm_message_t state)

error = dpm_suspend_noirq(state);
if (error) {
- dpm_resume_early(state);
+ dpm_resume_early(resume_event(state));
return error;
}

diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index de0435e..887f68f 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -35,6 +35,7 @@ new_skb(ulong len)
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->protocol = __constant_htons(ETH_P_AOE);
+ skb_checksum_none_assert(skb);
}
return skb;
}
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 6fbfc24..af81f77 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1034,15 +1034,15 @@ void drm_mode_config_cleanup(struct drm_device *dev)
fb->funcs->destroy(fb);
}

- list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
- crtc->funcs->destroy(crtc);
- }
-
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
head) {
plane->funcs->destroy(plane);
}

+ list_for_each_entry_safe(crtc, ct, &dev->mode_config.crtc_list, head) {
+ crtc->funcs->destroy(crtc);
+ }
+
idr_remove_all(&dev->mode_config.crtc_idr);
idr_destroy(&dev->mode_config.crtc_idr);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index acc91b1..972a32f 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -605,12 +605,12 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
intel_opregion_gse_intr(dev);

for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
intel_prepare_page_flip(dev, i);
intel_finish_page_flip_plane(dev, i);
}
- if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
- drm_handle_vblank(dev, i);
}

/* check event from PCH */
@@ -692,6 +692,12 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);

+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
if (de_iir & DE_PLANEA_FLIP_DONE) {
intel_prepare_page_flip(dev, 0);
intel_finish_page_flip_plane(dev, 0);
@@ -702,12 +708,6 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip_plane(dev, 1);
}

- if (de_iir & DE_PIPEA_VBLANK)
- drm_handle_vblank(dev, 0);
-
- if (de_iir & DE_PIPEB_VBLANK)
- drm_handle_vblank(dev, 1);
-
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
if (pch_iir & hotplug_mask)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 48d5e8e..4cad908 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -514,6 +514,9 @@
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)

+#define GEN6_GT_MODE 0x20d0
+#define GEN6_GT_MODE_HI (1 << 9)
+
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
#define RING_MODE_GEN7(ring) ((ring)->mmio_base+0x29c)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 2f22bea..6f0d039 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2641,13 +2641,34 @@ static void ironlake_fdi_disable(struct drm_crtc *crtc)
udelay(100);
}

+static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+ bool pending;
+
+ if (atomic_read(&dev_priv->mm.wedged))
+ return false;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ pending = to_intel_crtc(crtc)->unpin_work != NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return pending;
+}
+
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;

if (crtc->fb == NULL)
return;

+ wait_event(dev_priv->pending_flip_queue,
+ !intel_crtc_has_pending_flip(crtc));
+
mutex_lock(&dev->struct_mutex);
intel_finish_fb(crtc->fb);
mutex_unlock(&dev->struct_mutex);
@@ -5916,9 +5937,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,

atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
- if (atomic_read(&obj->pending_flip) == 0)
- wake_up(&dev_priv->pending_flip_queue);

+ wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);

trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
@@ -6167,7 +6187,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
default:
WARN_ONCE(1, "unknown plane in flip command\n");
ret = -ENODEV;
- goto err;
+ goto err_unpin;
}

ret = intel_ring_begin(ring, 4);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a48c391..b55aa0e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3363,6 +3363,11 @@ static void gen6_init_clock_gating(struct drm_device *dev)
DISPPLANE_TRICKLE_FEED_DISABLE);
intel_flush_display_plane(dev_priv, pipe);
}
+
+ /* The default value should be 0x200 according to docs, but the two
+ * platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
+ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
+ I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
}

static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b5e7bd9..e5b84ff 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -280,6 +280,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);

+ /* Initialize the ring. */
+ I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;

/* G45 ring initialization fails to reset head to zero */
@@ -305,11 +307,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
}
}

- /* Initialize the ring. This must happen _after_ we've cleared the ring
- * registers with the above sequence (the readback of the HEAD registers
- * also enforces ordering), otherwise the hw might lose the new ring
- * register values. */
- I915_WRITE_START(ring, obj->gtt_offset);
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
diff --git a/drivers/gpu/drm/nouveau/nvc0_fence.c b/drivers/gpu/drm/nouveau/nvc0_fence.c
index 47ab388..8e5a2f4 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fence.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fence.c
@@ -32,6 +32,7 @@
struct nvc0_fence_priv {
struct nouveau_fence_priv base;
struct nouveau_bo *bo;
+ u32 *suspend;
};

struct nvc0_fence_chan {
@@ -125,12 +126,36 @@ nvc0_fence_context_new(struct nouveau_channel *chan, int engine)
static int
nvc0_fence_fini(struct drm_device *dev, int engine, bool suspend)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct nvc0_fence_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ if (suspend) {
+ priv->suspend = vmalloc(pfifo->channels * sizeof(u32));
+ if (!priv->suspend)
+ return -ENOMEM;
+
+ for (i = 0; i < pfifo->channels; i++)
+ priv->suspend[i] = nouveau_bo_rd32(priv->bo, i);
+ }
+
return 0;
}

static int
nvc0_fence_init(struct drm_device *dev, int engine)
{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct nvc0_fence_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ if (priv->suspend) {
+ for (i = 0; i < pfifo->channels; i++)
+ nouveau_bo_wr32(priv->bo, i, priv->suspend[i]);
+ vfree(priv->suspend);
+ priv->suspend = NULL;
+ }
+
return 0;
}

diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 5df58d1..d13a348 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -149,6 +149,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
(rdev->pdev->subsystem_device == 0x01fd))
return true;

+ /* Gateway RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x107b) &&
+ (rdev->pdev->subsystem_device == 0x0185))
+ return true;
+
+ /* try and enable MSIs by default on all RS690s */
+ if (rdev->family == CHIP_RS690)
+ return true;
+
/* RV515 seems to have MSI issues where it loses
* MSI rearms occasionally. This leads to lockups and freezes.
* disable it by default.
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 5b37e28..0c4ca48 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -555,7 +555,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
void radeon_pm_resume(struct radeon_device *rdev)
{
/* set up the default clocks if the MC ucode is loaded */
- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -610,7 +612,9 @@ int radeon_pm_init(struct radeon_device *rdev)
radeon_pm_print_states(rdev);
radeon_pm_init_profile(rdev);
/* set up the default clocks if the MC ucode is loaded */
- if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+ if ((rdev->family >= CHIP_BARTS) &&
+ (rdev->family <= CHIP_CAYMAN) &&
+ rdev->mc_fw) {
if (rdev->pm.default_vddc)
radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
SET_VOLTAGE_TYPE_ASIC_VDDC);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 6eb507a..7d3f0f0 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -547,6 +547,8 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)

dev_priv->chipset = (enum savage_family)chipset;

+ pci_set_master(dev->pdev);
+
return 0;
}

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b12af2f..3e1bdc1 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -589,7 +589,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
int i;

- domain->iommu_coherency = 1;
+ i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+
+ domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;

for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
if (!ecap_coherent(g_iommus[i]->ecap)) {
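
The intel-iommu hunk relies on the convention that find_first_bit() returns the bitmap size when no bit is set, so "i < g_num_of_iommus" is false exactly when the domain has no IOMMU attached, and coherency must then default to 0 rather than the old hard-coded 1. The sketch below is a standalone toy with the same convention, written for illustration only, not the kernel's find_first_bit():

#include <stdio.h>

/* Toy scan with find_first_bit()-like semantics: returns nbits when empty. */
static unsigned int first_set(unsigned long bmp, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++)
		if (bmp & (1UL << i))
			return i;
	return nbits;
}

int main(void)
{
	unsigned int nbits = 32;
	unsigned int i = first_set(0UL, nbits);		/* no IOMMU attached */

	printf("coherency = %d\n", i < nbits ? 1 : 0);	/* 0, not the old hard-coded 1 */
	return 0;
}
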
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 36fe5a3..24c77a4 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1473,6 +1473,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
rdev = rc_allocate_device();
if (!rdev)
goto failure;
+ itdev->rdev = rdev;

ret = -ENODEV;

@@ -1604,7 +1605,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
if (ret)
goto failure3;

- itdev->rdev = rdev;
ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");

return 0;
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 862c657..9c65a54 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -2880,12 +2880,20 @@ static void em28xx_card_setup(struct em28xx *dev)
}


-#if defined(CONFIG_MODULES) && defined(MODULE)
static void request_module_async(struct work_struct *work)
{
struct em28xx *dev = container_of(work,
struct em28xx, request_module_wk);

+ /*
+ * The em28xx extensions can be modules or builtin. If the
+ * modules are already loaded or are built in, those extensions
+ * can be initialised right now. Otherwise, the module init
+ * code will do it.
+ */
+ em28xx_init_extension(dev);
+
+#if defined(CONFIG_MODULES) && defined(MODULE)
if (dev->has_audio_class)
request_module("snd-usb-audio");
else if (dev->has_alsa_audio)
@@ -2895,6 +2903,7 @@ static void request_module_async(struct work_struct *work)
request_module("em28xx-dvb");
if (dev->board.ir_codes && !disable_ir)
request_module("em28xx-rc");
+#endif /* CONFIG_MODULES */
}

static void request_modules(struct em28xx *dev)
@@ -2907,10 +2916,6 @@ static void flush_request_modules(struct em28xx *dev)
{
flush_work_sync(&dev->request_module_wk);
}
-#else
-#define request_modules(dev)
-#define flush_request_modules(dev)
-#endif /* CONFIG_MODULES */

/*
* em28xx_release_resources()
@@ -3329,13 +3334,6 @@ static int em28xx_usb_probe(struct usb_interface *interface,
*/
mutex_unlock(&dev->lock);

- /*
- * These extensions can be modules. If the modules are already
- * loaded then we can initialise the device now, otherwise we
- * will initialise it when the modules load instead.
- */
- em28xx_init_extension(dev);
-
return 0;

unlock_and_free:
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index a0369a5..97c2e59 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -978,6 +978,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x093a, 0x262a)},
{USB_DEVICE(0x093a, 0x262c)},
{USB_DEVICE(0x145f, 0x013c)},
+ {USB_DEVICE(0x1ae7, 0x2001)}, /* SpeedLink Snappy Mic SL-6825-SBK */
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index ca881ef..746a59c 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -18,12 +18,19 @@
#include <linux/mfd/core.h>
#include <linux/mfd/max8925.h>

+static struct resource io_parent = {
+ .start = 0,
+ .end = 0xffffffff,
+ .flags = IORESOURCE_IO,
+};
+
static struct resource backlight_resources[] = {
{
.name = "max8925-backlight",
.start = MAX8925_WLED_MODE_CNTL,
.end = MAX8925_WLED_CNTL,
.flags = IORESOURCE_IO,
+ .parent = &io_parent,
},
};

@@ -42,6 +49,7 @@ static struct resource touch_resources[] = {
.start = MAX8925_TSC_IRQ,
.end = MAX8925_ADC_RES_END,
.flags = IORESOURCE_IO,
+ .parent = &io_parent,
},
};

@@ -60,6 +68,7 @@ static struct resource power_supply_resources[] = {
.start = MAX8925_CHG_IRQ1,
.end = MAX8925_CHG_IRQ1_MASK,
.flags = IORESOURCE_IO,
+ .parent = &io_parent,
},
};

@@ -118,6 +127,7 @@ static struct mfd_cell onkey_devs[] = {
.start = MAX8925_##_start, \
.end = MAX8925_##_end, \
.flags = IORESOURCE_IO, \
+ .parent = &io_parent, \
}

static struct resource regulator_resources[] = {
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 389a3ee..c4de776 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2102,8 +2102,7 @@ static int omap_hsmmc_suspend(struct device *dev)
if (ret) {
host->suspended = 0;
if (host->pdata->resume) {
- ret = host->pdata->resume(dev, host->slot_id);
- if (ret)
+ if (host->pdata->resume(dev, host->slot_id))
dev_dbg(dev, "Unmask interrupt failed\n");
}
goto err;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 724b35e..3b8236b 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1191,6 +1191,10 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
host->sd_error = true;
dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
}
+ if (host->state == STATE_IDLE) {
+ dev_info(&host->pd->dev, "Spurious IRQ status 0x%x", state);
+ return IRQ_HANDLED;
+ }
if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
if (!host->dma_active)
return IRQ_WAKE_THREAD;
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index e5bfd0e..0598d52 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = {

static int __init init_autcpu12_sram (void)
{
- int err, save0, save1;
+ map_word tmp, save0, save1;
+ int err;

autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
if (!autcpu12_sram_map.virt) {
@@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void)
err = -EIO;
goto out;
}
- simple_map_init(&autcpu_sram_map);
+ simple_map_init(&autcpu12_sram_map);

/*
* Check for 32K/128K
@@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void)
* Read and check result on ofs 0x0
* Restore contents
*/
- save0 = map_read32(&autcpu12_sram_map,0);
- save1 = map_read32(&autcpu12_sram_map,0x10000);
- map_write32(&autcpu12_sram_map,~save0,0x10000);
+ save0 = map_read(&autcpu12_sram_map, 0);
+ save1 = map_read(&autcpu12_sram_map, 0x10000);
+ tmp.x[0] = ~save0.x[0];
+ map_write(&autcpu12_sram_map, tmp, 0x10000);
/* if we find this pattern on 0x0, we have 32K size
* restore contents and exit
*/
- if ( map_read32(&autcpu12_sram_map,0) != save0) {
- map_write32(&autcpu12_sram_map,save0,0x0);
+ tmp = map_read(&autcpu12_sram_map, 0);
+ if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) {
+ map_write(&autcpu12_sram_map, save0, 0x0);
goto map;
}
/* We have a 128K found, restore 0x10000 and set size
* to 128K
*/
- map_write32(&autcpu12_sram_map,save1,0x10000);
+ map_write(&autcpu12_sram_map, save1, 0x10000);
autcpu12_sram_map.size = SZ_128K;

map:
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index d518e4d..f8c08ec 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -711,6 +711,8 @@ static const char *default_mtd_part_types[] = {
* partition parsers, specified in @types. However, if @types is %NULL, then
* the default list of parsers is used. The default list contains only the
* "cmdlinepart" and "ofpart" parsers ATM.
* Note: If there is more than one parser in @types, the kernel only takes the
+ * partitions parsed out by the first parser.
*
* This function may return:
* o a negative error code in case of failure
@@ -735,11 +737,12 @@ int parse_mtd_partitions(struct mtd_info *master, const char **types,
if (!parser)
continue;
ret = (*parser->parse_fn)(master, pparts, data);
+ put_partition_parser(parser);
if (ret > 0) {
printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n",
ret, parser->name, master->name);
+ break;
}
- put_partition_parser(parser);
}
return ret;
}
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 30d1319..c126469 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -390,7 +390,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
- mtd->writesize, td);
+ mtd->writesize, md);
md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
pr_info("Bad block table at page %d, version 0x%02X\n",
md->pages[0], md->version[0]);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index cf0cd31..5d881180 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2333,6 +2333,7 @@ static int __init ns_init_module(void)
uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
+ retval = -EINVAL;
goto err_exit;
}
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d..a7e0c8f 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1382,7 +1382,8 @@ static int omap_nand_remove(struct platform_device *pdev)
/* Release NAND device, its internal structures and partitions */
nand_release(&info->mtd);
iounmap(info->nand.IO_ADDR_R);
- kfree(&info->mtd);
+ release_mem_region(info->phys_base, NAND_IO_SIZE);
+ kfree(info);
return 0;
}

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8098eea..2364f66 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -620,14 +620,16 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
struct bnx2x_fastpath *fp)
{
- /* Do nothing if no IP/L4 csum validation was done */
-
+ /* Do nothing if no L4 csum validation was done.
+ * We do not check whether IP csum was validated. For IPv4 we assume
+ * that if the card got as far as validating the L4 csum, it also
+ * validated the IP csum. IPv6 has no IP csum.
+ */
if (cqe->fast_path_cqe.status_flags &
- (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
- ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
+ ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
return;

- /* If both IP/L4 validation were done, check if an error was found. */
+ /* If L4 validation was done, check if an error was found. */

if (cqe->fast_path_cqe.type_error_flags &
(ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3178f1e..9b20170 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2723,10 +2723,7 @@ static int ixgbe_get_ts_info(struct net_device *dev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_SOME);
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
#endif /* CONFIG_IXGBE_PTP */
default:
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 342b3a7..a77c558 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1378,6 +1378,10 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
struct pci_dev *root = pdev->bus->self;
u32 aer_pos;

+ /* root bus? */
+ if (!root)
+ return;
+
if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
return;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 3b5c457..28892eb 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -862,6 +862,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)

next_dma = desc_read(desc, hw_next);
chan->head = desc_from_phys(pool, next_dma);
+ chan->count--;
chan->stats.teardown_dequeue++;

/* issue callback without locks held */
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index cbf7047..20f31d0 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -570,7 +570,7 @@ static int pppoe_release(struct socket *sock)

po = pppox_sk(sk);

- if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+ if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
dev_put(po->pppoe_dev);
po->pppoe_dev = NULL;
}
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 91d2588..1470d3e 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -79,6 +79,7 @@ static int rionet_capable = 1;
* on system trade-offs.
*/
static struct rio_dev **rionet_active;
+static int nact; /* total number of active rionet peers */

#define is_rionet_capable(src_ops, dst_ops) \
((src_ops & RIO_SRC_OPS_DATA_MSG) && \
@@ -175,6 +176,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct ethhdr *eth = (struct ethhdr *)skb->data;
u16 destid;
unsigned long flags;
+ int add_num = 1;

local_irq_save(flags);
if (!spin_trylock(&rnet->tx_lock)) {
@@ -182,7 +184,10 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_LOCKED;
}

- if ((rnet->tx_cnt + 1) > RIONET_TX_RING_SIZE) {
+ if (is_multicast_ether_addr(eth->h_dest))
+ add_num = nact;
+
+ if ((rnet->tx_cnt + add_num) > RIONET_TX_RING_SIZE) {
netif_stop_queue(ndev);
spin_unlock_irqrestore(&rnet->tx_lock, flags);
printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
@@ -191,11 +196,16 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}

if (is_multicast_ether_addr(eth->h_dest)) {
+ int count = 0;
for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
i++)
- if (rionet_active[i])
+ if (rionet_active[i]) {
rionet_queue_tx_msg(skb, ndev,
rionet_active[i]);
+ if (count)
+ atomic_inc(&skb->users);
+ count++;
+ }
} else if (RIONET_MAC_MATCH(eth->h_dest)) {
destid = RIONET_GET_DESTID(eth->h_dest);
if (rionet_active[destid])
@@ -220,14 +230,17 @@ static void rionet_dbell_event(struct rio_mport *mport, void *dev_id, u16 sid, u
if (info == RIONET_DOORBELL_JOIN) {
if (!rionet_active[sid]) {
list_for_each_entry(peer, &rionet_peers, node) {
- if (peer->rdev->destid == sid)
+ if (peer->rdev->destid == sid) {
rionet_active[sid] = peer->rdev;
+ nact++;
+ }
}
rio_mport_send_doorbell(mport, sid,
RIONET_DOORBELL_JOIN);
}
} else if (info == RIONET_DOORBELL_LEAVE) {
rionet_active[sid] = NULL;
+ nact--;
} else {
if (netif_msg_intr(rnet))
printk(KERN_WARNING "%s: unhandled doorbell\n",
@@ -523,6 +536,7 @@ static int rionet_probe(struct rio_dev *rdev, const struct rio_device_id *id)

rc = rionet_setup_netdev(rdev->net->hport, ndev);
rionet_check = 1;
+ nact = 0;
}

/*
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index d75d1f5..e43f1cf 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -678,7 +678,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
return -EIO;
}

- *datap = *attrdata;
+ *datap = le16_to_cpu(*attrdata);

kfree(attrdata);
return result;
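
The sierra_net fix is a plain endianness bug: the firmware attribute arrives little-endian on the wire, and dereferencing it directly only happens to give the right answer on little-endian hosts. The standalone sketch below shows the equivalent of le16_to_cpu() done by hand on a made-up two-byte buffer:

#include <stdio.h>
#include <stdint.h>

/* Hand-rolled little-endian 16-bit read; host byte order never matters. */
static uint16_t read_le16(const uint8_t *wire)
{
	return (uint16_t)wire[0] | ((uint16_t)wire[1] << 8);
}

int main(void)
{
	uint8_t wire[2] = { 0x34, 0x12 };	/* 0x1234 stored little-endian */

	printf("value = 0x%04x\n", read_le16(wire));	/* 0x1234 on any host */
	return 0;
}
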
diff --git a/drivers/net/wan/ixp4xx_hss.c b/drivers/net/wan/ixp4xx_hss.c
index aaaca9a..3f575af 100644
--- a/drivers/net/wan/ixp4xx_hss.c
+++ b/drivers/net/wan/ixp4xx_hss.c
@@ -10,6 +10,7 @@

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

+#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 806c44f..09bf377 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -132,6 +132,15 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;

+ status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+ if (ACPI_FAILURE(status)) {
+ warn("can't evaluate _ADR (%#x)\n", status);
+ return AE_OK;
+ }
+
+ device = (adr >> 16) & 0xffff;
+ function = adr & 0xffff;
+
pdev = pbus->self;
if (pdev && pci_is_pcie(pdev)) {
tmp = acpi_find_root_bridge_handle(pdev);
@@ -144,10 +153,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
}
}

- acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
- device = (adr >> 16) & 0xffff;
- function = adr & 0xffff;
-
newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL);
if (!newfunc)
return AE_NO_MEMORY;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 658ac97..9319aa1 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -684,8 +684,10 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,

/* Check if setup is sensible at all */
if (!pass &&
- (primary != bus->number || secondary <= bus->number)) {
- dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
+ (primary != bus->number || secondary <= bus->number ||
+ secondary > subordinate)) {
+ dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
+ secondary, subordinate);
broken = 1;
}

diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 0860181..4f1b10b 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -519,6 +519,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,

rwlock_init(&port->unit_list_lock);
INIT_LIST_HEAD(&port->unit_list);
+ atomic_set(&port->units, 0);

INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 96f13ad8..79a6afe 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -39,17 +39,23 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
}

-static int zfcp_ccw_activate(struct ccw_device *cdev)
-
+/**
+ * zfcp_ccw_activate - activate adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @clear: Status flags to clear.
+ * @tag: s390dbf trace record tag
+ */
+static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);

if (!adapter)
return 0;

+ zfcp_erp_clear_adapter_status(adapter, clear);
zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
- "ccresu2");
+ tag);
zfcp_erp_wait(adapter);
flush_work(&adapter->scan_work);

@@ -164,26 +170,29 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
adapter->req_no = 0;

- zfcp_ccw_activate(cdev);
+ zfcp_ccw_activate(cdev, 0, "ccsonl1");
zfcp_ccw_adapter_put(adapter);
return 0;
}

/**
- * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
* @cdev: pointer to belonging ccw device
+ * @set: Status flags to set.
+ * @tag: s390dbf trace record tag
*
* This function gets called by the common i/o layer and sets an adapter
* into state offline.
*/
-static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
{
struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);

if (!adapter)
return 0;

- zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
+ zfcp_erp_set_adapter_status(adapter, set);
+ zfcp_erp_adapter_shutdown(adapter, 0, tag);
zfcp_erp_wait(adapter);

zfcp_ccw_adapter_put(adapter);
@@ -191,6 +200,18 @@ static int zfcp_ccw_set_offline(struct ccw_device *cdev)
}

/**
+ * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+{
+ return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
+}
+
+/**
* zfcp_ccw_notify - ccw notify function
* @cdev: pointer to belonging ccw device
* @event: indicates if adapter was detached or attached
@@ -207,6 +228,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)

switch (event) {
case CIO_GONE:
+ if (atomic_read(&adapter->status) &
+ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+ zfcp_dbf_hba_basic("ccnigo1", adapter);
+ break;
+ }
dev_warn(&cdev->dev, "The FCP device has been detached\n");
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
break;
@@ -216,6 +242,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
break;
case CIO_OPER:
+ if (atomic_read(&adapter->status) &
+ ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+ zfcp_dbf_hba_basic("ccniop1", adapter);
+ break;
+ }
dev_info(&cdev->dev, "The FCP device is operational again\n");
zfcp_erp_set_adapter_status(adapter,
ZFCP_STATUS_COMMON_RUNNING);
@@ -251,6 +282,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
zfcp_ccw_adapter_put(adapter);
}

+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+{
+ zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
+ return 0;
+}
+
+static int zfcp_ccw_thaw(struct ccw_device *cdev)
+{
+ /* trace records for thaw and final shutdown during suspend
+ can only be found in system dump until the end of suspend
+ but not after resume because it's based on the memory image
+ right after the very first suspend (freeze) callback */
+ zfcp_ccw_activate(cdev, 0, "ccthaw1");
+ return 0;
+}
+
+static int zfcp_ccw_resume(struct ccw_device *cdev)
+{
+ zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
+ return 0;
+}
+
struct ccw_driver zfcp_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
@@ -263,7 +316,7 @@ struct ccw_driver zfcp_ccw_driver = {
.set_offline = zfcp_ccw_set_offline,
.notify = zfcp_ccw_notify,
.shutdown = zfcp_ccw_shutdown,
- .freeze = zfcp_ccw_set_offline,
- .thaw = zfcp_ccw_activate,
- .restore = zfcp_ccw_activate,
+ .freeze = zfcp_ccw_suspend,
+ .thaw = zfcp_ccw_thaw,
+ .restore = zfcp_ccw_resume,
};
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index fab2c25..8ed63aa 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);

- shost_for_each_device(sdev, port->adapter->scsi_host) {
+ shost_for_each_device(sdev, adapter->scsi_host) {
zfcp_sdev = sdev_to_zfcp(sdev);
status = atomic_read(&zfcp_sdev->status);
if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index a9a816e..79b9848 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -191,7 +191,7 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
length = min((u16)sizeof(struct qdio_buffer),
(u16)ZFCP_DBF_PAY_MAX_REC);

- while ((char *)pl[payload->counter] && payload->counter < scount) {
+ while (payload->counter < scount && (char *)pl[payload->counter]) {
memcpy(payload->data, (char *)pl[payload->counter], length);
debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length));
payload->counter++;
@@ -200,6 +200,26 @@ void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount,
spin_unlock_irqrestore(&dbf->pay_lock, flags);
}

+/**
+ * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @adapter: pointer to struct zfcp_adapter
+ */
+void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
+{
+ struct zfcp_dbf *dbf = adapter->dbf;
+ struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dbf->hba_lock, flags);
+ memset(rec, 0, sizeof(*rec));
+
+ memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+ rec->id = ZFCP_DBF_HBA_BASIC;
+
+ debug_event(dbf->hba, 1, rec, sizeof(*rec));
+ spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
struct zfcp_adapter *adapter,
struct zfcp_port *port,
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index 714f087..3ac7a4b 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
ZFCP_DBF_HBA_RES = 1,
ZFCP_DBF_HBA_USS = 2,
ZFCP_DBF_HBA_BIT = 3,
+ ZFCP_DBF_HBA_BASIC = 4,
};

/**
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index ed5d921..f172b84 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -77,6 +77,7 @@ struct zfcp_reqlist;
#define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004
#define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008
#define ZFCP_STATUS_ADAPTER_HOST_CON_INIT 0x00000010
+#define ZFCP_STATUS_ADAPTER_SUSPENDED 0x00000040
#define ZFCP_STATUS_ADAPTER_ERP_PENDING 0x00000100
#define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED 0x00000200
#define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED 0x00000400
@@ -204,6 +205,7 @@ struct zfcp_port {
struct zfcp_adapter *adapter; /* adapter used to access port */
struct list_head unit_list; /* head of logical unit list */
rwlock_t unit_list_lock; /* unit list lock */
+ atomic_t units; /* zfcp_unit count */
atomic_t status; /* status of this remote port */
u64 wwnn; /* WWNN if known */
u64 wwpn; /* WWPN */
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 2302e1c..ef9e502 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -54,6 +54,7 @@ extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **);
+extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
@@ -158,6 +159,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
extern struct attribute_group zfcp_sysfs_unit_attrs;
extern struct attribute_group zfcp_sysfs_adapter_attrs;
extern struct attribute_group zfcp_sysfs_port_attrs;
+extern struct mutex zfcp_sysfs_port_units_mutex;
extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
extern struct device_attribute *zfcp_sysfs_shost_attrs[];

diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index e9a787e..2136fc2 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
return;
}

- zfcp_dbf_hba_fsf_uss("fssrh_2", req);
+ zfcp_dbf_hba_fsf_uss("fssrh_4", req);

switch (sr_buf->status_type) {
case FSF_STATUS_READ_PORT_CLOSED:
@@ -437,6 +437,34 @@ void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
}
}

+#define ZFCP_FSF_PORTSPEED_1GBIT (1 << 0)
+#define ZFCP_FSF_PORTSPEED_2GBIT (1 << 1)
+#define ZFCP_FSF_PORTSPEED_4GBIT (1 << 2)
+#define ZFCP_FSF_PORTSPEED_10GBIT (1 << 3)
+#define ZFCP_FSF_PORTSPEED_8GBIT (1 << 4)
+#define ZFCP_FSF_PORTSPEED_16GBIT (1 << 5)
+#define ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED (1 << 15)
+
+static u32 zfcp_fsf_convert_portspeed(u32 fsf_speed)
+{
+ u32 fdmi_speed = 0;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_1GBIT)
+ fdmi_speed |= FC_PORTSPEED_1GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_2GBIT)
+ fdmi_speed |= FC_PORTSPEED_2GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_4GBIT)
+ fdmi_speed |= FC_PORTSPEED_4GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_10GBIT)
+ fdmi_speed |= FC_PORTSPEED_10GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_8GBIT)
+ fdmi_speed |= FC_PORTSPEED_8GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_16GBIT)
+ fdmi_speed |= FC_PORTSPEED_16GBIT;
+ if (fsf_speed & ZFCP_FSF_PORTSPEED_NOT_NEGOTIATED)
+ fdmi_speed |= FC_PORTSPEED_NOT_NEGOTIATED;
+ return fdmi_speed;
+}
+
static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
{
struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config;
@@ -456,7 +484,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
fc_host_port_name(shost) = nsp->fl_wwpn;
fc_host_node_name(shost) = nsp->fl_wwnn;
fc_host_port_id(shost) = ntoh24(bottom->s_id);
- fc_host_speed(shost) = bottom->fc_link_speed;
+ fc_host_speed(shost) =
+ zfcp_fsf_convert_portspeed(bottom->fc_link_speed);
fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3;

adapter->hydra_version = bottom->adapter_type;
@@ -580,7 +609,8 @@ static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req)
} else
fc_host_permanent_port_name(shost) = fc_host_port_name(shost);
fc_host_maxframe_size(shost) = bottom->maximum_frame_size;
- fc_host_supported_speeds(shost) = bottom->supported_speed;
+ fc_host_supported_speeds(shost) =
+ zfcp_fsf_convert_portspeed(bottom->supported_speed);
memcpy(fc_host_supported_fc4s(shost), bottom->supported_fc4_types,
FC_FC4_LIST_SIZE);
memcpy(fc_host_active_fc4s(shost), bottom->active_fc4_types,
@@ -771,12 +801,14 @@ out:
static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_scsi_dev *zfcp_sdev;
union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;

if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;

+ zfcp_sdev = sdev_to_zfcp(sdev);
+
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
if (fsq->word[0] == fsq->word[1]) {
@@ -885,7 +917,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)

switch (header->fsf_status) {
case FSF_GOOD:
- zfcp_dbf_san_res("fsscth1", req);
+ zfcp_dbf_san_res("fsscth2", req);
ct->status = 0;
break;
case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1739,13 +1771,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
{
struct zfcp_adapter *adapter = req->adapter;
struct scsi_device *sdev = req->data;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;
struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;

if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;

+ zfcp_sdev = sdev_to_zfcp(sdev);
+
atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
ZFCP_STATUS_COMMON_ACCESS_BOXED |
ZFCP_STATUS_LUN_SHARED |
@@ -1856,11 +1890,13 @@ out:
static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
{
struct scsi_device *sdev = req->data;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_scsi_dev *zfcp_sdev;

if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
return;

+ zfcp_sdev = sdev_to_zfcp(sdev);
+
switch (req->qtcb->header.fsf_status) {
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
@@ -1950,7 +1986,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
{
struct fsf_qual_latency_info *lat_in;
struct latency_cont *lat = NULL;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
+ struct zfcp_scsi_dev *zfcp_sdev;
struct zfcp_blk_drv_data blktrc;
int ticks = req->adapter->timer_ticks;

@@ -1965,6 +2001,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)

if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
!(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+ zfcp_sdev = sdev_to_zfcp(scsi->device);
blktrc.flags |= ZFCP_BLK_LAT_VALID;
blktrc.channel_lat = lat_in->channel_lat * ticks;
blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@@ -2002,12 +2039,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
{
struct scsi_cmnd *scmnd = req->data;
struct scsi_device *sdev = scmnd->device;
- struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+ struct zfcp_scsi_dev *zfcp_sdev;
struct fsf_qtcb_header *header = &req->qtcb->header;

if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
return;

+ zfcp_sdev = sdev_to_zfcp(sdev);
+
switch (header->fsf_status) {
case FSF_HANDLE_MISMATCH:
case FSF_PORT_HANDLE_NOT_VALID:
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e14da57..e76d003 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -102,18 +102,22 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
{
struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
struct zfcp_adapter *adapter = qdio->adapter;
- struct qdio_buffer_element *sbale;
int sbal_no, sbal_idx;
- void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
- u64 req_id;
- u8 scount;

if (unlikely(qdio_err)) {
- memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
if (zfcp_adapter_multi_buffer_active(adapter)) {
+ void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1];
+ struct qdio_buffer_element *sbale;
+ u64 req_id;
+ u8 scount;
+
+ memset(pl, 0,
+ ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
sbale = qdio->res_q[idx]->element;
req_id = (u64) sbale->addr;
- scount = sbale->scount + 1; /* incl. signaling SBAL */
+ scount = min(sbale->scount + 1,
+ ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
+ /* incl. signaling SBAL */

for (sbal_no = 0; sbal_no < scount; sbal_no++) {
sbal_idx = (idx + sbal_no) %
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index cdc4ff7..9e62210 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
zfcp_sysfs_port_rescan_store);

+DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+
static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
else
retval = 0;

+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (atomic_read(&port->units) > 0) {
+ retval = -EBUSY;
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ goto out;
+ }
+ /* port is about to be removed, so no more unit_add */
+ atomic_set(&port->units, -1);
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
write_lock_irq(&adapter->port_list_lock);
list_del(&port->list);
write_unlock_irq(&adapter->port_list_lock);
@@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
{
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
+ int retval;

if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;

- if (zfcp_unit_add(port, fcp_lun))
- return -EINVAL;
+ retval = zfcp_unit_add(port, fcp_lun);
+ if (retval)
+ return retval;

return count;
}
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 20796eb..4e6a535 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
{
struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);

- put_device(&unit->port->dev);
+ atomic_dec(&unit->port->units);
kfree(unit);
}

@@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
{
struct zfcp_unit *unit;
+ int retval = 0;
+
+ mutex_lock(&zfcp_sysfs_port_units_mutex);
+ if (atomic_read(&port->units) == -1) {
+ /* port is already gone */
+ retval = -ENODEV;
+ goto out;
+ }

unit = zfcp_unit_find(port, fcp_lun);
if (unit) {
put_device(&unit->dev);
- return -EEXIST;
+ retval = -EEXIST;
+ goto out;
}

unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
- if (!unit)
- return -ENOMEM;
+ if (!unit) {
+ retval = -ENOMEM;
+ goto out;
+ }

unit->port = port;
unit->fcp_lun = fcp_lun;
@@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
if (dev_set_name(&unit->dev, "0x%016llx",
(unsigned long long) fcp_lun)) {
kfree(unit);
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto out;
}

- get_device(&port->dev);
-
if (device_register(&unit->dev)) {
put_device(&unit->dev);
- return -ENOMEM;
+ retval = -ENOMEM;
+ goto out;
}

if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
device_unregister(&unit->dev);
- return -EINVAL;
+ retval = -EINVAL;
+ goto out;
}

+ atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
+
write_lock_irq(&port->unit_list_lock);
list_add_tail(&unit->list, &port->unit_list);
write_unlock_irq(&port->unit_list_lock);

zfcp_unit_scsi_scan(unit);

- return 0;
+out:
+ mutex_unlock(&zfcp_sysfs_port_units_mutex);
+ return retval;
}

/**
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index 68ce085..a540162 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1173,7 +1173,16 @@ wait_io1:
outw(val, tmport);
outb(2, 0x80);
TCM_SYNC:
- udelay(0x800);
+ /*
+ * The funny division into multiple delays is to accommodate
+ * arches like ARM where udelay() multiplies its argument by
+ * a large number to initialize a loop counter. To avoid
+ * overflow, the maximum supported udelay is 2000 microseconds.
+ *
+ * XXX it would be more polite to find a way to use msleep()
+ */
+ mdelay(2);
+ udelay(48);
if ((inb(tmport) & 0x80) == 0x00) { /* bsy ? */
outw(0, tmport--);
outb(0, tmport);
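
The hunk above splits the original 0x800 (2048) microsecond busy-wait into mdelay(2) plus udelay(48) so that no single udelay() call approaches the ~2000 microsecond ceiling mentioned in the comment. As a rough illustration only, a hypothetical kernel-style helper applying the same splitting to an arbitrary delay (long_delay_us is a made-up name, not part of this patch) could look like:

#include <linux/delay.h>

/* Sketch: busy-wait for 'us' microseconds without ever handing udelay()
 * more than 1000us at a time, so its internal loop-counter
 * multiplication cannot overflow on arches like ARM. */
static void long_delay_us(unsigned long us)
{
	while (us >= 1000) {
		mdelay(1);
		us -= 1000;
	}
	if (us)
		udelay(us);
}
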
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 33ef60d..6a8568c 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -203,6 +203,27 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len,
int i;
wchar_t wchar_to; /* needed to quiet sparse */

+ /* special case for utf8 to handle no plane0 chars */
+ if (!strcmp(codepage->charset, "utf8")) {
+ /*
+ * Convert utf8 -> utf16; we assume there is enough space in the
+ * destination, since the caller should have ensured the conversion
+ * cannot overflow. len is the length in wchar_t units (16 bits).
+ */
+ i = utf8s_to_utf16s(from, len, UTF16_LITTLE_ENDIAN,
+ (wchar_t *) to, len);
+
+ /* on success, terminate and exit */
+ if (i >= 0)
+ goto success;
+ /*
+ * If it fails, fall back to UCS encoding, as this
+ * function should not return negative values.
+ * Currently it can fail only if the source contains
+ * invalidly encoded characters.
+ */
+ }
+
for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
charlen = codepage->char2uni(from, len, &wchar_to);
if (charlen < 1) {
@@ -215,6 +236,7 @@ cifs_strtoUTF16(__le16 *to, const char *from, int len,
put_unaligned_le16(wchar_to, &to[i]);
}

+success:
put_unaligned_le16(0, &to[i]);
return i;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f967005..f027c2b 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -70,6 +70,7 @@ enum {
/* Mount options that take no arguments */
Opt_user_xattr, Opt_nouser_xattr,
Opt_forceuid, Opt_noforceuid,
+ Opt_forcegid, Opt_noforcegid,
Opt_noblocksend, Opt_noautotune,
Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
Opt_mapchars, Opt_nomapchars, Opt_sfu,
@@ -121,6 +122,8 @@ static const match_table_t cifs_mount_option_tokens = {
{ Opt_nouser_xattr, "nouser_xattr" },
{ Opt_forceuid, "forceuid" },
{ Opt_noforceuid, "noforceuid" },
+ { Opt_forcegid, "forcegid" },
+ { Opt_noforcegid, "noforcegid" },
{ Opt_noblocksend, "noblocksend" },
{ Opt_noautotune, "noautotune" },
{ Opt_hard, "hard" },
@@ -1345,6 +1348,12 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
case Opt_noforceuid:
override_uid = 0;
break;
+ case Opt_forcegid:
+ override_gid = 1;
+ break;
+ case Opt_noforcegid:
+ override_gid = 0;
+ break;
case Opt_noblocksend:
vol->noblocksnd = 1;
break;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 47a3e00..9e9aed3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2453,6 +2453,16 @@ static int ext4_nonda_switch(struct super_block *sb)
free_blocks = EXT4_C2B(sbi,
percpu_counter_read_positive(&sbi->s_freeclusters_counter));
dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
+ /*
+ * Start pushing delalloc when 1/2 of free blocks are dirty.
+ */
+ if (dirty_blocks && (free_blocks < 2 * dirty_blocks) &&
+ !writeback_in_progress(sb->s_bdi) &&
+ down_read_trylock(&sb->s_umount)) {
+ writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
+ up_read(&sb->s_umount);
+ }
+
if (2 * free_blocks < 3 * dirty_blocks ||
free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
/*
@@ -2461,13 +2471,6 @@ static int ext4_nonda_switch(struct super_block *sb)
*/
return 1;
}
- /*
- * Even if we don't switch but are nearing capacity,
- * start pushing delalloc when 1/2 of free blocks are dirty.
- */
- if (free_blocks < 2 * dirty_blocks)
- writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
-
return 0;
}

@@ -3985,6 +3988,7 @@ static int ext4_do_update_inode(handle_t *handle,
struct ext4_inode_info *ei = EXT4_I(inode);
struct buffer_head *bh = iloc->bh;
int err = 0, rc, block;
+ int need_datasync = 0;
uid_t i_uid;
gid_t i_gid;

@@ -4035,7 +4039,10 @@ static int ext4_do_update_inode(handle_t *handle,
raw_inode->i_file_acl_high =
cpu_to_le16(ei->i_file_acl >> 32);
raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
- ext4_isize_set(raw_inode, ei->i_disksize);
+ if (ei->i_disksize != ext4_isize(raw_inode)) {
+ ext4_isize_set(raw_inode, ei->i_disksize);
+ need_datasync = 1;
+ }
if (ei->i_disksize > 0x7fffffffULL) {
struct super_block *sb = inode->i_sb;
if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
@@ -4088,7 +4095,7 @@ static int ext4_do_update_inode(handle_t *handle,
err = rc;
ext4_clear_inode_state(inode, EXT4_STATE_NEW);

- ext4_update_inode_fsync_trans(handle, inode, 0);
+ ext4_update_inode_fsync_trans(handle, inode, need_datasync);
out_brelse:
brelse(bh);
ext4_std_error(inode->i_sb, err);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index c5826c6..e2016f3 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -141,55 +141,21 @@ mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
}

/**
- * mext_check_null_inode - NULL check for two inodes
- *
- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
- */
-static int
-mext_check_null_inode(struct inode *inode1, struct inode *inode2,
- const char *function, unsigned int line)
-{
- int ret = 0;
-
- if (inode1 == NULL) {
- __ext4_error(inode2->i_sb, function, line,
- "Both inodes should not be NULL: "
- "inode1 NULL inode2 %lu", inode2->i_ino);
- ret = -EIO;
- } else if (inode2 == NULL) {
- __ext4_error(inode1->i_sb, function, line,
- "Both inodes should not be NULL: "
- "inode1 %lu inode2 NULL", inode1->i_ino);
- ret = -EIO;
- }
- return ret;
-}
-
-/**
* double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem
*
- * @orig_inode: original inode structure
- * @donor_inode: donor inode structure
- * Acquire write lock of i_data_sem of the two inodes (orig and donor) by
- * i_ino order.
+ * Acquire the write lock of i_data_sem for both inodes
*/
static void
-double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
+double_down_write_data_sem(struct inode *first, struct inode *second)
{
- struct inode *first = orig_inode, *second = donor_inode;
+ if (first < second) {
+ down_write(&EXT4_I(first)->i_data_sem);
+ down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
+ } else {
+ down_write(&EXT4_I(second)->i_data_sem);
+ down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING);

- /*
- * Use the inode number to provide the stable locking order instead
- * of its address, because the C language doesn't guarantee you can
- * compare pointers that don't come from the same array.
- */
- if (donor_inode->i_ino < orig_inode->i_ino) {
- first = donor_inode;
- second = orig_inode;
}
-
- down_write(&EXT4_I(first)->i_data_sem);
- down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING);
}

/**
@@ -969,14 +935,6 @@ mext_check_arguments(struct inode *orig_inode,
return -EINVAL;
}

- /* Files should be in the same ext4 FS */
- if (orig_inode->i_sb != donor_inode->i_sb) {
- ext4_debug("ext4 move extent: The argument files "
- "should be in same FS [ino:orig %lu, donor %lu]\n",
- orig_inode->i_ino, donor_inode->i_ino);
- return -EINVAL;
- }
-
/* Ext4 move extent supports only extent based file */
if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: orig file is not extents "
@@ -1072,35 +1030,19 @@ mext_check_arguments(struct inode *orig_inode,
* @inode1: the inode structure
* @inode2: the inode structure
*
- * Lock two inodes' i_mutex by i_ino order.
- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
+ * Lock two inodes' i_mutex
*/
-static int
+static void
mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
{
- int ret = 0;
-
- BUG_ON(inode1 == NULL && inode2 == NULL);
-
- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
- if (ret < 0)
- goto out;
-
- if (inode1 == inode2) {
- mutex_lock(&inode1->i_mutex);
- goto out;
- }
-
- if (inode1->i_ino < inode2->i_ino) {
+ BUG_ON(inode1 == inode2);
+ if (inode1 < inode2) {
mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
} else {
mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
}
-
-out:
- return ret;
}

/**
@@ -1109,28 +1051,13 @@ out:
* @inode1: the inode that is released first
* @inode2: the inode that is released second
*
- * If inode1 or inode2 is NULL, return -EIO. Otherwise, return 0.
*/

-static int
+static void
mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
- int ret = 0;
-
- BUG_ON(inode1 == NULL && inode2 == NULL);
-
- ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
- if (ret < 0)
- goto out;
-
- if (inode1)
- mutex_unlock(&inode1->i_mutex);
-
- if (inode2 && inode2 != inode1)
- mutex_unlock(&inode2->i_mutex);
-
-out:
- return ret;
+ mutex_unlock(&inode1->i_mutex);
+ mutex_unlock(&inode2->i_mutex);
}

/**
@@ -1187,16 +1114,23 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
ext4_lblk_t rest_blocks;
pgoff_t orig_page_offset = 0, seq_end_page;
- int ret1, ret2, depth, last_extent = 0;
+ int ret, depth, last_extent = 0;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
int data_offset_in_page;
int block_len_in_page;
int uninit;

- /* orig and donor should be different file */
- if (orig_inode->i_ino == donor_inode->i_ino) {
+ if (orig_inode->i_sb != donor_inode->i_sb) {
+ ext4_debug("ext4 move extent: The argument files "
+ "should be in same FS [ino:orig %lu, donor %lu]\n",
+ orig_inode->i_ino, donor_inode->i_ino);
+ return -EINVAL;
+ }
+
+ /* orig and donor should be different inodes */
+ if (orig_inode == donor_inode) {
ext4_debug("ext4 move extent: The argument files should not "
- "be same file [ino:orig %lu, donor %lu]\n",
+ "be same inode [ino:orig %lu, donor %lu]\n",
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
@@ -1208,18 +1142,21 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
orig_inode->i_ino, donor_inode->i_ino);
return -EINVAL;
}
-
+ /* TODO: This is a non-obvious task: swapping blocks for inodes with
+ full journaling enabled. */
+ if (ext4_should_journal_data(orig_inode) ||
+ ext4_should_journal_data(donor_inode)) {
+ return -EINVAL;
+ }
/* Protect orig and donor inodes against a truncate */
- ret1 = mext_inode_double_lock(orig_inode, donor_inode);
- if (ret1 < 0)
- return ret1;
+ mext_inode_double_lock(orig_inode, donor_inode);

/* Protect extent tree against block allocations via delalloc */
double_down_write_data_sem(orig_inode, donor_inode);
/* Check the filesystem environment whether move_extent can be done */
- ret1 = mext_check_arguments(orig_inode, donor_inode, orig_start,
+ ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
donor_start, &len);
- if (ret1)
+ if (ret)
goto out;

file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
@@ -1227,13 +1164,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
if (file_end < block_end)
len -= block_end - file_end;

- ret1 = get_ext_path(orig_inode, block_start, &orig_path);
- if (ret1)
+ ret = get_ext_path(orig_inode, block_start, &orig_path);
+ if (ret)
goto out;

/* Get path structure to check the hole */
- ret1 = get_ext_path(orig_inode, block_start, &holecheck_path);
- if (ret1)
+ ret = get_ext_path(orig_inode, block_start, &holecheck_path);
+ if (ret)
goto out;

depth = ext_depth(orig_inode);
@@ -1252,13 +1189,13 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
last_extent = mext_next_extent(orig_inode,
holecheck_path, &ext_cur);
if (last_extent < 0) {
- ret1 = last_extent;
+ ret = last_extent;
goto out;
}
last_extent = mext_next_extent(orig_inode, orig_path,
&ext_dummy);
if (last_extent < 0) {
- ret1 = last_extent;
+ ret = last_extent;
goto out;
}
seq_start = le32_to_cpu(ext_cur->ee_block);
@@ -1272,7 +1209,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
if (le32_to_cpu(ext_cur->ee_block) > block_end) {
ext4_debug("ext4 move extent: The specified range of file "
"may be the hole\n");
- ret1 = -EINVAL;
+ ret = -EINVAL;
goto out;
}

@@ -1292,7 +1229,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
last_extent = mext_next_extent(orig_inode, holecheck_path,
&ext_cur);
if (last_extent < 0) {
- ret1 = last_extent;
+ ret = last_extent;
break;
}
add_blocks = ext4_ext_get_actual_len(ext_cur);
@@ -1349,18 +1286,18 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
orig_page_offset,
data_offset_in_page,
block_len_in_page, uninit,
- &ret1);
+ &ret);

/* Count how many blocks we have exchanged */
*moved_len += block_len_in_page;
- if (ret1 < 0)
+ if (ret < 0)
break;
if (*moved_len > len) {
EXT4_ERROR_INODE(orig_inode,
"We replaced blocks too much! "
"sum of replaced: %llu requested: %llu",
*moved_len, len);
- ret1 = -EIO;
+ ret = -EIO;
break;
}

@@ -1374,22 +1311,22 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
}

double_down_write_data_sem(orig_inode, donor_inode);
- if (ret1 < 0)
+ if (ret < 0)
break;

/* Decrease buffer counter */
if (holecheck_path)
ext4_ext_drop_refs(holecheck_path);
- ret1 = get_ext_path(orig_inode, seq_start, &holecheck_path);
- if (ret1)
+ ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
+ if (ret)
break;
depth = holecheck_path->p_depth;

/* Decrease buffer counter */
if (orig_path)
ext4_ext_drop_refs(orig_path);
- ret1 = get_ext_path(orig_inode, seq_start, &orig_path);
- if (ret1)
+ ret = get_ext_path(orig_inode, seq_start, &orig_path);
+ if (ret)
break;

ext_cur = holecheck_path[depth].p_ext;
@@ -1412,12 +1349,7 @@ out:
kfree(holecheck_path);
}
double_up_write_data_sem(orig_inode, donor_inode);
- ret2 = mext_inode_double_unlock(orig_inode, donor_inode);
-
- if (ret1)
- return ret1;
- else if (ret2)
- return ret2;
+ mext_inode_double_unlock(orig_inode, donor_inode);

- return 0;
+ return ret;
}
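
The rewritten double_down_write_data_sem() and mext_inode_double_lock() above take both locks in a fixed order, the lock at the lower address first, so that two tasks operating on the same pair of inodes can never deadlock against each other. A minimal userspace sketch of the same discipline (lock_pair/unlock_pair are illustrative names, not kernel functions):

#include <assert.h>
#include <pthread.h>

/* Always acquire the mutex at the lower address first; any two threads
 * locking the same pair then agree on the order and cannot deadlock.
 * ISO C only defines '<' for pointers into the same object, but address
 * comparison works on real platforms, which is what the kernel code
 * above relies on as well. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	assert(a != b);		/* callers must pass two distinct locks */
	if (a > b) {
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void unlock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	/* unlock order is irrelevant for correctness */
	pthread_mutex_unlock(a);
	pthread_mutex_unlock(b);
}
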
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 0edaf18..ad6767e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2149,9 +2149,7 @@ retry:
err = PTR_ERR(inode);
if (!IS_ERR(inode)) {
init_special_inode(inode, inode->i_mode, rdev);
-#ifdef CONFIG_EXT4_FS_XATTR
inode->i_op = &ext4_special_inode_operations;
-#endif
err = ext4_add_nondir(handle, dentry, inode);
}
ext4_journal_stop(handle);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 17d38de..b0bdd10 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -200,8 +200,11 @@ static void free_flex_gd(struct ext4_new_flex_group_data *flex_gd)
* be a partial of a flex group.
*
* @sb: super block of fs to which the groups belongs
+ *
+ * Returns 0 on a successful allocation of the metadata blocks in the
+ * block group.
*/
-static void ext4_alloc_group_tables(struct super_block *sb,
+static int ext4_alloc_group_tables(struct super_block *sb,
struct ext4_new_flex_group_data *flex_gd,
int flexbg_size)
{
@@ -226,6 +229,8 @@ static void ext4_alloc_group_tables(struct super_block *sb,
(last_group & ~(flexbg_size - 1))));
next_group:
group = group_data[0].group;
+ if (src_group >= group_data[0].group + flex_gd->count)
+ return -ENOSPC;
start_blk = ext4_group_first_block_no(sb, src_group);
last_blk = start_blk + group_data[src_group - group].blocks_count;

@@ -235,7 +240,6 @@ next_group:

start_blk += overhead;

- BUG_ON(src_group >= group_data[0].group + flex_gd->count);
/* We collect contiguous blocks as much as possible. */
src_group++;
for (; src_group <= last_group; src_group++)
@@ -300,6 +304,7 @@ next_group:
group_data[i].free_blocks_count);
}
}
+ return 0;
}

static struct buffer_head *bclean(handle_t *handle, struct super_block *sb,
@@ -451,6 +456,9 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
gdblocks = ext4_bg_num_gdb(sb, group);
start = ext4_group_first_block_no(sb, group);

+ if (!ext4_bg_has_super(sb, group))
+ goto handle_itb;
+
/* Copy all of the GDT blocks into the backup in this group */
for (j = 0, block = start + 1; j < gdblocks; j++, block++) {
struct buffer_head *gdb;
@@ -493,6 +501,7 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
goto out;
}

+handle_itb:
/* Initialize group tables of the group @group */
if (!(bg_flags[i] & EXT4_BG_INODE_ZEROED))
goto handle_bb;
@@ -1349,13 +1358,15 @@ exit_journal:
err = err2;

if (!err) {
- int i;
+ int gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
+ int gdb_num_end = ((group + flex_gd->count - 1) /
+ EXT4_DESC_PER_BLOCK(sb));
+
update_backups(sb, sbi->s_sbh->b_blocknr, (char *)es,
sizeof(struct ext4_super_block));
- for (i = 0; i < flex_gd->count; i++, group++) {
+ for (; gdb_num <= gdb_num_end; gdb_num++) {
struct buffer_head *gdb_bh;
- int gdb_num;
- gdb_num = group / EXT4_BLOCKS_PER_GROUP(sb);
+
gdb_bh = sbi->s_group_desc[gdb_num];
update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
gdb_bh->b_size);
@@ -1729,7 +1740,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
*/
while (ext4_setup_next_flex_gd(sb, flex_gd, n_blocks_count,
flexbg_size)) {
- ext4_alloc_group_tables(sb, flex_gd, flexbg_size);
+ if (ext4_alloc_group_tables(sb, flex_gd, flexbg_size) != 0)
+ break;
err = ext4_flex_group_add(sb, resize_inode, flex_gd);
if (unlikely(err))
break;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 41598ee..975405c 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1748,7 +1748,7 @@ static inline void ext4_show_quota_options(struct seq_file *seq,

static const char *token2str(int token)
{
- static const struct match_token *t;
+ const struct match_token *t;

for (t = tokens; t->token != Opt_err; t++)
if (t->token == token && !strchr(t->pattern, '='))
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 41a3ccf..e2901ab 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -68,6 +68,7 @@ int writeback_in_progress(struct backing_dev_info *bdi)
{
return test_bit(BDI_writeback_running, &bdi->state);
}
+EXPORT_SYMBOL(writeback_in_progress);

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index bd23f2e..0f16edd 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1354,6 +1354,11 @@ static void jbd2_mark_journal_empty(journal_t *journal)

BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
read_lock(&journal->j_state_lock);
+ /* Is it already empty? */
+ if (sb->s_start == 0) {
+ read_unlock(&journal->j_state_lock);
+ return;
+ }
jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
journal->j_tail_sequence);

diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index 61ea413..1224d6b 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -100,6 +100,10 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
{
struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);

+#ifdef CONFIG_JFFS2_FS_WRITEBUFFER
+ cancel_delayed_work_sync(&c->wbuf_dwork);
+#endif
+
mutex_lock(&c->alloc_sem);
jffs2_flush_wbuf_pad(c);
mutex_unlock(&c->alloc_sem);
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 6f4529d..a6597d6 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -1044,10 +1044,10 @@ int jffs2_check_oob_empty(struct jffs2_sb_info *c,
ops.datbuf = NULL;

ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
- if (ret || ops.oobretlen != ops.ooblen) {
+ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
jeb->offset, ops.ooblen, ops.oobretlen, ret);
- if (!ret)
+ if (!ret || mtd_is_bitflip(ret))
ret = -EIO;
return ret;
}
@@ -1086,10 +1086,10 @@ int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
ops.datbuf = NULL;

ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
- if (ret || ops.oobretlen != ops.ooblen) {
+ if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
jeb->offset, ops.ooblen, ops.oobretlen, ret);
- if (!ret)
+ if (!ret || mtd_is_bitflip(ret))
ret = -EIO;
return ret;
}
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 7fcd0d6..b8730d9 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -115,7 +115,13 @@ u64 stable_page_flags(struct page *page)
u |= 1 << KPF_COMPOUND_TAIL;
if (PageHuge(page))
u |= 1 << KPF_HUGE;
- else if (PageTransCompound(page))
+ /*
+ * PageTransCompound can be true for non-huge compound pages (slab
+ * pages or pages allocated by drivers with __GFP_COMP) because it
+ * just checks PG_head/PG_tail, so we need to check PageLRU to make
+ * sure a given page is a thp, not a non-huge compound page.
+ */
+ else if (PageTransCompound(page) && PageLRU(compound_trans_head(page)))
u |= 1 << KPF_THP;

/*
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 4aa4273..c252970 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -188,7 +188,7 @@ struct sp_node {

struct shared_policy {
struct rb_root root;
- spinlock_t lock;
+ struct mutex mutex;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index 22e61fd..28e493b 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -84,6 +84,8 @@ struct xfrm_replay_state {
__u32 bitmap;
};

+#define XFRMA_REPLAY_ESN_MAX 4096
+
struct xfrm_replay_state_esn {
unsigned int bmp_len;
__u32 oseq;
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index e0a55df..887a232 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -269,6 +269,9 @@ struct xfrm_replay {
int (*check)(struct xfrm_state *x,
struct sk_buff *skb,
__be32 net_seq);
+ int (*recheck)(struct xfrm_state *x,
+ struct sk_buff *skb,
+ __be32 net_seq);
void (*notify)(struct xfrm_state *x, int event);
int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
};
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 8c8bd65..746d1ee 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2054,6 +2054,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
* (of no effect) on systems that are actively using CPU hotplug
* but making no active use of cpusets.
*
+ * The only exception to this is suspend/resume, where we don't
+ * modify cpusets at all.
+ *
* This routine ensures that top_cpuset.cpus_allowed tracks
* cpu_active_mask on each CPU hotplug (cpuhp) event.
*
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 4b97bba..dd55ba1 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -304,7 +304,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
- return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
+ return *rdp->nxttail[RCU_DONE_TAIL +
+ ACCESS_ONCE(rsp->completed) != rdp->completed] &&
+ !rcu_gp_in_progress(rsp);
}

/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c04c9ff..9f81a3a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7103,34 +7103,66 @@ match2:
mutex_unlock(&sched_domains_mutex);
}

+static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
+
/*
* Update cpusets according to cpu_active mask. If cpusets are
* disabled, cpuset_update_active_cpus() becomes a simple wrapper
* around partition_sched_domains().
+ *
+ * If we come here as part of a suspend/resume, don't touch cpusets because we
+ * want to restore them to their original state upon resume anyway.
*/
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
+ switch (action) {
+ case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED_FROZEN:
+
+ /*
+ * num_cpus_frozen tracks how many CPUs are involved in the
+ * suspend/resume sequence. As long as this is not the last online
+ * operation in the resume sequence, just build a single sched
+ * domain, ignoring cpusets.
+ */
+ num_cpus_frozen--;
+ if (likely(num_cpus_frozen)) {
+ partition_sched_domains(1, NULL, NULL);
+ break;
+ }
+
+ /*
+ * This is the last CPU online operation. So fall through and
+ * restore the original sched domains by considering the
+ * cpuset configurations.
+ */
+
case CPU_ONLINE:
case CPU_DOWN_FAILED:
cpuset_update_active_cpus();
- return NOTIFY_OK;
+ break;
default:
return NOTIFY_DONE;
}
+ return NOTIFY_OK;
}

static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
+ switch (action) {
case CPU_DOWN_PREPARE:
cpuset_update_active_cpus();
- return NOTIFY_OK;
+ break;
+ case CPU_DOWN_PREPARE_FROZEN:
+ num_cpus_frozen++;
+ partition_sched_domains(1, NULL, NULL);
+ break;
default:
return NOTIFY_DONE;
}
+ return NOTIFY_OK;
}

void __init sched_init_smp(void)
diff --git a/kernel/sys.c b/kernel/sys.c
index 2d39a84..0349bde 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -368,6 +368,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
void kernel_restart(char *cmd)
{
kernel_restart_prepare(cmd);
+ disable_nonboot_cpus();
if (!cmd)
printk(KERN_EMERG "Restarting system.\n");
else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2016347..478f2ab 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1869,7 +1869,9 @@ __acquires(&gcwq->lock)

spin_unlock_irq(&gcwq->lock);

+ smp_wmb(); /* paired with test_and_set_bit(PENDING) */
work_clear_pending(work);
+
lock_map_acquire_read(&cwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
trace_workqueue_execute_start(work);
diff --git a/lib/gcd.c b/lib/gcd.c
index cce4f3c..3657f12 100644
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)

if (a < b)
swap(a, b);
+
+ if (!b)
+ return a;
while ((r = a % b) != 0) {
a = b;
b = r;
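
The added check matters because the Euclidean loop below it divides by b, so gcd(x, 0) would otherwise trap on a division by zero. A self-contained userspace version of the guarded routine, for reference only:

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	unsigned long r;

	if (a < b) {		/* ensure a >= b */
		r = a;
		a = b;
		b = r;
	}
	if (!b)			/* gcd(a, 0) == a; avoids a % 0 */
		return a;
	while ((r = a % b) != 0) {
		a = b;
		b = r;
	}
	return b;
}

int main(void)
{
	printf("%lu %lu\n", gcd(12, 18), gcd(42, 0));	/* prints "6 42" */
	return 0;
}
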
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 19558df..9c34eb5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2432,7 +2432,8 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
* from page cache lookup which is in HPAGE_SIZE units.
*/
address = address & huge_page_mask(h);
- pgoff = vma_hugecache_offset(h, vma, address);
+ pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
+ vma->vm_pgoff;
mapping = vma->vm_file->f_dentry->d_inode->i_mapping;

/*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index b12b28a..cee020c 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -607,6 +607,42 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
return first;
}

+/*
+ * Apply policy to a single VMA
+ * This must be called with the mmap_sem held for writing.
+ */
+static int vma_replace_policy(struct vm_area_struct *vma,
+ struct mempolicy *pol)
+{
+ int err;
+ struct mempolicy *old;
+ struct mempolicy *new;
+
+ pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
+ vma->vm_start, vma->vm_end, vma->vm_pgoff,
+ vma->vm_ops, vma->vm_file,
+ vma->vm_ops ? vma->vm_ops->set_policy : NULL);
+
+ new = mpol_dup(pol);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ if (vma->vm_ops && vma->vm_ops->set_policy) {
+ err = vma->vm_ops->set_policy(vma, new);
+ if (err)
+ goto err_out;
+ }
+
+ old = vma->vm_policy;
+ vma->vm_policy = new; /* protected by mmap_sem */
+ mpol_put(old);
+
+ return 0;
+ err_out:
+ mpol_put(new);
+ return err;
+}
+
/* Step 2: apply policy to a range and do splits. */
static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long end, struct mempolicy *new_pol)
@@ -655,23 +691,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (err)
goto out;
}
-
- /*
- * Apply policy to a single VMA. The reference counting of
- * policy for vma_policy linkages has already been handled by
- * vma_merge and split_vma as necessary. If this is a shared
- * policy then ->set_policy will increment the reference count
- * for an sp node.
- */
- pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
- vma->vm_start, vma->vm_end, vma->vm_pgoff,
- vma->vm_ops, vma->vm_file,
- vma->vm_ops ? vma->vm_ops->set_policy : NULL);
- if (vma->vm_ops && vma->vm_ops->set_policy) {
- err = vma->vm_ops->set_policy(vma, new_pol);
- if (err)
- goto out;
- }
+ err = vma_replace_policy(vma, new_pol);
+ if (err)
+ goto out;
}

out:
@@ -1530,8 +1552,18 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
addr);
if (vpol)
pol = vpol;
- } else if (vma->vm_policy)
+ } else if (vma->vm_policy) {
pol = vma->vm_policy;
+
+ /*
+ * shmem_alloc_page() passes MPOL_F_SHARED policy with
+ * a pseudo vma whose vma->vm_ops=NULL. Take a reference
+ * on these policies, which will be dropped by
+ * mpol_cond_put() later.
+ */
+ if (mpol_needs_cond_ref(pol))
+ mpol_get(pol);
+ }
}
if (!pol)
pol = &default_policy;
@@ -2055,7 +2087,7 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
*/

/* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/* Caller holds sp->mutex */
static struct sp_node *
sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
{
@@ -2119,36 +2151,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)

if (!sp->root.rb_node)
return NULL;
- spin_lock(&sp->lock);
+ mutex_lock(&sp->mutex);
sn = sp_lookup(sp, idx, idx+1);
if (sn) {
mpol_get(sn->policy);
pol = sn->policy;
}
- spin_unlock(&sp->lock);
+ mutex_unlock(&sp->mutex);
return pol;
}

+static void sp_free(struct sp_node *n)
+{
+ mpol_put(n->policy);
+ kmem_cache_free(sn_cache, n);
+}
+
static void sp_delete(struct shared_policy *sp, struct sp_node *n)
{
pr_debug("deleting %lx-l%lx\n", n->start, n->end);
rb_erase(&n->nd, &sp->root);
- mpol_put(n->policy);
- kmem_cache_free(sn_cache, n);
+ sp_free(n);
}

static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
struct mempolicy *pol)
{
- struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+ struct sp_node *n;
+ struct mempolicy *newpol;

+ n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
if (!n)
return NULL;
+
+ newpol = mpol_dup(pol);
+ if (IS_ERR(newpol)) {
+ kmem_cache_free(sn_cache, n);
+ return NULL;
+ }
+ newpol->flags |= MPOL_F_SHARED;
+
n->start = start;
n->end = end;
- mpol_get(pol);
- pol->flags |= MPOL_F_SHARED; /* for unref */
- n->policy = pol;
+ n->policy = newpol;
+
return n;
}

@@ -2156,10 +2202,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
unsigned long end, struct sp_node *new)
{
- struct sp_node *n, *new2 = NULL;
+ struct sp_node *n;
+ int ret = 0;

-restart:
- spin_lock(&sp->lock);
+ mutex_lock(&sp->mutex);
n = sp_lookup(sp, start, end);
/* Take care of old policies in the same range. */
while (n && n->start < end) {
@@ -2172,16 +2218,14 @@ restart:
} else {
/* Old policy spanning whole new range. */
if (n->end > end) {
+ struct sp_node *new2;
+ new2 = sp_alloc(end, n->end, n->policy);
if (!new2) {
- spin_unlock(&sp->lock);
- new2 = sp_alloc(end, n->end, n->policy);
- if (!new2)
- return -ENOMEM;
- goto restart;
+ ret = -ENOMEM;
+ goto out;
}
n->end = start;
sp_insert(sp, new2);
- new2 = NULL;
break;
} else
n->end = start;
@@ -2192,12 +2236,9 @@ restart:
}
if (new)
sp_insert(sp, new);
- spin_unlock(&sp->lock);
- if (new2) {
- mpol_put(new2->policy);
- kmem_cache_free(sn_cache, new2);
- }
- return 0;
+out:
+ mutex_unlock(&sp->mutex);
+ return ret;
}

/**
@@ -2215,7 +2256,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
int ret;

sp->root = RB_ROOT; /* empty tree == default mempolicy */
- spin_lock_init(&sp->lock);
+ mutex_init(&sp->mutex);

if (mpol) {
struct vm_area_struct pvma;
@@ -2269,7 +2310,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
}
err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
if (err && new)
- kmem_cache_free(sn_cache, new);
+ sp_free(new);
return err;
}

@@ -2281,16 +2322,14 @@ void mpol_free_shared_policy(struct shared_policy *p)

if (!p->root.rb_node)
return;
- spin_lock(&p->lock);
+ mutex_lock(&p->mutex);
next = rb_first(&p->root);
while (next) {
n = rb_entry(next, struct sp_node, nd);
next = rb_next(&n->nd);
- rb_erase(&n->nd, &p->root);
- mpol_put(n->policy);
- kmem_cache_free(sn_cache, n);
+ sp_delete(p, n);
}
- spin_unlock(&p->lock);
+ mutex_unlock(&p->mutex);
}

/* assumes fs == KERNEL_DS */
diff --git a/mm/slab.c b/mm/slab.c
index e901a36..da2bb68 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1685,9 +1685,6 @@ void __init kmem_cache_init_late(void)

g_cpucache_up = LATE;

- /* Annotate slab for lockdep -- annotate the malloc caches */
- init_lock_keys();
-
/* 6) resize the head arrays to their final sizes */
mutex_lock(&cache_chain_mutex);
list_for_each_entry(cachep, &cache_chain, next)
@@ -1695,6 +1692,9 @@ void __init kmem_cache_init_late(void)
BUG();
mutex_unlock(&cache_chain_mutex);

+ /* Annotate slab for lockdep -- annotate the malloc caches */
+ init_lock_keys();
+
/* Done! */
g_cpucache_up = FULL;

diff --git a/mm/truncate.c b/mm/truncate.c
index 75801ac..f38055c 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -394,11 +394,12 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
return 0;

+ clear_page_mlock(page);
+
spin_lock_irq(&mapping->tree_lock);
if (PageDirty(page))
goto failed;

- clear_page_mlock(page);
BUG_ON(page_has_private(page));
__delete_from_page_cache(page);
spin_unlock_irq(&mapping->tree_lock);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 8ca533c..830059d 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -105,7 +105,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
return NULL;
memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
skb->mac_header += VLAN_HLEN;
- skb_reset_mac_len(skb);
return skb;
}

@@ -139,6 +138,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)

skb_reset_network_header(skb);
skb_reset_transport_header(skb);
+ skb_reset_mac_len(skb);
+
return skb;

err_free:
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 1835c15..7d1840b 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -22,8 +22,9 @@
#ifndef _NET_BATMAN_ADV_BITARRAY_H_
#define _NET_BATMAN_ADV_BITARRAY_H_

-/* returns true if the corresponding bit in the given seq_bits indicates true
- * and curr_seqno is within range of last_seqno */
+/* Returns 1 if the corresponding bit in the given seq_bits indicates true
+ * and curr_seqno is within range of last_seqno. Otherwise returns 0.
+ */
static inline int bat_test_bit(const unsigned long *seq_bits,
uint32_t last_seqno, uint32_t curr_seqno)
{
@@ -33,7 +34,7 @@ static inline int bat_test_bit(const unsigned long *seq_bits,
if (diff < 0 || diff >= TQ_LOCAL_WINDOW_SIZE)
return 0;
else
- return test_bit(diff, seq_bits);
+ return test_bit(diff, seq_bits) != 0;
}

/* turn corresponding bit on, so we can remember that we got the packet */
diff --git a/net/core/dev.c b/net/core/dev.c
index e2215ee..8b3dee5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2119,7 +2119,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol)
static netdev_features_t harmonize_features(struct sk_buff *skb,
__be16 protocol, netdev_features_t features)
{
- if (!can_checksum_protocol(features, protocol)) {
+ if (skb->ip_summed != CHECKSUM_NONE &&
+ !can_checksum_protocol(features, protocol)) {
features &= ~NETIF_F_ALL_CSUM;
features &= ~NETIF_F_SG;
} else if (illegal_highdma(skb->dev, skb)) {
@@ -2615,15 +2616,16 @@ void __skb_get_rxhash(struct sk_buff *skb)
if (!skb_flow_dissect(skb, &keys))
return;

- if (keys.ports) {
- if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0])
- swap(keys.port16[0], keys.port16[1]);
+ if (keys.ports)
skb->l4_rxhash = 1;
- }

/* get a consistent hash (same value on both flow directions) */
- if ((__force u32)keys.dst < (__force u32)keys.src)
+ if (((__force u32)keys.dst < (__force u32)keys.src) ||
+ (((__force u32)keys.dst == (__force u32)keys.src) &&
+ ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
swap(keys.dst, keys.src);
+ swap(keys.port16[0], keys.port16[1]);
+ }

hash = jhash_3words((__force u32)keys.dst,
(__force u32)keys.src,
diff --git a/net/core/sock.c b/net/core/sock.c
index fcf8fdf..c5f765c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -636,7 +636,8 @@ set_rcvbuf:

case SO_KEEPALIVE:
#ifdef CONFIG_INET
- if (sk->sk_protocol == IPPROTO_TCP)
+ if (sk->sk_protocol == IPPROTO_TCP &&
+ sk->sk_type == SOCK_STREAM)
tcp_set_keepalive(sk, valbool);
#endif
sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 4032b81..d53aa77 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -131,18 +131,20 @@ found:
* 0 - deliver
* 1 - block
*/
-static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
+static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
{
- int type;
+ struct icmphdr _hdr;
+ const struct icmphdr *hdr;

- if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
+ hdr = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_hdr), &_hdr);
+ if (!hdr)
return 1;

- type = icmp_hdr(skb)->type;
- if (type < 32) {
+ if (hdr->type < 32) {
__u32 data = raw_sk(sk)->filter.data;

- return ((1 << type) & data) != 0;
+ return ((1U << hdr->type) & data) != 0;
}

/* Do not block unknown ICMP types */
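
The filter consulted above is a 32-bit bitmap indexed by ICMP type: types below 32 can be blocked individually, everything else is always delivered. A small userspace sketch of just that lookup (icmp_type_blocked is an illustrative name):

#include <stdint.h>

/* Return 1 to block the message, 0 to deliver it.  Bit N of 'filter'
 * is set if ICMP type N (N < 32) should be blocked. */
static int icmp_type_blocked(uint32_t filter, uint8_t type)
{
	if (type < 32)
		return ((UINT32_C(1) << type) & filter) != 0;
	return 0;		/* never block unknown/high types */
}
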
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7ac0ce6..56e9fa7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1706,8 +1706,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
}

#ifdef CONFIG_NET_DMA
- if (tp->ucopy.dma_chan)
- dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+ if (tp->ucopy.dma_chan) {
+ if (tp->rcv_wnd == 0 &&
+ !skb_queue_empty(&sk->sk_async_wait_queue)) {
+ tcp_service_net_dma(sk, true);
+ tcp_cleanup_rbuf(sk, copied);
+ } else
+ dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+ }
#endif
if (copied >= target) {
/* Do not sleep, just process backlog. */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 262316b..ab30c96 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4818,7 +4818,7 @@ queue_and_out:

if (eaten > 0)
kfree_skb_partial(skb, fragstolen);
- else if (!sock_flag(sk, SOCK_DEAD))
+ if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, 0);
return;
}
@@ -5680,8 +5680,7 @@ no_ack:
#endif
if (eaten)
kfree_skb_partial(skb, fragstolen);
- else
- sk->sk_data_ready(sk, 0);
+ sk->sk_data_ready(sk, 0);
return 0;
}
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 938ba6d..0344f8e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -793,10 +793,16 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
struct in6_addr prefix;
struct rt6_info *rt;
struct net *net = dev_net(ifp->idev->dev);
+ struct flowi6 fl6 = {};
+
ipv6_addr_prefix(&prefix, &ifp->addr, ifp->prefix_len);
- rt = rt6_lookup(net, &prefix, NULL, ifp->idev->dev->ifindex, 1);
+ fl6.flowi6_oif = ifp->idev->dev->ifindex;
+ fl6.daddr = prefix;
+ rt = (struct rt6_info *)ip6_route_lookup(net, &fl6,
+ RT6_LOOKUP_F_IFACE);

- if (rt && addrconf_is_prefix_route(rt)) {
+ if (rt != net->ipv6.ip6_null_entry &&
+ addrconf_is_prefix_route(rt)) {
if (onlink == 0) {
ip6_del_rt(rt);
rt = NULL;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 6083276..0907191 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -818,6 +818,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);

+ if (IS_ERR(sn)) {
+ err = PTR_ERR(sn);
+ sn = NULL;
+ }
if (!sn) {
/* If it is failed, discard just allocated
root, and then (in st_failure) stale node
diff --git a/net/ipv6/mip6.c b/net/ipv6/mip6.c
index 5b087c3..0f9bdc5 100644
--- a/net/ipv6/mip6.c
+++ b/net/ipv6/mip6.c
@@ -86,28 +86,30 @@ static int mip6_mh_len(int type)

static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
{
- struct ip6_mh *mh;
+ struct ip6_mh _hdr;
+ const struct ip6_mh *mh;

- if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
- !pskb_may_pull(skb, (skb_transport_offset(skb) +
- ((skb_transport_header(skb)[1] + 1) << 3))))
+ mh = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_hdr), &_hdr);
+ if (!mh)
return -1;

- mh = (struct ip6_mh *)skb_transport_header(skb);
+ if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
+ return -1;

if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
- mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) -
- skb_network_header(skb)));
+ mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
+ skb_network_header_len(skb));
return -1;
}

if (mh->ip6mh_proto != IPPROTO_NONE) {
LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
mh->ip6mh_proto);
- mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) -
- skb_network_header(skb)));
+ mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
+ skb_network_header_len(skb));
return -1;
}

diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 93d6983..9dca4a8 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -107,21 +107,20 @@ found:
* 0 - deliver
* 1 - block
*/
-static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
+static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
- struct icmp6hdr *icmph;
- struct raw6_sock *rp = raw6_sk(sk);
-
- if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
- __u32 *data = &rp->filter.data[0];
- int bit_nr;
+ struct icmp6hdr _hdr;
+ const struct icmp6hdr *hdr;

- icmph = (struct icmp6hdr *) skb->data;
- bit_nr = icmph->icmp6_type;
+ hdr = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_hdr), &_hdr);
+ if (hdr) {
+ const __u32 *data = &raw6_sk(sk)->filter.data[0];
+ unsigned int type = hdr->icmp6_type;

- return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
+ return (data[type >> 5] & (1U << (type & 31))) != 0;
}
- return 0;
+ return 1;
}

#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index becb048..a3ef2ad 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1485,17 +1485,18 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
struct fib6_table *table;
struct net *net = dev_net(rt->dst.dev);

- if (rt == net->ipv6.ip6_null_entry)
- return -ENOENT;
+ if (rt == net->ipv6.ip6_null_entry) {
+ err = -ENOENT;
+ goto out;
+ }

table = rt->rt6i_table;
write_lock_bh(&table->tb6_lock);
-
err = fib6_del(rt, info);
- dst_release(&rt->dst);
-
write_unlock_bh(&table->tb6_lock);

+out:
+ dst_release(&rt->dst);
return err;
}

diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 47b259f..94840d8 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -148,7 +148,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length);
}

- if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
+ if (!pskb_may_pull(skb, ETH_HLEN))
goto error;

secpath_reset(skb);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 06592d8..1b9024e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1169,7 +1169,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
msg->msg_flags |= MSG_TRUNC;
}

- skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (er < 0) {
+ skb_free_datagram(sk, skb);
+ release_sock(sk);
+ return er;
+ }

if (sax != NULL) {
sax->sax25_family = AF_NETROM;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 6aabd77..564b9fc 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
cl = defmap[TC_PRIO_BESTEFFORT];

- if (cl == NULL || cl->level >= head->level)
+ if (cl == NULL)
goto fallback;
}
-
+ if (cl->level >= head->level)
+ goto fallback;
#ifdef CONFIG_NET_CLS_ACT
switch (result) {
case TC_ACT_QUEUED:
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 9af01f3..d92f169 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -831,7 +831,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
if (mask) {
struct qfq_group *next = qfq_ffs(q, mask);
if (qfq_gt(roundedF, next->F)) {
- cl->S = next->F;
+ if (qfq_gt(limit, next->F))
+ cl->S = next->F;
+ else /* preserve timestamp correctness */
+ cl->S = limit;
return;
}
}
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 6ae47ac..83dddb9 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -339,6 +339,25 @@ finish:
return retval;
}

+static void sctp_packet_release_owner(struct sk_buff *skb)
+{
+ sk_free(skb->sk);
+}
+
+static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sctp_packet_release_owner;
+
+ /*
+ * The data chunks have already been accounted for in sctp_sendmsg(),
+ * therefore only reserve a single byte to keep the socket around until
+ * the packet has been transmitted.
+ */
+ atomic_inc(&sk->sk_wmem_alloc);
+}
+
/* All packets are sent to the network through this function from
* sctp_outq_tail().
*
@@ -380,7 +399,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
/* Set the owning socket so that we know where to get the
* destination IP address.
*/
- skb_set_owner_w(nskb, sk);
+ sctp_packet_set_owner_w(nskb, sk);

if (!sctp_transport_dst_check(tp)) {
sctp_transport_route(tp, NULL, sctp_sk(sk));
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 54a0dc2..ab2bb42 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -212,7 +212,7 @@ resume:
/* only the first xfrm gets the encap type */
encap_type = 0;

- if (async && x->repl->check(x, skb, seq)) {
+ if (async && x->repl->recheck(x, skb, seq)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
goto drop_unlock;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ccfbd32..ce19467 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1763,7 +1763,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,

if (!afinfo) {
dst_release(dst_orig);
- ret = ERR_PTR(-EINVAL);
+ return ERR_PTR(-EINVAL);
} else {
ret = afinfo->blackhole_route(net, dst_orig);
}
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 2f6d11d..3efb07d 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -420,6 +420,18 @@ err:
return -EINVAL;
}

+static int xfrm_replay_recheck_esn(struct xfrm_state *x,
+ struct sk_buff *skb, __be32 net_seq)
+{
+ if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
+ htonl(xfrm_replay_seqhi(x, net_seq)))) {
+ x->stats.replay_window++;
+ return -EINVAL;
+ }
+
+ return xfrm_replay_check_esn(x, skb, net_seq);
+}
+
static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
{
unsigned int bitnr, nr, i;
@@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
static struct xfrm_replay xfrm_replay_legacy = {
.advance = xfrm_replay_advance,
.check = xfrm_replay_check,
+ .recheck = xfrm_replay_check,
.notify = xfrm_replay_notify,
.overflow = xfrm_replay_overflow,
};
@@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
static struct xfrm_replay xfrm_replay_bmp = {
.advance = xfrm_replay_advance_bmp,
.check = xfrm_replay_check_bmp,
+ .recheck = xfrm_replay_check_bmp,
.notify = xfrm_replay_notify_bmp,
.overflow = xfrm_replay_overflow_bmp,
};
@@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
static struct xfrm_replay xfrm_replay_esn = {
.advance = xfrm_replay_advance_esn,
.check = xfrm_replay_check_esn,
+ .recheck = xfrm_replay_recheck_esn,
.notify = xfrm_replay_notify_bmp,
.overflow = xfrm_replay_overflow_esn,
};
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 44293b3..5b054d8 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
struct nlattr **attrs)
{
struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
+ struct xfrm_replay_state_esn *rs;

- if ((p->flags & XFRM_STATE_ESN) && !rt)
- return -EINVAL;
+ if (p->flags & XFRM_STATE_ESN) {
+ if (!rt)
+ return -EINVAL;
+
+ rs = nla_data(rt);
+
+ if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+ return -EINVAL;
+
+ if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
+ nla_len(rt) != sizeof(*rs))
+ return -EINVAL;
+ }

if (!rt)
return 0;
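
The new checks reject an ESN attribute whose declared bitmap would exceed XFRMA_REPLAY_ESN_MAX bits, or whose netlink payload is shorter than the structure it claims to carry. A simplified userspace sketch of that validation (struct replay_esn here is a stand-in with only the relevant field, not the real xfrm structure):

#include <stddef.h>
#include <stdint.h>

#define REPLAY_ESN_MAX_BITS 4096	/* mirrors XFRMA_REPLAY_ESN_MAX */

struct replay_esn {			/* stand-in for the kernel struct */
	unsigned int bmp_len;		/* bitmap length in 32-bit words */
	uint32_t bmp[];
};

/* Return 0 if a payload of 'attr_len' bytes can safely be treated as a
 * struct replay_esn, -1 otherwise. */
static int replay_esn_ok(const struct replay_esn *rs, size_t attr_len)
{
	size_t claimed;

	if (attr_len < sizeof(*rs))
		return -1;	/* not even the fixed part */
	if (rs->bmp_len > REPLAY_ESN_MAX_BITS / 32)
		return -1;	/* declared bitmap larger than the cap */
	claimed = sizeof(*rs) + rs->bmp_len * sizeof(rs->bmp[0]);
	if (attr_len < claimed && attr_len != sizeof(*rs))
		return -1;	/* too short, unless it is the bare fixed-size form */
	return 0;
}
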
@@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
struct nlattr *rp)
{
struct xfrm_replay_state_esn *up;
+ int ulen;

if (!replay_esn || !rp)
return 0;

up = nla_data(rp);
+ ulen = xfrm_replay_state_esn_len(up);

- if (xfrm_replay_state_esn_len(replay_esn) !=
- xfrm_replay_state_esn_len(up))
+ if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
return -EINVAL;

return 0;
@@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn
struct nlattr *rta)
{
struct xfrm_replay_state_esn *p, *pp, *up;
+ int klen, ulen;

if (!rta)
return 0;

up = nla_data(rta);
+ klen = xfrm_replay_state_esn_len(up);
+ ulen = nla_len(rta) >= klen ? klen : sizeof(*up);

- p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+ p = kzalloc(klen, GFP_KERNEL);
if (!p)
return -ENOMEM;

- pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+ pp = kzalloc(klen, GFP_KERNEL);
if (!pp) {
kfree(p);
return -ENOMEM;
}

+ memcpy(p, up, ulen);
+ memcpy(pp, up, ulen);
+
*replay_esn = p;
*preplay_esn = pp;

@@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
* somehow made shareable and move it to xfrm_state.c - JHS
*
*/
-static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
+static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
+ int update_esn)
{
struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
- struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
+ struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
@@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
goto error;

/* override default values from above */
- xfrm_update_ae_params(x, attrs);
+ xfrm_update_ae_params(x, attrs, 0);

return x;

@@ -689,6 +709,7 @@ out:

static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
+ memset(p, 0, sizeof(*p));
memcpy(&p->id, &x->id, sizeof(p->id));
memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft));
@@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
return -EMSGSIZE;

algo = nla_data(nla);
- strcpy(algo->alg_name, auth->alg_name);
+ strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
algo->alg_key_len = auth->alg_key_len;

@@ -872,6 +893,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
{
struct xfrm_dump_info info;
struct sk_buff *skb;
+ int err;

skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!skb)
@@ -882,9 +904,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;

- if (dump_one_state(x, 0, &info)) {
+ err = dump_one_state(x, 0, &info);
+ if (err) {
kfree_skb(skb);
- return NULL;
+ return ERR_PTR(err);
}

return skb;
@@ -1309,6 +1332,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy

static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
{
+ memset(p, 0, sizeof(*p));
memcpy(&p->sel, &xp->selector, sizeof(p->sel));
memcpy(&p->lft, &xp->lft, sizeof(p->lft));
memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
@@ -1413,6 +1437,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
struct xfrm_user_tmpl *up = &vec[i];
struct xfrm_tmpl *kp = &xp->xfrm_vec[i];

+ memset(up, 0, sizeof(*up));
memcpy(&up->id, &kp->id, sizeof(up->id));
up->family = kp->encap_family;
memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
@@ -1541,6 +1566,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
{
struct xfrm_dump_info info;
struct sk_buff *skb;
+ int err;

skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!skb)
@@ -1551,9 +1577,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
info.nlmsg_seq = seq;
info.nlmsg_flags = 0;

- if (dump_one_policy(xp, dir, 0, &info) < 0) {
+ err = dump_one_policy(xp, dir, 0, &info);
+ if (err) {
kfree_skb(skb);
- return NULL;
+ return ERR_PTR(err);
}

return skb;
@@ -1812,7 +1839,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
goto out;

spin_lock_bh(&x->lock);
- xfrm_update_ae_params(x, attrs);
+ xfrm_update_ae_params(x, attrs, 1);
spin_unlock_bh(&x->lock);

c.event = nlh->nlmsg_type;
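
[Illustrative note, not part of the patch] Several xfrm_user hunks above share two hardening patterns: netlink attributes coming from userspace are length-checked against the size implied by their own contents before being copied (verify_replay, xfrm_replay_verify_len, xfrm_alloc_replay_state_esn), and structures going the other way are zeroed before their named fields are filled in, so compiler padding and unset members cannot leak stale kernel memory (copy_to_user_state, copy_to_user_policy, copy_to_user_tmpl, plus the bounded strncpy of alg_name). A minimal userspace sketch of the zero-before-fill side, with an invented struct:

#include <stdint.h>
#include <string.h>

struct wire_record {        /* invented example layout */
    uint8_t  family;        /* 3 bytes of padding follow this field */
    uint32_t index;
    uint64_t lifetime;
};

void fill_wire_record(struct wire_record *p, uint32_t index, uint64_t lifetime)
{
    memset(p, 0, sizeof(*p));   /* padding and unset fields read as 0 */
    p->family = 2;
    p->index = index;
    p->lifetime = lifetime;
    /* without the memset, the padding after 'family' would carry whatever
     * bytes were previously in this memory out to the record's reader */
}
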
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index 6a3ee98..978416d 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -98,24 +98,24 @@ try-run = $(shell set -e; \
# Usage: cflags-y += $(call as-option,-Wa$(comma)-isa=foo,)

as-option = $(call try-run,\
- $(CC) $(KBUILD_CFLAGS) $(1) -c -xassembler /dev/null -o "$$TMP",$(1),$(2))
+ $(CC) $(KBUILD_CFLAGS) $(1) -c -x assembler /dev/null -o "$$TMP",$(1),$(2))

# as-instr
# Usage: cflags-y += $(call as-instr,instr,option1,option2)

as-instr = $(call try-run,\
- printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -xassembler -o "$$TMP" -,$(2),$(3))
+ printf "%b\n" "$(1)" | $(CC) $(KBUILD_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))

# cc-option
# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)

cc-option = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",$(1),$(2))
+ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))

# cc-option-yn
# Usage: flag := $(call cc-option-yn,-march=winchip-c6)
cc-option-yn = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -xc /dev/null -o "$$TMP",y,n)
+ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",y,n)

# cc-option-align
# Prefix align with either -falign or -malign
@@ -125,7 +125,7 @@ cc-option-align = $(subst -functions=0,,\
# cc-disable-warning
# Usage: cflags-y += $(call cc-disable-warning,unused-but-set-variable)
cc-disable-warning = $(call try-run,\
- $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -xc /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))

# cc-version
# Usage gcc-ver := $(call cc-version)
@@ -143,7 +143,7 @@ cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
# cc-ldoption
# Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
cc-ldoption = $(call try-run,\
- $(CC) $(1) -nostdlib -xc /dev/null -o "$$TMP",$(1),$(2))
+ $(CC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))

# ld-option
# Usage: LDFLAGS += $(call ld-option, -X)
@@ -209,7 +209,7 @@ endif
# >$< substitution to preserve $ when reloading .cmd file
# note: when using inline perl scripts [perl -e '...$$t=1;...']
# in $(cmd_xxx) double $$ your perl vars
-make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
+make-cmd = $(subst \\,\\\\,$(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))))

# Find any prerequisites that is newer than target or that does not exist.
# PHONY targets skipped in both cases.
diff --git a/scripts/gcc-version.sh b/scripts/gcc-version.sh
index debecb5..7f2126d 100644
--- a/scripts/gcc-version.sh
+++ b/scripts/gcc-version.sh
@@ -22,10 +22,10 @@ if [ ${#compiler} -eq 0 ]; then
exit 1
fi

-MAJOR=$(echo __GNUC__ | $compiler -E -xc - | tail -n 1)
-MINOR=$(echo __GNUC_MINOR__ | $compiler -E -xc - | tail -n 1)
+MAJOR=$(echo __GNUC__ | $compiler -E -x c - | tail -n 1)
+MINOR=$(echo __GNUC_MINOR__ | $compiler -E -x c - | tail -n 1)
if [ "x$with_patchlevel" != "x" ] ; then
- PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -xc - | tail -n 1)
+ PATCHLEVEL=$(echo __GNUC_PATCHLEVEL__ | $compiler -E -x c - | tail -n 1)
printf "%02d%02d%02d\\n" $MAJOR $MINOR $PATCHLEVEL
else
printf "%02d%02d\\n" $MAJOR $MINOR
diff --git a/scripts/gcc-x86_32-has-stack-protector.sh b/scripts/gcc-x86_32-has-stack-protector.sh
index 29493dc..12dbd0b 100644
--- a/scripts/gcc-x86_32-has-stack-protector.sh
+++ b/scripts/gcc-x86_32-has-stack-protector.sh
@@ -1,6 +1,6 @@
#!/bin/sh

-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
if [ "$?" -eq "0" ] ; then
echo y
else
diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
index afaec61..973e8c1 100644
--- a/scripts/gcc-x86_64-has-stack-protector.sh
+++ b/scripts/gcc-x86_64-has-stack-protector.sh
@@ -1,6 +1,6 @@
#!/bin/sh

-echo "int foo(void) { char X[200]; return 3; }" | $* -S -xc -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
if [ "$?" -eq "0" ] ; then
echo y
else
diff --git a/scripts/kconfig/check.sh b/scripts/kconfig/check.sh
index fa59cbf..854d9c7 100755
--- a/scripts/kconfig/check.sh
+++ b/scripts/kconfig/check.sh
@@ -1,6 +1,6 @@
#!/bin/sh
# Needed for systems without gettext
-$* -xc -o /dev/null - > /dev/null 2>&1 << EOF
+$* -x c -o /dev/null - > /dev/null 2>&1 << EOF
#include <libintl.h>
int main()
{
diff --git a/scripts/kconfig/lxdialog/check-lxdialog.sh b/scripts/kconfig/lxdialog/check-lxdialog.sh
index 82cc3a8..50df490 100644
--- a/scripts/kconfig/lxdialog/check-lxdialog.sh
+++ b/scripts/kconfig/lxdialog/check-lxdialog.sh
@@ -38,7 +38,7 @@ trap "rm -f $tmp" 0 1 2 3 15

# Check if we can link to ncurses
check() {
- $cc -xc - -o $tmp 2>/dev/null <<'EOF'
+ $cc -x c - -o $tmp 2>/dev/null <<'EOF'
#include CURSES_LOC
main() {}
EOF
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index bccf07dd..3346f42 100644
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -463,6 +463,8 @@ while(<CIN>) {
if (defined($configs{$1})) {
if ($localyesconfig) {
$setconfigs{$1} = 'y';
+ print "$1=y\n";
+ next;
} else {
$setconfigs{$1} = $2;
}
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index 8b5c36f..c5d463c 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -120,6 +120,7 @@ struct loopback_pcm {
unsigned int last_drift;
unsigned long last_jiffies;
struct timer_list timer;
+ spinlock_t timer_lock;
};

static struct platform_device *devices[SNDRV_CARDS];
@@ -170,6 +171,7 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
unsigned long tick;
unsigned int rate_shift = get_rate_shift(dpcm);

+ spin_lock(&dpcm->timer_lock);
if (rate_shift != dpcm->pcm_rate_shift) {
dpcm->pcm_rate_shift = rate_shift;
dpcm->period_size_frac = frac_pos(dpcm, dpcm->pcm_period_size);
@@ -182,12 +184,15 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
dpcm->timer.expires = jiffies + tick;
add_timer(&dpcm->timer);
+ spin_unlock(&dpcm->timer_lock);
}

static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
{
+ spin_lock(&dpcm->timer_lock);
del_timer(&dpcm->timer);
dpcm->timer.expires = 0;
+ spin_unlock(&dpcm->timer_lock);
}

#define CABLE_VALID_PLAYBACK (1 << SNDRV_PCM_STREAM_PLAYBACK)
@@ -667,6 +672,7 @@ static int loopback_open(struct snd_pcm_substream *substream)
dpcm->substream = substream;
setup_timer(&dpcm->timer, loopback_timer_function,
(unsigned long)dpcm);
+ spin_lock_init(&dpcm->timer_lock);

cable = loopback->cables[substream->number][dev];
if (!cable) {
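
[Illustrative note, not part of the patch] The aloop hunks above serialize arming and disarming of the per-stream timer with a new spinlock, so a stop racing with a concurrent (re)start cannot leave a timer pending on a stream that is being torn down. A rough userspace analogue using a pthread mutex, illustrative only (the driver itself takes spin_lock() around add_timer()/del_timer()):

#include <pthread.h>
#include <stdbool.h>

struct loop_timer {             /* invented stand-in for loopback_pcm */
    pthread_mutex_t lock;
    bool armed;
    unsigned long expires;
};

void loop_timer_start(struct loop_timer *t, unsigned long when)
{
    pthread_mutex_lock(&t->lock);
    t->expires = when;
    t->armed = true;            /* corresponds to add_timer() */
    pthread_mutex_unlock(&t->lock);
}

void loop_timer_stop(struct loop_timer *t)
{
    pthread_mutex_lock(&t->lock);
    t->armed = false;           /* corresponds to del_timer() */
    t->expires = 0;
    pthread_mutex_unlock(&t->lock);
}
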
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 41f28c8..afa5b4d 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -4467,6 +4467,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x17aa, 0x21ce, "Lenovo T420", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
{}
};
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7faf28c..95b9090 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6145,6 +6145,12 @@ static int patch_alc269(struct hda_codec *codec)

spec = codec->spec;

+ alc_pick_fixup(codec, alc269_fixup_models,
+ alc269_fixup_tbl, alc269_fixups);
+ alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
+
+ alc_auto_parse_customize_define(codec);
+
if (codec->vendor_id == 0x10ec0269) {
spec->codec_variant = ALC269_TYPE_ALC269VA;
switch (alc_get_coef0(codec) & 0x00f0) {
@@ -6172,12 +6178,6 @@ static int patch_alc269(struct hda_codec *codec)
alc269_fill_coef(codec);
}

- alc_pick_fixup(codec, alc269_fixup_models,
- alc269_fixup_tbl, alc269_fixups);
- alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
-
- alc_auto_parse_customize_define(codec);
-
/* automatic parse from the BIOS config */
err = alc269_parse_auto_config(codec);
if (err < 0)
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index f21fd91..9c35043 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -3661,6 +3661,32 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
update_power_state(codec, 0x21, AC_PWRST_D3);
}

+/*
+ * pin fix-up
+ */
+enum {
+ VIA_FIXUP_INTMIC_BOOST,
+};
+
+static void via_fixup_intmic_boost(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ override_mic_boost(codec, 0x30, 0, 2, 40);
+}
+
+static const struct hda_fixup via_fixups[] = {
+ [VIA_FIXUP_INTMIC_BOOST] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = via_fixup_intmic_boost,
+ },
+};
+
+static const struct snd_pci_quirk vt2002p_fixups[] = {
+ SND_PCI_QUIRK(0x1043, 0x8532, "Asus X202E", VIA_FIXUP_INTMIC_BOOST),
+ {}
+};
+
/* patch for vt2002P */
static int patch_vt2002P(struct hda_codec *codec)
{
@@ -3677,6 +3703,9 @@ static int patch_vt2002P(struct hda_codec *codec)
override_mic_boost(codec, 0x29, 0, 3, 40);
add_secret_dac_path(codec);

+ snd_hda_pick_fixup(codec, NULL, vt2002p_fixups, via_fixups);
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
/* automatic parse from the BIOS config */
err = via_parse_auto_config(codec);
if (err < 0) {
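
[Illustrative note, not part of the patch] The patch_via change above follows the usual HDA quirk-table pattern: the codec's PCI subsystem ID is matched against a table of known machines, each entry selects a fixup, and the fixup is a small callback run at a given probe stage (here raising the internal-mic boost on the Asus X202E before auto-parsing). A simplified sketch of that lookup, with invented types standing in for snd_pci_quirk, hda_fixup and snd_hda_pick_fixup():

#include <stddef.h>
#include <stdint.h>

struct fixup {                  /* invented, models struct hda_fixup */
    void (*func)(void *codec, int action);
};

struct quirk {                  /* invented, models snd_pci_quirk entries */
    uint16_t subvendor;
    uint16_t subdevice;
    int fixup_id;
};

const struct fixup *pick_fixup(const struct quirk *tbl, size_t n,
                               const struct fixup *fixups,
                               uint16_t subvendor, uint16_t subdevice)
{
    for (size_t i = 0; i < n; i++)
        if (tbl[i].subvendor == subvendor &&
            tbl[i].subdevice == subdevice)
            return &fixups[tbl[i].fixup_id];
    return NULL;                /* no quirk for this machine */
}
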
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index b42b6b2..d924a59 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -146,7 +146,7 @@ SOC_SINGLE("Playback Attenuate (-6dB) Switch", AC97_MASTER_TONE, 6, 1, 0),
SOC_SINGLE("Bass Volume", AC97_MASTER_TONE, 8, 15, 1),
SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1),

-SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1),
+SOC_SINGLE("Capture Switch", AC97_REC_GAIN, 15, 1, 1),
SOC_ENUM("Capture Volume Steps", wm9712_enum[6]),
SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1),
SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index dfe957a..932a535 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -634,6 +634,11 @@ void wm_hubs_update_class_w(struct snd_soc_codec *codec)

snd_soc_update_bits(codec, WM8993_CLASS_W_0,
WM8993_CP_DYN_V | WM8993_CP_DYN_FREQ, enable);
+
+ snd_soc_write(codec, WM8993_LEFT_OUTPUT_VOLUME,
+ snd_soc_read(codec, WM8993_LEFT_OUTPUT_VOLUME));
+ snd_soc_write(codec, WM8993_RIGHT_OUTPUT_VOLUME,
+ snd_soc_read(codec, WM8993_RIGHT_OUTPUT_VOLUME));
}
EXPORT_SYMBOL_GPL(wm_hubs_update_class_w);

diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 4f40ba8..fe56c9d 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -1267,6 +1267,13 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
/* disable non-functional volume control */
master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME);
break;
+ case USB_ID(0x1130, 0xf211):
+ snd_printk(KERN_INFO
+ "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n");
+ /* disable non-functional volume control */
+ channels = 0;
+ break;
+
}
if (channels > 0)
first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize);
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 79780fa..d73ac9b 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -2781,6 +2781,59 @@ YAMAHA_DEVICE(0x7010, "UB99"),
}
},

+/* Microsoft XboxLive Headset/Xbox Communicator */
+{
+ USB_DEVICE(0x045e, 0x0283),
+ .bInterfaceClass = USB_CLASS_PER_INTERFACE,
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Microsoft",
+ .product_name = "XboxLive Headset/Xbox Communicator",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = &(const struct snd_usb_audio_quirk[]) {
+ {
+ /* playback */
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 1,
+ .iface = 0,
+ .altsetting = 0,
+ .altset_idx = 0,
+ .attributes = 0,
+ .endpoint = 0x04,
+ .ep_attr = 0x05,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 22050,
+ .rate_max = 22050
+ }
+ },
+ {
+ /* capture */
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+ .data = &(const struct audioformat) {
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels = 1,
+ .iface = 1,
+ .altsetting = 0,
+ .altset_idx = 0,
+ .attributes = 0,
+ .endpoint = 0x85,
+ .ep_attr = 0x05,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 16000,
+ .rate_max = 16000
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
+
{
/*
* Some USB MIDI devices don't have an audio control interface,
diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c
index f759f4f..fd2f922 100644
--- a/tools/lguest/lguest.c
+++ b/tools/lguest/lguest.c
@@ -1299,6 +1299,7 @@ static struct device *new_device(const char *name, u16 type)
dev->feature_len = 0;
dev->num_vq = 0;
dev->running = false;
+ dev->next = NULL;

/*
* Append to device list. Prepending to a single-linked list is
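
[Illustrative note, not part of the patch] The lguest hunk above initializes the new node's next pointer: the freshly allocated device struct is not zeroed, and the device list is a NULL-terminated singly linked list, so garbage left in ->next would send later list walks into freed or random memory. A minimal sketch of the invariant, with simplified types:

#include <stddef.h>

struct dev_sketch {             /* simplified device node */
    const char *name;
    struct dev_sketch *next;
};

void append(struct dev_sketch **head, struct dev_sketch *d)
{
    struct dev_sketch **p = head;

    d->next = NULL;             /* the invariant the fix restores */
    while (*p)                  /* walks rely on NULL termination */
        p = &(*p)->next;
    *p = d;
}
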
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 0eee64c..e21f414 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -62,7 +62,7 @@ ifeq ($(ARCH),x86_64)
ARCH := x86
IS_X86_64 := 0
ifeq (, $(findstring m32,$(EXTRA_CFLAGS)))
- IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1)
+ IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
endif
ifeq (${IS_X86_64}, 1)
RAW_ARCH := x86_64
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile
index a93e06c..cf397bd 100644
--- a/tools/power/cpupower/Makefile
+++ b/tools/power/cpupower/Makefile
@@ -111,7 +111,7 @@ GMO_FILES = ${shell for HLANG in ${LANGUAGES}; do echo $(OUTPUT)po/$$HLANG.gmo;
export CROSS CC AR STRIP RANLIB CFLAGS LDFLAGS LIB_OBJS

# check if compiler option is supported
-cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}
+cc-supports = ${shell if $(CC) ${1} -S -o /dev/null -x c /dev/null > /dev/null 2>&1; then echo "$(1)"; fi;}

# use '-Os' optimization if available, else use -O2
OPTIMIZATION := $(call cc-supports,-Os,-O2)
--