Re: Linux 4.0.2

From: Greg KH
Date: Thu May 07 2015 - 19:11:57 EST


diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 99ca40e8e810..5c204df6b689 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -282,7 +282,7 @@ following is true:

- The current CPU's queue head counter >= the recorded tail counter
value in rps_dev_flow[i]
-- The current CPU is unset (equal to RPS_NO_CPU)
+- The current CPU is unset (>= nr_cpu_ids)
- The current CPU is offline

After this check, the packet is sent to the (possibly updated) current
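
The documentation hunk above lists the three conditions under which the CPU
recorded in rps_dev_flow[i] may be updated. A minimal C sketch of that check,
with illustrative parameter names (nr_cpu_ids and cpu_online() are real kernel
symbols; the rest is not the exact kernel code):

        static bool may_update_flow_cpu(unsigned int recorded_cpu,
                                        unsigned int queue_head,
                                        unsigned int recorded_tail)
        {
                /* every packet queued for this flow has drained, the flow was
                 * never assigned a CPU, or the recorded CPU went offline */
                return (int)(queue_head - recorded_tail) >= 0 ||
                       recorded_cpu >= nr_cpu_ids ||
                       !cpu_online(recorded_cpu);
        }
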
diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
index 4ceef53164b0..d1ad9d5cae46 100644
--- a/Documentation/virtual/kvm/devices/s390_flic.txt
+++ b/Documentation/virtual/kvm/devices/s390_flic.txt
@@ -27,6 +27,9 @@ Groups:
Copies all floating interrupts into a buffer provided by userspace.
When the buffer is too small it returns -ENOMEM, which is the indication
for userspace to try again with a bigger buffer.
+ -ENOBUFS is returned when the allocation of a kernelspace buffer has
+ failed.
+ -EFAULT is returned when copying data to userspace failed.
All interrupts remain pending, i.e. are not deleted from the list of
currently pending interrupts.
attr->addr contains the userspace address of the buffer into which all
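
Given the error contract documented above (-ENOMEM means "retry with a bigger
buffer", -ENOBUFS and -EFAULT are hard failures), a userspace caller might loop
roughly like the sketch below. flic_fd and the starting size are illustrative;
the ioctl, attribute group and structures are the real KVM API names.

        struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_GET_ALL_IRQS };
        size_t len = 64 * sizeof(struct kvm_s390_irq);
        void *buf = NULL;
        int n;

        do {
                free(buf);
                buf = malloc(len);
                if (!buf)
                        break;
                attr.attr = len;                /* buffer size in bytes  */
                attr.addr = (__u64)(unsigned long)buf;
                n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
                len *= 2;                       /* grow before retrying  */
        } while (n < 0 && errno == ENOMEM);     /* n >= 0: irqs returned */
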
diff --git a/Makefile b/Makefile
index f499cd2f5738..0649a6011a76 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 0
-SUBLEVEL = 1
+SUBLEVEL = 2
EXTRAVERSION =
NAME = Hurr durr I'ma sheep

diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index fec1fca2ad66..6c4bc53cbf4e 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -167,7 +167,13 @@

macb1: ethernet@f802c000 {
phy-mode = "rmii";
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "okay";
+
+ ethernet-phy@1 {
+ reg = <0x1>;
+ };
};

dbgu: serial@ffffee00 {
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index a5441d5482a6..3cc8b8320345 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -154,7 +154,7 @@

uart2: serial@12200 {
compatible = "ns16550a";
- reg = <0x12000 0x100>;
+ reg = <0x12200 0x100>;
reg-shift = <2>;
interrupts = <9>;
clocks = <&core_clk 0>;
@@ -163,7 +163,7 @@

uart3: serial@12300 {
compatible = "ns16550a";
- reg = <0x12100 0x100>;
+ reg = <0x12300 0x100>;
reg-shift = <2>;
interrupts = <10>;
clocks = <&core_clk 0>;
diff --git a/arch/arm/boot/dts/exynos5250-spring.dts b/arch/arm/boot/dts/exynos5250-spring.dts
index f02775487cd4..c41600e587e0 100644
--- a/arch/arm/boot/dts/exynos5250-spring.dts
+++ b/arch/arm/boot/dts/exynos5250-spring.dts
@@ -429,7 +429,6 @@
&mmc_0 {
status = "okay";
num-slots = <1>;
- supports-highspeed;
broken-cd;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
@@ -437,11 +436,8 @@
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
pinctrl-0 = <&sd0_clk &sd0_cmd &sd0_cd &sd0_bus4 &sd0_bus8>;
-
- slot@0 {
- reg = <0>;
- bus-width = <8>;
- };
+ bus-width = <8>;
+ cap-mmc-highspeed;
};

/*
@@ -451,7 +447,6 @@
&mmc_1 {
status = "okay";
num-slots = <1>;
- supports-highspeed;
broken-cd;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
@@ -459,11 +454,8 @@
samsung,dw-mshc-ddr-timing = <1 2>;
pinctrl-names = "default";
pinctrl-0 = <&sd1_clk &sd1_cmd &sd1_cd &sd1_bus4>;
-
- slot@0 {
- reg = <0>;
- bus-width = <4>;
- };
+ bus-width = <4>;
+ cap-sd-highspeed;
};

&pinctrl_0 {
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index afb9cafd3786..674d03f4ba15 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -115,7 +115,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
the loader. We need to make sure that it is out of the way of the program
that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)

/* When the program starts, a1 contains a pointer to a function to be
registered with atexit, as per the SVR4 ABI. A value of 0 means we
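
The ELF_ET_DYN_BASE change above is purely about evaluation order: on 32-bit
ARM, TASK_SIZE is large enough that doubling it first overflows unsigned long,
while dividing first does not. A worked example, assuming the common 3G/1G
split where TASK_SIZE is 0xbf000000:

        unsigned long task_size = 0xbf000000UL; /* illustrative 32-bit value */

        unsigned long old_base = 2 * task_size / 3;
                /* 2 * 0xbf000000 wraps to 0x7e000000, giving 0x2a000000 */
        unsigned long new_base = task_size / 3 * 2;
                /* 0x3faaaaaa * 2 = 0x7f555554, the intended two thirds  */
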
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index 0db25bc32864..3a42ac646885 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -195,8 +195,14 @@ struct kvm_arch_memory_slot {
#define KVM_ARM_IRQ_CPU_IRQ 0
#define KVM_ARM_IRQ_CPU_FIQ 1

-/* Highest supported SPI, from VGIC_NR_IRQS */
+/*
+ * This used to hold the highest supported SPI, but it is now obsolete
+ * and only here to provide source code level compatibility with older
+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
+ */
+#ifndef __KERNEL__
#define KVM_ARM_IRQ_GIC_MAX 127
+#endif

/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index c4cc50e58c13..cfb354ff2a60 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -22,6 +22,7 @@
#include <asm/suspend.h>
#include <asm/memory.h>
#include <asm/sections.h>
+#include "reboot.h"

int pfn_is_nosave(unsigned long pfn)
{
@@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)

ret = swsusp_save();
if (ret == 0)
- soft_restart(virt_to_phys(cpu_resume));
+ _soft_restart(virt_to_phys(cpu_resume), false);
return ret;
}

@@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
for (pbe = restore_pblist; pbe; pbe = pbe->next)
copy_page(pbe->orig_address, pbe->address);

- soft_restart(virt_to_phys(cpu_resume));
+ _soft_restart(virt_to_phys(cpu_resume), false);
}

static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index fdfa3a78ec8c..2bf1a162defb 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -41,6 +41,7 @@
#include <asm/system_misc.h>
#include <asm/mach/time.h>
#include <asm/tls.h>
+#include "reboot.h"

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
@@ -95,7 +96,7 @@ static void __soft_restart(void *addr)
BUG();
}

-void soft_restart(unsigned long addr)
+void _soft_restart(unsigned long addr, bool disable_l2)
{
u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

@@ -104,7 +105,7 @@ void soft_restart(unsigned long addr)
local_fiq_disable();

/* Disable the L2 if we're the last man standing. */
- if (num_online_cpus() == 1)
+ if (disable_l2)
outer_disable();

/* Change to the new stack and continue with the reset. */
@@ -114,6 +115,11 @@ void soft_restart(unsigned long addr)
BUG();
}

+void soft_restart(unsigned long addr)
+{
+ _soft_restart(addr, num_online_cpus() == 1);
+}
+
/*
* Function pointers to optional machine specific functions
*/
diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
new file mode 100644
index 000000000000..c87f05816d6b
--- /dev/null
+++ b/arch/arm/kernel/reboot.h
@@ -0,0 +1,6 @@
+#ifndef REBOOT_H
+#define REBOOT_H
+
+extern void _soft_restart(unsigned long addr, bool disable_l2);
+
+#endif
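
Taken together, the hibernate.c, process.c and reboot.h hunks above split
soft_restart() into a core helper that takes an explicit disable_l2 flag, so
the hibernate paths can jump to cpu_resume without going through
outer_disable(). A condensed sketch of the resulting call pattern (names as
in the patch, bodies elided):

        /* reboot.h: the flag-taking core, private to arch/arm/kernel */
        extern void _soft_restart(unsigned long addr, bool disable_l2);

        /* process.c: the old entry point keeps its behaviour */
        void soft_restart(unsigned long addr)
        {
                _soft_restart(addr, num_online_cpus() == 1);
        }

        /* hibernate.c: always pass false, the L2 is left untouched */
        _soft_restart(virt_to_phys(cpu_resume), false);
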
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 5560f74f9eee..b652af50fda7 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -651,8 +651,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
if (!irqchip_in_kernel(kvm))
return -ENXIO;

- if (irq_num < VGIC_NR_PRIVATE_IRQS ||
- irq_num > KVM_ARM_IRQ_GIC_MAX)
+ if (irq_num < VGIC_NR_PRIVATE_IRQS)
return -EINVAL;

return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 8b9f5e202ccf..4f4e22206ae5 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void)
void __iomem *mpsoc_base;
u32 reg;

+ pr_warn("CPU idle is currently broken on Armada 38x: disabling");
+ return 0;
+
np = of_find_compatible_node(NULL, NULL,
"marvell,armada-380-coherency-fabric");
if (!np)
@@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void)
return 0;
of_node_put(np);

+ /*
+ * Currently the CPU idle support for Armada 38x is broken, as
+ * the CPU hotplug uses some of the CPU idle functions it is
+ * broken too, so let's disable it
+ */
+ if (of_machine_is_compatible("marvell,armada380")) {
+ cpu_hotplug_disable();
+ pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling");
+ }
+
if (of_machine_is_compatible("marvell,armadaxp"))
ret = armada_xp_cpuidle_init();
else if (of_machine_is_compatible("marvell,armada370"))
@@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void)
return ret;

mvebu_v7_pmsu_enable_l2_powerdown_onidle();
- platform_device_register(&mvebu_v7_cpuidle_device);
+ if (mvebu_v7_cpuidle_device.name)
+ platform_device_register(&mvebu_v7_cpuidle_device);
cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);

return 0;
diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
index 7bc66682687e..dcbe17f5e5f8 100644
--- a/arch/arm/mach-s3c64xx/crag6410.h
+++ b/arch/arm/mach-s3c64xx/crag6410.h
@@ -14,6 +14,7 @@
#include <mach/gpio-samsung.h>

#define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
+#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)

#define PCA935X_GPIO_BASE GPIO_BOARD_START
#define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index 10b913baab28..65c426bc45f7 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = {

static struct wm831x_pdata crag_pmic_pdata = {
.wm831x_num = 1,
+ .irq_base = BANFF_PMIC_IRQ_BASE,
.gpio_base = BANFF_PMIC_GPIO_BASE,
.soft_shutdown = true,

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1b8e97331ffb..a6186c24ca47 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -361,6 +361,27 @@ config ARM64_ERRATUM_832075

If unsure, say Y.

+config ARM64_ERRATUM_845719
+ bool "Cortex-A53: 845719: a load might read incorrect data"
+ depends on COMPAT
+ default y
+ help
+ This option adds an alternative code sequence to work around ARM
+ erratum 845719 on Cortex-A53 parts up to r0p4.
+
+ When running a compat (AArch32) userspace on an affected Cortex-A53
+ part, a load at EL0 from a virtual address that matches the bottom 32
+ bits of the virtual address used by a recent load at (AArch64) EL1
+ might return incorrect data.
+
+ The workaround is to write the contextidr_el1 register on exception
+ return to a 32-bit task.
+ Please note that this does not necessarily enable the workaround,
+ as it depends on the alternative framework, which will only patch
+ the kernel if an affected CPU is detected.
+
+ If unsure, say Y.
+
endmenu


@@ -470,6 +491,10 @@ config HOTPLUG_CPU

source kernel/Kconfig.preempt

+config UP_LATE_INIT
+ def_bool y
+ depends on !SMP
+
config HZ
int
default 100
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 69ceedc982a5..4d2a925998f9 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -48,7 +48,7 @@ core-$(CONFIG_KVM) += arch/arm64/kvm/
core-$(CONFIG_XEN) += arch/arm64/xen/
core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
libs-y := arch/arm64/lib/ $(libs-y)
-libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
+core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

# Default target when executing plain make
KBUILD_IMAGE := Image.gz
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index b6c16d5f622f..3f0c53c45771 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -23,8 +23,9 @@

#define ARM64_WORKAROUND_CLEAN_CACHE 0
#define ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE 1
+#define ARM64_WORKAROUND_845719 2

-#define ARM64_NCAPS 2
+#define ARM64_NCAPS 3

#ifndef __ASSEMBLY__

diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index 59e282311b58..8dcd61e32176 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -40,4 +40,6 @@ static inline u32 mpidr_hash_size(void)
extern u64 __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]

+void __init do_post_cpus_up_work(void);
+
#endif /* __ASM_SMP_PLAT_H */
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 3ef77a466018..bc49a1886b61 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -188,8 +188,14 @@ struct kvm_arch_memory_slot {
#define KVM_ARM_IRQ_CPU_IRQ 0
#define KVM_ARM_IRQ_CPU_FIQ 1

-/* Highest supported SPI, from VGIC_NR_IRQS */
+/*
+ * This used to hold the highest supported SPI, but it is now obsolete
+ * and only here to provide source code level compatibility with older
+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
+ */
+#ifndef __KERNEL__
#define KVM_ARM_IRQ_GIC_MAX 127
+#endif

/* PSCI interface */
#define KVM_PSCI_FN_BASE 0x95c1ba5e
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index fa62637e63a8..ad6d52392bb1 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -88,7 +88,16 @@ struct arm64_cpu_capabilities arm64_errata[] = {
/* Cortex-A57 r0p0 - r1p2 */
.desc = "ARM erratum 832075",
.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
- MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12),
+ MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
+ (1 << MIDR_VARIANT_SHIFT) | 2),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_845719
+ {
+ /* Cortex-A53 r0p[01234] */
+ .desc = "ARM erratum 845719",
+ .capability = ARM64_WORKAROUND_845719,
+ MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
},
#endif
{
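
The Cortex-A57 hunk above also fixes how the top of the affected revision
range is encoded: in MIDR_EL1 the variant field sits at bits [23:20] and the
revision at bits [3:0], so "r1p2" must be written as
(1 << MIDR_VARIANT_SHIFT) | 2 rather than 0x12. A small worked example
(midr_var_rev() is an illustrative helper, not an existing kernel macro;
MIDR_VARIANT_SHIFT is 20 in the arm64 headers):

        #define MIDR_VARIANT_SHIFT      20
        #define midr_var_rev(var, rev)  (((var) << MIDR_VARIANT_SHIFT) | (rev))

        unsigned int a57_r1p1 = midr_var_rev(1, 1);     /* 0x100001 */

        /* inside the corrected bound 0x100002 (r1p2), but bigger than the
         * old bound 0x12, so r1p0-r1p2 parts used to be missed */
        bool affected = a57_r1p1 <= midr_var_rev(1, 2);
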
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index cf21bb3bf752..959fe8733560 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -21,8 +21,10 @@
#include <linux/init.h>
#include <linux/linkage.h>

+#include <asm/alternative-asm.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
@@ -120,6 +122,24 @@
ct_user_enter
ldr x23, [sp, #S_SP] // load return stack pointer
msr sp_el0, x23
+
+#ifdef CONFIG_ARM64_ERRATUM_845719
+ alternative_insn \
+ "nop", \
+ "tbz x22, #4, 1f", \
+ ARM64_WORKAROUND_845719
+#ifdef CONFIG_PID_IN_CONTEXTIDR
+ alternative_insn \
+ "nop; nop", \
+ "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \
+ ARM64_WORKAROUND_845719
+#else
+ alternative_insn \
+ "nop", \
+ "msr contextidr_el1, xzr; 1:", \
+ ARM64_WORKAROUND_845719
+#endif
+#endif
.endif
msr elr_el1, x21 // set up the return data
msr spsr_el1, x22
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 07f930540f4a..c237ffb0effe 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -426,6 +426,7 @@ __create_page_tables:
*/
mov x0, x25
add x1, x26, #SWAPPER_DIR_SIZE
+ dmb sy
bl __inval_cache_range

mov lr, x27
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index e8420f635bd4..781f4697dc26 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -207,6 +207,18 @@ static void __init smp_build_mpidr_hash(void)
}
#endif

+void __init do_post_cpus_up_work(void)
+{
+ apply_alternatives_all();
+}
+
+#ifdef CONFIG_UP_LATE_INIT
+void __init up_late_init(void)
+{
+ do_post_cpus_up_work();
+}
+#endif /* CONFIG_UP_LATE_INIT */
+
static void __init setup_processor(void)
{
struct cpu_info *cpu_info;
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 328b8ce4b007..4257369341e4 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -309,7 +309,7 @@ void cpu_die(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
- apply_alternatives_all();
+ do_post_cpus_up_work();
}

void __init smp_prepare_boot_cpu(void)
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
index 356ee84cad95..04845aaf5985 100644
--- a/arch/c6x/kernel/time.c
+++ b/arch/c6x/kernel/time.c
@@ -49,7 +49,7 @@ u64 sched_clock(void)
return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
}

-void time_init(void)
+void __init time_init(void)
{
u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;

diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
index e41c56e375b1..1e38f0e1ea3e 100644
--- a/arch/mips/include/asm/asm-eva.h
+++ b/arch/mips/include/asm/asm-eva.h
@@ -11,6 +11,36 @@
#define __ASM_ASM_EVA_H

#ifndef __ASSEMBLY__
+
+/* Kernel variants */
+
+#define kernel_cache(op, base) "cache " op ", " base "\n"
+#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
+#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
+#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
+#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
+#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
+#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
+#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
+#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
+#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
+#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
+#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
+#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
+#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr) user_sw(reg, addr)
+#define kernel_ld(reg, addr) user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
+#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
+#endif /* CONFIG_32BIT */
+
#ifdef CONFIG_EVA

#define __BUILD_EVA_INSN(insn, reg, addr) \
@@ -41,37 +71,60 @@

#else

-#define user_cache(op, base) "cache " op ", " base "\n"
-#define user_ll(reg, addr) "ll " reg ", " addr "\n"
-#define user_sc(reg, addr) "sc " reg ", " addr "\n"
-#define user_lw(reg, addr) "lw " reg ", " addr "\n"
-#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
-#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
-#define user_lh(reg, addr) "lh " reg ", " addr "\n"
-#define user_lb(reg, addr) "lb " reg ", " addr "\n"
-#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
-#define user_sw(reg, addr) "sw " reg ", " addr "\n"
-#define user_swl(reg, addr) "swl " reg ", " addr "\n"
-#define user_swr(reg, addr) "swr " reg ", " addr "\n"
-#define user_sh(reg, addr) "sh " reg ", " addr "\n"
-#define user_sb(reg, addr) "sb " reg ", " addr "\n"
+#define user_cache(op, base) kernel_cache(op, base)
+#define user_ll(reg, addr) kernel_ll(reg, addr)
+#define user_sc(reg, addr) kernel_sc(reg, addr)
+#define user_lw(reg, addr) kernel_lw(reg, addr)
+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
+#define user_lh(reg, addr) kernel_lh(reg, addr)
+#define user_lb(reg, addr) kernel_lb(reg, addr)
+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
+#define user_sw(reg, addr) kernel_sw(reg, addr)
+#define user_swl(reg, addr) kernel_swl(reg, addr)
+#define user_swr(reg, addr) kernel_swr(reg, addr)
+#define user_sh(reg, addr) kernel_sh(reg, addr)
+#define user_sb(reg, addr) kernel_sb(reg, addr)

#ifdef CONFIG_32BIT
-/*
- * No 'sd' or 'ld' instructions in 32-bit but the code will
- * do the correct thing
- */
-#define user_sd(reg, addr) user_sw(reg, addr)
-#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sd(reg, addr) kernel_sw(reg, addr)
+#define user_ld(reg, addr) kernel_lw(reg, addr)
#else
-#define user_sd(reg, addr) "sd " reg", " addr "\n"
-#define user_ld(reg, addr) "ld " reg", " addr "\n"
+#define user_sd(reg, addr) kernel_sd(reg, addr)
+#define user_ld(reg, addr) kernel_ld(reg, addr)
#endif /* CONFIG_32BIT */

#endif /* CONFIG_EVA */

#else /* __ASSEMBLY__ */

+#define kernel_cache(op, base) cache op, base
+#define kernel_ll(reg, addr) ll reg, addr
+#define kernel_sc(reg, addr) sc reg, addr
+#define kernel_lw(reg, addr) lw reg, addr
+#define kernel_lwl(reg, addr) lwl reg, addr
+#define kernel_lwr(reg, addr) lwr reg, addr
+#define kernel_lh(reg, addr) lh reg, addr
+#define kernel_lb(reg, addr) lb reg, addr
+#define kernel_lbu(reg, addr) lbu reg, addr
+#define kernel_sw(reg, addr) sw reg, addr
+#define kernel_swl(reg, addr) swl reg, addr
+#define kernel_swr(reg, addr) swr reg, addr
+#define kernel_sh(reg, addr) sh reg, addr
+#define kernel_sb(reg, addr) sb reg, addr
+
+#ifdef CONFIG_32BIT
+/*
+ * No 'sd' or 'ld' instructions in 32-bit but the code will
+ * do the correct thing
+ */
+#define kernel_sd(reg, addr) user_sw(reg, addr)
+#define kernel_ld(reg, addr) user_lw(reg, addr)
+#else
+#define kernel_sd(reg, addr) sd reg, addr
+#define kernel_ld(reg, addr) ld reg, addr
+#endif /* CONFIG_32BIT */
+
#ifdef CONFIG_EVA

#define __BUILD_EVA_INSN(insn, reg, addr) \
@@ -101,31 +154,27 @@
#define user_sd(reg, addr) user_sw(reg, addr)
#else

-#define user_cache(op, base) cache op, base
-#define user_ll(reg, addr) ll reg, addr
-#define user_sc(reg, addr) sc reg, addr
-#define user_lw(reg, addr) lw reg, addr
-#define user_lwl(reg, addr) lwl reg, addr
-#define user_lwr(reg, addr) lwr reg, addr
-#define user_lh(reg, addr) lh reg, addr
-#define user_lb(reg, addr) lb reg, addr
-#define user_lbu(reg, addr) lbu reg, addr
-#define user_sw(reg, addr) sw reg, addr
-#define user_swl(reg, addr) swl reg, addr
-#define user_swr(reg, addr) swr reg, addr
-#define user_sh(reg, addr) sh reg, addr
-#define user_sb(reg, addr) sb reg, addr
+#define user_cache(op, base) kernel_cache(op, base)
+#define user_ll(reg, addr) kernel_ll(reg, addr)
+#define user_sc(reg, addr) kernel_sc(reg, addr)
+#define user_lw(reg, addr) kernel_lw(reg, addr)
+#define user_lwl(reg, addr) kernel_lwl(reg, addr)
+#define user_lwr(reg, addr) kernel_lwr(reg, addr)
+#define user_lh(reg, addr) kernel_lh(reg, addr)
+#define user_lb(reg, addr) kernel_lb(reg, addr)
+#define user_lbu(reg, addr) kernel_lbu(reg, addr)
+#define user_sw(reg, addr) kernel_sw(reg, addr)
+#define user_swl(reg, addr) kernel_swl(reg, addr)
+#define user_swr(reg, addr) kernel_swr(reg, addr)
+#define user_sh(reg, addr) kernel_sh(reg, addr)
+#define user_sb(reg, addr) kernel_sb(reg, addr)

#ifdef CONFIG_32BIT
-/*
- * No 'sd' or 'ld' instructions in 32-bit but the code will
- * do the correct thing
- */
-#define user_sd(reg, addr) user_sw(reg, addr)
-#define user_ld(reg, addr) user_lw(reg, addr)
+#define user_sd(reg, addr) kernel_sw(reg, addr)
+#define user_ld(reg, addr) kernel_lw(reg, addr)
#else
-#define user_sd(reg, addr) sd reg, addr
-#define user_ld(reg, addr) ld reg, addr
+#define user_sd(reg, addr) kernel_sd(reg, addr)
+#define user_ld(reg, addr) kernel_sd(reg, addr)
#endif /* CONFIG_32BIT */

#endif /* CONFIG_EVA */
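
The point of adding the kernel_* accessors above is that the unaligned handler
(further down in this patch) can pick the kernel or user address-space variant
with a single token-pasted macro argument. A trimmed illustration using the C
variants from this header; _LoadByte() is a hypothetical wrapper, not a macro
from the patch:

        #define kernel_lb(reg, addr)    "lb " reg ", " addr "\n"
        #define user_lb(reg, addr)      kernel_lb(reg, addr)  /* !CONFIG_EVA */

        /* 'type' must be the token 'kernel' or 'user' */
        #define _LoadByte(dst, src, type)                               \
                __asm__ __volatile__("1:\t" type##_lb("%0", "0(%1)")    \
                                     : "=r" (dst) : "r" (src))

        /* _LoadByte(v, p, kernel) pastes kernel_lb(...) -> "lb %0, 0(%1)";
         * with CONFIG_EVA, the user variant emits the EVA "lbe" form instead */
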
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index dd083e999b08..9f26b079cc6a 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -170,6 +170,7 @@ static inline void lose_fpu(int save)
}
disable_msa();
clear_thread_flag(TIF_USEDMSA);
+ __disable_fpu();
} else if (is_fpu_owner()) {
if (save)
_save_fp(current);
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index ac4fc716062b..f722b0528c25 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -322,6 +322,7 @@ enum mips_mmu_types {
#define T_TRAP 13 /* Trap instruction */
#define T_VCEI 14 /* Virtual coherency exception */
#define T_FPE 15 /* Floating point exception */
+#define T_MSADIS 21 /* MSA disabled exception */
#define T_WATCH 23 /* Watch address reference */
#define T_VCED 31 /* Virtual coherency data */

@@ -578,6 +579,7 @@ struct kvm_mips_callbacks {
int (*handle_syscall)(struct kvm_vcpu *vcpu);
int (*handle_res_inst)(struct kvm_vcpu *vcpu);
int (*handle_break)(struct kvm_vcpu *vcpu);
+ int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
int (*vm_init)(struct kvm *kvm);
int (*vcpu_init)(struct kvm_vcpu *vcpu);
int (*vcpu_setup)(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index bbb69695a0a1..7659da224fcd 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -109,10 +109,11 @@ static u32 unaligned_action;
extern void show_registers(struct pt_regs *regs);

#ifdef __BIG_ENDIAN
-#define LoadHW(addr, value, res) \
+#define _LoadHW(addr, value, res, type) \
+do { \
__asm__ __volatile__ (".set\tnoat\n" \
- "1:\t"user_lb("%0", "0(%2)")"\n" \
- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
+ "1:\t"type##_lb("%0", "0(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -127,13 +128,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)

#ifndef CONFIG_CPU_MIPSR6
-#define LoadW(addr, value, res) \
+#define _LoadW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_lwl("%0", "(%2)")"\n" \
- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
"li\t%1, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -146,21 +149,24 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no lwl instruction */
-#define LoadW(addr, value, res) \
+#define _LoadW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n" \
".set\tnoat\n\t" \
- "1:"user_lb("%0", "0(%2)")"\n\t" \
- "2:"user_lbu("$1", "1(%2)")"\n\t" \
+ "1:"type##_lb("%0", "0(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "3:"user_lbu("$1", "2(%2)")"\n\t" \
+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "4:"user_lbu("$1", "3(%2)")"\n\t" \
+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -178,14 +184,17 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */

-#define LoadHWU(addr, value, res) \
+#define _LoadHWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\t"user_lbu("%0", "0(%2)")"\n" \
- "2:\t"user_lbu("$1", "1(%2)")"\n\t" \
+ "1:\t"type##_lbu("%0", "0(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "1(%2)")"\n\t"\
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -201,13 +210,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)

#ifndef CONFIG_CPU_MIPSR6
-#define LoadWU(addr, value, res) \
+#define _LoadWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_lwl("%0", "(%2)")"\n" \
- "2:\t"user_lwr("%0", "3(%2)")"\n\t" \
+ "1:\t"type##_lwl("%0", "(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "3(%2)")"\n\t"\
"dsll\t%0, %0, 32\n\t" \
"dsrl\t%0, %0, 32\n\t" \
"li\t%1, 0\n" \
@@ -222,9 +233,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)

-#define LoadDW(addr, value, res) \
+#define _LoadDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
"1:\tldl\t%0, (%2)\n" \
"2:\tldr\t%0, 7(%2)\n\t" \
@@ -240,21 +253,24 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has not lwl and ldl instructions */
-#define LoadWU(addr, value, res) \
+#define _LoadWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "1:"user_lbu("%0", "0(%2)")"\n\t" \
- "2:"user_lbu("$1", "1(%2)")"\n\t" \
+ "1:"type##_lbu("%0", "0(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "3:"user_lbu("$1", "2(%2)")"\n\t" \
+ "3:"type##_lbu("$1", "2(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "4:"user_lbu("$1", "3(%2)")"\n\t" \
+ "4:"type##_lbu("$1", "3(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -272,9 +288,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)

-#define LoadDW(addr, value, res) \
+#define _LoadDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
@@ -319,16 +337,19 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t8b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */


-#define StoreHW(addr, value, res) \
+#define _StoreHW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\t"user_sb("%1", "1(%2)")"\n" \
+ "1:\t"type##_sb("%1", "1(%2)")"\n" \
"srl\t$1, %1, 0x8\n" \
- "2:\t"user_sb("$1", "0(%2)")"\n" \
+ "2:\t"type##_sb("$1", "0(%2)")"\n" \
".set\tat\n\t" \
"li\t%0, 0\n" \
"3:\n\t" \
@@ -342,13 +363,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT));\
+} while(0)

#ifndef CONFIG_CPU_MIPSR6
-#define StoreW(addr, value, res) \
+#define _StoreW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_swl("%1", "(%2)")"\n" \
- "2:\t"user_swr("%1", "3(%2)")"\n\t" \
+ "1:\t"type##_swl("%1", "(%2)")"\n" \
+ "2:\t"type##_swr("%1", "3(%2)")"\n\t"\
"li\t%0, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -361,9 +384,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while(0)

-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
"1:\tsdl\t%1,(%2)\n" \
"2:\tsdr\t%1, 7(%2)\n\t" \
@@ -379,20 +404,23 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no swl and sdl instructions */
-#define StoreW(addr, value, res) \
+#define _StoreW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "1:"user_sb("%1", "3(%2)")"\n\t" \
+ "1:"type##_sb("%1", "3(%2)")"\n\t" \
"srl\t$1, %1, 0x8\n\t" \
- "2:"user_sb("$1", "2(%2)")"\n\t" \
+ "2:"type##_sb("$1", "2(%2)")"\n\t" \
"srl\t$1, $1, 0x8\n\t" \
- "3:"user_sb("$1", "1(%2)")"\n\t" \
+ "3:"type##_sb("$1", "1(%2)")"\n\t" \
"srl\t$1, $1, 0x8\n\t" \
- "4:"user_sb("$1", "0(%2)")"\n\t" \
+ "4:"type##_sb("$1", "0(%2)")"\n\t" \
".set\tpop\n\t" \
"li\t%0, 0\n" \
"10:\n\t" \
@@ -409,9 +437,11 @@ extern void show_registers(struct pt_regs *regs);
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
- : "memory");
+ : "memory"); \
+} while(0)

#define StoreDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
@@ -451,15 +481,18 @@ extern void show_registers(struct pt_regs *regs);
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
- : "memory");
+ : "memory"); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */

#else /* __BIG_ENDIAN */

-#define LoadHW(addr, value, res) \
+#define _LoadHW(addr, value, res, type) \
+do { \
__asm__ __volatile__ (".set\tnoat\n" \
- "1:\t"user_lb("%0", "1(%2)")"\n" \
- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
+ "1:\t"type##_lb("%0", "1(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -474,13 +507,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)

#ifndef CONFIG_CPU_MIPSR6
-#define LoadW(addr, value, res) \
+#define _LoadW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_lwl("%0", "3(%2)")"\n" \
- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
"li\t%1, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -493,21 +528,24 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no lwl instruction */
-#define LoadW(addr, value, res) \
+#define _LoadW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n" \
".set\tnoat\n\t" \
- "1:"user_lb("%0", "3(%2)")"\n\t" \
- "2:"user_lbu("$1", "2(%2)")"\n\t" \
+ "1:"type##_lb("%0", "3(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "3:"user_lbu("$1", "1(%2)")"\n\t" \
+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "4:"user_lbu("$1", "0(%2)")"\n\t" \
+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -525,15 +563,18 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */


-#define LoadHWU(addr, value, res) \
+#define _LoadHWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\t"user_lbu("%0", "1(%2)")"\n" \
- "2:\t"user_lbu("$1", "0(%2)")"\n\t" \
+ "1:\t"type##_lbu("%0", "1(%2)")"\n" \
+ "2:\t"type##_lbu("$1", "0(%2)")"\n\t"\
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -549,13 +590,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)

#ifndef CONFIG_CPU_MIPSR6
-#define LoadWU(addr, value, res) \
+#define _LoadWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_lwl("%0", "3(%2)")"\n" \
- "2:\t"user_lwr("%0", "(%2)")"\n\t" \
+ "1:\t"type##_lwl("%0", "3(%2)")"\n" \
+ "2:\t"type##_lwr("%0", "(%2)")"\n\t"\
"dsll\t%0, %0, 32\n\t" \
"dsrl\t%0, %0, 32\n\t" \
"li\t%1, 0\n" \
@@ -570,9 +613,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)

-#define LoadDW(addr, value, res) \
+#define _LoadDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
"1:\tldl\t%0, 7(%2)\n" \
"2:\tldr\t%0, (%2)\n\t" \
@@ -588,21 +633,24 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has not lwl and ldl instructions */
-#define LoadWU(addr, value, res) \
+#define _LoadWU(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "1:"user_lbu("%0", "3(%2)")"\n\t" \
- "2:"user_lbu("$1", "2(%2)")"\n\t" \
+ "1:"type##_lbu("%0", "3(%2)")"\n\t" \
+ "2:"type##_lbu("$1", "2(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "3:"user_lbu("$1", "1(%2)")"\n\t" \
+ "3:"type##_lbu("$1", "1(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
- "4:"user_lbu("$1", "0(%2)")"\n\t" \
+ "4:"type##_lbu("$1", "0(%2)")"\n\t" \
"sll\t%0, 0x8\n\t" \
"or\t%0, $1\n\t" \
"li\t%1, 0\n" \
@@ -620,9 +668,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)

-#define LoadDW(addr, value, res) \
+#define _LoadDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
@@ -667,15 +717,17 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t8b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
- : "r" (addr), "i" (-EFAULT));
+ : "r" (addr), "i" (-EFAULT)); \
+} while(0)
#endif /* CONFIG_CPU_MIPSR6 */

-#define StoreHW(addr, value, res) \
+#define _StoreHW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tnoat\n" \
- "1:\t"user_sb("%1", "0(%2)")"\n" \
+ "1:\t"type##_sb("%1", "0(%2)")"\n" \
"srl\t$1,%1, 0x8\n" \
- "2:\t"user_sb("$1", "1(%2)")"\n" \
+ "2:\t"type##_sb("$1", "1(%2)")"\n" \
".set\tat\n\t" \
"li\t%0, 0\n" \
"3:\n\t" \
@@ -689,12 +741,15 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT));\
+} while(0)
+
#ifndef CONFIG_CPU_MIPSR6
-#define StoreW(addr, value, res) \
+#define _StoreW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
- "1:\t"user_swl("%1", "3(%2)")"\n" \
- "2:\t"user_swr("%1", "(%2)")"\n\t" \
+ "1:\t"type##_swl("%1", "3(%2)")"\n" \
+ "2:\t"type##_swr("%1", "(%2)")"\n\t"\
"li\t%0, 0\n" \
"3:\n\t" \
".insn\n\t" \
@@ -707,9 +762,11 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while(0)

-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
"1:\tsdl\t%1, 7(%2)\n" \
"2:\tsdr\t%1, (%2)\n\t" \
@@ -725,20 +782,23 @@ extern void show_registers(struct pt_regs *regs);
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT)); \
+} while(0)
+
#else
/* MIPSR6 has no swl and sdl instructions */
-#define StoreW(addr, value, res) \
+#define _StoreW(addr, value, res, type) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
- "1:"user_sb("%1", "0(%2)")"\n\t" \
+ "1:"type##_sb("%1", "0(%2)")"\n\t" \
"srl\t$1, %1, 0x8\n\t" \
- "2:"user_sb("$1", "1(%2)")"\n\t" \
+ "2:"type##_sb("$1", "1(%2)")"\n\t" \
"srl\t$1, $1, 0x8\n\t" \
- "3:"user_sb("$1", "2(%2)")"\n\t" \
+ "3:"type##_sb("$1", "2(%2)")"\n\t" \
"srl\t$1, $1, 0x8\n\t" \
- "4:"user_sb("$1", "3(%2)")"\n\t" \
+ "4:"type##_sb("$1", "3(%2)")"\n\t" \
".set\tpop\n\t" \
"li\t%0, 0\n" \
"10:\n\t" \
@@ -755,9 +815,11 @@ extern void show_registers(struct pt_regs *regs);
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
- : "memory");
+ : "memory"); \
+} while(0)

-#define StoreDW(addr, value, res) \
+#define _StoreDW(addr, value, res) \
+do { \
__asm__ __volatile__ ( \
".set\tpush\n\t" \
".set\tnoat\n\t" \
@@ -797,10 +859,28 @@ extern void show_registers(struct pt_regs *regs);
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
- : "memory");
+ : "memory"); \
+} while(0)
+
#endif /* CONFIG_CPU_MIPSR6 */
#endif

+#define LoadHWU(addr, value, res) _LoadHWU(addr, value, res, kernel)
+#define LoadHWUE(addr, value, res) _LoadHWU(addr, value, res, user)
+#define LoadWU(addr, value, res) _LoadWU(addr, value, res, kernel)
+#define LoadWUE(addr, value, res) _LoadWU(addr, value, res, user)
+#define LoadHW(addr, value, res) _LoadHW(addr, value, res, kernel)
+#define LoadHWE(addr, value, res) _LoadHW(addr, value, res, user)
+#define LoadW(addr, value, res) _LoadW(addr, value, res, kernel)
+#define LoadWE(addr, value, res) _LoadW(addr, value, res, user)
+#define LoadDW(addr, value, res) _LoadDW(addr, value, res)
+
+#define StoreHW(addr, value, res) _StoreHW(addr, value, res, kernel)
+#define StoreHWE(addr, value, res) _StoreHW(addr, value, res, user)
+#define StoreW(addr, value, res) _StoreW(addr, value, res, kernel)
+#define StoreWE(addr, value, res) _StoreW(addr, value, res, user)
+#define StoreDW(addr, value, res) _StoreDW(addr, value, res)
+
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
@@ -872,7 +952,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
set_fs(seg);
goto sigbus;
}
- LoadHW(addr, value, res);
+ LoadHWE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -885,7 +965,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
set_fs(seg);
goto sigbus;
}
- LoadW(addr, value, res);
+ LoadWE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -898,7 +978,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
set_fs(seg);
goto sigbus;
}
- LoadHWU(addr, value, res);
+ LoadHWUE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -913,7 +993,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
}
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
- StoreHW(addr, value, res);
+ StoreHWE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -926,7 +1006,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
}
compute_return_epc(regs);
value = regs->regs[insn.spec3_format.rt];
- StoreW(addr, value, res);
+ StoreWE(addr, value, res);
if (res) {
set_fs(seg);
goto fault;
@@ -943,7 +1023,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;

- LoadHW(addr, value, res);
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ LoadHW(addr, value, res);
+ else
+ LoadHWE(addr, value, res);
+ } else {
+ LoadHW(addr, value, res);
+ }
+
if (res)
goto fault;
compute_return_epc(regs);
@@ -954,7 +1042,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;

- LoadW(addr, value, res);
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ LoadW(addr, value, res);
+ else
+ LoadWE(addr, value, res);
+ } else {
+ LoadW(addr, value, res);
+ }
+
if (res)
goto fault;
compute_return_epc(regs);
@@ -965,7 +1061,15 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;

- LoadHWU(addr, value, res);
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ LoadHWU(addr, value, res);
+ else
+ LoadHWUE(addr, value, res);
+ } else {
+ LoadHWU(addr, value, res);
+ }
+
if (res)
goto fault;
compute_return_epc(regs);
@@ -1024,7 +1128,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,

compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- StoreHW(addr, value, res);
+
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ StoreHW(addr, value, res);
+ else
+ StoreHWE(addr, value, res);
+ } else {
+ StoreHW(addr, value, res);
+ }
+
if (res)
goto fault;
break;
@@ -1035,7 +1148,16 @@ static void emulate_load_store_insn(struct pt_regs *regs,

compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- StoreW(addr, value, res);
+
+ if (config_enabled(CONFIG_EVA)) {
+ if (segment_eq(get_fs(), get_ds()))
+ StoreW(addr, value, res);
+ else
+ StoreWE(addr, value, res);
+ } else {
+ StoreW(addr, value, res);
+ }
+
if (res)
goto fault;
break;
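
Two plain C idioms carry most of the unaligned.c rework above: every
multi-statement macro body is now wrapped in do { ... } while (0) so it
expands to a single statement, and the EVA configuration picks the kernel_*
or user_* accessor at run time by comparing the active address limit
(segment_eq(get_fs(), get_ds())). A self-contained sketch of why the
do/while(0) wrapper matters (all names here are made up):

        static int load(const int *p) { return *p; }
        static void report_error(void) { }

        #define LOAD_PAIR_BAD(a, b)     load(a); load(b)
        #define LOAD_PAIR_GOOD(a, b)    do { load(a); load(b); } while (0)

        static void example(int cond, const int *a, const int *b)
        {
                if (cond)
                        LOAD_PAIR_GOOD(a, b);   /* one statement: 'else'    */
                else                            /* still binds to the 'if'; */
                        report_error();         /* the BAD variant would    */
        }                                       /* not even compile here.   */
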
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index fb3e8dfd1ff6..838d3a6a5b7d 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
case T_SYSCALL:
case T_BREAK:
case T_RES_INST:
+ case T_MSADIS:
break;

case T_COP_UNUSABLE:
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index c9eccf5df912..f5e7ddab02f7 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
ret = kvm_mips_callbacks->handle_break(vcpu);
break;

+ case T_MSADIS:
+ ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
+ break;
+
default:
kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index fd7257b70e65..4372cc86650c 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
return ret;
}

+static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+ uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ unsigned long cause = vcpu->arch.host_cp0_cause;
+ enum emulation_result er = EMULATE_DONE;
+ int ret = RESUME_GUEST;
+
+ /* No MSA supported in guest, guest reserved instruction exception */
+ er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+
+ switch (er) {
+ case EMULATE_DONE:
+ ret = RESUME_GUEST;
+ break;
+
+ case EMULATE_FAIL:
+ run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ ret = RESUME_HOST;
+ break;
+
+ default:
+ BUG();
+ }
+ return ret;
+}
+
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
return 0;
@@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.handle_syscall = kvm_trap_emul_handle_syscall,
.handle_res_inst = kvm_trap_emul_handle_res_inst,
.handle_break = kvm_trap_emul_handle_break,
+ .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

.vm_init = kvm_trap_emul_vm_init,
.vcpu_init = kvm_trap_emul_vcpu_init,
diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
index 21221edda7a9..0f75b6b3d218 100644
--- a/arch/mips/loongson/loongson-3/irq.c
+++ b/arch/mips/loongson/loongson-3/irq.c
@@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)

static struct irqaction cascade_irqaction = {
.handler = no_action,
+ .flags = IRQF_NO_SUSPEND,
.name = "cascade",
};

diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 8fddd2cdbff7..efe366d618b1 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
physical_memsize = 0x02000000;
} else {
+ if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
+ pr_warn("Unsupported memsize value (0x%lx) detected! "
+ "Using 0x10000000 (256M) instead\n",
+ memsize);
+ memsize = 256 << 20;
+ }
/* If ememsize is set, then set physical_memsize to that */
physical_memsize = ememsize ? : memsize;
}
diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
index 32a7c828f073..e7567c8a9e79 100644
--- a/arch/mips/power/hibernate.S
+++ b/arch/mips/power/hibernate.S
@@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
END(swsusp_arch_suspend)

LEAF(swsusp_arch_resume)
+ /* Avoid TLB mismatch during and after kernel resume */
+ jal local_flush_tlb_all
PTR_L t0, restore_pblist
0:
PTR_L t1, PBE_ADDRESS(t0) /* source */
@@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
bne t1, t3, 1b
PTR_L t0, PBE_NEXT(t0)
bnez t0, 0b
- jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
PTR_LA t0, saved_regs
PTR_L ra, PT_R31(t0)
PTR_L sp, PT_R29(t0)
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index ae77b7e59889..c641983bbdd6 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -61,12 +61,22 @@ struct cache_type_info {
};

/* These are used to index the cache_type_info array. */
-#define CACHE_TYPE_UNIFIED 0
-#define CACHE_TYPE_INSTRUCTION 1
-#define CACHE_TYPE_DATA 2
+#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
+#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
+#define CACHE_TYPE_INSTRUCTION 2
+#define CACHE_TYPE_DATA 3

static const struct cache_type_info cache_type_info[] = {
{
+ /* Embedded systems that use cache-size, cache-block-size,
+ * etc. for the Unified (typically L2) cache. */
+ .name = "Unified",
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ },
+ {
/* PowerPC Processor binding says the [di]-cache-*
* must be equal on unified caches, so just use
* d-cache properties. */
@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
{
struct cache *iter;

- if (cache->type == CACHE_TYPE_UNIFIED)
+ if (cache->type == CACHE_TYPE_UNIFIED ||
+ cache->type == CACHE_TYPE_UNIFIED_D)
return cache;

list_for_each_entry(iter, &cache_list, list)
@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
return of_get_property(np, "cache-unified", NULL);
}

-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
- int level)
+/*
+ * Unified caches can have two different sets of tags. Most embedded
+ * use cache-size, etc. for the unified cache size, but open firmware systems
+ * use d-cache-size, etc. Check on initialization for which type we have, and
+ * return the appropriate structure type. Assume it's embedded if it isn't
+ * open firmware. If it's yet a 3rd type, then there will be missing entries
+ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
+ * to be extended further.
+ */
+static int cache_is_unified_d(const struct device_node *np)
{
- struct cache *cache;
+ return of_get_property(np,
+ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
+ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
+}

+/*
+ */
+static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
+{
pr_debug("creating L%d ucache for %s\n", level, node->full_name);

- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
-
- return cache;
+ return new_cache(cache_is_unified_d(node), level, node);
}

static struct cache *cache_do_one_devnode_split(struct device_node *node,
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e408bfc7948..cecbe00cee24 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -581,6 +581,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
pmd = pmd_offset(pud, start);
pud_clear(pud);
pmd_free_tlb(tlb, pmd, start);
+ mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 2396dda282cd..ead55351b254 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);

- for (;;) {
+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned long __user *) sp;
if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
return;
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 4c11421847be..3af8324c122e 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)

void iic_setup_cpu(void)
{
- out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
+ out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
}

u8 iic_get_target_id(int cpu)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index c7c8720aa39f..63db1b03e756 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,

io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
+ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);

mb();
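
The one-liner above is a units fix: it_page_shift is an exponent, not a byte
count, so the virtual address has to advance by a full IOMMU page per PTE.
With the usual 4 KiB IOMMU pages:

        unsigned long it_page_shift = 12;               /* 4 KiB pages */

        unsigned long step_old = it_page_shift;         /* 12 bytes per loop:
                                                           successive PTEs mostly
                                                           mapped the same page */
        unsigned long step_new = 1UL << it_page_shift;  /* 4096 bytes: one page
                                                           per PTE, as intended */
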
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6c9ff2b95119..1d9369e1e0f4 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1777,7 +1777,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
region.start += phb->ioda.io_segsize;
index++;
}
- } else if (res->flags & IORESOURCE_MEM) {
+ } else if ((res->flags & IORESOURCE_MEM) &&
+ !pnv_pci_is_mem_pref_64(res->flags)) {
region.start = res->start -
hose->mem_offset[0] -
phb->ioda.m32_pci_base;
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index 1c4c5accd220..d3236c9e226b 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
+ unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
+ unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));

/* Always save lowcore pages (LC protection might be enabled). */
if (pfn <= LC_PAGES)
@@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
return 1;
/* Skip memory holes and read-only pages (NSS, DCSS, ...). */
+ if (pfn >= stext_pfn && pfn <= eshared_pfn)
+ return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
if (tprot(PFN_PHYS(pfn)))
return 1;
return 0;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 073b5f387d1d..e7bc2fdb6f67 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -17,6 +17,7 @@
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
+#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
@@ -1332,10 +1333,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
return rc;
}

-void kvm_s390_reinject_io_int(struct kvm *kvm,
+int kvm_s390_reinject_io_int(struct kvm *kvm,
struct kvm_s390_interrupt_info *inti)
{
- __inject_vm(kvm, inti);
+ return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
@@ -1455,61 +1456,66 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
spin_unlock(&fi->lock);
}

-static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
- u8 *addr)
+static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
+ struct kvm_s390_irq *irq)
{
- struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
- struct kvm_s390_irq irq = {0};
-
- irq.type = inti->type;
+ irq->type = inti->type;
switch (inti->type) {
case KVM_S390_INT_PFAULT_INIT:
case KVM_S390_INT_PFAULT_DONE:
case KVM_S390_INT_VIRTIO:
case KVM_S390_INT_SERVICE:
- irq.u.ext = inti->ext;
+ irq->u.ext = inti->ext;
break;
case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
- irq.u.io = inti->io;
+ irq->u.io = inti->io;
break;
case KVM_S390_MCHK:
- irq.u.mchk = inti->mchk;
+ irq->u.mchk = inti->mchk;
break;
- default:
- return -EINVAL;
}
-
- if (copy_to_user(uptr, &irq, sizeof(irq)))
- return -EFAULT;
-
- return 0;
}

-static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
+static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
{
struct kvm_s390_interrupt_info *inti;
struct kvm_s390_float_interrupt *fi;
+ struct kvm_s390_irq *buf;
+ int max_irqs;
int ret = 0;
int n = 0;

+ if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
+ return -EINVAL;
+
+ /*
+ * We are already using -ENOMEM to signal
+ * userspace it may retry with a bigger buffer,
+ * so we need to use something else for this case
+ */
+ buf = vzalloc(len);
+ if (!buf)
+ return -ENOBUFS;
+
+ max_irqs = len / sizeof(struct kvm_s390_irq);
+
fi = &kvm->arch.float_int;
spin_lock(&fi->lock);
-
list_for_each_entry(inti, &fi->list, list) {
- if (len < sizeof(struct kvm_s390_irq)) {
+ if (n == max_irqs) {
/* signal userspace to try again */
ret = -ENOMEM;
break;
}
- ret = copy_irq_to_user(inti, buf);
- if (ret)
- break;
- buf += sizeof(struct kvm_s390_irq);
- len -= sizeof(struct kvm_s390_irq);
+ inti_to_irq(inti, &buf[n]);
n++;
}
-
spin_unlock(&fi->lock);
+ if (!ret && n > 0) {
+ if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
+ ret = -EFAULT;
+ }
+ vfree(buf);

return ret < 0 ? ret : n;
}
@@ -1520,7 +1526,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)

switch (attr->group) {
case KVM_DEV_FLIC_GET_ALL_IRQS:
- r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
+ r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
attr->attr);
break;
default:
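
Besides the new error codes, the rewrite above changes where the user copy
happens: the pending interrupts are snapshotted into a vzalloc()ed kernel
buffer while fi->lock (a spinlock) is held, and copy_to_user(), which may
fault and sleep, only runs after the lock is dropped. A hedged sketch of that
general pattern with made-up item names:

        #include <linux/errno.h>
        #include <linux/list.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>
        #include <linux/uaccess.h>
        #include <linux/vmalloc.h>

        struct item { struct list_head list; u64 payload; };

        static int dump_items(struct list_head *head, spinlock_t *lock,
                              u64 __user *ubuf, size_t max)
        {
                u64 *buf;
                size_t n = 0;
                struct item *it;
                int ret = 0;

                buf = vzalloc(max * sizeof(*buf));
                if (!buf)
                        return -ENOBUFS; /* -ENOMEM already means "retry bigger" */

                spin_lock(lock);         /* no faulting copies under the lock */
                list_for_each_entry(it, head, list) {
                        if (n == max) {
                                ret = -ENOMEM;  /* ask userspace to grow its buffer */
                                break;
                        }
                        buf[n++] = it->payload;
                }
                spin_unlock(lock);

                if (!ret && n && copy_to_user(ubuf, buf, n * sizeof(*buf)))
                        ret = -EFAULT;
                vfree(buf);
                return ret < 0 ? ret : n;
        }
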
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c34109aa552d..6995a3080a0e 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -151,8 +151,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
u64 cr6, u64 schid);
-void kvm_s390_reinject_io_int(struct kvm *kvm,
- struct kvm_s390_interrupt_info *inti);
+int kvm_s390_reinject_io_int(struct kvm *kvm,
+ struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 351116939ea2..b982fbca34df 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -229,18 +229,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
struct kvm_s390_interrupt_info *inti;
unsigned long len;
u32 tpi_data[3];
- int cc, rc;
+ int rc;
u64 addr;

- rc = 0;
addr = kvm_s390_get_base_disp_s(vcpu);
if (addr & 3)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
- cc = 0;
+
inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
- if (!inti)
- goto no_interrupt;
- cc = 1;
+ if (!inti) {
+ kvm_s390_set_psw_cc(vcpu, 0);
+ return 0;
+ }
+
tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
tpi_data[1] = inti->io.io_int_parm;
tpi_data[2] = inti->io.io_int_word;
@@ -251,30 +252,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
*/
len = sizeof(tpi_data) - 4;
rc = write_guest(vcpu, addr, &tpi_data, len);
- if (rc)
- return kvm_s390_inject_prog_cond(vcpu, rc);
+ if (rc) {
+ rc = kvm_s390_inject_prog_cond(vcpu, rc);
+ goto reinject_interrupt;
+ }
} else {
/*
* Store the three-word I/O interruption code into
* the appropriate lowcore area.
*/
len = sizeof(tpi_data);
- if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
+ if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
+ /* failed writes to the lowcore are not recoverable */
rc = -EFAULT;
+ goto reinject_interrupt;
+ }
}
+
+ /* irq was successfully handed to the guest */
+ kfree(inti);
+ kvm_s390_set_psw_cc(vcpu, 1);
+ return 0;
+reinject_interrupt:
/*
* If we encounter a problem storing the interruption code, the
* instruction is suppressed from the guest's view: reinject the
* interrupt.
*/
- if (!rc)
+ if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
kfree(inti);
- else
- kvm_s390_reinject_io_int(vcpu->kvm, inti);
-no_interrupt:
- /* Set condition code and we're done. */
- if (!rc)
- kvm_s390_set_psw_cc(vcpu, cc);
+ rc = -EFAULT;
+ }
+ /* don't set the cc, a pgm irq was injected or we drop to user space */
return rc ? -EFAULT : 0;
}

@@ -467,6 +476,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
for (n = mem->count - 1; n > 0 ; n--)
memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

+ memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
mem->vm[0].cpus_total = cpus;
mem->vm[0].cpus_configured = cpus;
mem->vm[0].cpus_standby = 0;
diff --git a/arch/x86/include/asm/insn.h b/arch/x86/include/asm/insn.h
index 47f29b1d1846..e7814b74caf8 100644
--- a/arch/x86/include/asm/insn.h
+++ b/arch/x86/include/asm/insn.h
@@ -69,7 +69,7 @@ struct insn {
const insn_byte_t *next_byte;
};

-#define MAX_INSN_SIZE 16
+#define MAX_INSN_SIZE 15

#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
index a1410db38a1a..653dfa7662e1 100644
--- a/arch/x86/include/asm/mwait.h
+++ b/arch/x86/include/asm/mwait.h
@@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
:: "a" (eax), "c" (ecx));
}

+static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
+{
+ trace_hardirqs_on();
+ /* "mwait %eax, %ecx;" */
+ asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
+ :: "a" (eax), "c" (ecx));
+}
+
/*
* This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
* which can obviate IPI to trigger checking of need_resched.
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index d6b078e9fa28..25b1cc07d496 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -95,6 +95,7 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,

struct pvclock_vsyscall_time_info {
struct pvclock_vcpu_time_info pvti;
+ u32 migrate_count;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));

#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 073983398364..666bcf14ce10 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -557,6 +557,8 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_RETIRED.MISPRED */
INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
EVENT_CONSTRAINT_END
};

@@ -564,6 +566,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY */
INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
EVENT_CONSTRAINT_END
};

@@ -587,6 +591,8 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};

@@ -602,6 +608,8 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
INTEL_FLAGS_UEVENT_CONSTRAINT(0x20c8, 0xf), /* ITLB_MISS_RETIRED */
INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
EVENT_CONSTRAINT_END
};

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 046e2d620bbe..a388bb883128 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -24,6 +24,7 @@
#include <asm/syscalls.h>
#include <asm/idle.h>
#include <asm/uaccess.h>
+#include <asm/mwait.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/debugreg.h>
@@ -399,6 +400,53 @@ static void amd_e400_idle(void)
default_idle();
}

+/*
+ * Intel Core2 and older machines prefer MWAIT over HALT for C1.
+ * We can't rely on cpuidle installing MWAIT, because it will not load
+ * on systems that support only C1 -- so the boot default must be MWAIT.
+ *
+ * Some AMD machines are the opposite; they depend on using HALT.
+ *
+ * So for default C1, which is used during boot until cpuidle loads,
+ * use MWAIT-C1 on Intel HW that has it, else use HALT.
+ */
+static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
+{
+ if (c->x86_vendor != X86_VENDOR_INTEL)
+ return 0;
+
+ if (!cpu_has(c, X86_FEATURE_MWAIT))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * MONITOR/MWAIT with no hints, used for the default C1 state.
+ * This invokes MWAIT with interrupts enabled and no flags,
+ * which is backwards compatible with the original MWAIT implementation.
+ */
+
+static void mwait_idle(void)
+{
+ if (!current_set_polling_and_test()) {
+ if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
+ smp_mb(); /* quirk */
+ clflush((void *)&current_thread_info()->flags);
+ smp_mb(); /* quirk */
+ }
+
+ __monitor((void *)&current_thread_info()->flags, 0, 0);
+ if (!need_resched())
+ __sti_mwait(0, 0);
+ else
+ local_irq_enable();
+ } else {
+ local_irq_enable();
+ }
+ __current_clr_polling();
+}
+
void select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
@@ -412,6 +460,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
/* E400: APIC timer interrupt does not wake up CPU from C1e */
pr_info("using AMD E400 aware idle routine\n");
x86_idle = amd_e400_idle;
+ } else if (prefer_mwait_c1_over_halt(c)) {
+ pr_info("using mwait in idle threads\n");
+ x86_idle = mwait_idle;
} else
x86_idle = default_idle;
}
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 2f355d229a58..e5ecd20e72dd 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -141,7 +141,46 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}

+static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
+
+static struct pvclock_vsyscall_time_info *
+pvclock_get_vsyscall_user_time_info(int cpu)
+{
+ if (!pvclock_vdso_info) {
+ BUG();
+ return NULL;
+ }
+
+ return &pvclock_vdso_info[cpu];
+}
+
+struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
+{
+ return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
+}
+
#ifdef CONFIG_X86_64
+static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
+ void *v)
+{
+ struct task_migration_notifier *mn = v;
+ struct pvclock_vsyscall_time_info *pvti;
+
+ pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
+
+ /* this is NULL when pvclock vsyscall is not initialized */
+ if (unlikely(pvti == NULL))
+ return NOTIFY_DONE;
+
+ pvti->migrate_count++;
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block pvclock_migrate = {
+ .notifier_call = pvclock_task_migrate,
+};
+
/*
* Initialize the generic pvclock vsyscall state. This will allocate
* a/some page(s) for the per-vcpu pvclock information, set up a
@@ -155,12 +194,17 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,

WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);

+ pvclock_vdso_info = i;
+
for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
__pa(i) + (idx*PAGE_SIZE),
PAGE_KERNEL_VVAR);
}

+
+ register_task_migration_notifier(&pvclock_migrate);
+
return 0;
}
#endif
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ae4f6d35d19c..a60bd3aa0965 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3621,8 +3621,16 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)

static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
- unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
- KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
+ /*
+ * Pass through host's Machine Check Enable value to hw_cr4, which
+ * is in force while we are in guest mode. Do not let guests control
+ * this bit, even if host CR4.MCE == 0.
+ */
+ unsigned long hw_cr4 =
+ (cr4_read_shadow() & X86_CR4_MCE) |
+ (cr4 & ~X86_CR4_MCE) |
+ (to_vmx(vcpu)->rmode.vm86_active ?
+ KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);

if (cr4 & X86_CR4_VMXE) {
/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 32bf19ef3115..e222ba5d2beb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5775,7 +5775,6 @@ int kvm_arch_init(void *opaque)
kvm_set_mmio_spte_mask();

kvm_x86_ops = ops;
- kvm_init_msr_list();

kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
PT_DIRTY_MASK, PT64_NX_MASK, 0);
@@ -7209,7 +7208,14 @@ void kvm_arch_hardware_disable(void)

int kvm_arch_hardware_setup(void)
{
- return kvm_x86_ops->hardware_setup();
+ int r;
+
+ r = kvm_x86_ops->hardware_setup();
+ if (r != 0)
+ return r;
+
+ kvm_init_msr_list();
+ return 0;
}

void kvm_arch_hardware_unsetup(void)
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 1313ae6b478b..85994f5d48e4 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -52,6 +52,13 @@
*/
void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
{
+ /*
+ * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
+ * even if the input buffer is long enough to hold them.
+ */
+ if (buf_len > MAX_INSN_SIZE)
+ buf_len = MAX_INSN_SIZE;
+
memset(insn, 0, sizeof(*insn));
insn->kaddr = kaddr;
insn->end_kaddr = kaddr + buf_len;
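
A minimal call-site sketch shows what the clamp buys: even when a caller hands
insn_init() a generous window, decoding is now bounded at the architectural 15-byte
limit. Hypothetical helper, assuming the usual in-kernel <asm/insn.h> API:

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/insn.h>

/* Decode one instruction at kaddr; the window may be larger than needed. */
static int demo_decode_len(const u8 *kaddr, int window)
{
	struct insn insn;

	/* buf_len is silently limited to MAX_INSN_SIZE (15 bytes) inside */
	insn_init(&insn, kaddr, window, 1 /* 64-bit mode */);
	insn_get_length(&insn);

	return insn_complete(&insn) ? insn.length : -EINVAL;
}
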
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 1f33b3d1fd68..0a42327a59d7 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -82,7 +82,7 @@ copy_user_handle_tail(char *to, char *from, unsigned len)
clac();

/* If the destination is a kernel buffer, we always clear the end */
- if ((unsigned long)to >= TASK_SIZE_MAX)
+ if (!__addr_ok(to))
memset(to, 0, len);
return len;
}
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 9793322751e0..40d2473836c9 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -82,18 +82,15 @@ static notrace cycle_t vread_pvclock(int *mode)
cycle_t ret;
u64 last;
u32 version;
+ u32 migrate_count;
u8 flags;
unsigned cpu, cpu1;


/*
- * Note: hypervisor must guarantee that:
- * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
- * 2. that per-CPU pvclock time info is updated if the
- * underlying CPU changes.
- * 3. that version is increased whenever underlying CPU
- * changes.
- *
+ * When looping to get a consistent (time-info, tsc) pair, we
+ * also need to deal with the possibility that we switch vcpus,
+ * so make sure we always re-fetch time-info for the current vcpu.
*/
do {
cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -102,20 +99,27 @@ static notrace cycle_t vread_pvclock(int *mode)
* __getcpu() calls (Gleb).
*/

- pvti = get_pvti(cpu);
+ /* Make sure migrate_count will change if we leave the VCPU. */
+ do {
+ pvti = get_pvti(cpu);
+ migrate_count = pvti->migrate_count;
+
+ cpu1 = cpu;
+ cpu = __getcpu() & VGETCPU_CPU_MASK;
+ } while (unlikely(cpu != cpu1));

version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

/*
* Test we're still on the cpu as well as the version.
- * We could have been migrated just after the first
- * vgetcpu but before fetching the version, so we
- * wouldn't notice a version change.
+ * - We must read TSC of pvti's VCPU.
+ * - KVM doesn't follow the versioning protocol, so data could
+ * change before version if we left the VCPU.
*/
- cpu1 = __getcpu() & VGETCPU_CPU_MASK;
- } while (unlikely(cpu != cpu1 ||
- (pvti->pvti.version & 1) ||
- pvti->pvti.version != version));
+ smp_rmb();
+ } while (unlikely((pvti->pvti.version & 1) ||
+ pvti->pvti.version != version ||
+ pvti->migrate_count != migrate_count));

if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
*mode = VCLOCK_NONE;
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index e31d4949124a..87be10e8b57a 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -428,6 +428,36 @@ config DEFAULT_MEM_SIZE

If unsure, leave the default value here.

+config XTFPGA_LCD
+ bool "Enable XTFPGA LCD driver"
+ depends on XTENSA_PLATFORM_XTFPGA
+ default n
+ help
+ There's a 2x16 LCD on most XTFPGA boards; the kernel may output
+ progress messages there during bootup/shutdown. It may be useful
+ during board bringup.
+
+ If unsure, say N.
+
+config XTFPGA_LCD_BASE_ADDR
+ hex "XTFPGA LCD base address"
+ depends on XTFPGA_LCD
+ default "0x0d0c0000"
+ help
+ Base address of the LCD controller inside the KIO region.
+ Different boards from the XTFPGA family have the LCD controller at
+ different addresses. Please consult the prototyping user guide for
+ your board for the correct address. A wrong address here may lead to
+ a hardware lockup.
+
+config XTFPGA_LCD_8BIT_ACCESS
+ bool "Use 8-bit access to XTFPGA LCD"
+ depends on XTFPGA_LCD
+ default n
+ help
+ The LCD may be connected with a 4- or 8-bit interface; 8-bit access
+ may only be used with an 8-bit interface. Please consult the
+ prototyping user guide for your board for the correct interface width.
+
endmenu

menu "Executable file formats"
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index db5bb72e2f4e..62d84657c60b 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
__SYSCALL(324, sys_name_to_handle_at, 5)
#define __NR_open_by_handle_at 325
__SYSCALL(325, sys_open_by_handle_at, 3)
-#define __NR_sync_file_range 326
+#define __NR_sync_file_range2 326
__SYSCALL(326, sys_sync_file_range2, 6)
#define __NR_perf_event_open 327
__SYSCALL(327, sys_perf_event_open, 5)
diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
index d05f8feeb8d7..17b1ef3232e4 100644
--- a/arch/xtensa/platforms/iss/network.c
+++ b/arch/xtensa/platforms/iss/network.c
@@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
{
struct iss_net_private *lp = (struct iss_net_private *)priv;

- spin_lock(&lp->lock);
iss_net_poll();
+ spin_lock(&lp->lock);
mod_timer(&lp->timer, jiffies + lp->timer_val);
spin_unlock(&lp->lock);
}
@@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
struct iss_net_private *lp = netdev_priv(dev);
int err;

- spin_lock(&lp->lock);
+ spin_lock_bh(&lp->lock);

err = lp->tp.open(lp);
if (err < 0)
@@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
while ((err = iss_net_rx(dev)) > 0)
;

- spin_lock(&opened_lock);
+ spin_unlock_bh(&lp->lock);
+ spin_lock_bh(&opened_lock);
list_add(&lp->opened_list, &opened);
- spin_unlock(&opened_lock);
+ spin_unlock_bh(&opened_lock);
+ spin_lock_bh(&lp->lock);

init_timer(&lp->timer);
lp->timer_val = ISS_NET_TIMER_VALUE;
@@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
mod_timer(&lp->timer, jiffies + lp->timer_val);

out:
- spin_unlock(&lp->lock);
+ spin_unlock_bh(&lp->lock);
return err;
}

@@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
{
struct iss_net_private *lp = netdev_priv(dev);
netif_stop_queue(dev);
- spin_lock(&lp->lock);
+ spin_lock_bh(&lp->lock);

spin_lock(&opened_lock);
list_del(&opened);
@@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)

lp->tp.close(lp);

- spin_unlock(&lp->lock);
+ spin_unlock_bh(&lp->lock);
return 0;
}

static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct iss_net_private *lp = netdev_priv(dev);
- unsigned long flags;
int len;

netif_stop_queue(dev);
- spin_lock_irqsave(&lp->lock, flags);
+ spin_lock_bh(&lp->lock);

len = lp->tp.write(lp, &skb);

@@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
}

- spin_unlock_irqrestore(&lp->lock, flags);
+ spin_unlock_bh(&lp->lock);

dev_kfree_skb(skb);
return NETDEV_TX_OK;
@@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)

if (!is_valid_ether_addr(hwaddr->sa_data))
return -EADDRNOTAVAIL;
- spin_lock(&lp->lock);
+ spin_lock_bh(&lp->lock);
memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
- spin_unlock(&lp->lock);
+ spin_unlock_bh(&lp->lock);
return 0;
}

@@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
*lp = (struct iss_net_private) {
.device_list = LIST_HEAD_INIT(lp->device_list),
.opened_list = LIST_HEAD_INIT(lp->opened_list),
- .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
.dev = dev,
.index = index,
- };
+ };

+ spin_lock_init(&lp->lock);
/*
* If this name ends up conflicting with an existing registered
* netdevice, that is OK, register_netdev{,ice}() will notice this
diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
index b9ae206340cd..7839d38b2337 100644
--- a/arch/xtensa/platforms/xtfpga/Makefile
+++ b/arch/xtensa/platforms/xtfpga/Makefile
@@ -6,4 +6,5 @@
#
# Note 2! The CFLAGS definitions are in the main makefile...

-obj-y = setup.o lcd.o
+obj-y += setup.o
+obj-$(CONFIG_XTFPGA_LCD) += lcd.o
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index 6edd20bb4565..4e0af2662a21 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -40,9 +40,6 @@

/* UART */
#define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
-/* LCD instruction and data addresses. */
-#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
-#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))

/* Misc. */
#define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
index 0e435645af5a..4c8541ed1139 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
@@ -11,10 +11,25 @@
#ifndef __XTENSA_XTAVNET_LCD_H
#define __XTENSA_XTAVNET_LCD_H

+#ifdef CONFIG_XTFPGA_LCD
/* Display string STR at position POS on the LCD. */
void lcd_disp_at_pos(char *str, unsigned char pos);

/* Shift the contents of the LCD display left or right. */
void lcd_shiftleft(void);
void lcd_shiftright(void);
+#else
+static inline void lcd_disp_at_pos(char *str, unsigned char pos)
+{
+}
+
+static inline void lcd_shiftleft(void)
+{
+}
+
+static inline void lcd_shiftright(void)
+{
+}
+#endif
+
#endif
diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
index 2872301598df..4dc0c1b43f4b 100644
--- a/arch/xtensa/platforms/xtfpga/lcd.c
+++ b/arch/xtensa/platforms/xtfpga/lcd.c
@@ -1,50 +1,63 @@
/*
- * Driver for the LCD display on the Tensilica LX60 Board.
+ * Driver for the LCD display on the Tensilica XTFPGA board family.
+ * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001, 2006 Tensilica Inc.
+ * Copyright (C) 2015 Cadence Design Systems Inc.
*/

-/*
- *
- * FIXME: this code is from the examples from the LX60 user guide.
- *
- * The lcd_pause function does busy waiting, which is probably not
- * great. Maybe the code could be changed to use kernel timers, or
- * change the hardware to not need to wait.
- */
-
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/io.h>

#include <platform/hardware.h>
#include <platform/lcd.h>
-#include <linux/delay.h>

-#define LCD_PAUSE_ITERATIONS 4000
+/* LCD instruction and data addresses. */
+#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
+#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
+
#define LCD_CLEAR 0x1
#define LCD_DISPLAY_ON 0xc

/* 8bit and 2 lines display */
#define LCD_DISPLAY_MODE8BIT 0x38
+#define LCD_DISPLAY_MODE4BIT 0x28
#define LCD_DISPLAY_POS 0x80
#define LCD_SHIFT_LEFT 0x18
#define LCD_SHIFT_RIGHT 0x1c

+static void lcd_put_byte(u8 *addr, u8 data)
+{
+#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
+ ACCESS_ONCE(*addr) = data;
+#else
+ ACCESS_ONCE(*addr) = data & 0xf0;
+ ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
+#endif
+}
+
static int __init lcd_init(void)
{
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
mdelay(5);
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
udelay(200);
- *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
+ udelay(50);
+#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
+ ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
+ udelay(50);
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
udelay(50);
- *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
+#endif
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
udelay(50);
- *LCD_INSTR_ADDR = LCD_CLEAR;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
mdelay(10);
lcd_disp_at_pos("XTENSA LINUX", 0);
return 0;
@@ -52,10 +65,10 @@ static int __init lcd_init(void)

void lcd_disp_at_pos(char *str, unsigned char pos)
{
- *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
udelay(100);
while (*str != 0) {
- *LCD_DATA_ADDR = *str;
+ lcd_put_byte(LCD_DATA_ADDR, *str);
udelay(200);
str++;
}
@@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)

void lcd_shiftleft(void)
{
- *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
udelay(50);
}

void lcd_shiftright(void)
{
- *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
+ lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
udelay(50);
}

diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 5ed064e8673c..ccf793247447 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -92,6 +92,7 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
ACPI_SET_BIT(gpe_register_info->enable_for_run,
(u8)register_bit);
}
+ gpe_register_info->enable_mask = gpe_register_info->enable_for_run;

return_ACPI_STATUS(AE_OK);
}
@@ -123,7 +124,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)

/* Enable the requested GPE */

- status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE_SAVE);
+ status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
return_ACPI_STATUS(status);
}

@@ -202,7 +203,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
if (ACPI_SUCCESS(status)) {
status =
acpi_hw_low_set_gpe(gpe_event_info,
- ACPI_GPE_DISABLE_SAVE);
+ ACPI_GPE_DISABLE);
}

if (ACPI_FAILURE(status)) {
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 84bc550f4f1d..af6514ed64c5 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -89,6 +89,8 @@ u32 acpi_hw_get_gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info)
* RETURN: Status
*
* DESCRIPTION: Enable or disable a single GPE in the parent enable register.
+ * The enable_mask field of the involved GPE register must be
+ * updated by the caller if necessary.
*
******************************************************************************/

@@ -119,7 +121,7 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Set or clear just the bit that corresponds to this GPE */

register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);
- switch (action & ~ACPI_GPE_SAVE_MASK) {
+ switch (action) {
case ACPI_GPE_CONDITIONAL_ENABLE:

/* Only enable if the corresponding enable_mask bit is set */
@@ -149,9 +151,6 @@ acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action)
/* Write the updated enable mask */

status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
- if (ACPI_SUCCESS(status) && (action & ACPI_GPE_SAVE_MASK)) {
- gpe_register_info->enable_mask = (u8)enable_mask;
- }
return (status);
}

@@ -286,10 +285,8 @@ acpi_hw_gpe_enable_write(u8 enable_mask,
{
acpi_status status;

+ gpe_register_info->enable_mask = enable_mask;
status = acpi_hw_write(enable_mask, &gpe_register_info->enable_address);
- if (ACPI_SUCCESS(status)) {
- gpe_register_info->enable_mask = enable_mask;
- }
return (status);
}

diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 9bad45e63a45..7fbc2b9dcbbb 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -346,7 +346,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
*/
acpi_tb_uninstall_table(&new_table_desc);
*table_index = i;
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(AE_OK);
}
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index bbca7830e18a..349f4fdd0b25 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -298,7 +298,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
struct acpi_device_physical_node *pn;
bool offline = true;

- mutex_lock(&adev->physical_node_lock);
+ /*
+ * acpi_container_offline() calls this for all of the container's
+ * children under the container's physical_node_lock lock.
+ */
+ mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);

list_for_each_entry(pn, &adev->physical_node_list, node)
if (device_supports_offline(pn->dev) && !pn->dev->offline) {
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 876bae5ade33..79bc203f51ef 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -515,11 +515,11 @@ int bus_add_device(struct device *dev)
goto out_put;
error = device_add_groups(dev, bus->dev_groups);
if (error)
- goto out_groups;
+ goto out_id;
error = sysfs_create_link(&bus->p->devices_kset->kobj,
&dev->kobj, dev_name(dev));
if (error)
- goto out_id;
+ goto out_groups;
error = sysfs_create_link(&dev->kobj,
&dev->bus->p->subsys.kobj, "subsystem");
if (error)
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 6e64563361f0..9c2ba1c97c42 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -62,15 +62,21 @@ static int cache_setup_of_node(unsigned int cpu)
return -ENOENT;
}

- while (np && index < cache_leaves(cpu)) {
+ while (index < cache_leaves(cpu)) {
this_leaf = this_cpu_ci->info_list + index;
if (this_leaf->level != 1)
np = of_find_next_cache_node(np);
else
np = of_node_get(np);/* cpu node itself */
+ if (!np)
+ break;
this_leaf->of_node = np;
index++;
}
+
+ if (index != cache_leaves(cpu)) /* not all OF nodes populated */
+ return -ENOENT;
+
return 0;
}

@@ -189,8 +195,11 @@ static int detect_cache_attributes(unsigned int cpu)
* will be set up here only if they are not populated already
*/
ret = cache_shared_cpu_map_setup(cpu);
- if (ret)
+ if (ret) {
+ pr_warn("Unable to detect cache hierarcy from DT for CPU %d\n",
+ cpu);
goto free_ci;
+ }
return 0;

free_ci:
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9421fed40905..e68ab79df28b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -101,6 +101,15 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
}

r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+ /*
+ * The resources may pass trigger flags to the irqs that need
+ * to be set up. It so happens that the trigger flags for
+ * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
+ * settings.
+ */
+ if (r && r->flags & IORESOURCE_BITS)
+ irqd_set_trigger_type(irq_get_irq_data(r->start),
+ r->flags & IORESOURCE_BITS);

return r ? r->start : -ENXIO;
#endif
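
To illustrate what this buys legacy board files: an IRQ resource can now carry its
trigger type, and the driver no longer has to repeat it when requesting the interrupt.
A hypothetical sketch (device name, IRQ number and handler are made up):

#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource demo_resources[] = {
	{
		.start = 42,	/* made-up interrupt number */
		.end   = 42,
		/* IORESOURCE_IRQ_HIGHEDGE maps 1:1 to IRQF_TRIGGER_RISING */
		.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE,
	},
};

static struct platform_device demo_device = {
	.name		= "demo",
	.id		= -1,
	.resource	= demo_resources,
	.num_resources	= ARRAY_SIZE(demo_resources),
};

/*
 * In the driver's probe():
 *
 *	int irq = platform_get_irq(pdev, 0);
 *
 *	ret = devm_request_irq(&pdev->dev, irq, demo_handler, 0,
 *			       "demo", priv);
 *
 * platform_get_irq() has already set the rising-edge trigger on the
 * irq_data, so the request flags can stay 0 here.
 */
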
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index de4c8499cbac..288547a3c566 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = {
/* Atheros AR3011 with sflash firmware*/
{ USB_DEVICE(0x0489, 0xE027) },
{ USB_DEVICE(0x0489, 0xE03D) },
+ { USB_DEVICE(0x04F2, 0xAFF1) },
{ USB_DEVICE(0x0930, 0x0215) },
{ USB_DEVICE(0x0CF3, 0x3002) },
{ USB_DEVICE(0x0CF3, 0xE019) },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 8bfc4c2bba87..2c527da668ae 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -159,6 +159,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Atheros 3011 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
+ { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
{ USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index e096e9cddb40..283f00a7f036 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -170,6 +170,41 @@ static void tpm_dev_del_device(struct tpm_chip *chip)
device_unregister(&chip->dev);
}

+static int tpm1_chip_register(struct tpm_chip *chip)
+{
+ int rc;
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return 0;
+
+ rc = tpm_sysfs_add_device(chip);
+ if (rc)
+ return rc;
+
+ rc = tpm_add_ppi(chip);
+ if (rc) {
+ tpm_sysfs_del_device(chip);
+ return rc;
+ }
+
+ chip->bios_dir = tpm_bios_log_setup(chip->devname);
+
+ return 0;
+}
+
+static void tpm1_chip_unregister(struct tpm_chip *chip)
+{
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return;
+
+ if (chip->bios_dir)
+ tpm_bios_log_teardown(chip->bios_dir);
+
+ tpm_remove_ppi(chip);
+
+ tpm_sysfs_del_device(chip);
+}
+
/*
* tpm_chip_register() - create a character device for the TPM chip
* @chip: TPM chip to use.
@@ -185,22 +220,13 @@ int tpm_chip_register(struct tpm_chip *chip)
{
int rc;

- /* Populate sysfs for TPM1 devices. */
- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- rc = tpm_sysfs_add_device(chip);
- if (rc)
- goto del_misc;
-
- rc = tpm_add_ppi(chip);
- if (rc)
- goto del_sysfs;
-
- chip->bios_dir = tpm_bios_log_setup(chip->devname);
- }
+ rc = tpm1_chip_register(chip);
+ if (rc)
+ return rc;

rc = tpm_dev_add_device(chip);
if (rc)
- return rc;
+ goto out_err;

/* Make the chip available. */
spin_lock(&driver_lock);
@@ -210,10 +236,8 @@ int tpm_chip_register(struct tpm_chip *chip)
chip->flags |= TPM_CHIP_FLAG_REGISTERED;

return 0;
-del_sysfs:
- tpm_sysfs_del_device(chip);
-del_misc:
- tpm_dev_del_device(chip);
+out_err:
+ tpm1_chip_unregister(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_chip_register);
@@ -238,13 +262,7 @@ void tpm_chip_unregister(struct tpm_chip *chip)
spin_unlock(&driver_lock);
synchronize_rcu();

- if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- if (chip->bios_dir)
- tpm_bios_log_teardown(chip->bios_dir);
- tpm_remove_ppi(chip);
- tpm_sysfs_del_device(chip);
- }
-
+ tpm1_chip_unregister(chip);
tpm_dev_del_device(chip);
}
EXPORT_SYMBOL_GPL(tpm_chip_unregister);
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index a23ac0c724f0..0b7c3e8840ba 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -56,22 +56,55 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
}

-static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ unsigned long *best_parent_rate,
+ struct clk_hw **best_parent_hw)
{
- unsigned long div;
+ struct clk *parent = NULL;
+ long best_rate = -EINVAL;
+ unsigned long tmp_rate;
+ int best_diff = -1;
+ int tmp_diff;
+ int i;

- if (!rate)
- return -EINVAL;
+ for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+ int div;

- if (rate >= *parent_rate)
- return *parent_rate;
+ parent = clk_get_parent_by_index(hw->clk, i);
+ if (!parent)
+ continue;
+
+ for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
+ unsigned long tmp_parent_rate;
+
+ tmp_parent_rate = rate * div;
+ tmp_parent_rate = __clk_round_rate(parent,
+ tmp_parent_rate);
+ tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
+ if (tmp_rate < rate)
+ tmp_diff = rate - tmp_rate;
+ else
+ tmp_diff = tmp_rate - rate;
+
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ best_rate = tmp_rate;
+ best_diff = tmp_diff;
+ *best_parent_rate = tmp_parent_rate;
+ *best_parent_hw = __clk_get_hw(parent);
+ }
+
+ if (!best_diff || tmp_rate < rate)
+ break;
+ }

- div = DIV_ROUND_CLOSEST(*parent_rate, rate);
- if (div > SAM9X5_USB_MAX_DIV + 1)
- div = SAM9X5_USB_MAX_DIV + 1;
+ if (!best_diff)
+ break;
+ }

- return DIV_ROUND_CLOSEST(*parent_rate, div);
+ return best_rate;
}

static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -121,7 +154,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,

static const struct clk_ops at91sam9x5_usb_ops = {
.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
- .round_rate = at91sam9x5_clk_usb_round_rate,
+ .determine_rate = at91sam9x5_clk_usb_determine_rate,
.get_parent = at91sam9x5_clk_usb_get_parent,
.set_parent = at91sam9x5_clk_usb_set_parent,
.set_rate = at91sam9x5_clk_usb_set_rate,
@@ -159,7 +192,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
.disable = at91sam9n12_clk_usb_disable,
.is_enabled = at91sam9n12_clk_usb_is_enabled,
.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
- .round_rate = at91sam9x5_clk_usb_round_rate,
+ .determine_rate = at91sam9x5_clk_usb_determine_rate,
.set_rate = at91sam9x5_clk_usb_set_rate,
};

@@ -179,7 +212,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
init.ops = &at91sam9x5_usb_ops;
init.parent_names = parent_names;
init.num_parents = num_parents;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT;

usb->hw.init = &init;
usb->pmc = pmc;
@@ -207,7 +241,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
init.ops = &at91sam9n12_usb_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.flags = CLK_SET_RATE_GATE;
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;

usb->hw.init = &init;
usb->pmc = pmc;
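
To make the divider search in the new determine_rate callback concrete, here is a
throwaway user-space rendition of the same closest-rate loop, reduced to one
fixed-rate parent; the 480 MHz parent rate and the SAM9X5_USB_MAX_DIV value of 15
are assumptions for the example, not taken from this patch:

#include <stdio.h>

#define MAX_DIV 15	/* assumed value of SAM9X5_USB_MAX_DIV */

int main(void)
{
	unsigned long parent = 480000000UL;	/* one candidate parent rate */
	unsigned long target = 48000000UL;	/* requested USB clock */
	unsigned long best_rate = 0, best_diff = ~0UL;
	int div;

	for (div = 1; div < MAX_DIV + 2; div++) {
		/* DIV_ROUND_CLOSEST(parent, div) */
		unsigned long rate = (parent + div / 2) / div;
		unsigned long diff = rate > target ? rate - target
						   : target - rate;

		if (diff < best_diff) {
			best_rate = rate;
			best_diff = diff;
		}
		/* stop early on an exact hit or once we undershoot */
		if (!best_diff || rate < target)
			break;
	}
	printf("best rate: %lu Hz (diff %lu Hz)\n", best_rate, best_diff);
	return 0;
}
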
diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c
index 0039bd7d3965..466f30ca65c2 100644
--- a/drivers/clk/qcom/clk-rcg.c
+++ b/drivers/clk/qcom/clk-rcg.c
@@ -495,6 +495,57 @@ static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
}

+/*
+ * This type of clock has a glitch-free mux that switches between the output of
+ * the M/N counter and an always on clock source (XO). When clk_set_rate() is
+ * called we need to make sure that we don't switch to the M/N counter if it
+ * isn't clocking because the mux will get stuck and the clock will stop
+ * outputting a clock. This can happen if the framework isn't aware that this
+ * clock is on and so clk_set_rate() doesn't turn on the new parent. To fix
+ * this we switch the mux in the enable/disable ops and reprogram the M/N
+ * counter in the set_rate op. We also make sure to switch away from the M/N
+ * counter in set_rate if software thinks the clock is off.
+ */
+static int clk_rcg_lcc_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ const struct freq_tbl *f;
+ int ret;
+ u32 gfm = BIT(10);
+
+ f = qcom_find_freq(rcg->freq_tbl, rate);
+ if (!f)
+ return -EINVAL;
+
+ /* Switch to XO to avoid glitches */
+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
+ ret = __clk_rcg_set_rate(rcg, f);
+ /* Switch back to M/N if it's clocking */
+ if (__clk_is_enabled(hw->clk))
+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
+
+ return ret;
+}
+
+static int clk_rcg_lcc_enable(struct clk_hw *hw)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ u32 gfm = BIT(10);
+
+ /* Use M/N */
+ return regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, gfm);
+}
+
+static void clk_rcg_lcc_disable(struct clk_hw *hw)
+{
+ struct clk_rcg *rcg = to_clk_rcg(hw);
+ u32 gfm = BIT(10);
+
+ /* Use XO */
+ regmap_update_bits(rcg->clkr.regmap, rcg->ns_reg, gfm, 0);
+}
+
static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
{
struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
@@ -543,6 +594,17 @@ const struct clk_ops clk_rcg_bypass_ops = {
};
EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);

+const struct clk_ops clk_rcg_lcc_ops = {
+ .enable = clk_rcg_lcc_enable,
+ .disable = clk_rcg_lcc_disable,
+ .get_parent = clk_rcg_get_parent,
+ .set_parent = clk_rcg_set_parent,
+ .recalc_rate = clk_rcg_recalc_rate,
+ .determine_rate = clk_rcg_determine_rate,
+ .set_rate = clk_rcg_lcc_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_lcc_ops);
+
const struct clk_ops clk_dyn_rcg_ops = {
.enable = clk_enable_regmap,
.is_enabled = clk_is_enabled_regmap,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index 687e41f91d7c..d09d06ba278e 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -96,6 +96,7 @@ struct clk_rcg {

extern const struct clk_ops clk_rcg_ops;
extern const struct clk_ops clk_rcg_bypass_ops;
+extern const struct clk_ops clk_rcg_lcc_ops;

#define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)

diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index 742acfa18d63..381f27469a9c 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -243,7 +243,7 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
- if (rcg->mnd_width && f->n)
+ if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE;
ret = regmap_update_bits(rcg->clkr.regmap,
rcg->cmd_rcgr + CFG_REG, mask, cfg);
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index cbdc31dea7f4..a015bb06c09b 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -525,8 +525,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = {
{ 10800000, P_PXO, 1, 2, 5 },
{ 15060000, P_PLL8, 1, 2, 51 },
{ 24000000, P_PLL8, 4, 1, 4 },
+ { 25000000, P_PXO, 1, 0, 0 },
{ 25600000, P_PLL8, 1, 1, 15 },
- { 27000000, P_PXO, 1, 0, 0 },
{ 48000000, P_PLL8, 4, 1, 2 },
{ 51200000, P_PLL8, 1, 2, 15 },
{ }
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index c9ff27b4648b..a6d3a6745c4d 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -294,14 +294,14 @@ static struct clk_regmap_mux pcm_clk = {
};

static struct freq_tbl clk_tbl_aif_osr[] = {
- { 22050, P_PLL4, 1, 147, 20480 },
- { 32000, P_PLL4, 1, 1, 96 },
- { 44100, P_PLL4, 1, 147, 10240 },
- { 48000, P_PLL4, 1, 1, 64 },
- { 88200, P_PLL4, 1, 147, 5120 },
- { 96000, P_PLL4, 1, 1, 32 },
- { 176400, P_PLL4, 1, 147, 2560 },
- { 192000, P_PLL4, 1, 1, 16 },
+ { 2822400, P_PLL4, 1, 147, 20480 },
+ { 4096000, P_PLL4, 1, 1, 96 },
+ { 5644800, P_PLL4, 1, 147, 10240 },
+ { 6144000, P_PLL4, 1, 1, 64 },
+ { 11289600, P_PLL4, 1, 147, 5120 },
+ { 12288000, P_PLL4, 1, 1, 32 },
+ { 22579200, P_PLL4, 1, 147, 2560 },
+ { 24576000, P_PLL4, 1, 1, 16 },
{ },
};

@@ -360,7 +360,7 @@ static struct clk_branch spdif_clk = {
};

static struct freq_tbl clk_tbl_ahbix[] = {
- { 131072, P_PLL4, 1, 1, 3 },
+ { 131072000, P_PLL4, 1, 1, 3 },
{ },
};

@@ -386,13 +386,12 @@ static struct clk_rcg ahbix_clk = {
.freq_tbl = clk_tbl_ahbix,
.clkr = {
.enable_reg = 0x38,
- .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
+ .enable_mask = BIT(11),
.hw.init = &(struct clk_init_data){
.name = "ahbix",
.parent_names = lcc_pxo_pll4,
.num_parents = 2,
- .ops = &clk_rcg_ops,
- .flags = CLK_SET_RATE_GATE,
+ .ops = &clk_rcg_lcc_ops,
},
},
};
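
The clk_tbl_aif_osr replacement above is a straight x128 rescaling of the old entries
(e.g. 48000 * 128 = 6144000), consistent with the table now listing OSR clock rates
rather than raw audio sample rates; the ahbix entry is a separate fix (131072 ->
131072000, i.e. the value was apparently stored in kHz). A throwaway check using only
the numbers visible in the hunk:

#include <stdio.h>

int main(void)
{
	static const unsigned long old_rates[] = {
		22050, 32000, 44100, 48000, 88200, 96000, 176400, 192000,
	};
	static const unsigned long new_rates[] = {
		2822400, 4096000, 5644800, 6144000,
		11289600, 12288000, 22579200, 24576000,
	};
	unsigned int i;

	for (i = 0; i < sizeof(old_rates) / sizeof(old_rates[0]); i++)
		printf("%8lu * 128 = %9lu  %s\n", old_rates[i],
		       old_rates[i] * 128,
		       old_rates[i] * 128 == new_rates[i] ? "ok" : "MISMATCH");
	return 0;
}
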
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 51462e85675f..714d6ba782c8 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
VPLL_LOCK, VPLL_CON0, NULL),
};

-static void __init exynos4_core_down_clock(enum exynos4_soc soc)
+static void __init exynos4x12_core_down_clock(void)
{
unsigned int tmp;

@@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc)
__raw_writel(tmp, reg_base + PWR_CTRL1);

/*
- * Disable the clock up feature on Exynos4x12, in case it was
- * enabled by bootloader.
+ * Disable the clock up feature in case it was enabled by bootloader.
*/
- if (exynos4_soc == EXYNOS4X12)
- __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
+ __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
}

/* register exynos4 clocks */
@@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_alias(ctx, exynos4_aliases,
ARRAY_SIZE(exynos4_aliases));

- exynos4_core_down_clock(soc);
+ if (soc == EXYNOS4X12)
+ exynos4x12_core_down_clock();
exynos4_clk_sleep_init();

samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index 9a893f2fe8e9..23ce0afefca5 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -1110,16 +1110,18 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
1, 2);
clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;

- clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
+ clk = clk_register_gate(NULL, "pll_d_dsi_out", "pll_d_out0", 0,
clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
- clks[TEGRA124_CLK_PLLD_DSI] = clk;
+ clks[TEGRA124_CLK_PLL_D_DSI_OUT] = clk;

- clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
- 0, 48, periph_clk_enb_refcnt);
+ clk = tegra_clk_register_periph_gate("dsia", "pll_d_dsi_out", 0,
+ clk_base, 0, 48,
+ periph_clk_enb_refcnt);
clks[TEGRA124_CLK_DSIA] = clk;

- clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
- 0, 82, periph_clk_enb_refcnt);
+ clk = tegra_clk_register_periph_gate("dsib", "pll_d_dsi_out", 0,
+ clk_base, 0, 82,
+ periph_clk_enb_refcnt);
clks[TEGRA124_CLK_DSIB] = clk;

/* emc mux */
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index 9ddb7547cb43..7a1df61847fc 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -272,7 +272,7 @@ void __init tegra_add_of_provider(struct device_node *np)
of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);

rst_ctlr.of_node = np;
- rst_ctlr.nr_resets = clk_num * 32;
+ rst_ctlr.nr_resets = periph_banks * 32;
reset_controller_register(&rst_ctlr);
}

diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index 42f95a4326b0..9a28b7e07c71 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
return err;
}

-static int omap_aes_check_aligned(struct scatterlist *sg)
+static int omap_aes_check_aligned(struct scatterlist *sg, int total)
{
+ int len = 0;
+
while (sg) {
if (!IS_ALIGNED(sg->offset, 4))
return -1;
if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
return -1;
+
+ len += sg->length;
sg = sg_next(sg);
}
+
+ if (len != total)
+ return -1;
+
return 0;
}

@@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
dd->in_sg = req->src;
dd->out_sg = req->dst;

- if (omap_aes_check_aligned(dd->in_sg) ||
- omap_aes_check_aligned(dd->out_sg)) {
+ if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
+ omap_aes_check_aligned(dd->out_sg, dd->total)) {
if (omap_aes_copy_sgs(dd))
pr_err("Failed to copy SGs for unaligned cases\n");
dd->sgs_copied = 1;
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index d0bc123c7975..1a54205860f5 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -320,11 +320,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
u32 mask = 1 << (d->irq - gc->irq_base);

irq_gc_lock(gc);
- gc->mask_cache &= ~mask;
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
+ ct->mask_cache_priv &= ~mask;
+
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
irq_gc_unlock(gc);
}

@@ -332,11 +334,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
u32 mask = 1 << (d->irq - gc->irq_base);

irq_gc_lock(gc);
- gc->mask_cache |= mask;
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
+ ct->mask_cache_priv |= mask;
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
irq_gc_unlock(gc);
}

@@ -344,11 +348,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
u32 mask = 1 << (d->irq - gc->irq_base);

irq_gc_lock(gc);
- gc->mask_cache &= ~mask;
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
+ ct->mask_cache_priv &= ~mask;
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
irq_gc_unlock(gc);
}

@@ -356,11 +362,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct mvebu_gpio_chip *mvchip = gc->private;
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
u32 mask = 1 << (d->irq - gc->irq_base);

irq_gc_lock(gc);
- gc->mask_cache |= mask;
- writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
+ ct->mask_cache_priv |= mask;
+ writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
irq_gc_unlock(gc);
}

diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index bf17a60b40ed..1dbfba58f909 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -32,10 +32,16 @@
#include <drm/bridge/ptn3460.h>

#include "exynos_dp_core.h"
+#include "exynos_drm_fimd.h"

#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
connector)

+static inline struct exynos_drm_crtc *dp_to_crtc(struct exynos_dp_device *dp)
+{
+ return to_exynos_crtc(dp->encoder->crtc);
+}
+
static inline struct exynos_dp_device *
display_to_dp(struct exynos_drm_display *d)
{
@@ -1070,6 +1076,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
}
}

+ fimd_dp_clock_enable(dp_to_crtc(dp), true);
+
clk_prepare_enable(dp->clock);
exynos_dp_phy_init(dp);
exynos_dp_init_dp(dp);
@@ -1094,6 +1102,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
exynos_dp_phy_exit(dp);
clk_disable_unprepare(dp->clock);

+ fimd_dp_clock_enable(dp_to_crtc(dp), false);
+
if (dp->panel) {
if (drm_panel_unprepare(dp->panel))
DRM_ERROR("failed to turnoff the panel\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 33a10ce967ea..5d58f6cc0397 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -32,6 +32,7 @@
#include "exynos_drm_fbdev.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_iommu.h"
+#include "exynos_drm_fimd.h"

/*
* FIMD stands for Fully Interactive Mobile Display and
@@ -1233,6 +1234,24 @@ static int fimd_remove(struct platform_device *pdev)
return 0;
}

+void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+{
+ struct fimd_context *ctx = crtc->ctx;
+ u32 val;
+
+ /*
+ * Only Exynos 5250, 5260, 5410 and 542x require enabling the DP/MIE
+ * clock. On these SoCs the bootloader may enable it, but any
+ * power domain off/on will reset it to the disabled state.
+ */
+ if (ctx->driver_data != &exynos5_fimd_driver_data)
+ return;
+
+ val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+ writel(val, ctx->regs + DP_MIE_CLKCON);
+}
+EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
+
struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = fimd_remove,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
new file mode 100644
index 000000000000..b4fcaa568456
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
@@ -0,0 +1,15 @@
+/*
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _EXYNOS_DRM_FIMD_H_
+#define _EXYNOS_DRM_FIMD_H_
+
+extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
+
+#endif /* _EXYNOS_DRM_FIMD_H_ */
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index fa140e04d5fa..60ab1f75d58e 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -33,6 +33,7 @@ struct adv7511 {

unsigned int current_edid_segment;
uint8_t edid_buf[256];
+ bool edid_read;

wait_queue_head_t wq;
struct drm_encoder *encoder;
@@ -379,69 +380,71 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
return false;
}

-static irqreturn_t adv7511_irq_handler(int irq, void *devid)
-{
- struct adv7511 *adv7511 = devid;
-
- if (adv7511_hpd(adv7511))
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
-
- wake_up_all(&adv7511->wq);
-
- return IRQ_HANDLED;
-}
-
-static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
- unsigned int irq)
+static int adv7511_irq_process(struct adv7511 *adv7511)
{
unsigned int irq0, irq1;
- unsigned int pending;
int ret;

ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
if (ret < 0)
- return 0;
+ return ret;
+
ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
if (ret < 0)
- return 0;
+ return ret;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
+
+ if (irq0 & ADV7511_INT0_HDP)
+ drm_helper_hpd_irq_event(adv7511->encoder->dev);
+
+ if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
+ adv7511->edid_read = true;
+
+ if (adv7511->i2c_main->irq)
+ wake_up_all(&adv7511->wq);
+ }
+
+ return 0;
+}

- pending = (irq1 << 8) | irq0;
+static irqreturn_t adv7511_irq_handler(int irq, void *devid)
+{
+ struct adv7511 *adv7511 = devid;
+ int ret;

- return pending & irq;
+ ret = adv7511_irq_process(adv7511);
+ return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}

-static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
- int timeout)
+/* -----------------------------------------------------------------------------
+ * EDID retrieval
+ */
+
+static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
{
- unsigned int pending;
int ret;

if (adv7511->i2c_main->irq) {
ret = wait_event_interruptible_timeout(adv7511->wq,
- adv7511_is_interrupt_pending(adv7511, irq),
- msecs_to_jiffies(timeout));
- if (ret <= 0)
- return 0;
- pending = adv7511_is_interrupt_pending(adv7511, irq);
+ adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
- if (timeout < 25)
- timeout = 25;
- do {
- pending = adv7511_is_interrupt_pending(adv7511, irq);
- if (pending)
+ for (; timeout > 0; timeout -= 25) {
+ ret = adv7511_irq_process(adv7511);
+ if (ret < 0)
break;
+
+ if (adv7511->edid_read)
+ break;
+
msleep(25);
- timeout -= 25;
- } while (timeout >= 25);
+ }
}

- return pending;
+ return adv7511->edid_read ? 0 : -EIO;
}

-/* -----------------------------------------------------------------------------
- * EDID retrieval
- */
-
static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
size_t len)
{
@@ -463,19 +466,14 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
return ret;

if (status != 2) {
+ adv7511->edid_read = false;
regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
block);
- ret = adv7511_wait_for_interrupt(adv7511,
- ADV7511_INT0_EDID_READY |
- ADV7511_INT1_DDC_ERROR, 200);
-
- if (!(ret & ADV7511_INT0_EDID_READY))
- return -EIO;
+ ret = adv7511_wait_for_edid(adv7511, 200);
+ if (ret < 0)
+ return ret;
}

- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
-
/* Break this apart, hopefully more I2C controllers will
* support 64 byte transfers than 256 byte transfers
*/
@@ -528,7 +526,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
/* Reading the EDID only works if the device is powered */
if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+ ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
adv7511->current_edid_segment = -1;
@@ -563,7 +563,9 @@ static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
adv7511->current_edid_segment = -1;

regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
+ ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
/*
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 5c66b568bb81..ec4d932f8be4 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1042,7 +1042,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
- s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
+ s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
s->ecochk = I915_READ(GAM_ECOCHK);
@@ -1124,7 +1124,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
+ I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
I915_WRITE(GAM_ECOCHK, s->ecochk);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ede5bbbd8a08..07320cb32611 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3718,14 +3718,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
I915_WRITE16(IMR, dev_priv->irq_mask);

I915_WRITE16(IER,
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT);
POSTING_READ16(IER);

@@ -3887,14 +3885,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
- I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
+ I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

enable_mask =
I915_ASLE_INTERRUPT |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
- I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;

if (I915_HAS_HOTPLUG(dev)) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 33b3d0a24071..f536ff2628fd 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1740,6 +1740,7 @@ enum punit_power_well {
#define GMBUS_CYCLE_INDEX (2<<25)
#define GMBUS_CYCLE_STOP (4<<25)
#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_BYTE_COUNT_MAX 256U
#define GMBUS_SLAVE_INDEX_SHIFT 8
#define GMBUS_SLAVE_ADDR_SHIFT 1
#define GMBUS_SLAVE_READ (1<<0)
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b31088a551f2..56e437e31580 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
}

static int
-gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
- u32 gmbus1_index)
+gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
+ unsigned short addr, u8 *buf, unsigned int len,
+ u32 gmbus1_index)
{
int reg_offset = dev_priv->gpio_mmio_base;
- u16 len = msg->len;
- u8 *buf = msg->buf;

I915_WRITE(GMBUS1 + reg_offset,
gmbus1_index |
GMBUS_CYCLE_WAIT |
(len << GMBUS_BYTE_COUNT_SHIFT) |
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
}

static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
+ u32 gmbus1_index)
{
- int reg_offset = dev_priv->gpio_mmio_base;
- u16 len = msg->len;
u8 *buf = msg->buf;
+ unsigned int rx_size = msg->len;
+ unsigned int len;
+ int ret;
+
+ do {
+ len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
+
+ ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
+ buf, len, gmbus1_index);
+ if (ret)
+ return ret;
+
+ rx_size -= len;
+ buf += len;
+ } while (rx_size != 0);
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
+ unsigned short addr, u8 *buf, unsigned int len)
+{
+ int reg_offset = dev_priv->gpio_mmio_base;
+ unsigned int chunk_size = len;
u32 val, loop;

val = loop = 0;
@@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
I915_WRITE(GMBUS3 + reg_offset, val);
I915_WRITE(GMBUS1 + reg_offset,
GMBUS_CYCLE_WAIT |
- (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
- (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
+ (addr << GMBUS_SLAVE_ADDR_SHIFT) |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
if (ret)
return ret;
}
+
+ return 0;
+}
+
+static int
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
+{
+ u8 *buf = msg->buf;
+ unsigned int tx_size = msg->len;
+ unsigned int len;
+ int ret;
+
+ do {
+ len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
+
+ ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
+ if (ret)
+ return ret;
+
+ buf += len;
+ tx_size -= len;
+ } while (tx_size != 0);
+
return 0;
}
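
The intel_i2c.c rework above exists because the GMBUS1 byte-count field cannot describe arbitrarily long transfers; reads and writes are therefore split into chunks of at most GMBUS_BYTE_COUNT_MAX (256) bytes, with the per-chunk register programming factored into *_chunk() helpers. The splitting loop, reduced to a sketch (xfer_chunk() stands in for the real hardware access):

/* Sketch of the chunking pattern: move at most GMBUS_BYTE_COUNT_MAX bytes
 * per pass, advancing the buffer and remaining count until done. */
static int xfer_all(u8 *buf, unsigned int remaining)
{
	unsigned int len;
	int ret;

	do {
		len = min(remaining, (unsigned int)GMBUS_BYTE_COUNT_MAX);
		ret = xfer_chunk(buf, len);	/* hypothetical helper */
		if (ret)
			return ret;
		buf += len;
		remaining -= len;
	} while (remaining != 0);

	return 0;
}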

diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 86807ee91bd1..9bd56116fd5a 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;

args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
@@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
misc |= ATOM_COMPOSITESYNC;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
misc |= ATOM_INTERLACE;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
misc |= ATOM_DOUBLE_CLOCK_MODE;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;

args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
args.ucCRTC = radeon_crtc->crtc_id;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 9c4786759f16..7fe5590b328b 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -459,6 +459,10 @@
#define USB_DEVICE_ID_UGCI_FLYING 0x0020
#define USB_DEVICE_ID_UGCI_FIGHTING 0x0030

+#define USB_VENDOR_ID_HP 0x03f0
+#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE 0x0a4a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
+
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e

diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index a82127753461..4e3ae9fbb9b5 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -78,6 +78,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+ { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 2978f5ee8d2a..00bc30e0db7f 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -135,7 +135,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
GFP_KERNEL);
if (!open_info) {
err = -ENOMEM;
- goto error0;
+ goto error_gpadl;
}

init_completion(&open_info->waitevent);
@@ -151,7 +151,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,

if (userdatalen > MAX_USER_DEFINED_BYTES) {
err = -EINVAL;
- goto error0;
+ goto error_gpadl;
}

if (userdatalen)
@@ -195,6 +195,9 @@ error1:
list_del(&open_info->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

+error_gpadl:
+ vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
+
error0:
free_pages((unsigned long)out,
get_order(send_ringbuffer_size + recv_ringbuffer_size));
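
The vmbus_open() fix above adds an error_gpadl label so that failures detected after the ring-buffer GPADL has been set up tear it down before the pages are freed, instead of leaking the GPADL handle. The general shape of that unwind, with illustrative helper names:

/* Sketch of error unwinding in reverse order of setup. */
static int open_channel(struct chan *c)
{
	int err;

	err = alloc_ring_pages(c);		/* hypothetical */
	if (err)
		return err;

	err = establish_gpadl(c);		/* hypothetical */
	if (err)
		goto err_pages;

	err = send_open_message(c);		/* hypothetical */
	if (err)
		goto err_gpadl;

	return 0;

err_gpadl:
	teardown_gpadl(c);
err_pages:
	free_ring_pages(c);
	return err;
}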
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 5f96b1b3e3a5..019d5426fe52 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -833,7 +833,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
clk_disable(i2c->clk);
spin_unlock_irqrestore(&i2c->lock, flags);

- return ret;
+ return ret < 0 ? ret : num;
}

static u32 rk3x_i2c_func(struct i2c_adapter *adap)
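
The one-line rk3x change reflects the i2c core's convention: a master_xfer() implementation reports success by returning the number of messages it completed, not 0, and reports failure with a negative errno. In sketch form:

/* Sketch: adapt an internal transfer routine to the master_xfer contract. */
static int foo_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	int ret = foo_do_transfer(adap, msgs, num);	/* hypothetical */

	return ret < 0 ? ret : num;
}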
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index edf274cabe81..8143162b374d 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -596,6 +596,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
adap->bus_recovery_info->set_scl(adap, 1);
return i2c_generic_recovery(adap);
}
+EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);

int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
{
@@ -610,6 +611,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)

return ret;
}
+EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);

int i2c_recover_bus(struct i2c_adapter *adap)
{
@@ -619,6 +621,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
return adap->bus_recovery_info->recover_bus(adap);
}
+EXPORT_SYMBOL_GPL(i2c_recover_bus);

static int i2c_device_probe(struct device *dev)
{
@@ -1410,6 +1413,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)

dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name);

+ pm_runtime_no_callbacks(&adap->dev);
+
#ifdef CONFIG_I2C_COMPAT
res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev,
adap->dev.parent);
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 593f7ca9adc7..06cc1ff088f1 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -32,8 +32,9 @@ struct i2c_mux_priv {
struct i2c_algorithm algo;

struct i2c_adapter *parent;
- void *mux_priv; /* the mux chip/device */
- u32 chan_id; /* the channel id */
+ struct device *mux_dev;
+ void *mux_priv;
+ u32 chan_id;

int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
@@ -119,6 +120,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,

/* Set up private adapter data */
priv->parent = parent;
+ priv->mux_dev = mux_dev;
priv->mux_priv = mux_priv;
priv->chan_id = chan_id;
priv->select = select;
@@ -203,7 +205,7 @@ void i2c_del_mux_adapter(struct i2c_adapter *adap)
char symlink_name[20];

snprintf(symlink_name, sizeof(symlink_name), "channel-%u", priv->chan_id);
- sysfs_remove_link(&adap->dev.parent->kobj, symlink_name);
+ sysfs_remove_link(&priv->mux_dev->kobj, symlink_name);

sysfs_remove_link(&priv->adap.dev.kobj, "mux_device");
i2c_del_adapter(adap);
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index b0e58522780d..44d1d7920202 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -218,18 +218,10 @@ static struct cpuidle_state byt_cstates[] = {
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
- .name = "C1E-BYT",
- .desc = "MWAIT 0x01",
- .flags = MWAIT2flg(0x01),
- .exit_latency = 15,
- .target_residency = 30,
- .enter = &intel_idle,
- .enter_freeze = intel_idle_freeze, },
- {
.name = "C6N-BYT",
.desc = "MWAIT 0x58",
.flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 40,
+ .exit_latency = 300,
.target_residency = 275,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
@@ -237,7 +229,7 @@ static struct cpuidle_state byt_cstates[] = {
.name = "C6S-BYT",
.desc = "MWAIT 0x52",
.flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 140,
+ .exit_latency = 500,
.target_residency = 560,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
@@ -246,7 +238,7 @@ static struct cpuidle_state byt_cstates[] = {
.desc = "MWAIT 0x60",
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
.exit_latency = 1200,
- .target_residency = 1500,
+ .target_residency = 4000,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 8c014b5dab4c..38acb3cfc545 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -99,12 +99,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
if (dmasync)
dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

+ if (!size)
+ return ERR_PTR(-EINVAL);
+
/*
* If the combination of the addr and size requested for this memory
* region causes an integer overflow, return error.
*/
- if ((PAGE_ALIGN(addr + size) <= size) ||
- (PAGE_ALIGN(addr + size) <= addr))
+ if (((addr + size) < addr) ||
+ PAGE_ALIGN(addr + size) < (addr + size))
return ERR_PTR(-EINVAL);

if (!can_do_mlock())
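
The ib_umem_get() change rejects zero-length registrations and rewrites the wraparound test: checking addr + size < addr catches overflow of the sum itself, and comparing PAGE_ALIGN(addr + size) against the unaligned sum catches the case where rounding up to the next page boundary wraps past the end of the address space. The same guard in isolation (types illustrative):

/* Sketch of an overflow-safe check for a user-supplied (addr, size) range. */
static bool range_is_sane(unsigned long addr, size_t size)
{
	if (size == 0)
		return false;
	if (addr + size < addr)				/* sum wrapped */
		return false;
	if (PAGE_ALIGN(addr + size) < addr + size)	/* rounding wrapped */
		return false;
	return true;
}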
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ed2bd6701f9b..fbde33a5228c 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -2605,8 +2605,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,

memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);

- *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
- wr->wr.ud.hlen);
+ *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
*lso_seg_len = halign;
return 0;
}
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 20e859a6f1a6..76eb57b31a59 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn,
if (scsi_prot_sg_count(sc)) {
prot_buf->buf = scsi_prot_sglist(sc);
prot_buf->size = scsi_prot_sg_count(sc);
- prot_buf->data_len = data_buf->data_len >>
- ilog2(sc->device->sector_size) * 8;
+ prot_buf->data_len = (data_buf->data_len >>
+ ilog2(sc->device->sector_size)) * 8;
}

if (hdr->flags & ISCSI_FLAG_CMD_READ) {
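
The iser change is an operator-precedence fix: multiplicative operators bind more tightly than shifts in C, so the original expression shifted data_len right by ilog2(sector_size) * 8 bits (72 for 512-byte sectors) instead of converting bytes to sectors and then scaling by 8. A tiny standalone illustration of the grouping:

#include <stdio.h>

int main(void)
{
	/* '*' binds tighter than '>>'. */
	printf("%d\n", 16 >> 2 * 2);	/* parsed as 16 >> (2 * 2) == 1 */
	printf("%d\n", (16 >> 2) * 2);	/* intended grouping       == 8 */
	return 0;
}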
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 075b19cc78e8..147029adb885 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -222,7 +222,7 @@ fail:
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct ib_device *ib_dev = isert_conn->conn_device->ib_device;
struct iser_rx_desc *rx_desc;
int i;

@@ -719,8 +719,8 @@ out:
static void
isert_connect_release(struct isert_conn *isert_conn)
{
- struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_device *device = isert_conn->conn_device;
+ struct ib_device *ib_dev = device->ib_device;

isert_dbg("conn %p\n", isert_conn);

@@ -728,7 +728,8 @@ isert_connect_release(struct isert_conn *isert_conn)
isert_conn_free_fastreg_pool(isert_conn);

isert_free_rx_descriptors(isert_conn);
- rdma_destroy_id(isert_conn->conn_cm_id);
+ if (isert_conn->conn_cm_id)
+ rdma_destroy_id(isert_conn->conn_cm_id);

if (isert_conn->conn_qp) {
struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
@@ -878,12 +879,15 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
return 0;
}

-static void
+static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;

+ isert_conn->conn_cm_id = NULL;
isert_put_conn(isert_conn);
+
+ return -1;
}

static int
@@ -912,7 +916,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
case RDMA_CM_EVENT_REJECTED: /* FALLTHRU */
case RDMA_CM_EVENT_UNREACHABLE: /* FALLTHRU */
case RDMA_CM_EVENT_CONNECT_ERROR:
- isert_connect_error(cma_id);
+ ret = isert_connect_error(cma_id);
break;
default:
isert_err("Unhandled RDMA CMA event: %d\n", event->event);
@@ -1861,11 +1865,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
spin_unlock_bh(&cmd->istate_lock);

- if (ret)
+ if (ret) {
+ target_put_sess_cmd(se_cmd->se_sess, se_cmd);
transport_send_check_condition_and_sense(se_cmd,
se_cmd->pi_err, 0);
- else
+ } else {
target_execute_cmd(se_cmd);
+ }
}

static void
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 27bcdbc950c9..ea6cb64dfb28 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1159,13 +1159,14 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
bool report_buttons)
{
struct alps_data *priv = psmouse->private;
- struct input_dev *dev;
+ struct input_dev *dev, *dev2 = NULL;

/* Figure out which device to use to report the bare packet */
if (priv->proto_version == ALPS_PROTO_V2 &&
(priv->flags & ALPS_DUALPOINT)) {
/* On V2 devices the DualPoint Stick reports bare packets */
dev = priv->dev2;
+ dev2 = psmouse->dev;
} else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) {
/* Register dev3 mouse if we received PS/2 packet first time */
if (!IS_ERR(priv->dev3))
@@ -1177,7 +1178,7 @@ static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
}

if (report_buttons)
- alps_report_buttons(dev, NULL,
+ alps_report_buttons(dev, dev2,
packet[0] & 1, packet[0] & 2, packet[0] & 4);

input_report_rel(dev, REL_X,
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 6e22682c8255..991dc6b20a58 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
}

/*
+ * This writes the reg_07 value again to the hardware at the end of every
+ * set_rate call because the register loses its value. reg_07 allows setting
+ * absolute mode on v4 hardware
+ */
+static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
+ unsigned int rate)
+{
+ struct elantech_data *etd = psmouse->private;
+
+ etd->original_set_rate(psmouse, rate);
+ if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
+ psmouse_err(psmouse, "restoring reg_07 failed\n");
+}
+
+/*
* Put the touchpad into absolute mode
*/
static int elantech_set_absolute_mode(struct psmouse *psmouse)
@@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
* Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
* Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
+ * Asus TP500LN 0x381f17 10, 14, 0e clickpad
+ * Asus X750JN 0x381f17 10, 14, 0e clickpad
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
@@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse)
goto init_fail;
}

+ if (etd->fw_version == 0x381f17) {
+ etd->original_set_rate = psmouse->set_rate;
+ psmouse->set_rate = elantech_set_rate_restore_reg_07;
+ }
+
if (elantech_set_input_params(psmouse)) {
psmouse_err(psmouse, "failed to query touchpad range.\n");
goto init_fail;
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 6f3afec02f03..f965d1569cc3 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -142,6 +142,7 @@ struct elantech_data {
struct finger_pos mt[ETP_MAX_FINGERS];
unsigned char parity[256];
int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
+ void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
};

#ifdef CONFIG_MOUSE_PS2_ELANTECH
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 713a96237a80..414739295d04 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -925,11 +925,10 @@ static int crypt_convert(struct crypt_config *cc,

switch (r) {
/* async */
+ case -EINPROGRESS:
case -EBUSY:
wait_for_completion(&ctx->restart);
reinit_completion(&ctx->restart);
- /* fall through*/
- case -EINPROGRESS:
ctx->req = NULL;
ctx->cc_sector++;
continue;
@@ -1346,10 +1345,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
struct crypt_config *cc = io->cc;

- if (error == -EINPROGRESS) {
- complete(&ctx->restart);
+ if (error == -EINPROGRESS)
return;
- }

if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
@@ -1360,12 +1357,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);

if (!atomic_dec_and_test(&ctx->cc_pending))
- return;
+ goto done;

if (bio_data_dir(io->base_bio) == READ)
kcryptd_crypt_read_done(io);
else
kcryptd_crypt_write_io_submit(io, 1);
+done:
+ if (!completion_done(&ctx->restart))
+ complete(&ctx->restart);
}

static void kcryptd_crypt(struct work_struct *work)
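
The dm-crypt hunks change how asynchronous crypto completions are handled: -EINPROGRESS and -EBUSY are now treated alike in crypt_convert() (both mean the request was accepted and the caller must wait), and kcryptd_async_done() signals ctx->restart once it has finished its own bookkeeping. Reduced to a sketch of the submit side, with hypothetical names:

/* Sketch (not dm-crypt itself): submit an async request whose completion
 * handler signals ctx->restart, and wait when the API reports it was queued. */
static int submit_and_wait(struct my_ctx *ctx)
{
	int ret = submit_async_request(ctx);	/* hypothetical */

	switch (ret) {
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&ctx->restart);
		reinit_completion(&ctx->restart);
		return 0;
	case 0:
		return 0;	/* completed synchronously */
	default:
		return ret;	/* hard error */
	}
}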
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 717daad71fb1..e6178787ce3d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -249,6 +249,7 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
const int rw = bio_data_dir(bio);
struct mddev *mddev = q->queuedata;
unsigned int sectors;
+ int cpu;

if (mddev == NULL || mddev->pers == NULL
|| !mddev->ready) {
@@ -284,7 +285,10 @@ static void md_make_request(struct request_queue *q, struct bio *bio)
sectors = bio_sectors(bio);
mddev->pers->make_request(mddev, bio);

- generic_start_io_acct(rw, sectors, &mddev->gendisk->part0);
+ cpu = part_stat_lock();
+ part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
+ part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
+ part_stat_unlock();

if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
wake_up(&mddev->sb_wait);
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 3ed9f42ddca6..3b5d7f704aa3 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -313,7 +313,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,

/*
* remaps the bio to the target device. we separate two flows.
- * power 2 flow and a general flow for the sake of perfromance
+ * power 2 flow and a general flow for the sake of performance
*/
static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
sector_t sector, sector_t *sector_offset)
@@ -524,6 +524,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}

+ sector = bio->bi_iter.bi_sector;
zone = find_zone(mddev->private, &sector);
tmp_dev = map_sector(mddev, zone, sector, &sector);
split->bi_bdev = tmp_dev->bdev;
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index 77c78de4f5bf..7020659f23c2 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -146,7 +146,7 @@ static int img_ir_remove(struct platform_device *pdev)
{
struct img_ir_priv *priv = platform_get_drvdata(pdev);

- free_irq(priv->irq, img_ir_isr);
+ free_irq(priv->irq, priv);
img_ir_remove_hw(priv);
img_ir_remove_raw(priv);

diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 65a326c5128f..749ad5603c9e 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -240,6 +240,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
if (mutex_lock_interruptible(&dev->v4l_lock))
return -ERESTARTSYS;

+ /*
+ * Once URBs are cancelled, the URB complete handler
+ * won't be running. This is required to safely release the
+ * current buffer (dev->isoc_ctl.buf).
+ */
stk1160_cancel_isoc(dev);

/*
@@ -620,8 +625,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
stk1160_info("buffer [%p/%d] aborted\n",
buf, buf->vb.v4l2_buf.index);
}
- /* It's important to clear current buffer */
- dev->isoc_ctl.buf = NULL;
+
+ /* It's important to release the current buffer */
+ if (dev->isoc_ctl.buf) {
+ buf = dev->isoc_ctl.buf;
+ dev->isoc_ctl.buf = NULL;
+
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ stk1160_info("buffer [%p/%d] aborted\n",
+ buf, buf->vb.v4l2_buf.index);
+ }
spin_unlock_irqrestore(&dev->buf_lock, flags);
}

diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index fc145d202c46..922a750640e8 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)

if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
if (msb->data_dir == READ) {
- for (cnt = 0; cnt < msb->current_seg; cnt++)
+ for (cnt = 0; cnt < msb->current_seg; cnt++) {
t_len += msb->req_sg[cnt].length
/ msb->page_size;

@@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
t_len += msb->current_page - 1;

t_len *= msb->page_size;
+ }
}
} else
t_len = blk_rq_bytes(msb->block_req);
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 2a87f69be53d..1aed3b7b8d9b 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -128,7 +128,7 @@ static int mfd_add_device(struct device *parent, int id,
int platform_id;
int r;

- if (id < 0)
+ if (id == PLATFORM_DEVID_AUTO)
platform_id = id;
else
platform_id = id + cell->id;
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index e8a4218b5726..459ed1b601db 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -930,7 +930,9 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
return PTR_ERR(host->clk_sample);
}

- host->reset = devm_reset_control_get(&pdev->dev, "ahb");
+ host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
+ if (PTR_ERR(host->reset) == -EPROBE_DEFER)
+ return PTR_ERR(host->reset);

ret = clk_prepare_enable(host->clk_ahb);
if (ret) {
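
devm_reset_control_get_optional() is used above because the "ahb" reset line exists only on some SoC variants; the one error that must abort probing is -EPROBE_DEFER, which means the reset controller is present but has not bound yet. As a sketch:

/* Sketch: request an optional reset line; only defer-probe is fatal. */
static int request_optional_reset(struct platform_device *pdev,
				  struct reset_control **rst)
{
	*rst = devm_reset_control_get_optional(&pdev->dev, "ahb");
	if (PTR_ERR(*rst) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	return 0;	/* other errors are treated as "no reset on this SoC" */
}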
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index a31c3573d386..dba7e1c19dd7 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -1073,8 +1073,6 @@ EXPORT_SYMBOL(tmio_mmc_host_alloc);
void tmio_mmc_host_free(struct tmio_mmc_host *host)
{
mmc_free_host(host->mmc);
-
- host->mmc = NULL;
}
EXPORT_SYMBOL(tmio_mmc_host_free);

diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 9d2e16f3150a..b5e154856994 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -410,7 +410,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
second_is_newer = !second_is_newer;
} else {
dbg_bld("PEB %d CRC is OK", pnum);
- bitflips = !!err;
+ bitflips |= !!err;
}
mutex_unlock(&ubi->buf_mutex);

diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index d647e504f9b1..d16fccf79179 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -455,7 +455,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
/* Validate the request */
err = -EINVAL;
if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
- req.bytes < 0 || req.lnum >= vol->usable_leb_size)
+ req.bytes < 0 || req.bytes > vol->usable_leb_size)
break;

err = get_exclusive(desc);
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 16e34b37d134..8c9a710def99 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1419,7 +1419,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
* during re-size.
*/
ubi_move_aeb_to_list(av, aeb, &ai->erase);
- vol->eba_tbl[aeb->lnum] = aeb->pnum;
+ else
+ vol->eba_tbl[aeb->lnum] = aeb->pnum;
}
}

diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 8f7bde6a85d6..0bd92d816391 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1002,7 +1002,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
int shutdown)
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
- int vol_id = -1, uninitialized_var(lnum);
+ int vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
int anchor = wrk->anchor;
#endif
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 81d41539fcba..77bf1337d179 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -2165,7 +2165,7 @@ static void macb_configure_caps(struct macb *bp)
}
}

- if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
+ if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) >= 0x2)
bp->caps |= MACB_CAPS_MACB_IS_GEM;

if (macb_is_gem(bp)) {
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 7f997d36948f..a71c446631d1 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
+static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
+ struct e1000_rx_ring *rx_ring,
+ int cleaned_count)
+{
+}
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int cleaned_count);
@@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
msleep(1);
/* e1000_down has a dependency on max_frame_size */
hw->max_frame_size = max_frame;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ /* prevent buffers from being reallocated */
+ adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
e1000_down(adapter);
+ }

/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index af829c578400..7ace07dad6a3 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!np) {
dev_err(&pdev->dev, "missing phy-handle\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_netdev;
}
of_property_read_u32(np, "reg", &pep->phy_addr);
pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->smi_bus = mdiobus_alloc();
if (pep->smi_bus == NULL) {
err = -ENOMEM;
- goto err_base;
+ goto err_netdev;
}
pep->smi_bus->priv = pep;
pep->smi_bus->name = "pxa168_eth smi";
@@ -1551,13 +1552,10 @@ err_mdiobus:
mdiobus_unregister(pep->smi_bus);
err_free_mdio:
mdiobus_free(pep->smi_bus);
-err_base:
- iounmap(pep->base);
err_netdev:
free_netdev(dev);
err_clk:
- clk_disable(clk);
- clk_put(clk);
+ clk_disable_unprepare(clk);
return err;
}

@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
if (pep->phy)
phy_disconnect(pep->phy);
if (pep->clk) {
- clk_disable(pep->clk);
- clk_put(pep->clk);
- pep->clk = NULL;
+ clk_disable_unprepare(pep->clk);
}

- iounmap(pep->base);
- pep->base = NULL;
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index a7b58ba8492b..3dccf01837db 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -981,20 +981,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
struct mlx4_en_priv *priv = netdev_priv(dev);

/* check if requested function is supported by the device */
- if ((hfunc == ETH_RSS_HASH_TOP &&
- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
- (hfunc == ETH_RSS_HASH_XOR &&
- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
- return -EINVAL;
+ if (hfunc == ETH_RSS_HASH_TOP) {
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
+ return -EINVAL;
+ if (!(dev->features & NETIF_F_RXHASH))
+ en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+ return 0;
+ } else if (hfunc == ETH_RSS_HASH_XOR) {
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
+ return -EINVAL;
+ if (dev->features & NETIF_F_RXHASH)
+ en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+ return 0;
+ }

- priv->rss_hash_fn = hfunc;
- if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
- en_warn(priv,
- "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
- if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
- en_warn(priv,
- "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
- return 0;
+ return -EINVAL;
}

static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
@@ -1068,6 +1069,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
priv->prof->rss_rings = rss_rings;
if (key)
memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->rss_hash_fn = hfunc;

if (port_up) {
err = mlx4_en_start_port(dev);
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index af034dba9bd6..9d15566521a7 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1716,6 +1716,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
{
/* note: a 0-length skb is used as an error indication */
if (skb->len > 0) {
+ skb_checksum_complete_unset(skb);
#ifdef CONFIG_PPP_MULTILINK
/* XXX do channel-level decompression here */
if (PPP_PROTO(skb) == PPP_MP)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 90a714c189a8..23806c243a53 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
+ {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
@@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
+ {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
{RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
{RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
index c93fae95baac..5fbd2230f372 100644
--- a/drivers/net/wireless/ti/wl18xx/debugfs.c
+++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
@@ -139,7 +139,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");

-WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
+WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);

WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
index 0f2cfb0d2a9e..bf14676e6515 100644
--- a/drivers/net/wireless/ti/wlcore/debugfs.h
+++ b/drivers/net/wireless/ti/wlcore/debugfs.h
@@ -26,8 +26,8 @@

#include "wlcore.h"

-int wl1271_format_buffer(char __user *userbuf, size_t count,
- loff_t *ppos, char *fmt, ...);
+__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
+ loff_t *ppos, char *fmt, ...);

int wl1271_debugfs_init(struct wl1271 *wl);
void wl1271_debugfs_exit(struct wl1271 *wl);
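
Annotating wl1271_format_buffer() with __printf(4, 5) tells the compiler that the fourth argument is a printf-style format string and that the variadic arguments begin at the fifth, so format/argument mismatches (such as the "%u" used for an array, which the wl18xx hunk above removes) become compile-time warnings. The kernel macro is roughly a wrapper around the GCC format attribute:

/* Paraphrased from include/linux/compiler-gcc.h:
 *   #define __printf(a, b)  __attribute__((format(printf, a, b)))
 * Usage sketch with hypothetical names: */
__printf(2, 3)
static int sink_printf(struct log_sink *sink, const char *fmt, ...);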
diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
index eb886932d972..7b53a5c84041 100644
--- a/drivers/nfc/st21nfcb/i2c.c
+++ b/drivers/nfc/st21nfcb/i2c.c
@@ -109,7 +109,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
return phy->ndlc->hard_fault;

r = i2c_master_send(client, skb->data, skb->len);
- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ if (r < 0) { /* Retry, chip was in standby */
usleep_range(1000, 4000);
r = i2c_master_send(client, skb->data, skb->len);
}
@@ -148,7 +148,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
struct i2c_client *client = phy->i2c_dev;

r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
- if (r == -EREMOTEIO) { /* Retry, chip was in standby */
+ if (r < 0) { /* Retry, chip was in standby */
usleep_range(1000, 4000);
r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
}
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 15c0fab2bfa1..bceb30b539f3 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -1026,9 +1026,9 @@ static int compal_probe(struct platform_device *pdev)
if (err)
return err;

- hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
- "compal", data,
- compal_hwmon_groups);
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "compal", data,
+ compal_hwmon_groups);
if (IS_ERR(hwmon_dev)) {
err = PTR_ERR(hwmon_dev);
goto remove;
@@ -1036,7 +1036,9 @@ static int compal_probe(struct platform_device *pdev)

/* Power supply */
initialize_power_supply_data(data);
- power_supply_register(&compal_device->dev, &data->psy);
+ err = power_supply_register(&compal_device->dev, &data->psy);
+ if (err < 0)
+ goto remove;

platform_set_drvdata(pdev, data);

diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
index 9d694605cdb7..96b15e003f3f 100644
--- a/drivers/power/ipaq_micro_battery.c
+++ b/drivers/power/ipaq_micro_battery.c
@@ -226,6 +226,7 @@ static struct power_supply micro_ac_power = {
static int micro_batt_probe(struct platform_device *pdev)
{
struct micro_battery *mb;
+ int ret;

mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
if (!mb)
@@ -233,14 +234,30 @@ static int micro_batt_probe(struct platform_device *pdev)

mb->micro = dev_get_drvdata(pdev->dev.parent);
mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
+ if (!mb->wq)
+ return -ENOMEM;
+
INIT_DELAYED_WORK(&mb->update, micro_battery_work);
platform_set_drvdata(pdev, mb);
queue_delayed_work(mb->wq, &mb->update, 1);
- power_supply_register(&pdev->dev, &micro_batt_power);
- power_supply_register(&pdev->dev, &micro_ac_power);
+
+ ret = power_supply_register(&pdev->dev, &micro_batt_power);
+ if (ret < 0)
+ goto batt_err;
+
+ ret = power_supply_register(&pdev->dev, &micro_ac_power);
+ if (ret < 0)
+ goto ac_err;

dev_info(&pdev->dev, "iPAQ micro battery driver\n");
return 0;
+
+ac_err:
+ power_supply_unregister(&micro_ac_power);
+batt_err:
+ cancel_delayed_work_sync(&mb->update);
+ destroy_workqueue(mb->wq);
+ return ret;
}

static int micro_batt_remove(struct platform_device *pdev)
@@ -251,6 +268,7 @@ static int micro_batt_remove(struct platform_device *pdev)
power_supply_unregister(&micro_ac_power);
power_supply_unregister(&micro_batt_power);
cancel_delayed_work_sync(&mb->update);
+ destroy_workqueue(mb->wq);

return 0;
}
diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
index 21fc233c7d61..176dab2e4c16 100644
--- a/drivers/power/lp8788-charger.c
+++ b/drivers/power/lp8788-charger.c
@@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
pchg->battery.get_property = lp8788_battery_get_property;

- if (power_supply_register(&pdev->dev, &pchg->battery))
+ if (power_supply_register(&pdev->dev, &pchg->battery)) {
+ power_supply_unregister(&pchg->charger);
return -EPERM;
+ }

return 0;
}
diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
index 7ef445a6cfa6..cf907609ec49 100644
--- a/drivers/power/twl4030_madc_battery.c
+++ b/drivers/power/twl4030_madc_battery.c
@@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
{
struct twl4030_madc_battery *twl4030_madc_bat;
struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
+ int ret = 0;

twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
if (!twl4030_madc_bat)
@@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)

twl4030_madc_bat->pdata = pdata;
platform_set_drvdata(pdev, twl4030_madc_bat);
- power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
+ ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
+ if (ret < 0)
+ kfree(twl4030_madc_bat);

- return 0;
+ return ret;
}

static int twl4030_madc_battery_remove(struct platform_device *pdev)
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 675b5e7aba94..5a0800d19970 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1584,11 +1584,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance,
fp_possible = io_info.fpOkForIo;
}

- /* Use smp_processor_id() for now until cmd->request->cpu is CPU
+ /* Use raw_smp_processor_id() for now until cmd->request->cpu is CPU
id by default, not CPU group id, otherwise all MSI-X queues won't
be utilized */
cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ?
- smp_processor_id() % instance->msix_vectors : 0;
+ raw_smp_processor_id() % instance->msix_vectors : 0;

if (fp_possible) {
megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp,
@@ -1693,7 +1693,10 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
<< MR_RAID_CTX_RAID_FLAGS_IO_SUB_TYPE_SHIFT;
cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle;
cmd->request_desc->SCSIIO.MSIxIndex =
- instance->msix_vectors ? smp_processor_id() % instance->msix_vectors : 0;
+ instance->msix_vectors ?
+ raw_smp_processor_id() %
+ instance->msix_vectors :
+ 0;
os_timeout_value = scmd->request->timeout / HZ;

if (instance->secure_jbod_support &&
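
The megaraid change switches to raw_smp_processor_id() because plain smp_processor_id() is only valid in non-preemptible context; with CONFIG_DEBUG_PREEMPT it warns when the caller can migrate between CPUs. Here the CPU number is only a hint for spreading work across MSI-X queues, so a possibly stale value is acceptable; code that genuinely needs to stay on one CPU would use get_cpu()/put_cpu() instead. In sketch form:

/* Sketch: pick a queue using the CPU number purely as a spreading hint. */
static unsigned int pick_msix_queue(unsigned int nr_queues)
{
	return nr_queues ? raw_smp_processor_id() % nr_queues : 0;
}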
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 2d5ab6d969ec..454536c49315 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
static int mvs_task_prep_ata(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
- struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct asd_sas_port *sas_port = dev->port;
- struct sas_phy *sphy = dev->phy;
- struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct mvs_slot_info *slot;
void *buf_prd;
u32 tag = tei->tag, hdr_tag;
@@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
del_q = TXQ_MODE_I | tag |
(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
- (MVS_PHY_ID << TXQ_PHY_SHIFT) |
+ ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);

diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 6b78476d04bb..3290a3ed5b31 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -3100,6 +3100,7 @@ static void scsi_disk_release(struct device *dev)
ida_remove(&sd_index_ida, sdkp->index);
spin_unlock(&sd_index_lock);

+ blk_integrity_unregister(disk);
disk->private_data = NULL;
put_disk(disk);
put_device(&sdkp->device->sdev_gendev);
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 14c7d42a11c2..5c06d292b94c 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)

disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;

- if (!sdkp)
+ if (!sdkp->ATO)
return;

if (type == SD_DIF_TYPE3_PROTECTION)
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index efc6e446b6c8..bf8c5c1e254e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -746,21 +746,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
if (bounce_sgl[j].length == PAGE_SIZE) {
/* full..move to next entry */
sg_kunmap_atomic(bounce_addr);
+ bounce_addr = 0;
j++;
+ }

- /* if we need to use another bounce buffer */
- if (srclen || i != orig_sgl_count - 1)
- bounce_addr = sg_kmap_atomic(bounce_sgl,j);
+ /* if we need to use another bounce buffer */
+ if (srclen && bounce_addr == 0)
+ bounce_addr = sg_kmap_atomic(bounce_sgl, j);

- } else if (srclen == 0 && i == orig_sgl_count - 1) {
- /* unmap the last bounce that is < PAGE_SIZE */
- sg_kunmap_atomic(bounce_addr);
- }
}

sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
}

+ if (bounce_addr)
+ sg_kunmap_atomic(bounce_addr);
+
local_irq_restore(flags);

return total_copied;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 6fea4af51c41..aea3a67e5ce1 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -370,8 +370,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
if (spi_imx->dma_is_inited) {
dma = readl(spi_imx->base + MX51_ECSPI_DMA);

- spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
- spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
@@ -868,6 +866,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
master->max_dma_len = MAX_SDMA_BD_BYTES;
spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
SPI_MASTER_MUST_TX;
+ spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
spi_imx->dma_is_inited = 1;

return 0;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 4eb7a980e670..7bf51860fd08 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -245,7 +245,10 @@ static int spidev_message(struct spidev_data *spidev,
k_tmp->len = u_tmp->len;

total += k_tmp->len;
- if (total > bufsiz) {
+ /* Check total length of transfers. Also check each
+ * transfer length to avoid arithmetic overflow.
+ */
+ if (total > bufsiz || k_tmp->len > bufsiz) {
status = -EMSGSIZE;
goto done;
}
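
The spidev check bounds each individual transfer length as well as the running total: with every k_tmp->len capped at bufsiz and the total re-checked after each addition, the accumulator can never exceed 2 * bufsiz and therefore cannot wrap around. As a standalone sketch:

/* Sketch: validate a list of transfer lengths against one bounce buffer. */
static int check_lengths(const unsigned int *len, unsigned int n,
			 unsigned int bufsiz)
{
	unsigned int total = 0, i;

	for (i = 0; i < n; i++) {
		if (len[i] > bufsiz)	/* bound each term ... */
			return -EMSGSIZE;
		total += len[i];	/* ... so the sum stays <= 2 * bufsiz */
		if (total > bufsiz)
			return -EMSGSIZE;
	}
	return 0;
}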
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 7bdb62bf6b40..f83e00c78051 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -114,7 +114,7 @@ void sync_timeline_signal(struct sync_timeline *obj)
list_for_each_entry_safe(pt, next, &obj->active_list_head,
active_list) {
if (fence_is_signaled_locked(&pt->base))
- list_del(&pt->active_list);
+ list_del_init(&pt->active_list);
}

spin_unlock_irqrestore(&obj->child_list_lock, flags);
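
list_del() leaves the removed entry's pointers poisoned, so any later list_empty() test or second removal on that node is a use-after-poison bug; list_del_init() instead re-initializes the node as an empty list, which is what the later sync fence teardown path relies on here. Minimal contrast:

#include <linux/list.h>

/* Sketch: after list_del_init(node), list_empty(node) is true and the
 * node can safely be tested or removed again. */
static void detach_node(struct list_head *node)
{
	if (!list_empty(node))
		list_del_init(node);
}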
diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
index 6ed35b6ecf0d..04fc217481c7 100644
--- a/drivers/staging/panel/panel.c
+++ b/drivers/staging/panel/panel.c
@@ -335,11 +335,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
* LCD types
*/
#define LCD_TYPE_NONE 0
-#define LCD_TYPE_OLD 1
-#define LCD_TYPE_KS0074 2
-#define LCD_TYPE_HANTRONIX 3
-#define LCD_TYPE_NEXCOM 4
-#define LCD_TYPE_CUSTOM 5
+#define LCD_TYPE_CUSTOM 1
+#define LCD_TYPE_OLD 2
+#define LCD_TYPE_KS0074 3
+#define LCD_TYPE_HANTRONIX 4
+#define LCD_TYPE_NEXCOM 5

/*
* keypad types
@@ -502,7 +502,7 @@ MODULE_PARM_DESC(keypad_type,
static int lcd_type = NOT_SET;
module_param(lcd_type, int, 0000);
MODULE_PARM_DESC(lcd_type,
- "LCD type: 0=none, 1=old //, 2=serial ks0074, 3=hantronix //, 4=nexcom //, 5=compiled-in");
+ "LCD type: 0=none, 1=compiled-in, 2=old, 3=serial ks0074, 4=hantronix, 5=nexcom");

static int lcd_height = NOT_SET;
module_param(lcd_height, int, 0000);
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 07ce3fd88e70..fdf5c56251e5 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1308,10 +1308,18 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
priv->hw->conf.chandef.chan->hw_value);
}

- if (current_rate > RATE_11M)
- pkt_type = (u8)priv->byPacketType;
- else
+ if (current_rate > RATE_11M) {
+ if (info->band == IEEE80211_BAND_5GHZ) {
+ pkt_type = PK_TYPE_11A;
+ } else {
+ if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+ pkt_type = PK_TYPE_11GB;
+ else
+ pkt_type = PK_TYPE_11GA;
+ }
+ } else {
pkt_type = PK_TYPE_11B;
+ }

/*Set fifo controls */
if (pkt_type == PK_TYPE_11A)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 77d64251af40..5e3561243eda 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -537,7 +537,7 @@ static struct iscsit_transport iscsi_target_transport = {

static int __init iscsi_target_init_module(void)
{
- int ret = 0;
+ int ret = 0, size;

pr_debug("iSCSI-Target "ISCSIT_VERSION"\n");

@@ -546,6 +546,7 @@ static int __init iscsi_target_init_module(void)
pr_err("Unable to allocate memory for iscsit_global\n");
return -1;
}
+ spin_lock_init(&iscsit_global->ts_bitmap_lock);
mutex_init(&auth_id_lock);
spin_lock_init(&sess_idr_lock);
idr_init(&tiqn_idr);
@@ -555,15 +556,11 @@ static int __init iscsi_target_init_module(void)
if (ret < 0)
goto out;

- ret = iscsi_thread_set_init();
- if (ret < 0)
+ size = BITS_TO_LONGS(ISCSIT_BITMAP_BITS) * sizeof(long);
+ iscsit_global->ts_bitmap = vzalloc(size);
+ if (!iscsit_global->ts_bitmap) {
+ pr_err("Unable to allocate iscsit_global->ts_bitmap\n");
goto configfs_out;
-
- if (iscsi_allocate_thread_sets(TARGET_THREAD_SET_COUNT) !=
- TARGET_THREAD_SET_COUNT) {
- pr_err("iscsi_allocate_thread_sets() returned"
- " unexpected value!\n");
- goto ts_out1;
}

lio_qr_cache = kmem_cache_create("lio_qr_cache",
@@ -572,7 +569,7 @@ static int __init iscsi_target_init_module(void)
if (!lio_qr_cache) {
pr_err("nable to kmem_cache_create() for"
" lio_qr_cache\n");
- goto ts_out2;
+ goto bitmap_out;
}

lio_dr_cache = kmem_cache_create("lio_dr_cache",
@@ -617,10 +614,8 @@ dr_out:
kmem_cache_destroy(lio_dr_cache);
qr_out:
kmem_cache_destroy(lio_qr_cache);
-ts_out2:
- iscsi_deallocate_thread_sets();
-ts_out1:
- iscsi_thread_set_free();
+bitmap_out:
+ vfree(iscsit_global->ts_bitmap);
configfs_out:
iscsi_target_deregister_configfs();
out:
@@ -630,8 +625,6 @@ out:

static void __exit iscsi_target_cleanup_module(void)
{
- iscsi_deallocate_thread_sets();
- iscsi_thread_set_free();
iscsit_release_discovery_tpg();
iscsit_unregister_transport(&iscsi_target_transport);
kmem_cache_destroy(lio_qr_cache);
@@ -641,6 +634,7 @@ static void __exit iscsi_target_cleanup_module(void)

iscsi_target_deregister_configfs();

+ vfree(iscsit_global->ts_bitmap);
kfree(iscsit_global);
}

@@ -3715,17 +3709,16 @@ static int iscsit_send_reject(

void iscsit_thread_get_cpumask(struct iscsi_conn *conn)
{
- struct iscsi_thread_set *ts = conn->thread_set;
int ord, cpu;
/*
- * thread_id is assigned from iscsit_global->ts_bitmap from
- * within iscsi_thread_set.c:iscsi_allocate_thread_sets()
+ * bitmap_id is assigned from iscsit_global->ts_bitmap from
+ * within iscsit_start_kthreads()
*
- * Here we use thread_id to determine which CPU that this
- * iSCSI connection's iscsi_thread_set will be scheduled to
+ * Here we use bitmap_id to determine which CPU that this
+ * iSCSI connection's RX/TX threads will be scheduled to
* execute upon.
*/
- ord = ts->thread_id % cpumask_weight(cpu_online_mask);
+ ord = conn->bitmap_id % cpumask_weight(cpu_online_mask);
for_each_online_cpu(cpu) {
if (ord-- == 0) {
cpumask_set_cpu(cpu, conn->conn_cpumask);
@@ -3914,7 +3907,7 @@ check_rsp_state:
switch (state) {
case ISTATE_SEND_LOGOUTRSP:
if (!iscsit_logout_post_handler(cmd, conn))
- goto restart;
+ return -ECONNRESET;
/* fall through */
case ISTATE_SEND_STATUS:
case ISTATE_SEND_ASYNCMSG:
@@ -3942,8 +3935,6 @@ check_rsp_state:

err:
return -1;
-restart:
- return -EAGAIN;
}

static int iscsit_handle_response_queue(struct iscsi_conn *conn)
@@ -3970,21 +3961,13 @@ static int iscsit_handle_response_queue(struct iscsi_conn *conn)
int iscsi_target_tx_thread(void *arg)
{
int ret = 0;
- struct iscsi_conn *conn;
- struct iscsi_thread_set *ts = arg;
+ struct iscsi_conn *conn = arg;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
*/
allow_signal(SIGINT);

-restart:
- conn = iscsi_tx_thread_pre_handler(ts);
- if (!conn)
- goto out;
-
- ret = 0;
-
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@@ -3993,11 +3976,9 @@ restart:
iscsit_thread_check_cpumask(conn, current, 1);

wait_event_interruptible(conn->queues_wq,
- !iscsit_conn_all_queues_empty(conn) ||
- ts->status == ISCSI_THREAD_SET_RESET);
+ !iscsit_conn_all_queues_empty(conn));

- if ((ts->status == ISCSI_THREAD_SET_RESET) ||
- signal_pending(current))
+ if (signal_pending(current))
goto transport_err;

get_immediate:
@@ -4008,15 +3989,14 @@ get_immediate:
ret = iscsit_handle_response_queue(conn);
if (ret == 1)
goto get_immediate;
- else if (ret == -EAGAIN)
- goto restart;
+ else if (ret == -ECONNRESET)
+ goto out;
else if (ret < 0)
goto transport_err;
}

transport_err:
iscsit_take_action_for_connection_exit(conn);
- goto restart;
out:
return 0;
}
@@ -4111,8 +4091,7 @@ int iscsi_target_rx_thread(void *arg)
int ret;
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
- struct iscsi_conn *conn = NULL;
- struct iscsi_thread_set *ts = arg;
+ struct iscsi_conn *conn = arg;
struct kvec iov;
/*
* Allow ourselves to be interrupted by SIGINT so that a
@@ -4120,11 +4099,6 @@ int iscsi_target_rx_thread(void *arg)
*/
allow_signal(SIGINT);

-restart:
- conn = iscsi_rx_thread_pre_handler(ts);
- if (!conn)
- goto out;
-
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
struct completion comp;
int rc;
@@ -4134,7 +4108,7 @@ restart:
if (rc < 0)
goto transport_err;

- goto out;
+ goto transport_err;
}

while (!kthread_should_stop()) {
@@ -4210,8 +4184,6 @@ transport_err:
if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1);
iscsit_take_action_for_connection_exit(conn);
- goto restart;
-out:
return 0;
}

@@ -4273,7 +4245,24 @@ int iscsit_close_connection(
if (conn->conn_transport->transport_type == ISCSI_TCP)
complete(&conn->conn_logout_comp);

- iscsi_release_thread_set(conn);
+ if (!strcmp(current->comm, ISCSI_RX_THREAD_NAME)) {
+ if (conn->tx_thread &&
+ cmpxchg(&conn->tx_thread_active, true, false)) {
+ send_sig(SIGINT, conn->tx_thread, 1);
+ kthread_stop(conn->tx_thread);
+ }
+ } else if (!strcmp(current->comm, ISCSI_TX_THREAD_NAME)) {
+ if (conn->rx_thread &&
+ cmpxchg(&conn->rx_thread_active, true, false)) {
+ send_sig(SIGINT, conn->rx_thread, 1);
+ kthread_stop(conn->rx_thread);
+ }
+ }
+
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);

iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
@@ -4551,15 +4540,13 @@ static void iscsit_logout_post_handler_closesession(
struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
-
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);

atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);

iscsit_dec_conn_usage_count(conn);
- iscsit_stop_session(sess, 1, 1);
+ iscsit_stop_session(sess, sleep, sleep);
iscsit_dec_session_usage_count(sess);
target_put_session(sess->se_sess);
}
@@ -4567,13 +4554,12 @@ static void iscsit_logout_post_handler_closesession(
static void iscsit_logout_post_handler_samecid(
struct iscsi_conn *conn)
{
- iscsi_set_thread_clear(conn, ISCSI_CLEAR_TX_THREAD);
- iscsi_set_thread_set_signal(conn, ISCSI_SIGNAL_TX_THREAD);
+ int sleep = cmpxchg(&conn->tx_thread_active, true, false);

atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);

- iscsit_cause_connection_reinstatement(conn, 1);
+ iscsit_cause_connection_reinstatement(conn, sleep);
iscsit_dec_conn_usage_count(conn);
}
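
The iscsi_target changes retire the shared thread-set machinery in favour of one dedicated RX and one TX kthread per connection, started from iscsit_start_kthreads(); the remaining global state is a bitmap that hands out a small per-connection bitmap_id (get_order(1) is order 0, a single bit) used for CPU-affinity spreading and released again in iscsit_close_connection(). The allocate/release pairing in miniature, with locking and names simplified:

/* Sketch of one-bit ID allocation from a shared bitmap. */
static int alloc_conn_id(unsigned long *bitmap, spinlock_t *lock, int nbits)
{
	int id;

	spin_lock(lock);
	id = bitmap_find_free_region(bitmap, nbits, get_order(1));
	spin_unlock(lock);
	return id;	/* negative when the bitmap is exhausted */
}

static void free_conn_id(unsigned long *bitmap, spinlock_t *lock, int id)
{
	spin_lock(lock);
	bitmap_release_region(bitmap, id, get_order(1));
	spin_unlock(lock);
}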

diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index bdd8731a4daa..e008ed261364 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -860,7 +860,10 @@ void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
}
spin_unlock_bh(&conn->state_lock);

- iscsi_thread_set_force_reinstatement(conn);
+ if (conn->tx_thread && conn->tx_thread_active)
+ send_sig(SIGINT, conn->tx_thread, 1);
+ if (conn->rx_thread && conn->rx_thread_active)
+ send_sig(SIGINT, conn->rx_thread, 1);

sleep:
wait_for_completion(&conn->conn_wait_rcfr_comp);
@@ -885,10 +888,10 @@ void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
return;
}

- if (iscsi_thread_set_force_reinstatement(conn) < 0) {
- spin_unlock_bh(&conn->state_lock);
- return;
- }
+ if (conn->tx_thread && conn->tx_thread_active)
+ send_sig(SIGINT, conn->tx_thread, 1);
+ if (conn->rx_thread && conn->rx_thread_active)
+ send_sig(SIGINT, conn->rx_thread, 1);

atomic_set(&conn->connection_reinstatement, 1);
if (!sleep) {
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 153fb66ac1b8..345f073ff6dc 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -699,6 +699,51 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
iscsit_start_nopin_timer(conn);
}

+int iscsit_start_kthreads(struct iscsi_conn *conn)
+{
+ int ret = 0;
+
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ conn->bitmap_id = bitmap_find_free_region(iscsit_global->ts_bitmap,
+ ISCSIT_BITMAP_BITS, get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+
+ if (conn->bitmap_id < 0) {
+ pr_err("bitmap_find_free_region() failed for"
+ " iscsit_start_kthreads()\n");
+ return -ENOMEM;
+ }
+
+ conn->tx_thread = kthread_run(iscsi_target_tx_thread, conn,
+ "%s", ISCSI_TX_THREAD_NAME);
+ if (IS_ERR(conn->tx_thread)) {
+ pr_err("Unable to start iscsi_target_tx_thread\n");
+ ret = PTR_ERR(conn->tx_thread);
+ goto out_bitmap;
+ }
+ conn->tx_thread_active = true;
+
+ conn->rx_thread = kthread_run(iscsi_target_rx_thread, conn,
+ "%s", ISCSI_RX_THREAD_NAME);
+ if (IS_ERR(conn->rx_thread)) {
+ pr_err("Unable to start iscsi_target_rx_thread\n");
+ ret = PTR_ERR(conn->rx_thread);
+ goto out_tx;
+ }
+ conn->rx_thread_active = true;
+
+ return 0;
+out_tx:
+ kthread_stop(conn->tx_thread);
+ conn->tx_thread_active = false;
+out_bitmap:
+ spin_lock(&iscsit_global->ts_bitmap_lock);
+ bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
+ get_order(1));
+ spin_unlock(&iscsit_global->ts_bitmap_lock);
+ return ret;
+}
+
int iscsi_post_login_handler(
struct iscsi_np *np,
struct iscsi_conn *conn,
@@ -709,7 +754,7 @@ int iscsi_post_login_handler(
struct se_session *se_sess = sess->se_sess;
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
- struct iscsi_thread_set *ts;
+ int rc;

iscsit_inc_conn_usage_count(conn);

@@ -724,7 +769,6 @@ int iscsi_post_login_handler(
/*
* SCSI Initiator -> SCSI Target Port Mapping
*/
- ts = iscsi_get_thread_set();
if (!zero_tsih) {
iscsi_set_session_parameters(sess->sess_ops,
conn->param_list, 0);
@@ -751,9 +795,11 @@ int iscsi_post_login_handler(
sess->sess_ops->InitiatorName);
spin_unlock_bh(&sess->conn_lock);

- iscsi_post_login_start_timers(conn);
+ rc = iscsit_start_kthreads(conn);
+ if (rc)
+ return rc;

- iscsi_activate_thread_set(conn, ts);
+ iscsi_post_login_start_timers(conn);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
@@ -810,8 +856,11 @@ int iscsi_post_login_handler(
" iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
spin_unlock_bh(&se_tpg->session_lock);

+ rc = iscsit_start_kthreads(conn);
+ if (rc)
+ return rc;
+
iscsi_post_login_start_timers(conn);
- iscsi_activate_thread_set(conn, ts);
/*
* Determine CPU mask to ensure connection's RX and TX kthreads
* are scheduled on the same CPU.
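
iscsit_start_kthreads() above follows the usual acquire-in-order,
unwind-in-reverse pattern: bitmap slot, then TX thread, then RX thread, with
each failure label undoing only what was already acquired. A stripped-down
userspace sketch of the same shape (POSIX threads, two steps only, names made
up for the example):

#include <pthread.h>
#include <stdio.h>

static void *tx_fn(void *arg) { (void)arg; return NULL; }
static void *rx_fn(void *arg) { (void)arg; return NULL; }

static int start_workers(pthread_t *tx, pthread_t *rx)
{
	int ret;

	ret = pthread_create(tx, NULL, tx_fn, NULL);
	if (ret) {
		fprintf(stderr, "unable to start tx worker\n");
		return ret;
	}

	ret = pthread_create(rx, NULL, rx_fn, NULL);
	if (ret) {
		fprintf(stderr, "unable to start rx worker\n");
		goto out_tx;	/* undo the first step only */
	}

	return 0;

out_tx:
	pthread_join(*tx, NULL);	/* the kernel version calls kthread_stop() */
	return ret;
}

int main(void)
{
	pthread_t tx, rx;

	if (!start_workers(&tx, &rx)) {
		pthread_join(tx, NULL);
		pthread_join(rx, NULL);
	}
	return 0;
}
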
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 44620fb6bd45..cbb0cc277f4e 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -264,40 +264,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
struct se_device *se_dev = cmd->se_dev;
struct fd_dev *dev = FD_DEV(se_dev);
struct file *prot_fd = dev->fd_prot_file;
- struct scatterlist *sg;
loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
unsigned char *buf;
- u32 prot_size, len, size;
- int rc, ret = 1, i;
+ u32 prot_size;
+ int rc, ret = 1;

prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
se_dev->prot_length;

if (!is_write) {
- fd_prot->prot_buf = vzalloc(prot_size);
+ fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
if (!fd_prot->prot_buf) {
pr_err("Unable to allocate fd_prot->prot_buf\n");
return -ENOMEM;
}
buf = fd_prot->prot_buf;

- fd_prot->prot_sg_nents = cmd->t_prot_nents;
- fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
- fd_prot->prot_sg_nents, GFP_KERNEL);
+ fd_prot->prot_sg_nents = 1;
+ fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
+ GFP_KERNEL);
if (!fd_prot->prot_sg) {
pr_err("Unable to allocate fd_prot->prot_sg\n");
- vfree(fd_prot->prot_buf);
+ kfree(fd_prot->prot_buf);
return -ENOMEM;
}
- size = prot_size;
-
- for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
-
- len = min_t(u32, PAGE_SIZE, size);
- sg_set_buf(sg, buf, len);
- size -= len;
- buf += len;
- }
+ sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
+ sg_set_buf(fd_prot->prot_sg, buf, prot_size);
}

if (is_write) {
@@ -318,7 +310,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,

if (is_write || ret < 0) {
kfree(fd_prot->prot_sg);
- vfree(fd_prot->prot_buf);
+ kfree(fd_prot->prot_buf);
}

return ret;
@@ -549,6 +541,56 @@ fd_execute_write_same(struct se_cmd *cmd)
return 0;
}

+static int
+fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
+ void *buf, size_t bufsize)
+{
+ struct fd_dev *fd_dev = FD_DEV(se_dev);
+ struct file *prot_fd = fd_dev->fd_prot_file;
+ sector_t prot_length, prot;
+ loff_t pos = lba * se_dev->prot_length;
+
+ if (!prot_fd) {
+ pr_err("Unable to locate fd_dev->fd_prot_file\n");
+ return -ENODEV;
+ }
+
+ prot_length = nolb * se_dev->prot_length;
+
+ for (prot = 0; prot < prot_length;) {
+ sector_t len = min_t(sector_t, bufsize, prot_length - prot);
+ ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
+
+ if (ret != len) {
+ pr_err("vfs_write to prot file failed: %zd\n", ret);
+ return ret < 0 ? ret : -ENODEV;
+ }
+ prot += ret;
+ }
+
+ return 0;
+}
+
+static int
+fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
+{
+ void *buf;
+ int rc;
+
+ buf = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf) {
+ pr_err("Unable to allocate FILEIO prot buf\n");
+ return -ENOMEM;
+ }
+ memset(buf, 0xff, PAGE_SIZE);
+
+ rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
+
+ free_page((unsigned long)buf);
+
+ return rc;
+}
+
static sense_reason_t
fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
{
@@ -556,6 +598,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
struct inode *inode = file->f_mapping->host;
int ret;

+ if (cmd->se_dev->dev_attrib.pi_prot_type) {
+ ret = fd_do_prot_unmap(cmd, lba, nolb);
+ if (ret)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
+
if (S_ISBLK(inode->i_mode)) {
/* The backend is block device, use discard */
struct block_device *bdev = inode->i_bdev;
@@ -658,11 +706,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
0, fd_prot.prot_sg, 0);
if (rc) {
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
return rc;
}
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
}
} else {
memset(&fd_prot, 0, sizeof(struct fd_prot));
@@ -678,7 +726,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
0, fd_prot.prot_sg, 0);
if (rc) {
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
return rc;
}
}
@@ -714,7 +762,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,

if (ret < 0) {
kfree(fd_prot.prot_sg);
- vfree(fd_prot.prot_buf);
+ kfree(fd_prot.prot_buf);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

@@ -878,48 +926,28 @@ static int fd_init_prot(struct se_device *dev)

static int fd_format_prot(struct se_device *dev)
{
- struct fd_dev *fd_dev = FD_DEV(dev);
- struct file *prot_fd = fd_dev->fd_prot_file;
- sector_t prot_length, prot;
unsigned char *buf;
- loff_t pos = 0;
int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
- int rc, ret = 0, size, len;
+ int ret;

if (!dev->dev_attrib.pi_prot_type) {
pr_err("Unable to format_prot while pi_prot_type == 0\n");
return -ENODEV;
}
- if (!prot_fd) {
- pr_err("Unable to locate fd_dev->fd_prot_file\n");
- return -ENODEV;
- }

buf = vzalloc(unit_size);
if (!buf) {
pr_err("Unable to allocate FILEIO prot buf\n");
return -ENOMEM;
}
- prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
- size = prot_length;

pr_debug("Using FILEIO prot_length: %llu\n",
- (unsigned long long)prot_length);
+ (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
+ dev->prot_length);

memset(buf, 0xff, unit_size);
- for (prot = 0; prot < prot_length; prot += unit_size) {
- len = min(unit_size, size);
- rc = kernel_write(prot_fd, buf, len, pos);
- if (rc != len) {
- pr_err("vfs_write to prot file failed: %d\n", rc);
- ret = -ENODEV;
- goto out;
- }
- pos += len;
- size -= len;
- }
-
-out:
+ ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
+ buf, unit_size);
vfree(buf);
return ret;
}
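
fd_do_prot_fill() above is a plain chunked-write loop: fill a scratch buffer
with the initialization pattern and write it in chunks until the whole
protection region is covered, treating a short write as an error. The same
loop in userspace terms (pwrite() standing in for kernel_write(); file name
and sizes are arbitrary):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int fill_region(int fd, off_t pos, size_t len, unsigned char fill)
{
	unsigned char buf[4096];
	size_t done = 0;

	memset(buf, fill, sizeof(buf));

	while (done < len) {
		size_t chunk = len - done;
		ssize_t ret;

		if (chunk > sizeof(buf))
			chunk = sizeof(buf);

		ret = pwrite(fd, buf, chunk, pos + (off_t)done);
		if (ret != (ssize_t)chunk) {
			/* a short write is treated as an error, as in the kernel loop */
			perror("pwrite");
			return -1;
		}
		done += (size_t)ret;
	}
	return 0;
}

int main(void)
{
	int fd = open("prot.img", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || fill_region(fd, 0, 3 * 4096 + 123, 0xff))
		return 1;
	close(fd);
	return 0;
}
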
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 3e7297411110..755bd9b34612 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -312,7 +312,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
return 0;
}

-static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
+static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
{
unsigned char *buf, *addr;
struct scatterlist *sg;
@@ -376,7 +376,7 @@ sbc_execute_rw(struct se_cmd *cmd)
cmd->data_direction);
}

-static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
+static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
{
struct se_device *dev = cmd->se_dev;

@@ -399,7 +399,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
return TCM_NO_SENSE;
}

-static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
+static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
{
struct se_device *dev = cmd->se_dev;
struct scatterlist *write_sg = NULL, *sg;
@@ -414,11 +414,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)

/*
* Handle early failure in transport_generic_request_failure(),
- * which will not have taken ->caw_mutex yet..
+ * which will not have taken ->caw_sem yet..
*/
- if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
+ if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
return TCM_NO_SENSE;
/*
+ * Handle special case for zero-length COMPARE_AND_WRITE
+ */
+ if (!cmd->data_length)
+ goto out;
+ /*
* Immediately exit + release dev->caw_sem if command has already
* been failed with a non-zero SCSI status.
*/
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ac3cbabdbdf0..f786de0290db 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1615,11 +1615,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
transport_complete_task_attr(cmd);
/*
* Handle special case for COMPARE_AND_WRITE failure, where the
- * callback is expected to drop the per device ->caw_mutex.
+ * callback is expected to drop the per device ->caw_sem.
*/
if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
cmd->transport_complete_callback)
- cmd->transport_complete_callback(cmd);
+ cmd->transport_complete_callback(cmd, false);

switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
@@ -1975,8 +1975,12 @@ static void target_complete_ok_work(struct work_struct *work)
if (cmd->transport_complete_callback) {
sense_reason_t rc;

- rc = cmd->transport_complete_callback(cmd);
+ rc = cmd->transport_complete_callback(cmd, true);
if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
+ if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ !cmd->data_length)
+ goto queue_rsp;
+
return;
} else if (rc) {
ret = transport_send_check_condition_and_sense(cmd,
@@ -1990,6 +1994,7 @@ static void target_complete_ok_work(struct work_struct *work)
}
}

+queue_rsp:
switch (cmd->data_direction) {
case DMA_FROM_DEVICE:
spin_lock(&cmd->se_lun->lun_sep_lock);
@@ -2094,6 +2099,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
static inline void transport_free_pages(struct se_cmd *cmd)
{
if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
+ /*
+ * Release special case READ buffer payload required for
+ * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
+ */
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+ transport_free_sgl(cmd->t_bidi_data_sg,
+ cmd->t_bidi_data_nents);
+ cmd->t_bidi_data_sg = NULL;
+ cmd->t_bidi_data_nents = 0;
+ }
transport_reset_sgl_orig(cmd);
return;
}
@@ -2246,6 +2261,7 @@ sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
int ret = 0;
+ bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

/*
* Determine is the TCM fabric module has already allocated physical
@@ -2254,7 +2270,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
*/
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
cmd->data_length) {
- bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

if ((cmd->se_cmd_flags & SCF_BIDI) ||
(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
@@ -2285,6 +2300,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
cmd->data_length, zero_flag);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
+ cmd->data_length) {
+ /*
+ * Special case for COMPARE_AND_WRITE with fabrics
+ * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
+ */
+ u32 caw_length = cmd->t_task_nolb *
+ cmd->se_dev->dev_attrib.block_size;
+
+ ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
+ &cmd->t_bidi_data_nents,
+ caw_length, zero_flag);
+ if (ret < 0)
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
/*
* If this command is not a write we can execute it right here,
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index deae122c9c4b..d465ace95186 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -3444,7 +3444,8 @@ void serial8250_suspend_port(int line)
port->type != PORT_8250) {
unsigned char canary = 0xa5;
serial_out(up, UART_SCR, canary);
- up->canary = canary;
+ if (serial_in(up, UART_SCR) == canary)
+ up->canary = canary;
}

uart_suspend_port(&serial8250_reg, port);
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 6ae5b8560e4d..7a80250475eb 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -629,6 +629,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = {
{ "80860F0A", 0 },
{ "8086228A", 0 },
{ "APMC0D08", 0},
+ { "AMD0020", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 0eb29b1c47ac..23061918b0e4 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -818,7 +818,7 @@ static irqreturn_t imx_int(int irq, void *dev_id)
if (sts2 & USR2_ORE) {
dev_err(sport->port.dev, "Rx FIFO overrun\n");
sport->port.icount.overrun++;
- writel(sts2 | USR2_ORE, sport->port.membase + USR2);
+ writel(USR2_ORE, sport->port.membase + USR2);
}

return IRQ_HANDLED;
@@ -1181,10 +1181,12 @@ static int imx_startup(struct uart_port *port)
imx_uart_dma_init(sport);

spin_lock_irqsave(&sport->port.lock, flags);
+
/*
* Finally, clear and enable interrupts
*/
writel(USR1_RTSD, sport->port.membase + USR1);
+ writel(USR2_ORE, sport->port.membase + USR2);

if (sport->dma_is_inited && !sport->dma_is_enabled)
imx_enable_dma(sport);
@@ -1199,10 +1201,6 @@ static int imx_startup(struct uart_port *port)

writel(temp, sport->port.membase + UCR1);

- /* Clear any pending ORE flag before enabling interrupt */
- temp = readl(sport->port.membase + USR2);
- writel(temp | USR2_ORE, sport->port.membase + USR2);
-
temp = readl(sport->port.membase + UCR4);
temp |= UCR4_OREN;
writel(temp, sport->port.membase + UCR4);
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index a051a7a2b1bd..a81f9dd7ee97 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
dev_dbg(&desc->intf->dev,
"NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
- dr->wIndex, dr->wLength);
+ le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
break;

case USB_CDC_NOTIFY_NETWORK_CONNECTION:
@@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
clear_bit(WDM_POLL_RUNNING, &desc->flags);
dev_err(&desc->intf->dev,
"unknown notification %d received: index %d len %d\n",
- dr->bNotificationType, dr->wIndex, dr->wLength);
+ dr->bNotificationType,
+ le16_to_cpu(dr->wIndex),
+ le16_to_cpu(dr->wLength));
goto exit;
}

@@ -408,7 +410,7 @@ static ssize_t wdm_write
USB_RECIP_INTERFACE);
req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
req->wValue = 0;
- req->wIndex = desc->inum;
+ req->wIndex = desc->inum; /* already converted */
req->wLength = cpu_to_le16(count);
set_bit(WDM_IN_USE, &desc->flags);
desc->outbuf = buf;
@@ -422,7 +424,7 @@ static ssize_t wdm_write
rv = usb_translate_errors(rv);
} else {
dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
- req->wIndex);
+ le16_to_cpu(req->wIndex));
}
out:
usb_autopm_put_interface(desc->intf);
@@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
desc->irq->wValue = 0;
- desc->irq->wIndex = desc->inum;
+ desc->irq->wIndex = desc->inum; /* already converted */
desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);

usb_fill_control_urb(
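
The le16_to_cpu() additions above matter only on big-endian hosts: wIndex and
wLength arrive in little-endian byte order inside the notification buffer, so
printing them raw shows byte-swapped values there. A portable userspace
illustration of the conversion (the sample bytes are invented):

#include <stdint.h>
#include <stdio.h>

/* Assemble a 16-bit little-endian field regardless of host byte order. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* bmRequestType, bNotificationType, wValue, wIndex = 2, wLength = 16 */
	const uint8_t notif[8] = { 0xa1, 0x01, 0x00, 0x00,
				   0x02, 0x00, 0x10, 0x00 };

	printf("index %u len %u\n", get_le16(&notif[4]), get_le16(&notif[6]));
	return 0;
}
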
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d7c3d5a35946..3b7151687776 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3406,10 +3406,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
if (status) {
dev_dbg(&port_dev->dev, "can't resume, status %d\n", status);
} else {
- /* drive resume for at least 20 msec */
+ /* drive resume for USB_RESUME_TIMEOUT msec */
dev_dbg(&udev->dev, "usb %sresume\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""));
- msleep(25);
+ msleep(USB_RESUME_TIMEOUT);

/* Virtual root hubs can trigger on GET_PORT_STATUS to
* stop resume signaling. Then finish the resume
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index c78c8740db1d..758b7e0380f6 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1521,7 +1521,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
dev_dbg(hsotg->dev,
"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
writel(0, hsotg->regs + PCGCTL);
- usleep_range(20000, 40000);
+ msleep(USB_RESUME_TIMEOUT);

hprt0 = dwc2_read_hprt0(hsotg);
hprt0 |= HPRT0_RES;
diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
index 90545980542f..6385c198c134 100644
--- a/drivers/usb/gadget/legacy/printer.c
+++ b/drivers/usb/gadget/legacy/printer.c
@@ -1031,6 +1031,15 @@ unknown:
break;
}
/* host either stalls (value < 0) or reports success */
+ if (value >= 0) {
+ req->length = value;
+ req->zero = value < wLength;
+ value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+ if (value < 0) {
+ ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
+ req->status = 0;
+ }
+ }
return value;
}

diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 85e56d1abd23..f4d88dfb26a7 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -792,12 +792,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
ehci->reset_done[i] == 0))
continue;

- /* start 20 msec resume signaling from this port,
- * and make hub_wq collect PORT_STAT_C_SUSPEND to
- * stop that signaling. Use 5 ms extra for safety,
- * like usb_port_resume() does.
+ /* start USB_RESUME_TIMEOUT msec resume signaling from
+ * this port, and make hub_wq collect
+ * PORT_STAT_C_SUSPEND to stop that signaling.
*/
- ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
+ ehci->reset_done[i] = jiffies +
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(i, &ehci->resuming_ports);
ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
usb_hcd_start_port_resume(&hcd->self, i);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 87cf86f38b36..7354d0129a72 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -471,10 +471,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
}

- /* msleep for 20ms only if code is trying to resume port */
+ /*
+ * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume
+ * port
+ */
if (resume_needed) {
spin_unlock_irq(&ehci->lock);
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);
spin_lock_irq(&ehci->lock);
if (ehci->shutdown)
goto shutdown;
@@ -942,7 +945,7 @@ int ehci_hub_control(
temp &= ~PORT_WAKE_BITS;
ehci_writel(ehci, temp | PORT_RESUME, status_reg);
ehci->reset_done[wIndex] = jiffies
- + msecs_to_jiffies(20);
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(wIndex, &ehci->resuming_ports);
usb_hcd_start_port_resume(&hcd->self, wIndex);
break;
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 475b21fd373b..7a6681fb7675 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
/* resume signaling for 20 msec */
fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
fotg210->reset_done[wIndex] = jiffies
- + msecs_to_jiffies(20);
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &fotg210->port_c_suspend);
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
index a83eefefffda..ba77e2e43f62 100644
--- a/drivers/usb/host/fusbh200-hcd.c
+++ b/drivers/usb/host/fusbh200-hcd.c
@@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
if ((temp & PORT_PE) == 0)
goto error;

- /* resume signaling for 20 msec */
fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
fusbh200->reset_done[wIndex] = jiffies
- + msecs_to_jiffies(20);
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &fusbh200->port_c_suspend);
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index 113d0cc6cc43..9ef56443446d 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1490,7 +1490,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
spin_unlock_irq(&isp116x->lock);

hcd->state = HC_STATE_RESUMING;
- msleep(20);
+ msleep(USB_RESUME_TIMEOUT);

/* Go operational */
spin_lock_irq(&isp116x->lock);
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index ef7efb278b15..28a2866b6b16 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
|| oxu->reset_done[i] != 0)
continue;

- /* start 20 msec resume signaling from this port,
- * and make hub_wq collect PORT_STAT_C_SUSPEND to
+ /* start USB_RESUME_TIMEOUT msec resume signaling from this
+ * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
* stop that signaling.
*/
- oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
+ oxu->reset_done[i] = jiffies +
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
}
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index bdc82fea0a1f..54a417043e44 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2301,7 +2301,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
rh->port &= ~USB_PORT_STAT_SUSPEND;
rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
- msleep(50);
+ msleep(USB_RESUME_TIMEOUT);
r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
}

diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 4f4ba1ea9e9b..9118cd8de1a7 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -1259,7 +1259,7 @@ sl811h_hub_control(
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);

mod_timer(&sl811->timer, jiffies
- + msecs_to_jiffies(20));
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT));
break;
case USB_PORT_FEAT_POWER:
port_power(sl811, 0);
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 19ba5eafb31e..7b3d1afcc14a 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -166,7 +166,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
/* Port received a wakeup request */
set_bit(port, &uhci->resuming_ports);
uhci->ports_timeout = jiffies +
- msecs_to_jiffies(25);
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
usb_hcd_start_port_resume(
&uhci_to_hcd(uhci)->self, port);

@@ -338,7 +338,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
uhci_finish_suspend(uhci, port, port_addr);

/* USB v2.0 7.1.7.5 */
- uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
+ uhci->ports_timeout = jiffies +
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
break;
case USB_PORT_FEAT_POWER:
/* UHCI has no power switching */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 73485fa4372f..eeedde8c435a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1574,7 +1574,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
} else {
xhci_dbg(xhci, "resume HS port %d\n", port_id);
bus_state->resume_done[faked_port_index] = jiffies +
- msecs_to_jiffies(20);
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
set_bit(faked_port_index, &bus_state->resuming_ports);
mod_timer(&hcd->rh_timer,
bus_state->resume_done[faked_port_index]);
diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c
index 3cb98b1d5d29..7911b6b6fe40 100644
--- a/drivers/usb/isp1760/isp1760-hcd.c
+++ b/drivers/usb/isp1760/isp1760-hcd.c
@@ -1869,7 +1869,7 @@ static int isp1760_hub_control(struct usb_hcd *hcd, u16 typeReq,
reg_write32(hcd->regs, HC_PORTSC1,
temp | PORT_RESUME);
priv->reset_done = jiffies +
- msecs_to_jiffies(20);
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
}
break;
case USB_PORT_FEAT_C_SUSPEND:
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 067920f2d570..ec0ee3b486f5 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -99,6 +99,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
+#include <linux/usb.h>

#include "musb_core.h"

@@ -562,7 +563,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
musb->rh_timer = jiffies
- + msecs_to_jiffies(20);
+ + msecs_to_jiffies(USB_RESUME_TIMEOUT);
musb->need_finish_resume = 1;

musb->xceiv->otg->state = OTG_STATE_A_HOST;
@@ -1597,16 +1598,30 @@ irqreturn_t musb_interrupt(struct musb *musb)
is_host_active(musb) ? "host" : "peripheral",
musb->int_usb, musb->int_tx, musb->int_rx);

- /* the core can interrupt us for multiple reasons; docs have
- * a generic interrupt flowchart to follow
+ /**
+ * According to Mentor Graphics' documentation, flowchart on page 98,
+ * IRQ should be handled as follows:
+ *
+ * . Resume IRQ
+ * . Session Request IRQ
+ * . VBUS Error IRQ
+ * . Suspend IRQ
+ * . Connect IRQ
+ * . Disconnect IRQ
+ * . Reset/Babble IRQ
+ * . SOF IRQ (we're not using this one)
+ * . Endpoint 0 IRQ
+ * . TX Endpoints
+ * . RX Endpoints
+ *
+ * We will be following that flowchart in order to avoid any problems
+ * that might arise with the internal Finite State Machine.
*/
+
if (musb->int_usb)
retval |= musb_stage0_irq(musb, musb->int_usb,
devctl);

- /* "stage 1" is handling endpoint irqs */
-
- /* handle endpoint 0 first */
if (musb->int_tx & 1) {
if (is_host_active(musb))
retval |= musb_h_ep0_irq(musb);
@@ -1614,37 +1629,31 @@ irqreturn_t musb_interrupt(struct musb *musb)
retval |= musb_g_ep0_irq(musb);
}

- /* RX on endpoints 1-15 */
- reg = musb->int_rx >> 1;
+ reg = musb->int_tx >> 1;
ep_num = 1;
while (reg) {
if (reg & 1) {
- /* musb_ep_select(musb->mregs, ep_num); */
- /* REVISIT just retval = ep->rx_irq(...) */
retval = IRQ_HANDLED;
if (is_host_active(musb))
- musb_host_rx(musb, ep_num);
+ musb_host_tx(musb, ep_num);
else
- musb_g_rx(musb, ep_num);
+ musb_g_tx(musb, ep_num);
}
-
reg >>= 1;
ep_num++;
}

- /* TX on endpoints 1-15 */
- reg = musb->int_tx >> 1;
+ reg = musb->int_rx >> 1;
ep_num = 1;
while (reg) {
if (reg & 1) {
- /* musb_ep_select(musb->mregs, ep_num); */
- /* REVISIT just retval |= ep->tx_irq(...) */
retval = IRQ_HANDLED;
if (is_host_active(musb))
- musb_host_tx(musb, ep_num);
+ musb_host_rx(musb, ep_num);
else
- musb_g_tx(musb, ep_num);
+ musb_g_rx(musb, ep_num);
}
+
reg >>= 1;
ep_num++;
}
@@ -2463,7 +2472,7 @@ static int musb_resume(struct device *dev)
if (musb->need_finish_resume) {
musb->need_finish_resume = 0;
schedule_delayed_work(&musb->finish_resume_work,
- msecs_to_jiffies(20));
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
}

/*
@@ -2506,7 +2515,7 @@ static int musb_runtime_resume(struct device *dev)
if (musb->need_finish_resume) {
musb->need_finish_resume = 0;
schedule_delayed_work(&musb->finish_resume_work,
- msecs_to_jiffies(20));
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
}

return 0;
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index 294e159f4afe..5428ed11440d 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -136,7 +136,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
/* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
schedule_delayed_work(&musb->finish_resume_work,
- msecs_to_jiffies(20));
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
}
}

diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 2f9735b35338..d1cd6b50f520 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -81,7 +81,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)

static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
{
- return res == match_data;
+ struct usb_phy **phy = res;
+
+ return *phy == match_data;
}

/**
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 995986b8e36b..d925f55e4857 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -862,6 +862,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
int elf_prot = 0, elf_flags;
unsigned long k, vaddr;
+ unsigned long total_size = 0;

if (elf_ppnt->p_type != PT_LOAD)
continue;
@@ -924,10 +925,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
#else
load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
#endif
+ total_size = total_mapping_size(elf_phdata,
+ loc->elf_ex.e_phnum);
+ if (!total_size) {
+ error = -EINVAL;
+ goto out_free_dentry;
+ }
}

error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
- elf_prot, elf_flags, 0);
+ elf_prot, elf_flags, total_size);
if (BAD_ADDR(error)) {
retval = IS_ERR((void *)error) ?
PTR_ERR((void*)error) : -EINVAL;
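
The binfmt_elf change above passes the combined size of all PT_LOAD segments
to elf_map() so the initial ET_DYN mapping reserves enough address space for
the later segments. What total_mapping_size() computes can be sketched in
userspace like this (simplified, assuming 4 KiB pages; not the kernel helper
itself):

#include <elf.h>
#include <stddef.h>
#include <stdio.h>

#define ELF_PAGESTART(a) ((a) & ~(Elf64_Addr)4095)	/* assume 4 KiB pages */

/* Span from the page-aligned start of the first PT_LOAD to the end of the last. */
static Elf64_Addr total_mapping_size(const Elf64_Phdr *phdr, size_t nr)
{
	size_t i, first = nr, last = nr;

	for (i = 0; i < nr; i++) {
		if (phdr[i].p_type != PT_LOAD)
			continue;
		if (first == nr)
			first = i;
		last = i;
	}
	if (first == nr)
		return 0;	/* no PT_LOAD: load_elf_binary() now fails with -EINVAL */

	return phdr[last].p_vaddr + phdr[last].p_memsz -
	       ELF_PAGESTART(phdr[first].p_vaddr);
}

int main(void)
{
	Elf64_Phdr ph[2] = {
		{ .p_type = PT_LOAD, .p_vaddr = 0x1000, .p_memsz = 0x2000 },
		{ .p_type = PT_LOAD, .p_vaddr = 0x5000, .p_memsz = 0x0800 },
	};

	printf("span: 0x%llx\n",
	       (unsigned long long)total_mapping_size(ph, 2));	/* 0x4800 */
	return 0;
}
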
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 8b353ad02f03..0a795c969c78 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -6956,12 +6956,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
return -ENOSPC;
}

- if (btrfs_test_opt(root, DISCARD))
- ret = btrfs_discard_extent(root, start, len, NULL);
-
if (pin)
pin_down_extent(root, cache, start, len, 1);
else {
+ if (btrfs_test_opt(root, DISCARD))
+ ret = btrfs_discard_extent(root, start, len, NULL);
btrfs_add_free_space(cache, start, len);
btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 74609b931ba5..f23d4be3280e 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2897,6 +2897,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
if (src == dst)
return -EINVAL;

+ if (len == 0)
+ return 0;
+
btrfs_double_lock(src, loff, dst, dst_loff, len);

ret = extent_same_check_offsets(src, loff, len);
@@ -3626,6 +3629,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
if (off + len == src->i_size)
len = ALIGN(src->i_size, bs) - off;

+ if (len == 0) {
+ ret = 0;
+ goto out_unlock;
+ }
+
/* verify the end result is block aligned */
if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
!IS_ALIGNED(destoff, bs))
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 883b93623bc5..45ea704be030 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -364,22 +364,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
/*
* Check if the attribute is in a supported namespace.
*
- * This applied after the check for the synthetic attributes in the system
+ * This is applied after the check for the synthetic attributes in the system
* namespace.
*/
-static bool btrfs_is_valid_xattr(const char *name)
+static int btrfs_is_valid_xattr(const char *name)
{
- return !strncmp(name, XATTR_SECURITY_PREFIX,
- XATTR_SECURITY_PREFIX_LEN) ||
- !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
- !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
- !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
- !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
+ int len = strlen(name);
+ int prefixlen = 0;
+
+ if (!strncmp(name, XATTR_SECURITY_PREFIX,
+ XATTR_SECURITY_PREFIX_LEN))
+ prefixlen = XATTR_SECURITY_PREFIX_LEN;
+ else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
+ prefixlen = XATTR_SYSTEM_PREFIX_LEN;
+ else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
+ prefixlen = XATTR_TRUSTED_PREFIX_LEN;
+ else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+ prefixlen = XATTR_USER_PREFIX_LEN;
+ else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
+ prefixlen = XATTR_BTRFS_PREFIX_LEN;
+ else
+ return -EOPNOTSUPP;
+
+ /*
+ * The name cannot consist of just the prefix
+ */
+ if (len <= prefixlen)
+ return -EINVAL;
+
+ return 0;
}

ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size)
{
+ int ret;
+
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
@@ -388,8 +408,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_getxattr(dentry, name, buffer, size);

- if (!btrfs_is_valid_xattr(name))
- return -EOPNOTSUPP;
+ ret = btrfs_is_valid_xattr(name);
+ if (ret)
+ return ret;
return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
}

@@ -397,6 +418,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
{
struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
+ int ret;

/*
* The permission on security.* and system.* is not checked
@@ -413,8 +435,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_setxattr(dentry, name, value, size, flags);

- if (!btrfs_is_valid_xattr(name))
- return -EOPNOTSUPP;
+ ret = btrfs_is_valid_xattr(name);
+ if (ret)
+ return ret;

if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
return btrfs_set_prop(dentry->d_inode, name,
@@ -430,6 +453,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
int btrfs_removexattr(struct dentry *dentry, const char *name)
{
struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
+ int ret;

/*
* The permission on security.* and system.* is not checked
@@ -446,8 +470,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_removexattr(dentry, name);

- if (!btrfs_is_valid_xattr(name))
- return -EOPNOTSUPP;
+ ret = btrfs_is_valid_xattr(name);
+ if (ret)
+ return ret;

if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
return btrfs_set_prop(dentry->d_inode, name,
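
The xattr change above distinguishes two failure modes that the old boolean
check collapsed: a name in an unknown namespace is still rejected with
-EOPNOTSUPP, while a name that is nothing more than a recognized prefix (for
example "user.") now fails with -EINVAL instead of silently passing the check.
In plain userspace C the same validation looks roughly like this (prefix list
written out literally for the example):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int xattr_name_valid(const char *name)
{
	static const char *prefixes[] = {
		"security.", "system.", "trusted.", "user.", "btrfs.",
	};
	size_t i;

	for (i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
		size_t plen = strlen(prefixes[i]);

		if (strncmp(name, prefixes[i], plen))
			continue;
		/* known namespace, but the name must not be the bare prefix */
		return strlen(name) > plen ? 0 : -EINVAL;
	}
	return -EOPNOTSUPP;
}

int main(void)
{
	printf("%d %d %d\n",
	       xattr_name_valid("user.comment"),	/* 0 */
	       xattr_name_valid("user."),		/* -EINVAL */
	       xattr_name_valid("weird.attr"));		/* -EOPNOTSUPP */
	return 0;
}
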
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 28fe71a2904c..aae7011d99e8 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1865,7 +1865,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
struct inode *inode)
{
struct inode *dir = dentry->d_parent->d_inode;
- struct buffer_head *bh;
+ struct buffer_head *bh = NULL;
struct ext4_dir_entry_2 *de;
struct ext4_dir_entry_tail *t;
struct super_block *sb;
@@ -1889,14 +1889,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
return retval;
if (retval == 1) {
retval = 0;
- return retval;
+ goto out;
}
}

if (is_dx(dir)) {
retval = ext4_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
- return retval;
+ goto out;
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
@@ -1908,14 +1908,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
return PTR_ERR(bh);

retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
- if (retval != -ENOSPC) {
- brelse(bh);
- return retval;
- }
+ if (retval != -ENOSPC)
+ goto out;

if (blocks == 1 && !dx_fallback &&
- EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
- return make_indexed_dir(handle, dentry, inode, bh);
+ EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+ retval = make_indexed_dir(handle, dentry, inode, bh);
+ bh = NULL; /* make_indexed_dir releases bh */
+ goto out;
+ }
brelse(bh);
}
bh = ext4_append(handle, dir, &block);
@@ -1931,6 +1932,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
}

retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
+out:
brelse(bh);
if (retval == 0)
ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index 665ef5a05183..a563ddbc19e6 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -31,7 +31,7 @@
static struct hlist_head nlm_files[FILE_NRHASH];
static DEFINE_MUTEX(nlm_file_mutex);

-#ifdef NFSD_DEBUG
+#ifdef CONFIG_SUNRPC_DEBUG
static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f)
{
u32 *fhp = (u32*)f->data;
diff --git a/fs/namei.c b/fs/namei.c
index c83145af4bfc..caa38a24e1f7 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1591,7 +1591,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,

if (should_follow_link(path->dentry, follow)) {
if (nd->flags & LOOKUP_RCU) {
- if (unlikely(unlazy_walk(nd, path->dentry))) {
+ if (unlikely(nd->path.mnt != path->mnt ||
+ unlazy_walk(nd, path->dentry))) {
err = -ECHILD;
goto out_err;
}
@@ -3047,7 +3048,8 @@ finish_lookup:

if (should_follow_link(path->dentry, !symlink_ok)) {
if (nd->flags & LOOKUP_RCU) {
- if (unlikely(unlazy_walk(nd, path->dentry))) {
+ if (unlikely(nd->path.mnt != path->mnt ||
+ unlazy_walk(nd, path->dentry))) {
error = -ECHILD;
goto out;
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 82ef1405260e..4622ee32a5e2 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -632,14 +632,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
*/
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
- struct mount *p, *res;
- res = p = __lookup_mnt(mnt, dentry);
+ struct mount *p, *res = NULL;
+ p = __lookup_mnt(mnt, dentry);
if (!p)
goto out;
+ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
+ res = p;
hlist_for_each_entry_continue(p, mnt_hash) {
if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
break;
- res = p;
+ if (!(p->mnt.mnt_flags & MNT_UMOUNT))
+ res = p;
}
out:
return res;
@@ -795,10 +798,8 @@ static void __touch_mnt_namespace(struct mnt_namespace *ns)
/*
* vfsmount lock must be held for write
*/
-static void detach_mnt(struct mount *mnt, struct path *old_path)
+static void unhash_mnt(struct mount *mnt)
{
- old_path->dentry = mnt->mnt_mountpoint;
- old_path->mnt = &mnt->mnt_parent->mnt;
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
@@ -811,6 +812,26 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
/*
* vfsmount lock must be held for write
*/
+static void detach_mnt(struct mount *mnt, struct path *old_path)
+{
+ old_path->dentry = mnt->mnt_mountpoint;
+ old_path->mnt = &mnt->mnt_parent->mnt;
+ unhash_mnt(mnt);
+}
+
+/*
+ * vfsmount lock must be held for write
+ */
+static void umount_mnt(struct mount *mnt)
+{
+ /* old mountpoint will be dropped when we can do that */
+ mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
+ unhash_mnt(mnt);
+}
+
+/*
+ * vfsmount lock must be held for write
+ */
void mnt_set_mountpoint(struct mount *mnt,
struct mountpoint *mp,
struct mount *child_mnt)
@@ -1078,6 +1099,13 @@ static void mntput_no_expire(struct mount *mnt)
rcu_read_unlock();

list_del(&mnt->mnt_instance);
+
+ if (unlikely(!list_empty(&mnt->mnt_mounts))) {
+ struct mount *p, *tmp;
+ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
+ umount_mnt(p);
+ }
+ }
unlock_mount_hash();

if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
@@ -1319,49 +1347,63 @@ static inline void namespace_lock(void)
down_write(&namespace_sem);
}

+enum umount_tree_flags {
+ UMOUNT_SYNC = 1,
+ UMOUNT_PROPAGATE = 2,
+ UMOUNT_CONNECTED = 4,
+};
/*
* mount_lock must be held
* namespace_sem must be held for write
- * how = 0 => just this tree, don't propagate
- * how = 1 => propagate; we know that nobody else has reference to any victims
- * how = 2 => lazy umount
*/
-void umount_tree(struct mount *mnt, int how)
+static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
{
- HLIST_HEAD(tmp_list);
+ LIST_HEAD(tmp_list);
struct mount *p;

+ if (how & UMOUNT_PROPAGATE)
+ propagate_mount_unlock(mnt);
+
+ /* Gather the mounts to umount */
for (p = mnt; p; p = next_mnt(p, mnt)) {
- hlist_del_init_rcu(&p->mnt_hash);
- hlist_add_head(&p->mnt_hash, &tmp_list);
+ p->mnt.mnt_flags |= MNT_UMOUNT;
+ list_move(&p->mnt_list, &tmp_list);
}

- hlist_for_each_entry(p, &tmp_list, mnt_hash)
+ /* Hide the mounts from mnt_mounts */
+ list_for_each_entry(p, &tmp_list, mnt_list) {
list_del_init(&p->mnt_child);
+ }

- if (how)
+ /* Add propagated mounts to the tmp_list */
+ if (how & UMOUNT_PROPAGATE)
propagate_umount(&tmp_list);

- while (!hlist_empty(&tmp_list)) {
- p = hlist_entry(tmp_list.first, struct mount, mnt_hash);
- hlist_del_init_rcu(&p->mnt_hash);
+ while (!list_empty(&tmp_list)) {
+ bool disconnect;
+ p = list_first_entry(&tmp_list, struct mount, mnt_list);
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
__touch_mnt_namespace(p->mnt_ns);
p->mnt_ns = NULL;
- if (how < 2)
+ if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;

- pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, &unmounted);
+ disconnect = !(((how & UMOUNT_CONNECTED) &&
+ mnt_has_parent(p) &&
+ (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
+ IS_MNT_LOCKED_AND_LAZY(p));
+
+ pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
+ disconnect ? &unmounted : NULL);
if (mnt_has_parent(p)) {
- hlist_del_init(&p->mnt_mp_list);
- put_mountpoint(p->mnt_mp);
mnt_add_count(p->mnt_parent, -1);
- /* old mountpoint will be dropped when we can do that */
- p->mnt_ex_mountpoint = p->mnt_mountpoint;
- p->mnt_mountpoint = p->mnt.mnt_root;
- p->mnt_parent = p;
- p->mnt_mp = NULL;
+ if (!disconnect) {
+ /* Don't forget about p */
+ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts);
+ } else {
+ umount_mnt(p);
+ }
}
change_mnt_propagation(p, MS_PRIVATE);
}
@@ -1447,14 +1489,14 @@ static int do_umount(struct mount *mnt, int flags)

if (flags & MNT_DETACH) {
if (!list_empty(&mnt->mnt_list))
- umount_tree(mnt, 2);
+ umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
} else {
shrink_submounts(mnt);
retval = -EBUSY;
if (!propagate_mount_busy(mnt, 2)) {
if (!list_empty(&mnt->mnt_list))
- umount_tree(mnt, 1);
+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
retval = 0;
}
}
@@ -1480,13 +1522,20 @@ void __detach_mounts(struct dentry *dentry)

namespace_lock();
mp = lookup_mountpoint(dentry);
- if (!mp)
+ if (IS_ERR_OR_NULL(mp))
goto out_unlock;

lock_mount_hash();
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
- umount_tree(mnt, 2);
+ if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
+ struct mount *p, *tmp;
+ list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) {
+ hlist_add_head(&p->mnt_umount.s_list, &unmounted);
+ umount_mnt(p);
+ }
+ }
+ else umount_tree(mnt, UMOUNT_CONNECTED);
}
unlock_mount_hash();
put_mountpoint(mp);
@@ -1648,7 +1697,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
out:
if (res) {
lock_mount_hash();
- umount_tree(res, 0);
+ umount_tree(res, UMOUNT_SYNC);
unlock_mount_hash();
}
return q;
@@ -1672,7 +1721,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
{
namespace_lock();
lock_mount_hash();
- umount_tree(real_mount(mnt), 0);
+ umount_tree(real_mount(mnt), UMOUNT_SYNC);
unlock_mount_hash();
namespace_unlock();
}
@@ -1855,7 +1904,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
out_cleanup_ids:
while (!hlist_empty(&tree_list)) {
child = hlist_entry(tree_list.first, struct mount, mnt_hash);
- umount_tree(child, 0);
+ umount_tree(child, UMOUNT_SYNC);
}
unlock_mount_hash();
cleanup_group_ids(source_mnt, NULL);
@@ -2035,7 +2084,7 @@ static int do_loopback(struct path *path, const char *old_name,
err = graft_tree(mnt, parent, mp);
if (err) {
lock_mount_hash();
- umount_tree(mnt, 0);
+ umount_tree(mnt, UMOUNT_SYNC);
unlock_mount_hash();
}
out2:
@@ -2406,7 +2455,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
while (!list_empty(&graveyard)) {
mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
touch_mnt_namespace(mnt->mnt_ns);
- umount_tree(mnt, 1);
+ umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
}
unlock_mount_hash();
namespace_unlock();
@@ -2477,7 +2526,7 @@ static void shrink_submounts(struct mount *mnt)
m = list_first_entry(&graveyard, struct mount,
mnt_expire);
touch_mnt_namespace(m->mnt_ns);
- umount_tree(m, 1);
+ umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
}
}
}
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 351be9205bf8..8d129bb7355a 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -128,7 +128,7 @@ nfs41_callback_svc(void *vrqstp)
if (try_to_freeze())
continue;

- prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE);
spin_lock_bh(&serv->sv_cb_lock);
if (!list_empty(&serv->sv_cb_list)) {
req = list_first_entry(&serv->sv_cb_list,
@@ -142,10 +142,10 @@ nfs41_callback_svc(void *vrqstp)
error);
} else {
spin_unlock_bh(&serv->sv_cb_lock);
- /* schedule_timeout to game the hung task watchdog */
- schedule_timeout(60 * HZ);
+ schedule();
finish_wait(&serv->sv_cb_waitq, &wq);
}
+ flush_signals(current);
}
return 0;
}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e907c8cf732e..ab21ef16a11a 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -129,22 +129,25 @@ nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
int i;
ssize_t count;

- WARN_ON_ONCE(hdr->pgio_mirror_idx >= dreq->mirror_count);
-
- count = dreq->mirrors[hdr->pgio_mirror_idx].count;
- if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
- count = hdr->io_start + hdr->good_bytes - dreq->io_start;
- dreq->mirrors[hdr->pgio_mirror_idx].count = count;
- }
-
- /* update the dreq->count by finding the minimum agreed count from all
- * mirrors */
- count = dreq->mirrors[0].count;
+ if (dreq->mirror_count == 1) {
+ dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
+ dreq->count += hdr->good_bytes;
+ } else {
+ /* mirrored writes */
+ count = dreq->mirrors[hdr->pgio_mirror_idx].count;
+ if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
+ count = hdr->io_start + hdr->good_bytes - dreq->io_start;
+ dreq->mirrors[hdr->pgio_mirror_idx].count = count;
+ }
+ /* update the dreq->count by finding the minimum agreed count from all
+ * mirrors */
+ count = dreq->mirrors[0].count;

- for (i = 1; i < dreq->mirror_count; i++)
- count = min(count, dreq->mirrors[i].count);
+ for (i = 1; i < dreq->mirror_count; i++)
+ count = min(count, dreq->mirrors[i].count);

- dreq->count = count;
+ dreq->count = count;
+ }
}
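
The rewritten nfs_direct_good_bytes() above splits the common single-mirror
case from mirrored writes, where only the byte count that every mirror has
acknowledged may be reported back. The mirrored rule reduces to a plain
minimum, for example:

#include <stddef.h>
#include <stdio.h>

/* Bytes that every mirror agrees on is the smallest per-mirror count. */
static long agreed_count(const long *mirror_count, size_t nr_mirrors)
{
	long count = mirror_count[0];
	size_t i;

	for (i = 1; i < nr_mirrors; i++)
		if (mirror_count[i] < count)
			count = mirror_count[i];
	return count;
}

int main(void)
{
	long mirrors[] = { 8192, 4096, 8192 };

	printf("%ld\n", agreed_count(mirrors, 3));	/* 4096 */
	return 0;
}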

/*
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 5c399ec41079..d494ea2d66a9 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -7365,6 +7365,11 @@ nfs4_stat_to_errno(int stat)
.p_name = #proc, \
}

+#define STUB(proc) \
+[NFSPROC4_CLNT_##proc] = { \
+ .p_name = #proc, \
+}
+
struct rpc_procinfo nfs4_procedures[] = {
PROC(READ, enc_read, dec_read),
PROC(WRITE, enc_write, dec_write),
@@ -7417,6 +7422,7 @@ struct rpc_procinfo nfs4_procedures[] = {
PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
+ STUB(GETDEVICELIST),
PROC(BIND_CONN_TO_SESSION,
enc_bind_conn_to_session, dec_bind_conn_to_session),
PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 568ecf0a880f..848d8b1db4ce 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -284,7 +284,7 @@ int nfs_readpage(struct file *file, struct page *page)
dprintk("NFS: nfs_readpage (%p %ld@%lu)\n",
page, PAGE_CACHE_SIZE, page_file_index(page));
nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
- nfs_inc_stats(inode, NFSIOS_READPAGES);
+ nfs_add_stats(inode, NFSIOS_READPAGES, 1);

/*
* Try to flush any pending writes to the file..
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 849ed784d6ac..41b3f1096d69 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -580,7 +580,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, st
int ret;

nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
- nfs_inc_stats(inode, NFSIOS_WRITEPAGES);
+ nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);

nfs_pageio_cond_complete(pgio, page_file_index(page));
ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 92b9d97aff4f..5416968b3fb3 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1030,6 +1030,8 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("NFSD: nfsd4_fallocate: couldn't process stateid!\n");
return status;
}
+ if (!file)
+ return nfserr_bad_stateid;

status = nfsd4_vfs_fallocate(rqstp, &cstate->current_fh, file,
fallocate->falloc_offset,
@@ -1069,6 +1071,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
return status;
}
+ if (!file)
+ return nfserr_bad_stateid;

switch (seek->seek_whence) {
case NFS4_CONTENT_DATA:
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8ba1d888f1e6..ee1cccdb083a 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1139,7 +1139,7 @@ hash_sessionid(struct nfs4_sessionid *sessionid)
return sid->sequence % SESSION_HASH_SIZE;
}

-#ifdef NFSD_DEBUG
+#ifdef CONFIG_SUNRPC_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 5fb7e78169a6..5b33ce1db616 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -3422,6 +3422,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
unsigned long maxcount;
struct xdr_stream *xdr = &resp->xdr;
struct file *file = read->rd_filp;
+ struct svc_fh *fhp = read->rd_fhp;
int starting_len = xdr->buf->len;
struct raparms *ra;
__be32 *p;
@@ -3445,12 +3446,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len));
maxcount = min_t(unsigned long, maxcount, read->rd_length);

- if (!read->rd_filp) {
+ if (read->rd_filp)
+ err = nfsd_permission(resp->rqstp, fhp->fh_export,
+ fhp->fh_dentry,
+ NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
+ else
err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp,
&file, &ra);
- if (err)
- goto err_truncate;
- }
+ if (err)
+ goto err_truncate;

if (file->f_op->splice_read && test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
err = nfsd4_encode_splice_read(resp, read, file, maxcount);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index aa47d75ddb26..9690cb4dd588 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1250,15 +1250,15 @@ static int __init init_nfsd(void)
int retval;
printk(KERN_INFO "Installing knfsd (copyright (C) 1996 okir@xxxxxxxxxxxx).\n");

- retval = register_cld_notifier();
- if (retval)
- return retval;
retval = register_pernet_subsys(&nfsd_net_ops);
if (retval < 0)
- goto out_unregister_notifier;
- retval = nfsd4_init_slabs();
+ return retval;
+ retval = register_cld_notifier();
if (retval)
goto out_unregister_pernet;
+ retval = nfsd4_init_slabs();
+ if (retval)
+ goto out_unregister_notifier;
retval = nfsd4_init_pnfs();
if (retval)
goto out_free_slabs;
@@ -1290,10 +1290,10 @@ out_exit_pnfs:
nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
-out_unregister_pernet:
- unregister_pernet_subsys(&nfsd_net_ops);
out_unregister_notifier:
unregister_cld_notifier();
+out_unregister_pernet:
+ unregister_pernet_subsys(&nfsd_net_ops);
return retval;
}

@@ -1308,8 +1308,8 @@ static void __exit exit_nfsd(void)
nfsd4_exit_pnfs();
nfsd_fault_inject_cleanup();
unregister_filesystem(&nfsd_fs_type);
- unregister_pernet_subsys(&nfsd_net_ops);
unregister_cld_notifier();
+ unregister_pernet_subsys(&nfsd_net_ops);
}

MODULE_AUTHOR("Olaf Kirch <okir@xxxxxxxxxxxx>");
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 565c4da1a9eb..cf980523898b 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -24,7 +24,7 @@
#include "export.h"

#undef ifdebug
-#ifdef NFSD_DEBUG
+#ifdef CONFIG_SUNRPC_DEBUG
# define ifdebug(flag) if (nfsd_debug & NFSDDBG_##flag)
#else
# define ifdebug(flag) if (0)
diff --git a/fs/open.c b/fs/open.c
index 33f9cbf2610b..44a3be145bfe 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -570,6 +570,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
uid = make_kuid(current_user_ns(), user);
gid = make_kgid(current_user_ns(), group);

+retry_deleg:
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
if (!uid_valid(uid))
@@ -586,7 +587,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
if (!S_ISDIR(inode->i_mode))
newattrs.ia_valid |=
ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
-retry_deleg:
mutex_lock(&inode->i_mutex);
error = security_path_chown(path, uid, gid);
if (!error)
diff --git a/fs/pnode.c b/fs/pnode.c
index 260ac8f898a4..6367e1e435c6 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -362,6 +362,46 @@ int propagate_mount_busy(struct mount *mnt, int refcnt)
}

/*
+ * Clear MNT_LOCKED when it can be shown to be safe.
+ *
+ * mount_lock lock must be held for write
+ */
+void propagate_mount_unlock(struct mount *mnt)
+{
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m, *child;
+
+ BUG_ON(parent == mnt);
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
+ if (child)
+ child->mnt.mnt_flags &= ~MNT_LOCKED;
+ }
+}
+
+/*
+ * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
+ */
+static void mark_umount_candidates(struct mount *mnt)
+{
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
+
+ BUG_ON(parent == mnt);
+
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ struct mount *child = __lookup_mnt_last(&m->mnt,
+ mnt->mnt_mountpoint);
+ if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
+ SET_MNT_MARK(child);
+ }
+ }
+}
+
+/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
@@ -378,13 +418,16 @@ static void __propagate_umount(struct mount *mnt)
struct mount *child = __lookup_mnt_last(&m->mnt,
mnt->mnt_mountpoint);
/*
- * umount the child only if the child has no
- * other children
+ * umount the child only if the child has no children
+ * and the child is marked safe to unmount.
*/
- if (child && list_empty(&child->mnt_mounts)) {
+ if (!child || !IS_MNT_MARKED(child))
+ continue;
+ CLEAR_MNT_MARK(child);
+ if (list_empty(&child->mnt_mounts)) {
list_del_init(&child->mnt_child);
- hlist_del_init_rcu(&child->mnt_hash);
- hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
+ child->mnt.mnt_flags |= MNT_UMOUNT;
+ list_move_tail(&child->mnt_list, &mnt->mnt_list);
}
}
}
@@ -396,11 +439,14 @@ static void __propagate_umount(struct mount *mnt)
*
* vfsmount lock must be held for write
*/
-int propagate_umount(struct hlist_head *list)
+int propagate_umount(struct list_head *list)
{
struct mount *mnt;

- hlist_for_each_entry(mnt, list, mnt_hash)
+ list_for_each_entry_reverse(mnt, list, mnt_list)
+ mark_umount_candidates(mnt);
+
+ list_for_each_entry(mnt, list, mnt_list)
__propagate_umount(mnt);
return 0;
}
diff --git a/fs/pnode.h b/fs/pnode.h
index 4a246358b031..7114ce6e6b9e 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -19,6 +19,9 @@
#define IS_MNT_MARKED(m) ((m)->mnt.mnt_flags & MNT_MARKED)
#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
+#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
+#define IS_MNT_LOCKED_AND_LAZY(m) \
+ (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)

#define CL_EXPIRE 0x01
#define CL_SLAVE 0x02
@@ -40,14 +43,14 @@ static inline void set_mnt_shared(struct mount *mnt)
void change_mnt_propagation(struct mount *, int);
int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
struct hlist_head *);
-int propagate_umount(struct hlist_head *);
+int propagate_umount(struct list_head *);
int propagate_mount_busy(struct mount *, int);
+void propagate_mount_unlock(struct mount *);
void mnt_release_group_id(struct mount *);
int get_dominating_id(struct mount *mnt, const struct path *root);
unsigned int mnt_get_count(struct mount *mnt);
void mnt_set_mountpoint(struct mount *, struct mountpoint *,
struct mount *);
-void umount_tree(struct mount *, int);
struct mount *copy_tree(struct mount *, struct dentry *, int);
bool is_path_reachable(struct mount *, struct dentry *,
const struct path *root);
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index b034f1068dfe..0d5852557f1c 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -199,9 +199,29 @@ typedef int s32;
typedef s32 acpi_native_int;

typedef u32 acpi_size;
+
+#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
+
+/*
+ * OSPMs can define this to shrink the size of the structures for a 32-bit
+ * non-PAE environment. The ASL compiler may always define this to generate
+ * 32-bit OSPM compliant tables.
+ */
typedef u32 acpi_io_address;
typedef u32 acpi_physical_address;

+#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
+/*
+ * It is reported that, after some calculations, the physical addresses can
+ * wrap over the 32-bit boundary on 32-bit PAE environment.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=87971
+ */
+typedef u64 acpi_io_address;
+typedef u64 acpi_physical_address;
+
+#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
+
#define ACPI_MAX_PTR ACPI_UINT32_MAX
#define ACPI_SIZE_MAX ACPI_UINT32_MAX

@@ -736,10 +756,6 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_ENABLE 0
#define ACPI_GPE_DISABLE 1
#define ACPI_GPE_CONDITIONAL_ENABLE 2
-#define ACPI_GPE_SAVE_MASK 4
-
-#define ACPI_GPE_ENABLE_SAVE (ACPI_GPE_ENABLE | ACPI_GPE_SAVE_MASK)
-#define ACPI_GPE_DISABLE_SAVE (ACPI_GPE_DISABLE | ACPI_GPE_SAVE_MASK)

/*
* GPE info flags - Per GPE
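
The acpi_physical_address widening above matters on 32-bit PAE systems, where physical addresses can exceed 4 GiB even though pointers stay 32 bits wide, so address arithmetic done in a u32 silently wraps; that is the bugzilla report referenced in the hunk. A minimal standalone C sketch of the wrap, with made-up base and offset values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base32 = 0xFFF00000u;	/* physical base just under 4 GiB */
	uint64_t base64 = 0xFFF00000u;
	uint32_t offset = 0x200000;	/* illustrative 2 MiB table offset */

	/* 32-bit arithmetic wraps to 0x00100000; 64-bit keeps 0x100100000 */
	printf("u32 sum: 0x%08x\n", base32 + offset);
	printf("u64 sum: 0x%llx\n", (unsigned long long)(base64 + offset));
	return 0;
}
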
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index ad74dc51d5b7..ecdf9405dd3a 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -76,6 +76,7 @@
#define ACPI_LARGE_NAMESPACE_NODE
#define ACPI_DATA_TABLE_DISASSEMBLY
#define ACPI_SINGLE_THREADED
+#define ACPI_32BIT_PHYSICAL_ADDRESS
#endif

/* acpi_exec configuration. Multithreaded with full AML debugger */
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
index ae2eb17a1658..a2156090563f 100644
--- a/include/dt-bindings/clock/tegra124-car-common.h
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -297,7 +297,7 @@
#define TEGRA124_CLK_PLL_C4 270
#define TEGRA124_CLK_PLL_DP 271
#define TEGRA124_CLK_PLL_E_MUX 272
-#define TEGRA124_CLK_PLLD_DSI 273
+#define TEGRA124_CLK_PLL_D_DSI_OUT 273
/* 274 */
/* 275 */
/* 276 */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index bbfceb756452..33b52fb0e20f 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -48,7 +48,7 @@ struct bpf_map *bpf_map_get(struct fd f);

/* function argument constraints */
enum bpf_arg_type {
- ARG_ANYTHING = 0, /* any argument is ok */
+ ARG_DONTCARE = 0, /* unused argument in helper function */

/* the following constraints used to prototype
* bpf_map_lookup/update/delete_elem() functions
@@ -62,6 +62,8 @@ enum bpf_arg_type {
*/
ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
+
+ ARG_ANYTHING, /* any (initialized) argument is ok */
};

/* type of values returned from helper functions */
diff --git a/include/linux/mount.h b/include/linux/mount.h
index c2c561dc0114..564beeec5d83 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -61,6 +61,7 @@ struct mnt_namespace;
#define MNT_DOOMED 0x1000000
#define MNT_SYNC_UMOUNT 0x2000000
#define MNT_MARKED 0x4000000
+#define MNT_UMOUNT 0x8000000

struct vfsmount {
struct dentry *mnt_root; /* root of the mounted tree */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a419b65770d6..51348f77e431 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -176,6 +176,14 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);

+/* Notifier for when a task gets migrated to a new CPU */
+struct task_migration_notifier {
+ struct task_struct *task;
+ int from_cpu;
+ int to_cpu;
+};
+extern void register_task_migration_notifier(struct notifier_block *n);
+
extern unsigned long get_parent_ip(unsigned long addr);

extern void dump_cpu_task(int cpu);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f54d6659713a..bdccc4b46f57 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -769,6 +769,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,

struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
@@ -3013,6 +3014,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
*/
#define CHECKSUM_BREAK 76

+/* Unset checksum-complete
+ *
+ * Unsetting checksum-complete can be done when a packet is being modified
+ * (uncompressed, for instance) and the checksum-complete value is
+ * invalidated.
+ */
+static inline void skb_checksum_complete_unset(struct sk_buff *skb)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+}
+
/* Validate (init) checksum based on checksum complete.
*
* Return values:
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 7ee1b5c3b4cb..447fe29b55b4 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -205,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf);
#define USB_MAXINTERFACES 32
#define USB_MAXIADS (USB_MAXINTERFACES/2)

+/*
+ * USB Resume Timer: Every Host controller driver should drive the resume
+ * signalling on the bus for the amount of time defined by this macro.
+ *
+ * That way we will have a 'stable' behavior among all HCDs supported by Linux.
+ *
+ * Note that the USB Specification states we should drive resume for *at least*
+ * 20 ms, but it doesn't give an upper bound. This creates two possible
+ * situations which we want to avoid:
+ *
+ * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
+ * us to fail USB Electrical Tests, thus failing Certification
+ *
+ * (b) Some (many) devices actually need more than 20 ms of resume signalling,
+ * and while we can argue that's against the USB Specification, we don't have
+ * control over which devices a certification laboratory will be using for
+ * certification. If CertLab uses a device which was tested against Windows and
+ * that happens to have relaxed resume signalling rules, we might fall into
+ * situations where we fail interoperability and electrical tests.
+ *
+ * In order to avoid both conditions, we're using a 40 ms resume timeout, which
+ * should cope with both LPJ calibration errors and devices not following every
+ * detail of the USB Specification.
+ */
+#define USB_RESUME_TIMEOUT 40 /* ms */
+
/**
* struct usb_interface_cache - long-term representation of a device interface
* @num_altsetting: number of altsettings defined.
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index d3583d3ee193..dd0f3abde75d 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -602,6 +602,11 @@ struct iscsi_conn {
struct iscsi_session *sess;
/* Pointer to thread_set in use for this conn's threads */
struct iscsi_thread_set *thread_set;
+ int bitmap_id;
+ int rx_thread_active;
+ struct task_struct *rx_thread;
+ int tx_thread_active;
+ struct task_struct *tx_thread;
/* list_head for session connection list */
struct list_head conn_list;
} ____cacheline_aligned;
@@ -871,10 +876,12 @@ struct iscsit_global {
/* Unique identifier used for the authentication daemon */
u32 auth_id;
u32 inactive_ts;
+#define ISCSIT_BITMAP_BITS 262144
/* Thread Set bitmap count */
int ts_bitmap_count;
/* Thread Set bitmap pointer */
unsigned long *ts_bitmap;
+ spinlock_t ts_bitmap_lock;
/* Used for iSCSI discovery session authentication */
struct iscsi_node_acl discovery_acl;
struct iscsi_portal_group *discovery_tpg;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 672150b6aaf5..985ca4c907fe 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -524,7 +524,7 @@ struct se_cmd {
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
u32, enum dma_data_direction);
- sense_reason_t (*transport_complete_callback)(struct se_cmd *);
+ sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);

unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
diff --git a/include/uapi/linux/nfsd/debug.h b/include/uapi/linux/nfsd/debug.h
index 0bf130a1c58d..28ec6c9c421a 100644
--- a/include/uapi/linux/nfsd/debug.h
+++ b/include/uapi/linux/nfsd/debug.h
@@ -12,14 +12,6 @@
#include <linux/sunrpc/debug.h>

/*
- * Enable debugging for nfsd.
- * Requires RPC_DEBUG.
- */
-#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
-# define NFSD_DEBUG 1
-#endif
-
-/*
* knfsd debug flags
*/
#define NFSDDBG_SOCK 0x0001
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index a20e4a3a8b15..847a0a2b399c 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -436,6 +436,12 @@
#define BLENDCON_NEW_8BIT_ALPHA_VALUE (1 << 0)
#define BLENDCON_NEW_4BIT_ALPHA_VALUE (0 << 0)

+/* Display port clock control */
+#define DP_MIE_CLKCON 0x27c
+#define DP_MIE_CLK_DISABLE 0x0
+#define DP_MIE_CLK_DP_ENABLE 0x2
+#define DP_MIE_CLK_MIE_ENABLE 0x3
+
/* Notes on per-window bpp settings
*
* Value Win0 Win1 Win2 Win3 Win 4
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 36508e69e92a..5d8ea3d8a897 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -755,7 +755,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
enum bpf_reg_type expected_type;
int err = 0;

- if (arg_type == ARG_ANYTHING)
+ if (arg_type == ARG_DONTCARE)
return 0;

if (reg->type == NOT_INIT) {
@@ -763,6 +763,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
return -EACCES;
}

+ if (arg_type == ARG_ANYTHING)
+ return 0;
+
if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
arg_type == ARG_PTR_TO_MAP_VALUE) {
expected_type = PTR_TO_STACK;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 227fec36b12a..9a34bd80a745 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -697,6 +697,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
static int ptrace_resume(struct task_struct *child, long request,
unsigned long data)
{
+ bool need_siglock;
+
if (!valid_signal(data))
return -EIO;

@@ -724,8 +726,26 @@ static int ptrace_resume(struct task_struct *child, long request,
user_disable_single_step(child);
}

+ /*
+ * Change ->exit_code and ->state under siglock to avoid the race
+ * with wait_task_stopped() in between; a non-zero ->exit_code will
+ * wrongly look like another report from tracee.
+ *
+ * Note that we need siglock even if ->exit_code == data and/or this
+ * status was not reported yet, the new status must not be cleared by
+ * wait_task_stopped() after resume.
+ *
+ * If data == 0 we do not care if wait_task_stopped() reports the old
+ * status and clears the code too; this can't race with the tracee, it
+ * takes siglock after resume.
+ */
+ need_siglock = data && !thread_group_empty(current);
+ if (need_siglock)
+ spin_lock_irq(&child->sighand->siglock);
child->exit_code = data;
wake_up_state(child, __TASK_TRACED);
+ if (need_siglock)
+ spin_unlock_irq(&child->sighand->siglock);

return 0;
}
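
The need_siglock test above takes ->siglock only when another thread could actually race with the ->exit_code update (data != 0 and the tracing process has other threads that could be sitting in wait()); otherwise the store stays lockless. A rough userspace sketch of that publish-under-lock-only-when-contended pattern, with invented names and pthreads standing in for siglock:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static int exit_code;

static void publish_status(int code, bool have_waiters)
{
	if (have_waiters)
		pthread_mutex_lock(&status_lock);
	exit_code = code;	/* a locked waiter sees code and state together */
	if (have_waiters)
		pthread_mutex_unlock(&status_lock);
}

int main(void)
{
	publish_status(0, false);	/* no possible waiter: skip the lock */
	return 0;
}
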
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 62671f53202a..3d5f6f6d14c2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -996,6 +996,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
rq_clock_skip_update(rq, true);
}

+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
+
+void register_task_migration_notifier(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&task_migration_notifier, n);
+}
+
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
@@ -1026,10 +1033,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);

if (task_cpu(p) != new_cpu) {
+ struct task_migration_notifier tmn;
+
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
+
+ tmn.task = p;
+ tmn.from_cpu = task_cpu(p);
+ tmn.to_cpu = new_cpu;
+
+ atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
}

__set_task_cpu(p, new_cpu);
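
The atomic notifier chain added above fires on every cross-CPU migration of a task. A hedged sketch of a consumer, using only the struct and register function introduced in this hunk plus the standard notifier_block calling convention; the callback and its pr_debug() message are invented and this only builds in-kernel:

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/sched.h>

static int example_migration_cb(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct task_migration_notifier *tmn = data;

	pr_debug("pid %d moved from cpu %d to cpu %d\n",
		 tmn->task->pid, tmn->from_cpu, tmn->to_cpu);
	return NOTIFY_OK;
}

static struct notifier_block example_migration_nb = {
	.notifier_call = example_migration_cb,
};

static int __init example_migration_init(void)
{
	register_task_migration_notifier(&example_migration_nb);
	return 0;
}
late_initcall(example_migration_init);
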
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 3fa8fa6d9403..f670cbb17f5d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -514,7 +514,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
unsigned long flags;
struct rq *rq;

- rq = task_rq_lock(current, &flags);
+ rq = task_rq_lock(p, &flags);

/*
* We need to take care of several possible races here:
@@ -569,7 +569,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
push_dl_task(rq);
#endif
unlock:
- task_rq_unlock(rq, current, &flags);
+ task_rq_unlock(rq, p, &flags);

return HRTIMER_NORESTART;
}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5040d44fe5a3..922048a0f7ea 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2679,7 +2679,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);

static __always_inline int trace_recursive_lock(void)
{
- unsigned int val = this_cpu_read(current_context);
+ unsigned int val = __this_cpu_read(current_context);
int bit;

if (in_interrupt()) {
@@ -2696,18 +2696,17 @@ static __always_inline int trace_recursive_lock(void)
return 1;

val |= (1 << bit);
- this_cpu_write(current_context, val);
+ __this_cpu_write(current_context, val);

return 0;
}

static __always_inline void trace_recursive_unlock(void)
{
- unsigned int val = this_cpu_read(current_context);
+ unsigned int val = __this_cpu_read(current_context);

- val--;
- val &= this_cpu_read(current_context);
- this_cpu_write(current_context, val);
+ val &= val & (val - 1);
+ __this_cpu_write(current_context, val);
}

#else
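
The rewritten unlock above relies on the classic x & (x - 1) identity, which clears the lowest set bit of x, so one recursion bit is dropped in a single plain read-modify-write of the per-cpu word (the extra "val &" in "val &= val & (val - 1)" is redundant but harmless). A standalone demo of the identity, values arbitrary:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int val = 0x6;		/* bits 1 and 2 set: two nested levels */

	val &= val - 1;			/* clears the lowest set bit */
	assert(val == 0x4);
	printf("0x%x\n", val);
	return 0;
}
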
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index db54dda10ccc..a9c10a3cf122 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -565,6 +565,7 @@ static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
char *event = NULL, *sub = NULL, *match;
+ int ret;

/*
* The buf format can be <subsystem>:<event-name>
@@ -590,7 +591,13 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
event = NULL;
}

- return __ftrace_set_clr_event(tr, match, sub, event, set);
+ ret = __ftrace_set_clr_event(tr, match, sub, event, set);
+
+ /* Put back the colon to allow this to be called again */
+ if (buf)
+ *(buf - 1) = ':';
+
+ return ret;
}

/**
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 2d25ad1526bb..b6fce365ef27 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1309,15 +1309,19 @@ void graph_trace_open(struct trace_iterator *iter)
{
/* pid and depth on the last trace processed */
struct fgraph_data *data;
+ gfp_t gfpflags;
int cpu;

iter->private = NULL;

- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ /* We can be called in atomic context via ftrace_dump() */
+ gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
+
+ data = kzalloc(sizeof(*data), gfpflags);
if (!data)
goto out_err;

- data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
+ data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
if (!data->cpu_data)
goto out_err_free;

diff --git a/lib/string.c b/lib/string.c
index ce81aaec3839..a5792019193c 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
void memzero_explicit(void *s, size_t count)
{
memset(s, 0, count);
- OPTIMIZER_HIDE_VAR(s);
+ barrier();
}
EXPORT_SYMBOL(memzero_explicit);
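
Switching memzero_explicit() from OPTIMIZER_HIDE_VAR() to barrier() keeps the compiler from treating the memset() of a soon-dead buffer as a dead store. A userspace sketch of the same idea, assuming GCC/Clang inline-asm syntax rather than the kernel's barrier() macro:

#include <string.h>

static void explicit_zero(void *s, size_t n)
{
	memset(s, 0, n);
	/* full compiler barrier: the zeroed memory counts as observable */
	__asm__ __volatile__("" : : : "memory");
}

int main(void)
{
	char key[32] = "dummy secret";		/* invented sensitive data */

	explicit_zero(key, sizeof(key));	/* should not be optimized away */
	return 0;
}
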

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6817b0350c71..956d4dbe001d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2316,8 +2316,14 @@ static struct page
struct vm_area_struct *vma, unsigned long address,
int node)
{
+ gfp_t flags;
+
VM_BUG_ON_PAGE(*hpage, *hpage);

+ /* Only allocate from the target node */
+ flags = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
+ __GFP_THISNODE;
+
/*
* Before allocating the hugepage, release the mmap_sem read lock.
* The allocation can take potentially a long time if it involves
@@ -2326,8 +2332,7 @@ static struct page
*/
up_read(&mm->mmap_sem);

- *hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
- khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
+ *hpage = alloc_pages_exact_node(node, flags, HPAGE_PMD_ORDER);
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
*hpage = ERR_PTR(-ENOMEM);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c41b2a0ee273..caad3c5a926f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3735,8 +3735,7 @@ retry:
if (!pmd_huge(*pmd))
goto out;
if (pmd_present(*pmd)) {
- page = pte_page(*(pte_t *)pmd) +
- ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
if (flags & FOLL_GET)
get_page(page);
} else {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4721046a134a..de5dc5e12691 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1985,7 +1985,8 @@ retry_cpuset:
nmask = policy_nodemask(gfp, pol);
if (!nmask || node_isset(node, *nmask)) {
mpol_cond_put(pol);
- page = alloc_pages_exact_node(node, gfp, order);
+ page = alloc_pages_exact_node(node,
+ gfp | __GFP_THISNODE, order);
goto out;
}
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 0ee453fad3de..f371cbff6d45 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -651,6 +651,13 @@ static int br_nf_forward_finish(struct sk_buff *skb)
struct net_device *in;

if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
+ int frag_max_size;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ frag_max_size = IPCB(skb)->frag_max_size;
+ BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
+ }
+
in = nf_bridge->physindev;
if (nf_bridge->mask & BRNF_PKT_TYPE) {
skb->pkt_type = PACKET_OTHERHOST;
@@ -710,8 +717,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
nf_bridge->mask |= BRNF_PKT_TYPE;
}

- if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
- return NF_DROP;
+ if (pf == NFPROTO_IPV4) {
+ int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
+
+ if (br_parse_ip_options(skb))
+ return NF_DROP;
+
+ IPCB(skb)->frag_max_size = frag_max;
+ }

/* The physdev module checks on this */
nf_bridge->mask |= BRNF_BRIDGED;
diff --git a/net/core/dev.c b/net/core/dev.c
index 45109b70664e..22a53acdb5bb 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3041,7 +3041,7 @@ static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{
- if (next_cpu != RPS_NO_CPU) {
+ if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
@@ -3146,7 +3146,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* If the desired CPU (where last recvmsg was done) is
* different from current CPU (one in the rx-queue flow
* table entry), switch if one of the following holds:
- * - Current CPU is unset (equal to RPS_NO_CPU).
+ * - Current CPU is unset (>= nr_cpu_ids).
* - Current CPU is offline.
* - The current CPU's queue tail has advanced beyond the
* last packet that was enqueued using this table entry.
@@ -3154,14 +3154,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* have been dequeued, thus preserving in order delivery.
*/
if (unlikely(tcpu != next_cpu) &&
- (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+ (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
}

- if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+ if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
goto done;
@@ -3202,14 +3202,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *rflow;
bool expire = true;
- int cpu;
+ unsigned int cpu;

rcu_read_lock();
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (flow_table && flow_id <= flow_table->mask) {
rflow = &flow_table->flows[flow_id];
cpu = ACCESS_ONCE(rflow->cpu);
- if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+ if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
((int)(per_cpu(softnet_data, cpu).input_queue_head -
rflow->last_qtail) <
(int)(10 * flow_table->mask)))
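
The RPS hunks above drop the RPS_NO_CPU sentinel comparison in favour of an unsigned "cpu < nr_cpu_ids" range check, so any out-of-range value (including an all-ones never-recorded pattern) simply reads as "no CPU recorded". A toy model of the check with an invented nr_cpu_ids:

#include <stdio.h>

int main(void)
{
	unsigned int nr_cpu_ids = 8;			/* invented system size */
	unsigned int recorded[] = { 3, 0xffff, 42 };	/* flow-table cpu fields */
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("cpu %u -> %s\n", recorded[i],
		       recorded[i] < nr_cpu_ids ? "valid" : "unset");
	return 0;
}
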
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 98d45fe72f51..e9f9a15fce4e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -280,13 +280,14 @@ nodata:
EXPORT_SYMBOL(__alloc_skb);

/**
- * build_skb - build a network buffer
+ * __build_skb - build a network buffer
* @data: data buffer provided by caller
- * @frag_size: size of fragment, or 0 if head was kmalloced
+ * @frag_size: size of data, or 0 if head was kmalloced
*
* Allocate a new &sk_buff. Caller provides space holding head and
* skb_shared_info. @data must have been allocated by kmalloc() only if
- * @frag_size is 0, otherwise data should come from the page allocator.
+ * @frag_size is 0, otherwise data should come from the page allocator
+ * or vmalloc()
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
* Notes :
@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
* before giving packet to stack.
* RX rings only contains data buffers, not full skbs.
*/
-struct sk_buff *build_skb(void *data, unsigned int frag_size)
+struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct skb_shared_info *shinfo;
struct sk_buff *skb;
@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)

memset(skb, 0, offsetof(struct sk_buff, tail));
skb->truesize = SKB_TRUESIZE(size);
- skb->head_frag = frag_size != 0;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)

return skb;
}
+
+/* build_skb() is a wrapper over __build_skb() that specifically
+ * takes care of skb->head and skb->pfmemalloc
+ * This means that if @frag_size is not zero, then @data must be backed
+ * by a page fragment, not kmalloc() or vmalloc()
+ */
+struct sk_buff *build_skb(void *data, unsigned int frag_size)
+{
+ struct sk_buff *skb = __build_skb(data, frag_size);
+
+ if (skb && frag_size) {
+ skb->head_frag = 1;
+ if (virt_to_head_page(data)->pfmemalloc)
+ skb->pfmemalloc = 1;
+ }
+ return skb;
+}
EXPORT_SYMBOL(build_skb);

struct netdev_alloc_cache {
@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
gfp_t gfp = gfp_mask;

if (order) {
- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+ gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+ __GFP_NOMEMALLOC;
page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
nc->frag.size = PAGE_SIZE << (page ? order : 0);
}
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index d9bc28ac5d1b..53bd53fbbee0 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -82,6 +82,9 @@ int ip_forward(struct sk_buff *skb)
if (skb->pkt_type != PACKET_HOST)
goto drop;

+ if (unlikely(skb->sk))
+ goto drop;
+
if (skb_warn_if_lro(skb))
goto drop;

diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d520492ba698..9d48dc427a5a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2751,39 +2751,65 @@ begin_fwd:
}
}

-/* Send a fin. The caller locks the socket for us. This cannot be
- * allowed to fail queueing a FIN frame under any circumstances.
+/* We allow to exceed memory limits for FIN packets to expedite
+ * connection tear down and (memory) recovery.
+ * Otherwise tcp_send_fin() could be tempted to either delay FIN
+ * or even be forced to close flow without any FIN.
+ */
+static void sk_forced_wmem_schedule(struct sock *sk, int size)
+{
+ int amt, status;
+
+ if (size <= sk->sk_forward_alloc)
+ return;
+ amt = sk_mem_pages(size);
+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+ sk_memory_allocated_add(sk, amt, &status);
+}
+
+/* Send a FIN. The caller locks the socket for us.
+ * We should try to send a FIN packet really hard, but eventually give up.
*/
void tcp_send_fin(struct sock *sk)
{
+ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb = tcp_write_queue_tail(sk);
- int mss_now;

- /* Optimization, tack on the FIN if we have a queue of
- * unsent frames. But be careful about outgoing SACKS
- * and IP options.
+ /* Optimization, tack on the FIN if we have one skb in write queue and
+ * this skb was not yet sent, or we are under memory pressure.
+ * Note: in the latter case, FIN packet will be sent after a timeout,
+ * as TCP stack thinks it has already been transmitted.
*/
- mss_now = tcp_current_mss(sk);
-
- if (tcp_send_head(sk) != NULL) {
- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
- TCP_SKB_CB(skb)->end_seq++;
+ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+coalesce:
+ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
+ TCP_SKB_CB(tskb)->end_seq++;
tp->write_seq++;
+ if (!tcp_send_head(sk)) {
+ /* This means tskb was already sent.
+ * Pretend we included the FIN on previous transmit.
+ * We need to set tp->snd_nxt to the value it would have
+ * if FIN had been sent. This is because retransmit path
+ * does not change tp->snd_nxt.
+ */
+ tp->snd_nxt++;
+ return;
+ }
} else {
- /* Socket is locked, keep trying until memory is available. */
- for (;;) {
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
- if (skb)
- break;
- yield();
+ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
+ if (unlikely(!skb)) {
+ if (tskb)
+ goto coalesce;
+ return;
}
+ skb_reserve(skb, MAX_TCP_HEADER);
+ sk_forced_wmem_schedule(sk, skb->truesize);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
TCPHDR_ACK | TCPHDR_FIN);
tcp_queue_skb(sk, skb);
}
- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
}

/* We get here when a process closes a file descriptor (either due to
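
sk_forced_wmem_schedule() above lets the FIN skb be charged to the socket even when normal write-memory accounting would refuse, by bumping sk_forward_alloc in whole SK_MEM_QUANTUM (page-sized) units. A rough userspace model of that rounding, with illustrative constants:

#include <stdio.h>

#define QUANTUM 4096	/* stand-in for SK_MEM_QUANTUM (one page) */

static int mem_pages(int size)
{
	/* round up to whole quanta, as sk_mem_pages() does */
	return (size + QUANTUM - 1) / QUANTUM;
}

int main(void)
{
	int truesize = 2304;	/* invented truesize of a small skb */
	int forward_alloc = 0;

	forward_alloc += mem_pages(truesize) * QUANTUM;
	printf("charged %d bytes for a %d-byte skb\n", forward_alloc, truesize);
	return 0;
}
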
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 142f66aece18..0ca013d66492 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2260,7 +2260,7 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
else
ssid_len = ssid[1];

- ieee80211_send_probe_req(sdata, sdata->vif.addr, NULL,
+ ieee80211_send_probe_req(sdata, sdata->vif.addr, dst,
ssid + 2, ssid_len, NULL,
0, (u32) -1, true, 0,
ifmgd->associated->channel, false);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 05919bf3f670..d1d7a8166f46 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1616,13 +1616,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
if (data == NULL)
return NULL;

- skb = build_skb(data, size);
+ skb = __build_skb(data, size);
if (skb == NULL)
vfree(data);
- else {
- skb->head_frag = 0;
+ else
skb->destructor = netlink_skb_destructor;
- }

return skb;
}
diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
index 2ca9f2e93139..53745f4c2bf5 100644
--- a/sound/pci/emu10k1/emuproc.c
+++ b/sound/pci/emu10k1/emuproc.c
@@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
struct snd_emu10k1 *emu = entry->private_data;
u32 value;
u32 value2;
- unsigned long flags;
u32 rate;

if (emu->card_capabilities->emu_model) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x38, &value);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
if ((value & 0x1) == 0) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x2a, &value);
snd_emu1010_fpga_read(emu, 0x2b, &value2);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
rate = 0x1770000 / (((value << 5) | value2)+1);
snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
} else {
snd_iprintf(buffer, "ADAT Unlocked\n");
}
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x20, &value);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
if ((value & 0x4) == 0) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, 0x28, &value);
snd_emu1010_fpga_read(emu, 0x29, &value2);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
rate = 0x1770000 / (((value << 5) | value2)+1);
snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
} else {
@@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
{
struct snd_emu10k1 *emu = entry->private_data;
u32 value;
- unsigned long flags;
int i;
snd_iprintf(buffer, "EMU1010 Registers:\n\n");

for(i = 0; i < 0x40; i+=1) {
- spin_lock_irqsave(&emu->emu_lock, flags);
snd_emu1010_fpga_read(emu, i, &value);
- spin_unlock_irqrestore(&emu->emu_lock, flags);
snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
}
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f9d12c0a7e5a..2fd490b1764b 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5047,12 +5047,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
@@ -5142,6 +5144,16 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
{0x1b, 0x411111f0}, \
{0x1e, 0x411111f0}

+#define ALC256_STANDARD_PINS \
+ {0x12, 0x90a60140}, \
+ {0x14, 0x90170110}, \
+ {0x19, 0x411111f0}, \
+ {0x1a, 0x411111f0}, \
+ {0x1b, 0x411111f0}, \
+ {0x1d, 0x40700001}, \
+ {0x1e, 0x411111f0}, \
+ {0x21, 0x02211020}
+
#define ALC282_STANDARD_PINS \
{0x14, 0x90170110}, \
{0x18, 0x411111f0}, \
@@ -5235,15 +5247,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x1d, 0x40700001},
{0x21, 0x02211050}),
SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
- {0x12, 0x90a60140},
- {0x13, 0x40000000},
- {0x14, 0x90170110},
- {0x19, 0x411111f0},
- {0x1a, 0x411111f0},
- {0x1b, 0x411111f0},
- {0x1d, 0x40700001},
- {0x1e, 0x411111f0},
- {0x21, 0x02211020}),
+ ALC256_STANDARD_PINS,
+ {0x13, 0x40000000}),
+ SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC256_STANDARD_PINS,
+ {0x13, 0x411111f0}),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
{0x12, 0x90a60130},
{0x13, 0x40000000},
@@ -5563,6 +5571,8 @@ static int patch_alc269(struct hda_codec *codec)
break;
case 0x10ec0256:
spec->codec_variant = ALC269_TYPE_ALC256;
+ spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
+ alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
break;
}

@@ -5576,8 +5586,8 @@ static int patch_alc269(struct hda_codec *codec)
if (err < 0)
goto error;

- if (!spec->gen.no_analog && spec->gen.beep_nid)
- set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid)
+ set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);

codec->patch_ops = alc_patch_ops;
#ifdef CONFIG_PM
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 7d3a6accaf9a..e770ee6f36da 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -561,10 +561,10 @@ static int cs4271_codec_probe(struct snd_soc_codec *codec)
if (gpio_is_valid(cs4271->gpio_nreset)) {
/* Reset codec */
gpio_direction_output(cs4271->gpio_nreset, 0);
- udelay(1);
+ mdelay(1);
gpio_set_value(cs4271->gpio_nreset, 1);
/* Give the codec time to wake up */
- udelay(1);
+ mdelay(1);
}

ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 474cae82a874..8c09e3ffdcaa 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -304,9 +304,9 @@ static const struct soc_enum pcm512x_veds =
static const struct snd_kcontrol_new pcm512x_controls[] = {
SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
-SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
+SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
-SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
+SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
PCM512x_RQMR_SHIFT, 1, 1),
@@ -576,8 +576,8 @@ static int pcm512x_find_pll_coeff(struct snd_soc_dai *dai,

/* pllin_rate / P (or here, den) cannot be greater than 20 MHz */
if (pllin_rate / den > 20000000 && num < 8) {
- num *= 20000000 / (pllin_rate / den);
- den *= 20000000 / (pllin_rate / den);
+ num *= DIV_ROUND_UP(pllin_rate / den, 20000000);
+ den *= DIV_ROUND_UP(pllin_rate / den, 20000000);
}
dev_dbg(dev, "num / den = %lu / %lu\n", num, den);

diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
index 31bb4801a005..9e71c768966f 100644
--- a/sound/soc/codecs/wm8741.c
+++ b/sound/soc/codecs/wm8741.c
@@ -123,7 +123,7 @@ static struct {
};

static const unsigned int rates_11289[] = {
- 44100, 88235,
+ 44100, 88200,
};

static const struct snd_pcm_hw_constraint_list constraints_11289 = {
@@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = {
};

static const unsigned int rates_16934[] = {
- 44100, 88235,
+ 44100, 88200,
};

static const struct snd_pcm_hw_constraint_list constraints_16934 = {
@@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = {
};

static const unsigned int rates_22579[] = {
- 44100, 88235, 1764000
+ 44100, 88200, 176400
};

static const struct snd_pcm_hw_constraint_list constraints_22579 = {
@@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = {
};

static const unsigned int rates_36864[] = {
- 48000, 96000, 19200
+ 48000, 96000, 192000
};

static const struct snd_pcm_hw_constraint_list constraints_36864 = {
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index b6bb5947a8a8..8c2b9be80a9a 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -425,18 +425,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
return ret;
}

-static int davinci_evm_remove(struct platform_device *pdev)
-{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
-
- snd_soc_unregister_card(card);
-
- return 0;
-}
-
static struct platform_driver davinci_evm_driver = {
.probe = davinci_evm_probe,
- .remove = davinci_evm_remove,
.driver = {
.name = "davinci_evm",
.pm = &snd_soc_pm_ops,
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 9a28365126f9..32631a86078b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1115,6 +1115,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
{
/* devices which do not support reading the sample rate. */
switch (chip->usb_id) {
+ case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema */
case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
return true;
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
index dcc665228c71..deb3569ab004 100644
--- a/tools/lib/traceevent/kbuffer-parse.c
+++ b/tools/lib/traceevent/kbuffer-parse.c
@@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
switch (type_len) {
case KBUFFER_TYPE_PADDING:
*length = read_4(kbuf, data);
- data += *length;
break;

case KBUFFER_TYPE_TIME_EXTEND:
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index cc224080b525..0884d31ae12e 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -651,7 +651,7 @@ ifeq (${IS_64_BIT}, 1)
NO_PERF_READ_VDSO32 := 1
endif
endif
- ifneq (${IS_X86_64}, 1)
+ ifneq ($(ARCH), x86)
NO_PERF_READ_VDSOX32 := 1
endif
ifndef NO_PERF_READ_VDSOX32
@@ -699,7 +699,7 @@ sysconfdir = $(prefix)/etc
ETC_PERFCONFIG = etc/perfconfig
endif
ifndef lib
-ifeq ($(IS_X86_64),1)
+ifeq ($(ARCH)$(IS_64_BIT), x861)
lib = lib64
else
lib = lib
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index 75709d2b17b4..bff85324f799 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -5,7 +5,7 @@ include config/Makefile.arch

# FIXME looks like x86 is the only arch running tests ;-)
# we need some IS_(32/64) flag to make this generic
-ifeq ($(IS_X86_64),1)
+ifeq ($(ARCH)$(IS_64_BIT), x861)
lib = lib64
else
lib = lib
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
index 6da965bdbc2c..85b523885f9d 100644
--- a/tools/perf/util/cloexec.c
+++ b/tools/perf/util/cloexec.c
@@ -7,6 +7,12 @@

static unsigned long flag = PERF_FLAG_FD_CLOEXEC;

+int __weak sched_getcpu(void)
+{
+ errno = ENOSYS;
+ return -1;
+}
+
static int perf_flag_probe(void)
{
/* use 'safest' configuration as used in perf_evsel__fallback() */
diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
index 94a5a7d829d5..68888c29b04a 100644
--- a/tools/perf/util/cloexec.h
+++ b/tools/perf/util/cloexec.h
@@ -3,4 +3,10 @@

unsigned long perf_event_open_cloexec_flag(void);

+#ifdef __GLIBC_PREREQ
+#if !__GLIBC_PREREQ(2, 6)
+extern int sched_getcpu(void) __THROW;
+#endif
+#endif
+
#endif /* __PERF_CLOEXEC_H */
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 33b7a2aef713..9bdf007d243a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -74,6 +74,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
return GELF_ST_TYPE(sym->st_info);
}

+#ifndef STT_GNU_IFUNC
+#define STT_GNU_IFUNC 10
+#endif
+
static inline int elf_sym__is_function(const GElf_Sym *sym)
{
return (elf_sym__type(sym) == STT_FUNC ||
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index d1b3a361e526..4039854560d0 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -1,8 +1,12 @@
CC = $(CROSS_COMPILE)gcc
-BUILD_OUTPUT := $(PWD)
+BUILD_OUTPUT := $(CURDIR)
PREFIX := /usr
DESTDIR :=

+ifeq ("$(origin O)", "command line")
+ BUILD_OUTPUT := $(O)
+endif
+
turbostat : turbostat.c
CFLAGS += -Wall
CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index c9f60f524588..e5abe7cb2990 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1371,6 +1371,9 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
goto out;
}

+ if (irq_num >= kvm->arch.vgic.nr_irqs)
+ return -EINVAL;
+
vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
if (vcpu_id >= 0) {
/* kick the specified vcpu */
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index cc6a25d95fbf..f8f3f5fe53d3 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1653,8 +1653,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
ghc->generation = slots->generation;
ghc->len = len;
ghc->memslot = gfn_to_memslot(kvm, start_gfn);
- ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
- if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
+ ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
+ if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
ghc->hva += offset;
} else {
/*