Re: [3.8.y.z extended stable] Linux 3.8.13.18
From: Kamal Mostafa
Date: Thu Feb 13 2014 - 12:45:34 EST
diff --git a/Documentation/devicetree/bindings/ata/marvell.txt b/Documentation/devicetree/bindings/ata/marvell.txt
index b5cdd20..1c83516 100644
--- a/Documentation/devicetree/bindings/ata/marvell.txt
+++ b/Documentation/devicetree/bindings/ata/marvell.txt
@@ -1,7 +1,7 @@
* Marvell Orion SATA
Required Properties:
-- compatibility : "marvell,orion-sata"
+- compatibility : "marvell,orion-sata" or "marvell,armada-370-sata"
- reg : Address range of controller
- interrupts : Interrupt controller is using
- nr-ports : Number of SATA ports in use.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index fd8d0d5..954eab8 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1372,8 +1372,8 @@ may allocate from based on an estimation of its current memory and swap use.
For example, if a task is using all allowed memory, its badness score will be
1000. If it is using half of its allowed memory, its score will be 500.
-There is an additional factor included in the badness score: root
-processes are given 3% extra memory over other tasks.
+There is an additional factor included in the badness score: the current memory
+and swap usage is discounted by 3% for root processes.
The amount of "allowed" memory depends on the context in which the oom killer
was called. If it is due to the memory assigned to the allocating task's cpuset
diff --git a/Makefile b/Makefile
index 54a09a9..0f1d495 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 8
SUBLEVEL = 13
-EXTRAVERSION = .17
+EXTRAVERSION = .18
NAME = Remoralised Urchins Update
# *DOCUMENTATION*
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index 40736da..1d2ef5a 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -373,6 +373,11 @@ csum_partial_copy_from_user(const void __user *src, void *dst, int len,
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
- return csum_partial_copy_from_user((__force const void __user *)src,
- dst, len, sum, NULL);
+ __wsum checksum;
+ mm_segment_t oldfs = get_fs();
+ set_fs(KERNEL_DS);
+ checksum = csum_partial_copy_from_user((__force const void __user *)src,
+ dst, len, sum, NULL);
+ set_fs(oldfs);
+ return checksum;
}
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 269b6b0..146c850 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -79,8 +79,8 @@
};
sata@d00a0000 {
- compatible = "marvell,orion-sata";
- reg = <0xd00a0000 0x2400>;
+ compatible = "marvell,armada-370-sata";
+ reg = <0xd00a0000 0x5000>;
interrupts = <55>;
clocks = <&gateclk 15>, <&gateclk 30>;
clock-names = "0", "1";
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index a34f1e2..373656a 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -630,10 +630,10 @@ load_ind:
emit(ARM_MUL(r_A, r_A, r_X), ctx);
break;
case BPF_S_ALU_DIV_K:
- /* current k == reciprocal_value(userspace k) */
+ if (k == 1)
+ break;
emit_mov_i(r_scratch, k, ctx);
- /* A = top 32 bits of the product */
- emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
+ emit_udiv(r_A, r_A, r_scratch, ctx);
break;
case BPF_S_ALU_DIV_X:
update_on_xread(ctx);
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 92c6b00..b4437e8 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -788,6 +788,9 @@ static void remove_cache_dir(struct cache_dir *cache_dir)
{
remove_index_dirs(cache_dir);
+ /* Remove cache dir from sysfs */
+ kobject_del(cache_dir->kobj);
+
kobject_put(cache_dir->kobj);
kfree(cache_dir);
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index e834f1e..8a2284c 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -209,10 +209,11 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
}
PPC_DIVWU(r_A, r_A, r_X);
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
PPC_LI32(r_scratch1, K);
- /* Top 32 bits of 64bit result -> A */
- PPC_MULHWU(r_A, r_A, r_scratch1);
+ PPC_DIVWU(r_A, r_A, r_scratch1);
break;
case BPF_S_ALU_AND_X:
ctx->seen |= SEEN_XREG;
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a390687..d3fc59e 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -106,7 +106,7 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
{
- int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
+ int code = kvm_s390_get_base_disp_rs(vcpu) & 0xffff;
trace_kvm_s390_handle_diag(vcpu, code);
switch (code) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 74e64f8..e1fc2aa 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -755,7 +755,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
* KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
* KVM_S390_STORE_STATUS_PREFIXED: -> prefix
*/
-int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
unsigned char archmode = 1;
int prefix;
@@ -773,14 +773,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
} else
prefix = 0;
- /*
- * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
- * copying in vcpu load/put. Lets update our copies before we save
- * it into the save area
- */
- save_fp_regs(&vcpu->arch.guest_fpregs);
- save_access_regs(vcpu->run->s.regs.acrs);
-
if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
vcpu->arch.guest_fpregs.fprs, 128, prefix))
return -EFAULT;
@@ -825,6 +817,19 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
return 0;
}
+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ /*
+ * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
+ * copying in vcpu load/put. Lets update our copies before we save
+ * it into the save area
+ */
+ save_fp_regs(&vcpu->arch.guest_fpregs);
+ save_access_regs(vcpu->run->s.regs.acrs);
+
+ return kvm_s390_store_status_unloaded(vcpu, addr);
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index d75bc5e..59a03d5 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -85,8 +85,8 @@ int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
/* implemented in kvm-s390.c */
-int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
- unsigned long addr);
+int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 566ddf6..7b35c77 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -130,6 +130,7 @@ unlock:
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
struct kvm_s390_interrupt_info *inti;
+ int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
if (!inti)
@@ -137,8 +138,12 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
inti->type = KVM_S390_SIGP_STOP;
spin_lock_bh(&li->lock);
- if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED))
+ if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
+ kfree(inti);
+ if ((action & ACTION_STORE_ON_STOP) != 0)
+ rc = -ESHUTDOWN;
goto out;
+ }
list_add_tail(&inti->list, &li->list);
atomic_set(&li->active, 1);
atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
@@ -148,7 +153,7 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
out:
spin_unlock_bh(&li->lock);
- return SIGP_CC_ORDER_CODE_ACCEPTED;
+ return rc;
}
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
@@ -172,6 +177,16 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
unlock:
spin_unlock(&fi->lock);
VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
+
+ if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
+ /* If the CPU has already been stopped, we still have
+ * to save the status when doing stop-and-store. This
+ * has to be done after unlocking all spinlocks. */
+ struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
+ rc = kvm_s390_store_status_unloaded(dst_vcpu,
+ KVM_S390_STORE_STATUS_NOADDR);
+ }
+
return rc;
}
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index bb28441..5e92f47 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -332,14 +332,16 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* dr %r4,%r12 */
- EMIT2(0x1d4c);
+ /* dlr %r4,%r12 */
+ EMIT4(0xb997004c);
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K) */
- /* m %r4,<d(K)>(%r13) */
- EMIT4_DISP(0x5c40d000, EMIT_CONST(K));
- /* lr %r5,%r4 */
- EMIT2(0x1854);
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
+ /* lhi %r4,0 */
+ EMIT4(0xa7480000);
+ /* dl %r4,<d(K)>(%r13) */
+ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
break;
case BPF_S_ALU_MOD_X: /* A %= X */
jit->seen |= SEEN_XREG | SEEN_RET0;
@@ -349,16 +351,21 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_PCREL(0xa7840000, (jit->ret0_ip - jit->prg));
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* dr %r4,%r12 */
- EMIT2(0x1d4c);
+ /* dlr %r4,%r12 */
+ EMIT4(0xb997004c);
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
case BPF_S_ALU_MOD_K: /* A %= K */
+ if (K == 1) {
+ /* lhi %r5,0 */
+ EMIT4(0xa7580000);
+ break;
+ }
/* lhi %r4,0 */
EMIT4(0xa7480000);
- /* d %r4,<d(K)>(%r13) */
- EMIT4_DISP(0x5d40d000, EMIT_CONST(K));
+ /* dl %r4,<d(K)>(%r13) */
+ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K));
/* lr %r5,%r4 */
EMIT2(0x1854);
break;
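
The s390 hunk above switches the JIT from the signed divide instructions (dr/d) to their logical, i.e. unsigned, forms (dlr/dl), because BPF's A and X accumulators are unsigned 32-bit values. A minimal user-space sketch of why that matters once the divisor has its top bit set — the instruction encodings are the patch's, only the arithmetic below is illustrative:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t a = 0xFFFFFFFEu;	/* dividend, zero-extended by the JIT */
		uint32_t x = 0x80000000u;	/* divisor with the sign bit set */

		/* BPF semantics: plain unsigned 32-bit division */
		uint32_t want = a / x;					/* 1 */

		/* a signed divide treats the divisor as -2147483648 */
		int64_t got = (int64_t)(uint64_t)a / (int32_t)x;	/* -1 */

		printf("unsigned: %u, signed: %lld (low 32 bits 0x%08x)\n",
		       want, (long long)got, (uint32_t)got);
		return 0;
	}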
diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
index 38b3139..adad46e 100644
--- a/arch/sh/kernel/kgdb.c
+++ b/arch/sh/kernel/kgdb.c
@@ -13,6 +13,7 @@
#include <linux/kdebug.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index 3109ca6..d844747 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -497,9 +497,20 @@ void bpf_jit_compile(struct sk_filter *fp)
case BPF_S_ALU_MUL_K: /* A *= K */
emit_alu_K(MUL, K);
break;
- case BPF_S_ALU_DIV_K: /* A /= K */
- emit_alu_K(MUL, K);
- emit_read_y(r_A);
+ case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/
+ if (K == 1)
+ break;
+ emit_write_y(G0);
+#ifdef CONFIG_SPARC32
+ /* The Sparc v8 architecture requires
+ * three instructions between a %y
+ * register write and the first use.
+ */
+ emit_nop();
+ emit_nop();
+ emit_nop();
+#endif
+ emit_alu_K(DIV, K);
break;
case BPF_S_ALU_DIV_X: /* A /= X; */
emit_cmpi(r_X, 0);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 11300d2..98b511d 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -37,6 +37,7 @@
#include "irq.h"
#include "i8254.h"
+#include "x86.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -350,6 +351,23 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
atomic_set(&ps->pending, 0);
ps->irq_ack = 1;
+ /*
+ * Do not allow the guest to program periodic timers with small
+ * interval, since the hrtimers are not throttled by the host
+ * scheduler.
+ */
+ if (ps->is_periodic) {
+ s64 min_period = min_timer_period_us * 1000LL;
+
+ if (ps->period < min_period) {
+ pr_info_ratelimited(
+ "kvm: requested %lld ns "
+ "i8254 timer period limited to %lld ns\n",
+ ps->period, min_period);
+ ps->period = min_period;
+ }
+ }
+
hrtimer_start(&ps->timer, ktime_add_ns(ktime_get(), interval),
HRTIMER_MODE_ABS);
}
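
Worked example for the clamp above: with the default from the arch/x86/kvm/x86.c hunk further down (min_timer_period_us = 500), min_period is 500,000 ns, so a guest that programs the PIT for a 100 us period is limited from roughly 10,000 host hrtimer firings per second to at most about 2,000, and the rate-limited message records the adjustment.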
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 8da302f..6e27290 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -71,9 +71,6 @@
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
-static unsigned int min_timer_period_us = 500;
-module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
-
static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
*((u32 *) (apic->regs + reg_off)) = val;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c22ab85..bf8c370 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -94,6 +94,9 @@ EXPORT_SYMBOL_GPL(kvm_x86_ops);
static bool ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
+unsigned int min_timer_period_us = 500;
+module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
+
bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index e224f7a..3186542 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -124,5 +124,7 @@ int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
extern u64 host_xcr0;
+extern unsigned int min_timer_period_us;
+
extern struct static_key kvm_no_apic_vcpu;
#endif
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index d11a470..5e9c43f 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -303,15 +303,21 @@ void bpf_jit_compile(struct sk_filter *fp)
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
case BPF_S_ALU_MOD_K: /* A %= K; */
+ if (K == 1) {
+ CLEAR_A();
+ break;
+ }
EMIT2(0x31, 0xd2); /* xor %edx,%edx */
EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
EMIT2(0xf7, 0xf1); /* div %ecx */
EMIT2(0x89, 0xd0); /* mov %edx,%eax */
break;
- case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
- EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
- EMIT(K, 4);
- EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
+ case BPF_S_ALU_DIV_K: /* A /= K */
+ if (K == 1)
+ break;
+ EMIT2(0x31, 0xd2); /* xor %edx,%edx */
+ EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
+ EMIT2(0xf7, 0xf1); /* div %ecx */
break;
case BPF_S_ALU_AND_X:
seen |= SEEN_XREG;
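
Across the JITs in this release, BPF_S_ALU_DIV_K stops multiplying by a precomputed reciprocal and emits a real unsigned divide, with K == 1 handled as a no-op. A user-space sketch of why the old lowering could disagree with the interpreter; reciprocal_value()/reciprocal_divide() below reproduce the semantics of that era's include/linux/reciprocal_div.h and are part of this illustration, not of the patch:

	#include <stdint.h>
	#include <stdio.h>

	/* ceil(2^32 / k); note it truncates to 0 for k == 1 */
	static uint32_t reciprocal_value(uint32_t k)
	{
		uint64_t val = (1ULL << 32) + (k - 1);
		return (uint32_t)(val / k);
	}

	/* high 32 bits of the 64-bit product -- what the old JIT emitted */
	static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
	{
		return (uint32_t)(((uint64_t)a * r) >> 32);
	}

	int main(void)
	{
		uint32_t a = 0x80000000u, k = 3;

		printf("true division:  %u\n", a / k);	/* 715827882 */
		printf("old JIT result: %u\n",
		       reciprocal_divide(a, reciprocal_value(k)));	/* 715827883 */

		/* reciprocal_value(1) == 0, so A /= 1 used to yield 0 --
		 * hence the explicit "if (K == 1) break;" fast path above. */
		printf("A=42, K=1:      %u\n",
		       reciprocal_divide(42, reciprocal_value(1)));	/* 0 */
		return 0;
	}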
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 2a5d329..b826ce8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -428,7 +428,7 @@ void __init efi_reserve_boot_services(void)
* - Not within any part of the kernel
* - Not the bios reserved area
*/
- if ((start+size >= virt_to_phys(_text)
+ if ((start + size > virt_to_phys(_text)
&& start <= virt_to_phys(_end)) ||
!e820_all_mapped(start, start+size, E820_RAM) ||
memblock_is_region_reserved(start, size)) {
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 0a78524..ab84ac1 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -69,6 +69,80 @@ static int check_platform_magic(void)
return 0;
}
+bool xen_has_pv_devices()
+{
+ if (!xen_domain())
+ return false;
+
+ /* PV domains always have them. */
+ if (xen_pv_domain())
+ return true;
+
+ /* And user has xen_platform_pci=0 set in guest config as
+ * driver did not modify the value. */
+ if (xen_platform_pci_unplug == 0)
+ return false;
+
+ if (xen_platform_pci_unplug & XEN_UNPLUG_NEVER)
+ return false;
+
+ if (xen_platform_pci_unplug & XEN_UNPLUG_ALL)
+ return true;
+
+ /* This is an odd one - we are going to run legacy
+ * and PV drivers at the same time. */
+ if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
+ return true;
+
+ /* And the caller has to follow with xen_pv_{disk,nic}_devices
+ * to be certain which driver can load. */
+ return false;
+}
+EXPORT_SYMBOL_GPL(xen_has_pv_devices);
+
+static bool __xen_has_pv_device(int state)
+{
+ /* HVM domains might or might not */
+ if (xen_hvm_domain() && (xen_platform_pci_unplug & state))
+ return true;
+
+ return xen_has_pv_devices();
+}
+
+bool xen_has_pv_nic_devices(void)
+{
+ return __xen_has_pv_device(XEN_UNPLUG_ALL_NICS | XEN_UNPLUG_ALL);
+}
+EXPORT_SYMBOL_GPL(xen_has_pv_nic_devices);
+
+bool xen_has_pv_disk_devices(void)
+{
+ return __xen_has_pv_device(XEN_UNPLUG_ALL_IDE_DISKS |
+ XEN_UNPLUG_AUX_IDE_DISKS | XEN_UNPLUG_ALL);
+}
+EXPORT_SYMBOL_GPL(xen_has_pv_disk_devices);
+
+/*
+ * This one is odd - it determines whether you want to run PV _and_
+ * legacy (IDE) drivers together. This combination is only possible
+ * under HVM.
+ */
+bool xen_has_pv_and_legacy_disk_devices(void)
+{
+ if (!xen_domain())
+ return false;
+
+ /* N.B. This is only ever used in HVM mode */
+ if (xen_pv_domain())
+ return false;
+
+ if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(xen_has_pv_and_legacy_disk_devices);
+
void xen_unplug_emulated_devices(void)
{
int r;
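
The helpers added above replace open-coded tests of xen_platform_pci_unplug in the frontends. A sketch of the intended call pattern — the xen-blkfront and xen-kbdfront hunks later in this patch do the same thing; the driver names below are invented for illustration:

	static struct xenbus_driver example_front_driver;	/* illustrative only */

	static int __init example_front_init(void)
	{
		if (!xen_domain())
			return -ENODEV;

		/* Bail out if this domain should use emulated devices instead
		 * of paravirtualised ones (e.g. xen_platform_pci=0 in HVM). */
		if (!xen_has_pv_disk_devices())
			return -ENODEV;

		return xenbus_register_frontend(&example_front_driver);
	}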
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 4b9951a..6182f28 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -193,7 +193,7 @@ void platform_calibrate_ccount(void)
* Ethernet -- OpenCores Ethernet MAC (ethoc driver)
*/
-static struct resource ethoc_res[] __initdata = {
+static struct resource ethoc_res[] = {
[0] = { /* register space */
.start = OETH_REGS_PADDR,
.end = OETH_REGS_PADDR + OETH_REGS_SIZE - 1,
@@ -211,7 +211,7 @@ static struct resource ethoc_res[] __initdata = {
},
};
-static struct ethoc_platform_data ethoc_pdata __initdata = {
+static struct ethoc_platform_data ethoc_pdata = {
/*
* The MAC address for these boards is 00:50:c2:13:6f:xx.
* The last byte (here as zero) is read from the DIP switches on the
@@ -221,7 +221,7 @@ static struct ethoc_platform_data ethoc_pdata __initdata = {
.phy_id = -1,
};
-static struct platform_device ethoc_device __initdata = {
+static struct platform_device ethoc_device = {
.name = "ethoc",
.id = -1,
.num_resources = ARRAY_SIZE(ethoc_res),
@@ -235,13 +235,13 @@ static struct platform_device ethoc_device __initdata = {
* UART
*/
-static struct resource serial_resource __initdata = {
+static struct resource serial_resource = {
.start = DUART16552_PADDR,
.end = DUART16552_PADDR + 0x1f,
.flags = IORESOURCE_MEM,
};
-static struct plat_serial8250_port serial_platform_data[] __initdata = {
+static struct plat_serial8250_port serial_platform_data[] = {
[0] = {
.mapbase = DUART16552_PADDR,
.irq = DUART16552_INTNUM,
@@ -254,7 +254,7 @@ static struct plat_serial8250_port serial_platform_data[] __initdata = {
{ },
};
-static struct platform_device xtavnet_uart __initdata = {
+static struct platform_device xtavnet_uart = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 1f0d457..de22f48 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -33,6 +33,7 @@
#include <linux/proc_fs.h>
#include <linux/acpi.h>
#include <linux/slab.h>
+#include <linux/regulator/machine.h>
#ifdef CONFIG_X86
#include <asm/mpspec.h>
#endif
@@ -976,6 +977,14 @@ void __init acpi_early_init(void)
goto error0;
}
+ /*
+ * If the system is using ACPI then we can be reasonably
+ * confident that any regulators are managed by the firmware
+ * so tell the regulator core it has everything it needs to
+ * know.
+ */
+ regulator_has_full_constraints();
+
return;
error0:
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index d6534f5..0dbb6c4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2199,6 +2199,16 @@ int ata_dev_configure(struct ata_device *dev)
if (rc)
return rc;
+ /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */
+ if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) &&
+ (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
+ dev->horkage |= ATA_HORKAGE_NOLPM;
+
+ if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
+ dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
+ }
+
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
if (rc)
@@ -4187,6 +4197,23 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /*
+ * Some WD SATA-I drives spin up and down erratically when the link
+ * is put into the slumber mode. We don't have full list of the
+ * affected devices. Disable LPM if the device matches one of the
+ * known prefixes and is SATA-1. As a side effect LPM partial is
+ * lost too.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=57211
+ */
+ { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+
/* End Marker */
{ }
};
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2b628d6..9072fdb 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -111,12 +111,14 @@ static const char *ata_lpm_policy_names[] = {
[ATA_LPM_MIN_POWER] = "min_power",
};
-static ssize_t ata_scsi_lpm_store(struct device *dev,
+static ssize_t ata_scsi_lpm_store(struct device *device,
struct device_attribute *attr,
const char *buf, size_t count)
{
- struct Scsi_Host *shost = class_to_shost(dev);
+ struct Scsi_Host *shost = class_to_shost(device);
struct ata_port *ap = ata_shost_to_port(shost);
+ struct ata_link *link;
+ struct ata_device *dev;
enum ata_lpm_policy policy;
unsigned long flags;
@@ -132,10 +134,20 @@ static ssize_t ata_scsi_lpm_store(struct device *dev,
return -EINVAL;
spin_lock_irqsave(ap->lock, flags);
+
+ ata_for_each_link(link, ap, EDGE) {
+ ata_for_each_dev(dev, &ap->link, ENABLED) {
+ if (dev->horkage & ATA_HORKAGE_NOLPM) {
+ count = -EOPNOTSUPP;
+ goto out_unlock;
+ }
+ }
+ }
+
ap->target_lpm_policy = policy;
ata_port_schedule_eh(ap);
+out_unlock:
spin_unlock_irqrestore(ap->lock, flags);
-
return count;
}
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 35c6b6d..b256ff5 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -304,6 +304,7 @@ enum {
MV5_LTMODE = 0x30,
MV5_PHY_CTL = 0x0C,
SATA_IFCFG = 0x050,
+ LP_PHY_CTL = 0x058,
MV_M2_PREAMP_MASK = 0x7e0,
@@ -431,6 +432,7 @@ enum {
MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */
MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */
MV_HP_QUIRK_LED_BLINK_EN = (1 << 12), /* is led blinking enabled? */
+ MV_HP_FIX_LP_PHY_CTL = (1 << 13), /* fix speed in LP_PHY_CTL ? */
/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
@@ -1353,6 +1355,7 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
if (ofs != 0xffffffffU) {
void __iomem *addr = mv_ap_base(link->ap) + ofs;
+ struct mv_host_priv *hpriv = link->ap->host->private_data;
if (sc_reg_in == SCR_CONTROL) {
/*
* Workaround for 88SX60x1 FEr SATA#26:
@@ -1369,6 +1372,18 @@ static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
*/
if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
val |= 0xf000;
+
+ if (hpriv->hp_flags & MV_HP_FIX_LP_PHY_CTL) {
+ void __iomem *lp_phy_addr =
+ mv_ap_base(link->ap) + LP_PHY_CTL;
+ /*
+ * Set PHY speed according to SControl speed.
+ */
+ if ((val & 0xf0) == 0x10)
+ writelfl(0x7, lp_phy_addr);
+ else
+ writelfl(0x227, lp_phy_addr);
+ }
}
writelfl(val, addr);
return 0;
@@ -4111,6 +4126,15 @@ static int mv_platform_probe(struct platform_device *pdev)
if (rc)
goto err;
+ /*
+ * To allow disk hotplug on Armada 370/XP SoCs, the PHY speed must be
+ * updated in the LP_PHY_CTL register.
+ */
+ if (pdev->dev.of_node &&
+ of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-370-sata"))
+ hpriv->hp_flags |= MV_HP_FIX_LP_PHY_CTL;
+
/* initialize adapter */
rc = mv_init_host(host);
if (rc)
@@ -4216,6 +4240,7 @@ static int mv_platform_resume(struct platform_device *pdev)
#ifdef CONFIG_OF
static struct of_device_id mv_sata_dt_ids[] = {
+ { .compatible = "marvell,armada-370-sata", },
{ .compatible = "marvell,orion-sata", },
{},
};
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 3331cb1..de4a933 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1129,7 +1129,7 @@ static int blkfront_probe(struct xenbus_device *dev,
char *type;
int len;
/* no unplug has been done: do not hook devices != xen vbds */
- if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) {
+ if (xen_has_pv_and_legacy_disk_devices()) {
int major;
if (!VDEV_IS_EXTENDED(vdevice))
@@ -1656,7 +1656,7 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ if (!xen_has_pv_disk_devices())
return -ENODEV;
if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
diff --git a/drivers/char/tpm/tpm_ppi.c b/drivers/char/tpm/tpm_ppi.c
index d0f1bdd..1900b4b 100644
--- a/drivers/char/tpm/tpm_ppi.c
+++ b/drivers/char/tpm/tpm_ppi.c
@@ -172,7 +172,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
* is updated with function index from SUBREQ to SUBREQ2 since PPI
* version 1.1
*/
- if (strcmp(version, "1.1") == -1)
+ if (strcmp(version, "1.1") < 0)
params[2].integer.value = TPM_PPI_FN_SUBREQ;
else
params[2].integer.value = TPM_PPI_FN_SUBREQ2;
@@ -182,7 +182,7 @@ static ssize_t tpm_store_ppi_request(struct device *dev,
* string/package type. For PPI version 1.0 and 1.1, use buffer type
* for compatibility, and use package type since 1.2 according to spec.
*/
- if (strcmp(version, "1.2") == -1) {
+ if (strcmp(version, "1.2") < 0) {
params[3].type = ACPI_TYPE_BUFFER;
params[3].buffer.length = sizeof(req);
sscanf(buf, "%d", &req);
@@ -248,7 +248,7 @@ static ssize_t tpm_show_ppi_transition_action(struct device *dev,
* (e.g. Capella with PPI 1.0) need integer/string/buffer type, so for
* compatibility, define params[3].type as buffer, if PPI version < 1.2
*/
- if (strcmp(version, "1.2") == -1) {
+ if (strcmp(version, "1.2") < 0) {
params[3].type = ACPI_TYPE_BUFFER;
params[3].buffer.length = 0;
params[3].buffer.pointer = NULL;
@@ -390,7 +390,7 @@ static ssize_t show_ppi_operations(char *buf, u32 start, u32 end)
kfree(output.pointer);
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL;
- if (strcmp(version, "1.2") == -1)
+ if (strcmp(version, "1.2") < 0)
return -EPERM;
params[2].integer.value = TPM_PPI_FN_GETOPR;
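
The tpm_ppi changes above all fix the same bug: strcmp() is only specified to return a negative, zero, or positive value, not exactly -1/0/+1, so comparing its result against -1 can silently fail. A tiny illustration (the concrete value is implementation-defined; -2 below is what a byte-difference implementation would return):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		int r = strcmp("1.0", "1.2");	/* often '0' - '2' == -2, never guaranteed to be -1 */

		printf("strcmp(\"1.0\", \"1.2\") = %d\n", r);

		if (r == -1)
			puts("matched the fragile check");
		if (r < 0)
			puts("matched the portable check");	/* always taken when "1.0" sorts before "1.2" */
		return 0;
	}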
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 644fec5..f1e9d7b 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -1182,9 +1182,11 @@ static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL,
pvt->dev_info->err_dev, pvt->bridge_ck);
- if (pvt->bridge_ck == NULL)
+ if (pvt->bridge_ck == NULL) {
pvt->bridge_ck = pci_scan_single_device(pdev->bus,
PCI_DEVFN(0, 1));
+ pci_dev_get(pvt->bridge_ck);
+ }
if (pvt->bridge_ck == NULL) {
e752x_printk(KERN_ERR, "error reporting device not found:"
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 9138678..d56d2bf 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -50,7 +50,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
struct ast_bo *bo;
int src_offset, dst_offset;
int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
- int ret;
+ int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
@@ -64,7 +64,8 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- ret = ast_bo_reserve(bo, true);
+ if (!drm_can_sleep())
+ ret = ast_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
return;
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 1e64d6f..68460cb 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -24,7 +24,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
struct cirrus_bo *bo;
int src_offset, dst_offset;
int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
- int ret;
+ int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
@@ -38,7 +38,8 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- ret = cirrus_bo_reserve(bo, true);
+ if (!drm_can_sleep())
+ ret = cirrus_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
return;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 60685b2..379a47e 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -273,8 +273,8 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc,
sr07 |= 0x11;
break;
case 16:
- sr07 |= 0xc1;
- hdr = 0xc0;
+ sr07 |= 0x17;
+ hdr = 0xc1;
break;
case 24:
sr07 |= 0x15;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 854f215..1a2ef33 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1660,10 +1660,20 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT 0x61114
-/* HDMI/DP bits are gen4+ */
-#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
-#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define DPD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+#define DPC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
+#define DPB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
+/* VLV DP/HDMI bits again match Bspec */
+#define DPD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
+#define DPC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
+#define DPB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
#define DPD_HOTPLUG_INT_STATUS (3 << 21)
#define DPC_HOTPLUG_INT_STATUS (3 << 19)
#define DPB_HOTPLUG_INT_STATUS (3 << 17)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 37c6d3f..a671223 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2247,18 +2247,34 @@ g4x_dp_detect(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
- switch (intel_dp->output_reg) {
- case DP_B:
- bit = DPB_HOTPLUG_LIVE_STATUS;
- break;
- case DP_C:
- bit = DPC_HOTPLUG_LIVE_STATUS;
- break;
- case DP_D:
- bit = DPD_HOTPLUG_LIVE_STATUS;
- break;
- default:
- return connector_status_unknown;
+ if (IS_VALLEYVIEW(dev)) {
+ switch (intel_dp->output_reg) {
+ case DP_B:
+ bit = DPB_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case DP_C:
+ bit = DPC_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ case DP_D:
+ bit = DPD_HOTPLUG_LIVE_STATUS_VLV;
+ break;
+ default:
+ return connector_status_unknown;
+ }
+ } else {
+ switch (intel_dp->output_reg) {
+ case DP_B:
+ bit = DPB_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case DP_C:
+ bit = DPC_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ case DP_D:
+ bit = DPD_HOTPLUG_LIVE_STATUS_G4X;
+ break;
+ default:
+ return connector_status_unknown;
+ }
}
if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 41eefc4..675ccf5 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -26,7 +26,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
struct mgag200_bo *bo;
int src_offset, dst_offset;
int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
- int ret;
+ int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
@@ -40,7 +40,8 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- ret = mgag200_bo_reserve(bo, true);
+ if (!drm_can_sleep())
+ ret = mgag200_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
return;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 9e0cb43..992c26d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -349,13 +349,13 @@ static u16
init_script(struct nouveau_bios *bios, int index)
{
struct nvbios_init init = { .bios = bios };
- u16 data;
+ u16 bmp_ver = bmp_version(bios), data;
- if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
- if (index > 1)
+ if (bmp_ver && bmp_ver < 0x0510) {
+ if (index > 1 || bmp_ver < 0x0100)
return 0x0000;
- data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
+ data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
return nv_ro16(bios, data + (index * 2));
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ef32bf6..b997e79 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -938,11 +938,14 @@ static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
- } else
+ } else {
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
+ }
+ /* disable spread spectrum on DCE3 DP */
+ radeon_crtc->ss_enabled = false;
}
break;
case ATOM_ENCODER_MODE_LVDS:
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index ee4cff5..d843e9b 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -970,7 +970,10 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p)
if (track->cb_dirty) {
tmp = track->cb_target_mask;
for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_028C70_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_028C70_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 326bdea..385e05b 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -889,13 +889,12 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
/* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -911,6 +910,8 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
@@ -935,14 +936,11 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
(ib->vm ? (ib->vm->id << 24) : 0));
/* flush read cache over gart for this vmid */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */
}
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index e045f8c..9dbea95 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -556,6 +556,7 @@
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_ACTION_ENA (1 << 27)
# define PACKET3_SX_ACTION_ENA (1 << 28)
+# define PACKET3_ENGINE_ME (1 << 31)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 040d27a..3d14719 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2519,14 +2519,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u32 cp_coher_cntl = PACKET3_TC_ACTION_ENA | PACKET3_VC_ACTION_ENA |
+ PACKET3_SH_ACTION_ENA;
+
+ if (rdev->family >= CHIP_RV770)
+ cp_coher_cntl |= PACKET3_FULL_CACHE_ENA;
if (rdev->wb.use_event) {
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
@@ -2540,9 +2543,7 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
} else {
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
- PACKET3_VC_ACTION_ENA |
- PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, cp_coher_cntl);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9b2512b..d3fcfe4 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -754,7 +754,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
for (i = 0; i < 8; i++) {
- if ((tmp >> (i * 4)) & 0xF) {
+ u32 format = G_0280A0_FORMAT(track->cb_color_info[i]);
+
+ if (format != V_0280A0_COLOR_INVALID &&
+ (tmp >> (i * 4)) & 0xF) {
/* at least one component is enabled */
if (track->cb_color_bo[i] == NULL) {
dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 8a8308b..9e2ac73 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1220,6 +1220,7 @@
# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
+# define PACKET3_FULL_CACHE_ENA (1 << 20) /* r7xx+ only */
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_VC_ACTION_ENA (1 << 24)
# define PACKET3_CB_ACTION_ENA (1 << 25)
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 6048f2b..9e134ea 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2821,6 +2821,10 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev)
/* tell the bios not to handle mode switching */
bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;
+ /* clear the vbios dpms state */
+ if (ASIC_IS_DCE4(rdev))
+ bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;
+
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch);
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index fc60b74..e24ca6a 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -1020,6 +1020,9 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
/* Add the default buses */
void radeon_i2c_init(struct radeon_device *rdev)
{
+ if (radeon_hw_i2c)
+ DRM_INFO("hw_i2c forced on, you may experience display detection problems!\n");
+
if (rdev->is_atom_bios)
radeon_atombios_i2c_init(rdev);
else
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 788c64c..469ba71 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -561,8 +561,10 @@ void radeon_pm_resume(struct radeon_device *rdev)
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_sclk = rdev->pm.default_sclk;
rdev->pm.current_mclk = rdev->pm.default_mclk;
- rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
- rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ if (rdev->pm.power_state) {
+ rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
+ rdev->pm.current_vddci = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.vddci;
+ }
if (rdev->pm.pm_method == PM_METHOD_DYNPM
&& rdev->pm.dynpm_state == DYNPM_STATE_SUSPENDED) {
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index d6c7fe7..3ad651c 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -57,13 +57,20 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
struct qib_sge *sge;
struct ib_wc wc;
u32 length;
+ enum ib_qp_type sqptype, dqptype;
qp = qib_lookup_qpn(ibp, swqe->wr.wr.ud.remote_qpn);
if (!qp) {
ibp->n_pkt_drops++;
return;
}
- if (qp->ibqp.qp_type != sqp->ibqp.qp_type ||
+
+ sqptype = sqp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : sqp->ibqp.qp_type;
+ dqptype = qp->ibqp.qp_type == IB_QPT_GSI ?
+ IB_QPT_UD : qp->ibqp.qp_type;
+
+ if (dqptype != sqptype ||
!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
ibp->n_pkt_drops++;
goto drop;
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index e21c181..fbfdc10 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -29,6 +29,7 @@
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/kbdif.h>
#include <xen/xenbus.h>
+#include <xen/platform_pci.h>
struct xenkbd_info {
struct input_dev *kbd;
@@ -380,6 +381,9 @@ static int __init xenkbd_init(void)
if (xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
return xenbus_register_frontend(&xenkbd_driver);
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 72f2ee9..94a5c9e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -910,7 +910,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
/* If range covers entire pagetable, free it */
if (!(start_pfn > level_pfn ||
- last_pfn < level_pfn + level_size(level))) {
+ last_pfn < level_pfn + level_size(level) - 1)) {
dma_clear_pte(pte);
domain_flush_cache(domain, pte, sizeof(*pte));
free_pgtable_page(level_pte);
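
Worked example for the intel-iommu change above: last_pfn is inclusive, so with level_size(level) == 512 (a child table covering pfns level_pfn .. level_pfn + 511, chosen here for illustration) a caller clearing exactly that range has last_pfn == level_pfn + 511. The old test required last_pfn >= level_pfn + 512, so an exactly-covered page table was never freed; the corrected test (>= level_pfn + 511) releases it.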
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 91a02ee..defbe5e1 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -185,8 +185,12 @@ config MD_FAULTY
If unsure, say N.
+config BLK_DEV_DM_BUILTIN
+ boolean
+
config BLK_DEV_DM
tristate "Device mapper support"
+ select BLK_DEV_DM_BUILTIN
---help---
Device-mapper is a low level volume manager. It works by allowing
people to specify mappings for ranges of logical sectors. Various
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 94dce8b..50ead1d 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_MD_MULTIPATH) += multipath.o
obj-$(CONFIG_MD_FAULTY) += faulty.o
obj-$(CONFIG_BLK_DEV_MD) += md-mod.o
obj-$(CONFIG_BLK_DEV_DM) += dm-mod.o
+obj-$(CONFIG_BLK_DEV_DM_BUILTIN) += dm-builtin.o
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
diff --git a/drivers/md/dm-builtin.c b/drivers/md/dm-builtin.c
new file mode 100644
index 0000000..8b82788
--- /dev/null
+++ b/drivers/md/dm-builtin.c
@@ -0,0 +1,49 @@
+#include <linux/export.h>
+#include "dm.h"
+
+/*
+ * The kobject release method must not be placed in the module itself,
+ * otherwise we are subject to module unload races.
+ *
+ * The release method is called when the last reference to the kobject is
+ * dropped. It may be called by any other kernel code that drops the last
+ * reference.
+ *
+ * The release method suffers from module unload race. We may prevent the
+ * module from being unloaded at the start of the release method (using
+ * increased module reference count or synchronizing against the release
+ * method), however there is no way to prevent the module from being
+ * unloaded at the end of the release method.
+ *
+ * If this code were placed in the dm module, the following race may
+ * happen:
+ * 1. Some other process takes a reference to dm kobject
+ * 2. The user issues ioctl function to unload the dm device
+ * 3. dm_sysfs_exit calls kobject_put, however the object is not released
+ * because of the other reference taken at step 1
+ * 4. dm_sysfs_exit waits on the completion
+ * 5. The other process that took the reference in step 1 drops it,
+ * dm_kobject_release is called from this process
+ * 6. dm_kobject_release calls complete()
+ * 7. a reschedule happens before dm_kobject_release returns
+ * 8. dm_sysfs_exit continues, the dm device is unloaded, module reference
+ * count is decremented
+ * 9. The user unloads the dm module
+ * 10. The other process that was rescheduled in step 7 continues to run,
+ * it is now executing code in unloaded module, so it crashes
+ *
+ * Note that if the process that takes the foreign reference to dm kobject
+ * has a low priority and the system is sufficiently loaded with
+ * higher-priority processes that prevent the low-priority process from
+ * being scheduled long enough, this bug may really happen.
+ *
+ * In order to fix this module unload race, we place the release method
+ * into a helper code that is compiled directly into the kernel.
+ */
+
+void dm_kobject_release(struct kobject *kobj)
+{
+ complete(dm_get_completion_from_kobject(kobj));
+}
+
+EXPORT_SYMBOL(dm_kobject_release);
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index 84d2b91..c62c5ab 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -86,6 +86,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
.default_attrs = dm_attrs,
+ .release = dm_kobject_release,
};
/*
@@ -104,5 +105,7 @@ int dm_sysfs_init(struct mapped_device *md)
*/
void dm_sysfs_exit(struct mapped_device *md)
{
- kobject_put(dm_kobject(md));
+ struct kobject *kobj = dm_kobject(md);
+ kobject_put(kobj);
+ wait_for_completion(dm_get_completion_from_kobject(kobj));
}
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 0368a69..6727958 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1349,6 +1349,12 @@ dm_thin_id dm_thin_dev_id(struct dm_thin_device *td)
return td->id;
}
+/*
+ * Check whether @time (of block creation) is older than @td's last snapshot.
+ * If so then the associated block is shared with the last snapshot device.
+ * Any block on a device created *after* the device last got snapshotted is
+ * necessarily not shared.
+ */
static bool __snapshotted_since(struct dm_thin_device *td, uint32_t time)
{
return td->snapshotted_time > time;
@@ -1458,6 +1464,20 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block)
return r;
}
+int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
+{
+ int r;
+ uint32_t ref_count;
+
+ down_read(&pmd->root_lock);
+ r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+ if (!r)
+ *result = (ref_count != 0);
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
{
int r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 050a9ad..149d326 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -180,6 +180,8 @@ int dm_pool_get_data_block_size(struct dm_pool_metadata *pmd, sector_t *result);
int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
+int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
+
/*
* Returns -ENOSPC if the new size is too small and already allocated
* blocks would be lost.
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 44a715f..3bc2d39 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -440,6 +440,7 @@ struct dm_thin_new_mapping {
unsigned quiesced:1;
unsigned prepared:1;
unsigned pass_discard:1;
+ unsigned definitely_not_shared:1;
struct thin_c *tc;
dm_block_t virt_block;
@@ -610,7 +611,15 @@ static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
cell_defer_no_holder(tc, m->cell2);
if (m->pass_discard)
- remap_and_issue(tc, m->bio, m->data_block);
+ if (m->definitely_not_shared)
+ remap_and_issue(tc, m->bio, m->data_block);
+ else {
+ bool used = false;
+ if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
+ bio_endio(m->bio, 0);
+ else
+ remap_and_issue(tc, m->bio, m->data_block);
+ }
else
bio_endio(m->bio, 0);
@@ -678,13 +687,17 @@ static int ensure_next_mapping(struct pool *pool)
static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
- struct dm_thin_new_mapping *r = pool->next_mapping;
+ struct dm_thin_new_mapping *m = pool->next_mapping;
BUG_ON(!pool->next_mapping);
+ memset(m, 0, sizeof(struct dm_thin_new_mapping));
+ INIT_LIST_HEAD(&m->list);
+ m->bio = NULL;
+
pool->next_mapping = NULL;
- return r;
+ return m;
}
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
@@ -696,15 +709,10 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
struct pool *pool = tc->pool;
struct dm_thin_new_mapping *m = get_next_mapping(pool);
- INIT_LIST_HEAD(&m->list);
- m->quiesced = 0;
- m->prepared = 0;
m->tc = tc;
m->virt_block = virt_block;
m->data_block = data_dest;
m->cell = cell;
- m->err = 0;
- m->bio = NULL;
if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
m->quiesced = 1;
@@ -767,15 +775,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
struct pool *pool = tc->pool;
struct dm_thin_new_mapping *m = get_next_mapping(pool);
- INIT_LIST_HEAD(&m->list);
m->quiesced = 1;
m->prepared = 0;
m->tc = tc;
m->virt_block = virt_block;
m->data_block = data_block;
m->cell = cell;
- m->err = 0;
- m->bio = NULL;
/*
* If the whole block of data is being overwritten or we are not
@@ -961,12 +966,12 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
*/
m = get_next_mapping(pool);
m->tc = tc;
- m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
+ m->pass_discard = pool->pf.discard_passdown;
+ m->definitely_not_shared = !lookup_result.shared;
m->virt_block = block;
m->data_block = lookup_result.block;
m->cell = cell;
m->cell2 = cell2;
- m->err = 0;
m->bio = bio;
if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
@@ -1303,15 +1308,16 @@ static enum pool_mode get_pool_mode(struct pool *pool)
return pool->pf.mode;
}
-static void set_pool_mode(struct pool *pool, enum pool_mode mode)
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
int r;
+ enum pool_mode old_mode = pool->pf.mode;
- pool->pf.mode = mode;
-
- switch (mode) {
+ switch (new_mode) {
case PM_FAIL:
- DMERR("switching pool to failure mode");
+ if (old_mode != new_mode)
+ DMERR("%s: switching pool to failure mode",
+ dm_device_name(pool->pool_md));
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
@@ -1320,11 +1326,15 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_READ_ONLY:
- DMERR("switching pool to read-only mode");
+ if (old_mode != new_mode)
+ DMERR("%s: switching pool to read-only mode",
+ dm_device_name(pool->pool_md));
r = dm_pool_abort_metadata(pool->pmd);
if (r) {
- DMERR("aborting transaction failed");
- set_pool_mode(pool, PM_FAIL);
+ DMERR("%s: aborting transaction failed",
+ dm_device_name(pool->pool_md));
+ new_mode = PM_FAIL;
+ set_pool_mode(pool, new_mode);
} else {
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
@@ -1335,6 +1345,9 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_WRITE:
+ if (old_mode != new_mode)
+ DMINFO("%s: switching pool to write mode",
+ dm_device_name(pool->pool_md));
dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard;
@@ -1342,6 +1355,8 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
pool->process_prepared_discard = process_prepared_discard;
break;
}
+
+ pool->pf.mode = new_mode;
}
/*----------------------------------------------------------------*/
@@ -1552,6 +1567,17 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
enum pool_mode new_mode = pt->adjusted_pf.mode;
/*
+ * Don't change the pool's mode until set_pool_mode() below.
+ * Otherwise the pool's process_* function pointers may
+ * not match the desired pool mode.
+ */
+ pt->adjusted_pf.mode = old_mode;
+
+ pool->ti = ti;
+ pool->pf = pt->adjusted_pf;
+ pool->low_water_blocks = pt->low_water_blocks;
+
+ /*
* If we were in PM_FAIL mode, rollback of metadata failed. We're
* not going to recover without a thin_repair. So we never let the
* pool move out of the old mode. On the other hand a PM_READ_ONLY
@@ -1561,10 +1587,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
if (old_mode == PM_FAIL)
new_mode = old_mode;
- pool->ti = ti;
- pool->low_water_blocks = pt->low_water_blocks;
- pool->pf = pt->adjusted_pf;
-
set_pool_mode(pool, new_mode);
return 0;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6a82721..9a7a409 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -185,8 +185,8 @@ struct mapped_device {
/* forced geometry settings */
struct hd_geometry geometry;
- /* sysfs handle */
- struct kobject kobj;
+ /* kobject and completion */
+ struct dm_kobject_holder kobj_holder;
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
@@ -1899,6 +1899,7 @@ static struct mapped_device *alloc_dev(int minor)
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
+ init_completion(&md->kobj_holder.completion);
md->disk->major = _major;
md->disk->first_minor = minor;
@@ -2724,7 +2725,7 @@ struct gendisk *dm_disk(struct mapped_device *md)
struct kobject *dm_kobject(struct mapped_device *md)
{
- return &md->kobj;
+ return &md->kobj_holder.kobj;
}
/*
@@ -2735,9 +2736,7 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
struct mapped_device *md;
- md = container_of(kobj, struct mapped_device, kobj);
- if (&md->kobj != kobj)
- return NULL;
+ md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
if (test_bit(DMF_FREEING, &md->flags) ||
dm_deleting_md(md))
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 45b97da..9b3222f 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -15,6 +15,8 @@
#include <linux/list.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
+#include <linux/completion.h>
+#include <linux/kobject.h>
/*
* Suspend feature flags
@@ -125,12 +127,27 @@ void dm_interface_exit(void);
/*
* sysfs interface
*/
+struct dm_kobject_holder {
+ struct kobject kobj;
+ struct completion completion;
+};
+
+static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
+{
+ return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
+}
+
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
/*
+ * The kobject helper
+ */
+void dm_kobject_release(struct kobject *kobj);
+
+/*
* Targets for linear and striped mappings
*/
int dm_linear_init(void);
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 3e7a88d..0d24037 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -245,6 +245,10 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
return -EINVAL;
}
+ /*
+ * We need to set this before the dm_tm_new_block() call below.
+ */
+ ll->nr_blocks = nr_blocks;
for (i = old_blocks; i < blocks; i++) {
struct dm_block *b;
struct disk_index_entry idx;
@@ -252,6 +256,7 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
r = dm_tm_new_block(ll->tm, &dm_sm_bitmap_validator, &b);
if (r < 0)
return r;
+
idx.blocknr = cpu_to_le64(dm_block_location(b));
r = dm_tm_unlock(ll->tm, b);
@@ -266,7 +271,6 @@ int sm_ll_extend(struct ll_disk *ll, dm_block_t extra_blocks)
return r;
}
- ll->nr_blocks = nr_blocks;
return 0;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2ffa02c..5cffbd3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1929,6 +1929,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
} else {
if (!uptodate) {
+ set_bit(STRIPE_DEGRADED, &sh->state);
set_bit(WriteErrorSeen, &rdev->flags);
set_bit(R5_WriteError, &sh->dev[i].flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 1f3bcb5..f4c669f 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -118,15 +118,10 @@ static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg)
return ret;
}
-static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+static u16 __dib8000_read_word(struct dib8000_state *state, u16 reg)
{
u16 ret;
- if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
- dprintk("could not acquire lock");
- return 0;
- }
-
state->i2c_write_buffer[0] = reg >> 8;
state->i2c_write_buffer[1] = reg & 0xff;
@@ -144,6 +139,21 @@ static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
dprintk("i2c read error on %d", reg);
ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1];
+
+ return ret;
+}
+
+static u16 dib8000_read_word(struct dib8000_state *state, u16 reg)
+{
+ u16 ret;
+
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
+ ret = __dib8000_read_word(state, reg);
+
mutex_unlock(&state->i2c_buffer_lock);
return ret;
@@ -153,8 +163,15 @@ static u32 dib8000_read32(struct dib8000_state *state, u16 reg)
{
u16 rw[2];
- rw[0] = dib8000_read_word(state, reg + 0);
- rw[1] = dib8000_read_word(state, reg + 1);
+ if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) {
+ dprintk("could not acquire lock");
+ return 0;
+ }
+
+ rw[0] = __dib8000_read_word(state, reg + 0);
+ rw[1] = __dib8000_read_word(state, reg + 1);
+
+ mutex_unlock(&state->i2c_buffer_lock);
return ((rw[0] << 16) | (rw[1]));
}
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 681bc6b..1c30a63 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -176,21 +176,6 @@ unlock:
mutex_unlock(&dev->mfc_mutex);
}
-static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
-
- if (!vdev) {
- mfc_err("failed to get video_device");
- return MFCNODE_INVALID;
- }
- if (vdev->index == 0)
- return MFCNODE_DECODER;
- else if (vdev->index == 1)
- return MFCNODE_ENCODER;
- return MFCNODE_INVALID;
-}
-
static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
{
mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
@@ -700,6 +685,7 @@ irq_cleanup_hw:
/* Open an MFC node */
static int s5p_mfc_open(struct file *file)
{
+ struct video_device *vdev = video_devdata(file);
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_ctx *ctx = NULL;
struct vb2_queue *q;
@@ -737,7 +723,7 @@ static int s5p_mfc_open(struct file *file)
/* Mark context as idle */
clear_work_bit_irqsave(ctx);
dev->ctx[ctx->num] = ctx;
- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ if (vdev == dev->vfd_dec) {
ctx->type = MFCINST_DECODER;
ctx->c_ops = get_dec_codec_ops();
s5p_mfc_dec_init(ctx);
@@ -747,7 +733,7 @@ static int s5p_mfc_open(struct file *file)
mfc_err("Failed to setup mfc controls\n");
goto err_ctrls_setup;
}
- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ } else if (vdev == dev->vfd_enc) {
ctx->type = MFCINST_ENCODER;
ctx->c_ops = get_enc_codec_ops();
/* only for encoder */
@@ -790,10 +776,10 @@ static int s5p_mfc_open(struct file *file)
q = &ctx->vq_dst;
q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
q->drv_priv = &ctx->fh;
- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ } else if (vdev == dev->vfd_enc) {
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->ops = get_enc_queue_ops();
} else {
@@ -811,10 +797,10 @@ static int s5p_mfc_open(struct file *file)
q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
q->io_modes = VB2_MMAP;
q->drv_priv = &ctx->fh;
- if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
+ if (vdev == dev->vfd_dec) {
q->io_modes = VB2_MMAP;
q->ops = get_dec_queue_ops();
- } else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
+ } else if (vdev == dev->vfd_enc) {
q->io_modes = VB2_MMAP | VB2_USERPTR;
q->ops = get_enc_queue_ops();
} else {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index f02e049..fb6629d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -113,15 +113,6 @@ enum s5p_mfc_fmt_type {
};
/**
- * enum s5p_mfc_node_type - The type of an MFC device node.
- */
-enum s5p_mfc_node_type {
- MFCNODE_INVALID = -1,
- MFCNODE_DECODER = 0,
- MFCNODE_ENCODER = 1,
-};
-
-/**
* enum s5p_mfc_inst_type - The type of an MFC instance.
*/
enum s5p_mfc_inst_type {
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index d05c5b5..ea5ca9a 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -445,6 +445,7 @@ static struct cxd2820r_config anysee_cxd2820r_config = {
* IOD[0] ZL10353 1=enabled
* IOE[0] tuner 0=enabled
* tuner is behind ZL10353 I2C-gate
+ * tuner is behind TDA10023 I2C-gate
*
* E7 TC VID=1c73 PID=861f HW=18 FW=0.7 AMTCI=0.5 "anysee-E7TC(LP)"
* PCB: 508TC (rev0.6)
@@ -960,7 +961,7 @@ static int anysee_tuner_attach(struct dvb_usb_adapter *adap)
if (fe && adap->fe[1]) {
/* attach tuner for 2nd FE */
- fe = dvb_attach(dvb_pll_attach, adap->fe[0],
+ fe = dvb_attach(dvb_pll_attach, adap->fe[1],
(0xc0 >> 1), &d->i2c_adap,
DVB_PLL_SAMSUNG_DTOS403IH102A);
}
diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c
index 4d73963..5b6f118 100644
--- a/drivers/mfd/max77686.c
+++ b/drivers/mfd/max77686.c
@@ -102,7 +102,7 @@ static int max77686_i2c_probe(struct i2c_client *i2c,
max77686->irq_gpio = pdata->irq_gpio;
max77686->irq = i2c->irq;
- max77686->regmap = regmap_init_i2c(i2c, &max77686_regmap_config);
+ max77686->regmap = devm_regmap_init_i2c(i2c, &max77686_regmap_config);
if (IS_ERR(max77686->regmap)) {
ret = PTR_ERR(max77686->regmap);
dev_err(max77686->dev, "Failed to allocate register map: %d\n",
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e75774f..5211b6c 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -1181,11 +1181,22 @@ static void atmci_start_request(struct atmel_mci *host,
iflags |= ATMCI_CMDRDY;
cmd = mrq->cmd;
cmdflags = atmci_prepare_command(slot->mmc, cmd);
- atmci_send_command(host, cmd, cmdflags);
+
+ /*
+ * DMA transfer should be started before sending the command to avoid
+ * unexpected errors especially for read operations in SDIO mode.
+ * Unfortunately, in PDC mode, command has to be sent before starting
+ * the transfer.
+ */
+ if (host->submit_data != &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
if (data)
host->submit_data(host, data);
+ if (host->submit_data == &atmci_submit_data_dma)
+ atmci_send_command(host, cmd, cmdflags);
+
if (mrq->stop) {
host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop);
host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6f0bfc0..6a53434 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1768,12 +1768,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
unsigned long timeout;
int err = 0;
bool requires_tuning_nonuhs = false;
+ unsigned long flags;
host = mmc_priv(mmc);
sdhci_runtime_pm_get(host);
- disable_irq(host->irq);
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -1793,8 +1793,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
requires_tuning_nonuhs)
ctrl |= SDHCI_CTRL_EXEC_TUNING;
else {
- spin_unlock(&host->lock);
- enable_irq(host->irq);
+ spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
return 0;
}
@@ -1866,15 +1865,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
host->cmd = NULL;
host->mrq = NULL;
- spin_unlock(&host->lock);
- enable_irq(host->irq);
-
+ spin_unlock_irqrestore(&host->lock, flags);
/* Wait for Buffer Read Ready interrupt */
wait_event_interruptible_timeout(host->buf_ready_int,
(host->tuning_done == 1),
msecs_to_jiffies(50));
- disable_irq(host->irq);
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
if (!host->tuning_done) {
pr_info(DRIVER_NAME ": Timeout waiting for "
@@ -1948,8 +1944,7 @@ out:
err = 0;
sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier);
- spin_unlock(&host->lock);
- enable_irq(host->irq);
+ spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
return err;
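
The sdhci hunks above replace the old disable_irq() + spin_lock() pairing with spin_lock_irqsave(), which masks interrupts only on the local CPU while the lock is held and restores the previous interrupt state on unlock. A minimal kernel-style sketch of the pattern (demo_lock and the function are illustrative, not sdhci code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_touch_shared_state(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);    /* save and disable local IRQs */
	/* ... update state that the interrupt handler also uses ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}
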
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 45204e4..12346b1 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -665,7 +665,6 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
ecc_stat >>= 4;
} while (--no_subpages);
- mtd->ecc_stats.corrected += ret;
pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
return ret;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 3a73bb9..269eefa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -126,6 +126,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
struct sk_buff *skb = tx_buf->skb;
u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
int nbd;
+ u16 split_bd_len = 0;
/* prefetch skb end pointer to speedup dev_kfree_skb() */
prefetch(&skb->end);
@@ -133,10 +134,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
txdata->txq_index, idx, tx_buf, skb);
- /* unmap first bd */
tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -155,12 +153,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
- /* ...and the TSO split header bd since they have no mapping */
+ /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+ split_bd_len = BD_UNMAP_LEN(tx_data_bd);
--nbd;
bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
}
+ /* unmap first bd */
+ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+ BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
+ DMA_TO_DEVICE);
+
/* now free frags */
while (nbd > 0) {
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 3b96bfa..7eb1963 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1600,6 +1600,7 @@ static void rhine_reset_task(struct work_struct *work)
goto out_unlock;
napi_disable(&rp->napi);
+ netif_tx_disable(dev);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 10e288d..fdb900c 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -723,8 +723,6 @@ enum b43_firmware_file_type {
struct b43_request_fw_context {
/* The device we are requesting the fw for. */
struct b43_wldev *dev;
- /* a completion event structure needed if this call is asynchronous */
- struct completion fw_load_complete;
/* a pointer to the firmware object */
const struct firmware *blob;
/* The type of firmware to request. */
@@ -801,6 +799,8 @@ enum {
struct b43_wldev {
struct b43_bus_dev *dev;
struct b43_wl *wl;
+ /* a completion event structure needed if this call is asynchronous */
+ struct completion fw_load_complete;
/* The device initialization status.
* Use b43_status() to query. */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 911c4c0..3f4b3bd 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2061,6 +2061,7 @@ void b43_do_release_fw(struct b43_firmware_file *fw)
static void b43_release_firmware(struct b43_wldev *dev)
{
+ complete(&dev->fw_load_complete);
b43_do_release_fw(&dev->fw.ucode);
b43_do_release_fw(&dev->fw.pcm);
b43_do_release_fw(&dev->fw.initvals);
@@ -2086,7 +2087,7 @@ static void b43_fw_cb(const struct firmware *firmware, void *context)
struct b43_request_fw_context *ctx = context;
ctx->blob = firmware;
- complete(&ctx->fw_load_complete);
+ complete(&ctx->dev->fw_load_complete);
}
int b43_do_request_fw(struct b43_request_fw_context *ctx,
@@ -2133,7 +2134,7 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
}
if (async) {
/* do this part asynchronously */
- init_completion(&ctx->fw_load_complete);
+ init_completion(&ctx->dev->fw_load_complete);
err = request_firmware_nowait(THIS_MODULE, 1, ctx->fwname,
ctx->dev->dev->dev, GFP_KERNEL,
ctx, b43_fw_cb);
@@ -2141,12 +2142,11 @@ int b43_do_request_fw(struct b43_request_fw_context *ctx,
pr_err("Unable to load firmware\n");
return err;
}
- /* stall here until fw ready */
- wait_for_completion(&ctx->fw_load_complete);
+ wait_for_completion(&ctx->dev->fw_load_complete);
if (ctx->blob)
goto fw_ready;
/* On some ARM systems, the async request will fail, but the next sync
- * request works. For this reason, we dall through here
+ * request works. For this reason, we fall through here
*/
}
err = request_firmware(&ctx->blob, ctx->fwname,
@@ -2415,6 +2415,7 @@ error:
static int b43_one_core_attach(struct b43_bus_dev *dev, struct b43_wl *wl);
static void b43_one_core_detach(struct b43_bus_dev *dev);
+static int b43_rng_init(struct b43_wl *wl);
static void b43_request_firmware(struct work_struct *work)
{
@@ -2466,6 +2467,10 @@ start_ieee80211:
goto err_one_core_detach;
wl->hw_registred = true;
b43_leds_register(wl->current_dev);
+
+ /* Register HW RNG driver */
+ b43_rng_init(wl);
+
goto out;
err_one_core_detach:
@@ -4630,9 +4635,6 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
if (!dev || b43_status(dev) != B43_STAT_INITIALIZED)
return;
- /* Unregister HW RNG driver */
- b43_rng_exit(dev->wl);
-
b43_set_status(dev, B43_STAT_UNINIT);
/* Stop the microcode PSM. */
@@ -4775,9 +4777,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
b43_set_status(dev, B43_STAT_INITIALIZED);
- /* Register HW RNG driver */
- b43_rng_init(dev->wl);
-
out:
return err;
@@ -5438,6 +5437,9 @@ static void b43_bcma_remove(struct bcma_device *core)
b43_one_core_detach(wldev->dev);
+ /* Unregister HW RNG driver */
+ b43_rng_exit(wl);
+
b43_leds_unregister(wl);
ieee80211_free_hw(wl->hw);
@@ -5515,6 +5517,9 @@ static void b43_ssb_remove(struct ssb_device *sdev)
b43_one_core_detach(dev);
+ /* Unregister HW RNG driver */
+ b43_rng_exit(wl);
+
if (list_empty(&wl->devlist)) {
b43_leds_unregister(wl);
/* Last core on the chip unregistered.
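
The b43 changes above move fw_load_complete from the short-lived request context into struct b43_wldev, so the teardown paths can still complete() it. The underlying completion API, in a kernel-style sketch with an illustrative demo_dev type:

#include <linux/completion.h>

struct demo_dev {
	struct completion fw_load_complete;
};

static void demo_dev_init(struct demo_dev *dev)
{
	init_completion(&dev->fw_load_complete);
}

static void demo_async_callback(struct demo_dev *dev)
{
	complete(&dev->fw_load_complete);	/* wake any waiter */
}

static void demo_wait(struct demo_dev *dev)
{
	wait_for_completion(&dev->fw_load_complete);
}
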
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 8cb206a..e85d34b 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -821,10 +821,10 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
* channel number in b43. */
if (chanstat & B43_RX_CHAN_5GHZ) {
status.band = IEEE80211_BAND_5GHZ;
- status.freq = b43_freq_to_channel_5ghz(chanid);
+ status.freq = b43_channel_to_freq_5ghz(chanid);
} else {
status.band = IEEE80211_BAND_2GHZ;
- status.freq = b43_freq_to_channel_2ghz(chanid);
+ status.freq = b43_channel_to_freq_2ghz(chanid);
}
break;
default:
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 8c3f70e..ded7f6c 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -3918,6 +3918,7 @@ static void b43legacy_remove(struct ssb_device *dev)
* as the ieee80211 unreg will destroy the workqueue. */
cancel_work_sync(&wldev->restart_work);
cancel_work_sync(&wl->firmware_load);
+ complete(&wldev->fw_load_complete);
B43legacy_WARN_ON(!wl);
if (!wldev->fw.ucode)
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 771be26..084c6b9 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1618,7 +1618,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
const u8 *ie_buf;
size_t ie_len;
u16 channel = 0;
- u64 fw_tsf = 0;
+ __le64 fw_tsf = 0;
u16 beacon_size = 0;
u32 curr_bcn_bytes;
u32 freq;
@@ -1745,7 +1745,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
ie_buf, ie_len, rssi, GFP_KERNEL);
bss_priv = (struct mwifiex_bss_priv *)bss->priv;
bss_priv->band = band;
- bss_priv->fw_tsf = fw_tsf;
+ bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
if (priv->media_connected &&
!memcmp(bssid,
priv->curr_bss_params.bss_descriptor
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index be33aa1..1eb562a 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -46,10 +46,20 @@ void rtl_fw_cb(const struct firmware *firmware, void *context)
"Firmware callback routine entered!\n");
complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
+ if (rtlpriv->cfg->alt_fw_name) {
+ err = request_firmware(&firmware,
+ rtlpriv->cfg->alt_fw_name,
+ rtlpriv->io.dev);
+ pr_info("Loading alternative firmware %s\n",
+ rtlpriv->cfg->alt_fw_name);
+ if (!err)
+ goto found_alt;
+ }
pr_err("Firmware %s not available\n", rtlpriv->cfg->fw_name);
rtlpriv->max_fw_size = 0;
return;
}
+found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
"Firmware is too big!\n");
@@ -177,6 +187,7 @@ static int rtl_op_add_interface(struct ieee80211_hw *hw,
rtlpriv->cfg->maps
[RTL_IBSS_INT_MASKS]);
}
+ mac->link_state = MAC80211_LINKED;
break;
case NL80211_IFTYPE_ADHOC:
RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
index 953f1a0..b878d56 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
@@ -85,17 +85,15 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
if (mac->act_scanning) {
tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
- if (turbo_scanoff) {
- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
- tx_agc[idx1] = ppowerlevel[idx1] |
- (ppowerlevel[idx1] << 8) |
- (ppowerlevel[idx1] << 16) |
- (ppowerlevel[idx1] << 24);
- if (rtlhal->interface == INTF_USB) {
- if (tx_agc[idx1] > 0x20 &&
- rtlefuse->external_pa)
- tx_agc[idx1] = 0x20;
- }
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ if (rtlhal->interface == INTF_USB) {
+ if (tx_agc[idx1] > 0x20 &&
+ rtlefuse->external_pa)
+ tx_agc[idx1] = 0x20;
}
}
} else {
@@ -104,10 +102,10 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
tx_agc[RF90_PATH_A] = 0x10101010;
tx_agc[RF90_PATH_B] = 0x10101010;
} else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
- TXHIGHPWRLEVEL_LEVEL1) {
+ TXHIGHPWRLEVEL_LEVEL2) {
tx_agc[RF90_PATH_A] = 0x00000000;
tx_agc[RF90_PATH_B] = 0x00000000;
- } else{
+ } else {
for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
tx_agc[idx1] = ppowerlevel[idx1] |
(ppowerlevel[idx1] << 8) |
@@ -373,7 +371,12 @@ static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
regoffset == RTXAGC_B_MCS07_MCS04)
regoffset = 0xc98;
for (i = 0; i < 3; i++) {
- writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
+ if (i != 2)
+ writeVal = (writeVal > 8) ?
+ (writeVal - 8) : 0;
+ else
+ writeVal = (writeVal > 6) ?
+ (writeVal - 6) : 0;
rtl_write_byte(rtlpriv, (u32)(regoffset + i),
(u8)writeVal);
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 5212ad2..6245563 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -49,6 +49,9 @@ MODULE_AUTHOR("Larry Finger <Larry.Finger@xxxxxxxxxxxx>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n USB wireless");
MODULE_FIRMWARE("rtlwifi/rtl8192cufw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8192cufw_TMSC.bin");
static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
{
@@ -68,15 +71,22 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
"Can't alloc buffer for fw\n");
return 1;
}
-
+ if (IS_VENDOR_UMC_A_CUT(rtlpriv->rtlhal.version) &&
+ !IS_92C_SERIAL(rtlpriv->rtlhal.version)) {
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_A.bin";
+ } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlpriv->rtlhal.version)) {
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_B.bin";
+ } else {
+ rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
+ }
+ /* provide name of alternative file */
+ rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
pr_info("Loading firmware %s\n", rtlpriv->cfg->fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
rtlpriv->cfg->fw_name, rtlpriv->io.dev,
GFP_KERNEL, hw, rtl_fw_cb);
-
-
- return 0;
+ return err;
}
static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
@@ -306,6 +316,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
{RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
{RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/
/* HP - Lite-On ,8188CUS Slim Combo */
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 88732e7..30f96ae 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -1640,6 +1640,7 @@ struct rtl_hal_cfg {
bool write_readback;
char *name;
char *fw_name;
+ char *alt_fw_name;
struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
struct rtl_hal_usbint_cfg *usb_interface_cfg;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1f57423..116a41a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -107,6 +107,7 @@ struct netfront_info {
} tx_skbs[NET_TX_RING_SIZE];
grant_ref_t gref_tx_head;
grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
+ struct page *grant_tx_page[NET_TX_RING_SIZE];
unsigned tx_skb_freelist;
spinlock_t rx_lock ____cacheline_aligned_in_smp;
@@ -387,6 +388,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
gnttab_release_grant_reference(
&np->gref_tx_head, np->grant_tx_ref[id]);
np->grant_tx_ref[id] = GRANT_INVALID_REF;
+ np->grant_tx_page[id] = NULL;
add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
dev_kfree_skb_irq(skb);
}
@@ -443,6 +445,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
mfn, GNTMAP_readonly);
+ np->grant_tx_page[id] = virt_to_page(data);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
@@ -488,6 +491,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
np->xbdev->otherend_id,
mfn, GNTMAP_readonly);
+ np->grant_tx_page[id] = page;
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = bytes;
@@ -588,6 +592,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
mfn = virt_to_mfn(data);
gnttab_grant_foreign_access_ref(
ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
+ np->grant_tx_page[id] = virt_to_page(data);
tx->gref = np->grant_tx_ref[id] = ref;
tx->offset = offset;
tx->size = len;
@@ -1126,10 +1131,11 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
continue;
skb = np->tx_skbs[i].skb;
- gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
- GNTMAP_readonly);
- gnttab_release_grant_reference(&np->gref_tx_head,
- np->grant_tx_ref[i]);
+ get_page(np->grant_tx_page[i]);
+ gnttab_end_foreign_access(np->grant_tx_ref[i],
+ GNTMAP_readonly,
+ (unsigned long)page_address(np->grant_tx_page[i]));
+ np->grant_tx_page[i] = NULL;
np->grant_tx_ref[i] = GRANT_INVALID_REF;
add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
dev_kfree_skb_irq(skb);
@@ -1138,78 +1144,35 @@ static void xennet_release_tx_bufs(struct netfront_info *np)
static void xennet_release_rx_bufs(struct netfront_info *np)
{
- struct mmu_update *mmu = np->rx_mmu;
- struct multicall_entry *mcl = np->rx_mcl;
- struct sk_buff_head free_list;
- struct sk_buff *skb;
- unsigned long mfn;
- int xfer = 0, noxfer = 0, unused = 0;
int id, ref;
- dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
- __func__);
- return;
-
- skb_queue_head_init(&free_list);
-
spin_lock_bh(&np->rx_lock);
for (id = 0; id < NET_RX_RING_SIZE; id++) {
- ref = np->grant_rx_ref[id];
- if (ref == GRANT_INVALID_REF) {
- unused++;
- continue;
- }
+ struct sk_buff *skb;
+ struct page *page;
skb = np->rx_skbs[id];
- mfn = gnttab_end_foreign_transfer_ref(ref);
- gnttab_release_grant_reference(&np->gref_rx_head, ref);
- np->grant_rx_ref[id] = GRANT_INVALID_REF;
-
- if (0 == mfn) {
- skb_shinfo(skb)->nr_frags = 0;
- dev_kfree_skb(skb);
- noxfer++;
+ if (!skb)
continue;
- }
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Remap the page. */
- const struct page *page =
- skb_frag_page(&skb_shinfo(skb)->frags[0]);
- unsigned long pfn = page_to_pfn(page);
- void *vaddr = page_address(page);
+ ref = np->grant_rx_ref[id];
+ if (ref == GRANT_INVALID_REF)
+ continue;
- MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
- mfn_pte(mfn, PAGE_KERNEL),
- 0);
- mcl++;
- mmu->ptr = ((u64)mfn << PAGE_SHIFT)
- | MMU_MACHPHYS_UPDATE;
- mmu->val = pfn;
- mmu++;
+ page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
- set_phys_to_machine(pfn, mfn);
- }
- __skb_queue_tail(&free_list, skb);
- xfer++;
- }
-
- dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
- __func__, xfer, noxfer, unused);
+ /* gnttab_end_foreign_access() needs a page ref until
+ * foreign access is ended (which may be deferred).
+ */
+ get_page(page);
+ gnttab_end_foreign_access(ref, 0,
+ (unsigned long)page_address(page));
+ np->grant_rx_ref[id] = GRANT_INVALID_REF;
- if (xfer) {
- if (!xen_feature(XENFEAT_auto_translated_physmap)) {
- /* Do all the remapping work and M2P updates. */
- MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
- NULL, DOMID_SELF);
- mcl++;
- HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
- }
+ kfree_skb(skb);
}
- __skb_queue_purge(&free_list);
-
spin_unlock_bh(&np->rx_lock);
}
@@ -1344,6 +1307,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
for (i = 0; i < NET_RX_RING_SIZE; i++) {
np->rx_skbs[i] = NULL;
np->grant_rx_ref[i] = GRANT_INVALID_REF;
+ np->grant_tx_page[i] = NULL;
}
/* A grant for every tx ring slot */
@@ -1994,7 +1958,7 @@ static int __init netif_init(void)
if (!xen_domain())
return -ENODEV;
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ if (!xen_has_pv_nic_devices())
return -ENODEV;
printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 903e128..b0a0d53 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2596,8 +2596,6 @@ enum parport_pc_pci_cards {
syba_2p_epp,
syba_1p_ecp,
titan_010l,
- titan_1284p1,
- titan_1284p2,
avlab_1p,
avlab_2p,
oxsemi_952,
@@ -2656,8 +2654,6 @@ static struct parport_pc_pci {
/* syba_2p_epp AP138B */ { 2, { { 0, 0x078 }, { 0, 0x178 }, } },
/* syba_1p_ecp W83787 */ { 1, { { 0, 0x078 }, } },
/* titan_010l */ { 1, { { 3, -1 }, } },
- /* titan_1284p1 */ { 1, { { 0, 1 }, } },
- /* titan_1284p2 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* avlab_1p */ { 1, { { 0, 1}, } },
/* avlab_2p */ { 2, { { 0, 1}, { 2, 3 },} },
/* The Oxford Semi cards are unusual: 954 doesn't support ECP,
@@ -2673,8 +2669,8 @@ static struct parport_pc_pci {
/* netmos_9705 */ { 1, { { 0, -1 }, } },
/* netmos_9715 */ { 2, { { 0, 1 }, { 2, 3 },} },
/* netmos_9755 */ { 2, { { 0, 1 }, { 2, 3 },} },
- /* netmos_9805 */ { 1, { { 0, -1 }, } },
- /* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
+ /* netmos_9805 */ { 1, { { 0, 1 }, } },
+ /* netmos_9815 */ { 2, { { 0, 1 }, { 2, 3 }, } },
/* netmos_9901 */ { 1, { { 0, -1 }, } },
/* netmos_9865 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
@@ -2718,8 +2714,6 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, syba_1p_ecp },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_010L,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, titan_010l },
- { 0x9710, 0x9805, 0x1000, 0x0010, 0, 0, titan_1284p1 },
- { 0x9710, 0x9815, 0x1000, 0x0020, 0, 0, titan_1284p2 },
/* PCI_VENDOR_ID_AVLAB/Intek21 has another bunch of cards ...*/
/* AFAVLAB_TK9902 */
{ 0x14db, 0x2120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, avlab_1p},
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index f7197a7..eae7cd9 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -20,6 +20,7 @@
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/time.h>
+#include <xen/platform_pci.h>
#include <asm/xen/swiotlb-xen.h>
#define INVALID_GRANT_REF (0)
@@ -1138,6 +1139,9 @@ static int __init pcifront_init(void)
if (!xen_pv_domain() || xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
pci_frontend_registrar(1 /* enable */);
return xenbus_register_frontend(&xenpci_driver);
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 1c43168..99724b0 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -77,6 +77,7 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
static struct acpi_device_id lis3lv02d_device_ids[] = {
{"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
{"HPQ6000", 0}, /* HP Mobile Data Protection System PNP */
+ {"HPQ6007", 0}, /* HP Mobile Data Protection System PNP */
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, lis3lv02d_device_ids);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 1c77423..e5fd432 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -34,11 +34,11 @@
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
-#include <linux/mod_devicetable.h>
#include <linux/log2.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/dmi.h>
/* this is for "generic access to PC-style RTC" using CMOS_READ/CMOS_WRITE */
#include <asm-generic/rtc.h>
@@ -377,6 +377,51 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+/*
+ * Do not disable RTC alarm on shutdown - workaround for b0rked BIOSes.
+ */
+static bool alarm_disable_quirk;
+
+static int __init set_alarm_disable_quirk(const struct dmi_system_id *id)
+{
+ alarm_disable_quirk = true;
+ pr_info("rtc-cmos: BIOS has alarm-disable quirk. ");
+ pr_info("RTC alarms disabled\n");
+ return 0;
+}
+
+static const struct dmi_system_id rtc_quirks[] __initconst = {
+ /* https://bugzilla.novell.com/show_bug.cgi?id=805740 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "IBM Truman",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "4852570"),
+ },
+ },
+ /* https://bugzilla.novell.com/show_bug.cgi?id=812592 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "Gigabyte GA-990XA-UD3",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR,
+ "Gigabyte Technology Co., Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "GA-990XA-UD3"),
+ },
+ },
+ /* http://permalink.gmane.org/gmane.linux.kernel/1604474 */
+ {
+ .callback = set_alarm_disable_quirk,
+ .ident = "Toshiba Satellite L300",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
+ },
+ },
+ {}
+};
+
static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
@@ -385,6 +430,9 @@ static int cmos_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (!is_valid_irq(cmos->irq))
return -EINVAL;
+ if (alarm_disable_quirk)
+ return 0;
+
spin_lock_irqsave(&rtc_lock, flags);
if (enabled)
@@ -1166,6 +1214,8 @@ static int __init cmos_init(void)
platform_driver_registered = true;
}
+ dmi_check_system(rtc_quirks);
+
if (retval == 0)
return 0;
diff --git a/drivers/rtc/rtc-max8907.c b/drivers/rtc/rtc-max8907.c
index 1d049da..4955f4b 100644
--- a/drivers/rtc/rtc-max8907.c
+++ b/drivers/rtc/rtc-max8907.c
@@ -51,7 +51,7 @@ static irqreturn_t max8907_irq_handler(int irq, void *data)
{
struct max8907_rtc *rtc = data;
- regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0);
+ regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);
rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF);
@@ -64,7 +64,7 @@ static void regs_to_tm(u8 *regs, struct rtc_time *tm)
bcd2bin(regs[RTC_YEAR1]) - 1900;
tm->tm_mon = bcd2bin(regs[RTC_MONTH] & 0x1f) - 1;
tm->tm_mday = bcd2bin(regs[RTC_DATE] & 0x3f);
- tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07) - 1;
+ tm->tm_wday = (regs[RTC_WEEKDAY] & 0x07);
if (regs[RTC_HOUR] & HOUR_12) {
tm->tm_hour = bcd2bin(regs[RTC_HOUR] & 0x01f);
if (tm->tm_hour == 12)
@@ -88,7 +88,7 @@ static void tm_to_regs(struct rtc_time *tm, u8 *regs)
regs[RTC_YEAR1] = bin2bcd(low);
regs[RTC_MONTH] = bin2bcd(tm->tm_mon + 1);
regs[RTC_DATE] = bin2bcd(tm->tm_mday);
- regs[RTC_WEEKDAY] = tm->tm_wday + 1;
+ regs[RTC_WEEKDAY] = tm->tm_wday;
regs[RTC_HOUR] = bin2bcd(tm->tm_hour);
regs[RTC_MIN] = bin2bcd(tm->tm_min);
regs[RTC_SEC] = bin2bcd(tm->tm_sec);
@@ -153,7 +153,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
tm_to_regs(&alrm->time, regs);
/* Disable alarm while we update the target time */
- ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x7f, 0);
+ ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0);
if (ret < 0)
return ret;
@@ -163,8 +163,7 @@ static int max8907_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
return ret;
if (alrm->enabled)
- ret = regmap_update_bits(rtc->regmap, MAX8907_REG_ALARM0_CNTL,
- 0x7f, 0x7f);
+ ret = regmap_write(rtc->regmap, MAX8907_REG_ALARM0_CNTL, 0x77);
return ret;
}
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index e6bf126..630e09d 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1824,7 +1824,7 @@ out:
static u32 *
bfad_load_fwimg(struct pci_dev *pdev)
{
- if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
+ if (bfa_asic_id_ct2(pdev->device)) {
if (bfi_image_ct2_size == 0)
bfad_read_firmware(pdev, &bfi_image_ct2,
&bfi_image_ct2_size, BFAD_FW_FILE_CT2);
@@ -1834,12 +1834,14 @@ bfad_load_fwimg(struct pci_dev *pdev)
bfad_read_firmware(pdev, &bfi_image_ct,
&bfi_image_ct_size, BFAD_FW_FILE_CT);
return bfi_image_ct;
- } else {
+ } else if (bfa_asic_id_cb(pdev->device)) {
if (bfi_image_cb_size == 0)
bfad_read_firmware(pdev, &bfi_image_cb,
&bfi_image_cb_size, BFAD_FW_FILE_CB);
return bfi_image_cb;
}
+
+ return NULL;
}
static void
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index a96cd06..601b9f7 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -358,6 +358,10 @@ static u8 key_2char2num(u8 hch, u8 lch)
return (hex_to_bin(hch) << 4) | hex_to_bin(lch);
}
+static const struct device_type wlan_type = {
+ .name = "wlan",
+};
+
/*
* drv_init() - a device potentially for us
*
@@ -393,6 +397,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
padapter->pusb_intf = pusb_intf;
usb_set_intfdata(pusb_intf, pnetdev);
SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
+ pnetdev->dev.type = &wlan_type;
/* step 2. */
padapter->dvobj_init = &r8712_usb_dvobj_init;
padapter->dvobj_deinit = &r8712_usb_dvobj_deinit;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 65503b9..aa769ce 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -1634,7 +1634,6 @@ BBvUpdatePreEDThreshold(
if( bScanning )
{ // need Max sensitivity //RSSI -69, -70,....
- if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x30); //CR206(0xCE)
@@ -1777,7 +1776,6 @@ BBvUpdatePreEDThreshold(
if( bScanning )
{ // need Max sensitivity //RSSI -69, -70, ...
- if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x24); //CR206(0xCE)
@@ -1929,7 +1927,6 @@ BBvUpdatePreEDThreshold(
case RF_VT3342A0: //RobertYu:20060627, testing table
if( bScanning )
{ // need Max sensitivity //RSSI -67, -68, ...
- if(pDevice->byBBPreEDIndex == 0) break;
pDevice->byBBPreEDIndex = 0;
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xC9, 0x00); //CR201(0xC9)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xCE, 0x38); //CR206(0xCE)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7b83945..b489363 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -52,7 +52,7 @@
static LIST_HEAD(g_tiqn_list);
static LIST_HEAD(g_np_list);
static DEFINE_SPINLOCK(tiqn_lock);
-static DEFINE_SPINLOCK(np_lock);
+static DEFINE_MUTEX(np_lock);
static struct idr tiqn_idr;
struct idr sess_idr;
@@ -264,6 +264,9 @@ int iscsit_deaccess_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
return 0;
}
+/*
+ * Called with mutex np_lock held
+ */
static struct iscsi_np *iscsit_get_np(
struct __kernel_sockaddr_storage *sockaddr,
int network_transport)
@@ -274,11 +277,10 @@ static struct iscsi_np *iscsit_get_np(
int ip_match = 0;
u16 port;
- spin_lock_bh(&np_lock);
list_for_each_entry(np, &g_np_list, np_list) {
- spin_lock(&np->np_thread_lock);
+ spin_lock_bh(&np->np_thread_lock);
if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
- spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np->np_thread_lock);
continue;
}
@@ -311,13 +313,11 @@ static struct iscsi_np *iscsit_get_np(
* while iscsi_tpg_add_network_portal() is called.
*/
np->np_exports++;
- spin_unlock(&np->np_thread_lock);
- spin_unlock_bh(&np_lock);
+ spin_unlock_bh(&np->np_thread_lock);
return np;
}
- spin_unlock(&np->np_thread_lock);
+ spin_unlock_bh(&np->np_thread_lock);
}
- spin_unlock_bh(&np_lock);
return NULL;
}
@@ -331,16 +331,22 @@ struct iscsi_np *iscsit_add_np(
struct sockaddr_in6 *sock_in6;
struct iscsi_np *np;
int ret;
+
+ mutex_lock(&np_lock);
+
/*
* Locate the existing struct iscsi_np if already active..
*/
np = iscsit_get_np(sockaddr, network_transport);
- if (np)
+ if (np) {
+ mutex_unlock(&np_lock);
return np;
+ }
np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
if (!np) {
pr_err("Unable to allocate memory for struct iscsi_np\n");
+ mutex_unlock(&np_lock);
return ERR_PTR(-ENOMEM);
}
@@ -363,6 +369,7 @@ struct iscsi_np *iscsit_add_np(
ret = iscsi_target_setup_login_socket(np, sockaddr);
if (ret != 0) {
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
@@ -371,6 +378,7 @@ struct iscsi_np *iscsit_add_np(
pr_err("Unable to create kthread: iscsi_np\n");
ret = PTR_ERR(np->np_thread);
kfree(np);
+ mutex_unlock(&np_lock);
return ERR_PTR(ret);
}
/*
@@ -381,10 +389,10 @@ struct iscsi_np *iscsit_add_np(
* point because iscsi_np has not been added to g_np_list yet.
*/
np->np_exports = 1;
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
- spin_lock_bh(&np_lock);
list_add_tail(&np->np_list, &g_np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
@@ -455,9 +463,9 @@ int iscsit_del_np(struct iscsi_np *np)
}
iscsit_del_np_comm(np);
- spin_lock_bh(&np_lock);
+ mutex_lock(&np_lock);
list_del(&np->np_list);
- spin_unlock_bh(&np_lock);
+ mutex_unlock(&np_lock);
pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
np->np_ip, np->np_port, (np->np_network_transport == ISCSI_TCP) ?
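
The iscsi_target change above swaps np_lock from a spinlock to a mutex because the lookup-or-create path sleeps (GFP_KERNEL allocation, kthread creation), which is not allowed under a spinlock. A kernel-style sketch of the same shape, with illustrative demo_* names:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static LIST_HEAD(demo_list);
static DEFINE_MUTEX(demo_lock);

struct demo_node {
	struct list_head list;
	int key;
};

static struct demo_node *demo_get_or_add(int key)
{
	struct demo_node *n;

	mutex_lock(&demo_lock);
	list_for_each_entry(n, &demo_list, list) {
		if (n->key == key)
			goto out;		/* reuse the existing entry */
	}
	n = kzalloc(sizeof(*n), GFP_KERNEL);	/* may sleep: needs a mutex */
	if (n) {
		n->key = key;
		list_add_tail(&n->list, &demo_list);
	}
out:
	mutex_unlock(&demo_lock);
	return n;
}
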
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 67950ca..1a890a9 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1298,6 +1298,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_TITAN_800E 0xA014
#define PCI_DEVICE_ID_TITAN_200EI 0xA016
#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
+#define PCI_DEVICE_ID_TITAN_200V3 0xA306
#define PCI_DEVICE_ID_TITAN_400V3 0xA310
#define PCI_DEVICE_ID_TITAN_410V3 0xA312
#define PCI_DEVICE_ID_TITAN_800V3 0xA314
@@ -3801,6 +3802,9 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_bt_2_921600 },
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b0_4_921600 },
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index edbf39f..de1c1b0 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1643,7 +1643,7 @@ static void hub_disconnect(struct usb_interface *intf)
{
struct usb_hub *hub = usb_get_intfdata(intf);
struct usb_device *hdev = interface_to_usbdev(intf);
- int i;
+ int port1;
/* Take the hub off the event list and don't let it be added again */
spin_lock_irq(&hub_event_lock);
@@ -1658,11 +1658,15 @@ static void hub_disconnect(struct usb_interface *intf)
hub->error = 0;
hub_quiesce(hub, HUB_DISCONNECT);
- usb_set_intfdata (intf, NULL);
+ /* Avoid races with recursively_mark_NOTATTACHED() */
+ spin_lock_irq(&device_state_lock);
+ port1 = hdev->maxchild;
+ hdev->maxchild = 0;
+ usb_set_intfdata(intf, NULL);
+ spin_unlock_irq(&device_state_lock);
- for (i = 0; i < hdev->maxchild; i++)
- usb_hub_remove_port_device(hub, i + 1);
- hub->hdev->maxchild = 0;
+ for (; port1 > 0; --port1)
+ usb_hub_remove_port_device(hub, port1);
if (hub->hdev->speed == USB_SPEED_HIGH)
highspeed_hubs--;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 36c3a82..1522a07 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -200,6 +200,7 @@ struct ehci_hcd { /* one per controller */
unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/
unsigned has_synopsys_hc_bug:1; /* Synopsys HC */
unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */
+ unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@@ -673,6 +674,18 @@ static inline unsigned int ehci_readl(const struct ehci_hcd *ehci,
#endif
}
+#ifdef CONFIG_SOC_IMX28
+static inline void imx28_ehci_writel(const unsigned int val,
+ volatile __u32 __iomem *addr)
+{
+ __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr));
+}
+#else
+static inline void imx28_ehci_writel(const unsigned int val,
+ volatile __u32 __iomem *addr)
+{
+}
+#endif
static inline void ehci_writel(const struct ehci_hcd *ehci,
const unsigned int val, __u32 __iomem *regs)
{
@@ -681,7 +694,10 @@ static inline void ehci_writel(const struct ehci_hcd *ehci,
writel_be(val, regs) :
writel(val, regs);
#else
- writel(val, regs);
+ if (ehci->imx28_write_fix)
+ imx28_ehci_writel(val, regs);
+ else
+ writel(val, regs);
#endif
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 4bade57..e00d140 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -315,6 +315,9 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ if (xhci->quirks & XHCI_PLAT)
+ return;
+
xhci_free_irq(xhci);
if (xhci->msix_entries) {
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index b461311..ce13e61 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -63,7 +63,7 @@
#define UART_DSR 0x20 /* data set ready - flow control - device to host */
#define CONTROL_RTS 0x10 /* request to send - flow control - host to device */
#define UART_CTS 0x10 /* clear to send - flow control - device to host */
-#define UART_RI 0x10 /* ring indicator - modem - device to host */
+#define UART_RI 0x80 /* ring indicator - modem - device to host */
#define UART_CD 0x40 /* carrier detect - modem - device to host */
#define CYP_ERROR 0x08 /* received from input report - device to host */
/* Note - the below has nothing to do with the "feature report" reset */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ede7f9f..8856967 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -2181,10 +2181,20 @@ static void ftdi_set_termios(struct tty_struct *tty,
}
/*
- * All FTDI UART chips are limited to CS7/8. We won't pretend to
+ * All FTDI UART chips are limited to CS7/8. We shouldn't pretend to
* support CS5/6 and revert the CSIZE setting instead.
+ *
+ * CS5 however is used to control some smartcard readers which abuse
+ * this limitation to switch modes. Original FTDI chips fall back to
+ * eight data bits.
+ *
+ * TODO: Implement a quirk to only allow this with mentioned
+ * readers. One I know of (Argolis Smartreader V1)
+ * returns "USB smartcard server" as iInterface string.
+ * The vendor didn't bother with a custom VID/PID of
+ * course.
*/
- if ((C_CSIZE(tty) != CS8) && (C_CSIZE(tty) != CS7)) {
+ if (C_CSIZE(tty) == CS6) {
dev_warn(ddev, "requested CSIZE setting not supported\n");
termios->c_cflag &= ~CSIZE;
@@ -2231,6 +2241,9 @@ no_skip:
urb_value |= FTDI_SIO_SET_DATA_PARITY_NONE;
}
switch (cflag & CSIZE) {
+ case CS5:
+ dev_dbg(ddev, "Setting CS5 quirk\n");
+ break;
case CS7:
urb_value |= 7;
dev_dbg(ddev, "Setting CS7\n");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 3c1e5e3..26ed554 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -320,6 +320,9 @@ static void option_instat_callback(struct urb *urb);
* It seems to contain a Qualcomm QSC6240/6290 chipset */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
+/* iBall 3.5G connect wireless modem */
+#define IBALL_3_5G_CONNECT 0x9605
+
/* Zoom */
#define ZOOM_PRODUCT_4597 0x9607
@@ -1447,6 +1450,17 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff90, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff91, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
/* NOTE: most ZTE CDMA devices should be driven by zte_ev, not option */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
@@ -1489,6 +1503,7 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&four_g_w14_blacklist
},
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
/* Pirelli */
{ USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 037fd68..90569d2 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -141,6 +141,8 @@ struct pl2303_private {
spinlock_t lock;
u8 line_control;
u8 line_status;
+
+ u8 line_settings[7];
};
static int pl2303_vendor_read(__u16 value, __u16 index,
@@ -420,10 +422,29 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "parity = none\n");
}
- i = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
- 0, 0, buf, 7, 100);
- dev_dbg(&port->dev, "0x21:0x20:0:0 %d\n", i);
+ /*
+ * Some PL2303 are known to lose bytes if you change serial settings
+ * even to the same values as before. Thus we actually need to filter
+ * in this specific case.
+ *
+ * Note that the tty_termios_hw_change check above is not sufficient
+ * as a previously requested baud rate may differ from the one
+ * actually used (and stored in old_termios).
+ *
+ * NOTE: No additional locking needed for line_settings as it is
+ * only used in set_termios, which is serialised against itself.
+ */
+ if (!old_termios || memcmp(buf, priv->line_settings, 7)) {
+ i = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0),
+ SET_LINE_REQUEST, SET_LINE_REQUEST_TYPE,
+ 0, 0, buf, 7, 100);
+
+ dev_dbg(&port->dev, "0x21:0x20:0:0 %d\n", i);
+
+ if (i == 7)
+ memcpy(priv->line_settings, buf, 7);
+ }
/* change control lines if we are switching to or from B0 */
spin_lock_irqsave(&priv->lock, flags);
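
The pl2303 hunk above caches the last 7-byte line coding and skips the control transfer when nothing changed. A self-contained userspace sketch of that caching idea (demo_port and send_line_request are stand-ins for the driver state and the USB control message):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct demo_port {
	unsigned char line_settings[7];
	bool settings_valid;
};

static int send_line_request(const unsigned char *buf)
{
	printf("writing line coding, first byte %02x\n", buf[0]);
	return 0;				/* pretend the write succeeded */
}

static void demo_set_line(struct demo_port *port, const unsigned char buf[7])
{
	if (port->settings_valid &&
	    memcmp(buf, port->line_settings, sizeof(port->line_settings)) == 0)
		return;				/* unchanged: skip the device write */

	if (send_line_request(buf) == 0) {
		memcpy(port->line_settings, buf, sizeof(port->line_settings));
		port->settings_valid = true;
	}
}

int main(void)
{
	struct demo_port port = { .settings_valid = false };
	const unsigned char coding[7] = { 0x80, 0x25, 0, 0, 0, 0, 8 };

	demo_set_line(&port, coding);	/* first call writes */
	demo_set_line(&port, coding);	/* identical settings: write skipped */
	return 0;
}
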
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index df8c74b..3224305 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -226,6 +226,13 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_MAX_SECTORS_64 ),
+/* Patch submitted by Mikhail Zolotaryov <lebon@xxxxxxxxxxxx> */
+UNUSUAL_DEV( 0x0421, 0x06aa, 0x1110, 0x1110,
+ "Nokia",
+ "502",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
#ifdef NO_SDDR09
UNUSUAL_DEV( 0x0436, 0x0005, 0x0100, 0x0100,
"Microtech",
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index cd005c2..4b2d3ab 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -35,6 +35,7 @@
#include <xen/interface/io/fbif.h>
#include <xen/interface/io/protocols.h>
#include <xen/xenbus.h>
+#include <xen/platform_pci.h>
struct xenfb_info {
unsigned char *fb;
@@ -699,6 +700,9 @@ static int __init xenfb_init(void)
if (xen_initial_domain())
return -ENODEV;
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
return xenbus_register_frontend(&xenfb_driver);
}
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 3159a37..45bcbd9 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -447,7 +447,7 @@ subsys_initcall(xenbus_probe_frontend_init);
#ifndef MODULE
static int __init boot_wait_for_devices(void)
{
- if (xen_hvm_domain() && !xen_platform_pci_unplug)
+ if (!xen_has_pv_devices())
return -ENODEV;
ready_to_wait_for_devices = 1;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index aa8acfa..ce5da61 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -7153,7 +7153,7 @@ out:
*/
if (root_dropped == false)
btrfs_add_dead_root(root);
- if (err)
+ if (err && err != -EAGAIN)
btrfs_std_error(root->fs_info, err);
return err;
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 47d5dcd..1ce5240 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1494,6 +1494,12 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
printk(KERN_INFO "btrfs: Snapshot src from "
"another FS\n");
ret = -EINVAL;
+ } else if (!inode_owner_or_capable(src_inode)) {
+ /*
+ * Subvolume creation is not restricted, but snapshots
+ * are limited to own subvolumes only
+ */
+ ret = -EPERM;
} else {
ret = btrfs_mksubvol(&file->f_path, name, namelen,
BTRFS_I(src_inode)->root,
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index b744228..85cde3e 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -103,7 +103,7 @@ int ore_verify_layout(unsigned total_comps, struct ore_layout *layout)
layout->max_io_length =
(BIO_MAX_PAGES_KMALLOC * PAGE_SIZE - layout->stripe_unit) *
- layout->group_width;
+ (layout->group_width - layout->parity);
if (layout->parity) {
unsigned stripe_length =
(layout->group_width - layout->parity) *
@@ -286,7 +286,8 @@ int ore_get_rw_state(struct ore_layout *layout, struct ore_components *oc,
if (length) {
ore_calc_stripe_info(layout, offset, length, &ios->si);
ios->length = ios->si.length;
- ios->nr_pages = (ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
+ ios->nr_pages = ((ios->offset & (PAGE_SIZE - 1)) +
+ ios->length + PAGE_SIZE - 1) / PAGE_SIZE;
if (layout->parity)
_ore_post_alloc_raid_stuff(ios);
}
@@ -536,6 +537,7 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
u64 H = LmodS - G * T;
u32 N = div_u64(H, U);
+ u32 Nlast;
/* "H - (N * U)" is just "H % U" so it's bound to u32 */
u32 C = (u32)(H - (N * U)) / stripe_unit + G * group_width;
@@ -568,6 +570,10 @@ void ore_calc_stripe_info(struct ore_layout *layout, u64 file_offset,
si->length = T - H;
if (si->length > length)
si->length = length;
+
+ Nlast = div_u64(H + si->length + U - 1, U);
+ si->maxdevUnits = Nlast - N;
+
si->M = M;
}
EXPORT_SYMBOL(ore_calc_stripe_info);
@@ -583,13 +589,16 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
int ret;
if (per_dev->bio == NULL) {
- unsigned pages_in_stripe = ios->layout->group_width *
- (ios->layout->stripe_unit / PAGE_SIZE);
- unsigned nr_pages = ios->nr_pages * ios->layout->group_width /
- (ios->layout->group_width -
- ios->layout->parity);
- unsigned bio_size = (nr_pages + pages_in_stripe) /
- ios->layout->group_width;
+ unsigned bio_size;
+
+ if (!ios->reading) {
+ bio_size = ios->si.maxdevUnits;
+ } else {
+ bio_size = (ios->si.maxdevUnits + 1) *
+ (ios->layout->group_width - ios->layout->parity) /
+ ios->layout->group_width;
+ }
+ bio_size *= (ios->layout->stripe_unit / PAGE_SIZE);
per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
if (unlikely(!per_dev->bio)) {
@@ -609,8 +618,12 @@ int _ore_add_stripe_unit(struct ore_io_state *ios, unsigned *cur_pg,
added_len = bio_add_pc_page(q, per_dev->bio, pages[pg],
pglen, pgbase);
if (unlikely(pglen != added_len)) {
- ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=%u\n",
- per_dev->bio->bi_vcnt);
+ /* If bi_vcnt == bi_max then this is a SW BUG */
+ ORE_DBGMSG("Failed bio_add_pc_page bi_vcnt=0x%x "
+ "bi_max=0x%x BIO_MAX=0x%x cur_len=0x%x\n",
+ per_dev->bio->bi_vcnt,
+ per_dev->bio->bi_max_vecs,
+ BIO_MAX_PAGES_KMALLOC, cur_len);
ret = -ENOMEM;
goto out;
}
@@ -1098,7 +1111,7 @@ int ore_truncate(struct ore_layout *layout, struct ore_components *oc,
size_attr->attr = g_attr_logical_length;
size_attr->attr.val_ptr = &size_attr->newsize;
- ORE_DBGMSG("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
+ ORE_DBGMSG2("trunc(0x%llx) obj_offset=0x%llx dev=%d\n",
_LLU(oc->comps->obj.id), _LLU(obj_size), i);
ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
&size_attr->attr);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index f33fcb6..10816d2 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1817,9 +1817,11 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
}
/* Clear the content within i_blocks. */
- if (i_size < EXT4_MIN_INLINE_DATA_SIZE)
- memset(ext4_raw_inode(&is.iloc)->i_block + i_size, 0,
- EXT4_MIN_INLINE_DATA_SIZE - i_size);
+ if (i_size < EXT4_MIN_INLINE_DATA_SIZE) {
+ void *p = (void *) ext4_raw_inode(&is.iloc)->i_block;
+ memset(p + i_size, 0,
+ EXT4_MIN_INLINE_DATA_SIZE - i_size);
+ }
EXT4_I(inode)->i_inline_size = i_size <
EXT4_MIN_INLINE_DATA_SIZE ?
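As an aside on the ext4 hunk above (illustration only, not part of the patch): i_block is an array of __le32, so the old "i_block + i_size" stepped forward by i_size 4-byte elements instead of i_size bytes, and the memset could run past the 60-byte inline area (the 15 4-byte block slots). A minimal standalone sketch of the pointer-arithmetic difference, with the buffer sizes assumed from that layout:

	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		uint32_t i_block[15];	/* stands in for the 60-byte inline data area */
		size_t i_size = 6;	/* bytes of inline data to keep               */

		memset(i_block, 0xff, sizeof(i_block));

		/*
		 * Buggy form: "i_block + i_size" is 6 *elements*, i.e. 24 bytes in,
		 * so clearing (60 - 6) bytes from there would write up to byte 78
		 * of a 60-byte array.  We only compute the offset here, of course.
		 */
		assert((char *)(i_block + i_size) - (char *)i_block == 24);

		/* Fixed form: byte-wise offset via a byte pointer, clears bytes 6..59. */
		void *p = i_block;
		memset((char *)p + i_size, 0, sizeof(i_block) - i_size);

		assert(((unsigned char *)i_block)[i_size] == 0x00);
		assert(((unsigned char *)i_block)[i_size - 1] == 0xff);
		return 0;
	}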
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e83351a..1101b04 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1193,22 +1193,6 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
}
-static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
-{
- return 1;
-}
-
-static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
- .can_merge = 0,
- .map = generic_pipe_buf_map,
- .unmap = generic_pipe_buf_unmap,
- .confirm = generic_pipe_buf_confirm,
- .release = generic_pipe_buf_release,
- .steal = fuse_dev_pipe_buf_steal,
- .get = generic_pipe_buf_get,
-};
-
static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags)
@@ -1255,7 +1239,11 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
buf->page = bufs[page_nr].page;
buf->offset = bufs[page_nr].offset;
buf->len = bufs[page_nr].len;
- buf->ops = &fuse_dev_pipe_buf_ops;
+ /*
+ * Need to be careful about this. Having buf->ops in module
+ * code can Oops if the buffer persists after module unload.
+ */
+ buf->ops = &nosteal_pipe_buf_ops;
pipe->nrbufs++;
page_nr++;
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
index cdb84a8..58b5106 100644
--- a/fs/hpfs/alloc.c
+++ b/fs/hpfs/alloc.c
@@ -8,6 +8,58 @@
#include "hpfs_fn.h"
+static void hpfs_claim_alloc(struct super_block *s, secno sec)
+{
+ struct hpfs_sb_info *sbi = hpfs_sb(s);
+ if (sbi->sb_n_free != (unsigned)-1) {
+ if (unlikely(!sbi->sb_n_free)) {
+ hpfs_error(s, "free count underflow, allocating sector %08x", sec);
+ sbi->sb_n_free = -1;
+ return;
+ }
+ sbi->sb_n_free--;
+ }
+}
+
+static void hpfs_claim_free(struct super_block *s, secno sec)
+{
+ struct hpfs_sb_info *sbi = hpfs_sb(s);
+ if (sbi->sb_n_free != (unsigned)-1) {
+ if (unlikely(sbi->sb_n_free >= sbi->sb_fs_size)) {
+ hpfs_error(s, "free count overflow, freeing sector %08x", sec);
+ sbi->sb_n_free = -1;
+ return;
+ }
+ sbi->sb_n_free++;
+ }
+}
+
+static void hpfs_claim_dirband_alloc(struct super_block *s, secno sec)
+{
+ struct hpfs_sb_info *sbi = hpfs_sb(s);
+ if (sbi->sb_n_free_dnodes != (unsigned)-1) {
+ if (unlikely(!sbi->sb_n_free_dnodes)) {
+ hpfs_error(s, "dirband free count underflow, allocating sector %08x", sec);
+ sbi->sb_n_free_dnodes = -1;
+ return;
+ }
+ sbi->sb_n_free_dnodes--;
+ }
+}
+
+static void hpfs_claim_dirband_free(struct super_block *s, secno sec)
+{
+ struct hpfs_sb_info *sbi = hpfs_sb(s);
+ if (sbi->sb_n_free_dnodes != (unsigned)-1) {
+ if (unlikely(sbi->sb_n_free_dnodes >= sbi->sb_dirband_size / 4)) {
+ hpfs_error(s, "dirband free count overflow, freeing sector %08x", sec);
+ sbi->sb_n_free_dnodes = -1;
+ return;
+ }
+ sbi->sb_n_free_dnodes++;
+ }
+}
+
/*
* Check if a sector is allocated in bitmap
* This is really slow. Turned on only if chk==2
@@ -203,9 +255,15 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
}
sec = 0;
ret:
+ if (sec) {
+ i = 0;
+ do
+ hpfs_claim_alloc(s, sec + i);
+ while (unlikely(++i < n));
+ }
if (sec && f_p) {
for (i = 0; i < forward; i++) {
- if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
+ if (!hpfs_alloc_if_possible(s, sec + n + i)) {
hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
sec = 0;
break;
@@ -228,6 +286,7 @@ static secno alloc_in_dirband(struct super_block *s, secno near)
nr >>= 2;
sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
if (!sec) return 0;
+ hpfs_claim_dirband_alloc(s, sec);
return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
}
@@ -242,6 +301,7 @@ int hpfs_alloc_if_possible(struct super_block *s, secno sec)
bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
+ hpfs_claim_alloc(s, sec);
return 1;
}
hpfs_brelse4(&qbh);
@@ -275,6 +335,7 @@ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
return;
}
bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
+ hpfs_claim_free(s, sec);
if (!--n) {
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
@@ -359,6 +420,7 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
+ hpfs_claim_dirband_free(s, dno);
}
}
@@ -366,7 +428,7 @@ struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
dnode_secno *dno, struct quad_buffer_head *qbh)
{
struct dnode *d;
- if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
+ if (hpfs_get_free_dnodes(s) > FREE_DNODES_ADD) {
if (!(*dno = alloc_in_dirband(s, near)))
if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
} else {
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index b7ae286..53f1a0f 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -308,7 +308,7 @@ static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb)
__printf(2, 3)
void hpfs_error(struct super_block *, const char *, ...);
int hpfs_stop_cycles(struct super_block *, int, int *, int *, char *);
-unsigned hpfs_count_one_bitmap(struct super_block *, secno);
+unsigned hpfs_get_free_dnodes(struct super_block *);
/*
* local time (HPFS) to GMT (Unix)
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index ebc2e79..da8d392 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -115,7 +115,7 @@ static void hpfs_put_super(struct super_block *s)
kfree(sbi);
}
-unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
+static unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
{
struct quad_buffer_head qbh;
unsigned long *bits;
@@ -123,7 +123,7 @@ unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
bits = hpfs_map_4sectors(s, secno, &qbh, 4);
if (!bits)
- return 0;
+ return (unsigned)-1;
count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
hpfs_brelse4(&qbh);
return count;
@@ -134,29 +134,45 @@ static unsigned count_bitmaps(struct super_block *s)
unsigned n, count, n_bands;
n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14;
count = 0;
- for (n = 0; n < n_bands; n++)
- count += hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
+ for (n = 0; n < n_bands; n++) {
+ unsigned c;
+ c = hpfs_count_one_bitmap(s, le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[n]));
+ if (c != (unsigned)-1)
+ count += c;
+ }
return count;
}
+unsigned hpfs_get_free_dnodes(struct super_block *s)
+{
+ struct hpfs_sb_info *sbi = hpfs_sb(s);
+ if (sbi->sb_n_free_dnodes == (unsigned)-1) {
+ unsigned c = hpfs_count_one_bitmap(s, sbi->sb_dmap);
+ if (c == (unsigned)-1)
+ return 0;
+ sbi->sb_n_free_dnodes = c;
+ }
+ return sbi->sb_n_free_dnodes;
+}
+
static int hpfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *s = dentry->d_sb;
struct hpfs_sb_info *sbi = hpfs_sb(s);
u64 id = huge_encode_dev(s->s_bdev->bd_dev);
+
hpfs_lock(s);
- /*if (sbi->sb_n_free == -1) {*/
+ if (sbi->sb_n_free == (unsigned)-1)
sbi->sb_n_free = count_bitmaps(s);
- sbi->sb_n_free_dnodes = hpfs_count_one_bitmap(s, sbi->sb_dmap);
- /*}*/
+
buf->f_type = s->s_magic;
buf->f_bsize = 512;
buf->f_blocks = sbi->sb_fs_size;
buf->f_bfree = sbi->sb_n_free;
buf->f_bavail = sbi->sb_n_free;
buf->f_files = sbi->sb_dirband_size / 4;
- buf->f_ffree = sbi->sb_n_free_dnodes;
+ buf->f_ffree = hpfs_get_free_dnodes(s);
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
buf->f_namelen = 254;
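For context on the hpfs changes above (illustration only, not part of the patch): the filesystem now keeps cached free counts and uses (unsigned)-1 as an "unknown, recount lazily" sentinel, invalidating the cache on inconsistency instead of trusting a bad value. A minimal standalone sketch of that pattern, with a made-up recount function:

	#include <limits.h>
	#include <stdio.h>

	#define COUNT_UNKNOWN UINT_MAX

	static unsigned cached_free = COUNT_UNKNOWN;

	static unsigned slow_recount(void)
	{
		return 1234;		/* stands in for walking the on-disk bitmaps */
	}

	static unsigned get_free(void)
	{
		if (cached_free == COUNT_UNKNOWN)
			cached_free = slow_recount();
		return cached_free;
	}

	static void claim_alloc(void)
	{
		if (cached_free == COUNT_UNKNOWN)
			return;			/* nothing cached, nothing to adjust */
		if (cached_free == 0) {		/* would underflow: invalidate instead */
			cached_free = COUNT_UNKNOWN;
			return;
		}
		cached_free--;
	}

	int main(void)
	{
		claim_alloc();			/* no-op, cache not populated yet    */
		printf("free=%u\n", get_free());/* recounts once, prints 1234        */
		claim_alloc();
		printf("free=%u\n", get_free());/* prints 1233, no recount needed    */
		return 0;
	}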
diff --git a/fs/mount.h b/fs/mount.h
index cd50079..f58a3ab 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -66,7 +66,7 @@ static inline int mnt_has_parent(struct mount *mnt)
static inline int is_mounted(struct vfsmount *mnt)
{
/* neither detached nor internal? */
- return !IS_ERR_OR_NULL(real_mount(mnt));
+ return !IS_ERR_OR_NULL(real_mount(mnt)->mnt_ns);
}
extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *, int);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index c53189d..08ff307 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -236,13 +236,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
error = nfs4_discover_server_trunking(clp, &old);
if (error < 0)
goto error;
- nfs_put_client(clp);
- if (clp != old) {
- clp->cl_preserve_clid = true;
- clp = old;
- }
- return clp;
+ if (clp != old)
+ clp->cl_preserve_clid = true;
+ nfs_put_client(clp);
+ return old;
error:
nfs_mark_client_ready(clp, error);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 3125a9e..2049562 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -6466,7 +6466,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle,
switch (err) {
case 0:
case -NFS4ERR_WRONGSEC:
- case -NFS4ERR_NOTSUPP:
+ case -ENOTSUPP:
goto out;
default:
err = nfs4_handle_exception(server, err, &exception);
@@ -6498,7 +6498,7 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
* Fall back on "guess and check" method if
* the server doesn't support SECINFO_NO_NAME
*/
- if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) {
+ if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) {
err = nfs4_find_root_sec(server, fhandle, info);
goto out_freepage;
}
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 26b1439..691a97b 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3017,7 +3017,8 @@ out_overflow:
return -EIO;
}
-static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected,
+ int *nfs_retval)
{
__be32 *p;
uint32_t opnum;
@@ -3027,19 +3028,32 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
if (unlikely(!p))
goto out_overflow;
opnum = be32_to_cpup(p++);
- if (opnum != expected) {
- dprintk("nfs: Server returned operation"
- " %d but we issued a request for %d\n",
- opnum, expected);
- return -EIO;
- }
+ if (unlikely(opnum != expected))
+ goto out_bad_operation;
nfserr = be32_to_cpup(p);
- if (nfserr != NFS_OK)
- return nfs4_stat_to_errno(nfserr);
- return 0;
+ if (nfserr == NFS_OK)
+ *nfs_retval = 0;
+ else
+ *nfs_retval = nfs4_stat_to_errno(nfserr);
+ return true;
+out_bad_operation:
+ dprintk("nfs: Server returned operation"
+ " %d but we issued a request for %d\n",
+ opnum, expected);
+ *nfs_retval = -EREMOTEIO;
+ return false;
out_overflow:
print_overflow_msg(__func__, xdr);
- return -EIO;
+ *nfs_retval = -EIO;
+ return false;
+}
+
+static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
+{
+ int retval;
+
+ __decode_op_hdr(xdr, expected, &retval);
+ return retval;
}
/* Dummy routine */
@@ -4860,11 +4874,12 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
uint32_t savewords, bmlen, i;
int status;
- status = decode_op_hdr(xdr, OP_OPEN);
- if (status != -EIO)
- nfs_increment_open_seqid(status, res->seqid);
- if (!status)
- status = decode_stateid(xdr, &res->stateid);
+ if (!__decode_op_hdr(xdr, OP_OPEN, &status))
+ return status;
+ nfs_increment_open_seqid(status, res->seqid);
+ if (status)
+ return status;
+ status = decode_stateid(xdr, &res->stateid);
if (unlikely(status))
return status;
diff --git a/fs/splice.c b/fs/splice.c
index 6909d89..d375a58 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -553,6 +553,24 @@ static const struct pipe_buf_operations default_pipe_buf_ops = {
.get = generic_pipe_buf_get,
};
+static int generic_pipe_buf_nosteal(struct pipe_inode_info *pipe,
+ struct pipe_buffer *buf)
+{
+ return 1;
+}
+
+/* Pipe buffer operations for a socket and similar. */
+const struct pipe_buf_operations nosteal_pipe_buf_ops = {
+ .can_merge = 0,
+ .map = generic_pipe_buf_map,
+ .unmap = generic_pipe_buf_unmap,
+ .confirm = generic_pipe_buf_confirm,
+ .release = generic_pipe_buf_release,
+ .steal = generic_pipe_buf_nosteal,
+ .get = generic_pipe_buf_get,
+};
+EXPORT_SYMBOL(nosteal_pipe_buf_ops);
+
static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
unsigned long vlen, loff_t offset)
{
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 37464c5..385d733 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -127,7 +127,7 @@ static inline void audit_syscall_exit(void *pt_regs)
{
if (unlikely(current->audit_context)) {
int success = is_syscall_success(pt_regs);
- int return_code = regs_return_value(pt_regs);
+ long return_code = regs_return_value(pt_regs);
__audit_syscall_exit(success, return_code);
}
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0621bca..a55de7b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -399,6 +399,8 @@ enum {
ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */
ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
+ ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
+ ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index ad1a427..8f34044 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -156,6 +156,8 @@ int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
+extern const struct pipe_buf_operations nosteal_pipe_buf_ops;
+
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
struct pipe_inode_info *get_pipe_info(struct file *file);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index a13291f..4383a39 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -147,9 +147,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone,
return x;
}
-extern unsigned long global_reclaimable_pages(void);
-extern unsigned long zone_reclaimable_pages(struct zone *zone);
-
#ifdef CONFIG_NUMA
/*
* Determine the per node value of a stat item. This function
diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
index a5f9b96..6ca3265 100644
--- a/include/scsi/osd_ore.h
+++ b/include/scsi/osd_ore.h
@@ -102,6 +102,7 @@ struct ore_striping_info {
unsigned unit_off;
unsigned cur_pg;
unsigned cur_comp;
+ unsigned maxdevUnits;
};
struct ore_io_state;
diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h
index 438c256..b49eeab 100644
--- a/include/xen/platform_pci.h
+++ b/include/xen/platform_pci.h
@@ -48,4 +48,27 @@ static inline int xen_must_unplug_disks(void) {
extern int xen_platform_pci_unplug;
+#if defined(CONFIG_XEN_PVHVM)
+extern bool xen_has_pv_devices(void);
+extern bool xen_has_pv_disk_devices(void);
+extern bool xen_has_pv_nic_devices(void);
+extern bool xen_has_pv_and_legacy_disk_devices(void);
+#else
+static inline bool xen_has_pv_devices(void)
+{
+ return IS_ENABLED(CONFIG_XEN);
+}
+static inline bool xen_has_pv_disk_devices(void)
+{
+ return IS_ENABLED(CONFIG_XEN);
+}
+static inline bool xen_has_pv_nic_devices(void)
+{
+ return IS_ENABLED(CONFIG_XEN);
+}
+static inline bool xen_has_pv_and_legacy_disk_devices(void)
+{
+ return false;
+}
+#endif
#endif /* _XEN_PLATFORM_PCI_H */
diff --git a/kernel/audit.c b/kernel/audit.c
index 5924919..0538e44 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -101,7 +101,8 @@ static int audit_rate_limit;
/* Number of outstanding audit_buffers allowed. */
static int audit_backlog_limit = 64;
-static int audit_backlog_wait_time = 60 * HZ;
+#define AUDIT_BACKLOG_WAIT_TIME (60 * HZ)
+static int audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
static int audit_backlog_wait_overflow = 0;
/* The identity of the user shutting down the audit system. */
@@ -1185,6 +1186,8 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
return NULL;
}
+ audit_backlog_wait_time = AUDIT_BACKLOG_WAIT_TIME;
+
ab = audit_buffer_alloc(ctx, gfp_mask, type);
if (!ab) {
audit_log_lost("out of memory in audit_log_start");
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 35d7565..384cb73 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -324,9 +324,6 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
static int __register_ftrace_function(struct ftrace_ops *ops)
{
- if (unlikely(ftrace_disabled))
- return -ENODEV;
-
if (FTRACE_WARN_ON(ops == &global_ops))
return -EINVAL;
@@ -370,13 +367,21 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
return 0;
}
+static void ftrace_sync(struct work_struct *work)
+{
+ /*
+ * This function is just a stub to implement a hard force
+ * of synchronize_sched(). This requires synchronizing
+ * tasks even in userspace and idle.
+ *
+ * Yes, function tracing is rude.
+ */
+}
+
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
int ret;
- if (ftrace_disabled)
- return -ENODEV;
-
if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
return -EBUSY;
@@ -391,16 +396,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
} else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
ret = remove_ftrace_list_ops(&ftrace_control_list,
&control_ops, ops);
- if (!ret) {
- /*
- * The ftrace_ops is now removed from the list,
- * so there'll be no new users. We must ensure
- * all current users are done before we free
- * the control data.
- */
- synchronize_sched();
- control_ops_free(ops);
- }
} else
ret = remove_ftrace_ops(&ftrace_ops_list, ops);
@@ -410,13 +405,6 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
if (ftrace_enabled)
update_ftrace_function();
- /*
- * Dynamic ops may be freed, we must make sure that all
- * callers are done before leaving this function.
- */
- if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
- synchronize_sched();
-
return 0;
}
@@ -2009,10 +1997,15 @@ static void ftrace_startup_enable(int command)
static int ftrace_startup(struct ftrace_ops *ops, int command)
{
bool hash_enable = true;
+ int ret;
if (unlikely(ftrace_disabled))
return -ENODEV;
+ ret = __register_ftrace_function(ops);
+ if (ret)
+ return ret;
+
ftrace_start_up++;
command |= FTRACE_UPDATE_CALLS;
@@ -2034,12 +2027,17 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
return 0;
}
-static void ftrace_shutdown(struct ftrace_ops *ops, int command)
+static int ftrace_shutdown(struct ftrace_ops *ops, int command)
{
bool hash_disable = true;
+ int ret;
if (unlikely(ftrace_disabled))
- return;
+ return -ENODEV;
+
+ ret = __unregister_ftrace_function(ops);
+ if (ret)
+ return ret;
ftrace_start_up--;
/*
@@ -2073,10 +2071,42 @@ static void ftrace_shutdown(struct ftrace_ops *ops, int command)
command |= FTRACE_UPDATE_TRACE_FUNC;
}
- if (!command || !ftrace_enabled)
- return;
+ if (!command || !ftrace_enabled) {
+ /*
+ * If these are control ops, they still need their
+ * per_cpu field freed. Since, function tracing is
+ * not currently active, we can just free them
+ * without synchronizing all CPUs.
+ */
+ if (ops->flags & FTRACE_OPS_FL_CONTROL)
+ control_ops_free(ops);
+ return 0;
+ }
ftrace_run_update_code(command);
+
+ /*
+ * Dynamic ops may be freed, we must make sure that all
+ * callers are done before leaving this function.
+ * The same goes for freeing the per_cpu data of the control
+ * ops.
+ *
+ * Again, normal synchronize_sched() is not good enough.
+ * We need to do a hard force of sched synchronization.
+ * This is because we use preempt_disable() to do RCU, but
+ * the function tracers can be called where RCU is not watching
+ * (like before user_exit()). We can not rely on the RCU
+ * infrastructure to do the synchronization, thus we must do it
+ * ourselves.
+ */
+ if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
+ schedule_on_each_cpu(ftrace_sync);
+
+ if (ops->flags & FTRACE_OPS_FL_CONTROL)
+ control_ops_free(ops);
+ }
+
+ return 0;
}
static void ftrace_startup_sysctl(void)
@@ -2105,12 +2135,57 @@ static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
{
- struct ftrace_hash *hash;
+ /*
+ * Filter_hash being empty will default to trace module.
+ * But notrace hash requires a test of individual module functions.
+ */
+ return ftrace_hash_empty(ops->filter_hash) &&
+ ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+ /* If ops isn't enabled, ignore it */
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return 0;
+
+ /* If ops traces all mods, we already accounted for it */
+ if (ops_traces_mod(ops))
+ return 0;
+
+ /* The function must be in the filter */
+ if (!ftrace_hash_empty(ops->filter_hash) &&
+ !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+ return 0;
+
+ /* If in notrace hash, we ignore it too */
+ if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+ return 0;
+
+ return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+ struct ftrace_ops *ops;
+ int cnt = 0;
- hash = ops->filter_hash;
- return ftrace_hash_empty(hash);
+ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+ if (ops_references_rec(ops, rec))
+ cnt++;
+ }
+
+ return cnt;
}
static int ftrace_update_code(struct module *mod)
@@ -2119,6 +2194,7 @@ static int ftrace_update_code(struct module *mod)
struct dyn_ftrace *p;
cycle_t start, stop;
unsigned long ref = 0;
+ bool test = false;
int i;
/*
@@ -2132,9 +2208,12 @@ static int ftrace_update_code(struct module *mod)
for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next) {
- if (ops->flags & FTRACE_OPS_FL_ENABLED &&
- ops_traces_mod(ops))
- ref++;
+ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+ if (ops_traces_mod(ops))
+ ref++;
+ else
+ test = true;
+ }
}
}
@@ -2144,12 +2223,16 @@ static int ftrace_update_code(struct module *mod)
for (pg = ftrace_new_pgs; pg; pg = pg->next) {
for (i = 0; i < pg->index; i++) {
+ int cnt = ref;
+
/* If something went wrong, bail without enabling anything */
if (unlikely(ftrace_disabled))
return -1;
p = &pg->records[i];
- p->flags = ref;
+ if (test)
+ cnt += referenced_filters(p);
+ p->flags = cnt;
/*
* Do the initial record conversion from mcount jump
@@ -2169,7 +2252,7 @@ static int ftrace_update_code(struct module *mod)
* conversion puts the module to the correct state, thus
* passing the ftrace_make_call check.
*/
- if (ftrace_start_up && ref) {
+ if (ftrace_start_up && cnt) {
int failed = __ftrace_replace_code(p, 1);
if (failed)
ftrace_bug(failed, p->ip);
@@ -2921,16 +3004,13 @@ static void __enable_ftrace_function_probe(void)
if (i == FTRACE_FUNC_HASHSIZE)
return;
- ret = __register_ftrace_function(&trace_probe_ops);
- if (!ret)
- ret = ftrace_startup(&trace_probe_ops, 0);
+ ret = ftrace_startup(&trace_probe_ops, 0);
ftrace_probe_registered = 1;
}
static void __disable_ftrace_function_probe(void)
{
- int ret;
int i;
if (!ftrace_probe_registered)
@@ -2943,9 +3023,7 @@ static void __disable_ftrace_function_probe(void)
}
/* no more funcs left */
- ret = __unregister_ftrace_function(&trace_probe_ops);
- if (!ret)
- ftrace_shutdown(&trace_probe_ops, 0);
+ ftrace_shutdown(&trace_probe_ops, 0);
ftrace_probe_registered = 0;
}
@@ -4078,12 +4156,15 @@ core_initcall(ftrace_nodyn_init);
static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
-# define ftrace_startup(ops, command) \
- ({ \
- (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
- 0; \
+# define ftrace_startup(ops, command) \
+ ({ \
+ int ___ret = __register_ftrace_function(ops); \
+ if (!___ret) \
+ (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
+ ___ret; \
})
-# define ftrace_shutdown(ops, command) do { } while (0)
+# define ftrace_shutdown(ops, command) __unregister_ftrace_function(ops)
+
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
@@ -4483,9 +4564,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
mutex_lock(&ftrace_lock);
- ret = __register_ftrace_function(ops);
- if (!ret)
- ret = ftrace_startup(ops, 0);
+ ret = ftrace_startup(ops, 0);
mutex_unlock(&ftrace_lock);
@@ -4504,9 +4583,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
int ret;
mutex_lock(&ftrace_lock);
- ret = __unregister_ftrace_function(ops);
- if (!ret)
- ftrace_shutdown(ops, 0);
+ ret = ftrace_shutdown(ops, 0);
mutex_unlock(&ftrace_lock);
return ret;
@@ -4700,6 +4777,13 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
return NOTIFY_DONE;
}
+/* Just a place holder for function graph */
+static struct ftrace_ops fgraph_ops __read_mostly = {
+ .func = ftrace_stub,
+ .flags = FTRACE_OPS_FL_GLOBAL |
+ FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
int register_ftrace_graph(trace_func_graph_ret_t retfunc,
trace_func_graph_ent_t entryfunc)
{
@@ -4726,7 +4810,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
ftrace_graph_return = retfunc;
ftrace_graph_entry = entryfunc;
- ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
+ ret = ftrace_startup(&fgraph_ops, FTRACE_START_FUNC_RET);
out:
mutex_unlock(&ftrace_lock);
@@ -4743,7 +4827,7 @@ void unregister_ftrace_graph(void)
ftrace_graph_active--;
ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
ftrace_graph_entry = ftrace_graph_entry_stub;
- ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
+ ftrace_shutdown(&fgraph_ops, FTRACE_STOP_FUNC_RET);
unregister_pm_notifier(&ftrace_suspend_notifier);
unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index efabe65..eb2848e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2519,7 +2519,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
}
#ifdef CONFIG_NUMA_BALANCING
-static bool __initdata numabalancing_override;
+static int __initdata numabalancing_override;
static void __init check_numabalancing_enable(void)
{
@@ -2528,9 +2528,15 @@ static void __init check_numabalancing_enable(void)
if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
numabalancing_default = true;
+ /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
+ if (numabalancing_override)
+ set_numabalancing_state(numabalancing_override == 1);
+
if (nr_node_ids > 1 && !numabalancing_override) {
- printk(KERN_INFO "Enabling automatic NUMA balancing. "
- "Configure with numa_balancing= or sysctl");
+ printk(KERN_INFO "%s automatic NUMA balancing. "
+ "Configure with numa_balancing= or the "
+ "kernel.numa_balancing sysctl",
+ numabalancing_default ? "Enabling" : "Disabling");
set_numabalancing_state(numabalancing_default);
}
}
@@ -2540,13 +2546,12 @@ static int __init setup_numabalancing(char *str)
int ret = 0;
if (!str)
goto out;
- numabalancing_override = true;
if (!strcmp(str, "enable")) {
- set_numabalancing_state(true);
+ numabalancing_override = 1;
ret = 1;
} else if (!strcmp(str, "disable")) {
- set_numabalancing_state(false);
+ numabalancing_override = -1;
ret = 1;
}
out:
@@ -2796,7 +2801,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
*/
VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
- if (!pol || pol == &default_policy)
+ if (!pol || pol == &default_policy || (pol->flags & MPOL_F_MORON))
mode = MPOL_DEFAULT;
else
mode = pol->mode;
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 0399f14..4eac855 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -170,7 +170,7 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
* implementation used by LSMs.
*/
if (has_capability_noaudit(p, CAP_SYS_ADMIN))
- adj -= 30;
+ points -= (points * 3) / 100;
/* Normalize to oom_score_adj units */
adj *= totalpages / 1000;
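Rough arithmetic behind the oom_kill.c hunk (illustration only, not part of the patch; this mirrors only the shape of oom_badness(), with hypothetical page counts and the other terms omitted): the old rule handed root a flat bonus of 3% of the *allowed* pages, the new rule discounts 3% of the pages the task *actually uses*, so lightly loaded root tasks no longer get an outsized advantage.

	#include <stdio.h>

	int main(void)
	{
		unsigned long totalpages = 1000000;	/* allowed pages               */
		unsigned long points     = 100000;	/* rss + swap + ptes, in pages */
		long adj = 0;				/* task's oom_score_adj        */

		/* old: adj -= 30, later scaled by totalpages/1000 */
		long old_pts = (long)points + (adj - 30) * (long)(totalpages / 1000);

		/* new: discount 3% of the task's own usage */
		long new_pts = (long)points - ((long)points * 3) / 100
			       + adj * (long)(totalpages / 1000);

		/* old = 70000, new = 97000 for a task using 10% of allowed memory */
		printf("old=%ld new=%ld (of %lu allowed)\n", old_pts, new_pts, totalpages);
		return 0;
	}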
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 906d540..68fe53b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -187,6 +187,26 @@ static unsigned long writeout_period_time = 0;
* global dirtyable memory first.
*/
+/**
+ * zone_dirtyable_memory - number of dirtyable pages in a zone
+ * @zone: the zone
+ *
+ * Returns the zone's number of pages potentially available for dirty
+ * page cache. This is the base value for the per-zone dirty limits.
+ */
+static unsigned long zone_dirtyable_memory(struct zone *zone)
+{
+ unsigned long nr_pages;
+
+ nr_pages = zone_page_state(zone, NR_FREE_PAGES);
+ nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
+
+ nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
+ nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);
+
+ return nr_pages;
+}
+
static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
@@ -194,11 +214,9 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
unsigned long x = 0;
for_each_node_state(node, N_HIGH_MEMORY) {
- struct zone *z =
- &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
+ struct zone *z = &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
- x += zone_page_state(z, NR_FREE_PAGES) +
- zone_reclaimable_pages(z) - z->dirty_balance_reserve;
+ x += zone_dirtyable_memory(z);
}
/*
* Unreclaimable memory (kernel memory or anonymous memory
@@ -234,9 +252,12 @@ static unsigned long global_dirtyable_memory(void)
{
unsigned long x;
- x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+ x = global_page_state(NR_FREE_PAGES);
x -= min(x, dirty_balance_reserve);
+ x += global_page_state(NR_INACTIVE_FILE);
+ x += global_page_state(NR_ACTIVE_FILE);
+
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
@@ -285,32 +306,6 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
}
/**
- * zone_dirtyable_memory - number of dirtyable pages in a zone
- * @zone: the zone
- *
- * Returns the zone's number of pages potentially available for dirty
- * page cache. This is the base value for the per-zone dirty limits.
- */
-static unsigned long zone_dirtyable_memory(struct zone *zone)
-{
- /*
- * The effective global number of dirtyable pages may exclude
- * highmem as a big-picture measure to keep the ratio between
- * dirty memory and lowmem reasonable.
- *
- * But this function is purely about the individual zone and a
- * highmem zone can hold its share of dirty pages, so we don't
- * care about vm_highmem_is_dirtyable here.
- */
- unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) +
- zone_reclaimable_pages(zone);
-
- /* don't allow this to underflow */
- nr_pages -= min(nr_pages, zone->dirty_balance_reserve);
- return nr_pages;
-}
-
-/**
* zone_dirty_limit - maximum number of dirty pages allowed in a zone
* @zone: the zone
*
diff --git a/mm/slub.c b/mm/slub.c
index ba2ca53..90fcd67 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4423,7 +4423,13 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
page = ACCESS_ONCE(c->partial);
if (page) {
- x = page->pobjects;
+ node = page_to_nid(page);
+ if (flags & SO_TOTAL)
+ WARN_ON_ONCE(1);
+ else if (flags & SO_OBJECTS)
+ WARN_ON_ONCE(1);
+ else
+ x = page->pages;
total += x;
nodes[node] += x;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8142623..9dcc836 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -146,6 +146,25 @@ static bool global_reclaim(struct scan_control *sc)
}
#endif
+static unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+ int nr;
+
+ nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+ zone_page_state(zone, NR_INACTIVE_FILE);
+
+ if (nr_swap_pages > 0)
+ nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+ zone_page_state(zone, NR_INACTIVE_ANON);
+
+ return nr;
+}
+
+static bool zone_reclaimable(struct zone *zone)
+{
+ return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+}
+
static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
if (!mem_cgroup_disabled())
@@ -2066,11 +2085,6 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
return aborted_reclaim;
}
-static bool zone_reclaimable(struct zone *zone)
-{
- return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
-}
-
/* All zones in zonelist are unreclaimable? */
static bool all_unreclaimable(struct zonelist *zonelist,
struct scan_control *sc)
@@ -3041,41 +3055,6 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
wake_up_interruptible(&pgdat->kswapd_wait);
}
-/*
- * The reclaimable count would be mostly accurate.
- * The less reclaimable pages may be
- * - mlocked pages, which will be moved to unevictable list when encountered
- * - mapped pages, which may require several travels to be reclaimed
- * - dirty pages, which is not "instantly" reclaimable
- */
-unsigned long global_reclaimable_pages(void)
-{
- int nr;
-
- nr = global_page_state(NR_ACTIVE_FILE) +
- global_page_state(NR_INACTIVE_FILE);
-
- if (nr_swap_pages > 0)
- nr += global_page_state(NR_ACTIVE_ANON) +
- global_page_state(NR_INACTIVE_ANON);
-
- return nr;
-}
-
-unsigned long zone_reclaimable_pages(struct zone *zone)
-{
- int nr;
-
- nr = zone_page_state(zone, NR_ACTIVE_FILE) +
- zone_page_state(zone, NR_INACTIVE_FILE);
-
- if (nr_swap_pages > 0)
- nr += zone_page_state(zone, NR_ACTIVE_ANON) +
- zone_page_state(zone, NR_INACTIVE_ANON);
-
- return nr;
-}
-
#ifdef CONFIG_HIBERNATION
/*
* Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
diff --git a/net/core/filter.c b/net/core/filter.c
index c23543c..9a8d5e8 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -36,7 +36,6 @@
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
-#include <linux/reciprocal_div.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
@@ -166,7 +165,7 @@ unsigned int sk_run_filter(const struct sk_buff *skb,
A /= X;
continue;
case BPF_S_ALU_DIV_K:
- A = reciprocal_divide(A, K);
+ A /= K;
continue;
case BPF_S_ALU_MOD_X:
if (X == 0)
@@ -549,11 +548,6 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
/* Some instructions need special checks */
switch (code) {
case BPF_S_ALU_DIV_K:
- /* check for division by zero */
- if (ftest->k == 0)
- return -EINVAL;
- ftest->k = reciprocal_value(ftest->k);
- break;
case BPF_S_ALU_MOD_K:
/* check for division by zero */
if (ftest->k == 0)
@@ -835,27 +829,7 @@ static void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to)
to->code = decodes[code];
to->jt = filt->jt;
to->jf = filt->jf;
-
- if (code == BPF_S_ALU_DIV_K) {
- /*
- * When loaded this rule user gave us X, which was
- * translated into R = r(X). Now we calculate the
- * RR = r(R) and report it back. If next time this
- * value is loaded and RRR = r(RR) is calculated
- * then the R == RRR will be true.
- *
- * One exception. X == 1 translates into R == 0 and
- * we can't calculate RR out of it with r().
- */
-
- if (filt->k == 0)
- to->k = 1;
- else
- to->k = reciprocal_value(filt->k);
-
- BUG_ON(reciprocal_value(to->k) != filt->k);
- } else
- to->k = filt->k;
+ to->k = filt->k;
}
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len)
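Why the reciprocal-divide removal above matters (illustration only, not part of the patch): the multiply-by-precomputed-reciprocal trick is not an exact u32 division for every (A, K) pair. The two helpers below mirror what the old include/linux/reciprocal_div.h did, to the best of my reading; treat them as an approximation used only to show an off-by-one result:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t reciprocal_value(uint32_t k)
	{
		uint64_t val = ((uint64_t)1 << 32) + (k - 1);
		return (uint32_t)(val / k);
	}

	static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
	{
		return (uint32_t)(((uint64_t)a * r) >> 32);
	}

	int main(void)
	{
		uint32_t k = 3, a = 0xfffffffeu;
		uint32_t r = reciprocal_value(k);	/* 1431655766 */

		printf("exact      A/K = %u\n", a / k);			/* 1431655764 */
		printf("reciprocal A/K = %u\n", reciprocal_divide(a, r));/* 1431655765 */
		return 0;
	}

With plain "A /= K" in the interpreter (and a real udiv in the JIT, with K == 1 short-circuited), both paths return the exact quotient.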
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 32443eb..cdcb448 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -74,36 +74,6 @@
struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
-static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
-{
- put_page(buf->page);
-}
-
-static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
-{
- get_page(buf->page);
-}
-
-static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
- struct pipe_buffer *buf)
-{
- return 1;
-}
-
-
-/* Pipe buffer operations for a socket. */
-static const struct pipe_buf_operations sock_pipe_buf_ops = {
- .can_merge = 0,
- .map = generic_pipe_buf_map,
- .unmap = generic_pipe_buf_unmap,
- .confirm = generic_pipe_buf_confirm,
- .release = sock_pipe_buf_release,
- .steal = sock_pipe_buf_steal,
- .get = sock_pipe_buf_get,
-};
-
/*
* Keep out-of-line to prevent kernel bloat.
* __builtin_return_address is not used because it is not always
@@ -1796,7 +1766,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
.partial = partial,
.nr_pages_max = MAX_SKB_FRAGS,
.flags = flags,
- .ops = &sock_pipe_buf_ops,
+ .ops = &nosteal_pipe_buf_ops,
.spd_release = sock_spd_release,
};
struct sk_buff *frag_iter;
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 22b1a70..4efd237 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -224,8 +224,10 @@ static int ieee802154_add_iface(struct sk_buff *skb,
if (info->attrs[IEEE802154_ATTR_DEV_TYPE]) {
type = nla_get_u8(info->attrs[IEEE802154_ATTR_DEV_TYPE]);
- if (type >= __IEEE802154_DEV_MAX)
- return -EINVAL;
+ if (type >= __IEEE802154_DEV_MAX) {
+ rc = -EINVAL;
+ goto nla_put_failure;
+ }
}
dev = phy->add_iface(phy, devname, type);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 5cd75e2..93ea4a1 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1052,6 +1052,8 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
}
in_dev = __in_dev_get_rtnl(dev);
+ if (!in_dev)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 84e812a..f3d5db4 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -959,7 +959,7 @@ next_normal:
++num;
}
- if (r->idiag_states & TCPF_TIME_WAIT) {
+ if (r->idiag_states & (TCPF_TIME_WAIT | TCPF_FIN_WAIT2)) {
struct inet_timewait_sock *tw;
inet_twsk_for_each(tw, node,
@@ -969,6 +969,8 @@ next_normal:
if (num < s_num)
goto next_dying;
+ if (!(r->idiag_states & (1 << tw->tw_substate)))
+ goto next_dying;
if (r->sdiag_family != AF_UNSPEC &&
tw->tw_family != r->sdiag_family)
goto next_dying;
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index e1e5a6a..9bf55b6 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -318,7 +318,7 @@ static int ip_rcv_finish(struct sk_buff *skb)
const struct iphdr *iph = ip_hdr(skb);
struct rtable *rt;
- if (sysctl_ip_early_demux && !skb_dst(skb)) {
+ if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
const struct net_protocol *ipprot;
int protocol = iph->protocol;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index ae0adfe..5bb4619 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -157,9 +157,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id)
static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4,
struct mr_table **mrt)
{
- struct ipmr_result res;
- struct fib_lookup_arg arg = { .result = &res, };
int err;
+ struct ipmr_result res;
+ struct fib_lookup_arg arg = {
+ .result = &res,
+ .flags = FIB_LOOKUP_NOREF,
+ };
err = fib_rules_lookup(net->ipv4.mr_rules_ops,
flowi4_to_flowi(flp4), 0, &arg);
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index f190419..ca1bafa 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -22,6 +22,9 @@
int sysctl_tcp_nometrics_save __read_mostly;
+static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
+ struct net *net, unsigned int hash);
+
struct tcp_fastopen_metrics {
u16 mss;
u16 syn_loss:10; /* Recurring Fast Open SYN losses */
@@ -127,16 +130,41 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst)
tm->tcpm_fastopen.cookie.len = 0;
}
+#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
+
+static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
+{
+ if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
+ tcpm_suck_dst(tm, dst);
+}
+
+#define TCP_METRICS_RECLAIM_DEPTH 5
+#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
+
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
struct inetpeer_addr *addr,
- unsigned int hash,
- bool reclaim)
+ unsigned int hash)
{
struct tcp_metrics_block *tm;
struct net *net;
+ bool reclaim = false;
spin_lock_bh(&tcp_metrics_lock);
net = dev_net(dst->dev);
+
+ /* While waiting for the spin-lock the cache might have been populated
+ * with this entry and so we have to check again.
+ */
+ tm = __tcp_get_metrics(addr, net, hash);
+ if (tm == TCP_METRICS_RECLAIM_PTR) {
+ reclaim = true;
+ tm = NULL;
+ }
+ if (tm) {
+ tcpm_check_stamp(tm, dst);
+ goto out_unlock;
+ }
+
if (unlikely(reclaim)) {
struct tcp_metrics_block *oldest;
@@ -166,17 +194,6 @@ out_unlock:
return tm;
}
-#define TCP_METRICS_TIMEOUT (60 * 60 * HZ)
-
-static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
-{
- if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
- tcpm_suck_dst(tm, dst);
-}
-
-#define TCP_METRICS_RECLAIM_DEPTH 5
-#define TCP_METRICS_RECLAIM_PTR (struct tcp_metrics_block *) 0x1UL
-
static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
if (tm)
@@ -277,7 +294,6 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
struct inetpeer_addr addr;
unsigned int hash;
struct net *net;
- bool reclaim;
addr.family = sk->sk_family;
switch (addr.family) {
@@ -297,13 +313,10 @@ static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
tm = __tcp_get_metrics(&addr, net, hash);
- reclaim = false;
- if (tm == TCP_METRICS_RECLAIM_PTR) {
- reclaim = true;
+ if (tm == TCP_METRICS_RECLAIM_PTR)
tm = NULL;
- }
if (!tm && create)
- tm = tcpm_new(dst, &addr, hash, reclaim);
+ tm = tcpm_new(dst, &addr, hash);
else
tcpm_check_stamp(tm, dst);
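The tcp_metrics hunk above re-runs the hash lookup after taking tcp_metrics_lock, since another CPU may have inserted the entry between the lockless lookup and the lock acquisition. A minimal userspace sketch of that check, lock, re-check pattern (illustration only, not part of the patch; the kernel's lockless side uses RCU, which a plain pthread mutex sketch does not capture):

	#include <pthread.h>
	#include <stdlib.h>

	struct entry { int key; struct entry *next; };

	static struct entry *head;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	static struct entry *lookup(int key)
	{
		for (struct entry *e = head; e; e = e->next)
			if (e->key == key)
				return e;
		return NULL;
	}

	static struct entry *get_or_create(int key)
	{
		struct entry *e = lookup(key);	/* optimistic lookup outside the lock */

		if (e)
			return e;

		pthread_mutex_lock(&lock);
		e = lookup(key);		/* re-check: another thread may have won */
		if (!e) {
			e = calloc(1, sizeof(*e));
			if (e) {
				e->key = key;
				e->next = head;
				head = e;
			}
		}
		pthread_mutex_unlock(&lock);
		return e;
	}

	int main(void)
	{
		return get_or_create(42) == get_or_create(42) ? 0 : 1;
	}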
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5345300..8b7f50c 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -49,7 +49,7 @@
int ip6_rcv_finish(struct sk_buff *skb)
{
- if (sysctl_ip_early_demux && !skb_dst(skb)) {
+ if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
const struct inet6_protocol *ipprot;
ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index fa3fe70..78af3c7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1631,6 +1631,15 @@ static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
return ip6_tnl_update(t, &p);
}
+static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct net *net = dev_net(dev);
+ struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
+
+ if (dev != ip6n->fb_tnl_dev)
+ unregister_netdevice_queue(dev, head);
+}
+
static size_t ip6_tnl_get_size(const struct net_device *dev)
{
return
@@ -1695,6 +1704,7 @@ static struct rtnl_link_ops ip6_link_ops __read_mostly = {
.validate = ip6_tnl_validate,
.newlink = ip6_tnl_newlink,
.changelink = ip6_tnl_changelink,
+ .dellink = ip6_tnl_dellink,
.get_size = ip6_tnl_get_size,
.fill_info = ip6_tnl_fill_info,
};
@@ -1725,6 +1735,8 @@ static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
}
}
+ t = rtnl_dereference(ip6n->tnls_wc[0]);
+ unregister_netdevice_queue(t->dev, &list);
unregister_netdevice_many(&list);
}
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 1535096..92b9b9f 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -141,9 +141,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id)
static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6,
struct mr6_table **mrt)
{
- struct ip6mr_result res;
- struct fib_lookup_arg arg = { .result = &res, };
int err;
+ struct ip6mr_result res;
+ struct fib_lookup_arg arg = {
+ .result = &res,
+ .flags = FIB_LOOKUP_NOREF,
+ };
err = fib_rules_lookup(net->ipv6.mr6_rules_ops,
flowi6_to_flowi(flp6), 0, &arg);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b1c5654..0511f0e 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1463,6 +1463,15 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
#endif
};
+static void ipip6_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct net *net = dev_net(dev);
+ struct sit_net *sitn = net_generic(net, sit_net_id);
+
+ if (dev != sitn->fb_tunnel_dev)
+ unregister_netdevice_queue(dev, head);
+}
+
static struct rtnl_link_ops sit_link_ops __read_mostly = {
.kind = "sit",
.maxtype = IFLA_IPTUN_MAX,
@@ -1473,6 +1482,7 @@ static struct rtnl_link_ops sit_link_ops __read_mostly = {
.changelink = ipip6_changelink,
.get_size = ipip6_get_size,
.fill_info = ipip6_fill_info,
+ .dellink = ipip6_dellink,
};
static struct xfrm_tunnel sit_handler __read_mostly = {
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d8b6ad9..eb8a2c8 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1371,14 +1371,18 @@ call_refreshresult(struct rpc_task *task)
task->tk_action = call_refresh;
switch (status) {
case 0:
- if (rpcauth_uptodatecred(task))
+ if (rpcauth_uptodatecred(task)) {
task->tk_action = call_allocate;
- return;
+ return;
+ }
+ /* Use rate-limiting and a max number of retries if refresh
+ * had status 0 but failed to update the cred.
+ */
case -ETIMEDOUT:
rpc_delay(task, 3*HZ);
- case -EKEYEXPIRED:
case -EAGAIN:
status = -EACCES;
+ case -EKEYEXPIRED:
if (!task->tk_cred_retry)
break;
task->tk_cred_retry--;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 9cd9b7c..142a59f 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -1941,7 +1941,19 @@ static int filename_trans_read(struct policydb *p, void *fp)
if (rc)
goto out;
- hashtab_insert(p->filename_trans, ft, otype);
+ rc = hashtab_insert(p->filename_trans, ft, otype);
+ if (rc) {
+ /*
+ * Do not return -EEXIST to the caller, or the system
+ * will not boot.
+ */
+ if (rc != -EEXIST)
+ goto out;
+ /* But free memory to avoid memory leak. */
+ kfree(ft);
+ kfree(name);
+ kfree(otype);
+ }
}
hash_eval(p->filename_trans, "filenametr");
return 0;
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 947cfb4..64d8b0b 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -30,6 +30,7 @@ config SND_ALS300
select SND_PCM
select SND_AC97_CODEC
select SND_OPL3_LIB
+ select ZONE_DMA
help
Say 'Y' or 'M' to include support for Avance Logic ALS300/ALS300+
@@ -54,6 +55,7 @@ config SND_ALI5451
tristate "ALi M5451 PCI Audio Controller"
select SND_MPU401_UART
select SND_AC97_CODEC
+ select ZONE_DMA
help
Say Y here to include support for the integrated AC97 sound
device on motherboards using the ALi M5451 Audio Controller
@@ -158,6 +160,7 @@ config SND_AZT3328
select SND_PCM
select SND_RAWMIDI
select SND_AC97_CODEC
+ select ZONE_DMA
help
Say Y here to include support for Aztech AZF3328 (PCI168)
soundcards.
@@ -463,6 +466,7 @@ config SND_EMU10K1
select SND_HWDEP
select SND_RAWMIDI
select SND_AC97_CODEC
+ select ZONE_DMA
help
Say Y to include support for Sound Blaster PCI 512, Live!,
Audigy and E-mu APS (partially supported) soundcards.
@@ -478,6 +482,7 @@ config SND_EMU10K1X
tristate "Emu10k1X (Dell OEM Version)"
select SND_AC97_CODEC
select SND_RAWMIDI
+ select ZONE_DMA
help
Say Y here to include support for the Dell OEM version of the
Sound Blaster Live!.
@@ -511,6 +516,7 @@ config SND_ES1938
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_AC97_CODEC
+ select ZONE_DMA
help
Say Y here to include support for soundcards based on ESS Solo-1
(ES1938, ES1946, ES1969) chips.
@@ -522,6 +528,7 @@ config SND_ES1968
tristate "ESS ES1968/1978 (Maestro-1/2/2E)"
select SND_MPU401_UART
select SND_AC97_CODEC
+ select ZONE_DMA
help
Say Y here to include support for soundcards based on ESS Maestro
1/2/2E chips.
@@ -603,6 +610,7 @@ config SND_ICE1712
select SND_MPU401_UART
select SND_AC97_CODEC
select BITREVERSE
+ select ZONE_DMA
help
Say Y here to include support for soundcards based on the
ICE1712 (Envy24) chip.
@@ -689,6 +697,7 @@ config SND_LX6464ES
config SND_MAESTRO3
tristate "ESS Allegro/Maestro3"
select SND_AC97_CODEC
+ select ZONE_DMA
help
Say Y here to include support for soundcards based on ESS Maestro 3
(Allegro) chips.
@@ -785,6 +794,7 @@ config SND_SIS7019
tristate "SiS 7019 Audio Accelerator"
depends on X86 && !X86_64
select SND_AC97_CODEC
+ select ZONE_DMA
help
Say Y here to include support for the SiS 7019 Audio Accelerator.
@@ -796,6 +806,7 @@ config SND_SONICVIBES
select SND_OPL3_LIB
select SND_MPU401_UART
select SND_AC97_CODEC
+ select ZONE_DMA
help
Say Y here to include support for soundcards based on the S3
SonicVibes chip.
@@ -807,6 +818,7 @@ config SND_TRIDENT
tristate "Trident 4D-Wave DX/NX; SiS 7018"
select SND_MPU401_UART
select SND_AC97_CODEC
+ select ZONE_DMA
help
Say Y here to include support for soundcards based on Trident
4D-Wave DX/NX or SiS 7018 chips.
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 773a67f..431bf68 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -285,7 +285,7 @@ static char channel_map_9636_ds[26] = {
/* ADAT channels are remapped */
1, 3, 5, 7, 9, 11, 13, 15,
/* channels 8 and 9 are S/PDIF */
- 24, 25
+ 24, 25,
/* others don't exist */
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1
};
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
index dafdbe8..0c499c6 100644
--- a/sound/soc/codecs/adau1701.c
+++ b/sound/soc/codecs/adau1701.c
@@ -64,7 +64,7 @@
#define ADAU1701_SEROCTL_WORD_LEN_24 0x0000
#define ADAU1701_SEROCTL_WORD_LEN_20 0x0001
-#define ADAU1701_SEROCTL_WORD_LEN_16 0x0010
+#define ADAU1701_SEROCTL_WORD_LEN_16 0x0002
#define ADAU1701_SEROCTL_WORD_LEN_MASK 0x0003
#define ADAU1701_AUXNPOW_VBPD 0x40
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index bfaf96b..ca1a248 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -43,6 +43,54 @@ static const struct reg_default wm5110_sysclk_revd_patch[] = {
{ 0x3133, 0x1201 },
{ 0x3183, 0x1501 },
{ 0x31D3, 0x1401 },
+ { 0x0049, 0x01ea },
+ { 0x004a, 0x01f2 },
+ { 0x0057, 0x01e7 },
+ { 0x0058, 0x01fb },
+ { 0x33ce, 0xc4f5 },
+ { 0x33cf, 0x1361 },
+ { 0x33d0, 0x0402 },
+ { 0x33d1, 0x4700 },
+ { 0x33d2, 0x026d },
+ { 0x33d3, 0xff00 },
+ { 0x33d4, 0x026d },
+ { 0x33d5, 0x0101 },
+ { 0x33d6, 0xc4f5 },
+ { 0x33d7, 0x0361 },
+ { 0x33d8, 0x0402 },
+ { 0x33d9, 0x6701 },
+ { 0x33da, 0xc4f5 },
+ { 0x33db, 0x136f },
+ { 0x33dc, 0xc4f5 },
+ { 0x33dd, 0x134f },
+ { 0x33de, 0xc4f5 },
+ { 0x33df, 0x131f },
+ { 0x33e0, 0x026d },
+ { 0x33e1, 0x4f01 },
+ { 0x33e2, 0x026d },
+ { 0x33e3, 0xf100 },
+ { 0x33e4, 0x026d },
+ { 0x33e5, 0x0001 },
+ { 0x33e6, 0xc4f5 },
+ { 0x33e7, 0x0361 },
+ { 0x33e8, 0x0402 },
+ { 0x33e9, 0x6601 },
+ { 0x33ea, 0xc4f5 },
+ { 0x33eb, 0x136f },
+ { 0x33ec, 0xc4f5 },
+ { 0x33ed, 0x134f },
+ { 0x33ee, 0xc4f5 },
+ { 0x33ef, 0x131f },
+ { 0x33f0, 0x026d },
+ { 0x33f1, 0x4e01 },
+ { 0x33f2, 0x026d },
+ { 0x33f3, 0xf000 },
+ { 0x33f6, 0xc4f5 },
+ { 0x33f7, 0x1361 },
+ { 0x33f8, 0x0402 },
+ { 0x33f9, 0x4600 },
+ { 0x33fa, 0x026d },
+ { 0x33fb, 0xfe00 },
};
static int wm5110_sysclk_ev(struct snd_soc_dapm_widget *w,
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ce6f511..f079b2e 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1013,6 +1013,7 @@ static struct machine *
union perf_event *event)
{
const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+ struct machine *machine;
if (perf_guest &&
((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
@@ -1024,7 +1025,11 @@ static struct machine *
else
pid = event->ip.pid;
- return perf_session__findnew_machine(session, pid);
+ machine = perf_session__find_machine(session, pid);
+ if (!machine)
+ machine = perf_session__findnew_machine(session,
+ DEFAULT_GUEST_KERNEL_ID);
+ return machine;
}
return perf_session__find_host_machine(session);
diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
index f09641d..d1b3a36 100644
--- a/tools/power/x86/turbostat/Makefile
+++ b/tools/power/x86/turbostat/Makefile
@@ -5,7 +5,7 @@ DESTDIR :=
turbostat : turbostat.c
CFLAGS += -Wall
-CFLAGS += -I../../../../arch/x86/include/uapi/
+CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
%: %.c
@mkdir -p $(BUILD_OUTPUT)
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index ce6d460..1f4d8df 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -20,7 +20,7 @@
*/
#define _GNU_SOURCE
-#include <asm/msr.h>
+#include MSRHEADER
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
@@ -35,6 +35,7 @@
#include <string.h>
#include <ctype.h>
#include <sched.h>
+#include <cpuid.h>
char *proc_stat = "/proc/stat";
unsigned int interval_sec = 5; /* set with -i interval_sec */
@@ -1810,7 +1811,7 @@ void check_cpuid()
eax = ebx = ecx = edx = 0;
- asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0));
+ __get_cpuid(0, &max_level, &ebx, &ecx, &edx);
if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
genuine_intel = 1;
@@ -1819,7 +1820,7 @@ void check_cpuid()
fprintf(stderr, "CPUID(0): %.4s%.4s%.4s ",
(char *)&ebx, (char *)&edx, (char *)&ecx);
- asm("cpuid" : "=a" (fms), "=c" (ecx), "=d" (edx) : "a" (1) : "ebx");
+ __get_cpuid(1, &fms, &ebx, &ecx, &edx);
family = (fms >> 8) & 0xf;
model = (fms >> 4) & 0xf;
stepping = fms & 0xf;
@@ -1841,7 +1842,7 @@ void check_cpuid()
* This check is valid for both Intel and AMD.
*/
ebx = ecx = edx = 0;
- asm("cpuid" : "=a" (max_level), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000000));
+ __get_cpuid(0x80000000, &max_level, &ebx, &ecx, &edx);
if (max_level < 0x80000007) {
fprintf(stderr, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level);
@@ -1852,7 +1853,7 @@ void check_cpuid()
* Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
* this check is valid for both Intel and AMD
*/
- asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
+ __get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx);
has_invariant_tsc = edx & (1 << 8);
if (!has_invariant_tsc) {
@@ -1865,7 +1866,7 @@ void check_cpuid()
* this check is valid for both Intel and AMD
*/
- asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
+ __get_cpuid(0x6, &eax, &ebx, &ecx, &edx);
has_aperf = ecx & (1 << 0);
do_dts = eax & (1 << 0);
do_ptm = eax & (1 << 6);
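On the turbostat change above (illustration only, not part of the patch): __get_cpuid() is the GCC/clang helper from <cpuid.h>; it takes care of the EBX handling under PIC and returns 0 when the requested leaf is not available, which the raw inline asm never reported. A minimal standalone usage sketch:

	#include <cpuid.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		if (!__get_cpuid(0, &eax, &ebx, &ecx, &edx)) {
			fprintf(stderr, "CPUID leaf 0 unsupported\n");
			return 1;
		}
		/* the vendor string lives in EBX, EDX, ECX, in that order */
		printf("max basic leaf %u, vendor %.4s%.4s%.4s\n", eax,
		       (char *)&ebx, (char *)&edx, (char *)&ecx);
		return 0;
	}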
--