Re: Linux 3.18.105

From: Greg KH
Date: Fri Apr 13 2018 - 16:25:57 EST


diff --git a/Makefile b/Makefile
index 2eae8b1039aa..ba34e4e77d96 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 18
-SUBLEVEL = 104
+SUBLEVEL = 105
EXTRAVERSION =
NAME = Diseased Newt

diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 5fb091675582..6052ea7d9d21 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -86,6 +86,7 @@
clocks = <&clks 201>;
VDDA-supply = <&reg_2p5v>;
VDDIO-supply = <&reg_3p3v>;
+ lrclk-strength = <3>;
};
};

diff --git a/arch/arm/include/asm/xen/events.h b/arch/arm/include/asm/xen/events.h
index 8b1f37bfeeec..b7aadab9b0e8 100644
--- a/arch/arm/include/asm/xen/events.h
+++ b/arch/arm/include/asm/xen/events.h
@@ -16,7 +16,7 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
return raw_irqs_disabled_flags(regs->ARM_cpsr);
}

-#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((ptr), \
+#define xchg_xen_ulong(ptr, val) atomic64_xchg(container_of((long long*)(ptr),\
atomic64_t, \
counter), (val))

diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index b85b781b05fd..e83874ba6e6d 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -761,6 +761,8 @@ static struct platform_device da8xx_dsp = {
.resource = da8xx_rproc_resources,
};

+static bool rproc_mem_inited __initdata;
+
#if IS_ENABLED(CONFIG_DA8XX_REMOTEPROC)

static phys_addr_t rproc_base __initdata;
@@ -799,6 +801,8 @@ void __init da8xx_rproc_reserve_cma(void)
ret = dma_declare_contiguous(&da8xx_dsp.dev, rproc_size, rproc_base, 0);
if (ret)
pr_err("%s: dma_declare_contiguous failed %d\n", __func__, ret);
+ else
+ rproc_mem_inited = true;
}

#else
@@ -813,6 +817,12 @@ int __init da8xx_register_rproc(void)
{
int ret;

+ if (!rproc_mem_inited) {
+ pr_warn("%s: memory not reserved for DSP, not registering DSP device\n",
+ __func__);
+ return -ENOMEM;
+ }
+
ret = platform_device_register(&da8xx_dsp);
if (ret)
pr_err("%s: can't register DSP device: %d\n", __func__, ret);
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 5f750dc96e0f..49d057eb93d6 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -44,16 +44,16 @@
: "memory")

static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
+ int oparg = (int)(encoded_op << 8) >> 20;
+ int cmparg = (int)(encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;

if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
+ oparg = 1U << (oparg & 0x1f);

if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
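
A minimal userspace sketch, not part of the patch, illustrating the idiom the arm64 futex change above relies on (assuming a two's-complement target with arithmetic right shift of signed ints, as the kernel does): casting to int before the right shift sign-extends the 12-bit oparg/cmparg fields, and masking the shift count keeps "1U << oparg" defined for any userspace-supplied value.

#include <stdio.h>

static void decode_futex_op(unsigned int encoded_op)
{
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	/* bits 12..23 and 0..11, sign-extended via cast + arithmetic shift */
	int oparg  = (int)(encoded_op << 8) >> 20;
	int cmparg = (int)(encoded_op << 20) >> 20;

	if (encoded_op & (8U << 28))		/* FUTEX_OP_OPARG_SHIFT flag */
		oparg = 1U << (oparg & 0x1f);	/* bounded shift count */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
}

int main(void)
{
	decode_futex_op(0x00005001u);	/* oparg = 5, cmparg = 1 */
	decode_futex_op(0x00fff000u);	/* oparg = -1 after sign extension */
	return 0;
}
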
diff --git a/arch/mips/include/asm/kprobes.h b/arch/mips/include/asm/kprobes.h
index daba1f9a4f79..174aedce3167 100644
--- a/arch/mips/include/asm/kprobes.h
+++ b/arch/mips/include/asm/kprobes.h
@@ -40,7 +40,8 @@ typedef union mips_instruction kprobe_opcode_t;

#define flush_insn_slot(p) \
do { \
- flush_icache_range((unsigned long)p->addr, \
+ if (p->addr) \
+ flush_icache_range((unsigned long)p->addr, \
(unsigned long)p->addr + \
(MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
} while (0)
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911ba748..b19a3c506b1e 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -51,15 +51,15 @@ void __init pagetable_init(void)
/*
* Fixed mappings:
*/
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+ fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);

#ifdef CONFIG_HIGHMEM
/*
* Permanent kmaps:
*/
vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+ fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

pgd = swapper_pg_dir + __pgd_offset(vaddr);
pud = pud_offset(pgd, vaddr);
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7505599c2593..a3d0109becd3 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -646,12 +646,20 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
static void start_cpu_decrementer(void)
{
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+ unsigned int tcr;
+
/* Clear any pending timer interrupts */
mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

- /* Enable decrementer interrupt */
- mtspr(SPRN_TCR, TCR_DIE);
-#endif /* defined(CONFIG_BOOKE) || defined(CONFIG_40x) */
+ tcr = mfspr(SPRN_TCR);
+ /*
+ * The watchdog may have already been enabled by u-boot. So leave
+ * TCR[WP] (Watchdog Period) alone.
+ */
+ tcr &= TCR_WP_MASK; /* Clear all bits except for TCR[WP] */
+ tcr |= TCR_DIE; /* Enable decrementer */
+ mtspr(SPRN_TCR, tcr);
+#endif
}

void __init generic_calibrate_decr(void)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index ce3c893d509b..e100aaf97f86 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -50,7 +50,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
pteg_addr = get_pteg_addr(vcpu, pte_index);

mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
+ ret = H_FUNCTION;
+ if (copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)))
+ goto done;
hpte = pteg;

ret = H_PTEG_FULL;
@@ -71,7 +73,9 @@ static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
hpte[0] = cpu_to_be64(kvmppc_get_gpr(vcpu, 6));
hpte[1] = cpu_to_be64(kvmppc_get_gpr(vcpu, 7));
pteg_addr += i * HPTE_SIZE;
- copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE))
+ goto done;
kvmppc_set_gpr(vcpu, 4, pte_index | i);
ret = H_SUCCESS;

@@ -93,7 +97,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)

pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);

@@ -103,7 +109,9 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
goto done;

- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v)))
+ goto done;

rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
@@ -171,7 +179,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
}

pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte))) {
+ ret = H_FUNCTION;
+ break;
+ }
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);

@@ -184,7 +195,10 @@ static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
tsh |= H_BULK_REMOVE_NOT_FOUND;
} else {
/* Splat the pteg in (userland) hpt */
- copy_to_user((void __user *)pteg, &v, sizeof(v));
+ if (copy_to_user((void __user *)pteg, &v, sizeof(v))) {
+ ret = H_FUNCTION;
+ break;
+ }

rb = compute_tlbie_rb(pte[0], pte[1],
tsh & H_BULK_REMOVE_PTEX);
@@ -211,7 +225,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)

pteg = get_pteg_addr(vcpu, pte_index);
mutex_lock(&vcpu->kvm->arch.hpt_mutex);
- copy_from_user(pte, (void __user *)pteg, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_from_user(pte, (void __user *)pteg, sizeof(pte)))
+ goto done;
pte[0] = be64_to_cpu((__force __be64)pte[0]);
pte[1] = be64_to_cpu((__force __be64)pte[1]);

@@ -234,7 +250,9 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
pte[0] = (__force u64)cpu_to_be64(pte[0]);
pte[1] = (__force u64)cpu_to_be64(pte[1]);
- copy_to_user((void __user *)pteg, pte, sizeof(pte));
+ ret = H_FUNCTION;
+ if (copy_to_user((void __user *)pteg, pte, sizeof(pte)))
+ goto done;
ret = H_SUCCESS;

done:
diff --git a/arch/powerpc/platforms/cell/spufs/coredump.c b/arch/powerpc/platforms/cell/spufs/coredump.c
index be6212ddbf06..7e42e3ec2142 100644
--- a/arch/powerpc/platforms/cell/spufs/coredump.c
+++ b/arch/powerpc/platforms/cell/spufs/coredump.c
@@ -174,6 +174,8 @@ static int spufs_arch_write_note(struct spu_context *ctx, int i,
if (!dump_skip(cprm,
roundup(cprm->written - total + sz, 4) - cprm->written))
goto Eio;
+
+ rc = 0;
out:
free_page((unsigned long)buf);
return rc;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index 35b13ed0af5f..3e9baf333b68 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -28,8 +28,14 @@ SECTIONS
{
. = 0x00000000;
.text : {
- _text = .; /* Text and read-only data */
+ /* Text and read-only data */
HEAD_TEXT
+ /*
+ * E.g. perf doesn't like symbols starting at address zero,
+ * therefore skip the initial PSW and channel program located
+ * at address zero and let _text start at 0x200.
+ */
+ _text = 0x200;
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c
index 71762565513e..8bca543977fe 100644
--- a/arch/sparc/kernel/ldc.c
+++ b/arch/sparc/kernel/ldc.c
@@ -1693,9 +1693,14 @@ static int read_nonraw(struct ldc_channel *lp, void *buf, unsigned int size)

lp->rcv_nxt = p->seqid;

+ /*
+ * If this is a control-only packet, there is nothing
+ * else to do but advance the rx queue since the packet
+ * was already processed above.
+ */
if (!(p->type & LDC_DATA)) {
new = rx_advance(lp, new);
- goto no_data;
+ break;
}
if (p->stype & (LDC_ACK | LDC_NACK)) {
err = data_ack_nack(lp, p);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 21187ebee7d0..37bae0d67714 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -356,6 +356,8 @@ static int __init tsc_setup(char *str)
tsc_clocksource_reliable = 1;
if (!strncmp(str, "noirqtime", 9))
no_sched_irq_time = 1;
+ if (!strcmp(str, "unstable"))
+ mark_tsc_unstable("boot parameter");
return 1;
}

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 36414d13289f..2e0c64a08549 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1467,6 +1467,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
*/
if (var->unusable)
var->db = 0;
+ /* This is symmetric with svm_set_segment() */
var->dpl = to_svm(vcpu)->vmcb->save.cpl;
break;
}
@@ -1611,18 +1612,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
s->base = var->base;
s->limit = var->limit;
s->selector = var->selector;
- if (var->unusable)
- s->attrib = 0;
- else {
- s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
- s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
- s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
- s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
- s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
- s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
- s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
- s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
- }
+ s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
+ s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
+ s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
+ s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
+ s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
+ s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
+ s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
+ s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;

/*
* This is always accurate, except if SYSRET returned to a segment
@@ -1631,7 +1628,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
* would entail passing the CPL to userspace and back.
*/
if (seg == VCPU_SREG_SS)
- svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
+ /* This is symmetric with svm_get_segment() */
+ svm->vmcb->save.cpl = (var->dpl & 3);

mark_dirty(svm->vmcb, VMCB_SEG);
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 6f474785c767..a60950b6ceb3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6935,11 +6935,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
int cr = exit_qualification & 15;
- int reg = (exit_qualification >> 8) & 15;
- unsigned long val = kvm_register_readl(vcpu, reg);
+ int reg;
+ unsigned long val;

switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
+ reg = (exit_qualification >> 8) & 15;
+ val = kvm_register_readl(vcpu, reg);
switch (cr) {
case 0:
if (vmcs12->cr0_guest_host_mask &
@@ -6994,6 +6996,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
* lmsw can change bits 1..3 of cr0, and only set bit 0 of
* cr0. Other attempted changes are ignored, with no exit.
*/
+ val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
return 1;
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 5cbd5d9ea61d..d9f4b3caaa41 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -165,6 +165,9 @@ bool bio_integrity_enabled(struct bio *bio)
if (!bio_is_rw(bio))
return false;

+ if (!bio_sectors(bio))
+ return false;
+
/* Already protected? */
if (bio_integrity(bio))
return false;
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 0d9e5f97f0a8..94de2055365e 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -309,8 +309,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,

if (info) {
struct partition_meta_info *pinfo = alloc_part_info(disk);
- if (!pinfo)
+ if (!pinfo) {
+ err = -ENOMEM;
goto out_free_stats;
+ }
memcpy(pinfo, info, sizeof(*info));
p->info = pinfo;
}
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
index d05327caf69d..c09a51dbe59e 100644
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -62,9 +62,6 @@ do_async_gen_syndrome(struct dma_chan *chan,
dma_addr_t dma_dest[2];
int src_off = 0;

- if (submit->flags & ASYNC_TX_FENCE)
- dma_flags |= DMA_PREP_FENCE;
-
while (src_cnt > 0) {
submit->flags = flags_orig;
pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
@@ -83,6 +80,8 @@ do_async_gen_syndrome(struct dma_chan *chan,
if (cb_fn_orig)
dma_flags |= DMA_PREP_INTERRUPT;
}
+ if (submit->flags & ASYNC_TX_FENCE)
+ dma_flags |= DMA_PREP_FENCE;

/* Drivers force forward progress in case they can not provide
* a descriptor
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index bb8cbf5961bf..d375968c8072 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -180,6 +180,12 @@ acpi_status acpi_enable_event(u32 event, u32 flags)

ACPI_FUNCTION_TRACE(acpi_enable_event);

+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */

if (event > ACPI_EVENT_MAX) {
@@ -237,6 +243,12 @@ acpi_status acpi_disable_event(u32 event, u32 flags)

ACPI_FUNCTION_TRACE(acpi_disable_event);

+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */

if (event > ACPI_EVENT_MAX) {
@@ -290,6 +302,12 @@ acpi_status acpi_clear_event(u32 event)

ACPI_FUNCTION_TRACE(acpi_clear_event);

+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Decode the Fixed Event */

if (event > ACPI_EVENT_MAX) {
diff --git a/drivers/acpi/acpica/psobject.c b/drivers/acpi/acpica/psobject.c
index a6885077d59e..e49a7fa4789e 100644
--- a/drivers/acpi/acpica/psobject.c
+++ b/drivers/acpi/acpica/psobject.c
@@ -118,6 +118,9 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
(u32)(walk_state->aml_offset +
sizeof(struct acpi_table_header)));

+ ACPI_ERROR((AE_INFO,
+ "Aborting disassembly, AML byte code is corrupt"));
+
/* Dump the context surrounding the invalid opcode */

acpi_ut_dump_buffer(((u8 *)walk_state->parser_state.
@@ -126,6 +129,14 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
sizeof(struct acpi_table_header) -
16));
acpi_os_printf(" */\n");
+
+ /*
+ * Just abort the disassembly, cannot continue because the
+ * parser is essentially lost. The disassembler can then
+ * randomly fail because an ill-constructed parse tree
+ * can result.
+ */
+ return_ACPI_STATUS(AE_AML_BAD_OPCODE);
#endif
}

@@ -290,6 +301,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
if (status == AE_CTRL_PARSE_CONTINUE) {
return_ACPI_STATUS(AE_CTRL_PARSE_CONTINUE);
}
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }

/* Create Op structure and append to parent's argument list */

diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 0b03f9056692..94249ceb99f0 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -419,8 +419,9 @@ int ahci_platform_init_host(struct platform_device *pdev,

irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
- dev_err(dev, "no irq\n");
- return -EINVAL;
+ if (irq != -EPROBE_DEFER)
+ dev_err(dev, "no irq\n");
+ return irq;
}

/* prepare host */
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d55156fc064d..6fa0efa48cc5 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -863,12 +863,16 @@ static void add_interrupt_bench(cycles_t start)
static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
__u32 *ptr = (__u32 *) regs;
+ unsigned int idx;

if (regs == NULL)
return 0;
- if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
- f->reg_idx = 0;
- return *(ptr + f->reg_idx++);
+ idx = READ_ONCE(f->reg_idx);
+ if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+ idx = 0;
+ ptr += idx++;
+ WRITE_ONCE(f->reg_idx, idx);
+ return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
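
A compact userspace sketch, not part of the patch, of the pattern get_reg() now follows (READ_ONCE/WRITE_ONCE are simplified stand-ins here): snapshot the shared index once, bounds-check and dereference only that snapshot, then publish the updated index, so a concurrent writer can never move the index between the check and the use.

#include <stdio.h>

/* simplified stand-ins for the kernel helpers */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

#define NWORDS 16

static unsigned int shared_words[NWORDS];
static unsigned int shared_idx;			/* may be updated concurrently */

static unsigned int get_word(void)
{
	unsigned int idx = READ_ONCE(shared_idx);	/* snapshot once */

	if (idx >= NWORDS)				/* check the snapshot */
		idx = 0;
	WRITE_ONCE(shared_idx, idx + 1);		/* publish the next index */
	return shared_words[idx];			/* use the same snapshot */
}

int main(void)
{
	for (unsigned int i = 0; i < NWORDS; i++)
		shared_words[i] = i * i;

	for (int i = 0; i < 20; i++)
		printf("%u ", get_word());
	printf("\n");
	return 0;
}
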
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 542fad70e360..f0e5a6aa2371 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -763,7 +763,7 @@ static int mv64x60_mc_err_probe(struct platform_device *pdev)
/* Non-ECC RAM? */
printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
res = -ENODEV;
- goto err2;
+ goto err;
}

edac_dbg(3, "init mci\n");
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index e4849413ee80..4f745c11d955 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -158,7 +158,7 @@ static void evict_entry(struct drm_gem_object *obj,
size_t size = PAGE_SIZE * n;
loff_t off = mmap_offset(obj) +
(entry->obj_pgoff << PAGE_SHIFT);
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

if (m > 1) {
int i;
@@ -415,7 +415,7 @@ static int fault_2d(struct drm_gem_object *obj,
* into account in some of the math, so figure out virtual stride
* in pages
*/
- const int m = 1 + ((omap_obj->width << fmt) / PAGE_SIZE);
+ const int m = DIV_ROUND_UP(omap_obj->width << fmt, PAGE_SIZE);

/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = ((unsigned long)vmf->virtual_address -
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index a6143ea51dfc..d28685bec34e 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -49,8 +49,6 @@ static int st_magn_spi_remove(struct spi_device *spi)
}

static const struct spi_device_id st_magn_id_table[] = {
- { LSM303DLHC_MAGN_DEV_NAME },
- { LSM303DLM_MAGN_DEV_NAME },
{ LIS3MDL_MAGN_DEV_NAME },
{},
};
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 587311887d94..422e022dcbbb 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2986,12 +2986,8 @@ static void srpt_queue_response(struct se_cmd *cmd)
}
spin_unlock_irqrestore(&ioctx->spinlock, flags);

- if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
- || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
- atomic_inc(&ch->req_lim_delta);
- srpt_abort_cmd(ioctx);
+ if (unlikely(WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT)))
return;
- }

dir = ioctx->cmd.data_direction;

diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c
index 9cb4b621fbc3..b92a19a594a1 100644
--- a/drivers/isdn/mISDN/stack.c
+++ b/drivers/isdn/mISDN/stack.c
@@ -72,7 +72,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
if (sk->sk_state != MISDN_BOUND)
continue;
if (!cskb)
- cskb = skb_copy(skb, GFP_KERNEL);
+ cskb = skb_copy(skb, GFP_ATOMIC);
if (!cskb) {
printk(KERN_WARNING "%s no skb\n", __func__);
break;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index c3a08b60535b..760deffa9ad3 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -281,7 +281,7 @@ static int pca955x_probe(struct i2c_client *client,
"slave address 0x%02x\n",
id->name, chip->bits, client->addr);

- if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;

if (pdata) {
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index ea47980949ef..f8c5bc719783 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -514,15 +514,21 @@ struct open_bucket {

/*
* We keep multiple buckets open for writes, and try to segregate different
- * write streams for better cache utilization: first we look for a bucket where
- * the last write to it was sequential with the current write, and failing that
- * we look for a bucket that was last used by the same task.
+ * write streams for better cache utilization: first we try to segregate flash
+ * only volume write streams from cached devices, secondly we look for a bucket
+ * where the last write to it was sequential with the current write, and
+ * failing that we look for a bucket that was last used by the same task.
*
* The ideas is if you've got multiple tasks pulling data into the cache at the
* same time, you'll get better cache utilization if you try to segregate their
* data and preserve locality.
*
- * For example, say you've starting Firefox at the same time you're copying a
+ * For example, dirty sectors of a flash-only volume are not reclaimable; if
+ * they are mixed in a bucket with dirty sectors of a cached device, that
+ * bucket will be marked as dirty and won't be reclaimed, even though the
+ * cached device's dirty data has already been written back to the backing device.
+ *
+ * And say you've started Firefox at the same time you're copying a
* bunch of files. Firefox will likely end up being fairly hot and stay in the
* cache awhile, but the data you copied might not be; if you wrote all that
* data to the same buckets it'd get invalidated at the same time.
@@ -539,7 +545,10 @@ static struct open_bucket *pick_data_bucket(struct cache_set *c,
struct open_bucket *ret, *ret_task = NULL;

list_for_each_entry_reverse(ret, &c->data_buckets, list)
- if (!bkey_cmp(&ret->key, search))
+ if (UUID_FLASH_ONLY(&c->uuids[KEY_INODE(&ret->key)]) !=
+ UUID_FLASH_ONLY(&c->uuids[KEY_INODE(search)]))
+ continue;
+ else if (!bkey_cmp(&ret->key, search))
goto found;
else if (ret->last_write_point == write_point)
ret_task = ret;
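
A condensed, self-contained sketch of the selection order the comment above describes; the types and names here are simplified stand-ins, not the bcache structures: discard open buckets whose flash-only class differs from the search key, prefer a bucket whose last write was sequential with this one, otherwise fall back to one last used by the same write point.

#include <stdbool.h>
#include <stdio.h>

struct open_bucket {
	bool		flash_only;	/* stream class of the bucket's data */
	unsigned long	last_key;	/* where the previous write ended */
	unsigned long	write_point;	/* task/stream that used it last */
	struct open_bucket *next;
};

static struct open_bucket *pick_bucket(struct open_bucket *head, bool flash_only,
				       unsigned long search_key,
				       unsigned long write_point)
{
	struct open_bucket *same_task = NULL;

	for (struct open_bucket *b = head; b; b = b->next) {
		if (b->flash_only != flash_only)
			continue;		/* keep the streams segregated */
		if (b->last_key == search_key)
			return b;		/* sequential with the last write */
		if (b->write_point == write_point)
			same_task = b;		/* remember a same-task bucket */
	}
	return same_task;	/* NULL means the caller opens a fresh bucket */
}

int main(void)
{
	struct open_bucket b2 = { false, 100, 7, NULL };
	struct open_bucket b1 = { true,  100, 7, &b2 };

	/* flash-only search matches b1, cached-device search matches b2 */
	printf("%d %d\n", pick_bucket(&b1, true, 100, 7) == &b1,
			  pick_bucket(&b1, false, 200, 7) == &b2);
	return 0;
}
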
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 9c56cf714b22..cb98bad58412 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -921,6 +921,12 @@ static void cached_dev_detach_finish(struct work_struct *w)

mutex_lock(&bch_register_lock);

+ cancel_delayed_work_sync(&dc->writeback_rate_update);
+ if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
+ kthread_stop(dc->writeback_thread);
+ dc->writeback_thread = NULL;
+ }
+
memset(&dc->sb.set_uuid, 0, 16);
SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

diff --git a/drivers/media/i2c/cx25840/cx25840-core.c b/drivers/media/i2c/cx25840/cx25840-core.c
index e453a3ffe7d1..50255128ec33 100644
--- a/drivers/media/i2c/cx25840/cx25840-core.c
+++ b/drivers/media/i2c/cx25840/cx25840-core.c
@@ -420,11 +420,13 @@ static void cx25840_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }

/* 6. */
cx25840_write(client, 0x115, 0x8c);
@@ -631,11 +633,13 @@ static void cx23885_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }

/* Call the cx23888 specific std setup func, we no longer rely on
* the generic cx24840 func.
@@ -746,11 +750,13 @@ static void cx231xx_initialize(struct i2c_client *client)
INIT_WORK(&state->fw_work, cx25840_work_handler);
init_waitqueue_head(&state->fw_wait);
q = create_singlethread_workqueue("cx25840_fw");
- prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
- queue_work(q, &state->fw_work);
- schedule();
- finish_wait(&state->fw_wait, &wait);
- destroy_workqueue(q);
+ if (q) {
+ prepare_to_wait(&state->fw_wait, &wait, TASK_UNINTERRUPTIBLE);
+ queue_work(q, &state->fw_work);
+ schedule();
+ finish_wait(&state->fw_wait, &wait);
+ destroy_workqueue(q);
+ }

cx25840_std_setup(client);

diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index f838d9c7ed12..0fba4a2c1602 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1370,8 +1370,13 @@ static int mceusb_dev_probe(struct usb_interface *intf,
goto rc_dev_fail;

/* wire up inbound data handler */
- usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
- mceusb_dev_recv, ir, ep_in->bInterval);
+ if (usb_endpoint_xfer_int(ep_in))
+ usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir, ep_in->bInterval);
+ else
+ usb_fill_bulk_urb(ir->urb_in, dev, pipe, ir->buf_in, maxp,
+ mceusb_dev_recv, ir);
+
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 566054c0eb9a..17c22702782c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1384,39 +1384,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_close;
}

- /* If the mode uses primary, then the following is handled by
- * bond_change_active_slave().
- */
- if (!bond_uses_primary(bond)) {
- /* set promiscuity level to new slave */
- if (bond_dev->flags & IFF_PROMISC) {
- res = dev_set_promiscuity(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- /* set allmulti level to new slave */
- if (bond_dev->flags & IFF_ALLMULTI) {
- res = dev_set_allmulti(slave_dev, 1);
- if (res)
- goto err_close;
- }
-
- netif_addr_lock_bh(bond_dev);
-
- dev_mc_sync_multiple(slave_dev, bond_dev);
- dev_uc_sync_multiple(slave_dev, bond_dev);
-
- netif_addr_unlock_bh(bond_dev);
- }
-
- if (BOND_MODE(bond) == BOND_MODE_8023AD) {
- /* add lacpdu mc addr to mc list */
- u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
-
- dev_mc_add(slave_dev, lacpdu_multicast);
- }
-
res = vlan_vids_add_by_dev(slave_dev, bond_dev);
if (res) {
netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
@@ -1567,6 +1534,40 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
goto err_upper_unlink;
}

+ /* If the mode uses primary, then the following is handled by
+ * bond_change_active_slave().
+ */
+ if (!bond_uses_primary(bond)) {
+ /* set promiscuity level to new slave */
+ if (bond_dev->flags & IFF_PROMISC) {
+ res = dev_set_promiscuity(slave_dev, 1);
+ if (res)
+ goto err_sysfs_del;
+ }
+
+ /* set allmulti level to new slave */
+ if (bond_dev->flags & IFF_ALLMULTI) {
+ res = dev_set_allmulti(slave_dev, 1);
+ if (res) {
+ if (bond_dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(slave_dev, -1);
+ goto err_sysfs_del;
+ }
+ }
+
+ netif_addr_lock_bh(bond_dev);
+ dev_mc_sync_multiple(slave_dev, bond_dev);
+ dev_uc_sync_multiple(slave_dev, bond_dev);
+ netif_addr_unlock_bh(bond_dev);
+
+ if (BOND_MODE(bond) == BOND_MODE_8023AD) {
+ /* add lacpdu mc addr to mc list */
+ u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
+
+ dev_mc_add(slave_dev, lacpdu_multicast);
+ }
+ }
+
bond->slave_cnt++;
bond_compute_features(bond);
bond_set_carrier(bond);
@@ -1589,6 +1590,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return 0;

/* Undo stages on error */
+err_sysfs_del:
+ bond_sysfs_slave_del(new_slave);
+
err_upper_unlink:
bond_upper_dev_unlink(bond_dev, slave_dev);

@@ -1596,9 +1600,6 @@ err_unregister:
netdev_rx_handler_unregister(slave_dev);

err_detach:
- if (!bond_uses_primary(bond))
- bond_hw_addr_flush(bond_dev, slave_dev);
-
vlan_vids_del_by_dev(slave_dev, bond_dev);
if (rcu_access_pointer(bond->primary_slave) == new_slave)
RCU_INIT_POINTER(bond->primary_slave, NULL);
@@ -2425,11 +2426,13 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
bond_for_each_slave_rcu(bond, slave, iter) {
unsigned long trans_start = dev_trans_start(slave->dev);

+ slave->new_link = BOND_LINK_NOCHANGE;
+
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
bond_time_in_interval(bond, slave->last_rx, 1)) {

- slave->link = BOND_LINK_UP;
+ slave->new_link = BOND_LINK_UP;
slave_state_changed = 1;

/* primary_slave has no meaning in round-robin
@@ -2456,7 +2459,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (!bond_time_in_interval(bond, trans_start, 2) ||
!bond_time_in_interval(bond, slave->last_rx, 2)) {

- slave->link = BOND_LINK_DOWN;
+ slave->new_link = BOND_LINK_DOWN;
slave_state_changed = 1;

if (slave->link_failure_count < UINT_MAX)
@@ -2487,6 +2490,11 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (!rtnl_trylock())
goto re_arm;

+ bond_for_each_slave(bond, slave, iter) {
+ if (slave->new_link != BOND_LINK_NOCHANGE)
+ slave->link = slave->new_link;
+ }
+
if (slave_state_changed) {
bond_slave_state_change(bond);
if (BOND_MODE(bond) == BOND_MODE_XOR)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ed4f2705a65c..f41f53fe1340 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3876,15 +3876,26 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* when transmitting in a vf, start bd must hold the ethertype
* for fw to enforce it
*/
+ u16 vlan_tci = 0;
#ifndef BNX2X_STOP_ON_ERROR
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
#endif
- tx_start_bd->vlan_or_ethertype =
- cpu_to_le16(ntohs(eth->h_proto));
+ /* Still need to consider inband vlan for enforced */
+ if (__vlan_get_tag(skb, &vlan_tci)) {
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(ntohs(eth->h_proto));
+ } else {
+ tx_start_bd->bd_flags.as_bitfield |=
+ (X_ETH_INBAND_VLAN <<
+ ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+ tx_start_bd->vlan_or_ethertype =
+ cpu_to_le16(vlan_tci);
+ }
#ifndef BNX2X_STOP_ON_ERROR
- else
+ } else {
/* used by FW for packet accounting */
tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+ }
#endif
}

diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 354ae9792bad..27651d26fb8a 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2856,7 +2856,7 @@ bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
- memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+ strncpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}

static void
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 964c6bf37710..4b9f5074ec32 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -370,7 +370,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
{
const struct of_device_id *id =
of_match_device(fsl_pq_mdio_match, &pdev->dev);
- const struct fsl_pq_mdio_data *data = id->data;
+ const struct fsl_pq_mdio_data *data;
struct device_node *np = pdev->dev.of_node;
struct resource res;
struct device_node *tbi;
@@ -378,6 +378,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
struct mii_bus *new_bus;
int err;

+ if (!id) {
+ dev_err(&pdev->dev, "Failed to match device\n");
+ return -ENODEV;
+ }
+
+ data = id->data;
+
dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible);

new_bus = mdiobus_alloc_size(sizeof(*priv));
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 87bd953cc2ee..41ce1aafcc1c 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -349,6 +349,7 @@ static int emac_reset(struct emac_instance *dev)
{
struct emac_regs __iomem *p = dev->emacp;
int n = 20;
+ bool __maybe_unused try_internal_clock = false;

DBG(dev, "reset" NL);

@@ -361,6 +362,7 @@ static int emac_reset(struct emac_instance *dev)
}

#ifdef CONFIG_PPC_DCR_NATIVE
+do_retry:
/*
* PPC460EX/GT Embedded Processor Advanced User's Manual
* section 28.10.1 Mode Register 0 (EMACx_MR0) states:
@@ -368,10 +370,19 @@ static int emac_reset(struct emac_instance *dev)
* of the EMAC. If none is present, select the internal clock
* (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
* After a soft reset, select the external clock.
+ *
+ * The AR8035-A PHY on the Meraki MR24 does not provide a TX Clk if the
+ * ethernet cable is not attached. This causes the reset to time out
+ * and the PHY detection code in emac_init_phy() is unable to
+ * communicate with and detect the AR8035-A PHY. As a result, the emac
+ * driver bails out early and the user has no ethernet.
+ * In order to stay compatible with existing configurations, the
+ * driver will temporarily switch to the internal clock, after
+ * the first reset fails.
*/
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: select internal loop clock before reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
0, SDR0_ETH_CFG_ECS << dev->cell_index);
@@ -389,8 +400,15 @@ static int emac_reset(struct emac_instance *dev)

#ifdef CONFIG_PPC_DCR_NATIVE
if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
- if (dev->phy_address == 0xffffffff &&
- dev->phy_map == 0xffffffff) {
+ if (!n && !try_internal_clock) {
+ /* first attempt has timed out. */
+ n = 20;
+ try_internal_clock = true;
+ goto do_retry;
+ }
+
+ if (try_internal_clock || (dev->phy_address == 0xffffffff &&
+ dev->phy_map == 0xffffffff)) {
/* No PHY: restore external clock source after reset */
dcri_clrset(SDR0, SDR0_ETH_CFG,
SDR0_ETH_CFG_ECS << dev->cell_index, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3df8366a8356..bec8dc10324d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1181,6 +1181,7 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)
struct e1000_hw *hw = &adapter->hw;

if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
+ struct sk_buff *skb = adapter->tx_hwtstamp_skb;
struct skb_shared_hwtstamps shhwtstamps;
u64 txstmp;

@@ -1189,9 +1190,14 @@ static void e1000e_tx_hwtstamp_work(struct work_struct *work)

e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);

- skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
- dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
+ /* Clear the global tx_hwtstamp_skb pointer and force writes
+ * prior to notifying the stack of a Tx timestamp.
+ */
adapter->tx_hwtstamp_skb = NULL;
+ wmb(); /* force write prior to skb_tstamp_tx */
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
} else if (time_after(jiffies, adapter->tx_hwtstamp_start
+ adapter->tx_timeout_factor * HZ)) {
dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
@@ -6357,12 +6363,17 @@ static int e1000e_pm_thaw(struct device *dev)
static int e1000e_pm_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
+ int rc;

e1000e_flush_lpic(pdev);

e1000e_pm_freeze(dev);

- return __e1000_shutdown(pdev, false);
+ rc = __e1000_shutdown(pdev, false);
+ if (rc)
+ e1000e_pm_thaw(dev);
+
+ return rc;
}

static int e1000e_pm_resume(struct device *dev)
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index bd3366267039..edc41554aae7 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5069,7 +5069,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);

pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 150;
+ pdev->d3_delay = 200;

return 0;

diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 872843179f44..c704e7bcda9d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -35,6 +35,7 @@
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
#include <linux/export.h>

#include "mlx4.h"
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);

+ if (!mlx4_qp_lookup(dev, rule->qpn)) {
+ mlx4_err_rule(dev, "QP doesn't exist\n", rule);
+ ret = -EINVAL;
+ goto out;
+ }
+
trans_rule_ctrl_to_hw(rule, mailbox->buf);

size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

list_for_each_entry(cur, &rule->list, list) {
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
- if (ret < 0) {
- mlx4_free_cmd_mailbox(dev, mailbox);
- return ret;
- }
+ if (ret < 0)
+ goto out;
+
size += ret;
}

@@ -1006,6 +1012,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
else if (ret)
mlx4_err_rule(dev, "Fail to register network rule\n", rule);

+out:
mlx4_free_cmd_mailbox(dev, mailbox);

return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index b295eeb2af69..87e00742d401 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -358,6 +358,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
__mlx4_qp_free_icm(dev, qpn);
}

+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
+{
+ struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
+ struct mlx4_qp *qp;
+
+ spin_lock(&qp_table->lock);
+
+ qp = __mlx4_qp_lookup(dev, qpn);
+
+ spin_unlock(&qp_table->lock);
+ return qp;
+}
+
int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index b8d5270359cd..e30676515529 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -247,7 +247,7 @@ nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
cmd.req.arg3 = 0;

if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
- netxen_issue_cmd(adapter, &cmd);
+ rcode = netxen_issue_cmd(adapter, &cmd);

if (rcode != NX_RCODE_SUCCESS)
return -EIO;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 69b46c051cc0..3295580b3aa0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
}
return -EIO;
}
- usleep_range(1000, 1500);
+ udelay(1200);
}

if (id_reg)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
index be258d90de9e..e3223f2fe2ff 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_dbg.c
@@ -765,7 +765,7 @@ int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_mpi_coredump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));

/* Get generic NIC reg dump */
@@ -1255,7 +1255,7 @@ static void ql_gen_reg_dump(struct ql_adapter *qdev,
sizeof(struct mpi_coredump_global_header);
mpi_coredump->mpi_global_header.imageSize =
sizeof(struct ql_reg_dump);
- memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ strncpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
sizeof(mpi_coredump->mpi_global_header.idString));


diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index f55be6ddf3a7..487d6ab771e9 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -297,8 +297,9 @@ qcaspi_receive(struct qcaspi *qca)

/* Allocate rx SKB if we don't have one available. */
if (!qca->rx_skb) {
- qca->rx_skb = netdev_alloc_skb(net_dev,
- net_dev->mtu + VLAN_ETH_HLEN);
+ qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
+ net_dev->mtu +
+ VLAN_ETH_HLEN);
if (!qca->rx_skb) {
netdev_dbg(net_dev, "out of RX resources\n");
qca->stats.out_of_mem++;
@@ -378,7 +379,7 @@ qcaspi_receive(struct qcaspi *qca)
qca->rx_skb, qca->rx_skb->dev);
qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_rx_ni(qca->rx_skb);
- qca->rx_skb = netdev_alloc_skb(net_dev,
+ qca->rx_skb = netdev_alloc_skb_ip_align(net_dev,
net_dev->mtu + VLAN_ETH_HLEN);
if (!qca->rx_skb) {
netdev_dbg(net_dev, "out of RX resources\n");
@@ -760,7 +761,8 @@ qcaspi_netdev_init(struct net_device *dev)
if (!qca->rx_buffer)
return -ENOBUFS;

- qca->rx_skb = netdev_alloc_skb(dev, qca->net_dev->mtu + VLAN_ETH_HLEN);
+ qca->rx_skb = netdev_alloc_skb_ip_align(dev, qca->net_dev->mtu +
+ VLAN_ETH_HLEN);
if (!qca->rx_skb) {
kfree(qca->rx_buffer);
netdev_info(qca->net_dev, "Failed to allocate RX sk_buff.\n");
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6e180db19c18..abaf73c47d78 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -8278,12 +8278,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;

+ pci_set_drvdata(pdev, dev);
+
rc = register_netdev(dev);
if (rc < 0)
goto err_out_msi_4;

- pci_set_drvdata(pdev, dev);
-
netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
(u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b89d7c16991d..8f3f692942d8 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2933,7 +2933,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
/* MDIO bus init */
ret = sh_mdio_init(mdp, pd);
if (ret) {
- dev_err(&ndev->dev, "failed to initialise MDIO\n");
+ dev_err(&pdev->dev, "failed to initialise MDIO\n");
goto out_release;
}

diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 48645504106e..7503451a94f2 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -293,6 +293,10 @@ struct cpsw_ss_regs {
/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT 16

+#define CPSW_MAX_BLKS_TX 15
+#define CPSW_MAX_BLKS_TX_SHIFT 4
+#define CPSW_MAX_BLKS_RX 5
+
struct cpsw_host_regs {
u32 max_blks;
u32 blk_cnt;
@@ -1120,11 +1124,23 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
switch (priv->version) {
case CPSW_VERSION_1:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting full-duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
break;
case CPSW_VERSION_2:
case CPSW_VERSION_3:
case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+ /* Increase RX FIFO size to 5 for supporting full-duplex
+ * flow control mode
+ */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
break;
}

diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index c67a27245072..69421ac06788 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (s->par.bitrate <= 0)
+ return -EINVAL;
if (bi.data.calibrate > INT_MAX / s->par.bitrate)
return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7ae062743bad..59a6973fe760 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -123,6 +123,12 @@ static inline int phy_aneg_done(struct phy_device *phydev)
if (phydev->drv->aneg_done)
return phydev->drv->aneg_done(phydev);

+ /* Avoid genphy_aneg_done() if the Clause 45 PHY does not
+ * implement Clause 22 registers
+ */
+ if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0)))
+ return -EINVAL;
+
return genphy_aneg_done(phydev);
}

diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 0710214df2bf..ade047f46430 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -488,7 +488,6 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
po->chan.mtu = dst_mtu(&rt->dst);
if (!po->chan.mtu)
po->chan.mtu = PPP_MRU;
- ip_rt_put(rt);
po->chan.mtu -= PPTP_HEADER_OVERHEAD;

po->chan.hdrlen = 2 + sizeof(struct pptp_gre_header);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ce2a29971230..1da54735681b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -551,7 +551,12 @@ static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
hdr = skb_vnet_hdr(skb);
sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr);
- skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+
+ err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
+ if (unlikely(err < 0)) {
+ dev_kfree_skb(skb);
+ return err;
+ }

err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
if (err < 0)
@@ -854,7 +859,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
struct skb_vnet_hdr *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
- unsigned num_sg;
+ int num_sg;
unsigned hdr_len;
bool can_push;

@@ -906,11 +911,16 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
if (can_push) {
__skb_push(skb, hdr_len);
num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
/* Pull header back to avoid skew in tx bytes calculations. */
__skb_pull(skb, hdr_len);
} else {
sg_set_buf(sq->sg, hdr, hdr_len);
- num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+ if (unlikely(num_sg < 0))
+ return num_sg;
+ num_sg++;
}
return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index a26ae97f755d..56a2bc86c439 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2648,6 +2648,11 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
/* we need to enable NAPI, otherwise dev_close will deadlock */
for (i = 0; i < adapter->num_rx_queues; i++)
napi_enable(&adapter->rx_queue[i].napi);
+ /*
+ * Need to clear the quiesce bit to ensure that vmxnet3_close
+ * can quiesce the device properly
+ */
+ clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
dev_close(adapter->netdev);
}

diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 2dc89582575a..3db77c274098 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -985,7 +985,7 @@ static bool vxlan_snoop(struct net_device *dev,
return false;

/* Don't migrate static entries, drop packets */
- if (f->state & NUD_NOARP)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
return true;

if (net_ratelimit())
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index c70782e8f07b..b5802e37ab24 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -939,7 +939,10 @@ static int open_file_eeprom(struct inode *inode, struct file *file)
}

for (i = 0; i < eesize; ++i) {
- AR5K_EEPROM_READ(i, val);
+ if (!ath5k_hw_nvram_read(ah, i, &val)) {
+ ret = -EIO;
+ goto freebuf;
+ }
buf[i] = val;
}

diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 8330fa33e50b..52a91137f49c 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -247,7 +247,10 @@ static const UCHAR b4_default_startup_parms[] = {
0x04, 0x08, /* Noise gain, limit offset */
0x28, 0x28, /* det rssi, med busy offsets */
7, /* det sync thresh */
- 0, 2, 2 /* test mode, min, max */
+ 0, 2, 2, /* test mode, min, max */
+ 0, /* rx/tx delay */
+ 0, 0, 0, 0, 0, 0, /* current BSS id */
+ 0 /* hop set */
};

/*===========================================================================*/
@@ -598,7 +601,7 @@ static void init_startup_params(ray_dev_t *local)
* a_beacon_period = hops a_beacon_period = KuS
*//* 64ms = 010000 */
if (local->fw_ver == 0x55) {
- memcpy((UCHAR *) &local->sparm.b4, b4_default_startup_parms,
+ memcpy(&local->sparm.b4, b4_default_startup_parms,
sizeof(struct b4_startup_params));
/* Translate sane kus input values to old build 4/5 format */
/* i = hop time in uS truncated to 3 bytes */
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 15527d8aa015..8720b8ae991c 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1200,8 +1200,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);

enable = bss_conf->arp_addr_cnt == 1 && bss_conf->assoc;
- wl1251_acx_arp_ip_filter(wl, enable, addr);
-
+ ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 84419af16f77..fd12ccc11e26 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -538,6 +538,7 @@ struct powercap_zone *powercap_register_zone(

power_zone->id = result;
idr_init(&power_zone->idr);
+ result = -ENOMEM;
power_zone->name = kstrdup(name, GFP_KERNEL);
if (!power_zone->name)
goto err_name_alloc;
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 5b2717f5dafa..d4153892c120 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -249,6 +249,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
missing = year;
}

+ /* Can't proceed if alarm is still invalid after replacing
+ * missing fields.
+ */
+ err = rtc_valid_tm(&alarm->time);
+ if (err)
+ goto done;
+
/* with luck, no rollover is needed */
rtc_tm_to_time(&now, &t_now);
rtc_tm_to_time(&alarm->time, &t_alm);
@@ -300,9 +307,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
dev_warn(&rtc->dev, "alarm rollover not handled\n");
}

-done:
err = rtc_valid_tm(&alarm->time);

+done:
if (err) {
dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n",
alarm->time.tm_year + 1900, alarm->time.tm_mon + 1,
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 1346e052e03c..8009158a6639 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -191,6 +191,7 @@ struct bnx2fc_hba {
struct bnx2fc_cmd_mgr *cmd_mgr;
spinlock_t hba_lock;
struct mutex hba_mutex;
+ struct mutex hba_stats_mutex;
unsigned long adapter_state;
#define ADAPTER_STATE_UP 0
#define ADAPTER_STATE_GOING_DOWN 1
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 72533c58c1f3..2577f8e86be3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -641,15 +641,17 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
if (!fw_stats)
return NULL;

+ mutex_lock(&hba->hba_stats_mutex);
+
bnx2fc_stats = fc_get_host_stats(shost);

init_completion(&hba->stat_req_done);
if (bnx2fc_send_stat_req(hba))
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
rc = wait_for_completion_timeout(&hba->stat_req_done, (2 * HZ));
if (!rc) {
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
- return bnx2fc_stats;
+ goto unlock_stats_mutex;
}
BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
@@ -671,6 +673,9 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)

memcpy(&hba->prev_stats, hba->stats_buffer,
sizeof(struct fcoe_statistics_params));
+
+unlock_stats_mutex:
+ mutex_unlock(&hba->hba_stats_mutex);
return bnx2fc_stats;
}

@@ -1303,6 +1308,7 @@ static struct bnx2fc_hba *bnx2fc_hba_create(struct cnic_dev *cnic)
}
spin_lock_init(&hba->hba_lock);
mutex_init(&hba->hba_mutex);
+ mutex_init(&hba->hba_stats_mutex);

hba->cnic = cnic;

diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 0f100b35f9ab..9ca83ebef776 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1695,6 +1695,15 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
*/
switch (session->state) {
case ISCSI_STATE_FAILED:
+ /*
+ * cmds should fail during shutdown, if the session
+ * state is bad, allowing completion to happen
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ reason = FAILURE_SESSION_FAILED;
+ sc->result = DID_NO_CONNECT << 16;
+ break;
+ }
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@@ -1998,6 +2007,19 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
}

if (session->state != ISCSI_STATE_LOGGED_IN) {
+ /*
+ * During shutdown, if the session is prematurely disconnected,
+ * recovery won't happen and there will be hung cmds. Not
+ * handling cmds would trigger EH, which is also bad in this case.
+ * Instead, handle the cmd, allow completion to happen and let the
+ * upper layer deal with the result.
+ */
+ if (unlikely(system_state != SYSTEM_RUNNING)) {
+ sc->result = DID_NO_CONNECT << 16;
+ ISCSI_DBG_EH(session, "sc on shutdown, handled\n");
+ rc = BLK_EH_HANDLED;
+ goto done;
+ }
/*
* We are probably in the middle of iscsi recovery so let
* that complete and handle the error.
@@ -2102,7 +2124,7 @@ done:
task->last_timeout = jiffies;
spin_unlock(&session->frwd_lock);
ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
- "timer reset" : "nh");
+ "timer reset" : "shutdown or nh");
return rc;
}

diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 0cac7d8fd0f7..e2630aea4e9f 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -282,6 +282,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->minimum_linkrate = dr->pmin_linkrate;
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
+ phy->phy->enabled = (phy->linkrate != SAS_PHY_DISABLED);

skip:
if (new_phy)
@@ -675,7 +676,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
res = smp_execute_task(dev, req, RPEL_REQ_SIZE,
resp, RPEL_RESP_SIZE);

- if (!res)
+ if (res)
goto out;

phy->invalid_dword_count = scsi_to_u32(&resp[12]);
@@ -684,6 +685,7 @@ int sas_smp_get_phy_events(struct sas_phy *phy)
phy->phy_reset_problem_count = scsi_to_u32(&resp[24]);

out:
+ kfree(req);
kfree(resp);
return res;
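
Two fixes land in sas_smp_get_phy_events(): smp_execute_task() returns 0 on success, so the old "if (!res)" skipped reading the response exactly when it was valid, and the request buffer was leaked because only resp was freed. A compact sketch of the corrected shape, with placeholder names:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* exec() stands in for smp_execute_task() and returns 0 on success. */
    static int get_events(int (*exec)(void *req, void *resp), unsigned int *count)
    {
        void *req = malloc(64);
        void *resp = malloc(64);
        int res = -ENOMEM;

        if (!req || !resp)
            goto out;

        res = exec(req, resp);
        if (res)                        /* only read the response on success */
            goto out;

        memcpy(count, resp, sizeof(*count));
    out:
        free(req);                      /* freed on every path, unlike before */
        free(resp);
        return res;
    }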

diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 013a6240f193..c1ad0aea23b9 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -169,7 +169,7 @@ int prism2mgmt_scan(wlandevice_t *wlandev, void *msgp)
hw->ident_sta_fw.variant) >
HFA384x_FIRMWARE_VERSION(1, 5, 0)) {
if (msg->scantype.data != P80211ENUM_scantype_active)
- word = cpu_to_le16(msg->maxchanneltime.data);
+ word = msg->maxchanneltime.data;
else
word = 0;

diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index db37ee49c3bf..450a8ca16351 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1467,6 +1467,10 @@ static void gsm_dlci_open(struct gsm_dlci *dlci)
* in which case an opening port goes back to closed and a closing port
* is simply put into closed state (any further frames from the other
* end will get a DM response)
+ *
+ * Some control dlci can stay in ADM mode with other dlci working just
+ * fine. In that case we can just keep the control dlci open after the
+ * DLCI_OPENING retries time out.
*/

static void gsm_dlci_t1(unsigned long data)
@@ -1480,8 +1484,15 @@ static void gsm_dlci_t1(unsigned long data)
if (dlci->retries) {
gsm_command(dlci->gsm, dlci->addr, SABM|PF);
mod_timer(&dlci->t1, jiffies + gsm->t1 * HZ / 100);
- } else
+ } else if (!dlci->addr && gsm->control == (DM | PF)) {
+ if (debug & 8)
+ pr_info("DLCI %d opening in ADM mode.\n",
+ dlci->addr);
+ gsm_dlci_open(dlci);
+ } else {
gsm_dlci_close(dlci);
+ }
+
break;
case DLCI_CLOSING:
dlci->retries--;
@@ -1499,8 +1510,8 @@ static void gsm_dlci_t1(unsigned long data)
* @dlci: DLCI to open
*
* Commence opening a DLCI from the Linux side. We issue SABM messages
- * to the modem which should then reply with a UA, at which point we
- * will move into open state. Opening is done asynchronously with retry
+ * to the modem which should then reply with a UA or ADM, at which point
+ * we will move into open state. Opening is done asynchronously with retry
* running off timers and the responses.
*/

diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index 75850f70b479..72f4146e8eed 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -884,14 +884,19 @@ static int sccnxp_probe(struct platform_device *pdev)

clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
- if (PTR_ERR(clk) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ ret = PTR_ERR(clk);
+ if (ret == -EPROBE_DEFER)
goto err_out;
- }
+ uartclk = 0;
+ } else {
+ clk_prepare_enable(clk);
+ uartclk = clk_get_rate(clk);
+ }
+
+ if (!uartclk) {
dev_notice(&pdev->dev, "Using default clock frequency\n");
uartclk = s->chip->freq_std;
- } else
- uartclk = clk_get_rate(clk);
+ }

/* Check input frequency */
if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 78f4d70db917..4a8934abdac8 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -553,7 +553,7 @@ static inline void ci_role_destroy(struct ci_hdrc *ci)
{
ci_hdrc_gadget_destroy(ci);
ci_hdrc_host_destroy(ci);
- if (ci->is_otg)
+ if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
ci_hdrc_otg_destroy(ci);
}

@@ -653,20 +653,28 @@ static int ci_hdrc_probe(struct platform_device *pdev)
/* initialize role(s) before the interrupt is requested */
if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
ret = ci_hdrc_host_init(ci);
- if (ret)
- dev_info(dev, "doesn't support host\n");
+ if (ret) {
+ if (ret == -ENXIO)
+ dev_info(dev, "doesn't support host\n");
+ else
+ goto deinit_phy;
+ }
}

if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
ret = ci_hdrc_gadget_init(ci);
- if (ret)
- dev_info(dev, "doesn't support gadget\n");
+ if (ret) {
+ if (ret == -ENXIO)
+ dev_info(dev, "doesn't support gadget\n");
+ else
+ goto deinit_host;
+ }
}

if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
dev_err(dev, "no supported roles\n");
ret = -ENODEV;
- goto deinit_phy;
+ goto deinit_gadget;
}

if (ci->is_otg && ci->roles[CI_ROLE_GADGET]) {
@@ -676,7 +684,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ret = ci_hdrc_otg_init(ci);
if (ret) {
dev_err(dev, "init otg fails, ret = %d\n", ret);
- goto stop;
+ goto deinit_gadget;
}
}

@@ -727,7 +735,12 @@ static int ci_hdrc_probe(struct platform_device *pdev)

free_irq(ci->irq, ci);
stop:
- ci_role_destroy(ci);
+ if (ci->is_otg && ci->roles[CI_ROLE_GADGET])
+ ci_hdrc_otg_destroy(ci);
+deinit_gadget:
+ ci_hdrc_gadget_destroy(ci);
+deinit_host:
+ ci_hdrc_host_destroy(ci);
deinit_phy:
usb_phy_shutdown(ci->transceiver);
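
ci_hdrc_probe() now treats -ENXIO ("role not supported") as non-fatal but bails out on any other host/gadget init failure, and the error path unwinds each completed stage through its own label instead of the catch-all ci_role_destroy(). The general shape of such an unwind ladder, reduced to a standalone sketch with invented stage names:

    #include <stdio.h>

    static int init_a(void) { return 0; }
    static int init_b(void) { return 0; }
    static int init_c(void) { return -1; }      /* pretend the last stage fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int probe(void)
    {
        int ret;

        ret = init_a();
        if (ret)
            goto out;
        ret = init_b();
        if (ret)
            goto err_a;
        ret = init_c();
        if (ret)
            goto err_b;
        return 0;

    err_b:                              /* undo in reverse order of setup */
        undo_b();
    err_a:
        undo_a();
    out:
        return ret;
    }

    int main(void) { return probe() ? 1 : 0; }

Each label undoes only what was set up before the failing step, which is what the new deinit_gadget/deinit_host labels do above.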

diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 7ec8495f4523..64539d84302b 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -117,6 +117,10 @@ static int kdwc3_probe(struct platform_device *pdev)
dev->dma_mask = &kdwc3_dma_mask;

kdwc->clk = devm_clk_get(kdwc->dev, "usb");
+ if (IS_ERR(kdwc->clk)) {
+ dev_err(kdwc->dev, "unable to get usb clock\n");
+ return PTR_ERR(kdwc->clk);
+ }

error = clk_prepare_enable(kdwc->clk);
if (error < 0) {
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index bd9654e9f0a7..54941bd3fe09 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -252,7 +252,6 @@ MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
- .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xhci-hcd",
.pm = DEV_PM_OPS,
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 26c26e3e21d3..e82e179f3558 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -1950,6 +1950,8 @@ static int ene_load_bincode(struct us_data *us, unsigned char flag)
bcb->CDB[0] = 0xEF;

result = ene_send_scsi_cmd(us, FDIR_WRITE, buf, 0);
+ if (us->srb != NULL)
+ scsi_set_resid(us->srb, 0);
info->BIN_FLAG = flag;
kfree(buf);

@@ -2303,21 +2305,22 @@ static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)

static int ene_transport(struct scsi_cmnd *srb, struct us_data *us)
{
- int result = 0;
+ int result = USB_STOR_XFER_GOOD;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);

/*US_DEBUG(usb_stor_show_command(us, srb)); */
scsi_set_resid(srb, 0);
- if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready))) {
+ if (unlikely(!(info->SD_Status.Ready || info->MS_Status.Ready)))
result = ene_init(us);
- } else {
+ if (result == USB_STOR_XFER_GOOD) {
+ result = USB_STOR_TRANSPORT_ERROR;
if (info->SD_Status.Ready)
result = sd_scsi_irp(us, srb);

if (info->MS_Status.Ready)
result = ms_scsi_irp(us, srb);
}
- return 0;
+ return result;
}


diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 1fe2c8115be0..1d7b0a950860 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -96,8 +96,7 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
if (mask & POLLERR) {
- if (poll->wqh)
- remove_wait_queue(poll->wqh, &poll->wait);
+ vhost_poll_stop(poll);
ret = -EINVAL;
}

diff --git a/drivers/video/fbdev/vfb.c b/drivers/video/fbdev/vfb.c
index 70a897b1e458..146cc3516f61 100644
--- a/drivers/video/fbdev/vfb.c
+++ b/drivers/video/fbdev/vfb.c
@@ -284,8 +284,23 @@ static int vfb_check_var(struct fb_var_screeninfo *var,
*/
static int vfb_set_par(struct fb_info *info)
{
+ switch (info->var.bits_per_pixel) {
+ case 1:
+ info->fix.visual = FB_VISUAL_MONO01;
+ break;
+ case 8:
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ break;
+ case 16:
+ case 24:
+ case 32:
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ break;
+ }
+
info->fix.line_length = get_line_length(info->var.xres_virtual,
info->var.bits_per_pixel);
+
return 0;
}

@@ -526,6 +541,8 @@ static int vfb_probe(struct platform_device *dev)
goto err2;
platform_set_drvdata(dev, info);

+ vfb_set_par(info);
+
fb_info(info, "Virtual frame buffer device, using %ldK of video memory\n",
videomemorysize >> 10);
return 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index b1131c319897..a12d733a4bfc 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2438,7 +2438,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
if (!uptodate) {
ClearPageUptodate(page);
SetPageError(page);
- ret = ret < 0 ? ret : -EIO;
+ ret = err < 0 ? err : -EIO;
mapping_set_error(page->mapping, ret);
}
return 0;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 855ad5e6bd22..488dd62fc242 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -583,7 +583,7 @@ cifs_relock_file(struct cifsFileInfo *cfile)
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
int rc = 0;

- down_read(&cinode->lock_sem);
+ down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
if (cinode->can_cache_brlcks) {
/* can cache locks - no need to relock */
up_read(&cinode->lock_sem);
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c147cc58056f..f0488ba39ac6 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -921,15 +921,19 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
goto tcon_exit;
}

- if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
+ switch (rsp->ShareType) {
+ case SMB2_SHARE_TYPE_DISK:
cifs_dbg(FYI, "connection to disk share\n");
- else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
+ break;
+ case SMB2_SHARE_TYPE_PIPE:
tcon->ipc = true;
cifs_dbg(FYI, "connection to pipe share\n");
- } else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
- tcon->print = true;
+ break;
+ case SMB2_SHARE_TYPE_PRINT:
+ tcon->ipc = true;
cifs_dbg(FYI, "connection to printer\n");
- } else {
+ break;
+ default:
cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
rc = -EOPNOTSUPP;
goto tcon_error_exit;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 64623537f3b0..aa7cc4a51a84 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -300,7 +300,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode,
int i, num;
unsigned long nr_pages;

- num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ num = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
(pgoff_t)num);
if (nr_pages == 0)
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index 61024987f97b..12d339ee6e06 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -129,6 +129,8 @@ lockd(void *vrqstp)
{
int err = 0;
struct svc_rqst *rqstp = vrqstp;
+ struct net *net = &init_net;
+ struct lockd_net *ln = net_generic(net, lockd_net_id);

/* try_to_freeze() is called from svc_recv() */
set_freezable();
@@ -173,6 +175,8 @@ lockd(void *vrqstp)
if (nlmsvc_ops)
nlmsvc_invalidate_all();
nlm_shutdown_hosts();
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ locks_end_grace(&ln->lockd_manager);
return 0;
}

@@ -267,8 +271,6 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
- cancel_delayed_work_sync(&ln->grace_period_end);
- locks_end_grace(&ln->lockd_manager);
svc_shutdown_net(serv, net);
dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index f7957690ff20..321044c183f5 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7429,6 +7429,12 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
/* fall through */
case -NFS4ERR_RETRY_UNCACHED_REP:
return -EAGAIN;
+ case -NFS4ERR_BADSESSION:
+ case -NFS4ERR_DEADSESSION:
+ case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ nfs4_schedule_session_recovery(clp->cl_session,
+ task->tk_status);
+ break;
default:
nfs4_schedule_lease_recovery(clp);
}
@@ -7507,7 +7513,6 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp,
if (status == 0)
status = task->tk_status;
rpc_put_task(task);
- return 0;
out:
dprintk("<-- %s status=%d\n", __func__, status);
return status;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f471662c0a1f..972992b36660 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1563,13 +1563,14 @@ static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp)
nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_reboot);
}

-static void nfs4_reclaim_complete(struct nfs_client *clp,
+static int nfs4_reclaim_complete(struct nfs_client *clp,
const struct nfs4_state_recovery_ops *ops,
struct rpc_cred *cred)
{
/* Notify the server we're done reclaiming our state */
if (ops->reclaim_complete)
- (void)ops->reclaim_complete(clp, cred);
+ return ops->reclaim_complete(clp, cred);
+ return 0;
}

static void nfs4_clear_reclaim_server(struct nfs_server *server)
@@ -1616,13 +1617,16 @@ static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp)
{
const struct nfs4_state_recovery_ops *ops;
struct rpc_cred *cred;
+ int err;

if (!nfs4_state_clear_reclaim_reboot(clp))
return;
ops = clp->cl_mvops->reboot_recovery_ops;
cred = nfs4_get_clid_cred(clp);
- nfs4_reclaim_complete(clp, ops, cred);
+ err = nfs4_reclaim_complete(clp, ops, cred);
put_rpccred(cred);
+ if (err == -NFS4ERR_CONN_NOT_BOUND_TO_SESSION)
+ set_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state);
}

static void nfs_delegation_clear_all(struct nfs_client *clp)
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 6d52041d13cf..df5c2853c4e5 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -258,6 +258,16 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
return vfs_getxattr(realpath.dentry, name, value, size);
}

+static bool ovl_can_list(const char *s)
+{
+ /* List all non-trusted xattrs */
+ if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0)
+ return true;
+
+ /* Never list trusted.overlay, list other trusted for superuser only */
+ return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
+}
+
ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
{
struct path realpath;
@@ -282,7 +292,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
return -EIO;

len -= slen;
- if (ovl_is_private_xattr(s)) {
+ if (!ovl_can_list(s)) {
res -= slen;
memmove(s, s + slen, len);
} else {
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 5f4e36cf0091..30a73892a8d6 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -437,6 +437,7 @@ struct mlx4_update_qp_params {
u32 flags;
};

+struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn);
int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn,
enum mlx4_update_qp_attr attr,
struct mlx4_update_qp_params *params);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e5ba0236047e..008a270faf26 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -845,10 +845,10 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
-int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
- int offset, int len);
-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
- int len);
+int __must_check skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
+int __must_check skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
int skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a) consume_skb(a)
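
skb_to_sgvec() and skb_to_sgvec_nomark() gain __must_check because, with the rest of this series, ignoring their return value can mean using a scatterlist that was never fully filled. In the kernel, __must_check is the compiler's warn_unused_result attribute; a tiny out-of-tree illustration:

    /* Equivalent of the kernel's __must_check annotation: dropping the
     * return value of fill_table() triggers -Wunused-result. */
    #define must_check __attribute__((warn_unused_result))

    static must_check int fill_table(int *tbl, int len)
    {
        return len > 0 ? len : -1;
    }

    int main(void)
    {
        int tbl[4];
        int n = fill_table(tbl, 4);     /* OK: result consumed */

        /* fill_table(tbl, 4); */       /* would warn if uncommented */
        return n < 0;
    }
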
diff --git a/include/net/x25.h b/include/net/x25.h
index c383aa4edbf0..6d30a01d281d 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -298,10 +298,10 @@ void x25_check_rbuf(struct sock *);

/* sysctl_net_x25.c */
#ifdef CONFIG_SYSCTL
-void x25_register_sysctl(void);
+int x25_register_sysctl(void);
void x25_unregister_sysctl(void);
#else
-static inline void x25_register_sysctl(void) {};
+static inline int x25_register_sysctl(void) { return 0; };
static inline void x25_unregister_sysctl(void) {};
#endif /* CONFIG_SYSCTL */

diff --git a/kernel/events/core.c b/kernel/events/core.c
index de3303aab7d6..9eb7710914fb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4861,9 +4861,6 @@ static void perf_output_read_one(struct perf_output_handle *handle,
__output_copy(handle, values, n * sizeof(u64));
}

-/*
- * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
- */
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
@@ -4908,6 +4905,13 @@ static void perf_output_read_group(struct perf_output_handle *handle,
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
PERF_FORMAT_TOTAL_TIME_RUNNING)

+/*
+ * XXX PERF_SAMPLE_READ vs inherited events seems difficult.
+ *
+ * The problem is that its both hard and excessively expensive to iterate the
+ * child list, not to mention that its impossible to IPI the children running
+ * on another CPU, from interrupt/NMI context.
+ */
static void perf_output_read(struct perf_output_handle *handle,
struct perf_event *event)
{
@@ -7194,9 +7198,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
local64_set(&hwc->period_left, hwc->sample_period);

/*
- * we currently do not support PERF_FORMAT_GROUP on inherited events
+ * We currently do not support PERF_SAMPLE_READ on inherited events.
+ * See perf_output_read().
*/
- if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
+ if (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))
goto err_ns;

pmu = perf_init_event(event);
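
The guard in perf_event_alloc() moves from PERF_FORMAT_GROUP to PERF_SAMPLE_READ: per the new comment, an inherited event's children can neither be iterated cheaply nor IPI'd from NMI context, so the unsupported combination is refused at creation time instead of producing misleading counts later. Rejecting a bad flag combination up front looks like this in isolation (flag names invented):

    #include <errno.h>

    #define ATTR_INHERIT      (1u << 0)   /* invented flag names */
    #define ATTR_SAMPLE_READ  (1u << 1)

    /* Refuse a combination we cannot honour rather than accept it and
     * return bogus data later. */
    static int validate_attr(unsigned int flags)
    {
        if ((flags & ATTR_INHERIT) && (flags & ATTR_SAMPLE_READ))
            return -EINVAL;
        return 0;
    }
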
diff --git a/kernel/futex.c b/kernel/futex.c
index 168f369c5eea..ed99fda98cb1 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -400,6 +400,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct page *page, *page_head;
+ struct address_space *mapping;
int err, ro = 0;

/*
@@ -478,7 +479,19 @@ again:
}
#endif

- lock_page(page_head);
+ /*
+ * The treatment of mapping from this point on is critical. The page
+ * lock protects many things but in this context the page lock
+ * stabilizes mapping, prevents inode freeing in the shared
+ * file-backed region case and guards against movement to swap cache.
+ *
+ * Strictly speaking the page lock is not needed in all cases being
+ * considered here and the page lock forces unnecessary serialization.
+ * From this point on, mapping will be re-verified if necessary and
+ * page lock will be acquired only if it is unavoidable
+ */
+
+ mapping = READ_ONCE(page_head->mapping);

/*
* If page_head->mapping is NULL, then it cannot be a PageAnon
@@ -495,18 +508,31 @@ again:
* shmem_writepage move it from filecache to swapcache beneath us:
* an unlikely race, but we do need to retry for page_head->mapping.
*/
- if (!page_head->mapping) {
- int shmem_swizzled = PageSwapCache(page_head);
+ if (unlikely(!mapping)) {
+ int shmem_swizzled;
+
+ /*
+ * Page lock is required to identify which special case above
+ * applies. If this is really a shmem page then the page lock
+ * will prevent unexpected transitions.
+ */
+ lock_page(page);
+ shmem_swizzled = PageSwapCache(page) || page->mapping;
unlock_page(page_head);
put_page(page_head);
+
if (shmem_swizzled)
goto again;
+
return -EFAULT;
}

/*
* Private mappings are handled in a simple way.
*
+ * If the futex key is stored on an anonymous page, then the associated
+ * object is the mm which is implicitly pinned by the calling process.
+ *
* NOTE: When userspace waits on a MAP_SHARED mapping, even if
* it's a read-only handle, it's expected that futexes attach to
* the object not the particular process.
@@ -524,16 +550,74 @@ again:
key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
key->private.mm = mm;
key->private.address = address;
+
+ get_futex_key_refs(key); /* implies smp_mb(); (B) */
+
} else {
+ struct inode *inode;
+
+ /*
+ * The associated futex object in this case is the inode and
+ * the page->mapping must be traversed. Ordinarily this should
+ * be stabilised under page lock but it's not strictly
+ * necessary in this case as we just want to pin the inode, not
+ * update the radix tree or anything like that.
+ *
+ * The RCU read lock is taken as the inode is finally freed
+ * under RCU. If the mapping still matches expectations then the
+ * mapping->host can be safely accessed as being a valid inode.
+ */
+ rcu_read_lock();
+
+ if (READ_ONCE(page_head->mapping) != mapping) {
+ rcu_read_unlock();
+ put_page(page_head);
+
+ goto again;
+ }
+
+ inode = READ_ONCE(mapping->host);
+ if (!inode) {
+ rcu_read_unlock();
+ put_page(page_head);
+
+ goto again;
+ }
+
+ /*
+ * Take a reference unless it is about to be freed. Previously
+ * this reference was taken by ihold under the page lock
+ * pinning the inode in place so i_lock was unnecessary. The
+ * only way for this check to fail is if the inode was
+ * truncated in parallel so warn for now if this happens.
+ *
+ * We are not calling into get_futex_key_refs() in file-backed
+ * cases, therefore a successful atomic_inc return below will
+ * guarantee that get_futex_key() will still imply smp_mb(); (B).
+ */
+ if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+ rcu_read_unlock();
+ put_page(page_head);
+
+ goto again;
+ }
+
+ /* Should be impossible but let's be paranoid for now */
+ if (WARN_ON_ONCE(inode->i_mapping != mapping)) {
+ err = -EFAULT;
+ rcu_read_unlock();
+ iput(inode);
+
+ goto out;
+ }
+
key->both.offset |= FUT_OFF_INODE; /* inode-based key */
- key->shared.inode = page_head->mapping->host;
+ key->shared.inode = inode;
key->shared.pgoff = basepage_index(page);
+ rcu_read_unlock();
}

- get_futex_key_refs(key); /* implies MB (B) */
-
out:
- unlock_page(page_head);
put_page(page_head);
return err;
}
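
The file-backed branch of get_futex_key() no longer takes the page lock up front; it re-checks page->mapping under rcu_read_lock() and pins the inode with atomic_inc_not_zero(), retrying the lookup if the inode is already headed for freeing. The "increment only if still non-zero" step is the crux; a user-space sketch with C11 atomics (an analogue, not the kernel helper itself):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Take a reference only if the object has not already dropped to a
     * zero refcount, i.e. is not being torn down. */
    static bool ref_get_unless_zero(atomic_uint *count)
    {
        unsigned int old = atomic_load(count);

        while (old != 0) {
            if (atomic_compare_exchange_weak(count, &old, old + 1))
                return true;    /* reference taken */
            /* old was reloaded by the failed CAS; retry */
        }
        return false;           /* object dying: caller must retry lookup */
    }

A false return tells the caller the object is going away and the whole lookup must be repeated, which is what the goto again paths above do.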
diff --git a/kernel/pid.c b/kernel/pid.c
index 65a386e24811..50ad2e5b2963 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -316,8 +316,10 @@ struct pid *alloc_pid(struct pid_namespace *ns)
}

if (unlikely(is_child_reaper(pid))) {
- if (pid_ns_prepare_proc(ns))
+ if (pid_ns_prepare_proc(ns)) {
+ disable_pid_allocation(ns);
goto out_free;
+ }
}

get_pid_ns(ns);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index cb05d7f16a34..7aba8783c13d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -1635,6 +1635,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
{
struct hci_dev *hdev = req->hdev;
u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ bool changed = false;

/* If Connectionless Slave Broadcast master role is supported
* enable all necessary events for it.
@@ -1644,6 +1645,7 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
events[1] |= 0x80; /* Synchronization Train Complete */
events[2] |= 0x10; /* Slave Page Response Timeout */
events[2] |= 0x20; /* CSB Channel Map Change */
+ changed = true;
}

/* If Connectionless Slave Broadcast slave role is supported
@@ -1654,13 +1656,24 @@ static void hci_set_event_mask_page_2(struct hci_request *req)
events[2] |= 0x02; /* CSB Receive */
events[2] |= 0x04; /* CSB Timeout */
events[2] |= 0x08; /* Truncated Page Complete */
+ changed = true;
}

/* Enable Authenticated Payload Timeout Expired event if supported */
- if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
+ if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
events[2] |= 0x80;
+ changed = true;
+ }

- hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
+ /* Some Broadcom based controllers indicate support for Set Event
+ * Mask Page 2 command, but then actually do not support it. Since
+ * the default value is all bits set to zero, the command is only
+ * required if the event mask has to be changed. In case no change
+ * to the event mask is needed, skip this command.
+ */
+ if (changed)
+ hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2,
+ sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index 8bb2e0c1cb50..c2f815d44914 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -270,6 +270,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
u32 yes;
struct crush_rule *r;

+ err = -EINVAL;
ceph_decode_32_safe(p, end, yes, bad);
if (!yes) {
dout("crush_decode NO rule %d off %x %p to %p\n",
diff --git a/net/core/dev.c b/net/core/dev.c
index 5fa8a6e1d0d4..8bd1e4973d8f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -937,7 +937,7 @@ bool dev_valid_name(const char *name)
{
if (*name == '\0')
return false;
- if (strlen(name) >= IFNAMSIZ)
+ if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
return false;
if (!strcmp(name, ".") || !strcmp(name, ".."))
return false;
@@ -2372,7 +2372,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
return 0;

- eth = (struct ethhdr *)skb_mac_header(skb);
+ eth = (struct ethhdr *)skb->data;
type = eth->h_proto;
}
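
dev_valid_name() switches from strlen() to strnlen() so the length check itself can never read past IFNAMSIZ bytes when the supplied name is not NUL-terminated. A standalone version of the bounded check (the trailing per-character loop of the real function is omitted here):

    #include <stdbool.h>
    #include <string.h>

    #define IFNAMSIZ 16

    /* A name with no terminator inside IFNAMSIZ bytes is invalid, and
     * strnlen() guarantees we never look beyond that bound. */
    static bool valid_name(const char *name)
    {
        if (*name == '\0')
            return false;
        if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
            return false;
        if (!strcmp(name, ".") || !strcmp(name, ".."))
            return false;
        return true;
    }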

diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 11dc427320bd..73018d173223 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1147,10 +1147,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
lladdr = neigh->ha;
}

- if (new & NUD_CONNECTED)
- neigh->confirmed = jiffies;
- neigh->updated = jiffies;
-
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
@@ -1174,6 +1170,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
}
}

+ /* Update timestamps only once we know we will make a change to the
+ * neighbour entry. Otherwise we risk to move the locktime window with
+ * noop updates and ignore relevant ARP updates.
+ */
+ if (new != old || lladdr != neigh->ha) {
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+ neigh->updated = jiffies;
+ }
+
if (new != old) {
neigh_del_timer(neigh);
if (new & NUD_IN_TIMER)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 7f155175bba8..a81693f9753c 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -188,6 +188,25 @@ out_undo:
goto out;
}

+static int __net_init net_defaults_init_net(struct net *net)
+{
+ net->core.sysctl_somaxconn = SOMAXCONN;
+ return 0;
+}
+
+static struct pernet_operations net_defaults_ops = {
+ .init = net_defaults_init_net,
+};
+
+static __init int net_defaults_init(void)
+{
+ if (register_pernet_subsys(&net_defaults_ops))
+ panic("Cannot initialize net default settings");
+
+ return 0;
+}
+
+core_initcall(net_defaults_init);

#ifdef CONFIG_NET_NS
static struct kmem_cache *net_cachep;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 39e3de21ba71..1666c8eaaaf0 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3285,24 +3285,18 @@ void __init skb_init(void)
NULL);
}

-/**
- * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
- * @skb: Socket buffer containing the buffers to be mapped
- * @sg: The scatter-gather list to map into
- * @offset: The offset into the buffer's contents to start mapping
- * @len: Length of buffer space to be mapped
- *
- * Fill the specified scatter-gather list with mappings/pointers into a
- * region of the buffer space attached to a socket buffer.
- */
static int
-__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
+ unsigned int recursion_level)
{
int start = skb_headlen(skb);
int i, copy = start - offset;
struct sk_buff *frag_iter;
int elt = 0;

+ if (unlikely(recursion_level >= 24))
+ return -EMSGSIZE;
+
if (copy > 0) {
if (copy > len)
copy = len;
@@ -3321,6 +3315,8 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
if ((copy = end - offset) > 0) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+ return -EMSGSIZE;

if (copy > len)
copy = len;
@@ -3335,16 +3331,22 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
}

skb_walk_frags(skb, frag_iter) {
- int end;
+ int end, ret;

WARN_ON(start > offset + len);

end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
+ if (unlikely(elt && sg_is_last(&sg[elt - 1])))
+ return -EMSGSIZE;
+
if (copy > len)
copy = len;
- elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
- copy);
+ ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
+ copy, recursion_level + 1);
+ if (unlikely(ret < 0))
+ return ret;
+ elt += ret;
if ((len -= copy) == 0)
return elt;
offset += copy;
@@ -3355,6 +3357,31 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
return elt;
}

+/**
+ * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
+ * @skb: Socket buffer containing the buffers to be mapped
+ * @sg: The scatter-gather list to map into
+ * @offset: The offset into the buffer's contents to start mapping
+ * @len: Length of buffer space to be mapped
+ *
+ * Fill the specified scatter-gather list with mappings/pointers into a
+ * region of the buffer space attached to a socket buffer. Returns either
+ * the number of scatterlist items used, or -EMSGSIZE if the contents
+ * could not fit.
+ */
+int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
+{
+ int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
+
+ if (nsg <= 0)
+ return nsg;
+
+ sg_mark_end(&sg[nsg - 1]);
+
+ return nsg;
+}
+EXPORT_SYMBOL_GPL(skb_to_sgvec);
+
/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only map skb to given
* sglist without mark the sg which contain last skb data as the end.
* So the caller can mannipulate sg list as will when padding new data after
@@ -3377,19 +3404,11 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
int offset, int len)
{
- return __skb_to_sgvec(skb, sg, offset, len);
+ return __skb_to_sgvec(skb, sg, offset, len, 0);
}
EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);

-int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
-{
- int nsg = __skb_to_sgvec(skb, sg, offset, len);

- sg_mark_end(&sg[nsg - 1]);
-
- return nsg;
-}
-EXPORT_SYMBOL_GPL(skb_to_sgvec);

/**
* skb_cow_data - Check that a socket buffer's data buffers are writable
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 4f00a8eaf704..c49a159c7b8a 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -395,8 +395,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
{
struct ctl_table *tbl;

- net->core.sysctl_somaxconn = SOMAXCONN;
-
tbl = netns_core_table;
if (!net_eq(net, &init_net)) {
tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 0157f09c0de9..00de0e81eb64 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -220,7 +220,9 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;

if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
@@ -391,7 +393,9 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
skb_push(skb, ihl);

sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;

if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 360b565918c4..1ccd3466f89a 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -239,9 +239,11 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg,
- esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
- clen + alen);
+ err = skb_to_sgvec(skb, sg,
+ esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+ clen + alen);
+ if (unlikely(err < 0))
+ goto error;

if ((x->props.flags & XFRM_STATE_ESN)) {
sg_init_table(asg, 3);
@@ -426,7 +428,9 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
iv = esph->enc_data;

sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+ err = skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+ if (unlikely(err < 0))
+ goto out;

if ((x->props.flags & XFRM_STATE_ESN)) {
sg_init_table(asg, 3);
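
Both esp_output() and esp_input() now propagate skb_to_sgvec()'s result, because after the net/core/skbuff.c change above the helper can return -EMSGSIZE rather than overrun the scatterlist or recurse without bound over nested fragment lists. The control-flow shape — cap the depth, pass a child's error straight up, otherwise accumulate its count — in miniature (the tree type is invented for the sketch):

    #include <errno.h>
    #include <stddef.h>

    struct frag {
        int len;
        struct frag *children;      /* stand-in for the skb frag_list */
        size_t nr_children;
    };

    /* Bounded recursive walk: refuse pathological nesting instead of
     * blowing the stack, and pass any child error straight up. */
    static int count_segments(const struct frag *f, unsigned int level)
    {
        int total = 1;
        size_t i;

        if (level >= 24)            /* same cap as __skb_to_sgvec() */
            return -EMSGSIZE;

        for (i = 0; i < f->nr_children; i++) {
            int ret = count_segments(&f->children[i], level + 1);

            if (ret < 0)
                return ret;
            total += ret;
        }
        return total;
    }
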
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 7e01368f51ed..972615f86295 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -302,13 +302,14 @@ static struct net_device *__ip_tunnel_create(struct net *net,
struct net_device *dev;
char name[IFNAMSIZ];

- if (parms->name[0])
+ err = -E2BIG;
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ goto failed;
strlcpy(name, parms->name, IFNAMSIZ);
- else {
- if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
- err = -E2BIG;
+ } else {
+ if (strlen(ops->kind) > (IFNAMSIZ - 3))
goto failed;
- }
strlcpy(name, ops->kind, IFNAMSIZ);
strncat(name, "%d", 2);
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7a5967bc0f38..acdee14c9b03 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -863,7 +863,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
INIT_HLIST_NODE(&ifa->addr_lst);
ifa->scope = scope;
ifa->prefix_len = pfxlen;
- ifa->flags = flags | IFA_F_TENTATIVE;
+ ifa->flags = flags;
+ /* No need to add the TENTATIVE flag for addresses with NODAD */
+ if (!(flags & IFA_F_NODAD))
+ ifa->flags |= IFA_F_TENTATIVE;
ifa->valid_lft = valid_lft;
ifa->prefered_lft = prefered_lft;
ifa->cstamp = ifa->tstamp = jiffies;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 6d16eb0e0c7f..a472dbd3344b 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -423,7 +423,9 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;

if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
@@ -601,7 +603,9 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
ip6h->hop_limit = 0;

sg_init_table(sg, nfrags + sglists);
- skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+ if (unlikely(err < 0))
+ goto out_free;

if (x->props.flags & XFRM_STATE_ESN) {
/* Attach seqhi sg right after packet payload */
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 83fc3a385a26..8871a98845c7 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -231,9 +231,11 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg,
- esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
- clen + alen);
+ err = skb_to_sgvec(skb, sg,
+ esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
+ clen + alen);
+ if (unlikely(err < 0))
+ goto error;

if ((x->props.flags & XFRM_STATE_ESN)) {
sg_init_table(asg, 3);
@@ -381,7 +383,9 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
iv = esph->enc_data;

sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+ ret = skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);
+ if (unlikely(ret < 0))
+ goto out;

if ((x->props.flags & XFRM_STATE_ESN)) {
sg_init_table(asg, 3);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index a97e74023e34..b19125d5dd0e 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -320,11 +320,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
if (t || !create)
return t;

- if (parms->name[0])
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ return NULL;
strlcpy(name, parms->name, IFNAMSIZ);
- else
+ } else {
strcpy(name, "ip6gre%d");
-
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6gre_tunnel_setup);
if (!dev)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 1b6637fb99cb..d68a0c72cc93 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1151,7 +1151,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
struct sk_buff *skb, *skb_prev = NULL;
- unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
+ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu, pmtu;
int exthdrlen;
int dst_exthdrlen;
int hh_len;
@@ -1259,6 +1259,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
else
maxnonfragsize = mtu;

+ /* as per RFC 7112 section 5, the entire IPv6 Header Chain must fit
+ * the first fragment
+ */
+ if (headersize + transhdrlen > mtu)
+ goto emsgsize;
+
/* dontfrag active */
if ((cork->length + length > mtu - headersize) && dontfrag &&
(sk->sk_protocol == IPPROTO_UDP ||
@@ -1270,9 +1276,8 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,

if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
- ipv6_local_error(sk, EMSGSIZE, fl6,
- mtu - headersize +
- sizeof(struct ipv6hdr));
+ pmtu = max_t(int, mtu - headersize + sizeof(struct ipv6hdr), 0);
+ ipv6_local_error(sk, EMSGSIZE, fl6, pmtu);
return -EMSGSIZE;
}
}
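
In ip6_append_data() the reported MTU is computed as mtu - headersize + sizeof(struct ipv6hdr) in unsigned arithmetic (mtu is unsigned int), so once the new header-chain check makes headersize > mtu reachable, the old code could hand userspace a wrapped-around, enormous value; max_t(int, ..., 0) clamps it at zero. The wrap is easy to see in isolation:

    #include <stdio.h>

    int main(void)
    {
        unsigned int mtu = 1280, headersize = 1400;
        unsigned int hdr = 40;          /* sizeof(struct ipv6hdr) */

        /* Unsigned wrap-around: prints a value close to UINT_MAX */
        printf("raw:     %u\n", mtu - headersize + hdr);

        /* The max_t(int, ..., 0) clamp from the hunk above */
        int pmtu = (int)(mtu - headersize + hdr);
        printf("clamped: %d\n", pmtu > 0 ? pmtu : 0);
        return 0;
    }

On a typical two's-complement target the converted value is -80 here, so the clamp reports 0 instead of a number near 4 billion.
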
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8afea07e53a2..622359686cc1 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -307,10 +307,13 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
char name[IFNAMSIZ];
int err;

- if (p->name[0])
+ if (p->name[0]) {
+ if (!dev_valid_name(p->name))
+ goto failed;
strlcpy(name, p->name, IFNAMSIZ);
- else
+ } else {
sprintf(name, "ip6tnl%%d");
+ }

dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ip6_tnl_dev_setup);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 91fdb612279f..4992cf38f83b 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -195,10 +195,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
char name[IFNAMSIZ];
int err;

- if (p->name[0])
+ if (p->name[0]) {
+ if (!dev_valid_name(p->name))
+ goto failed;
strlcpy(name, p->name, IFNAMSIZ);
- else
+ } else {
sprintf(name, "ip6_vti%%d");
+ }

dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
if (dev == NULL)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 45a35cfbafd1..7d192860f67e 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -244,11 +244,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
if (!create)
goto failed;

- if (parms->name[0])
+ if (parms->name[0]) {
+ if (!dev_valid_name(parms->name))
+ goto failed;
strlcpy(name, parms->name, IFNAMSIZ);
- else
+ } else {
strcpy(name, "sit%d");
-
+ }
dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
ipip6_tunnel_setup);
if (dev == NULL)
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 65cce8ceaead..64dbf0dbe358 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3301,7 +3301,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
p += pol->sadb_x_policy_len*8;
sec_ctx = (struct sadb_x_sec_ctx *)p;
if (len < pol->sadb_x_policy_len*8 +
- sec_ctx->sadb_x_sec_len) {
+ sec_ctx->sadb_x_sec_len*8) {
*dir = -EINVAL;
goto out;
}
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 0ac907adb2f4..9afeec7117e3 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -633,6 +633,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl

if ((session->ifname[0] &&
nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
+ (session->offset &&
+ nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
(session->cookie_len &&
nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
&session->cookie[0])) ||
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 3e8691895385..944fd5f6b069 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -309,6 +309,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
int rc = -EINVAL;

dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
+
+ lock_sock(sk);
if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
goto out;
rc = -EAFNOSUPPORT;
@@ -380,6 +382,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
out_put:
llc_sap_put(sap);
out:
+ release_sock(sk);
return rc;
}

diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b13634c677f1..156e8e13ffed 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3995,6 +3995,10 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!ifmgd->auth_data && !ifmgd->assoc_data))
return -EINVAL;

+ /* If a reconfig is happening, bail out */
+ if (local->in_reconfig)
+ return -EBUSY;
+
if (assoc) {
rcu_read_lock();
have_sta = sta_info_get(sdata, cbss->bssid);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index de9ea452dd60..4d69e924a0f1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -828,8 +828,13 @@ restart:
}
out:
local_bh_enable();
- if (last)
+ if (last) {
+ /* nf ct hash resize happened, now clear the leftover. */
+ if ((struct nf_conn *)cb->args[1] == last)
+ cb->args[1] = 0;
+
nf_ct_put(last);
+ }

return skb->len;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 19ec93dff753..ed9f07c46815 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -977,6 +977,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
if (addr->sa_family != AF_NETLINK)
return -EINVAL;

+ if (alen < sizeof(struct sockaddr_nl))
+ return -EINVAL;
+
if ((nladdr->nl_groups || nladdr->nl_pid) &&
!netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
return -EPERM;
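
netlink_connect() now insists that at least sizeof(struct sockaddr_nl) bytes were passed in before nl_pid and nl_groups are examined; only alen bytes of the sockaddr are copied in from userspace, so a shorter address would have those checks read uninitialized memory. The validate-length-before-use shape, stripped down (struct and names invented):

    #include <errno.h>
    #include <string.h>

    struct nl_addr {                    /* stand-in for sockaddr_nl */
        unsigned short family;
        unsigned int pid;
        unsigned int groups;
    };

    /* Reject short addresses before touching any field beyond the
     * family, instead of reading bytes the caller never supplied. */
    static int do_connect(const void *addr, unsigned int alen)
    {
        struct nl_addr a;

        if (alen < sizeof(a))
            return -EINVAL;
        memcpy(&a, addr, sizeof(a));
        /* mirrors the shown check: binding to a specific pid or to
         * multicast groups needs privilege (permission test elided) */
        if (a.pid || a.groups)
            return -EPERM;
        return 0;
    }
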
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index f226709ebd8f..ca5f3662a485 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -209,7 +209,7 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
struct sk_buff *trailer;
unsigned int len;
u16 check;
- int nsg;
+ int nsg, err;

sp = rxrpc_skb(skb);

@@ -240,7 +240,9 @@ static int rxkad_secure_packet_encrypt(const struct rxrpc_call *call,
len &= ~(call->conn->size_align - 1);

sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, len);
+ err = skb_to_sgvec(skb, sg, 0, len);
+ if (unlikely(err < 0))
+ return err;
crypto_blkcipher_encrypt_iv(&desc, sg, sg, len);

_leave(" = 0");
@@ -336,7 +338,7 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
struct sk_buff *trailer;
u32 data_size, buf;
u16 check;
- int nsg;
+ int nsg, ret;

_enter("");

@@ -348,7 +350,9 @@ static int rxkad_verify_packet_auth(const struct rxrpc_call *call,
goto nomem;

sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, 8);
+ ret = skb_to_sgvec(skb, sg, 0, 8);
+ if (unlikely(ret < 0))
+ return ret;

/* start the decryption afresh */
memset(&iv, 0, sizeof(iv));
@@ -411,7 +415,7 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
struct sk_buff *trailer;
u32 data_size, buf;
u16 check;
- int nsg;
+ int nsg, ret;

_enter(",{%d}", skb->len);

@@ -430,7 +434,12 @@ static int rxkad_verify_packet_encrypt(const struct rxrpc_call *call,
}

sg_init_table(sg, nsg);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ ret = skb_to_sgvec(skb, sg, 0, skb->len);
+ if (unlikely(ret < 0)) {
+ if (sg != _sg)
+ kfree(sg);
+ return ret;
+ }

/* decrypt from the session key */
token = call->conn->key->payload.data;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index df763d506181..40a9b8fb1e40 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -92,8 +92,10 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
a->order = n_i;

nest = nla_nest_start(skb, a->order);
- if (nest == NULL)
+ if (nest == NULL) {
+ index--;
goto nla_put_failure;
+ }
err = tcf_action_dump_1(skb, a, 0, 0);
if (err < 0) {
index--;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 3bee4324535c..ba1ff0f56ae3 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -722,8 +722,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
sctp_v6_map_v4(addr);
}

- if (addr->sa.sa_family == AF_INET)
+ if (addr->sa.sa_family == AF_INET) {
+ memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
return sizeof(struct sockaddr_in);
+ }
return sizeof(struct sockaddr_in6);
}

diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 225019397bc6..13693d0adeaf 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -336,11 +336,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
if (!opt->pf->af_supported(addr->sa.sa_family, opt))
return NULL;

- /* V4 mapped address are really of AF_INET family */
- if (addr->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
- !opt->pf->af_supported(AF_INET, opt))
- return NULL;
+ if (addr->sa.sa_family == AF_INET6) {
+ if (len < SIN6_LEN_RFC2133)
+ return NULL;
+ /* V4 mapped address are really of AF_INET family */
+ if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+ !opt->pf->af_supported(AF_INET, opt))
+ return NULL;
+ }

/* If we get this far, af is valid. */
af = sctp_get_af_specific(addr->sa.sa_family);
@@ -1513,7 +1516,7 @@ static void sctp_close(struct sock *sk, long timeout)

pr_debug("%s: sk:%p, timeout:%ld\n", __func__, sk, timeout);

- lock_sock(sk);
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
sk->sk_shutdown = SHUTDOWN_MASK;
sk->sk_state = SCTP_SS_CLOSING;

@@ -1564,7 +1567,7 @@ static void sctp_close(struct sock *sk, long timeout)
* held and that should be grabbed before socket lock.
*/
spin_lock_bh(&net->sctp.addr_wq_lock);
- bh_lock_sock(sk);
+ bh_lock_sock_nested(sk);

/* Hold the sock, since sk_common_release() will put sock_put()
* and we have just a little more cleanup.
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 5ad4418ef093..7ac8d3869267 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1796,32 +1796,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb)

static int __init x25_init(void)
{
- int rc = proto_register(&x25_proto, 0);
+ int rc;

- if (rc != 0)
+ rc = proto_register(&x25_proto, 0);
+ if (rc)
goto out;

rc = sock_register(&x25_family_ops);
- if (rc != 0)
+ if (rc)
goto out_proto;

dev_add_pack(&x25_packet_type);

rc = register_netdevice_notifier(&x25_dev_notifier);
- if (rc != 0)
+ if (rc)
goto out_sock;

- pr_info("Linux Version 0.2\n");
+ rc = x25_register_sysctl();
+ if (rc)
+ goto out_dev;

- x25_register_sysctl();
rc = x25_proc_init();
- if (rc != 0)
- goto out_dev;
+ if (rc)
+ goto out_sysctl;
+
+ pr_info("Linux Version 0.2\n");
+
out:
return rc;
+out_sysctl:
+ x25_unregister_sysctl();
out_dev:
unregister_netdevice_notifier(&x25_dev_notifier);
out_sock:
+ dev_remove_pack(&x25_packet_type);
sock_unregister(AF_X25);
out_proto:
proto_unregister(&x25_proto);
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index 43239527a205..703d46aae7a2 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = {
{ 0, },
};

-void __init x25_register_sysctl(void)
+int __init x25_register_sysctl(void)
{
x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
+ if (!x25_table_header)
+ return -ENOMEM;
+ return 0;
}

void x25_unregister_sysctl(void)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 431f512f6bde..1dbffea4da34 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1208,6 +1208,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
x->curlft.add_time = orig->curlft.add_time;
x->km.state = orig->km.state;
x->km.seq = orig->km.seq;
+ x->replay = orig->replay;
+ x->preplay = orig->preplay;

return x;

diff --git a/scripts/tags.sh b/scripts/tags.sh
index cdb491d84503..7056322b53f0 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -106,6 +106,7 @@ all_compiled_sources()
case "$i" in
*.[cS])
j=${i/\.[cS]/\.o}
+ j="${j#$tree}"
if [ -e $j ]; then
echo $i
fi
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index fb126459b134..7e74d8aea99b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1020,6 +1020,10 @@ static struct syscall_fmt {
{ .name = "mlockall", .errmsg = true,
.arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
{ .name = "mmap", .hexret = true,
+/* The standard mmap maps to old_mmap on s390x */
+#if defined(__s390x__)
+ .alias = "old_mmap",
+#endif
.arg_scnprintf = { [0] = SCA_HEX, /* addr */
[2] = SCA_MMAP_PROT, /* prot */
[3] = SCA_MMAP_FLAGS, /* flags */
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index 67f2d6323558..28dfd1f7f776 100644
--- a/tools/perf/tests/code-reading.c
+++ b/tools/perf/tests/code-reading.c
@@ -141,6 +141,8 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
unsigned char buf2[BUFSZ];
size_t ret_len;
u64 objdump_addr;
+ const char *objdump_name;
+ char decomp_name[KMOD_DECOMP_LEN];
int ret;

pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
@@ -202,9 +204,25 @@ static int read_object_code(u64 addr, size_t len, u8 cpumode,
state->done[state->done_cnt++] = al.map->start;
}

+ objdump_name = al.map->dso->long_name;
+ if (dso__needs_decompress(al.map->dso)) {
+ if (dso__decompress_kmodule_path(al.map->dso, objdump_name,
+ decomp_name,
+ sizeof(decomp_name)) < 0) {
+ pr_debug("decompression failed\n");
+ return -1;
+ }
+
+ objdump_name = decomp_name;
+ }
+
/* Read the object code using objdump */
objdump_addr = map__rip_2objdump(al.map, al.addr);
- ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
+ ret = read_via_objdump(objdump_name, objdump_addr, buf2, len);
+
+ if (dso__needs_decompress(al.map->dso))
+ unlink(objdump_name);
+
if (ret > 0) {
/*
* The kernel maps are inaccurate - assume objdump is right in
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
index 7419768c38b1..dbd9954eda4a 100644
--- a/tools/perf/util/unwind-libdw.c
+++ b/tools/perf/util/unwind-libdw.c
@@ -37,6 +37,14 @@ static int __report_module(struct addr_location *al, u64 ip,
return 0;

mod = dwfl_addrmodule(ui->dwfl, ip);
+ if (mod) {
+ Dwarf_Addr s;
+
+ dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
+ if (s != al->map->start)
+ mod = 0;
+ }
+
if (!mod)
mod = dwfl_report_elf(ui->dwfl, dso->short_name,
dso->long_name, -1, al->map->start,
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index 42d4c8caad81..de8dc82e2567 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -45,12 +45,12 @@ int test_body(void)
printf("Check DSCR TM context switch: ");
fflush(stdout);
for (;;) {
- rv = 1;
asm __volatile__ (
/* set a known value into the DSCR */
"ld 3, %[dscr1];"
"mtspr %[sprn_dscr], 3;"

+ "li %[rv], 1;"
/* start and suspend a transaction */
TBEGIN
"beq 1f;"