Re: [3.13.y-ckt stable] Linux 3.13.11-ckt18

From: Kamal Mostafa
Date: Fri Apr 03 2015 - 15:48:09 EST


diff --git a/Makefile b/Makefile
index 7c427b7..f91b5dd 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 13
SUBLEVEL = 11
-EXTRAVERSION = -ckt17
+EXTRAVERSION = -ckt18
NAME = King of Alienated Frog Porn

# *DOCUMENTATION*
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 6b0b7f7e..7670f33 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -259,7 +259,8 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
#define pmd_clear(xp) do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(x) (mem_map + \
- (unsigned long)(((pte_val(x) - PAGE_OFFSET) >> PAGE_SHIFT)))
+ (unsigned long)(((pte_val(x) - CONFIG_LINUX_LINK_BASE) >> \
+ PAGE_SHIFT)))

#define mk_pte(page, pgprot) \
({ \
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 18f333c..3d41b06 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1669,7 +1669,7 @@ static struct omap_hwmod dra7xx_uart3_hwmod = {
.class = &dra7xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
.main_clk = "uart3_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP4UART3_FLAGS,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_L4PER_UART3_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-pxa/corgi.c b/arch/arm/mach-pxa/corgi.c
index f162f1b..82fd9dd 100644
--- a/arch/arm/mach-pxa/corgi.c
+++ b/arch/arm/mach-pxa/corgi.c
@@ -26,6 +26,7 @@
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/io.h>
+#include <linux/regulator/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/corgi_lcd.h>
@@ -711,6 +712,8 @@ static void __init corgi_init(void)
sharpsl_nand_partitions[1].size = 53 * 1024 * 1024;

platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ regulator_has_full_constraints();
}

static void __init fixup_corgi(struct tag *tags, char **cmdline,
diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c
index a7c30eb..007fd8a 100644
--- a/arch/arm/mach-pxa/hx4700.c
+++ b/arch/arm/mach-pxa/hx4700.c
@@ -892,6 +892,8 @@ static void __init hx4700_init(void)
mdelay(10);
gpio_set_value(GPIO71_HX4700_ASIC3_nRESET, 1);
mdelay(10);
+
+ regulator_has_full_constraints();
}

MACHINE_START(H4700, "HP iPAQ HX4700")
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index aedf053..b4fff29 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -25,6 +25,7 @@
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
+#include <linux/regulator/machine.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -454,6 +455,7 @@ static void __init poodle_init(void)
pxa_set_i2c_info(NULL);
i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
poodle_init_spi();
+ regulator_has_full_constraints();
}

static void __init fixup_poodle(struct tag *tags, char **cmdline,
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 0b11c1a..7f2693f 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -969,6 +969,8 @@ static void __init spitz_init(void)
spitz_nor_init();
spitz_nand_init();
spitz_i2c_init();
+
+ regulator_has_full_constraints();
}

static void __init spitz_fixup(struct tag *tags, char **cmdline,
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 6645d1e..34853d5 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -81,6 +81,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
/*
* Ensure not to come back here if it wasn't intended
*/
+ RCSR = RCSR_SMR;
PSPR = 0;

/*
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index b3fc9f5..7ed72dc 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -151,8 +151,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
case __SI_TIMER:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
- &to->si_ptr);
+ err |= __put_user(from->si_int, &to->si_int);
break;
case __SI_POLL:
err |= __put_user(from->si_band, &to->si_band);
@@ -181,7 +180,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
case __SI_MESGQ: /* But this is */
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
- err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
+ err |= __put_user(from->si_int, &to->si_int);
break;
default: /* this is just in case for now ... */
err |= __put_user(from->si_pid, &to->si_pid);
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h
index 3be8581..ba85738 100644
--- a/arch/metag/include/asm/processor.h
+++ b/arch/metag/include/asm/processor.h
@@ -149,8 +149,8 @@ extern void exit_thread(void);

unsigned long get_wchan(struct task_struct *p);

-#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC)
-#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0)

#define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0)

diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 6e58e97..cedeb56 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <asm/ftrace.h>
+#include <asm/fpu.h>

extern void *__bzero(void *__s, size_t __count);
extern long __strncpy_from_user_nocheck_asm(char *__to,
@@ -26,6 +27,13 @@ extern long __strnlen_user_nocheck_asm(const char *s);
extern long __strnlen_user_asm(const char *s);

/*
+ * Core architecture code
+ */
+#ifdef CONFIG_CPU_R4K_FPU
+EXPORT_SYMBOL_GPL(_save_fp);
+#endif
+
+/*
* String functions
*/
EXPORT_SYMBOL(memset);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
index bbace09..03a2db5 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/kvm_locore.S
@@ -428,7 +428,7 @@ __kvm_mips_return_to_guest:
/* Setup status register for running guest in UM */
.set at
or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
- and v1, v1, ~ST0_CU0
+ and v1, v1, ~(ST0_CU0 | ST0_MX)
.set noat
mtc0 v1, CP0_STATUS
ehb
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index 7a8b440..bdc5eeb 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -15,6 +15,7 @@
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
+#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
@@ -424,11 +425,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu->mmio_needed = 0;
}

+ lose_fpu(1);
+
+ local_irq_disable();
/* Check if we have any exceptions/interrupts pending */
kvm_mips_deliver_interrupts(vcpu,
kvm_read_c0_guest_cause(vcpu->arch.cop0));

- local_irq_disable();
kvm_guest_enter();

r = __kvm_mips_vcpu_run(run, vcpu);
@@ -1028,9 +1031,6 @@ void kvm_mips_set_c0_status(void)
{
uint32_t status = read_c0_status();

- if (cpu_has_fpu)
- status |= (ST0_CU1);
-
if (cpu_has_dsp)
status |= (ST0_MX);

diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 1c16141..1fea249 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -155,7 +155,7 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
}

*kaddr = (void *)(bank->ph_addr + offset);
- *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
+ *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;

return 0;
}
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2480d92..1e201ad 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -398,7 +398,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->ecb2 = 8;
vcpu->arch.sie_block->eca = 0xC1002001U;
vcpu->arch.sie_block->fac = (int) (long) vfacilities;
- hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
(unsigned long) vcpu);
vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 0596e8e..5bb7b36 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -172,7 +172,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
*/
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
- if (unlikely(pmd_large(pmd))) {
+ if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
/*
* NUMA hinting faults need to be handled in the GUP
* slowpath for accounting purposes and so that they
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index fa029fb..e473dbe 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -66,9 +66,15 @@ follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
return ERR_PTR(-EINVAL);
}

+/*
+ * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
+ * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
+ * Otherwise, returns 0.
+ */
int pmd_huge(pmd_t pmd)
{
- return !!(pmd_val(pmd) & _PAGE_PSE);
+ return !pmd_none(pmd) &&
+ (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 0653404..a22b148 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
struct blkg_rwstat rwstat = { }, tmp;
int i, cpu;

+ if (tg->stats_cpu == NULL)
+ return 0;
+
for_each_possible_cpu(cpu) {
struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);

diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 8f5fdf3..e13e06d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3600,6 +3600,11 @@ retry:

blkcg = bio_blkcg(bio);
cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
+ if (!cfqg) {
+ cfqq = &cfqd->oom_cfqq;
+ goto out;
+ }
+
cfqq = cic_to_cfqq(cic, is_sync);

/*
@@ -3636,7 +3641,7 @@ retry:
} else
cfqq = &cfqd->oom_cfqq;
}
-
+out:
if (new_cfqq)
kmem_cache_free(cfq_pool, new_cfqq);

@@ -3666,12 +3671,17 @@ static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
struct bio *bio, gfp_t gfp_mask)
{
- const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
- const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
+ int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
+ int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
struct cfq_queue **async_cfqq = NULL;
struct cfq_queue *cfqq = NULL;

if (!is_sync) {
+ if (!ioprio_valid(cic->ioprio)) {
+ struct task_struct *tsk = current;
+ ioprio = task_nice_ioprio(tsk);
+ ioprio_class = task_nice_ioclass(tsk);
+ }
async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
cfqq = *async_cfqq;
}
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index e619031..0fd0537 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -104,6 +104,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x13d3, 0x3402) },
{ USB_DEVICE(0x13d3, 0x3408) },
+ { USB_DEVICE(0x13d3, 0x3423) },
{ USB_DEVICE(0x13d3, 0x3432) },

/* Atheros AR5BBU12 with sflash firmware */
@@ -155,6 +156,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },

/* Atheros AR5BBU22 with sflash firmware */
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 20c0ccb..6e6c597 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -182,6 +182,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 },

/* Atheros AR5BBU12 with sflash firmware */
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 901b370..5ad180b 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -1536,7 +1536,7 @@ struct tpm_chip *tpm_register_hardware(struct device *dev,

/* Make chip available */
spin_lock(&driver_lock);
- list_add_rcu(&chip->list, &tpm_chip_list);
+ list_add_tail_rcu(&chip->list, &tpm_chip_list);
spin_unlock(&driver_lock);

return chip;
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index c3cd7fe..28a1501 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -208,6 +208,10 @@ static int i2c_atmel_probe(struct i2c_client *client,

chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
GFP_KERNEL);
+ if (!chip->vendor.priv) {
+ rc = -ENOMEM;
+ goto out_err;
+ }

/* Default timeouts */
chip->vendor.timeout_a = msecs_to_jiffies(TPM_I2C_SHORT_TIMEOUT);
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 6276fea..58decb7 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -579,6 +579,11 @@ static int i2c_nuvoton_probe(struct i2c_client *client,

chip->vendor.priv = devm_kzalloc(dev, sizeof(struct priv_data),
GFP_KERNEL);
+ if (!chip->vendor.priv) {
+ rc = -ENOMEM;
+ goto out_err;
+ }
+
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);

diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c
index b2cb24c..0fa5a16 100644
--- a/drivers/char/tpm/tpm_i2c_stm_st33.c
+++ b/drivers/char/tpm/tpm_i2c_stm_st33.c
@@ -398,7 +398,7 @@ static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
*/
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
- int size = 0, burstcnt, len;
+ int size = 0, burstcnt, len, ret;
struct i2c_client *client;

client = (struct i2c_client *)TPM_VPRIV(chip);
@@ -413,7 +413,10 @@ static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
if (burstcnt < 0)
return burstcnt;
len = min_t(int, burstcnt, count - size);
- I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
+ ret = I2C_READ_DATA(client, TPM_DATA_FIFO, buf + size, len);
+ if (ret < 0)
+ return ret;
+
size += len;
}
return size;
@@ -488,7 +491,7 @@ static int tpm_stm_i2c_send(struct tpm_chip *chip, unsigned char *buf,
if (burstcnt < 0)
return burstcnt;
size = min_t(int, len - i - 1, burstcnt);
- ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf, size);
+ ret = I2C_WRITE_DATA(client, TPM_DATA_FIFO, buf + i, size);
if (ret < 0)
goto out_err;

diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 2783a42..da6727b 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -148,7 +148,8 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
crq.len = (u16)count;
crq.data = ibmvtpm->rtce_dma_handle;

- rc = ibmvtpm_send_crq(ibmvtpm->vdev, word[0], word[1]);
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]),
+ cpu_to_be64(word[1]));
if (rc != H_SUCCESS) {
dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc);
rc = 0;
@@ -186,7 +187,8 @@ static int ibmvtpm_crq_get_rtce_size(struct ibmvtpm_dev *ibmvtpm)
crq.valid = (u8)IBMVTPM_VALID_CMD;
crq.msg = (u8)VTPM_GET_RTCE_BUFFER_SIZE;

- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
+ cpu_to_be64(buf[1]));
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_get_rtce_size failed rc=%d\n", rc);
@@ -212,7 +214,8 @@ static int ibmvtpm_crq_get_version(struct ibmvtpm_dev *ibmvtpm)
crq.valid = (u8)IBMVTPM_VALID_CMD;
crq.msg = (u8)VTPM_GET_VERSION;

- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
+ cpu_to_be64(buf[1]));
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"ibmvtpm_crq_get_version failed rc=%d\n", rc);
@@ -307,6 +310,14 @@ static int tpm_ibmvtpm_remove(struct vio_dev *vdev)
static unsigned long tpm_ibmvtpm_get_desired_dma(struct vio_dev *vdev)
{
struct ibmvtpm_dev *ibmvtpm = ibmvtpm_get_data(&vdev->dev);
+
+ /* ibmvtpm initializes at probe time, so the data we are
+ * asking for may not be set yet. Estimate that 4K required
+ * for TCE-mapped buffer in addition to CRQ.
+ */
+ if (!ibmvtpm)
+ return CRQ_RES_BUF_SIZE + PAGE_SIZE;
+
return CRQ_RES_BUF_SIZE + ibmvtpm->rtce_size;
}

@@ -327,7 +338,8 @@ static int tpm_ibmvtpm_suspend(struct device *dev)
crq.valid = (u8)IBMVTPM_VALID_CMD;
crq.msg = (u8)VTPM_PREPARE_TO_SUSPEND;

- rc = ibmvtpm_send_crq(ibmvtpm->vdev, buf[0], buf[1]);
+ rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(buf[0]),
+ cpu_to_be64(buf[1]));
if (rc != H_SUCCESS)
dev_err(ibmvtpm->dev,
"tpm_ibmvtpm_suspend failed rc=%d\n", rc);
@@ -511,11 +523,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,
case IBMVTPM_VALID_CMD:
switch (crq->msg) {
case VTPM_GET_RTCE_BUFFER_SIZE_RES:
- if (crq->len <= 0) {
+ if (be16_to_cpu(crq->len) <= 0) {
dev_err(ibmvtpm->dev, "Invalid rtce size\n");
return;
}
- ibmvtpm->rtce_size = crq->len;
+ ibmvtpm->rtce_size = be16_to_cpu(crq->len);
ibmvtpm->rtce_buf = kmalloc(ibmvtpm->rtce_size,
GFP_KERNEL);
if (!ibmvtpm->rtce_buf) {
@@ -536,11 +548,11 @@ static void ibmvtpm_crq_process(struct ibmvtpm_crq *crq,

return;
case VTPM_GET_VERSION_RES:
- ibmvtpm->vtpm_version = crq->data;
+ ibmvtpm->vtpm_version = be32_to_cpu(crq->data);
return;
case VTPM_TPM_COMMAND_RES:
/* len of the data in rtce buffer */
- ibmvtpm->res_len = crq->len;
+ ibmvtpm->res_len = be16_to_cpu(crq->len);
wake_up_interruptible(&ibmvtpm->wq);
return;
default:
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 1b74459..b43dafb 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -75,6 +75,10 @@ enum tis_defaults {
#define TPM_DID_VID(l) (0x0F00 | ((l) << 12))
#define TPM_RID(l) (0x0F04 | ((l) << 12))

+struct priv_data {
+ bool irq_tested;
+};
+
static LIST_HEAD(tis_chips);
static DEFINE_MUTEX(tis_lock);

@@ -338,12 +342,27 @@ out_err:
return rc;
}

+static void disable_interrupts(struct tpm_chip *chip)
+{
+ u32 intmask;
+
+ intmask =
+ ioread32(chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+ intmask &= ~TPM_GLOBAL_INT_ENABLE;
+ iowrite32(intmask,
+ chip->vendor.iobase +
+ TPM_INT_ENABLE(chip->vendor.locality));
+ free_irq(chip->vendor.irq, chip);
+ chip->vendor.irq = 0;
+}
+
/*
* If interrupts are used (signaled by an irq set in the vendor structure)
* tpm.c can skip polling for the data to be available as the interrupt is
* waited for here
*/
-static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len)
{
int rc;
u32 ordinal;
@@ -373,6 +392,30 @@ out_err:
return rc;
}

+static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
+{
+ int rc, irq;
+ struct priv_data *priv = chip->vendor.priv;
+
+ if (!chip->vendor.irq || priv->irq_tested)
+ return tpm_tis_send_main(chip, buf, len);
+
+ /* Verify receipt of the expected IRQ */
+ irq = chip->vendor.irq;
+ chip->vendor.irq = 0;
+ rc = tpm_tis_send_main(chip, buf, len);
+ chip->vendor.irq = irq;
+ if (!priv->irq_tested)
+ msleep(1);
+ if (!priv->irq_tested) {
+ disable_interrupts(chip);
+ dev_err(chip->dev,
+ FW_BUG "TPM interrupt not working, polling instead\n");
+ }
+ priv->irq_tested = true;
+ return rc;
+}
+
/*
* Early probing for iTPM with STS_DATA_EXPECT flaw.
* Try sending command without itpm flag set and if that
@@ -515,6 +558,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
if (interrupt == 0)
return IRQ_NONE;

+ ((struct priv_data *)chip->vendor.priv)->irq_tested = true;
if (interrupt & TPM_INTF_DATA_AVAIL_INT)
wake_up_interruptible(&chip->vendor.read_queue);
if (interrupt & TPM_INTF_LOCALITY_CHANGE_INT)
@@ -544,9 +588,14 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
u32 vendor, intfcaps, intmask;
int rc, i, irq_s, irq_e, probe;
struct tpm_chip *chip;
+ struct priv_data *priv;

+ priv = devm_kzalloc(dev, sizeof(struct priv_data), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
if (!(chip = tpm_register_hardware(dev, &tpm_tis)))
return -ENODEV;
+ chip->vendor.priv = priv;

chip->vendor.iobase = ioremap(start, len);
if (!chip->vendor.iobase) {
@@ -615,19 +664,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
if (intfcaps & TPM_INTF_DATA_AVAIL_INT)
dev_dbg(dev, "\tData Avail Int Support\n");

- /* get the timeouts before testing for irqs */
- if (tpm_get_timeouts(chip)) {
- dev_err(dev, "Could not get TPM timeouts and durations\n");
- rc = -ENODEV;
- goto out_err;
- }
-
- if (tpm_do_selftest(chip)) {
- dev_err(dev, "TPM self test failed\n");
- rc = -ENODEV;
- goto out_err;
- }
-
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
init_waitqueue_head(&chip->vendor.int_queue);
@@ -729,6 +765,18 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
}
}

+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (tpm_do_selftest(chip)) {
+ dev_err(dev, "TPM self test failed\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
INIT_LIST_HEAD(&chip->vendor.list);
mutex_lock(&tis_lock);
list_add(&chip->vendor.list, &tis_chips);
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 4a58c55..797bab9 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
struct clk_init_data init;

if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
- if (bit_idx > 16) {
+ if (bit_idx > 15) {
pr_err("gate bit exceeds LOWORD field\n");
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 10772aa..e7cf8aa 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -290,6 +290,7 @@ static void __init zynq_clk_setup(struct device_node *np)
clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
"cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
26, 0, &armclk_lock);
+ clk_prepare_enable(clks[cpu_2x]);

clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
4 + 2 * tmp);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a9cd300..b9f6367 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1262,9 +1262,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
unsigned long flags;
struct cpufreq_policy *policy;

- read_lock_irqsave(&cpufreq_driver_lock, flags);
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, cpu);
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);

if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
@@ -1319,7 +1320,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
}

- per_cpu(cpufreq_cpu_data, cpu) = NULL;
return 0;
}

diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index 8d904a0..9499306 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -263,7 +263,7 @@ out:
}

#ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
{
int count, v, i, found;
struct cpufreq_frequency_table *freq;
@@ -335,7 +335,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
.notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
};

-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
{
struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
struct cpufreq_frequency_table *freq;
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 4850882..436a9d8 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -458,7 +458,7 @@ static struct cpufreq_driver s3c24xx_driver = {
};


-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
{
if (!info || !info->name) {
printk(KERN_ERR "%s: failed to pass valid information\n",
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 7047821..4ab7a21 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -400,6 +400,7 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,

pr_debug("previous speed is %u\n", prev_speed);

+ preempt_disable();
local_irq_save(flags);

/* switch to low state */
@@ -464,6 +465,8 @@ unsigned int speedstep_get_freqs(enum speedstep_processor processor,

out:
local_irq_restore(flags);
+ preempt_enable();
+
return ret;
}
EXPORT_SYMBOL_GPL(speedstep_get_freqs);
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index 0f5326d..34bd13c 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -188,6 +188,7 @@ static void speedstep_set_state(unsigned int state)
return;

/* Disable IRQs */
+ preempt_disable();
local_irq_save(flags);

command = (smi_sig & 0xffffff00) | (smi_cmd & 0xff);
@@ -198,9 +199,19 @@ static void speedstep_set_state(unsigned int state)

do {
if (retry) {
+ /*
+ * We need to enable interrupts, otherwise the blockage
+ * won't resolve.
+ *
+ * We disable preemption so that other processes don't
+ * run. If other processes were running, they could
+ * submit more DMA requests, making the blockage worse.
+ */
pr_debug("retry %u, previous result %u, waiting...\n",
retry, result);
+ local_irq_enable();
mdelay(retry * 50);
+ local_irq_disable();
}
retry++;
__asm__ __volatile__(
@@ -217,6 +228,7 @@ static void speedstep_set_state(unsigned int state)

/* enable IRQs */
local_irq_restore(flags);
+ preempt_enable();

if (new_state == state)
pr_debug("change to %u MHz succeeded after %u tries "
diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c
index 276a422..a1c47b4 100644
--- a/drivers/gpio/gpio-tps65912.c
+++ b/drivers/gpio/gpio-tps65912.c
@@ -26,9 +26,12 @@ struct tps65912_gpio_data {
struct gpio_chip gpio_chip;
};

+#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip)
+
static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
{
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;
int val;

val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset);
@@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset)
static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;

if (value)
tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset,
@@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset,
static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,
int value)
{
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;

/* Set the initial value */
tps65912_gpio_set(gc, offset, value);
@@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset,

static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset)
{
- struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio);
+ struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc);
+ struct tps65912 *tps65912 = tps65912_gpio->tps65912;

return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset,
GPIO_CFG_MASK);
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 74ed17d..d26028c 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -45,12 +45,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)

ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
if (ret < 0) {
- /* We've found the gpio chip, but the translation failed.
- * Return true to stop looking and return the translation
- * error via out_gpio
+ /* We've found a gpio chip, but the translation failed.
+ * Store translation error in out_gpio.
+ * Return false to keep looking, as more than one gpio chip
+ * could be registered per of-node.
*/
gg_data->out_gpio = ERR_PTR(ret);
- return true;
+ return false;
}

gg_data->out_gpio = gpio_to_desc(ret + gc->base);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 74c2a37..35f8e47 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1759,6 +1759,7 @@ struct drm_i915_file_private {
((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
(((dev)->pdev->device & 0xf) == 0x6 || \
+ ((dev)->pdev->device & 0xf) == 0xb || \
((dev)->pdev->device & 0xf) == 0xe))
#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0A00)
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index 9944d81..5b487d5 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -73,7 +73,7 @@ u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

mutex_lock(&dev_priv->dpio_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
PUNIT_OPCODE_REG_READ, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);

@@ -85,7 +85,7 @@ void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

mutex_lock(&dev_priv->dpio_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_PUNIT,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
PUNIT_OPCODE_REG_WRITE, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);
}
@@ -97,7 +97,7 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

mutex_lock(&dev_priv->dpio_lock);
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_NC,
PUNIT_OPCODE_REG_READ, addr, &val);
mutex_unlock(&dev_priv->dpio_lock);

@@ -107,56 +107,56 @@ u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr)
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
PUNIT_OPCODE_REG_READ, reg, &val);
return val;
}

void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPIO_NC,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPIO_NC,
PUNIT_OPCODE_REG_WRITE, reg, &val);
}

u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
PUNIT_OPCODE_REG_READ, reg, &val);
return val;
}

void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCK,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
PUNIT_OPCODE_REG_WRITE, reg, &val);
}

u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
PUNIT_OPCODE_REG_READ, reg, &val);
return val;
}

void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_CCU,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
PUNIT_OPCODE_REG_WRITE, reg, &val);
}

u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val = 0;
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
PUNIT_OPCODE_REG_READ, reg, &val);
return val;
}

void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val)
{
- vlv_sideband_rw(dev_priv, PCI_DEVFN(2, 0), IOSF_PORT_GPS_CORE,
+ vlv_sideband_rw(dev_priv, PCI_DEVFN(0, 0), IOSF_PORT_GPS_CORE,
PUNIT_OPCODE_REG_WRITE, reg, &val);
}

diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 719c7a1..13b458e 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -700,10 +700,8 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, 0);

- if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
- (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+ if (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
- }

/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index de0652e..9d8e30d 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3513,7 +3513,21 @@ void cik_fence_gfx_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

- /* EVENT_WRITE_EOP - flush caches, send int */
+ /* Workaround for cache flush problems. First send a dummy EOP
+ * event down the pipe with seq one below.
+ */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
+ EOP_TC_ACTION_EN |
+ EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
+ EVENT_INDEX(5)));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
+ DATA_SEL(1) | INT_SEL(0));
+ radeon_ring_write(ring, fence->seq - 1);
+ radeon_ring_write(ring, 0);
+
+ /* Then send the real EOP event down the pipe. */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
@@ -6617,7 +6631,6 @@ int cik_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 dma_cntl, dma_cntl1;
- u32 thermal_int;

if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -6654,13 +6667,6 @@ int cik_irq_set(struct radeon_device *rdev)
cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;

- if (rdev->flags & RADEON_IS_IGP)
- thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
- ~(THERM_INTH_MASK | THERM_INTL_MASK);
- else
- thermal_int = RREG32_SMC(CG_THERMAL_INT) &
- ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
-
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("cik_irq_set: sw int gfx\n");
@@ -6818,14 +6824,6 @@ int cik_irq_set(struct radeon_device *rdev)
hpd6 |= DC_HPDx_INT_EN;
}

- if (rdev->irq.dpm_thermal) {
- DRM_DEBUG("dpm thermal\n");
- if (rdev->flags & RADEON_IS_IGP)
- thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
- else
- thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
- }
-
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);

WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
@@ -6879,11 +6877,6 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);

- if (rdev->flags & RADEON_IS_IGP)
- WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
- else
- WREG32_SMC(CG_THERMAL_INT, thermal_int);
-
return 0;
}

diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 8fcc491..c8372ef 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1121,6 +1121,19 @@ void kv_dpm_enable_bapm(struct radeon_device *rdev, bool enable)
}
}

+static void kv_enable_thermal_int(struct radeon_device *rdev, bool enable)
+{
+ u32 thermal_int;
+
+ thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL);
+ if (enable)
+ thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
+ else
+ thermal_int &= ~(THERM_INTH_MASK | THERM_INTL_MASK);
+ WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
+
+}
+
int kv_dpm_enable(struct radeon_device *rdev)
{
struct kv_power_info *pi = kv_get_pi(rdev);
@@ -1222,8 +1235,7 @@ int kv_dpm_enable(struct radeon_device *rdev)
DRM_ERROR("kv_set_thermal_temperature_range failed\n");
return ret;
}
- rdev->irq.dpm_thermal = true;
- radeon_irq_set(rdev);
+ kv_enable_thermal_int(rdev, true);
}

ret = kv_smc_bapm_enable(rdev, false);
@@ -1269,6 +1281,7 @@ void kv_dpm_disable(struct radeon_device *rdev)
kv_stop_dpm(rdev);
kv_enable_ulv(rdev, false);
kv_reset_am(rdev);
+ kv_enable_thermal_int(rdev, false);

kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index e086b77..5cd96d5 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1072,12 +1072,12 @@ static void cayman_gpu_init(struct radeon_device *rdev)

if ((rdev->config.cayman.max_backends_per_se == 1) &&
(rdev->flags & RADEON_IS_IGP)) {
- if ((disabled_rb_mask & 3) == 1) {
- /* RB0 disabled, RB1 enabled */
- tmp = 0x11111111;
- } else {
+ if ((disabled_rb_mask & 3) == 2) {
/* RB1 disabled, RB0 enabled */
tmp = 0x00000000;
+ } else {
+ /* RB0 disabled, RB1 enabled */
+ tmp = 0x11111111;
}
} else {
tmp = gb_addr_config & NUM_PIPES_MASK;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index cc4258a..729ad83 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -187,7 +187,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
radeon_crtc = to_radeon_crtc(crtc);
if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
- vrefresh = radeon_crtc->hw_mode.vrefresh;
+ vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
break;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 2f7e9b8..87b0d72 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -3280,6 +3280,7 @@ int radeon_atom_get_voltage_evv(struct radeon_device *rdev,

args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
+ args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
args.in.ulSCLKFreq =
cpu_to_le32(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);

diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d97f232..bd6a436 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1063,6 +1063,23 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
}

+ /*
+ * Ignore reports for absolute data if the data didn't change. This is
+ * not only an optimization but also fixes 'dead' key reports. Some
+ * RollOver implementations for localized keys (like BACKSLASH/PIPE; HID
+ * 0x31 and 0x32) report multiple keys, even though a localized keyboard
+ * can only have one of them physically available. The 'dead' keys
+ * report constant 0. As all map to the same keycode, they'd confuse
+ * the input layer. If we filter the 'dead' keys on the HID level, we
+ * skip the keycode translation and only forward real events.
+ */
+ if (!(field->flags & (HID_MAIN_ITEM_RELATIVE |
+ HID_MAIN_ITEM_BUFFERED_BYTE)) &&
+ (field->flags & HID_MAIN_ITEM_VARIABLE) &&
+ usage->usage_index < field->maxusage &&
+ value == field->value[usage->usage_index])
+ return;
+
/* report the usage code as scancode if the key status has changed */
if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
input_event(input, EV_MSC, MSC_SCAN, usage->hid);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 05c2134..cc4b092 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -356,7 +356,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
static void i2c_hid_get_input(struct i2c_hid *ihid)
{
int ret, ret_size;
- int size = ihid->bufsize;
+ int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+ if (size > ihid->bufsize)
+ size = ihid->bufsize;

ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
if (ret != size) {
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 1946101..675d3c7 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1080,12 +1080,6 @@ struct qib_devdata {
/* control high-level access to EEPROM */
struct mutex eep_lock;
uint64_t traffic_wds;
- /* active time is kept in seconds, but logged in hours */
- atomic_t active_time;
- /* Below are nominal shadow of EEPROM, new since last EEPROM update */
- uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
- uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
- uint16_t eep_hrs;
/*
* masks for which bits of errs, hwerrs that cause
* each of the counters to increment.
@@ -1307,8 +1301,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
const void *buffer, int len);
void qib_get_eeprom_info(struct qib_devdata *);
-int qib_update_eeprom_log(struct qib_devdata *dd);
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+#define qib_inc_eeprom_err(dd, eidx, incr)
void qib_dump_lookup_output_queue(struct qib_devdata *);
void qib_force_pio_avail_update(struct qib_devdata *);
void qib_clear_symerror_on_linkup(unsigned long opaque);
diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c
index 4d5d71a..e2280b0 100644
--- a/drivers/infiniband/hw/qib/qib_eeprom.c
+++ b/drivers/infiniband/hw/qib/qib_eeprom.c
@@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
"Board SN %s did not pass functional test: %s\n",
dd->serial, ifp->if_comment);

- memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
- /*
- * Power-on (actually "active") hours are kept as little-endian value
- * in EEPROM, but as seconds in a (possibly as small as 24-bit)
- * atomic_t while running.
- */
- atomic_set(&dd->active_time, 0);
- dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
done:
vfree(buf);

bail:;
}

-/**
- * qib_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the qlogic_ib device
- *
- * Although the time is kept as seconds in the qib_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct qib_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-int qib_update_eeprom_log(struct qib_devdata *dd)
-{
- void *buf;
- struct qib_flash *ifp;
- int len, hi_water;
- uint32_t new_time, new_hrs;
- u8 csum;
- int ret, idx;
- unsigned long flags;
-
- /* first, check if we actually need to do anything. */
- ret = 0;
- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
- if (dd->eep_st_new_errs[idx]) {
- ret = 1;
- break;
- }
- }
- new_time = atomic_read(&dd->active_time);
-
- if (ret == 0 && new_time < 3600)
- goto bail;
-
- /*
- * The quick-check above determined that there is something worthy
- * of logging, so get current contents and do a more detailed idea.
- * read full flash, not just currently used part, since it may have
- * been written with a newer definition
- */
- len = sizeof(struct qib_flash);
- buf = vmalloc(len);
- ret = 1;
- if (!buf) {
- qib_dev_err(dd,
- "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
- len);
- goto bail;
- }
-
- /* Grab semaphore and read current EEPROM. If we get an
- * error, let go, but if not, keep it until we finish write.
- */
- ret = mutex_lock_interruptible(&dd->eep_lock);
- if (ret) {
- qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
- goto free_bail;
- }
- ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
- if (ret) {
- mutex_unlock(&dd->eep_lock);
- qib_dev_err(dd, "Unable read EEPROM for logging\n");
- goto free_bail;
- }
- ifp = (struct qib_flash *)buf;
-
- csum = flash_csum(ifp, 0);
- if (csum != ifp->if_csum) {
- mutex_unlock(&dd->eep_lock);
- qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
- csum, ifp->if_csum);
- ret = 1;
- goto free_bail;
- }
- hi_water = 0;
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
- int new_val = dd->eep_st_new_errs[idx];
- if (new_val) {
- /*
- * If we have seen any errors, add to EEPROM values
- * We need to saturate at 0xFF (255) and we also
- * would need to adjust the checksum if we were
- * trying to minimize EEPROM traffic
- * Note that we add to actual current count in EEPROM,
- * in case it was altered while we were running.
- */
- new_val += ifp->if_errcntp[idx];
- if (new_val > 0xFF)
- new_val = 0xFF;
- if (ifp->if_errcntp[idx] != new_val) {
- ifp->if_errcntp[idx] = new_val;
- hi_water = offsetof(struct qib_flash,
- if_errcntp) + idx;
- }
- /*
- * update our shadow (used to minimize EEPROM
- * traffic), to match what we are about to write.
- */
- dd->eep_st_errs[idx] = new_val;
- dd->eep_st_new_errs[idx] = 0;
- }
- }
- /*
- * Now update active-time. We would like to round to the nearest hour
- * but unless atomic_t are sure to be proper signed ints we cannot,
- * because we need to account for what we "transfer" to EEPROM and
- * if we log an hour at 31 minutes, then we would need to set
- * active_time to -29 to accurately count the _next_ hour.
- */
- if (new_time >= 3600) {
- new_hrs = new_time / 3600;
- atomic_sub((new_hrs * 3600), &dd->active_time);
- new_hrs += dd->eep_hrs;
- if (new_hrs > 0xFFFF)
- new_hrs = 0xFFFF;
- dd->eep_hrs = new_hrs;
- if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
- ifp->if_powerhour[0] = new_hrs & 0xFF;
- hi_water = offsetof(struct qib_flash, if_powerhour);
- }
- if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
- ifp->if_powerhour[1] = new_hrs >> 8;
- hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
- }
- }
- /*
- * There is a tiny possibility that we could somehow fail to write
- * the EEPROM after updating our shadows, but problems from holding
- * the spinlock too long are a much bigger issue.
- */
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
- if (hi_water) {
- /* we made some change to the data, uopdate cksum and write */
- csum = flash_csum(ifp, 1);
- ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
- }
- mutex_unlock(&dd->eep_lock);
- if (ret)
- qib_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
- vfree(buf);
-bail:
- return ret;
-}
-
-/**
- * qib_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the qlogic_ib device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
-{
- uint new_val;
- unsigned long flags;
-
- spin_lock_irqsave(&dd->eep_st_lock, flags);
- new_val = dd->eep_st_new_errs[eidx] + incr;
- if (new_val > 255)
- new_val = 255;
- dd->eep_st_new_errs[eidx] = new_val;
- spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-}
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 84e593d..295f631 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2682,8 +2682,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
spin_lock_irqsave(&dd->eep_st_lock, flags);
traffic_wds -= dd->traffic_wds;
dd->traffic_wds += traffic_wds;
- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
- atomic_add(5, &dd->active_time); /* S/B #define */
spin_unlock_irqrestore(&dd->eep_st_lock, flags);

qib_chk_6120_errormask(dd);
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 454c2e7..c86e71b 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -3299,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
spin_lock_irqsave(&dd->eep_st_lock, flags);
traffic_wds -= dd->traffic_wds;
dd->traffic_wds += traffic_wds;
- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
- atomic_add(5, &dd->active_time); /* S/B #define */
spin_unlock_irqrestore(&dd->eep_st_lock, flags);
done:
mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index d1bd213..0f8d1f0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5191,8 +5191,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
traffic_wds -= ppd->dd->traffic_wds;
ppd->dd->traffic_wds += traffic_wds;
- if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
- atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
QIB_IB_QDR) &&
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 76c3e17..8c9bb6c 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -922,7 +922,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
}
}

- qib_update_eeprom_log(dd);
}

/**
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 3c8e4e3..b9ccbda 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -611,28 +611,6 @@ bail:
return ret < 0 ? ret : count;
}

-static ssize_t show_logged_errs(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct qib_ibdev *dev =
- container_of(device, struct qib_ibdev, ibdev.dev);
- struct qib_devdata *dd = dd_from_dev(dev);
- int idx, count;
-
- /* force consistency with actual EEPROM */
- if (qib_update_eeprom_log(dd) != 0)
- return -ENXIO;
-
- count = 0;
- for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
- count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
- dd->eep_st_errs[idx],
- idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
- }
-
- return count;
-}
-
/*
* Dump tempsense regs. in decimal, to ease shell-scripts.
*/
@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
&dev_attr_nfreectxts,
&dev_attr_serial,
&dev_attr_boardversion,
- &dev_attr_logged_errors,
&dev_attr_tempsense,
&dev_attr_localbus_info,
&dev_attr_chip_reset,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index e60c2ea..951addc 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
unsigned short logical_block_size = queue_logical_block_size(q);
sector_t num_sectors;

+ /* Reject unsupported discard requests */
+ if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+ dec_count(io, region, -EOPNOTSUPP);
+ return;
+ }
+
/*
* where->count may be zero if rw holds a flush and we need to
* send a zero-sized flush.
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443..9388c36 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
return;
}

+ /*
+ * If the bio is discard, return an error, but do not
+ * degrade the array.
+ */
+ if (bio->bi_rw & REQ_DISCARD) {
+ bio_endio(bio, -EOPNOTSUPP);
+ return;
+ }
+
for (i = 0; i < ms->nr_mirrors; i++)
if (test_bit(i, &error))
fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 944690b..d892a05 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1439,8 +1439,6 @@ out:
full_bio->bi_end_io = pe->full_bio_end_io;
full_bio->bi_private = pe->full_bio_private;
}
- free_pending_exception(pe);
-
increment_pending_exceptions_done_count();

up_write(&s->lock);
@@ -1457,6 +1455,8 @@ out:
}

retry_origin_bios(s, origin_bios);
+
+ free_pending_exception(pe);
}

static void commit_callback(void *context, int success)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 72859fa..b8570e9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2421,7 +2421,7 @@ int dm_setup_md_queue(struct mapped_device *md)
return 0;
}

-static struct mapped_device *dm_find_md(dev_t dev)
+struct mapped_device *dm_get_md(dev_t dev)
{
struct mapped_device *md;
unsigned minor = MINOR(dev);
@@ -2432,12 +2432,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
spin_lock(&_minor_lock);

md = idr_find(&_minor_idr, minor);
- if (md && (md == MINOR_ALLOCED ||
- (MINOR(disk_devt(dm_disk(md))) != minor) ||
- dm_deleting_md(md) ||
- test_bit(DMF_FREEING, &md->flags))) {
- md = NULL;
- goto out;
+ if (md) {
+ if ((md == MINOR_ALLOCED ||
+ (MINOR(disk_devt(dm_disk(md))) != minor) ||
+ dm_deleting_md(md) ||
+ test_bit(DMF_FREEING, &md->flags))) {
+ md = NULL;
+ goto out;
+ }
+ dm_get(md);
}

out:
@@ -2445,16 +2448,6 @@ out:

return md;
}
-
-struct mapped_device *dm_get_md(dev_t dev)
-{
- struct mapped_device *md = dm_find_md(dev);
-
- if (md)
- dm_get(md);
-
- return md;
-}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e4885c0..53457ee 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -563,7 +563,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (test_bit(WriteMostly, &rdev->flags)) {
/* Don't balance among write-mostly, just
* use the first as a last resort */
- if (best_disk < 0) {
+ if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
if (first_bad < this_sector)
@@ -572,7 +572,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
best_good_sectors = first_bad - this_sector;
} else
best_good_sectors = sectors;
- best_disk = disk;
+ best_dist_disk = disk;
+ best_pending_disk = disk;
}
continue;
}
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3782c2f..dcdc327 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3079,7 +3079,8 @@ static void handle_stripe_dirtying(struct r5conf *conf,
* generate correct data from the parity.
*/
if (conf->max_degraded == 2 ||
- (recovery_cp < MaxSector && sh->sector >= recovery_cp)) {
+ (recovery_cp < MaxSector && sh->sector >= recovery_cp &&
+ s->failed == 0)) {
/* Calculate the real rcw later - for now make it
* look like rcw is cheaper
*/
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index f674dc0..d2a4e6d 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -350,6 +350,7 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
{
struct dvb_usb_device *d = adap_to_d(adap);
struct lme2510_state *lme_int = adap_to_priv(adap);
+ struct usb_host_endpoint *ep;

lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC);

@@ -371,6 +372,12 @@ static int lme2510_int_read(struct dvb_usb_adapter *adap)
adap,
8);

+ /* Quirk of pipe reporting PIPE_BULK but behaves as interrupt */
+ ep = usb_pipe_endpoint(d->udev, lme_int->lme_urb->pipe);
+
+ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+ lme_int->lme_urb->pipe = usb_rcvbulkpipe(d->udev, 0xa),
+
lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC);
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index eebe329..df71a7f 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -187,7 +187,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
if ((hcsr & H_RST) == H_RST) {
dev_warn(&dev->pdev->dev, "H_RST is set = 0x%08X", hcsr);
hcsr &= ~H_RST;
- mei_me_reg_write(hw, H_CSR, hcsr);
+ mei_hcsr_set(hw, hcsr);
hcsr = mei_hcsr_read(hw);
}

@@ -276,6 +276,7 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
return err;
}

+ mei_me_hw_reset_release(dev);
dev->recvd_hw_ready = false;
return 0;
}
@@ -522,9 +523,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
- mei_me_hw_reset_release(dev);
dev_dbg(&dev->pdev->dev, "we need to start the dev.\n");
-
dev->recvd_hw_ready = true;
wake_up_interruptible(&dev->wait_hw_ready);

diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 793dacd..9654246 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -201,8 +201,8 @@ static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev)
if (!pdata)
return NULL;

- of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles);
- if (clk_delay_cycles > 0)
+ if (!of_property_read_u32(np, "mrvl,clk-delay-cycles",
+ &clk_delay_cycles))
pdata->clk_delay_cycles = clk_delay_cycles;

return pdata;
@@ -288,10 +288,11 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
}
}

- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS);
pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
pm_suspend_ignore_children(&pdev->dev, 1);

ret = sdhci_add_host(host);
@@ -316,8 +317,8 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
err_of_parse:
err_cd_req:
err_add_host:
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
clk_disable_unprepare(clk);
clk_put(clk);
err_clk_get:
@@ -378,15 +379,13 @@ static int sdhci_pxav3_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- unsigned long flags;
+ int ret;

- if (pltfm_host->clk) {
- spin_lock_irqsave(&host->lock, flags);
- host->runtime_suspended = true;
- spin_unlock_irqrestore(&host->lock, flags);
+ ret = sdhci_runtime_suspend_host(host);
+ if (ret)
+ return ret;

- clk_disable_unprepare(pltfm_host->clk);
- }
+ clk_disable_unprepare(pltfm_host->clk);

return 0;
}
@@ -395,17 +394,10 @@ static int sdhci_pxav3_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- unsigned long flags;
-
- if (pltfm_host->clk) {
- clk_prepare_enable(pltfm_host->clk);

- spin_lock_irqsave(&host->lock, flags);
- host->runtime_suspended = false;
- spin_unlock_irqrestore(&host->lock, flags);
- }
+ clk_prepare_enable(pltfm_host->clk);

- return 0;
+ return sdhci_runtime_resume_host(host);
}
#endif
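
[ The probe reordering above is the usual runtime-PM bring-up for a device
  that is already powered when probe runs: mark it active and hold a usage
  count before pm_runtime_enable(), and undo with disable + put_noidle on
  failure.  A rough kernel-style sketch of that ordering; my_probe and
  my_register_hardware are placeholders and error handling is trimmed:

	static int my_probe(struct platform_device *pdev)
	{
		int ret;

		/* the device is already powered at this point */
		pm_runtime_get_noresume(&pdev->dev);
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
		pm_runtime_use_autosuspend(&pdev->dev);
		pm_runtime_enable(&pdev->dev);

		ret = my_register_hardware(pdev);	/* placeholder for the real work */
		if (ret)
			goto err;

		pm_runtime_put_autosuspend(&pdev->dev);
		return 0;

	err:
		pm_runtime_disable(&pdev->dev);
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}
]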

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a9324f4..280cb5c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3100,7 +3100,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
}
#endif
if (!bnx2x_fp_lock_napi(fp))
- return work_done;
+ return budget;

for_each_cos_in_tx_queue(fp, cos)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 3bec8cf..024cd49 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2392,7 +2392,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)

work_done = netxen_process_rcv_ring(sds_ring, budget);

- if ((work_done < budget) && tx_complete) {
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
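
[ Both poll() fixes above follow the same NAPI rule: only call
  napi_complete() and re-arm interrupts when less than the budget was
  consumed; if work remains (unfinished TX, or RX used the whole budget),
  report the full budget so the core keeps polling.  A minimal sketch of
  that shape; struct my_priv and the my_* helpers are placeholders, not
  real driver symbols:

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *priv = container_of(napi, struct my_priv, napi);
		int work_done;

		work_done = my_process_rx(priv, budget);

		/* TX not finished: keep polling, report the full budget */
		if (!my_tx_complete(priv))
			return budget;

		if (work_done < budget) {
			napi_complete(napi);
			my_enable_irq(priv);	/* only after napi_complete() */
		}

		return work_done;
	}
]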
diff --git a/drivers/net/ppp/ppp_deflate.c b/drivers/net/ppp/ppp_deflate.c
index 602c625..b5edc7f 100644
--- a/drivers/net/ppp/ppp_deflate.c
+++ b/drivers/net/ppp/ppp_deflate.c
@@ -246,7 +246,7 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
/*
* See if we managed to reduce the size of the packet.
*/
- if (olen < isize) {
+ if (olen < isize && olen <= osize) {
state->stats.comp_bytes += olen;
state->stats.comp_packets++;
} else {
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index a3399c4..b9b651e 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
regval = ioread32(reg);
iowrite32(regval | val, reg);
regval = ioread32(reg);
- usleep_range(100, 150);
+ udelay(100); /* NB: should be atomic */

/* Bring BB/MAC out of reset */
iowrite32(regval & ~val, reg);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 3960541..d82dc72 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -403,9 +403,6 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
mvmvif->uploaded = false;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;

- /* does this make sense at all? */
- mvmvif->color++;
-
spin_lock_bh(&mvm->time_event_lock);
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
spin_unlock_bh(&mvm->time_event_lock);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index c8f6974..e43d223 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -835,6 +835,11 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
sta_id = ba_notif->sta_id;
tid = ba_notif->tid;

+ if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
+ tid >= IWL_MAX_TID_COUNT,
+ "sta_id %d tid %d", sta_id, tid))
+ return 0;
+
rcu_read_lock();

sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 0adde91..d4654ba 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -729,7 +729,12 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);

- iwl_pcie_tx_start(trans, trans_pcie->scd_base_addr);
+ /*
+ * Send 0 as the scd_base_addr since the device may have be reset
+ * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
+ * contain garbage.
+ */
+ iwl_pcie_tx_start(trans, 0);
}

/*
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 25f0bc6..7f41551 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1324,7 +1324,7 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
if (add_uevent_var(env, "PCI_SLOT_NAME=%s", pci_name(pdev)))
return -ENOMEM;

- if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
+ if (add_uevent_var(env, "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X",
pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device,
(u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index c5d0a08..d6d4997 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -69,6 +69,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
{
void __iomem *image;
int last_image;
+ unsigned length;

image = rom;
do {
@@ -91,9 +92,9 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
if (readb(pds + 3) != 'R')
break;
last_image = readb(pds + 21) & 0x80;
- /* this length is reliable */
- image += readw(pds + 16) * 512;
- } while (!last_image);
+ length = readw(pds + 16);
+ image += length * 512;
+ } while (length && !last_image);

/* never return a size larger than the PCI resource window */
/* there are known ROMs that get the size wrong */
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index 4779b8e..380eeb5 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -365,7 +365,7 @@ static void imx_pinconf_dbg_show(struct pinctrl_dev *pctldev,
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
unsigned long config;

- if (!pin_reg || !pin_reg->conf_reg) {
+ if (!pin_reg || pin_reg->conf_reg == -1) {
seq_printf(s, "N/A");
return;
}
diff --git a/drivers/power/88pm860x_charger.c b/drivers/power/88pm860x_charger.c
index de029bb..5ccca87 100644
--- a/drivers/power/88pm860x_charger.c
+++ b/drivers/power/88pm860x_charger.c
@@ -711,6 +711,7 @@ static int pm860x_charger_probe(struct platform_device *pdev)
return 0;

out_irq:
+ power_supply_unregister(&info->usb);
while (--i >= 0)
free_irq(info->irq[i], info);
out:
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
index ad3ff8f..e4c95e1 100644
--- a/drivers/power/bq24190_charger.c
+++ b/drivers/power/bq24190_charger.c
@@ -929,7 +929,7 @@ static void bq24190_charger_init(struct power_supply *charger)
charger->properties = bq24190_charger_properties;
charger->num_properties = ARRAY_SIZE(bq24190_charger_properties);
charger->supplied_to = bq24190_charger_supplied_to;
- charger->num_supplies = ARRAY_SIZE(bq24190_charger_supplied_to);
+ charger->num_supplicants = ARRAY_SIZE(bq24190_charger_supplied_to);
charger->get_property = bq24190_charger_get_property;
charger->set_property = bq24190_charger_set_property;
charger->property_is_writeable = bq24190_charger_property_is_writeable;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1f37505..2d5a97d 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -581,7 +581,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
return NULL;
}
- shost->dma_boundary = pcidev->dma_mask;
shost->max_id = BE2_MAX_SESSIONS;
shost->max_channel = 0;
shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f655592..a1f04e3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -92,6 +92,8 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)
{
struct megasas_register_set __iomem *regs;
regs = instance->reg_set;
+
+ instance->mask_interrupts = 0;
/* For Thunderbolt/Invader also clear intr on enable */
writel(~0, &regs->outbound_intr_status);
readl(&regs->outbound_intr_status);
@@ -100,7 +102,6 @@ megasas_enable_intr_fusion(struct megasas_instance *instance)

/* Dummy readl to force pci flush */
readl(&regs->outbound_intr_mask);
- instance->mask_interrupts = 0;
}

/**
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961..eb81c98 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -522,7 +522,7 @@ static ssize_t
sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
{
sg_io_hdr_t *hp = &srp->header;
- int err = 0;
+ int err = 0, err2;
int len;

if (count < SZ_SG_IO_HDR) {
@@ -551,8 +551,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
goto err_out;
}
err_out:
- err = sg_finish_rem_req(srp);
- return (0 == err) ? count : err;
+ err2 = sg_finish_rem_req(srp);
+ return err ? : err2 ? : count;
}

static ssize_t
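
[ The sg_new_read() change keeps the first error instead of letting the
  cleanup path overwrite it; "err ? : err2 ? : count" uses GCC's binary
  conditional, where "a ?: b" evaluates to a if a is non-zero and to b
  otherwise.  Spelled out in portable C:

	#include <stdio.h>

	/* Plain-C spelling of the GCC "err ? : err2 ? : count" idiom. */
	static long pick_result(int err, int err2, long count)
	{
		if (err)
			return err;
		if (err2)
			return err2;
		return count;
	}

	int main(void)
	{
		printf("%ld\n", pick_result(0, 0, 128));	/* 128 */
		printf("%ld\n", pick_result(0, -5, 128));	/* -5  */
		return 0;
	}
]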
diff --git a/drivers/staging/comedi/comedi_compat32.c b/drivers/staging/comedi/comedi_compat32.c
index 1e9da40..5287810 100644
--- a/drivers/staging/comedi/comedi_compat32.c
+++ b/drivers/staging/comedi/comedi_compat32.c
@@ -262,7 +262,7 @@ static int compat_cmd(struct file *file, unsigned long arg)
{
struct comedi_cmd __user *cmd;
struct comedi32_cmd_struct __user *cmd32;
- int rc;
+ int rc, err;

cmd32 = compat_ptr(arg);
cmd = compat_alloc_user_space(sizeof(*cmd));
@@ -271,7 +271,15 @@ static int compat_cmd(struct file *file, unsigned long arg)
if (rc)
return rc;

- return translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
+ rc = translated_ioctl(file, COMEDI_CMD, (unsigned long)cmd);
+ if (rc == -EAGAIN) {
+ /* Special case: copy cmd back to user. */
+ err = put_compat_cmd(cmd32, cmd);
+ if (err)
+ rc = err;
+ }
+
+ return rc;
}

/* Handle 32-bit COMEDI_CMDTEST ioctl. */
diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c
index 601e9cc..bb2890e 100644
--- a/drivers/target/iscsi/iscsi_target_tq.c
+++ b/drivers/target/iscsi/iscsi_target_tq.c
@@ -24,36 +24,22 @@
#include "iscsi_target_tq.h"
#include "iscsi_target.h"

-static LIST_HEAD(active_ts_list);
static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(active_ts_lock);
static DEFINE_SPINLOCK(inactive_ts_lock);
static DEFINE_SPINLOCK(ts_bitmap_lock);

-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
-{
- spin_lock(&active_ts_lock);
- list_add_tail(&ts->ts_list, &active_ts_list);
- iscsit_global->active_ts++;
- spin_unlock(&active_ts_lock);
-}
-
static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
{
+ if (!list_empty(&ts->ts_list)) {
+ WARN_ON(1);
+ return;
+ }
spin_lock(&inactive_ts_lock);
list_add_tail(&ts->ts_list, &inactive_ts_list);
iscsit_global->inactive_ts++;
spin_unlock(&inactive_ts_lock);
}

-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
-{
- spin_lock(&active_ts_lock);
- list_del(&ts->ts_list);
- iscsit_global->active_ts--;
- spin_unlock(&active_ts_lock);
-}
-
static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
{
struct iscsi_thread_set *ts;
@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)

ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);

- list_del(&ts->ts_list);
+ list_del_init(&ts->ts_list);
iscsit_global->inactive_ts--;
spin_unlock(&inactive_ts_lock);

@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)

void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
{
- iscsi_add_ts_to_active_list(ts);
-
spin_lock_bh(&ts->ts_state_lock);
conn->thread_set = ts;
ts->conn = conn;
@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)

if (ts->delay_inactive && (--ts->thread_count == 0)) {
spin_unlock_bh(&ts->ts_state_lock);
- iscsi_del_ts_from_active_list(ts);

if (!iscsit_global->in_shutdown)
iscsi_deallocate_extra_thread_sets();
@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)

if (ts->delay_inactive && (--ts->thread_count == 0)) {
spin_unlock_bh(&ts->ts_state_lock);
- iscsi_del_ts_from_active_list(ts);

if (!iscsit_global->in_shutdown)
iscsi_deallocate_extra_thread_sets();
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 1205dbd..0fccdcf 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1877,8 +1877,8 @@ static int core_scsi3_update_aptpl_buf(
}

if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
- pr_err("Unable to update renaming"
- " APTPL metadata\n");
+ pr_err("Unable to update renaming APTPL metadata,"
+ " reallocating larger buffer\n");
ret = -EMSGSIZE;
goto out;
}
@@ -1895,8 +1895,8 @@ static int core_scsi3_update_aptpl_buf(
lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);

if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
- pr_err("Unable to update renaming"
- " APTPL metadata\n");
+ pr_err("Unable to update renaming APTPL metadata,"
+ " reallocating larger buffer\n");
ret = -EMSGSIZE;
goto out;
}
@@ -1959,7 +1959,7 @@ static int __core_scsi3_write_aptpl_to_file(
static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
{
unsigned char *buf;
- int rc;
+ int rc, len = PR_APTPL_BUF_LEN;

if (!aptpl) {
char *null_buf = "No Registrations or Reservations\n";
@@ -1973,25 +1973,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b

return 0;
}
-
- buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+retry:
+ buf = vzalloc(len);
if (!buf)
return TCM_OUT_OF_RESOURCES;

- rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+ rc = core_scsi3_update_aptpl_buf(dev, buf, len);
if (rc < 0) {
- kfree(buf);
- return TCM_OUT_OF_RESOURCES;
+ vfree(buf);
+ len *= 2;
+ goto retry;
}

rc = __core_scsi3_write_aptpl_to_file(dev, buf);
if (rc != 0) {
pr_err("SPC-3 PR: Could not update APTPL\n");
- kfree(buf);
+ vfree(buf);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
dev->t10_pr.pr_aptpl_active = 1;
- kfree(buf);
+ vfree(buf);
pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
return 0;
}
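
[ The APTPL update above moves to vzalloc() and simply doubles the buffer
  and retries whenever formatting runs out of room, rather than failing.
  The same grow-until-it-fits idea in standalone C, using vsnprintf's
  return value to detect truncation:

	#include <stdarg.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Format into a heap buffer, doubling its size until the output fits. */
	static char *format_retry(const char *fmt, ...)
	{
		size_t len = 64;
		char *buf;

		for (;;) {
			va_list ap;
			int n;

			buf = malloc(len);
			if (!buf)
				return NULL;

			va_start(ap, fmt);
			n = vsnprintf(buf, len, fmt, ap);
			va_end(ap);

			if (n >= 0 && (size_t)n < len)
				return buf;	/* it fit */

			free(buf);
			len *= 2;		/* too small: grow and retry */
		}
	}
]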
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 6aae89d..e19fabf 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -260,6 +260,8 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
static sense_reason_t
sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
{
+ struct se_device *dev = cmd->se_dev;
+ sector_t end_lba = dev->transport->get_blocks(dev) + 1;
unsigned int sectors = sbc_get_write_same_sectors(cmd);

if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
@@ -273,6 +275,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
sectors, cmd->se_dev->dev_attrib.max_write_same_len);
return TCM_INVALID_CDB_FIELD;
}
+ /*
+ * Sanity check for LBA wrap and request past end of device.
+ */
+ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+ ((cmd->t_task_lba + sectors) > end_lba)) {
+ pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
+ (unsigned long long)end_lba, cmd->t_task_lba, sectors);
+ return TCM_ADDRESS_OUT_OF_RANGE;
+ }
+
/* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
if (flags[0] & 0x10) {
pr_warn("WRITE SAME with ANCHOR not supported\n");
@@ -840,7 +852,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
unsigned long long end_lba;

end_lba = dev->transport->get_blocks(dev) + 1;
- if (cmd->t_task_lba + sectors > end_lba) {
+ if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+ ((cmd->t_task_lba + sectors) > end_lba)) {
pr_err("cmd exceeds last lba %llu "
"(lba %llu, sectors %u)\n",
end_lba, cmd->t_task_lba, sectors);
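
[ Both range checks added above guard against the 64-bit sum
  lba + sectors wrapping as well as running past the end of the device,
  using the classic "a + b < a" unsigned-overflow test.  A tiny
  standalone illustration:

	#include <stdbool.h>
	#include <stdint.h>

	/*
	 * Reject a request that wraps the 64-bit LBA space or ends past
	 * end_lba (one past the last addressable block).
	 */
	static bool lba_range_valid(uint64_t lba, uint32_t sectors, uint64_t end_lba)
	{
		uint64_t end = lba + sectors;

		if (end < lba)		/* unsigned wrap-around */
			return false;
		return end <= end_lba;
	}
]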
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 25c9bc7..e49616e 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -209,6 +209,9 @@ static int pty_signal(struct tty_struct *tty, int sig)
unsigned long flags;
struct pid *pgrp;

+ if (sig != SIGINT && sig != SIGQUIT && sig != SIGTSTP)
+ return -EINVAL;
+
if (tty->link) {
spin_lock_irqsave(&tty->link->ctrl_lock, flags);
pgrp = get_pid(tty->link->pgrp);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index a49f10d..52ada70 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -2392,7 +2392,7 @@ static int atmel_serial_probe(struct platform_device *pdev)

ret = atmel_init_port(port, pdev);
if (ret)
- goto err;
+ goto err_clear_bit;

if (!atmel_use_pdc_rx(&port->uart)) {
ret = -ENOMEM;
@@ -2421,6 +2421,12 @@ static int atmel_serial_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, port);

+ /*
+ * The peripheral clock has been disabled by atmel_init_port():
+ * enable it before accessing I/O registers
+ */
+ clk_prepare_enable(port->clk);
+
if (port->rs485.flags & SER_RS485_ENABLED) {
UART_PUT_MR(&port->uart, ATMEL_US_USMODE_NORMAL);
UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
@@ -2431,6 +2437,12 @@ static int atmel_serial_probe(struct platform_device *pdev)
*/
atmel_get_ip_name(&port->uart);

+ /*
+ * The peripheral clock can now safely be disabled till the port
+ * is used
+ */
+ clk_disable_unprepare(port->clk);
+
return 0;

err_add_port:
@@ -2441,6 +2453,8 @@ err_alloc_ring:
clk_put(port->clk);
port->clk = NULL;
}
+err_clear_bit:
+ clear_bit(port->uart.line, atmel_ports_in_use);
err:
return ret;
}
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 23b5d32..693091a 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -498,6 +498,7 @@ void invert_screen(struct vc_data *vc, int offset, int count, int viewed)
#endif
if (DO_UPDATE(vc))
do_update_region(vc, (unsigned long) p, count);
+ notify_update(vc);
}

/* used by selection: complement pointer position */
@@ -514,6 +515,7 @@ void complement_pos(struct vc_data *vc, int offset)
scr_writew(old, screenpos(vc, old_offset, 1));
if (DO_UPDATE(vc))
vc->vc_sw->con_putc(vc, old, oldy, oldx);
+ notify_update(vc);
}

old_offset = offset;
@@ -531,8 +533,8 @@ void complement_pos(struct vc_data *vc, int offset)
oldy = (offset >> 1) / vc->vc_cols;
vc->vc_sw->con_putc(vc, new, oldy, oldx);
}
+ notify_update(vc);
}
-
}

static void insert_char(struct vc_data *vc, unsigned int nr)
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6746103..82dea40 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -943,6 +943,7 @@ static int acm_probe(struct usb_interface *intf,
unsigned long quirks;
int num_rx_buf;
int i;
+ unsigned int elength = 0;
int combined_interfaces = 0;
struct device *tty_dev;
int rv = -ENOMEM;
@@ -988,9 +989,12 @@ static int acm_probe(struct usb_interface *intf,
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
+ elength = buffer[0];

switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
+ if (elength < sizeof(struct usb_cdc_union_desc))
+ goto next_desc;
if (union_header) {
dev_err(&intf->dev, "More than one "
"union descriptor, skipping ...\n");
@@ -999,31 +1003,38 @@ static int acm_probe(struct usb_interface *intf,
union_header = (struct usb_cdc_union_desc *)buffer;
break;
case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
+ if (elength < sizeof(struct usb_cdc_country_functional_desc))
+ goto next_desc;
cfd = (struct usb_cdc_country_functional_desc *)buffer;
break;
case USB_CDC_HEADER_TYPE: /* maybe check version */
break; /* for now we ignore it */
case USB_CDC_ACM_TYPE:
+ if (elength < 4)
+ goto next_desc;
ac_management_function = buffer[3];
break;
case USB_CDC_CALL_MANAGEMENT_TYPE:
+ if (elength < 5)
+ goto next_desc;
call_management_function = buffer[3];
call_interface_num = buffer[4];
if ((quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3)
dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
break;
default:
- /* there are LOTS more CDC descriptors that
+ /*
+ * there are LOTS more CDC descriptors that
* could legitimately be found here.
*/
dev_dbg(&intf->dev, "Ignoring descriptor: "
- "type %02x, length %d\n",
- buffer[2], buffer[0]);
+ "type %02x, length %ud\n",
+ buffer[2], elength);
break;
}
next_desc:
- buflen -= buffer[0];
- buffer += buffer[0];
+ buflen -= elength;
+ buffer += elength;
}

if (!union_header) {
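
[ The cdc-acm hunks above only trust a descriptor's declared length after
  checking it against the structure about to be read from it, and advance
  the walk by that validated length.  The general shape of such a walk
  over a length-prefixed buffer, in standalone C; the descriptor layout is
  simplified here to length and type bytes only:

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Walk TLV-style descriptors: buf[0] = length (incl. header), buf[1] = type. */
	static void walk_descriptors(const uint8_t *buf, size_t buflen)
	{
		while (buflen >= 2) {
			uint8_t elength = buf[0];

			if (elength < 2 || elength > buflen) {
				fprintf(stderr, "skipping garbage descriptor\n");
				break;
			}

			/*
			 * Only touch type-specific fields after checking that
			 * elength covers them, e.g. require elength >= 4
			 * before reading buf[3].
			 */
			printf("descriptor type 0x%02x, length %u\n",
			       buf[1], (unsigned int)elength);

			buf += elength;
			buflen -= elength;
		}
	}
]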
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index 2355974..4596f7e 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -22,17 +22,25 @@
*/

/* FIXME tune these based on pool statistics ... */
-static const size_t pool_max[HCD_BUFFER_POOLS] = {
- /* platforms without dma-friendly caches might need to
- * prevent cacheline sharing...
- */
- 32,
- 128,
- 512,
- PAGE_SIZE / 2
- /* bigger --> allocate pages */
+static size_t pool_max[HCD_BUFFER_POOLS] = {
+ 32, 128, 512, 2048,
};

+void __init usb_init_pool_max(void)
+{
+ /*
+ * The pool_max values must never be smaller than
+ * ARCH_KMALLOC_MINALIGN.
+ */
+ if (ARCH_KMALLOC_MINALIGN <= 32)
+ ; /* Original value is okay */
+ else if (ARCH_KMALLOC_MINALIGN <= 64)
+ pool_max[0] = 64;
+ else if (ARCH_KMALLOC_MINALIGN <= 128)
+ pool_max[0] = 0; /* Don't use this pool */
+ else
+ BUILD_BUG(); /* We don't allow this */
+}

/* SETUP primitives */

diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 2ff20e1..f573c1d 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1748,6 +1748,18 @@ static int autosuspend_check(struct usb_device *udev)
dev_dbg(&udev->dev, "remote wakeup needed for autosuspend\n");
return -EOPNOTSUPP;
}
+
+ /*
+ * If the device is a direct child of the root hub and the HCD
+ * doesn't handle wakeup requests, don't allow autosuspend when
+ * wakeup is needed.
+ */
+ if (w && udev->parent == udev->bus->root_hub &&
+ bus_to_hcd(udev->bus)->cant_recv_wakeups) {
+ dev_dbg(&udev->dev, "HCD doesn't handle wakeup requests\n");
+ return -EOPNOTSUPP;
+ }
+
udev->do_remote_wakeup = w;
return 0;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 4a4dae5..178696f 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1616,6 +1616,7 @@ static int unlink1(struct usb_hcd *hcd, struct urb *urb, int status)
int usb_hcd_unlink_urb (struct urb *urb, int status)
{
struct usb_hcd *hcd;
+ struct usb_device *udev = urb->dev;
int retval = -EIDRM;
unsigned long flags;

@@ -1627,20 +1628,19 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
spin_lock_irqsave(&hcd_urb_unlink_lock, flags);
if (atomic_read(&urb->use_count) > 0) {
retval = 0;
- usb_get_dev(urb->dev);
+ usb_get_dev(udev);
}
spin_unlock_irqrestore(&hcd_urb_unlink_lock, flags);
if (retval == 0) {
hcd = bus_to_hcd(urb->dev->bus);
retval = unlink1(hcd, urb, status);
- usb_put_dev(urb->dev);
+ if (retval == 0)
+ retval = -EINPROGRESS;
+ else if (retval != -EIDRM && retval != -EBUSY)
+ dev_dbg(&udev->dev, "hcd_unlink_urb %p fail %d\n",
+ urb, retval);
+ usb_put_dev(udev);
}
-
- if (retval == 0)
- retval = -EINPROGRESS;
- else if (retval != -EIDRM && retval != -EBUSY)
- dev_dbg(&urb->dev->dev, "hcd_unlink_urb %p fail %d\n",
- urb, retval);
return retval;
}

diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 4d11449..a922730 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -1050,6 +1050,7 @@ static int __init usb_init(void)
pr_info("%s: USB support disabled\n", usbcore_name);
return 0;
}
+ usb_init_pool_max();

retval = usb_debugfs_init();
if (retval)
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 2facee5..ea50291 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -2247,6 +2247,9 @@ struct usb_hcd *isp1760_register(phys_addr_t res_start, resource_size_t res_len,
hcd->rsrc_start = res_start;
hcd->rsrc_len = res_len;

+ /* This driver doesn't support wakeup requests */
+ hcd->cant_recv_wakeups = 1;
+
ret = usb_add_hcd(hcd, irq, irqflags);
if (ret)
goto err_unmap;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 57dfc0c..a70f46f 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -74,7 +74,7 @@ config USB_MUSB_TUSB6010

config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS && USB
select GENERIC_PHY

config USB_MUSB_AM35X
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 5312e18..df46ab0 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index a344372..6343145 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -112,10 +112,16 @@ static void do_suspend(void)

err = freeze_processes();
if (err) {
- pr_err("%s: freeze failed %d\n", __func__, err);
+ pr_err("%s: freeze processes failed %d\n", __func__, err);
goto out;
}

+ err = freeze_kernel_threads();
+ if (err) {
+ pr_err("%s: freeze kernel threads failed %d\n", __func__, err);
+ goto out_thaw;
+ }
+
err = dpm_suspend_start(PMSG_FREEZE);
if (err) {
pr_err("%s: dpm_suspend_start %d\n", __func__, err);
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index 1818ce7..214f1b5 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
*/
static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
{
- struct autofs_dev_ioctl tmp;
+ struct autofs_dev_ioctl tmp, *res;

if (copy_from_user(&tmp, in, sizeof(tmp)))
return ERR_PTR(-EFAULT);
@@ -103,7 +103,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
if (tmp.size < sizeof(tmp))
return ERR_PTR(-EINVAL);

- return memdup_user(in, tmp.size);
+ res = memdup_user(in, tmp.size);
+ if (!IS_ERR(res))
+ res->size = tmp.size;
+
+ return res;
}

static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index aae2276..15eaae5 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2421,7 +2421,7 @@ int open_ctree(struct super_block *sb,
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;

if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
- printk(KERN_ERR "btrfs: has skinny extents\n");
+ printk(KERN_INFO "BTRFS: has skinny extents\n");

/*
* flag our filesystem as having big metadata blocks if
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 51c038d..18bf34e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -469,8 +469,20 @@ insert:
src_item = (struct btrfs_inode_item *)src_ptr;
dst_item = (struct btrfs_inode_item *)dst_ptr;

- if (btrfs_inode_generation(eb, src_item) == 0)
+ if (btrfs_inode_generation(eb, src_item) == 0) {
+ struct extent_buffer *dst_eb = path->nodes[0];
+
+ if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
+ S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) {
+ struct btrfs_map_token token;
+ u64 ino_size = btrfs_inode_size(eb, src_item);
+
+ btrfs_init_map_token(&token);
+ btrfs_set_token_inode_size(dst_eb, dst_item,
+ ino_size, &token);
+ }
goto no_copy;
+ }

if (overwrite_root &&
S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
@@ -3121,7 +3133,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
static void fill_inode_item(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf,
struct btrfs_inode_item *item,
- struct inode *inode, int log_inode_only)
+ struct inode *inode, int log_inode_only,
+ u64 logged_isize)
{
struct btrfs_map_token token;

@@ -3134,7 +3147,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
* to say 'update this inode with these values'
*/
btrfs_set_token_inode_generation(leaf, item, 0, &token);
- btrfs_set_token_inode_size(leaf, item, 0, &token);
+ btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
} else {
btrfs_set_token_inode_generation(leaf, item,
BTRFS_I(inode)->generation,
@@ -3186,7 +3199,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
return ret;
inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
struct btrfs_inode_item);
- fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
+ fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
btrfs_release_path(path);
return 0;
}
@@ -3195,7 +3208,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
struct inode *inode,
struct btrfs_path *dst_path,
struct extent_buffer *src,
- int start_slot, int nr, int inode_only)
+ int start_slot, int nr, int inode_only,
+ u64 logged_isize)
{
unsigned long src_offset;
unsigned long dst_offset;
@@ -3242,7 +3256,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
dst_path->slots[0],
struct btrfs_inode_item);
fill_inode_item(trans, dst_path->nodes[0], inode_item,
- inode, inode_only == LOG_INODE_EXISTS);
+ inode, inode_only == LOG_INODE_EXISTS,
+ logged_isize);
} else {
copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
src_offset, ins_sizes[i]);
@@ -3606,6 +3621,33 @@ process:
return ret;
}

+static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
+ struct btrfs_path *path, u64 *size_ret)
+{
+ struct btrfs_key key;
+ int ret;
+
+ key.objectid = btrfs_ino(inode);
+ key.type = BTRFS_INODE_ITEM_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
+ if (ret < 0) {
+ return ret;
+ } else if (ret > 0) {
+ *size_ret = i_size_read(inode);
+ } else {
+ struct btrfs_inode_item *item;
+
+ item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_inode_item);
+ *size_ret = btrfs_inode_size(path->nodes[0], item);
+ }
+
+ btrfs_release_path(path);
+ return 0;
+}
+
/* log a single inode in the tree log.
* At least one parent directory for this inode must exist in the tree
* or be logged already.
@@ -3637,6 +3679,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
int ins_nr;
bool fast_search = false;
u64 ino = btrfs_ino(inode);
+ u64 logged_isize = 0;

path = btrfs_alloc_path();
if (!path)
@@ -3690,6 +3733,25 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
max_key_type = BTRFS_XATTR_ITEM_KEY;
ret = drop_objectid_items(trans, log, path, ino, max_key_type);
} else {
+ if (inode_only == LOG_INODE_EXISTS) {
+ /*
+ * Make sure the new inode item we write to the log has
+ * the same isize as the current one (if it exists).
+ * This is necessary to prevent data loss after log
+ * replay, and also to prevent doing a wrong expanding
+ * truncate - for e.g. create file, write 4K into offset
+ * 0, fsync, write 4K into offset 4096, add hard link,
+ * fsync some other file (to sync log), power fail - if
+ * we use the inode's current i_size, after log replay
+ * we get a 8Kb file, with the last 4Kb extent as a hole
+ * (zeroes), as if an expanding truncate happened,
+ * instead of getting a file of 4Kb only.
+ */
+ err = logged_inode_size(log, inode, path,
+ &logged_isize);
+ if (err)
+ goto out_unlock;
+ }
if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags)) {
clear_bit(BTRFS_INODE_COPY_EVERYTHING,
@@ -3746,7 +3808,7 @@ again:
}

ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
- ins_nr, inode_only);
+ ins_nr, inode_only, logged_isize);
if (ret) {
err = ret;
goto out_unlock;
@@ -3765,7 +3827,7 @@ next_slot:
if (ins_nr) {
ret = copy_items(trans, inode, dst_path, src,
ins_start_slot,
- ins_nr, inode_only);
+ ins_nr, inode_only, logged_isize);
if (ret) {
err = ret;
goto out_unlock;
@@ -3785,7 +3847,7 @@ next_slot:
}
if (ins_nr) {
ret = copy_items(trans, inode, dst_path, src, ins_start_slot,
- ins_nr, inode_only);
+ ins_nr, inode_only, logged_isize);
if (ret) {
err = ret;
goto out_unlock;
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 7654e87..9ad5ba4 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
sumlen = c->sector_size - je32_to_cpu(sm->offset);
sumptr = buf + buf_size - sumlen;

+ /* sm->offset maybe wrong but MAGIC maybe right */
+ if (sumlen > c->sector_size)
+ goto full_scan;
+
/* Now, make sure the summary itself is available */
if (sumlen > buf_size) {
/* Need to kmalloc for this. */
@@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo
}
}

+full_scan:
buf_ofs = jeb->offset;

if (!buf_size) {
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index f4ccfe6..02f8d09 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,

for (i = 0; i < args->csa_nrclists; i++) {
status = decode_rc_list(xdr, &args->csa_rclists[i]);
- if (status)
+ if (status) {
+ args->csa_nrclists = i;
goto out_free;
+ }
}
}
status = 0;
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index b2e3ff3..ecdbae1 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -31,6 +31,8 @@
#include "alloc.h"
#include "dat.h"

+static void __nilfs_btree_init(struct nilfs_bmap *bmap);
+
static struct nilfs_btree_path *nilfs_btree_alloc_path(void)
{
struct nilfs_btree_path *path;
@@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
return ret;
}

+/**
+ * nilfs_btree_root_broken - verify consistency of btree root node
+ * @node: btree root node to be examined
+ * @ino: inode number
+ *
+ * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ */
+static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
+ unsigned long ino)
+{
+ int level, flags, nchildren;
+ int ret = 0;
+
+ level = nilfs_btree_node_get_level(node);
+ flags = nilfs_btree_node_get_flags(node);
+ nchildren = nilfs_btree_node_get_nchildren(node);
+
+ if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
+ level > NILFS_BTREE_LEVEL_MAX ||
+ nchildren < 0 ||
+ nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
+ pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
+ ino, level, flags, nchildren);
+ ret = 1;
+ }
+ return ret;
+}
+
int nilfs_btree_broken_node_block(struct buffer_head *bh)
{
int ret;
@@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,

/* convert and insert */
dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL;
- nilfs_btree_init(btree);
+ __nilfs_btree_init(btree);
if (nreq != NULL) {
nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
nilfs_bmap_commit_alloc_ptr(btree, nreq, dat);
@@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = {
.bop_gather_data = NULL,
};

-int nilfs_btree_init(struct nilfs_bmap *bmap)
+static void __nilfs_btree_init(struct nilfs_bmap *bmap)
{
bmap->b_ops = &nilfs_btree_ops;
bmap->b_nchildren_per_block =
NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap));
- return 0;
+}
+
+int nilfs_btree_init(struct nilfs_bmap *bmap)
+{
+ int ret = 0;
+
+ __nilfs_btree_init(bmap);
+
+ if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap),
+ bmap->b_inode->i_ino))
+ ret = -EIO;
+ return ret;
}

void nilfs_btree_init_gc(struct nilfs_bmap *bmap)
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 2001862..068b525 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -700,8 +700,8 @@ static int ocfs2_local_read_info(struct super_block *sb, int type)
/* We don't need the lock and we have to acquire quota file locks
* which will later depend on this lock */
mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
- info->dqi_maxblimit = 0x7fffffffffffffffLL;
- info->dqi_maxilimit = 0x7fffffffffffffffLL;
+ info->dqi_max_spc_limit = 0x7fffffffffffffffLL;
+ info->dqi_max_ino_limit = 0x7fffffffffffffffLL;
oinfo = kmalloc(sizeof(struct ocfs2_mem_dqinfo), GFP_NOFS);
if (!oinfo) {
mlog(ML_ERROR, "failed to allocate memory for ocfs2 quota"
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index cca93b6..c8310e5 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -19,7 +19,6 @@
#include <linux/mount.h>
#include <linux/init.h>
#include <linux/idr.h>
-#include <linux/namei.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
@@ -163,17 +162,6 @@ void proc_free_inum(unsigned int inum)
spin_unlock_irqrestore(&proc_inum_lock, flags);
}

-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
- nd_set_link(nd, __PDE_DATA(dentry->d_inode));
- return NULL;
-}
-
-static const struct inode_operations proc_link_inode_operations = {
- .readlink = generic_readlink,
- .follow_link = proc_follow_link,
-};
-
/*
* Don't create negative dentries here, return -ENOENT by hand
* instead.
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 124fc43..2f2815f 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/mount.h>
#include <linux/magic.h>
+#include <linux/namei.h>

#include <asm/uaccess.h>

@@ -401,6 +402,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
};
#endif

+static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct proc_dir_entry *pde = PDE(dentry->d_inode);
+ if (unlikely(!use_pde(pde)))
+ return ERR_PTR(-EINVAL);
+ nd_set_link(nd, pde->data);
+ return pde;
+}
+
+static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+{
+ unuse_pde(p);
+}
+
+const struct inode_operations proc_link_inode_operations = {
+ .readlink = generic_readlink,
+ .follow_link = proc_follow_link,
+ .put_link = proc_put_link,
+};
+
struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
{
struct inode *inode = new_inode_pseudo(sb);
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 651d09a..8b8ca1d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -202,6 +202,7 @@ struct pde_opener {
int closing;
struct completion *c;
};
+extern const struct inode_operations proc_link_inode_operations;

extern const struct inode_operations proc_pid_link_inode_operations;

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8f78819..903c19f 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -992,9 +992,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
struct vm_area_struct *vma;
struct pagemapread *pm = walk->private;
spinlock_t *ptl;
- pte_t *pte;
+ pte_t *pte, *orig_pte;
int err = 0;
- pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));

/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -1008,6 +1007,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,

for (; addr != end; addr += PAGE_SIZE) {
unsigned long offset;
+ pagemap_entry_t pme;

offset = (addr & ~PAGEMAP_WALK_MASK) >>
PAGE_SHIFT;
@@ -1022,32 +1022,55 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,

if (pmd_trans_unstable(pmd))
return 0;
- for (; addr != end; addr += PAGE_SIZE) {
- int flags2;
-
- /* check to see if we've left 'vma' behind
- * and need a new, higher one */
- if (vma && (addr >= vma->vm_end)) {
- vma = find_vma(walk->mm, addr);
- if (vma && (vma->vm_flags & VM_SOFTDIRTY))
- flags2 = __PM_SOFT_DIRTY;
- else
- flags2 = 0;
- pme = make_pme(PM_NOT_PRESENT(pm->v2) | PM_STATUS2(pm->v2, flags2));
+
+ while (1) {
+ /* End of address space hole, which we mark as non-present. */
+ unsigned long hole_end;
+
+ if (vma)
+ hole_end = min(end, vma->vm_start);
+ else
+ hole_end = end;
+
+ for (; addr < hole_end; addr += PAGE_SIZE) {
+ pagemap_entry_t pme = make_pme(PM_NOT_PRESENT(pm->v2));
+
+ err = add_to_pagemap(addr, &pme, pm);
+ if (err)
+ return err;
}

- /* check that 'vma' actually covers this address,
- * and that it isn't a huge page vma */
- if (vma && (vma->vm_start <= addr) &&
- !is_vm_hugetlb_page(vma)) {
- pte = pte_offset_map(pmd, addr);
+ if (!vma || vma->vm_start >= end)
+ break;
+ /*
+ * We can't possibly be in a hugetlb VMA. In general,
+ * for a mm_walk with a pmd_entry and a hugetlb_entry,
+ * the pmd_entry can only be called on addresses in a
+ * hugetlb if the walk starts in a non-hugetlb VMA and
+ * spans a hugepage VMA. Since pagemap_read walks are
+ * PMD-sized and PMD-aligned, this will never be true.
+ */
+ BUG_ON(is_vm_hugetlb_page(vma));
+
+ /* Addresses in the VMA. */
+ orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ for (; addr < min(end, vma->vm_end); pte++, addr += PAGE_SIZE) {
+ pagemap_entry_t pme;
+
pte_to_pagemap_entry(&pme, pm, vma, addr, *pte);
- /* unmap before userspace copy */
- pte_unmap(pte);
+ err = add_to_pagemap(addr, &pme, pm);
+ if (err)
+ break;
}
- err = add_to_pagemap(addr, &pme, pm);
+ pte_unmap_unlock(orig_pte, ptl);
+
if (err)
return err;
+
+ if (addr == end)
+ break;
+
+ vma = find_vma(walk->mm, addr);
}

cond_resched();
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 274e310..064e9eb 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2396,16 +2396,6 @@ out:
}
EXPORT_SYMBOL(dquot_quota_on_mount);

-static inline qsize_t qbtos(qsize_t blocks)
-{
- return blocks << QIF_DQBLKSIZE_BITS;
-}
-
-static inline qsize_t stoqb(qsize_t space)
-{
- return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
-}
-
/* Generic routine for getting common part of quota structure */
static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
@@ -2455,13 +2445,13 @@ static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
return -EINVAL;

if (((di->d_fieldmask & QC_SPC_SOFT) &&
- stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+ di->d_spc_softlimit > dqi->dqi_max_spc_limit) ||
((di->d_fieldmask & QC_SPC_HARD) &&
- stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+ di->d_spc_hardlimit > dqi->dqi_max_spc_limit) ||
((di->d_fieldmask & QC_INO_SOFT) &&
- (di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
+ (di->d_ino_softlimit > dqi->dqi_max_ino_limit)) ||
((di->d_fieldmask & QC_INO_HARD) &&
- (di->d_ino_hardlimit > dqi->dqi_maxilimit)))
+ (di->d_ino_hardlimit > dqi->dqi_max_ino_limit)))
return -ERANGE;

spin_lock(&dq_data_lock);
diff --git a/fs/quota/quota_v1.c b/fs/quota/quota_v1.c
index 469c684..8fe79be 100644
--- a/fs/quota/quota_v1.c
+++ b/fs/quota/quota_v1.c
@@ -169,8 +169,8 @@ static int v1_read_file_info(struct super_block *sb, int type)
}
ret = 0;
/* limits are stored as unsigned 32-bit data */
- dqopt->info[type].dqi_maxblimit = 0xffffffff;
- dqopt->info[type].dqi_maxilimit = 0xffffffff;
+ dqopt->info[type].dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+ dqopt->info[type].dqi_max_ino_limit = 0xffffffff;
dqopt->info[type].dqi_igrace =
dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
dqopt->info[type].dqi_bgrace =
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c
index 02751ec..d1a8054 100644
--- a/fs/quota/quota_v2.c
+++ b/fs/quota/quota_v2.c
@@ -117,12 +117,12 @@ static int v2_read_file_info(struct super_block *sb, int type)
qinfo = info->dqi_priv;
if (version == 0) {
/* limits are stored as unsigned 32-bit data */
- info->dqi_maxblimit = 0xffffffff;
- info->dqi_maxilimit = 0xffffffff;
+ info->dqi_max_spc_limit = 0xffffffffULL << QUOTABLOCK_BITS;
+ info->dqi_max_ino_limit = 0xffffffff;
} else {
- /* used space is stored as unsigned 64-bit value */
- info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */
- info->dqi_maxilimit = 0xffffffffffffffffULL;
+ /* used space is stored as unsigned 64-bit value in bytes */
+ info->dqi_max_spc_limit = 0xffffffffffffffffULL; /* 2^64-1 */
+ info->dqi_max_ino_limit = 0xffffffffffffffffULL;
}
info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3b2c14b..6beb7a9 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -977,7 +977,11 @@ xfs_bmap_local_to_extents(
*firstblock = args.fsbno;
bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);

- /* initialise the block and copy the data */
+ /*
+ * Initialise the block and copy the data
+ *
+ * Note: init_fn must set the buffer log item type correctly!
+ */
init_fn(tp, bp, ip, ifp);

/* account for the change in fork size and log everything */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2227b9b..71f04c3 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -320,6 +320,10 @@ xfs_buf_item_format(
ASSERT(atomic_read(&bip->bli_refcount) > 0);
ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
(bip->bli_flags & XFS_BLI_STALE));
+ ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
+ (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
+ && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));
+

/*
* If it is an inode buffer, transfer the in-memory state to the
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 001aa89..16bb5d0 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1950,6 +1950,7 @@ xfs_iunlink(
agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket_index);
+ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
xfs_trans_log_buf(tp, agibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
return 0;
@@ -2041,6 +2042,7 @@ xfs_iunlink_remove(
agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
offset = offsetof(xfs_agi_t, agi_unlinked) +
(sizeof(xfs_agino_t) * bucket_index);
+ xfs_trans_buf_set_type(tp, agibp, XFS_BLFT_AGI_BUF);
xfs_trans_log_buf(tp, agibp, offset,
(offset + sizeof(xfs_agino_t) - 1));
} else {
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index dd88f0e..4f6f0d4 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1108,6 +1108,11 @@ xfs_qm_reset_dqcounts(
*/
xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
"xfs_quotacheck");
+ /*
+ * Reset type in case we are reusing group quota file for
+ * project quotas or vice versa
+ */
+ ddq->d_flags = type;
ddq->d_bcount = 0;
ddq->d_icount = 0;
ddq->d_rtbcount = 0;
diff --git a/fs/xfs/xfs_symlink_remote.c b/fs/xfs/xfs_symlink_remote.c
index bf59a2b..41f64a4 100644
--- a/fs/xfs/xfs_symlink_remote.c
+++ b/fs/xfs/xfs_symlink_remote.c
@@ -181,6 +181,8 @@ xfs_symlink_local_to_remote(
struct xfs_mount *mp = ip->i_mount;
char *buf;

+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
+
if (!xfs_sb_version_hascrc(&mp->m_sb)) {
bp->b_ops = NULL;
memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index c812c5c..b626f3d 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -474,6 +474,7 @@ xfs_trans_apply_sb_deltas(
whole = 1;
}

+ xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
if (whole)
/*
* Log the whole thing, the fields are noncontiguous.
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 940ece4..729a96c 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -214,9 +214,9 @@
INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info)

#define _INTEL_BDW_M_IDS(gt, info) \
- _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \
+ _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \
_INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \
- _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \
+ _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \
_INTEL_BDW_M(gt, 0x160E, info) /* ULX */

#define _INTEL_BDW_D_IDS(gt, info) \
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 1c804b0..7ee1774 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -101,8 +101,10 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
new_dir_mask |= FS_ISDIR;
}

- fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
- fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
+ fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name,
+ fs_cookie);
+ fsnotify(new_dir, new_dir_mask, source, FSNOTIFY_EVENT_INODE, new_name,
+ fs_cookie);

if (target)
fsnotify_link_count(target);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5572709..723c75c 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -89,9 +89,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write);
+ pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write);
+ pud_t *pud, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pmd);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
@@ -128,8 +128,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
static inline void hugetlb_show_meminfo(void)
{
}
-#define follow_huge_pmd(mm, addr, pmd, write) NULL
-#define follow_huge_pud(mm, addr, pud, write) NULL
+#define follow_huge_pmd(mm, addr, pmd, flags) NULL
+#define follow_huge_pud(mm, addr, pud, flags) NULL
#define prepare_hugepage_range(file, addr, len) (-EINVAL)
#define pmd_huge(x) 0
#define pud_huge(x) 0
diff --git a/include/linux/quota.h b/include/linux/quota.h
index bc395be..a1066e9 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -211,8 +211,8 @@ struct mem_dqinfo {
unsigned long dqi_flags;
unsigned int dqi_bgrace;
unsigned int dqi_igrace;
- qsize_t dqi_maxblimit;
- qsize_t dqi_maxilimit;
+ qsize_t dqi_max_spc_limit;
+ qsize_t dqi_max_ino_limit;
void *dqi_priv;
};

diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c0f7526..d7f3b3f 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -137,6 +137,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
*entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
}

+extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl);
extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address);
extern void migration_entry_wait_huge(struct vm_area_struct *vma,
@@ -150,6 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp)
}
#define migration_entry_to_page(swp) NULL
static inline void make_migration_entry_read(swp_entry_t *entryp) { }
+static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+ spinlock_t *ptl) { }
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address) { }
static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index b8aba19..9e714eb 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -142,6 +142,8 @@ struct usb_hcd {
unsigned authorized_default:1;
unsigned has_tt:1; /* Integrated TT in root hub */
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
+ unsigned cant_recv_wakeups:1;
+ /* wakeup requests from downstream aren't received */

unsigned int irq; /* irq allocated */
void __iomem *regs; /* device memory/io */
@@ -444,6 +446,7 @@ extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif /* CONFIG_PCI */

/* pci-ish (pdev null is ok) buffer alloc/mapping support */
+void usb_init_pool_max(void);
int hcd_buffer_create(struct usb_hcd *hcd);
void hcd_buffer_destroy(struct usb_hcd *hcd);

diff --git a/include/net/ip.h b/include/net/ip.h
index 5c76b1f..4613b19 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -174,7 +174,7 @@ static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}

-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
__be32 saddr, const struct ip_reply_arg *arg,
unsigned int len);

diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index ee520cb..cf69c31 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -47,6 +47,7 @@ struct netns_ipv4 {
struct inet_peer_base *peers;
struct tcpm_hash_bucket *tcp_metrics_hash;
unsigned int tcp_metrics_hash_log;
+ struct sock * __percpu *tcp_sk;
struct netns_frags frags;
#ifdef CONFIG_NETFILTER
struct xt_table *iptable_filter;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index e3569f8..8baf440 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -380,7 +380,7 @@ struct t10_reservation {
/* Activate Persistence across Target Power Loss enabled
* for SCSI device */
int pr_aptpl_active;
-#define PR_APTPL_BUF_LEN 8192
+#define PR_APTPL_BUF_LEN 262144
u32 pr_generation;
spinlock_t registration_lock;
spinlock_t aptpl_reg_lock;
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index aece134..4ad10ba 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -268,11 +268,11 @@ TRACE_EVENT(mm_page_alloc_extfrag,

TP_PROTO(struct page *page,
int alloc_order, int fallback_order,
- int alloc_migratetype, int fallback_migratetype, int new_migratetype),
+ int alloc_migratetype, int fallback_migratetype),

TP_ARGS(page,
alloc_order, fallback_order,
- alloc_migratetype, fallback_migratetype, new_migratetype),
+ alloc_migratetype, fallback_migratetype),

TP_STRUCT__entry(
__field( struct page *, page )
@@ -289,7 +289,8 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->fallback_order = fallback_order;
__entry->alloc_migratetype = alloc_migratetype;
__entry->fallback_migratetype = fallback_migratetype;
- __entry->change_ownership = (new_migratetype == alloc_migratetype);
+ __entry->change_ownership = (alloc_migratetype ==
+ get_pageblock_migratetype(page));
),

TP_printk("page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d",
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 0b097c8..449518e 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2535,7 +2535,7 @@ static int kdb_summary(int argc, const char **argv)
#define K(x) ((x) << (PAGE_SHIFT - 10))
kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n"
"Buffers: %8lu kB\n",
- val.totalram, val.freeram, val.bufferram);
+ K(val.totalram), K(val.freeram), K(val.bufferram));
return 0;
}
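
For context on the kdb_summary() hunk above: struct sysinfo counters are page counts, so printing them raw under a "kB" label was wrong. A standalone userspace sketch of the same K() scaling (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12; the value is made up):

#include <stdio.h>

#define PAGE_SHIFT 12                           /* assume 4 KiB pages */
#define K(x) ((x) << (PAGE_SHIFT - 10))         /* pages -> kilobytes */

int main(void)
{
        unsigned long totalram = 262144;        /* made-up: 1 GiB of 4 KiB pages */

        printf("raw page count: %8lu\n", totalram);
        printf("MemTotal:       %8lu kB\n", K(totalram));
        return 0;
}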

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 11025cc..824a7ff 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -654,9 +654,13 @@ static void run_ksoftirqd(unsigned int cpu)
* in the task stack here.
*/
__do_softirq();
- rcu_note_context_switch(cpu);
local_irq_enable();
cond_resched();
+
+ preempt_disable();
+ rcu_note_context_switch(cpu);
+ preempt_enable();
+
return;
}
local_irq_enable();
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 111cc34..bc88bc2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4604,7 +4604,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
*fpos += written;

out_unlock:
- for (i = 0; i < nr_pages; i++){
+ for (i = nr_pages - 1; i >= 0; i--) {
kunmap_atomic(map_page[i]);
put_page(pages[i]);
}
diff --git a/mm/compaction.c b/mm/compaction.c
index 1b99ee9..b1067a4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -887,7 +887,7 @@ static int compact_finished(struct zone *zone,
return COMPACT_PARTIAL;

/* Job done if allocation would set block type */
- if (cc->order >= pageblock_order && area->nr_free)
+ if (order >= pageblock_order && area->nr_free)
return COMPACT_PARTIAL;
}

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d821a7e..a716aac 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2473,9 +2473,10 @@ again:
goto unlock;

/*
- * HWPoisoned hugepage is already unmapped and dropped reference
+ * Migrating hugepage or HWPoisoned hugepage is already
+ * unmapped and its refcount is dropped, so just clear pte here.
*/
- if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+ if (unlikely(!pte_present(pte))) {
huge_pte_clear(mm, address, ptep);
goto unlock;
}
@@ -2923,6 +2924,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *pagecache_page = NULL;
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
+ int need_wait_lock = 0;

address &= huge_page_mask(h);

@@ -2956,6 +2958,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = 0;

/*
+ * entry could be a migration/hwpoison entry at this point, so this
+ * check prevents the kernel from going below assuming that we have
+ * an active hugepage in pagecache. This goto expects the 2nd page fault,
+ * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
+ * handle it.
+ */
+ if (!pte_present(entry))
+ goto out_mutex;
+
+ /*
* If we are going to COW the mapping later, we examine the pending
* reservations for this page now. This will ensure that any
* allocations necessary to record that reservation occur outside the
@@ -2974,30 +2986,31 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
vma, address);
}

+ ptl = huge_pte_lock(h, mm, ptep);
+
+ /* Check for a racing update before calling hugetlb_cow */
+ if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
+ goto out_ptl;
+
/*
* hugetlb_cow() requires page locks of pte_page(entry) and
* pagecache_page, so here we need take the former one
* when page != pagecache_page or !pagecache_page.
- * Note that locking order is always pagecache_page -> page,
- * so no worry about deadlock.
*/
page = pte_page(entry);
- get_page(page);
if (page != pagecache_page)
- lock_page(page);
-
- ptl = huge_pte_lockptr(h, mm, ptep);
- spin_lock(ptl);
- /* Check for a racing update before calling hugetlb_cow */
- if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
- goto out_ptl;
+ if (!trylock_page(page)) {
+ need_wait_lock = 1;
+ goto out_ptl;
+ }

+ get_page(page);

if (flags & FAULT_FLAG_WRITE) {
if (!huge_pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep, entry,
pagecache_page, ptl);
- goto out_ptl;
+ goto out_put_page;
}
entry = huge_pte_mkdirty(entry);
}
@@ -3005,7 +3018,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (huge_ptep_set_access_flags(vma, address, ptep, entry,
flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, address, ptep);
-
+out_put_page:
+ if (page != pagecache_page)
+ unlock_page(page);
+ put_page(page);
out_ptl:
spin_unlock(ptl);

@@ -3013,13 +3029,18 @@ out_ptl:
unlock_page(pagecache_page);
put_page(pagecache_page);
}
- if (page != pagecache_page)
- unlock_page(page);
- put_page(page);
-
out_mutex:
mutex_unlock(&hugetlb_instantiation_mutex);

+ /*
+ * Generally it's safe to hold a refcount while waiting for the page
+ * lock. But here we only wait to defer the next page fault and avoid
+ * a busy loop; the page is not touched between being unlocked and the
+ * return from the current page fault, so we cannot end up accessing a
+ * freed page even though we wait without taking a refcount.
+ */
+ if (need_wait_lock)
+ wait_on_page_locked(page);
return ret;
}

@@ -3148,7 +3169,26 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
spin_unlock(ptl);
continue;
}
- if (!huge_pte_none(huge_ptep_get(ptep))) {
+ pte = huge_ptep_get(ptep);
+ if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
+ spin_unlock(ptl);
+ continue;
+ }
+ if (unlikely(is_hugetlb_entry_migration(pte))) {
+ swp_entry_t entry = pte_to_swp_entry(pte);
+
+ if (is_write_migration_entry(entry)) {
+ pte_t newpte;
+
+ make_migration_entry_read(&entry);
+ newpte = swp_entry_to_pte(entry);
+ set_huge_pte_at(mm, address, ptep, newpte);
+ pages++;
+ }
+ spin_unlock(ptl);
+ continue;
+ }
+ if (!huge_pte_none(pte)) {
pte = huge_ptep_get_and_clear(mm, address, ptep);
pte = pte_mkhuge(huge_pte_modify(pte, newprot));
pte = arch_make_huge_pte(pte, vma, NULL, 0);
@@ -3437,26 +3477,48 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
+ pmd_t *pmd, int flags)
{
- struct page *page;
-
- page = pte_page(*(pte_t *)pmd);
- if (page)
- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ struct page *page = NULL;
+ spinlock_t *ptl;
+retry:
+ ptl = pmd_lockptr(mm, pmd);
+ spin_lock(ptl);
+ /*
+ * make sure that the address range covered by this pmd is not
+ * unmapped by other threads.
+ */
+ if (!pmd_huge(*pmd))
+ goto out;
+ if (pmd_present(*pmd)) {
+ page = pte_page(*(pte_t *)pmd) +
+ ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ if (flags & FOLL_GET)
+ get_page(page);
+ } else {
+ if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+ spin_unlock(ptl);
+ __migration_entry_wait(mm, (pte_t *)pmd, ptl);
+ goto retry;
+ }
+ /*
+ * hwpoisoned entry is treated as no_page_table in
+ * follow_page_mask().
+ */
+ }
+out:
+ spin_unlock(ptl);
return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
+ pud_t *pud, int flags)
{
- struct page *page;
+ if (flags & FOLL_GET)
+ return NULL;

- page = pte_page(*(pte_t *)pud);
- if (page)
- page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
- return page;
+ return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ba1ab14..112be59f 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1639,8 +1639,6 @@ static int __soft_offline_page(struct page *page, int flags)
* setting PG_hwpoison.
*/
if (!is_free_buddy_page(page))
- lru_add_drain_all();
- if (!is_free_buddy_page(page))
drain_all_pages();
SetPageHWPoison(page);
if (!is_free_buddy_page(page))
diff --git a/mm/memory.c b/mm/memory.c
index 9ddabd0..0d58cbc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1440,105 +1440,35 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
}
EXPORT_SYMBOL_GPL(zap_vma_ptes);

-/**
- * follow_page_mask - look up a page descriptor from a user-virtual address
- * @vma: vm_area_struct mapping @address
- * @address: virtual address to look up
- * @flags: flags modifying lookup behaviour
- * @page_mask: on output, *page_mask is set according to the size of the page
- *
- * @flags can have FOLL_ flags set, defined in <linux/mm.h>
- *
- * Returns the mapped (struct page *), %NULL if no mapping exists, or
- * an error pointer if there is a mapping to something not represented
- * by a page descriptor (see also vm_normal_page()).
- */
-struct page *follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags,
- unsigned int *page_mask)
+static struct page *no_page_table(struct vm_area_struct *vma,
+ unsigned int flags)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *ptep, pte;
- spinlock_t *ptl;
- struct page *page;
- struct mm_struct *mm = vma->vm_mm;
-
- *page_mask = 0;
-
- page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
- if (!IS_ERR(page)) {
- BUG_ON(flags & FOLL_GET);
- goto out;
- }
-
- page = NULL;
- pgd = pgd_offset(mm, address);
- if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
- goto no_page_table;
+ /*
+ * When core dumping an enormous anonymous area that nobody
+ * has touched so far, we don't want to allocate unnecessary pages or
+ * page tables. Return error instead of NULL to skip handle_mm_fault,
+ * then get_dump_page() will return NULL to leave a hole in the dump.
+ * But we can only make this optimization where a hole would surely
+ * be zero-filled if handle_mm_fault() actually did handle it.
+ */
+ if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
+ return ERR_PTR(-EFAULT);
+ return NULL;
+}

- pud = pud_offset(pgd, address);
- if (pud_none(*pud))
- goto no_page_table;
- if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
- if (flags & FOLL_GET)
- goto out;
- page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
- goto out;
- }
- if (unlikely(pud_bad(*pud)))
- goto no_page_table;
+static struct page *follow_page_pte(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ struct page *page;
+ spinlock_t *ptl;
+ pte_t *ptep, pte;

- pmd = pmd_offset(pud, address);
- if (pmd_none(*pmd))
- goto no_page_table;
- if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
- page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
- if (flags & FOLL_GET) {
- /*
- * Refcount on tail pages are not well-defined and
- * shouldn't be taken. The caller should handle a NULL
- * return when trying to follow tail pages.
- */
- if (PageHead(page))
- get_page(page);
- else {
- page = NULL;
- goto out;
- }
- }
- goto out;
- }
- if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
- goto no_page_table;
- if (pmd_trans_huge(*pmd)) {
- if (flags & FOLL_SPLIT) {
- split_huge_page_pmd(vma, address, pmd);
- goto split_fallthrough;
- }
- ptl = pmd_lock(mm, pmd);
- if (likely(pmd_trans_huge(*pmd))) {
- if (unlikely(pmd_trans_splitting(*pmd))) {
- spin_unlock(ptl);
- wait_split_huge_page(vma->anon_vma, pmd);
- } else {
- page = follow_trans_huge_pmd(vma, address,
- pmd, flags);
- spin_unlock(ptl);
- *page_mask = HPAGE_PMD_NR - 1;
- goto out;
- }
- } else
- spin_unlock(ptl);
- /* fall through */
- }
-split_fallthrough:
+retry:
if (unlikely(pmd_bad(*pmd)))
- goto no_page_table;
+ return no_page_table(vma, flags);

ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-
pte = *ptep;
if (!pte_present(pte)) {
swp_entry_t entry;
@@ -1556,12 +1486,14 @@ split_fallthrough:
goto no_page;
pte_unmap_unlock(ptep, ptl);
migration_entry_wait(mm, pmd, address);
- goto split_fallthrough;
+ goto retry;
}
if ((flags & FOLL_NUMA) && pte_numa(pte))
goto no_page;
- if ((flags & FOLL_WRITE) && !pte_write(pte))
- goto unlock;
+ if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+ pte_unmap_unlock(ptep, ptl);
+ return NULL;
+ }

page = vm_normal_page(vma, address, pte);
if (unlikely(!page)) {
@@ -1606,11 +1538,8 @@ split_fallthrough:
unlock_page(page);
}
}
-unlock:
pte_unmap_unlock(ptep, ptl);
-out:
return page;
-
bad_page:
pte_unmap_unlock(ptep, ptl);
return ERR_PTR(-EFAULT);
@@ -1618,21 +1547,90 @@ bad_page:
no_page:
pte_unmap_unlock(ptep, ptl);
if (!pte_none(pte))
+ return NULL;
+ return no_page_table(vma, flags);
+}
+
+/**
+ * follow_page_mask - look up a page descriptor from a user-virtual address
+ * @vma: vm_area_struct mapping @address
+ * @address: virtual address to look up
+ * @flags: flags modifying lookup behaviour
+ * @page_mask: on output, *page_mask is set according to the size of the page
+ *
+ * @flags can have FOLL_ flags set, defined in <linux/mm.h>
+ *
+ * Returns the mapped (struct page *), %NULL if no mapping exists, or
+ * an error pointer if there is a mapping to something not represented
+ * by a page descriptor (see also vm_normal_page()).
+ */
+struct page *follow_page_mask(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags,
+ unsigned int *page_mask)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ spinlock_t *ptl;
+ struct page *page;
+ struct mm_struct *mm = vma->vm_mm;
+
+ *page_mask = 0;
+
+ page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
+ if (!IS_ERR(page)) {
+ BUG_ON(flags & FOLL_GET);
return page;
+ }

-no_page_table:
- /*
- * When core dumping an enormous anonymous area that nobody
- * has touched so far, we don't want to allocate unnecessary pages or
- * page tables. Return error instead of NULL to skip handle_mm_fault,
- * then get_dump_page() will return NULL to leave a hole in the dump.
- * But we can only make this optimization where a hole would surely
- * be zero-filled if handle_mm_fault() actually did handle it.
- */
- if ((flags & FOLL_DUMP) &&
- (!vma->vm_ops || !vma->vm_ops->fault))
- return ERR_PTR(-EFAULT);
- return page;
+ pgd = pgd_offset(mm, address);
+ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+ return no_page_table(vma, flags);
+
+ pud = pud_offset(pgd, address);
+ if (pud_none(*pud))
+ return no_page_table(vma, flags);
+ if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
+ page = follow_huge_pud(mm, address, pud, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+ if (unlikely(pud_bad(*pud)))
+ return no_page_table(vma, flags);
+
+ pmd = pmd_offset(pud, address);
+ if (pmd_none(*pmd))
+ return no_page_table(vma, flags);
+ if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
+ page = follow_huge_pmd(mm, address, pmd, flags);
+ if (page)
+ return page;
+ return no_page_table(vma, flags);
+ }
+ if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+ return no_page_table(vma, flags);
+ if (pmd_trans_huge(*pmd)) {
+ if (flags & FOLL_SPLIT) {
+ split_huge_page_pmd(vma, address, pmd);
+ return follow_page_pte(vma, address, pmd, flags);
+ }
+ ptl = pmd_lock(mm, pmd);
+ if (likely(pmd_trans_huge(*pmd))) {
+ if (unlikely(pmd_trans_splitting(*pmd))) {
+ spin_unlock(ptl);
+ wait_split_huge_page(vma->anon_vma, pmd);
+ } else {
+ page = follow_trans_huge_pmd(vma, address,
+ pmd, flags);
+ spin_unlock(ptl);
+ *page_mask = HPAGE_PMD_NR - 1;
+ return page;
+ }
+ } else
+ spin_unlock(ptl);
+ }
+ return follow_page_pte(vma, address, pmd, flags);
}

static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
@@ -4027,7 +4025,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
if (follow_phys(vma, addr, write, &prot, &phys_addr))
return -EINVAL;

- maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+ maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
if (write)
memcpy_toio(maddr + offset, buf, len);
else
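
For context on the generic_access_phys() hunk above: a fixed PAGE_SIZE mapping is too small whenever offset + len crosses a page boundary, which is why the mapping size becomes PAGE_ALIGN(len + offset). A userspace sketch of the arithmetic (the 4 KiB page size and the address are assumptions of the sketch):

#include <stdio.h>

#define PAGE_SIZE       4096UL                  /* assumption for the sketch */
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long addr = 0x10ff0;           /* made-up physical address */
        unsigned long len = 64;
        unsigned long offset = addr & ~PAGE_MASK;

        printf("offset in page: %lu, bytes needed: %lu\n", offset, offset + len);
        printf("a single-page mapping covers only %lu bytes\n", PAGE_SIZE);
        printf("PAGE_ALIGN(len + offset) covers %lu bytes\n",
               PAGE_ALIGN(len + offset));
        return 0;
}
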
diff --git a/mm/migrate.c b/mm/migrate.c
index 62047f8..af1bf18 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -210,7 +210,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
* get to the page and wait until migration is finished.
* When we return from this function the fault will be retried.
*/
-static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
+void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl)
{
pte_t pte;
@@ -1232,7 +1232,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
goto put_and_set;

if (PageHuge(page)) {
- isolate_huge_page(page, &pagelist);
+ if (PageHead(page))
+ isolate_huge_page(page, &pagelist);
goto put_and_set;
}

diff --git a/mm/mmap.c b/mm/mmap.c
index d3b8887..e359e9e 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -127,7 +127,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
*/
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
- unsigned long free, allowed, reserve;
+ long free, allowed, reserve;

vm_acct_memory(pages);

@@ -191,7 +191,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
*/
if (mm) {
reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
- allowed -= min(mm->total_vm / 32, reserve);
+ allowed -= min_t(long, mm->total_vm / 32, reserve);
}

if (percpu_counter_read_positive(&vm_committed_as) < allowed)
diff --git a/mm/nommu.c b/mm/nommu.c
index fec093a..0f28834 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1896,7 +1896,7 @@ EXPORT_SYMBOL(unmap_mapping_range);
*/
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
- unsigned long free, allowed, reserve;
+ long free, allowed, reserve;

vm_acct_memory(pages);

@@ -1960,7 +1960,7 @@ int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
*/
if (mm) {
reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
- allowed -= min(mm->total_vm / 32, reserve);
+ allowed -= min_t(long, mm->total_vm / 32, reserve);
}

if (percpu_counter_read_positive(&vm_committed_as) < allowed)
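
For context on the two __vm_enough_memory() hunks above (mm/mmap.c and mm/nommu.c): with unsigned arithmetic, subtracting the reserve can wrap "allowed" around to a huge value and the overcommit check passes by accident; with signed long it simply goes negative. A userspace sketch with made-up numbers and a local min_t() stand-in:

#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        unsigned long total_vm = 32 * 4096;     /* so total_vm / 32 == 4096 */
        long reserve = 8192;

        unsigned long allowed_u = 100;          /* old: unsigned arithmetic */
        long allowed_s = 100;                   /* new: signed arithmetic */

        allowed_u -= min_t(unsigned long, total_vm / 32, reserve);
        allowed_s -= min_t(long, total_vm / 32, reserve);

        printf("unsigned: %lu (wrapped around, check passes by accident)\n",
               allowed_u);
        printf("signed:   %ld (negative, so the limit check can reject)\n",
               allowed_s);
        return 0;
}
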
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 83a20df..b764a6a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1034,8 +1034,8 @@ static void change_pageblock_range(struct page *pageblock_page,
* nor move CMA pages to different free lists. We don't want unmovable pages
* to be allocated from MIGRATE_CMA areas.
*
- * Returns the new migratetype of the pageblock (or the same old migratetype
- * if it was unchanged).
+ * Returns the allocation migratetype if free pages were stolen, or the
+ * fallback migratetype if it was decided not to steal.
*/
static int try_to_steal_freepages(struct zone *zone, struct page *page,
int start_type, int fallback_type)
@@ -1064,12 +1064,10 @@ static int try_to_steal_freepages(struct zone *zone, struct page *page,

/* Claim the whole block if over half of it is free */
if (pages >= (1 << (pageblock_order-1)) ||
- page_group_by_mobility_disabled) {
-
+ page_group_by_mobility_disabled)
set_pageblock_migratetype(page, start_type);
- return start_type;
- }

+ return start_type;
}

return fallback_type;
@@ -1114,7 +1112,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
new_type);

trace_mm_page_alloc_extfrag(page, order, current_order,
- start_migratetype, migratetype, new_type);
+ start_migratetype, migratetype);

return page;
}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index e6b2db6..aab7336 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -974,12 +974,24 @@ static void put_osd(struct ceph_osd *osd)
*/
static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
- dout("__remove_osd %p\n", osd);
- BUG_ON(!list_empty(&osd->o_requests));
- rb_erase(&osd->o_node, &osdc->osds);
+ dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
+ WARN_ON(!list_empty(&osd->o_requests));
+ WARN_ON(!list_empty(&osd->o_linger_requests));
+
list_del_init(&osd->o_osd_lru);
- ceph_con_close(&osd->o_con);
- put_osd(osd);
+ rb_erase(&osd->o_node, &osdc->osds);
+ RB_CLEAR_NODE(&osd->o_node);
+}
+
+static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+{
+ dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
+
+ if (!RB_EMPTY_NODE(&osd->o_node)) {
+ ceph_con_close(&osd->o_con);
+ __remove_osd(osdc, osd);
+ put_osd(osd);
+ }
}

static void remove_all_osds(struct ceph_osd_client *osdc)
@@ -989,7 +1001,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc)
while (!RB_EMPTY_ROOT(&osdc->osds)) {
struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
struct ceph_osd, o_node);
- __remove_osd(osdc, osd);
+ remove_osd(osdc, osd);
}
mutex_unlock(&osdc->request_mutex);
}
@@ -1019,7 +1031,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
if (time_before(jiffies, osd->lru_ttl))
break;
- __remove_osd(osdc, osd);
+ remove_osd(osdc, osd);
}
mutex_unlock(&osdc->request_mutex);
}
@@ -1034,8 +1046,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
if (list_empty(&osd->o_requests) &&
list_empty(&osd->o_linger_requests)) {
- __remove_osd(osdc, osd);
-
+ remove_osd(osdc, osd);
return -ENODEV;
}

@@ -1617,6 +1628,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
{
struct rb_node *p, *n;

+ dout("%s %p\n", __func__, osdc);
for (p = rb_first(&osdc->osds); p; p = n) {
struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);

diff --git a/net/core/dev.c b/net/core/dev.c
index ce67174..ce649f2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6663,10 +6663,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
oldsd->output_queue = NULL;
oldsd->output_queue_tailp = &oldsd->output_queue;
}
- /* Append NAPI poll list from offline CPU. */
- if (!list_empty(&oldsd->poll_list)) {
- list_splice_init(&oldsd->poll_list, &sd->poll_list);
- raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ /* Append NAPI poll list from offline CPU, with one exception :
+ * process_backlog() must be called by cpu owning percpu backlog.
+ * We properly handle process_queue & input_pkt_queue later.
+ */
+ while (!list_empty(&oldsd->poll_list)) {
+ struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
+ struct napi_struct,
+ poll_list);
+
+ list_del_init(&napi->poll_list);
+ if (napi->poll == process_backlog)
+ napi->state = 0;
+ else
+ ____napi_schedule(sd, napi);
}

raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -6677,7 +6687,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
netif_rx(skb);
input_queue_head_incr(oldsd);
}
- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
netif_rx(skb);
input_queue_head_incr(oldsd);
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a797fff..a104ba3 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2771,25 +2771,25 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;

+ pktgen_finalize_skb(pkt_dev, skb, datalen);
+
if (!(pkt_dev->flags & F_UDPCSUM)) {
skb->ip_summed = CHECKSUM_NONE;
} else if (odev->features & NETIF_F_V4_CSUM) {
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- udp4_hwcsum(skb, udph->source, udph->dest);
+ udp4_hwcsum(skb, iph->saddr, iph->daddr);
} else {
- __wsum csum = udp_csum(skb);
+ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), datalen + 8, 0);

/* add protocol-dependent pseudo-header */
- udph->check = csum_tcpudp_magic(udph->source, udph->dest,
+ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
datalen + 8, IPPROTO_UDP, csum);

if (udph->check == 0)
udph->check = CSUM_MANGLED_0;
}

- pktgen_finalize_skb(pkt_dev, skb, datalen);
-
#ifdef CONFIG_XFRM
if (!process_ipsec(pkt_dev, skb, protocol))
return NULL;
@@ -2905,6 +2905,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;

+ pktgen_finalize_skb(pkt_dev, skb, datalen);
+
if (!(pkt_dev->flags & F_UDPCSUM)) {
skb->ip_summed = CHECKSUM_NONE;
} else if (odev->features & NETIF_F_V6_CSUM) {
@@ -2913,7 +2915,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->csum_offset = offsetof(struct udphdr, check);
udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
} else {
- __wsum csum = udp_csum(skb);
+ __wsum csum = skb_checksum(skb, skb_transport_offset(skb), udplen, 0);

/* add protocol-dependent pseudo-header */
udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
@@ -2922,8 +2924,6 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
udph->check = CSUM_MANGLED_0;
}

- pktgen_finalize_skb(pkt_dev, skb, datalen);
-
return skb;
}
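
For context on the pktgen hunks above: pktgen_finalize_skb() is now called before the UDP checksum is computed, so the checksum covers the finalized payload rather than stale data (and the pseudo-header now uses the addresses from the IP header). A userspace sketch of why checksum-before-finalize is wrong, using a plain RFC 1071-style sum over a made-up buffer:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Plain RFC 1071-style 16-bit ones'-complement checksum. */
static uint16_t csum16(const uint8_t *p, size_t len)
{
        uint32_t sum = 0;

        while (len > 1) {
                sum += (uint32_t)p[0] << 8 | p[1];
                p += 2;
                len -= 2;
        }
        if (len)
                sum += (uint32_t)p[0] << 8;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint8_t payload[32];

        /* Old order: checksum first, finalize the payload afterwards. */
        memset(payload, 0, sizeof(payload));
        uint16_t stale = csum16(payload, sizeof(payload));
        memset(payload, 0xab, sizeof(payload)); /* "finalize" too late */

        /* New order: finalize first, then checksum what actually goes out. */
        uint16_t good = csum16(payload, sizeof(payload));

        printf("checksum computed too early: 0x%04x (stale)\n", stale);
        printf("checksum after finalizing:   0x%04x\n", good);
        return 0;
}
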

diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 7dca4b4..f48db99 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2542,12 +2542,16 @@ static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
goto errout;
}

+ if (!skb->len)
+ goto errout;
+
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return 0;
errout:
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
- rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+ if (err)
+ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
return err;
}

diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 0b8fe5b..3730214 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1461,24 +1461,8 @@ static int ip_reply_glue_bits(void *dptr, char *to, int offset,
/*
* Generic function to send a packet as reply to another packet.
* Used to send some TCP resets/acks so far.
- *
- * Use a fake percpu inet socket to avoid false sharing and contention.
*/
-static DEFINE_PER_CPU(struct inet_sock, unicast_sock) = {
- .sk = {
- .__sk_common = {
- .skc_refcnt = ATOMIC_INIT(1),
- },
- .sk_wmem_alloc = ATOMIC_INIT(1),
- .sk_allocation = GFP_ATOMIC,
- .sk_flags = (1UL << SOCK_USE_WRITE_QUEUE),
- .sk_pacing_rate = ~0U,
- },
- .pmtudisc = IP_PMTUDISC_WANT,
- .uc_ttl = -1,
-};
-
-void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
+void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, __be32 daddr,
__be32 saddr, const struct ip_reply_arg *arg,
unsigned int len)
{
@@ -1486,9 +1470,8 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
struct ipcm_cookie ipc;
struct flowi4 fl4;
struct rtable *rt = skb_rtable(skb);
+ struct net *net = sock_net(sk);
struct sk_buff *nskb;
- struct sock *sk;
- struct inet_sock *inet;
int err;

if (ip_options_echo(&replyopts.opt.opt, skb))
@@ -1518,15 +1501,11 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
if (IS_ERR(rt))
return;

- inet = &get_cpu_var(unicast_sock);
+ inet_sk(sk)->tos = arg->tos;

- inet->tos = arg->tos;
- sk = &inet->sk;
sk->sk_priority = skb->priority;
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
- sock_net_set(sk, net);
- __skb_queue_head_init(&sk->sk_write_queue);
sk->sk_sndbuf = sysctl_wmem_default;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
@@ -1542,13 +1521,10 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr,
arg->csumoffset) = csum_fold(csum_add(nskb->csum,
arg->csum));
nskb->ip_summed = CHECKSUM_NONE;
- skb_orphan(nskb);
skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
ip_push_pending_frames(sk, &fl4);
}
out:
- put_cpu_var(unicast_sock);
-
ip_rt_put(rt);
}

diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index ddf32a6..304cd16 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -428,15 +428,11 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)

memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
- sin->sin_family = AF_UNSPEC;
+ memset(sin, 0, sizeof(*sin));
if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP) {
- struct inet_sock *inet = inet_sk(sk);
-
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- sin->sin_port = 0;
- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
- if (inet->cmsg_flags)
+ if (inet_sk(sk)->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
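
For context on this hunk (and the matching net/ipv6/datagram.c change below): filling only some fields of a sockaddr that is later copied to userspace leaks whatever stack bytes were in the remaining fields and padding; zeroing the whole structure first does not. A userspace sketch with a hypothetical struct:

#include <stdio.h>
#include <string.h>

struct addr_out {                               /* hypothetical reply struct */
        short family;
        short port;
        char  pad[12];
};

static void fill_leaky(struct addr_out *out)
{
        out->family = 2;                        /* pad[] left untouched */
}

static void fill_safe(struct addr_out *out)
{
        memset(out, 0, sizeof(*out));           /* zero everything first */
        out->family = 2;
}

int main(void)
{
        struct addr_out a;

        memset(&a, 0x41, sizeof(a));            /* stand-in for stale stack data */
        fill_leaky(&a);
        printf("leaky pad byte: 0x%02x\n", (unsigned char)a.pad[0]);

        memset(&a, 0x41, sizeof(a));
        fill_safe(&a);
        printf("safe pad byte:  0x%02x\n", (unsigned char)a.pad[0]);
        return 0;
}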

diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 8e0f65c..8bba193 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -962,8 +962,11 @@ void ping_rcv(struct sk_buff *skb)

sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
if (sk != NULL) {
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
pr_debug("rcv on socket %p\n", sk);
- ping_queue_rcv_skb(sk, skb_get(skb));
+ if (skb2)
+ ping_queue_rcv_skb(sk, skb2);
sock_put(sk);
return;
}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 9b726c0..9f627b9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -690,7 +690,8 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)

net = dev_net(skb_dst(skb)->dev);
arg.tos = ip_hdr(skb)->tos;
- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
@@ -773,7 +774,8 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
- ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
+ ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
+ skb, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);

TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
@@ -2754,14 +2756,39 @@ struct proto tcp_prot = {
};
EXPORT_SYMBOL(tcp_prot);

+static void __net_exit tcp_sk_exit(struct net *net)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
+ free_percpu(net->ipv4.tcp_sk);
+}
+
static int __net_init tcp_sk_init(struct net *net)
{
+ int res, cpu;
+
+ net->ipv4.tcp_sk = alloc_percpu(struct sock *);
+ if (!net->ipv4.tcp_sk)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ struct sock *sk;
+
+ res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
+ IPPROTO_TCP, net);
+ if (res)
+ goto fail;
+ *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
+ }
net->ipv4.sysctl_tcp_ecn = 2;
return 0;
-}

-static void __net_exit tcp_sk_exit(struct net *net)
-{
+fail:
+ tcp_sk_exit(net);
+
+ return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
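
For context on the tcp_sk_init()/tcp_sk_exit() hunk above: one control socket is now created per possible CPU, and any mid-loop failure unwinds through the same teardown used on netns exit. A userspace sketch of that init/exit pairing -- tmpfile() is only a stand-in for inet_ctl_sock_create(), and the teardown tolerates slots that were never filled:

#include <stdio.h>

#define NCPUS 4                                 /* made-up CPU count */

static FILE *percpu_sk[NCPUS];                  /* one "socket" per CPU */

static void sk_exit_sketch(void)
{
        for (int cpu = 0; cpu < NCPUS; cpu++)
                if (percpu_sk[cpu])
                        fclose(percpu_sk[cpu]);
}

static int sk_init_sketch(void)
{
        for (int cpu = 0; cpu < NCPUS; cpu++) {
                percpu_sk[cpu] = tmpfile();     /* stand-in for inet_ctl_sock_create() */
                if (!percpu_sk[cpu])
                        goto fail;
        }
        return 0;
fail:
        sk_exit_sketch();                       /* same teardown as normal exit */
        return -1;
}

int main(void)
{
        if (sk_init_sketch())
                return 1;
        sk_exit_sketch();
        return 0;
}
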
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 7927db0..4a000f1 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
s_slot = cb->args[0];
num = s_num = cb->args[1];

- for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+ for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
struct sock *sk;
struct hlist_nulls_node *node;
struct udp_hslot *hslot = &table->hash[slot];

+ num = 0;
+
if (hlist_nulls_empty(&hslot->head))
continue;
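
For context on the udp_dump() hunk above: when a dump is resumed at slot s_slot / entry s_num, the per-chain counter must restart at 0 so that the "num < s_num" test actually skips what was already reported; if num starts at s_num, nothing is skipped and the first entries of that slot are reported twice. A userspace sketch with made-up slot and entry counts:

#include <stdio.h>

#define SLOTS    2                              /* made-up hash table */
#define PER_SLOT 4

static void dump(int s_slot, int s_num, int reset_per_slot)
{
        int slot, num = s_num;                  /* old code resumed with num == s_num */

        for (slot = s_slot; slot < SLOTS; s_num = 0, slot++) {
                if (reset_per_slot)
                        num = 0;                /* the fix */
                for (int entry = 0; entry < PER_SLOT; entry++) {
                        if (num < s_num) {      /* already dumped last time */
                                num++;
                                continue;
                        }
                        printf("  slot %d entry %d\n", slot, entry);
                        num++;
                }
        }
}

int main(void)
{
        printf("old (re-dumps entries 0 and 1 of slot 1):\n");
        dump(1, 2, 0);
        printf("fixed (skips them):\n");
        dump(1, 2, 1);
        return 0;
}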

diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 93b1aa3..53939a8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -373,11 +373,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)

memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
- sin->sin6_family = AF_UNSPEC;
+ memset(sin, 0, sizeof(*sin));
+
if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
sin->sin6_family = AF_INET6;
- sin->sin6_flowinfo = 0;
- sin->sin6_port = 0;
if (skb->protocol == htons(ETH_P_IPV6)) {
sin->sin6_addr = ipv6_hdr(skb)->saddr;
if (np->rxopt.all)
@@ -386,12 +385,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
ipv6_iface_scope_id(&sin->sin6_addr,
IP6CB(skb)->iif);
} else {
- struct inet_sock *inet = inet_sk(sk);
-
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin->sin6_addr);
- sin->sin6_scope_id = 0;
- if (inet->cmsg_flags)
+ if (inet_sk(sk)->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0caafb5..9502a60 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -110,7 +110,7 @@ static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
u32 *p = NULL;

if (!(rt->dst.flags & DST_HOST))
- return NULL;
+ return dst_cow_metrics_generic(dst, old);

peer = rt6_get_peer_create(rt);
if (peer) {
@@ -1142,12 +1142,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct net *net = dev_net(dst->dev);

rt6->rt6i_flags |= RTF_MODIFIED;
- if (mtu < IPV6_MIN_MTU) {
- u32 features = dst_metric(dst, RTAX_FEATURES);
+ if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- features |= RTAX_FEATURE_ALLFRAG;
- dst_metric_set(dst, RTAX_FEATURES, features);
- }
+
dst_metric_set(dst, RTAX_MTU, mtu);
rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
}
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ca22389..72cf5a3 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2608,7 +2608,7 @@ do_addr_param:

addr_param = param.v + sizeof(sctp_addip_param_t);

- af = sctp_get_af_specific(param_type2af(param.p->type));
+ af = sctp_get_af_specific(param_type2af(addr_param->p.type));
if (af == NULL)
break;

diff --git a/net/socket.c b/net/socket.c
index dc57dae..404be5e 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -885,9 +885,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
struct sock_iocb *siocb)
{
- if (!is_sync_kiocb(iocb))
- BUG();
-
siocb->kiocb = iocb;
iocb->private = siocb;
return siocb;
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 364cc64..051e358 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -286,6 +286,16 @@ static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
return tsp->smk_task;
}

+static inline struct smack_known *smk_of_task_struct(const struct task_struct *t)
+{
+ struct smack_known *skp;
+
+ rcu_read_lock();
+ skp = smk_of_task(__task_cred(t)->security);
+ rcu_read_unlock();
+ return skp;
+}
+
/*
* Present a pointer to the forked smack label entry in an task blob.
*/
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index b0be893..5b56590 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -43,8 +43,6 @@
#include <linux/binfmts.h>
#include "smack.h"

-#define task_security(task) (task_cred_xxx((task), security))
-
#define TRANS_TRUE "TRUE"
#define TRANS_TRUE_SIZE 4

@@ -181,7 +179,7 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
if (rc != 0)
return rc;

- skp = smk_of_task(task_security(ctp));
+ skp = smk_of_task_struct(ctp);
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, ctp);

@@ -207,7 +205,7 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
if (rc != 0)
return rc;

- skp = smk_of_task(task_security(ptp));
+ skp = smk_of_task_struct(ptp);
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, ptp);

@@ -1544,7 +1542,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
const char *caller)
{
struct smk_audit_info ad;
- struct smack_known *skp = smk_of_task(task_security(p));
+ struct smack_known *skp = smk_of_task_struct(p);

smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
@@ -1594,7 +1592,7 @@ static int smack_task_getsid(struct task_struct *p)
*/
static void smack_task_getsecid(struct task_struct *p, u32 *secid)
{
- struct smack_known *skp = smk_of_task(task_security(p));
+ struct smack_known *skp = smk_of_task_struct(p);

*secid = skp->smk_secid;
}
@@ -1701,7 +1699,7 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
{
struct smk_audit_info ad;
struct smack_known *skp;
- struct smack_known *tkp = smk_of_task(task_security(p));
+ struct smack_known *tkp = smk_of_task_struct(p);

smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
@@ -1749,7 +1747,7 @@ static int smack_task_wait(struct task_struct *p)
static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
{
struct inode_smack *isp = inode->i_security;
- struct smack_known *skp = smk_of_task(task_security(p));
+ struct smack_known *skp = smk_of_task_struct(p);

isp->smk_inode = skp->smk_known;
}
@@ -2878,7 +2876,7 @@ unlockandout:
*/
static int smack_getprocattr(struct task_struct *p, char *name, char **value)
{
- struct smack_known *skp = smk_of_task(task_security(p));
+ struct smack_known *skp = smk_of_task_struct(p);
char *cp;
int slen;

diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 566b0f6..ee24057 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1404,6 +1404,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state)
if (! snd_pcm_playback_empty(substream)) {
snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING);
snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING);
+ } else {
+ runtime->status->state = SNDRV_PCM_STATE_SETUP;
}
break;
case SNDRV_PCM_STATE_RUNNING:
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7f57874..deec881 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -270,7 +270,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
{
/* We currently only handle front, HP */
static hda_nid_t pins[] = {
- 0x0f, 0x10, 0x14, 0x15, 0
+ 0x0f, 0x10, 0x14, 0x15, 0x17, 0
};
hda_nid_t *p;
for (p = pins; *p; p++)
@@ -4449,6 +4449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
/* ALC282 */
SND_PCI_QUIRK(0x103c, 0x2191, "HP Touchsmart 14", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2192, "HP Touchsmart 15", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x220d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x220e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 7467559..2247c21 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -85,6 +85,7 @@ enum {
STAC_ALIENWARE_M17X,
STAC_92HD89XX_HP_FRONT_JACK,
STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK,
+ STAC_92HD73XX_ASUS_MOBO,
STAC_92HD73XX_MODELS
};

@@ -104,6 +105,8 @@ enum {
STAC_92HD83XXX_HP,
STAC_HP_ENVY_BASS,
STAC_HP_BNB13_EQ,
+ STAC_HP_ENVY_TS_BASS,
+ STAC_92HD83XXX_GPIO10_EAPD,
STAC_92HD83XXX_MODELS
};

@@ -1917,7 +1920,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
[STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = {
.type = HDA_FIXUP_PINS,
.v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs,
- }
+ },
+ [STAC_92HD73XX_ASUS_MOBO] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ /* enable 5.1 and SPDIF out */
+ { 0x0c, 0x01014411 },
+ { 0x0d, 0x01014410 },
+ { 0x0e, 0x01014412 },
+ { 0x22, 0x014b1180 },
+ { }
+ }
+ },
};

static const struct hda_model_fixup stac92hd73xx_models[] = {
@@ -1929,6 +1943,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = {
{ .id = STAC_DELL_M6_BOTH, .name = "dell-m6" },
{ .id = STAC_DELL_EQ, .name = "dell-eq" },
{ .id = STAC_ALIENWARE_M17X, .name = "alienware" },
+ { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" },
{}
};

@@ -1981,6 +1996,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
"HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
"unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10",
+ STAC_92HD73XX_ASUS_MOBO),
{} /* terminator */
};

@@ -2145,6 +2162,19 @@ static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
spec->headset_jack = 1;
}

+static void stac92hd83xxx_fixup_gpio10_eapd(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ struct sigmatel_spec *spec = codec->spec;
+
+ if (action != HDA_FIXUP_ACT_PRE_PROBE)
+ return;
+ spec->eapd_mask = spec->gpio_mask = spec->gpio_dir =
+ spec->gpio_data = 0x10;
+ spec->eapd_switch = 0;
+}
+
static const struct hda_verb hp_bnb13_eq_verbs[] = {
/* 44.1KHz base */
{ 0x22, 0x7A6, 0x3E },
@@ -2653,6 +2683,17 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
.chained = true,
.chain_id = STAC_92HD83XXX_HP_MIC_LED,
},
+ [STAC_HP_ENVY_TS_BASS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x10, 0x92170111 },
+ {}
+ },
+ },
+ [STAC_92HD83XXX_GPIO10_EAPD] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = stac92hd83xxx_fixup_gpio10_eapd,
+ },
};

static const struct hda_model_fixup stac92hd83xxx_models[] = {
@@ -2669,6 +2710,7 @@ static const struct hda_model_fixup stac92hd83xxx_models[] = {
{ .id = STAC_92HD83XXX_HEADSET_JACK, .name = "headset-jack" },
{ .id = STAC_HP_ENVY_BASS, .name = "hp-envy-bass" },
{ .id = STAC_HP_BNB13_EQ, .name = "hp-bnb13-eq" },
+ { .id = STAC_HP_ENVY_TS_BASS, .name = "hp-envy-ts-bass" },
{}
};

@@ -2724,6 +2766,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
"HP bNB13", STAC_HP_BNB13_EQ),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x190A,
"HP bNB13", STAC_HP_BNB13_EQ),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x190e,
+ "HP ENVY TS", STAC_HP_ENVY_TS_BASS),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1940,
"HP bNB13", STAC_HP_BNB13_EQ),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1941,
@@ -2855,6 +2899,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
"HP Mini", STAC_92HD83XXX_HP_LED),
SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91,
+ "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
{} /* terminator */
};

diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c
index 56cc891..d99c8d3 100644
--- a/sound/pci/riptide/riptide.c
+++ b/sound/pci/riptide/riptide.c
@@ -2032,32 +2032,43 @@ snd_riptide_joystick_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
static int dev;
struct gameport *gameport;
+ int ret;

if (dev >= SNDRV_CARDS)
return -ENODEV;
+
if (!enable[dev]) {
- dev++;
- return -ENOENT;
+ ret = -ENOENT;
+ goto inc_dev;
}

- if (!joystick_port[dev++])
- return 0;
+ if (!joystick_port[dev]) {
+ ret = 0;
+ goto inc_dev;
+ }

gameport = gameport_allocate_port();
- if (!gameport)
- return -ENOMEM;
+ if (!gameport) {
+ ret = -ENOMEM;
+ goto inc_dev;
+ }
if (!request_region(joystick_port[dev], 8, "Riptide gameport")) {
snd_printk(KERN_WARNING
"Riptide: cannot grab gameport 0x%x\n",
joystick_port[dev]);
gameport_free_port(gameport);
- return -EBUSY;
+ ret = -EBUSY;
+ goto inc_dev;
}

gameport->io = joystick_port[dev];
gameport_register_port(gameport);
pci_set_drvdata(pci, gameport);
- return 0;
+
+ ret = 0;
+inc_dev:
+ dev++;
+ return ret;
}

static void snd_riptide_joystick_remove(struct pci_dev *pci)
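
For context on the snd_riptide_joystick_probe() hunk above: every exit path now funnels through one label, so the static card index is read before it is advanced and is incremented exactly once per call, whatever the outcome. A userspace sketch of that single-exit pattern (ports and return values are made up):

#include <stdio.h>

static int probe_sketch(int enable, int port)
{
        static int dev;                         /* card index, as in the driver */
        int ret;

        if (!enable) {
                ret = -1;                       /* -ENOENT in the driver */
                goto inc_dev;
        }
        if (!port) {
                ret = 0;                        /* no joystick port configured */
                goto inc_dev;
        }
        printf("card %d: registering gameport at 0x%x\n", dev, port);
        ret = 0;
inc_dev:
        dev++;                                  /* advance exactly once, on every path */
        return ret;
}

int main(void)
{
        probe_sketch(0, 0x201);                 /* disabled card still consumes an index */
        probe_sketch(1, 0);
        probe_sketch(1, 0x201);
        return 0;
}
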
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index e98dc00..2116750 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -6102,6 +6102,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
64, 8192);
+ snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS,
+ 2, 2);
break;
}

@@ -6176,6 +6179,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
snd_pcm_hw_constraint_minmax(runtime,
SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
64, 8192);
+ snd_pcm_hw_constraint_minmax(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS,
+ 2, 2);
break;
}
