Re: Linux 4.13.13
From: Greg KH
Date: Wed Nov 15 2017 - 04:44:10 EST
diff --git a/Makefile b/Makefile
index a7c847f495b0..1608a9b71381 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 13
-SUBLEVEL = 12
+SUBLEVEL = 13
EXTRAVERSION =
NAME = Fearless Coyote
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 948c648fea00..0fcd82f01388 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -154,30 +154,26 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
set_fs(fs);
}
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
unsigned long addr = instruction_pointer(regs);
const int thumb = thumb_mode(regs);
const int width = thumb ? 4 : 8;
- mm_segment_t fs;
char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
int i;
/*
- * We need to switch to kernel mode so that we can use __get_user
- * to safely read from kernel space. Note that we now dump the
- * code first, just in case the backtrace kills us.
+ * Note that we now dump the code first, just in case the backtrace
+ * kills us.
*/
- fs = get_fs();
- set_fs(KERNEL_DS);
for (i = -4; i < 1 + !!thumb; i++) {
unsigned int val, bad;
if (thumb)
- bad = __get_user(val, &((u16 *)addr)[i]);
+ bad = get_user(val, &((u16 *)addr)[i]);
else
- bad = __get_user(val, &((u32 *)addr)[i]);
+ bad = get_user(val, &((u32 *)addr)[i]);
if (!bad)
p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
@@ -188,8 +184,20 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
}
}
printk("%sCode: %s\n", lvl, str);
+}
- set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+ mm_segment_t fs;
+
+ if (!user_mode(regs)) {
+ fs = get_fs();
+ set_fs(KERNEL_DS);
+ __dump_instr(lvl, regs);
+ set_fs(fs);
+ } else {
+ __dump_instr(lvl, regs);
+ }
}
#ifdef CONFIG_ARM_UNWIND
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index df7acea3747a..4674f1efbe7a 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -575,6 +575,7 @@ static int __init ar7_register_uarts(void)
uart_port.type = PORT_AR7;
uart_port.uartclk = clk_get_rate(bus_clk) / 2;
uart_port.iotype = UPIO_MEM32;
+ uart_port.flags = UPF_FIXED_TYPE;
uart_port.regshift = 2;
uart_port.line = 0;
@@ -653,6 +654,10 @@ static int __init ar7_register_devices(void)
u32 val;
int res;
+ res = ar7_gpio_init();
+ if (res)
+ pr_warn("unable to register gpios: %d\n", res);
+
res = ar7_register_uarts();
if (res)
pr_err("unable to setup uart(s): %d\n", res);
diff --git a/arch/mips/ar7/prom.c b/arch/mips/ar7/prom.c
index 4fd83336131a..dd53987a690f 100644
--- a/arch/mips/ar7/prom.c
+++ b/arch/mips/ar7/prom.c
@@ -246,8 +246,6 @@ void __init prom_init(void)
ar7_init_cmdline(fw_arg0, (char **)fw_arg1);
ar7_init_env((struct env_var *)fw_arg2);
console_config();
-
- ar7_gpio_init();
}
#define PORT(offset) (KSEG1ADDR(AR7_REGS_UART0 + (offset * 4)))
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index cfdbab015769..163317fd3d7e 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -240,8 +240,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
#define CM_GCR_BASE_GCRBASE_MSK (_ULCAST_(0x1ffff) << 15)
#define CM_GCR_BASE_CMDEFTGT_SHF 0
#define CM_GCR_BASE_CMDEFTGT_MSK (_ULCAST_(0x3) << 0)
-#define CM_GCR_BASE_CMDEFTGT_DISABLED 0
-#define CM_GCR_BASE_CMDEFTGT_MEM 1
+#define CM_GCR_BASE_CMDEFTGT_MEM 0
+#define CM_GCR_BASE_CMDEFTGT_RESERVED 1
#define CM_GCR_BASE_CMDEFTGT_IOCU0 2
#define CM_GCR_BASE_CMDEFTGT_IOCU1 3
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 1b070a76fcdd..5e0d87f4958f 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -589,11 +589,11 @@ void __init bmips_cpu_setup(void)
/* Flush and enable RAC */
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
- __raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index b42812e014c0..1fcc30ff9569 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -645,6 +645,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
hnow_r = hpte_new_to_old_r(hnow_r);
}
+
+ /*
+ * If the HPT is being resized, don't update the HPTE,
+ * instead let the guest retry after the resize operation is complete.
+ * The synchronization for hpte_setup_done test vs. set is provided
+ * by the HPTE lock.
+ */
+ if (!kvm->arch.hpte_setup_done)
+ goto out_unlock;
+
if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
rev->guest_rpte != hpte[2])
/* HPTE has been changed under us; let the guest retry */
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 9ecd9aea0b54..c059541743f0 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2688,11 +2688,14 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* Hard-disable interrupts, and check resched flag and signals.
* If we need to reschedule or deliver a signal, clean up
* and return without going into the guest(s).
+ * If the hpte_setup_done flag has been cleared, don't go into the
+ * guest because that means a HPT resize operation is in progress.
*/
local_irq_disable();
hard_irq_disable();
if (lazy_irq_pending() || need_resched() ||
- recheck_signals(&core_info)) {
+ recheck_signals(&core_info) ||
+ (!kvm_is_radix(vc->kvm) && !vc->kvm->arch.hpte_setup_done)) {
local_irq_enable();
vc->vcore_state = VCORE_INACTIVE;
/* Unlock all except the primary vcore */
@@ -3061,7 +3064,7 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
- int n_ceded, i;
+ int n_ceded, i, r;
struct kvmppc_vcore *vc;
struct kvm_vcpu *v;
@@ -3115,6 +3118,20 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
!signal_pending(current)) {
+ /* See if the HPT and VRMA are ready to go */
+ if (!kvm_is_radix(vcpu->kvm) &&
+ !vcpu->kvm->arch.hpte_setup_done) {
+ spin_unlock(&vc->lock);
+ r = kvmppc_hv_setup_htab_rma(vcpu);
+ spin_lock(&vc->lock);
+ if (r) {
+ kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+ vcpu->arch.ret = r;
+ break;
+ }
+ }
+
if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
kvmppc_vcore_end_preempt(vc);
@@ -3232,13 +3249,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
smp_mb();
- /* On the first time here, set up HTAB and VRMA */
- if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
- r = kvmppc_hv_setup_htab_rma(vcpu);
- if (r)
- goto out;
- }
-
flush_all_to_thread(current);
/* Save userspace EBB and other register values */
@@ -3286,7 +3296,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
mtspr(SPRN_VRSAVE, user_vrsave);
- out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
return r;
diff --git a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
index 93b945597ecf..7cfba738f104 100644
--- a/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha1-mb/sha1_mb_mgr_flush_avx2.S
@@ -157,8 +157,8 @@ LABEL skip_ %I
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -178,8 +178,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -235,8 +235,8 @@ ENTRY(sha1_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
index 8fe6338bcc84..16c4ccb1f154 100644
--- a/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
+++ b/arch/x86/crypto/sha256-mb/sha256_mb_mgr_flush_avx2.S
@@ -155,8 +155,8 @@ LABEL skip_ %I
.endr
# Find min length
- vmovdqa _lens+0*16(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens+0*16(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
@@ -176,8 +176,8 @@ LABEL skip_ %I
vpsubd %xmm2, %xmm0, %xmm0
vpsubd %xmm2, %xmm1, %xmm1
- vmovdqa %xmm0, _lens+0*16(state)
- vmovdqa %xmm1, _lens+1*16(state)
+ vmovdqu %xmm0, _lens+0*16(state)
+ vmovdqu %xmm1, _lens+1*16(state)
# "state" and "args" are the same address, arg1
# len is arg2
@@ -234,8 +234,8 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
jc .return_null
# Find min length
- vmovdqa _lens(state), %xmm0
- vmovdqa _lens+1*16(state), %xmm1
+ vmovdqu _lens(state), %xmm0
+ vmovdqu _lens+1*16(state), %xmm1
vpminud %xmm1, %xmm0, %xmm2 # xmm2 has {D,C,B,A}
vpalignr $8, %xmm2, %xmm3, %xmm3 # xmm3 has {x,x,D,C}
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 836877e2da22..cdf82492b770 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -21,7 +21,7 @@ obj-y += common.o
obj-y += rdrand.o
obj-y += match.o
obj-y += bugs.o
-obj-y += aperfmperf.o
+obj-$(CONFIG_CPU_FREQ) += aperfmperf.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o
diff --git a/arch/x86/kernel/cpu/aperfmperf.c b/arch/x86/kernel/cpu/aperfmperf.c
index 957813e0180d..0ee83321a313 100644
--- a/arch/x86/kernel/cpu/aperfmperf.c
+++ b/arch/x86/kernel/cpu/aperfmperf.c
@@ -42,6 +42,10 @@ static void aperfmperf_snapshot_khz(void *dummy)
s64 time_delta = ktime_ms_delta(now, s->time);
unsigned long flags;
+ /* Don't bother re-computing within the cache threshold time. */
+ if (time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
+ return;
+
local_irq_save(flags);
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
@@ -70,7 +74,6 @@ static void aperfmperf_snapshot_khz(void *dummy)
unsigned int arch_freq_get_on_cpu(int cpu)
{
- s64 time_delta;
unsigned int khz;
if (!cpu_khz)
@@ -79,12 +82,6 @@ unsigned int arch_freq_get_on_cpu(int cpu)
if (!static_cpu_has(X86_FEATURE_APERFMPERF))
return 0;
- /* Don't bother re-computing within the cache threshold time. */
- time_delta = ktime_ms_delta(ktime_get(), per_cpu(samples.time, cpu));
- khz = per_cpu(samples.khz, cpu);
- if (khz && time_delta < APERFMPERF_CACHE_THRESHOLD_MS)
- return khz;
-
smp_call_function_single(cpu, aperfmperf_snapshot_khz, NULL, 1);
khz = per_cpu(samples.khz, cpu);
if (khz)
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 510e69596278..6df621ae62a7 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -77,10 +77,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "microcode\t: 0x%x\n", c->microcode);
if (cpu_has(c, X86_FEATURE_TSC)) {
- unsigned int freq = arch_freq_get_on_cpu(cpu);
+ unsigned int freq = cpufreq_quick_get(cpu);
- if (!freq)
- freq = cpufreq_quick_get(cpu);
if (!freq)
freq = cpu_khz;
seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 54b9e89d4d6b..893fd8c849e2 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -193,6 +193,12 @@ static void smp_callin(void)
*/
smp_store_cpu_info(cpuid);
+ /*
+ * The topology information must be up to date before
+ * calibrate_delay() and notify_cpu_starting().
+ */
+ set_cpu_sibling_map(raw_smp_processor_id());
+
/*
* Get our bogomips.
* Update loops_per_jiffy in cpu_data. Previous call to
@@ -203,11 +209,6 @@ static void smp_callin(void)
cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
pr_debug("Stack at about %p\n", &cpuid);
- /*
- * This must be done before setting cpu_online_mask
- * or calling notify_cpu_starting.
- */
- set_cpu_sibling_map(raw_smp_processor_id());
wmb();
notify_cpu_starting(cpuid);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index bf54309b85da..b2157d4a5338 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -221,9 +221,6 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
if (fixup_exception(regs, trapnr))
return 0;
- if (fixup_bug(regs, trapnr))
- return 0;
-
tsk->thread.error_code = error_code;
tsk->thread.trap_nr = trapnr;
die(str, regs, error_code);
@@ -304,6 +301,13 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str,
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
+ /*
+ * WARN*()s end up here; fix them up before we call the
+ * notifier chain.
+ */
+ if (!user_mode(regs) && fixup_bug(regs, trapnr))
+ return;
+
if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
NOTIFY_STOP) {
cond_local_irq_enable(regs);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 796d96bb0821..ad2b925a808e 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1346,12 +1346,10 @@ void __init tsc_init(void)
unsigned long calibrate_delay_is_known(void)
{
int sibling, cpu = smp_processor_id();
- struct cpumask *mask = topology_core_cpumask(cpu);
+ int constant_tsc = cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC);
+ const struct cpumask *mask = topology_core_cpumask(cpu);
- if (!tsc_disabled && !cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
- return 0;
-
- if (!mask)
+ if (tsc_disabled || !constant_tsc || !mask)
return 0;
sibling = cpumask_any_but(mask, cpu);
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index 350f7096baac..7913b6921959 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -212,8 +212,8 @@ static void arch_perfmon_setup_counters(void)
eax.full = cpuid_eax(0xa);
/* Workaround for BIOS bugs in 6/15. Taken from perfmon2 */
- if (eax.split.version_id == 0 && __this_cpu_read(cpu_info.x86) == 6 &&
- __this_cpu_read(cpu_info.x86_model) == 15) {
+ if (eax.split.version_id == 0 && boot_cpu_data.x86 == 6 &&
+ boot_cpu_data.x86_model == 15) {
eax.split.version_id = 2;
eax.split.num_counters = 2;
eax.split.bit_width = 40;
diff --git a/crypto/ccm.c b/crypto/ccm.c
index 1ce37ae0ce56..0a083342ec8c 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -363,7 +363,7 @@ static int crypto_ccm_decrypt(struct aead_request *req)
unsigned int cryptlen = req->cryptlen;
u8 *authtag = pctx->auth_tag;
u8 *odata = pctx->odata;
- u8 *iv = req->iv;
+ u8 *iv = pctx->idata;
int err;
cryptlen -= authsize;
@@ -379,6 +379,8 @@ static int crypto_ccm_decrypt(struct aead_request *req)
if (req->src != req->dst)
dst = pctx->dst;
+ memcpy(iv, req->iv, 16);
+
skcipher_request_set_tfm(skreq, ctx->ctr);
skcipher_request_set_callback(skreq, pctx->flags,
crypto_ccm_decrypt_done, req);
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index 9c941947a063..3a3cb8624f41 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -440,9 +440,11 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
void *ignored)
{
acpi_status status;
+ acpi_event_status event_status;
struct acpi_gpe_event_info *gpe_event_info;
u32 gpe_enabled_count;
u32 gpe_index;
+ u32 gpe_number;
u32 i;
u32 j;
@@ -470,30 +472,40 @@ acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
gpe_event_info = &gpe_block->event_info[gpe_index];
+ gpe_number = gpe_block->block_base_number + gpe_index;
/*
* Ignore GPEs that have no corresponding _Lxx/_Exx method
- * and GPEs that are used to wake the system
+ * and GPEs that are used for wakeup
*/
- if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_NONE)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_HANDLER)
- || (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
- ACPI_GPE_DISPATCH_RAW_HANDLER)
+ if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) !=
+ ACPI_GPE_DISPATCH_METHOD)
|| (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
continue;
}
+ event_status = 0;
+ (void)acpi_hw_get_gpe_status(gpe_event_info,
+ &event_status);
+
status = acpi_ev_add_gpe_reference(gpe_event_info);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not enable GPE 0x%02X",
- gpe_index +
- gpe_block->block_base_number));
+ gpe_number));
continue;
}
+ gpe_event_info->flags |= ACPI_GPE_AUTO_ENABLED;
+
+ if (event_status & ACPI_EVENT_FLAG_STATUS_SET) {
+ ACPI_INFO(("GPE 0x%02X active on init",
+ gpe_number));
+ (void)acpi_ev_gpe_dispatch(gpe_block->node,
+ gpe_event_info,
+ gpe_number);
+ }
+
gpe_enabled_count++;
}
}
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 57718a3e029a..67c7c4ce276c 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -435,6 +435,14 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
*/
gpe_event_info->flags =
(ACPI_GPE_DISPATCH_NOTIFY | ACPI_GPE_LEVEL_TRIGGERED);
+ } else if (gpe_event_info->flags & ACPI_GPE_AUTO_ENABLED) {
+ /*
+ * A reference to this GPE has been added during the GPE block
+ * initialization, so drop it now to prevent the GPE from being
+ * permanently enabled and clear its ACPI_GPE_AUTO_ENABLED flag.
+ */
+ (void)acpi_ev_remove_gpe_reference(gpe_event_info);
+ gpe_event_info->flags &= ~ACPI_GPE_AUTO_ENABLED;
}
/*
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 70fd5502c284..b7bdf9d0f5c0 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2058,6 +2058,9 @@ int __init acpi_scan_init(void)
acpi_get_spcr_uart_addr();
}
+ acpi_gpe_apply_masked_gpes();
+ acpi_update_all_gpes();
+
mutex_lock(&acpi_scan_lock);
/*
* Enumerate devices in the ACPI namespace.
@@ -2082,9 +2085,6 @@ int __init acpi_scan_init(void)
}
}
- acpi_gpe_apply_masked_gpes();
- acpi_update_all_gpes();
-
acpi_scan_initialized = true;
out:
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index fa8243c5c062..59b78e42a58b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -160,6 +160,14 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
+static bool acpi_sleep_no_lps0;
+
+static int __init init_no_lps0(const struct dmi_system_id *d)
+{
+ acpi_sleep_no_lps0 = true;
+ return 0;
+}
+
static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
{
.callback = init_old_suspend_ordering,
@@ -343,6 +351,19 @@ static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
},
},
+ /*
+ * https://bugzilla.kernel.org/show_bug.cgi?id=196907
+ * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
+ * S0 Idle firmware interface.
+ */
+ {
+ .callback = init_no_lps0,
+ .ident = "Dell XPS13 9360",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
+ },
+ },
{},
};
@@ -485,6 +506,7 @@ static void acpi_pm_end(void)
}
#else /* !CONFIG_ACPI_SLEEP */
#define acpi_target_sleep_state ACPI_STATE_S0
+#define acpi_sleep_no_lps0 (false)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */
@@ -702,6 +724,12 @@ static int lps0_device_attach(struct acpi_device *adev,
if (lps0_device_handle)
return 0;
+ if (acpi_sleep_no_lps0) {
+ acpi_handle_info(adev->handle,
+ "Low Power S0 Idle interface disabled\n");
+ return 0;
+ }
+
if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
return 0;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index b008b6a98098..cf54a1cf8c55 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2692,7 +2692,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
* from the parent.
*/
page_count = (u32)calc_pages_for(0, length);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
result = PTR_ERR(pages);
pages = NULL;
@@ -2827,7 +2827,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
*/
size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
page_count = (u32)calc_pages_for(0, size);
- pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
+ pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail_stat_request;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4436d53ae16c..f160a66b7098 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -722,7 +722,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
* allocation taken by fbdev
*/
if (!(dev_priv->capabilities & SVGA_CAP_3D))
- mem_size *= 2;
+ mem_size *= 3;
dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
dev_priv->prim_bb_mem =
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index a6b762271a40..47a9696e7874 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1253,6 +1253,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0605", 0 },
{ "ELAN0609", 0 },
{ "ELAN060B", 0 },
+ { "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN1000", 0 },
{ }
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index cf7c18947189..d065c0e2d18e 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -178,7 +178,6 @@ static int c_can_pci_probe(struct pci_dev *pdev,
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
break;
default:
ret = -EINVAL;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index e36d10520e24..717530eac70c 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -320,7 +320,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
break;
case BOSCH_D_CAN:
priv->regs = reg_map_d_can;
- priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
priv->read_reg = c_can_plat_read_reg_aligned_to_16bit;
priv->write_reg = c_can_plat_write_reg_aligned_to_16bit;
priv->read_reg32 = d_can_plat_read_reg32;
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 4d1fe8d95042..2772d05ff11c 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -670,9 +670,9 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
priv->base + IFI_CANFD_FTIME);
/* Configure transmitter delay */
- tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
- writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
- priv->base + IFI_CANFD_TDELAY);
+ tdc = dbt->brp * (dbt->prop_seg + dbt->phase_seg1);
+ tdc &= IFI_CANFD_TDELAY_MASK;
+ writel(IFI_CANFD_TDELAY_EN | tdc, priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index 51c2d182a33a..b4efd711f824 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -29,14 +29,19 @@
#include "peak_canfd_user.h"
MODULE_AUTHOR("Stephane Grosjean <s.grosjean@xxxxxxxxxxxxxxx>");
-MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe FD family cards");
-MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe FD CAN cards");
+MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards");
+MODULE_SUPPORTED_DEVICE("PEAK PCAN PCIe/M.2 FD CAN cards");
MODULE_LICENSE("GPL v2");
#define PCIEFD_DRV_NAME "peak_pciefd"
#define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */
#define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */
+#define PCAN_CPCIEFD_ID 0x0014 /* for Compact-PCI Serial slot cards */
+#define PCAN_PCIE104FD_ID 0x0017 /* for PCIe-104 Express slot cards */
+#define PCAN_MINIPCIEFD_ID 0x0018 /* for mini-PCIe slot cards */
+#define PCAN_PCIEFD_OEM_ID 0x0019 /* for PCIe slot OEM cards */
+#define PCAN_M2_ID 0x001a /* for M2 slot cards */
/* PEAK PCIe board access description */
#define PCIEFD_BAR0_SIZE (64 * 1024)
@@ -203,6 +208,11 @@ struct pciefd_board {
/* supported device ids. */
static const struct pci_device_id peak_pciefd_tbl[] = {
{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+ {PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
{0,}
};
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c
index b0c80859f746..1ac2090a1721 100644
--- a/drivers/net/can/sun4i_can.c
+++ b/drivers/net/can/sun4i_can.c
@@ -539,6 +539,13 @@ static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status)
}
stats->rx_over_errors++;
stats->rx_errors++;
+
+ /* reset the CAN IP by entering reset mode
+ * ignoring timeout error
+ */
+ set_reset_mode(dev);
+ set_normal_mode(dev);
+
/* clear bit */
sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG);
}
@@ -653,8 +660,9 @@ static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id)
netif_wake_queue(dev);
can_led_event(dev, CAN_LED_EVENT_TX);
}
- if (isrc & SUN4I_INT_RBUF_VLD) {
- /* receive interrupt */
+ if ((isrc & SUN4I_INT_RBUF_VLD) &&
+ !(isrc & SUN4I_INT_DATA_OR)) {
+ /* receive interrupt - don't read if overrun occurred */
while (status & SUN4I_STA_RBUF_RDY) {
/* RX buffer is not empty */
sun4i_can_rx(dev);
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 2fcbaec8b368..71eddf645566 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -775,7 +775,7 @@ typedef u32 acpi_event_status;
* | | | | +-- Type of dispatch:to method, handler, notify, or none
* | | | +----- Interrupt type: edge or level triggered
* | | +------- Is a Wake GPE
- * | +--------- Is GPE masked by the software GPE masking mechanism
+ * | +--------- Has been enabled automatically at init time
* +------------ <Reserved>
*/
#define ACPI_GPE_DISPATCH_NONE (u8) 0x00
@@ -791,6 +791,7 @@ typedef u32 acpi_event_status;
#define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x08
#define ACPI_GPE_CAN_WAKE (u8) 0x10
+#define ACPI_GPE_AUTO_ENABLED (u8) 0x20
/*
* Flags for GPE and Lock interfaces
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 48407569585d..a7c2cee39570 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -17,7 +17,6 @@
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/atomic.h>
-#include <linux/rhashtable.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/nf_conntrack_dccp.h>
@@ -83,7 +82,7 @@ struct nf_conn {
possible_net_t ct_net;
#if IS_ENABLED(CONFIG_NF_NAT)
- struct rhlist_head nat_bysource;
+ struct hlist_node nat_bysource;
#endif
/* all members below initialized via memset */
u8 __nfct_init_offset[0];
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index 05c82a1a4267..b71701302e61 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -1,6 +1,5 @@
#ifndef _NF_NAT_H
#define _NF_NAT_H
-#include <linux/rhashtable.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter/nf_nat.h>
#include <net/netfilter/nf_conntrack_tuple.h>
diff --git a/include/sound/seq_kernel.h b/include/sound/seq_kernel.h
index feb58d455560..4b9ee3009aa0 100644
--- a/include/sound/seq_kernel.h
+++ b/include/sound/seq_kernel.h
@@ -49,7 +49,8 @@ typedef union snd_seq_timestamp snd_seq_timestamp_t;
#define SNDRV_SEQ_DEFAULT_CLIENT_EVENTS 200
/* max delivery path length */
-#define SNDRV_SEQ_MAX_HOPS 10
+/* NOTE: this shouldn't be greater than MAX_LOCKDEP_SUBCLASSES */
+#define SNDRV_SEQ_MAX_HOPS 8
/* max size of event size */
#define SNDRV_SEQ_MAX_EVENT_LEN 0x3fffffff
diff --git a/include/sound/timer.h b/include/sound/timer.h
index c4d76ff056c6..7ae226ab6990 100644
--- a/include/sound/timer.h
+++ b/include/sound/timer.h
@@ -90,6 +90,8 @@ struct snd_timer {
struct list_head ack_list_head;
struct list_head sack_list_head; /* slow ack list head */
struct tasklet_struct task_queue;
+ int max_instances; /* upper limit of timer instances */
+ int num_instances; /* current number of timer instances */
};
struct snd_timer_instance {
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 8635417c587b..29fa81f0f51a 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -9,6 +9,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include <linux/preempt.h>
struct worker_pool;
@@ -59,7 +60,7 @@ struct worker {
*/
static inline struct worker *current_wq_worker(void)
{
- if (current->flags & PF_WQ_WORKER)
+ if (in_task() && (current->flags & PF_WQ_WORKER))
return kthread_data(current);
return NULL;
}
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index fef5d2e114be..1ef0cec38d78 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -228,7 +228,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
hdr = 2;
/* Extract a tag from the data */
- if (unlikely(dp >= datalen - 1))
+ if (unlikely(datalen - dp < 2))
goto data_overrun_error;
tag = data[dp++];
if (unlikely((tag & 0x1f) == ASN1_LONG_TAG))
@@ -274,7 +274,7 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
int n = len - 0x80;
if (unlikely(n > 2))
goto length_too_long;
- if (unlikely(dp >= datalen - n))
+ if (unlikely(n > datalen - dp))
goto data_overrun_error;
hdr += n;
for (len = 0; n > 0; n--) {
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index b1d3740ae36a..2fb80a4bfb34 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -30,19 +30,17 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>
+static DEFINE_SPINLOCK(nf_nat_lock);
+
static DEFINE_MUTEX(nf_nat_proto_mutex);
static const struct nf_nat_l3proto __rcu *nf_nat_l3protos[NFPROTO_NUMPROTO]
__read_mostly;
static const struct nf_nat_l4proto __rcu **nf_nat_l4protos[NFPROTO_NUMPROTO]
__read_mostly;
-struct nf_nat_conn_key {
- const struct net *net;
- const struct nf_conntrack_tuple *tuple;
- const struct nf_conntrack_zone *zone;
-};
-
-static struct rhltable nf_nat_bysource_table;
+static struct hlist_head *nf_nat_bysource __read_mostly;
+static unsigned int nf_nat_htable_size __read_mostly;
+static unsigned int nf_nat_hash_rnd __read_mostly;
inline const struct nf_nat_l3proto *
__nf_nat_l3proto_find(u8 family)
@@ -118,17 +116,19 @@ int nf_xfrm_me_harder(struct net *net, struct sk_buff *skb, unsigned int family)
EXPORT_SYMBOL(nf_xfrm_me_harder);
#endif /* CONFIG_XFRM */
-static u32 nf_nat_bysource_hash(const void *data, u32 len, u32 seed)
+/* We keep an extra hash for each conntrack, for fast searching. */
+static unsigned int
+hash_by_src(const struct net *n, const struct nf_conntrack_tuple *tuple)
{
- const struct nf_conntrack_tuple *t;
- const struct nf_conn *ct = data;
+ unsigned int hash;
+
+ get_random_once(&nf_nat_hash_rnd, sizeof(nf_nat_hash_rnd));
- t = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
/* Original src, to ensure we map it consistently if poss. */
+ hash = jhash2((u32 *)&tuple->src, sizeof(tuple->src) / sizeof(u32),
+ tuple->dst.protonum ^ nf_nat_hash_rnd ^ net_hash_mix(n));
- seed ^= net_hash_mix(nf_ct_net(ct));
- return jhash2((const u32 *)&t->src, sizeof(t->src) / sizeof(u32),
- t->dst.protonum ^ seed);
+ return reciprocal_scale(hash, nf_nat_htable_size);
}
/* Is this tuple already taken? (not by us) */
@@ -184,28 +184,6 @@ same_src(const struct nf_conn *ct,
t->src.u.all == tuple->src.u.all);
}
-static int nf_nat_bysource_cmp(struct rhashtable_compare_arg *arg,
- const void *obj)
-{
- const struct nf_nat_conn_key *key = arg->key;
- const struct nf_conn *ct = obj;
-
- if (!same_src(ct, key->tuple) ||
- !net_eq(nf_ct_net(ct), key->net) ||
- !nf_ct_zone_equal(ct, key->zone, IP_CT_DIR_ORIGINAL))
- return 1;
-
- return 0;
-}
-
-static struct rhashtable_params nf_nat_bysource_params = {
- .head_offset = offsetof(struct nf_conn, nat_bysource),
- .obj_hashfn = nf_nat_bysource_hash,
- .obj_cmpfn = nf_nat_bysource_cmp,
- .nelem_hint = 256,
- .min_size = 1024,
-};
-
/* Only called for SRC manip */
static int
find_appropriate_src(struct net *net,
@@ -216,26 +194,22 @@ find_appropriate_src(struct net *net,
struct nf_conntrack_tuple *result,
const struct nf_nat_range *range)
{
+ unsigned int h = hash_by_src(net, tuple);
const struct nf_conn *ct;
- struct nf_nat_conn_key key = {
- .net = net,
- .tuple = tuple,
- .zone = zone
- };
- struct rhlist_head *hl, *h;
-
- hl = rhltable_lookup(&nf_nat_bysource_table, &key,
- nf_nat_bysource_params);
- rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
- nf_ct_invert_tuplepr(result,
- &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
- result->dst = tuple->dst;
-
- if (in_range(l3proto, l4proto, result, range))
- return 1;
+ hlist_for_each_entry_rcu(ct, &nf_nat_bysource[h], nat_bysource) {
+ if (same_src(ct, tuple) &&
+ net_eq(net, nf_ct_net(ct)) &&
+ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL)) {
+ /* Copy source part from reply tuple. */
+ nf_ct_invert_tuplepr(result,
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ result->dst = tuple->dst;
+
+ if (in_range(l3proto, l4proto, result, range))
+ return 1;
+ }
}
-
return 0;
}
@@ -408,6 +382,7 @@ nf_nat_setup_info(struct nf_conn *ct,
const struct nf_nat_range *range,
enum nf_nat_manip_type maniptype)
{
+ struct net *net = nf_ct_net(ct);
struct nf_conntrack_tuple curr_tuple, new_tuple;
/* Can't setup nat info for confirmed ct. */
@@ -447,19 +422,14 @@ nf_nat_setup_info(struct nf_conn *ct,
}
if (maniptype == NF_NAT_MANIP_SRC) {
- struct nf_nat_conn_key key = {
- .net = nf_ct_net(ct),
- .tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- .zone = nf_ct_zone(ct),
- };
- int err;
-
- err = rhltable_insert_key(&nf_nat_bysource_table,
- &key,
- &ct->nat_bysource,
- nf_nat_bysource_params);
- if (err)
- return NF_DROP;
+ unsigned int srchash;
+
+ srchash = hash_by_src(net,
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ spin_lock_bh(&nf_nat_lock);
+ hlist_add_head_rcu(&ct->nat_bysource,
+ &nf_nat_bysource[srchash]);
+ spin_unlock_bh(&nf_nat_lock);
}
/* It's done. */
@@ -568,8 +538,9 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
* will delete entry from already-freed table.
*/
clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
- rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
- nf_nat_bysource_params);
+ spin_lock_bh(&nf_nat_lock);
+ hlist_del_rcu(&ct->nat_bysource);
+ spin_unlock_bh(&nf_nat_lock);
/* don't delete conntrack. Although that would make things a lot
* simpler, we'd end up flushing all conntracks on nat rmmod.
@@ -697,9 +668,11 @@ EXPORT_SYMBOL_GPL(nf_nat_l3proto_unregister);
/* No one using conntrack by the time this called. */
static void nf_nat_cleanup_conntrack(struct nf_conn *ct)
{
- if (ct->status & IPS_SRC_NAT_DONE)
- rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
- nf_nat_bysource_params);
+ if (ct->status & IPS_SRC_NAT_DONE) {
+ spin_lock_bh(&nf_nat_lock);
+ hlist_del_rcu(&ct->nat_bysource);
+ spin_unlock_bh(&nf_nat_lock);
+ }
}
static struct nf_ct_ext_type nat_extend __read_mostly = {
@@ -823,13 +796,16 @@ static int __init nf_nat_init(void)
{
int ret;
- ret = rhltable_init(&nf_nat_bysource_table, &nf_nat_bysource_params);
- if (ret)
- return ret;
+ /* Leave them the same for the moment. */
+ nf_nat_htable_size = nf_conntrack_htable_size;
+
+ nf_nat_bysource = nf_ct_alloc_hashtable(&nf_nat_htable_size, 0);
+ if (!nf_nat_bysource)
+ return -ENOMEM;
ret = nf_ct_extend_register(&nat_extend);
if (ret < 0) {
- rhltable_destroy(&nf_nat_bysource_table);
+ nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
printk(KERN_ERR "nf_nat_core: Unable to register extension\n");
return ret;
}
@@ -863,8 +839,8 @@ static void __exit nf_nat_cleanup(void)
for (i = 0; i < NFPROTO_NUMPROTO; i++)
kfree(nf_nat_l4protos[i]);
-
- rhltable_destroy(&nf_nat_bysource_table);
+ synchronize_net();
+ nf_ct_free_hashtable(nf_nat_bysource, nf_nat_htable_size);
}
MODULE_LICENSE("GPL");
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 0fa01d772c5e..9c0d5a7ce5f9 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -643,7 +643,6 @@ nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
{
if (desc->size) {
switch (desc->klen) {
- case 2:
case 4:
return &nft_hash_fast_ops;
default:
diff --git a/sound/core/hrtimer.c b/sound/core/hrtimer.c
index 1ac0c423903e..6e47b823bcaa 100644
--- a/sound/core/hrtimer.c
+++ b/sound/core/hrtimer.c
@@ -159,6 +159,7 @@ static int __init snd_hrtimer_init(void)
timer->hw = hrtimer_hw;
timer->hw.resolution = resolution;
timer->hw.ticks = NANO_SEC / resolution;
+ timer->max_instances = 100; /* lower the limit */
err = snd_timer_global_register(timer);
if (err < 0) {
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index aaff9ee32695..b30b2139e3f0 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -612,9 +612,7 @@ send_midi_event(struct seq_oss_devinfo *dp, struct snd_seq_event *ev, struct seq
if (!dp->timer->running)
len = snd_seq_oss_timer_start(dp->timer);
if (ev->type == SNDRV_SEQ_EVENT_SYSEX) {
- if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
- snd_seq_oss_readq_puts(dp->readq, mdev->seq_device,
- ev->data.ext.ptr, ev->data.ext.len);
+ snd_seq_oss_readq_sysex(dp->readq, mdev->seq_device, ev);
} else {
len = snd_midi_event_decode(mdev->coder, msg, sizeof(msg), ev);
if (len > 0)
diff --git a/sound/core/seq/oss/seq_oss_readq.c b/sound/core/seq/oss/seq_oss_readq.c
index 046cb586fb2f..06b21226b4e7 100644
--- a/sound/core/seq/oss/seq_oss_readq.c
+++ b/sound/core/seq/oss/seq_oss_readq.c
@@ -117,6 +117,35 @@ snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, in
return 0;
}
+/*
+ * put MIDI sysex bytes; the event buffer may be chained, thus it has
+ * to be expanded via snd_seq_dump_var_event().
+ */
+struct readq_sysex_ctx {
+ struct seq_oss_readq *readq;
+ int dev;
+};
+
+static int readq_dump_sysex(void *ptr, void *buf, int count)
+{
+ struct readq_sysex_ctx *ctx = ptr;
+
+ return snd_seq_oss_readq_puts(ctx->readq, ctx->dev, buf, count);
+}
+
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev)
+{
+ struct readq_sysex_ctx ctx = {
+ .readq = q,
+ .dev = dev
+ };
+
+ if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
+ return 0;
+ return snd_seq_dump_var_event(ev, readq_dump_sysex, &ctx);
+}
+
/*
* copy an event to input queue:
* return zero if enqueued
diff --git a/sound/core/seq/oss/seq_oss_readq.h b/sound/core/seq/oss/seq_oss_readq.h
index f1463f1f449e..8d033ca2d23f 100644
--- a/sound/core/seq/oss/seq_oss_readq.h
+++ b/sound/core/seq/oss/seq_oss_readq.h
@@ -44,6 +44,8 @@ void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
void snd_seq_oss_readq_clear(struct seq_oss_readq *readq);
unsigned int snd_seq_oss_readq_poll(struct seq_oss_readq *readq, struct file *file, poll_table *wait);
int snd_seq_oss_readq_puts(struct seq_oss_readq *readq, int dev, unsigned char *data, int len);
+int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev,
+ struct snd_seq_event *ev);
int snd_seq_oss_readq_put_event(struct seq_oss_readq *readq, union evrec *ev);
int snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *readq, unsigned long curt, int seq_mode);
int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
diff --git a/sound/core/timer.c b/sound/core/timer.c
index a9b9a277e00c..c8e4d0da13b4 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -180,7 +180,7 @@ static void snd_timer_request(struct snd_timer_id *tid)
*
* call this with register_mutex down.
*/
-static void snd_timer_check_slave(struct snd_timer_instance *slave)
+static int snd_timer_check_slave(struct snd_timer_instance *slave)
{
struct snd_timer *timer;
struct snd_timer_instance *master;
@@ -190,16 +190,21 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave)
list_for_each_entry(master, &timer->open_list_head, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
+ if (master->timer->num_instances >=
+ master->timer->max_instances)
+ return -EBUSY;
list_move_tail(&slave->open_list,
&master->slave_list_head);
+ master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
slave->master = master;
slave->timer = master->timer;
spin_unlock_irq(&slave_active_lock);
- return;
+ return 0;
}
}
}
+ return 0;
}
/*
@@ -208,7 +213,7 @@ static void snd_timer_check_slave(struct snd_timer_instance *slave)
*
* call this with register_mutex down.
*/
-static void snd_timer_check_master(struct snd_timer_instance *master)
+static int snd_timer_check_master(struct snd_timer_instance *master)
{
struct snd_timer_instance *slave, *tmp;
@@ -216,7 +221,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
list_for_each_entry_safe(slave, tmp, &snd_timer_slave_list, open_list) {
if (slave->slave_class == master->slave_class &&
slave->slave_id == master->slave_id) {
+ if (master->timer->num_instances >=
+ master->timer->max_instances)
+ return -EBUSY;
list_move_tail(&slave->open_list, &master->slave_list_head);
+ master->timer->num_instances++;
spin_lock_irq(&slave_active_lock);
spin_lock(&master->timer->lock);
slave->master = master;
@@ -228,8 +237,11 @@ static void snd_timer_check_master(struct snd_timer_instance *master)
spin_unlock_irq(&slave_active_lock);
}
}
+ return 0;
}
+static int snd_timer_close_locked(struct snd_timer_instance *timeri);
+
/*
* open a timer instance
* when opening a master, the slave id must be here given.
@@ -240,6 +252,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
{
struct snd_timer *timer;
struct snd_timer_instance *timeri = NULL;
+ int err;
if (tid->dev_class == SNDRV_TIMER_CLASS_SLAVE) {
/* open a slave instance */
@@ -259,10 +272,14 @@ int snd_timer_open(struct snd_timer_instance **ti,
timeri->slave_id = tid->device;
timeri->flags |= SNDRV_TIMER_IFLG_SLAVE;
list_add_tail(&timeri->open_list, &snd_timer_slave_list);
- snd_timer_check_slave(timeri);
+ err = snd_timer_check_slave(timeri);
+ if (err < 0) {
+ snd_timer_close_locked(timeri);
+ timeri = NULL;
+ }
mutex_unlock(&register_mutex);
*ti = timeri;
- return 0;
+ return err;
}
/* open a master instance */
@@ -288,6 +305,10 @@ int snd_timer_open(struct snd_timer_instance **ti,
return -EBUSY;
}
}
+ if (timer->num_instances >= timer->max_instances) {
+ mutex_unlock(&register_mutex);
+ return -EBUSY;
+ }
timeri = snd_timer_instance_new(owner, timer);
if (!timeri) {
mutex_unlock(&register_mutex);
@@ -314,25 +335,27 @@ int snd_timer_open(struct snd_timer_instance **ti,
}
list_add_tail(&timeri->open_list, &timer->open_list_head);
- snd_timer_check_master(timeri);
+ timer->num_instances++;
+ err = snd_timer_check_master(timeri);
+ if (err < 0) {
+ snd_timer_close_locked(timeri);
+ timeri = NULL;
+ }
mutex_unlock(&register_mutex);
*ti = timeri;
- return 0;
+ return err;
}
EXPORT_SYMBOL(snd_timer_open);
/*
* close a timer instance
+ * call this with register_mutex down.
*/
-int snd_timer_close(struct snd_timer_instance *timeri)
+static int snd_timer_close_locked(struct snd_timer_instance *timeri)
{
struct snd_timer *timer = NULL;
struct snd_timer_instance *slave, *tmp;
- if (snd_BUG_ON(!timeri))
- return -ENXIO;
-
- mutex_lock(&register_mutex);
list_del(&timeri->open_list);
/* force to stop the timer */
@@ -340,6 +363,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
timer = timeri->timer;
if (timer) {
+ timer->num_instances--;
/* wait, until the active callback is finished */
spin_lock_irq(&timer->lock);
while (timeri->flags & SNDRV_TIMER_IFLG_CALLBACK) {
@@ -355,6 +379,7 @@ int snd_timer_close(struct snd_timer_instance *timeri)
list_for_each_entry_safe(slave, tmp, &timeri->slave_list_head,
open_list) {
list_move_tail(&slave->open_list, &snd_timer_slave_list);
+ timer->num_instances--;
slave->master = NULL;
slave->timer = NULL;
list_del_init(&slave->ack_list);
@@ -382,9 +407,24 @@ int snd_timer_close(struct snd_timer_instance *timeri)
module_put(timer->module);
}
- mutex_unlock(&register_mutex);
return 0;
}
+
+/*
+ * close a timer instance
+ */
+int snd_timer_close(struct snd_timer_instance *timeri)
+{
+ int err;
+
+ if (snd_BUG_ON(!timeri))
+ return -ENXIO;
+
+ mutex_lock(&register_mutex);
+ err = snd_timer_close_locked(timeri);
+ mutex_unlock(&register_mutex);
+ return err;
+}
EXPORT_SYMBOL(snd_timer_close);
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri)
@@ -855,6 +895,7 @@ int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid,
spin_lock_init(&timer->lock);
tasklet_init(&timer->task_queue, snd_timer_tasklet,
(unsigned long)timer);
+ timer->max_instances = 1000; /* default limit per timer */
if (card != NULL) {
timer->module = card->module;
err = snd_device_new(card, SNDRV_DEV_TIMER, timer, &ops);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index fe4d06398fc3..c5f0e8d42d22 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6527,6 +6527,11 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x14, 0x90170110},
{0x1b, 0x90a70130},
{0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x12, 0xb7a60130},
+ {0x13, 0xb8a61140},
+ {0x16, 0x90170110},
+ {0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
{0x12, 0x90a60130},
{0x14, 0x90170110},
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 0e54fe490458..f910c4fd932b 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1373,6 +1373,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case 0x199:
return SNDRV_PCM_FMTBIT_DSD_U32_LE;
case 0x19b:
+ case 0x203:
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
default:
break;