Re: Linux 5.4.37
From: Greg KH
Date: Sat May 02 2020 - 03:15:15 EST
diff --git a/Makefile b/Makefile
index 947bf9e3a954..33690285d6aa 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 36
+SUBLEVEL = 37
EXTRAVERSION =
NAME = Kleptomaniac Octopus
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi
index 90125ce19a1b..50c64146d492 100644
--- a/arch/arm/boot/dts/bcm283x.dtsi
+++ b/arch/arm/boot/dts/bcm283x.dtsi
@@ -488,6 +488,7 @@
"dsi0_ddr2",
"dsi0_ddr";
+ status = "disabled";
};
thermal: thermal@7e212000 {
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6e919fafb43d..9b68f1b3915e 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -49,7 +49,9 @@
#ifndef CONFIG_BROKEN_GAS_INST
#ifdef __ASSEMBLY__
-#define __emit_inst(x) .inst (x)
+// The space separator is omitted so that __emit_inst(x) can be parsed as
+// either an assembler directive or an assembler macro argument.
+#define __emit_inst(x) .inst(x)
#else
#define __emit_inst(x) ".inst " __stringify((x)) "\n\t"
#endif
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index 61f2b0412345..ccba63aaeb47 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -133,7 +133,7 @@ void diag_stat_inc(enum diag_stat_enum nr)
}
EXPORT_SYMBOL(diag_stat_inc);
-void diag_stat_inc_norecursion(enum diag_stat_enum nr)
+void notrace diag_stat_inc_norecursion(enum diag_stat_enum nr)
{
this_cpu_inc(diag_stat.counter[nr]);
trace_s390_diagnose_norecursion(diag_map[nr].code);
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index f468a10e5206..66bf050d785c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -403,7 +403,7 @@ int smp_find_processor_id(u16 address)
return -1;
}
-bool arch_vcpu_is_preempted(int cpu)
+bool notrace arch_vcpu_is_preempted(int cpu)
{
if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
return false;
@@ -413,7 +413,7 @@ bool arch_vcpu_is_preempted(int cpu)
}
EXPORT_SYMBOL(arch_vcpu_is_preempted);
-void smp_yield_cpu(int cpu)
+void notrace smp_yield_cpu(int cpu)
{
if (MACHINE_HAS_DIAG9C) {
diag_stat_inc_norecursion(DIAG_STAT_X09C);
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 490b52e85014..11a669f3cc93 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -14,7 +14,7 @@ EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
-void trace_s390_diagnose_norecursion(int diag_nr)
+void notrace trace_s390_diagnose_norecursion(int diag_nr)
{
unsigned long flags;
unsigned int *depth;
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index fbe97ab2e228..743f257cf2cb 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -115,7 +115,6 @@ static struct irq_chip zpci_irq_chip = {
.name = "PCI-MSI",
.irq_unmask = pci_msi_unmask_irq,
.irq_mask = pci_msi_mask_irq,
- .irq_set_affinity = zpci_set_irq_affinity,
};
static void zpci_handle_cpu_local_irq(bool rescan)
@@ -276,7 +275,9 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
rc = -EIO;
if (hwirq - bit >= msi_vecs)
break;
- irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE, msi->affinity);
+ irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
+ (irq_delivery == DIRECTED) ?
+ msi->affinity : NULL);
if (irq < 0)
return -ENOMEM;
rc = irq_set_msi_desc(irq, msi);
diff --git a/arch/um/Makefile b/arch/um/Makefile
index d2daa206872d..275f5ffdf6f0 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -140,6 +140,7 @@ export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
# When cleaning we don't include .config, so we don't include
# TT or skas makefiles and don't clean skas_ptregs.h.
CLEAN_FILES += linux x.i gmon.out
+MRPROPER_DIRS += arch/$(SUBARCH)/include/generated
archclean:
@find . \( -name '*.bb' -o -name '*.bbg' -o -name '*.da' \
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index fc8814faae62..1c2f9baf8483 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -227,8 +227,8 @@ static void __init ms_hyperv_init_platform(void)
ms_hyperv.misc_features = cpuid_edx(HYPERV_CPUID_FEATURES);
ms_hyperv.hints = cpuid_eax(HYPERV_CPUID_ENLIGHTMENT_INFO);
- pr_info("Hyper-V: features 0x%x, hints 0x%x\n",
- ms_hyperv.features, ms_hyperv.hints);
+ pr_info("Hyper-V: features 0x%x, hints 0x%x, misc 0x%x\n",
+ ms_hyperv.features, ms_hyperv.hints, ms_hyperv.misc_features);
ms_hyperv.max_vp_index = cpuid_eax(HYPERV_CPUID_IMPLEMENT_LIMITS);
ms_hyperv.max_lp_index = cpuid_ebx(HYPERV_CPUID_IMPLEMENT_LIMITS);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 991549a1c5f3..18936533666e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -138,6 +138,19 @@ static bool is_ereg(u32 reg)
BIT(BPF_REG_AX));
}
+/*
+ * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
+ * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need extra byte
+ * of encoding. al,cl,dl,bl have simpler encoding.
+ */
+static bool is_ereg_8l(u32 reg)
+{
+ return is_ereg(reg) ||
+ (1 << reg) & (BIT(BPF_REG_1) |
+ BIT(BPF_REG_2) |
+ BIT(BPF_REG_FP));
+}
+
static bool is_axreg(u32 reg)
{
return reg == BPF_REG_0;
@@ -748,9 +761,8 @@ st: if (is_imm8(insn->off))
/* STX: *(u8*)(dst_reg + off) = src_reg */
case BPF_STX | BPF_MEM | BPF_B:
/* Emit 'mov byte ptr [rax + off], al' */
- if (is_ereg(dst_reg) || is_ereg(src_reg) ||
- /* We have to add extra byte for x86 SIL, DIL regs */
- src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
+ if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
+ /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
else
EMIT1(0x88);
diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
index 4d2a7a764602..66cd150b7e54 100644
--- a/arch/x86/net/bpf_jit_comp32.c
+++ b/arch/x86/net/bpf_jit_comp32.c
@@ -1847,14 +1847,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_B:
case BPF_H:
case BPF_W:
- if (!bpf_prog->aux->verifier_zext)
+ if (bpf_prog->aux->verifier_zext)
break;
if (dstk) {
EMIT3(0xC7, add_1reg(0x40, IA32_EBP),
STACK_VAR(dst_hi));
EMIT(0x0, 4);
} else {
- EMIT3(0xC7, add_1reg(0xC0, dst_hi), 0);
+ /* xor dst_hi,dst_hi */
+ EMIT2(0x33,
+ add_2reg(0xC0, dst_hi, dst_hi));
}
break;
case BPF_DW:
@@ -2013,8 +2015,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JSET | BPF_X:
case BPF_JMP32 | BPF_JSET | BPF_X: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
- u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
- u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ u8 dreg_lo = IA32_EAX;
+ u8 dreg_hi = IA32_EDX;
u8 sreg_lo = sstk ? IA32_ECX : src_lo;
u8 sreg_hi = sstk ? IA32_EBX : src_hi;
@@ -2026,6 +2028,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
+ } else {
+ /* mov dreg_lo,dst_lo */
+ EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
+ if (is_jmp64)
+ /* mov dreg_hi,dst_hi */
+ EMIT2(0x89,
+ add_2reg(0xC0, dreg_hi, dst_hi));
}
if (sstk) {
@@ -2050,8 +2059,8 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_JMP | BPF_JSET | BPF_K:
case BPF_JMP32 | BPF_JSET | BPF_K: {
bool is_jmp64 = BPF_CLASS(insn->code) == BPF_JMP;
- u8 dreg_lo = dstk ? IA32_EAX : dst_lo;
- u8 dreg_hi = dstk ? IA32_EDX : dst_hi;
+ u8 dreg_lo = IA32_EAX;
+ u8 dreg_hi = IA32_EDX;
u8 sreg_lo = IA32_ECX;
u8 sreg_hi = IA32_EBX;
u32 hi;
@@ -2064,6 +2073,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
add_2reg(0x40, IA32_EBP,
IA32_EDX),
STACK_VAR(dst_hi));
+ } else {
+ /* mov dreg_lo,dst_lo */
+ EMIT2(0x89, add_2reg(0xC0, dreg_lo, dst_lo));
+ if (is_jmp64)
+ /* mov dreg_hi,dst_hi */
+ EMIT2(0x89,
+ add_2reg(0xC0, dreg_hi, dst_hi));
}
/* mov ecx,imm32 */
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 9a599cc28c29..2dc5dc54e257 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -1594,7 +1594,7 @@ static void ioc_timer_fn(struct timer_list *timer)
vrate_min, vrate_max);
}
- trace_iocost_ioc_vrate_adj(ioc, vrate, &missed_ppm, rq_wait_pct,
+ trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
nr_lagging, nr_shortages,
nr_surpluses);
@@ -1603,7 +1603,7 @@ static void ioc_timer_fn(struct timer_list *timer)
ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
} else if (ioc->busy_level != prev_busy_level || nr_lagging) {
trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
- &missed_ppm, rq_wait_pct, nr_lagging,
+ missed_ppm, rq_wait_pct, nr_lagging,
nr_shortages, nr_surpluses);
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a8c1a45cedde..757c0fd9f0cc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1232,8 +1232,10 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
rq = list_first_entry(list, struct request, queuelist);
hctx = rq->mq_hctx;
- if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
+ if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
+ blk_mq_put_driver_tag(rq);
break;
+ }
if (!blk_mq_get_driver_tag(rq)) {
/*
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 0e99a760aebd..8646147dc194 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -726,7 +726,7 @@ static bool dpm_async_fn(struct device *dev, async_func_t func)
if (is_async(dev)) {
get_device(dev);
- async_schedule(func, dev);
+ async_schedule_dev(func, dev);
return true;
}
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 00b113f4b958..5c23a9a56921 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -42,6 +42,7 @@ MODULE_PARM_DESC(base, "ACCES 104-QUAD-8 base addresses");
* @base: base port address of the IIO device
*/
struct quad8_iio {
+ struct mutex lock;
struct counter_device counter;
unsigned int preset[QUAD8_NUM_COUNTERS];
unsigned int count_mode[QUAD8_NUM_COUNTERS];
@@ -116,6 +117,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
/* Borrow XOR Carry effectively doubles count range */
*val = (borrow ^ carry) << 24;
+ mutex_lock(&priv->lock);
+
/* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
base_offset + 1);
@@ -123,6 +126,8 @@ static int quad8_read_raw(struct iio_dev *indio_dev,
for (i = 0; i < 3; i++)
*val |= (unsigned int)inb(base_offset) << (8 * i);
+ mutex_unlock(&priv->lock);
+
return IIO_VAL_INT;
case IIO_CHAN_INFO_ENABLE:
*val = priv->ab_enable[chan->channel];
@@ -153,6 +158,8 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
if ((unsigned int)val > 0xFFFFFF)
return -EINVAL;
+ mutex_lock(&priv->lock);
+
/* Reset Byte Pointer */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
@@ -176,12 +183,16 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
/* Reset Error flag */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+ mutex_unlock(&priv->lock);
+
return 0;
case IIO_CHAN_INFO_ENABLE:
/* only boolean values accepted */
if (val < 0 || val > 1)
return -EINVAL;
+ mutex_lock(&priv->lock);
+
priv->ab_enable[chan->channel] = val;
ior_cfg = val | priv->preset_enable[chan->channel] << 1;
@@ -189,11 +200,18 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
/* Load I/O control configuration */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+ mutex_unlock(&priv->lock);
+
return 0;
case IIO_CHAN_INFO_SCALE:
+ mutex_lock(&priv->lock);
+
/* Quadrature scaling only available in quadrature mode */
- if (!priv->quadrature_mode[chan->channel] && (val2 || val != 1))
+ if (!priv->quadrature_mode[chan->channel] &&
+ (val2 || val != 1)) {
+ mutex_unlock(&priv->lock);
return -EINVAL;
+ }
/* Only three gain states (1, 0.5, 0.25) */
if (val == 1 && !val2)
@@ -207,11 +225,15 @@ static int quad8_write_raw(struct iio_dev *indio_dev,
priv->quadrature_scale[chan->channel] = 2;
break;
default:
+ mutex_unlock(&priv->lock);
return -EINVAL;
}
- else
+ else {
+ mutex_unlock(&priv->lock);
return -EINVAL;
+ }
+ mutex_unlock(&priv->lock);
return 0;
}
@@ -248,6 +270,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
if (preset > 0xFFFFFF)
return -EINVAL;
+ mutex_lock(&priv->lock);
+
priv->preset[chan->channel] = preset;
/* Reset Byte Pointer */
@@ -257,6 +281,8 @@ static ssize_t quad8_write_preset(struct iio_dev *indio_dev, uintptr_t private,
for (i = 0; i < 3; i++)
outb(preset >> (8 * i), base_offset);
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -286,6 +312,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
/* Preset enable is active low in Input/Output Control register */
preset_enable = !preset_enable;
+ mutex_lock(&priv->lock);
+
priv->preset_enable[chan->channel] = preset_enable;
ior_cfg = priv->ab_enable[chan->channel] |
@@ -294,6 +322,8 @@ static ssize_t quad8_write_set_to_preset_on_index(struct iio_dev *indio_dev,
/* Load I/O control configuration to Input / Output Control Register */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -351,6 +381,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
unsigned int mode_cfg = cnt_mode << 1;
const int base_offset = priv->base + 2 * chan->channel + 1;
+ mutex_lock(&priv->lock);
+
priv->count_mode[chan->channel] = cnt_mode;
/* Add quadrature mode configuration */
@@ -360,6 +392,8 @@ static int quad8_set_count_mode(struct iio_dev *indio_dev,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -387,19 +421,26 @@ static int quad8_set_synchronous_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int synchronous_mode)
{
struct quad8_iio *const priv = iio_priv(indio_dev);
- const unsigned int idr_cfg = synchronous_mode |
- priv->index_polarity[chan->channel] << 1;
const int base_offset = priv->base + 2 * chan->channel + 1;
+ unsigned int idr_cfg = synchronous_mode;
+
+ mutex_lock(&priv->lock);
+
+ idr_cfg |= priv->index_polarity[chan->channel] << 1;
/* Index function must be non-synchronous in non-quadrature mode */
- if (synchronous_mode && !priv->quadrature_mode[chan->channel])
+ if (synchronous_mode && !priv->quadrature_mode[chan->channel]) {
+ mutex_unlock(&priv->lock);
return -EINVAL;
+ }
priv->synchronous_mode[chan->channel] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -427,8 +468,12 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int quadrature_mode)
{
struct quad8_iio *const priv = iio_priv(indio_dev);
- unsigned int mode_cfg = priv->count_mode[chan->channel] << 1;
const int base_offset = priv->base + 2 * chan->channel + 1;
+ unsigned int mode_cfg;
+
+ mutex_lock(&priv->lock);
+
+ mode_cfg = priv->count_mode[chan->channel] << 1;
if (quadrature_mode)
mode_cfg |= (priv->quadrature_scale[chan->channel] + 1) << 3;
@@ -446,6 +491,8 @@ static int quad8_set_quadrature_mode(struct iio_dev *indio_dev,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -473,15 +520,20 @@ static int quad8_set_index_polarity(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int index_polarity)
{
struct quad8_iio *const priv = iio_priv(indio_dev);
- const unsigned int idr_cfg = priv->synchronous_mode[chan->channel] |
- index_polarity << 1;
const int base_offset = priv->base + 2 * chan->channel + 1;
+ unsigned int idr_cfg = index_polarity << 1;
+
+ mutex_lock(&priv->lock);
+
+ idr_cfg |= priv->synchronous_mode[chan->channel];
priv->index_polarity[chan->channel] = index_polarity;
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -585,7 +637,7 @@ static int quad8_signal_read(struct counter_device *counter,
static int quad8_count_read(struct counter_device *counter,
struct counter_count *count, struct counter_count_read_value *val)
{
- const struct quad8_iio *const priv = counter->priv;
+ struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
unsigned int flags;
unsigned int borrow;
@@ -600,6 +652,8 @@ static int quad8_count_read(struct counter_device *counter,
/* Borrow XOR Carry effectively doubles count range */
position = (unsigned long)(borrow ^ carry) << 24;
+ mutex_lock(&priv->lock);
+
/* Reset Byte Pointer; transfer Counter to Output Latch */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_CNTR_OUT,
base_offset + 1);
@@ -609,13 +663,15 @@ static int quad8_count_read(struct counter_device *counter,
counter_count_read_value_set(val, COUNTER_COUNT_POSITION, &position);
+ mutex_unlock(&priv->lock);
+
return 0;
}
static int quad8_count_write(struct counter_device *counter,
struct counter_count *count, struct counter_count_write_value *val)
{
- const struct quad8_iio *const priv = counter->priv;
+ struct quad8_iio *const priv = counter->priv;
const int base_offset = priv->base + 2 * count->id;
int err;
unsigned long position;
@@ -630,6 +686,8 @@ static int quad8_count_write(struct counter_device *counter,
if (position > 0xFFFFFF)
return -EINVAL;
+ mutex_lock(&priv->lock);
+
/* Reset Byte Pointer */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
@@ -653,6 +711,8 @@ static int quad8_count_write(struct counter_device *counter,
/* Reset Error flag */
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_E, base_offset + 1);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -673,13 +733,13 @@ static enum counter_count_function quad8_count_functions_list[] = {
static int quad8_function_get(struct counter_device *counter,
struct counter_count *count, size_t *function)
{
- const struct quad8_iio *const priv = counter->priv;
+ struct quad8_iio *const priv = counter->priv;
const int id = count->id;
- const unsigned int quadrature_mode = priv->quadrature_mode[id];
- const unsigned int scale = priv->quadrature_scale[id];
- if (quadrature_mode)
- switch (scale) {
+ mutex_lock(&priv->lock);
+
+ if (priv->quadrature_mode[id])
+ switch (priv->quadrature_scale[id]) {
case 0:
*function = QUAD8_COUNT_FUNCTION_QUADRATURE_X1;
break;
@@ -693,6 +753,8 @@ static int quad8_function_get(struct counter_device *counter,
else
*function = QUAD8_COUNT_FUNCTION_PULSE_DIRECTION;
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -703,10 +765,15 @@ static int quad8_function_set(struct counter_device *counter,
const int id = count->id;
unsigned int *const quadrature_mode = priv->quadrature_mode + id;
unsigned int *const scale = priv->quadrature_scale + id;
- unsigned int mode_cfg = priv->count_mode[id] << 1;
unsigned int *const synchronous_mode = priv->synchronous_mode + id;
- const unsigned int idr_cfg = priv->index_polarity[id] << 1;
const int base_offset = priv->base + 2 * id + 1;
+ unsigned int mode_cfg;
+ unsigned int idr_cfg;
+
+ mutex_lock(&priv->lock);
+
+ mode_cfg = priv->count_mode[id] << 1;
+ idr_cfg = priv->index_polarity[id] << 1;
if (function == QUAD8_COUNT_FUNCTION_PULSE_DIRECTION) {
*quadrature_mode = 0;
@@ -742,6 +809,8 @@ static int quad8_function_set(struct counter_device *counter,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -858,15 +927,20 @@ static int quad8_index_polarity_set(struct counter_device *counter,
{
struct quad8_iio *const priv = counter->priv;
const size_t channel_id = signal->id - 16;
- const unsigned int idr_cfg = priv->synchronous_mode[channel_id] |
- index_polarity << 1;
const int base_offset = priv->base + 2 * channel_id + 1;
+ unsigned int idr_cfg = index_polarity << 1;
+
+ mutex_lock(&priv->lock);
+
+ idr_cfg |= priv->synchronous_mode[channel_id];
priv->index_polarity[channel_id] = index_polarity;
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -893,19 +967,26 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
{
struct quad8_iio *const priv = counter->priv;
const size_t channel_id = signal->id - 16;
- const unsigned int idr_cfg = synchronous_mode |
- priv->index_polarity[channel_id] << 1;
const int base_offset = priv->base + 2 * channel_id + 1;
+ unsigned int idr_cfg = synchronous_mode;
+
+ mutex_lock(&priv->lock);
+
+ idr_cfg |= priv->index_polarity[channel_id] << 1;
/* Index function must be non-synchronous in non-quadrature mode */
- if (synchronous_mode && !priv->quadrature_mode[channel_id])
+ if (synchronous_mode && !priv->quadrature_mode[channel_id]) {
+ mutex_unlock(&priv->lock);
return -EINVAL;
+ }
priv->synchronous_mode[channel_id] = synchronous_mode;
/* Load Index Control configuration to Index Control Register */
outb(QUAD8_CTR_IDR | idr_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -970,6 +1051,8 @@ static int quad8_count_mode_set(struct counter_device *counter,
break;
}
+ mutex_lock(&priv->lock);
+
priv->count_mode[count->id] = cnt_mode;
/* Set count mode configuration value */
@@ -982,6 +1065,8 @@ static int quad8_count_mode_set(struct counter_device *counter,
/* Load mode configuration to Counter Mode Register */
outb(QUAD8_CTR_CMR | mode_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -1023,6 +1108,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter,
if (err)
return err;
+ mutex_lock(&priv->lock);
+
priv->ab_enable[count->id] = ab_enable;
ior_cfg = ab_enable | priv->preset_enable[count->id] << 1;
@@ -1030,6 +1117,8 @@ static ssize_t quad8_count_enable_write(struct counter_device *counter,
/* Load I/O control configuration */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset + 1);
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -1058,14 +1147,28 @@ static ssize_t quad8_count_preset_read(struct counter_device *counter,
return sprintf(buf, "%u\n", priv->preset[count->id]);
}
+static void quad8_preset_register_set(struct quad8_iio *quad8iio, int id,
+ unsigned int preset)
+{
+ const unsigned int base_offset = quad8iio->base + 2 * id;
+ int i;
+
+ quad8iio->preset[id] = preset;
+
+ /* Reset Byte Pointer */
+ outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+
+ /* Set Preset Register */
+ for (i = 0; i < 3; i++)
+ outb(preset >> (8 * i), base_offset);
+}
+
static ssize_t quad8_count_preset_write(struct counter_device *counter,
struct counter_count *count, void *private, const char *buf, size_t len)
{
struct quad8_iio *const priv = counter->priv;
- const int base_offset = priv->base + 2 * count->id;
unsigned int preset;
int ret;
- int i;
ret = kstrtouint(buf, 0, &preset);
if (ret)
@@ -1075,14 +1178,11 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter,
if (preset > 0xFFFFFF)
return -EINVAL;
- priv->preset[count->id] = preset;
+ mutex_lock(&priv->lock);
- /* Reset Byte Pointer */
- outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP, base_offset + 1);
+ quad8_preset_register_set(priv, count->id, preset);
- /* Set Preset Register */
- for (i = 0; i < 3; i++)
- outb(preset >> (8 * i), base_offset);
+ mutex_unlock(&priv->lock);
return len;
}
@@ -1090,15 +1190,20 @@ static ssize_t quad8_count_preset_write(struct counter_device *counter,
static ssize_t quad8_count_ceiling_read(struct counter_device *counter,
struct counter_count *count, void *private, char *buf)
{
- const struct quad8_iio *const priv = counter->priv;
+ struct quad8_iio *const priv = counter->priv;
+
+ mutex_lock(&priv->lock);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) {
case 1:
case 3:
- return quad8_count_preset_read(counter, count, private, buf);
+ mutex_unlock(&priv->lock);
+ return sprintf(buf, "%u\n", priv->preset[count->id]);
}
+ mutex_unlock(&priv->lock);
+
/* By default 0x1FFFFFF (25 bits unsigned) is maximum count */
return sprintf(buf, "33554431\n");
}
@@ -1107,15 +1212,29 @@ static ssize_t quad8_count_ceiling_write(struct counter_device *counter,
struct counter_count *count, void *private, const char *buf, size_t len)
{
struct quad8_iio *const priv = counter->priv;
+ unsigned int ceiling;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &ceiling);
+ if (ret)
+ return ret;
+
+ /* Only 24-bit values are supported */
+ if (ceiling > 0xFFFFFF)
+ return -EINVAL;
+
+ mutex_lock(&priv->lock);
/* Range Limit and Modulo-N count modes use preset value as ceiling */
switch (priv->count_mode[count->id]) {
case 1:
case 3:
- return quad8_count_preset_write(counter, count, private, buf,
- len);
+ quad8_preset_register_set(priv, count->id, ceiling);
+ break;
}
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -1143,6 +1262,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
/* Preset enable is active low in Input/Output Control register */
preset_enable = !preset_enable;
+ mutex_lock(&priv->lock);
+
priv->preset_enable[count->id] = preset_enable;
ior_cfg = priv->ab_enable[count->id] | (unsigned int)preset_enable << 1;
@@ -1150,6 +1271,8 @@ static ssize_t quad8_count_preset_enable_write(struct counter_device *counter,
/* Load I/O control configuration to Input / Output Control Register */
outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -1320,6 +1443,9 @@ static int quad8_probe(struct device *dev, unsigned int id)
quad8iio->counter.priv = quad8iio;
quad8iio->base = base[id];
+ /* Initialize mutex */
+ mutex_init(&quad8iio->lock);
+
/* Reset all counters and disable interrupt function */
outb(QUAD8_CHAN_OP_RESET_COUNTERS, base[id] + QUAD8_REG_CHAN_OP);
/* Set initial configuration for all counters */
diff --git a/drivers/crypto/chelsio/chcr_core.c b/drivers/crypto/chelsio/chcr_core.c
index 029a7354f541..5c16f368879b 100644
--- a/drivers/crypto/chelsio/chcr_core.c
+++ b/drivers/crypto/chelsio/chcr_core.c
@@ -125,8 +125,6 @@ static void chcr_dev_init(struct uld_ctx *u_ctx)
atomic_set(&dev->inflight, 0);
mutex_lock(&drv_data.drv_mutex);
list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
- if (!drv_data.last_dev)
- drv_data.last_dev = u_ctx;
mutex_unlock(&drv_data.drv_mutex);
}
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index f2d81b0558e5..e3f1ebee7130 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -506,7 +506,7 @@ static int jc42_probe(struct i2c_client *client, const struct i2c_device_id *id)
}
data->config = config;
- hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, "jc42",
data, &jc42_chip_info,
NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 1de23b4f3809..92d2c706c2a7 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -384,7 +384,6 @@ static int altr_i2c_probe(struct platform_device *pdev)
struct altr_i2c_dev *idev = NULL;
struct resource *res;
int irq, ret;
- u32 val;
idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL);
if (!idev)
@@ -411,17 +410,17 @@ static int altr_i2c_probe(struct platform_device *pdev)
init_completion(&idev->msg_complete);
spin_lock_init(&idev->lock);
- val = device_property_read_u32(idev->dev, "fifo-size",
+ ret = device_property_read_u32(idev->dev, "fifo-size",
&idev->fifo_size);
- if (val) {
+ if (ret) {
dev_err(&pdev->dev, "FIFO size set to default of %d\n",
ALTR_I2C_DFLT_FIFO_SZ);
idev->fifo_size = ALTR_I2C_DFLT_FIFO_SZ;
}
- val = device_property_read_u32(idev->dev, "clock-frequency",
+ ret = device_property_read_u32(idev->dev, "clock-frequency",
&idev->bus_clk_rate);
- if (val) {
+ if (ret) {
dev_err(&pdev->dev, "Default to 100kHz\n");
idev->bus_clk_rate = 100000; /* default clock rate */
}
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index bbc41ecf0d2f..6ed6d1410201 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -541,7 +541,7 @@ static const struct iio_info ad7797_info = {
.read_raw = &ad7793_read_raw,
.write_raw = &ad7793_write_raw,
.write_raw_get_fmt = &ad7793_write_raw_get_fmt,
- .attrs = &ad7793_attribute_group,
+ .attrs = &ad7797_attribute_group,
.validate_trigger = ad_sd_validate_trigger,
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 3f6813daf3c1..31fcfc58e337 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3748,7 +3748,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
- if (ret < 0)
+ if (ret)
return ret;
*phy_fw_ver = val;
return 0;
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index f79e57f735b3..d89568f810bc 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -488,6 +488,12 @@ struct fec_enet_priv_rx_q {
struct sk_buff *rx_skbuff[RX_RING_SIZE];
};
+struct fec_stop_mode_gpr {
+ struct regmap *gpr;
+ u8 reg;
+ u8 bit;
+};
+
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
* tx_bd_base always point to the base of the buffer descriptors. The
* cur_rx and cur_tx point to the currently available buffer.
@@ -562,6 +568,7 @@ struct fec_enet_private {
int hwts_tx_en;
struct delayed_work time_keep;
struct regulator *reg_phy;
+ struct fec_stop_mode_gpr stop_gpr;
unsigned int tx_align;
unsigned int rx_align;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 3fc8a66e4f41..39c112f1543c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -62,6 +62,8 @@
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@ -84,6 +86,56 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define FEC_ENET_OPD_V 0xFFF0
#define FEC_MDIO_PM_TIMEOUT 100 /* ms */
+struct fec_devinfo {
+ u32 quirks;
+ u8 stop_gpr_reg;
+ u8 stop_gpr_bit;
+};
+
+static const struct fec_devinfo fec_imx25_info = {
+ .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+ FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx27_info = {
+ .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx28_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx6q_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+ FEC_QUIRK_HAS_RACC,
+ .stop_gpr_reg = 0x34,
+ .stop_gpr_bit = 27,
+};
+
+static const struct fec_devinfo fec_mvf600_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+};
+
+static const struct fec_devinfo fec_imx6x_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+};
+
+static const struct fec_devinfo fec_imx6ul_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+ FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+ FEC_QUIRK_HAS_COALESCE,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -91,39 +143,25 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
- FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx25_info,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx27_info,
}, {
.name = "imx28-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
- FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_FRREG,
+ .driver_data = (kernel_ulong_t)&fec_imx28_info,
}, {
.name = "imx6q-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
- FEC_QUIRK_HAS_RACC,
+ .driver_data = (kernel_ulong_t)&fec_imx6q_info,
}, {
.name = "mvf600-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+ .driver_data = (kernel_ulong_t)&fec_mvf600_info,
}, {
.name = "imx6sx-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
- FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ .driver_data = (kernel_ulong_t)&fec_imx6x_info,
}, {
.name = "imx6ul-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
- FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
- FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
- FEC_QUIRK_HAS_COALESCE,
+ .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
}, {
/* sentinel */
}
@@ -1092,11 +1130,28 @@ fec_restart(struct net_device *ndev)
}
+static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
+{
+ struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
+
+ if (stop_gpr->gpr) {
+ if (enabled)
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit),
+ BIT(stop_gpr->bit));
+ else
+ regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+ BIT(stop_gpr->bit), 0);
+ } else if (pdata && pdata->sleep_mode_enable) {
+ pdata->sleep_mode_enable(enabled);
+ }
+}
+
static void
fec_stop(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
u32 val;
@@ -1125,9 +1180,7 @@ fec_stop(struct net_device *ndev)
val = readl(fep->hwp + FEC_ECNTRL);
val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
-
- if (pdata && pdata->sleep_mode_enable)
- pdata->sleep_mode_enable(true);
+ fec_enet_stop_mode(fep, true);
}
writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
@@ -3398,6 +3451,37 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev)
return irq_cnt;
}
+static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
+ struct fec_devinfo *dev_info,
+ struct device_node *np)
+{
+ struct device_node *gpr_np;
+ int ret = 0;
+
+ if (!dev_info)
+ return 0;
+
+ gpr_np = of_parse_phandle(np, "gpr", 0);
+ if (!gpr_np)
+ return 0;
+
+ fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
+ if (IS_ERR(fep->stop_gpr.gpr)) {
+ dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
+ ret = PTR_ERR(fep->stop_gpr.gpr);
+ fep->stop_gpr.gpr = NULL;
+ goto out;
+ }
+
+ fep->stop_gpr.reg = dev_info->stop_gpr_reg;
+ fep->stop_gpr.bit = dev_info->stop_gpr_bit;
+
+out:
+ of_node_put(gpr_np);
+
+ return ret;
+}
+
static int
fec_probe(struct platform_device *pdev)
{
@@ -3412,6 +3496,7 @@ fec_probe(struct platform_device *pdev)
int num_rx_qs;
char irq_name[8];
int irq_cnt;
+ struct fec_devinfo *dev_info;
fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
@@ -3429,7 +3514,9 @@ fec_probe(struct platform_device *pdev)
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
pdev->id_entry = of_id->data;
- fep->quirks = pdev->id_entry->driver_data;
+ dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+ if (dev_info)
+ fep->quirks = dev_info->quirks;
fep->netdev = ndev;
fep->num_rx_queues = num_rx_qs;
@@ -3463,6 +3550,10 @@ fec_probe(struct platform_device *pdev)
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+ ret = fec_enet_init_stop_mode(fep, dev_info, np);
+ if (ret)
+ goto failed_stop_mode;
+
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!phy_node && of_phy_is_fixed_link(np)) {
ret = of_phy_register_fixed_link(np);
@@ -3631,6 +3722,7 @@ fec_probe(struct platform_device *pdev)
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
+failed_stop_mode:
failed_phy:
dev_id--;
failed_ioremap:
@@ -3708,7 +3800,6 @@ static int __maybe_unused fec_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
int ret;
int val;
@@ -3726,8 +3817,8 @@ static int __maybe_unused fec_resume(struct device *dev)
goto failed_clk;
}
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
- if (pdata && pdata->sleep_mode_enable)
- pdata->sleep_mode_enable(false);
+ fec_enet_stop_mode(fep, false);
+
val = readl(fep->hwp + FEC_ECNTRL);
val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
writel(val, fep->hwp + FEC_ECNTRL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index 94d7b69a95c7..eb2e57ff08a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -935,7 +935,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
return NULL;
}
- tracer = kzalloc(sizeof(*tracer), GFP_KERNEL);
+ tracer = kvzalloc(sizeof(*tracer), GFP_KERNEL);
if (!tracer)
return ERR_PTR(-ENOMEM);
@@ -982,7 +982,7 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev)
tracer->dev = NULL;
destroy_workqueue(tracer->work_queue);
free_tracer:
- kfree(tracer);
+ kvfree(tracer);
return ERR_PTR(err);
}
@@ -1061,7 +1061,7 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_destroy_log_buf(tracer);
flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
- kfree(tracer);
+ kvfree(tracer);
}
static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 11426f94c90c..38aa55638bbe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -367,6 +367,7 @@ enum {
MLX5E_SQ_STATE_AM,
MLX5E_SQ_STATE_TLS,
MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE,
+ MLX5E_SQ_STATE_PENDING_XSK_TX,
};
struct mlx5e_sq_wqe_info {
@@ -948,7 +949,7 @@ void mlx5e_page_release_dynamic(struct mlx5e_rq *rq,
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq);
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq);
void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
index fe2d596cb361..3bcdb5b2fc20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
@@ -33,6 +33,9 @@ int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &c->xskicosq.state)))
return 0;
+ if (test_and_set_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state))
+ return 0;
+
spin_lock(&c->xskicosq_lock);
mlx5e_trigger_irq(&c->xskicosq);
spin_unlock(&c->xskicosq_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 88ea279c29bb..0e340893ca00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3579,7 +3579,12 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
struct mlx5e_pport_stats *pstats = &priv->stats.pport;
- if (!mlx5e_monitor_counter_supported(priv)) {
+ /* In switchdev mode, monitor counters doesn't monitor
+ * rx/tx stats of 802_3. The update stats mechanism
+ * should keep the 802_3 layout counters updated
+ */
+ if (!mlx5e_monitor_counter_supported(priv) ||
+ mlx5e_is_uplink_rep(priv)) {
/* update HW stats in background for next time */
mlx5e_queue_update_stats(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 1d295a7afc8c..c4eed5bbcd45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -587,7 +587,7 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !!err;
}
-void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
struct mlx5_cqe64 *cqe;
@@ -595,11 +595,11 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
int i;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return;
+ return 0;
cqe = mlx5_cqwq_get_cqe(&cq->wq);
if (likely(!cqe))
- return;
+ return 0;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
@@ -646,6 +646,8 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
sq->cc = sqcc;
mlx5_cqwq_update_db_record(&cq->wq);
+
+ return i;
}
bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 800d34ed8a96..76efa9579215 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -145,7 +145,11 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
busy |= rq->post_wqes(rq);
if (xsk_open) {
- mlx5e_poll_ico_cq(&c->xskicosq.cq);
+ if (mlx5e_poll_ico_cq(&c->xskicosq.cq))
+ /* Don't clear the flag if nothing was polled to prevent
+ * queueing more WQEs and overflowing XSKICOSQ.
+ */
+ clear_bit(MLX5E_SQ_STATE_PENDING_XSK_TX, &c->xskicosq.state);
busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq);
busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index a1ebc2b1ca0b..0bf91df80d47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -4648,26 +4648,20 @@ static void qed_chain_free_single(struct qed_dev *cdev,
static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{
- void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl = p_chain->pbl.pp_addr_tbl;
u32 page_cnt = p_chain->page_cnt, i, pbl_size;
- u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
- if (!pp_virt_addr_tbl)
+ if (!pp_addr_tbl)
return;
- if (!p_pbl_virt)
- goto out;
-
for (i = 0; i < page_cnt; i++) {
- if (!pp_virt_addr_tbl[i])
+ if (!pp_addr_tbl[i].virt_addr || !pp_addr_tbl[i].dma_map)
break;
dma_free_coherent(&cdev->pdev->dev,
QED_CHAIN_PAGE_SIZE,
- pp_virt_addr_tbl[i],
- *(dma_addr_t *)p_pbl_virt);
-
- p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ pp_addr_tbl[i].virt_addr,
+ pp_addr_tbl[i].dma_map);
}
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
@@ -4677,9 +4671,9 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
pbl_size,
p_chain->pbl_sp.p_virt_table,
p_chain->pbl_sp.p_phys_table);
-out:
- vfree(p_chain->pbl.pp_virt_addr_tbl);
- p_chain->pbl.pp_virt_addr_tbl = NULL;
+
+ vfree(p_chain->pbl.pp_addr_tbl);
+ p_chain->pbl.pp_addr_tbl = NULL;
}
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
@@ -4780,19 +4774,19 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
{
u32 page_cnt = p_chain->page_cnt, size, i;
dma_addr_t p_phys = 0, p_pbl_phys = 0;
- void **pp_virt_addr_tbl = NULL;
+ struct addr_tbl_entry *pp_addr_tbl;
u8 *p_pbl_virt = NULL;
void *p_virt = NULL;
- size = page_cnt * sizeof(*pp_virt_addr_tbl);
- pp_virt_addr_tbl = vzalloc(size);
- if (!pp_virt_addr_tbl)
+ size = page_cnt * sizeof(*pp_addr_tbl);
+ pp_addr_tbl = vzalloc(size);
+ if (!pp_addr_tbl)
return -ENOMEM;
/* The allocation of the PBL table is done with its full size, since it
* is expected to be successive.
* qed_chain_init_pbl_mem() is called even in a case of an allocation
- * failure, since pp_virt_addr_tbl was previously allocated, and it
+ * failure, since tbl was previously allocated, and it
* should be saved to allow its freeing during the error flow.
*/
size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
@@ -4806,8 +4800,7 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
p_chain->b_external_pbl = true;
}
- qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
- pp_virt_addr_tbl);
+ qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, pp_addr_tbl);
if (!p_pbl_virt)
return -ENOMEM;
@@ -4826,7 +4819,8 @@ qed_chain_alloc_pbl(struct qed_dev *cdev,
/* Fill the PBL table with the physical address of the page */
*(dma_addr_t *)p_pbl_virt = p_phys;
/* Keep the virtual address of the page */
- p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+ p_chain->pbl.pp_addr_tbl[i].virt_addr = p_virt;
+ p_chain->pbl.pp_addr_tbl[i].dma_map = p_phys;
p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 38f7f40b3a4d..e72f9f1d2e94 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -1087,9 +1087,6 @@ static void qed_update_pf_params(struct qed_dev *cdev,
#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
#define QED_PERIODIC_DB_REC_INTERVAL \
msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
-#define QED_PERIODIC_DB_REC_WAIT_COUNT 10
-#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
- (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
enum qed_slowpath_wq_flag wq_flag,
@@ -1123,7 +1120,7 @@ void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
- int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;
+ int i;
if (IS_VF(cdev))
return;
@@ -1135,13 +1132,7 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev)
/* Stop queuing new delayed works */
cdev->hwfns[i].slowpath_wq_active = false;
- /* Wait until the last periodic doorbell recovery is executed */
- while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
- &cdev->hwfns[i].slowpath_task_flags) &&
- sleep_count--)
- msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);
-
- flush_workqueue(cdev->hwfns[i].slowpath_wq);
+ cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
destroy_workqueue(cdev->hwfns[i].slowpath_wq);
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index e0212d2fc2a1..fa32cd5b418e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -241,6 +241,8 @@ static int socfpga_set_phy_mode_common(int phymode, u32 *val)
switch (phymode) {
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
*val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
break;
case PHY_INTERFACE_MODE_MII:
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a51b3e3f248b..798e52051ecc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4352,6 +4352,47 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
quirk_chelsio_T5_disable_root_port_attributes);
+/*
+ * pci_acs_ctrl_enabled - compare desired ACS controls with those provided
+ * by a device
+ * @acs_ctrl_req: Bitmask of desired ACS controls
+ * @acs_ctrl_ena: Bitmask of ACS controls enabled or provided implicitly by
+ * the hardware design
+ *
+ * Return 1 if all ACS controls in the @acs_ctrl_req bitmask are included
+ * in @acs_ctrl_ena, i.e., the device provides all the access controls the
+ * caller desires. Return 0 otherwise.
+ */
+static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena)
+{
+ if ((acs_ctrl_req & acs_ctrl_ena) == acs_ctrl_req)
+ return 1;
+ return 0;
+}
+
+/*
+ * Many Zhaoxin Root Ports and Switch Downstream Ports have no ACS capability.
+ * But the implementation could block peer-to-peer transactions between them
+ * and provide ACS-like functionality.
+ */
+static int pci_quirk_zhaoxin_pcie_ports_acs(struct pci_dev *dev, u16 acs_flags)
+{
+ if (!pci_is_pcie(dev) ||
+ ((pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
+ return -ENOTTY;
+
+ switch (dev->device) {
+ case 0x0710 ... 0x071e:
+ case 0x0721:
+ case 0x0723 ... 0x0732:
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+ }
+
+ return false;
+}
+
/*
* AMD has indicated that the devices below do not support peer-to-peer
* in any system where they are found in the southbridge with an AMD
@@ -4395,7 +4436,7 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
/* Filter out flags not applicable to multifunction */
acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
- return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, PCI_ACS_RR | PCI_ACS_CR);
#else
return -ENODEV;
#endif
@@ -4422,20 +4463,19 @@ static bool pci_quirk_cavium_acs_match(struct pci_dev *dev)
static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
{
+ if (!pci_quirk_cavium_acs_match(dev))
+ return -ENOTTY;
+
/*
- * Cavium root ports don't advertise an ACS capability. However,
+ * Cavium Root Ports don't advertise an ACS capability. However,
* the RTL internally implements similar protection as if ACS had
- * Request Redirection, Completion Redirection, Source Validation,
+ * Source Validation, Request Redirection, Completion Redirection,
* and Upstream Forwarding features enabled. Assert that the
* hardware implements and enables equivalent ACS functionality for
* these flags.
*/
- acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
-
- if (!pci_quirk_cavium_acs_match(dev))
- return -ENOTTY;
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4445,13 +4485,12 @@ static int pci_quirk_xgene_acs(struct pci_dev *dev, u16 acs_flags)
* transactions with others, allowing masking out these bits as if they
* were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
/*
- * Many Intel PCH root ports do provide ACS-like features to disable peer
+ * Many Intel PCH Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. This is the list of device IDs known to fall
* into that category as provided by Intel in Red Hat bugzilla 1037684.
@@ -4499,37 +4538,32 @@ static bool pci_quirk_intel_pch_acs_match(struct pci_dev *dev)
return false;
}
-#define INTEL_PCH_ACS_FLAGS (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV)
-
static int pci_quirk_intel_pch_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK ?
- INTEL_PCH_ACS_FLAGS : 0;
-
if (!pci_quirk_intel_pch_acs_match(dev))
return -ENOTTY;
- return acs_flags & ~flags ? 0 : 1;
+ if (dev->dev_flags & PCI_DEV_FLAGS_ACS_ENABLED_QUIRK)
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
+
+ return pci_acs_ctrl_enabled(acs_flags, 0);
}
/*
- * These QCOM root ports do provide ACS-like features to disable peer
+ * These QCOM Root Ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
* actual PCIe ACS capability. Hardware supports source validation but it
* will report the issue as Completer Abort instead of ACS Violation.
- * Hardware doesn't support peer-to-peer and each root port is a root
- * complex with unique segment numbers. It is not possible for one root
- * port to pass traffic to another root port. All PCIe transactions are
- * terminated inside the root port.
+ * Hardware doesn't support peer-to-peer and each Root Port is a Root
+ * Complex with unique segment numbers. It is not possible for one Root
+ * Port to pass traffic to another Root Port. All PCIe transactions are
+ * terminated inside the Root Port.
*/
static int pci_quirk_qcom_rp_acs(struct pci_dev *dev, u16 acs_flags)
{
- u16 flags = (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_SV);
- int ret = acs_flags & ~flags ? 0 : 1;
-
- pci_info(dev, "Using QCOM ACS Quirk (%d)\n", ret);
-
- return ret;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4630,7 +4664,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags)
pci_read_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, &ctrl);
- return acs_flags & ~ctrl ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags, ctrl);
}
static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4644,10 +4678,9 @@ static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
* perform peer-to-peer with other functions, allowing us to mask out
* these bits as if they were unimplemented in the ACS capability.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
- PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+ PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
}
static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
@@ -4658,9 +4691,8 @@ static int pci_quirk_brcm_acs(struct pci_dev *dev, u16 acs_flags)
* Allow each Root Port to be in a separate IOMMU group by masking
* SV/RR/CR/UF bits.
*/
- acs_flags &= ~(PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
-
- return acs_flags ? 0 : 1;
+ return pci_acs_ctrl_enabled(acs_flags,
+ PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF);
}
static const struct pci_dev_acs_enabled {
@@ -4759,9 +4791,26 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_BROADCOM, 0xD714, pci_quirk_brcm_acs },
/* Amazon Annapurna Labs */
{ PCI_VENDOR_ID_AMAZON_ANNAPURNA_LABS, 0x0031, pci_quirk_al_acs },
+ /* Zhaoxin multi-function devices */
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3038, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_ZHAOXIN, 0x3104, pci_quirk_mf_endpoint_acs },
+ { PCI_VENDOR_ID_ZHAOXIN, 0x9083, pci_quirk_mf_endpoint_acs },
+ /* Zhaoxin Root/Downstream Ports */
+ { PCI_VENDOR_ID_ZHAOXIN, PCI_ANY_ID, pci_quirk_zhaoxin_pcie_ports_acs },
{ 0 }
};
+/*
+ * pci_dev_specific_acs_enabled - check whether device provides ACS controls
+ * @dev: PCI device
+ * @acs_flags: Bitmask of desired ACS controls
+ *
+ * Returns:
+ * -ENOTTY: No quirk applies to this device; we can't tell whether the
+ * device provides the desired controls
+ * 0: Device does not provide all the desired controls
+ * >0: Device provides all the controls in @acs_flags
+ */
int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags)
{
const struct pci_dev_acs_enabled *i;
@@ -5490,3 +5539,21 @@ static void quirk_reset_lenovo_thinkpad_p50_nvgpu(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_NVIDIA, 0x13b1,
PCI_CLASS_DISPLAY_VGA, 8,
quirk_reset_lenovo_thinkpad_p50_nvgpu);
+
+/*
+ * Device [1b21:2142]
+ * When in D0, PME# doesn't get asserted when plugging USB 3.0 device.
+ */
+static void pci_fixup_no_d0_pme(struct pci_dev *dev)
+{
+ pci_info(dev, "PME# does not work under D0, disabling it\n");
+ dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASMEDIA, 0x2142, pci_fixup_no_d0_pme);
+
+static void apex_pci_fixup_class(struct pci_dev *pdev)
+{
+ pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
+}
+DECLARE_PCI_FIXUP_CLASS_HEADER(0x1ac1, 0x089a,
+ PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index b542debbc6f0..010f541a5002 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -400,7 +400,7 @@ rproc_parse_vring(struct rproc_vdev *rvdev, struct fw_rsc_vdev *rsc, int i)
void rproc_free_vring(struct rproc_vring *rvring)
{
struct rproc *rproc = rvring->rvdev->rproc;
- int idx = rvring->rvdev->vring - rvring;
+ int idx = rvring - rvring->rvdev->vring;
struct fw_rsc_vdev *rsc;
idr_remove(&rproc->notifyids, rvring->notifyid);
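The one-line remoteproc fix above reverses a pointer subtraction; here is a standalone demonstration (toy types, not the rproc structures) of why the operand order matters when deriving an array index.

#include <stdio.h>

struct vring_slot { int dummy; };

int main(void)
{
	struct vring_slot table[4];
	struct vring_slot *entry = &table[2];

	/* element minus array base gives the index; the reversed order
	 * gives its negation, which is what the old code computed.
	 */
	printf("%td %td\n", entry - table, table - entry);	/* prints: 2 -2 */
	return 0;
}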
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 01e76b58dd78..3fa162c1fde7 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -19,7 +19,7 @@ config XILINX_VCU
config ZYNQMP_POWER
bool "Enable Xilinx Zynq MPSoC Power Management driver"
- depends on PM && ARCH_ZYNQMP
+ depends on PM && ZYNQMP_FIRMWARE
default y
help
Say yes to enable power management support for ZyqnMP SoC.
@@ -31,7 +31,7 @@ config ZYNQMP_POWER
config ZYNQMP_PM_DOMAINS
bool "Enable Zynq MPSoC generic PM domains"
default y
- depends on PM && ARCH_ZYNQMP && ZYNQMP_FIRMWARE
+ depends on PM && ZYNQMP_FIRMWARE
select PM_GENERIC_DOMAINS
help
Say yes to enable device power management through PM domains
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
index 46199c8ca441..f12f81c8dd2f 100644
--- a/drivers/staging/gasket/apex_driver.c
+++ b/drivers/staging/gasket/apex_driver.c
@@ -570,13 +570,6 @@ static const struct pci_device_id apex_pci_ids[] = {
{ PCI_DEVICE(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID) }, { 0 }
};
-static void apex_pci_fixup_class(struct pci_dev *pdev)
-{
- pdev->class = (PCI_CLASS_SYSTEM_OTHER << 8) | pdev->class;
-}
-DECLARE_PCI_FIXUP_CLASS_HEADER(APEX_PCI_VENDOR_ID, APEX_PCI_DEVICE_ID,
- PCI_CLASS_NOT_DEFINED, 8, apex_pci_fixup_class);
-
static int apex_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 6b4b354c88aa..b5c970faf585 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -63,7 +63,7 @@ static int fc_get_pr_transport_id(
* encoded TransportID.
*/
ptr = &se_nacl->initiatorname[0];
- for (i = 0; i < 24; ) {
+ for (i = 0; i < 23; ) {
if (!strncmp(&ptr[i], ":", 1)) {
i++;
continue;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 35be1be87d2a..9425354aef99 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -2073,6 +2073,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mb->cmd_tail = 0;
mb->cmd_head = 0;
tcmu_flush_dcache_range(mb, sizeof(*mb));
+ clear_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
del_timer(&udev->cmd_timer);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 18251efd216d..379f978db13d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1725,7 +1725,6 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
u32 reg;
u8 link_state;
- u8 speed;
/*
* According to the Databook Remote wakeup request should
@@ -1735,16 +1734,13 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc)
*/
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
- speed = reg & DWC3_DSTS_CONNECTSPD;
- if ((speed == DWC3_DSTS_SUPERSPEED) ||
- (speed == DWC3_DSTS_SUPERSPEED_PLUS))
- return 0;
-
link_state = DWC3_DSTS_USBLNKST(reg);
switch (link_state) {
+ case DWC3_LINK_STATE_RESET:
case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
+ case DWC3_LINK_STATE_RESUME:
break;
default:
return -EINVAL;
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 1d0d8952a74b..58e5b015d40e 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1950,10 +1950,10 @@ static irqreturn_t usba_vbus_irq_thread(int irq, void *devid)
usba_start(udc);
} else {
udc->suspended = false;
- usba_stop(udc);
-
if (udc->driver->disconnect)
udc->driver->disconnect(&udc->gadget);
+
+ usba_stop(udc);
}
udc->vbus_prev = vbus;
}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index a4d9b5e1e50e..d49c6dc1082d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -540,7 +540,7 @@ static void bdc_req_complete(struct bdc_ep *ep, struct bdc_req *req,
{
struct bdc *bdc = ep->bdc;
- if (req == NULL || &req->queue == NULL || &req->usb_req == NULL)
+ if (req == NULL)
return;
dev_dbg(bdc->dev, "%s ep:%s status:%d\n", __func__, ep->name, status);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index e17ca8156171..a38292ef79f6 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -448,7 +448,14 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr)
{
- return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
+ int err;
+
+ err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
+ /* Some hypervisors are buggy and can return 1. */
+ if (err > 0)
+ err = GNTST_general_error;
+
+ return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index b378cd780ed5..fc5eb0f89304 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -169,7 +169,7 @@ static int afs_record_cm_probe(struct afs_call *call, struct afs_server *server)
spin_lock(&server->probe_lock);
- if (!test_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) {
+ if (!test_and_set_bit(AFS_SERVER_FL_HAVE_EPOCH, &server->flags)) {
server->cm_epoch = call->epoch;
server->probe.cm_epoch = call->epoch;
goto out;
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index d5efb1debebf..485cc3b2aaa8 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -1329,7 +1329,7 @@ extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
extern void afs_activate_volume(struct afs_volume *);
extern void afs_deactivate_volume(struct afs_volume *);
extern void afs_put_volume(struct afs_cell *, struct afs_volume *);
-extern int afs_check_volume_status(struct afs_volume *, struct key *);
+extern int afs_check_volume_status(struct afs_volume *, struct afs_fs_cursor *);
/*
* write.c
diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c
index 172ba569cd60..2a3305e42b14 100644
--- a/fs/afs/rotate.c
+++ b/fs/afs/rotate.c
@@ -192,7 +192,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
write_unlock(&vnode->volume->servers_lock);
set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
- error = afs_check_volume_status(vnode->volume, fc->key);
+ error = afs_check_volume_status(vnode->volume, fc);
if (error < 0)
goto failed_set_error;
@@ -281,7 +281,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
set_bit(AFS_VOLUME_WAIT, &vnode->volume->flags);
set_bit(AFS_VOLUME_NEEDS_UPDATE, &vnode->volume->flags);
- error = afs_check_volume_status(vnode->volume, fc->key);
+ error = afs_check_volume_status(vnode->volume, fc);
if (error < 0)
goto failed_set_error;
@@ -341,7 +341,7 @@ bool afs_select_fileserver(struct afs_fs_cursor *fc)
/* See if we need to do an update of the volume record. Note that the
* volume may have moved or even have been deleted.
*/
- error = afs_check_volume_status(vnode->volume, fc->key);
+ error = afs_check_volume_status(vnode->volume, fc);
if (error < 0)
goto failed_set_error;
diff --git a/fs/afs/server.c b/fs/afs/server.c
index ca8115ba1724..d3a9288f7556 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -595,12 +595,9 @@ bool afs_check_server_record(struct afs_fs_cursor *fc, struct afs_server *server
}
ret = wait_on_bit(&server->flags, AFS_SERVER_FL_UPDATING,
- TASK_INTERRUPTIBLE);
+ (fc->flags & AFS_FS_CURSOR_INTR) ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret == -ERESTARTSYS) {
- if (!(fc->flags & AFS_FS_CURSOR_INTR) && server->addresses) {
- _leave(" = t [intr]");
- return true;
- }
fc->error = ret;
_leave(" = f [intr]");
return false;
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 92ca5e27573b..4310336b9bb8 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -281,7 +281,7 @@ static int afs_update_volume_status(struct afs_volume *volume, struct key *key)
/*
* Make sure the volume record is up to date.
*/
-int afs_check_volume_status(struct afs_volume *volume, struct key *key)
+int afs_check_volume_status(struct afs_volume *volume, struct afs_fs_cursor *fc)
{
time64_t now = ktime_get_real_seconds();
int ret, retries = 0;
@@ -299,7 +299,7 @@ int afs_check_volume_status(struct afs_volume *volume, struct key *key)
}
if (!test_and_set_bit_lock(AFS_VOLUME_UPDATING, &volume->flags)) {
- ret = afs_update_volume_status(volume, key);
+ ret = afs_update_volume_status(volume, fc->key);
clear_bit_unlock(AFS_VOLUME_WAIT, &volume->flags);
clear_bit_unlock(AFS_VOLUME_UPDATING, &volume->flags);
wake_up_bit(&volume->flags, AFS_VOLUME_WAIT);
@@ -312,7 +312,9 @@ int afs_check_volume_status(struct afs_volume *volume, struct key *key)
return 0;
}
- ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT, TASK_INTERRUPTIBLE);
+ ret = wait_on_bit(&volume->flags, AFS_VOLUME_WAIT,
+ (fc->flags & AFS_FS_CURSOR_INTR) ?
+ TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret == -ERESTARTSYS) {
_leave(" = %d", ret);
return ret;
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 31b236c6b1f7..39230880f372 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -165,15 +165,15 @@ static void xdr_dump_bad(const __be32 *bp)
int i;
pr_notice("YFS XDR: Bad status record\n");
- for (i = 0; i < 5 * 4 * 4; i += 16) {
+ for (i = 0; i < 6 * 4 * 4; i += 16) {
memcpy(x, bp, 16);
bp += 4;
pr_notice("%03x: %08x %08x %08x %08x\n",
i, ntohl(x[0]), ntohl(x[1]), ntohl(x[2]), ntohl(x[3]));
}
- memcpy(x, bp, 4);
- pr_notice("0x50: %08x\n", ntohl(x[0]));
+ memcpy(x, bp, 8);
+ pr_notice("0x60: %08x %08x\n", ntohl(x[0]), ntohl(x[1]));
}
/*
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index a6288730210e..64b6549dd901 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -660,7 +660,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
* block has been written back to disk. (Yes, these values are
* somewhat arbitrary...)
*/
-#define RECENTCY_MIN 5
+#define RECENTCY_MIN 60
#define RECENTCY_DIRTY 300
static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b9473fcc110f..7e0c77de551b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2131,7 +2131,7 @@ static int ext4_writepage(struct page *page,
bool keep_towrite = false;
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
- ext4_invalidatepage(page, 0, PAGE_SIZE);
+ inode->i_mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
unlock_page(page);
return -EIO;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c76ffc259d19..e1782b2e2e2d 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1936,7 +1936,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
int free;
free = e4b->bd_info->bb_free;
- BUG_ON(free <= 0);
+ if (WARN_ON(free <= 0))
+ return;
i = e4b->bd_info->bb_first_free;
@@ -1959,7 +1960,8 @@ void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
}
mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex);
- BUG_ON(ex.fe_len <= 0);
+ if (WARN_ON(ex.fe_len <= 0))
+ break;
if (free < ex.fe_len) {
ext4_grp_locked_error(sb, e4b->bd_group, 0, 0,
"%d free clusters as per "
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 53d4c67a20df..d3500eaf900e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3562,7 +3562,8 @@ int ext4_calculate_overhead(struct super_block *sb)
*/
if (sbi->s_journal && !sbi->journal_bdev)
overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
- else if (ext4_has_feature_journal(sb) && !sbi->s_journal) {
+ else if (ext4_has_feature_journal(sb) && !sbi->s_journal && j_inum) {
+ /* j_inum for internal journal is non-zero */
j_inode = ext4_get_journal_inode(sb, j_inum);
if (j_inode) {
j_blocks = j_inode->i_size >> sb->s_blocksize_bits;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 1c82d7dd54df..8650a97e2ba9 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -266,6 +266,8 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
if (!nbl) {
nbl= kmalloc(sizeof(*nbl), GFP_KERNEL);
if (nbl) {
+ INIT_LIST_HEAD(&nbl->nbl_list);
+ INIT_LIST_HEAD(&nbl->nbl_lru);
fh_copy_shallow(&nbl->nbl_fh, fh);
locks_init_lock(&nbl->nbl_lock);
nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
diff --git a/fs/pnode.c b/fs/pnode.c
index 49f6d7ff2139..1106137c747a 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -261,14 +261,13 @@ static int propagate_one(struct mount *m)
child = copy_tree(last_source, last_source->mnt.mnt_root, type);
if (IS_ERR(child))
return PTR_ERR(child);
+ read_seqlock_excl(&mount_lock);
mnt_set_mountpoint(m, mp, child);
+ if (m->mnt_master != dest_master)
+ SET_MNT_MARK(m->mnt_master);
+ read_sequnlock_excl(&mount_lock);
last_dest = m;
last_source = child;
- if (m->mnt_master != dest_master) {
- read_seqlock_excl(&mount_lock);
- SET_MNT_MARK(m->mnt_master);
- read_sequnlock_excl(&mount_lock);
- }
hlist_add_head(&child->mnt_hash, list);
return count_mounts(m->mnt_ns, child);
}
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index edf43ddd7dce..7dd740e3692d 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -688,14 +688,14 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
ino_key_init(c, &key1, inum);
err = ubifs_tnc_lookup(c, &key1, ino);
- if (err)
+ if (err && err != -ENOENT)
goto out_free;
/*
* Check whether an inode can really get deleted.
* linkat() with O_TMPFILE allows rebirth of an inode.
*/
- if (ino->nlink == 0) {
+ if (err == 0 && ino->nlink == 0) {
dbg_rcvry("deleting orphaned inode %lu",
(unsigned long)inum);
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 944add5ff8e0..d95dc9b0f0bb 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -907,7 +907,12 @@ xfs_eofblocks_worker(
{
struct xfs_mount *mp = container_of(to_delayed_work(work),
struct xfs_mount, m_eofblocks_work);
+
+ if (!sb_start_write_trylock(mp->m_super))
+ return;
xfs_icache_free_eofblocks(mp, NULL);
+ sb_end_write(mp->m_super);
+
xfs_queue_eofblocks(mp);
}
@@ -934,7 +939,12 @@ xfs_cowblocks_worker(
{
struct xfs_mount *mp = container_of(to_delayed_work(work),
struct xfs_mount, m_cowblocks_work);
+
+ if (!sb_start_write_trylock(mp->m_super))
+ return;
xfs_icache_free_cowblocks(mp, NULL);
+ sb_end_write(mp->m_super);
+
xfs_queue_cowblocks(mp);
}
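The two XFS workers above gain freeze protection; a minimal kernel-style sketch of that bracketing pattern follows. demo_bg_worker is a made-up name; the helpers are the same <linux/fs.h> ones used in the hunks.

#include <linux/fs.h>

/* Sketch only: skip the background scan while the filesystem is frozen or
 * being frozen, otherwise hold write (freeze) protection around the work.
 */
static void demo_bg_worker(struct super_block *sb)
{
	if (!sb_start_write_trylock(sb))
		return;

	/* ... work that may dirty the filesystem ... */

	sb_end_write(sb);
}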
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 2a1909397cb4..c93c4b7328ef 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -2401,7 +2401,10 @@ xfs_file_ioctl(
if (error)
return error;
- return xfs_icache_free_eofblocks(mp, &keofb);
+ sb_start_write(mp->m_super);
+ error = xfs_icache_free_eofblocks(mp, &keofb);
+ sb_end_write(mp->m_super);
+ return error;
}
default:
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 0f08153b4994..6a4fd1738b08 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1053,6 +1053,7 @@ xfs_reflink_remap_extent(
uirec.br_startblock = irec->br_startblock + rlen;
uirec.br_startoff = irec->br_startoff + rlen;
uirec.br_blockcount = unmap_len - rlen;
+ uirec.br_state = irec->br_state;
unmap_len = rlen;
/* If this isn't a real mapping, we're done. */
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 6ccfd75d3c24..812108f6cc89 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -529,8 +529,9 @@ xfsaild(
{
struct xfs_ail *ailp = data;
long tout = 0; /* milliseconds */
+ unsigned int noreclaim_flag;
- current->flags |= PF_MEMALLOC;
+ noreclaim_flag = memalloc_noreclaim_save();
set_freezable();
while (1) {
@@ -601,6 +602,7 @@ xfsaild(
tout = xfsaild_push(ailp);
}
+ memalloc_noreclaim_restore(noreclaim_flag);
return 0;
}
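The xfsaild change swaps an open-coded PF_MEMALLOC for the scoped helpers; in isolation the pattern looks like the sketch below (demo_kthread_body is an illustrative name, the helpers come from <linux/sched/mm.h>).

#include <linux/sched/mm.h>

/* Sketch: mark the current task as exempt from direct reclaim for the
 * duration of the work, then restore whatever state it had before.
 */
static void demo_kthread_body(void)
{
	unsigned int noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();

	/* ... memory-allocating work that must not recurse into reclaim ... */

	memalloc_noreclaim_restore(noreclaim_flag);
}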
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 21a572469a4e..228f66347620 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2582,6 +2582,8 @@
#define PCI_VENDOR_ID_AMAZON 0x1d0f
+#define PCI_VENDOR_ID_ZHAOXIN 0x1d17
+
#define PCI_VENDOR_ID_HYGON 0x1d94
#define PCI_VENDOR_ID_HXT 0x1dbf
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c09d67edda3a..3b5cb66d8bc1 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -202,7 +202,6 @@ __printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
void show_regs_print_info(const char *log_lvl);
extern asmlinkage void dump_stack(void) __cold;
-extern void printk_safe_init(void);
extern void printk_safe_flush(void);
extern void printk_safe_flush_on_panic(void);
#else
@@ -269,10 +268,6 @@ static inline void dump_stack(void)
{
}
-static inline void printk_safe_init(void)
-{
-}
-
static inline void printk_safe_flush(void)
{
}
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 2dd0a9ed5b36..733fad7dfbed 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -97,6 +97,11 @@ struct qed_chain_u32 {
u32 cons_idx;
};
+struct addr_tbl_entry {
+ void *virt_addr;
+ dma_addr_t dma_map;
+};
+
struct qed_chain {
/* fastpath portion of the chain - required for commands such
* as produce / consume.
@@ -107,10 +112,11 @@ struct qed_chain {
/* Fastpath portions of the PBL [if exists] */
struct {
- /* Table for keeping the virtual addresses of the chain pages,
- * respectively to the physical addresses in the pbl table.
+ /* Table for keeping the virtual and physical addresses of the
+ * chain pages, respectively to the physical addresses
+ * in the pbl table.
*/
- void **pp_virt_addr_tbl;
+ struct addr_tbl_entry *pp_addr_tbl;
union {
struct qed_chain_pbl_u16 u16;
@@ -287,7 +293,7 @@ qed_chain_advance_page(struct qed_chain *p_chain,
*(u32 *)page_to_inc = 0;
page_index = *(u32 *)page_to_inc;
}
- *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
+ *p_next_elem = p_chain->pbl.pp_addr_tbl[page_index].virt_addr;
}
}
@@ -537,7 +543,7 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->pbl_sp.p_phys_table = 0;
p_chain->pbl_sp.p_virt_table = NULL;
- p_chain->pbl.pp_virt_addr_tbl = NULL;
+ p_chain->pbl.pp_addr_tbl = NULL;
}
/**
@@ -575,11 +581,11 @@ static inline void qed_chain_init_mem(struct qed_chain *p_chain,
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
void *p_virt_pbl,
dma_addr_t p_phys_pbl,
- void **pp_virt_addr_tbl)
+ struct addr_tbl_entry *pp_addr_tbl)
{
p_chain->pbl_sp.p_phys_table = p_phys_pbl;
p_chain->pbl_sp.p_virt_table = p_virt_pbl;
- p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
+ p_chain->pbl.pp_addr_tbl = pp_addr_tbl;
}
/**
@@ -644,7 +650,7 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
break;
case QED_CHAIN_MODE_PBL:
last_page_idx = p_chain->page_cnt - 1;
- p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
+ p_virt_addr = p_chain->pbl.pp_addr_tbl[last_page_idx].virt_addr;
break;
}
/* p_virt_addr points at this stage to the last page of the chain */
@@ -716,7 +722,7 @@ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
page_cnt = qed_chain_get_page_cnt(p_chain);
for (i = 0; i < page_cnt; i++)
- memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
+ memset(p_chain->pbl.pp_addr_tbl[i].virt_addr, 0,
QED_CHAIN_PAGE_SIZE);
}
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index 40f65888dd38..fddad9f5b390 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -162,6 +162,7 @@ extern bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma);
extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
struct svc_rdma_recv_ctxt *ctxt);
extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
+extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
extern int svc_rdma_recvfrom(struct svc_rqst *);
/* svc_rdma_rw.c */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f264c6509f00..6d2662c3126c 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1059,6 +1059,7 @@ struct snd_soc_card {
const struct snd_soc_dapm_route *of_dapm_routes;
int num_of_dapm_routes;
bool fully_routed;
+ bool disable_route_checks;
/* lists of probed devices belonging to this card */
struct list_head component_dev_list;
diff --git a/include/trace/events/iocost.h b/include/trace/events/iocost.h
index 7ecaa65b7106..c2f580fd371b 100644
--- a/include/trace/events/iocost.h
+++ b/include/trace/events/iocost.h
@@ -130,7 +130,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset,
TRACE_EVENT(iocost_ioc_vrate_adj,
- TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 (*missed_ppm)[2],
+ TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 *missed_ppm,
u32 rq_wait_pct, int nr_lagging, int nr_shortages,
int nr_surpluses),
@@ -155,8 +155,8 @@ TRACE_EVENT(iocost_ioc_vrate_adj,
__entry->old_vrate = atomic64_read(&ioc->vtime_rate);;
__entry->new_vrate = new_vrate;
__entry->busy_level = ioc->busy_level;
- __entry->read_missed_ppm = (*missed_ppm)[READ];
- __entry->write_missed_ppm = (*missed_ppm)[WRITE];
+ __entry->read_missed_ppm = missed_ppm[READ];
+ __entry->write_missed_ppm = missed_ppm[WRITE];
__entry->rq_wait_pct = rq_wait_pct;
__entry->nr_lagging = nr_lagging;
__entry->nr_shortages = nr_shortages;
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index 7fd11ec1c9a4..2464311b0389 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1638,17 +1638,15 @@ DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
TRACE_EVENT(svcrdma_post_send,
TP_PROTO(
- const struct ib_send_wr *wr,
- int status
+ const struct ib_send_wr *wr
),
- TP_ARGS(wr, status),
+ TP_ARGS(wr),
TP_STRUCT__entry(
__field(const void *, cqe)
__field(unsigned int, num_sge)
__field(u32, inv_rkey)
- __field(int, status)
),
TP_fast_assign(
@@ -1656,12 +1654,11 @@ TRACE_EVENT(svcrdma_post_send,
__entry->num_sge = wr->num_sge;
__entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
wr->ex.invalidate_rkey : 0;
- __entry->status = status;
),
- TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x status=%d",
+ TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x",
__entry->cqe, __entry->num_sge,
- __entry->inv_rkey, __entry->status
+ __entry->inv_rkey
)
);
@@ -1726,26 +1723,23 @@ TRACE_EVENT(svcrdma_wc_receive,
TRACE_EVENT(svcrdma_post_rw,
TP_PROTO(
const void *cqe,
- int sqecount,
- int status
+ int sqecount
),
- TP_ARGS(cqe, sqecount, status),
+ TP_ARGS(cqe, sqecount),
TP_STRUCT__entry(
__field(const void *, cqe)
__field(int, sqecount)
- __field(int, status)
),
TP_fast_assign(
__entry->cqe = cqe;
__entry->sqecount = sqecount;
- __entry->status = status;
),
- TP_printk("cqe=%p sqecount=%d status=%d",
- __entry->cqe, __entry->sqecount, __entry->status
+ TP_printk("cqe=%p sqecount=%d",
+ __entry->cqe, __entry->sqecount
)
);
@@ -1841,6 +1835,34 @@ DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
DEFINE_SQ_EVENT(full);
DEFINE_SQ_EVENT(retry);
+TRACE_EVENT(svcrdma_sq_post_err,
+ TP_PROTO(
+ const struct svcxprt_rdma *rdma,
+ int status
+ ),
+
+ TP_ARGS(rdma, status),
+
+ TP_STRUCT__entry(
+ __field(int, avail)
+ __field(int, depth)
+ __field(int, status)
+ __string(addr, rdma->sc_xprt.xpt_remotebuf)
+ ),
+
+ TP_fast_assign(
+ __entry->avail = atomic_read(&rdma->sc_sq_avail);
+ __entry->depth = rdma->sc_sq_depth;
+ __entry->status = status;
+ __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
+ ),
+
+ TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
+ __get_str(addr), __entry->avail, __entry->depth,
+ __entry->status
+ )
+);
+
#endif /* _TRACE_RPCRDMA_H */
#include <trace/define_trace.h>
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 5011259b8f67..edbbf4bfdd9e 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -1160,8 +1160,8 @@ enum {
* [TCA_TAPRIO_ATTR_SCHED_ENTRY_INTERVAL]
*/
-#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST BIT(0)
-#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD BIT(1)
+#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST _BITUL(0)
+#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD _BITUL(1)
enum {
TCA_TAPRIO_ATTR_UNSPEC,
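BIT() is a kernel-internal macro, so the exported header switches to _BITUL(); the userspace consumer sketch below carries its own _BITUL definition, which is an assumption mirroring <linux/const.h>, included only so the snippet compiles on its own.

#include <stdio.h>

#define _BITUL(x)	(1UL << (x))	/* assumed equivalent to the UAPI macro */

#define TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST	_BITUL(0)
#define TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD	_BITUL(1)

int main(void)
{
	unsigned long flags = TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD;

	if (flags & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
		printf("full offload requested\n");
	return 0;
}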
diff --git a/init/main.c b/init/main.c
index c0206c507eba..5cbb9fe937e0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -694,7 +694,6 @@ asmlinkage __visible void __init start_kernel(void)
boot_init_stack_canary();
time_init();
- printk_safe_init();
perf_event_init();
profile_init();
call_function_init();
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ef49e17ae47c..a367fc850393 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -486,7 +486,7 @@ static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
return -EOVERFLOW;
/* Make sure CPU is a valid possible cpu */
- if (!cpu_possible(key_cpu))
+ if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
return -ENODEV;
if (qsize == 0) {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e1a65303cfd7..ae27dd77a73c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1866,6 +1866,15 @@ static bool register_is_const(struct bpf_reg_state *reg)
return reg->type == SCALAR_VALUE && tnum_is_const(reg->var_off);
}
+static bool __is_pointer_value(bool allow_ptr_leaks,
+ const struct bpf_reg_state *reg)
+{
+ if (allow_ptr_leaks)
+ return false;
+
+ return reg->type != SCALAR_VALUE;
+}
+
static void save_register_state(struct bpf_func_state *state,
int spi, struct bpf_reg_state *reg)
{
@@ -2056,6 +2065,16 @@ static int check_stack_read(struct bpf_verifier_env *env,
* which resets stack/reg liveness for state transitions
*/
state->regs[value_regno].live |= REG_LIVE_WRITTEN;
+ } else if (__is_pointer_value(env->allow_ptr_leaks, reg)) {
+ /* If value_regno==-1, the caller is asking us whether
+ * it is acceptable to use this value as a SCALAR_VALUE
+ * (e.g. for XADD).
+ * We must not allow unprivileged callers to do that
+ * with spilled pointers.
+ */
+ verbose(env, "leaking pointer from stack off %d\n",
+ off);
+ return -EACCES;
}
mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
} else {
@@ -2416,15 +2435,6 @@ static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
return -EACCES;
}
-static bool __is_pointer_value(bool allow_ptr_leaks,
- const struct bpf_reg_state *reg)
-{
- if (allow_ptr_leaks)
- return false;
-
- return reg->type != SCALAR_VALUE;
-}
-
static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno)
{
return cur_regs(env) + regno;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 72d0cfd73cf1..7382fc95d41e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7052,10 +7052,17 @@ static void perf_event_task_output(struct perf_event *event,
goto out;
task_event->event_id.pid = perf_event_pid(event, task);
- task_event->event_id.ppid = perf_event_pid(event, current);
-
task_event->event_id.tid = perf_event_tid(event, task);
- task_event->event_id.ptid = perf_event_tid(event, current);
+
+ if (task_event->event_id.header.type == PERF_RECORD_EXIT) {
+ task_event->event_id.ppid = perf_event_pid(event,
+ task->real_parent);
+ task_event->event_id.ptid = perf_event_pid(event,
+ task->real_parent);
+ } else { /* PERF_RECORD_FORK */
+ task_event->event_id.ppid = perf_event_pid(event, current);
+ task_event->event_id.ptid = perf_event_tid(event, current);
+ }
task_event->event_id.time = perf_event_clock(event);
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index c8e6ab689d42..b2b0f526f249 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -23,6 +23,9 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args);
void __printk_safe_enter(void);
void __printk_safe_exit(void);
+void printk_safe_init(void);
+bool printk_percpu_data_ready(void);
+
#define printk_safe_enter_irqsave(flags) \
do { \
local_irq_save(flags); \
@@ -64,4 +67,6 @@ __printf(1, 0) int vprintk_func(const char *fmt, va_list args) { return 0; }
#define printk_safe_enter_irq() local_irq_disable()
#define printk_safe_exit_irq() local_irq_enable()
+static inline void printk_safe_init(void) { }
+static inline bool printk_percpu_data_ready(void) { return false; }
#endif /* CONFIG_PRINTK */
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index c0a5b56aea4e..971197f5d8ee 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -460,6 +460,18 @@ static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
static char *log_buf = __log_buf;
static u32 log_buf_len = __LOG_BUF_LEN;
+/*
+ * We cannot access per-CPU data (e.g. per-CPU flush irq_work) before
+ * per_cpu_areas are initialised. This variable is set to true when
+ * it's safe to access per-CPU data.
+ */
+static bool __printk_percpu_data_ready __read_mostly;
+
+bool printk_percpu_data_ready(void)
+{
+ return __printk_percpu_data_ready;
+}
+
/* Return log buffer address */
char *log_buf_addr_get(void)
{
@@ -1146,12 +1158,28 @@ static void __init log_buf_add_cpu(void)
static inline void log_buf_add_cpu(void) {}
#endif /* CONFIG_SMP */
+static void __init set_percpu_data_ready(void)
+{
+ printk_safe_init();
+ /* Make sure we set this flag only after printk_safe() init is done */
+ barrier();
+ __printk_percpu_data_ready = true;
+}
+
void __init setup_log_buf(int early)
{
unsigned long flags;
char *new_log_buf;
unsigned int free;
+ /*
+ * Some archs call setup_log_buf() multiple times - first is very
+ * early, e.g. from setup_arch(), and second - when percpu_areas
+ * are initialised.
+ */
+ if (!early)
+ set_percpu_data_ready();
+
if (log_buf != __log_buf)
return;
@@ -2966,6 +2994,9 @@ static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
void wake_up_klogd(void)
{
+ if (!printk_percpu_data_ready())
+ return;
+
preempt_disable();
if (waitqueue_active(&log_wait)) {
this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
@@ -2976,6 +3007,9 @@ void wake_up_klogd(void)
void defer_console_output(void)
{
+ if (!printk_percpu_data_ready())
+ return;
+
preempt_disable();
__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index b4045e782743..d9a659a686f3 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -27,7 +27,6 @@
* There are situations when we want to make sure that all buffers
* were handled or when IRQs are blocked.
*/
-static int printk_safe_irq_ready __read_mostly;
#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
sizeof(atomic_t) - \
@@ -51,7 +50,7 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
/* Get flushed in a more safe context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
- if (printk_safe_irq_ready)
+ if (printk_percpu_data_ready())
irq_work_queue(&s->work);
}
@@ -402,14 +401,6 @@ void __init printk_safe_init(void)
#endif
}
- /*
- * In the highly unlikely event that a NMI were to trigger at
- * this moment. Make sure IRQ work is set up before this
- * variable is set.
- */
- barrier();
- printk_safe_irq_ready = 1;
-
/* Flush pending messages that did not have scheduled IRQ works. */
printk_safe_flush();
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 195d0019e6bb..e99d326fa569 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1233,13 +1233,8 @@ static void uclamp_fork(struct task_struct *p)
return;
for_each_clamp_id(clamp_id) {
- unsigned int clamp_value = uclamp_none(clamp_id);
-
- /* By default, RT tasks always get 100% boost */
- if (unlikely(rt_task(p) && clamp_id == UCLAMP_MIN))
- clamp_value = uclamp_none(UCLAMP_MAX);
-
- uclamp_se_set(&p->uclamp_req[clamp_id], clamp_value, false);
+ uclamp_se_set(&p->uclamp_req[clamp_id],
+ uclamp_none(clamp_id), false);
}
}
diff --git a/kernel/signal.c b/kernel/signal.c
index 2b9295f2d244..595a36ab87d0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1510,15 +1510,15 @@ int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
unsigned long flags;
int ret = -EINVAL;
+ if (!valid_signal(sig))
+ return ret;
+
clear_siginfo(&info);
info.si_signo = sig;
info.si_errno = errno;
info.si_code = SI_ASYNCIO;
*((sigval_t *)&info.si_pid) = addr;
- if (!valid_signal(sig))
- return ret;
-
rcu_read_lock();
p = pid_task(pid, PIDTYPE_PID);
if (!p) {
diff --git a/mm/shmem.c b/mm/shmem.c
index 312e31196720..e71b15da1985 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2403,11 +2403,11 @@ static int shmem_mfill_atomic_pte(struct mm_struct *dst_mm,
lru_cache_add_anon(page);
- spin_lock(&info->lock);
+ spin_lock_irq(&info->lock);
info->alloced++;
inode->i_blocks += BLOCKS_PER_PAGE;
shmem_recalc_inode(inode);
- spin_unlock(&info->lock);
+ spin_unlock_irq(&info->lock);
inc_mm_counter(dst_mm, mm_counter_file(page));
page_add_file_rmap(page, false);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index da3c24ed129c..189ad4c73a3f 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -51,6 +51,7 @@
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
+#include <linux/indirect_call_wrapper.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
@@ -407,6 +408,11 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
}
EXPORT_SYMBOL(skb_kill_datagram);
+INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr,
+ size_t bytes,
+ void *data __always_unused,
+ struct iov_iter *i));
+
static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len, bool fault_short,
size_t (*cb)(const void *, size_t, void *,
@@ -420,7 +426,8 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
if (copy > 0) {
if (copy > len)
copy = len;
- n = cb(skb->data + offset, copy, data, to);
+ n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
+ skb->data + offset, copy, data, to);
offset += n;
if (n != copy)
goto short_copy;
@@ -442,8 +449,9 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
if (copy > len)
copy = len;
- n = cb(vaddr + skb_frag_off(frag) + offset - start,
- copy, data, to);
+ n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
+ vaddr + skb_frag_off(frag) + offset - start,
+ copy, data, to);
kunmap(page);
offset += n;
if (n != copy)
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index d09b3c789314..36978a0e5000 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -1257,15 +1257,15 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
sdata->u.mesh.mshcfg.rssi_threshold < rx_status->signal)
mesh_neighbour_update(sdata, mgmt->sa, &elems,
rx_status);
+
+ if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
+ !sdata->vif.csa_active)
+ ieee80211_mesh_process_chnswitch(sdata, &elems, true);
}
if (ifmsh->sync_ops)
ifmsh->sync_ops->rx_bcn_presp(sdata,
stype, mgmt, &elems, rx_status);
-
- if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
- !sdata->vif.csa_active)
- ieee80211_mesh_process_chnswitch(sdata, &elems, true);
}
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
@@ -1373,6 +1373,9 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
ieee802_11_parse_elems(pos, len - baselen, true, &elems,
mgmt->bssid, NULL);
+ if (!mesh_matches_local(sdata, &elems))
+ return;
+
ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
if (!--ifmsh->chsw_ttl)
fwd_csa = false;
diff --git a/net/netfilter/nf_nat_proto.c b/net/netfilter/nf_nat_proto.c
index 64eedc17037a..3d816a1e5442 100644
--- a/net/netfilter/nf_nat_proto.c
+++ b/net/netfilter/nf_nat_proto.c
@@ -1035,8 +1035,8 @@ int nf_nat_inet_register_fn(struct net *net, const struct nf_hook_ops *ops)
ret = nf_nat_register_fn(net, NFPROTO_IPV4, ops, nf_nat_ipv4_ops,
ARRAY_SIZE(nf_nat_ipv4_ops));
if (ret)
- nf_nat_ipv6_unregister_fn(net, ops);
-
+ nf_nat_unregister_fn(net, NFPROTO_IPV6, ops,
+ ARRAY_SIZE(nf_nat_ipv6_ops));
return ret;
}
EXPORT_SYMBOL_GPL(nf_nat_inet_register_fn);
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index a6c1349e965d..01135e54d95d 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -165,15 +165,6 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
goto error;
}
- /* we want to set the don't fragment bit */
- opt = IPV6_PMTUDISC_DO;
- ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
- (char *) &opt, sizeof(opt));
- if (ret < 0) {
- _debug("setsockopt failed");
- goto error;
- }
-
/* Fall through and set IPv4 options too otherwise we don't get
* errors from IPv4 packets sent through the IPv6 socket.
*/
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index bad3d2420344..90e263c6aa69 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -474,41 +474,21 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
skb->tstamp = ktime_get_real();
switch (conn->params.local->srx.transport.family) {
+ case AF_INET6:
case AF_INET:
opt = IP_PMTUDISC_DONT;
- ret = kernel_setsockopt(conn->params.local->socket,
- SOL_IP, IP_MTU_DISCOVER,
- (char *)&opt, sizeof(opt));
- if (ret == 0) {
- ret = kernel_sendmsg(conn->params.local->socket, &msg,
- iov, 2, len);
- conn->params.peer->last_tx_at = ktime_get_seconds();
-
- opt = IP_PMTUDISC_DO;
- kernel_setsockopt(conn->params.local->socket, SOL_IP,
- IP_MTU_DISCOVER,
- (char *)&opt, sizeof(opt));
- }
- break;
-
-#ifdef CONFIG_AF_RXRPC_IPV6
- case AF_INET6:
- opt = IPV6_PMTUDISC_DONT;
- ret = kernel_setsockopt(conn->params.local->socket,
- SOL_IPV6, IPV6_MTU_DISCOVER,
- (char *)&opt, sizeof(opt));
- if (ret == 0) {
- ret = kernel_sendmsg(conn->params.local->socket, &msg,
- iov, 2, len);
- conn->params.peer->last_tx_at = ktime_get_seconds();
-
- opt = IPV6_PMTUDISC_DO;
- kernel_setsockopt(conn->params.local->socket,
- SOL_IPV6, IPV6_MTU_DISCOVER,
- (char *)&opt, sizeof(opt));
- }
+ kernel_setsockopt(conn->params.local->socket,
+ SOL_IP, IP_MTU_DISCOVER,
+ (char *)&opt, sizeof(opt));
+ ret = kernel_sendmsg(conn->params.local->socket, &msg,
+ iov, 2, len);
+ conn->params.peer->last_tx_at = ktime_get_seconds();
+
+ opt = IP_PMTUDISC_DO;
+ kernel_setsockopt(conn->params.local->socket,
+ SOL_IP, IP_MTU_DISCOVER,
+ (char *)&opt, sizeof(opt));
break;
-#endif
default:
BUG();
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 298557744818..dc74519286be 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -897,9 +897,6 @@ int svc_send(struct svc_rqst *rqstp)
if (!xprt)
goto out;
- /* release the receive skb before sending the reply */
- xprt->xpt_ops->xpo_release_rqst(rqstp);
-
/* calculate over-all length */
xb = &rqstp->rq_res;
xb->len = xb->head[0].iov_len +
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 2934dd711715..4260924ad9db 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -605,6 +605,8 @@ svc_udp_sendto(struct svc_rqst *rqstp)
{
int error;
+ svc_release_udp_skb(rqstp);
+
error = svc_sendto(rqstp, &rqstp->rq_res);
if (error == -ECONNREFUSED)
/* ICMP error on earlier request. */
@@ -1137,6 +1139,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
int sent;
__be32 reclen;
+ svc_release_skb(rqstp);
+
/* Set up the first element of the reply kvec.
* Any other kvecs that may be in use have been taken
* care of by the server implementation itself.
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 96bccd398469..b8ee91ffedda 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -222,6 +222,26 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
svc_rdma_recv_ctxt_destroy(rdma, ctxt);
}
+/**
+ * svc_rdma_release_rqst - Release transport-specific per-rqst resources
+ * @rqstp: svc_rqst being released
+ *
+ * Ensure that the recv_ctxt is released whether or not a Reply
+ * was sent. For example, the client could close the connection,
+ * or svc_process could drop an RPC, before the Reply is sent.
+ */
+void svc_rdma_release_rqst(struct svc_rqst *rqstp)
+{
+ struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
+ struct svc_xprt *xprt = rqstp->rq_xprt;
+ struct svcxprt_rdma *rdma =
+ container_of(xprt, struct svcxprt_rdma, sc_xprt);
+
+ rqstp->rq_xprt_ctxt = NULL;
+ if (ctxt)
+ svc_rdma_recv_ctxt_put(rdma, ctxt);
+}
+
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
struct svc_rdma_recv_ctxt *ctxt)
{
@@ -756,6 +776,8 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
__be32 *p;
int ret;
+ rqstp->rq_xprt_ctxt = NULL;
+
spin_lock(&rdma_xprt->sc_rq_dto_lock);
ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
if (ctxt) {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 48fe3b16b0d9..a59912e2666d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -323,8 +323,6 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
if (atomic_sub_return(cc->cc_sqecount,
&rdma->sc_sq_avail) > 0) {
ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
- trace_svcrdma_post_rw(&cc->cc_cqe,
- cc->cc_sqecount, ret);
if (ret)
break;
return 0;
@@ -337,6 +335,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
trace_svcrdma_sq_retry(rdma);
} while (1);
+ trace_svcrdma_sq_post_err(rdma, ret);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
/* If even one was posted, there will be a completion. */
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 6fdba72f89f4..93ff7967389a 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -306,15 +306,17 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct ib_send_wr *wr)
}
svc_xprt_get(&rdma->sc_xprt);
+ trace_svcrdma_post_send(wr);
ret = ib_post_send(rdma->sc_qp, wr, NULL);
- trace_svcrdma_post_send(wr, ret);
- if (ret) {
- set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
- svc_xprt_put(&rdma->sc_xprt);
- wake_up(&rdma->sc_send_wait);
- }
- break;
+ if (ret)
+ break;
+ return 0;
}
+
+ trace_svcrdma_sq_post_err(rdma, ret);
+ set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
+ svc_xprt_put(&rdma->sc_xprt);
+ wake_up(&rdma->sc_send_wait);
return ret;
}
@@ -871,12 +873,7 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
wr_lst, rp_ch);
if (ret < 0)
goto err1;
- ret = 0;
-
-out:
- rqstp->rq_xprt_ctxt = NULL;
- svc_rdma_recv_ctxt_put(rdma, rctxt);
- return ret;
+ return 0;
err2:
if (ret != -E2BIG && ret != -EINVAL)
@@ -885,14 +882,12 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
ret = svc_rdma_send_error_msg(rdma, sctxt, rqstp);
if (ret < 0)
goto err1;
- ret = 0;
- goto out;
+ return 0;
err1:
svc_rdma_send_ctxt_put(rdma, sctxt);
err0:
trace_svcrdma_send_failed(rqstp, ret);
set_bit(XPT_CLOSE, &xprt->xpt_flags);
- ret = -ENOTCONN;
- goto out;
+ return -ENOTCONN;
}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 145a3615c319..889220f11a70 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -71,7 +71,6 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
struct sockaddr *sa, int salen,
int flags);
static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
-static void svc_rdma_release_rqst(struct svc_rqst *);
static void svc_rdma_detach(struct svc_xprt *xprt);
static void svc_rdma_free(struct svc_xprt *xprt);
static int svc_rdma_has_wspace(struct svc_xprt *xprt);
@@ -558,10 +557,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
return NULL;
}
-static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
-{
-}
-
/*
* When connected, an svc_xprt has at least two references:
*
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index a66fc0acad1e..342618a2bccb 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -297,7 +297,7 @@ define rule_dtc
endef
$(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE
- $(call if_changed_rule,dtc)
+ $(call if_changed_rule,dtc,yaml)
dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index dd77b9ffe5fd..1673479b4eef 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1200,10 +1200,8 @@ static void azx_vs_set_state(struct pci_dev *pci,
if (!disabled) {
dev_info(chip->card->dev,
"Start delayed initialization\n");
- if (azx_probe_continue(chip) < 0) {
+ if (azx_probe_continue(chip) < 0)
dev_err(chip->card->dev, "initialization error\n");
- hda->init_failed = true;
- }
}
} else {
dev_info(chip->card->dev, "%s via vga_switcheroo\n",
@@ -1336,12 +1334,15 @@ static int register_vga_switcheroo(struct azx *chip)
/*
* destructor
*/
-static int azx_free(struct azx *chip)
+static void azx_free(struct azx *chip)
{
struct pci_dev *pci = chip->pci;
struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
struct hdac_bus *bus = azx_bus(chip);
+ if (hda->freed)
+ return;
+
if (azx_has_pm_runtime(chip) && chip->running)
pm_runtime_get_noresume(&pci->dev);
chip->running = 0;
@@ -1385,9 +1386,8 @@ static int azx_free(struct azx *chip)
if (chip->driver_caps & AZX_DCAPS_I915_COMPONENT)
snd_hdac_i915_exit(bus);
- kfree(hda);
- return 0;
+ hda->freed = 1;
}
static int azx_dev_disconnect(struct snd_device *device)
@@ -1403,7 +1403,8 @@ static int azx_dev_disconnect(struct snd_device *device)
static int azx_dev_free(struct snd_device *device)
{
- return azx_free(device->device_data);
+ azx_free(device->device_data);
+ return 0;
}
#ifdef SUPPORT_VGA_SWITCHEROO
@@ -1717,7 +1718,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
if (err < 0)
return err;
- hda = kzalloc(sizeof(*hda), GFP_KERNEL);
+ hda = devm_kzalloc(&pci->dev, sizeof(*hda), GFP_KERNEL);
if (!hda) {
pci_disable_device(pci);
return -ENOMEM;
@@ -1758,7 +1759,6 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
err = azx_bus_init(chip, model[dev]);
if (err < 0) {
- kfree(hda);
pci_disable_device(pci);
return err;
}
@@ -1958,7 +1958,7 @@ static int azx_first_init(struct azx *chip)
/* codec detection */
if (!azx_bus(chip)->codec_mask) {
dev_err(card->dev, "no codecs found!\n");
- return -ENODEV;
+ /* keep running the rest for the runtime PM */
}
if (azx_acquire_irq(chip, 0) < 0)
@@ -2268,9 +2268,11 @@ static int azx_probe_continue(struct azx *chip)
#endif
/* create codec instances */
- err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
- if (err < 0)
- goto out_free;
+ if (bus->codec_mask) {
+ err = azx_probe_codecs(chip, azx_max_codecs[chip->driver_type]);
+ if (err < 0)
+ goto out_free;
+ }
#ifdef CONFIG_SND_HDA_PATCH_LOADER
if (chip->fw) {
@@ -2284,7 +2286,7 @@ static int azx_probe_continue(struct azx *chip)
#endif
}
#endif
- if ((probe_only[dev] & 1) == 0) {
+ if (bus->codec_mask && !(probe_only[dev] & 1)) {
err = azx_codec_configure(chip);
if (err < 0)
goto out_free;
@@ -2301,17 +2303,23 @@ static int azx_probe_continue(struct azx *chip)
set_default_power_save(chip);
- if (azx_has_pm_runtime(chip))
+ if (azx_has_pm_runtime(chip)) {
+ pm_runtime_use_autosuspend(&pci->dev);
+ pm_runtime_allow(&pci->dev);
pm_runtime_put_autosuspend(&pci->dev);
+ }
out_free:
- if (err < 0 || !hda->need_i915_power)
+ if (err < 0) {
+ azx_free(chip);
+ return err;
+ }
+
+ if (!hda->need_i915_power)
display_power(chip, false);
- if (err < 0)
- hda->init_failed = 1;
complete_all(&hda->probe_wait);
to_hda_bus(bus)->bus_probing = 0;
- return err;
+ return 0;
}
static void azx_remove(struct pci_dev *pci)
diff --git a/sound/pci/hda/hda_intel.h b/sound/pci/hda/hda_intel.h
index 2acfff3da1a0..3fb119f09040 100644
--- a/sound/pci/hda/hda_intel.h
+++ b/sound/pci/hda/hda_intel.h
@@ -27,6 +27,7 @@ struct hda_intel {
unsigned int use_vga_switcheroo:1;
unsigned int vga_switcheroo_registered:1;
unsigned int init_failed:1; /* delayed init failed */
+ unsigned int freed:1; /* resources already released */
bool need_i915_power:1; /* the hda controller needs i915 power */
};
diff --git a/sound/soc/codecs/tas571x.c b/sound/soc/codecs/tas571x.c
index 1554631cb397..5b7f9fcf6cbf 100644
--- a/sound/soc/codecs/tas571x.c
+++ b/sound/soc/codecs/tas571x.c
@@ -820,8 +820,10 @@ static int tas571x_i2c_probe(struct i2c_client *client,
priv->regmap = devm_regmap_init(dev, NULL, client,
priv->chip->regmap_config);
- if (IS_ERR(priv->regmap))
- return PTR_ERR(priv->regmap);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ goto disable_regs;
+ }
priv->pdn_gpio = devm_gpiod_get_optional(dev, "pdn", GPIOD_OUT_LOW);
if (IS_ERR(priv->pdn_gpio)) {
@@ -845,7 +847,7 @@ static int tas571x_i2c_probe(struct i2c_client *client,
ret = regmap_write(priv->regmap, TAS571X_OSC_TRIM_REG, 0);
if (ret)
- return ret;
+ goto disable_regs;
usleep_range(50000, 60000);
@@ -861,12 +863,20 @@ static int tas571x_i2c_probe(struct i2c_client *client,
*/
ret = regmap_update_bits(priv->regmap, TAS571X_MVOL_REG, 1, 0);
if (ret)
- return ret;
+ goto disable_regs;
}
- return devm_snd_soc_register_component(&client->dev,
+ ret = devm_snd_soc_register_component(&client->dev,
&priv->component_driver,
&tas571x_dai, 1);
+ if (ret)
+ goto disable_regs;
+
+ return ret;
+
+disable_regs:
+ regulator_bulk_disable(priv->chip->num_supply_names, priv->supplies);
+ return ret;
}
static int tas571x_i2c_remove(struct i2c_client *client)
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 55112c1bba5e..6cf0f6612bda 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -860,8 +860,7 @@ static int wm8960_hw_params(struct snd_pcm_substream *substream,
wm8960->is_stream_in_use[tx] = true;
- if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_ON &&
- !wm8960->is_stream_in_use[!tx])
+ if (!wm8960->is_stream_in_use[!tx])
return wm8960_configure_clocking(component);
return 0;
diff --git a/sound/soc/meson/axg-card.c b/sound/soc/meson/axg-card.c
index 1f698adde506..2b04ac3d8fd3 100644
--- a/sound/soc/meson/axg-card.c
+++ b/sound/soc/meson/axg-card.c
@@ -586,8 +586,10 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
if (axg_card_cpu_is_tdm_iface(dai_link->cpus->of_node))
ret = axg_card_parse_tdm(card, np, index);
- else if (axg_card_cpu_is_codec(dai_link->cpus->of_node))
+ else if (axg_card_cpu_is_codec(dai_link->cpus->of_node)) {
dai_link->params = &codec_params;
+ dai_link->no_pcm = 0; /* link is not a DPCM BE */
+ }
return ret;
}
diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
index c1a7624eaf17..2a5302f1db98 100644
--- a/sound/soc/qcom/qdsp6/q6afe-dai.c
+++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
@@ -902,6 +902,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -917,6 +919,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -931,6 +935,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -946,6 +952,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -960,6 +968,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -975,6 +985,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -989,6 +1001,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
.rates = SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_8000 |
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
@@ -1004,6 +1018,8 @@ static struct snd_soc_dai_driver q6afe_dais[] = {
SNDRV_PCM_RATE_16000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
+ .channels_min = 1,
+ .channels_max = 8,
.rate_min = 8000,
.rate_max = 48000,
},
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 9d3b546bae7b..0215e2c94bf0 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1076,8 +1076,18 @@ static int soc_probe_component(struct snd_soc_card *card,
ret = snd_soc_dapm_add_routes(dapm,
component->driver->dapm_routes,
component->driver->num_dapm_routes);
- if (ret < 0)
- goto err_probe;
+ if (ret < 0) {
+ if (card->disable_route_checks) {
+ dev_info(card->dev,
+ "%s: disable_route_checks set, ignoring errors on add_routes\n",
+ __func__);
+ } else {
+ dev_err(card->dev,
+ "%s: snd_soc_dapm_add_routes failed: %d\n",
+ __func__, ret);
+ goto err_probe;
+ }
+ }
/* see for_each_card_components */
list_add(&component->card_list, &card->component_dev_list);
@@ -2067,8 +2077,18 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
ret = snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
card->num_dapm_routes);
- if (ret < 0)
- goto probe_end;
+ if (ret < 0) {
+ if (card->disable_route_checks) {
+ dev_info(card->dev,
+ "%s: disable_route_checks set, ignoring errors on add_routes\n",
+ __func__);
+ } else {
+ dev_err(card->dev,
+ "%s: snd_soc_dapm_add_routes failed: %d\n",
+ __func__, ret);
+ goto probe_end;
+ }
+ }
ret = snd_soc_dapm_add_routes(&card->dapm, card->of_dapm_routes,
card->num_of_dapm_routes);
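The new disable_route_checks flag is opt-in per card; a hypothetical machine-driver fragment (demo_card and every field other than the flag are illustrative) showing how a driver would request the relaxed behaviour:

#include <linux/module.h>
#include <sound/soc.h>

/* Sketch: a card that tolerates an incomplete route table; a failing
 * snd_soc_dapm_add_routes() is then logged via dev_info() instead of
 * aborting the card/component probe.
 */
static struct snd_soc_card demo_card = {
	.name			= "demo-card",
	.owner			= THIS_MODULE,
	.disable_route_checks	= true,
};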
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index d3259de43712..7e965848796c 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -1543,6 +1543,9 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
return ret;
}
+ if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
+ conf = &stm32_sai_pcm_config_spdif;
+
ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0);
if (ret) {
dev_err(&pdev->dev, "Could not register pcm dma\n");
@@ -1551,15 +1554,10 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
ret = snd_soc_register_component(&pdev->dev, &stm32_component,
&sai->cpu_dai_drv, 1);
- if (ret) {
+ if (ret)
snd_dmaengine_pcm_unregister(&pdev->dev);
- return ret;
- }
-
- if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
- conf = &stm32_sai_pcm_config_spdif;
- return 0;
+ return ret;
}
static int stm32_sai_sub_remove(struct platform_device *pdev)
diff --git a/sound/soc/stm/stm32_spdifrx.c b/sound/soc/stm/stm32_spdifrx.c
index e53fb4bd66b3..9fc2a1767eb1 100644
--- a/sound/soc/stm/stm32_spdifrx.c
+++ b/sound/soc/stm/stm32_spdifrx.c
@@ -995,6 +995,8 @@ static int stm32_spdifrx_probe(struct platform_device *pdev)
if (idr == SPDIFRX_IPIDR_NUMBER) {
ret = regmap_read(spdifrx->regmap, STM32_SPDIFRX_VERR, &ver);
+ if (ret)
+ goto error;
dev_dbg(&pdev->dev, "SPDIFRX version: %lu.%lu registered\n",
FIELD_GET(SPDIFRX_VERR_MAJ_MASK, ver),
diff --git a/tools/lib/bpf/netlink.c b/tools/lib/bpf/netlink.c
index ce3ec81b71c0..88416be2bf99 100644
--- a/tools/lib/bpf/netlink.c
+++ b/tools/lib/bpf/netlink.c
@@ -137,7 +137,7 @@ int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
struct ifinfomsg ifinfo;
char attrbuf[64];
} req;
- __u32 nl_pid;
+ __u32 nl_pid = 0;
sock = libbpf_netlink_open(&nl_pid);
if (sock < 0)
@@ -254,7 +254,7 @@ int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
{
struct xdp_id_md xdp_id = {};
int sock, ret;
- __u32 nl_pid;
+ __u32 nl_pid = 0;
__u32 mask;
if (flags & ~XDP_FLAGS_MASK)
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 9fa4e1a46ca9..d6a971326f87 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -2306,14 +2306,27 @@ static bool ignore_unreachable_insn(struct instruction *insn)
!strcmp(insn->sec->name, ".altinstr_aux"))
return true;
+ if (!insn->func)
+ return false;
+
+ /*
+ * CONFIG_UBSAN_TRAP inserts a UD2 when it sees
+ * __builtin_unreachable(). The BUG() macro has an unreachable() after
+ * the UD2, which causes GCC's undefined trap logic to emit another UD2
+ * (or occasionally a JMP to UD2).
+ */
+ if (list_prev_entry(insn, list)->dead_end &&
+ (insn->type == INSN_BUG ||
+ (insn->type == INSN_JUMP_UNCONDITIONAL &&
+ insn->jump_dest && insn->jump_dest->type == INSN_BUG)))
+ return true;
+
/*
* Check if this (or a subsequent) instruction is related to
* CONFIG_UBSAN or CONFIG_KASAN.
*
* End the search at 5 instructions to avoid going into the weeds.
*/
- if (!insn->func)
- return false;
for (i = 0; i < 5; i++) {
if (is_kasan_insn(insn) || is_ubsan_insn(insn))
diff --git a/tools/objtool/orc_dump.c b/tools/objtool/orc_dump.c
index 13ccf775a83a..ba4cbb1cdd63 100644
--- a/tools/objtool/orc_dump.c
+++ b/tools/objtool/orc_dump.c
@@ -66,7 +66,7 @@ int orc_dump(const char *_objname)
char *name;
size_t nr_sections;
Elf64_Addr orc_ip_addr = 0;
- size_t shstrtab_idx;
+ size_t shstrtab_idx, strtab_idx = 0;
Elf *elf;
Elf_Scn *scn;
GElf_Shdr sh;
@@ -127,6 +127,8 @@ int orc_dump(const char *_objname)
if (!strcmp(name, ".symtab")) {
symtab = data;
+ } else if (!strcmp(name, ".strtab")) {
+ strtab_idx = i;
} else if (!strcmp(name, ".orc_unwind")) {
orc = data->d_buf;
orc_size = sh.sh_size;
@@ -138,7 +140,7 @@ int orc_dump(const char *_objname)
}
}
- if (!symtab || !orc || !orc_ip)
+ if (!symtab || !strtab_idx || !orc || !orc_ip)
return 0;
if (orc_size % sizeof(*orc) != 0) {
@@ -159,21 +161,29 @@ int orc_dump(const char *_objname)
return -1;
}
- scn = elf_getscn(elf, sym.st_shndx);
- if (!scn) {
- WARN_ELF("elf_getscn");
- return -1;
- }
-
- if (!gelf_getshdr(scn, &sh)) {
- WARN_ELF("gelf_getshdr");
- return -1;
- }
-
- name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
- if (!name || !*name) {
- WARN_ELF("elf_strptr");
- return -1;
+ if (GELF_ST_TYPE(sym.st_info) == STT_SECTION) {
+ scn = elf_getscn(elf, sym.st_shndx);
+ if (!scn) {
+ WARN_ELF("elf_getscn");
+ return -1;
+ }
+
+ if (!gelf_getshdr(scn, &sh)) {
+ WARN_ELF("gelf_getshdr");
+ return -1;
+ }
+
+ name = elf_strptr(elf, shstrtab_idx, sh.sh_name);
+ if (!name) {
+ WARN_ELF("elf_strptr");
+ return -1;
+ }
+ } else {
+ name = elf_strptr(elf, strtab_idx, sym.st_name);
+ if (!name) {
+ WARN_ELF("elf_strptr");
+ return -1;
+ }
}
printf("%s+%llx:", name, (unsigned long long)rela.r_addend);
diff --git a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
index 7f6c232cd842..ed1c2cea1dea 100644
--- a/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
+++ b/tools/testing/selftests/bpf/verifier/value_illegal_alu.c
@@ -88,6 +88,7 @@
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
+ .errstr_unpriv = "leaking pointer from stack off -8",
.errstr = "R0 invalid mem access 'inv'",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,