Re: Linux 5.10.17
From: Greg Kroah-Hartman
Date: Wed Feb 17 2021 - 05:14:19 EST
diff --git a/Makefile b/Makefile
index 9a1f26680d83..b740f9c933cb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 10
-SUBLEVEL = 16
+SUBLEVEL = 17
EXTRAVERSION =
NAME = Kleptomaniac Octopus
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index 7b7ec7b1217b..824393e1bcfb 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -329,9 +329,6 @@ clk: clock-controller@0 {
clocks = <&xtal_32k>, <&xtal>;
clock-names = "xtal_32k", "xtal";
-
- assigned-clocks = <&clk LPC32XX_CLK_HCLK_PLL>;
- assigned-clock-rates = <208000000>;
};
};
diff --git a/arch/arm/include/asm/kexec-internal.h b/arch/arm/include/asm/kexec-internal.h
new file mode 100644
index 000000000000..ecc2322db7aa
--- /dev/null
+++ b/arch/arm/include/asm/kexec-internal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ARM_KEXEC_INTERNAL_H
+#define _ARM_KEXEC_INTERNAL_H
+
+struct kexec_relocate_data {
+ unsigned long kexec_start_address;
+ unsigned long kexec_indirection_page;
+ unsigned long kexec_mach_type;
+ unsigned long kexec_r2;
+};
+
+#endif
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index a1570c8bab25..be8050b0c3df 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/mach/arch.h>
@@ -170,5 +171,9 @@ int main(void)
DEFINE(MPU_RGN_PRBAR, offsetof(struct mpu_rgn, prbar));
DEFINE(MPU_RGN_PRLAR, offsetof(struct mpu_rgn, prlar));
#endif
+ DEFINE(KEXEC_START_ADDR, offsetof(struct kexec_relocate_data, kexec_start_address));
+ DEFINE(KEXEC_INDIR_PAGE, offsetof(struct kexec_relocate_data, kexec_indirection_page));
+ DEFINE(KEXEC_MACH_TYPE, offsetof(struct kexec_relocate_data, kexec_mach_type));
+ DEFINE(KEXEC_R2, offsetof(struct kexec_relocate_data, kexec_r2));
return 0;
}
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 5d84ad333f05..2b09dad7935e 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -13,6 +13,7 @@
#include <linux/of_fdt.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+#include <asm/kexec-internal.h>
#include <asm/fncpy.h>
#include <asm/mach-types.h>
#include <asm/smp_plat.h>
@@ -22,11 +23,6 @@
extern void relocate_new_kernel(void);
extern const unsigned int relocate_new_kernel_size;
-extern unsigned long kexec_start_address;
-extern unsigned long kexec_indirection_page;
-extern unsigned long kexec_mach_type;
-extern unsigned long kexec_boot_atags;
-
static atomic_t waiting_for_crash_ipi;
/*
@@ -159,6 +155,7 @@ void (*kexec_reinit)(void);
void machine_kexec(struct kimage *image)
{
unsigned long page_list, reboot_entry_phys;
+ struct kexec_relocate_data *data;
void (*reboot_entry)(void);
void *reboot_code_buffer;
@@ -174,18 +171,17 @@ void machine_kexec(struct kimage *image)
reboot_code_buffer = page_address(image->control_code_page);
- /* Prepare parameters for reboot_code_buffer*/
- set_kernel_text_rw();
- kexec_start_address = image->start;
- kexec_indirection_page = page_list;
- kexec_mach_type = machine_arch_type;
- kexec_boot_atags = image->arch.kernel_r2;
-
/* copy our kernel relocation code to the control code page */
reboot_entry = fncpy(reboot_code_buffer,
&relocate_new_kernel,
relocate_new_kernel_size);
+ data = reboot_code_buffer + relocate_new_kernel_size;
+ data->kexec_start_address = image->start;
+ data->kexec_indirection_page = page_list;
+ data->kexec_mach_type = machine_arch_type;
+ data->kexec_r2 = image->arch.kernel_r2;
+
/* get the identity mapping physical address for the reboot code */
reboot_entry_phys = virt_to_idmap(reboot_entry);
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index 72a08786e16e..218d524360fc 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -5,14 +5,16 @@
#include <linux/linkage.h>
#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
#include <asm/kexec.h>
.align 3 /* not needed for this code, but keeps fncpy() happy */
ENTRY(relocate_new_kernel)
- ldr r0,kexec_indirection_page
- ldr r1,kexec_start_address
+ adr r7, relocate_new_kernel_end
+ ldr r0, [r7, #KEXEC_INDIR_PAGE]
+ ldr r1, [r7, #KEXEC_START_ADDR]
/*
* If there is no indirection page (we are doing crashdumps)
@@ -57,34 +59,16 @@ ENTRY(relocate_new_kernel)
2:
/* Jump to relocated kernel */
- mov lr,r1
- mov r0,#0
- ldr r1,kexec_mach_type
- ldr r2,kexec_boot_atags
- ARM( ret lr )
- THUMB( bx lr )
-
- .align
-
- .globl kexec_start_address
-kexec_start_address:
- .long 0x0
-
- .globl kexec_indirection_page
-kexec_indirection_page:
- .long 0x0
-
- .globl kexec_mach_type
-kexec_mach_type:
- .long 0x0
-
- /* phy addr of the atags for the new kernel */
- .globl kexec_boot_atags
-kexec_boot_atags:
- .long 0x0
+ mov lr, r1
+ mov r0, #0
+ ldr r1, [r7, #KEXEC_MACH_TYPE]
+ ldr r2, [r7, #KEXEC_R2]
+ ARM( ret lr )
+ THUMB( bx lr )
ENDPROC(relocate_new_kernel)
+ .align 3
relocate_new_kernel_end:
.globl relocate_new_kernel_size
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 585edbfccf6d..2f81d3af5f9a 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -693,18 +693,20 @@ struct page *get_signal_page(void)
addr = page_address(page);
+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
/* Give the signal return code some randomness */
offset = 0x200 + (get_random_int() & 0x7fc);
signal_return_offset = offset;
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);
return page;
}
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index c8d317fafe2e..de37027ad758 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -151,10 +151,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
(cx->mpu_logic_state == PWRDM_POWER_OFF);
/* Enter broadcast mode for periodic timers */
- tick_broadcast_enable();
+ RCU_NONIDLE(tick_broadcast_enable());
/* Enter broadcast mode for one-shot timers */
- tick_broadcast_enter();
+ RCU_NONIDLE(tick_broadcast_enter());
/*
* Call idle CPU PM enter notifier chain so that
@@ -166,7 +166,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
if (dev->cpu == 0) {
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
/*
* Call idle CPU cluster PM enter notifier chain
@@ -178,7 +178,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
index = 0;
cx = state_ptr + index;
pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
- omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+ RCU_NONIDLE(omap_set_pwrdm_state(mpu_pd, cx->mpu_state));
mpuss_can_lose_context = 0;
}
}
@@ -194,9 +194,9 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
mpuss_can_lose_context)
gic_dist_disable();
- clkdm_deny_idle(cpu_clkdm[1]);
- omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
- clkdm_allow_idle(cpu_clkdm[1]);
+ RCU_NONIDLE(clkdm_deny_idle(cpu_clkdm[1]));
+ RCU_NONIDLE(omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON));
+ RCU_NONIDLE(clkdm_allow_idle(cpu_clkdm[1]));
if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
mpuss_can_lose_context) {
@@ -222,7 +222,7 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev,
cpu_pm_exit();
cpu_pm_out:
- tick_broadcast_exit();
+ RCU_NONIDLE(tick_broadcast_exit());
fail:
cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 5a957a9a0984..8ad576ecd0f1 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -370,8 +370,6 @@ static int __init xen_guest_init(void)
return -ENOMEM;
}
gnttab_init();
- if (!xen_initial_domain())
- xenbus_probe();
/*
* Making sure board specific code will not set up ops for
diff --git a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
index 7cc236575ee2..c0b93813ea9a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-db845c.dts
@@ -415,7 +415,9 @@ &dsi0_phy {
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
};
&gpu {
diff --git a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
index d70aae77a6e8..888dc23a530e 100644
--- a/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
+++ b/arch/arm64/boot/dts/qcom/sdm850-lenovo-yoga-c630.dts
@@ -245,7 +245,9 @@ &cdsp_pas {
&gcc {
protected-clocks = <GCC_QSPI_CORE_CLK>,
<GCC_QSPI_CORE_CLK_SRC>,
- <GCC_QSPI_CNOC_PERIPH_AHB_CLK>;
+ <GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
+ <GCC_LPASS_Q6_AXI_CLK>,
+ <GCC_LPASS_SWAY_CLK>;
};
&gpu {
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
index 2ee07d15a6e3..1eecad724f04 100644
--- a/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3328-nanopi-r2s.dts
@@ -114,6 +114,10 @@ &cpu3 {
cpu-supply = <&vdd_arm>;
};
+&display_subsystem {
+ status = "disabled";
+};
+
&gmac2io {
assigned-clocks = <&cru SCLK_MAC2IO>, <&cru SCLK_MAC2IO_EXT>;
assigned-clock-parents = <&gmac_clk>, <&gmac_clk>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index 7a9a7aca86c6..7e69603fb41c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -234,6 +234,7 @@ pcie0: pcie@f8000000 {
reg = <0x0 0xf8000000 0x0 0x2000000>,
<0x0 0xfd000000 0x0 0x1000000>;
reg-names = "axi-base", "apb-base";
+ device_type = "pci";
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
@@ -252,7 +253,6 @@ pcie0: pcie@f8000000 {
<0 0 0 2 &pcie0_intc 1>,
<0 0 0 3 &pcie0_intc 2>,
<0 0 0 4 &pcie0_intc 3>;
- linux,pci-domain = <0>;
max-link-speed = <1>;
msi-map = <0x0 &its 0x0 0x1000>;
phys = <&pcie_phy 0>, <&pcie_phy 1>,
@@ -1278,7 +1278,6 @@ vdec: video-codec@ff660000 {
compatible = "rockchip,rk3399-vdec";
reg = <0x0 0xff660000 0x0 0x400>;
interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH 0>;
- interrupt-names = "vdpu";
clocks = <&cru ACLK_VDU>, <&cru HCLK_VDU>,
<&cru SCLK_VDU_CA>, <&cru SCLK_VDU_CORE>;
clock-names = "axi", "ahb", "cabac", "core";
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 0a52e076153b..65a522fbd874 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1696,16 +1696,12 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
#ifdef CONFIG_ARM64_MTE
static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
{
- static bool cleared_zero_page = false;
-
/*
* Clear the tags in the zero page. This needs to be done via the
* linear map which has the Tagged attribute.
*/
- if (!cleared_zero_page) {
- cleared_zero_page = true;
+ if (!test_and_set_bit(PG_mte_tagged, &ZERO_PAGE(0)->flags))
mte_clear_page_tags(lm_alias(empty_zero_page));
- }
}
#endif /* CONFIG_ARM64_MTE */
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index ef15c8a2a49d..7a66a7d9c1ff 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -239,11 +239,12 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr,
* would cause the existing tags to be cleared if the page
* was never mapped with PROT_MTE.
*/
- if (!test_bit(PG_mte_tagged, &page->flags)) {
+ if (!(vma->vm_flags & VM_MTE)) {
ret = -EOPNOTSUPP;
put_page(page);
break;
}
+ WARN_ON_ONCE(!test_bit(PG_mte_tagged, &page->flags));
/* limit access to the end of the page */
offset = offset_in_page(addr);
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c
index 85e60509f0a8..d4b53af657c8 100644
--- a/arch/h8300/kernel/asm-offsets.c
+++ b/arch/h8300/kernel/asm-offsets.c
@@ -63,6 +63,9 @@ int main(void)
OFFSET(TI_FLAGS, thread_info, flags);
OFFSET(TI_CPU, thread_info, cpu);
OFFSET(TI_PRE, thread_info, preempt_count);
+#ifdef CONFIG_PREEMPTION
+ DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
+#endif
return 0;
}
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 24d75a146e02..60846e88ae4b 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -90,7 +90,6 @@ &eth0 {
phy0: ethernet-phy@0 {
compatible = "ethernet-phy-id0007.0771";
reg = <0>;
- reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
};
};
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 2d50f76efe48..64a675c5c30a 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -135,7 +135,10 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x);
#endif /* __ASSEMBLY__ */
-#define virt_addr_valid(vaddr) (pfn_valid(virt_to_pfn(vaddr)))
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 6a7efa78eba2..0a6d497221e4 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -57,6 +57,9 @@ export BITS
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
+# Intel CET isn't enabled in the kernel
+KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
+
ifeq ($(CONFIG_X86_32),y)
BITS := 32
UTS_MACHINE := i386
@@ -127,9 +130,6 @@ else
KBUILD_CFLAGS += -mno-red-zone
KBUILD_CFLAGS += -mcmodel=kernel
-
- # Intel CET isn't enabled in the kernel
- KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
endif
ifdef CONFIG_X86_X32
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 59a1e3ce3f14..816fdbec795a 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -1159,6 +1159,7 @@ static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, 1),
X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, 1),
X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 1),
+ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 1),
{}
};
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index de776b2e6046..c65642c10aae 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1829,6 +1829,7 @@ void arch_set_max_freq_ratio(bool turbo_disabled)
arch_max_freq_ratio = turbo_disabled ? SCHED_CAPACITY_SCALE :
arch_turbo_freq_ratio;
}
+EXPORT_SYMBOL_GPL(arch_set_max_freq_ratio);
static bool turbo_disabled(void)
{
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 65e40acde71a..4fbe190c7915 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -231,6 +231,7 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
{
+ struct kvm_vcpu *vcpu = &svm->vcpu;
bool vmcb12_lma;
if ((vmcb12->save.efer & EFER_SVME) == 0)
@@ -244,18 +245,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
vmcb12_lma = (vmcb12->save.efer & EFER_LME) && (vmcb12->save.cr0 & X86_CR0_PG);
- if (!vmcb12_lma) {
- if (vmcb12->save.cr4 & X86_CR4_PAE) {
- if (vmcb12->save.cr3 & MSR_CR3_LEGACY_PAE_RESERVED_MASK)
- return false;
- } else {
- if (vmcb12->save.cr3 & MSR_CR3_LEGACY_RESERVED_MASK)
- return false;
- }
- } else {
+ if (vmcb12_lma) {
if (!(vmcb12->save.cr4 & X86_CR4_PAE) ||
!(vmcb12->save.cr0 & X86_CR0_PE) ||
- (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
+ (vmcb12->save.cr3 & vcpu->arch.cr3_lm_rsvd_bits))
return false;
}
if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 1d853fe4c778..be74e22b82ea 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -346,9 +346,6 @@ static inline bool gif_set(struct vcpu_svm *svm)
}
/* svm.c */
-#define MSR_CR3_LEGACY_RESERVED_MASK 0xfe7U
-#define MSR_CR3_LEGACY_PAE_RESERVED_MASK 0x7U
-#define MSR_CR3_LONG_MBZ_MASK 0xfff0000000000000U
#define MSR_INVALID 0xffffffffU
u32 svm_msrpm_offset(u32 msr);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 18a315bbcb79..fa5f059c2b94 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9558,6 +9558,8 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
if (!(sregs->cr4 & X86_CR4_PAE)
|| !(sregs->efer & EFER_LMA))
return -EINVAL;
+ if (sregs->cr3 & vcpu->arch.cr3_lm_rsvd_bits)
+ return -EINVAL;
} else {
/*
* Not in 64-bit mode: EFER.LMA is clear and the code
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c
index 00bfa1ebad6c..0bb3b8b44e4e 100644
--- a/arch/x86/pci/init.c
+++ b/arch/x86/pci/init.c
@@ -9,16 +9,23 @@
in the right sequence from here. */
static __init int pci_arch_init(void)
{
- int type;
-
- x86_create_pci_msi_domain();
+ int type, pcbios = 1;
type = pci_direct_probe();
if (!(pci_probe & PCI_PROBE_NOEARLY))
pci_mmcfg_early_init();
- if (x86_init.pci.arch_init && !x86_init.pci.arch_init())
+ if (x86_init.pci.arch_init)
+ pcbios = x86_init.pci.arch_init();
+
+ /*
+ * Must happen after x86_init.pci.arch_init(). Xen sets up the
+ * x86_init.irqs.create_pci_msi_domain there.
+ */
+ x86_create_pci_msi_domain();
+
+ if (!pcbios)
return 0;
pci_pcbios_init();
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index e1e8d4e3a213..8efd003540ca 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -115,31 +115,12 @@ void efi_sync_low_kernel_mappings(void)
pud_t *pud_k, *pud_efi;
pgd_t *efi_pgd = efi_mm.pgd;
- /*
- * We can share all PGD entries apart from the one entry that
- * covers the EFI runtime mapping space.
- *
- * Make sure the EFI runtime region mappings are guaranteed to
- * only span a single PGD entry and that the entry also maps
- * other important kernel regions.
- */
- MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
- MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
- (EFI_VA_END & PGDIR_MASK));
-
pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
pgd_k = pgd_offset_k(PAGE_OFFSET);
num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
- /*
- * As with PGDs, we share all P4D entries apart from the one entry
- * that covers the EFI runtime mapping space.
- */
- BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
- BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));
-
pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
pgd_k = pgd_offset_k(EFI_VA_END);
p4d_efi = p4d_offset(pgd_efi, 0);
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index 9e4eb0fc1c16..9e81d1052091 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -6332,13 +6332,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
- bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
+ bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
- bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
+ bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
@@ -6348,9 +6348,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* shortage.
*/
/* no more than ~18% of tags for async I/O */
- bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
+ bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
- bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
+ bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
for (i = 0; i < 2; i++)
for (j = 0; j < 2; j++)
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index fa4ecb915590..9d3a76604d94 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -108,7 +108,7 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux,
max_m = cmp->m.max ?: 1 << cmp->m.width;
max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1);
- if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
+ if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) {
ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p);
rate = *parent_rate / p / m;
} else {
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 1e4fbb002a31..d3e5a6fceb61 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -26,6 +26,7 @@
#include <linux/uaccess.h>
#include <acpi/processor.h>
+#include <acpi/cppc_acpi.h>
#include <asm/msr.h>
#include <asm/processor.h>
@@ -53,6 +54,7 @@ struct acpi_cpufreq_data {
unsigned int resume;
unsigned int cpu_feature;
unsigned int acpi_perf_cpu;
+ unsigned int first_perf_state;
cpumask_var_t freqdomain_cpus;
void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
@@ -221,10 +223,10 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
perf = to_perf_data(data);
- cpufreq_for_each_entry(pos, policy->freq_table)
+ cpufreq_for_each_entry(pos, policy->freq_table + data->first_perf_state)
if (msr == perf->states[pos->driver_data].status)
return pos->frequency;
- return policy->freq_table[0].frequency;
+ return policy->freq_table[data->first_perf_state].frequency;
}
static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
@@ -363,6 +365,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
struct cpufreq_policy *policy;
unsigned int freq;
unsigned int cached_freq;
+ unsigned int state;
pr_debug("%s (%d)\n", __func__, cpu);
@@ -374,7 +377,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
if (unlikely(!data || !policy->freq_table))
return 0;
- cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
+ state = to_perf_data(data)->state;
+ if (state < data->first_perf_state)
+ state = data->first_perf_state;
+
+ cached_freq = policy->freq_table[state].frequency;
freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
if (freq != cached_freq) {
/*
@@ -628,16 +635,54 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
}
#endif
+#ifdef CONFIG_ACPI_CPPC_LIB
+static u64 get_max_boost_ratio(unsigned int cpu)
+{
+ struct cppc_perf_caps perf_caps;
+ u64 highest_perf, nominal_perf;
+ int ret;
+
+ if (acpi_pstate_strict)
+ return 0;
+
+ ret = cppc_get_perf_caps(cpu, &perf_caps);
+ if (ret) {
+ pr_debug("CPU%d: Unable to get performance capabilities (%d)\n",
+ cpu, ret);
+ return 0;
+ }
+
+ highest_perf = perf_caps.highest_perf;
+ nominal_perf = perf_caps.nominal_perf;
+
+ if (!highest_perf || !nominal_perf) {
+ pr_debug("CPU%d: highest or nominal performance missing\n", cpu);
+ return 0;
+ }
+
+ if (highest_perf < nominal_perf) {
+ pr_debug("CPU%d: nominal performance above highest\n", cpu);
+ return 0;
+ }
+
+ return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
+}
+#else
+static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; }
+#endif
+
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
- unsigned int i;
- unsigned int valid_states = 0;
- unsigned int cpu = policy->cpu;
+ struct cpufreq_frequency_table *freq_table;
+ struct acpi_processor_performance *perf;
struct acpi_cpufreq_data *data;
+ unsigned int cpu = policy->cpu;
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+ unsigned int valid_states = 0;
unsigned int result = 0;
- struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
- struct acpi_processor_performance *perf;
- struct cpufreq_frequency_table *freq_table;
+ unsigned int state_count;
+ u64 max_boost_ratio;
+ unsigned int i;
#ifdef CONFIG_SMP
static int blacklisted;
#endif
@@ -750,8 +795,28 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
- freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
- GFP_KERNEL);
+ state_count = perf->state_count + 1;
+
+ max_boost_ratio = get_max_boost_ratio(cpu);
+ if (max_boost_ratio) {
+ /*
+ * Make a room for one more entry to represent the highest
+ * available "boost" frequency.
+ */
+ state_count++;
+ valid_states++;
+ data->first_perf_state = valid_states;
+ } else {
+ /*
+ * If the maximum "boost" frequency is unknown, ask the arch
+ * scale-invariance code to use the "nominal" performance for
+ * CPU utilization scaling so as to prevent the schedutil
+ * governor from selecting inadequate CPU frequencies.
+ */
+ arch_set_max_freq_ratio(true);
+ }
+
+ freq_table = kcalloc(state_count, sizeof(*freq_table), GFP_KERNEL);
if (!freq_table) {
result = -ENOMEM;
goto err_unreg;
@@ -785,6 +850,30 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
valid_states++;
}
freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
+
+ if (max_boost_ratio) {
+ unsigned int state = data->first_perf_state;
+ unsigned int freq = freq_table[state].frequency;
+
+ /*
+ * Because the loop above sorts the freq_table entries in the
+ * descending order, freq is the maximum frequency in the table.
+ * Assume that it corresponds to the CPPC nominal frequency and
+ * use it to populate the frequency field of the extra "boost"
+ * frequency entry.
+ */
+ freq_table[0].frequency = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT;
+ /*
+ * The purpose of the extra "boost" frequency entry is to make
+ * the rest of cpufreq aware of the real maximum frequency, but
+ * the way to request it is the same as for the first_perf_state
+ * entry that is expected to cover the entire range of "boost"
+ * frequencies of the CPU, so copy the driver_data value from
+ * that entry.
+ */
+ freq_table[0].driver_data = freq_table[state].driver_data;
+ }
+
policy->freq_table = freq_table;
perf->state = 0;
@@ -858,8 +947,10 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy)
{
struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data,
policy->cpu);
+ struct acpi_cpufreq_data *data = policy->driver_data;
+ unsigned int freq = policy->freq_table[data->first_perf_state].frequency;
- if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
+ if (perf->states[0].core_frequency * 1000 != freq)
pr_warn(FW_WARN "P-state 0 is not max freq\n");
}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 962cbb5e5f7f..fe6a460c4373 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1110,7 +1110,6 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
"%s called while %d clients hold a reference\n",
__func__, chan->client_count);
mutex_lock(&dma_list_mutex);
- list_del(&chan->device_node);
device->chancnt--;
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 663344987e3f..a6704838ffcb 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -325,17 +325,31 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd)
return false;
}
+static inline bool idxd_device_is_halted(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+ return (gensts.state == IDXD_DEVICE_STATE_HALT);
+}
+
/*
* This is function is only used for reset during probe and will
* poll for completion. Once the device is setup with interrupts,
* all commands will be done via interrupt completion.
*/
-void idxd_device_init_reset(struct idxd_device *idxd)
+int idxd_device_init_reset(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
union idxd_command_reg cmd;
unsigned long flags;
+ if (idxd_device_is_halted(idxd)) {
+ dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+ return -ENXIO;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
@@ -346,6 +360,7 @@ void idxd_device_init_reset(struct idxd_device *idxd)
IDXD_CMDSTS_ACTIVE)
cpu_relax();
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ return 0;
}
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
@@ -355,6 +370,12 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
DECLARE_COMPLETION_ONSTACK(done);
unsigned long flags;
+ if (idxd_device_is_halted(idxd)) {
+ dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+ *status = IDXD_CMDSTS_HW_ERR;
+ return;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = cmd_code;
cmd.operand = operand;
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 0c892cbd72e0..8b14ba0bae1c 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -214,5 +214,8 @@ int idxd_register_dma_channel(struct idxd_wq *wq)
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
- dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+ struct dma_chan *chan = &wq->dma_chan;
+
+ dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+ list_del(&chan->device_node);
}
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d48f193daacc..953ef6536aac 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -281,7 +281,7 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
-void idxd_device_init_reset(struct idxd_device *idxd);
+int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 0a4432b063b5..fa8c4228f358 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -289,7 +289,10 @@ static int idxd_probe(struct idxd_device *idxd)
int rc;
dev_dbg(dev, "%s entered and resetting device\n", __func__);
- idxd_device_init_reset(idxd);
+ rc = idxd_device_init_reset(idxd);
+ if (rc < 0)
+ return rc;
+
dev_dbg(dev, "IDXD reset complete\n");
idxd_read_caps(idxd);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 17a65a13fb64..552e2e270705 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -53,19 +53,14 @@ irqreturn_t idxd_irq_handler(int vec, void *data)
return IRQ_WAKE_THREAD;
}
-irqreturn_t idxd_misc_thread(int vec, void *data)
+static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
- struct idxd_irq_entry *irq_entry = data;
- struct idxd_device *idxd = irq_entry->idxd;
struct device *dev = &idxd->pdev->dev;
union gensts_reg gensts;
- u32 cause, val = 0;
+ u32 val = 0;
int i;
bool err = false;
- cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
- iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-
if (cause & IDXD_INTC_ERR) {
spin_lock_bh(&idxd->dev_lock);
for (i = 0; i < 4; i++)
@@ -123,7 +118,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
val);
if (!err)
- goto out;
+ return 0;
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
@@ -144,10 +139,33 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
spin_unlock_bh(&idxd->dev_lock);
+ return -ENXIO;
}
}
- out:
+ return 0;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+ int rc;
+ u32 cause;
+
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (cause)
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+ while (cause) {
+ rc = process_misc_interrupts(idxd, cause);
+ if (rc < 0)
+ break;
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (cause)
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ }
+
idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index f20ac3d69424..14751c7ccd1f 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -428,8 +428,9 @@ config GPIO_MXC
select GENERIC_IRQ_CHIP
config GPIO_MXS
- def_bool y
+ bool "Freescale MXS GPIO support" if COMPILE_TEST
depends on ARCH_MXS || COMPILE_TEST
+ default y if ARCH_MXS
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 226da8df6f10..94d9fa0d6aa7 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -25,6 +25,9 @@
/* Maximum value for gpio line identifiers */
#define EP93XX_GPIO_LINE_MAX 63
+/* Number of GPIO chips in EP93XX */
+#define EP93XX_GPIO_CHIP_NUM 8
+
/* Maximum value for irq capable line identifiers */
#define EP93XX_GPIO_LINE_MAX_IRQ 23
@@ -34,74 +37,75 @@
*/
#define EP93XX_GPIO_F_IRQ_BASE 80
-struct ep93xx_gpio {
- void __iomem *base;
- struct gpio_chip gc[8];
+struct ep93xx_gpio_irq_chip {
+ struct irq_chip ic;
+ u8 irq_offset;
+ u8 int_unmasked;
+ u8 int_enabled;
+ u8 int_type1;
+ u8 int_type2;
+ u8 int_debounce;
};
-/*************************************************************************
- * Interrupt handling for EP93xx on-chip GPIOs
- *************************************************************************/
-static unsigned char gpio_int_unmasked[3];
-static unsigned char gpio_int_enabled[3];
-static unsigned char gpio_int_type1[3];
-static unsigned char gpio_int_type2[3];
-static unsigned char gpio_int_debounce[3];
-
-/* Port ordering is: A B F */
-static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c };
-static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 };
-static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 };
-static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 };
-static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 };
-
-static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, unsigned port)
-{
- BUG_ON(port > 2);
+struct ep93xx_gpio_chip {
+ struct gpio_chip gc;
+ struct ep93xx_gpio_irq_chip *eic;
+};
- writeb_relaxed(0, epg->base + int_en_register_offset[port]);
+struct ep93xx_gpio {
+ void __iomem *base;
+ struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM];
+};
- writeb_relaxed(gpio_int_type2[port],
- epg->base + int_type2_register_offset[port]);
+#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc)
- writeb_relaxed(gpio_int_type1[port],
- epg->base + int_type1_register_offset[port]);
+static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
+{
+ struct ep93xx_gpio_chip *egc = to_ep93xx_gpio_chip(gc);
- writeb(gpio_int_unmasked[port] & gpio_int_enabled[port],
- epg->base + int_en_register_offset[port]);
+ return egc->eic;
}
-static int ep93xx_gpio_port(struct gpio_chip *gc)
+/*************************************************************************
+ * Interrupt handling for EP93xx on-chip GPIOs
+ *************************************************************************/
+#define EP93XX_INT_TYPE1_OFFSET 0x00
+#define EP93XX_INT_TYPE2_OFFSET 0x04
+#define EP93XX_INT_EOI_OFFSET 0x08
+#define EP93XX_INT_EN_OFFSET 0x0c
+#define EP93XX_INT_STATUS_OFFSET 0x10
+#define EP93XX_INT_RAW_STATUS_OFFSET 0x14
+#define EP93XX_INT_DEBOUNCE_OFFSET 0x18
+
+static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg,
+ struct ep93xx_gpio_irq_chip *eic)
{
- struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = 0;
+ writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
- while (port < ARRAY_SIZE(epg->gc) && gc != &epg->gc[port])
- port++;
+ writeb_relaxed(eic->int_type2,
+ epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET);
- /* This should not happen but is there as a last safeguard */
- if (port == ARRAY_SIZE(epg->gc)) {
- pr_crit("can't find the GPIO port\n");
- return 0;
- }
+ writeb_relaxed(eic->int_type1,
+ epg->base + eic->irq_offset + EP93XX_INT_TYPE1_OFFSET);
- return port;
+ writeb_relaxed(eic->int_unmasked & eic->int_enabled,
+ epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET);
}
static void ep93xx_gpio_int_debounce(struct gpio_chip *gc,
unsigned int offset, bool enable)
{
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
int port_mask = BIT(offset);
if (enable)
- gpio_int_debounce[port] |= port_mask;
+ eic->int_debounce |= port_mask;
else
- gpio_int_debounce[port] &= ~port_mask;
+ eic->int_debounce &= ~port_mask;
- writeb(gpio_int_debounce[port],
- epg->base + int_debounce_register_offset[port]);
+ writeb(eic->int_debounce,
+ epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET);
}
static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
@@ -122,12 +126,12 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
*/
stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS);
for_each_set_bit(offset, &stat, 8)
- generic_handle_irq(irq_find_mapping(epg->gc[0].irq.domain,
+ generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain,
offset));
stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS);
for_each_set_bit(offset, &stat, 8)
- generic_handle_irq(irq_find_mapping(epg->gc[1].irq.domain,
+ generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain,
offset));
chained_irq_exit(irqchip, desc);
@@ -153,52 +157,52 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
static void ep93xx_gpio_irq_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
- gpio_int_type2[port] ^= port_mask; /* switch edge direction */
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_type2 ^= port_mask; /* switch edge direction */
+ ep93xx_gpio_update_int_params(epg, eic);
}
- writeb(port_mask, epg->base + eoi_register_offset[port]);
+ writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
int port_mask = BIT(d->irq & 7);
if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH)
- gpio_int_type2[port] ^= port_mask; /* switch edge direction */
+ eic->int_type2 ^= port_mask; /* switch edge direction */
- gpio_int_unmasked[port] &= ~port_mask;
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_unmasked &= ~port_mask;
+ ep93xx_gpio_update_int_params(epg, eic);
- writeb(port_mask, epg->base + eoi_register_offset[port]);
+ writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET);
}
static void ep93xx_gpio_irq_mask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
- gpio_int_unmasked[port] &= ~BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_unmasked &= ~BIT(d->irq & 7);
+ ep93xx_gpio_update_int_params(epg, eic);
}
static void ep93xx_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
- gpio_int_unmasked[port] |= BIT(d->irq & 7);
- ep93xx_gpio_update_int_params(epg, port);
+ eic->int_unmasked |= BIT(d->irq & 7);
+ ep93xx_gpio_update_int_params(epg, eic);
}
/*
@@ -209,8 +213,8 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d)
static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc);
struct ep93xx_gpio *epg = gpiochip_get_data(gc);
- int port = ep93xx_gpio_port(gc);
int offset = d->irq & 7;
int port_mask = BIT(offset);
irq_flow_handler_t handler;
@@ -219,32 +223,32 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- gpio_int_type1[port] |= port_mask;
- gpio_int_type2[port] |= port_mask;
+ eic->int_type1 |= port_mask;
+ eic->int_type2 |= port_mask;
handler = handle_edge_irq;
break;
case IRQ_TYPE_EDGE_FALLING:
- gpio_int_type1[port] |= port_mask;
- gpio_int_type2[port] &= ~port_mask;
+ eic->int_type1 |= port_mask;
+ eic->int_type2 &= ~port_mask;
handler = handle_edge_irq;
break;
case IRQ_TYPE_LEVEL_HIGH:
- gpio_int_type1[port] &= ~port_mask;
- gpio_int_type2[port] |= port_mask;
+ eic->int_type1 &= ~port_mask;
+ eic->int_type2 |= port_mask;
handler = handle_level_irq;
break;
case IRQ_TYPE_LEVEL_LOW:
- gpio_int_type1[port] &= ~port_mask;
- gpio_int_type2[port] &= ~port_mask;
+ eic->int_type1 &= ~port_mask;
+ eic->int_type2 &= ~port_mask;
handler = handle_level_irq;
break;
case IRQ_TYPE_EDGE_BOTH:
- gpio_int_type1[port] |= port_mask;
+ eic->int_type1 |= port_mask;
/* set initial polarity based on current input level */
if (gc->get(gc, offset))
- gpio_int_type2[port] &= ~port_mask; /* falling */
+ eic->int_type2 &= ~port_mask; /* falling */
else
- gpio_int_type2[port] |= port_mask; /* rising */
+ eic->int_type2 |= port_mask; /* rising */
handler = handle_edge_irq;
break;
default:
@@ -253,22 +257,13 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type)
irq_set_handler_locked(d, handler);
- gpio_int_enabled[port] |= port_mask;
+ eic->int_enabled |= port_mask;
- ep93xx_gpio_update_int_params(epg, port);
+ ep93xx_gpio_update_int_params(epg, eic);
return 0;
}
-static struct irq_chip ep93xx_gpio_irq_chip = {
- .name = "GPIO",
- .irq_ack = ep93xx_gpio_irq_ack,
- .irq_mask_ack = ep93xx_gpio_irq_mask_ack,
- .irq_mask = ep93xx_gpio_irq_mask,
- .irq_unmask = ep93xx_gpio_irq_unmask,
- .irq_set_type = ep93xx_gpio_irq_type,
-};
-
/*************************************************************************
* gpiolib interface for EP93xx on-chip GPIOs
*************************************************************************/
@@ -276,17 +271,19 @@ struct ep93xx_gpio_bank {
const char *label;
int data;
int dir;
+ int irq;
int base;
bool has_irq;
bool has_hierarchical_irq;
unsigned int irq_base;
};
-#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \
+#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \
{ \
.label = _label, \
.data = _data, \
.dir = _dir, \
+ .irq = _irq, \
.base = _base, \
.has_irq = _has_irq, \
.has_hierarchical_irq = _has_hier, \
@@ -295,16 +292,16 @@ struct ep93xx_gpio_bank {
static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = {
/* Bank A has 8 IRQs */
- EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true, false, 64),
+ EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, 64),
/* Bank B has 8 IRQs */
- EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72),
- EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0),
- EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0),
- EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0),
+ EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, 72),
+ EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0),
+ EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0),
+ EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0),
/* Bank F has 8 IRQs */
- EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0),
- EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0),
- EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0),
+ EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, 0),
+ EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0),
+ EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0),
};
static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset,
@@ -326,13 +323,23 @@ static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset)
return EP93XX_GPIO_F_IRQ_BASE + offset;
}
-static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
+static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic)
+{
+ ic->irq_ack = ep93xx_gpio_irq_ack;
+ ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack;
+ ic->irq_mask = ep93xx_gpio_irq_mask;
+ ic->irq_unmask = ep93xx_gpio_irq_unmask;
+ ic->irq_set_type = ep93xx_gpio_irq_type;
+}
+
+static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc,
struct platform_device *pdev,
struct ep93xx_gpio *epg,
struct ep93xx_gpio_bank *bank)
{
void __iomem *data = epg->base + bank->data;
void __iomem *dir = epg->base + bank->dir;
+ struct gpio_chip *gc = &egc->gc;
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
int err;
@@ -346,8 +353,21 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
girq = &gc->irq;
if (bank->has_irq || bank->has_hierarchical_irq) {
+ struct irq_chip *ic;
+
gc->set_config = ep93xx_gpio_set_config;
- girq->chip = &ep93xx_gpio_irq_chip;
+ egc->eic = devm_kcalloc(dev, 1,
+ sizeof(*egc->eic),
+ GFP_KERNEL);
+ if (!egc->eic)
+ return -ENOMEM;
+ egc->eic->irq_offset = bank->irq;
+ ic = &egc->eic->ic;
+ ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label);
+ if (!ic->name)
+ return -ENOMEM;
+ ep93xx_init_irq_chip(dev, ic);
+ girq->chip = ic;
}
if (bank->has_irq) {
@@ -389,7 +409,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc,
gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i;
irq_set_chip_data(gpio_irq, &epg->gc[5]);
irq_set_chip_and_handler(gpio_irq,
- &ep93xx_gpio_irq_chip,
+ girq->chip,
handle_level_irq);
irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST);
}
@@ -415,7 +435,7 @@ static int ep93xx_gpio_probe(struct platform_device *pdev)
return PTR_ERR(epg->base);
for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) {
- struct gpio_chip *gc = &epg->gc[i];
+ struct ep93xx_gpio_chip *gc = &epg->gc[i];
struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i];
if (ep93xx_gpio_add_bank(gc, pdev, epg, bank))
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 580880212e55..fdca76fc598c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1792,8 +1792,8 @@ static void emulated_link_detect(struct dc_link *link)
link->type = dc_connection_none;
prev_sink = link->local_sink;
- if (prev_sink != NULL)
- dc_sink_retain(prev_sink);
+ if (prev_sink)
+ dc_sink_release(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
@@ -2261,8 +2261,10 @@ void amdgpu_dm_update_connector_after_detect(
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
- if (aconnector->dc_sink)
+ if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
+ dc_sink_release(aconnector->dc_sink);
+ }
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
@@ -7870,14 +7872,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(conn_state);
if (ret)
- goto err;
+ goto out;
/* Attach crtc to drm_atomic_state*/
crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
ret = PTR_ERR_OR_ZERO(crtc_state);
if (ret)
- goto err;
+ goto out;
/* force a restore */
crtc_state->mode_changed = true;
@@ -7887,17 +7889,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector)
ret = PTR_ERR_OR_ZERO(plane_state);
if (ret)
- goto err;
-
+ goto out;
/* Call commit internally with the state we just constructed */
ret = drm_atomic_commit(state);
- if (!ret)
- return 0;
-err:
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+out:
drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index eee19edeeee5..1e448f1b39a1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -828,6 +828,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (computed_streams[i])
continue;
+ if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
+
mutex_lock(&aconnector->mst_mgr.lock);
if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) {
mutex_unlock(&aconnector->mst_mgr.lock);
@@ -845,7 +848,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream);
+ if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+ return false;
}
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 17e6fd820139..32b73ea86673 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -877,13 +877,13 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte
switch (dpcd_aux_read_interval) {
case 0x01:
- aux_rd_interval_us = 400;
+ aux_rd_interval_us = 4000;
break;
case 0x02:
- aux_rd_interval_us = 4000;
+ aux_rd_interval_us = 8000;
break;
case 0x03:
- aux_rd_interval_us = 8000;
+ aux_rd_interval_us = 12000;
break;
case 0x04:
aux_rd_interval_us = 16000;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index a92f6e4b2eb8..121643ddb719 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -297,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 11.6,
- .sr_enter_plus_exit_time_us = 13.9,
+ .sr_exit_time_us = 8.6,
+ .sr_enter_plus_exit_time_us = 10.9,
.urgent_latency_us = 4.0,
.urgent_latency_pixel_data_only_us = 4.0,
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 20441127783b..c99385440412 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -902,6 +902,8 @@ enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
DCN20_CLK_SRC_PLL2,
+ DCN20_CLK_SRC_PLL3,
+ DCN20_CLK_SRC_PLL4,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -1880,6 +1882,14 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL2,
&clk_src_regs[2], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL3] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL3,
+ &clk_src_regs[3], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL4] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL4,
+ &clk_src_regs[4], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 7749b0ceabba..17bdad95978a 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -4224,6 +4224,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
switch (port->pdt) {
case DP_PEER_DEVICE_NONE:
+ break;
case DP_PEER_DEVICE_MST_BRANCHING:
if (!port->mcs)
ret = connector_status_connected;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 0095c8cac9b4..b73d51e766ce 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -182,6 +182,7 @@ struct intel_overlay {
struct intel_crtc *crtc;
struct i915_vma *vma;
struct i915_vma *old_vma;
+ struct intel_frontbuffer *frontbuffer;
bool active;
bool pfit_active;
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
@@ -282,21 +283,19 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
struct i915_vma *vma)
{
enum pipe pipe = overlay->crtc->pipe;
- struct intel_frontbuffer *from = NULL, *to = NULL;
+ struct intel_frontbuffer *frontbuffer = NULL;
drm_WARN_ON(&overlay->i915->drm, overlay->old_vma);
- if (overlay->vma)
- from = intel_frontbuffer_get(overlay->vma->obj);
if (vma)
- to = intel_frontbuffer_get(vma->obj);
+ frontbuffer = intel_frontbuffer_get(vma->obj);
- intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe));
+ intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
+ INTEL_FRONTBUFFER_OVERLAY(pipe));
- if (to)
- intel_frontbuffer_put(to);
- if (from)
- intel_frontbuffer_put(from);
+ if (overlay->frontbuffer)
+ intel_frontbuffer_put(overlay->frontbuffer);
+ overlay->frontbuffer = frontbuffer;
intel_frontbuffer_flip_prepare(overlay->i915,
INTEL_FRONTBUFFER_OVERLAY(pipe));
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 8f67aef18b2d..1d81da31796e 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -23,36 +23,6 @@ static const char *tc_port_mode_name(enum tc_port_mode mode)
return names[mode];
}
-static void
-tc_port_load_fia_params(struct drm_i915_private *i915,
- struct intel_digital_port *dig_port)
-{
- enum port port = dig_port->base.port;
- enum tc_port tc_port = intel_port_to_tc(i915, port);
- u32 modular_fia;
-
- if (INTEL_INFO(i915)->display.has_modular_fia) {
- modular_fia = intel_uncore_read(&i915->uncore,
- PORT_TX_DFLEXDPSP(FIA1));
- drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff);
- modular_fia &= MODULAR_FIA_MASK;
- } else {
- modular_fia = 0;
- }
-
- /*
- * Each Modular FIA instance houses 2 TC ports. In SOC that has more
- * than two TC ports, there are multiple instances of Modular FIA.
- */
- if (modular_fia) {
- dig_port->tc_phy_fia = tc_port / 2;
- dig_port->tc_phy_fia_idx = tc_port % 2;
- } else {
- dig_port->tc_phy_fia = FIA1;
- dig_port->tc_phy_fia_idx = tc_port;
- }
-}
-
static enum intel_display_power_domain
tc_cold_get_power_domain(struct intel_digital_port *dig_port)
{
@@ -646,6 +616,43 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port)
mutex_unlock(&dig_port->tc_lock);
}
+static bool
+tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ intel_wakeref_t wakeref;
+ u32 val;
+
+ if (!INTEL_INFO(i915)->display.has_modular_fia)
+ return false;
+
+ wakeref = tc_cold_block(dig_port);
+ val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
+ tc_cold_unblock(dig_port, wakeref);
+
+ drm_WARN_ON(&i915->drm, val == 0xffffffff);
+
+ return val & MODULAR_FIA_MASK;
+}
+
+static void
+tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
+{
+ enum port port = dig_port->base.port;
+ enum tc_port tc_port = intel_port_to_tc(i915, port);
+
+ /*
+ * Each Modular FIA instance houses 2 TC ports. In SOC that has more
+ * than two TC ports, there are multiple instances of Modular FIA.
+ */
+ if (tc_has_modular_fia(i915, dig_port)) {
+ dig_port->tc_phy_fia = tc_port / 2;
+ dig_port->tc_phy_fia_idx = tc_port % 2;
+ } else {
+ dig_port->tc_phy_fia = FIA1;
+ dig_port->tc_phy_fia_idx = tc_port;
+ }
+}
+
void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index eaaf5d70e352..1e643bc7e786 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -689,6 +689,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon,
SUN4I_TCON1_BASIC5_V_SYNC(vsync) |
SUN4I_TCON1_BASIC5_H_SYNC(hsync));
+ /* Setup the polarity of multiple signals */
+ if (tcon->quirks->polarity_in_ch0) {
+ val = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val);
+ } else {
+ /* according to vendor driver, this bit must be always set */
+ val = SUN4I_TCON1_IO_POL_UNKNOWN;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE;
+
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE;
+
+ regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val);
+ }
+
/* Map output pins to channel 1 */
regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG,
SUN4I_TCON_GCTL_IOMAP_MASK,
@@ -1517,6 +1541,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
.has_channel_1 = true,
+ .polarity_in_ch0 = true,
.set_mux = sun8i_r40_tcon_tv_set_mux,
};
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index cfbf4e6c1679..ee555318e3c2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -153,6 +153,11 @@
#define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff)
#define SUN4I_TCON1_IO_POL_REG 0xf0
+/* there is no documentation about this bit */
+#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26)
+#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25)
+#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24)
+
#define SUN4I_TCON1_IO_TRI_REG 0xf4
#define SUN4I_TCON_ECC_FIFO_REG 0xf8
@@ -235,6 +240,7 @@ struct sun4i_tcon_quirks {
bool needs_de_be_mux; /* sun6i needs mux to select backend */
bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */
bool supports_lvds; /* Does the TCON support an LVDS output? */
+ bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */
u8 dclk_min_div; /* minimum divider for TCON0 DCLK */
/* callback to handle tcon muxing options */
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 92add2cef2e7..bbdfd5e26ec8 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -21,8 +21,7 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder,
{
struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder);
- if (hdmi->quirks->set_rate)
- clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
+ clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000);
}
static const struct drm_encoder_helper_funcs
@@ -48,11 +47,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data,
{
/*
* Controller support maximum of 594 MHz, which correlates to
- * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than
- * 340 MHz scrambling has to be enabled. Because scrambling is
- * not yet implemented, just limit to 340 MHz for now.
+ * 4K@60Hz 4:4:4 or RGB.
*/
- if (mode->clock > 340000)
+ if (mode->clock > 594000)
return MODE_CLOCK_HIGH;
return MODE_OK;
@@ -295,7 +292,6 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev)
static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = {
.mode_valid = sun8i_dw_hdmi_mode_valid_a83t,
- .set_rate = true,
};
static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index d983746fa194..d4b55af0592f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -179,7 +179,6 @@ struct sun8i_dw_hdmi_quirks {
enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data,
const struct drm_display_info *info,
const struct drm_display_mode *mode);
- unsigned int set_rate : 1;
unsigned int use_drm_infoframe : 1;
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index 35c2133724e2..9994edf67509 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = {
static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = {
/* pixelclk bpp8 bpp10 bpp12 */
- { 25175000, { 0x0000, 0x0000, 0x0000 }, },
{ 27000000, { 0x0012, 0x0000, 0x0000 }, },
- { 59400000, { 0x0008, 0x0008, 0x0008 }, },
- { 72000000, { 0x0008, 0x0008, 0x001b }, },
- { 74250000, { 0x0013, 0x0013, 0x0013 }, },
- { 90000000, { 0x0008, 0x001a, 0x001b }, },
- { 118800000, { 0x001b, 0x001a, 0x001b }, },
- { 144000000, { 0x001b, 0x001a, 0x0034 }, },
- { 180000000, { 0x001b, 0x0033, 0x0034 }, },
- { 216000000, { 0x0036, 0x0033, 0x0034 }, },
- { 237600000, { 0x0036, 0x0033, 0x001b }, },
- { 288000000, { 0x0036, 0x001b, 0x001b }, },
- { 297000000, { 0x0019, 0x001b, 0x0019 }, },
- { 330000000, { 0x0036, 0x001b, 0x001b }, },
- { 594000000, { 0x003f, 0x001b, 0x001b }, },
+ { 74250000, { 0x0013, 0x001a, 0x001b }, },
+ { 148500000, { 0x0019, 0x0033, 0x0034 }, },
+ { 297000000, { 0x0019, 0x001b, 0x001b }, },
+ { 594000000, { 0x0010, 0x001b, 0x001b }, },
{ ~0UL, { 0x0000, 0x0000, 0x0000 }, }
};
static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = {
/*pixelclk symbol term vlev*/
- { 74250000, 0x8009, 0x0004, 0x0232},
- { 148500000, 0x8029, 0x0004, 0x0273},
- { 594000000, 0x8039, 0x0004, 0x014a},
+ { 27000000, 0x8009, 0x0007, 0x02b0 },
+ { 74250000, 0x8009, 0x0006, 0x022d },
+ { 148500000, 0x8029, 0x0006, 0x0270 },
+ { 297000000, 0x8039, 0x0005, 0x01ab },
+ { 594000000, 0x8029, 0x0000, 0x008a },
{ ~0UL, 0x0000, 0x0000, 0x0000}
};
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 5612cab55227..af4b8944a603 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -220,7 +220,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
__drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
-static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state)
{
if (vc4_state->dlist_count == vc4_state->dlist_size) {
u32 new_size = max(4u, vc4_state->dlist_count * 2);
@@ -235,7 +235,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
vc4_state->dlist_size = new_size;
}
- vc4_state->dlist[vc4_state->dlist_count++] = val;
+ vc4_state->dlist_count++;
+}
+
+static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
+{
+ unsigned int idx = vc4_state->dlist_count;
+
+ vc4_dlist_counter_increment(vc4_state);
+ vc4_state->dlist[idx] = val;
}
/* Returns the scl0/scl1 field based on whether the dimensions need to
@@ -978,8 +986,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
* be set when calling vc4_plane_allocate_lbm().
*/
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
- vc4_state->y_scaling[1] != VC4_SCALING_NONE)
- vc4_state->lbm_offset = vc4_state->dlist_count++;
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
+ vc4_state->lbm_offset = vc4_state->dlist_count;
+ vc4_dlist_counter_increment(vc4_state);
+ }
if (num_planes > 1) {
/* Emit Cb/Cr as channel 0 and Y as channel
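
The vc4 change above splits the old write helper into a grow step and a write step, so the LBM slot reservation now goes through the same bounds-checked path instead of bumping dlist_count directly. A rough userspace model of the pattern (kernel allocation replaced with malloc/memcpy, names are mine, not the driver's):

#include <stdlib.h>
#include <string.h>

struct dlist {
	unsigned int *buf;
	unsigned int count;
	unsigned int size;
};

/* Model of vc4_dlist_counter_increment(): grow the array if needed, then bump. */
static void dlist_counter_increment(struct dlist *d)
{
	if (d->count == d->size) {
		unsigned int new_size = d->size ? d->size * 2 : 4;
		unsigned int *n = malloc(new_size * sizeof(*n));

		if (d->buf) {
			memcpy(n, d->buf, d->count * sizeof(*n));
			free(d->buf);
		}
		d->buf = n;
		d->size = new_size;
	}
	d->count++;
}

/* Model of vc4_dlist_write(): capture the slot before growing, write after,
 * because growing may move the backing store. */
void dlist_write(struct dlist *d, unsigned int val)
{
	unsigned int idx = d->count;

	dlist_counter_increment(d);
	d->buf[idx] = val;
}
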
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index f41f51a176a1..674735334547 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -57,6 +57,8 @@
#define STM32F7_I2C_CR1_RXDMAEN BIT(15)
#define STM32F7_I2C_CR1_TXDMAEN BIT(14)
#define STM32F7_I2C_CR1_ANFOFF BIT(12)
+#define STM32F7_I2C_CR1_DNF_MASK GENMASK(11, 8)
+#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8)
#define STM32F7_I2C_CR1_ERRIE BIT(7)
#define STM32F7_I2C_CR1_TCIE BIT(6)
#define STM32F7_I2C_CR1_STOPIE BIT(5)
@@ -160,7 +162,7 @@ enum {
};
#define STM32F7_I2C_DNF_DEFAULT 0
-#define STM32F7_I2C_DNF_MAX 16
+#define STM32F7_I2C_DNF_MAX 15
#define STM32F7_I2C_ANALOG_FILTER_ENABLE 1
#define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */
@@ -725,6 +727,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev)
else
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_ANFOFF);
+
+ /* Program the Digital Filter */
+ stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_DNF_MASK);
+ stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
+ STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf));
+
stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
}
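
The DNF programming above is a plain clear-then-set of a four-bit field, which is also why DNF_MAX drops from 16 to 15: CR1[11:8] can only hold 0..15. Sketch of the bit manipulation on a plain variable (no MMIO, macro names shortened):

#include <stdint.h>

#define CR1_DNF_MASK	(0xfu << 8)		/* GENMASK(11, 8) */
#define CR1_DNF(n)	(((n) & 0xfu) << 8)

/* Clear the previous filter length, then program the new one (0..15 periods). */
uint32_t cr1_set_dnf(uint32_t cr1, unsigned int dnf)
{
	cr1 &= ~CR1_DNF_MASK;
	cr1 |= CR1_DNF(dnf);
	return cr1;
}
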
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index c70b3822013f..30c8ac24635d 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -16,7 +16,7 @@ KCOV_INSTRUMENT_rodata.o := n
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
- --rename-section .text=.rodata,alloc,readonly,load
+ --rename-section .noinstr.text=.rodata,alloc,readonly,load
targets += rodata.o rodata_objcopy.o
$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
$(call if_changed,objcopy)
diff --git a/drivers/misc/lkdtm/rodata.c b/drivers/misc/lkdtm/rodata.c
index 58d180af72cf..baacb876d1d9 100644
--- a/drivers/misc/lkdtm/rodata.c
+++ b/drivers/misc/lkdtm/rodata.c
@@ -5,7 +5,7 @@
*/
#include "lkdtm.h"
-void notrace lkdtm_rodata_do_nothing(void)
+void noinstr lkdtm_rodata_do_nothing(void)
{
/* Does nothing. We just want an architecture agnostic "return". */
}
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index c444ef3da3e2..89d7c9b23186 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -214,9 +214,24 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
{
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int err;
+
+ ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
+ DEV_MAC_ENA_CFG);
- ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
+
+ err = ocelot_port_flush(ocelot, port);
+ if (err)
+ dev_err(ocelot->dev, "failed to flush port %d: %d\n",
+ port, err);
+
+ /* Put the port in reset. */
+ ocelot_port_writel(ocelot_port,
+ DEV_CLOCK_CFG_MAC_TX_RST |
+ DEV_CLOCK_CFG_MAC_RX_RST |
+ DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
+ DEV_CLOCK_CFG);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 4cbf1667d7ff..014ca6ae121f 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -196,6 +196,8 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSCAPR 0x1404
+#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
#define ENETC_PSIVLANFMR 0x1700
#define ENETC_PSIVLANFMR_VS BIT(0)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 419306342ac5..06514af0df10 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1004,6 +1004,51 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv)
phylink_destroy(priv->phylink);
}
+/* Initialize the entire shared memory for the flow steering entries
+ * of this port (PF + VFs)
+ */
+static int enetc_init_port_rfs_memory(struct enetc_si *si)
+{
+ struct enetc_cmd_rfse rfse = {0};
+ struct enetc_hw *hw = &si->hw;
+ int num_rfs, i, err = 0;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+
+ for (i = 0; i < num_rfs; i++) {
+ err = enetc_set_fs_entry(si, &rfse, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int enetc_init_port_rss_memory(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ int num_rss, err;
+ int *rss_table;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRSSCAPR);
+ num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
+ if (!num_rss)
+ return 0;
+
+ rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
+ if (!rss_table)
+ return -ENOMEM;
+
+ err = enetc_set_rss_table(si, rss_table, num_rss);
+
+ kfree(rss_table);
+
+ return err;
+}
+
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -1058,6 +1103,18 @@ static int enetc_pf_probe(struct pci_dev *pdev,
goto err_alloc_si_res;
}
+ err = enetc_init_port_rfs_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+ goto err_init_port_rfs;
+ }
+
+ err = enetc_init_port_rss_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+ goto err_init_port_rss;
+ }
+
err = enetc_alloc_msix(priv);
if (err) {
dev_err(&pdev->dev, "MSIX alloc failed\n");
@@ -1086,6 +1143,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
enetc_free_msix(priv);
+err_init_port_rss:
+err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
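
For the RSS sizing used above, ENETC_PRSSCAPR_GET_NUM_RSS() decodes the capability field as a power of two times 32 entries, and the probe then writes a zeroed table of that length. A worked decode (the field values below are made-up examples, not taken from real hardware):

#include <stdio.h>

/* Model of ENETC_PRSSCAPR_GET_NUM_RSS(): entries = 2^(val & 0xf) * 32. */
static unsigned int prsscapr_num_rss(unsigned int val)
{
	return (1u << (val & 0xf)) * 32;
}

int main(void)
{
	printf("field 1 -> %u entries, field 2 -> %u entries\n",
	       prsscapr_num_rss(1), prsscapr_num_rss(2));
	return 0;
}
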
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 4321132a4f63..c40820baf48a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -9404,12 +9404,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
+ struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
+ queue_id);
+ return;
+ }
+
queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index 3ab6db2588d3..9c8004fc9dc4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -158,21 +158,31 @@ static int hclge_get_ring_chain_from_mbx(
struct hclge_vport *vport)
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ struct hclge_dev *hdev = vport->back;
int ring_num;
- int i = 0;
+ int i;
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM;
+ for (i = 0; i < ring_num; i++) {
+ if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+ dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+ req->msg.param[i].tqp_index,
+ vport->nic.kinfo.rss_size - 1);
+ return -EINVAL;
+ }
+ }
+
hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
- req->msg.param[i].ring_type);
+ req->msg.param[0].ring_type);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
- [req->msg.param[i].tqp_index]);
+ [req->msg.param[0].tqp_index]);
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
+ HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
cur_chain = ring_chain;
@@ -581,6 +591,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport,
index = mbx_req->msg.data[0];
+ /* Check the query index of rss_hash_key from VF, make sure no
+ * more than the size of rss_hash_key.
+ */
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
+ sizeof(vport[0].rss_hash_key)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to get the rss hash key, the index(%u) invalid !\n",
+ index);
+ return;
+ }
+
memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 2f281d0f9807..ee16e0e4fa5f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -4813,7 +4813,22 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
- ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ if (rc && rc != -EBUSY) {
+ /* We were unable to schedule the failover
+ * reset either because the adapter was still
+ * probing (eg: during kexec) or we could not
+ * allocate memory. Clear the failover_pending
+ * flag since no one else will. We ignore
+ * EBUSY because it means either FAILOVER reset
+ * is already scheduled or the adapter is
+ * being removed.
+ */
+ netdev_err(netdev,
+ "Error %ld scheduling failover reset\n",
+ rc);
+ adapter->failover_pending = false;
+ }
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index d4768dcb6c69..aa400b925b08 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -348,6 +348,60 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
}
}
+static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
+{
+ return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
+}
+
+int ocelot_port_flush(struct ocelot *ocelot, int port)
+{
+ int err, val;
+
+ /* Disable dequeuing from the egress queues */
+ ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE, port);
+
+ /* Disable flow control */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+
+ /* Disable priority flow control */
+ ocelot_fields_write(ocelot, port,
+ QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
+
+ /* Wait at least the time it takes to receive a frame of maximum length
+ * at the port.
+ * Worst-case delays for 10 kilobyte jumbo frames are:
+ * 8 ms on a 10M port
+ * 800 μs on a 100M port
+ * 80 μs on a 1G port
+ * 32 μs on a 2.5G port
+ */
+ usleep_range(8000, 10000);
+
+ /* Disable half duplex backpressure. */
+ ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
+ SYS_FRONT_PORT_MODE, port);
+
+ /* Flush the queues associated with the port. */
+ ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
+ REW_PORT_CFG, port);
+
+ /* Enable dequeuing from the egress queues. */
+ ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
+ port);
+
+ /* Wait until flushing is complete. */
+ err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
+ 100, 2000000, false, ocelot, port);
+
+ /* Clear flushing again. */
+ ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+
+ return err;
+}
+EXPORT_SYMBOL(ocelot_port_flush);
+
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev)
{
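
The fixed 8-10 ms sleep in ocelot_port_flush() above is sized for the slowest case listed in the comment: the time to receive one maximum-length frame. Quick check of that arithmetic (10 kB frame assumed, as in the comment):

#include <stdio.h>

int main(void)
{
	const double frame_bits = 10000.0 * 8;	/* 10 kB jumbo frame */
	const double rates[] = { 10e6, 100e6, 1e9, 2.5e9 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%6.0f Mbit/s -> %5.0f us\n",
		       rates[i] / 1e6, frame_bits / rates[i] * 1e6);
	return 0;
}
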
diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c
index 0acb45948418..ea4e83410fe4 100644
--- a/drivers/net/ethernet/mscc/ocelot_io.c
+++ b/drivers/net/ethernet/mscc/ocelot_io.c
@@ -71,6 +71,14 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg)
}
EXPORT_SYMBOL(ocelot_port_writel);
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
+{
+ u32 cur = ocelot_port_readl(port, reg);
+
+ ocelot_port_writel(port, (cur & (~mask)) | val, reg);
+}
+EXPORT_SYMBOL(ocelot_port_rmwl);
+
u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 06553d028d74..6088071cb192 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -330,7 +330,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
} else if (!qopt->enable) {
- return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
+ MTL_QUEUE_DCB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
/* Port Transmit Rate and Speed Divider */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 0c3de94b5178..6a7ab930ef70 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1253,8 +1253,11 @@ static int netvsc_receive(struct net_device *ndev,
ret = rndis_filter_receive(ndev, net_device,
nvchan, data, buflen);
- if (unlikely(ret != NVSP_STAT_SUCCESS))
+ if (unlikely(ret != NVSP_STAT_SUCCESS)) {
+ /* Drop incomplete packet */
+ nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
+ }
}
enq_receive_complete(ndev, net_device, q_idx,
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index b22e47bcfeca..90bc0008fa2f 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -508,8 +508,6 @@ static int rndis_filter_receive_data(struct net_device *ndev,
return ret;
drop:
- /* Drop incomplete packet */
- nvchan->rsc.cnt = 0;
return NVSP_STAT_FAIL;
}
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 4a68da7115d1..2a65efd3e8da 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1573,6 +1573,7 @@ static int gsi_channel_setup(struct gsi *gsi, bool legacy)
if (!channel->gsi)
continue; /* Ignore uninitialized channels */
+ ret = -EINVAL;
dev_err(gsi->dev, "channel %u not supported by hardware\n",
channel_id - 1);
channel_id = gsi->channel_count;
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index f52b9fed0593..34bc53facd11 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -171,11 +171,11 @@ static int x25_open(struct net_device *dev)
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
- return result;
+ return -ENOMEM;
result = lapb_getparms(dev, &params);
if (result != LAPB_OK)
- return result;
+ return -EINVAL;
if (state(hdlc)->settings.dce)
params.mode = params.mode | LAPB_DCE;
@@ -190,7 +190,7 @@ static int x25_open(struct net_device *dev)
result = lapb_setparms(dev, &params);
if (result != LAPB_OK)
- return result;
+ return -EINVAL;
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index a84bb9b6573f..e150d82eddb6 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -21,11 +21,9 @@ config ATH9K_BTCOEX_SUPPORT
config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on MAC80211 && HAS_DMA
+ select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
select ATH9K_HW
select ATH9K_COMMON
- imply NEW_LEDS
- imply LEDS_CLASS
- imply MAC80211_LEDS
help
This module adds support for wireless adapters based on
Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
@@ -176,11 +174,9 @@ config ATH9K_PCI_NO_EEPROM
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
+ select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
select ATH9K_HW
select ATH9K_COMMON
- imply NEW_LEDS
- imply LEDS_CLASS
- imply MAC80211_LEDS
help
Support for Atheros HTC based cards.
Chipsets supported: AR9271
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 145e839fea4e..917617aad8d3 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -519,15 +519,17 @@ static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more)
{
- struct page *page = virt_to_head_page(data);
- int offset = data - page_address(page);
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
- offset += q->buf_offset;
+ struct page *page = virt_to_head_page(data);
+ int offset = data - page_address(page) + q->buf_offset;
+
skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
q->buf_size);
+ } else {
+ skb_free_frag(data);
}
if (more)
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index b8febe1d1bfd..accc991d153f 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
RING_IDX prod, cons;
struct sk_buff *skb;
int needed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rx_queue.lock, flags);
skb = skb_peek(&queue->rx_queue);
- if (!skb)
+ if (!skb) {
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
return false;
+ }
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
@@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
if (skb->sw_hash)
needed++;
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+
do {
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index a32494cde61f..4a33287371bd 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3247,6 +3247,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+ { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */
+ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 18bf8aeb5f87..e94e59283ecb 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -32,6 +32,10 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C");
MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
+static int enable_tablet_mode_sw = -1;
+module_param(enable_tablet_mode_sw, int, 0444);
+MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)");
+
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
@@ -654,10 +658,12 @@ static int __init hp_wmi_input_setup(void)
}
/* Tablet mode */
- val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
- if (!(val < 0)) {
- __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
- input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ if (enable_tablet_mode_sw > 0) {
+ val = hp_wmi_hw_state(HPWMI_TABLET_MASK);
+ if (val >= 0) {
+ __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit);
+ input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val);
+ }
}
err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 69f1a0457f51..03c81cec6bc9 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -714,6 +714,9 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
return -ENODEV;
}
+ if (!vport->phba->sli4_hba.nvmels_wq)
+ return -ENOMEM;
+
/*
* there are two dma buf in the request, actually there is one and
* the second one is just the start address + cmd size.
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4a08c450b756..b6540b92f566 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -6881,6 +6881,7 @@ static void __exit scsi_debug_exit(void)
sdebug_erase_all_stores(false);
xa_destroy(per_store_ap);
+ kfree(sdebug_q_arr);
}
device_initcall(scsi_debug_init);
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index 4d41dc3cdce1..c8b14b3a171f 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -552,6 +552,7 @@ static int omap_prm_reset_init(struct platform_device *pdev,
const struct omap_rst_map *map;
struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev);
char buf[32];
+ u32 v;
/*
* Check if we have controllable resets. If either rstctrl is non-zero
@@ -599,6 +600,16 @@ static int omap_prm_reset_init(struct platform_device *pdev,
map++;
}
+ /* Quirk handling to assert rst_map_012 bits on reset and avoid errors */
+ if (prm->data->rstmap == rst_map_012) {
+ v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl);
+ if ((v & reset->mask) != reset->mask) {
+ dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v);
+ writel_relaxed(reset->mask, reset->prm->base +
+ reset->prm->data->rstctrl);
+ }
+ }
+
return devm_reset_controller_register(&pdev->dev, &reset->rcdev);
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 2c6b9578a7d3..99908d8d2dd3 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1646,9 +1646,16 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
/* pass ownership to the completion handler */
urb->status = status;
- kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+ /*
+ * This function can be called in task context inside another remote
+ * coverage collection section, but KCOV doesn't support that kind of
+ * recursion yet. Only collect coverage in softirq context for now.
+ */
+ if (in_serving_softirq())
+ kcov_remote_start_usb((u64)urb->dev->bus->busnum);
urb->complete(urb);
- kcov_remote_stop();
+ if (in_serving_softirq())
+ kcov_remote_stop();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index dc1537335414..2a93b7c9c159 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -115,7 +115,6 @@ int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
int xenbus_probe_devices(struct xen_bus_type *bus);
-void xenbus_probe(void);
void xenbus_dev_changed(const char *node, struct xen_bus_type *bus);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 18ffd0551b54..8a75092bb148 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -683,7 +683,7 @@ void unregister_xenstore_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_xenstore_notifier);
-void xenbus_probe(void)
+static void xenbus_probe(void)
{
xenstored_ready = 1;
diff --git a/fs/Kconfig b/fs/Kconfig
index aa4c12282301..da524c4d7b7e 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -203,7 +203,7 @@ config TMPFS_XATTR
config TMPFS_INODE64
bool "Use 64-bit ino_t by default in tmpfs"
- depends on TMPFS && 64BIT
+ depends on TMPFS && 64BIT && !(S390 || ALPHA)
default n
help
tmpfs has historically used only inode numbers as wide as an unsigned
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 955ecd4030f0..89d5d59c7d7a 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -84,6 +84,14 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
if (ovl_is_private_xattr(sb, name))
continue;
+
+ error = security_inode_copy_up_xattr(name);
+ if (error < 0 && error != -EOPNOTSUPP)
+ break;
+ if (error == 1) {
+ error = 0;
+ continue; /* Discard */
+ }
retry:
size = vfs_getxattr(old, name, value, value_size);
if (size == -ERANGE)
@@ -107,13 +115,6 @@ int ovl_copy_xattr(struct super_block *sb, struct dentry *old,
goto retry;
}
- error = security_inode_copy_up_xattr(name);
- if (error < 0 && error != -EOPNOTSUPP)
- break;
- if (error == 1) {
- error = 0;
- continue; /* Discard */
- }
error = vfs_setxattr(new, name, value, size, 0);
if (error) {
if (error != -EOPNOTSUPP || ovl_must_copy_xattr(name))
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b584dca845ba..4fadafd8bdc1 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -346,7 +346,9 @@ int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
goto out;
if (!value && !upperdentry) {
+ old_cred = ovl_override_creds(dentry->d_sb);
err = vfs_getxattr(realdentry, name, NULL, 0);
+ revert_creds(old_cred);
if (err < 0)
goto out_drop_write;
}
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index d23177a53c95..50529a4e7bf3 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -79,7 +79,7 @@ static void ovl_dentry_release(struct dentry *dentry)
static struct dentry *ovl_d_real(struct dentry *dentry,
const struct inode *inode)
{
- struct dentry *real;
+ struct dentry *real = NULL, *lower;
/* It's an overlay file */
if (inode && d_inode(dentry) == inode)
@@ -98,9 +98,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
if (real && !inode && ovl_has_upperdata(d_inode(dentry)))
return real;
- real = ovl_dentry_lowerdata(dentry);
- if (!real)
+ lower = ovl_dentry_lowerdata(dentry);
+ if (!lower)
goto bug;
+ real = lower;
/* Handle recursion */
real = d_real(real, inode);
@@ -108,8 +109,10 @@ static struct dentry *ovl_d_real(struct dentry *dentry,
if (!inode || inode == d_inode(real))
return real;
bug:
- WARN(1, "ovl_d_real(%pd4, %s:%lu): real dentry not found\n", dentry,
- inode ? inode->i_sb->s_id : "NULL", inode ? inode->i_ino : 0);
+ WARN(1, "%s(%pd4, %s:%lu): real dentry (%p/%lu) not found\n",
+ __func__, dentry, inode ? inode->i_sb->s_id : "NULL",
+ inode ? inode->i_ino : 0, real,
+ real && d_inode(real) ? d_inode(real)->i_ino : 0);
return dentry;
}
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index b2b3d81b1535..b97c628ad91f 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -459,7 +459,7 @@
} \
\
/* Built-in firmware blobs */ \
- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
__start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
__end_builtin_fw = .; \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7c3da0e1ea9d..9de5312edeb8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -4313,6 +4313,7 @@ static inline void netif_tx_disable(struct net_device *dev)
local_bh_disable();
cpu = smp_processor_id();
+ spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -4320,6 +4321,7 @@ static inline void netif_tx_disable(struct net_device *dev)
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
+ spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 72d88566694e..27ff8eb786dc 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -260,7 +260,13 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
+
+struct csum_state {
+ __wsum csum;
+ size_t off;
+};
+
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index 53e8b4994296..8528015590e4 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -41,7 +41,6 @@ enum switchdev_attr_id {
SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
#endif
};
@@ -60,7 +59,6 @@ struct switchdev_attr {
bool vlan_filtering; /* BRIDGE_VLAN_FILTERING */
bool mc_disabled; /* MC_DISABLED */
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- u8 mrp_port_state; /* MRP_PORT_STATE */
u8 mrp_port_role; /* MRP_PORT_ROLE */
#endif
} u;
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
index 49b46df476f2..4971b45860a4 100644
--- a/include/soc/mscc/ocelot.h
+++ b/include/soc/mscc/ocelot.h
@@ -703,6 +703,7 @@ struct ocelot_policer {
/* I/O */
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg);
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
@@ -731,6 +732,7 @@ int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info);
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
+int ocelot_port_flush(struct ocelot *ocelot, int port);
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev);
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 2c43b0ef1e4d..bf3cfc7c35d0 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -192,8 +192,6 @@ void xs_suspend_cancel(void);
struct work_struct;
-void xenbus_probe(void);
-
#define XENBUS_IS_ERR_READ(str) ({ \
if (!IS_ERR(str) && strlen(str) == 0) { \
kfree(str); \
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 06065fa27124..6e83bf8c080d 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -116,6 +116,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
/* hash table size must be power of 2 */
n_buckets = roundup_pow_of_two(attr->max_entries);
+ if (!n_buckets)
+ return ERR_PTR(-E2BIG);
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
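
The !n_buckets check above guards against roundup_pow_of_two() wrapping to zero when the next power of two no longer fits in unsigned long (for example max_entries above 2^31 on a 32-bit build), which would otherwise feed a zero bucket count into the cost calculation. A small model of the wrap (my own loop, not the kernel's fls-based implementation):

#include <stdio.h>

/* Model of roundup_pow_of_two() with 32-bit unsigned long semantics. */
static unsigned int roundup_pow_of_two_32(unsigned int x)
{
	unsigned int r = 1;

	while (r && r < x)
		r <<= 1;	/* 0x80000000 << 1 wraps to 0 */
	return r;
}

int main(void)
{
	printf("%#x -> %u\n", 0x80000001u, roundup_pow_of_two_32(0x80000001u));
	return 0;
}
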
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 32596fdbcd5b..a5751784ad74 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -917,6 +917,9 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param)
for_each_subsys(ss, i) {
if (strcmp(param->key, ss->legacy_name))
continue;
+ if (!cgroup_ssid_enabled(i) || cgroup1_ssid_disabled(i))
+ return invalfc(fc, "Disabled controller '%s'",
+ param->key);
ctx->subsys_mask |= (1 << i);
return 0;
}
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index e41c21819ba0..5d1fdf7c3ec6 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3567,6 +3567,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
{
struct psi_trigger *new;
struct cgroup *cgrp;
+ struct psi_group *psi;
cgrp = cgroup_kn_lock_live(of->kn, false);
if (!cgrp)
@@ -3575,7 +3576,8 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
cgroup_get(cgrp);
cgroup_kn_unlock(of->kn);
- new = psi_trigger_create(&cgrp->psi, buf, nbytes, res);
+ psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
+ new = psi_trigger_create(psi, buf, nbytes, res);
if (IS_ERR(new)) {
cgroup_put(cgrp);
return PTR_ERR(new);
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0dde84b9d29f..fcbfc9564996 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -93,9 +93,6 @@ unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
unsigned int ret;
- if (in_nmi()) /* not supported yet */
- return 1;
-
cant_sleep();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3119d68d012d..ee4be813ba85 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2745,7 +2745,7 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
- if (val == 1) {
+ if ((len < (PAGE_SIZE - sizeof(*entry))) && val == 1) {
trace_event_setup(entry, type, flags, pc);
entry->array[0] = len;
return entry;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 802f3e7d8b8b..ab3cb67b869e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1212,7 +1212,8 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
- if (!trace_event_name(call) || !call->class || !call->class->reg)
+ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+ !trace_event_name(call) || !call->class || !call->class->reg)
continue;
if (system && strcmp(call->class->system, system->name) != 0)
diff --git a/lib/cpumask.c b/lib/cpumask.c
index 85da6ab4fbb5..fb22fb266f93 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>
-#include <linux/sched/isolation.h>
/**
* cpumask_next - get the next cpu in a cpumask
@@ -206,27 +205,22 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
*/
unsigned int cpumask_local_spread(unsigned int i, int node)
{
- int cpu, hk_flags;
- const struct cpumask *mask;
+ int cpu;
- hk_flags = HK_FLAG_DOMAIN | HK_FLAG_MANAGED_IRQ;
- mask = housekeeping_cpumask(hk_flags);
/* Wrap: we always want a cpu. */
- i %= cpumask_weight(mask);
+ i %= num_online_cpus();
if (node == NUMA_NO_NODE) {
- for_each_cpu(cpu, mask) {
+ for_each_cpu(cpu, cpu_online_mask)
if (i-- == 0)
return cpu;
- }
} else {
/* NUMA first. */
- for_each_cpu_and(cpu, cpumask_of_node(node), mask) {
+ for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
if (i-- == 0)
return cpu;
- }
- for_each_cpu(cpu, mask) {
+ for_each_cpu(cpu, cpu_online_mask) {
/* Skip NUMA nodes, done above. */
if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
continue;
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index a21e6a5792c5..f0b2ccb1bb01 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -592,14 +592,15 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len,
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
- __wsum *csum, struct iov_iter *i)
+ struct csum_state *csstate,
+ struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_mask = pipe->ring_size - 1;
+ __wsum sum = csstate->csum;
+ size_t off = csstate->off;
unsigned int i_head;
size_t n, r;
- size_t off = 0;
- __wsum sum = *csum;
if (!sanity(i))
return 0;
@@ -621,7 +622,8 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
i_head++;
} while (n);
i->count -= bytes;
- *csum = sum;
+ csstate->csum = sum;
+ csstate->off = off;
return bytes;
}
@@ -1522,18 +1524,19 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct iov_iter *i)
{
+ struct csum_state *csstate = _csstate;
const char *from = addr;
- __wsum *csum = csump;
__wsum sum, next;
- size_t off = 0;
+ size_t off;
if (unlikely(iov_iter_is_pipe(i)))
- return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+ return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
- sum = *csum;
+ sum = csstate->csum;
+ off = csstate->off;
if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
@@ -1561,7 +1564,8 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
off += v.iov_len;
})
)
- *csum = sum;
+ csstate->csum = sum;
+ csstate->off = off;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
diff --git a/lib/ubsan.c b/lib/ubsan.c
index cb9af3f6b77e..adf8dcf3c84e 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -427,3 +427,34 @@ void __ubsan_handle_load_invalid_value(void *_data, void *val)
ubsan_epilogue();
}
EXPORT_SYMBOL(__ubsan_handle_load_invalid_value);
+
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
+void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset)
+{
+ struct alignment_assumption_data *data = _data;
+ unsigned long real_ptr;
+
+ if (suppress_report(&data->location))
+ return;
+
+ ubsan_prologue(&data->location, "alignment-assumption");
+
+ if (offset)
+ pr_err("assumption of %lu byte alignment (with offset of %lu byte) for pointer of type %s failed",
+ align, offset, data->type->type_name);
+ else
+ pr_err("assumption of %lu byte alignment for pointer of type %s failed",
+ align, data->type->type_name);
+
+ real_ptr = ptr - offset;
+ pr_err("%saddress is %lu aligned, misalignment offset is %lu bytes",
+ offset ? "offset " : "", BIT(real_ptr ? __ffs(real_ptr) : 0),
+ real_ptr & (align - 1));
+
+ ubsan_epilogue();
+}
+EXPORT_SYMBOL(__ubsan_handle_alignment_assumption);
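
The new UBSAN handler reports the pointer's actual alignment as its lowest set bit and the misalignment as real_ptr & (align - 1). A worked example with made-up values (userspace model of that arithmetic, not the handler itself):

#include <stdio.h>

int main(void)
{
	unsigned long ptr = 0x1003, align = 8, offset = 0;	/* made-up values */
	unsigned long real_ptr = ptr - offset;
	unsigned long actual = real_ptr ? (real_ptr & -real_ptr) : 1;	/* BIT(__ffs(real_ptr)) */

	printf("assumed %lu-byte alignment, address is %lu aligned, misaligned by %lu bytes\n",
	       align, actual, real_ptr & (align - 1));
	return 0;
}
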
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 7b56c09473a9..9a0b71c5ff9f 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -78,6 +78,12 @@ struct invalid_value_data {
struct type_descriptor *type;
};
+struct alignment_assumption_data {
+ struct source_location location;
+ struct source_location assumption_location;
+ struct type_descriptor *type;
+};
+
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
typedef __int128 s_max;
typedef unsigned __int128 u_max;
diff --git a/net/bridge/br_mrp.c b/net/bridge/br_mrp.c
index b36689e6e7cb..d1336a7ad7ff 100644
--- a/net/bridge/br_mrp.c
+++ b/net/bridge/br_mrp.c
@@ -544,19 +544,22 @@ int br_mrp_del(struct net_bridge *br, struct br_mrp_instance *instance)
int br_mrp_set_port_state(struct net_bridge_port *p,
enum br_mrp_port_state_type state)
{
+ u32 port_state;
+
if (!p || !(p->flags & BR_MRP_AWARE))
return -EINVAL;
spin_lock_bh(&p->br->lock);
if (state == BR_MRP_PORT_STATE_FORWARDING)
- p->state = BR_STATE_FORWARDING;
+ port_state = BR_STATE_FORWARDING;
else
- p->state = BR_STATE_BLOCKING;
+ port_state = BR_STATE_BLOCKING;
+ p->state = port_state;
spin_unlock_bh(&p->br->lock);
- br_mrp_port_switchdev_set_state(p, state);
+ br_mrp_port_switchdev_set_state(p, port_state);
return 0;
}
diff --git a/net/bridge/br_mrp_switchdev.c b/net/bridge/br_mrp_switchdev.c
index ed547e03ace1..75a7e8d0a268 100644
--- a/net/bridge/br_mrp_switchdev.c
+++ b/net/bridge/br_mrp_switchdev.c
@@ -169,13 +169,12 @@ int br_mrp_switchdev_send_in_test(struct net_bridge *br, struct br_mrp *mrp,
return err;
}
-int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
- enum br_mrp_port_state_type state)
+int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
- .id = SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
- .u.mrp_port_state = state,
+ .id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+ .u.stp_state = state,
};
int err;
diff --git a/net/bridge/br_private_mrp.h b/net/bridge/br_private_mrp.h
index af0e9eff6549..6657705b94b1 100644
--- a/net/bridge/br_private_mrp.h
+++ b/net/bridge/br_private_mrp.h
@@ -72,8 +72,7 @@ int br_mrp_switchdev_set_ring_state(struct net_bridge *br, struct br_mrp *mrp,
int br_mrp_switchdev_send_ring_test(struct net_bridge *br, struct br_mrp *mrp,
u32 interval, u8 max_miss, u32 period,
bool monitor);
-int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
- enum br_mrp_port_state_type state);
+int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state);
int br_mrp_port_switchdev_set_role(struct net_bridge_port *p,
enum br_mrp_port_role_type role);
int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp,
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 9fcaa544f11a..bc92683fdcdb 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -721,8 +721,16 @@ static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len,
__wsum *csump)
{
- return __skb_datagram_iter(skb, offset, to, len, true,
- csum_and_copy_to_iter, csump);
+ struct csum_state csdata = { .csum = *csump };
+ int ret;
+
+ ret = __skb_datagram_iter(skb, offset, to, len, true,
+ csum_and_copy_to_iter, &csdata);
+ if (ret)
+ return ret;
+
+ *csump = csdata.csum;
+ return 0;
}
/**
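
The csum_state plumbing above lets csum_and_copy_to_iter() carry both the running checksum and the byte offset across calls, and skb_copy_and_csum_datagram() now seeds that state and reads the checksum back. A stripped-down model of the caller pattern (checksum maths omitted, names simplified, not the kernel helpers themselves):

#include <stddef.h>

struct csum_state {
	unsigned int csum;	/* __wsum in the kernel */
	size_t off;		/* bytes already folded in */
};

/* Stand-in for csum_and_copy_to_iter(): the real helper folds the copied
 * data into cs->csum, rotated by cs->off, and advances cs->off. */
static size_t copy_and_csum(struct csum_state *cs, const void *data, size_t bytes)
{
	(void)data;
	cs->off += bytes;
	return bytes;
}

/* Caller pattern from skb_copy_and_csum_datagram(): seed, copy, read back. */
int copy_datagram(const void *data, size_t len, unsigned int *csump)
{
	struct csum_state csdata = { .csum = *csump };

	if (copy_and_csum(&csdata, data, len) != len)
		return -1;

	*csump = csdata.csum;
	return 0;
}
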
diff --git a/net/core/dev.c b/net/core/dev.c
index 81e5d482c238..da85cb939869 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5723,10 +5723,11 @@ static void gro_normal_list(struct napi_struct *napi)
/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
* pass the whole batch up to the stack.
*/
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
list_add_tail(&skb->list, &napi->rx_list);
- if (++napi->rx_count >= gro_normal_batch)
+ napi->rx_count += segs;
+ if (napi->rx_count >= gro_normal_batch)
gro_normal_list(napi);
}
@@ -5765,7 +5766,7 @@ static int napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
}
out:
- gro_normal_one(napi, skb);
+ gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
return NET_RX_SUCCESS;
}
@@ -6055,7 +6056,7 @@ static gro_result_t napi_skb_finish(struct napi_struct *napi,
{
switch (ret) {
case GRO_NORMAL:
- gro_normal_one(napi, skb);
+ gro_normal_one(napi, skb, 1);
break;
case GRO_DROP:
@@ -6143,7 +6144,7 @@ static gro_result_t napi_frags_finish(struct napi_struct *napi,
__skb_push(skb, ETH_HLEN);
skb->protocol = eth_type_trans(skb, skb->dev);
if (ret == GRO_NORMAL)
- gro_normal_one(napi, skb);
+ gro_normal_one(napi, skb, 1);
break;
case GRO_DROP:
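
The GRO change above makes the batching threshold count segments rather than skbs, so one heavily coalesced skb can trigger a flush by itself. Minimal model of the accounting (the real code flushes napi->rx_list via gro_normal_list() against gro_normal_batch):

/* Model of gro_normal_one(): a coalesced GRO skb now counts as all of its
 * segments toward the flush threshold, not as a single packet. */
static void gro_normal_one_model(unsigned int *rx_count, unsigned int batch,
				 unsigned int segs)
{
	*rx_count += segs;
	if (*rx_count >= batch)
		*rx_count = 0;	/* stands in for flushing the list to the stack */
}
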
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index a47e0f9b20d0..a04fd637b4cd 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -462,20 +462,23 @@ static int dsa_switch_setup(struct dsa_switch *ds)
ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
if (!ds->slave_mii_bus) {
err = -ENOMEM;
- goto unregister_notifier;
+ goto teardown;
}
dsa_slave_mii_bus_init(ds);
err = mdiobus_register(ds->slave_mii_bus);
if (err < 0)
- goto unregister_notifier;
+ goto teardown;
}
ds->setup = true;
return 0;
+teardown:
+ if (ds->ops->teardown)
+ ds->ops->teardown(ds);
unregister_notifier:
dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index cd9a9bd242ba..51ec8256b7fa 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -69,7 +69,7 @@ config MAC80211_MESH
config MAC80211_LEDS
bool "Enable LED triggers"
depends on MAC80211
- depends on LEDS_CLASS
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
select LEDS_TRIGGERS
help
This option enables a few LED triggers for different
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 234b7cab37c3..ff0168736f6e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1229,7 +1229,8 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
* Let nf_ct_resolve_clash() deal with this later.
*/
if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
continue;
NF_CT_STAT_INC_ATOMIC(net, found);
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 513f78db3cb2..4a4acbba78ff 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -399,7 +399,7 @@ static int nf_flow_nat_port_tcp(struct sk_buff *skb, unsigned int thoff,
return -1;
tcph = (void *)(skb_network_header(skb) + thoff);
- inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
+ inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
return 0;
}
@@ -415,7 +415,7 @@ static int nf_flow_nat_port_udp(struct sk_buff *skb, unsigned int thoff,
udph = (void *)(skb_network_header(skb) + thoff);
if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace2(&udph->check, skb, port,
- new_port, true);
+ new_port, false);
if (!udph->check)
udph->check = CSUM_MANGLED_0;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 9a080767667b..8739ef135156 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -8775,6 +8775,17 @@ int __nft_release_basechain(struct nft_ctx *ctx)
}
EXPORT_SYMBOL_GPL(__nft_release_basechain);
+static void __nft_release_hooks(struct net *net)
+{
+ struct nft_table *table;
+ struct nft_chain *chain;
+
+ list_for_each_entry(table, &net->nft.tables, list) {
+ list_for_each_entry(chain, &table->chains, list)
+ nf_tables_unregister_hook(net, table, chain);
+ }
+}
+
static void __nft_release_tables(struct net *net)
{
struct nft_flowtable *flowtable, *nf;
@@ -8790,10 +8801,6 @@ static void __nft_release_tables(struct net *net)
list_for_each_entry_safe(table, nt, &net->nft.tables, list) {
ctx.family = table->family;
-
- list_for_each_entry(chain, &table->chains, list)
- nf_tables_unregister_hook(net, table, chain);
- /* No packets are walking on these chains anymore. */
ctx.table = table;
list_for_each_entry(chain, &table->chains, list) {
ctx.chain = chain;
@@ -8842,6 +8849,11 @@ static int __net_init nf_tables_init_net(struct net *net)
return 0;
}
+static void __net_exit nf_tables_pre_exit_net(struct net *net)
+{
+ __nft_release_hooks(net);
+}
+
static void __net_exit nf_tables_exit_net(struct net *net)
{
mutex_lock(&net->nft.commit_mutex);
@@ -8855,8 +8867,9 @@ static void __net_exit nf_tables_exit_net(struct net *net)
}
static struct pernet_operations nf_tables_net_ops = {
- .init = nf_tables_init_net,
- .exit = nf_tables_exit_net,
+ .init = nf_tables_init_net,
+ .pre_exit = nf_tables_pre_exit_net,
+ .exit = nf_tables_exit_net,
};
static int __init nf_tables_module_init(void)
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 606411869698..0446307516cd 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -152,7 +152,8 @@ static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
/*
* Drop entries with timestamps older then 'time'.
*/
-static void recent_entry_reap(struct recent_table *t, unsigned long time)
+static void recent_entry_reap(struct recent_table *t, unsigned long time,
+ struct recent_entry *working, bool update)
{
struct recent_entry *e;
@@ -161,6 +162,12 @@ static void recent_entry_reap(struct recent_table *t, unsigned long time)
*/
e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
+ /*
+ * Do not reap the entry which are going to be updated.
+ */
+ if (e == working && update)
+ return;
+
/*
* The last time stamp is the most recent.
*/
@@ -303,7 +310,8 @@ recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
/* info->seconds must be non-zero */
if (info->check_set & XT_RECENT_REAP)
- recent_entry_reap(t, time);
+ recent_entry_reap(t, time, e,
+ info->check_set & XT_RECENT_UPDATE && ret);
}
if (info->check_set & XT_RECENT_SET ||
diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c
index 15ce9b642b25..b238c40a9984 100644
--- a/net/qrtr/tun.c
+++ b/net/qrtr/tun.c
@@ -80,6 +80,12 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
void *kbuf;
+ if (!len)
+ return -EINVAL;
+
+ if (len > KMALLOC_MAX_SIZE)
+ return -ENOMEM;
+
kbuf = kzalloc(len, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 1d0afb1dd77b..6f1a50d50d06 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -565,6 +565,9 @@ int rds_rdma_extra_size(struct rds_rdma_args *args,
if (args->nr_local == 0)
return -EINVAL;
+ if (args->nr_local > UIO_MAXIOV)
+ return -EMSGSIZE;
+
iov->iov = kcalloc(args->nr_local,
sizeof(struct rds_iovec),
GFP_KERNEL);
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index c845594b663f..4eb91d958a48 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -548,8 +548,6 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
rxrpc_disconnect_call(call);
if (call->security)
call->security->free_call_crypto(call);
-
- rxrpc_cleanup_ring(call);
_leave("");
}
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index f7da88ae20a5..982a87b3e11f 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -215,6 +215,12 @@ static void sctp_transport_seq_stop(struct seq_file *seq, void *v)
{
struct sctp_ht_iter *iter = seq->private;
+ if (v && v != SEQ_START_TOKEN) {
+ struct sctp_transport *transport = v;
+
+ sctp_transport_put(transport);
+ }
+
sctp_transport_walk_stop(&iter->hti);
}
@@ -222,6 +228,12 @@ static void *sctp_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct sctp_ht_iter *iter = seq->private;
+ if (v && v != SEQ_START_TOKEN) {
+ struct sctp_transport *transport = v;
+
+ sctp_transport_put(transport);
+ }
+
++*pos;
return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
@@ -277,8 +289,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
sk->sk_rcvbuf);
seq_printf(seq, "\n");
- sctp_transport_put(transport);
-
return 0;
}
@@ -354,8 +364,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
}
- sctp_transport_put(transport);
-
return 0;
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index f64e681493a5..791955f5e7ec 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -926,10 +926,12 @@ static int vsock_shutdown(struct socket *sock, int mode)
*/
sk = sock->sk;
+
+ lock_sock(sk);
if (sock->state == SS_UNCONNECTED) {
err = -ENOTCONN;
if (sk->sk_type == SOCK_STREAM)
- return err;
+ goto out;
} else {
sock->state = SS_DISCONNECTING;
err = 0;
@@ -938,10 +940,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
/* Receive and send shutdowns are treated alike. */
mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
if (mode) {
- lock_sock(sk);
sk->sk_shutdown |= mode;
sk->sk_state_change(sk);
- release_sock(sk);
if (sk->sk_type == SOCK_STREAM) {
sock_reset_flag(sk, SOCK_DONE);
@@ -949,6 +949,8 @@ static int vsock_shutdown(struct socket *sock, int mode)
}
}
+out:
+ release_sock(sk);
return err;
}
@@ -1216,7 +1218,7 @@ static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
{
const struct vsock_transport *transport = vsk->transport;
- if (!transport->cancel_pkt)
+ if (!transport || !transport->cancel_pkt)
return -EOPNOTSUPP;
return transport->cancel_pkt(vsk);
@@ -1226,7 +1228,6 @@ static void vsock_connect_timeout(struct work_struct *work)
{
struct sock *sk;
struct vsock_sock *vsk;
- int cancel = 0;
vsk = container_of(work, struct vsock_sock, connect_work.work);
sk = sk_vsock(vsk);
@@ -1237,11 +1238,9 @@ static void vsock_connect_timeout(struct work_struct *work)
sk->sk_state = TCP_CLOSE;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
- cancel = 1;
+ vsock_transport_cancel_pkt(vsk);
}
release_sock(sk);
- if (cancel)
- vsock_transport_cancel_pkt(vsk);
sock_put(sk);
}
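
The af_vsock.c changes make vsock_shutdown() hold the socket lock for the whole operation, routing the early SS_UNCONNECTED return through the new out: label, add a NULL check before dereferencing vsk->transport in vsock_transport_cancel_pkt(), and let vsock_connect_timeout() cancel the pending packet directly while the lock is held instead of via a flag checked after release_sock(). A minimal userspace sketch of the take-once, single-release shape the shutdown path adopts (pthread analogue, not the kernel code):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct conn {
	pthread_mutex_t lock;
	bool connected;
	int shutdown_mask;
};

static int conn_shutdown(struct conn *c, int mode)
{
	int err = 0;

	pthread_mutex_lock(&c->lock);		/* like lock_sock(sk) */
	if (!c->connected) {
		err = -ENOTCONN;
		goto out;			/* do not return with the lock held */
	}
	c->shutdown_mask |= mode;
out:
	pthread_mutex_unlock(&c->lock);		/* single release point */
	return err;
}
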
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 630b851f8150..cc3bae2659e7 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -474,14 +474,10 @@ static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
static int hvs_shutdown(struct vsock_sock *vsk, int mode)
{
- struct sock *sk = sk_vsock(vsk);
-
if (!(mode & SEND_SHUTDOWN))
return 0;
- lock_sock(sk);
hvs_shutdown_lock_held(vsk->trans, mode);
- release_sock(sk);
return 0;
}
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 5956939eebb7..e4370b1b7494 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -1130,8 +1130,6 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
vsk = vsock_sk(sk);
- space_available = virtio_transport_space_update(sk, pkt);
-
lock_sock(sk);
/* Check if sk has been closed before lock_sock */
@@ -1142,6 +1140,8 @@ void virtio_transport_recv_pkt(struct virtio_transport *t,
goto free_pkt;
}
+ space_available = virtio_transport_space_update(sk, pkt);
+
/* Update CID in case it has changed after a transport reset event */
vsk->local_addr.svm_cid = dst.svm_cid;
diff --git a/scripts/Makefile b/scripts/Makefile
index 9de3c03b94aa..c36106bce80e 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -17,6 +17,7 @@ hostprogs-always-$(CONFIG_SYSTEM_EXTRA_CERTIFICATE) += insert-sys-cert
HOSTCFLAGS_sorttable.o = -I$(srctree)/tools/include
HOSTCFLAGS_asn1_compiler.o = -I$(srctree)/include
+HOSTCFLAGS_sign-file.o = $(CRYPTO_CFLAGS)
HOSTLDLIBS_sign-file = $(CRYPTO_LIBS)
HOSTCFLAGS_extract-cert.o = $(CRYPTO_CFLAGS)
HOSTLDLIBS_extract-cert = $(CRYPTO_LIBS)
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 7ecd2ccba531..54ad86d13784 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -112,6 +112,12 @@ static bool is_ignored_symbol(const char *name, char type)
"__crc_", /* modversions */
"__efistub_", /* arm64 EFI stub namespace */
"__kvm_nvhe_", /* arm64 non-VHE KVM namespace */
+ "__AArch64ADRPThunk_", /* arm64 lld */
+ "__ARMV5PILongThunk_", /* arm lld */
+ "__ARMV7PILongThunk_",
+ "__ThumbV7PILongThunk_",
+ "__LA25Thunk_", /* mips lld */
+ "__microLA25Thunk_",
NULL
};
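
The kallsyms change adds the prefixes of the range-extension thunks that LLD synthesizes on arm, arm64 and mips, so those linker-generated symbols are filtered out of the kallsyms table like the other tooling artifacts already listed. A simplified sketch of the kind of prefix test that table feeds (illustrative only, not the in-tree is_ignored_symbol()):

#include <stdbool.h>
#include <string.h>

static bool is_ignored_prefix(const char *name)
{
	static const char * const ignored_prefixes[] = {
		"__AArch64ADRPThunk_",		/* arm64 lld */
		"__ARMV7PILongThunk_",		/* arm lld */
		"__LA25Thunk_",			/* mips lld */
		NULL
	};
	const char * const *p;

	for (p = ignored_prefixes; *p; p++)
		if (!strncmp(name, *p, strlen(*p)))
			return true;
	return false;
}
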
diff --git a/security/commoncap.c b/security/commoncap.c
index 59bf3c1674c8..a6c9bb4441d5 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -371,10 +371,11 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
{
int size, ret;
kuid_t kroot;
+ u32 nsmagic, magic;
uid_t root, mappedroot;
char *tmpbuf = NULL;
struct vfs_cap_data *cap;
- struct vfs_ns_cap_data *nscap;
+ struct vfs_ns_cap_data *nscap = NULL;
struct dentry *dentry;
struct user_namespace *fs_ns;
@@ -396,46 +397,61 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
fs_ns = inode->i_sb->s_user_ns;
cap = (struct vfs_cap_data *) tmpbuf;
if (is_v2header((size_t) ret, cap)) {
- /* If this is sizeof(vfs_cap_data) then we're ok with the
- * on-disk value, so return that. */
- if (alloc)
- *buffer = tmpbuf;
- else
- kfree(tmpbuf);
- return ret;
- } else if (!is_v3header((size_t) ret, cap)) {
- kfree(tmpbuf);
- return -EINVAL;
+ root = 0;
+ } else if (is_v3header((size_t) ret, cap)) {
+ nscap = (struct vfs_ns_cap_data *) tmpbuf;
+ root = le32_to_cpu(nscap->rootid);
+ } else {
+ size = -EINVAL;
+ goto out_free;
}
- nscap = (struct vfs_ns_cap_data *) tmpbuf;
- root = le32_to_cpu(nscap->rootid);
kroot = make_kuid(fs_ns, root);
/* If the root kuid maps to a valid uid in current ns, then return
* this as a nscap. */
mappedroot = from_kuid(current_user_ns(), kroot);
if (mappedroot != (uid_t)-1 && mappedroot != (uid_t)0) {
+ size = sizeof(struct vfs_ns_cap_data);
if (alloc) {
- *buffer = tmpbuf;
+ if (!nscap) {
+ /* v2 -> v3 conversion */
+ nscap = kzalloc(size, GFP_ATOMIC);
+ if (!nscap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
+ nsmagic = VFS_CAP_REVISION_3;
+ magic = le32_to_cpu(cap->magic_etc);
+ if (magic & VFS_CAP_FLAGS_EFFECTIVE)
+ nsmagic |= VFS_CAP_FLAGS_EFFECTIVE;
+ memcpy(&nscap->data, &cap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
+ nscap->magic_etc = cpu_to_le32(nsmagic);
+ } else {
+ /* use allocated v3 buffer */
+ tmpbuf = NULL;
+ }
nscap->rootid = cpu_to_le32(mappedroot);
- } else
- kfree(tmpbuf);
- return size;
+ *buffer = nscap;
+ }
+ goto out_free;
}
if (!rootid_owns_currentns(kroot)) {
- kfree(tmpbuf);
- return -EOPNOTSUPP;
+ size = -EOVERFLOW;
+ goto out_free;
}
/* This comes from a parent namespace. Return as a v2 capability */
size = sizeof(struct vfs_cap_data);
if (alloc) {
- *buffer = kmalloc(size, GFP_ATOMIC);
- if (*buffer) {
- struct vfs_cap_data *cap = *buffer;
- __le32 nsmagic, magic;
+ if (nscap) {
+ /* v3 -> v2 conversion */
+ cap = kzalloc(size, GFP_ATOMIC);
+ if (!cap) {
+ size = -ENOMEM;
+ goto out_free;
+ }
magic = VFS_CAP_REVISION_2;
nsmagic = le32_to_cpu(nscap->magic_etc);
if (nsmagic & VFS_CAP_FLAGS_EFFECTIVE)
@@ -443,9 +459,12 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer,
memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32);
cap->magic_etc = cpu_to_le32(magic);
} else {
- size = -ENOMEM;
+ /* use unconverted v2 */
+ tmpbuf = NULL;
}
+ *buffer = cap;
}
+out_free:
kfree(tmpbuf);
return size;
}
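
The cap_inode_getsecurity() rework handles both directions of user-namespace conversion: a v2 xattr can be reported as a v3 blob with the mapped root uid filled in, and a v3 xattr whose rootid belongs to an ancestor namespace is converted back to v2 (returning -EOVERFLOW rather than -EOPNOTSUPP when the rootid does not map at all). Every path now exits through the single out_free: label, and when the buffer handed to the caller is tmpbuf itself, tmpbuf is set to NULL so the final kfree() becomes a no-op. A hedged userspace sketch of that transfer-or-free exit pattern (hypothetical names, not the LSM code):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Returns the blob size or a negative error; with 'alloc' set, ownership
 * of the buffer moves to *buffer and must not also be freed here. */
static int get_blob(void **buffer, int alloc)
{
	char *tmpbuf = malloc(64);
	int size;

	if (!tmpbuf)
		return -ENOMEM;
	memset(tmpbuf, 0, 64);
	size = 64;

	if (alloc) {
		*buffer = tmpbuf;	/* caller now owns the memory */
		tmpbuf = NULL;		/* so the free below is a no-op */
	}

	free(tmpbuf);
	return size;
}
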
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 956383d5fa62..4bd30315eb62 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -467,13 +467,20 @@ static int create_static_call_sections(struct objtool_file *file)
/* populate reloc for 'addr' */
reloc = malloc(sizeof(*reloc));
+
if (!reloc) {
perror("malloc");
return -1;
}
memset(reloc, 0, sizeof(*reloc));
- reloc->sym = insn->sec->sym;
- reloc->addend = insn->offset;
+
+ insn_to_reloc_sym_addend(insn->sec, insn->offset, reloc);
+ if (!reloc->sym) {
+ WARN_FUNC("static call tramp: missing containing symbol",
+ insn->sec, insn->offset);
+ return -1;
+ }
+
reloc->type = R_X86_64_PC32;
reloc->offset = idx * sizeof(struct static_call_site);
reloc->sec = reloc_sec;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index f4f3e8d99593..d8421e1d06be 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -262,6 +262,32 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns
return find_reloc_by_dest_range(elf, sec, offset, 1);
}
+void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset,
+ struct reloc *reloc)
+{
+ if (sec->sym) {
+ reloc->sym = sec->sym;
+ reloc->addend = offset;
+ return;
+ }
+
+ /*
+ * The Clang assembler strips section symbols, so we have to reference
+ * the function symbol instead:
+ */
+ reloc->sym = find_symbol_containing(sec, offset);
+ if (!reloc->sym) {
+ /*
+ * Hack alert. This happens when we need to reference the NOP
+ * pad insn immediately after the function.
+ */
+ reloc->sym = find_symbol_containing(sec, offset - 1);
+ }
+
+ if (reloc->sym)
+ reloc->addend = offset - reloc->sym->offset;
+}
+
static int read_sections(struct elf *elf)
{
Elf_Scn *s = NULL;
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index 807f8c670097..e6890cc70a25 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -140,6 +140,8 @@ struct reloc *find_reloc_by_dest(const struct elf *elf, struct section *sec, uns
struct reloc *find_reloc_by_dest_range(const struct elf *elf, struct section *sec,
unsigned long offset, unsigned int len);
struct symbol *find_func_containing(struct section *sec, unsigned long offset);
+void insn_to_reloc_sym_addend(struct section *sec, unsigned long offset,
+ struct reloc *reloc);
int elf_rebuild_reloc_section(struct elf *elf, struct section *sec);
#define for_each_sec(file, sec) \
diff --git a/tools/objtool/orc_gen.c b/tools/objtool/orc_gen.c
index 235663b96adc..9ce68b385a1b 100644
--- a/tools/objtool/orc_gen.c
+++ b/tools/objtool/orc_gen.c
@@ -105,30 +105,11 @@ static int create_orc_entry(struct elf *elf, struct section *u_sec, struct secti
}
memset(reloc, 0, sizeof(*reloc));
- if (insn_sec->sym) {
- reloc->sym = insn_sec->sym;
- reloc->addend = insn_off;
- } else {
- /*
- * The Clang assembler doesn't produce section symbols, so we
- * have to reference the function symbol instead:
- */
- reloc->sym = find_symbol_containing(insn_sec, insn_off);
- if (!reloc->sym) {
- /*
- * Hack alert. This happens when we need to reference
- * the NOP pad insn immediately after the function.
- */
- reloc->sym = find_symbol_containing(insn_sec,
- insn_off - 1);
- }
- if (!reloc->sym) {
- WARN("missing symbol for insn at offset 0x%lx\n",
- insn_off);
- return -1;
- }
-
- reloc->addend = insn_off - reloc->sym->offset;
+ insn_to_reloc_sym_addend(insn_sec, insn_off, reloc);
+ if (!reloc->sym) {
+ WARN("missing symbol for insn at offset 0x%lx",
+ insn_off);
+ return -1;
}
reloc->type = R_X86_64_PC32;
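
The objtool hunks hoist the symbol-lookup fallback that orc_gen.c already carried into a shared helper, insn_to_reloc_sym_addend(), and reuse it from the static-call section writer: when the assembler (notably Clang's) emits no section symbol, the relocation is expressed as the containing function symbol plus an offset. A small standalone illustration of that fallback arithmetic (toy symbol table, not objtool's ELF handling):

#include <stdio.h>

struct sym {
	const char *name;
	unsigned long start, len;
};

static const struct sym *containing(const struct sym *tab, int n,
				    unsigned long off)
{
	for (int i = 0; i < n; i++)
		if (off >= tab[i].start && off < tab[i].start + tab[i].len)
			return &tab[i];
	return NULL;
}

int main(void)
{
	const struct sym text[] = { { "foo", 0x0, 0x40 }, { "bar", 0x40, 0x80 } };
	unsigned long insn_off = 0x58;
	const struct sym *s = containing(text, 2, insn_off);

	if (s)		/* prints "bar+0x18": symbol plus relative addend */
		printf("%s+0x%lx\n", s->name, insn_off - s->start);
	return 0;
}
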
diff --git a/tools/testing/selftests/net/txtimestamp.c b/tools/testing/selftests/net/txtimestamp.c
index 490a8cca708a..fabb1d555ee5 100644
--- a/tools/testing/selftests/net/txtimestamp.c
+++ b/tools/testing/selftests/net/txtimestamp.c
@@ -26,6 +26,7 @@
#include <inttypes.h>
#include <linux/errqueue.h>
#include <linux/if_ether.h>
+#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <netdb.h>
@@ -34,7 +35,6 @@
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
-#include <netpacket/packet.h>
#include <poll.h>
#include <stdarg.h>
#include <stdbool.h>
@@ -495,12 +495,12 @@ static void do_test(int family, unsigned int report_opt)
total_len = cfg_payload_len;
if (cfg_use_pf_packet || cfg_proto == SOCK_RAW) {
total_len += sizeof(struct udphdr);
- if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW)
+ if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW) {
if (family == PF_INET)
total_len += sizeof(struct iphdr);
else
total_len += sizeof(struct ipv6hdr);
-
+ }
/* special case, only rawv6_sendmsg:
* pass proto in sin6_port if not connected
* also see ANK comment in net/ipv4/raw.c
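
The txtimestamp.c changes swap the glibc-specific <netpacket/packet.h> include for the UAPI <linux/if_packet.h> and add explicit braces around the nested if/else when computing total_len. The else already bound to the inner if (family == PF_INET) check, but spelling out the braces removes the dangling-else ambiguity that compilers warn about. A tiny reminder of why that shape is worth bracing (illustrative only):

#include <stdio.h>

int main(void)
{
	int use_raw = 1, is_ipv4 = 0;
	int total = 0;

	/* Without braces the else binds to the nearest if, whatever the
	 * indentation suggests; braces keep reader and compiler in sync. */
	if (use_raw) {
		if (is_ipv4)
			total += 20;	/* IPv4 header */
		else
			total += 40;	/* IPv6 header */
	}

	printf("total=%d\n", total);	/* prints total=40 */
	return 0;
}
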
diff --git a/tools/testing/selftests/netfilter/nft_meta.sh b/tools/testing/selftests/netfilter/nft_meta.sh
index 087f0e6e71ce..f33154c04d34 100755
--- a/tools/testing/selftests/netfilter/nft_meta.sh
+++ b/tools/testing/selftests/netfilter/nft_meta.sh
@@ -23,7 +23,7 @@ ip -net "$ns0" addr add 127.0.0.1 dev lo
trap cleanup EXIT
-currentyear=$(date +%G)
+currentyear=$(date +%Y)
lastyear=$((currentyear-1))
ip netns exec "$ns0" nft -f /dev/stdin <<EOF
table inet filter {