Re: Linux 5.15.105

From: Greg Kroah-Hartman
Date: Thu Mar 30 2023 - 07:16:47 EST


diff --git a/Makefile b/Makefile
index d3c808f4bf6b..915e9a34994f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 15
-SUBLEVEL = 104
+SUBLEVEL = 105
EXTRAVERSION =
NAME = Trick or Treat

diff --git a/arch/arm/boot/dts/e60k02.dtsi b/arch/arm/boot/dts/e60k02.dtsi
index cfb239d5186a..54b4de6a5925 100644
--- a/arch/arm/boot/dts/e60k02.dtsi
+++ b/arch/arm/boot/dts/e60k02.dtsi
@@ -302,6 +302,7 @@ &usdhc3 {

&usbotg1 {
pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1>;
disable-over-current;
srp-disable;
hnp-disable;
diff --git a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
index a17b8bbbdb95..f2231cb1e32d 100644
--- a/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
+++ b/arch/arm/boot/dts/imx6sl-tolino-shine2hd.dts
@@ -597,6 +597,7 @@ &usdhc3 {

&usbotg1 {
pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usbotg1>;
disable-over-current;
srp-disable;
hnp-disable;
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
index 6357078185ed..0e8f0d7161ad 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-nitrogen-r2.dts
@@ -247,7 +247,7 @@ wm8960: codec@1a {
compatible = "wlf,wm8960";
reg = <0x1a>;
clocks = <&clk IMX8MM_CLK_SAI1_ROOT>;
- clock-names = "mclk1";
+ clock-names = "mclk";
wlf,shared-lrclk;
#sound-dai-cells = <0>;
};
diff --git a/arch/arm64/boot/dts/freescale/imx8mn.dtsi b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
index 0c47ff242641..16a5efba17f3 100644
--- a/arch/arm64/boot/dts/freescale/imx8mn.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mn.dtsi
@@ -265,6 +265,7 @@ spba2: spba-bus@30000000 {
sai2: sai@30020000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30020000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI2_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
@@ -279,6 +280,7 @@ sai2: sai@30020000 {
sai3: sai@30030000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30030000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI3_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
@@ -293,6 +295,7 @@ sai3: sai@30030000 {
sai5: sai@30050000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30050000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI5_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
@@ -309,6 +312,7 @@ sai5: sai@30050000 {
sai6: sai@30060000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x30060000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI6_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
@@ -366,6 +370,7 @@ spdif1: spdif@30090000 {
sai7: sai@300b0000 {
compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai";
reg = <0x300b0000 0x10000>;
+ #sound-dai-cells = <0>;
interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX8MN_CLK_SAI7_IPG>,
<&clk IMX8MN_CLK_DUMMY>,
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 59fc63feb0dc..6f647742a6ca 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -30,6 +30,7 @@
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
+#include <linux/extable.h>

#include <asm/setup.h>
#include <asm/fpu.h>
@@ -544,7 +545,8 @@ static inline void bus_error030 (struct frame *fp)
errorcode |= 2;

if (mmusr & (MMU_I | MMU_WP)) {
- if (ssw & 4) {
+ /* We might have an exception table for this PC */
+ if (ssw & 4 && !search_exception_tables(fp->ptregs.pc)) {
pr_err("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 1b21f332f58c..bf602e38962f 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -361,6 +361,28 @@ config RISCV_BASE_PMU

endmenu

+config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+ def_bool y
+ # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
+ depends on AS_IS_GNU && AS_VERSION >= 23800
+ help
+ Newer binutils versions default to ISA spec version 20191213 which
+ moves some instructions from the I extension to the Zicsr and Zifencei
+ extensions.
+
+config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
+ def_bool y
+ depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+ # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
+ depends on CC_IS_CLANG && CLANG_VERSION < 170000
+ help
+ Certain versions of clang do not support zicsr and zifencei via -march
+ but newer versions of binutils require it for the reasons noted in the
+ help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
+ option causes an older ISA spec compatible with these older versions
+ of clang to be passed to GAS, which has the same result as passing zicsr
+ and zifencei to -march.
+
config FPU
bool "FPU support"
default y
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index b3592be7fa60..0f17c6b6b729 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -59,10 +59,12 @@ riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c

-# Newer binutils versions default to ISA spec version 20191213 which moves some
-# instructions from the I extension to the Zicsr and Zifencei extensions.
-toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
-riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
+ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
+KBUILD_CFLAGS += -Wa,-misa-spec=2.2
+KBUILD_AFLAGS += -Wa,-misa-spec=2.2
+else
+riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei
+endif

KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
KBUILD_AFLAGS += -march=$(riscv-march-y)
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 801019381dea..a09196f8de68 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -12,6 +12,8 @@
#include <asm/errata_list.h>

#ifdef CONFIG_MMU
+extern unsigned long asid_mask;
+
static inline void local_flush_tlb_all(void)
{
__asm__ __volatile__ ("sfence.vma" : : : "memory");
diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h
new file mode 100644
index 000000000000..66b13a522880
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/setup.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_RISCV_SETUP_H
+#define _UAPI_ASM_RISCV_SETUP_H
+
+#define COMMAND_LINE_SIZE 1024
+
+#endif /* _UAPI_ASM_RISCV_SETUP_H */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 68d7f5f407cd..64bfb4575f3e 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -22,7 +22,7 @@ DEFINE_STATIC_KEY_FALSE(use_asid_allocator);

static unsigned long asid_bits;
static unsigned long num_asids;
-static unsigned long asid_mask;
+unsigned long asid_mask;

static atomic_long_t current_version;

diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 64f8201237c2..39d18fc07b9c 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -43,7 +43,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
/* check if the tlbflush needs to be sent to other CPUs */
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
if (static_branch_unlikely(&use_asid_allocator)) {
- unsigned long asid = atomic_long_read(&mm->context.id);
+ unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;

if (broadcast) {
riscv_cpuid_to_hartid_mask(cmask, &hmask);
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index aa92cc933889..6c7966e62775 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -50,6 +50,7 @@
#define SR_FD 0x00008000
#define SR_MD 0x40000000

+#define SR_USER_MASK 0x00000303 // M, Q, S, T bits
/*
* DSP structure and data
*/
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index dd3092911efa..dc13702003f0 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -115,6 +115,7 @@ static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p)
{
unsigned int err = 0;
+ unsigned int sr = regs->sr & ~SR_USER_MASK;

#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(regs[1]);
@@ -130,6 +131,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
COPY(sr); COPY(pc);
#undef COPY

+ regs->sr = (regs->sr & SR_USER_MASK) | sr;
+
#ifdef CONFIG_SH_FPU
if (boot_cpu_data.flags & CPU_HAS_FPU) {
int owned_fp;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 762b43f0d919..a067c7ce8e19 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1846,16 +1846,19 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)

cpumask_clear(&hv_vcpu->tlb_flush);

- vcpu_mask = all_cpus ? NULL :
- sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
- vp_bitmap, vcpu_bitmap);
-
/*
* vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
* analyze it here, flush TLB regardless of the specified address space.
*/
- kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
- NULL, vcpu_mask, &hv_vcpu->tlb_flush);
+ if (all_cpus) {
+ kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH_GUEST);
+ } else {
+ vcpu_mask = sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask,
+ vp_bitmap, vcpu_bitmap);
+
+ kvm_make_vcpus_request_mask(kvm, KVM_REQ_TLB_FLUSH_GUEST,
+ NULL, vcpu_mask, &hv_vcpu->tlb_flush);
+ }

ret_success:
/* We always do full TLB flush, set 'Reps completed' = 'Rep Count' */
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index 222b951ff56a..f1dd086d0b87 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -191,37 +191,26 @@ bool acpi_device_override_status(struct acpi_device *adev, unsigned long long *s
* a hardcoded allowlist for D3 support, which was used for these platforms.
*
* This allows quirking on Linux in a similar fashion.
+ *
+ * Cezanne systems shouldn't *normally* need this as the BIOS includes
+ * StorageD3Enable. But for two reasons we have added it.
+ * 1) The BIOS on a number of Dell systems have ambiguity
+ * between the same value used for _ADR on ACPI nodes GPP1.DEV0 and GPP1.NVME.
+ * GPP1.NVME is needed to get StorageD3Enable node set properly.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216440
+ * https://bugzilla.kernel.org/show_bug.cgi?id=216773
+ * https://bugzilla.kernel.org/show_bug.cgi?id=217003
+ * 2) On at least one HP system StorageD3Enable is missing on the second NVME
+ disk in the system.
*/
static const struct x86_cpu_id storage_d3_cpu_ids[] = {
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 96, NULL), /* Renoir */
X86_MATCH_VENDOR_FAM_MODEL(AMD, 23, 104, NULL), /* Lucienne */
- {}
-};
-
-static const struct dmi_system_id force_storage_d3_dmi[] = {
- {
- /*
- * _ADR is ambiguous between GPP1.DEV0 and GPP1.NVME
- * but .NVME is needed to get StorageD3Enable node
- * https://bugzilla.kernel.org/show_bug.cgi?id=216440
- */
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
- }
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5625"),
- }
- },
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 25, 80, NULL), /* Cezanne */
{}
};

bool force_storage_d3(void)
{
- const struct dmi_system_id *dmi_id = dmi_first_match(force_storage_d3_dmi);
-
- return dmi_id || x86_match_cpu(storage_d3_cpu_ids);
+ return x86_match_cpu(storage_d3_cpu_ids);
}
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 681cb3786794..49cb4537344a 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -2909,6 +2909,7 @@ close_card_oam(struct idt77252_dev *card)

recycle_rx_pool_skb(card, &vc->rcv.rx_pool);
}
+ kfree(vc);
}
}
}
@@ -2952,6 +2953,15 @@ open_card_ubr0(struct idt77252_dev *card)
return 0;
}

+static void
+close_card_ubr0(struct idt77252_dev *card)
+{
+ struct vc_map *vc = card->vcs[0];
+
+ free_scq(card, vc->scq);
+ kfree(vc);
+}
+
static int
idt77252_dev_open(struct idt77252_dev *card)
{
@@ -3001,6 +3011,7 @@ static void idt77252_dev_close(struct atm_dev *dev)
struct idt77252_dev *card = dev->dev_data;
u32 conf;

+ close_card_ubr0(card);
close_card_oam(card);

conf = SAR_CFG_RXPTH | /* enable receive path */
diff --git a/drivers/bluetooth/btqcomsmd.c b/drivers/bluetooth/btqcomsmd.c
index 2acb719e596f..11c7e04bf394 100644
--- a/drivers/bluetooth/btqcomsmd.c
+++ b/drivers/bluetooth/btqcomsmd.c
@@ -122,6 +122,21 @@ static int btqcomsmd_setup(struct hci_dev *hdev)
return 0;
}

+static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ int ret;
+
+ ret = qca_set_bdaddr_rome(hdev, bdaddr);
+ if (ret)
+ return ret;
+
+ /* The firmware stops responding for a while after setting the bdaddr,
+ * causing timeouts for subsequent commands. Sleep a bit to avoid this.
+ */
+ usleep_range(1000, 10000);
+ return 0;
+}
+
static int btqcomsmd_probe(struct platform_device *pdev)
{
struct btqcomsmd *btq;
@@ -162,7 +177,7 @@ static int btqcomsmd_probe(struct platform_device *pdev)
hdev->close = btqcomsmd_close;
hdev->send = btqcomsmd_send;
hdev->setup = btqcomsmd_setup;
- hdev->set_bdaddr = qca_set_bdaddr_rome;
+ hdev->set_bdaddr = btqcomsmd_set_bdaddr;

ret = hci_register_dev(hdev);
if (ret < 0)
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 199e8f7d426d..7050a16e7efe 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -352,6 +352,7 @@ static void btsdio_remove(struct sdio_func *func)

BT_DBG("func %p", func);

+ cancel_work_sync(&data->work);
if (!data)
return;

diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c
index e09eb12bf421..ed9b83aee8bd 100644
--- a/drivers/firmware/arm_scmi/mailbox.c
+++ b/drivers/firmware/arm_scmi/mailbox.c
@@ -52,6 +52,39 @@ static bool mailbox_chan_available(struct device *dev, int idx)
"#mbox-cells", idx, NULL);
}

+static int mailbox_chan_validate(struct device *cdev)
+{
+ int num_mb, num_sh, ret = 0;
+ struct device_node *np = cdev->of_node;
+
+ num_mb = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+ num_sh = of_count_phandle_with_args(np, "shmem", NULL);
+ /* Bail out if mboxes and shmem descriptors are inconsistent */
+ if (num_mb <= 0 || num_sh > 2 || num_mb != num_sh) {
+ dev_warn(cdev, "Invalid channel descriptor for '%s'\n",
+ of_node_full_name(np));
+ return -EINVAL;
+ }
+
+ if (num_sh > 1) {
+ struct device_node *np_tx, *np_rx;
+
+ np_tx = of_parse_phandle(np, "shmem", 0);
+ np_rx = of_parse_phandle(np, "shmem", 1);
+ /* SCMI Tx and Rx shared mem areas have to be distinct */
+ if (!np_tx || !np_rx || np_tx == np_rx) {
+ dev_warn(cdev, "Invalid shmem descriptor for '%s'\n",
+ of_node_full_name(np));
+ ret = -EINVAL;
+ }
+
+ of_node_put(np_tx);
+ of_node_put(np_rx);
+ }
+
+ return ret;
+}
+
static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
bool tx)
{
@@ -64,6 +97,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
resource_size_t size;
struct resource res;

+ ret = mailbox_chan_validate(cdev);
+ if (ret)
+ return ret;
+
smbox = devm_kzalloc(dev, sizeof(*smbox), GFP_KERNEL);
if (!smbox)
return -ENOMEM;
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index 6aa2bb5bbd5e..7ac757843dcf 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -343,7 +343,7 @@ static const struct fwnode_operations efifb_fwnode_ops = {
#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;

-__init void sysfb_apply_efi_quirks(struct platform_device *pd)
+__init void sysfb_apply_efi_quirks(void)
{
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
@@ -357,7 +357,10 @@ __init void sysfb_apply_efi_quirks(struct platform_device *pd)
screen_info.lfb_height = temp;
screen_info.lfb_linelength = 4 * screen_info.lfb_width;
}
+}

+__init void sysfb_set_efifb_fwnode(struct platform_device *pd)
+{
if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) {
fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
pd->dev.fwnode = &efifb_fwnode;
diff --git a/drivers/firmware/sysfb.c b/drivers/firmware/sysfb.c
index 1f276f108cc9..abc3279c706d 100644
--- a/drivers/firmware/sysfb.c
+++ b/drivers/firmware/sysfb.c
@@ -81,6 +81,8 @@ static __init int sysfb_init(void)
if (disabled)
goto unlock_mutex;

+ sysfb_apply_efi_quirks();
+
/* try to create a simple-framebuffer device */
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
@@ -103,7 +105,7 @@ static __init int sysfb_init(void)
goto unlock_mutex;
}

- sysfb_apply_efi_quirks(pd);
+ sysfb_set_efifb_fwnode(pd);

ret = platform_device_add_data(pd, si, sizeof(*si));
if (ret)
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index eac51c2a27ba..fd4fa923088a 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -110,7 +110,7 @@ __init struct platform_device *sysfb_create_simplefb(const struct screen_info *s
if (!pd)
return ERR_PTR(-ENOMEM);

- sysfb_apply_efi_quirks(pd);
+ sysfb_set_efifb_fwnode(pd);

ret = platform_device_add_resources(pd, &res, 1);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 4f62f422bcb7..d90da384d185 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1286,6 +1286,7 @@ void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
int amdgpu_device_pci_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
+bool amdgpu_device_aspm_support_quirk(void);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index b5fe2c91f58c..2f51789d9818 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -75,6 +75,10 @@

#include <drm/drm_drv.h>

+#if IS_ENABLED(CONFIG_X86)
+#include <asm/intel-family.h>
+#endif
+
MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
@@ -1337,6 +1341,17 @@ bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev)
return pcie_aspm_enabled(adev->pdev);
}

+bool amdgpu_device_aspm_support_quirk(void)
+{
+#if IS_ENABLED(CONFIG_X86)
+ struct cpuinfo_x86 *c = &cpu_data(0);
+
+ return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
+#else
+ return true;
+#endif
+}
+
/* if we get transitioned to only one device, take VGA back */
/**
* amdgpu_device_vga_set_decode - enable/disable vga decode
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 6e277236b44f..947e8c09493d 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -584,7 +584,7 @@ static void nv_pcie_gen3_enable(struct amdgpu_device *adev)

static void nv_program_aspm(struct amdgpu_device *adev)
{
- if (!amdgpu_device_should_use_aspm(adev))
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return;

if (!(adev->flags & AMD_IS_APU) &&
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 45f0188c4273..bb414e7cf024 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -81,10 +81,6 @@
#include "mxgpu_vi.h"
#include "amdgpu_dm.h"

-#if IS_ENABLED(CONFIG_X86)
-#include <asm/intel-family.h>
-#endif
-
#define ixPCIE_LC_L1_PM_SUBSTATE 0x100100C6
#define PCIE_LC_L1_PM_SUBSTATE__LC_L1_SUBSTATES_OVERRIDE_EN_MASK 0x00000001L
#define PCIE_LC_L1_PM_SUBSTATE__LC_PCI_PM_L1_2_OVERRIDE_MASK 0x00000002L
@@ -1138,24 +1134,13 @@ static void vi_enable_aspm(struct amdgpu_device *adev)
WREG32_PCIE(ixPCIE_LC_CNTL, data);
}

-static bool aspm_support_quirk_check(void)
-{
-#if IS_ENABLED(CONFIG_X86)
- struct cpuinfo_x86 *c = &cpu_data(0);
-
- return !(c->x86 == 6 && c->x86_model == INTEL_FAM6_ALDERLAKE);
-#else
- return true;
-#endif
-}
-
static void vi_program_aspm(struct amdgpu_device *adev)
{
u32 data, data1, orig;
bool bL1SS = false;
bool bClkReqSupport = true;

- if (!amdgpu_device_should_use_aspm(adev) || !aspm_support_quirk_check())
+ if (!amdgpu_device_should_use_aspm(adev) || !amdgpu_device_aspm_support_quirk())
return;

if (adev->flags & AMD_IS_APU ||
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index 82169b6bfca1..96808190250d 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -670,8 +670,8 @@ static int lt8912_parse_dt(struct lt8912 *lt)

lt->hdmi_port = of_drm_find_bridge(port_node);
if (!lt->hdmi_port) {
- dev_err(lt->dev, "%s: Failed to get hdmi port\n", __func__);
- ret = -ENODEV;
+ ret = -EPROBE_DEFER;
+ dev_err_probe(lt->dev, ret, "%s: Failed to get hdmi port\n", __func__);
goto err_free_host_node;
}

diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index c9b051ab18e0..566b34ba57e1 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -7824,6 +7824,7 @@ intel_crtc_prepare_cleared_state(struct intel_atomic_state *state,
* only fields that are know to not cause problems are preserved. */

saved_state->uapi = crtc_state->uapi;
+ saved_state->inherited = crtc_state->inherited;
saved_state->scaler_state = crtc_state->scaler_state;
saved_state->shared_dpll = crtc_state->shared_dpll;
saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 952e7177409b..b2a003127d31 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -709,12 +709,12 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_gt;

- intel_uc_init_late(&gt->uc);
-
err = i915_inject_probe_error(gt->i915, -EIO);
if (err)
goto err_gt;

+ intel_uc_init_late(&gt->uc);
+
intel_migrate_init(&gt->migrate, gt);

goto out_fw;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 283c5091005e..2f640b9fdf4a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -92,8 +92,7 @@ static void debug_active_init(struct i915_active *ref)
static void debug_active_activate(struct i915_active *ref)
{
lockdep_assert_held(&ref->tree_lock);
- if (!atomic_read(&ref->count)) /* before the first inc */
- debug_object_activate(ref, &active_debug_desc);
+ debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 6e37de4fcb46..207b309a21c0 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -324,23 +324,23 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)

ret = meson_encoder_hdmi_init(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;

ret = meson_plane_create(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;

ret = meson_overlay_create(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;

ret = meson_crtc_create(priv);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;

ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
if (ret)
- goto exit_afbcd;
+ goto unbind_all;

drm_mode_config_reset(drm);

@@ -358,6 +358,9 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)

uninstall_irq:
free_irq(priv->vsync_irq, drm);
+unbind_all:
+ if (has_components)
+ component_unbind_all(drm->dev, drm);
exit_afbcd:
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index 4611ec408506..2a81311b2217 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -450,7 +450,7 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
if (state->fb && cirrus->cpp != cirrus_cpp(state->fb))
cirrus_mode_set(cirrus, &crtc->mode, state->fb);

- if (drm_atomic_helper_damage_merged(old_state, state, &rect))
+ if (state->fb && drm_atomic_helper_damage_merged(old_state, state, &rect))
cirrus_fb_blit_rect(state->fb, &shadow_plane_state->data[0], &rect);
}

diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 172f20e88c6c..d902fe43cb81 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -1352,6 +1352,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
girq->parents = NULL;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
+ girq->threaded = true;

ret = gpiochip_add_data(&dev->gc, dev);
if (ret < 0) {
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 45e0c7b1c9ec..6c942dd1abca 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -5,6 +5,7 @@
* Copyright (c) 2014-2016, Intel Corporation.
*/

+#include <linux/devm-helpers.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
@@ -621,7 +622,6 @@ static void recv_ipc(struct ishtp_device *dev, uint32_t doorbell_val)
case MNG_RESET_NOTIFY:
if (!ishtp_dev) {
ishtp_dev = dev;
- INIT_WORK(&fw_reset_work, fw_reset_work_fn);
}
schedule_work(&fw_reset_work);
break;
@@ -936,6 +936,7 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
{
struct ishtp_device *dev;
int i;
+ int ret;

dev = devm_kzalloc(&pdev->dev,
sizeof(struct ishtp_device) + sizeof(struct ish_hw),
@@ -971,6 +972,12 @@ struct ishtp_device *ish_dev_init(struct pci_dev *pdev)
list_add_tail(&tx_buf->link, &dev->wr_free_list);
}

+ ret = devm_work_autocancel(&pdev->dev, &fw_reset_work, fw_reset_work_fn);
+ if (ret) {
+ dev_err(dev->devc, "Failed to initialise FW reset work\n");
+ return NULL;
+ }
+
dev->ops = &ish_hw_ops;
dev->devc = &pdev->dev;
dev->mtu = IPC_PAYLOAD_SIZE - sizeof(struct ishtp_msg_hdr);
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 3ae961986fc3..fd3b277d340a 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -736,6 +736,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
{
struct hwmon_device *hwdev;
struct device *hdev;
+ struct device *tdev = dev;
int i, err, id;

/* Complain about invalid characters in hwmon name attribute */
@@ -793,7 +794,9 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
hwdev->name = name;
hdev->class = &hwmon_class;
hdev->parent = dev;
- hdev->of_node = dev ? dev->of_node : NULL;
+ while (tdev && !tdev->of_node)
+ tdev = tdev->parent;
+ hdev->of_node = tdev ? tdev->of_node : NULL;
hwdev->chip = chip;
dev_set_drvdata(hdev, drvdata);
dev_set_name(hdev, HWMON_ID_FORMAT, id);
@@ -805,7 +808,7 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,

INIT_LIST_HEAD(&hwdev->tzdata);

- if (dev && dev->of_node && chip && chip->ops->read &&
+ if (hdev->of_node && chip && chip->ops->read &&
chip->info[0]->type == hwmon_chip &&
(chip->info[0]->config[0] & HWMON_C_REGISTER_TZ)) {
err = hwmon_thermal_register_sensors(hdev);
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 1f93134afcb9..485d68ab79e1 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -486,6 +486,8 @@ static const struct it87_devices it87_devices[] = {
#define has_pwm_freq2(data) ((data)->features & FEAT_PWM_FREQ2)
#define has_six_temp(data) ((data)->features & FEAT_SIX_TEMP)
#define has_vin3_5v(data) ((data)->features & FEAT_VIN3_5V)
+#define has_scaling(data) ((data)->features & (FEAT_12MV_ADC | \
+ FEAT_10_9MV_ADC))

struct it87_sio_data {
int sioaddr;
@@ -3098,7 +3100,7 @@ static int it87_probe(struct platform_device *pdev)
"Detected broken BIOS defaults, disabling PWM interface\n");

/* Starting with IT8721F, we handle scaling of internal voltages */
- if (has_12mv_adc(data)) {
+ if (has_scaling(data)) {
if (sio_data->internal & BIT(0))
data->in_scaled |= BIT(3); /* in3 is AVCC */
if (sio_data->internal & BIT(1))
diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c
index acf394812061..72e43ecaff13 100644
--- a/drivers/i2c/busses/i2c-hisi.c
+++ b/drivers/i2c/busses/i2c-hisi.c
@@ -340,7 +340,11 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context)
hisi_i2c_read_rx_fifo(ctlr);

out:
- if (int_stat & HISI_I2C_INT_TRANS_CPLT || ctlr->xfer_err) {
+ /*
+ * Only use TRANS_CPLT to indicate the completion. On error cases we'll
+ * get two interrupts, INT_ERR first then TRANS_CPLT.
+ */
+ if (int_stat & HISI_I2C_INT_TRANS_CPLT) {
hisi_i2c_disable_int(ctlr, HISI_I2C_INT_ALL);
hisi_i2c_clear_int(ctlr, HISI_I2C_INT_ALL);
complete(ctlr->completion);
diff --git a/drivers/i2c/busses/i2c-imx-lpi2c.c b/drivers/i2c/busses/i2c-imx-lpi2c.c
index 8b9ba055c418..2018dbcf241e 100644
--- a/drivers/i2c/busses/i2c-imx-lpi2c.c
+++ b/drivers/i2c/busses/i2c-imx-lpi2c.c
@@ -502,10 +502,14 @@ static int lpi2c_imx_xfer(struct i2c_adapter *adapter,
static irqreturn_t lpi2c_imx_isr(int irq, void *dev_id)
{
struct lpi2c_imx_struct *lpi2c_imx = dev_id;
+ unsigned int enabled;
unsigned int temp;

+ enabled = readl(lpi2c_imx->base + LPI2C_MIER);
+
lpi2c_imx_intctrl(lpi2c_imx, 0);
temp = readl(lpi2c_imx->base + LPI2C_MSR);
+ temp &= enabled;

if (temp & MSR_RDF)
lpi2c_imx_read_rxfifo(lpi2c_imx);
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index bba08cbce6e1..6c39881d9e0f 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -307,6 +307,9 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
u32 msg[3];
int rc;

+ if (writelen > I2C_SMBUS_BLOCK_MAX)
+ return -EINVAL;
+
memcpy(ctx->dma_buffer, data, writelen);
paddr = dma_map_single(ctx->dev, ctx->dma_buffer, writelen,
DMA_TO_DEVICE);
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index c7af143980de..87edab1bf987 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -275,7 +275,7 @@ static int qcom_osm_l3_probe(struct platform_device *pdev)
qnodes = desc->nodes;
num_nodes = desc->num_nodes;

- data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL);
+ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
if (!data)
return -ENOMEM;

diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index f30fd38c3773..a428770102a3 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -68,7 +68,9 @@ struct dm_crypt_io {
struct crypt_config *cc;
struct bio *base_bio;
u8 *integrity_metadata;
- bool integrity_metadata_from_pool;
+ bool integrity_metadata_from_pool:1;
+ bool in_tasklet:1;
+
struct work_struct work;
struct tasklet_struct tasklet;

@@ -1723,6 +1725,7 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
io->ctx.r.req = NULL;
io->integrity_metadata = NULL;
io->integrity_metadata_from_pool = false;
+ io->in_tasklet = false;
atomic_set(&io->io_pending, 0);
}

@@ -1768,14 +1771,13 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
* our tasklet. In this case we need to delay bio_endio()
* execution to after the tasklet is done and dequeued.
*/
- if (tasklet_trylock(&io->tasklet)) {
- tasklet_unlock(&io->tasklet);
- bio_endio(base_bio);
+ if (io->in_tasklet) {
+ INIT_WORK(&io->work, kcryptd_io_bio_endio);
+ queue_work(cc->io_queue, &io->work);
return;
}

- INIT_WORK(&io->work, kcryptd_io_bio_endio);
- queue_work(cc->io_queue, &io->work);
+ bio_endio(base_bio);
}

/*
@@ -1935,6 +1937,7 @@ static int dmcrypt_write(void *data)
io = crypt_io_from_node(rb_first(&write_tree));
rb_erase(&io->rb_node, &write_tree);
kcryptd_io_write(io);
+ cond_resched();
} while (!RB_EMPTY_ROOT(&write_tree));
blk_finish_plug(&plug);
}
@@ -2228,6 +2231,7 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io)
* it is being executed with irqs disabled.
*/
if (in_hardirq() || irqs_disabled()) {
+ io->in_tasklet = true;
tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work);
tasklet_schedule(&io->tasklet);
return;
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index a3f2050b9c9b..9f71d169587f 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -188,7 +188,7 @@ static int dm_stat_in_flight(struct dm_stat_shared *shared)
atomic_read(&shared->in_flight[WRITE]);
}

-void dm_stats_init(struct dm_stats *stats)
+int dm_stats_init(struct dm_stats *stats)
{
int cpu;
struct dm_stats_last_position *last;
@@ -197,11 +197,16 @@ void dm_stats_init(struct dm_stats *stats)
INIT_LIST_HEAD(&stats->list);
stats->precise_timestamps = false;
stats->last = alloc_percpu(struct dm_stats_last_position);
+ if (!stats->last)
+ return -ENOMEM;
+
for_each_possible_cpu(cpu) {
last = per_cpu_ptr(stats->last, cpu);
last->last_sector = (sector_t)ULLONG_MAX;
last->last_rw = UINT_MAX;
}
+
+ return 0;
}

void dm_stats_cleanup(struct dm_stats *stats)
diff --git a/drivers/md/dm-stats.h b/drivers/md/dm-stats.h
index 09c81a1ec057..ee32b099f1cf 100644
--- a/drivers/md/dm-stats.h
+++ b/drivers/md/dm-stats.h
@@ -21,7 +21,7 @@ struct dm_stats_aux {
unsigned long long duration_ns;
};

-void dm_stats_init(struct dm_stats *st);
+int dm_stats_init(struct dm_stats *st);
void dm_stats_cleanup(struct dm_stats *st);

struct mapped_device;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index f7124f257703..1cf652670a7f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3383,6 +3383,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
pt->low_water_blocks = low_water_blocks;
pt->adjusted_pf = pt->requested_pf = pf;
ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;

/*
* Only need to enable discards if the pool should pass
@@ -4263,6 +4264,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;

ti->num_flush_bios = 1;
+ ti->limit_swap_bios = true;
ti->flush_supported = true;
ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);

diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0bd2185d5194..8bc121d39447 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1818,7 +1818,9 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->pending_io)
goto bad;

- dm_stats_init(&md->stats);
+ r = dm_stats_init(&md->stats);
+ if (r < 0)
+ goto bad;

/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index ae4c79d39bc0..3388f620fac9 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -263,7 +263,7 @@ static int b53_mmap_probe_of(struct platform_device *pdev,
if (of_property_read_u32(of_port, "reg", &reg))
continue;

- if (reg < B53_CPU_PORT)
+ if (reg < B53_N_PORTS)
pdata->enabled_ports |= BIT(reg);
}

diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 793992c37855..dfea2ab0c297 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -391,6 +391,9 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid,
/* Set up switch core clock for MT7530 */
static void mt7530_pll_setup(struct mt7530_priv *priv)
{
+ /* Disable core clock */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
+
/* Disable PLL */
core_write(priv, CORE_GSWPLL_GRP1, 0);

@@ -404,14 +407,19 @@ static void mt7530_pll_setup(struct mt7530_priv *priv)
RG_GSWPLL_EN_PRE |
RG_GSWPLL_POSDIV_200M(2) |
RG_GSWPLL_FBKDIV_200M(32));
+
+ udelay(20);
+
+ /* Enable core clock */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_GSWCK_EN);
}

-/* Setup TX circuit including relevant PAD and driving */
+/* Setup port 6 interface mode and TRGMII TX circuit */
static int
mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
{
struct mt7530_priv *priv = ds->priv;
- u32 ncpo1, ssc_delta, trgint, i, xtal;
+ u32 ncpo1, ssc_delta, trgint, xtal;

xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;

@@ -428,6 +436,10 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
break;
case PHY_INTERFACE_MODE_TRGMII:
trgint = 1;
+ if (xtal == HWTRAP_XTAL_25MHZ)
+ ssc_delta = 0x57;
+ else
+ ssc_delta = 0x87;
if (priv->id == ID_MT7621) {
/* PLL frequency: 150MHz: 1.2GBit */
if (xtal == HWTRAP_XTAL_40MHZ)
@@ -447,23 +459,12 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
return -EINVAL;
}

- if (xtal == HWTRAP_XTAL_25MHZ)
- ssc_delta = 0x57;
- else
- ssc_delta = 0x87;
-
mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK,
P6_INTF_MODE(trgint));

if (trgint) {
- /* Lower Tx Driving for TRGMII path */
- for (i = 0 ; i < NUM_TRGMII_CTRL ; i++)
- mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
- TD_DM_DRVP(8) | TD_DM_DRVN(8));
-
- /* Disable MT7530 core and TRGMII Tx clocks */
- core_clear(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
+ /* Disable the MT7530 TRGMII clocks */
+ core_clear(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);

/* Setup the MT7530 TRGMII Tx Clock */
core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1));
@@ -480,13 +481,8 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface)
RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) |
RG_LCDDS_PWDB | RG_LCDDS_ISO_EN);

- /* Enable MT7530 core and TRGMII Tx clocks */
- core_set(priv, CORE_TRGMII_GSW_CLK_CG,
- REG_GSWCK_EN | REG_TRGMIICK_EN);
- } else {
- for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
- mt7530_rmw(priv, MT7530_TRGMII_RD(i),
- RD_TAP_MASK, RD_TAP(16));
+ /* Enable the MT7530 TRGMII clocks */
+ core_set(priv, CORE_TRGMII_GSW_CLK_CG, REG_TRGMIICK_EN);
}

return 0;
@@ -2168,6 +2164,15 @@ mt7530_setup(struct dsa_switch *ds)

mt7530_pll_setup(priv);

+ /* Lower Tx driving for TRGMII path */
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_write(priv, MT7530_TRGMII_TD_ODT(i),
+ TD_DM_DRVP(8) | TD_DM_DRVN(8));
+
+ for (i = 0; i < NUM_TRGMII_CTRL; i++)
+ mt7530_rmw(priv, MT7530_TRGMII_RD(i),
+ RD_TAP_MASK, RD_TAP(16));
+
/* Enable port 6 */
val = mt7530_read(priv, MT7530_MHWTRAP);
val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 878329ddcf8d..6a0663aadd1e 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -526,7 +526,10 @@ static int gve_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
struct gve_priv *priv = netdev_priv(netdev);
- int err = gve_adminq_report_link_speed(priv);
+ int err = 0;
+
+ if (priv->link_speed == 0)
+ err = gve_adminq_report_link_speed(priv);

cmd->base.speed = priv->link_speed;
return err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 8f5aad9bbba3..9787e794eeda 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -170,10 +170,10 @@ static char *i40e_create_dummy_packet(u8 *dummy_packet, bool ipv4, u8 l4proto,
struct i40e_fdir_filter *data)
{
bool is_vlan = !!data->vlan_tag;
- struct vlan_hdr vlan;
- struct ipv6hdr ipv6;
- struct ethhdr eth;
- struct iphdr ip;
+ struct vlan_hdr vlan = {};
+ struct ipv6hdr ipv6 = {};
+ struct ethhdr eth = {};
+ struct iphdr ip = {};
u8 *tmp;

if (ipv4) {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index e9cc7f6ddc46..c423e73c2d02 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -661,7 +661,7 @@ struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
/* Non Tunneled IPv6 */
IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
IAVF_PTT_UNUSED_ENTRY(91),
IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 82c4f1190e41..f5e6ae2c683f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -4213,6 +4213,11 @@ static void iavf_remove(struct pci_dev *pdev)
mutex_unlock(&adapter->crit_lock);
break;
}
+ /* Simply return if we already went through iavf_shutdown */
+ if (adapter->state == __IAVF_REMOVE) {
+ mutex_unlock(&adapter->crit_lock);
+ return;
+ }

mutex_unlock(&adapter->crit_lock);
usleep_range(500, 1000);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index e76e3df3e2d9..643dbe5bf997 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -1061,7 +1061,7 @@ static inline void iavf_rx_hash(struct iavf_ring *ring,
cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);

- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;

if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index b246ff8b7c20..bff9649d8abd 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3820,9 +3820,7 @@ static void igb_remove(struct pci_dev *pdev)
igb_release_hw_control(adapter);

#ifdef CONFIG_PCI_IOV
- rtnl_lock();
igb_disable_sriov(pdev);
- rtnl_unlock();
#endif

unregister_netdev(netdev);
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d051918dfdff..ebd6d464fa0c 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1074,7 +1074,7 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
igbvf_intr_msix_rx, 0, adapter->rx_ring->name,
netdev);
if (err)
- goto out;
+ goto free_irq_tx;

adapter->rx_ring->itr_register = E1000_EITR(vector);
adapter->rx_ring->itr_val = adapter->current_itr;
@@ -1083,10 +1083,14 @@ static int igbvf_request_msix(struct igbvf_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector,
igbvf_msix_other, 0, netdev->name, netdev);
if (err)
- goto out;
+ goto free_irq_rx;

igbvf_configure_msix(adapter);
return 0;
+free_irq_rx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
+free_irq_tx:
+ free_irq(adapter->msix_entries[--vector].vector, netdev);
out:
return err;
}
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index b8ba3f94c363..a47a2e3e548c 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2009 - 2018 Intel Corporation. */

+#include <linux/etherdevice.h>
+
#include "vf.h"

static s32 e1000_check_for_link_vf(struct e1000_hw *hw);
@@ -131,11 +133,16 @@ static s32 e1000_reset_hw_vf(struct e1000_hw *hw)
/* set our "perm_addr" based on info provided by PF */
ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
if (!ret_val) {
- if (msgbuf[0] == (E1000_VF_RESET |
- E1000_VT_MSGTYPE_ACK))
+ switch (msgbuf[0]) {
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_ACK:
memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
- else
+ break;
+ case E1000_VF_RESET | E1000_VT_MSGTYPE_NACK:
+ eth_zero_addr(hw->mac.perm_addr);
+ break;
+ default:
ret_val = -E1000_ERR_MAC_INIT;
+ }
}
}

diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index bde3fea2c442..e255b0a004f8 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -5951,18 +5951,18 @@ static bool validate_schedule(struct igc_adapter *adapter,
if (e->command != TC_TAPRIO_CMD_SET_GATES)
return false;

- for (i = 0; i < adapter->num_tx_queues; i++) {
- if (e->gate_mask & BIT(i))
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (e->gate_mask & BIT(i)) {
queue_uses[i]++;

- /* There are limitations: A single queue cannot be
- * opened and closed multiple times per cycle unless the
- * gate stays open. Check for it.
- */
- if (queue_uses[i] > 1 &&
- !(prev->gate_mask & BIT(i)))
- return false;
- }
+ /* There are limitations: A single queue cannot
+ * be opened and closed multiple times per cycle
+ * unless the gate stays open. Check for it.
+ */
+ if (queue_uses[i] > 1 &&
+ !(prev->gate_mask & BIT(i)))
+ return false;
+ }
}

return true;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 03b4ec630432..9822db362c88 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -704,6 +704,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
err_detach_rsrc:
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
@@ -738,6 +739,7 @@ static void otx2vf_remove(struct pci_dev *pdev)
destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 72e08559e0d0..f2862100d1a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -117,12 +117,14 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;

- ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
- for (i = 0; i < ets->ets_cap; i++) {
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
if (err)
return err;
+ }

+ ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
+ for (i = 0; i < ets->ets_cap; i++) {
err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index f1dd966e2bdb..ec1c667bd145 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -3527,8 +3527,12 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling rxhash, not supported when CQE compress is active\n");
}

- if (mlx5e_is_uplink_rep(priv))
+ if (mlx5e_is_uplink_rep(priv)) {
features = mlx5e_fix_uplink_rep_features(netdev, features);
+ features |= NETIF_F_NETNS_LOCAL;
+ } else {
+ features &= ~NETIF_F_NETNS_LOCAL;
+ }

mutex_unlock(&priv->state_lock);

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 39e948bc1204..34a6542c03f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -301,8 +301,7 @@ int mlx5_esw_acl_ingress_vport_bond_update(struct mlx5_eswitch *esw, u16 vport_n

if (WARN_ON_ONCE(IS_ERR(vport))) {
esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num);
- err = PTR_ERR(vport);
- goto out;
+ return PTR_ERR(vport);
}

esw_acl_ingress_ofld_rules_destroy(esw, vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 2b9278002354..7315bf447e06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -918,6 +918,7 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
+ esw_apply_vport_rx_mode(esw, vport, false, false);
esw_vport_cleanup(esw, vport);
esw->enabled_vports--;

diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index d17d1b4f2585..825356ee3492 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -292,7 +292,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
*/

laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
- if (!laddr) {
+ if (dma_mapping_error(lp->device, laddr)) {
pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -509,7 +509,7 @@ static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp,

*new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE),
SONIC_RBSIZE, DMA_FROM_DEVICE);
- if (!*new_addr) {
+ if (dma_mapping_error(lp->device, *new_addr)) {
dev_kfree_skb(*new_skb);
*new_skb = NULL;
return false;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 3eb05376e7c3..bf0ba3855da1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -4378,6 +4378,9 @@ qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
}

vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
vport_id = vf->vport_id;

return qed_configure_vport_wfq(cdev, vport_id, rate);
@@ -5124,7 +5127,7 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)

/* Validate that the VF has a configured vport */
vf = qed_iov_get_vf_info(hwfn, i, true);
- if (!vf->vport_instance)
+ if (!vf || !vf->vport_instance)
continue;

memset(&params, 0, sizeof(params));
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 9015a38eaced..bb7f3286824f 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -728,9 +728,15 @@ static int emac_remove(struct platform_device *pdev)
struct net_device *netdev = dev_get_drvdata(&pdev->dev);
struct emac_adapter *adpt = netdev_priv(netdev);

+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
unregister_netdev(netdev);
netif_napi_del(&adpt->rx_q.napi);

+ free_irq(adpt->irq.irq, &adpt->irq);
+ cancel_work_sync(&adpt->work_thread);
+
emac_clks_teardown(adpt);

put_device(&adpt->phydev->mdio.dev);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 55e652624bd7..78e484ea279b 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -317,15 +317,17 @@ static int gelic_card_init_chain(struct gelic_card *card,

/* set up the hardware pointers in each descriptor */
for (i = 0; i < no; i++, descr++) {
+ dma_addr_t cpu_addr;
+
gelic_descr_set_status(descr, GELIC_DESCR_DMA_NOT_IN_USE);
- descr->bus_addr =
- dma_map_single(ctodev(card), descr,
- GELIC_DESCR_SIZE,
- DMA_BIDIRECTIONAL);

- if (!descr->bus_addr)
+ cpu_addr = dma_map_single(ctodev(card), descr,
+ GELIC_DESCR_SIZE, DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(ctodev(card), cpu_addr))
goto iommu_error;

+ descr->bus_addr = cpu_to_be32(cpu_addr);
descr->next = descr + 1;
descr->prev = descr - 1;
}
@@ -365,26 +367,28 @@ static int gelic_card_init_chain(struct gelic_card *card,
*
* allocates a new rx skb, iommu-maps it and attaches it to the descriptor.
* Activate the descriptor state-wise
+ *
+ * Gelic RX sk_buffs must be aligned to GELIC_NET_RXBUF_ALIGN and the length
+ * must be a multiple of GELIC_NET_RXBUF_ALIGN.
*/
static int gelic_descr_prepare_rx(struct gelic_card *card,
struct gelic_descr *descr)
{
+ static const unsigned int rx_skb_size =
+ ALIGN(GELIC_NET_MAX_FRAME, GELIC_NET_RXBUF_ALIGN) +
+ GELIC_NET_RXBUF_ALIGN - 1;
+ dma_addr_t cpu_addr;
int offset;
- unsigned int bufsize;

if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
dev_info(ctodev(card), "%s: ERROR status\n", __func__);
- /* we need to round up the buffer size to a multiple of 128 */
- bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);

- /* and we need to have it 128 byte aligned, therefore we allocate a
- * bit more */
- descr->skb = dev_alloc_skb(bufsize + GELIC_NET_RXBUF_ALIGN - 1);
+ descr->skb = netdev_alloc_skb(*card->netdev, rx_skb_size);
if (!descr->skb) {
descr->buf_addr = 0; /* tell DMAC don't touch memory */
return -ENOMEM;
}
- descr->buf_size = cpu_to_be32(bufsize);
+ descr->buf_size = cpu_to_be32(rx_skb_size);
descr->dmac_cmd_status = 0;
descr->result_size = 0;
descr->valid_size = 0;
@@ -395,11 +399,10 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
if (offset)
skb_reserve(descr->skb, GELIC_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */
- descr->buf_addr = cpu_to_be32(dma_map_single(ctodev(card),
- descr->skb->data,
- GELIC_NET_MAX_MTU,
- DMA_FROM_DEVICE));
- if (!descr->buf_addr) {
+ cpu_addr = dma_map_single(ctodev(card), descr->skb->data,
+ GELIC_NET_MAX_FRAME, DMA_FROM_DEVICE);
+ descr->buf_addr = cpu_to_be32(cpu_addr);
+ if (dma_mapping_error(ctodev(card), cpu_addr)) {
dev_kfree_skb_any(descr->skb);
descr->skb = NULL;
dev_info(ctodev(card),
@@ -779,7 +782,7 @@ static int gelic_descr_prepare_tx(struct gelic_card *card,

buf = dma_map_single(ctodev(card), skb->data, skb->len, DMA_TO_DEVICE);

- if (!buf) {
+ if (dma_mapping_error(ctodev(card), buf)) {
dev_err(ctodev(card),
"dma map 2 failed (%p, %i). Dropping packet\n",
skb->data, skb->len);
@@ -915,7 +918,7 @@ static void gelic_net_pass_skb_up(struct gelic_descr *descr,
data_error = be32_to_cpu(descr->data_error);
/* unmap skb buffer */
dma_unmap_single(ctodev(card), be32_to_cpu(descr->buf_addr),
- GELIC_NET_MAX_MTU,
+ GELIC_NET_MAX_FRAME,
DMA_FROM_DEVICE);

skb_put(skb, be32_to_cpu(descr->valid_size)?
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index 68f324ed4eaf..0d98defb011e 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -19,8 +19,9 @@
#define GELIC_NET_RX_DESCRIPTORS 128 /* num of descriptors */
#define GELIC_NET_TX_DESCRIPTORS 128 /* num of descriptors */

-#define GELIC_NET_MAX_MTU VLAN_ETH_FRAME_LEN
-#define GELIC_NET_MIN_MTU VLAN_ETH_ZLEN
+#define GELIC_NET_MAX_FRAME 2312
+#define GELIC_NET_MAX_MTU 2294
+#define GELIC_NET_MIN_MTU 64
#define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index ae611e46da6a..f8bbd1489af1 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -503,6 +503,11 @@ static void
xirc2ps_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ struct local_info *local = netdev_priv(dev);
+
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ cancel_work_sync(&local->tx_timeout_task);

dev_dbg(&link->dev, "detach\n");

diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 0362917fce7a..e2322bc3a4e9 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -1956,6 +1956,8 @@ static int ca8210_skb_tx(
* packet
*/
mac_len = ieee802154_hdr_peek_addrs(skb, &header);
+ if (mac_len < 0)
+ return mac_len;

secspec.security_level = header.sec.level;
secspec.key_id_mode = header.sec.key_id_mode;
diff --git a/drivers/net/mdio/acpi_mdio.c b/drivers/net/mdio/acpi_mdio.c
index d77c987fda9c..4630dde01974 100644
--- a/drivers/net/mdio/acpi_mdio.c
+++ b/drivers/net/mdio/acpi_mdio.c
@@ -18,16 +18,18 @@ MODULE_AUTHOR("Calvin Johnson <calvin.johnson@xxxxxxxxxxx>");
MODULE_LICENSE("GPL");

/**
- * acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
+ * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
* @mdio: pointer to mii_bus structure
* @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent
+ * @owner: module owning this @mdio object.
* an ACPI device object corresponding to the MDIO bus and its children are
* expected to correspond to the PHY devices on that bus.
*
* This function registers the mii_bus structure and registers a phy_device
* for each child node of @fwnode.
*/
-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+ struct module *owner)
{
struct fwnode_handle *child;
u32 addr;
@@ -35,7 +37,7 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)

/* Mask out all PHYs from auto probing. */
mdio->phy_mask = GENMASK(31, 0);
- ret = mdiobus_register(mdio);
+ ret = __mdiobus_register(mdio, owner);
if (ret)
return ret;

@@ -55,4 +57,4 @@ int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
}
return 0;
}
-EXPORT_SYMBOL(acpi_mdiobus_register);
+EXPORT_SYMBOL(__acpi_mdiobus_register);
diff --git a/drivers/net/mdio/mdio-thunder.c b/drivers/net/mdio/mdio-thunder.c
index 822d2cdd2f35..394b864aaa37 100644
--- a/drivers/net/mdio/mdio-thunder.c
+++ b/drivers/net/mdio/mdio-thunder.c
@@ -104,6 +104,7 @@ static int thunder_mdiobus_pci_probe(struct pci_dev *pdev,
if (i >= ARRAY_SIZE(nexus->buses))
break;
}
+ fwnode_handle_put(fwn);
return 0;

err_release_regions:
diff --git a/drivers/net/mdio/of_mdio.c b/drivers/net/mdio/of_mdio.c
index 510822d6d0d9..1e46e39f5f46 100644
--- a/drivers/net/mdio/of_mdio.c
+++ b/drivers/net/mdio/of_mdio.c
@@ -139,21 +139,23 @@ bool of_mdiobus_child_is_phy(struct device_node *child)
EXPORT_SYMBOL(of_mdiobus_child_is_phy);

/**
- * of_mdiobus_register - Register mii_bus and create PHYs from the device tree
+ * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree
* @mdio: pointer to mii_bus structure
* @np: pointer to device_node of MDIO bus.
+ * @owner: module owning the @mdio object.
*
* This function registers the mii_bus structure and registers a phy_device
* for each child node of @np.
*/
-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+ struct module *owner)
{
struct device_node *child;
bool scanphys = false;
int addr, rc;

if (!np)
- return mdiobus_register(mdio);
+ return __mdiobus_register(mdio, owner);

/* Do not continue if the node is disabled */
if (!of_device_is_available(np))
@@ -172,7 +174,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us);

/* Register the MDIO bus */
- rc = mdiobus_register(mdio);
+ rc = __mdiobus_register(mdio, owner);
if (rc)
return rc;

@@ -236,7 +238,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
mdiobus_unregister(mdio);
return rc;
}
-EXPORT_SYMBOL(of_mdiobus_register);
+EXPORT_SYMBOL(__of_mdiobus_register);

/**
* of_mdio_find_device - Given a device tree node, find the mdio_device
diff --git a/drivers/net/phy/mdio_devres.c b/drivers/net/phy/mdio_devres.c
index b560e99695df..69b829e6ab35 100644
--- a/drivers/net/phy/mdio_devres.c
+++ b/drivers/net/phy/mdio_devres.c
@@ -98,13 +98,14 @@ EXPORT_SYMBOL(__devm_mdiobus_register);

#if IS_ENABLED(CONFIG_OF_MDIO)
/**
- * devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
+ * __devm_of_mdiobus_register - Resource managed variant of of_mdiobus_register()
* @dev: Device to register mii_bus for
* @mdio: MII bus structure to register
* @np: Device node to parse
+ * @owner: Owning module
*/
-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
- struct device_node *np)
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np, struct module *owner)
{
struct mdiobus_devres *dr;
int ret;
@@ -117,7 +118,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
if (!dr)
return -ENOMEM;

- ret = of_mdiobus_register(mdio, np);
+ ret = __of_mdiobus_register(mdio, np, owner);
if (ret) {
devres_free(dr);
return ret;
@@ -127,7 +128,7 @@ int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
devres_add(dev, dr);
return 0;
}
-EXPORT_SYMBOL(devm_of_mdiobus_register);
+EXPORT_SYMBOL(__devm_of_mdiobus_register);
#endif /* CONFIG_OF_MDIO */

MODULE_LICENSE("GPL");
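The acpi_mdio, of_mdio and mdio_devres hunks above add a module owner argument to the low-level registration helpers. Callers normally reach them through thin wrappers that supply THIS_MODULE; a hedged sketch of that wrapper pattern (example_of_mdiobus_register is an illustrative name, not the in-tree one):

#include <linux/module.h>
#include <linux/of_mdio.h>

/* Record the calling module as the owner of the mii_bus so it cannot be
 * unloaded while PHY devices probed from this bus are still in use.
 */
static inline int example_of_mdiobus_register(struct mii_bus *mdio,
					      struct device_node *np)
{
	return __of_mdiobus_register(mdio, np, THIS_MODULE);
}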
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 2fc851082e7b..1135e63a4a76 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -57,6 +57,18 @@ static const char *phy_state_to_str(enum phy_state st)
return NULL;
}

+static void phy_process_state_change(struct phy_device *phydev,
+ enum phy_state old_state)
+{
+ if (old_state != phydev->state) {
+ phydev_dbg(phydev, "PHY state change %s -> %s\n",
+ phy_state_to_str(old_state),
+ phy_state_to_str(phydev->state));
+ if (phydev->drv && phydev->drv->link_change_notify)
+ phydev->drv->link_change_notify(phydev);
+ }
+}
+
static void phy_link_up(struct phy_device *phydev)
{
phydev->phy_link_change(phydev, true);
@@ -1061,6 +1073,7 @@ EXPORT_SYMBOL(phy_free_interrupt);
void phy_stop(struct phy_device *phydev)
{
struct net_device *dev = phydev->attached_dev;
+ enum phy_state old_state;

if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) {
WARN(1, "called from state %s\n",
@@ -1069,6 +1082,7 @@ void phy_stop(struct phy_device *phydev)
}

mutex_lock(&phydev->lock);
+ old_state = phydev->state;

if (phydev->state == PHY_CABLETEST) {
phy_abort_cable_test(phydev);
@@ -1079,6 +1093,7 @@ void phy_stop(struct phy_device *phydev)
sfp_upstream_stop(phydev->sfp_bus);

phydev->state = PHY_HALTED;
+ phy_process_state_change(phydev, old_state);

mutex_unlock(&phydev->lock);

@@ -1196,13 +1211,7 @@ void phy_state_machine(struct work_struct *work)
if (err < 0)
phy_error(phydev);

- if (old_state != phydev->state) {
- phydev_dbg(phydev, "PHY state change %s -> %s\n",
- phy_state_to_str(old_state),
- phy_state_to_str(phydev->state));
- if (phydev->drv && phydev->drv->link_change_notify)
- phydev->drv->link_change_notify(phydev);
- }
+ phy_process_state_change(phydev, old_state);

/* Only re-schedule a PHY state machine change if we are polling the
* PHY, if PHY_MAC_INTERRUPT is set, then we will be moving
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index c0b8b4aa78f3..a3ccf0cee093 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -664,6 +664,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},

+ /* Telit FE990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1081, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 7b358b896a6d..8646c4d90361 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1358,6 +1358,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1060, 2)}, /* Telit LN920 */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1070, 2)}, /* Telit FN990 */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1080, 2)}, /* Telit FE990 */
{QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
{QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */
{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 7cf9206638c3..649d9f9af6e6 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1808,6 +1808,12 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
size = (u16)((header & RX_STS_FL_) >> 16);
align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;

+ if (unlikely(size > skb->len)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "size err header=0x%08x\n", header);
+ return 0;
+ }
+
if (unlikely(header & RX_STS_ES_)) {
netif_dbg(dev, rx_err, dev->net,
"Error header=0x%08x\n", header);
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index 0de7c255254e..d6de5a294128 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -284,7 +284,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
u_cmd.insize > EC_MAX_MSG_BYTES)
return -EINVAL;

- s_cmd = kmalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
+ s_cmd = kzalloc(sizeof(*s_cmd) + max(u_cmd.outsize, u_cmd.insize),
GFP_KERNEL);
if (!s_cmd)
return -ENOMEM;
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index 16c4876fe5af..ebb5ba7f8bb6 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -446,11 +446,9 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
if (!info)
return -EINVAL;

- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }

ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
if (ret)
@@ -481,11 +479,9 @@ static ssize_t bq24190_sysfs_store(struct device *dev,
if (ret < 0)
return ret;

- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }

ret = bq24190_write_mask(bdi, info->reg, info->mask, info->shift, v);
if (ret)
@@ -504,10 +500,9 @@ static int bq24190_set_charge_mode(struct regulator_dev *dev, u8 val)
struct bq24190_dev_info *bdi = rdev_get_drvdata(dev);
int ret;

- ret = pm_runtime_get_sync(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
if (ret < 0) {
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
- pm_runtime_put_noidle(bdi->dev);
return ret;
}

@@ -537,10 +532,9 @@ static int bq24190_vbus_is_enabled(struct regulator_dev *dev)
int ret;
u8 val;

- ret = pm_runtime_get_sync(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
if (ret < 0) {
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", ret);
- pm_runtime_put_noidle(bdi->dev);
return ret;
}

@@ -1081,11 +1075,9 @@ static int bq24190_charger_get_property(struct power_supply *psy,

dev_dbg(bdi->dev, "prop: %d\n", psp);

- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }

switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
@@ -1155,11 +1147,9 @@ static int bq24190_charger_set_property(struct power_supply *psy,

dev_dbg(bdi->dev, "prop: %d\n", psp);

- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }

switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
@@ -1418,11 +1408,9 @@ static int bq24190_battery_get_property(struct power_supply *psy,
dev_warn(bdi->dev, "warning: /sys/class/power_supply/bq24190-battery is deprecated\n");
dev_dbg(bdi->dev, "prop: %d\n", psp);

- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }

switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -1466,11 +1454,9 @@ static int bq24190_battery_set_property(struct power_supply *psy,
dev_warn(bdi->dev, "warning: /sys/class/power_supply/bq24190-battery is deprecated\n");
dev_dbg(bdi->dev, "prop: %d\n", psp);

- ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(bdi->dev);
+ ret = pm_runtime_resume_and_get(bdi->dev);
+ if (ret < 0)
return ret;
- }

switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
@@ -1624,10 +1610,9 @@ static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
int error;

bdi->irq_event = true;
- error = pm_runtime_get_sync(bdi->dev);
+ error = pm_runtime_resume_and_get(bdi->dev);
if (error < 0) {
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
return IRQ_NONE;
}
bq24190_check_status(bdi);
@@ -1847,11 +1832,10 @@ static int bq24190_remove(struct i2c_client *client)
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
int error;

- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
+ cancel_delayed_work_sync(&bdi->input_current_limit_work);
+ error = pm_runtime_resume_and_get(bdi->dev);
+ if (error < 0)
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- }

bq24190_register_reset(bdi);
if (bdi->battery)
@@ -1900,11 +1884,9 @@ static __maybe_unused int bq24190_pm_suspend(struct device *dev)
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
int error;

- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
+ error = pm_runtime_resume_and_get(bdi->dev);
+ if (error < 0)
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- }

bq24190_register_reset(bdi);

@@ -1925,11 +1907,9 @@ static __maybe_unused int bq24190_pm_resume(struct device *dev)
bdi->f_reg = 0;
bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */

- error = pm_runtime_get_sync(bdi->dev);
- if (error < 0) {
+ error = pm_runtime_resume_and_get(bdi->dev);
+ if (error < 0)
dev_warn(bdi->dev, "pm_runtime_get failed: %i\n", error);
- pm_runtime_put_noidle(bdi->dev);
- }

bq24190_register_reset(bdi);
bq24190_set_config(bdi);
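The bq24190 changes above convert every pm_runtime_get_sync()/pm_runtime_put_noidle() error pair to pm_runtime_resume_and_get(), which already drops the usage count when resume fails. A minimal sketch of the resulting calling pattern (example_read_hw is illustrative):

#include <linux/pm_runtime.h>

static int example_read_hw(struct device *dev)
{
	int ret;

	/* On failure the usage count is already balanced, so no
	 * pm_runtime_put_noidle() is needed in the error path.
	 */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... talk to the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}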
diff --git a/drivers/power/supply/da9150-charger.c b/drivers/power/supply/da9150-charger.c
index f9314cc0cd75..6b987da58655 100644
--- a/drivers/power/supply/da9150-charger.c
+++ b/drivers/power/supply/da9150-charger.c
@@ -662,6 +662,7 @@ static int da9150_charger_remove(struct platform_device *pdev)

if (!IS_ERR_OR_NULL(charger->usb_phy))
usb_unregister_notifier(charger->usb_phy, &charger->otg_nb);
+ cancel_work_sync(&charger->otg_work);

power_supply_unregister(charger->battery);
power_supply_unregister(charger->usb);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 1d9be771f3ee..a9c4a5e2ccb9 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -1117,10 +1117,12 @@ static int alua_activate(struct scsi_device *sdev,
rcu_read_unlock();
mutex_unlock(&h->init_mutex);

- if (alua_rtpg_queue(pg, sdev, qdata, true))
+ if (alua_rtpg_queue(pg, sdev, qdata, true)) {
fn = NULL;
- else
+ } else {
+ kfree(qdata);
err = SCSI_DH_DEV_OFFLINED;
+ }
kref_put(&pg->kref, release_port_group);
out:
if (fn)
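The scsi_dh_alua hunk above frees qdata when alua_rtpg_queue() refuses to queue the work, restoring the usual ownership rule: the callee owns the allocation only if queueing succeeded. An illustrative sketch of that rule (the example_* names are stand-ins, not the driver's code):

#include <linux/slab.h>
#include <linux/errno.h>

struct example_ctx;					/* stand-in context */
bool example_rtpg_queue(struct example_ctx *ctx, void *qdata); /* stand-in for alua_rtpg_queue() */

/* Ownership rule: the queueing helper owns qdata only when it accepted the
 * work; otherwise the caller must free it, which is the leak the fix plugs.
 */
static int example_submit(struct example_ctx *ctx, void *qdata)
{
	if (example_rtpg_queue(ctx, qdata))
		return 0;	/* queued: the worker frees qdata later */

	kfree(qdata);		/* refused: free here to avoid leaking it */
	return -ENODEV;
}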
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index fa22cb712be5..9515ab66a778 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -2424,8 +2424,7 @@ static int interrupt_preinit_v3_hw(struct hisi_hba *hisi_hba)
hisi_hba->cq_nvecs = vectors - BASE_VECTORS_V3_HW;
shost->nr_hw_queues = hisi_hba->cq_nvecs;

- devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
- return 0;
+ return devm_add_action(&pdev->dev, hisi_sas_v3_free_vectors, pdev);
}

static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 855817f6fe67..f79299f6178c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -7056,6 +7056,8 @@ lpfc_sli4_cgn_params_read(struct lpfc_hba *phba)
/* Find out if the FW has a new set of congestion parameters. */
len = sizeof(struct lpfc_cgn_param);
pdata = kzalloc(len, GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
ret = lpfc_read_object(phba, (char *)LPFC_PORT_CFG_NAME,
pdata, len);

diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 7d333167047f..30bc72324f06 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -22166,20 +22166,20 @@ lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
static struct lpfc_io_buf *
lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
{
- struct lpfc_io_buf *lpfc_ncmd;
+ struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
struct lpfc_io_buf *lpfc_ncmd_next;
unsigned long iflag;
struct lpfc_epd_pool *epd_pool;

epd_pool = &phba->epd_pool;
- lpfc_ncmd = NULL;

spin_lock_irqsave(&epd_pool->lock, iflag);
if (epd_pool->count > 0) {
- list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ list_for_each_entry_safe(iter, lpfc_ncmd_next,
&epd_pool->list, list) {
- list_del(&lpfc_ncmd->list);
+ list_del(&iter->list);
epd_pool->count--;
+ lpfc_ncmd = iter;
break;
}
}
@@ -22376,10 +22376,6 @@ lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
struct lpfc_dmabuf *pcmd;
u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};

- /* sanity check on queue memory */
- if (!datap)
- return -ENODEV;
-
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox)
return -ENOMEM;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e855d291db3c..665959938e5e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1897,6 +1897,8 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
}

req->outstanding_cmds[index] = NULL;
+
+ qla_put_fw_resources(sp->qpair, &sp->iores);
return sp;
}

@@ -3099,7 +3101,6 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
}
bsg_reply->reply_payload_rcv_len = 0;

- qla_put_fw_resources(sp->qpair, &sp->iores);
done:
/* Return the vendor specific reply to API */
bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 330f34c8724f..6063f4855808 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1845,6 +1845,17 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res)
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
+ /*
+ * perform lockless completion during driver unload
+ */
+ if (qla2x00_chip_is_down(vha)) {
+ req->outstanding_cmds[cnt] = NULL;
+ spin_unlock_irqrestore(qp->qp_lock_ptr, flags);
+ sp->done(sp, res);
+ spin_lock_irqsave(qp->qp_lock_ptr, flags);
+ continue;
+ }
+
switch (sp->cmd_type) {
case TYPE_SRB:
qla2x00_abort_srb(qp, sp, res, &flags);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index c7080454aea9..bd110a93d047 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -233,6 +233,7 @@ static struct {
{"SGI", "RAID5", "*", BLIST_SPARSELUN},
{"SGI", "TP9100", "*", BLIST_REPORTLUN2},
{"SGI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"SKhynix", "H28U74301AMR", NULL, BLIST_SKIP_VPD_PAGES},
{"IBM", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SUN", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"DELL", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 6110dfd903f7..83a3d9f085d8 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1050,6 +1050,22 @@ static void storvsc_handle_error(struct vmscsi_request *vm_srb,
goto do_work;
}

+ /*
+ * Check for "Operating parameters have changed"
+ * due to Hyper-V changing the VHD/VHDX BlockSize
+ * when adding/removing a differencing disk. This
+ * causes discard_granularity to change, so do a
+ * rescan to pick up the new granularity. We don't
+ * want scsi_report_sense() to output a message
+ * that a sysadmin wouldn't know what to do with.
+ */
+ if ((asc == 0x3f) && (ascq != 0x03) &&
+ (ascq != 0x0e)) {
+ process_err_fn = storvsc_device_scan;
+ set_host_byte(scmnd, DID_REQUEUE);
+ goto do_work;
+ }
+
/*
* Otherwise, let upper layer deal with the
* error when sense message is present
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 120831428ec6..bc2b16701a11 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -9734,5 +9734,6 @@ module_exit(ufshcd_core_exit);
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@xxxxxxxxxxx>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@xxxxxxxxxxx>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 6bc3aaf655fc..62004e3fe1cc 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -1262,18 +1262,20 @@ static struct iscsi_param *iscsi_check_key(
return param;

if (!(param->phase & phase)) {
- pr_err("Key \"%s\" may not be negotiated during ",
- param->name);
+ char *phase_name;
+
switch (phase) {
case PHASE_SECURITY:
- pr_debug("Security phase.\n");
+ phase_name = "Security";
break;
case PHASE_OPERATIONAL:
- pr_debug("Operational phase.\n");
+ phase_name = "Operational";
break;
default:
- pr_debug("Unknown phase.\n");
+ phase_name = "Unknown";
}
+ pr_err("Key \"%s\" may not be negotiated during %s phase.\n",
+ param->name, phase_name);
return NULL;
}

diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 297dc62bca29..372d64756ed6 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -267,35 +267,34 @@ int amdtee_open_session(struct tee_context *ctx,
goto out;
}

+ /* Open session with loaded TA */
+ handle_open_session(arg, &session_info, param);
+ if (arg->ret != TEEC_SUCCESS) {
+ pr_err("open_session failed %d\n", arg->ret);
+ handle_unload_ta(ta_handle);
+ kref_put(&sess->refcount, destroy_session);
+ goto out;
+ }
+
/* Find an empty session index for the given TA */
spin_lock(&sess->lock);
i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
- if (i < TEE_NUM_SESSIONS)
+ if (i < TEE_NUM_SESSIONS) {
+ sess->session_info[i] = session_info;
+ set_session_id(ta_handle, i, &arg->session);
set_bit(i, sess->sess_mask);
+ }
spin_unlock(&sess->lock);

if (i >= TEE_NUM_SESSIONS) {
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ handle_close_session(ta_handle, session_info);
handle_unload_ta(ta_handle);
kref_put(&sess->refcount, destroy_session);
rc = -ENOMEM;
goto out;
}

- /* Open session with loaded TA */
- handle_open_session(arg, &session_info, param);
- if (arg->ret != TEEC_SUCCESS) {
- pr_err("open_session failed %d\n", arg->ret);
- spin_lock(&sess->lock);
- clear_bit(i, sess->sess_mask);
- spin_unlock(&sess->lock);
- handle_unload_ta(ta_handle);
- kref_put(&sess->refcount, destroy_session);
- goto out;
- }
-
- sess->session_info[i] = session_info;
- set_session_id(ta_handle, i, &arg->session);
out:
free_pages((u64)ta, get_order(ta_size));
return rc;
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index aa6cf7f2f438..f1e4f39bda2d 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -43,7 +43,7 @@
#define QUIRK_AUTO_CLEAR_INT BIT(0)
#define QUIRK_E2E BIT(1)

-static int ring_interrupt_index(struct tb_ring *ring)
+static int ring_interrupt_index(const struct tb_ring *ring)
{
int bit = ring->hop;
if (!ring->is_tx)
@@ -60,13 +60,14 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
int reg = REG_RING_INTERRUPT_BASE +
ring_interrupt_index(ring) / 32 * 4;
- int bit = ring_interrupt_index(ring) & 31;
- int mask = 1 << bit;
+ int interrupt_bit = ring_interrupt_index(ring) & 31;
+ int mask = 1 << interrupt_bit;
u32 old, new;

if (ring->irq > 0) {
u32 step, shift, ivr, misc;
void __iomem *ivr_base;
+ int auto_clear_bit;
int index;

if (ring->is_tx)
@@ -74,18 +75,25 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)
else
index = ring->hop + ring->nhi->hop_count;

- if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) {
- /*
- * Ask the hardware to clear interrupt status
- * bits automatically since we already know
- * which interrupt was triggered.
- */
- misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
- if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
- misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
- iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
- }
- }
+ /*
+ * Intel routers support a bit that isn't part of
+ * the USB4 spec to ask the hardware to clear
+ * interrupt status bits automatically since
+ * we already know which interrupt was triggered.
+ *
+ * Other routers explicitly disable auto-clear
+ * to prevent conditions that may occur where two
+ * MSIX interrupts are simultaneously active and
+ * reading the register clears both of them.
+ */
+ misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
+ if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
+ auto_clear_bit = REG_DMA_MISC_INT_AUTO_CLEAR;
+ else
+ auto_clear_bit = REG_DMA_MISC_DISABLE_AUTO_CLEAR;
+ if (!(misc & auto_clear_bit))
+ iowrite32(misc | auto_clear_bit,
+ ring->nhi->iobase + REG_DMA_MISC);

ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
@@ -105,7 +113,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active)

dev_dbg(&ring->nhi->pdev->dev,
"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
- active ? "enabling" : "disabling", reg, bit, old, new);
+ active ? "enabling" : "disabling", reg, interrupt_bit, old, new);

if (new == old)
dev_WARN(&ring->nhi->pdev->dev,
@@ -390,14 +398,17 @@ EXPORT_SYMBOL_GPL(tb_ring_poll_complete);

static void ring_clear_msix(const struct tb_ring *ring)
{
+ int bit;
+
if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT)
return;

+ bit = ring_interrupt_index(ring) & 31;
if (ring->is_tx)
- ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE);
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR);
else
- ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE +
- 4 * (ring->nhi->hop_count / 32));
+ iowrite32(BIT(bit), ring->nhi->iobase + REG_RING_INT_CLEAR +
+ 4 * (ring->nhi->hop_count / 32));
}

static irqreturn_t ring_msix(int irq, void *data)
diff --git a/drivers/thunderbolt/nhi_regs.h b/drivers/thunderbolt/nhi_regs.h
index 0d4970dcef84..faef165a919c 100644
--- a/drivers/thunderbolt/nhi_regs.h
+++ b/drivers/thunderbolt/nhi_regs.h
@@ -77,12 +77,13 @@ struct ring_desc {

/*
* three bitfields: tx, rx, rx overflow
- * Every bitfield contains one bit for every hop (REG_HOP_COUNT). Registers are
- * cleared on read. New interrupts are fired only after ALL registers have been
+ * Every bitfield contains one bit for every hop (REG_HOP_COUNT).
+ * New interrupts are fired only after ALL registers have been
* read (even those containing only disabled rings).
*/
#define REG_RING_NOTIFY_BASE 0x37800
#define RING_NOTIFY_REG_COUNT(nhi) ((31 + 3 * nhi->hop_count) / 32)
+#define REG_RING_INT_CLEAR 0x37808

/*
* two bitfields: rx, tx
@@ -105,6 +106,7 @@ struct ring_desc {

#define REG_DMA_MISC 0x39864
#define REG_DMA_MISC_INT_AUTO_CLEAR BIT(2)
+#define REG_DMA_MISC_DISABLE_AUTO_CLEAR BIT(17)

#define REG_INMAIL_DATA 0x39900

diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 722694052f4a..566c03105fb8 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -208,6 +208,22 @@ static ssize_t nvm_authenticate_show(struct device *dev,
return ret;
}

+static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
+{
+ int i;
+
+ for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+ usb4_port_retimer_set_inbound_sbtx(port, i);
+}
+
+static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
+{
+ int i;
+
+ for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
+ usb4_port_retimer_unset_inbound_sbtx(port, i);
+}
+
static ssize_t nvm_authenticate_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -234,6 +250,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
rt->auth_status = 0;

if (val) {
+ tb_retimer_set_inbound_sbtx(rt->port);
if (val == AUTHENTICATE_ONLY) {
ret = tb_retimer_nvm_authenticate(rt, true);
} else {
@@ -253,6 +270,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
}

exit_unlock:
+ tb_retimer_unset_inbound_sbtx(rt->port);
mutex_unlock(&rt->tb->lock);
exit_rpm:
pm_runtime_mark_last_busy(&rt->dev);
@@ -466,8 +484,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
* Enable sideband channel for each retimer. We can do this
* regardless whether there is device connected or not.
*/
- for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
- usb4_port_retimer_set_inbound_sbtx(port, i);
+ tb_retimer_set_inbound_sbtx(port);

/*
* Before doing anything else, read the authentication status.
@@ -490,6 +507,8 @@ int tb_retimer_scan(struct tb_port *port, bool add)
break;
}

+ tb_retimer_unset_inbound_sbtx(port);
+
if (!last_idx)
return 0;

diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h
index bda889ff3bda..a8a35b04035b 100644
--- a/drivers/thunderbolt/sb_regs.h
+++ b/drivers/thunderbolt/sb_regs.h
@@ -20,6 +20,7 @@ enum usb4_sb_opcode {
USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c, /* "LSEN" */
USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45, /* "ENUM" */
USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */
+ USB4_SB_OPCODE_UNSET_INBOUND_SBTX = 0x50555355, /* "USUP" */
USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */
USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */
USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ef647477ab38..8cc9e8c55e40 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -2750,8 +2750,6 @@ int tb_switch_add(struct tb_switch *sw)
}
tb_sw_dbg(sw, "uid: %#llx\n", sw->uid);

- tb_check_quirks(sw);
-
ret = tb_switch_set_uuid(sw);
if (ret) {
dev_err(&sw->dev, "failed to set UUID\n");
@@ -2770,6 +2768,8 @@ int tb_switch_add(struct tb_switch *sw)
}
}

+ tb_check_quirks(sw);
+
tb_switch_default_link_ports(sw);

ret = tb_switch_update_link_attributes(sw);
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 8922217d580c..db0d3d37772f 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -1080,6 +1080,7 @@ int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);

int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
u8 size);
int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg,
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 90986567f1f9..36547afa1896 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1441,6 +1441,20 @@ int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index)
500);
}

+/**
+ * usb4_port_retimer_unset_inbound_sbtx() - Disable sideband channel transactions
+ * @port: USB4 port
+ * @index: Retimer index
+ *
+ * Disables sideband channel transactions on SBTX. The reverse of
+ * usb4_port_retimer_set_inbound_sbtx().
+ */
+int usb4_port_retimer_unset_inbound_sbtx(struct tb_port *port, u8 index)
+{
+ return usb4_port_retimer_op(port, index,
+ USB4_SB_OPCODE_UNSET_INBOUND_SBTX, 500);
+}
+
/**
* usb4_port_retimer_read() - Read from retimer sideband registers
* @port: USB4 port
@@ -1930,18 +1944,30 @@ static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port,
int downstream_bw)
{
u32 val, ubw, dbw, scale;
- int ret;
+ int ret, max_bw;

- /* Read the used scale, hardware default is 0 */
- ret = tb_port_read(port, &scale, TB_CFG_PORT,
- port->cap_adap + ADP_USB3_CS_3, 1);
+ /* Figure out suitable scale */
+ scale = 0;
+ max_bw = max(upstream_bw, downstream_bw);
+ while (scale < 64) {
+ if (mbps_to_usb3_bw(max_bw, scale) < 4096)
+ break;
+ scale++;
+ }
+
+ if (WARN_ON(scale >= 64))
+ return -EINVAL;
+
+ ret = tb_port_write(port, &scale, TB_CFG_PORT,
+ port->cap_adap + ADP_USB3_CS_3, 1);
if (ret)
return ret;

- scale &= ADP_USB3_CS_3_SCALE_MASK;
ubw = mbps_to_usb3_bw(upstream_bw, scale);
dbw = mbps_to_usb3_bw(downstream_bw, scale);

+ tb_port_dbg(port, "scaled bandwidth %u/%u, scale %u\n", ubw, dbw, scale);
+
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_USB3_CS_2, 1);
if (ret)
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 609a51137e96..f2f066ce8d9e 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -43,6 +43,7 @@ struct xencons_info {
int irq;
int vtermno;
grant_ref_t gntref;
+ spinlock_t ring_lock;
};

static LIST_HEAD(xenconsoles);
@@ -89,12 +90,15 @@ static int __write_console(struct xencons_info *xencons,
XENCONS_RING_IDX cons, prod;
struct xencons_interface *intf = xencons->intf;
int sent = 0;
+ unsigned long flags;

+ spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->out_cons;
prod = intf->out_prod;
mb(); /* update queue values before going on */

if ((prod - cons) > sizeof(intf->out)) {
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}
@@ -104,6 +108,7 @@ static int __write_console(struct xencons_info *xencons,

wmb(); /* write ring before updating pointer */
intf->out_prod = prod;
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);

if (sent)
notify_daemon(xencons);
@@ -146,16 +151,19 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
int recv = 0;
struct xencons_info *xencons = vtermno_to_xencons(vtermno);
unsigned int eoiflag = 0;
+ unsigned long flags;

if (xencons == NULL)
return -EINVAL;
intf = xencons->intf;

+ spin_lock_irqsave(&xencons->ring_lock, flags);
cons = intf->in_cons;
prod = intf->in_prod;
mb(); /* get pointers before reading ring */

if ((prod - cons) > sizeof(intf->in)) {
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
pr_err_once("xencons: Illegal ring page indices");
return -EINVAL;
}
@@ -179,10 +187,13 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
xencons->out_cons = intf->out_cons;
xencons->out_cons_same = 0;
}
+ if (!recv && xencons->out_cons_same++ > 1) {
+ eoiflag = XEN_EOI_FLAG_SPURIOUS;
+ }
+ spin_unlock_irqrestore(&xencons->ring_lock, flags);
+
if (recv) {
notify_daemon(xencons);
- } else if (xencons->out_cons_same++ > 1) {
- eoiflag = XEN_EOI_FLAG_SPURIOUS;
}

xen_irq_lateeoi(xencons->irq, eoiflag);
@@ -239,6 +250,7 @@ static int xen_hvm_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
} else if (info->intf != NULL) {
/* already configured */
return 0;
@@ -275,6 +287,7 @@ static int xen_hvm_console_init(void)

static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
{
+ spin_lock_init(&info->ring_lock);
info->evtchn = xen_start_info->console.domU.evtchn;
/* GFN == MFN for PV guest */
info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
@@ -325,6 +338,7 @@ static int xen_initial_domain_console_init(void)
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
}

info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
@@ -482,6 +496,7 @@ static int xencons_probe(struct xenbus_device *dev,
info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ spin_lock_init(&info->ring_lock);
dev_set_drvdata(&dev->dev, info);
info->xbdev = dev;
info->vtermno = xenbus_devid_to_vtermno(devid);
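The hvc_xen changes above introduce ring_lock so the console ring's cons/prod indices are read and advanced under one spinlock. A minimal, self-contained sketch of that pattern (example_ring and its 128-byte buffer are illustrative, not the xencons layout):

#include <linux/spinlock.h>
#include <linux/errno.h>

struct example_ring {
	spinlock_t lock;
	unsigned int cons, prod;	/* free-running indices */
	char buf[128];			/* power-of-two ring */
};

static int example_ring_put(struct example_ring *r, char c)
{
	unsigned long flags;
	int ret = -ENOSPC;

	spin_lock_irqsave(&r->lock, flags);
	if (r->prod - r->cons < sizeof(r->buf)) {
		r->buf[r->prod++ & (sizeof(r->buf) - 1)] = c;
		ret = 0;
	}
	spin_unlock_irqrestore(&r->lock, flags);

	return ret;
}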
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index da63e76c7530..6ccadfa0caf0 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -253,7 +253,9 @@ config SERIAL_8250_ASPEED_VUART
tristate "Aspeed Virtual UART"
depends on SERIAL_8250
depends on OF
- depends on REGMAP && MFD_SYSCON
+ depends on MFD_SYSCON
+ depends on ARCH_ASPEED || COMPILE_TEST
+ select REGMAP
help
If you want to use the virtual UART (VUART) device on Aspeed
BMC platforms, enable this option. This enables the 16550A-
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index e1ff109d7a14..ac3c6c1e80cc 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -573,7 +573,7 @@ static void lpuart_flush_buffer(struct uart_port *port)
sport->dma_tx_nents, DMA_TO_DEVICE);
sport->dma_tx_in_progress = false;
}
- dmaengine_terminate_all(chan);
+ dmaengine_terminate_async(chan);
}

if (lpuart_is_32(sport)) {
@@ -1307,7 +1307,8 @@ static void lpuart_dma_rx_free(struct uart_port *port)
struct lpuart_port, port);
struct dma_chan *chan = sport->dma_rx_chan;

- dmaengine_terminate_all(chan);
+ dmaengine_terminate_sync(chan);
+ del_timer_sync(&sport->lpuart_timer);
dma_unmap_sg(chan->device->dev, &sport->rx_sgl, 1, DMA_FROM_DEVICE);
kfree(sport->rx_ring.buf);
sport->rx_ring.tail = 0;
@@ -1773,7 +1774,6 @@ static int lpuart32_startup(struct uart_port *port)
static void lpuart_dma_shutdown(struct lpuart_port *sport)
{
if (sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
sport->lpuart_dma_rx_use = false;
}
@@ -1782,7 +1782,7 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
if (wait_event_interruptible(sport->dma_wait,
!sport->dma_tx_in_progress) != false) {
sport->dma_tx_in_progress = false;
- dmaengine_terminate_all(sport->dma_tx_chan);
+ dmaengine_terminate_sync(sport->dma_tx_chan);
}
sport->lpuart_dma_tx_use = false;
}
@@ -1933,10 +1933,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
* Since timer function acquires sport->port.lock, need to stop before
* acquiring same lock because otherwise del_timer_sync() can deadlock.
*/
- if (old && sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
+ if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- }

spin_lock_irqsave(&sport->port.lock, flags);

@@ -2171,10 +2169,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
* Since timer function acquires sport->port.lock, need to stop before
* acquiring same lock because otherwise del_timer_sync() can deadlock.
*/
- if (old && sport->lpuart_dma_rx_use) {
- del_timer_sync(&sport->lpuart_timer);
+ if (old && sport->lpuart_dma_rx_use)
lpuart_dma_rx_free(&sport->port);
- }

spin_lock_irqsave(&sport->port.lock, flags);

@@ -2866,11 +2862,10 @@ static int __maybe_unused lpuart_suspend(struct device *dev)
* EDMA driver during suspend will forcefully release any
* non-idle DMA channels. If port wakeup is enabled or if port
* is console port or 'no_console_suspend' is set the Rx DMA
- * cannot resume as as expected, hence gracefully release the
+ * cannot resume as expected, hence gracefully release the
* Rx DMA path before suspend and start Rx DMA path on resume.
*/
if (irq_wake) {
- del_timer_sync(&sport->lpuart_timer);
lpuart_dma_rx_free(&sport->port);
}

diff --git a/drivers/usb/cdns3/cdns3-pci-wrap.c b/drivers/usb/cdns3/cdns3-pci-wrap.c
index deeea618ba33..1f6320d98a76 100644
--- a/drivers/usb/cdns3/cdns3-pci-wrap.c
+++ b/drivers/usb/cdns3/cdns3-pci-wrap.c
@@ -60,6 +60,11 @@ static struct pci_dev *cdns3_get_second_fun(struct pci_dev *pdev)
return NULL;
}

+ if (func->devfn != PCI_DEV_FN_HOST_DEVICE &&
+ func->devfn != PCI_DEV_FN_OTG) {
+ return NULL;
+ }
+
return func;
}

diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
index 9b8325f82499..d63d5d92f255 100644
--- a/drivers/usb/cdns3/cdnsp-ep0.c
+++ b/drivers/usb/cdns3/cdnsp-ep0.c
@@ -403,20 +403,6 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
case USB_REQ_SET_ISOCH_DELAY:
ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
break;
- case USB_REQ_SET_INTERFACE:
- /*
- * Add request into pending list to block sending status stage
- * by libcomposite.
- */
- list_add_tail(&pdev->ep0_preq.list,
- &pdev->ep0_preq.pep->pending_list);
-
- ret = cdnsp_ep0_delegate_req(pdev, ctrl);
- if (ret == -EBUSY)
- ret = 0;
-
- list_del(&pdev->ep0_preq.list);
- break;
default:
ret = cdnsp_ep0_delegate_req(pdev, ctrl);
break;
@@ -474,9 +460,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
else
ret = cdnsp_ep0_delegate_req(pdev, ctrl);

- if (!len)
- pdev->ep0_stage = CDNSP_STATUS_STAGE;
-
if (ret == USB_GADGET_DELAYED_STATUS) {
trace_cdnsp_ep0_status_stage("delayed");
return;
@@ -484,6 +467,6 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev)
out:
if (ret < 0)
cdnsp_ep0_stall(pdev);
- else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
+ else if (!len && pdev->ep0_stage != CDNSP_STATUS_STAGE)
cdnsp_status_stage(pdev);
}
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
index fe8a114c586c..29f433c5a6f3 100644
--- a/drivers/usb/cdns3/cdnsp-pci.c
+++ b/drivers/usb/cdns3/cdnsp-pci.c
@@ -29,30 +29,23 @@
#define PLAT_DRIVER_NAME "cdns-usbssp"

#define CDNS_VENDOR_ID 0x17cd
-#define CDNS_DEVICE_ID 0x0100
+#define CDNS_DEVICE_ID 0x0200
+#define CDNS_DRD_ID 0x0100
#define CDNS_DRD_IF (PCI_CLASS_SERIAL_USB << 8 | 0x80)

static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
{
- struct pci_dev *func;
-
/*
* Gets the second function.
- * It's little tricky, but this platform has two function.
- * The fist keeps resources for Host/Device while the second
- * keeps resources for DRD/OTG.
+ * Platform has two functions. The first keeps resources for
+ * Host/Device while the second keeps resources for DRD/OTG.
*/
- func = pci_get_device(pdev->vendor, pdev->device, NULL);
- if (!func)
- return NULL;
+ if (pdev->device == CDNS_DEVICE_ID)
+ return pci_get_device(pdev->vendor, CDNS_DRD_ID, NULL);
+ else if (pdev->device == CDNS_DRD_ID)
+ return pci_get_device(pdev->vendor, CDNS_DEVICE_ID, NULL);

- if (func->devfn == pdev->devfn) {
- func = pci_get_device(pdev->vendor, pdev->device, func);
- if (!func)
- return NULL;
- }
-
- return func;
+ return NULL;
}

static int cdnsp_pci_probe(struct pci_dev *pdev,
@@ -232,6 +225,8 @@ static const struct pci_device_id cdnsp_pci_ids[] = {
PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
CDNS_DRD_IF, PCI_ANY_ID },
+ { PCI_VENDOR_ID_CDNS, CDNS_DRD_ID, PCI_ANY_ID, PCI_ANY_ID,
+ CDNS_DRD_IF, PCI_ANY_ID },
{ 0, }
};

diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 99440baa6458..50e37846f037 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -203,6 +203,7 @@ struct hw_bank {
* @in_lpm: if the core in low power mode
* @wakeup_int: if wakeup interrupt occur
* @rev: The revision number for controller
+ * @mutex: protect code from concurrent running when doing role switch
*/
struct ci_hdrc {
struct device *dev;
@@ -255,6 +256,7 @@ struct ci_hdrc {
bool in_lpm;
bool wakeup_int;
enum ci_revision rev;
+ struct mutex mutex;
};

static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index a56f06368d14..a9869975ce32 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -974,9 +974,16 @@ static ssize_t role_store(struct device *dev,
strlen(ci->roles[role]->name)))
break;

- if (role == CI_ROLE_END || role == ci->role)
+ if (role == CI_ROLE_END)
return -EINVAL;

+ mutex_lock(&ci->mutex);
+
+ if (role == ci->role) {
+ mutex_unlock(&ci->mutex);
+ return n;
+ }
+
pm_runtime_get_sync(dev);
disable_irq(ci->irq);
ci_role_stop(ci);
@@ -985,6 +992,7 @@ static ssize_t role_store(struct device *dev,
ci_handle_vbus_change(ci);
enable_irq(ci->irq);
pm_runtime_put_sync(dev);
+ mutex_unlock(&ci->mutex);

return (ret == 0) ? n : ret;
}
@@ -1020,6 +1028,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENOMEM;

spin_lock_init(&ci->lock);
+ mutex_init(&ci->mutex);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 8dd59282827b..2d9d694eb0bd 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -167,8 +167,10 @@ static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)

static void ci_handle_id_switch(struct ci_hdrc *ci)
{
- enum ci_role role = ci_otg_role(ci);
+ enum ci_role role;

+ mutex_lock(&ci->mutex);
+ role = ci_otg_role(ci);
if (role != ci->role) {
dev_dbg(ci->dev, "switching from %s to %s\n",
ci_role(ci)->name, ci->roles[role]->name);
@@ -198,6 +200,7 @@ static void ci_handle_id_switch(struct ci_hdrc *ci)
if (role == CI_ROLE_GADGET)
ci_handle_vbus_change(ci);
}
+ mutex_unlock(&ci->mutex);
}
/**
* ci_otg_work - perform otg (vbus/id) event handle
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 265d437ca0f1..7a09476e9f19 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -121,13 +121,6 @@ static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg)
return 0;
}

-static void __dwc2_disable_regulators(void *data)
-{
- struct dwc2_hsotg *hsotg = data;
-
- regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
-}
-
static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
{
struct platform_device *pdev = to_platform_device(hsotg->dev);
@@ -138,11 +131,6 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg)
if (ret)
return ret;

- ret = devm_add_action_or_reset(&pdev->dev,
- __dwc2_disable_regulators, hsotg);
- if (ret)
- return ret;
-
if (hsotg->clk) {
ret = clk_prepare_enable(hsotg->clk);
if (ret)
@@ -198,7 +186,7 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg)
if (hsotg->clk)
clk_disable_unprepare(hsotg->clk);

- return 0;
+ return regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies);
}

/**
@@ -658,7 +646,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (hsotg->params.activate_stm_id_vb_detection)
regulator_disable(hsotg->usb33d);
error:
- if (hsotg->dr_mode != USB_DR_MODE_PERIPHERAL)
+ if (hsotg->ll_hw_enabled)
dwc2_lowlevel_hw_disable(hsotg);
return retval;
}
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index d22ac23c94b0..200eb788a74b 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -1174,7 +1174,7 @@ void g_audio_cleanup(struct g_audio *g_audio)
uac = g_audio->uac;
card = uac->card;
if (card)
- snd_card_free(card);
+ snd_card_free_when_closed(card);

kfree(uac->p_prm.reqs);
kfree(uac->c_prm.reqs);
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index c7b763d6d102..1f8c9b16a0fb 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -111,6 +111,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_BROKEN_FUA),

+/* Reported by: Yaroslav Furman <yaro330@xxxxxxxxx> */
+UNUSUAL_DEV(0x152d, 0x0583, 0x0000, 0x9999,
+ "JMicron",
+ "JMS583Gen 2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/* Reported-by: Thinh Nguyen <thinhn@xxxxxxxxxxxx> */
UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
"PNY",
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index ee461d314927..81329605757f 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1428,10 +1428,18 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
const u32 *data, int cnt)
{
+ u32 vdo_hdr = port->vdo_data[0];
+
WARN_ON(!mutex_is_locked(&port->lock));

- /* Make sure we are not still processing a previous VDM packet */
- WARN_ON(port->vdm_state > VDM_STATE_DONE);
+ /* If we are sending discover_identity, handle the received message first */
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
+ port->send_discover = true;
+ mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
+ } else {
+ /* Make sure we are not still processing a previous VDM packet */
+ WARN_ON(port->vdm_state > VDM_STATE_DONE);
+ }

port->vdo_count = cnt + 1;
port->vdo_data[0] = header;
@@ -1934,11 +1942,13 @@ static void vdm_run_state_machine(struct tcpm_port *port)
switch (PD_VDO_CMD(vdo_hdr)) {
case CMD_DISCOVER_IDENT:
res = tcpm_ams_start(port, DISCOVER_IDENTITY);
- if (res == 0)
+ if (res == 0) {
port->send_discover = false;
- else if (res == -EAGAIN)
+ } else if (res == -EAGAIN) {
+ port->vdo_data[0] = 0;
mod_send_discover_delayed_work(port,
SEND_DISCOVER_RETRY_MS);
+ }
break;
case CMD_DISCOVER_SVID:
res = tcpm_ams_start(port, DISCOVER_SVIDS);
@@ -2021,6 +2031,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
unsigned long timeout;

port->vdm_retries = 0;
+ port->vdo_data[0] = 0;
port->vdm_state = VDM_STATE_BUSY;
timeout = vdm_ready_timeout(vdo_hdr);
mod_vdm_delayed_work(port, timeout);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 7363958ca165..dca6803a75bd 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -1202,7 +1202,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
static int ucsi_init(struct ucsi *ucsi)
{
struct ucsi_connector *con;
- u64 command;
+ u64 command, ntfy;
int ret;
int i;

@@ -1214,8 +1214,8 @@ static int ucsi_init(struct ucsi *ucsi)
}

/* Enable basic notifications */
- ucsi->ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
- command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+ ntfy = UCSI_ENABLE_NTFY_CMD_COMPLETE | UCSI_ENABLE_NTFY_ERROR;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_reset;
@@ -1247,12 +1247,13 @@ static int ucsi_init(struct ucsi *ucsi)
}

/* Enable all notifications */
- ucsi->ntfy = UCSI_ENABLE_NTFY_ALL;
- command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy;
+ ntfy = UCSI_ENABLE_NTFY_ALL;
+ command = UCSI_SET_NOTIFICATION_ENABLE | ntfy;
ret = ucsi_send_command(ucsi, command, NULL, 0);
if (ret < 0)
goto err_unregister;

+ ucsi->ntfy = ntfy;
return 0;

err_unregister:
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 248a8f973cf9..e7501533c2ec 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -173,7 +173,7 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)

seq_puts(m, "# Version:1\n");
seq_puts(m, "# Format:\n");
- seq_puts(m, "# <tree id> <persistent fid> <flags> <count> <pid> <uid>");
+ seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>");
#ifdef CONFIG_CIFS_DEBUG2
seq_printf(m, " <filename> <mid>\n");
#else
@@ -190,8 +190,9 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
cfile = list_entry(tmp2, struct cifsFileInfo,
tlist);
seq_printf(m,
- "0x%x 0x%llx 0x%x %d %d %d %pd",
+ "0x%x 0x%llx 0x%llx 0x%x %d %d %d %pd",
tcon->tid,
+ ses->Suid,
cfile->fid.persistent_fid,
cfile->f_flags,
cfile->count,
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 1fef721f60c9..fab582933909 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -653,7 +653,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
if (rc == -EOPNOTSUPP) {
cifs_dbg(FYI,
"server does not support query network interfaces\n");
- goto out;
+ ret_data_len = 0;
} else if (rc != 0) {
cifs_tcon_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
goto out;
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index ce0edf926c2a..6105649b4ebb 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -313,13 +313,10 @@ int ksmbd_conn_handler_loop(void *p)
}

/*
- * Check if pdu size is valid (min : smb header size,
- * max : 0x00FFFFFF).
+ * Check maximum pdu size (0x00FFFFFF).
*/
- if (pdu_size < __SMB2_HEADER_STRUCTURE_SIZE ||
- pdu_size > MAX_STREAM_PROT_LEN) {
+ if (pdu_size > MAX_STREAM_PROT_LEN)
break;
- }

/* 4 for rfc1002 length field */
size = pdu_size + 4;
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index ac029dfd23ab..1cd6170b9a5c 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -2979,8 +2979,11 @@ int smb2_open(struct ksmbd_work *work)
sizeof(struct smb_acl) +
sizeof(struct smb_ace) * ace_num * 2,
GFP_KERNEL);
- if (!pntsd)
+ if (!pntsd) {
+ posix_acl_release(fattr.cf_acls);
+ posix_acl_release(fattr.cf_dacls);
goto err_out;
+ }

rc = build_sec_desc(user_ns,
pntsd, NULL, 0,
@@ -4920,6 +4923,10 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,

info->Attributes |= cpu_to_le32(server_conf.share_fake_fscaps);

+ if (test_share_config_flag(work->tcon->share_conf,
+ KSMBD_SHARE_FLAG_STREAMS))
+ info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS);
+
info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
len = smbConvertToUTF16((__le16 *)info->FileSystemName,
"NTFS", PATH_MAX, conn->local_nls, 0);
@@ -7415,13 +7422,16 @@ static int fsctl_query_allocated_ranges(struct ksmbd_work *work, u64 id,
if (in_count == 0)
return -EINVAL;

+ start = le64_to_cpu(qar_req->file_offset);
+ length = le64_to_cpu(qar_req->length);
+
+ if (start < 0 || length < 0)
+ return -EINVAL;
+
fp = ksmbd_lookup_fd_fast(work, id);
if (!fp)
return -ENOENT;

- start = le64_to_cpu(qar_req->file_offset);
- length = le64_to_cpu(qar_req->length);
-
ret = ksmbd_vfs_fqar_lseek(fp, start, length,
qar_rsp, in_count, out_count);
if (ret && ret != -E2BIG)
@@ -7725,7 +7735,7 @@ int smb2_ioctl(struct ksmbd_work *work)

off = le64_to_cpu(zero_data->FileOffset);
bfz = le64_to_cpu(zero_data->BeyondFinalZero);
- if (off > bfz) {
+ if (off < 0 || bfz < 0 || off > bfz) {
ret = -EINVAL;
goto out;
}
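Several of the ksmbd hunks above reject client-supplied 64-bit offsets and lengths whose top bit is set, since they turn negative once stored in loff_t. A short sketch of that validation, with illustrative names:

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/errno.h>

/* Wire values are unsigned 64-bit; after le64_to_cpu() they are stored in
 * signed loff_t, so anything with bit 63 set shows up as negative and must
 * be rejected before it reaches lseek-style helpers.
 */
static int example_check_range(__le64 wire_off, __le64 wire_len)
{
	loff_t off = le64_to_cpu(wire_off);
	loff_t len = le64_to_cpu(wire_len);

	if (off < 0 || len < 0)
		return -EINVAL;

	return 0;
}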
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index 22f460984742..f034b75c6d7f 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -434,7 +434,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,

static int __smb2_negotiate(struct ksmbd_conn *conn)
{
- return (conn->dialect >= SMB21_PROT_ID &&
+ return (conn->dialect >= SMB20_PROT_ID &&
conn->dialect <= SMB311_PROT_ID);
}

@@ -442,9 +442,26 @@ static int smb_handle_negotiate(struct ksmbd_work *work)
{
struct smb_negotiate_rsp *neg_rsp = work->response_buf;

- ksmbd_debug(SMB, "Unsupported SMB protocol\n");
- neg_rsp->hdr.Status.CifsError = STATUS_INVALID_LOGON_TYPE;
- return -EINVAL;
+ ksmbd_debug(SMB, "Unsupported SMB1 protocol\n");
+
+ /*
+ * Remove 4 byte direct TCP header, add 2 byte bcc and
+ * 2 byte DialectIndex.
+ */
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2 + 2);
+ neg_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+
+ neg_rsp->hdr.Command = SMB_COM_NEGOTIATE;
+ *(__le32 *)neg_rsp->hdr.Protocol = SMB1_PROTO_NUMBER;
+ neg_rsp->hdr.Flags = SMBFLG_RESPONSE;
+ neg_rsp->hdr.Flags2 = SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS |
+ SMBFLG2_EXT_SEC | SMBFLG2_IS_LONG_NAME;
+
+ neg_rsp->hdr.WordCount = 1;
+ neg_rsp->DialectIndex = cpu_to_le16(work->conn->dialect);
+ neg_rsp->ByteCount = 0;
+ return 0;
}

int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
@@ -464,7 +481,7 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
}
}

- if (command == SMB2_NEGOTIATE_HE && __smb2_negotiate(conn)) {
+ if (command == SMB2_NEGOTIATE_HE) {
ret = smb2_handle_negotiate(work);
init_smb2_neg_rsp(work);
return ret;
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index 1eba8dabaf31..b9fe3fa149c2 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -205,8 +205,15 @@

#define SMB1_PROTO_NUMBER cpu_to_le32(0x424d53ff)
#define SMB_COM_NEGOTIATE 0x72
-
#define SMB1_CLIENT_GUID_SIZE (16)
+
+#define SMBFLG_RESPONSE 0x80 /* this PDU is a response from server */
+
+#define SMBFLG2_IS_LONG_NAME cpu_to_le16(0x40)
+#define SMBFLG2_EXT_SEC cpu_to_le16(0x800)
+#define SMBFLG2_ERR_STATUS cpu_to_le16(0x4000)
+#define SMBFLG2_UNICODE cpu_to_le16(0x8000)
+
struct smb_hdr {
__be32 smb_buf_length;
__u8 Protocol[4];
@@ -246,28 +253,7 @@ struct smb_negotiate_req {
struct smb_negotiate_rsp {
struct smb_hdr hdr; /* wct = 17 */
__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
- __u8 SecurityMode;
- __le16 MaxMpxCount;
- __le16 MaxNumberVcs;
- __le32 MaxBufferSize;
- __le32 MaxRawSize;
- __le32 SessionKey;
- __le32 Capabilities; /* see below */
- __le32 SystemTimeLow;
- __le32 SystemTimeHigh;
- __le16 ServerTimeZone;
- __u8 EncryptionKeyLength;
__le16 ByteCount;
- union {
- unsigned char EncryptionKey[8]; /* cap extended security off */
- /* followed by Domain name - if extended security is off */
- /* followed by 16 bytes of server GUID */
- /* then security blob if cap_extended_security negotiated */
- struct {
- unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
- unsigned char SecurityBlob[1];
- } __packed extended_response;
- } __packed u;
} __packed;

struct filesystem_attribute_info {
diff --git a/fs/lockd/clnt4xdr.c b/fs/lockd/clnt4xdr.c
index 7df6324ccb8a..8161667c976f 100644
--- a/fs/lockd/clnt4xdr.c
+++ b/fs/lockd/clnt4xdr.c
@@ -261,7 +261,6 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
u32 exclusive;
int error;
__be32 *p;
- s32 end;

memset(lock, 0, sizeof(*lock));
locks_init_lock(fl);
@@ -285,13 +284,7 @@ static int decode_nlm4_holder(struct xdr_stream *xdr, struct nlm_res *result)
fl->fl_type = exclusive != 0 ? F_WRLCK : F_RDLCK;
p = xdr_decode_hyper(p, &l_offset);
xdr_decode_hyper(p, &l_len);
- end = l_offset + l_len - 1;
-
- fl->fl_start = (loff_t)l_offset;
- if (l_len == 0 || end < 0)
- fl->fl_end = OFFSET_MAX;
- else
- fl->fl_end = (loff_t)end;
+ nlm4svc_set_file_lock_range(fl, l_offset, l_len);
error = 0;
out:
return error;
diff --git a/fs/lockd/xdr4.c b/fs/lockd/xdr4.c
index 72f7d190fb3b..b303ecd74f33 100644
--- a/fs/lockd/xdr4.c
+++ b/fs/lockd/xdr4.c
@@ -33,6 +33,17 @@ loff_t_to_s64(loff_t offset)
return res;
}

+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len)
+{
+ s64 end = off + len - 1;
+
+ fl->fl_start = off;
+ if (len == 0 || end < 0)
+ fl->fl_end = OFFSET_MAX;
+ else
+ fl->fl_end = end;
+}
+
/*
* NLM file handles are defined by specification to be a variable-length
* XDR opaque no longer than 1024 bytes. However, this implementation
@@ -80,7 +91,7 @@ svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock)
locks_init_lock(fl);
fl->fl_flags = FL_POSIX;
fl->fl_type = F_RDLCK;
-
+ nlm4svc_set_file_lock_range(fl, lock->lock_start, lock->lock_len);
return true;
}

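
The helper consolidated above clamps an NLMv4 (offset, length) pair into a struct file_lock range, treating a zero length or a wrapped end as "lock to end of file". A stand-alone sketch of that arithmetic, assuming OFFSET_MAX resolves to LLONG_MAX as for a 64-bit loff_t:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define OFFSET_MAX LLONG_MAX    /* assumption: 64-bit loff_t */

static void set_range(uint64_t off, uint64_t len, long long *start, long long *end)
{
        long long e = off + len - 1;    /* may wrap negative, as in the kernel helper */

        *start = off;
        if (len == 0 || e < 0)
                *end = OFFSET_MAX;      /* whole-file lock */
        else
                *end = e;
}

int main(void)
{
        long long start, end;

        set_range(100, 0, &start, &end);        /* len 0: lock to EOF */
        printf("%lld..%lld\n", start, end);

        set_range(1, UINT64_MAX, &start, &end); /* end wraps: also clamped to EOF */
        printf("%lld..%lld\n", start, end);
        return 0;
}
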
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 57af9c30eb48..b817d24d25a6 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1351,13 +1351,6 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
return status;
}

-static void
-nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
-{
- nfs_do_sb_deactive(ss_mnt->mnt_sb);
- mntput(ss_mnt);
-}
-
/*
* Verify COPY destination stateid.
*
@@ -1460,11 +1453,6 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct nfsd_file *src,
{
}

-static void
-nfsd4_interssc_disconnect(struct vfsmount *ss_mnt)
-{
-}
-
static struct file *nfs42_ssc_open(struct vfsmount *ss_mnt,
struct nfs_fh *src_fh,
nfs4_stateid *stateid)
@@ -1622,14 +1610,14 @@ static int nfsd4_do_async_copy(void *data)
copy->nf_src = kzalloc(sizeof(struct nfsd_file), GFP_KERNEL);
if (!copy->nf_src) {
copy->nfserr = nfserr_serverfault;
- nfsd4_interssc_disconnect(copy->ss_mnt);
+ /* ss_mnt will be unmounted by the laundromat */
goto do_callback;
}
copy->nf_src->nf_file = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh,
&copy->stateid);
if (IS_ERR(copy->nf_src->nf_file)) {
copy->nfserr = nfserr_offload_denied;
- nfsd4_interssc_disconnect(copy->ss_mnt);
+ /* ss_mnt will be unmounted by the laundromat */
goto do_callback;
}
}
@@ -1714,8 +1702,10 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (async_copy)
cleanup_async_copy(async_copy);
status = nfserrno(-ENOMEM);
- if (!copy->cp_intra)
- nfsd4_interssc_disconnect(copy->ss_mnt);
+ /*
+ * source's vfsmount of inter-copy will be unmounted
+ * by the laundromat
+ */
goto out;
}

diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index a8509f364bf5..a39206705dd1 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -71,7 +71,7 @@ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
if (argv->v_index > ~(__u64)0 - argv->v_nmembs)
return -EINVAL;

- buf = (void *)__get_free_pages(GFP_NOFS, 0);
+ buf = (void *)get_zeroed_page(GFP_NOFS);
if (unlikely(!buf))
return -ENOMEM;
maxmembs = PAGE_SIZE / argv->v_size;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 68d11c295dd3..c051074016ce 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1978,11 +1978,25 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
}

if (unlikely(copied < len) && wc->w_target_page) {
+ loff_t new_isize;
+
if (!PageUptodate(wc->w_target_page))
copied = 0;

- ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
- start+len);
+ new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
+ if (new_isize > page_offset(wc->w_target_page))
+ ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+ start+len);
+ else {
+ /*
+ * When page is fully beyond new isize (data copy
+ * failed), do not bother zeroing the page. Invalidate
+ * it instead so that writeback does not get confused and
+ * put page & buffer dirty bits into an inconsistent
+ * state.
+ */
+ block_invalidatepage(wc->w_target_page, 0, PAGE_SIZE);
+ }
}
if (wc->w_target_page)
flush_dcache_page(wc->w_target_page);
diff --git a/fs/super.c b/fs/super.c
index 7fa3ee79ec89..297630540f43 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -450,13 +450,22 @@ void generic_shutdown_super(struct super_block *sb)

cgroup_writeback_umount();

- /* evict all inodes with zero refcount */
+ /* Evict all inodes with zero refcount. */
evict_inodes(sb);
- /* only nonzero refcount inodes can have marks */
+
+ /*
+ * Clean up and evict any inodes that still have references due
+ * to fsnotify or the security policy.
+ */
fsnotify_sb_delete(sb);
- fscrypt_destroy_keyring(sb);
security_sb_delete(sb);

+ /*
+ * Now that all potentially-encrypted inodes have been evicted,
+ * the fscrypt keyring can be destroyed.
+ */
+ fscrypt_destroy_keyring(sb);
+
if (sb->s_dio_done_wq) {
destroy_workqueue(sb->s_dio_done_wq);
sb->s_dio_done_wq = NULL;
diff --git a/fs/verity/verify.c b/fs/verity/verify.c
index 0adb970f4e73..10e41883dfa1 100644
--- a/fs/verity/verify.c
+++ b/fs/verity/verify.c
@@ -279,15 +279,15 @@ EXPORT_SYMBOL_GPL(fsverity_enqueue_verify_work);
int __init fsverity_init_workqueue(void)
{
/*
- * Use an unbound workqueue to allow bios to be verified in parallel
- * even when they happen to complete on the same CPU. This sacrifices
- * locality, but it's worthwhile since hashing is CPU-intensive.
+ * Use a high-priority workqueue to prioritize verification work, which
+ * blocks reads from completing, over regular application tasks.
*
- * Also use a high-priority workqueue to prioritize verification work,
- * which blocks reads from completing, over regular application tasks.
+ * For performance reasons, don't use an unbound workqueue. Using an
+ * unbound workqueue for crypto operations causes excessive scheduler
+ * latency on ARM64.
*/
fsverity_read_workqueue = alloc_workqueue("fsverity_read_queue",
- WQ_UNBOUND | WQ_HIGHPRI,
+ WQ_HIGHPRI,
num_online_cpus());
if (!fsverity_read_workqueue)
return -ENOMEM;
diff --git a/include/linux/acpi_mdio.h b/include/linux/acpi_mdio.h
index 0a24ab7cb66f..8e2eefa9fbc0 100644
--- a/include/linux/acpi_mdio.h
+++ b/include/linux/acpi_mdio.h
@@ -9,7 +9,14 @@
#include <linux/phy.h>

#if IS_ENABLED(CONFIG_ACPI_MDIO)
-int acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode);
+int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
+ struct module *owner);
+
+static inline int
+acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *handle)
+{
+ return __acpi_mdiobus_register(mdio, handle, THIS_MODULE);
+}
#else /* CONFIG_ACPI_MDIO */
static inline int
acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode)
diff --git a/include/linux/entry-kvm.h b/include/linux/entry-kvm.h
index 0d7865a0731c..07c878d6e323 100644
--- a/include/linux/entry-kvm.h
+++ b/include/linux/entry-kvm.h
@@ -75,7 +75,7 @@ static inline void xfer_to_guest_mode_prepare(void)
*/
static inline bool __xfer_to_guest_mode_work_pending(void)
{
- unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+ unsigned long ti_work = read_thread_flags();

return !!(ti_work & XFER_TO_GUEST_MODE_WORK);
}
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 346b0f269161..db47aae7c481 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -56,6 +56,31 @@ bool kthread_is_per_cpu(struct task_struct *k);
__k; \
})

+/**
+ * kthread_run_on_cpu - create and wake a cpu bound thread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: The cpu on which the thread should be bound.
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_on_cpu()
+ * followed by wake_up_process(). Returns the kthread or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct task_struct *
+kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
+ unsigned int cpu, const char *namefmt)
+{
+ struct task_struct *p;
+
+ p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
+ if (!IS_ERR(p))
+ wake_up_process(p);
+
+ return p;
+}
+
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
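
Later in this patch the hwlat tracer switches to this new wrapper; for context, the calling pattern looks roughly like the sketch below (the function and thread names are made up for illustration, not taken from the patch):

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int my_sampler_fn(void *data)
{
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);
        return 0;
}

static int my_start_sampler(unsigned int cpu)
{
        struct task_struct *t;

        /* create the thread bound to @cpu and wake it in one call */
        t = kthread_run_on_cpu(my_sampler_fn, NULL, cpu, "my_sampler/%u");
        if (IS_ERR(t))
                return PTR_ERR(t);
        return 0;
}
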
diff --git a/include/linux/lockd/xdr4.h b/include/linux/lockd/xdr4.h
index 5ae766f26e04..025250ade98e 100644
--- a/include/linux/lockd/xdr4.h
+++ b/include/linux/lockd/xdr4.h
@@ -24,6 +24,7 @@



+void nlm4svc_set_file_lock_range(struct file_lock *fl, u64 off, u64 len);
int nlm4svc_decode_testargs(struct svc_rqst *, __be32 *);
int nlm4svc_encode_testres(struct svc_rqst *, __be32 *);
int nlm4svc_decode_lockargs(struct svc_rqst *, __be32 *);
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 959e0bd9a913..73364ae91689 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -114,8 +114,9 @@ struct nvme_tcp_icresp_pdu {
struct nvme_tcp_term_pdu {
struct nvme_tcp_hdr hdr;
__le16 fes;
- __le32 fei;
- __u8 rsvd[8];
+ __le16 feil;
+ __le16 feiu;
+ __u8 rsvd[10];
};

/**
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index da633d34ab86..8a52ef2e6fa6 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -14,9 +14,25 @@

#if IS_ENABLED(CONFIG_OF_MDIO)
bool of_mdiobus_child_is_phy(struct device_node *child);
-int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
-int devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
- struct device_node *np);
+int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np,
+ struct module *owner);
+
+static inline int of_mdiobus_register(struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __of_mdiobus_register(mdio, np, THIS_MODULE);
+}
+
+int __devm_of_mdiobus_register(struct device *dev, struct mii_bus *mdio,
+ struct device_node *np, struct module *owner);
+
+static inline int devm_of_mdiobus_register(struct device *dev,
+ struct mii_bus *mdio,
+ struct device_node *np)
+{
+ return __devm_of_mdiobus_register(dev, mdio, np, THIS_MODULE);
+}
+
struct mdio_device *of_mdio_find_device(struct device_node *np);
struct phy_device *of_phy_find_device(struct device_node *phy_np);
struct phy_device *
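
As with the acpi_mdio.h change above, the point of turning these registration calls into static inlines is that THIS_MODULE expands in the caller's translation unit, so the MDIO bus records the registering driver, rather than the shared MDIO helper code, as its owner. A generic sketch of the pattern (names are illustrative, not from the patch):

#include <linux/module.h>

struct widget_bus;                      /* illustrative type, not a real kernel struct */

/* out-of-line implementation, exported once by the library module */
int __widget_bus_register(struct widget_bus *bus, struct module *owner);

/* header wrapper: THIS_MODULE expands in each caller's translation unit */
static inline int widget_bus_register(struct widget_bus *bus)
{
        return __widget_bus_register(bus, THIS_MODULE);
}
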
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index 8ba8b5be5567..c1ef5fc60a3c 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -70,11 +70,16 @@ static inline void sysfb_disable(void)
#ifdef CONFIG_EFI

extern struct efifb_dmi_info efifb_dmi_list[];
-void sysfb_apply_efi_quirks(struct platform_device *pd);
+void sysfb_apply_efi_quirks(void);
+void sysfb_set_efifb_fwnode(struct platform_device *pd);

#else /* CONFIG_EFI */

-static inline void sysfb_apply_efi_quirks(struct platform_device *pd)
+static inline void sysfb_apply_efi_quirks(void)
+{
+}
+
+static inline void sysfb_set_efifb_fwnode(struct platform_device *pd)
{
}

diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 0999f6317978..9a073535c0bd 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -118,6 +118,15 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
return test_bit(flag, (unsigned long *)&ti->flags);
}

+/*
+ * This may be used in noinstr code, and needs to be __always_inline to prevent
+ * inadvertent instrumentation.
+ */
+static __always_inline unsigned long read_ti_thread_flags(struct thread_info *ti)
+{
+ return READ_ONCE(ti->flags);
+}
+
#define set_thread_flag(flag) \
set_ti_thread_flag(current_thread_info(), flag)
#define clear_thread_flag(flag) \
@@ -130,6 +139,11 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
test_and_clear_ti_thread_flag(current_thread_info(), flag)
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
+#define read_thread_flags() \
+ read_ti_thread_flags(current_thread_info())
+
+#define read_task_thread_flags(t) \
+ read_ti_thread_flags(task_thread_info(t))

#ifdef CONFIG_GENERIC_ENTRY
#define set_syscall_work(fl) \
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index cea0d1296599..f7c27c1cc593 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -829,7 +829,7 @@ static int __init bpf_jit_charge_init(void)
{
/* Only used as heuristic here to derive limit. */
bpf_jit_limit_max = bpf_jit_alloc_exec_limit();
- bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 2,
+ bpf_jit_limit = min_t(u64, round_up(bpf_jit_limit_max >> 1,
PAGE_SIZE), LONG_MAX);
return 0;
}
diff --git a/kernel/entry/common.c b/kernel/entry/common.c
index 998bdb7b8bf7..e002bea6b4be 100644
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
@@ -187,7 +187,7 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,
/* Check if any of the above work has queued a deferred wakeup */
tick_nohz_user_enter_prepare();

- ti_work = READ_ONCE(current_thread_info()->flags);
+ ti_work = read_thread_flags();
}

/* Return the latest work state for arch_exit_to_user_mode() */
@@ -196,13 +196,14 @@ static unsigned long exit_to_user_mode_loop(struct pt_regs *regs,

static void exit_to_user_mode_prepare(struct pt_regs *regs)
{
- unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+ unsigned long ti_work;

lockdep_assert_irqs_disabled();

/* Flush pending rcuog wakeup before the last need_resched() check */
tick_nohz_user_enter_prepare();

+ ti_work = read_thread_flags();
if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
ti_work = exit_to_user_mode_loop(regs, ti_work);

diff --git a/kernel/entry/kvm.c b/kernel/entry/kvm.c
index 49972ee99aff..96d476e06c77 100644
--- a/kernel/entry/kvm.c
+++ b/kernel/entry/kvm.c
@@ -26,7 +26,7 @@ static int xfer_to_guest_mode_work(struct kvm_vcpu *vcpu, unsigned long ti_work)
if (ret)
return ret;

- ti_work = READ_ONCE(current_thread_info()->flags);
+ ti_work = read_thread_flags();
} while (ti_work & XFER_TO_GUEST_MODE_WORK || need_resched());
return 0;
}
@@ -43,7 +43,7 @@ int xfer_to_guest_mode_handle_work(struct kvm_vcpu *vcpu)
* disabled in the inner loop before going into guest mode. No need
* to disable interrupts here.
*/
- ti_work = READ_ONCE(current_thread_info()->flags);
+ ti_work = read_thread_flags();
if (!(ti_work & XFER_TO_GUEST_MODE_WORK))
return 0;

diff --git a/kernel/events/core.c b/kernel/events/core.c
index d2b415820183..2cdee62c3de7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3909,7 +3909,7 @@ ctx_sched_in(struct perf_event_context *ctx,
if (likely(!ctx->nr_events))
return;

- if (is_active ^ EVENT_TIME) {
+ if (!(is_active & EVENT_TIME)) {
/* start ctx time */
__update_context_time(ctx, false);
perf_cgroup_set_timestamp(task, ctx);
@@ -9056,7 +9056,7 @@ static void perf_event_bpf_output(struct perf_event *event, void *data)

perf_event_header__init_id(&bpf_event->event_id.header,
&sample, event);
- ret = perf_output_begin(&handle, data, event,
+ ret = perf_output_begin(&handle, &sample, event,
bpf_event->event_id.header.size);
if (ret)
return;
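
The EVENT_TIME hunk above replaces an XOR test with a masked test; the difference matters because "is_active ^ EVENT_TIME" is only zero when is_active equals EVENT_TIME exactly, so the old check also fired when EVENT_TIME was already set alongside other bits. A small stand-alone demonstration (the flag values below are placeholders, not the kernel's):

#include <stdio.h>

#define EVENT_PINNED 0x1        /* placeholder flag values for illustration */
#define EVENT_TIME   0x4

int main(void)
{
        int is_active = EVENT_PINNED | EVENT_TIME;      /* time already running */

        printf("old test fires: %d\n", (is_active ^ EVENT_TIME) != 0);  /* 1 */
        printf("new test fires: %d\n", !(is_active & EVENT_TIME));      /* 0 */
        return 0;
}
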
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5b37a8567168..e319a1b62586 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -523,6 +523,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
to_kthread(p)->cpu = cpu;
return p;
}
+EXPORT_SYMBOL(kthread_create_on_cpu);

void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c1458fa8beb3..0c72459d5f42 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1999,6 +1999,9 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)

void activate_task(struct rq *rq, struct task_struct *p, int flags)
{
+ if (task_on_rq_migrating(p))
+ flags |= ENQUEUE_MIGRATED;
+
enqueue_task(rq, p, flags);

p->on_rq = TASK_ON_RQ_QUEUED;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6648683cd964..8f5a5e72bdb3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4327,6 +4327,29 @@ static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
#endif
}

+static inline bool entity_is_long_sleeper(struct sched_entity *se)
+{
+ struct cfs_rq *cfs_rq;
+ u64 sleep_time;
+
+ if (se->exec_start == 0)
+ return false;
+
+ cfs_rq = cfs_rq_of(se);
+
+ sleep_time = rq_clock_task(rq_of(cfs_rq));
+
+ /* Can happen while migrating because of clock task divergence */
+ if (sleep_time <= se->exec_start)
+ return false;
+
+ sleep_time -= se->exec_start;
+ if (sleep_time > ((1ULL << 63) / scale_load_down(NICE_0_LOAD)))
+ return true;
+
+ return false;
+}
+
static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
@@ -4355,8 +4378,29 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
vruntime -= thresh;
}

- /* ensure we never gain time by being placed backwards. */
- se->vruntime = max_vruntime(se->vruntime, vruntime);
+ /*
+ * Pull vruntime of the entity being placed to the base level of
+ * cfs_rq, to prevent boosting it if placed backwards.
+ * However, min_vruntime can advance much faster than real time, with
+ * the extreme being when an entity with the minimal weight always runs
+ * on the cfs_rq. If the waking entity slept for a long time, its
+ * vruntime difference from min_vruntime may overflow s64 and their
+ * comparison may get inverted, so ignore the entity's original
+ * vruntime in that case.
+ * The maximal vruntime speedup is given by the ratio of normal to
+ * minimal weight: scale_load_down(NICE_0_LOAD) / MIN_SHARES.
+ * When placing a migrated waking entity, its exec_start has been set
+ * from a different rq. In order to take into account a possible
+ * divergence between new and prev rq's clocks task because of irq and
+ * stolen time, we take an additional margin.
+ * So, cutting off on the sleep time of
+ * 2^63 / scale_load_down(NICE_0_LOAD) ~ 104 days
+ * should be safe.
+ */
+ if (entity_is_long_sleeper(se))
+ se->vruntime = vruntime;
+ else
+ se->vruntime = max_vruntime(se->vruntime, vruntime);
}

static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -4452,6 +4496,9 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

if (flags & ENQUEUE_WAKEUP)
place_entity(cfs_rq, se, 0);
+ /* Entity has migrated, no longer consider this task hot */
+ if (flags & ENQUEUE_MIGRATED)
+ se->exec_start = 0;

check_schedstat_required();
update_stats_enqueue(cfs_rq, se, flags);
@@ -7179,9 +7226,6 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
/* Tell new CPU we are migrated */
p->se.avg.last_update_time = 0;

- /* We have migrated, no longer consider this task hot */
- p->se.exec_start = 0;
-
update_scan_period(p, new_cpu);
}

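
The "~104 days" figure in the place_entity() comment above can be reproduced directly, assuming scale_load_down(NICE_0_LOAD) is 1024 as in the mainline scheduler and vruntime is tracked in nanoseconds:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cutoff_ns = (1ULL << 63) / 1024;   /* 2^63 / scale_load_down(NICE_0_LOAD) */
        double days = (double)cutoff_ns / 1e9 / 60 / 60 / 24;

        printf("cutoff: %llu ns (about %.1f days)\n",
               (unsigned long long)cutoff_ns, days);    /* about 104.2 days */
        return 0;
}
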
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 11f32e947c45..9ec032f22531 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -339,7 +339,7 @@ static void move_to_next_cpu(void)
cpumask_clear(current_mask);
cpumask_set_cpu(next_cpu, current_mask);

- sched_setaffinity(0, current_mask);
+ set_cpus_allowed_ptr(current, current_mask);
return;

change_mode:
@@ -446,7 +446,7 @@ static int start_single_kthread(struct trace_array *tr)

}

- sched_setaffinity(kthread->pid, current_mask);
+ set_cpus_allowed_ptr(kthread, current_mask);

kdata->kthread = kthread;
wake_up_process(kthread);
@@ -491,18 +491,18 @@ static void stop_per_cpu_kthreads(void)
static int start_cpu_kthread(unsigned int cpu)
{
struct task_struct *kthread;
- char comm[24];

- snprintf(comm, 24, "hwlatd/%d", cpu);
+ /* Do not start a new hwlatd thread if it is already running */
+ if (per_cpu(hwlat_per_cpu_data, cpu).kthread)
+ return 0;

- kthread = kthread_create_on_cpu(kthread_fn, NULL, cpu, comm);
+ kthread = kthread_run_on_cpu(kthread_fn, NULL, cpu, "hwlatd/%u");
if (IS_ERR(kthread)) {
pr_err(BANNER "could not start sampling thread\n");
return -ENOMEM;
}

per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread;
- wake_up_process(kthread);

return 0;
}
diff --git a/mm/kfence/Makefile b/mm/kfence/Makefile
index 6872cd5e5390..cb2bcf773083 100644
--- a/mm/kfence/Makefile
+++ b/mm/kfence/Makefile
@@ -2,5 +2,5 @@

obj-$(CONFIG_KFENCE) := core.o report.o

-CFLAGS_kfence_test.o := -g -fno-omit-frame-pointer -fno-optimize-sibling-calls
+CFLAGS_kfence_test.o := -fno-omit-frame-pointer -fno-optimize-sibling-calls
obj-$(CONFIG_KFENCE_KUNIT_TEST) += kfence_test.o
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index d25202766fbb..3eab72fb3d8c 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -678,10 +678,14 @@ static const struct file_operations objects_fops = {
.release = seq_release,
};

-static int __init kfence_debugfs_init(void)
+static int kfence_debugfs_init(void)
{
- struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);
+ struct dentry *kfence_dir;

+ if (!READ_ONCE(kfence_enabled))
+ return 0;
+
+ kfence_dir = debugfs_create_dir("kfence", NULL);
debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
return 0;
diff --git a/mm/slab.c b/mm/slab.c
index 1bd283e98c58..f5b2246f832d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -855,7 +855,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
return 0;
}

-#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
+#if defined(CONFIG_NUMA) || defined(CONFIG_SMP)
/*
* Allocates and initializes node for a node on each slab cache, used for
* either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index a21e086d69d0..0194c25b8dc5 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -708,6 +708,17 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);

+static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
+ l2cap_chan_func_t func, void *data)
+{
+ struct l2cap_chan *chan, *l;
+
+ list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
+ if (chan->ident == id)
+ func(chan, data);
+ }
+}
+
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
void *data)
{
@@ -775,23 +786,9 @@ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)

static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
- struct l2cap_conn *conn = chan->conn;
- struct l2cap_ecred_conn_rsp rsp;
- u16 result;
-
- if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
- result = L2CAP_CR_LE_AUTHORIZATION;
- else
- result = L2CAP_CR_LE_BAD_PSM;
-
l2cap_state_change(chan, BT_DISCONN);

- memset(&rsp, 0, sizeof(rsp));
-
- rsp.result = cpu_to_le16(result);
-
- l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
- &rsp);
+ __l2cap_ecred_conn_rsp_defer(chan);
}

static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
@@ -846,7 +843,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
break;
case L2CAP_MODE_EXT_FLOWCTL:
l2cap_chan_ecred_connect_reject(chan);
- break;
+ return;
}
}
}
@@ -3938,43 +3935,86 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
&rsp);
}

-void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
+static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
+ int *result = data;
+
+ if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ return;
+
+ switch (chan->state) {
+ case BT_CONNECT2:
+ /* If channel still pending accept add to result */
+ (*result)++;
+ return;
+ case BT_CONNECTED:
+ return;
+ default:
+ /* If not connected or pending accept it has been refused */
+ *result = -ECONNREFUSED;
+ return;
+ }
+}
+
+struct l2cap_ecred_rsp_data {
struct {
struct l2cap_ecred_conn_rsp rsp;
- __le16 dcid[5];
+ __le16 scid[L2CAP_ECRED_MAX_CID];
} __packed pdu;
+ int count;
+};
+
+static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
+{
+ struct l2cap_ecred_rsp_data *rsp = data;
+
+ if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
+ return;
+
+ /* Reset ident so only one response is sent */
+ chan->ident = 0;
+
+ /* Include all channels pending with the same ident */
+ if (!rsp->pdu.rsp.result)
+ rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
+ else
+ l2cap_chan_del(chan, ECONNRESET);
+}
+
+void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
+{
struct l2cap_conn *conn = chan->conn;
- u16 ident = chan->ident;
- int i = 0;
+ struct l2cap_ecred_rsp_data data;
+ u16 id = chan->ident;
+ int result = 0;

- if (!ident)
+ if (!id)
return;

- BT_DBG("chan %p ident %d", chan, ident);
+ BT_DBG("chan %p id %d", chan, id);

- pdu.rsp.mtu = cpu_to_le16(chan->imtu);
- pdu.rsp.mps = cpu_to_le16(chan->mps);
- pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
- pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
+ memset(&data, 0, sizeof(data));

- mutex_lock(&conn->chan_lock);
+ data.pdu.rsp.mtu = cpu_to_le16(chan->imtu);
+ data.pdu.rsp.mps = cpu_to_le16(chan->mps);
+ data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
+ data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

- list_for_each_entry(chan, &conn->chan_l, list) {
- if (chan->ident != ident)
- continue;
+ /* Verify that all channels are ready */
+ __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

- /* Reset ident so only one response is sent */
- chan->ident = 0;
+ if (result > 0)
+ return;

- /* Include all channels pending with the same ident */
- pdu.dcid[i++] = cpu_to_le16(chan->scid);
- }
+ if (result < 0)
+ data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

- mutex_unlock(&conn->chan_lock);
+ /* Build response */
+ __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

- l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
- sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
+ l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
+ sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
+ &data.pdu);
}

void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
@@ -6078,6 +6118,7 @@ static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

chan->ident = cmd->ident;
+ chan->mode = L2CAP_MODE_EXT_FLOWCTL;

if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
l2cap_state_change(chan, BT_CONNECT2);
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 96dbb8ee2fee..ed5f68c4f1da 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -7,6 +7,7 @@

#include <linux/dsa/brcm.h>
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <linux/list.h>
#include <linux/slab.h>

@@ -248,6 +249,7 @@ static struct sk_buff *brcm_leg_tag_xmit(struct sk_buff *skb,
static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
struct net_device *dev)
{
+ int len = BRCM_LEG_TAG_LEN;
int source_port;
u8 *brcm_tag;

@@ -262,12 +264,16 @@ static struct sk_buff *brcm_leg_tag_rcv(struct sk_buff *skb,
if (!skb->dev)
return NULL;

+ /* VLAN tag is added by BCM63xx internal switch */
+ if (netdev_uses_dsa(skb->dev))
+ len += VLAN_HLEN;
+
/* Remove Broadcom tag and update checksum */
- skb_pull_rcsum(skb, BRCM_LEG_TAG_LEN);
+ skb_pull_rcsum(skb, len);

dsa_default_offload_fwd_mark(skb);

- dsa_strip_etype_header(skb, BRCM_LEG_TAG_LEN);
+ dsa_strip_etype_header(skb, len);

return skb;
}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 454c4357a297..c094963a86f1 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -552,7 +552,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
truncate = true;
}

- nhoff = skb_network_header(skb) - skb_mac_header(skb);
+ nhoff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP) &&
(ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
truncate = true;
@@ -561,7 +561,7 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
int thoff;

if (skb_transport_header_was_set(skb))
- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+ thoff = skb_transport_offset(skb);
else
thoff = nhoff + sizeof(struct ipv6hdr);
if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
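
The erspan change above (and the matching ip6_gre one below) swaps a pointer subtraction against the MAC header for skb_network_offset(), which is computed against skb->data; the MAC header offset may never have been set on this transmit path, in which case it holds a sentinel value and the subtraction yields garbage. A simplified userspace model of that failure mode (this is not the real struct sk_buff, only the offset arithmetic):

#include <stdio.h>
#include <stdint.h>

#define MAC_UNSET 0xffffU       /* sentinel for "mac header never set" */

struct fake_skb {
        uint16_t data_off;      /* offset of ->data from ->head */
        uint16_t network_off;   /* offset of the network header from ->head */
        uint16_t mac_off;       /* offset of the mac header, or MAC_UNSET */
};

int main(void)
{
        struct fake_skb skb = { .data_off = 64, .network_off = 64,
                                .mac_off = MAC_UNSET };

        int nhoff_old = skb.network_off - skb.mac_off;  /* nonsense when unset */
        int nhoff_new = skb.network_off - skb.data_off; /* 0: header is at ->data */

        printf("old nhoff = %d, new nhoff = %d\n", nhoff_old, nhoff_new);
        return 0;
}
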
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 13b1748b8b46..a91f93ec7d2b 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -959,7 +959,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
truncate = true;
}

- nhoff = skb_network_header(skb) - skb_mac_header(skb);
+ nhoff = skb_network_offset(skb);
if (skb->protocol == htons(ETH_P_IP) &&
(ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
truncate = true;
@@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
int thoff;

if (skb_transport_header_was_set(skb))
- thoff = skb_transport_header(skb) - skb_mac_header(skb);
+ thoff = skb_transport_offset(skb);
else
thoff = nhoff + sizeof(struct ipv6hdr);
if (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff)
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index d50480b31750..93691301577b 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -143,12 +143,14 @@ u16 ieee80211_select_queue_80211(struct ieee80211_sub_if_data *sdata,
u16 __ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, struct sk_buff *skb)
{
+ const struct ethhdr *eth = (void *)skb->data;
struct mac80211_qos_map *qos_map;
bool qos;

/* all mesh/ocb stations are required to support WME */
- if (sta && (sdata->vif.type == NL80211_IFTYPE_MESH_POINT ||
- sdata->vif.type == NL80211_IFTYPE_OCB))
+ if ((sdata->vif.type == NL80211_IFTYPE_MESH_POINT &&
+ !is_multicast_ether_addr(eth->h_dest)) ||
+ (sdata->vif.type == NL80211_IFTYPE_OCB && sta))
qos = true;
else if (sta)
qos = sta->sta.wme;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index efc963ab995a..6f39789d9d14 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -28,8 +28,8 @@
static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

-#define MIRRED_RECURSION_LIMIT 4
-static DEFINE_PER_CPU(unsigned int, mirred_rec_level);
+#define MIRRED_NEST_LIMIT 4
+static DEFINE_PER_CPU(unsigned int, mirred_nest_level);

static bool tcf_mirred_is_act_redirect(int action)
{
@@ -204,12 +204,19 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
return err;
}

+static bool is_mirred_nested(void)
+{
+ return unlikely(__this_cpu_read(mirred_nest_level) > 1);
+}
+
static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
int err;

if (!want_ingress)
err = tcf_dev_queue_xmit(skb, dev_queue_xmit);
+ else if (is_mirred_nested())
+ err = netif_rx(skb);
else
err = netif_receive_skb(skb);

@@ -223,7 +230,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
struct sk_buff *skb2 = skb;
bool m_mac_header_xmit;
struct net_device *dev;
- unsigned int rec_level;
+ unsigned int nest_level;
int retval, err = 0;
bool use_reinsert;
bool want_ingress;
@@ -234,11 +241,11 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
int mac_len;
bool at_nh;

- rec_level = __this_cpu_inc_return(mirred_rec_level);
- if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) {
+ nest_level = __this_cpu_inc_return(mirred_nest_level);
+ if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
netdev_name(skb->dev));
- __this_cpu_dec(mirred_rec_level);
+ __this_cpu_dec(mirred_nest_level);
return TC_ACT_SHOT;
}

@@ -308,7 +315,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
err = tcf_mirred_forward(res->ingress, skb);
if (err)
tcf_action_inc_overlimit_qstats(&m->common);
- __this_cpu_dec(mirred_rec_level);
+ __this_cpu_dec(mirred_nest_level);
return TC_ACT_CONSUMED;
}
}
@@ -320,7 +327,7 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
if (tcf_mirred_is_act_redirect(m_eaction))
retval = TC_ACT_SHOT;
}
- __this_cpu_dec(mirred_rec_level);
+ __this_cpu_dec(mirred_nest_level);

return retval;
}
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index a947cfb100bd..abd0c4557cb9 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -386,13 +386,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
rc = -EINVAL;
goto out;
}
- lock_sock(sk);
memcpy(crypto_info_aes_gcm_128->iv,
cctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
TLS_CIPHER_AES_GCM_128_IV_SIZE);
memcpy(crypto_info_aes_gcm_128->rec_seq, cctx->rec_seq,
TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
- release_sock(sk);
if (copy_to_user(optval,
crypto_info_aes_gcm_128,
sizeof(*crypto_info_aes_gcm_128)))
@@ -410,13 +408,11 @@ static int do_tls_getsockopt_conf(struct sock *sk, char __user *optval,
rc = -EINVAL;
goto out;
}
- lock_sock(sk);
memcpy(crypto_info_aes_gcm_256->iv,
cctx->iv + TLS_CIPHER_AES_GCM_256_SALT_SIZE,
TLS_CIPHER_AES_GCM_256_IV_SIZE);
memcpy(crypto_info_aes_gcm_256->rec_seq, cctx->rec_seq,
TLS_CIPHER_AES_GCM_256_REC_SEQ_SIZE);
- release_sock(sk);
if (copy_to_user(optval,
crypto_info_aes_gcm_256,
sizeof(*crypto_info_aes_gcm_256)))
@@ -436,6 +432,8 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
{
int rc = 0;

+ lock_sock(sk);
+
switch (optname) {
case TLS_TX:
case TLS_RX:
@@ -446,6 +444,9 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
rc = -ENOPROTOOPT;
break;
}
+
+ release_sock(sk);
+
return rc;
}

diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c
index f01ef6bda390..65f918d29531 100644
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -152,10 +152,11 @@ static int xdp_umem_account_pages(struct xdp_umem *umem)

static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
- u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom;
bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
- u64 npgs, addr = mr->addr, size = mr->len;
- unsigned int chunks, chunks_rem;
+ u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
+ u64 addr = mr->addr, size = mr->len;
+ u32 chunks_rem, npgs_rem;
+ u64 chunks, npgs;
int err;

if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
@@ -190,8 +191,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
if (npgs > U32_MAX)
return -EINVAL;

- chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem);
- if (chunks == 0)
+ chunks = div_u64_rem(size, chunk_size, &chunks_rem);
+ if (!chunks || chunks > U32_MAX)
return -EINVAL;

if (!unaligned_chunks && chunks_rem)
@@ -204,7 +205,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
umem->headroom = headroom;
umem->chunk_size = chunk_size;
umem->chunks = chunks;
- umem->npgs = (u32)npgs;
+ umem->npgs = npgs;
umem->pgs = NULL;
umem->user = NULL;
umem->flags = mr->flags;
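
The widening of chunks and npgs to u64 above guards against a silent truncation: the old cast of the chunk count to unsigned int could wrap a huge but otherwise well-formed registration down to a small value, whereas the new code rejects anything above U32_MAX. A stand-alone illustration with made-up sizes (2048 is the minimum chunk size accepted by the check at the top of the function):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t size = (1ULL << 43) + 5 * 2048;    /* hypothetical ~8 TiB registration */
        uint32_t chunk_size = 2048;

        uint64_t chunks = size / chunk_size;            /* what the u64 code computes */
        unsigned int truncated = (unsigned int)chunks;  /* what the old cast produced */

        printf("chunks = %llu, truncated = %u\n",
               (unsigned long long)chunks, truncated);  /* 4294967301 vs 5 */
        return 0;
}
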
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 2da4404276f0..07a0ef2baacd 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -38,9 +38,12 @@ static void cache_requested_key(struct key *key)
#ifdef CONFIG_KEYS_REQUEST_CACHE
struct task_struct *t = current;

- key_put(t->cached_requested_key);
- t->cached_requested_key = key_get(key);
- set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ /* Do not cache the key if the current task is a kernel thread */
+ if (!(t->flags & PF_KTHREAD)) {
+ key_put(t->cached_requested_key);
+ t->cached_requested_key = key_get(key);
+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+ }
#endif
}

diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index f68e2e9eef8b..a2c484c243f5 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -87,10 +87,14 @@ xfail grep -i "error" $OUTFILE

echo "Max node number check"

-echo -n > $TEMPCONF
-for i in `seq 1 1024` ; do
- echo "node$i" >> $TEMPCONF
-done
+awk '
+BEGIN {
+ for (i = 0; i < 26; i += 1)
+ printf("%c\n", 65 + i % 26)
+ for (i = 26; i < 8192; i += 1)
+ printf("%c%c%c\n", 65 + i % 26, 65 + (i / 26) % 26, 65 + (i / 26 / 26))
+}
+' > $TEMPCONF
xpass $BOOTCONF -a $TEMPCONF $INITRD

echo "badnode" >> $TEMPCONF
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index 50afa75bd45b..2a04dbec510d 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -882,6 +882,34 @@ static struct btf_raw_test raw_tests[] = {
.btf_load_err = true,
.err_str = "Invalid elem",
},
+{
+ .descr = "var after datasec, ptr followed by modifier",
+ .raw_types = {
+ /* .bss section */ /* [1] */
+ BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 2),
+ sizeof(void*)+4),
+ BTF_VAR_SECINFO_ENC(4, 0, sizeof(void*)),
+ BTF_VAR_SECINFO_ENC(6, sizeof(void*), 4),
+ /* int */ /* [2] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),
+ /* int* */ /* [3] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
+ BTF_VAR_ENC(NAME_TBD, 3, 0), /* [4] */
+ /* const int */ /* [5] */
+ BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 2),
+ BTF_VAR_ENC(NAME_TBD, 5, 0), /* [6] */
+ BTF_END_RAW,
+ },
+ .str_sec = "\0a\0b\0c\0",
+ .str_sec_size = sizeof("\0a\0b\0c\0"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = ".bss",
+ .key_size = sizeof(int),
+ .value_size = sizeof(void*)+4,
+ .key_type_id = 0,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
/* Test member exceeds the size of struct.
*
* struct A {
diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh
index d9eca227136b..22a1e4c9553a 100755
--- a/tools/testing/selftests/net/forwarding/tc_actions.sh
+++ b/tools/testing/selftests/net/forwarding/tc_actions.sh
@@ -3,7 +3,7 @@

ALL_TESTS="gact_drop_and_ok_test mirred_egress_redirect_test \
mirred_egress_mirror_test matchall_mirred_egress_mirror_test \
- gact_trap_test"
+ gact_trap_test mirred_egress_to_ingress_tcp_test"
NUM_NETIFS=4
source tc_common.sh
source lib.sh
@@ -153,6 +153,53 @@ gact_trap_test()
log_test "trap ($tcflags)"
}

+mirred_egress_to_ingress_tcp_test()
+{
+ local tmpfile=$(mktemp) tmpfile1=$(mktemp)
+
+ RET=0
+ dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$tmpfile
+ tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \
+ $tcflags ip_proto tcp src_ip 192.0.2.1 dst_ip 192.0.2.2 \
+ action ct commit nat src addr 192.0.2.2 pipe \
+ action ct clear pipe \
+ action ct commit nat dst addr 192.0.2.1 pipe \
+ action ct clear pipe \
+ action skbedit ptype host pipe \
+ action mirred ingress redirect dev $h1
+ tc filter add dev $h1 protocol ip pref 101 handle 101 egress flower \
+ $tcflags ip_proto icmp \
+ action mirred ingress redirect dev $h1
+ tc filter add dev $h1 protocol ip pref 102 handle 102 ingress flower \
+ ip_proto icmp \
+ action drop
+
+ ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $tmpfile1 &
+ local rpid=$!
+ ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$tmpfile
+ wait -n $rpid
+ cmp -s $tmpfile $tmpfile1
+ check_err $? "server output check failed"
+
+ $MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \
+ -t icmp "ping,id=42,seq=5" -q
+ tc_check_packets "dev $h1 egress" 101 10
+ check_err $? "didn't mirred redirect ICMP"
+ tc_check_packets "dev $h1 ingress" 102 10
+ check_err $? "didn't drop mirred ICMP"
+ local overlimits=$(tc_rule_stats_get ${h1} 101 egress .overlimits)
+ test ${overlimits} = 10
+ check_err $? "wrong overlimits, expected 10 got ${overlimits}"
+
+ tc filter del dev $h1 egress protocol ip pref 100 handle 100 flower
+ tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower
+ tc filter del dev $h1 ingress protocol ip pref 102 handle 102 flower
+
+ rm -f $tmpfile $tmpfile1
+ log_test "mirred_egress_to_ingress_tcp ($tcflags)"
+}
+
setup_prepare()
{
h1=${NETIFS[p1]}