Re: Linux 4.20.1
From: Greg KH
Date: Wed Jan 09 2019 - 13:30:14 EST
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index aefd358a5ca3..bb5c9dc4d270 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2096,6 +2096,9 @@
off
Disables hypervisor mitigations and doesn't
emit any warnings.
+ It also drops the swap size and available
+ RAM limit restriction on both hypervisor and
+ bare metal.
Default is 'flush'.
diff --git a/Documentation/admin-guide/l1tf.rst b/Documentation/admin-guide/l1tf.rst
index b85dd80510b0..9af977384168 100644
--- a/Documentation/admin-guide/l1tf.rst
+++ b/Documentation/admin-guide/l1tf.rst
@@ -405,6 +405,9 @@ time with the option "l1tf=". The valid arguments for this option are:
off Disables hypervisor mitigations and doesn't emit any
warnings.
+ It also drops the swap size and available RAM limit restrictions
+ on both hypervisor and bare metal.
+
============ =============================================================
The default is 'flush'. For details about L1D flushing see :ref:`l1d_flush`.
@@ -576,7 +579,8 @@ Default mitigations
The kernel default mitigations for vulnerable processors are:
- PTE inversion to protect against malicious user space. This is done
- unconditionally and cannot be controlled.
+ unconditionally and cannot be controlled. The swap storage is limited
+ to ~16TB.
- L1D conditional flushing on VMENTER when EPT is enabled for
a guest.
diff --git a/Makefile b/Makefile
index 7a2a9a175756..84d2f8deea30 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 20
-SUBLEVEL = 0
+SUBLEVEL = 1
EXTRAVERSION =
NAME = Shy Crocodile
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 6dd783557330..dadb494d83fd 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -26,6 +26,7 @@ config ARC
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
select GENERIC_PENDING_IRQ if SMP
+ select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
index 03611d50c5a9..e84544b220b9 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroidxu3-audio.dtsi
@@ -26,8 +26,7 @@
"Speakers", "SPKL",
"Speakers", "SPKR";
- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
- <&clock CLK_MOUT_EPLL>,
+ assigned-clocks = <&clock CLK_MOUT_EPLL>,
<&clock CLK_MOUT_MAU_EPLL>,
<&clock CLK_MOUT_USER_MAU_EPLL>,
<&clock_audss EXYNOS_MOUT_AUDSS>,
@@ -36,8 +35,7 @@
<&clock_audss EXYNOS_DOUT_AUD_BUS>,
<&clock_audss EXYNOS_DOUT_I2S>;
- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
- <&clock CLK_FOUT_EPLL>,
+ assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
<&clock CLK_MOUT_EPLL>,
<&clock CLK_MOUT_MAU_EPLL>,
<&clock CLK_MAU_EPLL>,
@@ -48,7 +46,6 @@
<0>,
<0>,
<0>,
- <0>,
<196608001>,
<(196608002 / 2)>,
<196608000>;
@@ -84,4 +81,6 @@
&i2s0 {
status = "okay";
+ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
+ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
};
diff --git a/arch/arm/boot/dts/exynos5422-odroidxu4.dts b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
index 4a30cc849b00..122174ea9e0a 100644
--- a/arch/arm/boot/dts/exynos5422-odroidxu4.dts
+++ b/arch/arm/boot/dts/exynos5422-odroidxu4.dts
@@ -33,8 +33,7 @@
compatible = "samsung,odroid-xu3-audio";
model = "Odroid-XU4";
- assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>,
- <&clock CLK_MOUT_EPLL>,
+ assigned-clocks = <&clock CLK_MOUT_EPLL>,
<&clock CLK_MOUT_MAU_EPLL>,
<&clock CLK_MOUT_USER_MAU_EPLL>,
<&clock_audss EXYNOS_MOUT_AUDSS>,
@@ -43,8 +42,7 @@
<&clock_audss EXYNOS_DOUT_AUD_BUS>,
<&clock_audss EXYNOS_DOUT_I2S>;
- assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>,
- <&clock CLK_FOUT_EPLL>,
+ assigned-clock-parents = <&clock CLK_FOUT_EPLL>,
<&clock CLK_MOUT_EPLL>,
<&clock CLK_MOUT_MAU_EPLL>,
<&clock CLK_MAU_EPLL>,
@@ -55,7 +53,6 @@
<0>,
<0>,
<0>,
- <0>,
<196608001>,
<(196608002 / 2)>,
<196608000>;
@@ -79,6 +76,8 @@
&i2s0 {
status = "okay";
+ assigned-clocks = <&i2s0 CLK_I2S_RCLK_SRC>;
+ assigned-clock-parents = <&clock_audss EXYNOS_SCLK_I2S>;
};
&pwm {
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 6f602af5263c..2dafd936d84d 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -104,7 +104,7 @@
TCR_EL2_ORGN0_MASK | TCR_EL2_IRGN0_MASK | TCR_EL2_T0SZ_MASK)
/* VTCR_EL2 Registers bits */
-#define VTCR_EL2_RES1 (1 << 31)
+#define VTCR_EL2_RES1 (1U << 31)
#define VTCR_EL2_HD (1 << 22)
#define VTCR_EL2_HA (1 << 21)
#define VTCR_EL2_PS_SHIFT TCR_EL2_PS_SHIFT
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index b13ca091f833..85d5c1026204 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -40,8 +40,9 @@
* The following SVCs are ARM private.
*/
#define __ARM_NR_COMPAT_BASE 0x0f0000
-#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
+#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE + 2)
+#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
+#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
#define __NR_compat_syscalls 399
#endif
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
index 32653d156747..bc348ab3dd6b 100644
--- a/arch/arm64/kernel/sys_compat.c
+++ b/arch/arm64/kernel/sys_compat.c
@@ -66,12 +66,11 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
/*
* Handle all unrecognised system calls.
*/
-long compat_arm_syscall(struct pt_regs *regs)
+long compat_arm_syscall(struct pt_regs *regs, int scno)
{
- unsigned int no = regs->regs[7];
void __user *addr;
- switch (no) {
+ switch (scno) {
/*
* Flush a region from virtual address 'r0' to virtual address 'r1'
* _exclusive_. There is no alignment requirement on either address;
@@ -102,12 +101,12 @@ long compat_arm_syscall(struct pt_regs *regs)
default:
/*
- * Calls 9f00xx..9f07ff are defined to return -ENOSYS
+ * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
* if not implemented, rather than raising SIGILL. This
* way the calling program can gracefully determine whether
* a feature is supported.
*/
- if ((no & 0xffff) <= 0x7ff)
+ if (scno < __ARM_NR_COMPAT_END)
return -ENOSYS;
break;
}
@@ -116,6 +115,6 @@ long compat_arm_syscall(struct pt_regs *regs)
(compat_thumb_mode(regs) ? 2 : 4);
arm64_notify_die("Oops - bad compat syscall(2)", regs,
- SIGILL, ILL_ILLTRP, addr, no);
+ SIGILL, ILL_ILLTRP, addr, scno);
return 0;
}
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 032d22312881..5610ac01c1ec 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -13,16 +13,15 @@
#include <asm/thread_info.h>
#include <asm/unistd.h>
-long compat_arm_syscall(struct pt_regs *regs);
-
+long compat_arm_syscall(struct pt_regs *regs, int scno);
long sys_ni_syscall(void);
-asmlinkage long do_ni_syscall(struct pt_regs *regs)
+static long do_ni_syscall(struct pt_regs *regs, int scno)
{
#ifdef CONFIG_COMPAT
long ret;
if (is_compat_task()) {
- ret = compat_arm_syscall(regs);
+ ret = compat_arm_syscall(regs, scno);
if (ret != -ENOSYS)
return ret;
}
@@ -47,7 +46,7 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
syscall_fn = syscall_table[array_index_nospec(scno, sc_nr)];
ret = __invoke_syscall(regs, syscall_fn);
} else {
- ret = do_ni_syscall(regs);
+ ret = do_ni_syscall(regs, scno);
}
regs->regs[0] = ret;
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 4dbd9c69a96d..7fcc9c1a5f45 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,14 +15,19 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/irqflags.h>
+
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/tlbflush.h>
-static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
+ unsigned long *flags)
{
u64 val;
+ local_irq_save(*flags);
+
/*
* With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
* most TLB operations target EL2/EL0. In order to affect the
@@ -37,7 +42,8 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm)
isb();
}
-static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
+ unsigned long *flags)
{
__load_guest_stage2(kvm);
isb();
@@ -48,7 +54,8 @@ static hyp_alternate_select(__tlb_switch_to_guest,
__tlb_switch_to_guest_vhe,
ARM64_HAS_VIRT_HOST_EXTN);
-static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
+ unsigned long flags)
{
/*
* We're done with the TLB operation, let's restore the host's
@@ -56,9 +63,12 @@ static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm)
*/
write_sysreg(0, vttbr_el2);
write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ isb();
+ local_irq_restore(flags);
}
-static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm)
+static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
+ unsigned long flags)
{
write_sysreg(0, vttbr_el2);
}
@@ -70,11 +80,13 @@ static hyp_alternate_select(__tlb_switch_to_host,
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
+ unsigned long flags;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &flags);
/*
* We could do so much better if we had the VA as well.
@@ -117,36 +129,39 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
if (!has_vhe() && icache_is_vpipt())
__flush_icache_all();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, flags);
}
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
+ unsigned long flags;
+
dsb(ishst);
/* Switch to requested VMID */
kvm = kern_hyp_va(kvm);
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &flags);
__tlbi(vmalls12e1is);
dsb(ish);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, flags);
}
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
+ unsigned long flags;
/* Switch to requested VMID */
- __tlb_switch_to_guest()(kvm);
+ __tlb_switch_to_guest()(kvm, &flags);
__tlbi(vmalle1);
dsb(nsh);
isb();
- __tlb_switch_to_host()(kvm);
+ __tlb_switch_to_host()(kvm, flags);
}
void __hyp_text __kvm_flush_vm_context(void)
diff --git a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
index 37fe58c19a90..542c3ede9722 100644
--- a/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
+++ b/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c
@@ -13,6 +13,7 @@
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
+#include "../../../../include/linux/sizes.h"
int main(int argc, char *argv[])
{
@@ -45,11 +46,11 @@ int main(int argc, char *argv[])
vmlinuz_load_addr = vmlinux_load_addr + vmlinux_size;
/*
- * Align with 16 bytes: "greater than that used for any standard data
- * types by a MIPS compiler." -- See MIPS Run Linux (Second Edition).
+ * Align with 64KB: KEXEC needs load sections to be aligned to PAGE_SIZE,
+ * which may be as large as 64KB depending on the kernel configuration.
*/
- vmlinuz_load_addr += (16 - vmlinux_size % 16);
+ vmlinuz_load_addr += (SZ_64K - vmlinux_size % SZ_64K);
printf("0x%llx\n", vmlinuz_load_addr);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 6c79e8a16a26..3ddbb98dff84 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -286,7 +286,8 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
case 3:
return CVMX_HELPER_INTERFACE_MODE_LOOP;
case 4:
- return CVMX_HELPER_INTERFACE_MODE_RGMII;
+ /* TODO: Implement support for AGL (RGMII). */
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
default:
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
}
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index d4ea7a5b60cf..9e805317847d 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -306,7 +306,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
- if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ if (kernel_uses_llsc) { \
long temp; \
\
__asm__ __volatile__( \
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index a41059d47d31..ed7ffe4e63a3 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -50,7 +50,7 @@ struct guest_info {
#define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
struct cpuinfo_mips {
- unsigned long asid_cache;
+ u64 asid_cache;
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
unsigned long asid_mask;
#endif
diff --git a/arch/mips/include/asm/mach-loongson64/mmzone.h b/arch/mips/include/asm/mach-loongson64/mmzone.h
index c9f7e231e66b..59c8b11c090e 100644
--- a/arch/mips/include/asm/mach-loongson64/mmzone.h
+++ b/arch/mips/include/asm/mach-loongson64/mmzone.h
@@ -21,6 +21,7 @@
#define NODE3_ADDRSPACE_OFFSET 0x300000000000UL
#define pa_to_nid(addr) (((addr) & 0xf00000000000) >> NODE_ADDRSPACE_SHIFT)
+#define nid_to_addrbase(nid) ((nid) << NODE_ADDRSPACE_SHIFT)
#define LEVELS_PER_SLICE 128
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 0740be7d5d4a..24d6b42345fb 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -7,7 +7,7 @@
#include <linux/wait.h>
typedef struct {
- unsigned long asid[NR_CPUS];
+ u64 asid[NR_CPUS];
void *vdso;
atomic_t fp_mode_switching;
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 94414561de0e..a589585be21b 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -76,14 +76,14 @@ extern unsigned long pgd_current[];
* All unused by hardware upper bits will be considered
* as a software asid extension.
*/
-static unsigned long asid_version_mask(unsigned int cpu)
+static inline u64 asid_version_mask(unsigned int cpu)
{
unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
- return ~(asid_mask | (asid_mask - 1));
+ return ~(u64)(asid_mask | (asid_mask - 1));
}
-static unsigned long asid_first_version(unsigned int cpu)
+static inline u64 asid_first_version(unsigned int cpu)
{
return ~asid_version_mask(cpu) + 1;
}
@@ -102,14 +102,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
- unsigned long asid = asid_cache(cpu);
+ u64 asid = asid_cache(cpu);
if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
if (cpu_has_vtag_icache)
flush_icache_all();
local_flush_tlb_all(); /* start new asid cycle */
- if (!asid) /* fix version if needed */
- asid = asid_first_version(cpu);
}
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
diff --git a/arch/mips/include/asm/mmzone.h b/arch/mips/include/asm/mmzone.h
index f085fba41da5..b826b8473e95 100644
--- a/arch/mips/include/asm/mmzone.h
+++ b/arch/mips/include/asm/mmzone.h
@@ -7,7 +7,18 @@
#define _ASM_MMZONE_H_
#include <asm/page.h>
-#include <mmzone.h>
+
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+# include <mmzone.h>
+#endif
+
+#ifndef pa_to_nid
+#define pa_to_nid(addr) 0
+#endif
+
+#ifndef nid_to_addrbase
+#define nid_to_addrbase(nid) 0
+#endif
#ifdef CONFIG_DISCONTIGMEM
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index 0036ea0c7173..93a9dce31f25 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -265,6 +265,11 @@ static inline int pmd_bad(pmd_t pmd)
static inline int pmd_present(pmd_t pmd)
{
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
+ return pmd_val(pmd) & _PAGE_PRESENT;
+#endif
+
return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index d19b2d65336b..7f4a32d3345a 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -20,6 +20,7 @@
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
+#include <asm/mmzone.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */
extern void (*r4k_blast_dcache)(void);
@@ -674,4 +675,25 @@ __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
+/* Currently, this is very specific to Loongson-3 */
+#define __BUILD_BLAST_CACHE_NODE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize##_node(long node) \
+{ \
+ unsigned long start = CAC_BASE | nid_to_addrbase(node); \
+ unsigned long end = start + current_cpu_data.desc.waysize; \
+ unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit; \
+ unsigned long ws_end = current_cpu_data.desc.ways << \
+ current_cpu_data.desc.waybit; \
+ unsigned long ws, addr; \
+ \
+ for (ws = 0; ws < ws_end; ws += ws_inc) \
+ for (addr = start; addr < end; addr += lsize * 32) \
+ cache##lsize##_unroll32(addr|ws, indexop); \
+}
+
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
+__BUILD_BLAST_CACHE_NODE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
+
#endif /* _ASM_R4KCACHE_H */
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 48a9c6b90e07..9df3ebdc7b0f 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -126,8 +126,8 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
/* Map delay slot emulation page */
base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
- VM_READ|VM_WRITE|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
0, NULL);
if (IS_ERR_VALUE(base)) {
ret = base;
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 5450f4d1c920..e2d46cb93ca9 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -214,8 +214,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
{
int isa16 = get_isa16_mode(regs->cp0_epc);
mips_instruction break_math;
- struct emuframe __user *fr;
- int err, fr_idx;
+ unsigned long fr_uaddr;
+ struct emuframe fr;
+ int fr_idx, ret;
/* NOP is easy */
if (ir == 0)
@@ -250,27 +251,31 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
fr_idx = alloc_emuframe();
if (fr_idx == BD_EMUFRAME_NONE)
return SIGBUS;
- fr = &dsemul_page()[fr_idx];
/* Retrieve the appropriately encoded break instruction */
break_math = BREAK_MATH(isa16);
/* Write the instructions to the frame */
if (isa16) {
- err = __put_user(ir >> 16,
- (u16 __user *)(&fr->emul));
- err |= __put_user(ir & 0xffff,
- (u16 __user *)((long)(&fr->emul) + 2));
- err |= __put_user(break_math >> 16,
- (u16 __user *)(&fr->badinst));
- err |= __put_user(break_math & 0xffff,
- (u16 __user *)((long)(&fr->badinst) + 2));
+ union mips_instruction _emul = {
+ .halfword = { ir >> 16, ir }
+ };
+ union mips_instruction _badinst = {
+ .halfword = { break_math >> 16, break_math }
+ };
+
+ fr.emul = _emul.word;
+ fr.badinst = _badinst.word;
} else {
- err = __put_user(ir, &fr->emul);
- err |= __put_user(break_math, &fr->badinst);
+ fr.emul = ir;
+ fr.badinst = break_math;
}
- if (unlikely(err)) {
+ /* Write the frame to user memory */
+ fr_uaddr = (unsigned long)&dsemul_page()[fr_idx];
+ ret = access_process_vm(current, fr_uaddr, &fr, sizeof(fr),
+ FOLL_FORCE | FOLL_WRITE);
+ if (unlikely(ret != sizeof(fr))) {
MIPS_FPU_EMU_INC_STATS(errors);
free_emuframe(fr_idx, current->mm);
return SIGBUS;
@@ -282,10 +287,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
atomic_set(&current->thread.bd_emu_frame, fr_idx);
/* Change user register context to execute the frame */
- regs->cp0_epc = (unsigned long)&fr->emul | isa16;
-
- /* Ensure the icache observes our newly written frame */
- flush_cache_sigtramp((unsigned long)&fr->emul);
+ regs->cp0_epc = fr_uaddr | isa16;
return 0;
}
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 3466fcdae0ca..01848cdf2074 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -245,7 +245,7 @@ static void r3k_flush_cache_page(struct vm_area_struct *vma,
pmd_t *pmdp;
pte_t *ptep;
- pr_debug("cpage[%08lx,%08lx]\n",
+ pr_debug("cpage[%08llx,%08lx]\n",
cpu_context(smp_processor_id(), mm), addr);
/* No ASID => no such page in the cache. */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 05bd77727fb9..2a6ad461286f 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -459,11 +459,28 @@ static void r4k_blast_scache_setup(void)
r4k_blast_scache = blast_scache128;
}
+static void (*r4k_blast_scache_node)(long node);
+
+static void r4k_blast_scache_node_setup(void)
+{
+ unsigned long sc_lsize = cpu_scache_line_size();
+
+ if (current_cpu_type() != CPU_LOONGSON3)
+ r4k_blast_scache_node = (void *)cache_noop;
+ else if (sc_lsize == 16)
+ r4k_blast_scache_node = blast_scache16_node;
+ else if (sc_lsize == 32)
+ r4k_blast_scache_node = blast_scache32_node;
+ else if (sc_lsize == 64)
+ r4k_blast_scache_node = blast_scache64_node;
+ else if (sc_lsize == 128)
+ r4k_blast_scache_node = blast_scache128_node;
+}
+
static inline void local_r4k___flush_cache_all(void * args)
{
switch (current_cpu_type()) {
case CPU_LOONGSON2:
- case CPU_LOONGSON3:
case CPU_R4000SC:
case CPU_R4000MC:
case CPU_R4400SC:
@@ -480,6 +497,11 @@ static inline void local_r4k___flush_cache_all(void * args)
r4k_blast_scache();
break;
+ case CPU_LOONGSON3:
+ /* Use get_ebase_cpunum() for both NUMA=y/n */
+ r4k_blast_scache_node(get_ebase_cpunum() >> 2);
+ break;
+
case CPU_BMIPS5000:
r4k_blast_scache();
__sync();
@@ -840,10 +862,14 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
preempt_disable();
if (cpu_has_inclusive_pcaches) {
- if (size >= scache_size)
- r4k_blast_scache();
- else
+ if (size >= scache_size) {
+ if (current_cpu_type() != CPU_LOONGSON3)
+ r4k_blast_scache();
+ else
+ r4k_blast_scache_node(pa_to_nid(addr));
+ } else {
blast_scache_range(addr, addr + size);
+ }
preempt_enable();
__sync();
return;
@@ -877,9 +903,12 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
preempt_disable();
if (cpu_has_inclusive_pcaches) {
- if (size >= scache_size)
- r4k_blast_scache();
- else {
+ if (size >= scache_size) {
+ if (current_cpu_type() != CPU_LOONGSON3)
+ r4k_blast_scache();
+ else
+ r4k_blast_scache_node(pa_to_nid(addr));
+ } else {
/*
* There is no clearly documented alignment requirement
* for the cache instruction on MIPS processors and
@@ -1918,6 +1947,7 @@ void r4k_cache_init(void)
r4k_blast_scache_page_setup();
r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup();
+ r4k_blast_scache_node_setup();
#ifdef CONFIG_EVA
r4k_blast_dcache_user_page_setup();
r4k_blast_icache_user_page_setup();
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 2d7cffcaa476..059187a3ded7 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -512,8 +512,8 @@ static void __init map_pages(unsigned long start_vaddr,
void __init set_kernel_text_rw(int enable_read_write)
{
- unsigned long start = (unsigned long)__init_begin;
- unsigned long end = (unsigned long)_etext;
+ unsigned long start = (unsigned long) _text;
+ unsigned long end = (unsigned long) &data_start;
map_pages(start, __pa(start), end-start,
PAGE_KERNEL_RWX, enable_read_write ? 1:0);
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index f6f469fc4073..1b395b85132b 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -22,7 +22,7 @@ enum count_cache_flush_type {
COUNT_CACHE_FLUSH_SW = 0x2,
COUNT_CACHE_FLUSH_HW = 0x4,
};
-static enum count_cache_flush_type count_cache_flush_type;
+static enum count_cache_flush_type count_cache_flush_type = COUNT_CACHE_FLUSH_NONE;
bool barrier_nospec_enabled;
static bool no_nospec;
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index e6474a45cef5..6327fd79b0fb 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1140,11 +1140,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
{
struct rt_sigframe __user *rt_sf;
struct pt_regs *regs = current_pt_regs();
+ int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
struct ucontext __user *uc_transact;
unsigned long msr_hi;
unsigned long tmp;
- int tm_restore = 0;
#endif
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
@@ -1192,11 +1192,19 @@ SYSCALL_DEFINE0(rt_sigreturn)
goto bad;
}
}
- if (!tm_restore)
- /* Fall through, for non-TM restore */
+ if (!tm_restore) {
+ /*
+ * Unset regs->msr because ucontext MSR TS is not
+ * set, and recheckpoint was not called. This avoid
+ * hitting a TM Bad thing at RFID
+ */
+ regs->msr &= ~MSR_TS_MASK;
+ }
+ /* Fall through, for non-TM restore */
#endif
- if (do_setcontext(&rt_sf->uc, regs, 1))
- goto bad;
+ if (!tm_restore)
+ if (do_setcontext(&rt_sf->uc, regs, 1))
+ goto bad;
/*
* It's not clear whether or why it is desirable to save the
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 83d51bf586c7..daa28cb72272 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -740,11 +740,23 @@ SYSCALL_DEFINE0(rt_sigreturn)
&uc_transact->uc_mcontext))
goto badframe;
}
- else
- /* Fall through, for non-TM restore */
#endif
- if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
- goto badframe;
+ /* Fall through, for non-TM restore */
+ if (!MSR_TM_ACTIVE(msr)) {
+ /*
+ * Unset MSR[TS] on the thread regs since MSR from user
+ * context does not have MSR active, and recheckpoint was
+ * not called since restore_tm_sigcontexts() was not called
+ * also.
+ *
+ * If not unsetting it, the code can RFID to userspace with
+ * MSR[TS] set, but without CPU in the proper state,
+ * causing a TM bad thing.
+ */
+ current->thread.regs->msr &= ~MSR_TS_MASK;
+ if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
+ goto badframe;
+ }
if (restore_altstack(&uc->uc_stack))
goto badframe;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c615617e78ac..a18afda3d0f0 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -743,12 +743,15 @@ void kvmppc_rmap_reset(struct kvm *kvm)
srcu_idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
kvm_for_each_memslot(memslot, slots) {
+ /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ spin_lock(&kvm->mmu_lock);
/*
* This assumes it is acceptable to lose reference and
* change bits across a reset.
*/
memset(memslot->arch.rmap, 0,
memslot->npages * sizeof(*memslot->arch.rmap));
+ spin_unlock(&kvm->mmu_lock);
}
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index a56f8413758a..ab43306c4ea1 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4532,12 +4532,15 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
{
if (nesting_enabled(kvm))
kvmhv_release_all_nested(kvm);
+ kvmppc_rmap_reset(kvm);
+ kvm->arch.process_table = 0;
+ /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ spin_lock(&kvm->mmu_lock);
+ kvm->arch.radix = 0;
+ spin_unlock(&kvm->mmu_lock);
kvmppc_free_radix(kvm);
kvmppc_update_lpcr(kvm, LPCR_VPM1,
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
- kvmppc_rmap_reset(kvm);
- kvm->arch.radix = 0;
- kvm->arch.process_table = 0;
return 0;
}
@@ -4549,12 +4552,14 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
err = kvmppc_init_vm_radix(kvm);
if (err)
return err;
-
+ kvmppc_rmap_reset(kvm);
+ /* Mutual exclusion with kvm_unmap_hva_range etc. */
+ spin_lock(&kvm->mmu_lock);
+ kvm->arch.radix = 1;
+ spin_unlock(&kvm->mmu_lock);
kvmppc_free_hpt(&kvm->arch.hpt);
kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
- kvmppc_rmap_reset(kvm);
- kvm->arch.radix = 1;
return 0;
}
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 19b2d2a9b43d..eeb7450db18c 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -436,7 +436,7 @@ int clp_get_state(u32 fid, enum zpci_state *state)
struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
int rc;
- rrb = clp_alloc_block(GFP_KERNEL);
+ rrb = clp_alloc_block(GFP_ATOMIC);
if (!rrb)
return -ENOMEM;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fbda5a917c5b..e5c0174e330e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1492,7 +1492,7 @@ asmlinkage void kvm_spurious_fault(void);
"cmpb $0, kvm_rebooting \n\t" \
"jne 668b \n\t" \
__ASM_SIZE(push) " $666b \n\t" \
- "call kvm_spurious_fault \n\t" \
+ "jmp kvm_spurious_fault \n\t" \
".popsection \n\t" \
_ASM_EXTABLE(666b, 667b)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 500278f5308e..362f3cde6a31 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -1002,7 +1002,8 @@ static void __init l1tf_select_mitigation(void)
#endif
half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
- if (e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
+ if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
+ e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
half_pa);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8d5d984541be..95784bc4a53c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8031,13 +8031,16 @@ static __init int hardware_setup(void)
kvm_mce_cap_supported |= MCG_LMCE_P;
- return alloc_kvm_area();
+ r = alloc_kvm_area();
+ if (r)
+ goto out;
+ return 0;
out:
for (i = 0; i < VMX_BITMAP_NR; i++)
free_page((unsigned long)vmx_bitmap[i]);
- return r;
+ return r;
}
static __exit void hardware_unsetup(void)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ef99f3892e1f..427a955a2cf2 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -931,7 +931,7 @@ unsigned long max_swapfile_size(void)
pages = generic_max_swapfile_size();
- if (boot_cpu_has_bug(X86_BUG_L1TF)) {
+ if (boot_cpu_has_bug(X86_BUG_L1TF) && l1tf_mitigation != L1TF_MITIGATION_OFF) {
/* Limit the swap file size to MAX_PA/2 for L1TF workaround */
unsigned long long l1tf_limit = l1tf_pfn_limit();
/*
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 5fab264948c2..de95db8ac52f 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -584,7 +584,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
paddr_end,
page_size_mask,
prot);
- __flush_tlb_all();
continue;
}
/*
@@ -627,7 +626,6 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
pud_populate(&init_mm, pud, pmd);
spin_unlock(&init_mm.page_table_lock);
}
- __flush_tlb_all();
update_page_count(PG_LEVEL_1G, pages);
@@ -668,7 +666,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
paddr_last = phys_pud_init(pud, paddr,
paddr_end,
page_size_mask);
- __flush_tlb_all();
continue;
}
@@ -680,7 +677,6 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end,
p4d_populate(&init_mm, p4d, pud);
spin_unlock(&init_mm.page_table_lock);
}
- __flush_tlb_all();
return paddr_last;
}
@@ -733,8 +729,6 @@ kernel_physical_mapping_init(unsigned long paddr_start,
if (pgd_changed)
sync_global_pgds(vaddr_start, vaddr_end - 1);
- __flush_tlb_all();
-
return paddr_last;
}
diff --git a/crypto/cfb.c b/crypto/cfb.c
index 20987d0e09d8..e81e45673498 100644
--- a/crypto/cfb.c
+++ b/crypto/cfb.c
@@ -144,7 +144,7 @@ static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk,
do {
crypto_cfb_encrypt_one(tfm, iv, dst);
- crypto_xor(dst, iv, bsize);
+ crypto_xor(dst, src, bsize);
iv = src;
src += bsize;
diff --git a/crypto/tcrypt.c b/crypto/tcrypt.c
index c20c9f5c18f2..1026173d721a 100644
--- a/crypto/tcrypt.c
+++ b/crypto/tcrypt.c
@@ -1736,6 +1736,7 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
ret += tcrypt_test("ctr(aes)");
ret += tcrypt_test("rfc3686(ctr(aes))");
ret += tcrypt_test("ofb(aes)");
+ ret += tcrypt_test("cfb(aes)");
break;
case 11:
@@ -2060,6 +2061,10 @@ static int do_test(const char *alg, u32 type, u32 mask, int m, u32 num_mb)
speed_template_16_24_32);
test_cipher_speed("ctr(aes)", DECRYPT, sec, NULL, 0,
speed_template_16_24_32);
+ test_cipher_speed("cfb(aes)", ENCRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
+ test_cipher_speed("cfb(aes)", DECRYPT, sec, NULL, 0,
+ speed_template_16_24_32);
break;
case 201:
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index b1f79c6bf409..84937ceb4bd8 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -2690,6 +2690,13 @@ static const struct alg_test_desc alg_test_descs[] = {
.dec = __VECS(aes_ccm_dec_tv_template)
}
}
+ }, {
+ .alg = "cfb(aes)",
+ .test = alg_test_skcipher,
+ .fips_allowed = 1,
+ .suite = {
+ .cipher = __VECS(aes_cfb_tv_template)
+ },
}, {
.alg = "chacha20",
.test = alg_test_skcipher,
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 1fe7b97ba03f..b5b0d29761ce 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -11449,6 +11449,82 @@ static const struct cipher_testvec aes_cbc_tv_template[] = {
},
};
+static const struct cipher_testvec aes_cfb_tv_template[] = {
+ { /* From NIST SP800-38A */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
+ "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
+ "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
+ "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
+ "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
+ "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
+ .len = 64,
+ }, {
+ .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ .klen = 24,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
+ "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
+ "\x67\xce\x7f\x7f\x81\x17\x36\x21"
+ "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
+ "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
+ "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
+ "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
+ "\x42\xae\x8f\xba\x58\x4b\x09\xff",
+ .len = 64,
+ }, {
+ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ .klen = 32,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
+ "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
+ "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
+ "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
+ "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
+ "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
+ "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
+ "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
+ .len = 64,
+ },
+};
+
static const struct aead_testvec hmac_md5_ecb_cipher_null_enc_tv_template[] = {
{ /* Input data from RFC 2410 Case 1 */
#ifdef __LITTLE_ENDIAN
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 9f1000d2a40c..b834ee335d9a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -72,6 +72,7 @@
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
+#include <linux/task_work.h>
#include <uapi/linux/android/binder.h>
@@ -2160,6 +2161,64 @@ static bool binder_validate_fixup(struct binder_buffer *b,
return (fixup_offset >= last_min_offset);
}
+/**
+ * struct binder_task_work_cb - for deferred close
+ *
+ * @twork: callback_head for task work
+ * @fd: fd to close
+ *
+ * Structure to pass task work to be handled after
+ * returning from binder_ioctl() via task_work_add().
+ */
+struct binder_task_work_cb {
+ struct callback_head twork;
+ struct file *file;
+};
+
+/**
+ * binder_do_fd_close() - close list of file descriptors
+ * @twork: callback head for task work
+ *
+ * It is not safe to call ksys_close() during the binder_ioctl()
+ * function if there is a chance that binder's own file descriptor
+ * might be closed. This is to meet the requirements for using
+ * fdget() (see comments for __fget_light()). Therefore use
+ * task_work_add() to schedule the close operation once we have
+ * returned from binder_ioctl(). This function is a callback
+ * for that mechanism and does the actual ksys_close() on the
+ * given file descriptor.
+ */
+static void binder_do_fd_close(struct callback_head *twork)
+{
+ struct binder_task_work_cb *twcb = container_of(twork,
+ struct binder_task_work_cb, twork);
+
+ fput(twcb->file);
+ kfree(twcb);
+}
+
+/**
+ * binder_deferred_fd_close() - schedule a close for the given file-descriptor
+ * @fd: file-descriptor to close
+ *
+ * See comments in binder_do_fd_close(). This function is used to schedule
+ * a file-descriptor to be closed after returning from binder_ioctl().
+ */
+static void binder_deferred_fd_close(int fd)
+{
+ struct binder_task_work_cb *twcb;
+
+ twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
+ if (!twcb)
+ return;
+ init_task_work(&twcb->twork, binder_do_fd_close);
+ __close_fd_get_file(fd, &twcb->file);
+ if (twcb->file)
+ task_work_add(current, &twcb->twork, true);
+ else
+ kfree(twcb);
+}
+
static void binder_transaction_buffer_release(struct binder_proc *proc,
struct binder_buffer *buffer,
binder_size_t *failed_at)
@@ -2299,7 +2358,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
}
fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
- ksys_close(fd_array[fd_index]);
+ binder_deferred_fd_close(fd_array[fd_index]);
} break;
default:
pr_err("transaction release %d bad object type %x\n",
@@ -3912,7 +3971,7 @@ static int binder_apply_fd_fixups(struct binder_transaction *t)
} else if (ret) {
u32 *fdp = (u32 *)(t->buffer->data + fixup->offset);
- ksys_close(*fdp);
+ binder_deferred_fd_close(*fdp);
}
list_del(&fixup->fixup_entry);
kfree(fixup);
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index f39a920496fb..8da314b81eab 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -368,14 +368,16 @@ void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nvec)
{
struct platform_msi_priv_data *data = domain->host_data;
- struct msi_desc *desc;
- for_each_msi_entry(desc, data->dev) {
+ struct msi_desc *desc, *tmp;
+ for_each_msi_entry_safe(desc, tmp, data->dev) {
if (WARN_ON(!desc->irq || desc->nvec_used != 1))
return;
if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
continue;
irq_domain_free_irqs_common(domain, desc->irq, 1);
+ list_del(&desc->list);
+ free_msi_entry(desc);
}
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 129f640424b7..95db630dd722 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -477,13 +477,15 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
if (need_locality) {
rc = tpm_request_locality(chip, flags);
- if (rc < 0)
- goto out_no_locality;
+ if (rc < 0) {
+ need_locality = false;
+ goto out_locality;
+ }
}
rc = tpm_cmd_ready(chip, flags);
if (rc)
- goto out;
+ goto out_locality;
rc = tpm2_prepare_space(chip, space, ordinal, buf);
if (rc)
@@ -547,14 +549,13 @@ static ssize_t tpm_try_transmit(struct tpm_chip *chip,
dev_err(&chip->dev, "tpm2_commit_space: error %d\n", rc);
out:
- rc = tpm_go_idle(chip, flags);
- if (rc)
- goto out;
+ /* may fail but do not override previous error value in rc */
+ tpm_go_idle(chip, flags);
+out_locality:
if (need_locality)
tpm_relinquish_locality(chip, flags);
-out_no_locality:
if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, false);
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index caa86b19c76d..f74f451baf6a 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -369,6 +369,7 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
u32 ordinal;
+ unsigned long duration;
size_t count = 0;
int burst_count, bytes2write, retries, rc = -EIO;
@@ -455,10 +456,12 @@ static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
return rc;
}
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
- rc = i2c_nuvoton_wait_for_data_avail(chip,
- tpm_calc_ordinal_duration(chip,
- ordinal),
- &priv->read_queue);
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ duration = tpm2_calc_ordinal_duration(chip, ordinal);
+ else
+ duration = tpm_calc_ordinal_duration(chip, ordinal);
+
+ rc = i2c_nuvoton_wait_for_data_avail(chip, duration, &priv->read_queue);
if (rc) {
dev_err(dev, "%s() timeout command duration\n", __func__);
i2c_nuvoton_ready(chip);
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index fa25e35ce7d5..08b42b053fce 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -382,7 +382,7 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 13, GFLAGS),
- COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pll", CLK_SET_RATE_PARENT,
+ COMPOSITE_FRACMUX(0, "spdif_frac", "spdif_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(9), 0,
RK2928_CLKGATE_CON(0), 14, GFLAGS,
&common_spdif_fracmux),
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index 6fe3c14f7b2d..424d8635b053 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -19,6 +19,17 @@ struct _ccu_nm {
unsigned long m, min_m, max_m;
};
+static unsigned long ccu_nm_calc_rate(unsigned long parent,
+ unsigned long n, unsigned long m)
+{
+ u64 rate = parent;
+
+ rate *= n;
+ do_div(rate, m);
+
+ return rate;
+}
+
static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
struct _ccu_nm *nm)
{
@@ -28,7 +39,8 @@ static void ccu_nm_find_best(unsigned long parent, unsigned long rate,
for (_n = nm->min_n; _n <= nm->max_n; _n++) {
for (_m = nm->min_m; _m <= nm->max_m; _m++) {
- unsigned long tmp_rate = parent * _n / _m;
+ unsigned long tmp_rate = ccu_nm_calc_rate(parent,
+ _n, _m);
if (tmp_rate > rate)
continue;
@@ -100,7 +112,7 @@ static unsigned long ccu_nm_recalc_rate(struct clk_hw *hw,
if (ccu_sdm_helper_is_enabled(&nm->common, &nm->sdm))
rate = ccu_sdm_helper_read_rate(&nm->common, &nm->sdm, m, n);
else
- rate = parent_rate * n / m;
+ rate = ccu_nm_calc_rate(parent_rate, n, m);
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= nm->fixed_post_div;
@@ -149,7 +161,7 @@ static long ccu_nm_round_rate(struct clk_hw *hw, unsigned long rate,
_nm.max_m = nm->m.max ?: 1 << nm->m.width;
ccu_nm_find_best(*parent_rate, rate, &_nm);
- rate = *parent_rate * _nm.n / _nm.m;
+ rate = ccu_nm_calc_rate(*parent_rate, _nm.n, _nm.m);
if (nm->common.features & CCU_FEATURE_FIXED_POSTDIV)
rate /= nm->fixed_post_div;
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 55c77e44bb2d..d9c8a779dd7d 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -290,6 +290,7 @@ config CLKSRC_MPS2
config ARC_TIMERS
bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
+ depends on GENERIC_SCHED_CLOCK
select TIMER_OF
help
These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index 20da9b1d7f7d..b28970ca4a7a 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -23,6 +23,7 @@
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
#include <soc/arc/timers.h>
#include <soc/arc/mcip.h>
@@ -88,6 +89,11 @@ static u64 arc_read_gfrc(struct clocksource *cs)
return (((u64)h) << 32) | l;
}
+static notrace u64 arc_gfrc_clock_read(void)
+{
+ return arc_read_gfrc(NULL);
+}
+
static struct clocksource arc_counter_gfrc = {
.name = "ARConnect GFRC",
.rating = 400,
@@ -111,6 +117,8 @@ static int __init arc_cs_setup_gfrc(struct device_node *node)
if (ret)
return ret;
+ sched_clock_register(arc_gfrc_clock_read, 64, arc_timer_freq);
+
return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
@@ -139,6 +147,11 @@ static u64 arc_read_rtc(struct clocksource *cs)
return (((u64)h) << 32) | l;
}
+static notrace u64 arc_rtc_clock_read(void)
+{
+ return arc_read_rtc(NULL);
+}
+
static struct clocksource arc_counter_rtc = {
.name = "ARCv2 RTC",
.rating = 350,
@@ -170,6 +183,8 @@ static int __init arc_cs_setup_rtc(struct device_node *node)
write_aux_reg(AUX_RTC_CTRL, 1);
+ sched_clock_register(arc_rtc_clock_read, 64, arc_timer_freq);
+
return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
}
TIMER_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
@@ -185,6 +200,11 @@ static u64 arc_read_timer1(struct clocksource *cs)
return (u64) read_aux_reg(ARC_REG_TIMER1_CNT);
}
+static notrace u64 arc_timer1_clock_read(void)
+{
+ return arc_read_timer1(NULL);
+}
+
static struct clocksource arc_counter_timer1 = {
.name = "ARC Timer1",
.rating = 300,
@@ -209,6 +229,8 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+ sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
+
return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
}
diff --git a/drivers/crypto/cavium/nitrox/nitrox_algs.c b/drivers/crypto/cavium/nitrox/nitrox_algs.c
index 2ae6124e5da6..5d54ebc20cb3 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_algs.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_algs.c
@@ -73,7 +73,7 @@ static int flexi_aes_keylen(int keylen)
static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
{
struct nitrox_crypto_ctx *nctx = crypto_skcipher_ctx(tfm);
- void *fctx;
+ struct crypto_ctx_hdr *chdr;
/* get the first device */
nctx->ndev = nitrox_get_first_device();
@@ -81,12 +81,14 @@ static int nitrox_skcipher_init(struct crypto_skcipher *tfm)
return -ENODEV;
/* allocate nitrox crypto context */
- fctx = crypto_alloc_context(nctx->ndev);
- if (!fctx) {
+ chdr = crypto_alloc_context(nctx->ndev);
+ if (!chdr) {
nitrox_put_device(nctx->ndev);
return -ENOMEM;
}
- nctx->u.ctx_handle = (uintptr_t)fctx;
+ nctx->chdr = chdr;
+ nctx->u.ctx_handle = (uintptr_t)((u8 *)chdr->vaddr +
+ sizeof(struct ctx_hdr));
crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(tfm) +
sizeof(struct nitrox_kcrypt_request));
return 0;
@@ -102,7 +104,7 @@ static void nitrox_skcipher_exit(struct crypto_skcipher *tfm)
memset(&fctx->crypto, 0, sizeof(struct crypto_keys));
memset(&fctx->auth, 0, sizeof(struct auth_keys));
- crypto_free_context((void *)fctx);
+ crypto_free_context((void *)nctx->chdr);
}
nitrox_put_device(nctx->ndev);
diff --git a/drivers/crypto/cavium/nitrox/nitrox_lib.c b/drivers/crypto/cavium/nitrox/nitrox_lib.c
index 2260efa42308..9138bae12521 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_lib.c
+++ b/drivers/crypto/cavium/nitrox/nitrox_lib.c
@@ -158,12 +158,19 @@ static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
void *crypto_alloc_context(struct nitrox_device *ndev)
{
struct ctx_hdr *ctx;
+ struct crypto_ctx_hdr *chdr;
void *vaddr;
dma_addr_t dma;
+ chdr = kmalloc(sizeof(*chdr), GFP_KERNEL);
+ if (!chdr)
+ return NULL;
+
vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_KERNEL, &dma);
- if (!vaddr)
+ if (!vaddr) {
+ kfree(chdr);
return NULL;
+ }
/* fill meta data */
ctx = vaddr;
@@ -171,7 +178,11 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
ctx->dma = dma;
ctx->ctx_dma = dma + sizeof(struct ctx_hdr);
- return ((u8 *)vaddr + sizeof(struct ctx_hdr));
+ chdr->pool = ndev->ctx_pool;
+ chdr->dma = dma;
+ chdr->vaddr = vaddr;
+
+ return chdr;
}
/**
@@ -180,13 +191,14 @@ void *crypto_alloc_context(struct nitrox_device *ndev)
*/
void crypto_free_context(void *ctx)
{
- struct ctx_hdr *ctxp;
+ struct crypto_ctx_hdr *ctxp;
if (!ctx)
return;
- ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
- dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
+ ctxp = ctx;
+ dma_pool_free(ctxp->pool, ctxp->vaddr, ctxp->dma);
+ kfree(ctxp);
}
/**
diff --git a/drivers/crypto/cavium/nitrox/nitrox_req.h b/drivers/crypto/cavium/nitrox/nitrox_req.h
index d091b6f5f5dd..19f0a20e3bb3 100644
--- a/drivers/crypto/cavium/nitrox/nitrox_req.h
+++ b/drivers/crypto/cavium/nitrox/nitrox_req.h
@@ -181,12 +181,19 @@ struct flexi_crypto_context {
struct auth_keys auth;
};
+struct crypto_ctx_hdr {
+ struct dma_pool *pool;
+ dma_addr_t dma;
+ void *vaddr;
+};
+
struct nitrox_crypto_ctx {
struct nitrox_device *ndev;
union {
u64 ctx_handle;
struct flexi_crypto_context *fctx;
} u;
+ struct crypto_ctx_hdr *chdr;
};
struct nitrox_kcrypt_request {
diff --git a/drivers/crypto/chelsio/chcr_ipsec.c b/drivers/crypto/chelsio/chcr_ipsec.c
index 461b97e2f1fd..1ff8738631a3 100644
--- a/drivers/crypto/chelsio/chcr_ipsec.c
+++ b/drivers/crypto/chelsio/chcr_ipsec.c
@@ -303,7 +303,10 @@ static bool chcr_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
static inline int is_eth_imm(const struct sk_buff *skb, unsigned int kctx_len)
{
- int hdrlen = sizeof(struct chcr_ipsec_req) + kctx_len;
+ int hdrlen;
+
+ hdrlen = sizeof(struct fw_ulptx_wr) +
+ sizeof(struct chcr_ipsec_req) + kctx_len;
hdrlen += sizeof(struct cpl_tx_pkt);
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1aaccbe7e1de..c45711fd78e9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1605,6 +1605,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
(char __user *)urelocs + copied,
len)) {
end_user:
+ user_access_end();
kvfree(relocs);
err = -EFAULT;
goto err;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index f455f095a146..1b014d92855b 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -350,15 +350,10 @@ int udl_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err;
- ret = drm_vblank_init(dev, 1);
- if (ret)
- goto err_fb;
-
drm_kms_helper_poll_init(dev);
return 0;
-err_fb:
- udl_fbdev_cleanup(dev);
+
err:
if (udl->urbs.count)
udl_free_urb_list(dev);
diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c
index 4db62c545748..26470c77eb6e 100644
--- a/drivers/gpu/drm/v3d/v3d_debugfs.c
+++ b/drivers/gpu/drm/v3d/v3d_debugfs.c
@@ -71,10 +71,13 @@ static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused)
V3D_READ(v3d_hub_reg_defs[i].reg));
}
- for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
- seq_printf(m, "%s (0x%04x): 0x%08x\n",
- v3d_gca_reg_defs[i].name, v3d_gca_reg_defs[i].reg,
- V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+ if (v3d->ver < 41) {
+ for (i = 0; i < ARRAY_SIZE(v3d_gca_reg_defs); i++) {
+ seq_printf(m, "%s (0x%04x): 0x%08x\n",
+ v3d_gca_reg_defs[i].name,
+ v3d_gca_reg_defs[i].reg,
+ V3D_GCA_READ(v3d_gca_reg_defs[i].reg));
+ }
}
for (core = 0; core < v3d->cores; core++) {
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index a365089a9305..b7257d7dd925 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -919,6 +919,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
if (slen > len)
slen = len;
+ if (slen > ss->sge.sge_length)
+ slen = ss->sge.sge_length;
rvt_update_sge(ss, slen, false);
seg_pio_copy_mid(pbuf, addr, slen);
len -= slen;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index a94b6494e71a..f322a1768fbb 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1336,6 +1336,7 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
+ { "ELAN0501", 0 },
{ "ELAN0600", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index d3aacd534e9c..5c63d25ce84e 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -1585,10 +1585,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw)
/* T7 config may have changed */
mxt_init_t7_power_cfg(data);
-release_raw:
- kfree(cfg.raw);
release_mem:
kfree(cfg.mem);
+release_raw:
+ kfree(cfg.raw);
return ret;
}
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 6947ccf26512..71eda422c926 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -828,7 +828,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_CS, CMDQ_SYNC_0_CS_SEV);
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSH, ARM_SMMU_SH_ISH);
cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIATTR, ARM_SMMU_MEMATTR_OIWB);
- cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA, ent->sync.msidata);
+ /*
+ * Commands are written little-endian, but we want the SMMU to
+ * receive MSIData, and thus write it back to memory, in CPU
+ * byte order, so big-endian needs an extra byteswap here.
+ */
+ cmd[0] |= FIELD_PREP(CMDQ_SYNC_0_MSIDATA,
+ cpu_to_le32(ent->sync.msidata));
cmd[1] |= ent->sync.msiaddr & CMDQ_SYNC_1_MSIADDR_MASK;
break;
default:
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 0ff517d3c98f..a4ceb61c5b60 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -852,7 +852,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
u16 ret;
if (contr == 0) {
- strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
+ strncpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
return CAPI_NOERROR;
}
@@ -860,7 +860,7 @@ u16 capi20_get_manufacturer(u32 contr, u8 *buf)
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
- strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
+ strncpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 65a933a21e68..9a5334b726d6 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -455,7 +455,7 @@ int cec_thread_func(void *_adap)
(adap->needs_hpd &&
(!adap->is_configured && !adap->is_configuring)) ||
kthread_should_stop() ||
- (!adap->transmitting &&
+ (!adap->transmit_in_progress &&
!list_empty(&adap->transmit_queue)),
msecs_to_jiffies(CEC_XFER_TIMEOUT_MS));
timeout = err == 0;
@@ -463,7 +463,7 @@ int cec_thread_func(void *_adap)
/* Otherwise we just wait for something to happen. */
wait_event_interruptible(adap->kthread_waitq,
kthread_should_stop() ||
- (!adap->transmitting &&
+ (!adap->transmit_in_progress &&
!list_empty(&adap->transmit_queue)));
}
@@ -488,6 +488,7 @@ int cec_thread_func(void *_adap)
pr_warn("cec-%s: message %*ph timed out\n", adap->name,
adap->transmitting->msg.len,
adap->transmitting->msg.msg);
+ adap->transmit_in_progress = false;
adap->tx_timeouts++;
/* Just give up on this. */
cec_data_cancel(adap->transmitting,
@@ -499,7 +500,7 @@ int cec_thread_func(void *_adap)
* If we are still transmitting, or there is nothing new to
* transmit, then just continue waiting.
*/
- if (adap->transmitting || list_empty(&adap->transmit_queue))
+ if (adap->transmit_in_progress || list_empty(&adap->transmit_queue))
goto unlock;
/* Get a new message to transmit */
@@ -545,6 +546,8 @@ int cec_thread_func(void *_adap)
if (adap->ops->adap_transmit(adap, data->attempts,
signal_free_time, &data->msg))
cec_data_cancel(data, CEC_TX_STATUS_ABORTED);
+ else
+ adap->transmit_in_progress = true;
unlock:
mutex_unlock(&adap->lock);
@@ -575,14 +578,17 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
data = adap->transmitting;
if (!data) {
/*
- * This can happen if a transmit was issued and the cable is
+ * This might happen if a transmit was issued and the cable is
* unplugged while the transmit is ongoing. Ignore this
* transmit in that case.
*/
- dprintk(1, "%s was called without an ongoing transmit!\n",
- __func__);
- goto unlock;
+ if (!adap->transmit_in_progress)
+ dprintk(1, "%s was called without an ongoing transmit!\n",
+ __func__);
+ adap->transmit_in_progress = false;
+ goto wake_thread;
}
+ adap->transmit_in_progress = false;
msg = &data->msg;
@@ -648,7 +654,6 @@ void cec_transmit_done_ts(struct cec_adapter *adap, u8 status,
* for transmitting or to retry the current message.
*/
wake_up_interruptible(&adap->kthread_waitq);
-unlock:
mutex_unlock(&adap->lock);
}
EXPORT_SYMBOL_GPL(cec_transmit_done_ts);
@@ -1496,8 +1501,11 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
if (adap->monitor_all_cnt)
WARN_ON(call_op(adap, adap_monitor_all_enable, false));
mutex_lock(&adap->devnode.lock);
- if (adap->needs_hpd || list_empty(&adap->devnode.fhs))
+ if (adap->needs_hpd || list_empty(&adap->devnode.fhs)) {
WARN_ON(adap->ops->adap_enable(adap, false));
+ adap->transmit_in_progress = false;
+ wake_up_interruptible(&adap->kthread_waitq);
+ }
mutex_unlock(&adap->devnode.lock);
if (phys_addr == CEC_PHYS_ADDR_INVALID)
return;
@@ -1505,6 +1513,7 @@ void __cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr, bool block)
mutex_lock(&adap->devnode.lock);
adap->last_initiator = 0xff;
+ adap->transmit_in_progress = false;
if ((adap->needs_hpd || list_empty(&adap->devnode.fhs)) &&
adap->ops->adap_enable(adap, true)) {
diff --git a/drivers/media/cec/cec-pin.c b/drivers/media/cec/cec-pin.c
index 635db8e70ead..8f987bc0dd88 100644
--- a/drivers/media/cec/cec-pin.c
+++ b/drivers/media/cec/cec-pin.c
@@ -601,8 +601,9 @@ static void cec_pin_tx_states(struct cec_pin *pin, ktime_t ts)
break;
/* Was the message ACKed? */
ack = cec_msg_is_broadcast(&pin->tx_msg) ? v : !v;
- if (!ack && !pin->tx_ignore_nack_until_eom &&
- pin->tx_bit / 10 < pin->tx_msg.len && !pin->tx_post_eom) {
+ if (!ack && (!pin->tx_ignore_nack_until_eom ||
+ pin->tx_bit / 10 == pin->tx_msg.len - 1) &&
+ !pin->tx_post_eom) {
/*
* Note: the CEC spec is ambiguous regarding
* what action to take when a NACK appears
diff --git a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
index fa483b95bc5a..d9a590ae7545 100644
--- a/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
+++ b/drivers/media/common/v4l2-tpg/v4l2-tpg-core.c
@@ -1769,7 +1769,7 @@ typedef struct { u16 __; u8 _; } __packed x24;
unsigned s; \
\
for (s = 0; s < len; s++) { \
- u8 chr = font8x16[text[s] * 16 + line]; \
+ u8 chr = font8x16[(u8)text[s] * 16 + line]; \
\
if (hdiv == 2 && tpg->hflip) { \
pos[3] = (chr & (0x01 << 6) ? fg : bg); \
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 8ff8722cb6b1..99f736c81286 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -812,6 +812,9 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
q->waiting_for_buffers = !q->is_output;
+ } else if (q->memory != memory) {
+ dprintk(1, "memory model mismatch\n");
+ return -EINVAL;
}
num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
diff --git a/drivers/media/i2c/imx274.c b/drivers/media/i2c/imx274.c
index 11c69281692e..95a0e7d9851a 100644
--- a/drivers/media/i2c/imx274.c
+++ b/drivers/media/i2c/imx274.c
@@ -619,16 +619,19 @@ static int imx274_write_table(struct stimx274 *priv, const struct reg_8 table[])
static inline int imx274_read_reg(struct stimx274 *priv, u16 addr, u8 *val)
{
+ unsigned int uint_val;
int err;
- err = regmap_read(priv->regmap, addr, (unsigned int *)val);
+ err = regmap_read(priv->regmap, addr, &uint_val);
if (err)
dev_err(&priv->client->dev,
"%s : i2c read failed, addr = %x\n", __func__, addr);
else
dev_dbg(&priv->client->dev,
"%s : addr 0x%x, val=0x%x\n", __func__,
- addr, *val);
+ addr, uint_val);
+
+ *val = uint_val;
return err;
}
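
The imx274 fix above illustrates a general regmap pitfall: regmap_read() stores a full unsigned int through its value pointer, so passing a cast u8 * lets it scribble past the byte. A hedged sketch of the safe pattern, with placeholder names:

#include <linux/regmap.h>
#include <linux/types.h>

static int example_read_u8(struct regmap *map, unsigned int addr, u8 *val)
{
	unsigned int tmp;
	int err;

	err = regmap_read(map, addr, &tmp);	/* writes sizeof(unsigned int) bytes */
	if (err)
		return err;

	*val = tmp;				/* narrow explicitly, nothing overwritten */
	return 0;
}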
diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
index eaefdb58653b..703d29abb363 100644
--- a/drivers/media/i2c/ov5640.c
+++ b/drivers/media/i2c/ov5640.c
@@ -2020,6 +2020,7 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
struct ov5640_dev *sensor = to_ov5640_dev(sd);
const struct ov5640_mode_info *new_mode;
struct v4l2_mbus_framefmt *mbus_fmt = &format->format;
+ struct v4l2_mbus_framefmt *fmt;
int ret;
if (format->pad != 0)
@@ -2037,22 +2038,20 @@ static int ov5640_set_fmt(struct v4l2_subdev *sd,
if (ret)
goto out;
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- struct v4l2_mbus_framefmt *fmt =
- v4l2_subdev_get_try_format(sd, cfg, 0);
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY)
+ fmt = v4l2_subdev_get_try_format(sd, cfg, 0);
+ else
+ fmt = &sensor->fmt;
- *fmt = *mbus_fmt;
- goto out;
- }
+ *fmt = *mbus_fmt;
if (new_mode != sensor->current_mode) {
sensor->current_mode = new_mode;
sensor->pending_mode_change = true;
}
- if (mbus_fmt->code != sensor->fmt.code) {
- sensor->fmt = *mbus_fmt;
+ if (mbus_fmt->code != sensor->fmt.code)
sensor->pending_fmt_change = true;
- }
+
out:
mutex_unlock(&sensor->lock);
return ret;
diff --git a/drivers/media/platform/vim2m.c b/drivers/media/platform/vim2m.c
index d82db738f174..f938a2c54314 100644
--- a/drivers/media/platform/vim2m.c
+++ b/drivers/media/platform/vim2m.c
@@ -805,10 +805,11 @@ static int vim2m_start_streaming(struct vb2_queue *q, unsigned count)
static void vim2m_stop_streaming(struct vb2_queue *q)
{
struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct vim2m_dev *dev = ctx->dev;
struct vb2_v4l2_buffer *vbuf;
unsigned long flags;
- flush_scheduled_work();
+ cancel_delayed_work_sync(&dev->work_run);
for (;;) {
if (V4L2_TYPE_IS_OUTPUT(q->type))
vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
index 673772cd17d6..a88637a42f44 100644
--- a/drivers/media/platform/vivid/vivid-vid-cap.c
+++ b/drivers/media/platform/vivid/vivid-vid-cap.c
@@ -449,6 +449,8 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
break;
}
+ vfree(dev->bitmap_cap);
+ dev->bitmap_cap = NULL;
vivid_update_quality(dev);
tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
dev->crop_cap = dev->src_rect;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 552bbe82a160..877978dbd409 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -695,7 +695,8 @@ void rc_repeat(struct rc_dev *dev)
(dev->last_toggle ? LIRC_SCANCODE_FLAG_TOGGLE : 0)
};
- ir_lirc_scancode_event(dev, &sc);
+ if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
+ ir_lirc_scancode_event(dev, &sc);
spin_lock_irqsave(&dev->keylock, flags);
@@ -735,7 +736,8 @@ static void ir_do_keydown(struct rc_dev *dev, enum rc_proto protocol,
.keycode = keycode
};
- ir_lirc_scancode_event(dev, &sc);
+ if (dev->allowed_protocols != RC_PROTO_BIT_CEC)
+ ir_lirc_scancode_event(dev, &sc);
if (new_event && dev->keypressed)
ir_do_keyup(dev, false);
diff --git a/drivers/media/usb/dvb-usb-v2/usb_urb.c b/drivers/media/usb/dvb-usb-v2/usb_urb.c
index 024c751eb165..2ad2ddeaff51 100644
--- a/drivers/media/usb/dvb-usb-v2/usb_urb.c
+++ b/drivers/media/usb/dvb-usb-v2/usb_urb.c
@@ -155,7 +155,6 @@ static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream)
stream->props.u.bulk.buffersize,
usb_urb_complete, stream);
- stream->urb_list[i]->transfer_flags = URB_FREE_BUFFER;
stream->urbs_initialized++;
}
return 0;
@@ -186,7 +185,7 @@ static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
urb->complete = usb_urb_complete;
urb->pipe = usb_rcvisocpipe(stream->udev,
stream->props.endpoint);
- urb->transfer_flags = URB_ISO_ASAP | URB_FREE_BUFFER;
+ urb->transfer_flags = URB_ISO_ASAP;
urb->interval = stream->props.u.isoc.interval;
urb->number_of_packets = stream->props.u.isoc.framesperurb;
urb->transfer_buffer_length = stream->props.u.isoc.framesize *
@@ -210,7 +209,7 @@ static int usb_free_stream_buffers(struct usb_data_stream *stream)
if (stream->state & USB_STATE_URB_BUF) {
while (stream->buf_num) {
stream->buf_num--;
- stream->buf_list[stream->buf_num] = NULL;
+ kfree(stream->buf_list[stream->buf_num]);
}
}
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 218f0da0ce76..edd34cf09cf8 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -310,8 +310,8 @@ v4l2_fwnode_endpoint_parse_parallel_bus(struct fwnode_handle *fwnode,
}
if (!fwnode_property_read_u32(fwnode, "data-active", &v)) {
- flags &= ~(V4L2_MBUS_PCLK_SAMPLE_RISING |
- V4L2_MBUS_PCLK_SAMPLE_FALLING);
+ flags &= ~(V4L2_MBUS_DATA_ACTIVE_HIGH |
+ V4L2_MBUS_DATA_ACTIVE_LOW);
flags |= v ? V4L2_MBUS_DATA_ACTIVE_HIGH :
V4L2_MBUS_DATA_ACTIVE_LOW;
pr_debug("data-active %s\n", v ? "high" : "low");
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index 57a6bb1fd3c9..8f2c5d8bd2ee 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -318,7 +318,7 @@ static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn,
if (rc)
return rc;
ptr = (u32 *) &afu->name[i];
- *ptr = val;
+ *ptr = le32_to_cpu((__force __le32) val);
}
afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */
return 0;
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index 31695a078485..646d16450066 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -566,7 +566,7 @@ int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid)
mutex_lock(&spa->spa_lock);
- pe->tid = tid;
+ pe->tid = cpu_to_be32(tid);
/*
* The barrier makes sure the PE is updated
diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c
index 650f2b490a05..9dc16a23429a 100644
--- a/drivers/mtd/nand/raw/marvell_nand.c
+++ b/drivers/mtd/nand/raw/marvell_nand.c
@@ -514,9 +514,14 @@ static void marvell_nfc_enable_int(struct marvell_nfc *nfc, u32 int_mask)
writel_relaxed(reg & ~int_mask, nfc->regs + NDCR);
}
-static void marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
+static u32 marvell_nfc_clear_int(struct marvell_nfc *nfc, u32 int_mask)
{
+ u32 reg;
+
+ reg = readl_relaxed(nfc->regs + NDSR);
writel_relaxed(int_mask, nfc->regs + NDSR);
+
+ return reg & int_mask;
}
static void marvell_nfc_force_byte_access(struct nand_chip *chip,
@@ -683,6 +688,7 @@ static int marvell_nfc_wait_cmdd(struct nand_chip *chip)
static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
{
struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
+ u32 pending;
int ret;
/* Timeout is expressed in ms */
@@ -695,8 +701,13 @@ static int marvell_nfc_wait_op(struct nand_chip *chip, unsigned int timeout_ms)
ret = wait_for_completion_timeout(&nfc->complete,
msecs_to_jiffies(timeout_ms));
marvell_nfc_disable_int(nfc, NDCR_RDYM);
- marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
- if (!ret) {
+ pending = marvell_nfc_clear_int(nfc, NDSR_RDY(0) | NDSR_RDY(1));
+
+ /*
+ * In case the interrupt was not served in the required time frame,
+ * check if the ISR was not served or if something went actually wrong.
+ */
+ if (!ret && !pending) {
dev_err(nfc->dev, "Timeout waiting for RB signal\n");
return -ETIMEDOUT;
}
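
The pending check added above is a common pattern when an interrupt may simply have been serviced too late on a loaded system: only report -ETIMEDOUT when the wait expired and the status register shows the event never actually became pending. A rough sketch, with made-up register and mask names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

static int example_wait_ready(struct completion *done, void __iomem *status,
			      u32 ready_mask, unsigned int timeout_ms)
{
	unsigned long ret;
	u32 pending;

	ret = wait_for_completion_timeout(done, msecs_to_jiffies(timeout_ms));

	pending = readl_relaxed(status) & ready_mask;	/* did it fire anyway? */
	writel_relaxed(ready_mask, status);		/* acknowledge either way */

	if (!ret && !pending)
		return -ETIMEDOUT;	/* genuinely never became ready */

	return 0;			/* completed, or the IRQ was merely late */
}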
diff --git a/drivers/mtd/nand/raw/nand_jedec.c b/drivers/mtd/nand/raw/nand_jedec.c
index 5c26492c841d..38b5dc22cb30 100644
--- a/drivers/mtd/nand/raw/nand_jedec.c
+++ b/drivers/mtd/nand/raw/nand_jedec.c
@@ -107,6 +107,8 @@ int nand_jedec_detect(struct nand_chip *chip)
pr_warn("Invalid codeword size\n");
}
+ ret = 1;
+
free_jedec_param_page:
kfree(p);
return ret;
diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c
index 886d05c391ef..68e8b9f7f372 100644
--- a/drivers/mtd/nand/raw/omap2.c
+++ b/drivers/mtd/nand/raw/omap2.c
@@ -1944,7 +1944,7 @@ static int omap_nand_attach_chip(struct nand_chip *chip)
case NAND_OMAP_PREFETCH_DMA:
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
- info->dma = dma_request_chan(dev, "rxtx");
+ info->dma = dma_request_chan(dev->parent, "rxtx");
if (IS_ERR(info->dma)) {
dev_err(dev, "DMA engine request failed\n");
diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig
index 6cc9c929ff57..37775fc09e09 100644
--- a/drivers/mtd/spi-nor/Kconfig
+++ b/drivers/mtd/spi-nor/Kconfig
@@ -41,7 +41,7 @@ config SPI_ASPEED_SMC
config SPI_ATMEL_QUADSPI
tristate "Atmel Quad SPI Controller"
- depends on ARCH_AT91 || (ARM && COMPILE_TEST)
+ depends on ARCH_AT91 || (ARM && COMPILE_TEST && !ARCH_EBSA110)
depends on OF && HAS_IOMEM
help
This enables support for the Quad SPI controller in master mode.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b164f705709d..3b5b47e98c73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9360,10 +9360,16 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
rc);
- /* Remove all currently configured VLANs */
- rc = bnx2x_del_all_vlans(bp);
- if (rc < 0)
- BNX2X_ERR("Failed to delete all VLANs\n");
+ /* The whole *vlan_obj structure may be not initialized if VLAN
+ * filtering offload is not supported by hardware. Currently this is
+ * true for all hardware covered by CHIP_IS_E1x().
+ */
+ if (!CHIP_IS_E1x(bp)) {
+ /* Remove all currently configured VLANs */
+ rc = bnx2x_del_all_vlans(bp);
+ if (rc < 0)
+ BNX2X_ERR("Failed to delete all VLANs\n");
+ }
/* Disable LLH */
if (!CHIP_IS_E1(bp))
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index a4681780a55d..098d8764c0ea 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1171,11 +1171,15 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
map_failed_frags:
last = i+1;
- for (i = 0; i < last; i++)
+ for (i = 1; i < last; i++)
dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
DMA_TO_DEVICE);
+ dma_unmap_single(&adapter->vdev->dev,
+ descs[0].fields.address,
+ descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
+ DMA_TO_DEVICE);
map_failed:
if (!firmware_has_feature(FW_FEATURE_CMO))
netdev_err(netdev, "tx: unable to map xmit buffer\n");
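
The reworked unwind above mirrors how the buffers were mapped in the first place: slot 0 holds the linear skb data mapped with dma_map_single(), while the remaining slots hold fragments mapped with dma_map_page(), and each mapping must be released with its matching unmap call. A simplified sketch (array layout assumed):

#include <linux/dma-mapping.h>

static void example_unmap_tx_slots(struct device *dev, dma_addr_t *addr,
				   unsigned int *len, unsigned int mapped)
{
	unsigned int i;

	/* slots 1..mapped-1 came from dma_map_page() (skb fragments) */
	for (i = 1; i < mapped; i++)
		dma_unmap_page(dev, addr[i], len[i], DMA_TO_DEVICE);

	/* slot 0 came from dma_map_single() (linear skb data) */
	dma_unmap_single(dev, addr[0], len[0], DMA_TO_DEVICE);
}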
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 17e6dcd2eb42..c99dd3f1e6a8 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -523,10 +523,7 @@ static void resync_tnc(struct timer_list *t)
/* Start resync timer again -- the TNC might be still absent */
-
- del_timer(&sp->resync_t);
- sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
- add_timer(&sp->resync_t);
+ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
}
static inline int tnc_init(struct sixpack *sp)
@@ -537,9 +534,7 @@ static inline int tnc_init(struct sixpack *sp)
sp->tty->ops->write(sp->tty, &inbyte, 1);
- del_timer(&sp->resync_t);
- sp->resync_t.expires = jiffies + SIXP_RESYNC_TIMEOUT;
- add_timer(&sp->resync_t);
+ mod_timer(&sp->resync_t, jiffies + SIXP_RESYNC_TIMEOUT);
return 0;
}
@@ -897,11 +892,8 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
/* if the state byte has been received, the TNC is present,
so the resync timer can be reset. */
- if (sp->tnc_state == TNC_IN_SYNC) {
- del_timer(&sp->resync_t);
- sp->resync_t.expires = jiffies + SIXP_INIT_RESYNC_TIMEOUT;
- add_timer(&sp->resync_t);
- }
+ if (sp->tnc_state == TNC_IN_SYNC)
+ mod_timer(&sp->resync_t, jiffies + SIXP_INIT_RESYNC_TIMEOUT);
sp->status1 = cmd & SIXP_PRIO_DATA_MASK;
}
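
All three 6pack hunks collapse the open-coded del_timer()/expires/add_timer() sequence into mod_timer(), which re-arms a timer in one atomic step and also works when the timer is not currently pending. As a tiny sketch:

#include <linux/jiffies.h>
#include <linux/timer.h>

static void example_rearm(struct timer_list *t, unsigned long delay)
{
	/* one call instead of del_timer(); t->expires = ...; add_timer(); */
	mod_timer(t, jiffies + delay);
}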
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index f03004f37eca..276f800ed57f 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -1177,8 +1177,6 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
goto err_kfree;
}
- skb_probe_transport_header(skb, ETH_HLEN);
-
/* Move network header to the right position for VLAN tagged packets */
if ((skb->protocol == htons(ETH_P_8021Q) ||
skb->protocol == htons(ETH_P_8021AD)) &&
@@ -1189,6 +1187,7 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
tap = rcu_dereference(q->tap);
if (tap) {
skb->dev = tap->dev;
+ skb_probe_transport_header(skb, ETH_HLEN);
dev_queue_xmit(skb);
} else {
kfree_skb(skb);
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 1098263ab862..46c3d983b7b7 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -485,8 +485,10 @@ static int x25_asy_open(struct net_device *dev)
/* Cleanup */
kfree(sl->xbuff);
+ sl->xbuff = NULL;
noxbuff:
kfree(sl->rbuff);
+ sl->rbuff = NULL;
norbuff:
return -ENOMEM;
}
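
Nulling the pointers after kfree() in the unwind above is the standard guard against a later cleanup path (or a repeated open attempt) freeing the same buffer twice, since kfree(NULL) is a no-op. Sketch with an invented structure:

#include <linux/slab.h>

struct example_chan {
	void *xbuff;
	void *rbuff;
};

static void example_free_buffers(struct example_chan *ch)
{
	kfree(ch->xbuff);
	ch->xbuff = NULL;	/* any later kfree() of this field is harmless */
	kfree(ch->rbuff);
	ch->rbuff = NULL;
}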
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 7f0a5bade70a..c0e3ae7bf2ae 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -5196,10 +5196,17 @@ static struct cfg80211_ops brcmf_cfg80211_ops = {
.del_pmk = brcmf_cfg80211_del_pmk,
};
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void)
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings)
{
- return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
+ struct cfg80211_ops *ops;
+
+ ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops),
GFP_KERNEL);
+
+ if (ops && settings->roamoff)
+ ops->update_connect_params = NULL;
+
+ return ops;
}
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
index a4aec0004e4f..9a6287f084a9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h
@@ -404,7 +404,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg);
s32 brcmf_cfg80211_up(struct net_device *ndev);
s32 brcmf_cfg80211_down(struct net_device *ndev);
-struct cfg80211_ops *brcmf_cfg80211_get_ops(void);
+struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings);
enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp);
struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index b1f702faff4f..860a4372cb56 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -1130,7 +1130,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings)
brcmf_dbg(TRACE, "Enter\n");
- ops = brcmf_cfg80211_get_ops();
+ ops = brcmf_cfg80211_get_ops(settings);
if (!ops)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 9095b830ae4d..9927079a9ace 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -641,8 +641,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
struct brcmf_fw_request *fwreq;
char chipname[12];
const char *mp_path;
+ size_t mp_path_len;
u32 i, j;
- char end;
+ char end = '\0';
size_t reqsz;
for (i = 0; i < table_size; i++) {
@@ -667,7 +668,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev,
mapping_table[i].fw_base, chipname);
mp_path = brcmf_mp_global.firmware_path;
- end = mp_path[strlen(mp_path) - 1];
+ mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN);
+ if (mp_path_len)
+ end = mp_path[mp_path_len - 1];
+
fwreq->n_items = n_fwnames;
for (j = 0; j < n_fwnames; j++) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 9e015212c2c0..8d4711590dfc 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
/* 9000 Series */
+ {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index a3fb235fea0d..7431a795a624 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -393,7 +393,7 @@ static int m41t80_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->time.tm_min = bcd2bin(alarmvals[3] & 0x7f);
alrm->time.tm_hour = bcd2bin(alarmvals[2] & 0x3f);
alrm->time.tm_mday = bcd2bin(alarmvals[1] & 0x3f);
- alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f);
+ alrm->time.tm_mon = bcd2bin(alarmvals[0] & 0x3f) - 1;
alrm->enabled = !!(alarmvals[0] & M41T80_ALMON_AFE);
alrm->pending = (flags & M41T80_FLAGS_AF) && alrm->enabled;
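
The extra "- 1" exists because the alarm month register is BCD-encoded 1-12, while struct rtc_time follows struct tm and keeps tm_mon in the range 0-11. A minimal sketch of that conversion (register masking assumed):

#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/types.h>

static void example_decode_alarm_month(struct rtc_time *tm, u8 mon_reg)
{
	/* BCD 1-12 in hardware -> 0-11 expected by struct rtc_time */
	tm->tm_mon = bcd2bin(mon_reg & 0x3f) - 1;
}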
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index f35cc10772f6..25abf2d1732a 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -88,7 +88,7 @@ struct bcm2835_spi {
u8 *rx_buf;
int tx_len;
int rx_len;
- bool dma_pending;
+ unsigned int dma_pending;
};
static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
@@ -155,8 +155,7 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
/* Write as many bytes as possible to FIFO */
bcm2835_wr_fifo(bs);
- /* based on flags decide if we can finish the transfer */
- if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+ if (!bs->rx_len) {
/* Transfer complete - reset SPI HW */
bcm2835_spi_reset_hw(master);
/* wake up the framework */
@@ -233,10 +232,9 @@ static void bcm2835_spi_dma_done(void *data)
* is called the tx-dma must have finished - can't get to this
* situation otherwise...
*/
- dmaengine_terminate_all(master->dma_tx);
-
- /* mark as no longer pending */
- bs->dma_pending = 0;
+ if (cmpxchg(&bs->dma_pending, true, false)) {
+ dmaengine_terminate_all(master->dma_tx);
+ }
/* and mark as completed */;
complete(&master->xfer_completion);
@@ -342,6 +340,7 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master,
if (ret) {
/* need to reset on errors */
dmaengine_terminate_all(master->dma_tx);
+ bs->dma_pending = false;
bcm2835_spi_reset_hw(master);
return ret;
}
@@ -617,10 +616,9 @@ static void bcm2835_spi_handle_err(struct spi_master *master,
struct bcm2835_spi *bs = spi_master_get_devdata(master);
/* if an error occurred and we have an active dma, then terminate */
- if (bs->dma_pending) {
+ if (cmpxchg(&bs->dma_pending, true, false)) {
dmaengine_terminate_all(master->dma_tx);
dmaengine_terminate_all(master->dma_rx);
- bs->dma_pending = 0;
}
/* and reset */
bcm2835_spi_reset_hw(master);
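
Turning dma_pending into an unsigned int lets it be claimed with cmpxchg(), so that whichever of the DMA completion callback and the error handler gets there first sees the flag still set and calls dmaengine_terminate_all(), while the loser sees it already cleared and backs off. A sketch of that claim-once pattern with placeholder names:

#include <linux/atomic.h>

struct example_state {
	unsigned int busy;	/* unsigned int rather than bool, for cmpxchg() */
};

static void example_teardown_once(struct example_state *st,
				  void (*teardown)(void *), void *arg)
{
	/* cmpxchg() returns the old value: only the first caller sees 1 */
	if (cmpxchg(&st->busy, 1, 0))
		teardown(arg);
}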
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
index 781754f36da7..8da66e996d23 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
@@ -143,7 +143,6 @@ vc_vchi_audio_init(VCHI_INSTANCE_T vchi_instance,
dev_err(instance->dev,
"failed to open VCHI service connection (status=%d)\n",
status);
- kfree(instance);
return -EPERM;
}
diff --git a/drivers/staging/wilc1000/wilc_sdio.c b/drivers/staging/wilc1000/wilc_sdio.c
index ca351c950344..5c3e4df804eb 100644
--- a/drivers/staging/wilc1000/wilc_sdio.c
+++ b/drivers/staging/wilc1000/wilc_sdio.c
@@ -841,6 +841,7 @@ static int sdio_read_int(struct wilc *wilc, u32 *int_status)
if (!sdio_priv->irq_gpio) {
int i;
+ cmd.read_write = 0;
cmd.function = 1;
cmd.address = 0x04;
cmd.data = 0;
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 57c66d2c3471..5413a04023f9 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -123,7 +123,7 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
#define CDNS_UART_IXR_RXTRIG 0x00000001 /* RX FIFO trigger interrupt */
#define CDNS_UART_IXR_RXFULL 0x00000004 /* RX FIFO full interrupt. */
#define CDNS_UART_IXR_RXEMPTY 0x00000002 /* RX FIFO empty interrupt. */
-#define CDNS_UART_IXR_MASK 0x00001FFF /* Valid bit mask */
+#define CDNS_UART_IXR_RXMASK 0x000021e7 /* Valid RX bit mask */
/*
* Do not enable parity error interrupt for the following
@@ -364,7 +364,7 @@ static irqreturn_t cdns_uart_isr(int irq, void *dev_id)
cdns_uart_handle_tx(dev_id);
isrstatus &= ~CDNS_UART_IXR_TXEMPTY;
}
- if (isrstatus & CDNS_UART_IXR_MASK)
+ if (isrstatus & CDNS_UART_IXR_RXMASK)
cdns_uart_handle_rx(dev_id, isrstatus);
spin_unlock(&port->lock);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 987fc5ba6321..70e6c956c23c 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -205,8 +205,4 @@ config USB_ULPI_BUS
To compile this driver as a module, choose M here: the module will
be called ulpi.
-config USB_ROLE_SWITCH
- tristate
- select USB_COMMON
-
endif # USB_SUPPORT
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 1b68fed464cb..ed8c62b2d9d1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -581,6 +581,13 @@ static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
if (retval)
goto error_init_termios;
+ /*
+ * Suppress initial echoing for some devices which might send data
+ * immediately after acm driver has been installed.
+ */
+ if (acm->quirks & DISABLE_ECHO)
+ tty->termios.c_lflag &= ~ECHO;
+
tty->driver_data = acm;
return 0;
@@ -1657,6 +1664,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@xxxxxxxxx */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
+ { USB_DEVICE(0x0e8d, 0x2000), /* MediaTek Inc Preloader */
+ .driver_info = DISABLE_ECHO, /* DISABLE ECHO in termios flag */
+ },
{ USB_DEVICE(0x0e8d, 0x3329), /* MediaTek Inc GPS */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ca06b20d7af9..515aad0847ee 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -140,3 +140,4 @@ struct acm {
#define QUIRK_CONTROL_LINE_STATE BIT(6)
#define CLEAR_HALT_CONDITIONS BIT(7)
#define SEND_ZERO_PACKET BIT(8)
+#define DISABLE_ECHO BIT(9)
diff --git a/drivers/usb/common/Makefile b/drivers/usb/common/Makefile
index fb4d5ef4165c..0a7c45e85481 100644
--- a/drivers/usb/common/Makefile
+++ b/drivers/usb/common/Makefile
@@ -9,4 +9,3 @@ usb-common-$(CONFIG_USB_LED_TRIG) += led.o
obj-$(CONFIG_USB_OTG_FSM) += usb-otg-fsm.o
obj-$(CONFIG_USB_ULPI_BUS) += ulpi.o
-obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
diff --git a/drivers/usb/common/roles.c b/drivers/usb/common/roles.c
deleted file mode 100644
index 99116af07f1d..000000000000
--- a/drivers/usb/common/roles.c
+++ /dev/null
@@ -1,314 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * USB Role Switch Support
- *
- * Copyright (C) 2018 Intel Corporation
- * Author: Heikki Krogerus <heikki.krogerus@xxxxxxxxxxxxxxx>
- * Hans de Goede <hdegoede@xxxxxxxxxx>
- */
-
-#include <linux/usb/role.h>
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-
-static struct class *role_class;
-
-struct usb_role_switch {
- struct device dev;
- struct mutex lock; /* device lock*/
- enum usb_role role;
-
- /* From descriptor */
- struct device *usb2_port;
- struct device *usb3_port;
- struct device *udc;
- usb_role_switch_set_t set;
- usb_role_switch_get_t get;
- bool allow_userspace_control;
-};
-
-#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
-
-/**
- * usb_role_switch_set_role - Set USB role for a switch
- * @sw: USB role switch
- * @role: USB role to be switched to
- *
- * Set USB role @role for @sw.
- */
-int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
-{
- int ret;
-
- if (IS_ERR_OR_NULL(sw))
- return 0;
-
- mutex_lock(&sw->lock);
-
- ret = sw->set(sw->dev.parent, role);
- if (!ret)
- sw->role = role;
-
- mutex_unlock(&sw->lock);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
-
-/**
- * usb_role_switch_get_role - Get the USB role for a switch
- * @sw: USB role switch
- *
- * Depending on the role-switch-driver this function returns either a cached
- * value of the last set role, or reads back the actual value from the hardware.
- */
-enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
-{
- enum usb_role role;
-
- if (IS_ERR_OR_NULL(sw))
- return USB_ROLE_NONE;
-
- mutex_lock(&sw->lock);
-
- if (sw->get)
- role = sw->get(sw->dev.parent);
- else
- role = sw->role;
-
- mutex_unlock(&sw->lock);
-
- return role;
-}
-EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
-
-static int __switch_match(struct device *dev, const void *name)
-{
- return !strcmp((const char *)name, dev_name(dev));
-}
-
-static void *usb_role_switch_match(struct device_connection *con, int ep,
- void *data)
-{
- struct device *dev;
-
- dev = class_find_device(role_class, NULL, con->endpoint[ep],
- __switch_match);
-
- return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
-}
-
-/**
- * usb_role_switch_get - Find USB role switch linked with the caller
- * @dev: The caller device
- *
- * Finds and returns role switch linked with @dev. The reference count for the
- * found switch is incremented.
- */
-struct usb_role_switch *usb_role_switch_get(struct device *dev)
-{
- struct usb_role_switch *sw;
-
- sw = device_connection_find_match(dev, "usb-role-switch", NULL,
- usb_role_switch_match);
-
- if (!IS_ERR_OR_NULL(sw))
- WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
-
- return sw;
-}
-EXPORT_SYMBOL_GPL(usb_role_switch_get);
-
-/**
- * usb_role_switch_put - Release handle to a switch
- * @sw: USB Role Switch
- *
- * Decrement reference count for @sw.
- */
-void usb_role_switch_put(struct usb_role_switch *sw)
-{
- if (!IS_ERR_OR_NULL(sw)) {
- put_device(&sw->dev);
- module_put(sw->dev.parent->driver->owner);
- }
-}
-EXPORT_SYMBOL_GPL(usb_role_switch_put);
-
-static umode_t
-usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
-{
- struct device *dev = container_of(kobj, typeof(*dev), kobj);
- struct usb_role_switch *sw = to_role_switch(dev);
-
- if (sw->allow_userspace_control)
- return attr->mode;
-
- return 0;
-}
-
-static const char * const usb_roles[] = {
- [USB_ROLE_NONE] = "none",
- [USB_ROLE_HOST] = "host",
- [USB_ROLE_DEVICE] = "device",
-};
-
-static ssize_t
-role_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct usb_role_switch *sw = to_role_switch(dev);
- enum usb_role role = usb_role_switch_get_role(sw);
-
- return sprintf(buf, "%s\n", usb_roles[role]);
-}
-
-static ssize_t role_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct usb_role_switch *sw = to_role_switch(dev);
- int ret;
-
- ret = sysfs_match_string(usb_roles, buf);
- if (ret < 0) {
- bool res;
-
- /* Extra check if the user wants to disable the switch */
- ret = kstrtobool(buf, &res);
- if (ret || res)
- return -EINVAL;
- }
-
- ret = usb_role_switch_set_role(sw, ret);
- if (ret)
- return ret;
-
- return size;
-}
-static DEVICE_ATTR_RW(role);
-
-static struct attribute *usb_role_switch_attrs[] = {
- &dev_attr_role.attr,
- NULL,
-};
-
-static const struct attribute_group usb_role_switch_group = {
- .is_visible = usb_role_switch_is_visible,
- .attrs = usb_role_switch_attrs,
-};
-
-static const struct attribute_group *usb_role_switch_groups[] = {
- &usb_role_switch_group,
- NULL,
-};
-
-static int
-usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
- int ret;
-
- ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
- if (ret)
- dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
-
- return ret;
-}
-
-static void usb_role_switch_release(struct device *dev)
-{
- struct usb_role_switch *sw = to_role_switch(dev);
-
- kfree(sw);
-}
-
-static const struct device_type usb_role_dev_type = {
- .name = "usb_role_switch",
- .groups = usb_role_switch_groups,
- .uevent = usb_role_switch_uevent,
- .release = usb_role_switch_release,
-};
-
-/**
- * usb_role_switch_register - Register USB Role Switch
- * @parent: Parent device for the switch
- * @desc: Description of the switch
- *
- * USB Role Switch is a device capable or choosing the role for USB connector.
- * On platforms where the USB controller is dual-role capable, the controller
- * driver will need to register the switch. On platforms where the USB host and
- * USB device controllers behind the connector are separate, there will be a
- * mux, and the driver for that mux will need to register the switch.
- *
- * Returns handle to a new role switch or ERR_PTR. The content of @desc is
- * copied.
- */
-struct usb_role_switch *
-usb_role_switch_register(struct device *parent,
- const struct usb_role_switch_desc *desc)
-{
- struct usb_role_switch *sw;
- int ret;
-
- if (!desc || !desc->set)
- return ERR_PTR(-EINVAL);
-
- sw = kzalloc(sizeof(*sw), GFP_KERNEL);
- if (!sw)
- return ERR_PTR(-ENOMEM);
-
- mutex_init(&sw->lock);
-
- sw->allow_userspace_control = desc->allow_userspace_control;
- sw->usb2_port = desc->usb2_port;
- sw->usb3_port = desc->usb3_port;
- sw->udc = desc->udc;
- sw->set = desc->set;
- sw->get = desc->get;
-
- sw->dev.parent = parent;
- sw->dev.class = role_class;
- sw->dev.type = &usb_role_dev_type;
- dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
-
- ret = device_register(&sw->dev);
- if (ret) {
- put_device(&sw->dev);
- return ERR_PTR(ret);
- }
-
- /* TODO: Symlinks for the host port and the device controller. */
-
- return sw;
-}
-EXPORT_SYMBOL_GPL(usb_role_switch_register);
-
-/**
- * usb_role_switch_unregister - Unregsiter USB Role Switch
- * @sw: USB Role Switch
- *
- * Unregister switch that was registered with usb_role_switch_register().
- */
-void usb_role_switch_unregister(struct usb_role_switch *sw)
-{
- if (!IS_ERR_OR_NULL(sw))
- device_unregister(&sw->dev);
-}
-EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
-
-static int __init usb_roles_init(void)
-{
- role_class = class_create(THIS_MODULE, "usb_role");
- return PTR_ERR_OR_ZERO(role_class);
-}
-subsys_initcall(usb_roles_init);
-
-static void __exit usb_roles_exit(void)
-{
- class_destroy(role_class);
-}
-module_exit(usb_roles_exit);
-
-MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@xxxxxxxxxxxxxxx>");
-MODULE_AUTHOR("Hans de Goede <hdegoede@xxxxxxxxxx>");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("USB Role Class");
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 3f9bccc95add..c089ffa1f0a8 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -366,7 +366,7 @@ struct dwc2_qh {
u32 desc_list_sz;
u32 *n_bytes;
struct timer_list unreserve_timer;
- struct timer_list wait_timer;
+ struct hrtimer wait_timer;
struct dwc2_tt *dwc_tt;
int ttport;
unsigned tt_buffer_dirty:1;
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index 40839591d2ec..ea3aa640c15c 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -59,7 +59,7 @@
#define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5))
/* If we get a NAK, wait this long before retrying */
-#define DWC2_RETRY_WAIT_DELAY (msecs_to_jiffies(1))
+#define DWC2_RETRY_WAIT_DELAY 1*1E6L
/**
* dwc2_periodic_channel_available() - Checks that a channel is available for a
@@ -1464,10 +1464,12 @@ static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
* qh back to the "inactive" list, then queues transactions.
*
* @t: Pointer to wait_timer in a qh.
+ *
+ * Return: HRTIMER_NORESTART to not automatically restart this timer.
*/
-static void dwc2_wait_timer_fn(struct timer_list *t)
+static enum hrtimer_restart dwc2_wait_timer_fn(struct hrtimer *t)
{
- struct dwc2_qh *qh = from_timer(qh, t, wait_timer);
+ struct dwc2_qh *qh = container_of(t, struct dwc2_qh, wait_timer);
struct dwc2_hsotg *hsotg = qh->hsotg;
unsigned long flags;
@@ -1491,6 +1493,7 @@ static void dwc2_wait_timer_fn(struct timer_list *t)
}
spin_unlock_irqrestore(&hsotg->lock, flags);
+ return HRTIMER_NORESTART;
}
/**
@@ -1521,7 +1524,8 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
/* Initialize QH */
qh->hsotg = hsotg;
timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
- timer_setup(&qh->wait_timer, dwc2_wait_timer_fn, 0);
+ hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ qh->wait_timer.function = &dwc2_wait_timer_fn;
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
@@ -1690,7 +1694,7 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
* won't do anything anyway, but we want it to finish before we free
* memory.
*/
- del_timer_sync(&qh->wait_timer);
+ hrtimer_cancel(&qh->wait_timer);
dwc2_host_put_tt_info(hsotg, qh->dwc_tt);
@@ -1716,6 +1720,7 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
int status;
u32 intr_mask;
+ ktime_t delay;
if (dbg_qh(qh))
dev_vdbg(hsotg->dev, "%s()\n", __func__);
@@ -1734,8 +1739,8 @@ int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
list_add_tail(&qh->qh_list_entry,
&hsotg->non_periodic_sched_waiting);
qh->wait_timer_cancel = false;
- mod_timer(&qh->wait_timer,
- jiffies + DWC2_RETRY_WAIT_DELAY + 1);
+ delay = ktime_set(0, DWC2_RETRY_WAIT_DELAY);
+ hrtimer_start(&qh->wait_timer, delay, HRTIMER_MODE_REL);
} else {
list_add_tail(&qh->qh_list_entry,
&hsotg->non_periodic_sched_inactive);
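
The hrtimer conversion above exists because a jiffies-based timer cannot express the intended 1 ms NAK retry delay on low-HZ configurations; an hrtimer is armed in nanoseconds, and its callback returns HRTIMER_NORESTART since the queue re-arms it explicitly when needed. A self-contained sketch of the same shape, with invented names:

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/types.h>

struct example_qh {
	struct hrtimer wait_timer;
	bool ready;
};

static enum hrtimer_restart example_wait_fn(struct hrtimer *t)
{
	struct example_qh *qh = container_of(t, struct example_qh, wait_timer);

	qh->ready = true;		/* stand-in for kicking the queue */
	return HRTIMER_NORESTART;	/* one-shot; re-armed explicitly */
}

static void example_init_and_arm(struct example_qh *qh)
{
	hrtimer_init(&qh->wait_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	qh->wait_timer.function = example_wait_fn;

	/* 1 ms expressed in nanoseconds, independent of HZ */
	hrtimer_start(&qh->wait_timer, ktime_set(0, NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}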
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 7c1b6938f212..38c813b1d203 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -111,6 +111,7 @@ static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
p->phy_type = DWC2_PHY_TYPE_PARAM_UTMI;
p->ahbcfg = GAHBCFG_HBSTLEN_INCR8 <<
GAHBCFG_HBSTLEN_SHIFT;
+ p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
}
static void dwc2_set_amcc_params(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 842795856bf4..fdc6e4e403e8 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -170,20 +170,20 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc)
* put the gpio descriptors again here because the phy driver
* might want to grab them, too.
*/
- gpio = devm_gpiod_get_optional(&pdev->dev, "cs",
- GPIOD_OUT_LOW);
+ gpio = gpiod_get_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
gpiod_set_value_cansleep(gpio, 1);
+ gpiod_put(gpio);
- gpio = devm_gpiod_get_optional(&pdev->dev, "reset",
- GPIOD_OUT_LOW);
+ gpio = gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpio))
return PTR_ERR(gpio);
if (gpio) {
gpiod_set_value_cansleep(gpio, 1);
+ gpiod_put(gpio);
usleep_range(10000, 11000);
}
}
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 984892dd72f5..42668aeca57c 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1979,6 +1979,8 @@ static int r8a66597_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *hep)
+__acquires(r8a66597->lock)
+__releases(r8a66597->lock)
{
struct r8a66597 *r8a66597 = hcd_to_r8a66597(hcd);
struct r8a66597_pipe *pipe = (struct r8a66597_pipe *)hep->hcpriv;
@@ -1991,13 +1993,14 @@ static void r8a66597_endpoint_disable(struct usb_hcd *hcd,
return;
pipenum = pipe->info.pipenum;
+ spin_lock_irqsave(&r8a66597->lock, flags);
if (pipenum == 0) {
kfree(hep->hcpriv);
hep->hcpriv = NULL;
+ spin_unlock_irqrestore(&r8a66597->lock, flags);
return;
}
- spin_lock_irqsave(&r8a66597->lock, flags);
pipe_stop(r8a66597, pipe);
pipe_irq_disable(r8a66597, pipenum);
disable_irq_empty(r8a66597, pipenum);
diff --git a/drivers/usb/roles/Kconfig b/drivers/usb/roles/Kconfig
index f5a5e6f79f1b..e4194ac94510 100644
--- a/drivers/usb/roles/Kconfig
+++ b/drivers/usb/roles/Kconfig
@@ -1,3 +1,16 @@
+config USB_ROLE_SWITCH
+ tristate "USB Role Switch Support"
+ help
+ USB Role Switch is a device that can select the USB role - host or
+ device - for a USB port (connector). In most cases dual-role capable
+ USB controller will also represent the switch, but on some platforms
+ multiplexer/demultiplexer switch is used to route the data lines on
+ the USB connector between separate USB host and device controllers.
+
+ Say Y here if your USB connectors support both device and host roles.
+ To compile the driver as module, choose M here: the module will be
+ called roles.ko.
+
if USB_ROLE_SWITCH
config USB_ROLES_INTEL_XHCI
diff --git a/drivers/usb/roles/Makefile b/drivers/usb/roles/Makefile
index e44b179ba275..c02873206fc1 100644
--- a/drivers/usb/roles/Makefile
+++ b/drivers/usb/roles/Makefile
@@ -1 +1,3 @@
-obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
+obj-$(CONFIG_USB_ROLE_SWITCH) += roles.o
+roles-y := class.o
+obj-$(CONFIG_USB_ROLES_INTEL_XHCI) += intel-xhci-usb-role-switch.o
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
new file mode 100644
index 000000000000..99116af07f1d
--- /dev/null
+++ b/drivers/usb/roles/class.c
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * USB Role Switch Support
+ *
+ * Copyright (C) 2018 Intel Corporation
+ * Author: Heikki Krogerus <heikki.krogerus@xxxxxxxxxxxxxxx>
+ * Hans de Goede <hdegoede@xxxxxxxxxx>
+ */
+
+#include <linux/usb/role.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+static struct class *role_class;
+
+struct usb_role_switch {
+ struct device dev;
+ struct mutex lock; /* device lock*/
+ enum usb_role role;
+
+ /* From descriptor */
+ struct device *usb2_port;
+ struct device *usb3_port;
+ struct device *udc;
+ usb_role_switch_set_t set;
+ usb_role_switch_get_t get;
+ bool allow_userspace_control;
+};
+
+#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
+
+/**
+ * usb_role_switch_set_role - Set USB role for a switch
+ * @sw: USB role switch
+ * @role: USB role to be switched to
+ *
+ * Set USB role @role for @sw.
+ */
+int usb_role_switch_set_role(struct usb_role_switch *sw, enum usb_role role)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(sw))
+ return 0;
+
+ mutex_lock(&sw->lock);
+
+ ret = sw->set(sw->dev.parent, role);
+ if (!ret)
+ sw->role = role;
+
+ mutex_unlock(&sw->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_set_role);
+
+/**
+ * usb_role_switch_get_role - Get the USB role for a switch
+ * @sw: USB role switch
+ *
+ * Depending on the role-switch-driver this function returns either a cached
+ * value of the last set role, or reads back the actual value from the hardware.
+ */
+enum usb_role usb_role_switch_get_role(struct usb_role_switch *sw)
+{
+ enum usb_role role;
+
+ if (IS_ERR_OR_NULL(sw))
+ return USB_ROLE_NONE;
+
+ mutex_lock(&sw->lock);
+
+ if (sw->get)
+ role = sw->get(sw->dev.parent);
+ else
+ role = sw->role;
+
+ mutex_unlock(&sw->lock);
+
+ return role;
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_get_role);
+
+static int __switch_match(struct device *dev, const void *name)
+{
+ return !strcmp((const char *)name, dev_name(dev));
+}
+
+static void *usb_role_switch_match(struct device_connection *con, int ep,
+ void *data)
+{
+ struct device *dev;
+
+ dev = class_find_device(role_class, NULL, con->endpoint[ep],
+ __switch_match);
+
+ return dev ? to_role_switch(dev) : ERR_PTR(-EPROBE_DEFER);
+}
+
+/**
+ * usb_role_switch_get - Find USB role switch linked with the caller
+ * @dev: The caller device
+ *
+ * Finds and returns role switch linked with @dev. The reference count for the
+ * found switch is incremented.
+ */
+struct usb_role_switch *usb_role_switch_get(struct device *dev)
+{
+ struct usb_role_switch *sw;
+
+ sw = device_connection_find_match(dev, "usb-role-switch", NULL,
+ usb_role_switch_match);
+
+ if (!IS_ERR_OR_NULL(sw))
+ WARN_ON(!try_module_get(sw->dev.parent->driver->owner));
+
+ return sw;
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_get);
+
+/**
+ * usb_role_switch_put - Release handle to a switch
+ * @sw: USB Role Switch
+ *
+ * Decrement reference count for @sw.
+ */
+void usb_role_switch_put(struct usb_role_switch *sw)
+{
+ if (!IS_ERR_OR_NULL(sw)) {
+ put_device(&sw->dev);
+ module_put(sw->dev.parent->driver->owner);
+ }
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_put);
+
+static umode_t
+usb_role_switch_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ struct device *dev = container_of(kobj, typeof(*dev), kobj);
+ struct usb_role_switch *sw = to_role_switch(dev);
+
+ if (sw->allow_userspace_control)
+ return attr->mode;
+
+ return 0;
+}
+
+static const char * const usb_roles[] = {
+ [USB_ROLE_NONE] = "none",
+ [USB_ROLE_HOST] = "host",
+ [USB_ROLE_DEVICE] = "device",
+};
+
+static ssize_t
+role_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct usb_role_switch *sw = to_role_switch(dev);
+ enum usb_role role = usb_role_switch_get_role(sw);
+
+ return sprintf(buf, "%s\n", usb_roles[role]);
+}
+
+static ssize_t role_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct usb_role_switch *sw = to_role_switch(dev);
+ int ret;
+
+ ret = sysfs_match_string(usb_roles, buf);
+ if (ret < 0) {
+ bool res;
+
+ /* Extra check if the user wants to disable the switch */
+ ret = kstrtobool(buf, &res);
+ if (ret || res)
+ return -EINVAL;
+ }
+
+ ret = usb_role_switch_set_role(sw, ret);
+ if (ret)
+ return ret;
+
+ return size;
+}
+static DEVICE_ATTR_RW(role);
+
+static struct attribute *usb_role_switch_attrs[] = {
+ &dev_attr_role.attr,
+ NULL,
+};
+
+static const struct attribute_group usb_role_switch_group = {
+ .is_visible = usb_role_switch_is_visible,
+ .attrs = usb_role_switch_attrs,
+};
+
+static const struct attribute_group *usb_role_switch_groups[] = {
+ &usb_role_switch_group,
+ NULL,
+};
+
+static int
+usb_role_switch_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int ret;
+
+ ret = add_uevent_var(env, "USB_ROLE_SWITCH=%s", dev_name(dev));
+ if (ret)
+ dev_err(dev, "failed to add uevent USB_ROLE_SWITCH\n");
+
+ return ret;
+}
+
+static void usb_role_switch_release(struct device *dev)
+{
+ struct usb_role_switch *sw = to_role_switch(dev);
+
+ kfree(sw);
+}
+
+static const struct device_type usb_role_dev_type = {
+ .name = "usb_role_switch",
+ .groups = usb_role_switch_groups,
+ .uevent = usb_role_switch_uevent,
+ .release = usb_role_switch_release,
+};
+
+/**
+ * usb_role_switch_register - Register USB Role Switch
+ * @parent: Parent device for the switch
+ * @desc: Description of the switch
+ *
+ * USB Role Switch is a device capable or choosing the role for USB connector.
+ * On platforms where the USB controller is dual-role capable, the controller
+ * driver will need to register the switch. On platforms where the USB host and
+ * USB device controllers behind the connector are separate, there will be a
+ * mux, and the driver for that mux will need to register the switch.
+ *
+ * Returns handle to a new role switch or ERR_PTR. The content of @desc is
+ * copied.
+ */
+struct usb_role_switch *
+usb_role_switch_register(struct device *parent,
+ const struct usb_role_switch_desc *desc)
+{
+ struct usb_role_switch *sw;
+ int ret;
+
+ if (!desc || !desc->set)
+ return ERR_PTR(-EINVAL);
+
+ sw = kzalloc(sizeof(*sw), GFP_KERNEL);
+ if (!sw)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_init(&sw->lock);
+
+ sw->allow_userspace_control = desc->allow_userspace_control;
+ sw->usb2_port = desc->usb2_port;
+ sw->usb3_port = desc->usb3_port;
+ sw->udc = desc->udc;
+ sw->set = desc->set;
+ sw->get = desc->get;
+
+ sw->dev.parent = parent;
+ sw->dev.class = role_class;
+ sw->dev.type = &usb_role_dev_type;
+ dev_set_name(&sw->dev, "%s-role-switch", dev_name(parent));
+
+ ret = device_register(&sw->dev);
+ if (ret) {
+ put_device(&sw->dev);
+ return ERR_PTR(ret);
+ }
+
+ /* TODO: Symlinks for the host port and the device controller. */
+
+ return sw;
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_register);
+
+/**
+ * usb_role_switch_unregister - Unregsiter USB Role Switch
+ * @sw: USB Role Switch
+ *
+ * Unregister switch that was registered with usb_role_switch_register().
+ */
+void usb_role_switch_unregister(struct usb_role_switch *sw)
+{
+ if (!IS_ERR_OR_NULL(sw))
+ device_unregister(&sw->dev);
+}
+EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
+
+static int __init usb_roles_init(void)
+{
+ role_class = class_create(THIS_MODULE, "usb_role");
+ return PTR_ERR_OR_ZERO(role_class);
+}
+subsys_initcall(usb_roles_init);
+
+static void __exit usb_roles_exit(void)
+{
+ class_destroy(role_class);
+}
+module_exit(usb_roles_exit);
+
+MODULE_AUTHOR("Heikki Krogerus <heikki.krogerus@xxxxxxxxxxxxxxx>");
+MODULE_AUTHOR("Hans de Goede <hdegoede@xxxxxxxxxx>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("USB Role Class");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1ce27f3ff7a7..aef15497ff31 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1955,6 +1955,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0xa31d, 0xff, 0x06, 0x1b) },
{ USB_DEVICE(0x1508, 0x1001), /* Fibocom NL668 */
.driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
+ { USB_DEVICE(0x2cb7, 0x0104), /* Fibocom NL678 series */
+ .driver_info = RSVD(4) | RSVD(5) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
+ .driver_info = RSVD(6) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index a4e0d13fc121..98e7a5df0f6d 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -91,9 +91,14 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(YCCABLE_VENDOR_ID, YCCABLE_PRODUCT_ID) },
{ USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD220TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LD960_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LD960TA_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM220_PRODUCT_ID) },
{ USB_DEVICE(HP_VENDOR_ID, HP_LCM960_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM920_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_LM940_PRODUCT_ID) },
+ { USB_DEVICE(HP_VENDOR_ID, HP_TD620_PRODUCT_ID) },
{ USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) },
{ USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) },
{ USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 26965cc23c17..4e2554d55362 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -119,10 +119,15 @@
/* Hewlett-Packard POS Pole Displays */
#define HP_VENDOR_ID 0x03f0
+#define HP_LM920_PRODUCT_ID 0x026b
+#define HP_TD620_PRODUCT_ID 0x0956
#define HP_LD960_PRODUCT_ID 0x0b39
#define HP_LCM220_PRODUCT_ID 0x3139
#define HP_LCM960_PRODUCT_ID 0x3239
#define HP_LD220_PRODUCT_ID 0x3524
+#define HP_LD220TA_PRODUCT_ID 0x4349
+#define HP_LD960TA_PRODUCT_ID 0x4439
+#define HP_LM940_PRODUCT_ID 0x5039
/* Cressi Edy (diving computer) PC interface */
#define CRESSI_VENDOR_ID 0x04b8
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 97d91e55b70a..a0e230b31a88 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -146,6 +146,12 @@ struct btrfs_inode {
*/
u64 last_unlink_trans;
+ /*
+ * Track the transaction id of the last transaction used to create a
+ * hard link for the inode. This is used by the log tree (fsync).
+ */
+ u64 last_link_trans;
+
/*
* Number of bytes outstanding that are going to need csums. This is
* used in ENOSPC accounting.
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 539901fb5165..99e7645ad94e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2584,14 +2584,27 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
root_lock = BTRFS_READ_LOCK;
if (p->search_commit_root) {
- /* The commit roots are read only so we always do read locks */
- if (p->need_commit_sem)
+ /*
+ * The commit roots are read only so we always do read locks,
+ * and we always must hold the commit_root_sem when doing
+ * searches on them, the only exception is send where we don't
+ * want to block transaction commits for a long time, so
+ * we need to clone the commit root in order to avoid races
+ * with transaction commits that create a snapshot of one of
+ * the roots used by a send operation.
+ */
+ if (p->need_commit_sem) {
down_read(&fs_info->commit_root_sem);
- b = root->commit_root;
- extent_buffer_get(b);
- level = btrfs_header_level(b);
- if (p->need_commit_sem)
+ b = btrfs_clone_extent_buffer(root->commit_root);
up_read(&fs_info->commit_root_sem);
+ if (!b)
+ return ERR_PTR(-ENOMEM);
+
+ } else {
+ b = root->commit_root;
+ extent_buffer_get(b);
+ }
+ level = btrfs_header_level(b);
/*
* Ensure that all callers have set skip_locking when
* p->search_commit_root = 1.
@@ -2717,6 +2730,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
again:
prev_cmp = -1;
b = btrfs_search_slot_get_root(root, p, write_lock_level);
+ if (IS_ERR(b)) {
+ ret = PTR_ERR(b);
+ goto done;
+ }
while (b) {
level = btrfs_header_level(b);
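
A minimal sketch of the ERR_PTR convention the ctree.c hunk above switches to, so btrfs_search_slot() can tell a real buffer from a failed clone (not part of the patch; struct foo and the helpers are illustrative):

#include <linux/err.h>
#include <linux/slab.h>

struct foo {
	int value;
};

/* Illustrative: return a valid object or an encoded errno, never NULL. */
static struct foo *foo_alloc(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return ERR_PTR(-ENOMEM);	/* errno encoded in the pointer */
	f->value = 0;
	return f;
}

static int foo_use(void)
{
	struct foo *f = foo_alloc();

	if (IS_ERR(f))
		return PTR_ERR(f);		/* decode and propagate the errno */
	/* ... use f ... */
	kfree(f);
	return 0;
}

The caller-side change in btrfs_search_slot() is exactly this shape: check IS_ERR(b), turn it back into an int with PTR_ERR(b), and bail out.
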
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 2aa48aecc52b..329d3afcf304 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -884,6 +884,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
"cannot continue dev_replace, tgtdev is missing");
btrfs_info(fs_info,
"you may cancel the operation after 'mount -o degraded'");
+ dev_replace->replace_state =
+ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
btrfs_dev_replace_write_unlock(dev_replace);
return 0;
}
@@ -895,6 +897,10 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
* dev-replace to start anyway.
*/
if (test_and_set_bit(BTRFS_FS_EXCL_OP, &fs_info->flags)) {
+ btrfs_dev_replace_write_lock(dev_replace);
+ dev_replace->replace_state =
+ BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
+ btrfs_dev_replace_write_unlock(dev_replace);
btrfs_info(fs_info,
"cannot resume dev-replace, other exclusive operation running");
return 0;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a1febf155747..fe1fef3d7eed 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -8944,6 +8944,10 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
goto out_free;
}
+ err = btrfs_run_delayed_items(trans);
+ if (err)
+ goto out_end_trans;
+
if (block_rsv)
trans->block_rsv = block_rsv;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9ea4c6f0352f..423281c19fad 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1372,7 +1372,8 @@ static noinline int run_delalloc_nocow(struct inode *inode,
* Do the same check as in btrfs_cross_ref_exist but
* without the unnecessary search.
*/
- if (btrfs_file_extent_generation(leaf, fi) <=
+ if (!nolock &&
+ btrfs_file_extent_generation(leaf, fi) <=
btrfs_root_last_snapshot(&root->root_item))
goto out_check;
if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
@@ -3686,6 +3687,21 @@ static int btrfs_read_locked_inode(struct inode *inode,
* inode is not a directory, logging its parent unnecessarily.
*/
BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;
+ /*
+ * Similar reasoning for last_link_trans, needs to be set otherwise
+ * for a case like the following:
+ *
+ * mkdir A
+ * touch foo
+ * ln foo A/bar
+ * echo 2 > /proc/sys/vm/drop_caches
+ * fsync foo
+ * <power failure>
+ *
+ * Would result in link bar and directory A not existing after the power
+ * failure.
+ */
+ BTRFS_I(inode)->last_link_trans = BTRFS_I(inode)->last_trans;
path->slots[0]++;
if (inode->i_nlink != 1 ||
@@ -6625,6 +6641,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
if (err)
goto fail;
}
+ BTRFS_I(inode)->last_link_trans = trans->transid;
d_instantiate(dentry, inode);
ret = btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent,
true, NULL);
@@ -9157,6 +9174,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
ei->index_cnt = (u64)-1;
ei->dir_index = 0;
ei->last_unlink_trans = 0;
+ ei->last_link_trans = 0;
ei->last_log_commit = 0;
spin_lock_init(&ei->lock);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 902819d3cf41..bbd1b36f4918 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -322,6 +322,7 @@ static struct full_stripe_lock *insert_full_stripe_lock(
struct rb_node *parent = NULL;
struct full_stripe_lock *entry;
struct full_stripe_lock *ret;
+ unsigned int nofs_flag;
lockdep_assert_held(&locks_root->lock);
@@ -339,8 +340,17 @@ static struct full_stripe_lock *insert_full_stripe_lock(
}
}
- /* Insert new lock */
+ /*
+ * Insert new lock.
+ *
+ * We must use GFP_NOFS because the scrub task might be waiting for a
+ * worker task executing this function and in turn a transaction commit
+ * might be waiting for the scrub task to pause (which needs to wait for all
+ * the worker tasks to complete before pausing).
+ */
+ nofs_flag = memalloc_nofs_save();
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+ memalloc_nofs_restore(nofs_flag);
if (!ret)
return ERR_PTR(-ENOMEM);
ret->logical = fstripe_logical;
@@ -1620,8 +1630,19 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
mutex_lock(&sctx->wr_lock);
again:
if (!sctx->wr_curr_bio) {
+ unsigned int nofs_flag;
+
+ /*
+ * We must use GFP_NOFS because the scrub task might be waiting
+ * for a worker task executing this function and in turn a
+ * transaction commit might be waiting for the scrub task to pause
+ * (which needs to wait for all the worker tasks to complete
+ * before pausing).
+ */
+ nofs_flag = memalloc_nofs_save();
sctx->wr_curr_bio = kzalloc(sizeof(*sctx->wr_curr_bio),
GFP_KERNEL);
+ memalloc_nofs_restore(nofs_flag);
if (!sctx->wr_curr_bio) {
mutex_unlock(&sctx->wr_lock);
return -ENOMEM;
@@ -3772,6 +3793,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
struct scrub_ctx *sctx;
int ret;
struct btrfs_device *dev;
+ unsigned int nofs_flag;
if (btrfs_fs_closing(fs_info))
return -EINVAL;
@@ -3875,6 +3897,16 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
atomic_inc(&fs_info->scrubs_running);
mutex_unlock(&fs_info->scrub_lock);
+ /*
+ * In order to avoid deadlock with reclaim when there is a transaction
+ * trying to pause scrub, make sure we use GFP_NOFS for all the
+ * allocations done at btrfs_scrub_pages() and scrub_pages_for_parity()
+ * invoked by our callees. The pausing request is done when the
+ * transaction commit starts, and it blocks the transaction until scrub
+ * is paused (done at specific points at scrub_stripe() or right above
+ * before incrementing fs_info->scrubs_running).
+ */
+ nofs_flag = memalloc_nofs_save();
if (!is_dev_replace) {
/*
* by holding device list mutex, we can
@@ -3887,6 +3919,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
if (!ret)
ret = scrub_enumerate_chunks(sctx, dev, start, end);
+ memalloc_nofs_restore(nofs_flag);
wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
atomic_dec(&fs_info->scrubs_running);
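
The scrub hunks above rely on the scoped-allocation API from <linux/sched/mm.h>: between memalloc_nofs_save() and memalloc_nofs_restore(), allocations requested with GFP_KERNEL are implicitly treated as GFP_NOFS, so they cannot recurse into the filesystem and deadlock against a transaction commit waiting for scrub to pause. A minimal sketch of the pattern (the wrapper function is illustrative, not from the patch):

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *alloc_in_fs_context(size_t len)
{
	unsigned int nofs_flag;
	void *p;

	/*
	 * Everything allocated between save and restore behaves as if it
	 * had been requested with GFP_NOFS, even though GFP_KERNEL is passed.
	 */
	nofs_flag = memalloc_nofs_save();
	p = kmalloc(len, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	return p;
}
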
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a5ce99a6c936..15d2914f0a67 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -5778,6 +5778,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
goto end_trans;
}
+ /*
+ * If a new hard link was added to the inode in the current transaction
+ * and its link count is now greater than 1, we need to fallback to a
+ * transaction commit, otherwise we can end up not logging all its new
+ * parents for all the hard links. Here just from the dentry used to
+ * fsync, we can not visit the ancestor inodes for all the other hard
+ * links to figure out if any is new, so we fallback to a transaction
+ * commit (instead of adding a lot of complexity of scanning a btree,
+ * since this scenario is not a common use case).
+ */
+ if (inode->vfs_inode.i_nlink > 1 &&
+ inode->last_link_trans > last_committed) {
+ ret = -EMLINK;
+ goto end_trans;
+ }
+
while (1) {
if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
break;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index c9bc56b1baac..c23bf9da93d2 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2617,11 +2617,13 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
if (rc)
break;
+ cur_len = min_t(const size_t, len, wsize);
+
if (ctx->direct_io) {
ssize_t result;
result = iov_iter_get_pages_alloc(
- from, &pagevec, wsize, &start);
+ from, &pagevec, cur_len, &start);
if (result < 0) {
cifs_dbg(VFS,
"direct_writev couldn't get user pages "
@@ -2630,6 +2632,9 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
result, from->type,
from->iov_offset, from->count);
dump_stack();
+
+ rc = result;
+ add_credits_and_wake_if(server, credits, 0);
break;
}
cur_len = (size_t)result;
@@ -3313,13 +3318,16 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
cur_len, &start);
if (result < 0) {
cifs_dbg(VFS,
- "couldn't get user pages (cur_len=%zd)"
+ "couldn't get user pages (rc=%zd)"
" iter type %d"
" iov_offset %zd count %zd\n",
result, direct_iov.type,
direct_iov.iov_offset,
direct_iov.count);
dump_stack();
+
+ rc = result;
+ add_credits_and_wake_if(server, credits, 0);
break;
}
cur_len = (size_t)result;
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c
index d47b7f5dfa6c..924269cec135 100644
--- a/fs/cifs/smb2maperror.c
+++ b/fs/cifs/smb2maperror.c
@@ -379,8 +379,8 @@ static const struct status_to_posix_error smb2_error_map_table[] = {
{STATUS_NONEXISTENT_EA_ENTRY, -EIO, "STATUS_NONEXISTENT_EA_ENTRY"},
{STATUS_NO_EAS_ON_FILE, -ENODATA, "STATUS_NO_EAS_ON_FILE"},
{STATUS_EA_CORRUPT_ERROR, -EIO, "STATUS_EA_CORRUPT_ERROR"},
- {STATUS_FILE_LOCK_CONFLICT, -EIO, "STATUS_FILE_LOCK_CONFLICT"},
- {STATUS_LOCK_NOT_GRANTED, -EIO, "STATUS_LOCK_NOT_GRANTED"},
+ {STATUS_FILE_LOCK_CONFLICT, -EACCES, "STATUS_FILE_LOCK_CONFLICT"},
+ {STATUS_LOCK_NOT_GRANTED, -EACCES, "STATUS_LOCK_NOT_GRANTED"},
{STATUS_DELETE_PENDING, -ENOENT, "STATUS_DELETE_PENDING"},
{STATUS_CTL_FILE_NOT_SUPPORTED, -ENOSYS,
"STATUS_CTL_FILE_NOT_SUPPORTED"},
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index e25c7aade98a..391b40e91910 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -3384,8 +3384,10 @@ smb3_receive_transform(struct TCP_Server_Info *server,
}
/* TODO: add support for compounds containing READ. */
- if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server))
+ if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server)) {
+ *num_mids = 1;
return receive_encrypted_read(server, &mids[0]);
+ }
return receive_encrypted_standard(server, mids, bufs, num_mids);
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 3f89d0ab08fc..185a05d3257e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2454,8 +2454,19 @@ int do_journal_get_write_access(handle_t *handle,
#define FALL_BACK_TO_NONDELALLOC 1
#define CONVERT_INLINE_DATA 2
-extern struct inode *ext4_iget(struct super_block *, unsigned long);
-extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
+typedef enum {
+ EXT4_IGET_NORMAL = 0,
+ EXT4_IGET_SPECIAL = 0x0001, /* OK to iget a system inode */
+ EXT4_IGET_HANDLE = 0x0002 /* Inode # is from a handle */
+} ext4_iget_flags;
+
+extern struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ ext4_iget_flags flags, const char *function,
+ unsigned int line);
+
+#define ext4_iget(sb, ino, flags) \
+ __ext4_iget((sb), (ino), (flags), __func__, __LINE__)
+
extern int ext4_write_inode(struct inode *, struct writeback_control *);
extern int ext4_setattr(struct dentry *, struct iattr *);
extern int ext4_getattr(const struct path *, struct kstat *, u32, unsigned int);
@@ -2538,6 +2549,8 @@ extern int ext4_group_extend(struct super_block *sb,
extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
/* super.c */
+extern struct buffer_head *ext4_sb_bread(struct super_block *sb,
+ sector_t block, int op_flags);
extern int ext4_seq_options_show(struct seq_file *seq, void *offset);
extern int ext4_calculate_overhead(struct super_block *sb);
extern void ext4_superblock_csum_set(struct super_block *sb);
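
The ext4_sb_bread() declared here (and defined in the super.c hunk further down) returns ERR_PTR(-ENOMEM) or ERR_PTR(-EIO) instead of NULL, so callers can finally tell the two failures apart. The many sb_bread() conversions in the ext4 files below all follow the same shape; roughly (illustrative caller, not from the patch):

#include <linux/err.h>
#include <linux/buffer_head.h>
#include "ext4.h"

/* Illustrative caller, not from the patch. */
static int read_one_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh;

	bh = ext4_sb_bread(sb, block, REQ_PRIO);
	if (IS_ERR(bh))
		return PTR_ERR(bh);	/* -ENOMEM and -EIO are now distinguishable */
	/* ... use bh->b_data ... */
	brelse(bh);
	return 0;
}
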
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 014f6a698cb7..7ff14a1adba3 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1225,7 +1225,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
if (!ext4_test_bit(bit, bitmap_bh->b_data))
goto bad_orphan;
- inode = ext4_iget(sb, ino);
+ inode = ext4_iget(sb, ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
ext4_error(sb, "couldn't read orphan inode %lu (err %d)",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 9c4bac18cc6c..27373d88b5f0 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -705,8 +705,11 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
if (!PageUptodate(page)) {
ret = ext4_read_inline_page(inode, page);
- if (ret < 0)
+ if (ret < 0) {
+ unlock_page(page);
+ put_page(page);
goto out_up_read;
+ }
}
ret = 1;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 22a9d8159720..9affabd07682 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4817,7 +4817,9 @@ static inline u64 ext4_inode_peek_iversion(const struct inode *inode)
return inode_peek_iversion(inode);
}
-struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
+struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
+ ext4_iget_flags flags, const char *function,
+ unsigned int line)
{
struct ext4_iloc iloc;
struct ext4_inode *raw_inode;
@@ -4831,6 +4833,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
gid_t i_gid;
projid_t i_projid;
+ if (((flags & EXT4_IGET_NORMAL) &&
+ (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)) ||
+ (ino < EXT4_ROOT_INO) ||
+ (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))) {
+ if (flags & EXT4_IGET_HANDLE)
+ return ERR_PTR(-ESTALE);
+ __ext4_error(sb, function, line,
+ "inode #%lu: comm %s: iget: illegal inode #",
+ ino, current->comm);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+
inode = iget_locked(sb, ino);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -4846,18 +4860,26 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
raw_inode = ext4_raw_inode(&iloc);
if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
- EXT4_ERROR_INODE(inode, "root inode unallocated");
+ ext4_error_inode(inode, function, line, 0,
+ "iget: root inode unallocated");
ret = -EFSCORRUPTED;
goto bad_inode;
}
+ if ((flags & EXT4_IGET_HANDLE) &&
+ (raw_inode->i_links_count == 0) && (raw_inode->i_mode == 0)) {
+ ret = -ESTALE;
+ goto bad_inode;
+ }
+
if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
EXT4_INODE_SIZE(inode->i_sb) ||
(ei->i_extra_isize & 3)) {
- EXT4_ERROR_INODE(inode,
- "bad extra_isize %u (inode size %u)",
+ ext4_error_inode(inode, function, line, 0,
+ "iget: bad extra_isize %u "
+ "(inode size %u)",
ei->i_extra_isize,
EXT4_INODE_SIZE(inode->i_sb));
ret = -EFSCORRUPTED;
@@ -4879,7 +4901,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
}
if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
- EXT4_ERROR_INODE(inode, "checksum invalid");
+ ext4_error_inode(inode, function, line, 0,
+ "iget: checksum invalid");
ret = -EFSBADCRC;
goto bad_inode;
}
@@ -4936,7 +4959,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
inode->i_size = ext4_isize(sb, raw_inode);
if ((size = i_size_read(inode)) < 0) {
- EXT4_ERROR_INODE(inode, "bad i_size value: %lld", size);
+ ext4_error_inode(inode, function, line, 0,
+ "iget: bad i_size value: %lld", size);
ret = -EFSCORRUPTED;
goto bad_inode;
}
@@ -5012,7 +5036,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
ret = 0;
if (ei->i_file_acl &&
!ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
- EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
+ ext4_error_inode(inode, function, line, 0,
+ "iget: bad extended attribute block %llu",
ei->i_file_acl);
ret = -EFSCORRUPTED;
goto bad_inode;
@@ -5040,8 +5065,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
} else if (S_ISLNK(inode->i_mode)) {
/* VFS does not allow setting these so must be corruption */
if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
- EXT4_ERROR_INODE(inode,
- "immutable or append flags not allowed on symlinks");
+ ext4_error_inode(inode, function, line, 0,
+ "iget: immutable or append flags "
+ "not allowed on symlinks");
ret = -EFSCORRUPTED;
goto bad_inode;
}
@@ -5071,7 +5097,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
make_bad_inode(inode);
} else {
ret = -EFSCORRUPTED;
- EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
+ ext4_error_inode(inode, function, line, 0,
+ "iget: bogus i_mode (%o)", inode->i_mode);
goto bad_inode;
}
brelse(iloc.bh);
@@ -5085,13 +5112,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
return ERR_PTR(ret);
}
-struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
-{
- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
- return ERR_PTR(-EFSCORRUPTED);
- return ext4_iget(sb, ino);
-}
-
static int ext4_inode_blocks_set(handle_t *handle,
struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
@@ -5380,9 +5400,13 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
{
int err;
- if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
+ if (WARN_ON_ONCE(current->flags & PF_MEMALLOC) ||
+ sb_rdonly(inode->i_sb))
return 0;
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ return -EIO;
+
if (EXT4_SB(inode->i_sb)->s_journal) {
if (ext4_journal_current_handle()) {
jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
@@ -5398,7 +5422,8 @@ int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
if (wbc->sync_mode != WB_SYNC_ALL || wbc->for_sync)
return 0;
- err = ext4_force_commit(inode->i_sb);
+ err = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal,
+ EXT4_I(inode)->i_sync_tid);
} else {
struct ext4_iloc iloc;
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0edee31913d1..d37dafa1d133 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -125,7 +125,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
!inode_owner_or_capable(inode) || !capable(CAP_SYS_ADMIN))
return -EPERM;
- inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO);
+ inode_bl = ext4_iget(sb, EXT4_BOOT_LOADER_INO, EXT4_IGET_SPECIAL);
if (IS_ERR(inode_bl))
return PTR_ERR(inode_bl);
ei_bl = EXT4_I(inode_bl);
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 61a9d1927817..a98bfca9c463 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -116,9 +116,9 @@ static int update_ind_extent_range(handle_t *handle, struct inode *inode,
int i, retval = 0;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, pblock);
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
i_data = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -145,9 +145,9 @@ static int update_dind_extent_range(handle_t *handle, struct inode *inode,
int i, retval = 0;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, pblock);
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
i_data = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -175,9 +175,9 @@ static int update_tind_extent_range(handle_t *handle, struct inode *inode,
int i, retval = 0;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, pblock);
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, pblock, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
i_data = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -224,9 +224,9 @@ static int free_dind_blocks(handle_t *handle,
struct buffer_head *bh;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
tmp_idata = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -254,9 +254,9 @@ static int free_tind_blocks(handle_t *handle,
struct buffer_head *bh;
unsigned long max_entries = inode->i_sb->s_blocksize >> 2;
- bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, le32_to_cpu(i_data), 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
tmp_idata = (__le32 *)bh->b_data;
for (i = 0; i < max_entries; i++) {
@@ -382,9 +382,9 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
struct ext4_extent_header *eh;
block = ext4_idx_pblock(ix);
- bh = sb_bread(inode->i_sb, block);
- if (!bh)
- return -EIO;
+ bh = ext4_sb_bread(inode->i_sb, block, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
eh = (struct ext4_extent_header *)bh->b_data;
if (eh->eh_depth != 0) {
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 437f71fe83ae..2b928eb07fa2 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1571,7 +1571,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
dentry);
return ERR_PTR(-EFSCORRUPTED);
}
- inode = ext4_iget_normal(dir->i_sb, ino);
+ inode = ext4_iget(dir->i_sb, ino, EXT4_IGET_NORMAL);
if (inode == ERR_PTR(-ESTALE)) {
EXT4_ERROR_INODE(dir,
"deleted inode referenced: %u",
@@ -1613,7 +1613,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
return ERR_PTR(-EFSCORRUPTED);
}
- return d_obtain_alias(ext4_iget_normal(child->d_sb, ino));
+ return d_obtain_alias(ext4_iget(child->d_sb, ino, EXT4_IGET_NORMAL));
}
/*
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index a5efee34415f..48421de803b7 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -127,10 +127,12 @@ static int verify_group_input(struct super_block *sb,
else if (free_blocks_count < 0)
ext4_warning(sb, "Bad blocks count %u",
input->blocks_count);
- else if (!(bh = sb_bread(sb, end - 1)))
+ else if (IS_ERR(bh = ext4_sb_bread(sb, end - 1, 0))) {
+ err = PTR_ERR(bh);
+ bh = NULL;
ext4_warning(sb, "Cannot read last block (%llu)",
end - 1);
- else if (outside(input->block_bitmap, start, end))
+ } else if (outside(input->block_bitmap, start, end))
ext4_warning(sb, "Block bitmap not in group (block %llu)",
(unsigned long long)input->block_bitmap);
else if (outside(input->inode_bitmap, start, end))
@@ -781,11 +783,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
unsigned long gdb_num = group / EXT4_DESC_PER_BLOCK(sb);
ext4_fsblk_t gdblock = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + gdb_num;
- struct buffer_head **o_group_desc, **n_group_desc;
- struct buffer_head *dind;
- struct buffer_head *gdb_bh;
+ struct buffer_head **o_group_desc, **n_group_desc = NULL;
+ struct buffer_head *dind = NULL;
+ struct buffer_head *gdb_bh = NULL;
int gdbackups;
- struct ext4_iloc iloc;
+ struct ext4_iloc iloc = { .bh = NULL };
__le32 *data;
int err;
@@ -794,21 +796,22 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
"EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
gdb_num);
- gdb_bh = sb_bread(sb, gdblock);
- if (!gdb_bh)
- return -EIO;
+ gdb_bh = ext4_sb_bread(sb, gdblock, 0);
+ if (IS_ERR(gdb_bh))
+ return PTR_ERR(gdb_bh);
gdbackups = verify_reserved_gdb(sb, group, gdb_bh);
if (gdbackups < 0) {
err = gdbackups;
- goto exit_bh;
+ goto errout;
}
data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
- dind = sb_bread(sb, le32_to_cpu(*data));
- if (!dind) {
- err = -EIO;
- goto exit_bh;
+ dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
+ if (IS_ERR(dind)) {
+ err = PTR_ERR(dind);
+ dind = NULL;
+ goto errout;
}
data = (__le32 *)dind->b_data;
@@ -816,18 +819,18 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
ext4_warning(sb, "new group %u GDT block %llu not reserved",
group, gdblock);
err = -EINVAL;
- goto exit_dind;
+ goto errout;
}
BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
if (unlikely(err))
- goto exit_dind;
+ goto errout;
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
if (unlikely(err))
- goto exit_dind;
+ goto errout;
BUFFER_TRACE(dind, "get_write_access");
err = ext4_journal_get_write_access(handle, dind);
@@ -837,7 +840,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
/* ext4_reserve_inode_write() gets a reference on the iloc */
err = ext4_reserve_inode_write(handle, inode, &iloc);
if (unlikely(err))
- goto exit_dind;
+ goto errout;
n_group_desc = ext4_kvmalloc((gdb_num + 1) *
sizeof(struct buffer_head *),
@@ -846,7 +849,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = -ENOMEM;
ext4_warning(sb, "not enough memory for %lu groups",
gdb_num + 1);
- goto exit_inode;
+ goto errout;
}
/*
@@ -862,7 +865,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_handle_dirty_metadata(handle, NULL, dind);
if (unlikely(err)) {
ext4_std_error(sb, err);
- goto exit_inode;
+ goto errout;
}
inode->i_blocks -= (gdbackups + 1) * sb->s_blocksize >>
(9 - EXT4_SB(sb)->s_cluster_bits);
@@ -871,8 +874,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
if (unlikely(err)) {
ext4_std_error(sb, err);
- iloc.bh = NULL;
- goto exit_inode;
+ goto errout;
}
brelse(dind);
@@ -888,15 +890,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
err = ext4_handle_dirty_super(handle, sb);
if (err)
ext4_std_error(sb, err);
-
return err;
-
-exit_inode:
+errout:
kvfree(n_group_desc);
brelse(iloc.bh);
-exit_dind:
brelse(dind);
-exit_bh:
brelse(gdb_bh);
ext4_debug("leaving with error %d\n", err);
@@ -916,9 +914,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
gdblock = ext4_meta_bg_first_block_no(sb, group) +
ext4_bg_has_super(sb, group);
- gdb_bh = sb_bread(sb, gdblock);
- if (!gdb_bh)
- return -EIO;
+ gdb_bh = ext4_sb_bread(sb, gdblock, 0);
+ if (IS_ERR(gdb_bh))
+ return PTR_ERR(gdb_bh);
n_group_desc = ext4_kvmalloc((gdb_num + 1) *
sizeof(struct buffer_head *),
GFP_NOFS);
@@ -975,9 +973,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
return -ENOMEM;
data = EXT4_I(inode)->i_data + EXT4_DIND_BLOCK;
- dind = sb_bread(sb, le32_to_cpu(*data));
- if (!dind) {
- err = -EIO;
+ dind = ext4_sb_bread(sb, le32_to_cpu(*data), 0);
+ if (IS_ERR(dind)) {
+ err = PTR_ERR(dind);
+ dind = NULL;
goto exit_free;
}
@@ -996,9 +995,10 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
err = -EINVAL;
goto exit_bh;
}
- primary[res] = sb_bread(sb, blk);
- if (!primary[res]) {
- err = -EIO;
+ primary[res] = ext4_sb_bread(sb, blk, 0);
+ if (IS_ERR(primary[res])) {
+ err = PTR_ERR(primary[res]);
+ primary[res] = NULL;
goto exit_bh;
}
gdbackups = verify_reserved_gdb(sb, group, primary[res]);
@@ -1631,13 +1631,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
}
if (reserved_gdb || gdb_off == 0) {
- if (ext4_has_feature_resize_inode(sb) ||
+ if (!ext4_has_feature_resize_inode(sb) ||
!le16_to_cpu(es->s_reserved_gdt_blocks)) {
ext4_warning(sb,
"No reserved GDT blocks, can't resize");
return -EPERM;
}
- inode = ext4_iget(sb, EXT4_RESIZE_INO);
+ inode = ext4_iget(sb, EXT4_RESIZE_INO, EXT4_IGET_SPECIAL);
if (IS_ERR(inode)) {
ext4_warning(sb, "Error opening resize inode");
return PTR_ERR(inode);
@@ -1965,7 +1965,8 @@ int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count)
}
if (!resize_inode)
- resize_inode = ext4_iget(sb, EXT4_RESIZE_INO);
+ resize_inode = ext4_iget(sb, EXT4_RESIZE_INO,
+ EXT4_IGET_SPECIAL);
if (IS_ERR(resize_inode)) {
ext4_warning(sb, "Error opening resize inode");
return PTR_ERR(resize_inode);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 53ff6c2a26ed..6641a1b8a6a5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -140,6 +140,29 @@ MODULE_ALIAS_FS("ext3");
MODULE_ALIAS("ext3");
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
+/*
+ * This works like sb_bread() except it uses ERR_PTR for error
+ * returns. Currently with sb_bread it's impossible to distinguish
+ * between ENOMEM and EIO situations (since both result in a NULL
+ * return).
+ */
+struct buffer_head *
+ext4_sb_bread(struct super_block *sb, sector_t block, int op_flags)
+{
+ struct buffer_head *bh = sb_getblk(sb, block);
+
+ if (bh == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (buffer_uptodate(bh))
+ return bh;
+ ll_rw_block(REQ_OP_READ, REQ_META | op_flags, 1, &bh);
+ wait_on_buffer(bh);
+ if (buffer_uptodate(bh))
+ return bh;
+ put_bh(bh);
+ return ERR_PTR(-EIO);
+}
+
static int ext4_verify_csum_type(struct super_block *sb,
struct ext4_super_block *es)
{
@@ -1151,20 +1174,11 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
{
struct inode *inode;
- if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
- return ERR_PTR(-ESTALE);
- if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
- return ERR_PTR(-ESTALE);
-
- /* iget isn't really right if the inode is currently unallocated!!
- *
- * ext4_read_inode will return a bad_inode if the inode had been
- * deleted, so we should be safe.
- *
+ /*
* Currently we don't know the generation for parent directory, so
* a generation of 0 means "accept any"
*/
- inode = ext4_iget_normal(sb, ino);
+ inode = ext4_iget(sb, ino, EXT4_IGET_HANDLE);
if (IS_ERR(inode))
return ERR_CAST(inode);
if (generation && inode->i_generation != generation) {
@@ -1189,6 +1203,16 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
ext4_nfs_get_inode);
}
+static int ext4_nfs_commit_metadata(struct inode *inode)
+{
+ struct writeback_control wbc = {
+ .sync_mode = WB_SYNC_ALL
+ };
+
+ trace_ext4_nfs_commit_metadata(inode);
+ return ext4_write_inode(inode, &wbc);
+}
+
/*
* Try to release metadata pages (indirect blocks, directories) which are
* mapped via the block device. Since these pages could have journal heads
@@ -1393,6 +1417,7 @@ static const struct export_operations ext4_export_ops = {
.fh_to_dentry = ext4_fh_to_dentry,
.fh_to_parent = ext4_fh_to_parent,
.get_parent = ext4_get_parent,
+ .commit_metadata = ext4_nfs_commit_metadata,
};
enum {
@@ -4328,7 +4353,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
* so we can safely mount the rest of the filesystem now.
*/
- root = ext4_iget(sb, EXT4_ROOT_INO);
+ root = ext4_iget(sb, EXT4_ROOT_INO, EXT4_IGET_SPECIAL);
if (IS_ERR(root)) {
ext4_msg(sb, KERN_ERR, "get root inode failed");
ret = PTR_ERR(root);
@@ -4598,7 +4623,7 @@ static struct inode *ext4_get_journal_inode(struct super_block *sb,
* happen if we iget() an unused inode, as the subsequent iput()
* will try to delete it.
*/
- journal_inode = ext4_iget(sb, journal_inum);
+ journal_inode = ext4_iget(sb, journal_inum, EXT4_IGET_SPECIAL);
if (IS_ERR(journal_inode)) {
ext4_msg(sb, KERN_ERR, "no journal found");
return NULL;
@@ -5680,7 +5705,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
if (!qf_inums[type])
return -EPERM;
- qf_inode = ext4_iget(sb, qf_inums[type]);
+ qf_inode = ext4_iget(sb, qf_inums[type], EXT4_IGET_SPECIAL);
if (IS_ERR(qf_inode)) {
ext4_error(sb, "Bad quota inode # %lu", qf_inums[type]);
return PTR_ERR(qf_inode);
@@ -5690,9 +5715,9 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
qf_inode->i_flags |= S_NOQUOTA;
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_QUOTA);
err = dquot_enable(qf_inode, type, format_id, flags);
- iput(qf_inode);
if (err)
lockdep_set_quota_inode(qf_inode, I_DATA_SEM_NORMAL);
+ iput(qf_inode);
return err;
}
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 7643d52c776c..86ed9c686249 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -384,7 +384,7 @@ static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
struct inode *inode;
int err;
- inode = ext4_iget(parent->i_sb, ea_ino);
+ inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
ext4_error(parent->i_sb,
@@ -522,14 +522,13 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
name_index, name, buffer, (long)buffer_size);
- error = -ENODATA;
if (!EXT4_I(inode)->i_file_acl)
- goto cleanup;
+ return -ENODATA;
ea_idebug(inode, "reading block %llu",
(unsigned long long)EXT4_I(inode)->i_file_acl);
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh)
- goto cleanup;
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
error = ext4_xattr_check_block(inode, bh);
@@ -696,26 +695,23 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
ea_idebug(inode, "buffer=%p, buffer_size=%ld",
buffer, (long)buffer_size);
- error = 0;
if (!EXT4_I(inode)->i_file_acl)
- goto cleanup;
+ return 0;
ea_idebug(inode, "reading block %llu",
(unsigned long long)EXT4_I(inode)->i_file_acl);
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- error = -EIO;
- if (!bh)
- goto cleanup;
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
error = ext4_xattr_check_block(inode, bh);
if (error)
goto cleanup;
ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
- error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
-
+ error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
+ buffer_size);
cleanup:
brelse(bh);
-
return error;
}
@@ -830,9 +826,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
}
if (EXT4_I(inode)->i_file_acl) {
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh) {
- ret = -EIO;
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ ret = PTR_ERR(bh);
goto out;
}
@@ -1486,7 +1482,8 @@ ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
}
while (ce) {
- ea_inode = ext4_iget(inode->i_sb, ce->e_value);
+ ea_inode = ext4_iget(inode->i_sb, ce->e_value,
+ EXT4_IGET_NORMAL);
if (!IS_ERR(ea_inode) &&
!is_bad_inode(ea_inode) &&
(EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
@@ -1821,16 +1818,15 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
if (EXT4_I(inode)->i_file_acl) {
/* The inode already has an extended attribute block. */
- bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
- error = -EIO;
- if (!bs->bh)
- goto cleanup;
+ bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bs->bh))
+ return PTR_ERR(bs->bh);
ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount));
error = ext4_xattr_check_block(inode, bs->bh);
if (error)
- goto cleanup;
+ return error;
/* Find the named attribute. */
bs->s.base = BHDR(bs->bh);
bs->s.first = BFIRST(bs->bh);
@@ -1839,13 +1835,10 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
i->name_index, i->name, 1);
if (error && error != -ENODATA)
- goto cleanup;
+ return error;
bs->s.not_found = error;
}
- error = 0;
-
-cleanup:
- return error;
+ return 0;
}
static int
@@ -2274,9 +2267,9 @@ static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
if (!EXT4_I(inode)->i_file_acl)
return NULL;
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh)
- return ERR_PTR(-EIO);
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh))
+ return bh;
error = ext4_xattr_check_block(inode, bh);
if (error) {
brelse(bh);
@@ -2729,7 +2722,7 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
base = IFIRST(header);
end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
min_offs = end - base;
- total_ino = sizeof(struct ext4_xattr_ibody_header);
+ total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
error = xattr_check_inode(inode, header, end);
if (error)
@@ -2746,10 +2739,11 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
if (EXT4_I(inode)->i_file_acl) {
struct buffer_head *bh;
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- error = -EIO;
- if (!bh)
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ error = PTR_ERR(bh);
goto cleanup;
+ }
error = ext4_xattr_check_block(inode, bh);
if (error) {
brelse(bh);
@@ -2903,11 +2897,12 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
}
if (EXT4_I(inode)->i_file_acl) {
- bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
- if (!bh) {
- EXT4_ERROR_INODE(inode, "block %llu read error",
- EXT4_I(inode)->i_file_acl);
- error = -EIO;
+ bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ error = PTR_ERR(bh);
+ if (error == -EIO)
+ EXT4_ERROR_INODE(inode, "block %llu read error",
+ EXT4_I(inode)->i_file_acl);
goto cleanup;
}
error = ext4_xattr_check_block(inode, bh);
@@ -3060,8 +3055,10 @@ ext4_xattr_block_cache_find(struct inode *inode,
while (ce) {
struct buffer_head *bh;
- bh = sb_bread(inode->i_sb, ce->e_value);
- if (!bh) {
+ bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
+ if (IS_ERR(bh)) {
+ if (PTR_ERR(bh) == -ENOMEM)
+ return NULL;
EXT4_ERROR_INODE(inode, "block %lu read error",
(unsigned long)ce->e_value);
} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index b293cb3e27a2..17049b030b6c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1102,8 +1102,10 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
if (test_opt(sbi, LFS) && create &&
flag == F2FS_GET_BLOCK_DIO) {
err = __allocate_data_block(&dn, map->m_seg_type);
- if (!err)
+ if (!err) {
+ blkaddr = dn.data_blkaddr;
set_inode_flag(inode, FI_APPEND_WRITE);
+ }
}
} else {
if (create) {
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index d338740d0fda..88be946dedd4 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -826,6 +826,7 @@ static int truncate_node(struct dnode_of_data *dn)
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info ni;
int err;
+ pgoff_t index;
err = f2fs_get_node_info(sbi, dn->nid, &ni);
if (err)
@@ -845,10 +846,11 @@ static int truncate_node(struct dnode_of_data *dn)
clear_node_page_dirty(dn->node_page);
set_sbi_flag(sbi, SBI_IS_DIRTY);
+ index = dn->node_page->index;
f2fs_put_page(dn->node_page, 1);
invalidate_mapping_pages(NODE_MAPPING(sbi),
- dn->node_page->index, dn->node_page->index);
+ index, index);
dn->node_page = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
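
The truncate_node() change above is a use-after-free fix: dn->node_page->index was read after f2fs_put_page() had dropped the last reference to the page. The rule it applies is to copy out any field you still need before releasing the object; a plain-C sketch of the same hazard (names are illustrative, not f2fs code):

#include <stdio.h>
#include <stdlib.h>

struct node_page { unsigned long index; };

static void put_page(struct node_page *p)
{
	free(p);			/* may drop the last reference */
}

static void truncate_node(struct node_page *p)
{
	unsigned long index = p->index;	/* copy while the object is still valid */

	put_page(p);
	/* reading p->index here would be a use-after-free; use the copy */
	printf("invalidating index %lu\n", index);
}

int main(void)
{
	struct node_page *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->index = 42;
	truncate_node(p);
	return 0;
}
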
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index af58b2cc21b8..855a622fb052 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1457,19 +1457,16 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
sbi->sb->s_flags |= SB_ACTIVE;
- mutex_lock(&sbi->gc_mutex);
f2fs_update_time(sbi, DISABLE_TIME);
while (!f2fs_time_over(sbi, DISABLE_TIME)) {
+ mutex_lock(&sbi->gc_mutex);
err = f2fs_gc(sbi, true, false, NULL_SEGNO);
if (err == -ENODATA)
break;
- if (err && err != -EAGAIN) {
- mutex_unlock(&sbi->gc_mutex);
+ if (err && err != -EAGAIN)
return err;
- }
}
- mutex_unlock(&sbi->gc_mutex);
err = sync_filesystem(sbi->sb);
if (err)
@@ -2496,10 +2493,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
return 1;
}
- if (segment_count > (le32_to_cpu(raw_super->block_count) >> 9)) {
+ if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
f2fs_msg(sb, KERN_INFO,
- "Wrong segment_count / block_count (%u > %u)",
- segment_count, le32_to_cpu(raw_super->block_count));
+ "Wrong segment_count / block_count (%u > %llu)",
+ segment_count, le64_to_cpu(raw_super->block_count));
return 1;
}
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 7261245c208d..ecd2cf2fc584 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -288,7 +288,7 @@ static int read_xattr_block(struct inode *inode, void *txattr_addr)
static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
unsigned int index, unsigned int len,
const char *name, struct f2fs_xattr_entry **xe,
- void **base_addr)
+ void **base_addr, int *base_size)
{
void *cur_addr, *txattr_addr, *last_addr = NULL;
nid_t xnid = F2FS_I(inode)->i_xattr_nid;
@@ -299,8 +299,8 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
if (!size && !inline_size)
return -ENODATA;
- txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode),
- inline_size + size + XATTR_PADDING_SIZE, GFP_NOFS);
+ *base_size = inline_size + size + XATTR_PADDING_SIZE;
+ txattr_addr = f2fs_kzalloc(F2FS_I_SB(inode), *base_size, GFP_NOFS);
if (!txattr_addr)
return -ENOMEM;
@@ -312,8 +312,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
*xe = __find_inline_xattr(inode, txattr_addr, &last_addr,
index, len, name);
- if (*xe)
+ if (*xe) {
+ *base_size = inline_size;
goto check;
+ }
}
/* read from xattr node block */
@@ -474,6 +476,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
int error = 0;
unsigned int size, len;
void *base_addr = NULL;
+ int base_size;
if (name == NULL)
return -EINVAL;
@@ -484,7 +487,7 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
down_read(&F2FS_I(inode)->i_xattr_sem);
error = lookup_all_xattrs(inode, ipage, index, len, name,
- &entry, &base_addr);
+ &entry, &base_addr, &base_size);
up_read(&F2FS_I(inode)->i_xattr_sem);
if (error)
return error;
@@ -498,6 +501,11 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name,
if (buffer) {
char *pval = entry->e_name + entry->e_name_len;
+
+ if (base_size - (pval - (char *)base_addr) < size) {
+ error = -ERANGE;
+ goto out;
+ }
memcpy(buffer, pval, size);
}
error = size;
diff --git a/fs/file.c b/fs/file.c
index 7ffd6e9d103d..8d059d8973e9 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -640,6 +640,35 @@ int __close_fd(struct files_struct *files, unsigned fd)
}
EXPORT_SYMBOL(__close_fd); /* for ksys_close() */
+/*
+ * variant of __close_fd that gets a ref on the file for later fput
+ */
+int __close_fd_get_file(unsigned int fd, struct file **res)
+{
+ struct files_struct *files = current->files;
+ struct file *file;
+ struct fdtable *fdt;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ if (fd >= fdt->max_fds)
+ goto out_unlock;
+ file = fdt->fd[fd];
+ if (!file)
+ goto out_unlock;
+ rcu_assign_pointer(fdt->fd[fd], NULL);
+ __put_unused_fd(files, fd);
+ spin_unlock(&files->file_lock);
+ get_file(file);
+ *res = file;
+ return filp_close(file, files);
+
+out_unlock:
+ spin_unlock(&files->file_lock);
+ *res = NULL;
+ return -ENOENT;
+}
+
void do_close_on_exec(struct files_struct *files)
{
unsigned i;
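
The new __close_fd_get_file() removes the descriptor from the table but hands the caller an extra reference to the struct file, so the file can still be used after the fd is gone. Its in-tree user is not part of this diff; a hypothetical caller might look like this (illustrative only):

#include <linux/fdtable.h>
#include <linux/file.h>

/* Hypothetical: close 'fd' but keep working with the underlying file. */
static int close_fd_keep_file(unsigned int fd)
{
	struct file *file;
	int ret;

	ret = __close_fd_get_file(fd, &file);
	if (!file)
		return ret;		/* -ENOENT: fd was not open */

	/* The fd is gone, but 'file' is pinned by the extra reference. */
	/* ... flush or cancel pending work against 'file' ... */

	fput(file);			/* drop the reference we were handed */
	return ret;			/* filp_close() result */
}
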
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index 41615f38bcff..f07c55ea0c22 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -121,6 +121,7 @@ extern void __fd_install(struct files_struct *files,
unsigned int fd, struct file *file);
extern int __close_fd(struct files_struct *files,
unsigned int fd);
+extern int __close_fd_get_file(unsigned int fd, struct file **res);
extern struct kmem_cache *files_cachep;
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 0e9c50052ff3..eb213b87617c 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -116,6 +116,8 @@ struct msi_desc {
list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev) \
list_for_each_entry((desc), dev_to_msi_list((dev)), list)
+#define for_each_msi_entry_safe(desc, tmp, dev) \
+ list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
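
for_each_msi_entry_safe() is the list_for_each_entry_safe() counterpart of for_each_msi_entry(): the extra 'tmp' cursor caches the next node, so the current descriptor may be unlinked and freed without derailing the walk. A sketch of the intended use (the teardown helper is illustrative; free_msi_entry() is assumed to be the MSI core's descriptor free routine):

#include <linux/device.h>
#include <linux/list.h>
#include <linux/msi.h>

/* Illustrative teardown: free every descriptor while iterating. */
static void free_all_msi_descs(struct device *dev)
{
	struct msi_desc *desc, *tmp;

	for_each_msi_entry_safe(desc, tmp, dev) {
		list_del(&desc->list);
		free_msi_entry(desc);	/* safe: 'tmp' already points at the next entry */
	}
}
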
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6894976b54e3..186cd8e970c7 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -573,6 +573,8 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
else if (destroy)
destroy(ptr);
+ if (producer >= size)
+ producer = 0;
__ptr_ring_set_size(r, size);
r->producer = producer;
r->consumer_head = 0;
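
The two added lines guard __ptr_ring_swap_queue() against shrinking: 'producer' is incremented once per entry copied from the old queue, so if the old ring held more entries than the new 'size' it ends up equal to 'size', pointing past the end of the new array, and the next produce would index out of bounds. Wrapping it back to 0 keeps the index valid. A standalone sketch of the copy-and-clamp step (plain C, illustrative):

#include <stddef.h>

/*
 * Copy up to 'dst_size' entries from the old ring into the new one and
 * return the producer index for the new ring, clamped into range.
 */
static size_t swap_queue(void **old, size_t old_size,
			 void **dst, size_t dst_size)
{
	size_t i, producer = 0;

	for (i = 0; i < old_size; i++) {
		if (!old[i])
			continue;
		if (producer < dst_size)
			dst[producer++] = old[i];
		/* else: the entry is dropped (the kernel calls destroy()) */
	}

	return producer >= dst_size ? 0 : producer;
}
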
diff --git a/include/media/cec.h b/include/media/cec.h
index 3fe5e5d2bb7e..707411ef8ba2 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -155,6 +155,7 @@ struct cec_adapter {
unsigned int transmit_queue_sz;
struct list_head wait_queue;
struct cec_data *transmitting;
+ bool transmit_in_progress;
struct task_struct *kthread_config;
struct completion config_completion;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 5ce926701bd0..5f67efbb77e8 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -307,6 +307,26 @@ int ip_tunnel_encap_del_ops(const struct ip_tunnel_encap_ops *op,
int ip_tunnel_encap_setup(struct ip_tunnel *t,
struct ip_tunnel_encap *ipencap);
+static inline bool pskb_inet_may_pull(struct sk_buff *skb)
+{
+ int nhlen;
+
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case htons(ETH_P_IPV6):
+ nhlen = sizeof(struct ipv6hdr);
+ break;
+#endif
+ case htons(ETH_P_IP):
+ nhlen = sizeof(struct iphdr);
+ break;
+ default:
+ nhlen = 0;
+ }
+
+ return pskb_network_may_pull(skb, nhlen);
+}
+
static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
{
const struct ip_tunnel_encap_ops *ops;
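
pskb_inet_may_pull() centralises the "make sure the inner IPv4/IPv6 header is in the linear area before dereferencing it" check; the tunnel drivers patched below (ip_gre, ip_vti, ip6_gre, ip6_tnl, ip6_vti) all call it first thing in their xmit handlers. A condensed sketch of the resulting entry point (illustrative, the real hunks follow):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/ip_tunnels.h>

static netdev_tx_t tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Linearise the inner network header before anyone touches it. */
	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	/* ... build and transmit the encapsulated packet ... */
	return NETDEV_TX_OK;

tx_err:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
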
diff --git a/include/net/sock.h b/include/net/sock.h
index 0e3a09380655..13f11e905a00 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -298,6 +298,7 @@ struct sock_common {
* @sk_filter: socket filtering instructions
* @sk_timer: sock cleanup timer
* @sk_stamp: time stamp of last packet received
+ * @sk_stamp_seq: lock for accessing sk_stamp on 32 bit architectures only
* @sk_tsflags: SO_TIMESTAMPING socket options
* @sk_tskey: counter to disambiguate concurrent tstamp requests
* @sk_zckey: counter to order MSG_ZEROCOPY notifications
@@ -474,6 +475,9 @@ struct sock {
const struct cred *sk_peer_cred;
long sk_rcvtimeo;
ktime_t sk_stamp;
+#if BITS_PER_LONG==32
+ seqlock_t sk_stamp_seq;
+#endif
u16 sk_tsflags;
u8 sk_shutdown;
u32 sk_tskey;
@@ -2287,6 +2291,34 @@ static inline void sk_drops_add(struct sock *sk, const struct sk_buff *skb)
atomic_add(segs, &sk->sk_drops);
}
+static inline ktime_t sock_read_timestamp(struct sock *sk)
+{
+#if BITS_PER_LONG==32
+ unsigned int seq;
+ ktime_t kt;
+
+ do {
+ seq = read_seqbegin(&sk->sk_stamp_seq);
+ kt = sk->sk_stamp;
+ } while (read_seqretry(&sk->sk_stamp_seq, seq));
+
+ return kt;
+#else
+ return sk->sk_stamp;
+#endif
+}
+
+static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
+{
+#if BITS_PER_LONG==32
+ write_seqlock(&sk->sk_stamp_seq);
+ sk->sk_stamp = kt;
+ write_sequnlock(&sk->sk_stamp_seq);
+#else
+ sk->sk_stamp = kt;
+#endif
+}
+
void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
@@ -2311,7 +2343,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
(sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
- sk->sk_stamp = kt;
+ sock_write_timestamp(sk, kt);
if (sock_flag(sk, SOCK_WIFI_STATUS) && skb->wifi_acked_valid)
__sock_recv_wifi_status(msg, sk, skb);
@@ -2332,9 +2364,9 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
__sock_recv_ts_and_drops(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
- sk->sk_stamp = skb->tstamp;
+ sock_write_timestamp(sk, skb->tstamp);
else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
- sk->sk_stamp = 0;
+ sock_write_timestamp(sk, 0);
}
void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 698e0d8a5ca4..d68e9e536814 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -226,6 +226,26 @@ TRACE_EVENT(ext4_drop_inode,
(unsigned long) __entry->ino, __entry->drop)
);
+TRACE_EVENT(ext4_nfs_commit_metadata,
+ TP_PROTO(struct inode *inode),
+
+ TP_ARGS(inode),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ ),
+
+ TP_printk("dev %d,%d ino %lu",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino)
+);
+
TRACE_EVENT(ext4_mark_inode_dirty,
TP_PROTO(struct inode *inode, unsigned long IP),
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 6aaf5dd5383b..1f84977fab47 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4202,20 +4202,25 @@ static void css_task_iter_advance(struct css_task_iter *it)
lockdep_assert_held(&css_set_lock);
repeat:
- /*
- * Advance iterator to find next entry. cset->tasks is consumed
- * first and then ->mg_tasks. After ->mg_tasks, we move onto the
- * next cset.
- */
- next = it->task_pos->next;
+ if (it->task_pos) {
+ /*
+ * Advance iterator to find next entry. cset->tasks is
+ * consumed first and then ->mg_tasks. After ->mg_tasks,
+ * we move onto the next cset.
+ */
+ next = it->task_pos->next;
- if (next == it->tasks_head)
- next = it->mg_tasks_head->next;
+ if (next == it->tasks_head)
+ next = it->mg_tasks_head->next;
- if (next == it->mg_tasks_head)
+ if (next == it->mg_tasks_head)
+ css_task_iter_advance_css_set(it);
+ else
+ it->task_pos = next;
+ } else {
+ /* called from start, proceed to the first cset */
css_task_iter_advance_css_set(it);
- else
- it->task_pos = next;
+ }
/* if PROCS, skip over tasks which aren't group leaders */
if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
@@ -4255,7 +4260,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
it->cset_head = it->cset_pos;
- css_task_iter_advance_css_set(it);
+ css_task_iter_advance(it);
spin_unlock_irq(&css_set_lock);
}
diff --git a/kernel/panic.c b/kernel/panic.c
index f6d549a29a5c..d10c340c43b0 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -14,6 +14,7 @@
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
+#include <linux/vt_kern.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/ftrace.h>
@@ -237,7 +238,10 @@ void panic(const char *fmt, ...)
if (_crash_kexec_post_notifiers)
__crash_kexec(NULL);
- bust_spinlocks(0);
+#ifdef CONFIG_VT
+ unblank_screen();
+#endif
+ console_unblank();
/*
* We may have ended up stopping the CPU holding the lock (in
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index c603d33d5410..5d01edf8d819 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -653,15 +653,22 @@ static int ax25_setsockopt(struct socket *sock, int level, int optname,
break;
}
- dev = dev_get_by_name(&init_net, devname);
+ rtnl_lock();
+ dev = __dev_get_by_name(&init_net, devname);
if (!dev) {
+ rtnl_unlock();
res = -ENODEV;
break;
}
ax25->ax25_dev = ax25_dev_ax25dev(dev);
+ if (!ax25->ax25_dev) {
+ rtnl_unlock();
+ res = -ENODEV;
+ break;
+ }
ax25_fillin_cb(ax25, ax25->ax25_dev);
- dev_put(dev);
+ rtnl_unlock();
break;
default:
diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c
index 9a3a301e1e2f..d92195cd7834 100644
--- a/net/ax25/ax25_dev.c
+++ b/net/ax25/ax25_dev.c
@@ -116,6 +116,7 @@ void ax25_dev_device_down(struct net_device *dev)
if ((s = ax25_dev_list) == ax25_dev) {
ax25_dev_list = s->next;
spin_unlock_bh(&ax25_dev_lock);
+ dev->ax25_ptr = NULL;
dev_put(dev);
kfree(ax25_dev);
return;
@@ -125,6 +126,7 @@ void ax25_dev_device_down(struct net_device *dev)
if (s->next == ax25_dev) {
s->next = ax25_dev->next;
spin_unlock_bh(&ax25_dev_lock);
+ dev->ax25_ptr = NULL;
dev_put(dev);
kfree(ax25_dev);
return;
diff --git a/net/compat.c b/net/compat.c
index 47a614b370cd..d1f3a8a0b3ef 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -467,12 +467,14 @@ int compat_sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
ctv = (struct compat_timeval __user *) userstamp;
err = -ENOENT;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- tv = ktime_to_timeval(sk->sk_stamp);
+ tv = ktime_to_timeval(sock_read_timestamp(sk));
+
if (tv.tv_sec == -1)
return err;
if (tv.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- tv = ktime_to_timeval(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ tv = ktime_to_timeval(kt);
}
err = 0;
if (put_user(tv.tv_sec, &ctv->tv_sec) ||
@@ -494,12 +496,13 @@ int compat_sock_get_timestampns(struct sock *sk, struct timespec __user *usersta
ctv = (struct compat_timespec __user *) userstamp;
err = -ENOENT;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- ts = ktime_to_timespec(sk->sk_stamp);
+ ts = ktime_to_timespec(sock_read_timestamp(sk));
if (ts.tv_sec == -1)
return err;
if (ts.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- ts = ktime_to_timespec(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ ts = ktime_to_timespec(kt);
}
err = 0;
if (put_user(ts.tv_sec, &ctv->tv_sec) ||
diff --git a/net/core/sock.c b/net/core/sock.c
index 080a880a1761..98659fb6e9fb 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2743,6 +2743,9 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
sk->sk_stamp = SK_DEFAULT_STAMP;
+#if BITS_PER_LONG==32
+ seqlock_init(&sk->sk_stamp_seq);
+#endif
atomic_set(&sk->sk_zckey, 0);
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -2842,12 +2845,13 @@ int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
struct timeval tv;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- tv = ktime_to_timeval(sk->sk_stamp);
+ tv = ktime_to_timeval(sock_read_timestamp(sk));
if (tv.tv_sec == -1)
return -ENOENT;
if (tv.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
- tv = ktime_to_timeval(sk->sk_stamp);
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
+ tv = ktime_to_timeval(kt);
}
return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
}
@@ -2858,11 +2862,12 @@ int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
struct timespec ts;
sock_enable_timestamp(sk, SOCK_TIMESTAMP);
- ts = ktime_to_timespec(sk->sk_stamp);
+ ts = ktime_to_timespec(sock_read_timestamp(sk));
if (ts.tv_sec == -1)
return -ENOENT;
if (ts.tv_sec == 0) {
- sk->sk_stamp = ktime_get_real();
+ ktime_t kt = ktime_get_real();
+ sock_write_timestamp(sk, kt);
ts = ktime_to_timespec(sk->sk_stamp);
}
return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
diff --git a/net/ieee802154/6lowpan/tx.c b/net/ieee802154/6lowpan/tx.c
index ca53efa17be1..8bec827081cd 100644
--- a/net/ieee802154/6lowpan/tx.c
+++ b/net/ieee802154/6lowpan/tx.c
@@ -48,6 +48,9 @@ int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
const struct ipv6hdr *hdr = ipv6_hdr(skb);
struct neighbour *n;
+ if (!daddr)
+ return -EINVAL;
+
/* TODO:
* if this package isn't ipv6 one, where should it be routed?
*/
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 38befe829caf..0fe9419bd12b 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -674,6 +674,9 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
struct ip_tunnel *tunnel = netdev_priv(dev);
const struct iphdr *tnl_params;
+ if (!pskb_inet_may_pull(skb))
+ goto free_skb;
+
if (tunnel->collect_md) {
gre_fb_xmit(skb, dev, skb->protocol);
return NETDEV_TX_OK;
@@ -717,6 +720,9 @@ static netdev_tx_t erspan_xmit(struct sk_buff *skb,
struct ip_tunnel *tunnel = netdev_priv(dev);
bool truncate = false;
+ if (!pskb_inet_may_pull(skb))
+ goto free_skb;
+
if (tunnel->collect_md) {
erspan_fb_xmit(skb, dev, skb->protocol);
return NETDEV_TX_OK;
@@ -760,6 +766,9 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ if (!pskb_inet_may_pull(skb))
+ goto free_skb;
+
if (tunnel->collect_md) {
gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
return NETDEV_TX_OK;
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 284a22154b4e..c4f5602308ed 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,7 +627,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- unsigned int inner_nhdr_len = 0;
const struct iphdr *inner_iph;
struct flowi4 fl4;
u8 tos, ttl;
@@ -637,14 +636,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
__be32 dst;
bool connected;
- /* ensure we can access the inner net header, for several users below */
- if (skb->protocol == htons(ETH_P_IP))
- inner_nhdr_len = sizeof(struct iphdr);
- else if (skb->protocol == htons(ETH_P_IPV6))
- inner_nhdr_len = sizeof(struct ipv6hdr);
- if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
- goto tx_error;
-
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index de31b302d69c..d7b43e700023 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -241,6 +241,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct ip_tunnel *tunnel = netdev_priv(dev);
struct flowi fl;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
memset(&fl, 0, sizeof(fl));
switch (skb->protocol) {
@@ -253,15 +256,18 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
break;
default:
- dev->stats.tx_errors++;
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ goto tx_err;
}
/* override mark with tunnel output key */
fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
return vti_xmit(skb, dev, &fl);
+
+tx_err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
static int vti4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 515adbdba1d2..0f7d434c1eed 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -879,6 +879,9 @@ static netdev_tx_t ip6gre_tunnel_xmit(struct sk_buff *skb,
struct net_device_stats *stats = &t->dev->stats;
int ret;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
goto tx_err;
@@ -921,6 +924,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
int nhoff;
int thoff;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
if (!ip6_tnl_xmit_ctl(t, &t->parms.laddr, &t->parms.raddr))
goto tx_err;
@@ -993,8 +999,6 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
goto tx_err;
}
} else {
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-
switch (skb->protocol) {
case htons(ETH_P_IP):
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1002,7 +1006,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
&dsfield, &encap_limit);
break;
case htons(ETH_P_IPV6):
- if (ipv6_addr_equal(&t->parms.raddr, &ipv6h->saddr))
+ if (ipv6_addr_equal(&t->parms.raddr, &ipv6_hdr(skb)->saddr))
goto tx_err;
if (prepare_ip6gre_xmit_ipv6(skb, dev, &fl6,
&dsfield, &encap_limit))
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 99179b9c8384..0c6403cf8b52 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1243,10 +1243,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
- /* ensure we can access the full inner ip header */
- if (!pskb_may_pull(skb, sizeof(struct iphdr)))
- return -1;
-
iph = ip_hdr(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
@@ -1321,9 +1317,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
u8 tproto;
int err;
- if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
- return -1;
-
ipv6h = ipv6_hdr(skb);
tproto = READ_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
@@ -1405,6 +1398,9 @@ ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &t->dev->stats;
int ret;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
ret = ip4ip6_tnl_xmit(skb, dev);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 706fe42e4928..8b6eefff2f7e 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -522,18 +522,18 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
struct net_device_stats *stats = &t->dev->stats;
- struct ipv6hdr *ipv6h;
struct flowi fl;
int ret;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
memset(&fl, 0, sizeof(fl));
switch (skb->protocol) {
case htons(ETH_P_IPV6):
- ipv6h = ipv6_hdr(skb);
-
if ((t->parms.proto != IPPROTO_IPV6 && t->parms.proto != 0) ||
- vti6_addr_conflict(t, ipv6h))
+ vti6_addr_conflict(t, ipv6_hdr(skb)))
goto tx_err;
xfrm_decode_session(skb, &fl, AF_INET6);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 377a2ee5d9ad..eb3220812b56 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -51,6 +51,7 @@
#include <linux/export.h>
#include <net/ip6_checksum.h>
#include <linux/netconf.h>
+#include <net/ip_tunnels.h>
#include <linux/nospec.h>
@@ -599,13 +600,12 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
.flowi6_iif = skb->skb_iif ? : LOOPBACK_IFINDEX,
.flowi6_mark = skb->mark,
};
- int err;
- err = ip6mr_fib_lookup(net, &fl6, &mrt);
- if (err < 0) {
- kfree_skb(skb);
- return err;
- }
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
+ if (ip6mr_fib_lookup(net, &fl6, &mrt) < 0)
+ goto tx_err;
read_lock(&mrt_lock);
dev->stats.tx_bytes += skb->len;
@@ -614,6 +614,11 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb,
read_unlock(&mrt_lock);
kfree_skb(skb);
return NETDEV_TX_OK;
+
+tx_err:
+ dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
static int reg_vif_get_iflink(const struct net_device *dev)
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 51c9f75f34b9..1e03305c0549 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1021,6 +1021,9 @@ static netdev_tx_t sit_tunnel_xmit__(struct sk_buff *skb,
static netdev_tx_t sit_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
sit_tunnel_xmit__(skb, dev, IPPROTO_IPIP);
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 03f37c4e64fe..1d3144d19903 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -153,7 +153,7 @@ static struct sock *nr_find_listener(ax25_address *addr)
sk_for_each(s, &nr_list)
if (!ax25cmp(&nr_sk(s)->source_addr, addr) &&
s->sk_state == TCP_LISTEN) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
s = NULL;
@@ -174,7 +174,7 @@ static struct sock *nr_find_socket(unsigned char index, unsigned char id)
struct nr_sock *nr = nr_sk(s);
if (nr->my_index == index && nr->my_id == id) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -198,7 +198,7 @@ static struct sock *nr_find_peer(unsigned char index, unsigned char id,
if (nr->your_index == index && nr->your_id == id &&
!ax25cmp(&nr->dest_addr, dest)) {
- bh_lock_sock(s);
+ sock_hold(s);
goto found;
}
}
@@ -224,7 +224,7 @@ static unsigned short nr_find_next_circuit(void)
if (i != 0 && j != 0) {
if ((sk=nr_find_socket(i, j)) == NULL)
break;
- bh_unlock_sock(sk);
+ sock_put(sk);
}
id++;
@@ -920,6 +920,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
}
if (sk != NULL) {
+ bh_lock_sock(sk);
skb_reset_transport_header(skb);
if (frametype == NR_CONNACK && skb->len == 22)
@@ -929,6 +930,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
ret = nr_process_rx_frame(sk, skb);
bh_unlock_sock(sk);
+ sock_put(sk);
return ret;
}
@@ -960,10 +962,12 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
(make = nr_make_new(sk)) == NULL) {
nr_transmit_refusal(skb, 0);
if (sk)
- bh_unlock_sock(sk);
+ sock_put(sk);
return 0;
}
+ bh_lock_sock(sk);
+
window = skb->data[20];
skb->sk = make;
@@ -1016,6 +1020,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
sk->sk_data_ready(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
nr_insert_socket(make);
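Taken together, the netrom hunks change the nr_find_*() lookups to return a held but unlocked socket, and nr_rx_frame() now takes the socket lock itself. A simplified sketch of the resulting caller pattern (not a verbatim excerpt):

        sk = nr_find_listener(addr);    /* returned with a sock_hold() reference */
        if (sk) {
                bh_lock_sock(sk);       /* serialize against the softirq path */
                ret = nr_process_rx_frame(sk, skb);
                bh_unlock_sock(sk);
                sock_put(sk);           /* drop the lookup reference */
        }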
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 5dda263b4a0a..eedacdebcd4c 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2625,7 +2625,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_addr;
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
if (addr && dev && saddr->sll_halen < dev->addr_len)
goto out;
@@ -2825,7 +2825,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
goto out;
proto = saddr->sll_protocol;
- addr = saddr->sll_addr;
+ addr = saddr->sll_halen ? saddr->sll_addr : NULL;
dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
if (addr && dev && saddr->sll_halen < dev->addr_len)
goto out;
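The sll_halen test matters because a zero-length hardware address previously made the send paths hand the driver a pointer to whatever happened to be in sll_addr[]. A hypothetical user-space trigger, purely for illustration (interface name and setup are made up):

#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static ssize_t send_without_addr(int fd, const void *buf, size_t len)
{
        struct sockaddr_ll sll;

        memset(&sll, 0, sizeof(sll));
        sll.sll_family   = AF_PACKET;
        sll.sll_protocol = htons(ETH_P_IP);
        sll.sll_ifindex  = if_nametoindex("eth0");
        sll.sll_halen    = 0;   /* no destination MAC supplied */

        /* with the fix the kernel treats the address as absent (NULL)
         * instead of reading the uninitialized sll_addr[] bytes */
        return sendto(fd, buf, len, 0, (struct sockaddr *)&sll, sizeof(sll));
}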
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 986f3ed7d1a2..b7e67310ec37 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -549,7 +549,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
/* Don't enable netstamp, sunrpc doesn't
need that much accuracy */
}
- svsk->sk_sk->sk_stamp = skb->tstamp;
+ sock_write_timestamp(svsk->sk_sk, skb->tstamp);
set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
len = skb->len;
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index e65c3a8551e4..040153ffc357 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -317,7 +317,6 @@ static int tipc_enable_bearer(struct net *net, const char *name,
res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
if (res) {
bearer_disable(net, b);
- kfree(b);
errstr = "failed to create discoverer";
goto rejected;
}
diff --git a/security/keys/keyctl_pkey.c b/security/keys/keyctl_pkey.c
index 70e65a2ff207..8bdea5abad11 100644
--- a/security/keys/keyctl_pkey.c
+++ b/security/keys/keyctl_pkey.c
@@ -50,6 +50,8 @@ static int keyctl_pkey_params_parse(struct kernel_pkey_params *params)
if (*p == '\0' || *p == ' ' || *p == '\t')
continue;
token = match_token(p, param_keys, args);
+ if (token == Opt_err)
+ return -EINVAL;
if (__test_and_set_bit(token, &token_mask))
return -EINVAL;
q = args[0].from;
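For context, match_token() walks the table until a pattern matches, and a { Opt_err, NULL } terminator matches anything, so an unrecognized parameter used to flow straight into __test_and_set_bit() and the args[] handling below. An illustrative table of the shape involved (token names and values are not copied from the file):

#include <linux/parser.h>

enum { Opt_err = -1, Opt_enc, Opt_hash };       /* values illustrative */

static const match_table_t param_keys = {
        { Opt_enc,  "enc=%s" },
        { Opt_hash, "hash=%s" },
        { Opt_err,  NULL }      /* returned when nothing matches */
};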
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index fdb9b92fc8d6..01b9d62eef14 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -25,6 +25,7 @@
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/device.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/minors.h>
#include <sound/pcm.h>
@@ -129,6 +130,7 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
return -EFAULT;
if (stream < 0 || stream > 1)
return -EINVAL;
+ stream = array_index_nospec(stream, 2);
if (get_user(subdevice, &info->subdevice))
return -EFAULT;
mutex_lock(&register_mutex);
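array_index_nospec() from <linux/nospec.h> clamps an index that has already passed a bounds check, so a mispredicted branch cannot speculatively index out of range; the emu10k1, hdsp and emux hunks below apply the same idiom. A self-contained sketch with a made-up table and length:

#include <linux/errno.h>
#include <linux/nospec.h>

/* Hypothetical lookup: 'idx' comes from user space. The bounds check
 * alone does not help under speculation; the clamp keeps the array
 * access in range even if the branch is mispredicted. */
static int table_lookup(const int *table, unsigned int len, unsigned int idx)
{
        if (idx >= len)
                return -EINVAL;
        idx = array_index_nospec(idx, len);
        return table[idx];
}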
diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
index 54cdd4ffa9ce..ac20acf48fc6 100644
--- a/sound/firewire/amdtp-stream-trace.h
+++ b/sound/firewire/amdtp-stream-trace.h
@@ -131,7 +131,7 @@ TRACE_EVENT(in_packet_without_header,
__entry->index = index;
),
TP_printk(
- "%02u %04u %04x %04x %02d %03u %3u %3u %02u %01u %02u",
+ "%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u",
__entry->second,
__entry->cycle,
__entry->src,
@@ -169,7 +169,7 @@ TRACE_EVENT(out_packet_without_header,
__entry->dest = fw_parent_device(s->unit)->node_id;
__entry->payload_quadlets = payload_length / 4;
__entry->data_blocks = data_blocks,
- __entry->data_blocks = s->data_block_counter,
+ __entry->data_block_counter = s->data_block_counter,
__entry->packet_index = s->packet_index;
__entry->irq = !!in_interrupt();
__entry->index = index;
diff --git a/sound/firewire/amdtp-stream.c b/sound/firewire/amdtp-stream.c
index 9be76c808fcc..3ada55ed5381 100644
--- a/sound/firewire/amdtp-stream.c
+++ b/sound/firewire/amdtp-stream.c
@@ -654,15 +654,17 @@ static int handle_in_packet(struct amdtp_stream *s,
}
static int handle_in_packet_without_header(struct amdtp_stream *s,
- unsigned int payload_quadlets, unsigned int cycle,
+ unsigned int payload_length, unsigned int cycle,
unsigned int index)
{
__be32 *buffer;
+ unsigned int payload_quadlets;
unsigned int data_blocks;
struct snd_pcm_substream *pcm;
unsigned int pcm_frames;
buffer = s->buffer.packets[s->packet_index].buffer;
+ payload_quadlets = payload_length / 4;
data_blocks = payload_quadlets / s->data_block_quadlets;
trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
diff --git a/sound/firewire/fireface/ff-protocol-ff400.c b/sound/firewire/fireface/ff-protocol-ff400.c
index 654a50319198..4d191172fe3f 100644
--- a/sound/firewire/fireface/ff-protocol-ff400.c
+++ b/sound/firewire/fireface/ff-protocol-ff400.c
@@ -152,7 +152,7 @@ static int ff400_switch_fetching_mode(struct snd_ff *ff, bool enable)
if (reg == NULL)
return -ENOMEM;
- if (enable) {
+ if (!enable) {
/*
* Each quadlet is corresponding to data channels in a data
* blocks in reverse order. Precisely, quadlets for available
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 6ebe817801ea..1f25e6d029d8 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/tlv.h>
@@ -1026,6 +1027,8 @@ static int snd_emu10k1_ipcm_poke(struct snd_emu10k1 *emu,
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
return -EINVAL;
+ ipcm->substream = array_index_nospec(ipcm->substream,
+ EMU10K1_FX8010_PCM_COUNT);
if (ipcm->channels > 32)
return -EINVAL;
pcm = &emu->fx8010.pcm[ipcm->substream];
@@ -1072,6 +1075,8 @@ static int snd_emu10k1_ipcm_peek(struct snd_emu10k1 *emu,
if (ipcm->substream >= EMU10K1_FX8010_PCM_COUNT)
return -EINVAL;
+ ipcm->substream = array_index_nospec(ipcm->substream,
+ EMU10K1_FX8010_PCM_COUNT);
pcm = &emu->fx8010.pcm[ipcm->substream];
mutex_lock(&emu->fx8010.lock);
spin_lock_irq(&emu->reg_lock);
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index dd7d4242d6d2..86841d46a8fc 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -233,10 +233,12 @@ static int hda_tegra_suspend(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
struct hda_tegra *hda = container_of(chip, struct hda_tegra, chip);
+ struct hdac_bus *bus = azx_bus(chip);
snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
azx_stop_chip(chip);
+ synchronize_irq(bus->irq);
azx_enter_link_reset(chip);
hda_tegra_disable_clocks(hda);
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 950e02e71766..51cc6589443f 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -923,6 +923,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8079, "HP EliteBook 840 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 15021c839372..54fc9c0f07de 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6424,7 +6424,7 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC294_FIXUP_ASUS_HEADSET_MIC] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
- { 0x19, 0x01a1113c }, /* use as headset mic, without its own jack detect */
+ { 0x19, 0x01a1103c }, /* use as headset mic */
{ }
},
.chained = true,
@@ -6573,6 +6573,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x1043, 0x10a1, "ASUS UX391UA", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x10c0, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x10d0, "ASUS X540LA/X540LJ", ALC255_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 1bff4b1b39cd..ba99ff0e93e0 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -30,6 +30,7 @@
#include <linux/math64.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -4092,15 +4093,16 @@ static int snd_hdsp_channel_info(struct snd_pcm_substream *substream,
struct snd_pcm_channel_info *info)
{
struct hdsp *hdsp = snd_pcm_substream_chip(substream);
- int mapped_channel;
+ unsigned int channel = info->channel;
- if (snd_BUG_ON(info->channel >= hdsp->max_channels))
+ if (snd_BUG_ON(channel >= hdsp->max_channels))
return -EINVAL;
+ channel = array_index_nospec(channel, hdsp->max_channels);
- if ((mapped_channel = hdsp->channel_map[info->channel]) < 0)
+ if (hdsp->channel_map[channel] < 0)
return -EINVAL;
- info->offset = mapped_channel * HDSP_CHANNEL_BUFFER_BYTES;
+ info->offset = hdsp->channel_map[channel] * HDSP_CHANNEL_BUFFER_BYTES;
info->first = 0;
info->step = 32;
return 0;
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index 9d9f6e41d81c..08a5152e635a 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -389,6 +389,20 @@ static struct snd_soc_card snd_soc_card_cht = {
};
static const struct dmi_system_id cht_max98090_quirk_table[] = {
+ {
+ /* Clapper model Chromebook */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Clapper"),
+ },
+ .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+ },
+ {
+ /* Gnawty model Chromebook (Acer Chromebook CB3-111) */
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Gnawty"),
+ },
+ .driver_data = (void *)QUIRK_PMC_PLT_CLK_0,
+ },
{
/* Swanky model Chromebook (Toshiba Chromebook 2) */
.matches = {
diff --git a/sound/synth/emux/emux_hwdep.c b/sound/synth/emux/emux_hwdep.c
index e557946718a9..d9fcae071b47 100644
--- a/sound/synth/emux/emux_hwdep.c
+++ b/sound/synth/emux/emux_hwdep.c
@@ -22,9 +22,9 @@
#include <sound/core.h>
#include <sound/hwdep.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
#include "emux_voice.h"
-
#define TMP_CLIENT_ID 0x1001
/*
@@ -66,13 +66,16 @@ snd_emux_hwdep_misc_mode(struct snd_emux *emu, void __user *arg)
return -EFAULT;
if (info.mode < 0 || info.mode >= EMUX_MD_END)
return -EINVAL;
+ info.mode = array_index_nospec(info.mode, EMUX_MD_END);
if (info.port < 0) {
for (i = 0; i < emu->num_ports; i++)
emu->portptrs[i]->ctrls[info.mode] = info.value;
} else {
- if (info.port < emu->num_ports)
+ if (info.port < emu->num_ports) {
+ info.port = array_index_nospec(info.port, emu->num_ports);
emu->portptrs[info.port]->ctrls[info.mode] = info.value;
+ }
}
return 0;
}
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 3692f29fee46..70144b98141c 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -4970,6 +4970,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct tep_e
if (arg->type == TEP_PRINT_BSTRING) {
trace_seq_puts(s, arg->string.string);
+ arg = arg->next;
break;
}
diff --git a/tools/perf/arch/common.c b/tools/perf/arch/common.c
index 82657c01a3b8..5f69fd0b745a 100644
--- a/tools/perf/arch/common.c
+++ b/tools/perf/arch/common.c
@@ -200,3 +200,13 @@ int perf_env__lookup_objdump(struct perf_env *env, const char **path)
return perf_env__lookup_binutils_path(env, "objdump", path);
}
+
+/*
+ * Some architectures have a single address space for kernel and user addresses,
+ * which makes it possible to determine if an address is in kernel space or user
+ * space.
+ */
+bool perf_env__single_address_space(struct perf_env *env)
+{
+ return strcmp(perf_env__arch(env), "sparc");
+}
diff --git a/tools/perf/arch/common.h b/tools/perf/arch/common.h
index 2167001b18c5..c298a446d1f6 100644
--- a/tools/perf/arch/common.h
+++ b/tools/perf/arch/common.h
@@ -5,5 +5,6 @@
#include "../util/env.h"
int perf_env__lookup_objdump(struct perf_env *env, const char **path);
+bool perf_env__single_address_space(struct perf_env *env);
#endif /* ARCH_PERF_COMMON_H */
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index b5bc85bd0bbe..a7b4d3f611c5 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -728,8 +728,8 @@ static int perf_sample__fprintf_brstack(struct perf_sample *sample,
if (PRINT_FIELD(DSO)) {
memset(&alf, 0, sizeof(alf));
memset(&alt, 0, sizeof(alt));
- thread__find_map(thread, sample->cpumode, from, &alf);
- thread__find_map(thread, sample->cpumode, to, &alt);
+ thread__find_map_fb(thread, sample->cpumode, from, &alf);
+ thread__find_map_fb(thread, sample->cpumode, to, &alt);
}
printed += fprintf(fp, " 0x%"PRIx64, from);
@@ -775,8 +775,8 @@ static int perf_sample__fprintf_brstacksym(struct perf_sample *sample,
from = br->entries[i].from;
to = br->entries[i].to;
- thread__find_symbol(thread, sample->cpumode, from, &alf);
- thread__find_symbol(thread, sample->cpumode, to, &alt);
+ thread__find_symbol_fb(thread, sample->cpumode, from, &alf);
+ thread__find_symbol_fb(thread, sample->cpumode, to, &alt);
printed += symbol__fprintf_symname_offs(alf.sym, &alf, fp);
if (PRINT_FIELD(DSO)) {
@@ -820,11 +820,11 @@ static int perf_sample__fprintf_brstackoff(struct perf_sample *sample,
from = br->entries[i].from;
to = br->entries[i].to;
- if (thread__find_map(thread, sample->cpumode, from, &alf) &&
+ if (thread__find_map_fb(thread, sample->cpumode, from, &alf) &&
!alf.map->dso->adjust_symbols)
from = map__map_ip(alf.map, from);
- if (thread__find_map(thread, sample->cpumode, to, &alt) &&
+ if (thread__find_map_fb(thread, sample->cpumode, to, &alt) &&
!alt.map->dso->adjust_symbols)
to = map__map_ip(alt.map, to);
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 59f38c7693f8..4c23779e271a 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -166,7 +166,7 @@ const char *perf_env__arch(struct perf_env *env)
struct utsname uts;
char *arch_name;
- if (!env) { /* Assume local operation */
+ if (!env || !env->arch) { /* Assume local operation */
if (uname(&uts) < 0)
return NULL;
arch_name = uts.machine;
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index e9c108a6b1c3..24493200cf80 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -1577,6 +1577,24 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
return al->map;
}
+/*
+ * For branch stacks or branch samples, the sample cpumode might not be correct
+ * because it applies only to the sample 'ip' and not necessary to 'addr' or
+ * branch stack addresses. If possible, use a fallback to deal with those cases.
+ */
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
+ struct addr_location *al)
+{
+ struct map *map = thread__find_map(thread, cpumode, addr, al);
+ struct machine *machine = thread->mg->machine;
+ u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);
+
+ if (map || addr_cpumode == cpumode)
+ return map;
+
+ return thread__find_map(thread, addr_cpumode, addr, al);
+}
+
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
u64 addr, struct addr_location *al)
{
@@ -1586,6 +1604,15 @@ struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
return al->sym;
}
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+ u64 addr, struct addr_location *al)
+{
+ al->sym = NULL;
+ if (thread__find_map_fb(thread, cpumode, addr, al))
+ al->sym = map__find_symbol(al->map, al->addr);
+ return al->sym;
+}
+
/*
* Callers need to drop the reference to al->thread, obtained in
* machine__findnew_thread()
@@ -1679,7 +1706,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr)
void thread__resolve(struct thread *thread, struct addr_location *al,
struct perf_sample *sample)
{
- thread__find_map(thread, sample->cpumode, sample->addr, al);
+ thread__find_map_fb(thread, sample->cpumode, sample->addr, al);
al->cpu = sample->cpu;
al->sym = NULL;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 8f36ce813bc5..9397e3f2444d 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2592,6 +2592,33 @@ int machine__get_kernel_start(struct machine *machine)
return err;
}
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
+{
+ u8 addr_cpumode = cpumode;
+ bool kernel_ip;
+
+ if (!machine->single_address_space)
+ goto out;
+
+ kernel_ip = machine__kernel_ip(machine, addr);
+ switch (cpumode) {
+ case PERF_RECORD_MISC_KERNEL:
+ case PERF_RECORD_MISC_USER:
+ addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
+ PERF_RECORD_MISC_USER;
+ break;
+ case PERF_RECORD_MISC_GUEST_KERNEL:
+ case PERF_RECORD_MISC_GUEST_USER:
+ addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
+ PERF_RECORD_MISC_GUEST_USER;
+ break;
+ default:
+ break;
+ }
+out:
+ return addr_cpumode;
+}
+
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
return dsos__findnew(&machine->dsos, filename);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index d856b85862e2..ebde3ea70225 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -42,6 +42,7 @@ struct machine {
u16 id_hdr_size;
bool comm_exec;
bool kptr_restrict_warned;
+ bool single_address_space;
char *root_dir;
char *mmap_name;
struct threads threads[THREADS__TABLE_SIZE];
@@ -99,6 +100,8 @@ static inline bool machine__kernel_ip(struct machine *machine, u64 ip)
return ip >= kernel_start;
}
+u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr);
+
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid);
struct comm *machine__thread_exec_comm(struct machine *machine,
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 7e49baad304d..7348eea0248f 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -145,7 +145,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
int fd, ret = -1;
char path[PATH_MAX];
- snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -175,7 +175,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
ssize_t sret;
int fd;
- snprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -205,7 +205,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name)
char path[PATH_MAX];
int fd;
- snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
@@ -223,7 +223,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
char path[PATH_MAX];
int fd;
- snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
+ scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name);
fd = open(path, O_RDONLY);
if (fd == -1)
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 69aa93d4ee99..0c4b050f6fc2 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -494,14 +494,14 @@ static PyObject *python_process_brstack(struct perf_sample *sample,
pydict_set_item_string_decref(pyelem, "cycles",
PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles));
- thread__find_map(thread, sample->cpumode,
- br->entries[i].from, &al);
+ thread__find_map_fb(thread, sample->cpumode,
+ br->entries[i].from, &al);
dsoname = get_dsoname(al.map);
pydict_set_item_string_decref(pyelem, "from_dsoname",
_PyUnicode_FromString(dsoname));
- thread__find_map(thread, sample->cpumode,
- br->entries[i].to, &al);
+ thread__find_map_fb(thread, sample->cpumode,
+ br->entries[i].to, &al);
dsoname = get_dsoname(al.map);
pydict_set_item_string_decref(pyelem, "to_dsoname",
_PyUnicode_FromString(dsoname));
@@ -576,14 +576,14 @@ static PyObject *python_process_brstacksym(struct perf_sample *sample,
if (!pyelem)
Py_FatalError("couldn't create Python dictionary");
- thread__find_symbol(thread, sample->cpumode,
- br->entries[i].from, &al);
+ thread__find_symbol_fb(thread, sample->cpumode,
+ br->entries[i].from, &al);
get_symoff(al.sym, &al, true, bf, sizeof(bf));
pydict_set_item_string_decref(pyelem, "from",
_PyUnicode_FromString(bf));
- thread__find_symbol(thread, sample->cpumode,
- br->entries[i].to, &al);
+ thread__find_symbol_fb(thread, sample->cpumode,
+ br->entries[i].to, &al);
get_symoff(al.sym, &al, true, bf, sizeof(bf));
pydict_set_item_string_decref(pyelem, "to",
_PyUnicode_FromString(bf));
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 7d2c8ce6cfad..f8eab197f35c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -24,6 +24,7 @@
#include "thread.h"
#include "thread-stack.h"
#include "stat.h"
+#include "arch/common.h"
static int perf_session__deliver_event(struct perf_session *session,
union perf_event *event,
@@ -150,6 +151,9 @@ struct perf_session *perf_session__new(struct perf_data *data,
session->machines.host.env = &perf_env;
}
+ session->machines.host.single_address_space =
+ perf_env__single_address_space(session->machines.host.env);
+
if (!data || perf_data__is_write(data)) {
/*
* In O_RDONLY mode this will be performed when reading the
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 30e2b4c165fe..5920c3bb8ffe 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -96,9 +96,13 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
struct addr_location *al);
+struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
+ struct addr_location *al);
struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
u64 addr, struct addr_location *al);
+struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
+ u64 addr, struct addr_location *al);
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
struct addr_location *al);
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 23774970c9df..abcd29db2d7a 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -66,7 +66,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u32 kvm_next_vmid;
static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_RWLOCK(kvm_vmid_lock);
+static DEFINE_SPINLOCK(kvm_vmid_lock);
static bool vgic_present;
@@ -484,7 +484,9 @@ void force_vm_exit(const cpumask_t *mask)
*/
static bool need_new_vmid_gen(struct kvm *kvm)
{
- return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+ u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
+ smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
+ return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
}
/**
@@ -499,16 +501,11 @@ static void update_vttbr(struct kvm *kvm)
{
phys_addr_t pgd_phys;
u64 vmid, cnp = kvm_cpu_has_cnp() ? VTTBR_CNP_BIT : 0;
- bool new_gen;
- read_lock(&kvm_vmid_lock);
- new_gen = need_new_vmid_gen(kvm);
- read_unlock(&kvm_vmid_lock);
-
- if (!new_gen)
+ if (!need_new_vmid_gen(kvm))
return;
- write_lock(&kvm_vmid_lock);
+ spin_lock(&kvm_vmid_lock);
/*
* We need to re-check the vmid_gen here to ensure that if another vcpu
@@ -516,7 +513,7 @@ static void update_vttbr(struct kvm *kvm)
* use the same vmid.
*/
if (!need_new_vmid_gen(kvm)) {
- write_unlock(&kvm_vmid_lock);
+ spin_unlock(&kvm_vmid_lock);
return;
}
@@ -539,7 +536,6 @@ static void update_vttbr(struct kvm *kvm)
kvm_call_hyp(__kvm_flush_vm_context);
}
- kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
@@ -550,7 +546,10 @@ static void update_vttbr(struct kvm *kvm)
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid | cnp;
- write_unlock(&kvm_vmid_lock);
+ smp_wmb();
+ WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+
+ spin_unlock(&kvm_vmid_lock);
}
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index f56ff1cf52ec..ceeda7e04a4d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -313,36 +313,30 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
spin_lock_irqsave(&irq->irq_lock, flags);
- /*
- * If this virtual IRQ was written into a list register, we
- * have to make sure the CPU that runs the VCPU thread has
- * synced back the LR state to the struct vgic_irq.
- *
- * As long as the conditions below are true, we know the VCPU thread
- * may be on its way back from the guest (we kicked the VCPU thread in
- * vgic_change_active_prepare) and still has to sync back this IRQ,
- * so we release and re-acquire the spin_lock to let the other thread
- * sync back the IRQ.
- *
- * When accessing VGIC state from user space, requester_vcpu is
- * NULL, which is fine, because we guarantee that no VCPUs are running
- * when accessing VGIC state from user space so irq->vcpu->cpu is
- * always -1.
- */
- while (irq->vcpu && /* IRQ may have state in an LR somewhere */
- irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
- irq->vcpu->cpu != -1) /* VCPU thread is running */
- cond_resched_lock(&irq->irq_lock);
-
if (irq->hw) {
vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
} else {
u32 model = vcpu->kvm->arch.vgic.vgic_model;
+ u8 active_source;
irq->active = active;
+
+ /*
+ * The GICv2 architecture indicates that the source CPUID for
+ * an SGI should be provided during an EOI which implies that
+ * the active state is stored somewhere, but at the same time
+ * this state is not architecturally exposed anywhere and we
+ * have no way of knowing the right source.
+ *
+ * This may lead to a VCPU not being able to receive
+ * additional instances of a particular SGI after migration
+ * for a GICv2 VM on some GIC implementations. Oh well.
+ */
+ active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0;
+
if (model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
active && vgic_irq_is_sgi(irq->intid))
- irq->active_source = requester_vcpu->vcpu_id;
+ irq->active_source = active_source;
}
if (irq->active)
@@ -368,14 +362,16 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
*/
static void vgic_change_active_prepare(struct kvm_vcpu *vcpu, u32 intid)
{
- if (intid > VGIC_NR_PRIVATE_IRQS)
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+ intid > VGIC_NR_PRIVATE_IRQS)
kvm_arm_halt_guest(vcpu->kvm);
}
/* See vgic_change_active_prepare */
static void vgic_change_active_finish(struct kvm_vcpu *vcpu, u32 intid)
{
- if (intid > VGIC_NR_PRIVATE_IRQS)
+ if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 ||
+ intid > VGIC_NR_PRIVATE_IRQS)
kvm_arm_resume_guest(vcpu->kvm);
}
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 7cfdfbc910e0..f884a54b2601 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -103,13 +103,13 @@ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
{
/* SGIs and PPIs */
if (intid <= VGIC_MAX_PRIVATE) {
- intid = array_index_nospec(intid, VGIC_MAX_PRIVATE);
+ intid = array_index_nospec(intid, VGIC_MAX_PRIVATE + 1);
return &vcpu->arch.vgic_cpu.private_irqs[intid];
}
/* SPIs */
- if (intid <= VGIC_MAX_SPI) {
- intid = array_index_nospec(intid, VGIC_MAX_SPI);
+ if (intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
+ intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
}