Re: Linux 4.19.121

From: Greg KH
Date: Wed May 06 2020 - 08:46:07 EST


diff --git a/Makefile b/Makefile
index 74b60bf2ff79..f7406a6f8330 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 120
+SUBLEVEL = 121
EXTRAVERSION =
NAME = "People's Front"

diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 54b6547d32b2..3837cad94f35 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -227,13 +227,13 @@ int acpi_device_set_power(struct acpi_device *device, int state)
end:
if (result) {
dev_warn(&device->dev, "Failed to change power state to %s\n",
- acpi_power_state_string(state));
+ acpi_power_state_string(target_state));
} else {
device->power.state = target_state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"Device [%s] transitioned to %s\n",
device->pnp.bus_id,
- acpi_power_state_string(state)));
+ acpi_power_state_string(target_state)));
}

return result;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 7b7fba0c9253..e38a653dd208 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -567,8 +567,8 @@ static int dmatest_func(void *data)
flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;

ktime = ktime_get();
- while (!kthread_should_stop()
- && !(params->iterations && total_tests >= params->iterations)) {
+ while (!(kthread_should_stop() ||
+ (params->iterations && total_tests >= params->iterations))) {
struct dma_async_tx_descriptor *tx = NULL;
struct dmaengine_unmap_data *um;
dma_addr_t *dsts;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f5926bf5dabd..d5dcee7f1fc8 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -4706,7 +4706,7 @@ static struct drm_display_mode *drm_mode_displayid_detailed(struct drm_device *d
struct drm_display_mode *mode;
unsigned pixel_clock = (timings->pixel_clock[0] |
(timings->pixel_clock[1] << 8) |
- (timings->pixel_clock[2] << 16));
+ (timings->pixel_clock[2] << 16)) + 1;
unsigned hactive = (timings->hactive[0] | timings->hactive[1] << 8) + 1;
unsigned hblank = (timings->hblank[0] | timings->hblank[1] << 8) + 1;
unsigned hsync = (timings->hsync[0] | (timings->hsync[1] & 0x7f) << 8) + 1;
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 208af9f37914..5177e37d81e6 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -472,9 +472,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
return ret;

ret = qxl_release_reserve_list(release, true);
- if (ret)
+ if (ret) {
+ qxl_release_free(qdev, release);
return ret;
-
+ }
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->flags = QXL_SURF_FLAG_KEEP_DATA;
@@ -500,8 +501,8 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
/* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
@@ -543,9 +544,8 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);

- qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);

return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 0570c6826bff..769e12b4c13c 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -532,8 +532,8 @@ static int qxl_primary_apply_cursor(struct drm_plane *plane)
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);

- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);

return ret;

@@ -694,8 +694,8 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cmd->u.position.y = plane->state->crtc_y + fb->hot_y;

qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);

if (old_cursor_bo)
qxl_bo_unref(&old_cursor_bo);
@@ -740,8 +740,8 @@ static void qxl_cursor_atomic_disable(struct drm_plane *plane,
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);

- qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
}

static int qxl_plane_prepare_fb(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 4d8681e84e68..d009f2bc28e9 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -241,8 +241,8 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);

- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);

out_free_palette:
if (palette_bo)
@@ -348,9 +348,10 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
goto out_release_backoff;

rects = drawable_set_clipping(qdev, num_clips, clips_bo);
- if (!rects)
+ if (!rects) {
+ ret = -EINVAL;
goto out_release_backoff;
-
+ }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);

drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
@@ -381,8 +382,8 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
}
qxl_bo_kunmap(clips_bo);

- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);

out_release_backoff:
if (ret)
@@ -432,8 +433,8 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
drawable->u.copy_bits.src_pos.y = sy;
qxl_release_unmap(qdev, release, &drawable->release_info);

- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);

out_free_release:
if (ret)
@@ -476,8 +477,8 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)

qxl_release_unmap(qdev, release, &drawable->release_info);

- qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_fence_buffer_objects(release);
+ qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);

out_free_release:
if (ret)
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 6cc9f3367fa0..5536b2f7b7b0 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -257,11 +257,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
apply_surf_reloc(qdev, &reloc_info[i]);
}

+ qxl_release_fence_buffer_objects(release);
ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
- if (ret)
- qxl_release_backoff_reserve_list(release);
- else
- qxl_release_fence_buffer_objects(release);

out_free_bos:
out_free_release:
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index c4118bcd5103..bf937fec50dc 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -381,7 +381,7 @@ lookup_get_fd_uobject(const struct uverbs_api_object *obj,
* and the caller is expected to ensure that uverbs_close_fd is never
* done while a call top lookup is possible.
*/
- if (f->f_op != fd_type->fops) {
+ if (f->f_op != fd_type->fops || uobject->ufile != ufile) {
fput(f);
return ERR_PTR(-EBADF);
}
@@ -697,7 +697,6 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
enum rdma_lookup_mode mode)
{
assert_uverbs_usecnt(uobj, mode);
- uobj->uapi_object->type_class->lookup_put(uobj, mode);
/*
* In order to unlock an object, either decrease its usecnt for
* read access or zero it in case of exclusive access. See
@@ -714,6 +713,7 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
break;
}

+ uobj->uapi_object->type_class->lookup_put(uobj, mode);
/* Pairs with the kref obtained by type->lookup_get */
uverbs_uobject_put(uobj);
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index a19d3ad14dc3..eac4ade45611 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1606,8 +1606,9 @@ static int __mlx4_ib_create_default_rules(
int i;

for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) {
+ union ib_flow_spec ib_spec = {};
int ret;
- union ib_flow_spec ib_spec;
+
switch (pdefault_rules->rules_create_list[i]) {
case 0:
/* no rule */
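
The mlx4 hunk above switches ib_spec to an empty-brace initializer so the union starts out fully zeroed on every loop iteration instead of carrying whatever bytes were already on the stack. A userspace-only sketch of that difference (the union below is invented for illustration, it is not the kernel's ib_flow_spec):

#include <stdio.h>
#include <string.h>

/* Invented stand-in for a union built from several spec structs. */
union spec {
    struct { int type; unsigned char mac[6]; } eth;
    struct { int type; unsigned int addr;   } ipv4;
    unsigned char raw[32];
};

int main(void)
{
    union spec a;        /* uninitialized: may hold stale stack bytes */
    union spec b = {};   /* zeroed, as in the patched loop (GCC/Clang
                            accept the empty braces the kernel uses) */

    memset(&a, 0xAA, sizeof(a));   /* simulate leftover stack contents */

    printf("a.raw[8] = 0x%02x  (garbage that would leak out)\n",
           (unsigned)a.raw[8]);
    printf("b.raw[8] = 0x%02x  (guaranteed zero)\n",
           (unsigned)b.raw[8]);
    return 0;
}
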
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 4fc9278d0dde..10f6ae4f8f3f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4887,7 +4887,9 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
rdma_ah_set_path_bits(ah_attr, path->grh_mlid & 0x7f);
rdma_ah_set_static_rate(ah_attr,
path->static_rate ? path->static_rate - 5 : 0);
- if (path->grh_mlid & (1 << 7)) {
+
+ if (path->grh_mlid & (1 << 7) ||
+ ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
u32 tc_fl = be32_to_cpu(path->tclass_flowlabel);

rdma_ah_set_grh(ah_attr, NULL,
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 465f28a7844c..2557ed112bc2 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2836,7 +2836,7 @@ static int __init parse_amd_iommu_intr(char *str)
{
for (; *str; ++str) {
if (strncmp(str, "legacy", 6) == 0) {
- amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
+ amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY_GA;
break;
}
if (strncmp(str, "vapic", 5) == 0) {
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 9a6ed5eeaad1..b0a4a3d2f60e 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -797,8 +797,11 @@ static int qcom_iommu_device_probe(struct platform_device *pdev)
qcom_iommu->dev = dev;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res)
+ if (res) {
qcom_iommu->local_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qcom_iommu->local_base))
+ return PTR_ERR(qcom_iommu->local_base);
+ }

qcom_iommu->iface_clk = devm_clk_get(dev, "iface");
if (IS_ERR(qcom_iommu->iface_clk)) {
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index def4f6ec290b..207ca0ad0b59 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -586,10 +586,12 @@ static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)

/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
- queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
- if (!pgpath || !queue_io)
+ if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);

+ /* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
+ queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
+
if ((pgpath && queue_io) ||
(!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
/* Queue for the daemon to resubmit */
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0f4a2143bf55..bb8327999705 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -436,7 +436,7 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
fio->level++;

if (type == DM_VERITY_BLOCK_TYPE_METADATA)
- block += v->data_blocks;
+ block = block - v->hash_start + v->data_blocks;

/*
* For RS(M, N), the continuous FEC data is divided into blocks of N
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index a76eda50ad48..4321c48eba6b 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -884,6 +884,24 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
return 0;
}

+static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
+{
+ struct dm_io_region region;
+ struct dm_io_request req;
+
+ region.bdev = wc->ssd_dev->bdev;
+ region.sector = wc->start_sector;
+ region.count = n_sectors;
+ req.bi_op = REQ_OP_READ;
+ req.bi_op_flags = REQ_SYNC;
+ req.mem.type = DM_IO_VMA;
+ req.mem.ptr.vma = (char *)wc->memory_map;
+ req.client = wc->dm_io;
+ req.notify.fn = NULL;
+
+ return dm_io(&req, 1, &region, NULL);
+}
+
static void writecache_resume(struct dm_target *ti)
{
struct dm_writecache *wc = ti->private;
@@ -894,8 +912,18 @@ static void writecache_resume(struct dm_target *ti)

wc_lock(wc);

- if (WC_MODE_PMEM(wc))
+ if (WC_MODE_PMEM(wc)) {
persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
+ } else {
+ r = writecache_read_metadata(wc, wc->metadata_sectors);
+ if (r) {
+ size_t sb_entries_offset;
+ writecache_error(wc, r, "unable to read metadata: %d", r);
+ sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
+ memset((char *)wc->memory_map + sb_entries_offset, -1,
+ (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
+ }
+ }

wc->tree = RB_ROOT;
INIT_LIST_HEAD(&wc->lru);
@@ -1978,6 +2006,12 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Invalid block size";
goto bad;
}
+ if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
+ wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
+ r = -EINVAL;
+ ti->error = "Block size is smaller than device logical block size";
+ goto bad;
+ }
wc->block_size_bits = __ffs(wc->block_size);

wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
@@ -2066,8 +2100,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
} else {
- struct dm_io_region region;
- struct dm_io_request req;
size_t n_blocks, n_metadata_blocks;
uint64_t n_bitmap_bits;

@@ -2124,19 +2156,9 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}

- region.bdev = wc->ssd_dev->bdev;
- region.sector = wc->start_sector;
- region.count = wc->metadata_sectors;
- req.bi_op = REQ_OP_READ;
- req.bi_op_flags = REQ_SYNC;
- req.mem.type = DM_IO_VMA;
- req.mem.ptr.vma = (char *)wc->memory_map;
- req.client = wc->dm_io;
- req.notify.fn = NULL;
-
- r = dm_io(&req, 1, &region, NULL);
+ r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
if (r) {
- ti->error = "Unable to read metadata";
+ ti->error = "Unable to read first block of metadata";
goto bad;
}
}
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 28f5aaca505a..2c5a6e7aadc0 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/io.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -351,12 +352,16 @@ static int cqhci_enable(struct mmc_host *mmc, struct mmc_card *card)
/* CQHCI is idle and should halt immediately, so set a small timeout */
#define CQHCI_OFF_TIMEOUT 100

+static u32 cqhci_read_ctl(struct cqhci_host *cq_host)
+{
+ return cqhci_readl(cq_host, CQHCI_CTL);
+}
+
static void cqhci_off(struct mmc_host *mmc)
{
struct cqhci_host *cq_host = mmc->cqe_private;
- ktime_t timeout;
- bool timed_out;
u32 reg;
+ int err;

if (!cq_host->enabled || !mmc->cqe_on || cq_host->recovery_halt)
return;
@@ -366,15 +371,9 @@ static void cqhci_off(struct mmc_host *mmc)

cqhci_writel(cq_host, CQHCI_HALT, CQHCI_CTL);

- timeout = ktime_add_us(ktime_get(), CQHCI_OFF_TIMEOUT);
- while (1) {
- timed_out = ktime_compare(ktime_get(), timeout) > 0;
- reg = cqhci_readl(cq_host, CQHCI_CTL);
- if ((reg & CQHCI_HALT) || timed_out)
- break;
- }
-
- if (timed_out)
+ err = readx_poll_timeout(cqhci_read_ctl, cq_host, reg,
+ reg & CQHCI_HALT, 0, CQHCI_OFF_TIMEOUT);
+ if (err < 0)
pr_err("%s: cqhci: CQE stuck on\n", mmc_hostname(mmc));
else
pr_debug("%s: cqhci: CQE off\n", mmc_hostname(mmc));
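
The cqhci change above swaps a hand-rolled ktime busy-wait for readx_poll_timeout() from <linux/iopoll.h>: keep calling the accessor until the condition holds or the timeout in microseconds expires, with one final read after the deadline before giving up. Roughly the same pattern in plain userspace C; poll_bit() and the fake register below are invented for the sketch and are not the kernel macro:

#define _POSIX_C_SOURCE 200809L
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_us(void)
{
    struct timespec ts;

    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000u + (uint64_t)ts.tv_nsec / 1000u;
}

/* Fake "register" that reports the bit only after a few reads. */
#define HALT_BIT 0x1u
static uint32_t read_fake_reg(void *ctx)
{
    int *reads_left = ctx;

    return (*reads_left)-- > 0 ? 0u : HALT_BIT;
}

/* Poll read_reg() until (val & bit) is set or timeout_us elapses.
 * Returns 0 on success and -1 on timeout, mirroring the kernel helper's
 * 0 / -ETIMEDOUT contract, including the final read after the deadline. */
static int poll_bit(uint32_t (*read_reg)(void *), void *ctx, uint32_t bit,
                    uint64_t timeout_us, uint32_t *val)
{
    uint64_t deadline = now_us() + timeout_us;

    for (;;) {
        *val = read_reg(ctx);
        if (*val & bit)
            return 0;
        if (now_us() > deadline) {
            *val = read_reg(ctx);
            return (*val & bit) ? 0 : -1;
        }
    }
}

int main(void)
{
    int reads_left = 5;
    uint32_t reg;

    if (poll_bit(read_fake_reg, &reads_left, HALT_BIT, 100, &reg))
        fprintf(stderr, "fake CQE stuck on\n");
    else
        printf("fake CQE halted, reg=0x%x\n", (unsigned)reg);
    return 0;
}
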
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index f6c76be2be0d..1c062473b1c2 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -360,14 +360,6 @@ static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
meson_mx_mmc_start_cmd(mmc, mrq->cmd);
}

-static int meson_mx_mmc_card_busy(struct mmc_host *mmc)
-{
- struct meson_mx_mmc_host *host = mmc_priv(mmc);
- u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC);
-
- return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK);
-}
-
static void meson_mx_mmc_read_response(struct mmc_host *mmc,
struct mmc_command *cmd)
{
@@ -509,7 +501,6 @@ static void meson_mx_mmc_timeout(struct timer_list *t)
static struct mmc_host_ops meson_mx_mmc_ops = {
.request = meson_mx_mmc_request,
.set_ios = meson_mx_mmc_set_ios,
- .card_busy = meson_mx_mmc_card_busy,
.get_cd = mmc_gpio_get_cd,
.get_ro = mmc_gpio_get_ro,
};
@@ -573,7 +564,7 @@ static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host)
mmc->f_max = clk_round_rate(host->cfg_div_clk,
clk_get_rate(host->parent_clk));

- mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23;
+ mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY;
mmc->ops = &meson_mx_mmc_ops;

ret = mmc_of_parse(mmc);
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 17b2054d9b62..19ae527ecc72 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1909,6 +1909,8 @@ static int sdhci_msm_probe(struct platform_device *pdev)
goto clk_disable;
}

+ msm_host->mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_NEED_RSP_BUSY;
+
pm_runtime_get_noresume(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index 65985dc3e1a7..35168b47afe6 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -556,6 +556,9 @@ static int intel_select_drive_strength(struct mmc_card *card,
struct sdhci_pci_slot *slot = sdhci_priv(host);
struct intel_host *intel_host = sdhci_pci_priv(slot);

+ if (!(mmc_driver_type_mask(intel_host->drv_strength) & card_drv))
+ return 0;
+
return intel_host->drv_strength;
}

diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index a0b5089b3274..fafb02644efd 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -238,6 +238,16 @@ static void xenon_voltage_switch(struct sdhci_host *host)
{
/* Wait for 5ms after set 1.8V signal enable bit */
usleep_range(5000, 5500);
+
+ /*
+ * For some reason the controller's Host Control2 register reports
+ * the bit representing 1.8V signaling as 0 when read after it was
+ * written as 1. Subsequent read reports 1.
+ *
+ * Since this may cause some issues, do an empty read of the Host
+ * Control2 register here to circumvent this.
+ */
+ sdhci_readw(host, SDHCI_HOST_CONTROL2);
}

static const struct sdhci_ops sdhci_xenon_ops = {
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index fff20a370767..59b8681842be 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3654,6 +3654,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);

+ /*
+ * if UNLOADING flag is already set, then continue unload,
+ * where it was set first.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;
+
if (IS_QLA25XX(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha)) {
if (ha->flags.fw_started)
qla2x00_abort_isp_cleanup(base_vha);
@@ -3671,15 +3678,6 @@ qla2x00_remove_one(struct pci_dev *pdev)

qla2x00_wait_for_sess_deletion(base_vha);

- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
- set_bit(UNLOADING, &base_vha->dpc_flags);
-
qla_nvme_delete(base_vha);

dma_free_coherent(&ha->pdev->dev,
@@ -4647,6 +4645,9 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
struct qla_work_evt *e;
uint8_t bail;

+ if (test_bit(UNLOADING, &vha->dpc_flags))
+ return NULL;
+
QLA_VHA_MARK_BUSY(vha, bail);
if (bail)
return NULL;
@@ -5845,13 +5846,6 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
struct pci_dev *pdev = ha->pdev;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

- /*
- * if UNLOAD flag is already set, then continue unload,
- * where it was set first.
- */
- if (test_bit(UNLOADING, &base_vha->dpc_flags))
- return;
-
ql_log(ql_log_warn, base_vha, 0x015b,
"Disabling adapter.\n");

@@ -5862,9 +5856,14 @@ qla2x00_disable_board_on_pci_error(struct work_struct *work)
return;
}

- qla2x00_wait_for_sess_deletion(base_vha);
+ /*
+ * if UNLOADING flag is already set, then continue unload,
+ * where it was set first.
+ */
+ if (test_and_set_bit(UNLOADING, &base_vha->dpc_flags))
+ return;

- set_bit(UNLOADING, &base_vha->dpc_flags);
+ qla2x00_wait_for_sess_deletion(base_vha);

qla2x00_delete_all_vps(ha, base_vha);
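
The qla2xxx hunks above fold "if UNLOADING is already set, bail out; otherwise set it" into a single test_and_set_bit(), so whichever teardown path arrives first wins and the separate test/set no longer races. The same idempotent-teardown idiom with C11 atomics (names invented for the sketch, build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint flags;          /* stand-in for vha->dpc_flags     */
#define UNLOADING_BIT (1u << 0)    /* stand-in for the UNLOADING flag */

static void *teardown_path(void *name)
{
    /* Atomic read-modify-write returns the *old* value, so exactly one
     * caller sees the bit clear and performs the teardown. */
    if (atomic_fetch_or(&flags, UNLOADING_BIT) & UNLOADING_BIT) {
        printf("%s: already unloading, backing off\n", (char *)name);
        return NULL;
    }
    printf("%s: performing teardown\n", (char *)name);
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, teardown_path, "remove_one");
    pthread_create(&b, NULL, teardown_path, "pci_error_path");
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}
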

diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 854b2bcca7c1..5668d02de10b 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -445,7 +445,7 @@ iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd)
target_to_linux_sector(dev, cmd->t_task_lba),
target_to_linux_sector(dev,
sbc_get_write_same_sectors(cmd)),
- GFP_KERNEL, false);
+ GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
if (ret)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index c36275754086..6dbdadb936a8 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -385,8 +385,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
vma = find_vma_intersection(mm, vaddr, vaddr + 1);

if (vma && vma->vm_flags & VM_PFNMAP) {
- *pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
- if (is_invalid_reserved_pfn(*pfn))
+ if (!follow_pfn(vma, vaddr, pfn) &&
+ is_invalid_reserved_pfn(*pfn))
ret = 0;
}

@@ -598,7 +598,7 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
continue;
}

- remote_vaddr = dma->vaddr + iova - dma->iova;
+ remote_vaddr = dma->vaddr + (iova - dma->iova);
ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
do_accounting);
if (ret)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 47ca1ebda056..271e70c45d5b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -10286,7 +10286,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
if (!path) {
ret = -ENOMEM;
- goto out;
+ goto out_put_group;
}

/*
@@ -10323,7 +10323,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
ret = btrfs_orphan_add(trans, BTRFS_I(inode));
if (ret) {
btrfs_add_delayed_iput(inode);
- goto out;
+ goto out_put_group;
}
clear_nlink(inode);
/* One for the block groups ref */
@@ -10346,13 +10346,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,

ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
if (ret < 0)
- goto out;
+ goto out_put_group;
if (ret > 0)
btrfs_release_path(path);
if (ret == 0) {
ret = btrfs_del_item(trans, tree_root, path);
if (ret)
- goto out;
+ goto out_put_group;
btrfs_release_path(path);
}

@@ -10494,9 +10494,9 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,

ret = remove_block_group_free_space(trans, block_group);
if (ret)
- goto out;
+ goto out_put_group;

- btrfs_put_block_group(block_group);
+ /* Once for the block groups rbtree */
btrfs_put_block_group(block_group);

ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
@@ -10524,6 +10524,10 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
/* once for the tree */
free_extent_map(em);
}
+
+out_put_group:
+ /* Once for the lookup reference */
+ btrfs_put_block_group(block_group);
out:
btrfs_free_path(path);
return ret;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 4b1491e1b803..8829d89eb4af 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -572,10 +572,19 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
}

got_it:
- btrfs_record_root_in_trans(h, root);
-
if (!current->journal_info)
current->journal_info = h;
+
+ /*
+ * btrfs_record_root_in_trans() needs to alloc new extents, and may
+ * call btrfs_join_transaction() while we're also starting a
+ * transaction.
+ *
+ * Thus it need to be called after current->journal_info initialized,
+ * or we can deadlock.
+ */
+ btrfs_record_root_in_trans(h, root);
+
return h;

join_fail:
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d4c86c6cbe39..928ac2c4899e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4182,6 +4182,9 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
const u64 ino = btrfs_ino(inode);
struct btrfs_path *dst_path = NULL;
bool dropped_extents = false;
+ u64 truncate_offset = i_size;
+ struct extent_buffer *leaf;
+ int slot;
int ins_nr = 0;
int start_slot;
int ret;
@@ -4196,9 +4199,43 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
if (ret < 0)
goto out;

+ /*
+ * We must check if there is a prealloc extent that starts before the
+ * i_size and crosses the i_size boundary. This is to ensure later we
+ * truncate down to the end of that extent and not to the i_size, as
+ * otherwise we end up losing part of the prealloc extent after a log
+ * replay and with an implicit hole if there is another prealloc extent
+ * that starts at an offset beyond i_size.
+ */
+ ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
+ if (ret < 0)
+ goto out;
+
+ if (ret == 0) {
+ struct btrfs_file_extent_item *ei;
+
+ leaf = path->nodes[0];
+ slot = path->slots[0];
+ ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
+
+ if (btrfs_file_extent_type(leaf, ei) ==
+ BTRFS_FILE_EXTENT_PREALLOC) {
+ u64 extent_end;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ extent_end = key.offset +
+ btrfs_file_extent_num_bytes(leaf, ei);
+
+ if (extent_end > i_size)
+ truncate_offset = extent_end;
+ }
+ } else {
+ ret = 0;
+ }
+
while (true) {
- struct extent_buffer *leaf = path->nodes[0];
- int slot = path->slots[0];
+ leaf = path->nodes[0];
+ slot = path->slots[0];

if (slot >= btrfs_header_nritems(leaf)) {
if (ins_nr > 0) {
@@ -4236,7 +4273,7 @@ static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
ret = btrfs_truncate_inode_items(trans,
root->log_root,
&inode->vfs_inode,
- i_size,
+ truncate_offset,
BTRFS_EXTENT_DATA_KEY);
} while (ret == -EAGAIN);
if (ret)
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 9fce18548f7e..24d39cce8ba4 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -255,37 +255,45 @@ int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,

int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
- struct posix_acl *alloc = NULL, *dfacl = NULL;
+ struct posix_acl *orig = acl, *dfacl = NULL, *alloc;
int status;

if (S_ISDIR(inode->i_mode)) {
switch(type) {
case ACL_TYPE_ACCESS:
- alloc = dfacl = get_acl(inode, ACL_TYPE_DEFAULT);
+ alloc = get_acl(inode, ACL_TYPE_DEFAULT);
if (IS_ERR(alloc))
goto fail;
+ dfacl = alloc;
break;

case ACL_TYPE_DEFAULT:
- dfacl = acl;
- alloc = acl = get_acl(inode, ACL_TYPE_ACCESS);
+ alloc = get_acl(inode, ACL_TYPE_ACCESS);
if (IS_ERR(alloc))
goto fail;
+ dfacl = acl;
+ acl = alloc;
break;
}
}

if (acl == NULL) {
- alloc = acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
+ alloc = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
if (IS_ERR(alloc))
goto fail;
+ acl = alloc;
}
status = __nfs3_proc_setacls(inode, acl, dfacl);
- posix_acl_release(alloc);
+out:
+ if (acl != orig)
+ posix_acl_release(acl);
+ if (dfacl != orig)
+ posix_acl_release(dfacl);
return status;

fail:
- return PTR_ERR(alloc);
+ status = PTR_ERR(alloc);
+ goto out;
}

const struct xattr_handler *nfs3_xattr_handlers[] = {
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index f5ce9f7ec132..537a2a3c1dea 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -901,6 +901,13 @@ static int software_resume(void)
error = freeze_processes();
if (error)
goto Close_Finish;
+
+ error = freeze_kernel_threads();
+ if (error) {
+ thaw_processes();
+ goto Close_Finish;
+ }
+
error = load_image_and_restore();
thaw_processes();
Finish:
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c574285966f9..452254fd89f8 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -5595,40 +5595,60 @@ static int selinux_tun_dev_open(void *security)

static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
{
- int err = 0;
- u32 perm;
+ int rc = 0;
+ unsigned int msg_len;
+ unsigned int data_len = skb->len;
+ unsigned char *data = skb->data;
struct nlmsghdr *nlh;
struct sk_security_struct *sksec = sk->sk_security;
+ u16 sclass = sksec->sclass;
+ u32 perm;

- if (skb->len < NLMSG_HDRLEN) {
- err = -EINVAL;
- goto out;
- }
- nlh = nlmsg_hdr(skb);
+ while (data_len >= nlmsg_total_size(0)) {
+ nlh = (struct nlmsghdr *)data;
+
+ /* NOTE: the nlmsg_len field isn't reliably set by some netlink
+ * users which means we can't reject skb's with bogus
+ * length fields; our solution is to follow what
+ * netlink_rcv_skb() does and simply skip processing at
+ * messages with length fields that are clearly junk
+ */
+ if (nlh->nlmsg_len < NLMSG_HDRLEN || nlh->nlmsg_len > data_len)
+ return 0;

- err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
- if (err) {
- if (err == -EINVAL) {
+ rc = selinux_nlmsg_lookup(sclass, nlh->nlmsg_type, &perm);
+ if (rc == 0) {
+ rc = sock_has_perm(sk, perm);
+ if (rc)
+ return rc;
+ } else if (rc == -EINVAL) {
+ /* -EINVAL is a missing msg/perm mapping */
pr_warn_ratelimited("SELinux: unrecognized netlink"
- " message: protocol=%hu nlmsg_type=%hu sclass=%s"
- " pig=%d comm=%s\n",
- sk->sk_protocol, nlh->nlmsg_type,
- secclass_map[sksec->sclass - 1].name,
- task_pid_nr(current), current->comm);
- if (!enforcing_enabled(&selinux_state) ||
- security_get_allow_unknown(&selinux_state))
- err = 0;
+ " message: protocol=%hu nlmsg_type=%hu sclass=%s"
+ " pid=%d comm=%s\n",
+ sk->sk_protocol, nlh->nlmsg_type,
+ secclass_map[sclass - 1].name,
+ task_pid_nr(current), current->comm);
+ if (enforcing_enabled(&selinux_state) &&
+ !security_get_allow_unknown(&selinux_state))
+ return rc;
+ rc = 0;
+ } else if (rc == -ENOENT) {
+ /* -ENOENT is a missing socket/class mapping, ignore */
+ rc = 0;
+ } else {
+ return rc;
}

- /* Ignore */
- if (err == -ENOENT)
- err = 0;
- goto out;
+ /* move to the next message after applying netlink padding */
+ msg_len = NLMSG_ALIGN(nlh->nlmsg_len);
+ if (msg_len >= data_len)
+ return 0;
+ data_len -= msg_len;
+ data += msg_len;
}

- err = sock_has_perm(sk, perm);
-out:
- return err;
+ return rc;
}

#ifdef CONFIG_NETFILTER
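
The selinux_nlmsg_perm() rewrite above walks every netlink message in the buffer instead of inspecting only the first header and, like netlink_rcv_skb(), quietly stops at a message whose nlmsg_len is obviously junk rather than failing the whole send. Stripped of the SELinux specifics, that walk is bounded iteration over aligned, length-prefixed records; a standalone sketch with an invented header layout (not the real struct nlmsghdr):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Invented length-prefixed record header, 4-byte aligned like netlink. */
struct rec_hdr {
    uint32_t len;    /* total record length, header included */
    uint16_t type;
};

#define ALIGN4(x) (((x) + 3u) & ~3u)

static void walk_records(const unsigned char *data, size_t data_len)
{
    while (data_len >= sizeof(struct rec_hdr)) {
        struct rec_hdr h;
        size_t step;

        memcpy(&h, data, sizeof(h));    /* avoid unaligned access */

        /* Junk length field: stop quietly instead of erroring out. */
        if (h.len < sizeof(h) || h.len > data_len)
            return;

        printf("record type=%u len=%u\n", (unsigned)h.type, (unsigned)h.len);

        /* Advance by the padded length; stop when nothing is left. */
        step = ALIGN4(h.len);
        if (step >= data_len)
            return;
        data += step;
        data_len -= step;
    }
}

int main(void)
{
    unsigned char buf[32] = {0};
    struct rec_hdr h = { .len = 10, .type = 7 };

    memcpy(buf, &h, sizeof(h));         /* one record plus trailing zeros */
    walk_records(buf, sizeof(buf));
    return 0;
}
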
diff --git a/sound/core/oss/pcm_plugin.c b/sound/core/oss/pcm_plugin.c
index 8539047145de..da400da1fafe 100644
--- a/sound/core/oss/pcm_plugin.c
+++ b/sound/core/oss/pcm_plugin.c
@@ -211,21 +211,23 @@ static snd_pcm_sframes_t plug_client_size(struct snd_pcm_substream *plug,
if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
plugin = snd_pcm_plug_last(plug);
while (plugin && drv_frames > 0) {
- if (check_size && drv_frames > plugin->buf_frames)
- drv_frames = plugin->buf_frames;
plugin_prev = plugin->prev;
if (plugin->src_frames)
drv_frames = plugin->src_frames(plugin, drv_frames);
+ if (check_size && plugin->buf_frames &&
+ drv_frames > plugin->buf_frames)
+ drv_frames = plugin->buf_frames;
plugin = plugin_prev;
}
} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
plugin = snd_pcm_plug_first(plug);
while (plugin && drv_frames > 0) {
plugin_next = plugin->next;
+ if (check_size && plugin->buf_frames &&
+ drv_frames > plugin->buf_frames)
+ drv_frames = plugin->buf_frames;
if (plugin->dst_frames)
drv_frames = plugin->dst_frames(plugin, drv_frames);
- if (check_size && drv_frames > plugin->buf_frames)
- drv_frames = plugin->buf_frames;
plugin = plugin_next;
}
} else
@@ -251,26 +253,28 @@ static snd_pcm_sframes_t plug_slave_size(struct snd_pcm_substream *plug,
plugin = snd_pcm_plug_first(plug);
while (plugin && frames > 0) {
plugin_next = plugin->next;
+ if (check_size && plugin->buf_frames &&
+ frames > plugin->buf_frames)
+ frames = plugin->buf_frames;
if (plugin->dst_frames) {
frames = plugin->dst_frames(plugin, frames);
if (frames < 0)
return frames;
}
- if (check_size && frames > plugin->buf_frames)
- frames = plugin->buf_frames;
plugin = plugin_next;
}
} else if (stream == SNDRV_PCM_STREAM_CAPTURE) {
plugin = snd_pcm_plug_last(plug);
while (plugin) {
- if (check_size && frames > plugin->buf_frames)
- frames = plugin->buf_frames;
plugin_prev = plugin->prev;
if (plugin->src_frames) {
frames = plugin->src_frames(plugin, frames);
if (frames < 0)
return frames;
}
+ if (check_size && plugin->buf_frames &&
+ frames > plugin->buf_frames)
+ frames = plugin->buf_frames;
plugin = plugin_prev;
}
} else
diff --git a/sound/isa/opti9xx/miro.c b/sound/isa/opti9xx/miro.c
index c6136c6b0214..81eb9c89428d 100644
--- a/sound/isa/opti9xx/miro.c
+++ b/sound/isa/opti9xx/miro.c
@@ -880,10 +880,13 @@ static void snd_miro_write(struct snd_miro *chip, unsigned char reg,
spin_unlock_irqrestore(&chip->lock, flags);
}

+static inline void snd_miro_write_mask(struct snd_miro *chip,
+ unsigned char reg, unsigned char value, unsigned char mask)
+{
+ unsigned char oldval = snd_miro_read(chip, reg);

-#define snd_miro_write_mask(chip, reg, value, mask) \
- snd_miro_write(chip, reg, \
- (snd_miro_read(chip, reg) & ~(mask)) | ((value) & (mask)))
+ snd_miro_write(chip, reg, (oldval & ~mask) | (value & mask));
+}

/*
* Proc Interface
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index ac0ab6eb40f0..0d0fa6f54bdd 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -329,10 +329,13 @@ static void snd_opti9xx_write(struct snd_opti9xx *chip, unsigned char reg,
}


-#define snd_opti9xx_write_mask(chip, reg, value, mask) \
- snd_opti9xx_write(chip, reg, \
- (snd_opti9xx_read(chip, reg) & ~(mask)) | ((value) & (mask)))
+static inline void snd_opti9xx_write_mask(struct snd_opti9xx *chip,
+ unsigned char reg, unsigned char value, unsigned char mask)
+{
+ unsigned char oldval = snd_opti9xx_read(chip, reg);

+ snd_opti9xx_write(chip, reg, (oldval & ~mask) | (value & mask));
+}

static int snd_opti9xx_configure(struct snd_opti9xx *chip,
long port,
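
Both opti9xx hunks above turn a read-modify-write macro into a static inline. Besides type checking, the inline evaluates each argument exactly once, whereas the old macro expanded reg, value and mask several times and could misfire with a side-effecting argument. A userspace illustration of that hazard (reg_read(), reg_write() and next_reg() are invented stand-ins):

#include <stdio.h>

static unsigned char regs[8];
static unsigned next_idx;

static unsigned next_reg(void)                        { return next_idx++; }
static unsigned char reg_read(unsigned reg)           { return regs[reg]; }
static void reg_write(unsigned reg, unsigned char v)  { regs[reg] = v; }

/* Macro version: 'reg' is expanded twice, so a side-effecting argument
 * such as next_reg() runs twice and the read and the write can end up
 * on different registers. */
#define WRITE_MASK_MACRO(reg, value, mask) \
    reg_write((reg), (reg_read(reg) & ~(mask)) | ((value) & (mask)))

/* Inline version, as in the patch: each argument evaluated once. */
static inline void write_mask(unsigned reg, unsigned char value,
                              unsigned char mask)
{
    unsigned char old = reg_read(reg);

    reg_write(reg, (old & ~mask) | (value & mask));
}

int main(void)
{
    next_idx = 0;
    WRITE_MASK_MACRO(next_reg(), 0x0f, 0xff);
    printf("macro:  next_reg() consumed %u indices\n", next_idx);  /* 2 */

    next_idx = 0;
    write_mask(next_reg(), 0xf0, 0xff);
    printf("inline: next_reg() consumed %u indices\n", next_idx);  /* 1 */
    return 0;
}
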
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index c67fadd5aae5..12a064f994b1 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1848,8 +1848,10 @@ static bool check_non_pcm_per_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
/* Add sanity check to pass klockwork check.
* This should never happen.
*/
- if (WARN_ON(spdif == NULL))
+ if (WARN_ON(spdif == NULL)) {
+ mutex_unlock(&codec->spdif_mutex);
return true;
+ }
non_pcm = !!(spdif->status & IEC958_AES0_NONAUDIO);
mutex_unlock(&codec->spdif_mutex);
return non_pcm;
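
The patch_hdmi fix above adds the mutex_unlock() that the early-return path was missing, so spdif_mutex can no longer be left held when the sanity check trips. The general rule, that every exit from the critical section must drop the lock, in a small pthreads sketch (the protected data below is invented, build with -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *shared_status;           /* may legitimately be NULL */

static int status_is_nonzero(void)
{
    int ret;

    pthread_mutex_lock(&lock);
    if (shared_status == NULL) {
        /* Early return inside the critical section: without this
         * unlock the mutex would stay held forever, which is the
         * kind of bug the patch fixes. */
        pthread_mutex_unlock(&lock);
        return 1;                     /* conservative default */
    }
    ret = (*shared_status != 0);
    pthread_mutex_unlock(&lock);
    return ret;
}

int main(void)
{
    int value = 0;

    printf("no status: %d\n", status_is_nonzero());   /* 1 */
    shared_status = &value;
    printf("status=0:  %d\n", status_is_nonzero());   /* 0 */
    return 0;
}
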
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9620a8461d91..fc39550f6c5d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -6991,6 +6991,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x8560, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1558, 0x8561, "System76 Gazelle (gaze14)", ALC269_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
+ SND_PCI_QUIRK(0x17aa, 0x1048, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 5bbfd7577b33..6836f827965c 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1385,7 +1385,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,

case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */
case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */
- case USB_ID(0x16b0, 0x06b2): /* NuPrime DAC-10 */
+ case USB_ID(0x16d0, 0x06b2): /* NuPrime DAC-10 */
case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */
case USB_ID(0x16d0, 0x0733): /* Furutech ADL Stratos */
case USB_ID(0x16d0, 0x09db): /* NuPrime Audio DAC-9 */