Re: Linux 4.9.18

From: Greg KH
Date: Mon Mar 27 2017 - 06:43:23 EST


diff --git a/Makefile b/Makefile
index 004f90a4e613..c10d0e634e68 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 17
+SUBLEVEL = 18
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 7bd69bd43a01..1d8c24dc04d4 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -45,28 +45,9 @@ static inline void flush_kernel_dcache_page(struct page *page)

#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
-/* vmap range flushes and invalidates. Architecturally, we don't need
- * the invalidate, because the CPU should refuse to speculate once an
- * area has been flushed, so invalidate is left empty */
-static inline void flush_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
-
- flush_kernel_dcache_range_asm(start, start + size);
-}
-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
-{
- unsigned long start = (unsigned long)vaddr;
- void *cursor = vaddr;

- for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) {
- struct page *page = vmalloc_to_page(cursor);
-
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
- flush_kernel_dcache_page(page);
- }
- flush_kernel_dcache_range_asm(start, start + size);
-}
+void flush_kernel_vmap_range(void *vaddr, int size);
+void invalidate_kernel_vmap_range(void *vaddr, int size);

#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 977f0a4f5ecf..53ec75f8e237 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -633,3 +633,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
+
+void flush_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ if ((unsigned long)size > parisc_cache_flush_threshold)
+ flush_data_cache();
+ else
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(flush_kernel_vmap_range);
+
+void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+ unsigned long start = (unsigned long)vaddr;
+
+ if ((unsigned long)size > parisc_cache_flush_threshold)
+ flush_data_cache();
+ else
+ flush_kernel_dcache_range_asm(start, start + size);
+}
+EXPORT_SYMBOL(invalidate_kernel_vmap_range);
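
Both helpers above share one heuristic: past a size threshold it is cheaper to flush the entire data cache than to walk the range line by line. A minimal user-space sketch of that decision follows; the cutoff value and flush primitives are placeholders, not the real parisc interfaces.

/* Hedged sketch of the threshold heuristic used by the two helpers
 * above; all names and the 2 MiB cutoff are illustrative only.
 */
#include <stdio.h>

#define CACHE_FLUSH_THRESHOLD (2UL * 1024 * 1024)

static void flush_whole_cache(void)
{
        printf("flush entire data cache\n");
}

static void flush_range(unsigned long start, unsigned long end)
{
        printf("flush %lu bytes line by line\n", end - start);
}

static void flush_vmap_range_sketch(void *vaddr, unsigned long size)
{
        unsigned long start = (unsigned long)vaddr;

        if (size > CACHE_FLUSH_THRESHOLD)
                flush_whole_cache();
        else
                flush_range(start, start + size);
}

int main(void)
{
        char buf[64];

        flush_vmap_range_sketch(buf, sizeof(buf));   /* small: per-line */
        flush_vmap_range_sketch(buf, 4UL << 20);     /* large: whole cache */
        return 0;
}
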
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index 40639439d8b3..e81afc378850 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -139,6 +139,8 @@ void machine_power_off(void)

printk(KERN_EMERG "System shut down completed.\n"
"Please power this system off now.");
+
+ for (;;);
}

void (*pm_power_off)(void) = machine_power_off;
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e72109df2..f080abfc2f83 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@ SECTIONS
}

#ifdef CONFIG_PPC64_BOOT_WRAPPER
+ . = ALIGN(256);
.got :
{
__toc_start = .;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e6c1fb60fbc..272608f102fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -680,9 +680,11 @@ static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy);
- if (!cur_freq)
- return sprintf(buf, "<unknown>");
- return sprintf(buf, "%u\n", cur_freq);
+
+ if (cur_freq)
+ return sprintf(buf, "%u\n", cur_freq);
+
+ return sprintf(buf, "<unknown>\n");
}

/**
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index b447a01ab21a..09e6a7320bb2 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -3506,6 +3506,12 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
max_sclk = 75000;
max_mclk = 80000;
}
+ } else if (adev->asic_type == CHIP_OLAND) {
+ if ((adev->pdev->device == 0x6604) &&
+ (adev->pdev->subsystem_vendor == 0x1028) &&
+ (adev->pdev->subsystem_device == 0x066F)) {
+ max_sclk = 75000;
+ }
}
/* Apply dpm quirks */
while (p && p->chip_device != 0) {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 8703f56b7947..246d1aea87bc 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -61,21 +61,24 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT0);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT1:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT1);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_V3D_IDENT2:
ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
if (ret < 0)
return ret;
args->value = V3D_READ(V3D_IDENT2);
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
break;
case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
args->value = true;
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 18e37171e9c8..ab3016982466 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -711,8 +711,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
}

mutex_lock(&vc4->power_lock);
- if (--vc4->power_refcount == 0)
- pm_runtime_put(&vc4->v3d->pdev->dev);
+ if (--vc4->power_refcount == 0) {
+ pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev);
+ pm_runtime_put_autosuspend(&vc4->v3d->pdev->dev);
+ }
mutex_unlock(&vc4->power_lock);

kfree(exec);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index e6d3c6028341..7cc346ad9b0b 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -222,6 +222,8 @@ static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
return ret;
}

+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
pm_runtime_enable(dev);

return 0;
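
The three vc4 hunks above replace immediate pm_runtime_put() calls with the autosuspend idiom, keeping the V3D powered for a short grace period after the last user. A sketch of that pattern for a hypothetical driver, using only the runtime-PM calls visible in the hunks:

/* Hedged sketch of the runtime-PM autosuspend idiom adopted above;
 * the driver, bind hook, and 40 ms delay are illustrative.
 */
#include <linux/pm_runtime.h>

static int foo_bind(struct device *dev)
{
        /* Keep the device powered briefly after the last user instead
         * of cutting power immediately.
         */
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, 40);      /* ms */
        pm_runtime_enable(dev);
        return 0;
}

static int foo_do_work(struct device *dev)
{
        int ret = pm_runtime_get_sync(dev);

        if (ret < 0)
                return ret;

        /* ... touch the hardware ... */

        /* Refresh the inactivity timestamp, then drop our reference
         * without powering off until the delay expires.
         */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}
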
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index 2543cf5b8b51..917321ce832f 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -608,9 +608,7 @@ static bool
vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
{
uint32_t max_branch_target = 0;
- bool found_shader_end = false;
int ip;
- int shader_end_ip = 0;
int last_branch = -2;

for (ip = 0; ip < validation_state->max_ip; ip++) {
@@ -621,8 +619,13 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
uint32_t branch_target_ip;

if (sig == QPU_SIG_PROG_END) {
- shader_end_ip = ip;
- found_shader_end = true;
+ /* There are two delay slots after program end is
+ * signaled that are still executed, then we're
+ * finished. validation_state->max_ip is the
+ * instruction after the last valid instruction in the
+ * program.
+ */
+ validation_state->max_ip = ip + 3;
continue;
}

@@ -676,15 +679,9 @@ vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
}
set_bit(after_delay_ip, validation_state->branch_targets);
max_branch_target = max(max_branch_target, after_delay_ip);
-
- /* There are two delay slots after program end is signaled
- * that are still executed, then we're finished.
- */
- if (found_shader_end && ip == shader_end_ip + 2)
- break;
}

- if (max_branch_target > shader_end_ip) {
+ if (max_branch_target > validation_state->max_ip - 3) {
DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
return false;
}
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index aecec6d32463..7f1c625b08ec 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
return -ENODEV;
}

+ if (hostif->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
dev_info(&udev->dev,
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
__func__, le16_to_cpu(udev->descriptor.idVendor),
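
The check added above rejects interfaces that report no endpoints before the driver indexes the endpoint array. The defensive shape of that probe, sketched for a hypothetical USB driver:

/* Hedged sketch of the defensive probe check added above; the driver
 * itself is hypothetical.
 */
#include <linux/usb.h>

static int foo_probe(struct usb_interface *interface,
                     const struct usb_device_id *id)
{
        struct usb_host_interface *hostif = interface->cur_altsetting;

        /* A broken or malicious device may report zero endpoints;
         * indexing hostif->endpoint[0] without this check would read
         * out of bounds.
         */
        if (hostif->desc.bNumEndpoints < 1)
                return -ENODEV;

        /* ... hostif->endpoint[0] is safe to use from here on ... */
        return 0;
}
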
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 39fddda2fef2..55b5e0e77b17 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1470,7 +1470,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}

+ /*
+ * If a bio is split, the first part of the bio will pass the
+ * barrier but the bio is queued in current->bio_list (see
+ * generic_make_request). If raise_barrier() is called here,
+ * the second part of the bio can't pass the barrier. But since
+ * the first part of the bio isn't dispatched to the underlying
+ * disks yet, the barrier is never released, hence raise_barrier
+ * will always wait. We have a deadlock.
+ * Note, this only happens in the read path. For the write path,
+ * the first part of the bio is dispatched in a schedule() call
+ * (because of the blk plug) or offloaded to raid10d.
+ * Returning from the function immediately changes the bio
+ * order queued in bio_list and avoids the deadlock.
+ */
__make_request(mddev, split);
+ if (split != bio && bio_data_dir(bio) == READ) {
+ generic_make_request(bio);
+ break;
+ }
} while (split != bio);

/* In case raid10d snuck in to freeze_array */
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index f9b6fba689ff..a530f08592cd 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -560,8 +560,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;

- if (!list_empty(&task->running))
+ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&task->running)) {
+ pr_debug_once("%s while task on list", __func__);
list_del_init(&task->running);
+ }
+ spin_unlock_bh(&conn->taskqueuelock);

if (conn->task == task)
conn->task = NULL;
@@ -783,7 +787,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
if (session->tt->xmit_task(task))
goto free_task;
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}

@@ -1474,8 +1480,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (list_empty(&task->running))
list_add_tail(&task->running, &conn->requeue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1512,22 +1520,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* only have one nop-out as a ping from us and targets should not
* overflow us with nop-ins
*/
+ spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
}

/* process pending command queue */
@@ -1535,19 +1547,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
fail_scsi_task(conn->task, DID_IMM_RETRY);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM || rc == -EACCES) {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&conn->task->running,
&conn->cmdqueue);
conn->task = NULL;
+ spin_unlock_bh(&conn->taskqueuelock);
goto done;
} else
fail_scsi_task(conn->task, DID_ABORT);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1558,6 +1575,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
* we need to check the mgmt queue for nops that need to
* be sent to avoid starvation
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
@@ -1577,12 +1595,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
conn->task = task;
list_del_init(&conn->task->running);
conn->task->state = ISCSI_TASK_RUNNING;
+ spin_unlock_bh(&conn->taskqueuelock);
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
+ spin_unlock_bh(&conn->taskqueuelock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;

@@ -1738,7 +1759,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
goto prepd_reject;
}
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->cmdqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}

@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
+ spin_lock_init(&conn->taskqueuelock);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);

/* allocate login_task used for the login/text sequences */
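
The libiscsi hunks above put every list operation on mgmtqueue, cmdqueue, and requeue under the new taskqueuelock, dropping the lock across the transmit step and retaking it before the next list walk. A user-space sketch of that drop-and-retake discipline; the queue and transmit function are stand-ins, not libiscsi code:

/* Hedged sketch of the lock discipline introduced above: hold the
 * queue lock only around list manipulation, never across transmit.
 */
#include <pthread.h>

struct task {
        struct task *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static struct task *queue_head;

static int xmit_task(struct task *t)
{
        (void)t;        /* may block; called with queue_lock dropped */
        return 0;
}

static int drain_queue(void)
{
        pthread_mutex_lock(&queue_lock);
        while (queue_head) {
                struct task *t = queue_head;

                queue_head = t->next;               /* unlink under lock */
                pthread_mutex_unlock(&queue_lock);

                if (xmit_task(t))                   /* lock dropped */
                        return -1;

                pthread_mutex_lock(&queue_lock);    /* retake, re-check */
        }
        pthread_mutex_unlock(&queue_lock);
        return 0;
}

int main(void)
{
        struct task t = { 0 };

        queue_head = &t;
        return drain_queue();
}
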
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 734a0428ef0e..f7e3f27bb5c5 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11393,6 +11393,7 @@ static struct pci_driver lpfc_driver = {
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
+ .shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689f5ca9..feab7ea8e823 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -5375,16 +5375,22 @@ qlt_send_busy(struct scsi_qla_host *vha,

static int
qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
- struct atio_from_isp *atio)
+ struct atio_from_isp *atio, bool ha_locked)
{
struct qla_hw_data *ha = vha->hw;
uint16_t status;
+ unsigned long flags;

if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
return 0;

+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
status = temp_sam_status;
qlt_send_busy(vha, atio, status);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
return 1;
}

@@ -5429,7 +5435,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,


if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, ha_locked);
if (rc != 0) {
tgt->atio_irq_cmd_count--;
return;
@@ -5552,7 +5558,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
break;
}

- rc = qlt_chk_qfull_thresh_hold(vha, atio);
+ rc = qlt_chk_qfull_thresh_hold(vha, atio, true);
if (rc != 0) {
tgt->irq_cmd_count--;
return;
@@ -6794,6 +6800,8 @@ qlt_handle_abts_recv_work(struct work_struct *work)
spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ kfree(op);
}

void
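
qlt_chk_qfull_thresh_hold() now takes an ha_locked flag because one caller already holds the hardware lock and the other does not. The general shape of that pattern, sketched with generic names:

/* Hedged sketch of the "caller may already hold the lock" pattern the
 * new ha_locked parameter implements; everything here is generic.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(hw_lock);

static void send_busy_locked(void)
{
        /* ... requires hw_lock to be held ... */
}

static void send_busy(bool already_locked)
{
        unsigned long flags;

        if (!already_locked)
                spin_lock_irqsave(&hw_lock, flags);

        send_busy_locked();

        if (!already_locked)
                spin_unlock_irqrestore(&hw_lock, flags);
}
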
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 9125d9358dea..ef1c8c158f66 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,

buf = kzalloc(12, GFP_KERNEL);
if (!buf)
- return;
+ goto out_free;

memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@ static void pscsi_tape_read_blocksize(struct se_device *dev,
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
-out_free:
+
kfree(buf);
}

@@ -314,9 +315,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
sd->lun, sd->queue_depth);
}

- dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_block_size =
+ min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;

/*
@@ -339,8 +341,10 @@ static int pscsi_add_device_to_list(struct se_device *dev,
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
- if (sd->type == TYPE_TAPE)
+ if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ }
return 0;
}

@@ -406,7 +410,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd)
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@ static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
return 0;
}

-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
- struct scsi_device *sd)
- __releases(sh->host_lock)
-{
- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
- struct Scsi_Host *sh = sd->host;
- int ret;
-
- spin_unlock_irq(sh->host_lock);
- ret = pscsi_add_device_to_list(dev, sd);
- if (ret)
- return ret;
-
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
- sd->channel, sd->id, sd->lun);
- return 0;
-}
-
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@ static int pscsi_configure_device(struct se_device *dev)
case TYPE_DISK:
ret = pscsi_create_type_disk(dev, sd);
break;
- case TYPE_ROM:
- ret = pscsi_create_type_rom(dev, sd);
- break;
default:
- ret = pscsi_create_type_other(dev, sd);
+ ret = pscsi_create_type_nondisk(dev, sd);
break;
}

@@ -611,8 +590,7 @@ static void pscsi_free_device(struct se_device *dev)
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);

- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
- scsi_device_put(sd);
+ scsi_device_put(sd);

pdv->pdv_sd = NULL;
}
@@ -1069,7 +1047,6 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;

- dump_stack();
return 0;
}

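The pscsi hunks above lean on min_not_zero(), which returns the smaller of two values but never lets an unset (zero) input win, so an unprobed sector_size can no longer zero out hw_block_size. A user-space model of those semantics (the kernel macro is type-generic; this model is int-only):

/* Hedged model of min_not_zero() semantics as used above. */
#include <assert.h>

static int min_not_zero_int(int a, int b)
{
        if (a == 0)
                return b;
        if (b == 0)
                return a;
        return a < b ? a : b;
}

int main(void)
{
        assert(min_not_zero_int(0, 512) == 512);   /* unset: fall back */
        assert(min_not_zero_int(256, 512) == 256); /* smaller nonzero wins */
        assert(min_not_zero_int(4096, 512) == 512);
        return 0;
}
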
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index aabd6602da6c..a53fb23a0411 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1104,9 +1104,15 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
return ret;
break;
case VERIFY:
+ case VERIFY_16:
size = 0;
- sectors = transport_get_sectors_10(cdb);
- cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[0] == VERIFY) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
cmd->execute_cmd = sbc_emulate_noop;
goto check_lba;
case REZERO_UNIT:
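
The VERIFY hunk above selects a decoder by opcode because the two CDB layouts differ: VERIFY(10) carries a 32-bit LBA and a 16-bit sector count, VERIFY(16) a 64-bit LBA and a 32-bit count, all big-endian per SCSI. A user-space sketch decoding both layouts; the example LBA and count values are arbitrary:

/* Hedged sketch of the two CDB layouts the hunk switches between. */
#include <assert.h>
#include <stdint.h>

static uint64_t get_be(const uint8_t *p, int n)
{
        uint64_t v = 0;

        while (n--)
                v = (v << 8) | *p++;
        return v;
}

int main(void)
{
        uint8_t verify10[10] = { 0x2f, 0, 0, 0, 0, 0x10, 0, 0, 8, 0 };
        uint8_t verify16[16] = { 0x8f, 0, 0, 0, 0, 0, 0, 0, 0,
                                 0x10, 0, 0, 0, 8, 0, 0 };

        assert(get_be(&verify10[2], 4) == 0x10);  /* 32-bit LBA */
        assert(get_be(&verify10[7], 2) == 8);     /* 16-bit count */
        assert(get_be(&verify16[2], 8) == 0x10);  /* 64-bit LBA */
        assert(get_be(&verify16[10], 4) == 8);    /* 32-bit count */
        return 0;
}
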
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index afe29ba42a4e..5fa9ba1de429 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3830,7 +3830,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
if (ext4_has_feature_meta_bg(sb)) {
- if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
+ if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
ext4_msg(sb, KERN_WARNING,
"first meta block group too large: %u "
"(group descriptor block count %u)",
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index a6a3389a07fc..51519c2836b5 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -207,7 +207,7 @@ struct lm_lockname {
struct gfs2_sbd *ln_sbd;
u64 ln_number;
unsigned int ln_type;
-};
+} __packed __aligned(sizeof(int));

#define lm_name_equal(name1, name2) \
(((name1)->ln_number == (name2)->ln_number) && \
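
The gfs2 hunk packs lm_lockname because the structure is hashed and compared as raw bytes, and compiler padding would drag indeterminate bytes into that comparison. A user-space demonstration with a simplified two-field stand-in for lm_lockname:

/* Hedged demonstration of why a raw-byte hash key must be packed. */
#include <stdint.h>
#include <stdio.h>

struct key_padded {             /* typically 4 bytes of tail padding */
        uint64_t number;
        uint32_t type;
};

struct key_packed {
        uint64_t number;
        uint32_t type;
} __attribute__((packed, aligned(4)));

int main(void)
{
        printf("padded: %zu bytes, packed: %zu bytes\n",
               sizeof(struct key_padded), sizeof(struct key_packed));
        /* memcmp()/hashing over sizeof(struct key_padded) also compares
         * the undefined padding bytes; the packed variant has none, so
         * byte-wise comparison is well defined.
         */
        return 0;
}
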
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 609840de31d3..1536aeb0abab 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7426,11 +7426,11 @@ static void nfs4_exchange_id_release(void *data)
struct nfs41_exchange_id_data *cdata =
(struct nfs41_exchange_id_data *)data;

- nfs_put_client(cdata->args.client);
if (cdata->xprt) {
xprt_put(cdata->xprt);
rpc_clnt_xprt_switch_put(cdata->args.client->cl_rpcclient);
}
+ nfs_put_client(cdata->args.client);
kfree(cdata->res.impl_id);
kfree(cdata->res.server_scope);
kfree(cdata->res.server_owner);
@@ -7537,10 +7537,8 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
task_setup_data.callback_data = calldata;

task = rpc_run_task(&task_setup_data);
- if (IS_ERR(task)) {
- status = PTR_ERR(task);
- goto out_impl_id;
- }
+ if (IS_ERR(task))
+ return PTR_ERR(task);

if (!xprt) {
status = rpc_wait_for_completion_task(task);
@@ -7568,6 +7566,7 @@ static int _nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred,
kfree(calldata->res.server_owner);
out_calldata:
kfree(calldata);
+ nfs_put_client(clp);
goto out;
}

diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d91e6a..f38fae23bdac 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -16,12 +16,6 @@
#include <linux/bitops.h>

/*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
* more efficiently than using fls() and fls64()
@@ -85,7 +79,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
- (n) < 1 ? ____ilog2_NaN() : \
+ (n) < 2 ? 0 : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
@@ -148,10 +142,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- (n) & (1ULL << 1) ? 1 : \
- (n) & (1ULL << 0) ? 0 : \
- ____ilog2_NaN() \
- ) : \
+ 1 ) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
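
With the hunk above, a constant ilog2(0) or ilog2(1) now folds to 0 instead of referencing the removed ____ilog2_NaN() link-error hook. A user-space approximation of the new constant behaviour, assuming a 64-bit long:

/* Hedged approximation of ilog2() semantics after this change. */
#include <assert.h>

static unsigned int ilog2_sketch(unsigned long n)
{
        return n < 2 ? 0 : 63 - __builtin_clzl(n);  /* 64-bit long assumed */
}

int main(void)
{
        assert(ilog2_sketch(0) == 0);   /* previously a link error */
        assert(ilog2_sketch(1) == 0);
        assert(ilog2_sketch(2) == 1);
        assert(ilog2_sketch(64) == 6);
        return 0;
}
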
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 4d1c46aac331..c7b1dc713cdd 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@ struct iscsi_conn {
struct iscsi_task *task; /* xmit task in progress */

/* xmit */
+ spinlock_t taskqueuelock; /* protects the next three lists */
struct list_head mgmtqueue; /* mgmt (control) xmit queue */
struct list_head cmdqueue; /* data-path cmd queue */
struct list_head requeue; /* tasks needing another run */
diff --git a/kernel/cgroup_pids.c b/kernel/cgroup_pids.c
index 2bd673783f1a..a57242e0d5a6 100644
--- a/kernel/cgroup_pids.c
+++ b/kernel/cgroup_pids.c
@@ -229,7 +229,7 @@ static int pids_can_fork(struct task_struct *task)
/* Only log the first time events_limit is incremented. */
if (atomic64_inc_return(&pids->events_limit) == 1) {
pr_info("cgroup: fork rejected by pids controller in ");
- pr_cont_cgroup_path(task_cgroup(current, pids_cgrp_id));
+ pr_cont_cgroup_path(css->cgroup);
pr_cont("\n");
}
cgroup_file_notify(&pids->events_file);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4b3323151a2f..07c0dc806dfc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10333,6 +10333,17 @@ void perf_event_free_task(struct task_struct *task)
continue;

mutex_lock(&ctx->mutex);
+ raw_spin_lock_irq(&ctx->lock);
+ /*
+ * Destroy the task <-> ctx relation and mark the context dead.
+ *
+ * This is important because even though the task hasn't been
+ * exposed yet the context has been (through child_list).
+ */
+ RCU_INIT_POINTER(task->perf_event_ctxp[ctxn], NULL);
+ WRITE_ONCE(ctx->task, TASK_TOMBSTONE);
+ put_task_struct(task); /* cannot be last */
+ raw_spin_unlock_irq(&ctx->lock);
again:
list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
group_entry)
@@ -10586,7 +10597,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}

/*
@@ -10602,7 +10613,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}

raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -10630,6 +10641,7 @@ static int perf_event_init_context(struct task_struct *child, int ctxn)
}

raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
mutex_unlock(&parent_ctx->mutex);

perf_unpin_context(parent_ctx);
diff --git a/mm/percpu.c b/mm/percpu.c
index 255714302394..f014cebbf405 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1010,8 +1010,11 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
mutex_unlock(&pcpu_alloc_mutex);
}

- if (chunk != pcpu_reserved_chunk)
+ if (chunk != pcpu_reserved_chunk) {
+ spin_lock_irqsave(&pcpu_lock, flags);
pcpu_nr_empty_pop_pages -= occ_pages;
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+ }

if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
pcpu_schedule_balance_work();
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index e2c37061edbe..69502fa68a3c 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -486,7 +486,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
struct ib_cq *sendcq, *recvcq;
int rc;

- max_sge = min(ia->ri_device->attrs.max_sge, RPCRDMA_MAX_SEND_SGES);
+ max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
+ RPCRDMA_MAX_SEND_SGES);
if (max_sge < RPCRDMA_MIN_SEND_SGES) {
pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
return -ENOMEM;
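
The min_t() above makes the signed/unsigned conversion explicit when clamping the device's max_sge. A user-space illustration of the pitfall an implicit mixed-sign minimum can hide; the limit constant is made up:

/* Hedged illustration of the mixed-sign minimum pitfall. */
#include <assert.h>

#define MAX_SEND_SGES 16u       /* hypothetical unsigned limit */

int main(void)
{
        int reported = -1;      /* bogus value from a provider */

        /* Usual arithmetic conversions turn -1 into UINT_MAX, so the
         * "minimum" silently becomes the limit instead of an error.
         */
        assert((reported < MAX_SEND_SGES ? reported : MAX_SEND_SGES)
               == MAX_SEND_SGES);
        return 0;
}
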
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index 41446668ccce..d5677d39c1e4 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -13,12 +13,6 @@
#define _TOOLS_LINUX_LOG2_H

/*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
* more efficiently than using fls() and fls64()
@@ -78,7 +72,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
- (n) < 1 ? ____ilog2_NaN() : \
+ (n) < 2 ? 0 : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
@@ -141,10 +135,7 @@ unsigned long __rounddown_pow_of_two(unsigned long n)
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- (n) & (1ULL << 1) ? 1 : \
- (n) & (1ULL << 0) ? 0 : \
- ____ilog2_NaN() \
- ) : \
+ 1 ) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \