Re: Linux 3.10.82

From: Greg KH
Date: Mon Jun 29 2015 - 16:45:52 EST


diff --git a/Makefile b/Makefile
index 6d19e37d36d5..5e3e665a10b7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 10
-SUBLEVEL = 81
+SUBLEVEL = 82
EXTRAVERSION =
NAME = TOSSUG Baby Fish

diff --git a/drivers/crypto/caam/caamrng.c b/drivers/crypto/caam/caamrng.c
index d1939a9539c0..04aefffb4dd9 100644
--- a/drivers/crypto/caam/caamrng.c
+++ b/drivers/crypto/caam/caamrng.c
@@ -56,7 +56,7 @@

/* Buffer, its dma address and lock */
struct buf_data {
- u8 buf[RN_BUF_SIZE];
+ u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
dma_addr_t addr;
struct completion filled;
u32 hw_desc[DESC_JOB_O_LEN];
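
Note on the caamrng hunk: marking the RNG output buffer
____cacheline_aligned gives the DMA-written area its own cache line, so
cache maintenance on the buffer presumably can no longer disturb the
CPU-owned fields (addr, the completion, hw_desc) packed next to it in
struct buf_data. A minimal userspace sketch of the layout effect follows;
the field order, sizes and CACHE_LINE value are illustrative assumptions,
not the driver's definitions.

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define CACHE_LINE 64

struct buf_data_sketch {
        unsigned long addr;           /* CPU-owned, stands in for dma_addr_t */
        unsigned int hw_desc[7];      /* CPU-owned descriptor words */
        alignas(CACHE_LINE) unsigned char buf[256]; /* device-written buffer */
};

int main(void)
{
        printf("addr offset:    %zu\n", offsetof(struct buf_data_sketch, addr));
        printf("hw_desc offset: %zu\n", offsetof(struct buf_data_sketch, hw_desc));
        printf("buf offset:     %zu (starts on cache line %zu)\n",
               offsetof(struct buf_data_sketch, buf),
               offsetof(struct buf_data_sketch, buf) / CACHE_LINE);
        return 0;
}
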
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index f6341e8622ee..7bd2acce9f81 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1487,6 +1487,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
return MODE_BANDWIDTH;
}

+ if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
+ (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
+ return MODE_H_ILLEGAL;
+ }
+
if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
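
Note on the mgag200 hunk: the added test rejects any mode whose horizontal
timings (hdisplay, hsync_start, hsync_end, htotal) are not multiples of 8,
since the G200 appears to program horizontal timing in 8-pixel units and
such a mode (e.g. a 1366-pixel-wide panel mode) cannot be expressed
correctly. A standalone sketch of the same test follows; the struct and
function names are illustrative, not the DRM API.

#include <stdbool.h>
#include <stdio.h>

struct hmode {
        int hdisplay, hsync_start, hsync_end, htotal;
};

static bool hmode_is_8_aligned(const struct hmode *m)
{
        return (m->hdisplay  % 8) == 0 && (m->hsync_start % 8) == 0 &&
               (m->hsync_end % 8) == 0 && (m->htotal      % 8) == 0;
}

int main(void)
{
        struct hmode ok  = { 1024, 1048, 1184, 1344 };
        struct hmode bad = { 1366, 1436, 1579, 1792 };

        printf("1024-wide mode valid: %d\n", hmode_is_8_aligned(&ok));
        printf("1366-wide mode valid: %d\n", hmode_is_8_aligned(&bad));
        return 0;
}
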
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 572579f87de4..90861416b9e9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -263,6 +263,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
return NULL;

q->hba_index = idx;
+
+ /*
+ * insert barrier for instruction interlock : data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
+ * instructions allowing action on content before valid bit checked,
+ * add barrier here as well. May not be needed as "content" is a
+ * single 32-bit entity here (vs multi word structure for cq's).
+ */
+ mb();
return eqe;
}

@@ -368,6 +378,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)

cqe = q->qe[q->hba_index].cqe;
q->hba_index = idx;
+
+ /*
+ * insert barrier for instruction interlock : data from the hardware
+ * must have the valid bit checked before it can be copied and acted
+ * upon. Speculative instructions were allowing a bcopy at the start
+ * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+ * after our return, to copy data before the valid bit check above
+ * was done. As such, some of the copied data was stale. The barrier
+ * ensures the check is before any data is copied.
+ */
+ mb();
return cqe;
}

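
Note on the two lpfc hunks: as the in-line comments explain, the event and
completion queue entries are written by the adapter, and the valid bit must
be observed before the rest of the entry is copied or acted upon; without a
barrier the copy can be issued speculatively ahead of the check and pick up
stale data. A rough userspace analogue using C11 atomics, with an acquire
fence standing in for the kernel's full mb(); the structure and names are
illustrative only.

#include <stdatomic.h>
#include <stdio.h>
#include <string.h>

struct cqe_sketch {
        unsigned int payload[15];
        atomic_uint valid;            /* written last by the "hardware" side */
};

/* Copies the entry into out and returns 1 if it is valid, 0 otherwise. */
static int consume_entry(struct cqe_sketch *e, unsigned int out[15])
{
        if (!atomic_load_explicit(&e->valid, memory_order_relaxed))
                return 0;

        /* Barrier: no load from e->payload may be hoisted above the
         * valid-bit check just performed. */
        atomic_thread_fence(memory_order_acquire);

        memcpy(out, e->payload, sizeof(e->payload));
        return 1;
}

int main(void)
{
        struct cqe_sketch e = { .payload = { 1, 2, 3 }, .valid = 1 };
        unsigned int out[15];

        if (consume_entry(&e, out))
                printf("consumed entry, first word %u\n", out[0]);
        return 0;
}
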
diff --git a/fs/pipe.c b/fs/pipe.c
index 0e0752ef2715..3e7ab278bb0c 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -117,25 +117,27 @@ void pipe_wait(struct pipe_inode_info *pipe)
}

static int
-pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
- int atomic)
+pipe_iov_copy_from_user(void *addr, int *offset, struct iovec *iov,
+ size_t *remaining, int atomic)
{
unsigned long copy;

- while (len > 0) {
+ while (*remaining > 0) {
while (!iov->iov_len)
iov++;
- copy = min_t(unsigned long, len, iov->iov_len);
+ copy = min_t(unsigned long, *remaining, iov->iov_len);

if (atomic) {
- if (__copy_from_user_inatomic(to, iov->iov_base, copy))
+ if (__copy_from_user_inatomic(addr + *offset,
+ iov->iov_base, copy))
return -EFAULT;
} else {
- if (copy_from_user(to, iov->iov_base, copy))
+ if (copy_from_user(addr + *offset,
+ iov->iov_base, copy))
return -EFAULT;
}
- to += copy;
- len -= copy;
+ *offset += copy;
+ *remaining -= copy;
iov->iov_base += copy;
iov->iov_len -= copy;
}
@@ -143,25 +145,27 @@ pipe_iov_copy_from_user(void *to, struct iovec *iov, unsigned long len,
}

static int
-pipe_iov_copy_to_user(struct iovec *iov, const void *from, unsigned long len,
- int atomic)
+pipe_iov_copy_to_user(struct iovec *iov, void *addr, int *offset,
+ size_t *remaining, int atomic)
{
unsigned long copy;

- while (len > 0) {
+ while (*remaining > 0) {
while (!iov->iov_len)
iov++;
- copy = min_t(unsigned long, len, iov->iov_len);
+ copy = min_t(unsigned long, *remaining, iov->iov_len);

if (atomic) {
- if (__copy_to_user_inatomic(iov->iov_base, from, copy))
+ if (__copy_to_user_inatomic(iov->iov_base,
+ addr + *offset, copy))
return -EFAULT;
} else {
- if (copy_to_user(iov->iov_base, from, copy))
+ if (copy_to_user(iov->iov_base,
+ addr + *offset, copy))
return -EFAULT;
}
- from += copy;
- len -= copy;
+ *offset += copy;
+ *remaining -= copy;
iov->iov_base += copy;
iov->iov_len -= copy;
}
@@ -395,7 +399,7 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
struct pipe_buffer *buf = pipe->bufs + curbuf;
const struct pipe_buf_operations *ops = buf->ops;
void *addr;
- size_t chars = buf->len;
+ size_t chars = buf->len, remaining;
int error, atomic;

if (chars > total_len)
@@ -409,9 +413,11 @@ pipe_read(struct kiocb *iocb, const struct iovec *_iov,
}

atomic = !iov_fault_in_pages_write(iov, chars);
+ remaining = chars;
redo:
addr = ops->map(pipe, buf, atomic);
- error = pipe_iov_copy_to_user(iov, addr + buf->offset, chars, atomic);
+ error = pipe_iov_copy_to_user(iov, addr, &buf->offset,
+ &remaining, atomic);
ops->unmap(pipe, buf, addr);
if (unlikely(error)) {
/*
@@ -426,7 +432,6 @@ redo:
break;
}
ret += chars;
- buf->offset += chars;
buf->len -= chars;

/* Was it a packet buffer? Clean up and exit */
@@ -531,6 +536,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
if (ops->can_merge && offset + chars <= PAGE_SIZE) {
int error, atomic = 1;
void *addr;
+ size_t remaining = chars;

error = ops->confirm(pipe, buf);
if (error)
@@ -539,8 +545,8 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
iov_fault_in_pages_read(iov, chars);
redo1:
addr = ops->map(pipe, buf, atomic);
- error = pipe_iov_copy_from_user(offset + addr, iov,
- chars, atomic);
+ error = pipe_iov_copy_from_user(addr, &offset, iov,
+ &remaining, atomic);
ops->unmap(pipe, buf, addr);
ret = error;
do_wakeup = 1;
@@ -575,6 +581,8 @@ redo1:
struct page *page = pipe->tmp_page;
char *src;
int error, atomic = 1;
+ int offset = 0;
+ size_t remaining;

if (!page) {
page = alloc_page(GFP_HIGHUSER);
@@ -595,14 +603,15 @@ redo1:
chars = total_len;

iov_fault_in_pages_read(iov, chars);
+ remaining = chars;
redo2:
if (atomic)
src = kmap_atomic(page);
else
src = kmap(page);

- error = pipe_iov_copy_from_user(src, iov, chars,
- atomic);
+ error = pipe_iov_copy_from_user(src, &offset, iov,
+ &remaining, atomic);
if (atomic)
kunmap_atomic(src);
else
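
Note on the fs/pipe.c change: the copy helpers now advance a shared offset
and remaining count in place instead of taking a one-shot pointer/length
pair, and pipe_read() no longer bumps buf->offset separately after the
copy. When an atomic copy faults partway through and the operation is
redone non-atomically, the retry therefore resumes exactly where the first
attempt stopped, rather than re-deriving positions from values that no
longer match what was already consumed. The sketch below mirrors the shape
of the reworked pipe_iov_copy_from_user(); the fault path is omitted and
the types and names are illustrative.

#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

static int copy_from_iov(void *addr, size_t *offset, struct iovec *iov,
                         size_t *remaining)
{
        while (*remaining > 0) {
                size_t copy;

                while (iov->iov_len == 0)
                        iov++;
                copy = *remaining < iov->iov_len ? *remaining : iov->iov_len;

                memcpy((char *)addr + *offset, iov->iov_base, copy);

                *offset += copy;      /* advance by what was actually copied */
                *remaining -= copy;
                iov->iov_base = (char *)iov->iov_base + copy;
                iov->iov_len -= copy;
        }
        return 0;
}

int main(void)
{
        char dst[16] = { 0 };
        size_t offset = 0, remaining = 8;
        struct iovec iov[2] = {
                { .iov_base = "pipe",  .iov_len = 4 },
                { .iov_base = "-fix!", .iov_len = 5 },
        };

        copy_from_iov(dst, &offset, iov, &remaining);
        printf("copied %zu bytes: \"%s\"\n", offset, dst);
        return 0;
}
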
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0a1edc694d67..fe3e086d38e9 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1328,19 +1328,24 @@ static int check_preds(struct filter_parse_state *ps)
{
int n_normal_preds = 0, n_logical_preds = 0;
struct postfix_elt *elt;
+ int cnt = 0;

list_for_each_entry(elt, &ps->postfix, list) {
- if (elt->op == OP_NONE)
+ if (elt->op == OP_NONE) {
+ cnt++;
continue;
+ }

+ cnt--;
if (elt->op == OP_AND || elt->op == OP_OR) {
n_logical_preds++;
continue;
}
n_normal_preds++;
+ WARN_ON_ONCE(cnt < 0);
}

- if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
+ if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
return -EINVAL;
}
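
Note on the trace_events_filter change: the filter is evaluated as a
postfix (RPN) expression, and the new cnt tracks its stack depth: every
operand element (OP_NONE) pushes one value, every operator (logical or
comparison) consumes two and produces one, so a well-formed expression
never drives the count negative and finishes at exactly 1. The added
"cnt != 1" test rejects unbalanced filters that the predicate counting
alone did not catch. A standalone sketch of the same counting follows; the
token encoding is an assumption for illustration, not the tracing code's
representation.

#include <stdbool.h>
#include <stdio.h>

enum tok { TOK_OPERAND, TOK_AND, TOK_OR };

static bool postfix_is_well_formed(const enum tok *toks, int n)
{
        int cnt = 0;

        for (int i = 0; i < n; i++) {
                if (toks[i] == TOK_OPERAND) {
                        cnt++;
                        continue;
                }
                /* binary operator: pops two operands, pushes one result */
                cnt--;
                if (cnt < 1)
                        return false;
        }
        return cnt == 1;
}

int main(void)
{
        enum tok good[] = { TOK_OPERAND, TOK_OPERAND, TOK_AND };  /* a b && */
        enum tok bad[]  = { TOK_OPERAND, TOK_AND };               /* a &&   */

        printf("good well-formed: %d\n", postfix_is_well_formed(good, 3));
        printf("bad well-formed:  %d\n", postfix_is_well_formed(bad, 2));
        return 0;
}
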