RE: [Xen-devel] [RFC PATCH 3/3] xen/netback: Fix grant copy across page boundary with KASAN
From: Durrant, Paul
Date: Tue Dec 17 2019 - 10:14:25 EST
> -----Original Message-----
> From: Xen-devel <xen-devel-bounces@xxxxxxxxxxxxxxxxxxxx> On Behalf Of
> Sergey Dyasli
> Sent: 17 December 2019 14:08
> To: xen-devel@xxxxxxxxxxxxx; kasan-dev@xxxxxxxxxxxxxxxx; linux-
> kernel@xxxxxxxxxxxxxxx
> Cc: Juergen Gross <jgross@xxxxxxxx>; Sergey Dyasli
> <sergey.dyasli@xxxxxxxxxx>; Stefano Stabellini <sstabellini@xxxxxxxxxx>;
> George Dunlap <george.dunlap@xxxxxxxxxx>; Ross Lagerwall
> <ross.lagerwall@xxxxxxxxxx>; Alexander Potapenko <glider@xxxxxxxxxx>;
> Andrey Ryabinin <aryabinin@xxxxxxxxxxxxx>; Boris Ostrovsky
> <boris.ostrovsky@xxxxxxxxxx>; Dmitry Vyukov <dvyukov@xxxxxxxxxx>
> Subject: [Xen-devel] [RFC PATCH 3/3] xen/netback: Fix grant copy across
> page boundary with KASAN
>
> From: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
>
> When KASAN (or SLUB_DEBUG) is turned on, the normal expectation that
> allocations are aligned to the next power of 2 of the size does not
> hold. Therefore, handle grant copies that cross page boundaries.
>
> Signed-off-by: Ross Lagerwall <ross.lagerwall@xxxxxxxxxx>
> Signed-off-by: Sergey Dyasli <sergey.dyasli@xxxxxxxxxx>
Would have been nice to cc netback maintainers...
> ---
> drivers/net/xen-netback/common.h | 2 +-
> drivers/net/xen-netback/netback.c | 55 ++++++++++++++++++++++++-------
> 2 files changed, 45 insertions(+), 12 deletions(-)
>
> diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
> index 05847eb91a1b..e57684415edd 100644
> --- a/drivers/net/xen-netback/common.h
> +++ b/drivers/net/xen-netback/common.h
> @@ -155,7 +155,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
> struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
> grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
>
> - struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
> + struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS * 2];
> struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
> struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
> /* passed to gnttab_[un]map_refs with pages under (un)mapping */
> diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
> index 0020b2e8c279..1541b6e0cc62 100644
> --- a/drivers/net/xen-netback/netback.c
> +++ b/drivers/net/xen-netback/netback.c
> @@ -320,6 +320,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
>
> struct xenvif_tx_cb {
> u16 pending_idx;
> + u8 copies;
> };
I know we're a long way off the limit (48 bytes), but I wonder if we ought to have a compile-time check here that we're not overflowing skb->cb.
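Something like the following (untested sketch; placing it at the top of xenvif_tx_build_gops() is just a suggestion) would catch any future growth at build time:

	BUILD_BUG_ON(sizeof(struct xenvif_tx_cb) > sizeof(((struct sk_buff *)0)->cb));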
>
> #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
> @@ -439,6 +440,7 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
> {
> struct gnttab_map_grant_ref *gop_map = *gopp_map;
> u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
> + u8 copies = XENVIF_TX_CB(skb)->copies;
> /* This always points to the shinfo of the skb being checked, which
> * could be either the first or the one on the frag_list
> */
> @@ -450,23 +452,27 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
> int nr_frags = shinfo->nr_frags;
> const bool sharedslot = nr_frags &&
> frag_get_pending_idx(&shinfo->frags[0]) ==
> pending_idx;
> - int i, err;
> + int i, err = 0;
>
> - /* Check status of header. */
> - err = (*gopp_copy)->status;
> - if (unlikely(err)) {
> - if (net_ratelimit())
> - netdev_dbg(queue->vif->dev,
> + while (copies) {
> + /* Check status of header. */
> + int newerr = (*gopp_copy)->status;
> + if (unlikely(newerr)) {
> + if (net_ratelimit())
> + netdev_dbg(queue->vif->dev,
> "Grant copy of header failed! status: %d
> pending_idx: %u ref: %u\n",
> (*gopp_copy)->status,
> pending_idx,
> (*gopp_copy)->source.u.ref);
> - /* The first frag might still have this slot mapped */
> - if (!sharedslot)
> - xenvif_idx_release(queue, pending_idx,
> - XEN_NETIF_RSP_ERROR);
> + /* The first frag might still have this slot mapped */
> + if (!sharedslot && !err)
> + xenvif_idx_release(queue, pending_idx,
> + XEN_NETIF_RSP_ERROR);
Can't this be done after the loop, if there is an accumulated err? I think it would make the code slightly neater.
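E.g. (untested sketch, using the same identifiers as the hunk above):

	while (copies) {
		int newerr = (*gopp_copy)->status;

		if (unlikely(newerr)) {
			if (net_ratelimit())
				netdev_dbg(queue->vif->dev,
					   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
					   (*gopp_copy)->status,
					   pending_idx,
					   (*gopp_copy)->source.u.ref);
			err = newerr;
		}
		(*gopp_copy)++;
		copies--;
	}

	/* The first frag might still have this slot mapped */
	if (unlikely(err) && !sharedslot)
		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);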
> + err = newerr;
> + }
> + (*gopp_copy)++;
> + copies--;
> }
> - (*gopp_copy)++;
>
> check_frags:
> for (i = 0; i < nr_frags; i++, gop_map++) {
> @@ -910,6 +916,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
> xenvif_tx_err(queue, &txreq, extra_count, idx);
> break;
> }
> + XENVIF_TX_CB(skb)->copies = 0;
>
> skb_shinfo(skb)->nr_frags = ret;
> if (data_len < txreq.size)
> @@ -933,6 +940,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
> "Can't allocate the frag_list
> skb.\n");
> break;
> }
> + XENVIF_TX_CB(nskb)->copies = 0;
> }
>
> if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
> @@ -990,6 +998,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
>
> queue->tx_copy_ops[*copy_ops].len = data_len;
If offset_in_page(skb->data) + data_len can exceed XEN_PAGE_SIZE, does this not need to be truncated?
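If so, an up-front clamp (rough, untested sketch; equivalent to the extra_len subtraction in the hunk below) might be clearer:

	queue->tx_copy_ops[*copy_ops].len =
		min_t(unsigned int, data_len,
		      XEN_PAGE_SIZE - offset_in_page(skb->data));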
Paul
> queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
> + XENVIF_TX_CB(skb)->copies++;
> +
> + if (offset_in_page(skb->data) + data_len > XEN_PAGE_SIZE) {
> + unsigned int extra_len = offset_in_page(skb->data) +
> + data_len - XEN_PAGE_SIZE;
> +
> + queue->tx_copy_ops[*copy_ops].len -= extra_len;
> + (*copy_ops)++;
> +
> + queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
> + queue->tx_copy_ops[*copy_ops].source.domid =
> + queue->vif->domid;
> + queue->tx_copy_ops[*copy_ops].source.offset =
> + txreq.offset + data_len - extra_len;
> +
> + queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
> + virt_to_gfn(skb->data + data_len - extra_len);
> + queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
> + queue->tx_copy_ops[*copy_ops].dest.offset = 0;
> +
> + queue->tx_copy_ops[*copy_ops].len = extra_len;
> + queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
> +
> + XENVIF_TX_CB(skb)->copies++;
> + }
>
> (*copy_ops)++;
>
> --
> 2.17.1
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@xxxxxxxxxxxxxxxxxxxx
> https://lists.xenproject.org/mailman/listinfo/xen-devel