[PATCH 2/3] net: macb: Add support for header data splitting
From: Rafal Ozieblo
Date: Sat Apr 14 2018 - 16:54:56 EST
This patch adds support for frames split across
multiple RX buffers. Besides header data splitting,
RX buffers shorter than the maximum frame length can
also be used. The only limitation is that the frame
header itself cannot be split across buffers.
Signed-off-by: Rafal Ozieblo <rafalo@xxxxxxxxxxx>
---
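Note for reviewers: below is a minimal sketch of how the new descriptor bits
are interpreted by gem_rx() in this patch. The free-standing helper and its
name are illustrative only (it assumes the driver's existing macb.h macros)
and are not part of the diff.

/* Illustration only: classify one RX buffer from its descriptor ctrl word.
 * SOF/EOF delimit the frame; HDR/EOH delimit the header and are only
 * meaningful when header data splitting (DMACFG.HDRS) is enabled.
 */
static void classify_rx_buffer(struct macb *bp, u32 ctrl)
{
	bool sof = !!(ctrl & MACB_BIT(RX_SOF));	/* first buffer of a frame */
	bool eof = !!(ctrl & MACB_BIT(RX_EOF));	/* last buffer of a frame */
	bool hdr = false, eoh = false;

	if (GEM_BFEXT(HDRS, gem_readl(bp, DMACFG))) {
		eoh = !!(ctrl & MACB_BIT(RX_EOH));	/* header ends in this buffer */
		if (!eof)
			hdr = !!(ctrl & MACB_BIT(RX_HDR));	/* buffer carries header data */
	}
	/* gem_rx() drops a frame whose first buffer carries header data
	 * without the end-of-header mark, i.e. a header split across
	 * buffers, which matches the limitation in the commit message.
	 */
}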
drivers/net/ethernet/cadence/macb.h | 13 +++
drivers/net/ethernet/cadence/macb_main.c | 137 +++++++++++++++++++++++--------
2 files changed, 118 insertions(+), 32 deletions(-)
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 33c9a48..a2cb805 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -295,6 +295,8 @@
/* Bitfields in DMACFG. */
#define GEM_FBLDO_OFFSET 0 /* fixed burst length for DMA */
#define GEM_FBLDO_SIZE 5
+#define GEM_HDRS_OFFSET 5 /* Header Data Splitting */
+#define GEM_HDRS_SIZE 1
#define GEM_ENDIA_DESC_OFFSET 6 /* endian swap mode for management descriptor access */
#define GEM_ENDIA_DESC_SIZE 1
#define GEM_ENDIA_PKT_OFFSET 7 /* endian swap mode for packet data access */
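Note: this hunk only adds the DMACFG.HDRS field definition; the code that
actually sets the bit is not shown in this patch. If it follows the driver's
existing DMACFG update pattern in macb_configure_dma(), it would look roughly
like the sketch below (an assumption for illustration, not part of this diff):

/* Sketch only (assumed enable path): turn on header data splitting using
 * the driver's existing gem_readl()/GEM_BFINS()/gem_writel() idiom.
 */
static void gem_enable_hdr_split(struct macb *bp)
{
	u32 dmacfg = gem_readl(bp, DMACFG);

	dmacfg = GEM_BFINS(HDRS, 1, dmacfg);
	gem_writel(bp, DMACFG, dmacfg);
}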
@@ -755,8 +757,12 @@ struct gem_tx_ts {
#define MACB_RX_SOF_SIZE 1
#define MACB_RX_EOF_OFFSET 15
#define MACB_RX_EOF_SIZE 1
+#define MACB_RX_HDR_OFFSET 16
+#define MACB_RX_HDR_SIZE 1
#define MACB_RX_CFI_OFFSET 16
#define MACB_RX_CFI_SIZE 1
+#define MACB_RX_EOH_OFFSET 17
+#define MACB_RX_EOH_SIZE 1
#define MACB_RX_VLAN_PRI_OFFSET 17
#define MACB_RX_VLAN_PRI_SIZE 3
#define MACB_RX_PRI_TAG_OFFSET 20
@@ -1086,6 +1092,11 @@ struct tsu_incr {
u32 ns;
};
+struct rx_frag_list {
+ struct sk_buff *skb_head;
+ struct sk_buff *skb_tail;
+};
+
struct macb_queue {
struct macb *bp;
int irq;
@@ -1121,6 +1132,8 @@ struct macb_queue {
unsigned int tx_ts_head, tx_ts_tail;
struct gem_tx_ts tx_timestamps[PTP_TS_BUFFER_SIZE];
#endif
+ struct rx_frag_list rx_frag;
+ u32 rx_frag_len;
};
struct ethtool_rx_fs_item {
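For context on the new per-queue fields: rx_frag holds the skbs of a frame
that is still being reassembled and rx_frag_len the bytes gathered so far.
Below is a minimal sketch of the chaining scheme used by gem_rx() in the
next file; the helper name is illustrative, not something this patch adds.

/* Illustration only: append one RX buffer's skb to the frame being
 * reassembled on this queue. The first buffer becomes the head skb;
 * later buffers are linked onto its frag_list, and the head's length
 * fields are grown so the stack sees one logical packet.
 */
static void chain_rx_fragment(struct macb_queue *queue, struct sk_buff *skb,
			      unsigned int len)
{
	struct sk_buff *head = queue->rx_frag.skb_head;

	if (!head) {
		queue->rx_frag.skb_head = skb;
		queue->rx_frag.skb_tail = skb;
		return;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		queue->rx_frag.skb_tail->next = skb;
	queue->rx_frag.skb_tail = skb;

	head->len += len;
	head->data_len += len;
	head->truesize += len;
}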
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 43201a8..27c406c 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -967,6 +967,13 @@ static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
*/
}
+static void gem_reset_rx_state(struct macb_queue *queue)
+{
+ queue->rx_frag.skb_head = NULL;
+ queue->rx_frag.skb_tail = NULL;
+ queue->rx_frag_len = 0;
+}
+
static int gem_rx(struct macb_queue *queue, int budget)
{
struct macb *bp = queue->bp;
@@ -977,6 +984,9 @@ static int gem_rx(struct macb_queue *queue, int budget)
int count = 0;
while (count < budget) {
+ struct sk_buff *skb_head, *skb_tail;
+ bool eoh = false, header = false;
+ bool sof, eof;
u32 ctrl;
dma_addr_t addr;
bool rxused;
@@ -995,57 +1005,118 @@ static int gem_rx(struct macb_queue *queue, int budget)
break;
queue->rx_tail++;
- count++;
-
- if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
+ skb = queue->rx_skbuff[entry];
+ if (unlikely(!skb)) {
netdev_err(bp->dev,
- "not whole frame pointed by descriptor\n");
+ "inconsistent Rx descriptor chain\n");
bp->dev->stats.rx_dropped++;
queue->stats.rx_dropped++;
break;
}
- skb = queue->rx_skbuff[entry];
- if (unlikely(!skb)) {
+ skb_head = queue->rx_frag.skb_head;
+ skb_tail = queue->rx_frag.skb_tail;
+ sof = !!(ctrl & MACB_BIT(RX_SOF));
+ eof = !!(ctrl & MACB_BIT(RX_EOF));
+ if (GEM_BFEXT(HDRS, gem_readl(bp, DMACFG))) {
+ eoh = !!(ctrl & MACB_BIT(RX_EOH));
+ if (!eof)
+ header = !!(ctrl & MACB_BIT(RX_HDR));
+ }
+
+ queue->rx_skbuff[entry] = NULL;
+ /* Discard if out-of-sequence or header split across buffers */
+ if ((!skb_head /* first frame buffer */
+ && (!sof /* without start of frame */
+ || (header && !eoh))) /* or without whole header */
+ || (skb_head && sof)) { /* or new start before EOF */
+ struct sk_buff *tmp_skb;
+
netdev_err(bp->dev,
- "inconsistent Rx descriptor chain\n");
+ "Incomplete frame received! (skb_head=%p sof=%u hdr=%u eoh=%u)\n",
+ skb_head, (u32)sof, (u32)header, (u32)eoh);
+ dev_kfree_skb(skb);
+ if (skb_head) {
+ skb = skb_shinfo(skb_head)->frag_list;
+ dev_kfree_skb(skb_head);
+ while (skb) {
+ tmp_skb = skb;
+ skb = skb->next;
+ dev_kfree_skb(tmp_skb);
+ }
+ }
bp->dev->stats.rx_dropped++;
queue->stats.rx_dropped++;
+ gem_reset_rx_state(queue);
break;
}
+
/* now everything is ready for receiving packet */
- queue->rx_skbuff[entry] = NULL;
len = ctrl & bp->rx_frm_len_mask;
+ /* Buffer length as reported in the descriptor:
+ * eoh: len = header size,
+ * eof: len = total frame size (including header),
+ * else: len = 0 and the whole buffer (bp->rx_buffer_size) is used
+ */
+ if (!len)
+ len = bp->rx_buffer_size;
+ else
+ /* On EOF or EOH, subtract the bytes already
+ * received in previous buffers
+ */
+ len -= queue->rx_frag_len;
+
netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
+ gem_ptp_do_rxstamp(bp, skb, desc);
+
skb_put(skb, len);
dma_unmap_single(&bp->pdev->dev, addr,
bp->rx_buffer_size, DMA_FROM_DEVICE);
- skb->protocol = eth_type_trans(skb, bp->dev);
- skb_checksum_none_assert(skb);
- if (bp->dev->features & NETIF_F_RXCSUM &&
- !(bp->dev->flags & IFF_PROMISC) &&
- GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- bp->dev->stats.rx_packets++;
- queue->stats.rx_packets++;
- bp->dev->stats.rx_bytes += skb->len;
- queue->stats.rx_bytes += skb->len;
-
- gem_ptp_do_rxstamp(bp, skb, desc);
-
-#if defined(DEBUG) && defined(VERBOSE_DEBUG)
- netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
- skb->len, skb->csum);
- print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
- skb_mac_header(skb), 16, true);
- print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
- skb->data, 32, true);
-#endif
-
- netif_receive_skb(skb);
+ if (!skb_head) {
+ /* first buffer in frame */
+ skb->protocol = eth_type_trans(skb, bp->dev);
+ skb_checksum_none_assert(skb);
+ if (bp->dev->features & NETIF_F_RXCSUM &&
+ !(bp->dev->flags & IFF_PROMISC) &&
+ GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ queue->rx_frag.skb_head = skb;
+ queue->rx_frag.skb_tail = skb;
+ skb_head = skb;
+ } else {
+ /* not first buffer in frame */
+ if (!skb_shinfo(skb_head)->frag_list)
+ skb_shinfo(skb_head)->frag_list = skb;
+ else
+ skb_tail->next = skb;
+ queue->rx_frag.skb_tail = skb;
+ skb_head->len += len;
+ skb_head->data_len += len;
+ skb_head->truesize += len;
+ }
+ if (eof) {
+ bp->dev->stats.rx_packets++;
+ queue->stats.rx_packets++;
+ bp->dev->stats.rx_bytes += skb_head->len;
+ queue->stats.rx_bytes += skb_head->len;
+
+ #if defined(DEBUG) && defined(VERBOSE_DEBUG)
+ netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ skb_head->len, skb_head->csum);
+ print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ skb_mac_header(skb_head), 16, true);
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ skb_head->data, 32, true);
+ #endif
+
+ netif_receive_skb(skb_head);
+ gem_reset_rx_state(queue);
+ count++;
+ } else {
+ queue->rx_frag_len += len;
+ }
}
gem_rx_refill(queue);
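As a reviewer aid, here is a worked example of the length accounting in the
hunk above, with illustrative numbers (128-byte RX buffers, a 1000-byte frame,
54-byte header; none of these values come from the patch):

/* Illustration only -- gem_rx() length accounting with
 * bp->rx_buffer_size = 128 and header data splitting enabled:
 *
 *   buffer 0: SOF, EOH, desc len = 54  -> len = 54 - 0 = 54 (header)
 *   buffer 1: desc len = 0             -> len = 128 (full buffer)
 *   ...                                   rx_frag_len keeps growing
 *   buffer 8: EOF, desc len = 1000     -> len = 1000 - rx_frag_len
 *                                             = 1000 - 950 = 50
 *
 * The EOF descriptor reports the total frame size, so the bytes already
 * gathered in queue->rx_frag_len are subtracted to get the payload that
 * the final buffer actually carries.
 */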
@@ -1905,6 +1976,8 @@ static int macb_alloc_consistent(struct macb *bp)
netdev_dbg(bp->dev,
"Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
+
+ gem_reset_rx_state(queue);
}
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
--
2.4.5