[RFC 11/19] staging: qlge: the number of pages to contain a buffer queue is constant
From: Coiby Xu
Date: Mon Jun 21 2021 - 09:51:04 EST
This patch extends commit ec705b983b46b8e2d3cafd40c188458bf4241f11
("staging: qlge: Remove qlge_bq.len & size"). Since the small buffer
queue (sbq) and the large buffer queue (lbq) share the same length,
QLGE_BQ_LEN, the number of pages needed to contain a buffer queue is
also known at compile time, so MAX_DB_PAGES_PER_BQ no longer needs to
take the queue length as a parameter.
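For illustration, with QLGE_BQ_LEN fixed the macro folds to a single
compile-time constant. A minimal user-space sketch of the same
expression (assuming QLGE_BQ_LEN is 256; verify the actual value in
qlge.h, and the typedef merely stands in for the kernel's u64):

  #include <stdio.h>

  typedef unsigned long long u64;  /* stand-in for the kernel type */

  #define DB_PAGE_SIZE 4096
  #define QLGE_BQ_LEN  256         /* illustrative value, check qlge.h */

  /* Same expression as the patched macro: 256 * 8 = 2048 bytes -> 1 page */
  #define MAX_DB_PAGES_PER_BQ \
          (((QLGE_BQ_LEN * sizeof(u64)) / DB_PAGE_SIZE) + \
           (((QLGE_BQ_LEN * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))

  int main(void)
  {
          /* Evaluated entirely at compile time */
          _Static_assert(MAX_DB_PAGES_PER_BQ == 1, "one 4k page per queue");
          printf("MAX_DB_PAGES_PER_BQ = %zu\n", (size_t)MAX_DB_PAGES_PER_BQ);
          return 0;
  }

Under that assumed value, the do/while loops in qlge_start_cq() run a
single iteration and RX_RING_SHADOW_SPACE works out to 3 * sizeof(u64).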
Signed-off-by: Coiby Xu <coiby.xu@xxxxxxxxx>
---
drivers/staging/qlge/qlge.h | 13 ++++++-------
drivers/staging/qlge/qlge_main.c | 8 ++++----
2 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/drivers/staging/qlge/qlge.h b/drivers/staging/qlge/qlge.h
index 9177baa9f022..32755b0e2fb7 100644
--- a/drivers/staging/qlge/qlge.h
+++ b/drivers/staging/qlge/qlge.h
@@ -42,16 +42,15 @@
#define DB_PAGE_SIZE 4096
-/* Calculate the number of (4k) pages required to
- * contain a buffer queue of the given length.
+/*
+ * The number of (4k) pages required to contain a buffer queue.
*/
-#define MAX_DB_PAGES_PER_BQ(x) \
- (((x * sizeof(u64)) / DB_PAGE_SIZE) + \
- (((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
+#define MAX_DB_PAGES_PER_BQ \
+ (((QLGE_BQ_LEN * sizeof(u64)) / DB_PAGE_SIZE) + \
+ (((QLGE_BQ_LEN * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64) + \
- MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN) * sizeof(u64))
+ MAX_DB_PAGES_PER_BQ * sizeof(u64) * 2)
#define LARGE_BUFFER_MAX_SIZE 4096
#define LARGE_BUFFER_MIN_SIZE 2048
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 94853b182608..7aee9e904097 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -3015,8 +3015,8 @@ static int qlge_start_cq(struct qlge_adapter *qdev, struct qlge_cq *cq)
shadow_reg_dma += sizeof(u64);
rx_ring->lbq.base_indirect = shadow_reg;
rx_ring->lbq.base_indirect_dma = shadow_reg_dma;
- shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
- shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ);
+ shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ);
rx_ring->sbq.base_indirect = shadow_reg;
rx_ring->sbq.base_indirect_dma = shadow_reg_dma;
/* PCI doorbell mem area + 0x18 for large buffer consumer */
@@ -3034,7 +3034,7 @@ static int qlge_start_cq(struct qlge_adapter *qdev, struct qlge_cq *cq)
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ } while (page_entries < MAX_DB_PAGES_PER_BQ);
cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
cqicb->lbq_buf_size =
cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
@@ -3051,7 +3051,7 @@ static int qlge_start_cq(struct qlge_adapter *qdev, struct qlge_cq *cq)
tmp += DB_PAGE_SIZE;
base_indirect_ptr++;
page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+ } while (page_entries < MAX_DB_PAGES_PER_BQ);
cqicb->sbq_addr = cpu_to_le64(rx_ring->sbq.base_indirect_dma);
cqicb->sbq_buf_size = cpu_to_le16(QLGE_SMALL_BUFFER_SIZE);
cqicb->sbq_len = cpu_to_le16(QLGE_FIT16(QLGE_BQ_LEN));
--
2.32.0