Since the ghost sndbuf shares the same physical memory with the peer
DMB, cursor update processing needs to be adapted to ensure that data
not yet consumed by the peer won't be overwritten.
So in this case, fin_curs and sndbuf_space, which were originally
updated right after sending the CDC message, must not be updated until
the peer has updated cons_curs.
Signed-off-by: Wen Gu <guwen@xxxxxxxxxxxxxxxxx>
---
net/smc/smc_cdc.c | 52 +++++++++++++++++++++++++++++++++++++----------
1 file changed, 41 insertions(+), 11 deletions(-)
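
Note for reviewers (illustrative only, not part of the patch): the
standalone sketch below models the cursor/space accounting described
above. All names (ring_conn, after_cdc_send, on_peer_cons_update,
nocopy) are made up for illustration, and a plain byte counter stands
in for the wrap-aware union smc_host_cursor. It shows the intended
behaviour: with a private sndbuf, free space is reclaimed right after
the CDC message is sent; with a ghost sndbuf that shares memory with
the peer DMB, reclaiming is deferred until the peer's cons cursor
advances.

#include <stdbool.h>
#include <stdio.h>

struct ring_conn {
        unsigned int len;       /* send buffer length */
        unsigned int tx_sent;   /* bytes produced so far */
        unsigned int tx_fin;    /* bytes whose space was reclaimed */
        unsigned int space;     /* free send buffer space */
        bool nocopy;            /* sndbuf shares memory with peer DMB */
};

/* wrap-aware distance between two cursors (simplified) */
static unsigned int curs_diff(unsigned int len, unsigned int old,
                              unsigned int new)
{
        return (new + len - old) % len;
}

/* After sending a CDC message: reclaim space immediately only when the
 * send buffer is a private copy.
 */
static void after_cdc_send(struct ring_conn *c)
{
        if (!c->nocopy) {
                c->space += curs_diff(c->len, c->tx_fin, c->tx_sent);
                c->tx_fin = c->tx_sent;
        }
}

/* When the peer's cons cursor arrives: in the shared-memory case this
 * is the first point where the data can no longer be overwritten, so
 * reclaim space up to the peer's cons cursor here.
 */
static void on_peer_cons_update(struct ring_conn *c, unsigned int peer_cons)
{
        if (c->nocopy) {
                c->space += curs_diff(c->len, c->tx_fin, peer_cons);
                c->tx_fin = peer_cons;
        }
}

int main(void)
{
        struct ring_conn c = { .len = 64, .space = 64, .nocopy = true };

        c.tx_sent = 16;              /* 16 bytes written to the shared buffer */
        c.space -= 16;
        after_cdc_send(&c);          /* no reclaim yet: peer may still read it */
        printf("after send: space=%u fin=%u\n", c.space, c.tx_fin);

        on_peer_cons_update(&c, 16); /* peer consumed the data */
        printf("after peer cons: space=%u fin=%u\n", c.space, c.tx_fin);
        return 0;
}
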
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c
index 3c06625ceb20..bf5b214ec15a 100644
--- a/net/smc/smc_cdc.c
+++ b/net/smc/smc_cdc.c
@@ -18,6 +18,7 @@
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
+#include "smc_ism.h"
/********************************** send *************************************/
@@ -255,17 +256,25 @@ int smcd_cdc_msg_send(struct smc_connection *conn)
return rc;
smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
- /* Calculate transmitted data and increment free send buffer space */
- diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
- &conn->tx_curs_sent);
- /* increased by confirmed number of bytes */
- smp_mb__before_atomic();
- atomic_add(diff, &conn->sndbuf_space);
- /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
- smp_mb__after_atomic();
- smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);
+ if (!smc_ism_support_dmb_nocopy(conn->lgr->smcd)) {
+ /* Update tx_curs_fin and sndbuf_space here only when the
+ * ghost sndbuf does not share memory with the peer DMB;
+ * otherwise defer until the peer has consumed the data.
+ */
+ /* Calculate transmitted data and increment free
+ * send buffer space
+ */
+ diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
+ &conn->tx_curs_sent);
+ /* increased by confirmed number of bytes */
+ smp_mb__before_atomic();
+ atomic_add(diff, &conn->sndbuf_space);
+ /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
+ smp_mb__after_atomic();
+ smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);
- smc_tx_sndbuf_nonfull(smc);
+ smc_tx_sndbuf_nonfull(smc);
+ }
return rc;
}
@@ -323,7 +332,7 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
{
union smc_host_cursor cons_old, prod_old;
struct smc_connection *conn = &smc->conn;
- int diff_cons, diff_prod;
+ int diff_cons, diff_prod, diff_tx;
smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
@@ -339,6 +348,27 @@ static void smc_cdc_msg_recv_action(struct smc_sock *smc,
atomic_add(diff_cons, &conn->peer_rmbe_space);
/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
smp_mb__after_atomic();
+
+ if (conn->lgr->is_smcd &&
+ smc_ism_support_dmb_nocopy(conn->lgr->smcd)) {
+ /* Ghost sndbuf shares the same memory region with the
+ * peer DMB, so tx_curs_fin and sndbuf_space are updated
+ * only now that the peer has consumed the data.
+ */
+ /* calculate the data consumed by the peer */
+ diff_tx = smc_curs_diff(conn->sndbuf_desc->len,
+ &conn->tx_curs_fin,
+ &conn->local_rx_ctrl.cons);
+ /* increase local sndbuf space and fin_curs */
+ smp_mb__before_atomic();
+ atomic_add(diff_tx, &conn->sndbuf_space);
+ /* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
+ smp_mb__after_atomic();
+ smc_curs_copy(&conn->tx_curs_fin,
+ &conn->local_rx_ctrl.cons, conn);
+
+ smc_tx_sndbuf_nonfull(smc);
+ }
}
diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,