RE: [PATCH net-next 02/37] rxrpc: Use umin() and umax() rather than min_t()/max_t() where possible

From: David Laight
Date: Wed Dec 04 2024 - 21:38:13 EST


From: David Howells <dhowells@xxxxxxxxxx>
> Sent: 02 December 2024 14:30
>
> Use umin() and umax() rather than min_t()/max_t() where the type specified
> is an unsigned type.

You are also changing some max() to umax().
Presumably those have always passed the type check, so plain max() is fine.
And max(foo, 1) would have required 'foo' to be 'signed int', so it could
potentially be negative: max(-1, 1) is 1, but umax(-1, 1) is undefined.
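A quick user-space sketch (not the kernel macros, just the underlying
implicit conversion) shows the difference:

#include <stdio.h>

/* plain signed comparison, as max(foo, 1) would have done */
#define smax(a, b)  ((a) > (b) ? (a) : (b))
/* force both sides to unsigned, roughly what umax() ends up doing */
#define fumax(a, b) ((a) + 0u > (b) + 0u ? (a) + 0u : (b) + 0u)

int main(void)
{
	int foo = -1;

	printf("max(-1, 1)  -> %d\n", smax(foo, 1));   /* 1 */
	printf("umax(-1, 1) -> %u\n", fumax(foo, 1));  /* 4294967295: -1 converts to UINT_MAX */
	return 0;
}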

I actually suspect a lot of the remaining min_t()/max_t() could now be
plain min()/max().
It looks like they were only used because someone couldn't be bothered to
write unsigned constants; min(unsigned_val, 1) is now accepted as well as
min(unsigned_val, 1u).
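e.g. (assuming reply_len is an unsigned type) the rxperf hunk below:
	len = min_t(size_t, reply_len, PAGE_SIZE);
could probably just be:
	len = min(reply_len, PAGE_SIZE);
and similarly for most of the other min_t()/max_t() against constants.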

David


>
> Signed-off-by: David Howells <dhowells@xxxxxxxxxx>
> cc: Marc Dionne <marc.dionne@xxxxxxxxxxxx>
> cc: "David S. Miller" <davem@xxxxxxxxxxxxx>
> cc: Eric Dumazet <edumazet@xxxxxxxxxx>
> cc: Jakub Kicinski <kuba@xxxxxxxxxx>
> cc: Paolo Abeni <pabeni@xxxxxxxxxx>
> cc: linux-afs@xxxxxxxxxxxxxxxxxxx
> cc: netdev@xxxxxxxxxxxxxxx
> ---
> net/rxrpc/call_event.c  |  5 ++---
> net/rxrpc/call_object.c |  4 ++--
> net/rxrpc/conn_client.c |  2 +-
> net/rxrpc/input.c       | 13 +++++--------
> net/rxrpc/insecure.c    |  2 +-
> net/rxrpc/io_thread.c   |  2 +-
> net/rxrpc/output.c      |  2 +-
> net/rxrpc/rtt.c         |  6 +++---
> net/rxrpc/rxkad.c       |  6 +++---
> net/rxrpc/rxperf.c      |  2 +-
> net/rxrpc/sendmsg.c     |  2 +-
> 11 files changed, 21 insertions(+), 25 deletions(-)
>
> diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
> index 7bbb68504766..c4754cc9b8d4 100644
> --- a/net/rxrpc/call_event.c
> +++ b/net/rxrpc/call_event.c
> @@ -233,8 +233,7 @@ static void rxrpc_close_tx_phase(struct rxrpc_call *call)
>
> static bool rxrpc_tx_window_has_space(struct rxrpc_call *call)
> {
> - unsigned int winsize = min_t(unsigned int, call->tx_winsize,
> - call->cong_cwnd + call->cong_extra);
> + unsigned int winsize = umin(call->tx_winsize, call->cong_cwnd + call->cong_extra);
> rxrpc_seq_t window = call->acks_hard_ack, wtop = window + winsize;
> rxrpc_seq_t tx_top = call->tx_top;
> int space;
> @@ -467,7 +466,7 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
> } else {
> unsigned long nowj = jiffies, delayj, nextj;
>
> - delayj = max(nsecs_to_jiffies(delay), 1);
> + delayj = umax(nsecs_to_jiffies(delay), 1);
> nextj = nowj + delayj;
> if (time_before(nextj, call->timer.expires) ||
> !timer_pending(&call->timer)) {
> diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
> index f9e983a12c14..0df647d1d3a2 100644
> --- a/net/rxrpc/call_object.c
> +++ b/net/rxrpc/call_object.c
> @@ -220,9 +220,9 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
> __set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
>
> if (p->timeouts.normal)
> - call->next_rx_timo = min(p->timeouts.normal, 1);
> + call->next_rx_timo = umin(p->timeouts.normal, 1);
> if (p->timeouts.idle)
> - call->next_req_timo = min(p->timeouts.idle, 1);
> + call->next_req_timo = umin(p->timeouts.idle, 1);
> if (p->timeouts.hard)
> call->hard_timo = p->timeouts.hard;
>
> diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
> index bb11e8289d6d..86fb18bcd188 100644
> --- a/net/rxrpc/conn_client.c
> +++ b/net/rxrpc/conn_client.c
> @@ -231,7 +231,7 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
> distance = id - id_cursor;
> if (distance < 0)
> distance = -distance;
> - limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
> + limit = umax(atomic_read(&rxnet->nr_conns) * 4, 1024);
> if (distance > limit)
> goto mark_dont_reuse;
>
> diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
> index 16d49a861dbb..49e35be7dc13 100644
> --- a/net/rxrpc/input.c
> +++ b/net/rxrpc/input.c
> @@ -44,8 +44,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
>
> if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
> summary->retrans_timeo = true;
> - call->cong_ssthresh = max_t(unsigned int,
> - summary->flight_size / 2, 2);
> + call->cong_ssthresh = umax(summary->flight_size / 2, 2);
> cwnd = 1;
> if (cwnd >= call->cong_ssthresh &&
> call->cong_mode == RXRPC_CALL_SLOW_START) {
> @@ -113,8 +112,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
>
> change = rxrpc_cong_begin_retransmission;
> call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
> - call->cong_ssthresh = max_t(unsigned int,
> - summary->flight_size / 2, 2);
> + call->cong_ssthresh = umax(summary->flight_size / 2, 2);
> cwnd = call->cong_ssthresh + 3;
> call->cong_extra = 0;
> call->cong_dup_acks = 0;
> @@ -206,9 +204,8 @@ void rxrpc_congestion_degrade(struct rxrpc_call *call)
> rxrpc_inc_stat(call->rxnet, stat_tx_data_cwnd_reset);
> call->tx_last_sent = now;
> call->cong_mode = RXRPC_CALL_SLOW_START;
> - call->cong_ssthresh = max_t(unsigned int, call->cong_ssthresh,
> - call->cong_cwnd * 3 / 4);
> - call->cong_cwnd = max_t(unsigned int, call->cong_cwnd / 2, RXRPC_MIN_CWND);
> + call->cong_ssthresh = umax(call->cong_ssthresh, call->cong_cwnd * 3 / 4);
> + call->cong_cwnd = umax(call->cong_cwnd / 2, RXRPC_MIN_CWND);
> }
>
> /*
> @@ -709,7 +706,7 @@ static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb
> call->tx_winsize = rwind;
> }
>
> - mtu = min(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));
> + mtu = umin(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));
>
> peer = call->peer;
> if (mtu < peer->maxdata) {
> diff --git a/net/rxrpc/insecure.c b/net/rxrpc/insecure.c
> index 6716c021a532..751eb621021d 100644
> --- a/net/rxrpc/insecure.c
> +++ b/net/rxrpc/insecure.c
> @@ -19,7 +19,7 @@ static int none_init_connection_security(struct rxrpc_connection *conn,
> */
> static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
> {
> - return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 1, gfp);
> + return rxrpc_alloc_data_txbuf(call, umin(remain, RXRPC_JUMBO_DATALEN), 1, gfp);
> }
>
> static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
> diff --git a/net/rxrpc/io_thread.c b/net/rxrpc/io_thread.c
> index 07c74c77d802..7af5adf53b25 100644
> --- a/net/rxrpc/io_thread.c
> +++ b/net/rxrpc/io_thread.c
> @@ -558,7 +558,7 @@ int rxrpc_io_thread(void *data)
> }
>
> timeout = nsecs_to_jiffies(delay_ns);
> - timeout = max(timeout, 1UL);
> + timeout = umax(timeout, 1);
> schedule_timeout(timeout);
> __set_current_state(TASK_RUNNING);
> continue;
> diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
> index 5ea9601efd05..85112ea31a39 100644
> --- a/net/rxrpc/output.c
> +++ b/net/rxrpc/output.c
> @@ -118,7 +118,7 @@ static void rxrpc_fill_out_ack(struct rxrpc_call *call,
> txb->kvec[1].iov_len = ack->nAcks;
>
> wrap = RXRPC_SACK_SIZE - sack;
> - to = min_t(unsigned int, ack->nAcks, RXRPC_SACK_SIZE);
> + to = umin(ack->nAcks, RXRPC_SACK_SIZE);
>
> if (sack + ack->nAcks <= RXRPC_SACK_SIZE) {
> memcpy(sackp, call->ackr_sack_table + sack, ack->nAcks);
> diff --git a/net/rxrpc/rtt.c b/net/rxrpc/rtt.c
> index cdab7b7d08a0..6dc51486b5a6 100644
> --- a/net/rxrpc/rtt.c
> +++ b/net/rxrpc/rtt.c
> @@ -27,7 +27,7 @@ static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
>
> static u32 rxrpc_bound_rto(u32 rto)
> {
> - return min(rto, RXRPC_RTO_MAX);
> + return umin(rto, RXRPC_RTO_MAX);
> }
>
> /*
> @@ -91,11 +91,11 @@ static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
> /* no previous measure. */
> srtt = m << 3; /* take the measured time to be rtt */
> peer->mdev_us = m << 1; /* make sure rto = 3*rtt */
> - peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer));
> + peer->rttvar_us = umax(peer->mdev_us, rxrpc_rto_min_us(peer));
> peer->mdev_max_us = peer->rttvar_us;
> }
>
> - peer->srtt_us = max(1U, srtt);
> + peer->srtt_us = umax(srtt, 1);
> }
>
> /*
> diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
> index 48a1475e6b06..e3194d73dd84 100644
> --- a/net/rxrpc/rxkad.c
> +++ b/net/rxrpc/rxkad.c
> @@ -150,11 +150,11 @@ static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t rem
> struct rxrpc_txbuf *txb;
> size_t shdr, space;
>
> - remain = min(remain, 65535 - sizeof(struct rxrpc_wire_header));
> + remain = umin(remain, 65535 - sizeof(struct rxrpc_wire_header));
>
> switch (call->conn->security_level) {
> default:
> - space = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
> + space = umin(remain, RXRPC_JUMBO_DATALEN);
> return rxrpc_alloc_data_txbuf(call, space, 1, gfp);
> case RXRPC_SECURITY_AUTH:
> shdr = sizeof(struct rxkad_level1_hdr);
> @@ -164,7 +164,7 @@ static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t rem
> break;
> }
>
> - space = min_t(size_t, round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN), remain + shdr);
> + space = umin(round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN), remain + shdr);
> space = round_up(space, RXKAD_ALIGN);
>
> txb = rxrpc_alloc_data_txbuf(call, space, RXKAD_ALIGN, gfp);
> diff --git a/net/rxrpc/rxperf.c b/net/rxrpc/rxperf.c
> index 085e7892d310..7ef93407be83 100644
> --- a/net/rxrpc/rxperf.c
> +++ b/net/rxrpc/rxperf.c
> @@ -503,7 +503,7 @@ static int rxperf_process_call(struct rxperf_call *call)
> reply_len + sizeof(rxperf_magic_cookie));
>
> while (reply_len > 0) {
> - len = min_t(size_t, reply_len, PAGE_SIZE);
> + len = umin(reply_len, PAGE_SIZE);
> bvec_set_page(&bv, ZERO_PAGE(0), len, 0);
> iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, len);
> msg.msg_flags = MSG_MORE;
> diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
> index 6abb8eec1b2b..b04afb5df241 100644
> --- a/net/rxrpc/sendmsg.c
> +++ b/net/rxrpc/sendmsg.c
> @@ -360,7 +360,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
>
> /* append next segment of data to the current buffer */
> if (msg_data_left(msg) > 0) {
> - size_t copy = min_t(size_t, txb->space, msg_data_left(msg));
> + size_t copy = umin(txb->space, msg_data_left(msg));
>
> _debug("add %zu", copy);
> if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,
>
