RE: [PATCH net-next v6 2/5] net: phy: microchip_rds_ptp : Add rds ptp library for Microchip phys

From: Divya.Koppera
Date: Thu Dec 12 2024 - 06:18:46 EST


Hi Paolo Abeni,

Thanks for the review.

> -----Original Message-----
> From: Paolo Abeni <pabeni@xxxxxxxxxx>
> Sent: Thursday, December 12, 2024 3:45 PM
> To: Divya Koppera - I30481 <Divya.Koppera@xxxxxxxxxxxxx>;
> andrew@xxxxxxx; Arun Ramadoss - I17769
> <Arun.Ramadoss@xxxxxxxxxxxxx>; UNGLinuxDriver
> <UNGLinuxDriver@xxxxxxxxxxxxx>; hkallweit1@xxxxxxxxx;
> linux@xxxxxxxxxxxxxxx; davem@xxxxxxxxxxxxx; edumazet@xxxxxxxxxx;
> kuba@xxxxxxxxxx; netdev@xxxxxxxxxxxxxxx; linux-kernel@xxxxxxxxxxxxxxx;
> richardcochran@xxxxxxxxx; vadim.fedorenko@xxxxxxxxx
> Subject: Re: [PATCH net-next v6 2/5] net: phy: microchip_rds_ptp : Add rds
> ptp library for Microchip phys
>
> On 12/9/24 16:17, Divya Koppera wrote:
> > diff --git a/drivers/net/phy/microchip_rds_ptp.c b/drivers/net/phy/microchip_rds_ptp.c
> > new file mode 100644
> > index 000000000000..d1c91c0f5e03
> > --- /dev/null
> > +++ b/drivers/net/phy/microchip_rds_ptp.c
> > @@ -0,0 +1,1009 @@
> > +// SPDX-License-Identifier: GPL-2.0
> > +// Copyright (C) 2024 Microchip Technology
> > +
> > +#include "microchip_rds_ptp.h"
> > +
> > +static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
> > + enum mchp_rds_ptp_fifo_dir dir)
>
> If you pass the relevant fifo to flush as the 2nd argument, compute 'dir'
> from that value, and call skb_queue_purge() there, you can avoid a bit of
> duplicate code.
>

I will change this in the next revision.
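
Roughly what I have in mind for the next revision (a sketch only, assuming
the skb queues stay as clock->rx_queue/clock->tx_queue from this patch):

static int mchp_rds_ptp_flush_fifo(struct mchp_rds_ptp_clock *clock,
				   struct sk_buff_head *queue)
{
	/* Derive the FIFO direction from the queue being flushed */
	enum mchp_rds_ptp_fifo_dir dir = (queue == &clock->tx_queue) ?
					 MCHP_RDS_PTP_EGRESS_FIFO :
					 MCHP_RDS_PTP_INGRESS_FIFO;
	struct phy_device *phydev = clock->phydev;
	int rc;

	/* Drop any skbs still waiting on a timestamp from this FIFO */
	skb_queue_purge(queue);

	/* Drain all pending entries from the hardware FIFO */
	for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
		rc = phy_read_mmd(phydev, PTP_MMD(clock),
				  dir == MCHP_RDS_PTP_EGRESS_FIFO ?
				  MCHP_RDS_PTP_TX_MSG_HDR2(BASE_PORT(clock)) :
				  MCHP_RDS_PTP_RX_MSG_HDR2(BASE_PORT(clock)));
		if (rc < 0)
			return rc;
	}

	/* Clear any latched FIFO interrupt status */
	return phy_read_mmd(phydev, PTP_MMD(clock),
			    MCHP_RDS_PTP_INT_STS(BASE_PORT(clock)));
}

The two call sites in mchp_rds_ptp_hwtstamp() would then pass
&clock->rx_queue and &clock->tx_queue, and the separate skb_queue_purge()
calls there can be dropped.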

> > +{
> > + struct phy_device *phydev = clock->phydev;
> > + int rc;
> > +
> > + for (int i = 0; i < MCHP_RDS_PTP_FIFO_SIZE; ++i) {
> > + rc = phy_read_mmd(phydev, PTP_MMD(clock),
> > + dir == MCHP_RDS_PTP_EGRESS_FIFO ?
> > + MCHP_RDS_PTP_TX_MSG_HDR2(BASE_PORT(clock)) :
> > + MCHP_RDS_PTP_RX_MSG_HDR2(BASE_PORT(clock)));
> > + if (rc < 0)
> > + return rc;
> > + }
> > + return phy_read_mmd(phydev, PTP_MMD(clock),
> > + MCHP_RDS_PTP_INT_STS(BASE_PORT(clock)));
> > +}
> > +
> > +static int mchp_rds_ptp_config_intr(struct mchp_rds_ptp_clock *clock,
> > + bool enable)
> > +{
> > + struct phy_device *phydev = clock->phydev;
> > +
> > + /* Enable or disable ptp interrupts */
> > + return phy_write_mmd(phydev, PTP_MMD(clock),
> > + MCHP_RDS_PTP_INT_EN(BASE_PORT(clock)),
> > + enable ? MCHP_RDS_PTP_INT_ALL_MSK : 0);
> > +}
> > +
> > +static void mchp_rds_ptp_txtstamp(struct mii_timestamper *mii_ts,
> > + struct sk_buff *skb, int type)
> > +{
> > + struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
> > + struct mchp_rds_ptp_clock,
> > + mii_ts);
> > +
> > + switch (clock->hwts_tx_type) {
> > + case HWTSTAMP_TX_ONESTEP_SYNC:
> > + if (ptp_msg_is_sync(skb, type)) {
> > + kfree_skb(skb);
> > + return;
> > + }
> > + fallthrough;
> > + case HWTSTAMP_TX_ON:
> > + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
> > + skb_queue_tail(&clock->tx_queue, skb);
> > + break;
> > + case HWTSTAMP_TX_OFF:
> > + default:
> > + kfree_skb(skb);
> > + break;
> > + }
> > +}
> > +
> > +static bool mchp_rds_ptp_get_sig_rx(struct sk_buff *skb, u16 *sig)
> > +{
> > + struct ptp_header *ptp_header;
> > + int type;
> > +
> > + skb_push(skb, ETH_HLEN);
> > + type = ptp_classify_raw(skb);
> > + if (type == PTP_CLASS_NONE)
> > + return false;
> > +
> > + ptp_header = ptp_parse_header(skb, type);
> > + if (!ptp_header)
> > + return false;
> > +
> > + skb_pull_inline(skb, ETH_HLEN);
> > +
> > + *sig = (__force u16)(ntohs(ptp_header->sequence_id));
> > +
> > + return true;
> > +}
> > +
> > +static bool mchp_rds_ptp_match_skb(struct mchp_rds_ptp_clock *clock,
> > + struct mchp_rds_ptp_rx_ts *rx_ts)
> > +{
> > + struct skb_shared_hwtstamps *shhwtstamps;
> > + struct sk_buff *skb, *skb_tmp;
> > + unsigned long flags;
> > + bool rc = false;
> > + u16 skb_sig;
> > +
> > + spin_lock_irqsave(&clock->rx_queue.lock, flags);
> > + skb_queue_walk_safe(&clock->rx_queue, skb, skb_tmp) {
> > + if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
> > + continue;
> > +
> > + if (skb_sig != rx_ts->seq_id)
> > + continue;
> > +
> > + __skb_unlink(skb, &clock->rx_queue);
> > +
> > + rc = true;
> > + break;
> > + }
> > + spin_unlock_irqrestore(&clock->rx_queue.lock, flags);
> > +
> > + if (rc) {
> > + shhwtstamps = skb_hwtstamps(skb);
> > + shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
> > + netif_rx(skb);
> > + }
> > +
> > + return rc;
> > +}
> > +
> > +static void mchp_rds_ptp_match_rx_ts(struct mchp_rds_ptp_clock *clock,
> > + struct mchp_rds_ptp_rx_ts *rx_ts)
> > +{
> > + unsigned long flags;
> > +
> > + /* If we failed to match the timestamp to an skb, add it to the
> > + * list so it can be matched when the frame arrives
> > + */
> > + if (!mchp_rds_ptp_match_skb(clock, rx_ts)) {
> > + spin_lock_irqsave(&clock->rx_ts_lock, flags);
> > + list_add(&rx_ts->list, &clock->rx_ts_list);
> > + spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
> > + } else {
> > + kfree(rx_ts);
> > + }
> > +}
> > +
> > +static void mchp_rds_ptp_match_rx_skb(struct mchp_rds_ptp_clock *clock,
> > + struct sk_buff *skb)
> > +{
> > + struct mchp_rds_ptp_rx_ts *rx_ts, *tmp, *rx_ts_var = NULL;
> > + struct skb_shared_hwtstamps *shhwtstamps;
> > + unsigned long flags;
> > + u16 skb_sig;
> > +
> > + if (!mchp_rds_ptp_get_sig_rx(skb, &skb_sig))
> > + return;
> > +
> > + /* Iterate over all RX timestamps and match it with the received skbs */
> > + spin_lock_irqsave(&clock->rx_ts_lock, flags);
> > + list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
> > + /* Check if we found the signature we were looking for. */
> > + if (skb_sig != rx_ts->seq_id)
> > + continue;
> > +
> > + shhwtstamps = skb_hwtstamps(skb);
> > + shhwtstamps->hwtstamp = ktime_set(rx_ts->seconds, rx_ts->nsec);
> > + netif_rx(skb);
> > +
> > + rx_ts_var = rx_ts;
> > +
> > + break;
> > + }
> > + spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
> > +
> > + if (rx_ts_var) {
> > + list_del(&rx_ts_var->list);
> > + kfree(rx_ts_var);
> > + } else {
> > + skb_queue_tail(&clock->rx_queue, skb);
> > + }
> > +}
> > +
> > +static bool mchp_rds_ptp_rxtstamp(struct mii_timestamper *mii_ts,
> > + struct sk_buff *skb, int type)
> > +{
> > + struct mchp_rds_ptp_clock *clock = container_of(mii_ts,
> > + struct mchp_rds_ptp_clock,
> > + mii_ts);
> > +
> > + if (clock->rx_filter == HWTSTAMP_FILTER_NONE ||
> > + type == PTP_CLASS_NONE)
> > + return false;
> > +
> > + if ((type & clock->version) == 0 || (type & clock->layer) == 0)
> > + return false;
> > +
> > + /* If a match occurs here, the skb is sent to the application. If
> > + * not, the skb is added to the queue and is delivered to the
> > + * application later, from the interrupt handler, when the
> > + * timestamp interrupt occurs. Either way the skb reaches the
> > + * application, so we should not return false here even if it
> > + * doesn't match.
> > + */
> > + mchp_rds_ptp_match_rx_skb(clock, skb);
> > +
> > + return true;
> > +}
> > +
> > +static int mchp_rds_ptp_hwtstamp(struct mii_timestamper *mii_ts,
> > + struct kernel_hwtstamp_config *config,
> > + struct netlink_ext_ack *extack)
> > +{
> > + struct mchp_rds_ptp_clock *clock =
> > + container_of(mii_ts, struct mchp_rds_ptp_clock,
> > + mii_ts);
> > + struct phy_device *phydev = clock->phydev;
> > + struct mchp_rds_ptp_rx_ts *rx_ts, *tmp;
> > + int txcfg = 0, rxcfg = 0;
> > + unsigned long flags;
> > + int rc;
> > +
> > + clock->hwts_tx_type = config->tx_type;
> > + clock->rx_filter = config->rx_filter;
> > +
> > + switch (config->rx_filter) {
> > + case HWTSTAMP_FILTER_NONE:
> > + clock->layer = 0;
> > + clock->version = 0;
> > + break;
> > + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
> > + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
> > + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
> > + clock->layer = PTP_CLASS_L4;
> > + clock->version = PTP_CLASS_V2;
> > + break;
> > + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
> > + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
> > + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
> > + clock->layer = PTP_CLASS_L2;
> > + clock->version = PTP_CLASS_V2;
> > + break;
> > + case HWTSTAMP_FILTER_PTP_V2_EVENT:
> > + case HWTSTAMP_FILTER_PTP_V2_SYNC:
> > + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
> > + clock->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
> > + clock->version = PTP_CLASS_V2;
> > + break;
> > + default:
> > + return -ERANGE;
> > + }
> > +
> > + /* Setup parsing of the frames and enable the timestamping for ptp
> > + * frames
> > + */
> > + if (clock->layer & PTP_CLASS_L2) {
> > + rxcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
> > + txcfg = MCHP_RDS_PTP_PARSE_CONFIG_LAYER2_EN;
> > + }
> > + if (clock->layer & PTP_CLASS_L4) {
> > + rxcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
> > + MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
> > + txcfg |= MCHP_RDS_PTP_PARSE_CONFIG_IPV4_EN |
> > + MCHP_RDS_PTP_PARSE_CONFIG_IPV6_EN;
> > + }
> > + rc = phy_write_mmd(phydev, PTP_MMD(clock),
> > + MCHP_RDS_PTP_RX_PARSE_CONFIG(BASE_PORT(clock)),
> > + rxcfg);
> > + if (rc < 0)
> > + return rc;
> > +
> > + rc = phy_write_mmd(phydev, PTP_MMD(clock),
> > + MCHP_RDS_PTP_TX_PARSE_CONFIG(BASE_PORT(clock)),
> > + txcfg);
> > + if (rc < 0)
> > + return rc;
> > +
> > + rc = phy_write_mmd(phydev, PTP_MMD(clock),
> > + MCHP_RDS_PTP_RX_TIMESTAMP_EN(BASE_PORT(clock)),
> > + MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
> > + if (rc < 0)
> > + return rc;
> > +
> > + rc = phy_write_mmd(phydev, PTP_MMD(clock),
> > + MCHP_RDS_PTP_TX_TIMESTAMP_EN(BASE_PORT(clock)),
> > + MCHP_RDS_PTP_TIMESTAMP_EN_ALL);
> > + if (rc < 0)
> > + return rc;
> > +
> > + if (clock->hwts_tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
> > + /* Enable / disable of the TX timestamp in the SYNC frames */
> > + rc = phy_modify_mmd(phydev, PTP_MMD(clock),
> > + MCHP_RDS_PTP_TX_MOD(BASE_PORT(clock)),
> > + MCHP_RDS_PTP_TX_MOD_PTP_SYNC_TS_INSERT,
> > + MCHP_RDS_PTP_TX_MOD_PTP_SYNC_TS_INSERT);
> > + else
> > + rc = phy_modify_mmd(phydev, PTP_MMD(clock),
> > + MCHP_RDS_PTP_TX_MOD(BASE_PORT(clock)),
> > + MCHP_RDS_PTP_TX_MOD_PTP_SYNC_TS_INSERT,
> > + (u16)~MCHP_RDS_PTP_TX_MOD_PTP_SYNC_TS_INSERT);
> > +
> > + if (rc < 0)
> > + return rc;
> > +
> > + /* Now enable the timestamping interrupts */
> > + rc = mchp_rds_ptp_config_intr(clock,
> > + config->rx_filter !=
> > + HWTSTAMP_FILTER_NONE);
> > + if (rc < 0)
> > + return rc;
>
> At this point the H/W can trigger PTP interrupts and run
> mchp_rds_ptp_handle_interrupt(), right?
>

Yes, for the application start case.
For the application stop case, interrupts will be disabled.

> > +
> > + /* In case of multiple starts and stops, these need to be cleared */
> > + spin_lock_irqsave(&clock->rx_ts_lock, flags);
> > + list_for_each_entry_safe(rx_ts, tmp, &clock->rx_ts_list, list) {
> > + list_del(&rx_ts->list);
> > + kfree(rx_ts);
> > + }
> > + spin_unlock_irqrestore(&clock->rx_ts_lock, flags);
> > + skb_queue_purge(&clock->rx_queue);
> > + skb_queue_purge(&clock->tx_queue);
> > +
> > + rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_INGRESS_FIFO);
> > + if (rc < 0)
> > + return rc;
> > +
> > + rc = mchp_rds_ptp_flush_fifo(clock, MCHP_RDS_PTP_EGRESS_FIFO);
> Should the above be moved before enabling the irqs?
>

This change is valid for the application stop case; for the start case it is not.

I'll take care of it in the next revision.

> Thanks!
>
> Paolo

Thanks,
Divya