RE: [net-next, v3, 02/10] ptp: support ptp physical/virtual clocks conversion

From: Y.b. Lu
Date: Tue Jun 22 2021 - 06:39:37 EST


Hi Richard,

> -----Original Message-----
> From: Richard Cochran <richardcochran@xxxxxxxxx>
> Sent: June 19, 2021 12:06
> To: Y.b. Lu <yangbo.lu@xxxxxxx>
> Cc: netdev@xxxxxxxxxxxxxxx; linux-kernel@xxxxxxxxxxxxxxx;
> linux-kselftest@xxxxxxxxxxxxxxx; mptcp@xxxxxxxxxxxxxxx; David S . Miller
> <davem@xxxxxxxxxxxxx>; Jakub Kicinski <kuba@xxxxxxxxxx>; Mat Martineau
> <mathew.j.martineau@xxxxxxxxxxxxxxx>; Matthieu Baerts
> <matthieu.baerts@xxxxxxxxxxxx>; Shuah Khan <shuah@xxxxxxxxxx>; Michal
> Kubecek <mkubecek@xxxxxxx>; Florian Fainelli <f.fainelli@xxxxxxxxx>;
> Andrew Lunn <andrew@xxxxxxx>; Rui Sousa <rui.sousa@xxxxxxx>; Sebastien
> Laveze <sebastien.laveze@xxxxxxx>
> Subject: Re: [net-next, v3, 02/10] ptp: support ptp physical/virtual clocks
> conversion
>
> On Tue, Jun 15, 2021 at 05:45:09PM +0800, Yangbo Lu wrote:
>
> > diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
> > index a780435331c8..78414b3e16dd 100644
> > --- a/drivers/ptp/ptp_clock.c
> > +++ b/drivers/ptp/ptp_clock.c
> > @@ -76,6 +76,11 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp)
> > {
> > struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
> >
> > + if (ptp_guaranteed_pclock(ptp)) {
>
> Can we please invent a more descriptive name for this method?
> The word "guaranteed" suggests much more.
>
> > + pr_err("ptp: virtual clock in use, guarantee physical clock free
> > +running\n");
>
> This is good: ^^^^^^^^^^^^^^^^^^^^^^^^^
> You can drop this part:
> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>
> So, please rename ptp_guaranteed_pclock() to ptp_vclock_in_use();
>

Thank you. Will rename it to ptp_vclock_in_use().
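
Roughly, I expect the helper to end up something like this in v4 (a
sketch only, folding in the mutex you ask for below and the
is_virtual_clock rename you suggest below):

/*
 * Check whether the physical clock must stay free running because
 * virtual clocks on top of it are in use.
 */
static inline bool ptp_vclock_in_use(struct ptp_clock *ptp)
{
	bool in_use = false;

	if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
		return true;

	if (!ptp->is_virtual_clock && ptp->n_vclocks > 0)
		in_use = true;

	mutex_unlock(&ptp->n_vclocks_mux);

	return in_use;
}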

> > + return -EBUSY;
> > + }
> > +
> > return ptp->info->settime64(ptp->info, tp);
> > }
>
>
> > diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
> > index 3f388d63904c..6949afc9d733 100644
> > --- a/drivers/ptp/ptp_private.h
> > +++ b/drivers/ptp/ptp_private.h
> > @@ -46,6 +46,9 @@ struct ptp_clock {
> > const struct attribute_group *pin_attr_groups[2];
> > struct kthread_worker *kworker;
> > struct kthread_delayed_work aux_work;
> > + u8 n_vclocks;
>
> Why not use "unsigned int" type? I don't see a need to set an artificial limit.

Please see my explanation in the other email thread. Thanks.

>
> > + struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */
> > + bool vclock_flag;
>
> "flag" is vague. How about "is_virtual_clock" instead?

That's better. Will use it. Thank you.
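
In struct ptp_clock the new fields would then read roughly (sketch; the
n_vclocks type itself is still being discussed in the other thread):

	struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */
	bool is_virtual_clock;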

>
> > };
> >
> > #define info_to_vclock(d) container_of((d), struct ptp_vclock, info)
> > @@ -75,6 +78,18 @@ static inline int queue_cnt(struct timestamp_event_queue *q)
> > return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt;
> > }
> >
> > +/*
> > + * Guarantee physical clock to stay free running, if ptp virtual clocks
> > + * on it are in use.
> > + */
> > +static inline bool ptp_guaranteed_pclock(struct ptp_clock *ptp)
> > +{
> > + if (!ptp->vclock_flag && ptp->n_vclocks)
>
> Need to take mutex for n_vclocks to prevent load tearing.
>
> > + return true;
> > +
> > + return false;
> > +}
> > +
> > /*
> > * see ptp_chardev.c
> > */
>
> > @@ -148,6 +149,90 @@ static ssize_t pps_enable_store(struct device *dev,
> > }
> > static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store);
> >
> > +static int unregister_vclock(struct device *dev, void *data)
> > +{
> > + struct ptp_clock *ptp = dev_get_drvdata(dev);
> > + struct ptp_clock_info *info = ptp->info;
> > + struct ptp_vclock *vclock;
> > + u8 *num = data;
> > +
> > + vclock = info_to_vclock(info);
> > + dev_info(dev->parent, "delete virtual clock ptp%d\n",
> > + vclock->clock->index);
> > +
> > + ptp_vclock_unregister(vclock);
> > + (*num)--;
> > +
> > + /* For break. Not error. */
> > + if (*num == 0)
> > + return -EINVAL;
> > +
> > + return 0;
> > +}
> > +
> > +static ssize_t n_vclocks_show(struct device *dev,
> > + struct device_attribute *attr, char *page)
> > +{
> > + struct ptp_clock *ptp = dev_get_drvdata(dev);
> > +
> > + return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->n_vclocks);
>
> Take mutex.

Will take the mutex everywhere n_vclocks is accessed.
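
For example, the sysfs read side would become something like this (a
sketch only):

static ssize_t n_vclocks_show(struct device *dev,
			      struct device_attribute *attr, char *page)
{
	struct ptp_clock *ptp = dev_get_drvdata(dev);
	ssize_t size;

	if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
		return -ERESTARTSYS;

	size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks);

	mutex_unlock(&ptp->n_vclocks_mux);

	return size;
}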

>
> > +}
> > +
> > +static ssize_t n_vclocks_store(struct device *dev,
> > + struct device_attribute *attr,
> > + const char *buf, size_t count)
> > +{
> > + struct ptp_clock *ptp = dev_get_drvdata(dev);
> > + struct ptp_vclock *vclock;
> > + int err = -EINVAL;
> > + u8 num, i;
> > +
> > + if (kstrtou8(buf, 0, &num))
> > + goto out;
> > +
> > + if (num > PTP_MAX_VCLOCKS) {
> > + dev_err(dev, "max value is %d\n", PTP_MAX_VCLOCKS);
> > + goto out;
> > + }
> > +
> > + if (mutex_lock_interruptible(&ptp->n_vclocks_mux))
> > + return -ERESTARTSYS;
> > +
> > + /* Need to create more vclocks */
> > + if (num > ptp->n_vclocks) {
> > + for (i = 0; i < num - ptp->n_vclocks; i++) {
> > + vclock = ptp_vclock_register(ptp);
> > + if (!vclock) {
> > + mutex_unlock(&ptp->n_vclocks_mux);
> > + goto out;
> > + }
> > +
> > + dev_info(dev, "new virtual clock ptp%d\n",
> > + vclock->clock->index);
> > + }
> > + }
> > +
> > + /* Need to delete vclocks */
> > + if (num < ptp->n_vclocks) {
> > + i = ptp->n_vclocks - num;
> > + device_for_each_child_reverse(dev, &i,
> > + unregister_vclock);
> > + }
> > +
> > + if (num == 0)
> > + dev_info(dev, "only physical clock in use now\n");
> > + else
> > + dev_info(dev, "guarantee physical clock free running\n");
> > +
> > + ptp->n_vclocks = num;
> > + mutex_unlock(&ptp->n_vclocks_mux);
> > +
> > + return count;
> > +out:
> > + return err;
> > +}
> > +static DEVICE_ATTR_RW(n_vclocks);
> > +
> > static struct attribute *ptp_attrs[] = {
> > &dev_attr_clock_name.attr,
> >
>
>
> > diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
> > index 1d108d597f66..4b933dc1b81b 100644
> > --- a/include/uapi/linux/ptp_clock.h
> > +++ b/include/uapi/linux/ptp_clock.h
> > @@ -69,6 +69,11 @@
> > */
> > #define PTP_PEROUT_V1_VALID_FLAGS (0)
> >
> > +/*
> > + * Max number of PTP virtual clocks per PTP physical clock
> > + */
> > +#define PTP_MAX_VCLOCKS 20
>
> Why limit this to twenty clocks?

Please see my explanation in the other email thread. Thanks.

>
> Thanks,
> Richard