Re: [PATCH 03/18] xen/pvcalls: initialize the module and register the xenbus backend

From: Boris Ostrovsky
Date: Mon May 15 2017 - 21:41:40 EST

On 05/15/2017 04:35 PM, Stefano Stabellini wrote:
The pvcalls backend has one ioworker per CPU: the ioworkers are
implemented as a CPU-bound workqueue, and will deal with the actual
socket and data ring reads/writes.

ioworkers are global: we only have one set for all the frontends. They
process requests on their wqs list in order; once they are done with a
request, they remove it from the list. A spinlock protects the list.
Each ioworker is bound to a different CPU to maximize throughput.
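(pvcalls_back_ioworker() in the patch below is still an empty stub. A
minimal sketch of the drain loop described above could look like the
following; struct sock_mapping, its queue member, and the two I/O
helpers are assumptions for illustration, not part of this patch:)

/*
 * Hypothetical sketch only: struct sock_mapping, its "queue" member
 * and the two helpers below are assumed, not part of this patch.
 */
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map;

	for (;;) {
		/* take the next request off this worker's wqs list */
		spin_lock(&ioworker->lock);
		if (list_empty(&ioworker->wqs)) {
			spin_unlock(&ioworker->lock);
			break;
		}
		map = list_first_entry(&ioworker->wqs,
				       struct sock_mapping, queue);
		spin_unlock(&ioworker->lock);

		/* the actual socket and data ring reads/writes */
		handle_socket_read(map);	/* hypothetical helper */
		handle_socket_write(map);	/* hypothetical helper */

		/* done with this request: remove it from the list */
		spin_lock(&ioworker->lock);
		list_del_init(&map->queue);
		spin_unlock(&ioworker->lock);
	}
}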

Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
CC: boris.ostrovsky@xxxxxxxxxx
CC: jgross@xxxxxxxx
---
drivers/xen/pvcalls-back.c | 64 ++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 64 insertions(+)

diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index 2dbf7d8..46a889a 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -25,6 +25,26 @@
 #include <xen/xenbus.h>
 #include <xen/interface/io/pvcalls.h>

+struct pvcalls_ioworker {
+	struct work_struct register_work;
+	atomic_t io;
+	struct list_head wqs;
+	spinlock_t lock;
+	int num;
+};
+
+struct pvcalls_back_global {
+	struct pvcalls_ioworker *ioworkers;
+	int nr_ioworkers;
+	struct workqueue_struct *wq;
+	struct list_head privs;
+	struct rw_semaphore privs_lock;

Is there a reason why these are called "privs"?

And why are you using a rw semaphore? I only noticed two instances of use, and both take it for writing.
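(If both call sites really do take it for writing, a plain mutex would
express the same thing more directly. A minimal sketch, where the
list_add_tail() call site is assumed for illustration:)

	struct mutex privs_lock;	/* instead of struct rw_semaphore */

	mutex_init(&pvcalls_back_global.privs_lock);

	mutex_lock(&pvcalls_back_global.privs_lock);
	list_add_tail(&priv->list, &pvcalls_back_global.privs);	/* assumed call site */
	mutex_unlock(&pvcalls_back_global.privs_lock);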


+} pvcalls_back_global;
+
+static void pvcalls_back_ioworker(struct work_struct *work)
+{
+}
+
 static int pvcalls_back_probe(struct xenbus_device *dev,
			       const struct xenbus_device_id *id)
 {
@@ -59,3 +79,47 @@ static int pvcalls_back_uevent(struct xenbus_device *xdev,
 	.uevent = pvcalls_back_uevent,
 	.otherend_changed = pvcalls_back_changed,
 };
+
+static int __init pvcalls_back_init(void)
+{
+	int ret, i, cpu;
+
+	if (!xen_domain())
+		return -ENODEV;
+
+	ret = xenbus_register_backend(&pvcalls_back_driver);
+	if (ret < 0)
+		return ret;
+
+	init_rwsem(&pvcalls_back_global.privs_lock);
+	INIT_LIST_HEAD(&pvcalls_back_global.privs);
+	pvcalls_back_global.wq = alloc_workqueue("pvcalls_io", 0, 0);
+	if (!pvcalls_back_global.wq)
+		goto error;
+	pvcalls_back_global.nr_ioworkers = num_online_cpus();


Should nr_ioworkers be updated on CPU hot(un)plug?
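(If they should be, dynamic hotplug state callbacks would be one way to
keep the set in sync. A rough sketch; pvcalls_cpu_online() and
pvcalls_cpu_offline() are hypothetical handlers that would have to
start or quiesce an ioworker and adjust nr_ioworkers:)

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "xen/pvcalls:online",
				pvcalls_cpu_online, pvcalls_cpu_offline);
	if (ret < 0)
		goto error;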


+	pvcalls_back_global.ioworkers = kzalloc(
+		sizeof(*pvcalls_back_global.ioworkers) *
+		pvcalls_back_global.nr_ioworkers, GFP_KERNEL);
+	if (!pvcalls_back_global.ioworkers)
+		goto error;
+	i = 0;
+	for_each_online_cpu(cpu) {
+		pvcalls_back_global.ioworkers[i].num = i;
+		atomic_set(&pvcalls_back_global.ioworkers[i].io, 1);
+		spin_lock_init(&pvcalls_back_global.ioworkers[i].lock);
+		INIT_LIST_HEAD(&pvcalls_back_global.ioworkers[i].wqs);
+		INIT_WORK(&pvcalls_back_global.ioworkers[i].register_work,
+			pvcalls_back_ioworker);
+		i++;
+	}
+	return 0;
+
+error:
+	if (pvcalls_back_global.wq)
+		destroy_workqueue(pvcalls_back_global.wq);
+	xenbus_unregister_driver(&pvcalls_back_driver);
+	kfree(pvcalls_back_global.ioworkers);
+	memset(&pvcalls_back_global, 0, sizeof(pvcalls_back_global));
+	return -ENOMEM;

This routine could use more blank lines. (And in other patches too.)
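For example, the same error path with blank lines separating the steps:

error:
	if (pvcalls_back_global.wq)
		destroy_workqueue(pvcalls_back_global.wq);

	xenbus_unregister_driver(&pvcalls_back_driver);

	kfree(pvcalls_back_global.ioworkers);
	memset(&pvcalls_back_global, 0, sizeof(pvcalls_back_global));

	return -ENOMEM;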

-boris

+}
+module_init(pvcalls_back_init);
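
(Not part of this patch: if the module is ever made unloadable, a
matching teardown mirroring the init path above might look roughly
like this:)

static void __exit pvcalls_back_fin(void)
{
	/* hypothetical sketch, mirroring pvcalls_back_init() */
	xenbus_unregister_driver(&pvcalls_back_driver);
	destroy_workqueue(pvcalls_back_global.wq);
	kfree(pvcalls_back_global.ioworkers);
}
module_exit(pvcalls_back_fin);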