[PATCH v2 08/18] xen/pvcalls: implement connect command
From: Stefano Stabellini
Date: Fri May 19 2017 - 19:25:38 EST
Allocate a socket. Keep track of socket <-> ring mappings with a new data
structure called sock_mapping. Implement the connect command by calling
inet_stream_connect and by mapping the new indexes page and data ring.
Allocate a workqueue and a work_struct, called ioworker, to perform
reads from and writes to the socket.
When an active socket is closed (sk_state_change), set in_error to
-ENOTCONN and notify the other end, as specified by the protocol.
sk_data_ready and pvcalls_back_ioworker will be implemented later.
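For reference, a rough sketch of the connect request a frontend would
issue for this command (a hypothetical snippet: the req->u.connect
fields and PVCALLS_CONNECT come from the protocol header used by this
series, everything else is schematic):

  struct xen_pvcalls_request *req;

  req = RING_GET_REQUEST(&ring, ring.req_prod_pvt++);
  req->req_id = req_id;
  req->cmd = PVCALLS_CONNECT;
  req->u.connect.id = sock_id;        /* frontend-chosen socket id */
  req->u.connect.ref = indexes_gref;  /* grant ref of the indexes page */
  req->u.connect.evtchn = evtchn;     /* per-socket event channel */
  memcpy(&req->u.connect.addr, &addr, addr_len);
  req->u.connect.len = addr_len;
  req->u.connect.flags = 0;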
Signed-off-by: Stefano Stabellini <stefano@xxxxxxxxxxx>
CC: boris.ostrovsky@xxxxxxxxxx
CC: jgross@xxxxxxxx
---
drivers/xen/pvcalls-back.c | 161 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 161 insertions(+)
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index fed54bf..65fbc39 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -57,6 +57,40 @@ struct pvcalls_back_priv {
struct work_struct register_work;
};
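+/*
+ * Worker used to perform reads from and writes to an active socket;
+ * the actual I/O handler is still a stub at this point in the series.
+ */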
+struct pvcalls_ioworker {
+ struct work_struct register_work;
+ struct workqueue_struct *wq;
+ unsigned int cpu;
+};
+
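+/*
+ * One sock_mapping per active socket: tracks the socket <-> ring
+ * mapping, i.e. the kernel socket, the indexes page, the data ring,
+ * the event channel irq and the ioworker serving the socket.
+ */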
+struct sock_mapping {
+ struct list_head list;
+ struct pvcalls_back_priv *priv;
+ struct socket *sock;
+ uint64_t id;
+ grant_ref_t ref;
+ struct pvcalls_data_intf *ring;
+ void *bytes;
+ struct pvcalls_data data;
+ uint32_t ring_order;
+ int irq;
+ atomic_t read;
+ atomic_t write;
+ atomic_t io;
+ atomic_t release;
+ void (*saved_data_ready)(struct sock *sk);
+ struct pvcalls_ioworker ioworker;
+};
+
+static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
+static int pvcalls_back_release_active(struct xenbus_device *dev,
+ struct pvcalls_back_priv *priv,
+ struct sock_mapping *map);
+
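+/* Stub: reads and writes on the socket are implemented later. */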
+static void pvcalls_back_ioworker(struct work_struct *work)
+{
+}
+
static int pvcalls_back_socket(struct xenbus_device *dev,
struct xen_pvcalls_request *req)
{
@@ -85,9 +119,131 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
return ret;
}
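+/*
+ * sk_state_change handler: when an active socket is closed, report
+ * -ENOTCONN on the indexes page and notify the other end, as
+ * specified by the protocol.
+ */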
+static void pvcalls_sk_state_change(struct sock *sock)
+{
+ struct sock_mapping *map = sock->sk_user_data;
+ struct pvcalls_data_intf *intf;
+
+ if (map == NULL)
+ return;
+
+ intf = map->ring;
+ intf->in_error = -ENOTCONN;
+ notify_remote_via_irq(map->irq);
+}
+
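+/* Stub: sk_data_ready is implemented by a later patch. */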
+static void pvcalls_sk_data_ready(struct sock *sock)
+{
+}
+
static int pvcalls_back_connect(struct xenbus_device *dev,
struct xen_pvcalls_request *req)
{
+ struct pvcalls_back_priv *priv;
+ int ret;
+ struct socket *sock;
+ struct sock_mapping *map = NULL;
+ void *page;
+ struct xen_pvcalls_response *rsp;
+
+ priv = dev_get_drvdata(&dev->dev);
+
+ map = kzalloc(sizeof(*map), GFP_KERNEL);
+ if (map == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
+ if (ret < 0) {
+ kfree(map);
+ goto out;
+ }
+
+ map->priv = priv;
+ map->sock = sock;
+ map->id = req->u.connect.id;
+ map->ref = req->u.connect.ref;
+
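+ /* map the indexes page from the grant ref provided by the frontend */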
+ ret = xenbus_map_ring_valloc(dev, &req->u.connect.ref, 1, &page);
+ if (ret < 0) {
+ sock_release(map->sock);
+ kfree(map);
+ goto out;
+ }
+ map->ring = page;
+ map->ring_order = map->ring->ring_order;
+ /* first read the order, then map the data ring */
+ virt_rmb();
+ if (map->ring_order > MAX_RING_ORDER) {
+ ret = -EFAULT;
+ xenbus_unmap_ring_vfree(dev, map->ring);
+ sock_release(map->sock);
+ kfree(map);
+ goto out;
+ }
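+ /* map the data ring; its grant refs are listed on the indexes page */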
+ ret = xenbus_map_ring_valloc(dev, map->ring->ref,
+ (1 << map->ring_order), &page);
+ if (ret < 0) {
+ sock_release(map->sock);
+ xenbus_unmap_ring_vfree(dev, map->ring);
+ kfree(map);
+ goto out;
+ }
+ map->bytes = page;
+
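+ /* bind the per-socket event channel to pvcalls_back_conn_event */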
+ ret = bind_interdomain_evtchn_to_irqhandler(priv->dev->otherend_id,
+ req->u.connect.evtchn,
+ pvcalls_back_conn_event,
+ 0,
+ "pvcalls-backend",
+ map);
+ if (ret < 0) {
+ xenbus_unmap_ring_vfree(dev, map->bytes);
+ xenbus_unmap_ring_vfree(dev, map->ring);
+ sock_release(map->sock);
+ kfree(map);
+ goto out;
+ }
+ map->irq = ret;
+
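+ /* split the mapped pages into the incoming and outgoing data rings */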
+ map->data.in = map->bytes;
+ map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);
+
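+ /* per-socket unbound workqueue, processing one work item at a time */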
+ map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
+ if (!map->ioworker.wq) {
+ ret = -ENOMEM;
+ unbind_from_irqhandler(map->irq, map);
+ xenbus_unmap_ring_vfree(dev, map->bytes);
+ xenbus_unmap_ring_vfree(dev, map->ring);
+ sock_release(map->sock);
+ kfree(map);
+ goto out;
+ }
+ map->ioworker.cpu = get_random_int() % num_online_cpus();
+ atomic_set(&map->io, 1);
+ INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);
+
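+ /* track the new socket <-> ring mapping under socket_lock */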
+ down(&priv->socket_lock);
+ list_add_tail(&map->list, &priv->socket_mappings);
+ up(&priv->socket_lock);
+
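+ /* connect; on success, take over the socket's callbacks */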
+ ret = inet_stream_connect(sock, (struct sockaddr *)&req->u.connect.addr,
+ req->u.connect.len, req->u.connect.flags);
+ if (ret < 0) {
+ pvcalls_back_release_active(dev, priv, map);
+ } else {
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ map->saved_data_ready = sock->sk->sk_data_ready;
+ sock->sk->sk_user_data = map;
+ sock->sk->sk_data_ready = pvcalls_sk_data_ready;
+ sock->sk->sk_state_change = pvcalls_sk_state_change;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
+ }
+
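+ /* always write a response back to the frontend */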
+out:
+ rsp = RING_GET_RESPONSE(&priv->ring, priv->ring.rsp_prod_pvt++);
+ rsp->req_id = req->req_id;
+ rsp->cmd = req->cmd;
+ rsp->u.connect.id = req->u.connect.id;
+ rsp->ret = ret;
+
+ return ret;
+}
+
+static int pvcalls_back_release_active(struct xenbus_device *dev,
+ struct pvcalls_back_priv *priv,
+ struct sock_mapping *map)
+{
return 0;
}
@@ -203,6 +359,11 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
+{
+ return IRQ_HANDLED;
+}
+
static int backend_connect(struct xenbus_device *dev)
{
int err, evtchn;
--
1.9.1