/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	atomic_t eoi;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

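/*
 * Note on the data-ring layout (a reading of the code below, not part
 * of the original file): each active socket has one shared page of
 * indexes (struct pvcalls_data_intf) plus 2^ring_order data pages,
 * split evenly into an "in" (backend -> frontend) and an "out"
 * (frontend -> backend) buffer of XEN_FLEX_RING_SIZE(ring_order)
 * bytes each. The RING_IDX indexes are free running; pvcalls_mask()
 * reduces an index modulo the buffer size and pvcalls_queued()
 * returns the number of bytes between cons and prod. For example,
 * assuming the usual 4 KiB XEN_PAGE_SIZE and ring_order = 1,
 * XEN_FLEX_RING_SIZE(1) = 4096; with prod = 4100 and cons = 4090,
 * pvcalls_queued() = 10 and the bytes sit at offsets 4090..4095 and
 * 0..3, i.e. the wrap case handled by the two-kvec branches below.
 */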
static bool pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return false;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return false;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				flags);
		return true;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

	memset(&msg, 0, sizeof(msg));
	msg.msg_iter.type = ITER_KVEC|WRITE;
	msg.msg_iter.count = wanted;
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 1;
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 2;
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return true;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0) {
		atomic_set(&map->read, 0);
		intf->in_error = ret;
	} else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return true;
}

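/*
 * Drain the frontend's "out" buffer into the kernel socket with a
 * non-blocking sendmsg. Returns false only when there was nothing
 * queued, so the ioworker can distinguish a spurious event from real
 * work when it acks the event channel.
 */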
static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, array_size;
	int ret;

	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read the indexes before dealing with the data */
	virt_mb();

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	size = pvcalls_queued(prod, cons, array_size);
	if (size == 0)
		return false;

	memset(&msg, 0, sizeof(msg));
	msg.msg_flags |= MSG_DONTWAIT;
	msg.msg_iter.type = ITER_KVEC|READ;
	msg.msg_iter.count = size;
	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = size;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 1;
	} else {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
		vec[1].iov_base = data->out;
		vec[1].iov_len = size - vec[0].iov_len;
		msg.msg_iter.kvec = vec;
		msg.msg_iter.nr_segs = 2;
	}

	atomic_set(&map->write, 0);
	ret = inet_sendmsg(map->sock, &msg, size);
	if (ret == -EAGAIN) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
		return true;
	}

	/* write the data, then update the indexes */
	virt_wmb();
	if (ret < 0) {
		intf->out_error = ret;
	} else {
		intf->out_error = 0;
		intf->out_cons = cons + ret;
		prod = intf->out_prod;
	}
	/* update the indexes, then notify the other end */
	virt_wmb();
	if (prod != cons + ret) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	notify_remote_via_irq(map->irq);

	return true;
}

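/*
 * Per-connection bottom half, running on the "pvcalls_io" workqueue.
 * The atomic counters (read, write, io, release, eoi) are bumped by
 * the socket callbacks and the event channel handler; the loop keeps
 * going until all the I/O they requested has been processed.
 */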
static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0 &&
		    pvcalls_conn_back_read(map))
			eoi_flags = 0;
		if (atomic_read(&map->write) > 0 &&
		    pvcalls_conn_back_write(map))
			eoi_flags = 0;

		if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
			atomic_set(&map->eoi, 0);
			xen_irq_lateeoi(map->irq, eoi_flags);
			eoi_flags = XEN_EOI_FLAG_SPURIOUS;
		}

		atomic_dec(&map->io);
	}
}

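/*
 * Command handlers, invoked from pvcalls_back_work() with a private
 * copy of the request. Each handler queues exactly one response on
 * the command ring, except accept and poll, which may defer their
 * response and return nonzero to suppress the immediate notification.
 */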
static int pvcalls_back_socket(struct xenbus_device *dev,
		struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EOPNOTSUPP;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

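/*
 * Callbacks installed on active sockets under sk_callback_lock. They
 * run from the network stack, so they do no work inline: data_ready
 * kicks the ioworker, while state_change just flags a pending read
 * and notifies the frontend directly.
 */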
static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;

	if (map == NULL)
		return;

	atomic_inc(&map->read);
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}

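/*
 * Common setup for a connected socket (from both connect and accept):
 * map the indexes page, validate ring_order against MAX_RING_ORDER,
 * map the data ring, bind the per-connection event channel and only
 * then install the socket callbacks.
 */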
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		uint32_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			fedata->dev->otherend_id, evtchn,
			pvcalls_back_conn_event, 0, "pvcalls-backend", map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;

out:
	down(&fedata->socket_lock);
	list_del(&map->list);
	pvcalls_back_release_active(fedata->dev, fedata, map);
	up(&fedata->socket_lock);
	return NULL;
}

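/*
 * PVCALLS_CONNECT: active open. The connect itself is synchronous;
 * everything afterwards flows through the data ring created by
 * pvcalls_new_active_socket().
 */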
static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
	}

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}

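/*
 * Tear down an active socket: quiesce the ioworker via map->release,
 * then unmap the rings and unbind the event channel before releasing
 * the socket itself.
 */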
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	flush_workqueue(mappass->wq);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}

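/*
 * Deferred accept, running from the sockpass workqueue. The original
 * PVCALLS_ACCEPT request was copied into mappass->reqcopy by
 * pvcalls_back_accept(); reqcopy.cmd == 0 marks the slot free again
 * once the response has been queued.
 */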
static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		return;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}

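/*
 * PVCALLS_BIND: create the passive socket and its sockpass_mapping,
 * indexed by socket id in fedata->socketpass_mappings so that listen,
 * accept, poll and release can find it later.
 */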
static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = READ_ONCE(queue->rskq_accept_head) != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}

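/*
 * Dispatch one request to its handler. A nonzero return value means
 * the response will be pushed later (deferred accept/poll), so the
 * caller must not notify the frontend for this request.
 */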
static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

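/*
 * Main request loop, running from the command ring's threaded irq
 * handler. Requests are copied off the shared ring (RING_COPY_REQUEST)
 * before being handled, so the frontend cannot modify a request while
 * it is being processed.
 */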
static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}

static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	if (dev) {
		fedata = dev_get_drvdata(&dev->dev);
		if (fedata) {
			pvcalls_back_work(fedata);
			eoi_flags = 0;
		}
	}

	xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	struct sock_mapping *map = sock_map;
	struct pvcalls_ioworker *iow;

	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
		map->sock->sk->sk_user_data != map) {
		xen_irq_lateeoi(irq, 0);
		return IRQ_HANDLED;
	}

	iow = &map->ioworker;

	atomic_inc(&map->write);
	atomic_inc(&map->eoi);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

	return IRQ_HANDLED;
}

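/*
 * Connect to a new frontend: read the command ring grant reference
 * and event channel port from the frontend's xenstore directory, map
 * the command ring and bind a lateeoi threaded irq for it.
 */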
static int backend_connect(struct xenbus_device *dev)
{
	int err, evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/event-channel",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

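/*
 * Advertise the supported protocol versions, the maximum data ring
 * page order and the "function-calls" feature in the backend's
 * xenstore directory, all in one transaction, then enter InitWait.
 */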
static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}

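/*
 * Walk the xenbus state machine one step at a time until the target
 * state is reached; transitions that should never happen trigger a
 * WARN_ON(1).
 */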
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				if (backend_connect(dev))
					return;
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				WARN_ON(1);
			}
			break;
		default:
			WARN_ON(1);
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static int pvcalls_back_remove(struct xenbus_device *dev)
{
	return 0;
}

static int pvcalls_back_uevent(struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);

static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}
module_exit(pvcalls_back_fin);

MODULE_DESCRIPTION("Xen PV Calls backend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");