GNU Linux-libre 4.9.337-gnu1
drivers/net/xen-netfront.c
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47
48 #include <xen/xen.h>
49 #include <xen/xenbus.h>
50 #include <xen/events.h>
51 #include <xen/page.h>
52 #include <xen/platform_pci.h>
53 #include <xen/grant_table.h>
54
55 #include <xen/interface/io/netif.h>
56 #include <xen/interface/memory.h>
57 #include <xen/interface/grant_table.h>
58
59 /* Module parameters */
60 static unsigned int xennet_max_queues;
61 module_param_named(max_queues, xennet_max_queues, uint, 0644);
62 MODULE_PARM_DESC(max_queues,
63                  "Maximum number of queues per virtual interface");
64
65 static bool __read_mostly xennet_trusted = true;
66 module_param_named(trusted, xennet_trusted, bool, 0644);
67 MODULE_PARM_DESC(trusted, "Is the backend trusted");
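/*
 * Note: when the backend is not regarded as trusted, transmit data is
 * copied ("bounced") into zeroed, page-aligned buffers before being
 * granted to it, so unrelated memory sharing a page with an skb is never
 * exposed; see the 'bounce' flag and bounce_skb() below.  With mode 0644
 * the parameter is also visible under /sys/module/xen_netfront/parameters/.
 */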
68
69 #define XENNET_TIMEOUT  (5 * HZ)
70
71 static const struct ethtool_ops xennet_ethtool_ops;
72
73 struct netfront_cb {
74         int pull_to;
75 };
76
77 #define NETFRONT_SKB_CB(skb)    ((struct netfront_cb *)((skb)->cb))
78
79 #define RX_COPY_THRESHOLD 256
80
81 #define GRANT_INVALID_REF       0
82
83 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
84 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)
85
86 /* Minimum number of Rx slots (includes slot for GSO metadata). */
87 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
88
89 /* Queue name is interface name with "-qNNN" appended */
90 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
91
92 /* IRQ name is queue name with "-tx" or "-rx" appended */
93 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
94
95 static DECLARE_WAIT_QUEUE_HEAD(module_wq);
96
97 struct netfront_stats {
98         u64                     packets;
99         u64                     bytes;
100         struct u64_stats_sync   syncp;
101 };
102
103 struct netfront_info;
104
105 struct netfront_queue {
106         unsigned int id; /* Queue ID, 0-based */
107         char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
108         struct netfront_info *info;
109
110         struct napi_struct napi;
111
112         /* Split event channel support; tx_* == rx_* when using a
113          * single event channel.
114          */
115         unsigned int tx_evtchn, rx_evtchn;
116         unsigned int tx_irq, rx_irq;
117         /* Only used when split event channels support is enabled */
118         char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
119         char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
120
121         spinlock_t   tx_lock;
122         struct xen_netif_tx_front_ring tx;
123         int tx_ring_ref;
124
125         /*
126          * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
127          * are linked from tx_skb_freelist through tx_link.
128          */
129         struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
130         unsigned short tx_link[NET_TX_RING_SIZE];
131 #define TX_LINK_NONE 0xffff
132 #define TX_PENDING   0xfffe
133         grant_ref_t gref_tx_head;
134         grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
135         struct page *grant_tx_page[NET_TX_RING_SIZE];
136         unsigned tx_skb_freelist;
137         unsigned int tx_pend_queue;
138
139         spinlock_t   rx_lock ____cacheline_aligned_in_smp;
140         struct xen_netif_rx_front_ring rx;
141         int rx_ring_ref;
142
143         struct timer_list rx_refill_timer;
144
145         struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
146         grant_ref_t gref_rx_head;
147         grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
148
149         unsigned int rx_rsp_unconsumed;
150         spinlock_t rx_cons_lock;
151 };
152
153 struct netfront_info {
154         struct list_head list;
155         struct net_device *netdev;
156
157         struct xenbus_device *xbdev;
158
159         /* Multi-queue support */
160         struct netfront_queue *queues;
161
162         /* Statistics */
163         struct netfront_stats __percpu *rx_stats;
164         struct netfront_stats __percpu *tx_stats;
165
166         /* Is the device behaving sanely? */
167         bool broken;
168
169         /* Should skbs be bounced into a zeroed buffer? */
170         bool bounce;
171
172         atomic_t rx_gso_checksum_fixup;
173 };
174
175 struct netfront_rx_info {
176         struct xen_netif_rx_response rx;
177         struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
178 };
179
180 /*
181  * Access helpers for acquiring and freeing slots in tx_skbs[].
182  */
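/*
 * tx_link[] provides linkage for two chains through the tx slots: free ids
 * are chained from tx_skb_freelist (terminated by TX_LINK_NONE), and ids
 * whose request has been written but not yet pushed are chained from
 * tx_pend_queue.  Just before the producer index is raised they are marked
 * TX_PENDING (see xennet_mark_tx_pending()).
 */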
183
184 static void add_id_to_list(unsigned *head, unsigned short *list,
185                            unsigned short id)
186 {
187         list[id] = *head;
188         *head = id;
189 }
190
191 static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
192 {
193         unsigned int id = *head;
194
195         if (id != TX_LINK_NONE) {
196                 *head = list[id];
197                 list[id] = TX_LINK_NONE;
198         }
199         return id;
200 }
201
202 static int xennet_rxidx(RING_IDX idx)
203 {
204         return idx & (NET_RX_RING_SIZE - 1);
205 }
206
207 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
208                                          RING_IDX ri)
209 {
210         int i = xennet_rxidx(ri);
211         struct sk_buff *skb = queue->rx_skbs[i];
212         queue->rx_skbs[i] = NULL;
213         return skb;
214 }
215
216 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
217                                             RING_IDX ri)
218 {
219         int i = xennet_rxidx(ri);
220         grant_ref_t ref = queue->grant_rx_ref[i];
221         queue->grant_rx_ref[i] = GRANT_INVALID_REF;
222         return ref;
223 }
224
225 #ifdef CONFIG_SYSFS
226 static const struct attribute_group xennet_dev_group;
227 #endif
228
229 static bool xennet_can_sg(struct net_device *dev)
230 {
231         return dev->features & NETIF_F_SG;
232 }
233
234
235 static void rx_refill_timeout(unsigned long data)
236 {
237         struct netfront_queue *queue = (struct netfront_queue *)data;
238         napi_schedule(&queue->napi);
239 }
240
241 static int netfront_tx_slot_available(struct netfront_queue *queue)
242 {
243         return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
244                 (NET_TX_RING_SIZE - XEN_NETIF_NR_SLOTS_MIN - 1);
245 }
246
247 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
248 {
249         struct net_device *dev = queue->info->netdev;
250         struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
251
252         if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
253             netfront_tx_slot_available(queue) &&
254             likely(netif_running(dev)))
255                 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
256 }
257
258
259 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
260 {
261         struct sk_buff *skb;
262         struct page *page;
263
264         skb = __netdev_alloc_skb(queue->info->netdev,
265                                  RX_COPY_THRESHOLD + NET_IP_ALIGN,
266                                  GFP_ATOMIC | __GFP_NOWARN);
267         if (unlikely(!skb))
268                 return NULL;
269
270         page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | __GFP_ZERO);
271         if (!page) {
272                 kfree_skb(skb);
273                 return NULL;
274         }
275         skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
276
277         /* Align IP header to a 16-byte boundary */
278         skb_reserve(skb, NET_IP_ALIGN);
279         skb->dev = queue->info->netdev;
280
281         return skb;
282 }
283
284
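/*
 * Refill the rx ring: allocate skbs backed by zeroed pages, grant each page
 * to the backend and advance req_prod_pvt.  On allocation failure, or if
 * fewer than NET_RX_SLOTS_MIN slots are outstanding, the rx_refill_timer is
 * re-armed and the refill is retried later; otherwise the new requests are
 * pushed and the backend is notified through the rx event channel.
 */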
285 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
286 {
287         RING_IDX req_prod = queue->rx.req_prod_pvt;
288         int notify;
289         int err = 0;
290
291         if (unlikely(!netif_carrier_ok(queue->info->netdev)))
292                 return;
293
294         for (req_prod = queue->rx.req_prod_pvt;
295              req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
296              req_prod++) {
297                 struct sk_buff *skb;
298                 unsigned short id;
299                 grant_ref_t ref;
300                 struct page *page;
301                 struct xen_netif_rx_request *req;
302
303                 skb = xennet_alloc_one_rx_buffer(queue);
304                 if (!skb) {
305                         err = -ENOMEM;
306                         break;
307                 }
308
309                 id = xennet_rxidx(req_prod);
310
311                 BUG_ON(queue->rx_skbs[id]);
312                 queue->rx_skbs[id] = skb;
313
314                 ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
315                 WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
316                 queue->grant_rx_ref[id] = ref;
317
318                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
319
320                 req = RING_GET_REQUEST(&queue->rx, req_prod);
321                 gnttab_page_grant_foreign_access_ref_one(ref,
322                                                          queue->info->xbdev->otherend_id,
323                                                          page,
324                                                          0);
325                 req->id = id;
326                 req->gref = ref;
327         }
328
329         queue->rx.req_prod_pvt = req_prod;
330
331         /* Try again later if there are not enough requests or skb allocation
332          * failed.
333          * "Enough requests" means the sum of newly created slots and the
334          * slots still unconsumed by the backend.
335          */
336         if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN ||
337             unlikely(err)) {
338                 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
339                 return;
340         }
341
342         wmb();          /* barrier so backend sees requests */
343
344         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
345         if (notify)
346                 notify_remote_via_irq(queue->rx_irq);
347 }
348
349 static int xennet_open(struct net_device *dev)
350 {
351         struct netfront_info *np = netdev_priv(dev);
352         unsigned int num_queues = dev->real_num_tx_queues;
353         unsigned int i = 0;
354         struct netfront_queue *queue = NULL;
355
356         if (!np->queues || np->broken)
357                 return -ENODEV;
358
359         for (i = 0; i < num_queues; ++i) {
360                 queue = &np->queues[i];
361                 napi_enable(&queue->napi);
362
363                 spin_lock_bh(&queue->rx_lock);
364                 if (netif_carrier_ok(dev)) {
365                         xennet_alloc_rx_buffers(queue);
366                         queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
367                         if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
368                                 napi_schedule(&queue->napi);
369                 }
370                 spin_unlock_bh(&queue->rx_lock);
371         }
372
373         netif_tx_start_all_queues(dev);
374
375         return 0;
376 }
377
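/*
 * Reclaim completed tx slots: consume the backend's responses, end foreign
 * access on each grant, return the grant reference and the slot id to the
 * freelist and free the skb.  Malformed responses (producer overflow, bad
 * id, response for an inactive request, grant still in use) mark the whole
 * device as broken.  Returns true if any work was done.
 */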
378 static bool xennet_tx_buf_gc(struct netfront_queue *queue)
379 {
380         RING_IDX cons, prod;
381         unsigned short id;
382         struct sk_buff *skb;
383         bool more_to_do;
384         bool work_done = false;
385         const struct device *dev = &queue->info->netdev->dev;
386
387         BUG_ON(!netif_carrier_ok(queue->info->netdev));
388
389         do {
390                 prod = queue->tx.sring->rsp_prod;
391                 if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
392                         dev_alert(dev, "Illegal number of responses %u\n",
393                                   prod - queue->tx.rsp_cons);
394                         goto err;
395                 }
396                 rmb(); /* Ensure we see responses up to 'rp'. */
397
398                 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
399                         struct xen_netif_tx_response txrsp;
400
401                         work_done = true;
402
403                         RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
404                         if (txrsp.status == XEN_NETIF_RSP_NULL)
405                                 continue;
406
407                         id = txrsp.id;
408                         if (id >= RING_SIZE(&queue->tx)) {
409                                 dev_alert(dev,
410                                           "Response has incorrect id (%u)\n",
411                                           id);
412                                 goto err;
413                         }
414                         if (queue->tx_link[id] != TX_PENDING) {
415                                 dev_alert(dev,
416                                           "Response for inactive request\n");
417                                 goto err;
418                         }
419
420                         queue->tx_link[id] = TX_LINK_NONE;
421                         skb = queue->tx_skbs[id];
422                         queue->tx_skbs[id] = NULL;
423                         if (unlikely(!gnttab_end_foreign_access_ref(
424                                 queue->grant_tx_ref[id], GNTMAP_readonly))) {
425                                 dev_alert(dev,
426                                           "Grant still in use by backend domain\n");
427                                 goto err;
428                         }
429                         gnttab_release_grant_reference(
430                                 &queue->gref_tx_head, queue->grant_tx_ref[id]);
431                         queue->grant_tx_ref[id] = GRANT_INVALID_REF;
432                         queue->grant_tx_page[id] = NULL;
433                         add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
434                         dev_kfree_skb_irq(skb);
435                 }
436
437                 queue->tx.rsp_cons = prod;
438
439                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->tx, more_to_do);
440         } while (more_to_do);
441
442         xennet_maybe_wake_tx(queue);
443
444         return work_done;
445
446  err:
447         queue->info->broken = true;
448         dev_alert(dev, "Disabled for further use\n");
449
450         return work_done;
451 }
452
453 struct xennet_gnttab_make_txreq {
454         struct netfront_queue *queue;
455         struct sk_buff *skb;
456         struct page *page;
457         struct xen_netif_tx_request *tx;      /* Last request on ring page */
458         struct xen_netif_tx_request tx_local; /* Last request local copy */
459         unsigned int size;
460 };
461
462 static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
463                                   unsigned int len, void *data)
464 {
465         struct xennet_gnttab_make_txreq *info = data;
466         unsigned int id;
467         struct xen_netif_tx_request *tx;
468         grant_ref_t ref;
469         /* convenient aliases */
470         struct page *page = info->page;
471         struct netfront_queue *queue = info->queue;
472         struct sk_buff *skb = info->skb;
473
474         id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
475         tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
476         ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
477         WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
478
479         gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
480                                         gfn, GNTMAP_readonly);
481
482         queue->tx_skbs[id] = skb;
483         queue->grant_tx_page[id] = page;
484         queue->grant_tx_ref[id] = ref;
485
486         info->tx_local.id = id;
487         info->tx_local.gref = ref;
488         info->tx_local.offset = offset;
489         info->tx_local.size = len;
490         info->tx_local.flags = 0;
491
492         *tx = info->tx_local;
493
494         /*
495          * Put the request on the pending queue; it will be marked as pending
496          * when the producer index is about to be raised.
497          */
498         add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
499
500         info->tx = tx;
501         info->size += info->tx_local.size;
502 }
503
504 static struct xen_netif_tx_request *xennet_make_first_txreq(
505         struct xennet_gnttab_make_txreq *info,
506         unsigned int offset, unsigned int len)
507 {
508         info->size = 0;
509
510         gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
511
512         return info->tx;
513 }
514
515 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
516                                   unsigned int len, void *data)
517 {
518         struct xennet_gnttab_make_txreq *info = data;
519
520         info->tx->flags |= XEN_NETTXF_more_data;
521         skb_get(info->skb);
522         xennet_tx_setup_grant(gfn, offset, len, data);
523 }
524
525 static void xennet_make_txreqs(
526         struct xennet_gnttab_make_txreq *info,
527         struct page *page,
528         unsigned int offset, unsigned int len)
529 {
530         /* Skip unused frames from start of page */
531         page += offset >> PAGE_SHIFT;
532         offset &= ~PAGE_MASK;
533
534         while (len) {
535                 info->page = page;
536                 info->size = 0;
537
538                 gnttab_foreach_grant_in_range(page, offset, len,
539                                               xennet_make_one_txreq,
540                                               info);
541
542                 page++;
543                 offset = 0;
544                 len -= info->size;
545         }
546 }
547
548 /*
549  * Count how many ring slots are required to send this skb. Each frag
550  * might be a compound page.
551  */
552 static int xennet_count_skb_slots(struct sk_buff *skb)
553 {
554         int i, frags = skb_shinfo(skb)->nr_frags;
555         int slots;
556
557         slots = gnttab_count_grant(offset_in_page(skb->data),
558                                    skb_headlen(skb));
559
560         for (i = 0; i < frags; i++) {
561                 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
562                 unsigned long size = skb_frag_size(frag);
563                 unsigned long offset = frag->page_offset;
564
565                 /* Skip unused frames from start of page */
566                 offset &= ~PAGE_MASK;
567
568                 slots += gnttab_count_grant(offset, size);
569         }
570
571         return slots;
572 }
573
574 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
575                                void *accel_priv, select_queue_fallback_t fallback)
576 {
577         unsigned int num_queues = dev->real_num_tx_queues;
578         u32 hash;
579         u16 queue_idx;
580
581         /* First, check if there is only one queue */
582         if (num_queues == 1) {
583                 queue_idx = 0;
584         } else {
585                 hash = skb_get_hash(skb);
586                 queue_idx = hash % num_queues;
587         }
588
589         return queue_idx;
590 }
591
592 static void xennet_mark_tx_pending(struct netfront_queue *queue)
593 {
594         unsigned int i;
595
596         while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
597                 TX_LINK_NONE)
598                 queue->tx_link[i] = TX_PENDING;
599 }
600
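/*
 * Copy an skb into a freshly allocated, zeroed, page-aligned buffer so the
 * pages granted to the backend never carry unrelated data.  Used when the
 * backend is untrusted, or when fewer than ETH_HLEN bytes of the linear
 * area fit in its first page (see xennet_start_xmit() below).
 */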
601 static struct sk_buff *bounce_skb(const struct sk_buff *skb)
602 {
603         unsigned int headerlen = skb_headroom(skb);
604         /* Align size to allocate full pages and avoid contiguous data leaks */
605         unsigned int size = ALIGN(skb_end_offset(skb) + skb->data_len,
606                                   XEN_PAGE_SIZE);
607         struct sk_buff *n = alloc_skb(size, GFP_ATOMIC | __GFP_ZERO);
608
609         if (!n)
610                 return NULL;
611
612         if (!IS_ALIGNED((uintptr_t)n->head, XEN_PAGE_SIZE)) {
613                 WARN_ONCE(1, "misaligned skb allocated\n");
614                 kfree_skb(n);
615                 return NULL;
616         }
617
618         /* Set the data pointer */
619         skb_reserve(n, headerlen);
620         /* Set the tail pointer and length */
621         skb_put(n, skb->len);
622
623         BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
624
625         skb_copy_header(n, skb);
626         return n;
627 }
628
629 #define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
630
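/*
 * Transmit path: map the linear area and all frags onto tx ring slots via
 * grant references, add an optional GSO extra-info slot, then push the
 * requests and notify the backend.  Skbs needing more than
 * MAX_XEN_SKB_FRAGS + 1 slots are linearized first; packets too large for
 * the wire format are dropped.
 */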
631 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
632 {
633         struct netfront_info *np = netdev_priv(dev);
634         struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
635         struct xen_netif_tx_request *first_tx;
636         unsigned int i;
637         int notify;
638         int slots;
639         struct page *page;
640         unsigned int offset;
641         unsigned int len;
642         unsigned long flags;
643         struct netfront_queue *queue = NULL;
644         struct xennet_gnttab_make_txreq info = { };
645         unsigned int num_queues = dev->real_num_tx_queues;
646         u16 queue_index;
647         struct sk_buff *nskb;
648
649         /* Drop the packet if no queues are set up */
650         if (num_queues < 1)
651                 goto drop;
652         if (unlikely(np->broken))
653                 goto drop;
654         /* Determine which queue to transmit this SKB on */
655         queue_index = skb_get_queue_mapping(skb);
656         queue = &np->queues[queue_index];
657
658         /* If skb->len is too big for wire format, drop skb and alert
659          * user about misconfiguration.
660          */
661         if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
662                 net_alert_ratelimited(
663                         "xennet: skb->len = %u, too big for wire format\n",
664                         skb->len);
665                 goto drop;
666         }
667
668         slots = xennet_count_skb_slots(skb);
669         if (unlikely(slots > MAX_XEN_SKB_FRAGS + 1)) {
670                 net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
671                                     slots, skb->len);
672                 if (skb_linearize(skb))
673                         goto drop;
674         }
675
676         page = virt_to_page(skb->data);
677         offset = offset_in_page(skb->data);
678
679         /* The first req should be at least ETH_HLEN size or the packet will be
680          * dropped by netback.
681          *
682          * If the backend is not trusted bounce all data to zeroed pages to
683          * avoid exposing contiguous data on the granted page not belonging to
684          * the skb.
685          */
686         if (np->bounce || unlikely(PAGE_SIZE - offset < ETH_HLEN)) {
687                 nskb = bounce_skb(skb);
688                 if (!nskb)
689                         goto drop;
690                 dev_kfree_skb_any(skb);
691                 skb = nskb;
692                 page = virt_to_page(skb->data);
693                 offset = offset_in_page(skb->data);
694         }
695
696         len = skb_headlen(skb);
697
698         spin_lock_irqsave(&queue->tx_lock, flags);
699
700         if (unlikely(!netif_carrier_ok(dev) ||
701                      (slots > 1 && !xennet_can_sg(dev)) ||
702                      netif_needs_gso(skb, netif_skb_features(skb)))) {
703                 spin_unlock_irqrestore(&queue->tx_lock, flags);
704                 goto drop;
705         }
706
707         /* First request for the linear area. */
708         info.queue = queue;
709         info.skb = skb;
710         info.page = page;
711         first_tx = xennet_make_first_txreq(&info, offset, len);
712         offset += info.tx_local.size;
713         if (offset == PAGE_SIZE) {
714                 page++;
715                 offset = 0;
716         }
717         len -= info.tx_local.size;
718
719         if (skb->ip_summed == CHECKSUM_PARTIAL)
720                 /* local packet? */
721                 first_tx->flags |= XEN_NETTXF_csum_blank |
722                                    XEN_NETTXF_data_validated;
723         else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
724                 /* remote but checksummed. */
725                 first_tx->flags |= XEN_NETTXF_data_validated;
726
727         /* Optional extra info after the first request. */
728         if (skb_shinfo(skb)->gso_size) {
729                 struct xen_netif_extra_info *gso;
730
731                 gso = (struct xen_netif_extra_info *)
732                         RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
733
734                 first_tx->flags |= XEN_NETTXF_extra_info;
735
736                 gso->u.gso.size = skb_shinfo(skb)->gso_size;
737                 gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
738                         XEN_NETIF_GSO_TYPE_TCPV6 :
739                         XEN_NETIF_GSO_TYPE_TCPV4;
740                 gso->u.gso.pad = 0;
741                 gso->u.gso.features = 0;
742
743                 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
744                 gso->flags = 0;
745         }
746
747         /* Requests for the rest of the linear area. */
748         xennet_make_txreqs(&info, page, offset, len);
749
750         /* Requests for all the frags. */
751         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
752                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
753                 xennet_make_txreqs(&info, skb_frag_page(frag),
754                                         frag->page_offset,
755                                         skb_frag_size(frag));
756         }
757
758         /* First request has the packet length. */
759         first_tx->size = skb->len;
760
761         xennet_mark_tx_pending(queue);
762
763         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
764         if (notify)
765                 notify_remote_via_irq(queue->tx_irq);
766
767         u64_stats_update_begin(&tx_stats->syncp);
768         tx_stats->bytes += skb->len;
769         tx_stats->packets++;
770         u64_stats_update_end(&tx_stats->syncp);
771
772         /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
773         xennet_tx_buf_gc(queue);
774
775         if (!netfront_tx_slot_available(queue))
776                 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
777
778         spin_unlock_irqrestore(&queue->tx_lock, flags);
779
780         return NETDEV_TX_OK;
781
782  drop:
783         dev->stats.tx_dropped++;
784         dev_kfree_skb_any(skb);
785         return NETDEV_TX_OK;
786 }
787
788 static int xennet_close(struct net_device *dev)
789 {
790         struct netfront_info *np = netdev_priv(dev);
791         unsigned int num_queues = dev->real_num_tx_queues;
792         unsigned int i;
793         struct netfront_queue *queue;
794         netif_tx_stop_all_queues(np->netdev);
795         for (i = 0; i < num_queues; ++i) {
796                 queue = &np->queues[i];
797                 napi_disable(&queue->napi);
798         }
799         return 0;
800 }
801
802 static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
803 {
804         unsigned long flags;
805
806         spin_lock_irqsave(&queue->rx_cons_lock, flags);
807         queue->rx.rsp_cons = val;
808         queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
809         spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
810 }
811
812 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
813                                 grant_ref_t ref)
814 {
815         int new = xennet_rxidx(queue->rx.req_prod_pvt);
816
817         BUG_ON(queue->rx_skbs[new]);
818         queue->rx_skbs[new] = skb;
819         queue->grant_rx_ref[new] = ref;
820         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
821         RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
822         queue->rx.req_prod_pvt++;
823 }
824
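/*
 * Consume the chain of extra-info slots that follows an rx response,
 * copying each recognised extra into extras[] and recycling the
 * corresponding rx slot back onto the ring.  Advances rx.rsp_cons past the
 * consumed slots.
 */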
825 static int xennet_get_extras(struct netfront_queue *queue,
826                              struct xen_netif_extra_info *extras,
827                              RING_IDX rp)
828
829 {
830         struct xen_netif_extra_info extra;
831         struct device *dev = &queue->info->netdev->dev;
832         RING_IDX cons = queue->rx.rsp_cons;
833         int err = 0;
834
835         do {
836                 struct sk_buff *skb;
837                 grant_ref_t ref;
838
839                 if (unlikely(cons + 1 == rp)) {
840                         if (net_ratelimit())
841                                 dev_warn(dev, "Missing extra info\n");
842                         err = -EBADR;
843                         break;
844                 }
845
846                 RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
847
848                 if (unlikely(!extra.type ||
849                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
850                         if (net_ratelimit())
851                                 dev_warn(dev, "Invalid extra type: %d\n",
852                                          extra.type);
853                         err = -EINVAL;
854                 } else {
855                         extras[extra.type - 1] = extra;
856                 }
857
858                 skb = xennet_get_rx_skb(queue, cons);
859                 ref = xennet_get_rx_ref(queue, cons);
860                 xennet_move_rx_slot(queue, skb, ref);
861         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
862
863         xennet_set_rx_rsp_cons(queue, cons);
864         return err;
865 }
866
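/*
 * Collect all responses belonging to one received packet.  Valid slots have
 * their grant access ended and their skbs appended to 'list'; slots with a
 * bad offset/size are recycled onto the ring.  A grant still held by the
 * backend marks the device as broken.  On other errors rsp_cons is advanced
 * past the consumed slots so the ring stays consistent.
 */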
867 static int xennet_get_responses(struct netfront_queue *queue,
868                                 struct netfront_rx_info *rinfo, RING_IDX rp,
869                                 struct sk_buff_head *list)
870 {
871         struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
872         struct xen_netif_extra_info *extras = rinfo->extras;
873         struct device *dev = &queue->info->netdev->dev;
874         RING_IDX cons = queue->rx.rsp_cons;
875         struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
876         grant_ref_t ref = xennet_get_rx_ref(queue, cons);
877         int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
878         int slots = 1;
879         int err = 0;
880
881         if (rx->flags & XEN_NETRXF_extra_info) {
882                 err = xennet_get_extras(queue, extras, rp);
883                 cons = queue->rx.rsp_cons;
884         }
885
886         for (;;) {
887                 if (unlikely(rx->status < 0 ||
888                              rx->offset + rx->status > XEN_PAGE_SIZE)) {
889                         if (net_ratelimit())
890                                 dev_warn(dev, "rx->offset: %u, size: %d\n",
891                                          rx->offset, rx->status);
892                         xennet_move_rx_slot(queue, skb, ref);
893                         err = -EINVAL;
894                         goto next;
895                 }
896
897                 /*
898                  * This definitely indicates a bug, either in this driver or in
899                  * the backend driver. In the future this should flag the bad
900                  * situation to the system controller to reboot the backend.
901                  */
902                 if (ref == GRANT_INVALID_REF) {
903                         if (net_ratelimit())
904                                 dev_warn(dev, "Bad rx response id %d.\n",
905                                          rx->id);
906                         err = -EINVAL;
907                         goto next;
908                 }
909
910                 if (!gnttab_end_foreign_access_ref(ref, 0)) {
911                         dev_alert(dev,
912                                   "Grant still in use by backend domain\n");
913                         queue->info->broken = true;
914                         dev_alert(dev, "Disabled for further use\n");
915                         return -EINVAL;
916                 }
917
918                 gnttab_release_grant_reference(&queue->gref_rx_head, ref);
919
920                 __skb_queue_tail(list, skb);
921
922 next:
923                 if (!(rx->flags & XEN_NETRXF_more_data))
924                         break;
925
926                 if (cons + slots == rp) {
927                         if (net_ratelimit())
928                                 dev_warn(dev, "Need more slots\n");
929                         err = -ENOENT;
930                         break;
931                 }
932
933                 RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
934                 rx = &rx_local;
935                 skb = xennet_get_rx_skb(queue, cons + slots);
936                 ref = xennet_get_rx_ref(queue, cons + slots);
937                 slots++;
938         }
939
940         if (unlikely(slots > max)) {
941                 if (net_ratelimit())
942                         dev_warn(dev, "Too many slots\n");
943                 err = -E2BIG;
944         }
945
946         if (unlikely(err))
947                 xennet_set_rx_rsp_cons(queue, cons + slots);
948
949         return err;
950 }
951
952 static int xennet_set_skb_gso(struct sk_buff *skb,
953                               struct xen_netif_extra_info *gso)
954 {
955         if (!gso->u.gso.size) {
956                 if (net_ratelimit())
957                         pr_warn("GSO size must not be zero\n");
958                 return -EINVAL;
959         }
960
961         if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
962             gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
963                 if (net_ratelimit())
964                         pr_warn("Bad GSO type %d\n", gso->u.gso.type);
965                 return -EINVAL;
966         }
967
968         skb_shinfo(skb)->gso_size = gso->u.gso.size;
969         skb_shinfo(skb)->gso_type =
970                 (gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
971                 SKB_GSO_TCPV4 :
972                 SKB_GSO_TCPV6;
973
974         /* Header must be checked, and gso_segs computed. */
975         skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
976         skb_shinfo(skb)->gso_segs = 0;
977
978         return 0;
979 }
980
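/*
 * Attach the fragment skbs gathered by xennet_get_responses() to 'skb' as
 * page frags, first pulling data into the linear area when the frag array
 * is already full.  Fails with -ENOENT if MAX_SKB_FRAGS would still be
 * exceeded.
 */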
981 static int xennet_fill_frags(struct netfront_queue *queue,
982                              struct sk_buff *skb,
983                              struct sk_buff_head *list)
984 {
985         RING_IDX cons = queue->rx.rsp_cons;
986         struct sk_buff *nskb;
987
988         while ((nskb = __skb_dequeue(list))) {
989                 struct xen_netif_rx_response rx;
990                 skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
991
992                 RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
993
994                 if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
995                         unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
996
997                         BUG_ON(pull_to < skb_headlen(skb));
998                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
999                 }
1000                 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
1001                         xennet_set_rx_rsp_cons(queue,
1002                                                ++cons + skb_queue_len(list));
1003                         kfree_skb(nskb);
1004                         return -ENOENT;
1005                 }
1006
1007                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
1008                                 skb_frag_page(nfrag),
1009                                 rx.offset, rx.status, PAGE_SIZE);
1010
1011                 skb_shinfo(nskb)->nr_frags = 0;
1012                 kfree_skb(nskb);
1013         }
1014
1015         xennet_set_rx_rsp_cons(queue, cons);
1016
1017         return 0;
1018 }
1019
1020 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
1021 {
1022         bool recalculate_partial_csum = false;
1023
1024         /*
1025          * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1026          * peers can fail to set NETRXF_csum_blank when sending a GSO
1027          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1028          * recalculate the partial checksum.
1029          */
1030         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1031                 struct netfront_info *np = netdev_priv(dev);
1032                 atomic_inc(&np->rx_gso_checksum_fixup);
1033                 skb->ip_summed = CHECKSUM_PARTIAL;
1034                 recalculate_partial_csum = true;
1035         }
1036
1037         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1038         if (skb->ip_summed != CHECKSUM_PARTIAL)
1039                 return 0;
1040
1041         return skb_checksum_setup(skb, recalculate_partial_csum);
1042 }
1043
1044 static int handle_incoming_queue(struct netfront_queue *queue,
1045                                  struct sk_buff_head *rxq)
1046 {
1047         struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
1048         int packets_dropped = 0;
1049         struct sk_buff *skb;
1050
1051         while ((skb = __skb_dequeue(rxq)) != NULL) {
1052                 int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
1053
1054                 if (pull_to > skb_headlen(skb))
1055                         __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
1056
1057                 /* Ethernet work: Delayed to here as it peeks the header. */
1058                 skb->protocol = eth_type_trans(skb, queue->info->netdev);
1059                 skb_reset_network_header(skb);
1060
1061                 if (checksum_setup(queue->info->netdev, skb)) {
1062                         kfree_skb(skb);
1063                         packets_dropped++;
1064                         queue->info->netdev->stats.rx_errors++;
1065                         continue;
1066                 }
1067
1068                 u64_stats_update_begin(&rx_stats->syncp);
1069                 rx_stats->packets++;
1070                 rx_stats->bytes += skb->len;
1071                 u64_stats_update_end(&rx_stats->syncp);
1072
1073                 /* Pass it up. */
1074                 napi_gro_receive(&queue->napi, skb);
1075         }
1076
1077         return packets_dropped;
1078 }
1079
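/*
 * NAPI poll handler: consume up to 'budget' received packets.  For each
 * packet the response chain is gathered, GSO metadata applied, frags and
 * checksum hints attached, and the finished skbs are handed to
 * handle_incoming_queue().  The rx ring is refilled afterwards and the
 * NAPI instance is re-scheduled if more responses are pending.
 */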
1080 static int xennet_poll(struct napi_struct *napi, int budget)
1081 {
1082         struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
1083         struct net_device *dev = queue->info->netdev;
1084         struct sk_buff *skb;
1085         struct netfront_rx_info rinfo;
1086         struct xen_netif_rx_response *rx = &rinfo.rx;
1087         struct xen_netif_extra_info *extras = rinfo.extras;
1088         RING_IDX i, rp;
1089         int work_done;
1090         struct sk_buff_head rxq;
1091         struct sk_buff_head errq;
1092         struct sk_buff_head tmpq;
1093         int err;
1094
1095         spin_lock(&queue->rx_lock);
1096
1097         skb_queue_head_init(&rxq);
1098         skb_queue_head_init(&errq);
1099         skb_queue_head_init(&tmpq);
1100
1101         rp = queue->rx.sring->rsp_prod;
1102         if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
1103                 dev_alert(&dev->dev, "Illegal number of responses %u\n",
1104                           rp - queue->rx.rsp_cons);
1105                 queue->info->broken = true;
1106                 spin_unlock(&queue->rx_lock);
1107                 return 0;
1108         }
1109         rmb(); /* Ensure we see queued responses up to 'rp'. */
1110
1111         i = queue->rx.rsp_cons;
1112         work_done = 0;
1113         while ((i != rp) && (work_done < budget)) {
1114                 RING_COPY_RESPONSE(&queue->rx, i, rx);
1115                 memset(extras, 0, sizeof(rinfo.extras));
1116
1117                 err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
1118
1119                 if (unlikely(err)) {
1120                         if (queue->info->broken) {
1121                                 spin_unlock(&queue->rx_lock);
1122                                 return 0;
1123                         }
1124 err:
1125                         while ((skb = __skb_dequeue(&tmpq)))
1126                                 __skb_queue_tail(&errq, skb);
1127                         dev->stats.rx_errors++;
1128                         i = queue->rx.rsp_cons;
1129                         continue;
1130                 }
1131
1132                 skb = __skb_dequeue(&tmpq);
1133
1134                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1135                         struct xen_netif_extra_info *gso;
1136                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1137
1138                         if (unlikely(xennet_set_skb_gso(skb, gso))) {
1139                                 __skb_queue_head(&tmpq, skb);
1140                                 xennet_set_rx_rsp_cons(queue,
1141                                                        queue->rx.rsp_cons +
1142                                                        skb_queue_len(&tmpq));
1143                                 goto err;
1144                         }
1145                 }
1146
1147                 NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1148                 if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1149                         NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1150
1151                 skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1152                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1153                 skb->data_len = rx->status;
1154                 skb->len += rx->status;
1155
1156                 if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
1157                         goto err;
1158
1159                 if (rx->flags & XEN_NETRXF_csum_blank)
1160                         skb->ip_summed = CHECKSUM_PARTIAL;
1161                 else if (rx->flags & XEN_NETRXF_data_validated)
1162                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1163
1164                 __skb_queue_tail(&rxq, skb);
1165
1166                 i = queue->rx.rsp_cons + 1;
1167                 xennet_set_rx_rsp_cons(queue, i);
1168                 work_done++;
1169         }
1170
1171         __skb_queue_purge(&errq);
1172
1173         work_done -= handle_incoming_queue(queue, &rxq);
1174
1175         xennet_alloc_rx_buffers(queue);
1176
1177         if (work_done < budget) {
1178                 int more_to_do = 0;
1179
1180                 napi_complete(napi);
1181
1182                 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1183                 if (more_to_do)
1184                         napi_schedule(napi);
1185         }
1186
1187         spin_unlock(&queue->rx_lock);
1188
1189         return work_done;
1190 }
1191
1192 static int xennet_change_mtu(struct net_device *dev, int mtu)
1193 {
1194         int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1195
1196         if (mtu > max)
1197                 return -EINVAL;
1198         dev->mtu = mtu;
1199         return 0;
1200 }
1201
1202 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1203                                                     struct rtnl_link_stats64 *tot)
1204 {
1205         struct netfront_info *np = netdev_priv(dev);
1206         int cpu;
1207
1208         for_each_possible_cpu(cpu) {
1209                 struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1210                 struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1211                 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1212                 unsigned int start;
1213
1214                 do {
1215                         start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1216                         tx_packets = tx_stats->packets;
1217                         tx_bytes = tx_stats->bytes;
1218                 } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1219
1220                 do {
1221                         start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1222                         rx_packets = rx_stats->packets;
1223                         rx_bytes = rx_stats->bytes;
1224                 } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1225
1226                 tot->rx_packets += rx_packets;
1227                 tot->tx_packets += tx_packets;
1228                 tot->rx_bytes   += rx_bytes;
1229                 tot->tx_bytes   += tx_bytes;
1230         }
1231
1232         tot->rx_errors  = dev->stats.rx_errors;
1233         tot->tx_dropped = dev->stats.tx_dropped;
1234
1235         return tot;
1236 }
1237
1238 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1239 {
1240         struct sk_buff *skb;
1241         int i;
1242
1243         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1244                 /* Skip free slots (NULL entries; the freelist itself lives in tx_link[]) */
1245                 if (!queue->tx_skbs[i])
1246                         continue;
1247
1248                 skb = queue->tx_skbs[i];
1249                 queue->tx_skbs[i] = NULL;
1250                 get_page(queue->grant_tx_page[i]);
1251                 gnttab_end_foreign_access(queue->grant_tx_ref[i],
1252                                           GNTMAP_readonly,
1253                                           (unsigned long)page_address(queue->grant_tx_page[i]));
1254                 queue->grant_tx_page[i] = NULL;
1255                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1256                 add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
1257                 dev_kfree_skb_irq(skb);
1258         }
1259 }
1260
1261 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1262 {
1263         int id, ref;
1264
1265         spin_lock_bh(&queue->rx_lock);
1266
1267         for (id = 0; id < NET_RX_RING_SIZE; id++) {
1268                 struct sk_buff *skb;
1269                 struct page *page;
1270
1271                 skb = queue->rx_skbs[id];
1272                 if (!skb)
1273                         continue;
1274
1275                 ref = queue->grant_rx_ref[id];
1276                 if (ref == GRANT_INVALID_REF)
1277                         continue;
1278
1279                 page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1280
1281                 /* gnttab_end_foreign_access() needs a page ref until
1282                  * foreign access is ended (which may be deferred).
1283                  */
1284                 get_page(page);
1285                 gnttab_end_foreign_access(ref, 0,
1286                                           (unsigned long)page_address(page));
1287                 queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1288
1289                 kfree_skb(skb);
1290         }
1291
1292         spin_unlock_bh(&queue->rx_lock);
1293 }
1294
1295 static netdev_features_t xennet_fix_features(struct net_device *dev,
1296         netdev_features_t features)
1297 {
1298         struct netfront_info *np = netdev_priv(dev);
1299         int val;
1300
1301         if (features & NETIF_F_SG) {
1302                 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1303                                  "%d", &val) < 0)
1304                         val = 0;
1305
1306                 if (!val)
1307                         features &= ~NETIF_F_SG;
1308         }
1309
1310         if (features & NETIF_F_IPV6_CSUM) {
1311                 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1312                                  "feature-ipv6-csum-offload", "%d", &val) < 0)
1313                         val = 0;
1314
1315                 if (!val)
1316                         features &= ~NETIF_F_IPV6_CSUM;
1317         }
1318
1319         if (features & NETIF_F_TSO) {
1320                 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1321                                  "feature-gso-tcpv4", "%d", &val) < 0)
1322                         val = 0;
1323
1324                 if (!val)
1325                         features &= ~NETIF_F_TSO;
1326         }
1327
1328         if (features & NETIF_F_TSO6) {
1329                 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1330                                  "feature-gso-tcpv6", "%d", &val) < 0)
1331                         val = 0;
1332
1333                 if (!val)
1334                         features &= ~NETIF_F_TSO6;
1335         }
1336
1337         return features;
1338 }
1339
1340 static int xennet_set_features(struct net_device *dev,
1341         netdev_features_t features)
1342 {
1343         if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1344                 netdev_info(dev, "Reducing MTU because no SG offload");
1345                 dev->mtu = ETH_DATA_LEN;
1346         }
1347
1348         return 0;
1349 }
1350
1351 static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
1352 {
1353         unsigned long flags;
1354
1355         if (unlikely(queue->info->broken))
1356                 return false;
1357
1358         spin_lock_irqsave(&queue->tx_lock, flags);
1359         if (xennet_tx_buf_gc(queue))
1360                 *eoi = 0;
1361         spin_unlock_irqrestore(&queue->tx_lock, flags);
1362
1363         return true;
1364 }
1365
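/*
 * The interrupt handlers use Xen's lateeoi model: eoiflag starts out as
 * XEN_EOI_FLAG_SPURIOUS and the handle_tx/handle_rx helpers clear it when
 * they observe real work, so xen_irq_lateeoi() can treat empty wakeups as
 * spurious.  When the device has been marked broken no EOI is signalled at
 * all.
 */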
1366 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1367 {
1368         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1369
1370         if (likely(xennet_handle_tx(dev_id, &eoiflag)))
1371                 xen_irq_lateeoi(irq, eoiflag);
1372
1373         return IRQ_HANDLED;
1374 }
1375
1376 static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
1377 {
1378         unsigned int work_queued;
1379         unsigned long flags;
1380
1381         if (unlikely(queue->info->broken))
1382                 return false;
1383
1384         spin_lock_irqsave(&queue->rx_cons_lock, flags);
1385         work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
1386         if (work_queued > queue->rx_rsp_unconsumed) {
1387                 queue->rx_rsp_unconsumed = work_queued;
1388                 *eoi = 0;
1389         } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1390                 const struct device *dev = &queue->info->netdev->dev;
1391
1392                 spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1393                 dev_alert(dev, "RX producer index going backwards\n");
1394                 dev_alert(dev, "Disabled for further use\n");
1395                 queue->info->broken = true;
1396                 return false;
1397         }
1398         spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1399
1400         if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1401                 napi_schedule(&queue->napi);
1402
1403         return true;
1404 }
1405
1406 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1407 {
1408         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1409
1410         if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1411                 xen_irq_lateeoi(irq, eoiflag);
1412
1413         return IRQ_HANDLED;
1414 }
1415
1416 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1417 {
1418         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1419
1420         if (xennet_handle_tx(dev_id, &eoiflag) &&
1421             xennet_handle_rx(dev_id, &eoiflag))
1422                 xen_irq_lateeoi(irq, eoiflag);
1423
1424         return IRQ_HANDLED;
1425 }
1426
1427 #ifdef CONFIG_NET_POLL_CONTROLLER
1428 static void xennet_poll_controller(struct net_device *dev)
1429 {
1430         /* Poll each queue */
1431         struct netfront_info *info = netdev_priv(dev);
1432         unsigned int num_queues = dev->real_num_tx_queues;
1433         unsigned int i;
1434
1435         if (info->broken)
1436                 return;
1437
1438         for (i = 0; i < num_queues; ++i)
1439                 xennet_interrupt(0, &info->queues[i]);
1440 }
1441 #endif
1442
1443 static const struct net_device_ops xennet_netdev_ops = {
1444         .ndo_open            = xennet_open,
1445         .ndo_stop            = xennet_close,
1446         .ndo_start_xmit      = xennet_start_xmit,
1447         .ndo_change_mtu      = xennet_change_mtu,
1448         .ndo_get_stats64     = xennet_get_stats64,
1449         .ndo_set_mac_address = eth_mac_addr,
1450         .ndo_validate_addr   = eth_validate_addr,
1451         .ndo_fix_features    = xennet_fix_features,
1452         .ndo_set_features    = xennet_set_features,
1453         .ndo_select_queue    = xennet_select_queue,
1454 #ifdef CONFIG_NET_POLL_CONTROLLER
1455         .ndo_poll_controller = xennet_poll_controller,
1456 #endif
1457 };
1458
1459 static void xennet_free_netdev(struct net_device *netdev)
1460 {
1461         struct netfront_info *np = netdev_priv(netdev);
1462
1463         free_percpu(np->rx_stats);
1464         free_percpu(np->tx_stats);
1465         free_netdev(netdev);
1466 }
1467
1468 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1469 {
1470         int err;
1471         struct net_device *netdev;
1472         struct netfront_info *np;
1473
1474         netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1475         if (!netdev)
1476                 return ERR_PTR(-ENOMEM);
1477
1478         np                   = netdev_priv(netdev);
1479         np->xbdev            = dev;
1480
1481         np->queues = NULL;
1482
1483         err = -ENOMEM;
1484         np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1485         if (np->rx_stats == NULL)
1486                 goto exit;
1487         np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1488         if (np->tx_stats == NULL)
1489                 goto exit;
1490
1491         netdev->netdev_ops      = &xennet_netdev_ops;
1492
1493         netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1494                                   NETIF_F_GSO_ROBUST;
1495         netdev->hw_features     = NETIF_F_SG |
1496                                   NETIF_F_IPV6_CSUM |
1497                                   NETIF_F_TSO | NETIF_F_TSO6;
1498
1499         /*
1500          * Assume that all hw features are available for now. This set
1501          * will be adjusted by the call to netdev_update_features() in
1502          * xennet_connect() which is the earliest point where we can
1503          * negotiate with the backend regarding supported features.
1504          */
1505         netdev->features |= netdev->hw_features;
1506
1507         netdev->ethtool_ops = &xennet_ethtool_ops;
1508         SET_NETDEV_DEV(netdev, &dev->dev);
1509
1510         np->netdev = netdev;
1511
1512         netif_carrier_off(netdev);
1513
1514         do {
1515                 xenbus_switch_state(dev, XenbusStateInitialising);
1516                 err = wait_event_timeout(module_wq,
1517                                  xenbus_read_driver_state(dev->otherend) !=
1518                                  XenbusStateClosed &&
1519                                  xenbus_read_driver_state(dev->otherend) !=
1520                                  XenbusStateUnknown, XENNET_TIMEOUT);
1521         } while (!err);
1522
1523         return netdev;
1524
1525  exit:
1526         xennet_free_netdev(netdev);
1527         return ERR_PTR(err);
1528 }
1529
1530 /**
1531  * Entry point to this code when a new device is created.  Allocate the basic
1532  * structures and the ring buffers for communication with the backend, and
1533  * inform the backend of the appropriate details for those.
1534  */
1535 static int netfront_probe(struct xenbus_device *dev,
1536                           const struct xenbus_device_id *id)
1537 {
1538         int err;
1539         struct net_device *netdev;
1540         struct netfront_info *info;
1541
1542         netdev = xennet_create_dev(dev);
1543         if (IS_ERR(netdev)) {
1544                 err = PTR_ERR(netdev);
1545                 xenbus_dev_fatal(dev, err, "creating netdev");
1546                 return err;
1547         }
1548
1549         info = netdev_priv(netdev);
1550         dev_set_drvdata(&dev->dev, info);
1551 #ifdef CONFIG_SYSFS
1552         info->netdev->sysfs_groups[0] = &xennet_dev_group;
1553 #endif
1554
1555         return 0;
1556 }
1557
1558 static void xennet_end_access(int ref, void *page)
1559 {
1560         /* This frees the page as a side-effect */
1561         if (ref != GRANT_INVALID_REF)
1562                 gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1563 }
1564
1565 static void xennet_disconnect_backend(struct netfront_info *info)
1566 {
1567         unsigned int i = 0;
1568         unsigned int num_queues = info->netdev->real_num_tx_queues;
1569
1570         netif_carrier_off(info->netdev);
1571
1572         for (i = 0; i < num_queues && info->queues; ++i) {
1573                 struct netfront_queue *queue = &info->queues[i];
1574
1575                 del_timer_sync(&queue->rx_refill_timer);
1576
1577                 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1578                         unbind_from_irqhandler(queue->tx_irq, queue);
1579                 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1580                         unbind_from_irqhandler(queue->tx_irq, queue);
1581                         unbind_from_irqhandler(queue->rx_irq, queue);
1582                 }
1583                 queue->tx_evtchn = queue->rx_evtchn = 0;
1584                 queue->tx_irq = queue->rx_irq = 0;
1585
1586                 if (netif_running(info->netdev))
1587                         napi_synchronize(&queue->napi);
1588
1589                 xennet_release_tx_bufs(queue);
1590                 xennet_release_rx_bufs(queue);
1591                 gnttab_free_grant_references(queue->gref_tx_head);
1592                 gnttab_free_grant_references(queue->gref_rx_head);
1593
1594                 /* End access and free the pages */
1595                 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1596                 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1597
1598                 queue->tx_ring_ref = GRANT_INVALID_REF;
1599                 queue->rx_ring_ref = GRANT_INVALID_REF;
1600                 queue->tx.sring = NULL;
1601                 queue->rx.sring = NULL;
1602         }
1603 }
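
/*
 * Note on the teardown order above: the refill timer and IRQs are torn down
 * first so no new work can arrive, NAPI is then drained for a running device,
 * and only after that are the tx/rx buffers, grant references and shared
 * ring pages released and the ring pointers invalidated.
 */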
1604
1605 /**
1606  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1607  * driver restart.  We tear down our netif structure and recreate it, but
1608  * leave the device-layer structures intact so that this is transparent to the
1609  * rest of the kernel.
1610  */
1611 static int netfront_resume(struct xenbus_device *dev)
1612 {
1613         struct netfront_info *info = dev_get_drvdata(&dev->dev);
1614
1615         dev_dbg(&dev->dev, "%s\n", dev->nodename);
1616
1617         netif_tx_lock_bh(info->netdev);
1618         netif_device_detach(info->netdev);
1619         netif_tx_unlock_bh(info->netdev);
1620
1621         xennet_disconnect_backend(info);
1622         return 0;
1623 }
1624
1625 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1626 {
1627         char *s, *e, *macstr;
1628         int i;
1629
1630         macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1631         if (IS_ERR(macstr))
1632                 return PTR_ERR(macstr);
1633
1634         for (i = 0; i < ETH_ALEN; i++) {
1635                 mac[i] = simple_strtoul(s, &e, 16);
1636                 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1637                         kfree(macstr);
1638                         return -ENOENT;
1639                 }
1640                 s = e+1;
1641         }
1642
1643         kfree(macstr);
1644         return 0;
1645 }
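
/*
 * Illustration (not part of this driver): the backend publishes the MAC as a
 * colon-separated hex string in XenStore, e.g. "00:16:3e:12:34:56", and
 * xen_net_read_mac() above walks it with simple_strtoul().  The guarded
 * user-space sketch below parses the same format with strtoul(); names such
 * as parse_mac() and EX_ETH_ALEN are invented for the example.
 */
#if 0	/* standalone user-space sketch, excluded from the kernel build */
#include <stdio.h>
#include <stdlib.h>

#define EX_ETH_ALEN 6

/* Parse "aa:bb:cc:dd:ee:ff" into mac[]; return 0 on success, -1 on error. */
static int parse_mac(const char *s, unsigned char mac[EX_ETH_ALEN])
{
	char *e;
	int i;

	for (i = 0; i < EX_ETH_ALEN; i++) {
		mac[i] = (unsigned char)strtoul(s, &e, 16);
		if (s == e || *e != ((i == EX_ETH_ALEN - 1) ? '\0' : ':'))
			return -1;
		s = e + 1;
	}
	return 0;
}

int main(void)
{
	unsigned char mac[EX_ETH_ALEN];

	if (!parse_mac("00:16:3e:12:34:56", mac))
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
#endif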
1646
1647 static int setup_netfront_single(struct netfront_queue *queue)
1648 {
1649         int err;
1650
1651         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1652         if (err < 0)
1653                 goto fail;
1654
1655         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1656                                                 xennet_interrupt, 0,
1657                                                 queue->info->netdev->name,
1658                                                 queue);
1659         if (err < 0)
1660                 goto bind_fail;
1661         queue->rx_evtchn = queue->tx_evtchn;
1662         queue->rx_irq = queue->tx_irq = err;
1663
1664         return 0;
1665
1666 bind_fail:
1667         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1668         queue->tx_evtchn = 0;
1669 fail:
1670         return err;
1671 }
1672
1673 static int setup_netfront_split(struct netfront_queue *queue)
1674 {
1675         int err;
1676
1677         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1678         if (err < 0)
1679                 goto fail;
1680         err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1681         if (err < 0)
1682                 goto alloc_rx_evtchn_fail;
1683
1684         snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1685                  "%s-tx", queue->name);
1686         err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1687                                                 xennet_tx_interrupt, 0,
1688                                                 queue->tx_irq_name, queue);
1689         if (err < 0)
1690                 goto bind_tx_fail;
1691         queue->tx_irq = err;
1692
1693         snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1694                  "%s-rx", queue->name);
1695         err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1696                                                 xennet_rx_interrupt, 0,
1697                                                 queue->rx_irq_name, queue);
1698         if (err < 0)
1699                 goto bind_rx_fail;
1700         queue->rx_irq = err;
1701
1702         return 0;
1703
1704 bind_rx_fail:
1705         unbind_from_irqhandler(queue->tx_irq, queue);
1706         queue->tx_irq = 0;
1707 bind_tx_fail:
1708         xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1709         queue->rx_evtchn = 0;
1710 alloc_rx_evtchn_fail:
1711         xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1712         queue->tx_evtchn = 0;
1713 fail:
1714         return err;
1715 }
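
/*
 * Note: with a single event channel (setup_netfront_single() above) both tx
 * and rx completions arrive through xennet_interrupt() on one IRQ, whereas
 * the split variant binds dedicated "-tx" and "-rx" handlers so each
 * direction gets its own event channel and IRQ name.
 */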
1716
1717 static int setup_netfront(struct xenbus_device *dev,
1718                         struct netfront_queue *queue, unsigned int feature_split_evtchn)
1719 {
1720         struct xen_netif_tx_sring *txs;
1721         struct xen_netif_rx_sring *rxs = NULL;
1722         grant_ref_t gref;
1723         int err;
1724
1725         queue->tx_ring_ref = GRANT_INVALID_REF;
1726         queue->rx_ring_ref = GRANT_INVALID_REF;
1727         queue->rx.sring = NULL;
1728         queue->tx.sring = NULL;
1729
1730         txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1731         if (!txs) {
1732                 err = -ENOMEM;
1733                 xenbus_dev_fatal(dev, err, "allocating tx ring page");
1734                 goto fail;
1735         }
1736         SHARED_RING_INIT(txs);
1737         FRONT_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);
1738
1739         err = xenbus_grant_ring(dev, txs, 1, &gref);
1740         if (err < 0)
1741                 goto fail;
1742         queue->tx_ring_ref = gref;
1743
1744         rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1745         if (!rxs) {
1746                 err = -ENOMEM;
1747                 xenbus_dev_fatal(dev, err, "allocating rx ring page");
1748                 goto fail;
1749         }
1750         SHARED_RING_INIT(rxs);
1751         FRONT_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);
1752
1753         err = xenbus_grant_ring(dev, rxs, 1, &gref);
1754         if (err < 0)
1755                 goto fail;
1756         queue->rx_ring_ref = gref;
1757
1758         if (feature_split_evtchn)
1759                 err = setup_netfront_split(queue);
1760         /* Fall back to a single event channel if
1761          *  a) feature-split-event-channels == 0, or
1762          *  b) feature-split-event-channels == 1 but the split setup failed
1763          */
1764         if (!feature_split_evtchn || err)
1765                 err = setup_netfront_single(queue);
1766
1767         if (err)
1768                 goto fail;
1769
1770         return 0;
1771
1772         /* If we fail to set up netfront, it is safe to just revoke access
1773          * to the granted pages because the backend is not accessing them yet.
1774          */
1775  fail:
1776         if (queue->rx_ring_ref != GRANT_INVALID_REF) {
1777                 gnttab_end_foreign_access(queue->rx_ring_ref, 0,
1778                                           (unsigned long)rxs);
1779                 queue->rx_ring_ref = GRANT_INVALID_REF;
1780         } else {
1781                 free_page((unsigned long)rxs);
1782         }
1783         if (queue->tx_ring_ref != GRANT_INVALID_REF) {
1784                 gnttab_end_foreign_access(queue->tx_ring_ref, 0,
1785                                           (unsigned long)txs);
1786                 queue->tx_ring_ref = GRANT_INVALID_REF;
1787         } else {
1788                 free_page((unsigned long)txs);
1789         }
1790         return err;
1791 }
1792
1793 /* Queue-specific initialisation
1794  * This used to be done in xennet_create_dev() but must now
1795  * be run per-queue.
1796  */
1797 static int xennet_init_queue(struct netfront_queue *queue)
1798 {
1799         unsigned short i;
1800         int err = 0;
1801         char *devid;
1802
1803         spin_lock_init(&queue->tx_lock);
1804         spin_lock_init(&queue->rx_lock);
1805         spin_lock_init(&queue->rx_cons_lock);
1806
1807         setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
1808                     (unsigned long)queue);
1809
1810         devid = strrchr(queue->info->xbdev->nodename, '/') + 1;
1811         snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
1812                  devid, queue->id);
1813
1814         /* Initialise tx_skb_freelist as a free chain containing every entry. */
1815         queue->tx_skb_freelist = 0;
1816         queue->tx_pend_queue = TX_LINK_NONE;
1817         for (i = 0; i < NET_TX_RING_SIZE; i++) {
1818                 queue->tx_link[i] = i + 1;
1819                 queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1820                 queue->grant_tx_page[i] = NULL;
1821         }
1822         queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
1823
1824         /* Clear out rx_skbs */
1825         for (i = 0; i < NET_RX_RING_SIZE; i++) {
1826                 queue->rx_skbs[i] = NULL;
1827                 queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1828         }
1829
1830         /* A grant for every tx ring slot */
1831         if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1832                                           &queue->gref_tx_head) < 0) {
1833                 pr_alert("can't alloc tx grant refs\n");
1834                 err = -ENOMEM;
1835                 goto exit;
1836         }
1837
1838         /* A grant for every rx ring slot */
1839         if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1840                                           &queue->gref_rx_head) < 0) {
1841                 pr_alert("can't alloc rx grant refs\n");
1842                 err = -ENOMEM;
1843                 goto exit_free_tx;
1844         }
1845
1846         return 0;
1847
1848  exit_free_tx:
1849         gnttab_free_grant_references(queue->gref_tx_head);
1850  exit:
1851         return err;
1852 }
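
/*
 * Illustration (not part of this driver): xennet_init_queue() above threads
 * every tx slot into a free list through tx_link[], with tx_skb_freelist as
 * the head and TX_LINK_NONE terminating the chain.  The guarded user-space
 * sketch below shows the same array-based free list; RING_SIZE, LINK_NONE,
 * slot_get() and slot_put() are invented names for the example.
 */
#if 0	/* standalone user-space sketch, excluded from the kernel build */
#include <stdio.h>

#define RING_SIZE 256
#define LINK_NONE 0xffff

static unsigned short link_tbl[RING_SIZE];
static unsigned short freelist;

/* Chain every slot onto the free list, terminated by LINK_NONE. */
static void freelist_init(void)
{
	unsigned short i;

	for (i = 0; i < RING_SIZE; i++)
		link_tbl[i] = i + 1;
	link_tbl[RING_SIZE - 1] = LINK_NONE;
	freelist = 0;
}

/* Pop a free slot index, or LINK_NONE if the list is empty. */
static unsigned short slot_get(void)
{
	unsigned short id = freelist;

	if (id != LINK_NONE)
		freelist = link_tbl[id];
	return id;
}

/* Push a slot index back onto the free list. */
static void slot_put(unsigned short id)
{
	link_tbl[id] = freelist;
	freelist = id;
}

int main(void)
{
	unsigned short a, b;

	freelist_init();
	a = slot_get();
	b = slot_get();
	printf("got slots %u and %u\n", (unsigned int)a, (unsigned int)b);
	slot_put(a);
	slot_put(b);
	return 0;
}
#endif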
1853
1854 static int write_queue_xenstore_keys(struct netfront_queue *queue,
1855                            struct xenbus_transaction *xbt, int write_hierarchical)
1856 {
1857         /* Write the queue-specific keys into XenStore in the traditional
1858          * way for a single queue, or under per-queue subkeys for multiple
1859          * queues.
1860          */
1861         struct xenbus_device *dev = queue->info->xbdev;
1862         int err;
1863         const char *message;
1864         char *path;
1865         size_t pathsize;
1866
1867         /* Choose the correct place to write the keys */
1868         if (write_hierarchical) {
1869                 pathsize = strlen(dev->nodename) + 10;
1870                 path = kzalloc(pathsize, GFP_KERNEL);
1871                 if (!path) {
1872                         err = -ENOMEM;
1873                         message = "out of memory while writing ring references";
1874                         goto error;
1875                 }
1876                 snprintf(path, pathsize, "%s/queue-%u",
1877                                 dev->nodename, queue->id);
1878         } else {
1879                 path = (char *)dev->nodename;
1880         }
1881
1882         /* Write ring references */
1883         err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1884                         queue->tx_ring_ref);
1885         if (err) {
1886                 message = "writing tx-ring-ref";
1887                 goto error;
1888         }
1889
1890         err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1891                         queue->rx_ring_ref);
1892         if (err) {
1893                 message = "writing rx-ring-ref";
1894                 goto error;
1895         }
1896
1897         /* Write event channels; taking into account both shared
1898          * and split event channel scenarios.
1899          */
1900         if (queue->tx_evtchn == queue->rx_evtchn) {
1901                 /* Shared event channel */
1902                 err = xenbus_printf(*xbt, path,
1903                                 "event-channel", "%u", queue->tx_evtchn);
1904                 if (err) {
1905                         message = "writing event-channel";
1906                         goto error;
1907                 }
1908         } else {
1909                 /* Split event channels */
1910                 err = xenbus_printf(*xbt, path,
1911                                 "event-channel-tx", "%u", queue->tx_evtchn);
1912                 if (err) {
1913                         message = "writing event-channel-tx";
1914                         goto error;
1915                 }
1916
1917                 err = xenbus_printf(*xbt, path,
1918                                 "event-channel-rx", "%u", queue->rx_evtchn);
1919                 if (err) {
1920                         message = "writing event-channel-rx";
1921                         goto error;
1922                 }
1923         }
1924
1925         if (write_hierarchical)
1926                 kfree(path);
1927         return 0;
1928
1929 error:
1930         if (write_hierarchical)
1931                 kfree(path);
1932         xenbus_dev_fatal(dev, err, "%s", message);
1933         return err;
1934 }
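
/*
 * For reference, the XenStore layout produced above (paths are illustrative;
 * the frontend node name is assigned by the toolstack):
 *
 *   Single queue (write_hierarchical == 0), keys directly under the device:
 *     <nodename>/tx-ring-ref, <nodename>/rx-ring-ref,
 *     <nodename>/event-channel  (or event-channel-tx / event-channel-rx)
 *
 *   Multiple queues (write_hierarchical == 1), keys under per-queue subkeys:
 *     <nodename>/queue-0/tx-ring-ref, <nodename>/queue-0/rx-ring-ref, ...
 *     <nodename>/queue-1/tx-ring-ref, ...
 */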
1935
1936 static void xennet_destroy_queues(struct netfront_info *info)
1937 {
1938         unsigned int i;
1939
1940         for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1941                 struct netfront_queue *queue = &info->queues[i];
1942
1943                 if (netif_running(info->netdev))
1944                         napi_disable(&queue->napi);
1945                 netif_napi_del(&queue->napi);
1946         }
1947
1948         kfree(info->queues);
1949         info->queues = NULL;
1950 }
1951
1952 static int xennet_create_queues(struct netfront_info *info,
1953                                 unsigned int *num_queues)
1954 {
1955         unsigned int i;
1956         int ret;
1957
1958         info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1959                                GFP_KERNEL);
1960         if (!info->queues)
1961                 return -ENOMEM;
1962
1963         for (i = 0; i < *num_queues; i++) {
1964                 struct netfront_queue *queue = &info->queues[i];
1965
1966                 queue->id = i;
1967                 queue->info = info;
1968
1969                 ret = xennet_init_queue(queue);
1970                 if (ret < 0) {
1971                         dev_warn(&info->xbdev->dev,
1972                                  "only created %d queues\n", i);
1973                         *num_queues = i;
1974                         break;
1975                 }
1976
1977                 netif_napi_add(queue->info->netdev, &queue->napi,
1978                                xennet_poll, 64);
1979                 if (netif_running(info->netdev))
1980                         napi_enable(&queue->napi);
1981         }
1982
1983         netif_set_real_num_tx_queues(info->netdev, *num_queues);
1984
1985         if (*num_queues == 0) {
1986                 dev_err(&info->xbdev->dev, "no queues\n");
1987                 return -EINVAL;
1988         }
1989         return 0;
1990 }
1991
1992 /* Common code used when first setting up, and when resuming. */
1993 static int talk_to_netback(struct xenbus_device *dev,
1994                            struct netfront_info *info)
1995 {
1996         const char *message;
1997         struct xenbus_transaction xbt;
1998         int err;
1999         unsigned int feature_split_evtchn;
2000         unsigned int i = 0;
2001         unsigned int max_queues = 0;
2002         struct netfront_queue *queue = NULL;
2003         unsigned int num_queues = 1;
2004         unsigned int trusted;
2005
2006         info->netdev->irq = 0;
2007
2008         /* Check if backend is trusted. */
2009         err = xenbus_scanf(XBT_NIL, dev->nodename, "trusted", "%u", &trusted);
2010         if (err < 0)
2011                 trusted = 1;
2012         info->bounce = !xennet_trusted || !trusted;
2013
2014         /* Check if backend supports multiple queues */
2015         err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2016                            "multi-queue-max-queues", "%u", &max_queues);
2017         if (err < 0)
2018                 max_queues = 1;
2019         num_queues = min(max_queues, xennet_max_queues);
2020
2021         /* Check feature-split-event-channels */
2022         err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2023                            "feature-split-event-channels", "%u",
2024                            &feature_split_evtchn);
2025         if (err < 0)
2026                 feature_split_evtchn = 0;
2027
2028         /* Read mac addr. */
2029         err = xen_net_read_mac(dev, info->netdev->dev_addr);
2030         if (err) {
2031                 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
2032                 goto out_unlocked;
2033         }
2034
2035         rtnl_lock();
2036         if (info->queues)
2037                 xennet_destroy_queues(info);
2038
2039         /* In the case of a reconnect, reset the "broken" indicator. */
2040         info->broken = false;
2041
2042         err = xennet_create_queues(info, &num_queues);
2043         if (err < 0) {
2044                 xenbus_dev_fatal(dev, err, "creating queues");
2045                 kfree(info->queues);
2046                 info->queues = NULL;
2047                 goto out;
2048         }
2049         rtnl_unlock();
2050
2051         /* Create shared ring, alloc event channel -- for each queue */
2052         for (i = 0; i < num_queues; ++i) {
2053                 queue = &info->queues[i];
2054                 err = setup_netfront(dev, queue, feature_split_evtchn);
2055                 if (err)
2056                         goto destroy_ring;
2057         }
2058
2059 again:
2060         err = xenbus_transaction_start(&xbt);
2061         if (err) {
2062                 xenbus_dev_fatal(dev, err, "starting transaction");
2063                 goto destroy_ring;
2064         }
2065
2066         if (xenbus_exists(XBT_NIL,
2067                           info->xbdev->otherend, "multi-queue-max-queues")) {
2068                 /* Write the number of queues */
2069                 err = xenbus_printf(xbt, dev->nodename,
2070                                     "multi-queue-num-queues", "%u", num_queues);
2071                 if (err) {
2072                         message = "writing multi-queue-num-queues";
2073                         goto abort_transaction_no_dev_fatal;
2074                 }
2075         }
2076
2077         if (num_queues == 1) {
2078                 err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
2079                 if (err)
2080                         goto abort_transaction_no_dev_fatal;
2081         } else {
2082                 /* Write the keys for each queue */
2083                 for (i = 0; i < num_queues; ++i) {
2084                         queue = &info->queues[i];
2085                         err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
2086                         if (err)
2087                                 goto abort_transaction_no_dev_fatal;
2088                 }
2089         }
2090
2091         /* The remaining keys are not queue-specific */
2092         err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
2093                             1);
2094         if (err) {
2095                 message = "writing request-rx-copy";
2096                 goto abort_transaction;
2097         }
2098
2099         err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
2100         if (err) {
2101                 message = "writing feature-rx-notify";
2102                 goto abort_transaction;
2103         }
2104
2105         err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
2106         if (err) {
2107                 message = "writing feature-sg";
2108                 goto abort_transaction;
2109         }
2110
2111         err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
2112         if (err) {
2113                 message = "writing feature-gso-tcpv4";
2114                 goto abort_transaction;
2115         }
2116
2117         err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
2118         if (err) {
2119                 message = "writing feature-gso-tcpv6";
2120                 goto abort_transaction;
2121         }
2122
2123         err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
2124                            "1");
2125         if (err) {
2126                 message = "writing feature-ipv6-csum-offload";
2127                 goto abort_transaction;
2128         }
2129
2130         err = xenbus_transaction_end(xbt, 0);
2131         if (err) {
2132                 if (err == -EAGAIN)
2133                         goto again;
2134                 xenbus_dev_fatal(dev, err, "completing transaction");
2135                 goto destroy_ring;
2136         }
2137
2138         return 0;
2139
2140  abort_transaction:
2141         xenbus_dev_fatal(dev, err, "%s", message);
2142 abort_transaction_no_dev_fatal:
2143         xenbus_transaction_end(xbt, 1);
2144  destroy_ring:
2145         xennet_disconnect_backend(info);
2146         rtnl_lock();
2147         xennet_destroy_queues(info);
2148  out:
2149         rtnl_unlock();
2150 out_unlocked:
2151         device_unregister(&dev->dev);
2152         return err;
2153 }
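
/*
 * Illustration (not part of this driver): talk_to_netback() above restarts
 * the whole XenStore transaction from the "again:" label whenever
 * xenbus_transaction_end() reports -EAGAIN, i.e. another writer raced with
 * us.  The guarded user-space sketch below shows the same retry-on-EAGAIN
 * shape; begin_txn(), write_keys() and commit_txn() are stand-ins, not real
 * APIs, and the abort path is simplified.
 */
#if 0	/* standalone user-space sketch, excluded from the kernel build */
#include <errno.h>
#include <stdio.h>

static int attempts;

static int begin_txn(void)  { return 0; }
static int write_keys(void) { return 0; }

/* Pretend the first commit races with another writer. */
static int commit_txn(void)
{
	return (attempts++ == 0) ? -EAGAIN : 0;
}

int main(void)
{
	int err;

again:
	err = begin_txn();
	if (err)
		return 1;

	err = write_keys();
	if (err)
		return 1;	/* the real code aborts the transaction here */

	err = commit_txn();
	if (err == -EAGAIN)
		goto again;	/* transaction raced, redo all the writes */
	if (err)
		return 1;

	printf("committed after %d attempt(s)\n", attempts);
	return 0;
}
#endif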
2154
2155 static int xennet_connect(struct net_device *dev)
2156 {
2157         struct netfront_info *np = netdev_priv(dev);
2158         unsigned int num_queues = 0;
2159         int err;
2160         unsigned int feature_rx_copy;
2161         unsigned int j = 0;
2162         struct netfront_queue *queue = NULL;
2163
2164         err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
2165                            "feature-rx-copy", "%u", &feature_rx_copy);
2166         if (err != 1)
2167                 feature_rx_copy = 0;
2168
2169         if (!feature_rx_copy) {
2170                 dev_info(&dev->dev,
2171                          "backend does not support copying receive path\n");
2172                 return -ENODEV;
2173         }
2174
2175         err = talk_to_netback(np->xbdev, np);
2176         if (err)
2177                 return err;
2178         if (np->bounce)
2179                 dev_info(&np->xbdev->dev,
2180                          "bouncing transmitted data to zeroed pages\n");
2181
2182         /* talk_to_netback() sets the correct number of queues */
2183         num_queues = dev->real_num_tx_queues;
2184
2185         if (dev->reg_state == NETREG_UNINITIALIZED) {
2186                 err = register_netdev(dev);
2187                 if (err) {
2188                         pr_warn("%s: register_netdev err=%d\n", __func__, err);
2189                         device_unregister(&np->xbdev->dev);
2190                         return err;
2191                 }
2192         }
2193
2194         rtnl_lock();
2195         netdev_update_features(dev);
2196         rtnl_unlock();
2197
2198         /*
2199          * All public and private state should now be sane.  Get
2200          * ready to start sending and receiving packets and give the driver
2201          * domain a kick because we've probably just requeued some
2202          * packets.
2203          */
2204         netif_tx_lock_bh(np->netdev);
2205         netif_device_attach(np->netdev);
2206         netif_tx_unlock_bh(np->netdev);
2207
2208         netif_carrier_on(np->netdev);
2209         for (j = 0; j < num_queues; ++j) {
2210                 queue = &np->queues[j];
2211
2212                 notify_remote_via_irq(queue->tx_irq);
2213                 if (queue->tx_irq != queue->rx_irq)
2214                         notify_remote_via_irq(queue->rx_irq);
2215
2216                 spin_lock_irq(&queue->tx_lock);
2217                 xennet_tx_buf_gc(queue);
2218                 spin_unlock_irq(&queue->tx_lock);
2219
2220                 spin_lock_bh(&queue->rx_lock);
2221                 xennet_alloc_rx_buffers(queue);
2222                 spin_unlock_bh(&queue->rx_lock);
2223         }
2224
2225         return 0;
2226 }
2227
2228 /**
2229  * Callback received when the backend's state changes.
2230  */
2231 static void netback_changed(struct xenbus_device *dev,
2232                             enum xenbus_state backend_state)
2233 {
2234         struct netfront_info *np = dev_get_drvdata(&dev->dev);
2235         struct net_device *netdev = np->netdev;
2236
2237         dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
2238
2239         wake_up_all(&module_wq);
2240
2241         switch (backend_state) {
2242         case XenbusStateInitialising:
2243         case XenbusStateInitialised:
2244         case XenbusStateReconfiguring:
2245         case XenbusStateReconfigured:
2246         case XenbusStateUnknown:
2247                 break;
2248
2249         case XenbusStateInitWait:
2250                 if (dev->state != XenbusStateInitialising)
2251                         break;
2252                 if (xennet_connect(netdev) != 0)
2253                         break;
2254                 xenbus_switch_state(dev, XenbusStateConnected);
2255                 break;
2256
2257         case XenbusStateConnected:
2258                 netdev_notify_peers(netdev);
2259                 break;
2260
2261         case XenbusStateClosed:
2262                 if (dev->state == XenbusStateClosed)
2263                         break;
2264                 /* Missed the backend's CLOSING state -- fallthrough */
2265         case XenbusStateClosing:
2266                 xenbus_frontend_closed(dev);
2267                 break;
2268         }
2269 }
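
/*
 * Summary of the state handling above: the frontend connects (and switches
 * itself to XenbusStateConnected) once the backend reaches InitWait,
 * re-announces the interface via netdev_notify_peers() when the backend
 * reports Connected, and treats Closing/Closed as a request to close the
 * frontend; the remaining intermediate states are ignored.
 */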
2270
2271 static const struct xennet_stat {
2272         char name[ETH_GSTRING_LEN];
2273         u16 offset;
2274 } xennet_stats[] = {
2275         {
2276                 "rx_gso_checksum_fixup",
2277                 offsetof(struct netfront_info, rx_gso_checksum_fixup)
2278         },
2279 };
2280
2281 static int xennet_get_sset_count(struct net_device *dev, int string_set)
2282 {
2283         switch (string_set) {
2284         case ETH_SS_STATS:
2285                 return ARRAY_SIZE(xennet_stats);
2286         default:
2287                 return -EINVAL;
2288         }
2289 }
2290
2291 static void xennet_get_ethtool_stats(struct net_device *dev,
2292                                      struct ethtool_stats *stats, u64 *data)
2293 {
2294         void *np = netdev_priv(dev);
2295         int i;
2296
2297         for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2298                 data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
2299 }
2300
2301 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2302 {
2303         int i;
2304
2305         switch (stringset) {
2306         case ETH_SS_STATS:
2307                 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
2308                         memcpy(data + i * ETH_GSTRING_LEN,
2309                                xennet_stats[i].name, ETH_GSTRING_LEN);
2310                 break;
2311         }
2312 }
2313
2314 static const struct ethtool_ops xennet_ethtool_ops =
2315 {
2316         .get_link = ethtool_op_get_link,
2317
2318         .get_sset_count = xennet_get_sset_count,
2319         .get_ethtool_stats = xennet_get_ethtool_stats,
2320         .get_strings = xennet_get_strings,
2321 };
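
/*
 * The single driver-private statistic above (rx_gso_checksum_fixup) is
 * exported through the standard ethtool statistics interface and can be
 * inspected from user space with, e.g., "ethtool -S <interface>"
 * (illustrative command).
 */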
2322
2323 #ifdef CONFIG_SYSFS
2324 static ssize_t show_rxbuf(struct device *dev,
2325                           struct device_attribute *attr, char *buf)
2326 {
2327         return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
2328 }
2329
2330 static ssize_t store_rxbuf(struct device *dev,
2331                            struct device_attribute *attr,
2332                            const char *buf, size_t len)
2333 {
2334         char *endp;
2335         unsigned long target;
2336
2337         if (!capable(CAP_NET_ADMIN))
2338                 return -EPERM;
2339
2340         target = simple_strtoul(buf, &endp, 0);
2341         if (endp == buf)
2342                 return -EBADMSG;
2343
2344         /* rxbuf_min and rxbuf_max are no longer configurable. */
2345
2346         return len;
2347 }
2348
2349 static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2350 static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
2351 static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);
2352
2353 static struct attribute *xennet_dev_attrs[] = {
2354         &dev_attr_rxbuf_min.attr,
2355         &dev_attr_rxbuf_max.attr,
2356         &dev_attr_rxbuf_cur.attr,
2357         NULL
2358 };
2359
2360 static const struct attribute_group xennet_dev_group = {
2361         .attrs = xennet_dev_attrs
2362 };
2363 #endif /* CONFIG_SYSFS */
2364
2365 static void xennet_bus_close(struct xenbus_device *dev)
2366 {
2367         int ret;
2368
2369         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2370                 return;
2371         do {
2372                 xenbus_switch_state(dev, XenbusStateClosing);
2373                 ret = wait_event_timeout(module_wq,
2374                                    xenbus_read_driver_state(dev->otherend) ==
2375                                    XenbusStateClosing ||
2376                                    xenbus_read_driver_state(dev->otherend) ==
2377                                    XenbusStateClosed ||
2378                                    xenbus_read_driver_state(dev->otherend) ==
2379                                    XenbusStateUnknown,
2380                                    XENNET_TIMEOUT);
2381         } while (!ret);
2382
2383         if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
2384                 return;
2385
2386         do {
2387                 xenbus_switch_state(dev, XenbusStateClosed);
2388                 ret = wait_event_timeout(module_wq,
2389                                    xenbus_read_driver_state(dev->otherend) ==
2390                                    XenbusStateClosed ||
2391                                    xenbus_read_driver_state(dev->otherend) ==
2392                                    XenbusStateUnknown,
2393                                    XENNET_TIMEOUT);
2394         } while (!ret);
2395 }
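
/*
 * Note: xennet_bus_close() above performs the shutdown handshake in two
 * steps, first requesting Closing and waiting for the backend to react,
 * then requesting Closed, retrying each wait after XENNET_TIMEOUT until the
 * backend reaches the expected state (or disappears as Unknown).
 */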
2396
2397 static int xennet_remove(struct xenbus_device *dev)
2398 {
2399         struct netfront_info *info = dev_get_drvdata(&dev->dev);
2400
2401         xennet_bus_close(dev);
2402         xennet_disconnect_backend(info);
2403
2404         if (info->netdev->reg_state == NETREG_REGISTERED)
2405                 unregister_netdev(info->netdev);
2406
2407         if (info->queues) {
2408                 rtnl_lock();
2409                 xennet_destroy_queues(info);
2410                 rtnl_unlock();
2411         }
2412         xennet_free_netdev(info->netdev);
2413
2414         return 0;
2415 }
2416
2417 static const struct xenbus_device_id netfront_ids[] = {
2418         { "vif" },
2419         { "" }
2420 };
2421
2422 static struct xenbus_driver netfront_driver = {
2423         .ids = netfront_ids,
2424         .probe = netfront_probe,
2425         .remove = xennet_remove,
2426         .resume = netfront_resume,
2427         .otherend_changed = netback_changed,
2428 };
2429
2430 static int __init netif_init(void)
2431 {
2432         if (!xen_domain())
2433                 return -ENODEV;
2434
2435         if (!xen_has_pv_nic_devices())
2436                 return -ENODEV;
2437
2438         pr_info("Initialising Xen virtual ethernet driver\n");
2439
2440         /* Allow as many queues as there are CPUs if the user has not
2441          * specified a value.
2442          */
2443         if (xennet_max_queues == 0)
2444                 xennet_max_queues = num_online_cpus();
2445
2446         return xenbus_register_frontend(&netfront_driver);
2447 }
2448 module_init(netif_init);
2449
2450
2451 static void __exit netif_exit(void)
2452 {
2453         xenbus_unregister_driver(&netfront_driver);
2454 }
2455 module_exit(netif_exit);
2456
2457 MODULE_DESCRIPTION("Xen virtual network device frontend");
2458 MODULE_LICENSE("GPL");
2459 MODULE_ALIAS("xen:vif");
2460 MODULE_ALIAS("xennet");