GNU Linux-libre 4.19.286-gnu1
drivers/net/xen-netback/netback.c
1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34
35 #include "common.h"
36
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 #include <linux/highmem.h>
41
42 #include <net/tcp.h>
43
44 #include <xen/xen.h>
45 #include <xen/events.h>
46 #include <xen/interface/memory.h>
47 #include <xen/page.h>
48
49 #include <asm/xen/hypercall.h>
50
51 /* Provide an option to disable split event channels at load time as
 52  * event channels are a limited resource. Split event channels are
53  * enabled by default.
54  */
55 bool separate_tx_rx_irq = true;
56 module_param(separate_tx_rx_irq, bool, 0644);
57
58 /* The time that packets can stay on the guest Rx internal queue
59  * before they are dropped.
60  */
61 unsigned int rx_drain_timeout_msecs = 10000;
62 module_param(rx_drain_timeout_msecs, uint, 0444);
63
64 /* The length of time before the frontend is considered unresponsive
65  * because it isn't providing Rx slots.
66  */
67 unsigned int rx_stall_timeout_msecs = 60000;
68 module_param(rx_stall_timeout_msecs, uint, 0444);
69
70 #define MAX_QUEUES_DEFAULT 8
71 unsigned int xenvif_max_queues;
72 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
73 MODULE_PARM_DESC(max_queues,
74                  "Maximum number of queues per virtual interface");
75
76 /*
 77  * This is the maximum number of slots an skb can have. If a guest
 78  * sends an skb which exceeds this limit, it is considered malicious.
79  */
80 #define FATAL_SKB_SLOTS_DEFAULT 20
81 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
82 module_param(fatal_skb_slots, uint, 0444);
83
84 /* The amount to copy out of the first guest Tx slot into the skb's
85  * linear area.  If the first slot has more data, it will be mapped
86  * and put into the first frag.
87  *
88  * This is sized to avoid pulling headers from the frags for most
89  * TCP/IP packets.
90  */
91 #define XEN_NETBACK_TX_COPY_LEN 128
92
93 /* This is the maximum number of flows in the hash cache. */
94 #define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
95 unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
96 module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
97 MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
98
99 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
100                                u8 status);
101
102 static void make_tx_response(struct xenvif_queue *queue,
103                              struct xen_netif_tx_request *txp,
104                              unsigned int extra_count,
105                              s8       st);
106 static void push_tx_responses(struct xenvif_queue *queue);
107
108 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
109
110 static inline int tx_work_todo(struct xenvif_queue *queue);
111
112 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
113                                        u16 idx)
114 {
115         return page_to_pfn(queue->mmap_pages[idx]);
116 }
117
118 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
119                                          u16 idx)
120 {
121         return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
122 }
123
124 #define callback_param(vif, pending_idx) \
125         (vif->pending_tx_info[pending_idx].callback_struct)
126
 127 /* Find the containing queue's structure from a pointer into the pending_tx_info array
128  */
129 static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
130 {
131         u16 pending_idx = ubuf->desc;
132         struct pending_tx_info *temp =
133                 container_of(ubuf, struct pending_tx_info, callback_struct);
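        /* temp points at pending_tx_info[pending_idx]; stepping back
         * pending_idx entries yields &pending_tx_info[0], from which
         * container_of() recovers the queue.
         */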
134         return container_of(temp - pending_idx,
135                             struct xenvif_queue,
136                             pending_tx_info[0]);
137 }
138
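/* While a frag is only pending (not yet grant-mapped), it has no backing
 * page, so the pending index is stashed in the otherwise unused
 * page_offset field.
 */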
139 static u16 frag_get_pending_idx(skb_frag_t *frag)
140 {
141         return (u16)frag->page_offset;
142 }
143
144 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
145 {
146         frag->page_offset = pending_idx;
147 }
148
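/* MAX_PENDING_REQS is a power of two, so a free-running index can be
 * wrapped into the pending ring with a simple mask.
 */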
149 static inline pending_ring_idx_t pending_index(unsigned i)
150 {
151         return i & (MAX_PENDING_REQS-1);
152 }
153
154 void xenvif_kick_thread(struct xenvif_queue *queue)
155 {
156         wake_up(&queue->wq);
157 }
158
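/* If the frontend has queued more requests, keep NAPI polling; otherwise
 * signal end-of-interrupt so the event channel can be raised again.
 */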
159 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
160 {
161         int more_to_do;
162
163         RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
164
165         if (more_to_do)
166                 napi_schedule(&queue->napi);
167         else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
168                                      &queue->eoi_pending) &
169                  (NETBK_TX_EOI | NETBK_COMMON_EOI))
170                 xen_irq_lateeoi(queue->tx_irq, 0);
171 }
172
173 static void tx_add_credit(struct xenvif_queue *queue)
174 {
175         unsigned long max_burst, max_credit;
176
177         /*
178          * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
179          * Otherwise the interface can seize up due to insufficient credit.
180          */
181         max_burst = max(131072UL, queue->credit_bytes);
182
183         /* Take care that adding a new chunk of credit doesn't wrap to zero. */
184         max_credit = queue->remaining_credit + queue->credit_bytes;
185         if (max_credit < queue->remaining_credit)
186                 max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
187
188         queue->remaining_credit = min(max_credit, max_burst);
189         queue->rate_limited = false;
190 }
191
192 void xenvif_tx_credit_callback(struct timer_list *t)
193 {
194         struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
195         tx_add_credit(queue);
196         xenvif_napi_schedule_or_enable_events(queue);
197 }
198
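/* Send an error response for every remaining slot of a failed packet,
 * so that the whole multi-slot request is consumed from the ring.
 */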
199 static void xenvif_tx_err(struct xenvif_queue *queue,
200                           struct xen_netif_tx_request *txp,
201                           unsigned int extra_count, RING_IDX end)
202 {
203         RING_IDX cons = queue->tx.req_cons;
204         unsigned long flags;
205
206         do {
207                 spin_lock_irqsave(&queue->response_lock, flags);
208                 make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
209                 push_tx_responses(queue);
210                 spin_unlock_irqrestore(&queue->response_lock, flags);
211                 if (cons == end)
212                         break;
213                 RING_COPY_REQUEST(&queue->tx, cons++, txp);
214                 extra_count = 0; /* only the first frag can have extras */
215         } while (1);
216         queue->tx.req_cons = cons;
217 }
218
219 static void xenvif_fatal_tx_err(struct xenvif *vif)
220 {
221         netdev_err(vif->dev, "fatal error; disabling device\n");
222         vif->disabled = true;
223         /* Disable the vif from queue 0's kthread */
224         if (vif->num_queues)
225                 xenvif_kick_thread(&vif->queues[0]);
226 }
227
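/* Walk the follow-on slots of a multi-slot packet. Returns the number
 * of slots used by the frags following @first, or a negative errno if
 * the packet has to be dropped.
 */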
228 static int xenvif_count_requests(struct xenvif_queue *queue,
229                                  struct xen_netif_tx_request *first,
230                                  unsigned int extra_count,
231                                  struct xen_netif_tx_request *txp,
232                                  int work_to_do)
233 {
234         RING_IDX cons = queue->tx.req_cons;
235         int slots = 0;
236         int drop_err = 0;
237         int more_data;
238
239         if (!(first->flags & XEN_NETTXF_more_data))
240                 return 0;
241
242         do {
243                 struct xen_netif_tx_request dropped_tx = { 0 };
244
245                 if (slots >= work_to_do) {
246                         netdev_err(queue->vif->dev,
247                                    "Asked for %d slots but exceeds this limit\n",
248                                    work_to_do);
249                         xenvif_fatal_tx_err(queue->vif);
250                         return -ENODATA;
251                 }
252
 253                 /* This guest is really using too many slots and is
 254                  * considered malicious.
255                  */
256                 if (unlikely(slots >= fatal_skb_slots)) {
257                         netdev_err(queue->vif->dev,
258                                    "Malicious frontend using %d slots, threshold %u\n",
259                                    slots, fatal_skb_slots);
260                         xenvif_fatal_tx_err(queue->vif);
261                         return -E2BIG;
262                 }
263
 264                 /* The Xen network protocol had an implicit dependency
 265                  * on MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set
 266                  * to the historical MAX_SKB_FRAGS value of 18 to honor
 267                  * the same behavior as before. Any packet using more
 268                  * than 18 slots but fewer than fatal_skb_slots slots
 269                  * is dropped.
 270                  */
271                 if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
272                         if (net_ratelimit())
273                                 netdev_dbg(queue->vif->dev,
274                                            "Too many slots (%d) exceeding limit (%d), dropping packet\n",
275                                            slots, XEN_NETBK_LEGACY_SLOTS_MAX);
276                         drop_err = -E2BIG;
277                 }
278
279                 if (drop_err)
280                         txp = &dropped_tx;
281
282                 RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
283
284                 /* If the guest submitted a frame >= 64 KiB then
285                  * first->size overflowed and following slots will
286                  * appear to be larger than the frame.
287                  *
 288                  * This cannot be a fatal error, as there are buggy
289                  * frontends that do this.
290                  *
291                  * Consume all slots and drop the packet.
292                  */
293                 if (!drop_err && txp->size > first->size) {
294                         if (net_ratelimit())
295                                 netdev_dbg(queue->vif->dev,
296                                            "Invalid tx request, slot size %u > remaining size %u\n",
297                                            txp->size, first->size);
298                         drop_err = -EIO;
299                 }
300
301                 first->size -= txp->size;
302                 slots++;
303
304                 if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
305                         netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
306                                  txp->offset, txp->size);
307                         xenvif_fatal_tx_err(queue->vif);
308                         return -EINVAL;
309                 }
310
311                 more_data = txp->flags & XEN_NETTXF_more_data;
312
313                 if (!drop_err)
314                         txp++;
315
316         } while (more_data);
317
318         if (drop_err) {
319                 xenvif_tx_err(queue, first, extra_count, cons + slots);
320                 return drop_err;
321         }
322
323         return slots;
324 }
325
326
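/* Control data kept in skb->cb while a packet is in flight: one pending
 * index per grant copy into the linear area, plus a mask of the copies
 * that were split across a local page boundary.
 */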
327 struct xenvif_tx_cb {
328         u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
329         u8 copy_count;
330         u32 split_mask;
331 };
332
333 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
334 #define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
335 #define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
336
337 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
338                                            u16 pending_idx,
339                                            struct xen_netif_tx_request *txp,
340                                            unsigned int extra_count,
341                                            struct gnttab_map_grant_ref *mop)
342 {
343         queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
344         gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
345                           GNTMAP_host_map | GNTMAP_readonly,
346                           txp->gref, queue->vif->domid);
347
348         memcpy(&queue->pending_tx_info[pending_idx].req, txp,
349                sizeof(*txp));
350         queue->pending_tx_info[pending_idx].extra_count = extra_count;
351 }
352
353 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
354 {
355         struct sk_buff *skb =
356                 alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
357                           GFP_ATOMIC | __GFP_NOWARN);
358
359         BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
360         if (unlikely(skb == NULL))
361                 return NULL;
362
 363         /* Packets passed to the network stack must have some headroom. */
364         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
365
366         /* Initialize it here to avoid later surprises */
367         skb_shinfo(skb)->destructor_arg = NULL;
368
369         return skb;
370 }
371
372 static void xenvif_get_requests(struct xenvif_queue *queue,
373                                 struct sk_buff *skb,
374                                 struct xen_netif_tx_request *first,
375                                 struct xen_netif_tx_request *txfrags,
376                                 unsigned *copy_ops,
377                                 unsigned *map_ops,
378                                 unsigned int frag_overflow,
379                                 struct sk_buff *nskb,
380                                 unsigned int extra_count,
381                                 unsigned int data_len)
382 {
383         struct skb_shared_info *shinfo = skb_shinfo(skb);
384         skb_frag_t *frags = shinfo->frags;
385         u16 pending_idx;
386         pending_ring_idx_t index;
387         unsigned int nr_slots;
388         struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
389         struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
390         struct xen_netif_tx_request *txp = first;
391
392         nr_slots = shinfo->nr_frags + 1;
393
394         copy_count(skb) = 0;
395         XENVIF_TX_CB(skb)->split_mask = 0;
396
397         /* Create copy ops for exactly data_len bytes into the skb head. */
398         __skb_put(skb, data_len);
399         while (data_len > 0) {
400                 int amount = data_len > txp->size ? txp->size : data_len;
401                 bool split = false;
402
403                 cop->source.u.ref = txp->gref;
404                 cop->source.domid = queue->vif->domid;
405                 cop->source.offset = txp->offset;
406
407                 cop->dest.domid = DOMID_SELF;
408                 cop->dest.offset = (offset_in_page(skb->data +
409                                                    skb_headlen(skb) -
410                                                    data_len)) & ~XEN_PAGE_MASK;
411                 cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
412                                                - data_len);
413
414                 /* Don't cross local page boundary! */
415                 if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
416                         amount = XEN_PAGE_SIZE - cop->dest.offset;
417                         XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
418                         split = true;
419                 }
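                /* A split copy re-uses the same pending index for the
                 * following chunk: copy_count(skb) only advances once
                 * the slot is fully consumed, so the two copy ops are
                 * checked together in xenvif_tx_check_gop().
                 */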
420
421                 cop->len = amount;
422                 cop->flags = GNTCOPY_source_gref;
423
424                 index = pending_index(queue->pending_cons);
425                 pending_idx = queue->pending_ring[index];
426                 callback_param(queue, pending_idx).ctx = NULL;
427                 copy_pending_idx(skb, copy_count(skb)) = pending_idx;
428                 if (!split)
429                         copy_count(skb)++;
430
431                 cop++;
432                 data_len -= amount;
433
434                 if (amount == txp->size) {
435                         /* The copy op covered the full tx_request */
436
437                         memcpy(&queue->pending_tx_info[pending_idx].req,
438                                txp, sizeof(*txp));
439                         queue->pending_tx_info[pending_idx].extra_count =
440                                 (txp == first) ? extra_count : 0;
441
442                         if (txp == first)
443                                 txp = txfrags;
444                         else
445                                 txp++;
446                         queue->pending_cons++;
447                         nr_slots--;
448                 } else {
449                         /* The copy op partially covered the tx_request.
450                          * The remainder will be mapped or copied in the next
451                          * iteration.
452                          */
453                         txp->offset += amount;
454                         txp->size -= amount;
455                 }
456         }
457
458         for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
459              shinfo->nr_frags++, gop++) {
460                 index = pending_index(queue->pending_cons++);
461                 pending_idx = queue->pending_ring[index];
462                 xenvif_tx_create_map_op(queue, pending_idx, txp,
463                                         txp == first ? extra_count : 0, gop);
464                 frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
465
466                 if (txp == first)
467                         txp = txfrags;
468                 else
469                         txp++;
470         }
471
472         if (frag_overflow) {
473
474                 shinfo = skb_shinfo(nskb);
475                 frags = shinfo->frags;
476
477                 for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
478                      shinfo->nr_frags++, txp++, gop++) {
479                         index = pending_index(queue->pending_cons++);
480                         pending_idx = queue->pending_ring[index];
481                         xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
482                                                 gop);
483                         frag_set_pending_idx(&frags[shinfo->nr_frags],
484                                              pending_idx);
485                 }
486
487                 skb_shinfo(skb)->frag_list = nskb;
488         }
489
490         (*copy_ops) = cop - queue->tx_copy_ops;
491         (*map_ops) = gop - queue->tx_map_ops;
492 }
493
494 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
495                                            u16 pending_idx,
496                                            grant_handle_t handle)
497 {
498         if (unlikely(queue->grant_tx_handle[pending_idx] !=
499                      NETBACK_INVALID_HANDLE)) {
500                 netdev_err(queue->vif->dev,
501                            "Trying to overwrite active handle! pending_idx: 0x%x\n",
502                            pending_idx);
503                 BUG();
504         }
505         queue->grant_tx_handle[pending_idx] = handle;
506 }
507
508 static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
509                                              u16 pending_idx)
510 {
511         if (unlikely(queue->grant_tx_handle[pending_idx] ==
512                      NETBACK_INVALID_HANDLE)) {
513                 netdev_err(queue->vif->dev,
514                            "Trying to unmap invalid handle! pending_idx: 0x%x\n",
515                            pending_idx);
516                 BUG();
517         }
518         queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
519 }
520
521 static int xenvif_tx_check_gop(struct xenvif_queue *queue,
522                                struct sk_buff *skb,
523                                struct gnttab_map_grant_ref **gopp_map,
524                                struct gnttab_copy **gopp_copy)
525 {
526         struct gnttab_map_grant_ref *gop_map = *gopp_map;
527         u16 pending_idx;
528         /* This always points to the shinfo of the skb being checked, which
529          * could be either the first or the one on the frag_list
530          */
531         struct skb_shared_info *shinfo = skb_shinfo(skb);
532         /* If this is non-NULL, we are currently checking the frag_list skb, and
533          * this points to the shinfo of the first one
534          */
535         struct skb_shared_info *first_shinfo = NULL;
536         int nr_frags = shinfo->nr_frags;
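        /* The last header copy may target the same slot that is also
         * mapped as the first frag; such a shared slot must only be
         * released once.
         */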
537         const bool sharedslot = nr_frags &&
538                                 frag_get_pending_idx(&shinfo->frags[0]) ==
539                                     copy_pending_idx(skb, copy_count(skb) - 1);
540         int i, err = 0;
541
542         for (i = 0; i < copy_count(skb); i++) {
543                 int newerr;
544
545                 /* Check status of header. */
546                 pending_idx = copy_pending_idx(skb, i);
547
548                 newerr = (*gopp_copy)->status;
549
550                 /* Split copies need to be handled together. */
551                 if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
552                         (*gopp_copy)++;
553                         if (!newerr)
554                                 newerr = (*gopp_copy)->status;
555                 }
556                 if (likely(!newerr)) {
557                         /* The first frag might still have this slot mapped */
558                         if (i < copy_count(skb) - 1 || !sharedslot)
559                                 xenvif_idx_release(queue, pending_idx,
560                                                    XEN_NETIF_RSP_OKAY);
561                 } else {
562                         err = newerr;
563                         if (net_ratelimit())
564                                 netdev_dbg(queue->vif->dev,
565                                            "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
566                                            (*gopp_copy)->status,
567                                            pending_idx,
568                                            (*gopp_copy)->source.u.ref);
569                         /* The first frag might still have this slot mapped */
570                         if (i < copy_count(skb) - 1 || !sharedslot)
571                                 xenvif_idx_release(queue, pending_idx,
572                                                    XEN_NETIF_RSP_ERROR);
573                 }
574                 (*gopp_copy)++;
575         }
576
577 check_frags:
578         for (i = 0; i < nr_frags; i++, gop_map++) {
579                 int j, newerr;
580
581                 pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
582
583                 /* Check error status: if okay then remember grant handle. */
584                 newerr = gop_map->status;
585
586                 if (likely(!newerr)) {
587                         xenvif_grant_handle_set(queue,
588                                                 pending_idx,
589                                                 gop_map->handle);
590                         /* Had a previous error? Invalidate this fragment. */
591                         if (unlikely(err)) {
592                                 xenvif_idx_unmap(queue, pending_idx);
593                                 /* If the mapping of the first frag was OK, but
594                                  * the header's copy failed, and they are
595                                  * sharing a slot, send an error
596                                  */
597                                 if (i == 0 && !first_shinfo && sharedslot)
598                                         xenvif_idx_release(queue, pending_idx,
599                                                            XEN_NETIF_RSP_ERROR);
600                                 else
601                                         xenvif_idx_release(queue, pending_idx,
602                                                            XEN_NETIF_RSP_OKAY);
603                         }
604                         continue;
605                 }
606
 607                 /* Error on this fragment: respond to the client with an error. */
608                 if (net_ratelimit())
609                         netdev_dbg(queue->vif->dev,
610                                    "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
611                                    i,
612                                    gop_map->status,
613                                    pending_idx,
614                                    gop_map->ref);
615
616                 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
617
618                 /* Not the first error? Preceding frags already invalidated. */
619                 if (err)
620                         continue;
621
622                 /* Invalidate preceding fragments of this skb. */
623                 for (j = 0; j < i; j++) {
624                         pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
625                         xenvif_idx_unmap(queue, pending_idx);
626                         xenvif_idx_release(queue, pending_idx,
627                                            XEN_NETIF_RSP_OKAY);
628                 }
629
630                 /* And if we found the error while checking the frag_list, unmap
631                  * the first skb's frags
632                  */
633                 if (first_shinfo) {
634                         for (j = 0; j < first_shinfo->nr_frags; j++) {
635                                 pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
636                                 xenvif_idx_unmap(queue, pending_idx);
637                                 xenvif_idx_release(queue, pending_idx,
638                                                    XEN_NETIF_RSP_OKAY);
639                         }
640                 }
641
642                 /* Remember the error: invalidate all subsequent fragments. */
643                 err = newerr;
644         }
645
646         if (skb_has_frag_list(skb) && !first_shinfo) {
647                 first_shinfo = skb_shinfo(skb);
648                 shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
649                 nr_frags = shinfo->nr_frags;
650
651                 goto check_frags;
652         }
653
654         *gopp_map = gop_map;
655         return err;
656 }
657
658 static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
659 {
660         struct skb_shared_info *shinfo = skb_shinfo(skb);
661         int nr_frags = shinfo->nr_frags;
662         int i;
663         u16 prev_pending_idx = INVALID_PENDING_IDX;
664
665         for (i = 0; i < nr_frags; i++) {
666                 skb_frag_t *frag = shinfo->frags + i;
667                 struct xen_netif_tx_request *txp;
668                 struct page *page;
669                 u16 pending_idx;
670
671                 pending_idx = frag_get_pending_idx(frag);
672
 673                 /* If this is not the first frag, chain it to the previous */
674                 if (prev_pending_idx == INVALID_PENDING_IDX)
675                         skb_shinfo(skb)->destructor_arg =
676                                 &callback_param(queue, pending_idx);
677                 else
678                         callback_param(queue, prev_pending_idx).ctx =
679                                 &callback_param(queue, pending_idx);
680
681                 callback_param(queue, pending_idx).ctx = NULL;
682                 prev_pending_idx = pending_idx;
683
684                 txp = &queue->pending_tx_info[pending_idx].req;
685                 page = virt_to_page(idx_to_kaddr(queue, pending_idx));
686                 __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
687                 skb->len += txp->size;
688                 skb->data_len += txp->size;
689                 skb->truesize += txp->size;
690
 691                 /* Take an extra reference to offset the network stack's put_page */
692                 get_page(queue->mmap_pages[pending_idx]);
693         }
694 }
695
696 static int xenvif_get_extras(struct xenvif_queue *queue,
697                              struct xen_netif_extra_info *extras,
698                              unsigned int *extra_count,
699                              int work_to_do)
700 {
701         struct xen_netif_extra_info extra;
702         RING_IDX cons = queue->tx.req_cons;
703
704         do {
705                 if (unlikely(work_to_do-- <= 0)) {
706                         netdev_err(queue->vif->dev, "Missing extra info\n");
707                         xenvif_fatal_tx_err(queue->vif);
708                         return -EBADR;
709                 }
710
711                 RING_COPY_REQUEST(&queue->tx, cons, &extra);
712
713                 queue->tx.req_cons = ++cons;
714                 (*extra_count)++;
715
716                 if (unlikely(!extra.type ||
717                              extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
718                         netdev_err(queue->vif->dev,
719                                    "Invalid extra type: %d\n", extra.type);
720                         xenvif_fatal_tx_err(queue->vif);
721                         return -EINVAL;
722                 }
723
724                 memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
725         } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
726
727         return work_to_do;
728 }
729
730 static int xenvif_set_skb_gso(struct xenvif *vif,
731                               struct sk_buff *skb,
732                               struct xen_netif_extra_info *gso)
733 {
734         if (!gso->u.gso.size) {
735                 netdev_err(vif->dev, "GSO size must not be zero.\n");
736                 xenvif_fatal_tx_err(vif);
737                 return -EINVAL;
738         }
739
740         switch (gso->u.gso.type) {
741         case XEN_NETIF_GSO_TYPE_TCPV4:
742                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
743                 break;
744         case XEN_NETIF_GSO_TYPE_TCPV6:
745                 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
746                 break;
747         default:
748                 netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
749                 xenvif_fatal_tx_err(vif);
750                 return -EINVAL;
751         }
752
753         skb_shinfo(skb)->gso_size = gso->u.gso.size;
754         /* gso_segs will be calculated later */
755
756         return 0;
757 }
758
759 static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
760 {
761         bool recalculate_partial_csum = false;
762
763         /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
764          * peers can fail to set NETRXF_csum_blank when sending a GSO
765          * frame. In this case force the SKB to CHECKSUM_PARTIAL and
766          * recalculate the partial checksum.
767          */
768         if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
769                 queue->stats.rx_gso_checksum_fixup++;
770                 skb->ip_summed = CHECKSUM_PARTIAL;
771                 recalculate_partial_csum = true;
772         }
773
774         /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
775         if (skb->ip_summed != CHECKSUM_PARTIAL)
776                 return 0;
777
778         return skb_checksum_setup(skb, recalculate_partial_csum);
779 }
780
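/* Credit-based rate limiting: a queue may consume up to credit_bytes of
 * credit per credit_usec window. Returns true if this packet has to
 * wait for the next replenish.
 */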
781 static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
782 {
783         u64 now = get_jiffies_64();
784         u64 next_credit = queue->credit_window_start +
785                 msecs_to_jiffies(queue->credit_usec / 1000);
786
787         /* Timer could already be pending in rare cases. */
788         if (timer_pending(&queue->credit_timeout)) {
789                 queue->rate_limited = true;
790                 return true;
791         }
792
793         /* Passed the point where we can replenish credit? */
794         if (time_after_eq64(now, next_credit)) {
795                 queue->credit_window_start = now;
796                 tx_add_credit(queue);
797         }
798
799         /* Still too big to send right now? Set a callback. */
800         if (size > queue->remaining_credit) {
801                 mod_timer(&queue->credit_timeout,
802                           next_credit);
803                 queue->credit_window_start = next_credit;
804                 queue->rate_limited = true;
805
806                 return true;
807         }
808
809         return false;
810 }
811
812 /* No locking is required in xenvif_mcast_add/del() as they are
813  * only ever invoked from NAPI poll. An RCU list is used because
814  * xenvif_mcast_match() is called asynchronously, during start_xmit.
815  */
816
817 static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
818 {
819         struct xenvif_mcast_addr *mcast;
820
821         if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
822                 if (net_ratelimit())
823                         netdev_err(vif->dev,
824                                    "Too many multicast addresses\n");
825                 return -ENOSPC;
826         }
827
828         mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
829         if (!mcast)
830                 return -ENOMEM;
831
832         ether_addr_copy(mcast->addr, addr);
833         list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
834         vif->fe_mcast_count++;
835
836         return 0;
837 }
838
839 static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
840 {
841         struct xenvif_mcast_addr *mcast;
842
843         list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
844                 if (ether_addr_equal(addr, mcast->addr)) {
845                         --vif->fe_mcast_count;
846                         list_del_rcu(&mcast->entry);
847                         kfree_rcu(mcast, rcu);
848                         break;
849                 }
850         }
851 }
852
853 bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
854 {
855         struct xenvif_mcast_addr *mcast;
856
857         rcu_read_lock();
858         list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
859                 if (ether_addr_equal(addr, mcast->addr)) {
860                         rcu_read_unlock();
861                         return true;
862                 }
863         }
864         rcu_read_unlock();
865
866         return false;
867 }
868
869 void xenvif_mcast_addr_list_free(struct xenvif *vif)
870 {
871         /* No need for locking or RCU here. NAPI poll and TX queue
872          * are stopped.
873          */
874         while (!list_empty(&vif->fe_mcast_addr)) {
875                 struct xenvif_mcast_addr *mcast;
876
877                 mcast = list_first_entry(&vif->fe_mcast_addr,
878                                          struct xenvif_mcast_addr,
879                                          entry);
880                 --vif->fe_mcast_count;
881                 list_del(&mcast->entry);
882                 kfree(mcast);
883         }
884 }
885
886 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
887                                      int budget,
888                                      unsigned *copy_ops,
889                                      unsigned *map_ops)
890 {
891         struct sk_buff *skb, *nskb;
892         int ret;
893         unsigned int frag_overflow;
894
895         while (skb_queue_len(&queue->tx_queue) < budget) {
896                 struct xen_netif_tx_request txreq;
897                 struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
898                 struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
899                 unsigned int extra_count;
900                 u16 pending_idx;
901                 RING_IDX idx;
902                 int work_to_do;
903                 unsigned int data_len;
904                 pending_ring_idx_t index;
905
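                /* Guard against a buggy or malicious frontend that
                 * advances req_prod beyond the ring size.
                 */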
906                 if (queue->tx.sring->req_prod - queue->tx.req_cons >
907                     XEN_NETIF_TX_RING_SIZE) {
908                         netdev_err(queue->vif->dev,
909                                    "Impossible number of requests. "
910                                    "req_prod %d, req_cons %d, size %ld\n",
911                                    queue->tx.sring->req_prod, queue->tx.req_cons,
912                                    XEN_NETIF_TX_RING_SIZE);
913                         xenvif_fatal_tx_err(queue->vif);
914                         break;
915                 }
916
917                 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
918                 if (!work_to_do)
919                         break;
920
921                 idx = queue->tx.req_cons;
922                 rmb(); /* Ensure that we see the request before we copy it. */
923                 RING_COPY_REQUEST(&queue->tx, idx, &txreq);
924
925                 /* Credit-based scheduling. */
926                 if (txreq.size > queue->remaining_credit &&
927                     tx_credit_exceeded(queue, txreq.size))
928                         break;
929
930                 queue->remaining_credit -= txreq.size;
931
932                 work_to_do--;
933                 queue->tx.req_cons = ++idx;
934
935                 memset(extras, 0, sizeof(extras));
936                 extra_count = 0;
937                 if (txreq.flags & XEN_NETTXF_extra_info) {
938                         work_to_do = xenvif_get_extras(queue, extras,
939                                                        &extra_count,
940                                                        work_to_do);
941                         idx = queue->tx.req_cons;
942                         if (unlikely(work_to_do < 0))
943                                 break;
944                 }
945
946                 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
947                         struct xen_netif_extra_info *extra;
948
949                         extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
950                         ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
951
952                         make_tx_response(queue, &txreq, extra_count,
953                                          (ret == 0) ?
954                                          XEN_NETIF_RSP_OKAY :
955                                          XEN_NETIF_RSP_ERROR);
956                         push_tx_responses(queue);
957                         continue;
958                 }
959
960                 if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
961                         struct xen_netif_extra_info *extra;
962
963                         extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
964                         xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
965
966                         make_tx_response(queue, &txreq, extra_count,
967                                          XEN_NETIF_RSP_OKAY);
968                         push_tx_responses(queue);
969                         continue;
970                 }
971
972                 data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
973                         XEN_NETBACK_TX_COPY_LEN : txreq.size;
974
975                 ret = xenvif_count_requests(queue, &txreq, extra_count,
976                                             txfrags, work_to_do);
977
978                 if (unlikely(ret < 0))
979                         break;
980
981                 idx += ret;
982
983                 if (unlikely(txreq.size < ETH_HLEN)) {
984                         netdev_dbg(queue->vif->dev,
985                                    "Bad packet size: %d\n", txreq.size);
986                         xenvif_tx_err(queue, &txreq, extra_count, idx);
987                         break;
988                 }
989
 990                 /* The payload must not cross a page boundary, as it must not be fragmented. */
991                 if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
992                         netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
993                                    txreq.offset, txreq.size);
994                         xenvif_fatal_tx_err(queue->vif);
995                         break;
996                 }
997
998                 index = pending_index(queue->pending_cons);
999                 pending_idx = queue->pending_ring[index];
1000
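                /* If the frame may need the maximum number of slots,
                 * copy the whole first request into the linear area so
                 * that no protocol headers end up in the mapped frags.
                 */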
1001                 if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
1002                         data_len = txreq.size;
1003
1004                 skb = xenvif_alloc_skb(data_len);
1005                 if (unlikely(skb == NULL)) {
1006                         netdev_dbg(queue->vif->dev,
1007                                    "Can't allocate a skb in start_xmit.\n");
1008                         xenvif_tx_err(queue, &txreq, extra_count, idx);
1009                         break;
1010                 }
1011
1012                 skb_shinfo(skb)->nr_frags = ret;
1013                 /* At this point shinfo->nr_frags is in fact the number of
1014                  * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1015                  */
1016                 frag_overflow = 0;
1017                 nskb = NULL;
1018                 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1019                         frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1020                         BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1021                         skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1022                         nskb = xenvif_alloc_skb(0);
1023                         if (unlikely(nskb == NULL)) {
1024                                 skb_shinfo(skb)->nr_frags = 0;
1025                                 kfree_skb(skb);
1026                                 xenvif_tx_err(queue, &txreq, extra_count, idx);
1027                                 if (net_ratelimit())
1028                                         netdev_err(queue->vif->dev,
1029                                                    "Can't allocate the frag_list skb.\n");
1030                                 break;
1031                         }
1032                 }
1033
1034                 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1035                         struct xen_netif_extra_info *gso;
1036                         gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1037
1038                         if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1039                                 /* Failure in xenvif_set_skb_gso is fatal. */
1040                                 skb_shinfo(skb)->nr_frags = 0;
1041                                 kfree_skb(skb);
1042                                 kfree_skb(nskb);
1043                                 break;
1044                         }
1045                 }
1046
1047                 if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
1048                         struct xen_netif_extra_info *extra;
1049                         enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
1050
1051                         extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
1052
1053                         switch (extra->u.hash.type) {
1054                         case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
1055                         case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
1056                                 type = PKT_HASH_TYPE_L3;
1057                                 break;
1058
1059                         case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
1060                         case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
1061                                 type = PKT_HASH_TYPE_L4;
1062                                 break;
1063
1064                         default:
1065                                 break;
1066                         }
1067
1068                         if (type != PKT_HASH_TYPE_NONE)
1069                                 skb_set_hash(skb,
1070                                              *(u32 *)extra->u.hash.value,
1071                                              type);
1072                 }
1073
1074                 xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
1075                                     map_ops, frag_overflow, nskb, extra_count,
1076                                     data_len);
1077
1078                 __skb_queue_tail(&queue->tx_queue, skb);
1079
1080                 queue->tx.req_cons = idx;
1081
1082                 if ((*map_ops >= ARRAY_SIZE(queue->tx_map_ops)) ||
1083                     (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1084                         break;
1085         }
1086
1087         return;
1088 }
1089
 1090 /* Consolidate an skb with a frag_list into a brand new one with local pages
 1091  * on the frags. Returns 0, or -ENOMEM if new pages can't be allocated.
1092  */
1093 static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1094 {
1095         unsigned int offset = skb_headlen(skb);
1096         skb_frag_t frags[MAX_SKB_FRAGS];
1097         int i, f;
1098         struct ubuf_info *uarg;
1099         struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1100
1101         queue->stats.tx_zerocopy_sent += 2;
1102         queue->stats.tx_frag_overflow++;
1103
1104         xenvif_fill_frags(queue, nskb);
 1105         /* Subtract the frags' size; it is corrected again later */
1106         skb->truesize -= skb->data_len;
1107         skb->len += nskb->len;
1108         skb->data_len += nskb->len;
1109
 1110         /* Create a brand new frags array and coalesce into it */
1111         for (i = 0; offset < skb->len; i++) {
1112                 struct page *page;
1113                 unsigned int len;
1114
1115                 BUG_ON(i >= MAX_SKB_FRAGS);
1116                 page = alloc_page(GFP_ATOMIC);
1117                 if (!page) {
1118                         int j;
1119                         skb->truesize += skb->data_len;
1120                         for (j = 0; j < i; j++)
1121                                 put_page(frags[j].page.p);
1122                         return -ENOMEM;
1123                 }
1124
1125                 if (offset + PAGE_SIZE < skb->len)
1126                         len = PAGE_SIZE;
1127                 else
1128                         len = skb->len - offset;
1129                 if (skb_copy_bits(skb, offset, page_address(page), len))
1130                         BUG();
1131
1132                 offset += len;
1133                 frags[i].page.p = page;
1134                 frags[i].page_offset = 0;
1135                 skb_frag_size_set(&frags[i], len);
1136         }
1137
1138         /* Release all the original (foreign) frags. */
1139         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
1140                 skb_frag_unref(skb, f);
1141         uarg = skb_shinfo(skb)->destructor_arg;
 1142         /* Increase the inflight counter to offset the decrement in the callback */
1143         atomic_inc(&queue->inflight_packets);
1144         uarg->callback(uarg, true);
1145         skb_shinfo(skb)->destructor_arg = NULL;
1146
1147         /* Fill the skb with the new (local) frags. */
1148         memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1149         skb_shinfo(skb)->nr_frags = i;
1150         skb->truesize += i * PAGE_SIZE;
1151
1152         return 0;
1153 }
1154
1155 static int xenvif_tx_submit(struct xenvif_queue *queue)
1156 {
1157         struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1158         struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1159         struct sk_buff *skb;
1160         int work_done = 0;
1161
1162         while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1163                 struct xen_netif_tx_request *txp;
1164                 u16 pending_idx;
1165
1166                 pending_idx = copy_pending_idx(skb, 0);
1167                 txp = &queue->pending_tx_info[pending_idx].req;
1168
1169                 /* Check the remap error code. */
1170                 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1171                         /* If there was an error, xenvif_tx_check_gop is
1172                          * expected to release all the frags which were mapped,
1173                          * so kfree_skb shouldn't do it again
1174                          */
1175                         skb_shinfo(skb)->nr_frags = 0;
1176                         if (skb_has_frag_list(skb)) {
1177                                 struct sk_buff *nskb =
1178                                                 skb_shinfo(skb)->frag_list;
1179                                 skb_shinfo(nskb)->nr_frags = 0;
1180                         }
1181                         kfree_skb(skb);
1182                         continue;
1183                 }
1184
1185                 if (txp->flags & XEN_NETTXF_csum_blank)
1186                         skb->ip_summed = CHECKSUM_PARTIAL;
1187                 else if (txp->flags & XEN_NETTXF_data_validated)
1188                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1189
1190                 xenvif_fill_frags(queue, skb);
1191
1192                 if (unlikely(skb_has_frag_list(skb))) {
1193                         struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1194                         xenvif_skb_zerocopy_prepare(queue, nskb);
1195                         if (xenvif_handle_frag_list(queue, skb)) {
1196                                 if (net_ratelimit())
1197                                         netdev_err(queue->vif->dev,
1198                                                    "Not enough memory to consolidate frag_list!\n");
1199                                 xenvif_skb_zerocopy_prepare(queue, skb);
1200                                 kfree_skb(skb);
1201                                 continue;
1202                         }
1203                         /* Copied all the bits from the frag list -- free it. */
1204                         skb_frag_list_init(skb);
1205                         kfree_skb(nskb);
1206                 }
1207
1208                 skb->dev      = queue->vif->dev;
1209                 skb->protocol = eth_type_trans(skb, skb->dev);
1210                 skb_reset_network_header(skb);
1211
1212                 if (checksum_setup(queue, skb)) {
1213                         netdev_dbg(queue->vif->dev,
1214                                    "Can't setup checksum in net_tx_action\n");
1215                         /* We have to set this flag to trigger the callback */
1216                         if (skb_shinfo(skb)->destructor_arg)
1217                                 xenvif_skb_zerocopy_prepare(queue, skb);
1218                         kfree_skb(skb);
1219                         continue;
1220                 }
1221
1222                 skb_probe_transport_header(skb, 0);
1223
1224                 /* If the packet is GSO then we will have just set up the
1225                  * transport header offset in checksum_setup so it's now
1226                  * straightforward to calculate gso_segs.
1227                  */
1228                 if (skb_is_gso(skb)) {
1229                         int mss = skb_shinfo(skb)->gso_size;
1230                         int hdrlen = skb_transport_header(skb) -
1231                                 skb_mac_header(skb) +
1232                                 tcp_hdrlen(skb);
1233
1234                         skb_shinfo(skb)->gso_segs =
1235                                 DIV_ROUND_UP(skb->len - hdrlen, mss);
1236                 }
1237
1238                 queue->stats.rx_bytes += skb->len;
1239                 queue->stats.rx_packets++;
1240
1241                 work_done++;
1242
1243                 /* Set this flag right before netif_receive_skb, otherwise
 1244                  * someone might think this packet has already left netback, and
 1245                  * do an skb_copy_ubufs while we are still in control of the
 1246                  * skb. E.g. an earlier __pskb_pull_tail could do such a thing.
1247                  */
1248                 if (skb_shinfo(skb)->destructor_arg) {
1249                         xenvif_skb_zerocopy_prepare(queue, skb);
1250                         queue->stats.tx_zerocopy_sent++;
1251                 }
1252
1253                 netif_receive_skb(skb);
1254         }
1255
1256         return work_done;
1257 }
1258
1259 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1260 {
1261         unsigned long flags;
1262         pending_ring_idx_t index;
1263         struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1264
1265         /* This is the only place where we grab this lock, to protect callbacks
1266          * from each other.
1267          */
1268         spin_lock_irqsave(&queue->callback_lock, flags);
1269         do {
1270                 u16 pending_idx = ubuf->desc;
1271                 ubuf = (struct ubuf_info *) ubuf->ctx;
1272                 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1273                         MAX_PENDING_REQS);
1274                 index = pending_index(queue->dealloc_prod);
1275                 queue->dealloc_ring[index] = pending_idx;
1276                 /* Sync with xenvif_tx_dealloc_action:
1277                  * insert idx then incr producer.
1278                  */
1279                 smp_wmb();
1280                 queue->dealloc_prod++;
1281         } while (ubuf);
1282         spin_unlock_irqrestore(&queue->callback_lock, flags);
1283
1284         if (likely(zerocopy_success))
1285                 queue->stats.tx_zerocopy_success++;
1286         else
1287                 queue->stats.tx_zerocopy_fail++;
1288         xenvif_skb_zerocopy_complete(queue);
1289 }
1290
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
	struct gnttab_unmap_grant_ref *gop;
	pending_ring_idx_t dc, dp;
	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
	unsigned int i = 0;

	dc = queue->dealloc_cons;
	gop = queue->tx_unmap_ops;

	/* Free up any grants we have finished using */
	do {
		dp = queue->dealloc_prod;

		/* Ensure we see all indices enqueued by all
		 * xenvif_zerocopy_callback() calls.
		 */
		smp_rmb();

		while (dc != dp) {
			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
			pending_idx =
				queue->dealloc_ring[pending_index(dc++)];

			pending_idx_release[gop - queue->tx_unmap_ops] =
				pending_idx;
			queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
				queue->mmap_pages[pending_idx];
			gnttab_set_unmap_op(gop,
					    idx_to_kaddr(queue, pending_idx),
					    GNTMAP_host_map,
					    queue->grant_tx_handle[pending_idx]);
			xenvif_grant_handle_reset(queue, pending_idx);
			++gop;
		}

	} while (dp != queue->dealloc_prod);

	queue->dealloc_cons = dc;

	if (gop - queue->tx_unmap_ops > 0) {
		int ret;
		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
					NULL,
					queue->pages_to_unmap,
					gop - queue->tx_unmap_ops);
		if (ret) {
			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
				   gop - queue->tx_unmap_ops, ret);
			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
				if (gop[i].status != GNTST_okay)
					netdev_err(queue->vif->dev,
						   " host_addr: 0x%llx handle: 0x%x status: %d\n",
						   gop[i].host_addr,
						   gop[i].handle,
						   gop[i].status);
			}
			BUG();
		}
	}

	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
		xenvif_idx_release(queue, pending_idx_release[i],
				   XEN_NETIF_RSP_OKAY);
}

/* Called after netfront has transmitted */
int xenvif_tx_action(struct xenvif_queue *queue, int budget)
{
	unsigned int nr_mops = 0, nr_cops = 0;
	int work_done, ret;

	if (unlikely(!tx_work_todo(queue)))
		return 0;

	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);

	if (nr_cops == 0)
		return 0;

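	/* The copy ops fill the skbs' linear areas (at least part of the
	 * first slot is always copied); the map ops grant-map any remaining
	 * slots as frags rather than copying them.
	 */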
	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
	if (nr_mops != 0) {
		ret = gnttab_map_refs(queue->tx_map_ops,
				      NULL,
				      queue->pages_to_map,
				      nr_mops);
		if (ret) {
			unsigned int i;

			netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
				   nr_mops, ret);
			for (i = 0; i < nr_mops; ++i)
				WARN_ON_ONCE(queue->tx_map_ops[i].status ==
					     GNTST_okay);
		}
	}

	work_done = xenvif_tx_submit(queue);

	return work_done;
}

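/* Hand a pending slot back to the free pending ring and send the Tx
 * response for it. response_lock serializes this against other callers so
 * the response producer and the pending ring stay consistent.
 */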
static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
			       u8 status)
{
	struct pending_tx_info *pending_tx_info;
	pending_ring_idx_t index;
	unsigned long flags;

	pending_tx_info = &queue->pending_tx_info[pending_idx];

	spin_lock_irqsave(&queue->response_lock, flags);

	make_tx_response(queue, &pending_tx_info->req,
			 pending_tx_info->extra_count, status);

	/* Release the pending index before pushing the Tx response so
	 * it's available before a new Tx request is pushed by the
	 * frontend.
	 */
	index = pending_index(queue->pending_prod++);
	queue->pending_ring[index] = pending_idx;

	push_tx_responses(queue);

	spin_unlock_irqrestore(&queue->response_lock, flags);
}

static void make_tx_response(struct xenvif_queue *queue,
			     struct xen_netif_tx_request *txp,
			     unsigned int extra_count,
			     s8       st)
{
	RING_IDX i = queue->tx.rsp_prod_pvt;
	struct xen_netif_tx_response *resp;

	resp = RING_GET_RESPONSE(&queue->tx, i);
	resp->id     = txp->id;
	resp->status = st;

	/* Extra info slots consumed along with the request are acknowledged
	 * with XEN_NETIF_RSP_NULL responses.
	 */
	while (extra_count-- != 0)
		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;

	queue->tx.rsp_prod_pvt = ++i;
}

static void push_tx_responses(struct xenvif_queue *queue)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
	if (notify)
		notify_remote_via_irq(queue->tx_irq);
}

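/* Synchronously unmap a single grant-mapped Tx slot. A failure here means
 * the grant handle bookkeeping for this slot is broken, so the operation is
 * dumped and we BUG() rather than continue with an unusable page.
 */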
static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
{
	int ret;
	struct gnttab_unmap_grant_ref tx_unmap_op;

	gnttab_set_unmap_op(&tx_unmap_op,
			    idx_to_kaddr(queue, pending_idx),
			    GNTMAP_host_map,
			    queue->grant_tx_handle[pending_idx]);
	xenvif_grant_handle_reset(queue, pending_idx);

	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
				&queue->mmap_pages[pending_idx], 1);
	if (ret) {
		netdev_err(queue->vif->dev,
			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
			   ret,
			   pending_idx,
			   tx_unmap_op.host_addr,
			   tx_unmap_op.handle,
			   tx_unmap_op.status);
		BUG();
	}
}

static inline int tx_work_todo(struct xenvif_queue *queue)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
		return 1;

	return 0;
}

static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
{
	return queue->dealloc_cons != queue->dealloc_prod;
}

void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
{
	if (queue->tx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->tx.sring);
	if (queue->rx.sring)
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
					queue->rx.sring);
}

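/* Map the Tx and Rx shared rings granted by the frontend into the backend's
 * address space and initialize the backend's view of both rings. On any
 * failure, whatever has been mapped so far is torn down again.
 */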
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref)
{
	void *addr;
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;

	int err = -ENOMEM;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &tx_ring_ref, 1, &addr);
	if (err)
		goto err;

	txs = (struct xen_netif_tx_sring *)addr;
	BACK_RING_INIT(&queue->tx, txs, XEN_PAGE_SIZE);

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
				     &rx_ring_ref, 1, &addr);
	if (err)
		goto err;

	rxs = (struct xen_netif_rx_sring *)addr;
	BACK_RING_INIT(&queue->rx, rxs, XEN_PAGE_SIZE);

	return 0;

err:
	xenvif_unmap_frontend_data_rings(queue);
	return err;
}

static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
{
	/* Dealloc thread must remain running until all inflight
	 * packets complete.
	 */
	return kthread_should_stop() &&
		!atomic_read(&queue->inflight_packets);
}

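/* Per-queue kernel thread performing the deferred grant unmaps queued by
 * xenvif_zerocopy_callback(). It only exits once no packets remain in
 * flight, and makes one final pass afterwards so nothing is left mapped.
 */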
int xenvif_dealloc_kthread(void *data)
{
	struct xenvif_queue *queue = data;

	for (;;) {
		wait_event_interruptible(queue->dealloc_wq,
					 tx_dealloc_work_todo(queue) ||
					 xenvif_dealloc_kthread_should_stop(queue));
		if (xenvif_dealloc_kthread_should_stop(queue))
			break;

		xenvif_tx_dealloc_action(queue);
		cond_resched();
	}

	/* Unmap anything remaining */
	if (tx_dealloc_work_todo(queue))
		xenvif_tx_dealloc_action(queue);

	return 0;
}

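/* Queue a response on the control ring. The response only becomes visible
 * to the frontend once push_ctrl_response() advances the shared producer.
 */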
static void make_ctrl_response(struct xenvif *vif,
			       const struct xen_netif_ctrl_request *req,
			       u32 status, u32 data)
{
	RING_IDX idx = vif->ctrl.rsp_prod_pvt;
	struct xen_netif_ctrl_response rsp = {
		.id = req->id,
		.type = req->type,
		.status = status,
		.data = data,
	};

	*RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
	vif->ctrl.rsp_prod_pvt = ++idx;
}

static void push_ctrl_response(struct xenvif *vif)
{
	int notify;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
	if (notify)
		notify_remote_via_irq(vif->ctrl_irq);
}

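/* Dispatch a single control-ring request. Only the hash configuration
 * operations are handled; any other request type is answered with
 * XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED. Every request gets exactly one
 * response, pushed immediately.
 */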
static void process_ctrl_request(struct xenvif *vif,
				 const struct xen_netif_ctrl_request *req)
{
	u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
	u32 data = 0;

	switch (req->type) {
	case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
		status = xenvif_set_hash_alg(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
		status = xenvif_get_hash_flags(vif, &data);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
		status = xenvif_set_hash_flags(vif, req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
		status = xenvif_set_hash_key(vif, req->data[0],
					     req->data[1]);
		break;

	case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
		status = XEN_NETIF_CTRL_STATUS_SUCCESS;
		data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
		status = xenvif_set_hash_mapping_size(vif,
						      req->data[0]);
		break;

	case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
		status = xenvif_set_hash_mapping(vif, req->data[0],
						 req->data[1],
						 req->data[2]);
		break;

	default:
		break;
	}

	make_ctrl_response(vif, req, status, data);
	push_ctrl_response(vif);
}

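/* Consume all outstanding control-ring requests. Setting req_event one past
 * the consumed requests re-arms frontend notifications; the outer loop then
 * re-reads req_prod to close the window where a request arrives after the
 * inner loop finished but before req_event was updated.
 */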
static void xenvif_ctrl_action(struct xenvif *vif)
{
	for (;;) {
		RING_IDX req_prod, req_cons;

		req_prod = vif->ctrl.sring->req_prod;
		req_cons = vif->ctrl.req_cons;

		/* Make sure we can see requests before we process them. */
		rmb();

		if (req_cons == req_prod)
			break;

		while (req_cons != req_prod) {
			struct xen_netif_ctrl_request req;

			RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
			req_cons++;

			process_ctrl_request(vif, &req);
		}

		vif->ctrl.req_cons = req_cons;
		vif->ctrl.sring->req_event = req_cons + 1;
	}
}

static bool xenvif_ctrl_work_todo(struct xenvif *vif)
{
	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
		return true;

	return false;
}

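/* Control event channel interrupt handler. The event is only acknowledged
 * via xen_irq_lateeoi() after processing; XEN_EOI_FLAG_SPURIOUS is reported
 * when no work was found, letting the event channel core detect and slow
 * down storms of empty events.
 */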
irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
{
	struct xenvif *vif = data;
	unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;

	while (xenvif_ctrl_work_todo(vif)) {
		xenvif_ctrl_action(vif);
		eoi_flag = 0;
	}

	xen_irq_lateeoi(irq, eoi_flag);

	return IRQ_HANDLED;
}

static int __init netback_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	/* Allow as many queues as there are CPUs, but at most
	 * MAX_QUEUES_DEFAULT (8), if the user has not specified a value.
	 */
	if (xenvif_max_queues == 0)
		xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
					  num_online_cpus());

	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
	}

	rc = xenvif_xenbus_init();
	if (rc)
		goto failed_init;

#ifdef CONFIG_DEBUG_FS
	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
		pr_warn("Init of debugfs returned %ld!\n",
			PTR_ERR(xen_netback_dbg_root));
#endif /* CONFIG_DEBUG_FS */

	return 0;

failed_init:
	return rc;
}

module_init(netback_init);

static void __exit netback_fini(void)
{
#ifdef CONFIG_DEBUG_FS
	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
		debugfs_remove_recursive(xen_netback_dbg_root);
#endif /* CONFIG_DEBUG_FS */
	xenvif_xenbus_fini();
}
module_exit(netback_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");