GNU Linux-libre 4.19.286-gnu1
drivers/block/xen-blkback/blkback.c
/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this too low reduces the memory used by each backend, but can
 * carry a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
 * can be set to a lower value, which might degrade performance on some
 * intensive IO workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * How long a persistent grant is allowed to remain allocated without being
 * in use. The time is in seconds; 0 means indefinitely long.
 */

static unsigned int xen_blkif_pgrant_timeout = 60;
module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
                   uint, 0644);
MODULE_PARM_DESC(persistent_grant_unused_seconds,
                 "Time in seconds an unused persistent grant is allowed to "
                 "remain allocated. Default is 60, 0 means unlimited.");

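/*
 * All of the parameters above are runtime-tunable via sysfs, e.g.
 * (assuming the module is loaded under the name xen_blkback):
 *   echo 2048 > /sys/module/xen_blkback/parameters/max_buffer_pages
 */
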
/*
 * Maximum number of rings/queues blkback supports, allowing as many queues
 * as there are CPUs if the user has not specified a value.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
                 "Maximum number of hardware queues per virtual disk. " \
                 "By default it is the number of online CPUs.");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percentage of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10

static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
        return xen_blkif_pgrant_timeout &&
               (jiffies - persistent_gnt->last_used >=
                HZ * xen_blkif_pgrant_timeout);
}

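/*
 * Pop one page from the ring's pool of unused grant pages; if the pool is
 * empty, fall back to allocating a fresh page with gnttab_alloc_pages().
 */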
static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&ring->free_pages_lock, flags);
        if (list_empty(&ring->free_pages)) {
                BUG_ON(ring->free_pages_num != 0);
                spin_unlock_irqrestore(&ring->free_pages_lock, flags);
                return gnttab_alloc_pages(1, page);
        }
        BUG_ON(ring->free_pages_num == 0);
        page[0] = list_first_entry(&ring->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        ring->free_pages_num--;
        spin_unlock_irqrestore(&ring->free_pages_lock, flags);

        return 0;
}

static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&ring->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &ring->free_pages);
        ring->free_pages_num += num;
        spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}

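/*
 * Trim the pool down to at most @num pages, returning the excess to the
 * grant table in batches of NUM_BATCH_FREE_PAGES so that the page pool
 * lock is not held across gnttab_free_pages().
 */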
static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&ring->free_pages_lock, flags);
        while (ring->free_pages_num > num) {
                BUG_ON(list_empty(&ring->free_pages));
                page[num_pages] = list_first_entry(&ring->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                ring->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&ring->free_pages_lock, flags);
                        gnttab_free_pages(num_pages, page);
                        spin_lock_irqsave(&ring->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&ring->free_pages_lock, flags);
        if (num_pages != 0)
                gnttab_free_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st);

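/*
 * Iterate over an rb tree of grants in a way that allows the current node
 * to be erased and freed: the next node is cached in @n before the loop
 * body runs, the rb-tree analogue of list_for_each_entry_safe().
 */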
#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)


/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
                               struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;
        struct xen_blkif *blkif = ring->blkif;

        if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put the new node */
        new = &ring->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        persistent_gnt->active = true;
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
        ring->persistent_gnt_c++;
        atomic_inc(&ring->persistent_gnt_in_use);
        return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = ring->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (data->active) {
                                pr_alert_ratelimited("requesting a grant already in use\n");
                                return NULL;
                        }
                        data->active = true;
                        atomic_inc(&ring->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

static void put_persistent_gnt(struct xen_blkif_ring *ring,
                               struct persistent_gnt *persistent_gnt)
{
        if (!persistent_gnt->active)
                pr_alert_ratelimited("freeing a grant already unused\n");
        persistent_gnt->last_used = jiffies;
        persistent_gnt->active = false;
        atomic_dec(&ring->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int segs_to_unmap = 0;
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        (unsigned long) pfn_to_kaddr(page_to_pfn(
                                persistent_gnt->page)),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {

                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

                        put_free_pages(ring, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int segs_to_unmap = 0;
        struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
        struct gntab_unmap_queue_data unmap_data;

        unmap_data.pages = pages;
        unmap_data.unmap_ops = unmap;
        unmap_data.kunmap_ops = NULL;

        while (!list_empty(&ring->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&ring->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        vaddr(persistent_gnt->page),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        unmap_data.count = segs_to_unmap;
                        BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                        put_free_pages(ring, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                unmap_data.count = segs_to_unmap;
                BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
                put_free_pages(ring, pages, segs_to_unmap);
        }
}

static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false;
        struct rb_root *root;

        if (work_busy(&ring->persistent_purge_work)) {
                pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
                goto out;
        }

        if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
            (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
            !ring->blkif->vbd.overflow_max_grants)) {
                num_clean = 0;
        } else {
                num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
                num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
                            num_clean;
                num_clean = min(ring->persistent_gnt_c, num_clean);
                pr_debug("Going to purge at least %u persistent grants\n",
                         num_clean);
        }
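        /*
         * Example with the default tunables (xen_blkif_max_pgrants = 1056,
         * LRU_PERCENT_CLEAN = 5): the base quota is (1056 / 100) * 5 = 50
         * grants, so with persistent_gnt_c == 1060 we would try to clean
         * 1060 - 1056 + 50 = 54 grants.
         */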

        /*
         * At this point, we can be sure that there will be no calls
         * to get_persistent_gnt (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = 0;

        BUG_ON(!list_empty(&ring->persistent_purge_list));
        root = &ring->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);

                if (persistent_gnt->active)
                        continue;
                if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
                        continue;
                if (scan_used && total >= num_clean)
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &ring->persistent_purge_list);
                total++;
        }
        /*
         * Check whether we also need to start cleaning
         * grants that were used since the last purge in order to cope
         * with the requested number.
         */
        if (!scan_used && total < num_clean) {
                pr_debug("Still missing %u purged frames\n", num_clean - total);
                scan_used = true;
                goto purge_list;
        }

        if (total) {
                ring->persistent_gnt_c -= total;
                ring->blkif->vbd.overflow_max_grants = 0;

                /* We can defer this work */
                schedule_work(&ring->persistent_purge_work);
                pr_debug("Purged %u/%u\n", num_clean, total);
        }

out:
        return;
}

/*
 * Retrieve a free pending_req structure from the 'pending_free' list to be
 * used.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        if (!list_empty(&ring->pending_free)) {
                req = list_entry(ring->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&ring->pending_free_lock, flags);
        was_empty = list_empty(&ring->pending_free);
        list_add(&req->free_list, &ring->pending_free);
        spin_unlock_irqrestore(&ring->pending_free_lock, flags);
        if (was_empty)
                wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
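/*
 * Check that the requested range lies within the virtual device and that
 * the device is writable when the operation requires it; on success, fill
 * in the physical device and block device for the request.
 */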
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != REQ_OP_READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info("VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn("Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn("Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn("Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn("Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
        ring->waiting_reqs = 1;
        wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif_ring *ring)
{
        pr_info("(%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
                 "  |  ds %4llu | pg: %4u/%4d\n",
                 current->comm, ring->st_oo_req,
                 ring->st_rd_req, ring->st_wr_req,
                 ring->st_f_req, ring->st_ds_req,
                 ring->persistent_gnt_c,
                 xen_blkif_max_pgrants);
        ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        ring->st_rd_req = 0;
        ring->st_wr_req = 0;
        ring->st_oo_req = 0;
        ring->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
        struct xen_blkif_ring *ring = arg;
        struct xen_blkif *blkif = ring->blkif;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;
        int ret;
        bool do_eoi;
        unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

        set_freezable();
        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

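                /*
                 * First wait for a request (or a stop signal), then for a
                 * free pending_req to handle it with; if either wait times
                 * out, fall through to the periodic grant purge below.
                 */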
                timeout = wait_event_interruptible_timeout(
                        ring->wq,
                        ring->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        ring->pending_free_wq,
                        !list_empty(&ring->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                do_eoi = ring->waiting_reqs;

                ring->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                ret = do_block_io_op(ring, &eoi_flags);
                if (ret > 0)
                        ring->waiting_reqs = 1;
                if (ret == -EACCES)
                        wait_event_interruptible(ring->shutdown_wq,
                                                 kthread_should_stop());

                if (do_eoi && !ring->waiting_reqs) {
                        xen_irq_lateeoi(ring->irq, eoi_flags);
                        eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
                }

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, ring->next_lru)) {
                        purge_persistent_gnt(ring);
                        ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, ring->st_print))
                        print_stats(ring);
        }

        /* Drain pending purge work */
        flush_work(&ring->persistent_purge_work);

        if (log_stats)
                print_stats(ring);

        ring->xenblkd = NULL;

        return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
                free_persistent_gnts(ring, &ring->persistent_gnts,
                        ring->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
        ring->persistent_gnt_c = 0;

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(ring, 0 /* All */);
}

static unsigned int xen_blkbk_unmap_prepare(
        struct xen_blkif_ring *ring,
        struct grant_page **pages,
        unsigned int num,
        struct gnttab_unmap_grant_ref *unmap_ops,
        struct page **unmap_pages)
{
        unsigned int i, invcount = 0;

        for (i = 0; i < num; i++) {
                if (pages[i]->persistent_gnt != NULL) {
                        put_persistent_gnt(ring, pages[i]->persistent_gnt);
                        continue;
                }
                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i]->page;
                gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
                invcount++;
        }

        return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
        struct pending_req *pending_req = (struct pending_req *)(data->data);
        struct xen_blkif_ring *ring = pending_req->ring;
        struct xen_blkif *blkif = ring->blkif;

        /*
         * BUG_ON used to reproduce existing behaviour,
         * but is this the best way to deal with this?
         */
        BUG_ON(result);

        put_free_pages(ring, data->pages, data->count);
        make_response(ring, pending_req->id,
                      pending_req->operation, pending_req->status);
        free_req(ring, pending_req);
        /*
         * Make sure the request is freed before releasing blkif,
         * or there could be a race between free_req and the
         * cleanup done in xen_blkif_free during shutdown.
         *
         * NB: The fact that we might try to wake up pending_free_wq
         * before drain_complete (in case there's a drain going on) is
         * not a problem with our current implementation, because we
         * can be sure there's no thread waiting on pending_free_wq if
         * there's a drain going on, but it has to be taken into account
         * if the current model is changed.
         */
        if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
                complete(&blkif->drain_complete);
        }
        xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
        struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
        struct xen_blkif_ring *ring = req->ring;
        struct grant_page **pages = req->segments;
        unsigned int invcount;

        invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
                                           req->unmap, req->unmap_pages);

        work->data = req;
        work->done = xen_blkbk_unmap_and_respond_callback;
        work->unmap_ops = req->unmap;
        work->kunmap_ops = NULL;
        work->pages = req->unmap_pages;
        work->count = invcount;

        gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}

/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
                            struct grant_page *pages[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int invcount = 0;
        int ret;

        while (num) {
                unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

                invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
                                                   unmap, unmap_pages);
                if (invcount) {
                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                        BUG_ON(ret);
                        put_free_pages(ring, unmap_pages, invcount);
                }
                pages += batch;
                num -= batch;
        }
}

static int xen_blkbk_map(struct xen_blkif_ring *ring,
                         struct grant_page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;
        struct xen_blkif *blkif = ring->blkif;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with the proper number of sectors, and set
         * up map[..] with the PFN of the page in our domain and the
         * corresponding grant reference for each page.
         */
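        /*
         * Grants are mapped in batches of at most
         * BLKIF_MAX_SEGMENTS_PER_REQUEST: the loop below keeps restarting
         * at 'again' until all 'num' segments have been processed.
         */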
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts) {
                        persistent_gnt = get_persistent_gnt(
                                ring,
                                pages[i]->gref);
                }

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i]->page = persistent_gnt->page;
                        pages[i]->persistent_gnt = persistent_gnt;
                } else {
                        if (get_free_page(ring, &pages[i]->page)) {
                                put_free_pages(ring, pages_to_gnt, segs_to_map);
                                ret = -ENOMEM;
                                goto out;
                        }
                        addr = vaddr(pages[i]->page);
                        pages_to_gnt[segs_to_map] = pages[i]->page;
                        pages[i]->persistent_gnt = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, pages[i]->gref,
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map)
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);

        /*
         * Now swizzle the MFN in our domain with the MFN from the other domain
         * so that when we access vaddr(pending_req,i) it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!pages[seg_idx]->persistent_gnt) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug("invalid buffer -- could not remap it\n");
                                put_free_pages(ring, &pages[seg_idx]->page, 1);
                                pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
                                ret |= !ret;
                                goto next;
                        }
                        pages[seg_idx]->handle = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct
                                 * map this grant non-persistently
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx]->page;
                        if (add_persistent_gnt(ring,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        pages[seg_idx]->persistent_gnt = persistent_gnt;
                        pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, ring->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (!ret && map_until != num)
                goto again;

out:
        for (i = last_map; i < num; i++) {
                /* Don't zap current batch's valid persistent grants. */
                if (i >= map_until)
                        pages[i]->persistent_gnt = NULL;
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
        }

        return ret;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
        int rc;

        rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
                           pending_req->nr_segs,
                           (pending_req->operation != BLKIF_OP_READ));

        return rc;
}

static int xen_blkbk_parse_indirect(struct blkif_request *req,
                                    struct pending_req *pending_req,
                                    struct seg_buf seg[],
                                    struct phys_req *preq)
{
        struct grant_page **pages = pending_req->indirect_pages;
        struct xen_blkif_ring *ring = pending_req->ring;
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment *segments = NULL;

        nseg = pending_req->nr_segs;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

        for (i = 0; i < indirect_grefs; i++)
                pages[i]->gref = req->u.indirect.indirect_grefs[i];

        rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
        if (rc)
                goto unmap;

        for (n = 0, i = 0; n < nseg; n++) {
                uint8_t first_sect, last_sect;

                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                        /* Map indirect segments */
                        if (segments)
                                kunmap_atomic(segments);
                        segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
                }
                i = n % SEGS_PER_INDIRECT_FRAME;

                pending_req->segments[n]->gref = segments[i].gref;

                first_sect = READ_ONCE(segments[i].first_sect);
                last_sect = READ_ONCE(segments[i].last_sect);
                if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
                        rc = -EINVAL;
                        goto unmap;
                }

                seg[n].nsec = last_sect - first_sect + 1;
                seg[n].offset = first_sect << 9;
                preq->nr_sects += seg[n].nsec;
        }

unmap:
        if (segments)
                kunmap_atomic(segments);
        xen_blkbk_unmap(ring, pages, indirect_grefs);
        return rc;
}

static int dispatch_discard_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct xen_blkif *blkif = ring->blkif;
        struct block_device *bdev = blkif->vbd.bdev;
        unsigned long secure;
        struct phys_req preq;

        xen_blkif_get(blkif);

        preq.sector_number = req->u.discard.sector_number;
        preq.nr_sects      = req->u.discard.nr_sectors;

        err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
        if (err) {
                pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
                goto fail_response;
        }
        ring->st_ds_req++;

        secure = (blkif->vbd.discard_secure &&
                 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;

        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                   req->u.discard.nr_sectors,
                                   GFP_KERNEL, secure);
fail_response:
        if (err == -EOPNOTSUPP) {
                pr_debug("discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(ring, req->u.discard.id, req->operation, status);
        xen_blkif_put(blkif);
        return err;
}

static int dispatch_other_io(struct xen_blkif_ring *ring,
                             struct blkif_request *req,
                             struct pending_req *pending_req)
{
        free_req(ring, pending_req);
        make_response(ring, req->u.other.id, req->operation,
                      BLKIF_RSP_EOPNOTSUPP);
        return -EIO;
}

static void xen_blk_drain_io(struct xen_blkif_ring *ring)
{
        struct xen_blkif *blkif = ring->blkif;

        atomic_set(&blkif->drain, 1);
        do {
                if (atomic_read(&ring->inflight) == 0)
                        break;
                wait_for_completion_interruptible_timeout(
                                &blkif->drain_complete, HZ);

                if (!atomic_read(&blkif->drain))
                        break;
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
}

static void __end_block_io_op(struct pending_req *pending_req,
                blk_status_t error)
{
        /* An error fails the entire request. */
        if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
            error == BLK_STS_NOTSUPP) {
                pr_debug("flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
                   error == BLK_STS_NOTSUPP) {
                pr_debug("write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug("Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bios have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt))
                xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
        __end_block_io_op(bio->bi_private, bio->bi_status);
        bio_put(bio);
}

/*
 * Function to copy the 'struct blkif_request' from the ring buffer (which
 * has the sectors we want, the number of them, grant references, etc.),
 * and transmute it to the block API to hand it over to the proper block
 * disk.
 */
static int
__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
        union blkif_back_rings *blk_rings = &ring->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
                rc = blk_rings->common.rsp_prod_pvt;
                pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
                        rp, rc, rp - rc, ring->blkif->vbd.pdevice);
                return -EACCES;
        }
        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                /* We've seen a request, so clear the spurious eoi flag. */
                *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req(ring);
                if (NULL == pending_req) {
                        ring->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (ring->blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_INDIRECT:
                        if (dispatch_rw_block_io(ring, &req, pending_req))
                                goto done;
                        break;
                case BLKIF_OP_DISCARD:
                        free_req(ring, pending_req);
                        if (dispatch_discard_io(ring, &req))
                                goto done;
                        break;
                default:
                        if (dispatch_other_io(ring, &req, pending_req))
                                goto done;
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }
done:
        return more_to_do;
}


static int
do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
        union blkif_back_rings *blk_rings = &ring->blk_rings;
        int more_to_do;

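        /*
         * Keep processing until RING_FINAL_CHECK_FOR_REQUESTS() reports no
         * more requests: it re-enables notifications before the final
         * check, closing the race where the frontend queues a request just
         * as we are about to go idle.
         */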
        do {
                more_to_do = __do_block_io_op(ring, eoi_flags);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' to a proper 'struct bio' and call
 * 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf *seg = pending_req->seg;
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio **biolist = pending_req->biolist;
        int i, nbio = 0;
        int operation;
        int operation_flags = 0;
        struct blk_plug plug;
        bool drain = false;
        struct grant_page **pages = pending_req->segments;
        unsigned short req_operation;

        req_operation = req->operation == BLKIF_OP_INDIRECT ?
                        req->u.indirect.indirect_op : req->operation;

        if ((req->operation == BLKIF_OP_INDIRECT) &&
            (req_operation != BLKIF_OP_READ) &&
            (req_operation != BLKIF_OP_WRITE)) {
                pr_debug("Invalid indirect operation (%u)\n", req_operation);
                goto fail_response;
        }

        switch (req_operation) {
        case BLKIF_OP_READ:
                ring->st_rd_req++;
                operation = REQ_OP_READ;
                break;
        case BLKIF_OP_WRITE:
                ring->st_wr_req++;
                operation = REQ_OP_WRITE;
                operation_flags = REQ_SYNC | REQ_IDLE;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
                /* fall through */
        case BLKIF_OP_FLUSH_DISKCACHE:
                ring->st_f_req++;
                operation = REQ_OP_WRITE;
                operation_flags = REQ_PREFLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
                break;
        }

        /* Check that the number of segments is sane. */
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;

        if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
                     (nseg > MAX_INDIRECT_SEGMENTS))) {
                pr_debug("Bad number of segments in request (%d)\n", nseg);
                /* Haven't submitted any bio's yet. */
                goto fail_response;
        }

        preq.nr_sects      = 0;

        pending_req->ring      = ring;
        pending_req->id        = req->u.rw.id;
        pending_req->operation = req_operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_segs   = nseg;

        if (req->operation != BLKIF_OP_INDIRECT) {
                preq.dev               = req->u.rw.handle;
                preq.sector_number     = req->u.rw.sector_number;
                for (i = 0; i < nseg; i++) {
                        pages[i]->gref = req->u.rw.seg[i].gref;
                        seg[i].nsec = req->u.rw.seg[i].last_sect -
                                req->u.rw.seg[i].first_sect + 1;
                        seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
                        if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
                            (req->u.rw.seg[i].last_sect <
                             req->u.rw.seg[i].first_sect))
                                goto fail_response;
                        preq.nr_sects += seg[i].nsec;
                }
        } else {
                preq.dev               = req->u.indirect.handle;
                preq.sector_number     = req->u.indirect.sector_number;
                if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
                        goto fail_response;
        }

        if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
                pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == REQ_OP_READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         ring->blkif->vbd.pdevice);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
         * is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        pr_debug("Misaligned I/O request from domain %d\n",
                                 ring->blkif->domid);
                        goto fail_response;
                }
        }

        /*
         * Wait on all outstanding I/Os and, once they have completed, issue
         * the flush.
         */
        if (drain)
                xen_blk_drain_io(pending_req->ring);

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (xen_blkbk_map_seg(pending_req))
                goto fail_flush;

        /*
         * The corresponding xen_blkif_put is done in __end_block_io_op, or
         * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
         */
        xen_blkif_get(ring->blkif);
        atomic_inc(&ring->inflight);

        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     pages[i]->page,
                                     seg[i].nsec << 9,
                                     seg[i].offset) == 0)) {

                        int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
                        bio = bio_alloc(GFP_KERNEL, nr_iovecs);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio_set_dev(bio, preq.bdev);
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_iter.bi_sector  = preq.sector_number;
                        bio_set_op_attrs(bio, operation, operation_flags);
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a flush or discard. */
        if (!bio) {
                BUG_ON(operation_flags != REQ_PREFLUSH);

                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                biolist[nbio++] = bio;
                bio_set_dev(bio, preq.bdev);
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
                bio_set_op_attrs(bio, operation, operation_flags);
        }

        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(biolist[i]);

        /* Let the I/Os go... */
        blk_finish_plug(&plug);

        if (operation == REQ_OP_READ)
                ring->st_rd_sect += preq.nr_sects;
        else if (operation == REQ_OP_WRITE)
                ring->st_wr_sect += preq.nr_sects;

        return 0;

 fail_flush:
        xen_blkbk_unmap(ring, pending_req->segments,
                        pending_req->nr_segs);
 fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
        free_req(ring, pending_req);
        msleep(1); /* back off a bit */
        return -EIO;

 fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, BLK_STS_RESOURCE);
        msleep(1); /* back off a bit */
        return -EIO;
}


/*
 * Put a response on the ring on how the operation fared.
 */
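/*
 * Note: make_response() can be reached from the grant unmap completion
 * callback (and thus, presumably, from interrupt context), which is why
 * the ring lock below is taken with spin_lock_irqsave().
 */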
static void make_response(struct xen_blkif_ring *ring, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response *resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings;
        int notify;

        spin_lock_irqsave(&ring->blk_ring_lock, flags);
        blk_rings = &ring->blk_rings;
        /* Place on the response ring for the relevant domain. */
        switch (ring->blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                resp = RING_GET_RESPONSE(&blk_rings->native,
                                         blk_rings->native.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_32:
                resp = RING_GET_RESPONSE(&blk_rings->x86_32,
                                         blk_rings->x86_32.rsp_prod_pvt);
                break;
        case BLKIF_PROTOCOL_X86_64:
                resp = RING_GET_RESPONSE(&blk_rings->x86_64,
                                         blk_rings->x86_64.rsp_prod_pvt);
                break;
        default:
                BUG();
        }

        resp->id        = id;
        resp->operation = op;
        resp->status    = st;

        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
        if (notify)
                notify_remote_via_irq(ring->irq);
}

static int __init xen_blkif_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
                pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
                        xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
                xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
        }

        if (xenblk_max_queues == 0)
                xenblk_max_queues = num_online_cpus();

        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

 failed_init:
        return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");