GNU Linux-libre 4.14.266-gnu1
drivers/block/xen-blkback/blkback.c
1 /******************************************************************************
2  *
3  * Back-end of the driver for virtual block devices. This portion of the
4  * driver exports a 'unified' block-device interface that can be accessed
5  * by any operating system that implements a compatible front end. A
6  * reference front-end implementation can be found in:
7  *  drivers/block/xen-blkfront.c
8  *
9  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
10  * Copyright (c) 2005, Christopher Clark
11  *
12  * This program is free software; you can redistribute it and/or
13  * modify it under the terms of the GNU General Public License version 2
14  * as published by the Free Software Foundation; or, when distributed
15  * separately from the Linux kernel or incorporated into other
16  * software packages, subject to the following license:
17  *
18  * Permission is hereby granted, free of charge, to any person obtaining a copy
19  * of this source file (the "Software"), to deal in the Software without
20  * restriction, including without limitation the rights to use, copy, modify,
21  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
22  * and to permit persons to whom the Software is furnished to do so, subject to
23  * the following conditions:
24  *
25  * The above copyright notice and this permission notice shall be included in
26  * all copies or substantial portions of the Software.
27  *
28  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
29  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
30  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
31  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
32  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
33  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
34  * IN THE SOFTWARE.
35  */
36
37 #define pr_fmt(fmt) "xen-blkback: " fmt
38
39 #include <linux/spinlock.h>
40 #include <linux/kthread.h>
41 #include <linux/list.h>
42 #include <linux/delay.h>
43 #include <linux/freezer.h>
44 #include <linux/bitmap.h>
45
46 #include <xen/events.h>
47 #include <xen/page.h>
48 #include <xen/xen.h>
49 #include <asm/xen/hypervisor.h>
50 #include <asm/xen/hypercall.h>
51 #include <xen/balloon.h>
52 #include <xen/grant_table.h>
53 #include "common.h"
54
55 /*
56  * Maximum number of unused free pages to keep in the internal buffer.
57  * Setting this to a lower value reduces the memory used by each backend,
58  * but can incur a performance penalty.
59  *
60  * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; it can
61  * be set to a lower value, but that might degrade performance on some
62  * I/O-intensive workloads.
63  */
64
65 static int xen_blkif_max_buffer_pages = 1024;
66 module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
67 MODULE_PARM_DESC(max_buffer_pages,
68                  "Maximum number of free pages to keep in each block backend buffer");
69
70 /*
71  * Maximum number of grants to map persistently in blkback. For maximum
72  * performance this should be the total number of grants that can be used
73  * to fill the ring, but since this might become too high, especially with
74  * the use of indirect descriptors, we set it to a value that provides good
75  * performance without using too much memory.
76  *
77  * When the list of persistent grants is full we clean it up using an LRU
78  * algorithm.
79  */
80
81 static int xen_blkif_max_pgrants = 1056;
82 module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
83 MODULE_PARM_DESC(max_persistent_grants,
84                  "Maximum number of grants to map persistently");
85
86 /*
87  * Maximum number of rings/queues blkback supports. If the user has not
88  * specified a value, allow as many queues as there are CPUs.
89  */
90 unsigned int xenblk_max_queues;
91 module_param_named(max_queues, xenblk_max_queues, uint, 0644);
92 MODULE_PARM_DESC(max_queues,
93                  "Maximum number of hardware queues per virtual disk. " \
94                  "By default it is the number of online CPUs.");
95
96 /*
97  * Maximum order of pages to be used for the shared ring between frontend
98  * and backend; 4KB page granularity is used.
99  */
100 unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
101 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
102 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
103 /*
104  * The LRU mechanism to clean the lists of persistent grants needs to
105  * be executed periodically. The time interval between consecutive executions
106  * of the purge mechanism is set in ms.
107  */
108 #define LRU_INTERVAL 100
109
110 /*
111  * When the list of persistent grants is full we will remove unused grants
112  * from the list. This is the percentage of grants to be removed at each
113  * LRU execution.
114  */
115 #define LRU_PERCENT_CLEAN 5
116
117 /* Run-time switchable: /sys/module/blkback/parameters/ */
118 static unsigned int log_stats;
119 module_param(log_stats, int, 0644);
120
121 #define BLKBACK_INVALID_HANDLE (~0)
122
123 /* Number of free pages to remove on each call to gnttab_free_pages */
124 #define NUM_BATCH_FREE_PAGES 10
125
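/*
 * Pool of unused grant pages: get_free_page() hands out a page from
 * ring->free_pages, falling back to gnttab_alloc_pages() when the pool is
 * empty, and put_free_pages() returns pages to the pool.
 */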
126 static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
127 {
128         unsigned long flags;
129
130         spin_lock_irqsave(&ring->free_pages_lock, flags);
131         if (list_empty(&ring->free_pages)) {
132                 BUG_ON(ring->free_pages_num != 0);
133                 spin_unlock_irqrestore(&ring->free_pages_lock, flags);
134                 return gnttab_alloc_pages(1, page);
135         }
136         BUG_ON(ring->free_pages_num == 0);
137         page[0] = list_first_entry(&ring->free_pages, struct page, lru);
138         list_del(&page[0]->lru);
139         ring->free_pages_num--;
140         spin_unlock_irqrestore(&ring->free_pages_lock, flags);
141
142         return 0;
143 }
144
145 static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
146                                   int num)
147 {
148         unsigned long flags;
149         int i;
150
151         spin_lock_irqsave(&ring->free_pages_lock, flags);
152         for (i = 0; i < num; i++)
153                 list_add(&page[i]->lru, &ring->free_pages);
154         ring->free_pages_num += num;
155         spin_unlock_irqrestore(&ring->free_pages_lock, flags);
156 }
157
158 static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
159 {
160         /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
161         struct page *page[NUM_BATCH_FREE_PAGES];
162         unsigned int num_pages = 0;
163         unsigned long flags;
164
165         spin_lock_irqsave(&ring->free_pages_lock, flags);
166         while (ring->free_pages_num > num) {
167                 BUG_ON(list_empty(&ring->free_pages));
168                 page[num_pages] = list_first_entry(&ring->free_pages,
169                                                    struct page, lru);
170                 list_del(&page[num_pages]->lru);
171                 ring->free_pages_num--;
172                 if (++num_pages == NUM_BATCH_FREE_PAGES) {
173                         spin_unlock_irqrestore(&ring->free_pages_lock, flags);
174                         gnttab_free_pages(num_pages, page);
175                         spin_lock_irqsave(&ring->free_pages_lock, flags);
176                         num_pages = 0;
177                 }
178         }
179         spin_unlock_irqrestore(&ring->free_pages_lock, flags);
180         if (num_pages != 0)
181                 gnttab_free_pages(num_pages, page);
182 }
183
184 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
185
186 static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
187 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
188                                 struct blkif_request *req,
189                                 struct pending_req *pending_req);
190 static void make_response(struct xen_blkif_ring *ring, u64 id,
191                           unsigned short op, int st);
192
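/*
 * Iterate over the rb-tree of grants, caching the next node before the loop
 * body runs so that the current entry can be erased while walking.
 */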
193 #define foreach_grant_safe(pos, n, rbtree, node) \
194         for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
195              (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
196              &(pos)->node != NULL; \
197              (pos) = container_of(n, typeof(*(pos)), node), \
198              (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
199
200
201 /*
202  * We don't need locking around the persistent grant helpers
203  * because blkback uses a single thread for each backend, so we
204  * can be sure that these functions will never be called concurrently.
205  *
206  * The only exception to that is put_persistent_gnt, which can be called
207  * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
208  * bit operations to modify the flags of a persistent grant and to count
209  * the number of used grants.
210  */
211 static int add_persistent_gnt(struct xen_blkif_ring *ring,
212                                struct persistent_gnt *persistent_gnt)
213 {
214         struct rb_node **new = NULL, *parent = NULL;
215         struct persistent_gnt *this;
216         struct xen_blkif *blkif = ring->blkif;
217
218         if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
219                 if (!blkif->vbd.overflow_max_grants)
220                         blkif->vbd.overflow_max_grants = 1;
221                 return -EBUSY;
222         }
223         /* Figure out where to put new node */
224         new = &ring->persistent_gnts.rb_node;
225         while (*new) {
226                 this = container_of(*new, struct persistent_gnt, node);
227
228                 parent = *new;
229                 if (persistent_gnt->gnt < this->gnt)
230                         new = &((*new)->rb_left);
231                 else if (persistent_gnt->gnt > this->gnt)
232                         new = &((*new)->rb_right);
233                 else {
234                         pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
235                         return -EINVAL;
236                 }
237         }
238
239         bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
240         set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
241         /* Add new node and rebalance tree. */
242         rb_link_node(&(persistent_gnt->node), parent, new);
243         rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
244         ring->persistent_gnt_c++;
245         atomic_inc(&ring->persistent_gnt_in_use);
246         return 0;
247 }
248
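/*
 * Look up @gref in the ring's tree of persistent grants. On success the
 * grant is marked active and the in-use counter is incremented; NULL is
 * returned if the grant is not in the tree or is already in use.
 */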
249 static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
250                                                  grant_ref_t gref)
251 {
252         struct persistent_gnt *data;
253         struct rb_node *node = NULL;
254
255         node = ring->persistent_gnts.rb_node;
256         while (node) {
257                 data = container_of(node, struct persistent_gnt, node);
258
259                 if (gref < data->gnt)
260                         node = node->rb_left;
261                 else if (gref > data->gnt)
262                         node = node->rb_right;
263                 else {
264                         if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
265                                 pr_alert_ratelimited("requesting a grant already in use\n");
266                                 return NULL;
267                         }
268                         set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
269                         atomic_inc(&ring->persistent_gnt_in_use);
270                         return data;
271                 }
272         }
273         return NULL;
274 }
275
276 static void put_persistent_gnt(struct xen_blkif_ring *ring,
277                                struct persistent_gnt *persistent_gnt)
278 {
279         if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
280                 pr_alert_ratelimited("freeing a grant already unused\n");
281         set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
282         clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
283         atomic_dec(&ring->persistent_gnt_in_use);
284 }
285
286 static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
287                                  unsigned int num)
288 {
289         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
290         struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
291         struct persistent_gnt *persistent_gnt;
292         struct rb_node *n;
293         int segs_to_unmap = 0;
294         struct gntab_unmap_queue_data unmap_data;
295
296         unmap_data.pages = pages;
297         unmap_data.unmap_ops = unmap;
298         unmap_data.kunmap_ops = NULL;
299
300         foreach_grant_safe(persistent_gnt, n, root, node) {
301                 BUG_ON(persistent_gnt->handle ==
302                         BLKBACK_INVALID_HANDLE);
303                 gnttab_set_unmap_op(&unmap[segs_to_unmap],
304                         (unsigned long) pfn_to_kaddr(page_to_pfn(
305                                 persistent_gnt->page)),
306                         GNTMAP_host_map,
307                         persistent_gnt->handle);
308
309                 pages[segs_to_unmap] = persistent_gnt->page;
310
311                 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
312                         !rb_next(&persistent_gnt->node)) {
313
314                         unmap_data.count = segs_to_unmap;
315                         BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
316
317                         put_free_pages(ring, pages, segs_to_unmap);
318                         segs_to_unmap = 0;
319                 }
320
321                 rb_erase(&persistent_gnt->node, root);
322                 kfree(persistent_gnt);
323                 num--;
324         }
325         BUG_ON(num != 0);
326 }
327
328 void xen_blkbk_unmap_purged_grants(struct work_struct *work)
329 {
330         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
331         struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
332         struct persistent_gnt *persistent_gnt;
333         int segs_to_unmap = 0;
334         struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
335         struct gntab_unmap_queue_data unmap_data;
336
337         unmap_data.pages = pages;
338         unmap_data.unmap_ops = unmap;
339         unmap_data.kunmap_ops = NULL;
340
341         while (!list_empty(&ring->persistent_purge_list)) {
342                 persistent_gnt = list_first_entry(&ring->persistent_purge_list,
343                                                   struct persistent_gnt,
344                                                   remove_node);
345                 list_del(&persistent_gnt->remove_node);
346
347                 gnttab_set_unmap_op(&unmap[segs_to_unmap],
348                         vaddr(persistent_gnt->page),
349                         GNTMAP_host_map,
350                         persistent_gnt->handle);
351
352                 pages[segs_to_unmap] = persistent_gnt->page;
353
354                 if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
355                         unmap_data.count = segs_to_unmap;
356                         BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
357                         put_free_pages(ring, pages, segs_to_unmap);
358                         segs_to_unmap = 0;
359                 }
360                 kfree(persistent_gnt);
361         }
362         if (segs_to_unmap > 0) {
363                 unmap_data.count = segs_to_unmap;
364                 BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
365                 put_free_pages(ring, pages, segs_to_unmap);
366         }
367 }
368
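/*
 * Move a batch of unused persistent grants from the rb-tree onto
 * persistent_purge_list and schedule persistent_purge_work to unmap them,
 * preferring grants that have not been used since the last purge.
 */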
369 static void purge_persistent_gnt(struct xen_blkif_ring *ring)
370 {
371         struct persistent_gnt *persistent_gnt;
372         struct rb_node *n;
373         unsigned int num_clean, total;
374         bool scan_used = false, clean_used = false;
375         struct rb_root *root;
376
377         if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
378             (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
379             !ring->blkif->vbd.overflow_max_grants)) {
380                 goto out;
381         }
382
383         if (work_busy(&ring->persistent_purge_work)) {
384                 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
385                 goto out;
386         }
387
388         num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
389         num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
390         num_clean = min(ring->persistent_gnt_c, num_clean);
391         if ((num_clean == 0) ||
392             (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
393                 goto out;
394
395         /*
396          * At this point, we can be sure that there will be no calls
397          * to get_persistent_gnt (because we are executing this code from
398          * xen_blkif_schedule); there can only be calls to put_persistent_gnt,
399          * which means that the number of currently used grants will go down,
400          * but never up, so we will always be able to remove the requested
401          * number of grants.
402          */
403
404         total = num_clean;
405
406         pr_debug("Going to purge %u persistent grants\n", num_clean);
407
408         BUG_ON(!list_empty(&ring->persistent_purge_list));
409         root = &ring->persistent_gnts;
410 purge_list:
411         foreach_grant_safe(persistent_gnt, n, root, node) {
412                 BUG_ON(persistent_gnt->handle ==
413                         BLKBACK_INVALID_HANDLE);
414
415                 if (clean_used) {
416                         clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
417                         continue;
418                 }
419
420                 if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
421                         continue;
422                 if (!scan_used &&
423                     (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
424                         continue;
425
426                 rb_erase(&persistent_gnt->node, root);
427                 list_add(&persistent_gnt->remove_node,
428                          &ring->persistent_purge_list);
429                 if (--num_clean == 0)
430                         goto finished;
431         }
432         /*
433          * If we get here it means we also need to start cleaning
434          * grants that were used since the last purge in order to cope
435          * with the requested number.
436          */
437         if (!scan_used && !clean_used) {
438                 pr_debug("Still missing %u purged frames\n", num_clean);
439                 scan_used = true;
440                 goto purge_list;
441         }
442 finished:
443         if (!clean_used) {
444                 pr_debug("Finished scanning for grants to clean, removing used flag\n");
445                 clean_used = true;
446                 goto purge_list;
447         }
448
449         ring->persistent_gnt_c -= (total - num_clean);
450         ring->blkif->vbd.overflow_max_grants = 0;
451
452         /* We can defer this work */
453         schedule_work(&ring->persistent_purge_work);
454         pr_debug("Purged %u/%u\n", (total - num_clean), total);
455
456 out:
457         return;
458 }
459
460 /*
461  * Retrieve a free pending_req structure from the 'pending_free' list.
462  */
463 static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
464 {
465         struct pending_req *req = NULL;
466         unsigned long flags;
467
468         spin_lock_irqsave(&ring->pending_free_lock, flags);
469         if (!list_empty(&ring->pending_free)) {
470                 req = list_entry(ring->pending_free.next, struct pending_req,
471                                  free_list);
472                 list_del(&req->free_list);
473         }
474         spin_unlock_irqrestore(&ring->pending_free_lock, flags);
475         return req;
476 }
477
478 /*
479  * Return the 'pending_req' structure back to the free pool. We also
480  * wake up the thread if it was waiting for a free request.
481  */
482 static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
483 {
484         unsigned long flags;
485         int was_empty;
486
487         spin_lock_irqsave(&ring->pending_free_lock, flags);
488         was_empty = list_empty(&ring->pending_free);
489         list_add(&req->free_list, &ring->pending_free);
490         spin_unlock_irqrestore(&ring->pending_free_lock, flags);
491         if (was_empty)
492                 wake_up(&ring->pending_free_wq);
493 }
494
495 /*
496  * Routines for managing virtual block devices (vbds).
497  */
498 static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
499                              int operation)
500 {
501         struct xen_vbd *vbd = &blkif->vbd;
502         int rc = -EACCES;
503
504         if ((operation != REQ_OP_READ) && vbd->readonly)
505                 goto out;
506
507         if (likely(req->nr_sects)) {
508                 blkif_sector_t end = req->sector_number + req->nr_sects;
509
510                 if (unlikely(end < req->sector_number))
511                         goto out;
512                 if (unlikely(end > vbd_sz(vbd)))
513                         goto out;
514         }
515
516         req->dev  = vbd->pdevice;
517         req->bdev = vbd->bdev;
518         rc = 0;
519
520  out:
521         return rc;
522 }
523
524 static void xen_vbd_resize(struct xen_blkif *blkif)
525 {
526         struct xen_vbd *vbd = &blkif->vbd;
527         struct xenbus_transaction xbt;
528         int err;
529         struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
530         unsigned long long new_size = vbd_sz(vbd);
531
532         pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
533                 blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
534         pr_info("VBD Resize: new size %llu\n", new_size);
535         vbd->size = new_size;
536 again:
537         err = xenbus_transaction_start(&xbt);
538         if (err) {
539                 pr_warn("Error starting transaction\n");
540                 return;
541         }
542         err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
543                             (unsigned long long)vbd_sz(vbd));
544         if (err) {
545                 pr_warn("Error writing new size\n");
546                 goto abort;
547         }
548         /*
549          * Write the current state; we will use this to synchronize
550          * the front-end. If the current state is "connected" the
551          * front-end will get the new size information online.
552          */
553         err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
554         if (err) {
555                 pr_warn("Error writing the state\n");
556                 goto abort;
557         }
558
559         err = xenbus_transaction_end(xbt, 0);
560         if (err == -EAGAIN)
561                 goto again;
562         if (err)
563                 pr_warn("Error ending transaction\n");
564         return;
565 abort:
566         xenbus_transaction_end(xbt, 1);
567 }
568
569 /*
570  * Notification from the guest OS.
571  */
572 static void blkif_notify_work(struct xen_blkif_ring *ring)
573 {
574         ring->waiting_reqs = 1;
575         wake_up(&ring->wq);
576 }
577
578 irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
579 {
580         blkif_notify_work(dev_id);
581         return IRQ_HANDLED;
582 }
583
584 /*
585  * SCHEDULER FUNCTIONS
586  */
587
588 static void print_stats(struct xen_blkif_ring *ring)
589 {
590         pr_info("(%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
591                  "  |  ds %4llu | pg: %4u/%4d\n",
592                  current->comm, ring->st_oo_req,
593                  ring->st_rd_req, ring->st_wr_req,
594                  ring->st_f_req, ring->st_ds_req,
595                  ring->persistent_gnt_c,
596                  xen_blkif_max_pgrants);
597         ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
598         ring->st_rd_req = 0;
599         ring->st_wr_req = 0;
600         ring->st_oo_req = 0;
601         ring->st_ds_req = 0;
602 }
603
604 int xen_blkif_schedule(void *arg)
605 {
606         struct xen_blkif_ring *ring = arg;
607         struct xen_blkif *blkif = ring->blkif;
608         struct xen_vbd *vbd = &blkif->vbd;
609         unsigned long timeout;
610         int ret;
611         bool do_eoi;
612         unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
613
614         set_freezable();
615         while (!kthread_should_stop()) {
616                 if (try_to_freeze())
617                         continue;
618                 if (unlikely(vbd->size != vbd_sz(vbd)))
619                         xen_vbd_resize(blkif);
620
621                 timeout = msecs_to_jiffies(LRU_INTERVAL);
622
623                 timeout = wait_event_interruptible_timeout(
624                         ring->wq,
625                         ring->waiting_reqs || kthread_should_stop(),
626                         timeout);
627                 if (timeout == 0)
628                         goto purge_gnt_list;
629                 timeout = wait_event_interruptible_timeout(
630                         ring->pending_free_wq,
631                         !list_empty(&ring->pending_free) ||
632                         kthread_should_stop(),
633                         timeout);
634                 if (timeout == 0)
635                         goto purge_gnt_list;
636
637                 do_eoi = ring->waiting_reqs;
638
639                 ring->waiting_reqs = 0;
640                 smp_mb(); /* clear flag *before* checking for work */
641
642                 ret = do_block_io_op(ring, &eoi_flags);
643                 if (ret > 0)
644                         ring->waiting_reqs = 1;
645                 if (ret == -EACCES)
646                         wait_event_interruptible(ring->shutdown_wq,
647                                                  kthread_should_stop());
648
649                 if (do_eoi && !ring->waiting_reqs) {
650                         xen_irq_lateeoi(ring->irq, eoi_flags);
651                         eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
652                 }
653
654 purge_gnt_list:
655                 if (blkif->vbd.feature_gnt_persistent &&
656                     time_after(jiffies, ring->next_lru)) {
657                         purge_persistent_gnt(ring);
658                         ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
659                 }
660
661                 /* Shrink if we have more than xen_blkif_max_buffer_pages */
662                 shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);
663
664                 if (log_stats && time_after(jiffies, ring->st_print))
665                         print_stats(ring);
666         }
667
668         /* Drain pending purge work */
669         flush_work(&ring->persistent_purge_work);
670
671         if (log_stats)
672                 print_stats(ring);
673
674         ring->xenblkd = NULL;
675
676         return 0;
677 }
678
679 /*
680  * Remove persistent grants and empty the pool of free pages
681  */
682 void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
683 {
684         /* Free all persistent grant pages */
685         if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
686                 free_persistent_gnts(ring, &ring->persistent_gnts,
687                         ring->persistent_gnt_c);
688
689         BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
690         ring->persistent_gnt_c = 0;
691
692         /* Since we are shutting down remove all pages from the buffer */
693         shrink_free_pagepool(ring, 0 /* All */);
694 }
695
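/*
 * Prepare the unmap operations for a request's non-persistent grants;
 * persistent grants are simply released back to the tree. Returns the
 * number of unmap operations that were set up.
 */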
696 static unsigned int xen_blkbk_unmap_prepare(
697         struct xen_blkif_ring *ring,
698         struct grant_page **pages,
699         unsigned int num,
700         struct gnttab_unmap_grant_ref *unmap_ops,
701         struct page **unmap_pages)
702 {
703         unsigned int i, invcount = 0;
704
705         for (i = 0; i < num; i++) {
706                 if (pages[i]->persistent_gnt != NULL) {
707                         put_persistent_gnt(ring, pages[i]->persistent_gnt);
708                         continue;
709                 }
710                 if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
711                         continue;
712                 unmap_pages[invcount] = pages[i]->page;
713                 gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
714                                     GNTMAP_host_map, pages[i]->handle);
715                 pages[i]->handle = BLKBACK_INVALID_HANDLE;
716                 invcount++;
717         }
718
719         return invcount;
720 }
721
722 static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
723 {
724         struct pending_req *pending_req = (struct pending_req *)(data->data);
725         struct xen_blkif_ring *ring = pending_req->ring;
726         struct xen_blkif *blkif = ring->blkif;
727
728         /* BUG_ON used to reproduce existing behaviour,
729            but is this the best way to deal with this? */
730         BUG_ON(result);
731
732         put_free_pages(ring, data->pages, data->count);
733         make_response(ring, pending_req->id,
734                       pending_req->operation, pending_req->status);
735         free_req(ring, pending_req);
736         /*
737          * Make sure the request is freed before releasing blkif,
738          * or there could be a race between free_req and the
739          * cleanup done in xen_blkif_free during shutdown.
740          *
741          * NB: The fact that we might try to wake up pending_free_wq
742          * before drain_complete (in case there's a drain going on) is
743          * not a problem with our current implementation, because we can
744          * be sure there's no thread waiting on pending_free_wq if there's
745          * a drain going on; but it has to be taken into account if the
746          * current model is changed.
747          */
748         if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
749                 complete(&blkif->drain_complete);
750         }
751         xen_blkif_put(blkif);
752 }
753
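/*
 * Unmap a completed request's grants asynchronously; the response to the
 * frontend is sent from the gnttab_unmap_refs_async() completion callback.
 */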
754 static void xen_blkbk_unmap_and_respond(struct pending_req *req)
755 {
756         struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
757         struct xen_blkif_ring *ring = req->ring;
758         struct grant_page **pages = req->segments;
759         unsigned int invcount;
760
761         invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
762                                            req->unmap, req->unmap_pages);
763
764         work->data = req;
765         work->done = xen_blkbk_unmap_and_respond_callback;
766         work->unmap_ops = req->unmap;
767         work->kunmap_ops = NULL;
768         work->pages = req->unmap_pages;
769         work->count = invcount;
770
771         gnttab_unmap_refs_async(&req->gnttab_unmap_data);
772 }
773
774
775 /*
776  * Unmap the grant references.
777  *
778  * This could accumulate ops up to the batch size to reduce the number
779  * of hypercalls, but since this is only used in error paths there's
780  * no real need.
781  */
782 static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
783                             struct grant_page *pages[],
784                             int num)
785 {
786         struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
787         struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
788         unsigned int invcount = 0;
789         int ret;
790
791         while (num) {
792                 unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
793
794                 invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
795                                                    unmap, unmap_pages);
796                 if (invcount) {
797                         ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
798                         BUG_ON(ret);
799                         put_free_pages(ring, unmap_pages, invcount);
800                 }
801                 pages += batch;
802                 num -= batch;
803         }
804 }
805
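/*
 * Map the grant references of a request in batches of
 * BLKIF_MAX_SEGMENTS_PER_REQUEST, reusing already-mapped persistent grants
 * where possible and backing the remaining segments with pages from the
 * free pool.
 */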
806 static int xen_blkbk_map(struct xen_blkif_ring *ring,
807                          struct grant_page *pages[],
808                          int num, bool ro)
809 {
810         struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
811         struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
812         struct persistent_gnt *persistent_gnt = NULL;
813         phys_addr_t addr = 0;
814         int i, seg_idx, new_map_idx;
815         int segs_to_map = 0;
816         int ret = 0;
817         int last_map = 0, map_until = 0;
818         int use_persistent_gnts;
819         struct xen_blkif *blkif = ring->blkif;
820
821         use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);
822
823         /*
824          * Fill out preq.nr_sects with the proper number of sectors, and set up
825          * map[..] with the PFN of the page in our domain together with the
826          * corresponding grant reference for each page.
827          */
828 again:
829         for (i = map_until; i < num; i++) {
830                 uint32_t flags;
831
832                 if (use_persistent_gnts) {
833                         persistent_gnt = get_persistent_gnt(
834                                 ring,
835                                 pages[i]->gref);
836                 }
837
838                 if (persistent_gnt) {
839                         /*
840                          * We are using persistent grants and
841                          * the grant is already mapped
842                          */
843                         pages[i]->page = persistent_gnt->page;
844                         pages[i]->persistent_gnt = persistent_gnt;
845                 } else {
846                         if (get_free_page(ring, &pages[i]->page)) {
847                                 put_free_pages(ring, pages_to_gnt, segs_to_map);
848                                 ret = -ENOMEM;
849                                 goto out;
850                         }
851                         addr = vaddr(pages[i]->page);
852                         pages_to_gnt[segs_to_map] = pages[i]->page;
853                         pages[i]->persistent_gnt = NULL;
854                         flags = GNTMAP_host_map;
855                         if (!use_persistent_gnts && ro)
856                                 flags |= GNTMAP_readonly;
857                         gnttab_set_map_op(&map[segs_to_map++], addr,
858                                           flags, pages[i]->gref,
859                                           blkif->domid);
860                 }
861                 map_until = i + 1;
862                 if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
863                         break;
864         }
865
866         if (segs_to_map)
867                 ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
868
869         /*
870          * Now swizzle the MFN in our domain with the MFN from the other domain
871          * so that when we access vaddr(pending_req,i) it has the contents of
872          * the page from the other domain.
873          */
874         for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
875                 if (!pages[seg_idx]->persistent_gnt) {
876                         /* This is a newly mapped grant */
877                         BUG_ON(new_map_idx >= segs_to_map);
878                         if (unlikely(map[new_map_idx].status != 0)) {
879                                 pr_debug("invalid buffer -- could not remap it\n");
880                                 put_free_pages(ring, &pages[seg_idx]->page, 1);
881                                 pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
882                                 ret |= !ret;
883                                 goto next;
884                         }
885                         pages[seg_idx]->handle = map[new_map_idx].handle;
886                 } else {
887                         continue;
888                 }
889                 if (use_persistent_gnts &&
890                     ring->persistent_gnt_c < xen_blkif_max_pgrants) {
891                         /*
892                          * We are using persistent grants, the grant is
893                          * not mapped but we might have room for it.
894                          */
895                         persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
896                                                  GFP_KERNEL);
897                         if (!persistent_gnt) {
898                                 /*
899                                  * If we don't have enough memory to
900                                  * allocate the persistent_gnt struct,
901                                  * map this grant non-persistently.
902                                  */
903                                 goto next;
904                         }
905                         persistent_gnt->gnt = map[new_map_idx].ref;
906                         persistent_gnt->handle = map[new_map_idx].handle;
907                         persistent_gnt->page = pages[seg_idx]->page;
908                         if (add_persistent_gnt(ring,
909                                                persistent_gnt)) {
910                                 kfree(persistent_gnt);
911                                 persistent_gnt = NULL;
912                                 goto next;
913                         }
914                         pages[seg_idx]->persistent_gnt = persistent_gnt;
915                         pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
916                                  persistent_gnt->gnt, ring->persistent_gnt_c,
917                                  xen_blkif_max_pgrants);
918                         goto next;
919                 }
920                 if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
921                         blkif->vbd.overflow_max_grants = 1;
922                         pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
923                                  blkif->domid, blkif->vbd.handle);
924                 }
925                 /*
926                  * We could not map this grant persistently, so use it as
927                  * a non-persistent grant.
928                  */
929 next:
930                 new_map_idx++;
931         }
932         segs_to_map = 0;
933         last_map = map_until;
934         if (!ret && map_until != num)
935                 goto again;
936
937 out:
938         for (i = last_map; i < num; i++) {
939                 /* Don't zap current batch's valid persistent grants. */
940                 if (i >= map_until)
941                         pages[i]->persistent_gnt = NULL;
942                 pages[i]->handle = BLKBACK_INVALID_HANDLE;
943         }
944
945         return ret;
946 }
947
948 static int xen_blkbk_map_seg(struct pending_req *pending_req)
949 {
950         int rc;
951
952         rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
953                            pending_req->nr_segs,
954                            (pending_req->operation != BLKIF_OP_READ));
955
956         return rc;
957 }
958
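/*
 * Map the indirect descriptor pages of a BLKIF_OP_INDIRECT request and pull
 * the per-segment grant references and sector ranges out of them, validating
 * each entry before it is used.
 */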
959 static int xen_blkbk_parse_indirect(struct blkif_request *req,
960                                     struct pending_req *pending_req,
961                                     struct seg_buf seg[],
962                                     struct phys_req *preq)
963 {
964         struct grant_page **pages = pending_req->indirect_pages;
965         struct xen_blkif_ring *ring = pending_req->ring;
966         int indirect_grefs, rc, n, nseg, i;
967         struct blkif_request_segment *segments = NULL;
968
969         nseg = pending_req->nr_segs;
970         indirect_grefs = INDIRECT_PAGES(nseg);
971         BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
972
973         for (i = 0; i < indirect_grefs; i++)
974                 pages[i]->gref = req->u.indirect.indirect_grefs[i];
975
976         rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
977         if (rc)
978                 goto unmap;
979
980         for (n = 0, i = 0; n < nseg; n++) {
981                 uint8_t first_sect, last_sect;
982
983                 if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
984                         /* Map indirect segments */
985                         if (segments)
986                                 kunmap_atomic(segments);
987                         segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
988                 }
989                 i = n % SEGS_PER_INDIRECT_FRAME;
990
991                 pending_req->segments[n]->gref = segments[i].gref;
992
993                 first_sect = READ_ONCE(segments[i].first_sect);
994                 last_sect = READ_ONCE(segments[i].last_sect);
995                 if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
996                         rc = -EINVAL;
997                         goto unmap;
998                 }
999
1000                 seg[n].nsec = last_sect - first_sect + 1;
1001                 seg[n].offset = first_sect << 9;
1002                 preq->nr_sects += seg[n].nsec;
1003         }
1004
1005 unmap:
1006         if (segments)
1007                 kunmap_atomic(segments);
1008         xen_blkbk_unmap(ring, pages, indirect_grefs);
1009         return rc;
1010 }
1011
1012 static int dispatch_discard_io(struct xen_blkif_ring *ring,
1013                                 struct blkif_request *req)
1014 {
1015         int err = 0;
1016         int status = BLKIF_RSP_OKAY;
1017         struct xen_blkif *blkif = ring->blkif;
1018         struct block_device *bdev = blkif->vbd.bdev;
1019         unsigned long secure;
1020         struct phys_req preq;
1021
1022         xen_blkif_get(blkif);
1023
1024         preq.sector_number = req->u.discard.sector_number;
1025         preq.nr_sects      = req->u.discard.nr_sectors;
1026
1027         err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
1028         if (err) {
1029                 pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
1030                         preq.sector_number,
1031                         preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
1032                 goto fail_response;
1033         }
1034         ring->st_ds_req++;
1035
1036         secure = (blkif->vbd.discard_secure &&
1037                  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
1038                  BLKDEV_DISCARD_SECURE : 0;
1039
1040         err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
1041                                    req->u.discard.nr_sectors,
1042                                    GFP_KERNEL, secure);
1043 fail_response:
1044         if (err == -EOPNOTSUPP) {
1045                 pr_debug("discard op failed, not supported\n");
1046                 status = BLKIF_RSP_EOPNOTSUPP;
1047         } else if (err)
1048                 status = BLKIF_RSP_ERROR;
1049
1050         make_response(ring, req->u.discard.id, req->operation, status);
1051         xen_blkif_put(blkif);
1052         return err;
1053 }
1054
1055 static int dispatch_other_io(struct xen_blkif_ring *ring,
1056                              struct blkif_request *req,
1057                              struct pending_req *pending_req)
1058 {
1059         free_req(ring, pending_req);
1060         make_response(ring, req->u.other.id, req->operation,
1061                       BLKIF_RSP_EOPNOTSUPP);
1062         return -EIO;
1063 }
1064
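/*
 * Wait for all in-flight requests on this ring to complete; used to order a
 * WRITE_BARRIER behind the outstanding I/O.
 */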
1065 static void xen_blk_drain_io(struct xen_blkif_ring *ring)
1066 {
1067         struct xen_blkif *blkif = ring->blkif;
1068
1069         atomic_set(&blkif->drain, 1);
1070         do {
1071                 if (atomic_read(&ring->inflight) == 0)
1072                         break;
1073                 wait_for_completion_interruptible_timeout(
1074                                 &blkif->drain_complete, HZ);
1075
1076                 if (!atomic_read(&blkif->drain))
1077                         break;
1078         } while (!kthread_should_stop());
1079         atomic_set(&blkif->drain, 0);
1080 }
1081
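/*
 * Per-bio completion handling: record any error in the pending_req and, when
 * the last bio of the request finishes, unmap the grants and send the
 * response to the frontend.
 */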
1082 static void __end_block_io_op(struct pending_req *pending_req,
1083                 blk_status_t error)
1084 {
1085         /* An error fails the entire request. */
1086         if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
1087             error == BLK_STS_NOTSUPP) {
1088                 pr_debug("flush diskcache op failed, not supported\n");
1089                 xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
1090                 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1091         } else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
1092                    error == BLK_STS_NOTSUPP) {
1093                 pr_debug("write barrier op failed, not supported\n");
1094                 xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
1095                 pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1096         } else if (error) {
1097                 pr_debug("Buffer not up-to-date at end of operation,"
1098                          " error=%d\n", error);
1099                 pending_req->status = BLKIF_RSP_ERROR;
1100         }
1101
1102         /*
1103          * If all of the bio's have completed it is time to unmap
1104          * the grant references associated with 'request' and provide
1105          * the proper response on the ring.
1106          */
1107         if (atomic_dec_and_test(&pending_req->pendcnt))
1108                 xen_blkbk_unmap_and_respond(pending_req);
1109 }
1110
1111 /*
1112  * bio callback.
1113  */
1114 static void end_block_io_op(struct bio *bio)
1115 {
1116         __end_block_io_op(bio->bi_private, bio->bi_status);
1117         bio_put(bio);
1118 }
1119
1120
1121
1122 /*
1123  * Copy a 'struct blkif_request' from the ring buffer (it carries the
1124  * sectors we want, how many of them, the grant references, etc.) and
1125  * translate it into the block API to hand it over to the proper block disk.
1126  */
1127 static int
1128 __do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
1129 {
1130         union blkif_back_rings *blk_rings = &ring->blk_rings;
1131         struct blkif_request req;
1132         struct pending_req *pending_req;
1133         RING_IDX rc, rp;
1134         int more_to_do = 0;
1135
1136         rc = blk_rings->common.req_cons;
1137         rp = blk_rings->common.sring->req_prod;
1138         rmb(); /* Ensure we see queued requests up to 'rp'. */
1139
1140         if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1141                 rc = blk_rings->common.rsp_prod_pvt;
1142                 pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1143                         rp, rc, rp - rc, ring->blkif->vbd.pdevice);
1144                 return -EACCES;
1145         }
1146         while (rc != rp) {
1147
1148                 if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1149                         break;
1150
1151                 /* We've seen a request, so clear spurious eoi flag. */
1152                 *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
1153
1154                 if (kthread_should_stop()) {
1155                         more_to_do = 1;
1156                         break;
1157                 }
1158
1159                 pending_req = alloc_req(ring);
1160                 if (NULL == pending_req) {
1161                         ring->st_oo_req++;
1162                         more_to_do = 1;
1163                         break;
1164                 }
1165
1166                 switch (ring->blkif->blk_protocol) {
1167                 case BLKIF_PROTOCOL_NATIVE:
1168                         memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1169                         break;
1170                 case BLKIF_PROTOCOL_X86_32:
1171                         blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1172                         break;
1173                 case BLKIF_PROTOCOL_X86_64:
1174                         blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1175                         break;
1176                 default:
1177                         BUG();
1178                 }
1179                 blk_rings->common.req_cons = ++rc; /* before make_response() */
1180
1181                 /* Apply all sanity checks to /private copy/ of request. */
1182                 barrier();
1183
1184                 switch (req.operation) {
1185                 case BLKIF_OP_READ:
1186                 case BLKIF_OP_WRITE:
1187                 case BLKIF_OP_WRITE_BARRIER:
1188                 case BLKIF_OP_FLUSH_DISKCACHE:
1189                 case BLKIF_OP_INDIRECT:
1190                         if (dispatch_rw_block_io(ring, &req, pending_req))
1191                                 goto done;
1192                         break;
1193                 case BLKIF_OP_DISCARD:
1194                         free_req(ring, pending_req);
1195                         if (dispatch_discard_io(ring, &req))
1196                                 goto done;
1197                         break;
1198                 default:
1199                         if (dispatch_other_io(ring, &req, pending_req))
1200                                 goto done;
1201                         break;
1202                 }
1203
1204                 /* Yield point for this unbounded loop. */
1205                 cond_resched();
1206         }
1207 done:
1208         return more_to_do;
1209 }
1210
1211 static int
1212 do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
1213 {
1214         union blkif_back_rings *blk_rings = &ring->blk_rings;
1215         int more_to_do;
1216
1217         do {
1218                 more_to_do = __do_block_io_op(ring, eoi_flags);
1219                 if (more_to_do)
1220                         break;
1221
1222                 RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
1223         } while (more_to_do);
1224
1225         return more_to_do;
1226 }
1227 /*
1228  * Turn the 'struct blkif_request' into a proper 'struct bio' and call
1229  * 'submit_bio' to pass it to the underlying storage.
1230  */
1231 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1232                                 struct blkif_request *req,
1233                                 struct pending_req *pending_req)
1234 {
1235         struct phys_req preq;
1236         struct seg_buf *seg = pending_req->seg;
1237         unsigned int nseg;
1238         struct bio *bio = NULL;
1239         struct bio **biolist = pending_req->biolist;
1240         int i, nbio = 0;
1241         int operation;
1242         int operation_flags = 0;
1243         struct blk_plug plug;
1244         bool drain = false;
1245         struct grant_page **pages = pending_req->segments;
1246         unsigned short req_operation;
1247
1248         req_operation = req->operation == BLKIF_OP_INDIRECT ?
1249                         req->u.indirect.indirect_op : req->operation;
1250
1251         if ((req->operation == BLKIF_OP_INDIRECT) &&
1252             (req_operation != BLKIF_OP_READ) &&
1253             (req_operation != BLKIF_OP_WRITE)) {
1254                 pr_debug("Invalid indirect operation (%u)\n", req_operation);
1255                 goto fail_response;
1256         }
1257
1258         switch (req_operation) {
1259         case BLKIF_OP_READ:
1260                 ring->st_rd_req++;
1261                 operation = REQ_OP_READ;
1262                 break;
1263         case BLKIF_OP_WRITE:
1264                 ring->st_wr_req++;
1265                 operation = REQ_OP_WRITE;
1266                 operation_flags = REQ_SYNC | REQ_IDLE;
1267                 break;
1268         case BLKIF_OP_WRITE_BARRIER:
1269                 drain = true;
1270                 /* fall through */
1271         case BLKIF_OP_FLUSH_DISKCACHE:
1272                 ring->st_f_req++;
1273                 operation = REQ_OP_WRITE;
1274                 operation_flags = REQ_PREFLUSH;
1275                 break;
1276         default:
1277                 operation = 0; /* make gcc happy */
1278                 goto fail_response;
1279                 break;
1280         }
1281
1282         /* Check that the number of segments is sane. */
1283         nseg = req->operation == BLKIF_OP_INDIRECT ?
1284                req->u.indirect.nr_segments : req->u.rw.nr_segments;
1285
1286         if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
1287             unlikely((req->operation != BLKIF_OP_INDIRECT) &&
1288                      (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
1289             unlikely((req->operation == BLKIF_OP_INDIRECT) &&
1290                      (nseg > MAX_INDIRECT_SEGMENTS))) {
1291                 pr_debug("Bad number of segments in request (%d)\n", nseg);
1292                 /* Haven't submitted any bio's yet. */
1293                 goto fail_response;
1294         }
1295
1296         preq.nr_sects      = 0;
1297
1298         pending_req->ring      = ring;
1299         pending_req->id        = req->u.rw.id;
1300         pending_req->operation = req_operation;
1301         pending_req->status    = BLKIF_RSP_OKAY;
1302         pending_req->nr_segs   = nseg;
1303
1304         if (req->operation != BLKIF_OP_INDIRECT) {
1305                 preq.dev               = req->u.rw.handle;
1306                 preq.sector_number     = req->u.rw.sector_number;
1307                 for (i = 0; i < nseg; i++) {
1308                         pages[i]->gref = req->u.rw.seg[i].gref;
1309                         seg[i].nsec = req->u.rw.seg[i].last_sect -
1310                                 req->u.rw.seg[i].first_sect + 1;
1311                         seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1312                         if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
1313                             (req->u.rw.seg[i].last_sect <
1314                              req->u.rw.seg[i].first_sect))
1315                                 goto fail_response;
1316                         preq.nr_sects += seg[i].nsec;
1317                 }
1318         } else {
1319                 preq.dev               = req->u.indirect.handle;
1320                 preq.sector_number     = req->u.indirect.sector_number;
1321                 if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
1322                         goto fail_response;
1323         }
1324
1325         if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
1326                 pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
1327                          operation == REQ_OP_READ ? "read" : "write",
1328                          preq.sector_number,
1329                          preq.sector_number + preq.nr_sects,
1330                          ring->blkif->vbd.pdevice);
1331                 goto fail_response;
1332         }
1333
1334         /*
1335          * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
1336          * is set there.
1337          */
1338         for (i = 0; i < nseg; i++) {
1339                 if (((int)preq.sector_number|(int)seg[i].nsec) &
1340                     ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
1341                         pr_debug("Misaligned I/O request from domain %d\n",
1342                                  ring->blkif->domid);
1343                         goto fail_response;
1344                 }
1345         }
1346
1347         /* Wait on all outstanding I/Os and, once they have completed,
1348          * issue the flush.
1349          */
1350         if (drain)
1351                 xen_blk_drain_io(pending_req->ring);
1352
1353         /*
1354          * If we have failed at this point, we need to undo the M2P override,
1355          * set gnttab_set_unmap_op on all of the grant references and perform
1356          * the hypercall to unmap the grants - that is all done in
1357          * xen_blkbk_unmap.
1358          */
1359         if (xen_blkbk_map_seg(pending_req))
1360                 goto fail_flush;
1361
1362         /*
1363          * The corresponding xen_blkif_put is done in __end_block_io_op, or
1364          * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
1365          */
1366         xen_blkif_get(ring->blkif);
1367         atomic_inc(&ring->inflight);
1368
1369         for (i = 0; i < nseg; i++) {
1370                 while ((bio == NULL) ||
1371                        (bio_add_page(bio,
1372                                      pages[i]->page,
1373                                      seg[i].nsec << 9,
1374                                      seg[i].offset) == 0)) {
1375
1376                         int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
1377                         bio = bio_alloc(GFP_KERNEL, nr_iovecs);
1378                         if (unlikely(bio == NULL))
1379                                 goto fail_put_bio;
1380
1381                         biolist[nbio++] = bio;
1382                         bio_set_dev(bio, preq.bdev);
1383                         bio->bi_private = pending_req;
1384                         bio->bi_end_io  = end_block_io_op;
1385                         bio->bi_iter.bi_sector  = preq.sector_number;
1386                         bio_set_op_attrs(bio, operation, operation_flags);
1387                 }
1388
1389                 preq.sector_number += seg[i].nsec;
1390         }
1391
1392         /* This will be hit if the operation was a flush or discard. */
1393         if (!bio) {
1394                 BUG_ON(operation_flags != REQ_PREFLUSH);
1395
1396                 bio = bio_alloc(GFP_KERNEL, 0);
1397                 if (unlikely(bio == NULL))
1398                         goto fail_put_bio;
1399
1400                 biolist[nbio++] = bio;
1401                 bio_set_dev(bio, preq.bdev);
1402                 bio->bi_private = pending_req;
1403                 bio->bi_end_io  = end_block_io_op;
1404                 bio_set_op_attrs(bio, operation, operation_flags);
1405         }
1406
1407         atomic_set(&pending_req->pendcnt, nbio);
1408         blk_start_plug(&plug);
1409
1410         for (i = 0; i < nbio; i++)
1411                 submit_bio(biolist[i]);
1412
1413         /* Let the I/Os go.. */
1414         blk_finish_plug(&plug);
1415
1416         if (operation == REQ_OP_READ)
1417                 ring->st_rd_sect += preq.nr_sects;
1418         else if (operation == REQ_OP_WRITE)
1419                 ring->st_wr_sect += preq.nr_sects;
1420
1421         return 0;
1422
1423  fail_flush:
1424         xen_blkbk_unmap(ring, pending_req->segments,
1425                         pending_req->nr_segs);
1426  fail_response:
1427         /* Haven't submitted any bio's yet. */
1428         make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
1429         free_req(ring, pending_req);
1430         msleep(1); /* back off a bit */
1431         return -EIO;
1432
1433  fail_put_bio:
1434         for (i = 0; i < nbio; i++)
1435                 bio_put(biolist[i]);
1436         atomic_set(&pending_req->pendcnt, 1);
1437         __end_block_io_op(pending_req, BLK_STS_RESOURCE);
1438         msleep(1); /* back off a bit */
1439         return -EIO;
1440 }
1441
1442
1443
1444 /*
1445  * Put a response on the ring on how the operation fared.
1446  */
1447 static void make_response(struct xen_blkif_ring *ring, u64 id,
1448                           unsigned short op, int st)
1449 {
1450         struct blkif_response *resp;
1451         unsigned long     flags;
1452         union blkif_back_rings *blk_rings;
1453         int notify;
1454
1455         spin_lock_irqsave(&ring->blk_ring_lock, flags);
1456         blk_rings = &ring->blk_rings;
1457         /* Place on the response ring for the relevant domain. */
1458         switch (ring->blkif->blk_protocol) {
1459         case BLKIF_PROTOCOL_NATIVE:
1460                 resp = RING_GET_RESPONSE(&blk_rings->native,
1461                                          blk_rings->native.rsp_prod_pvt);
1462                 break;
1463         case BLKIF_PROTOCOL_X86_32:
1464                 resp = RING_GET_RESPONSE(&blk_rings->x86_32,
1465                                          blk_rings->x86_32.rsp_prod_pvt);
1466                 break;
1467         case BLKIF_PROTOCOL_X86_64:
1468                 resp = RING_GET_RESPONSE(&blk_rings->x86_64,
1469                                          blk_rings->x86_64.rsp_prod_pvt);
1470                 break;
1471         default:
1472                 BUG();
1473         }
1474
1475         resp->id        = id;
1476         resp->operation = op;
1477         resp->status    = st;
1478
1479         blk_rings->common.rsp_prod_pvt++;
1480         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1481         spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
1482         if (notify)
1483                 notify_remote_via_irq(ring->irq);
1484 }
1485
1486 static int __init xen_blkif_init(void)
1487 {
1488         int rc = 0;
1489
1490         if (!xen_domain())
1491                 return -ENODEV;
1492
1493         if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
1494                 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
1495                         xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
1496                 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
1497         }
1498
1499         if (xenblk_max_queues == 0)
1500                 xenblk_max_queues = num_online_cpus();
1501
1502         rc = xen_blkif_interface_init();
1503         if (rc)
1504                 goto failed_init;
1505
1506         rc = xen_blkif_xenbus_init();
1507         if (rc)
1508                 goto failed_init;
1509
1510  failed_init:
1511         return rc;
1512 }
1513
1514 module_init(xen_blkif_init);
1515
1516 MODULE_LICENSE("Dual BSD/GPL");
1517 MODULE_ALIAS("xen-backend:vbd");