/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"
/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */
static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");
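/*
 * Illustration only: as a 0644 module parameter this can be tuned at run
 * time from dom0 via sysfs, e.g. (path assumes the module is loaded as
 * xen_blkback; adjust if built in):
 *
 *	echo 2048 > /sys/module/xen_blkback/parameters/max_buffer_pages
 */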
/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */
static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");
/*
 * How long a persistent grant is allowed to remain allocated without being in
 * use. The time is in seconds, 0 means indefinitely long.
 */
static unsigned int xen_blkif_pgrant_timeout = 60;
module_param_named(persistent_grant_unused_seconds, xen_blkif_pgrant_timeout,
		   uint, 0644);
MODULE_PARM_DESC(persistent_grant_unused_seconds,
		 "Time in seconds an unused persistent grant is allowed to "
		 "remain allocated. Default is 60, 0 means unlimited.");
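/*
 * Note (illustrative): with the default of 60, a grant whose last_used
 * timestamp is more than 60 * HZ jiffies in the past becomes eligible for
 * purging; see persistent_gnt_timeout() below for the exact check.
 */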
/*
 * Maximum number of rings/queues blkback supports; allow as many queues as
 * there are CPUs if the user has not specified a value.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of hardware queues per virtual disk. " \
		 "By default it is the number of online CPUs.");
/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
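/*
 * Worked example (illustrative): an order of 4 means 1 << 4 = 16 shared
 * pages, i.e. a 16 * 4096 = 64KB ring, allowing more requests to be in
 * flight than a single-page (order 0) ring.
 */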
/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. The percent number of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5
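/*
 * Worked example (illustrative): with xen_blkif_max_pgrants at its default
 * of 1056, purge_persistent_gnt() below aims to clean at least
 * (1056 / 100) * 5 = 50 unused grants per LRU pass, plus however many
 * grants the ring currently holds above the configured maximum.
 */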
/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10
static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
	return xen_blkif_pgrant_timeout &&
	       (jiffies - persistent_gnt->last_used >=
		HZ * xen_blkif_pgrant_timeout);
}

static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	if (list_empty(&ring->free_pages)) {
		BUG_ON(ring->free_pages_num != 0);
		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
		return gnttab_alloc_pages(1, page);
	}
	BUG_ON(ring->free_pages_num == 0);
	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
	list_del(&page[0]->lru);
	ring->free_pages_num--;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);

	return 0;
}
static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
				  int num)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	for (i = 0; i < num; i++)
		list_add(&page[i]->lru, &ring->free_pages);
	ring->free_pages_num += num;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

	spin_lock_irqsave(&ring->free_pages_lock, flags);
	while (ring->free_pages_num > num) {
		BUG_ON(list_empty(&ring->free_pages));
		page[num_pages] = list_first_entry(&ring->free_pages,
						   struct page, lru);
		list_del(&page[num_pages]->lru);
		ring->free_pages_num--;
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
			gnttab_free_pages(num_pages, page);
			spin_lock_irqsave(&ring->free_pages_lock, flags);
			num_pages = 0;
		}
	}
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
	if (num_pages != 0)
		gnttab_free_pages(num_pages, page);
}
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
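/*
 * Usage sketch (illustrative, mirroring free_persistent_gnts() below):
 * 'n' caches the successor before the loop body runs, so the body may
 * safely rb_erase() and kfree() the current node:
 *
 *	struct persistent_gnt *gnt;
 *	struct rb_node *n;
 *
 *	foreach_grant_safe(gnt, n, &ring->persistent_gnts, node) {
 *		rb_erase(&gnt->node, &ring->persistent_gnts);
 *		kfree(gnt);
 *	}
 */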
/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;
	struct xen_blkif *blkif = ring->blkif;

	if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}

	/* Figure out where to put new node */
	new = &ring->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	persistent_gnt->active = true;
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
	ring->persistent_gnt_c++;
	atomic_inc(&ring->persistent_gnt_in_use);
	return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = ring->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (data->active) {
				pr_alert_ratelimited("requesting a grant already in use\n");
				return NULL;
			}
			data->active = true;
			atomic_inc(&ring->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif_ring *ring,
			       struct persistent_gnt *persistent_gnt)
{
	if (!persistent_gnt->active)
		pr_alert_ratelimited("freeing a grant already unused\n");
	persistent_gnt->last_used = jiffies;
	persistent_gnt->active = false;
	atomic_dec(&ring->persistent_gnt_in_use);
}
static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int segs_to_unmap = 0;
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
		       BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
				    (unsigned long) pfn_to_kaddr(page_to_pfn(
					    persistent_gnt->page)),
				    GNTMAP_host_map,
				    persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
		    !rb_next(&persistent_gnt->node)) {

			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

			put_free_pages(ring, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int segs_to_unmap = 0;
	struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	while (!list_empty(&ring->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&ring->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
				    vaddr(persistent_gnt->page),
				    GNTMAP_host_map,
				    persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
			put_free_pages(ring, pages, segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		unmap_data.count = segs_to_unmap;
		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
		put_free_pages(ring, pages, segs_to_unmap);
	}
}
static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false;
	struct rb_root *root;

	if (work_busy(&ring->persistent_purge_work)) {
		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
		goto out;
	}

	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !ring->blkif->vbd.overflow_max_grants)) {
		num_clean = 0;
	} else {
		num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
		num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants +
			    num_clean;
		num_clean = min(ring->persistent_gnt_c, num_clean);
		pr_debug("Going to purge at least %u persistent grants\n",
			 num_clean);
	}

	/*
	 * At this point, we can be sure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = 0;

	BUG_ON(!list_empty(&ring->persistent_purge_list));
	root = &ring->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
		       BLKBACK_INVALID_HANDLE);

		if (persistent_gnt->active)
			continue;
		if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
			continue;
		if (scan_used && total >= num_clean)
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &ring->persistent_purge_list);
		total++;
	}
	/*
	 * Check whether we also need to start cleaning
	 * grants that were used since the last purge in order to cope
	 * with the requested num.
	 */
	if (!scan_used && total < num_clean) {
		pr_debug("Still missing %u purged frames\n", num_clean - total);
		scan_used = true;
		goto purge_list;
	}

	if (total) {
		ring->persistent_gnt_c -= total;
		ring->blkif->vbd.overflow_max_grants = 0;

		/* We can defer this work */
		schedule_work(&ring->persistent_purge_work);
		pr_debug("Purged %u/%u\n", num_clean, total);
	}

out:
	return;
}
/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	if (!list_empty(&ring->pending_free)) {
		req = list_entry(ring->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free page.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	was_empty = list_empty(&ring->pending_free);
	list_add(&req->free_list, &ring->pending_free);
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	if (was_empty)
		wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != REQ_OP_READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}
static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info("VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn("Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn("Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn("Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
	ring->waiting_reqs = 1;
	wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif_ring *ring)
{
	pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		" | ds %4llu | pg: %4u/%4d\n",
		current->comm, ring->st_oo_req,
		ring->st_rd_req, ring->st_wr_req,
		ring->st_f_req, ring->st_ds_req,
		ring->persistent_gnt_c,
		xen_blkif_max_pgrants);
	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	ring->st_rd_req = 0;
	ring->st_wr_req = 0;
	ring->st_oo_req = 0;
	ring->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
	struct xen_blkif_ring *ring = arg;
	struct xen_blkif *blkif = ring->blkif;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;
	bool do_eoi;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	set_freezable();
	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			ring->wq,
			ring->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			ring->pending_free_wq,
			!list_empty(&ring->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		do_eoi = ring->waiting_reqs;

		ring->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(ring, &eoi_flags);
		if (ret > 0)
			ring->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(ring->shutdown_wq,
						 kthread_should_stop());

		if (do_eoi && !ring->waiting_reqs) {
			xen_irq_lateeoi(ring->irq, eoi_flags);
			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
		}

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, ring->next_lru)) {
			purge_persistent_gnt(ring);
			ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink if we have more than xen_blkif_max_buffer_pages */
		shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);

		if (log_stats && time_after(jiffies, ring->st_print))
			print_stats(ring);
	}

	/* Drain pending purge work */
	flush_work(&ring->persistent_purge_work);

	if (log_stats)
		print_stats(ring);

	ring->xenblkd = NULL;

	return 0;
}
/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
		free_persistent_gnts(ring, &ring->persistent_gnts,
				     ring->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
	ring->persistent_gnt_c = 0;

	/* Since we are shutting down remove all pages from the buffer */
	shrink_free_pagepool(ring, 0 /* All */);
}

static unsigned int xen_blkbk_unmap_prepare(
	struct xen_blkif_ring *ring,
	struct grant_page **pages,
	unsigned int num,
	struct gnttab_unmap_grant_ref *unmap_ops,
	struct page **unmap_pages)
{
	unsigned int i, invcount = 0;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(ring, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	return invcount;
}
static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
	struct pending_req *pending_req = (struct pending_req *)(data->data);
	struct xen_blkif_ring *ring = pending_req->ring;
	struct xen_blkif *blkif = ring->blkif;

	/*
	 * BUG_ON used to reproduce existing behaviour,
	 * but is this the best way to deal with this?
	 */
	BUG_ON(result);

	put_free_pages(ring, data->pages, data->count);
	make_response(ring, pending_req->id,
		      pending_req->operation, pending_req->status);
	free_req(ring, pending_req);
	/*
	 * Make sure the request is freed before releasing blkif,
	 * or there could be a race between free_req and the
	 * cleanup done in xen_blkif_free during shutdown.
	 *
	 * NB: The fact that we might try to wake up pending_free_wq
	 * before drain_complete (in case there's a drain going on) is
	 * not a problem with our current implementation, because we
	 * can be sure there's no thread waiting on pending_free_wq if
	 * there's a drain going on, but it has to be taken into
	 * account if the current model is changed.
	 */
	if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
		complete(&blkif->drain_complete);
	}
	xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
	struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
	struct xen_blkif_ring *ring = req->ring;
	struct grant_page **pages = req->segments;
	unsigned int invcount;

	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
					   req->unmap, req->unmap_pages);

	work->data = req;
	work->done = xen_blkbk_unmap_and_respond_callback;
	work->unmap_ops = req->unmap;
	work->kunmap_ops = NULL;
	work->pages = req->unmap_pages;
	work->count = invcount;

	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}
/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
			    struct grant_page *pages[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int invcount = 0;
	int ret;

	while (num) {
		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

		invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
						   unmap, unmap_pages);
		if (invcount) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
			BUG_ON(ret);
			put_free_pages(ring, unmap_pages, invcount);
		}
		pages += batch;
		num -= batch;
	}
}
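/*
 * Map the frontend's grant references into the backend's address space.
 * For each segment this either reuses an already-mapped persistent grant,
 * or maps the grant into a page taken from the ring's free-page pool,
 * optionally promoting it to a persistent grant while there is room below
 * xen_blkif_max_pgrants. Mapping ops are issued in batches of up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST per hypercall.
 */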
static int xen_blkbk_map(struct xen_blkif_ring *ring,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;
	struct xen_blkif *blkif = ring->blkif;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set up
	 * map[..] with the PFN of the page in our domain and the corresponding
	 * grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts) {
			persistent_gnt = get_persistent_gnt(
				ring,
				pages[i]->gref);
		}

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (get_free_page(ring, &pages[i]->page)) {
				put_free_pages(ring, pages_to_gnt, segs_to_map);
				ret = -ENOMEM;
				goto out;
			}
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map)
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug("invalid buffer -- could not remap it\n");
				put_free_pages(ring, &pages[seg_idx]->page, 1);
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= !ret;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistently
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(ring,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, ring->persistent_gnt_c,
				 xen_blkif_max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (!ret && map_until != num)
		goto again;

out:
	for (i = last_map; i < num; i++) {
		/* Don't zap current batch's valid persistent grants. */
		if (i >= map_until)
			pages[i]->persistent_gnt = NULL;
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
	}

	return ret;
}
static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
			   pending_req->nr_segs,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}

static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif_ring *ring = pending_req->ring;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment *segments = NULL;

	nseg = pending_req->nr_segs;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		uint8_t first_sect, last_sect;

		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;

		pending_req->segments[n]->gref = segments[i].gref;

		first_sect = READ_ONCE(segments[i].first_sect);
		last_sect = READ_ONCE(segments[i].last_sect);
		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
			rc = -EINVAL;
			goto unmap;
		}

		seg[n].nsec = last_sect - first_sect + 1;
		seg[n].offset = first_sect << 9;
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(ring, pages, indirect_grefs);
	return rc;
}
static int dispatch_discard_io(struct xen_blkif_ring *ring,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct xen_blkif *blkif = ring->blkif;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
	if (err) {
		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	ring->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug("discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(ring, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif_ring *ring,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(ring, pending_req);
	make_response(ring, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

static void xen_blk_drain_io(struct xen_blkif_ring *ring)
{
	struct xen_blkif *blkif = ring->blkif;

	atomic_set(&blkif->drain, 1);
	do {
		if (atomic_read(&ring->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}
static void __end_block_io_op(struct pending_req *pending_req,
			      blk_status_t error)
{
	/* An error fails the entire request. */
	if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
	    error == BLK_STS_NOTSUPP) {
		pr_debug("flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
		   error == BLK_STS_NOTSUPP) {
		pr_debug("write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug("Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt))
		xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
	__end_block_io_op(bio->bi_private, bio->bi_status);
	bio_put(bio);
}
/*
 * Function to copy the request from the ring buffer into a private
 * 'struct blkif_request' (which has the sectors we want, number of them,
 * grant references, etc) and transmute it to the block API to hand it over
 * to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
			rp, rc, rp - rc, ring->blkif->vbd.pdevice);
		return -EACCES;
	}

	while (rc != rp) {
		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		/* We've seen a request, so clear spurious eoi flag. */
		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req(ring);
		if (NULL == pending_req) {
			ring->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (ring->blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_INDIRECT:
			if (dispatch_rw_block_io(ring, &req, pending_req))
				goto done;
			break;
		case BLKIF_OP_DISCARD:
			free_req(ring, pending_req);
			if (dispatch_discard_io(ring, &req))
				goto done;
			break;
		default:
			if (dispatch_other_io(ring, &req, pending_req))
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
done:
	return more_to_do;
}
static int
do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(ring, eoi_flags);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
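/*
 * Note on the consumer loop above: RING_FINAL_CHECK_FOR_REQUESTS() re-arms
 * the frontend's event (sring->req_event) and then re-checks req_prod, so a
 * request queued between the last __do_block_io_op() pass and the re-arm is
 * picked up by the next iteration of the do/while instead of being lost.
 */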
/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	int operation_flags = 0;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;

	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug("Invalid indirect operation (%u)\n", req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		ring->st_rd_req++;
		operation = REQ_OP_READ;
		break;
	case BLKIF_OP_WRITE:
		ring->st_wr_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_SYNC | REQ_IDLE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		fallthrough;
	case BLKIF_OP_FLUSH_DISKCACHE:
		ring->st_f_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_PREFLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug("Bad number of segments in request (%d)\n", nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects      = 0;

	pending_req->ring      = ring;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_segs   = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev               = req->u.rw.handle;
		preq.sector_number     = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev               = req->u.indirect.handle;
		preq.sector_number     = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

	if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == REQ_OP_READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 ring->blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug("Misaligned I/O request from domain %d\n",
				 ring->blkif->domid);
			goto fail_response;
		}
	}

	/* Wait on all outstanding I/O's and once that has been completed
	 * issue the flush.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->ring);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(ring->blkif);
	atomic_inc(&ring->inflight);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {

			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio_set_dev(bio, preq.bdev);
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_iter.bi_sector  = preq.sector_number;
			bio_set_op_attrs(bio, operation, operation_flags);
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation_flags != REQ_PREFLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio_set_dev(bio, preq.bdev);
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio_set_op_attrs(bio, operation, operation_flags);
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == REQ_OP_READ)
		ring->st_rd_sect += preq.nr_sects;
	else if (operation == REQ_OP_WRITE)
		ring->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(ring, pending_req->segments,
			pending_req->nr_segs);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(ring, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
	msleep(1); /* back off a bit */
	return -EIO;
}
/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response *resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings;
	int notify;

	spin_lock_irqsave(&ring->blk_ring_lock, flags);
	blk_rings = &ring->blk_rings;
	/* Place on the response ring for the relevant domain. */
	switch (ring->blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		resp = RING_GET_RESPONSE(&blk_rings->native,
					 blk_rings->native.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_32:
		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
					 blk_rings->x86_32.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_64:
		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
					 blk_rings->x86_64.rsp_prod_pvt);
		break;
	default:
		BUG();
	}

	resp->id        = id;
	resp->operation = op;
	resp->status    = st;

	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(ring->irq);
}
static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	if (xenblk_max_queues == 0)
		xenblk_max_queues = num_online_cpus();

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

failed_init:
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");