/*
 * Copyright (c) 2017-2018 Christoph Hellwig.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/backing-dev.h>
#include <linux/moduleparam.h>
#include <trace/events/block.h>
#include "nvme.h"

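/*
 * Native multipathing is on by default and can only be changed at load time
 * (e.g. "nvme_core.multipath=N" on the kernel command line, as this file is
 * built into nvme-core); the 0444 permissions keep it read-only via sysfs.
 */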
static bool multipath = true;
module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
	"turn on native support for multiple controllers per subsystem");

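/*
 * The three helpers below start a queue freeze, wait for it to complete, and
 * unfreeze the request queues of every namespace head in a subsystem.
 * Callers must hold subsys->lock, which is asserted via lockdep.
 */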
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_unfreeze_queue(h->disk->queue);
}

void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_mq_freeze_queue_wait(h->disk->queue);
}

void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
	struct nvme_ns_head *h;

	lockdep_assert_held(&subsys->lock);
	list_for_each_entry(h, &subsys->nsheads, entry)
		if (h->disk)
			blk_freeze_queue_start(h->disk->queue);
}

/*
 * If multipathing is enabled we need to always use the subsystem instance
 * number for numbering our devices to avoid conflicts between subsystems that
 * have multiple controllers and thus use the multipath-aware subsystem node
 * and those that have a single controller and use the controller node
 * directly.
 */
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
			struct nvme_ctrl *ctrl, int *flags)
{
	if (!multipath) {
		sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
	} else if (ns->head->disk) {
		sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
				ctrl->cntlid, ns->head->instance);
		*flags = GENHD_FL_HIDDEN;
	} else {
		sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
				ns->head->instance);
	}
}

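/*
 * Called for a request that failed with a path-related status.  For ANA and
 * host path errors the bios are stolen from the request, the request is
 * completed, and the bios are requeued on the multipath node so they can be
 * retried on another path.  Returns false for any other status so the caller
 * falls back to the normal error handling.
 */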
bool nvme_failover_req(struct request *req)
{
	struct nvme_ns *ns = req->q->queuedata;
	u16 status = nvme_req(req)->status;
	unsigned long flags;

	switch (status & 0x7ff) {
	case NVME_SC_ANA_TRANSITION:
	case NVME_SC_ANA_INACCESSIBLE:
	case NVME_SC_ANA_PERSISTENT_LOSS:
		/*
		 * If we got back an ANA error we know the controller is alive,
		 * but not ready to serve this namespace.  The spec suggests
		 * we should update our general state here, but due to the fact
		 * that the admin and I/O queues are not serialized that is
		 * fundamentally racy.  So instead just clear the current path,
		 * mark the path as pending and kick off a re-read of the ANA
		 * log page ASAP.
		 */
		nvme_mpath_clear_current_path(ns);
		if (ns->ctrl->ana_log_buf) {
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
		break;
	case NVME_SC_HOST_PATH_ERROR:
		/*
		 * Temporary transport disruption in talking to the controller.
		 * Try to send on a new path.
		 */
		nvme_mpath_clear_current_path(ns);
		break;
	default:
		/* This was a non-ANA error so follow the normal error path. */
		return false;
	}

	spin_lock_irqsave(&ns->head->requeue_lock, flags);
	blk_steal_bios(&ns->head->requeue_list, req);
	spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
	blk_mq_end_request(req, 0);

	kblockd_schedule_work(&ns->head->requeue_work);
	return true;
}

void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		if (ns->head->disk)
			kblockd_schedule_work(&ns->head->requeue_work);
	}
	up_read(&ctrl->namespaces_rwsem);
}

static const char *nvme_ana_state_names[] = {
	[0]				= "invalid state",
	[NVME_ANA_OPTIMIZED]		= "optimized",
	[NVME_ANA_NONOPTIMIZED]		= "non-optimized",
	[NVME_ANA_INACCESSIBLE]		= "inaccessible",
	[NVME_ANA_PERSISTENT_LOSS]	= "persistent-loss",
	[NVME_ANA_CHANGE]		= "change",
};

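/*
 * Scan all sibling paths and pick a new current path: a live ANA-optimized
 * path is used immediately, otherwise a live non-optimized path is kept as a
 * fallback.  The result is cached in head->current_path.
 */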
static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns, *fallback = NULL;

	list_for_each_entry_rcu(ns, &head->list, siblings) {
		if (ns->ctrl->state != NVME_CTRL_LIVE ||
		    test_bit(NVME_NS_ANA_PENDING, &ns->flags))
			continue;
		switch (ns->ana_state) {
		case NVME_ANA_OPTIMIZED:
			rcu_assign_pointer(head->current_path, ns);
			return ns;
		case NVME_ANA_NONOPTIMIZED:
			fallback = ns;
			break;
		default:
			break;
		}
	}

	if (fallback)
		rcu_assign_pointer(head->current_path, fallback);
	return fallback;
}

static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
{
	return ns->ctrl->state == NVME_CTRL_LIVE &&
		ns->ana_state == NVME_ANA_OPTIMIZED;
}

inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
{
	struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu);

	if (unlikely(!ns || !nvme_path_is_optimized(ns)))
		ns = __nvme_find_path(head);
	return ns;
}

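/*
 * Bio submission entry point for the multipath (head) node.  A path is
 * chosen under the SRCU read lock and the bio is remapped to it; if no path
 * is usable but sibling paths still exist the bio is parked on the requeue
 * list, otherwise it is failed with BLK_STS_IOERR.
 */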
static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
		struct bio *bio)
{
	struct nvme_ns_head *head = q->queuedata;
	struct device *dev = disk_to_dev(head->disk);
	struct nvme_ns *ns;
	blk_qc_t ret = BLK_QC_T_NONE;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = nvme_find_path(head);
	if (likely(ns)) {
		bio->bi_disk = ns->disk;
		bio->bi_opf |= REQ_NVME_MPATH;
		trace_block_bio_remap(bio->bi_disk->queue, bio,
				      disk_devt(ns->head->disk),
				      bio->bi_iter.bi_sector);
		ret = direct_make_request(bio);
	} else if (!list_empty_careful(&head->list)) {
		dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");

		spin_lock_irq(&head->requeue_lock);
		bio_list_add(&head->requeue_list, bio);
		spin_unlock_irq(&head->requeue_lock);
	} else {
		dev_warn_ratelimited(dev, "no path - failing I/O\n");

		bio->bi_status = BLK_STS_IOERR;
		bio_endio(bio);
	}

	srcu_read_unlock(&head->srcu, srcu_idx);
	return ret;
}

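/*
 * Poll the cached current path, but only while it is still live and
 * optimized; otherwise report that nothing was found.
 */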
static bool nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
{
	struct nvme_ns_head *head = q->queuedata;
	struct nvme_ns *ns;
	bool found = false;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&head->srcu);
	ns = srcu_dereference(head->current_path, &head->srcu);
	if (likely(ns && nvme_path_is_optimized(ns)))
		found = ns->queue->poll_fn(q, qc);
	srcu_read_unlock(&head->srcu, srcu_idx);
	return found;
}

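/*
 * Work item that drains the requeue list and resubmits each bio through the
 * multipath node, so path selection runs again for it.
 */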
static void nvme_requeue_work(struct work_struct *work)
{
	struct nvme_ns_head *head =
		container_of(work, struct nvme_ns_head, requeue_work);
	struct bio *bio, *next;

	spin_lock_irq(&head->requeue_lock);
	next = bio_list_get(&head->requeue_list);
	spin_unlock_irq(&head->requeue_lock);

	while ((bio = next) != NULL) {
		next = bio->bi_next;
		bio->bi_next = NULL;

		/*
		 * Reset disk to the mpath node and resubmit to select a new
		 * path.
		 */
		bio->bi_disk = head->disk;
		generic_make_request(bio);
	}
}

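/*
 * Set up the per-head multipath state and, when the subsystem reports
 * multiple-controller support (CMIC bit 1) and multipathing is enabled,
 * allocate the shared request queue and gendisk ("nvme%dn%d") that I/O for
 * this namespace head is submitted through.
 */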
int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
{
	struct request_queue *q;
	bool vwc = false;

	mutex_init(&head->lock);
	bio_list_init(&head->requeue_list);
	spin_lock_init(&head->requeue_lock);
	INIT_WORK(&head->requeue_work, nvme_requeue_work);

	/*
	 * Add a multipath node if the subsystem supports multiple controllers.
	 * We also do this for private namespaces as the namespace sharing data
	 * could change after a rescan.
	 */
	if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath)
		return 0;

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL);
	if (!q)
		goto out;
	q->queuedata = head;
	blk_queue_make_request(q, nvme_ns_head_make_request);
	q->poll_fn = nvme_ns_head_poll;
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	/* set to a default value of 512 until the disk is validated */
	blk_queue_logical_block_size(q, 512);
	blk_set_stacking_limits(&q->limits);

	/* we need to propagate up the VWC settings */
	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
		vwc = true;
	blk_queue_write_cache(q, vwc, vwc);

	head->disk = alloc_disk(0);
	if (!head->disk)
		goto out_cleanup_queue;
	head->disk->fops = &nvme_ns_head_ops;
	head->disk->private_data = head;
	head->disk->queue = q;
	head->disk->flags = GENHD_FL_EXT_DEVT;
	sprintf(head->disk->disk_name, "nvme%dn%d",
			ctrl->subsys->instance, head->instance);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(q);
out:
	return -ENOMEM;
}

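/*
 * Called when a path becomes live: register the head gendisk on first use
 * and kick the requeue work so any bios parked while no path was available
 * get resubmitted.
 */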
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
	struct nvme_ns_head *head = ns->head;

	lockdep_assert_held(&ns->head->lock);

	if (!head->disk)
		return;

	if (!(head->disk->flags & GENHD_FL_UP))
		device_add_disk(&head->subsys->dev, head->disk,
				nvme_ns_id_attr_groups);

	synchronize_srcu(&ns->head->srcu);
	kblockd_schedule_work(&ns->head->requeue_work);
}

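/*
 * Walk the ANA log buffer, validate each group descriptor, and hand it to
 * the callback together with the caller-provided cookie.  The caller must
 * hold ana_lock, which protects ana_log_buf.
 */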
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
		int (*cb)(struct nvme_ctrl *ctrl, struct nvme_ana_group_desc *,
				void *))
{
	void *base = ctrl->ana_log_buf;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr);
	int error, i;

	lockdep_assert_held(&ctrl->ana_lock);

	for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
		struct nvme_ana_group_desc *desc = base + offset;
		u32 nr_nsids = le32_to_cpu(desc->nnsids);
		size_t nsid_buf_size = nr_nsids * sizeof(__le32);

		if (WARN_ON_ONCE(desc->grpid == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(le32_to_cpu(desc->grpid) > ctrl->anagrpmax))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state == 0))
			return -EINVAL;
		if (WARN_ON_ONCE(desc->state > NVME_ANA_CHANGE))
			return -EINVAL;

		offset += sizeof(*desc);
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - nsid_buf_size))
			return -EINVAL;

		error = cb(ctrl, desc, data);
		if (error)
			return error;

		offset += nsid_buf_size;
		if (WARN_ON_ONCE(offset > ctrl->ana_log_size - sizeof(*desc)))
			return -EINVAL;
	}

	return 0;
}

static inline bool nvme_state_is_live(enum nvme_ana_state state)
{
	return state == NVME_ANA_OPTIMIZED || state == NVME_ANA_NONOPTIMIZED;
}

static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
		struct nvme_ns *ns)
{
	mutex_lock(&ns->head->lock);
	ns->ana_grpid = le32_to_cpu(desc->grpid);
	ns->ana_state = desc->state;
	clear_bit(NVME_NS_ANA_PENDING, &ns->flags);

	if (nvme_state_is_live(ns->ana_state))
		nvme_mpath_set_live(ns);
	mutex_unlock(&ns->head->lock);
}

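/*
 * nvme_parse_ana_log() callback: log the group's state, count groups that
 * are in ANA change state, and update every namespace of this controller
 * that is listed in the descriptor's NSID array.
 */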
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
	unsigned *nr_change_groups = data;
	struct nvme_ns *ns;

	dev_info(ctrl->device, "ANA group %d: %s.\n",
			le32_to_cpu(desc->grpid),
			nvme_ana_state_names[desc->state]);

	if (desc->state == NVME_ANA_CHANGE)
		(*nr_change_groups)++;

	if (!nr_nsids)
		return 0;

	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		unsigned nsid;
again:
		nsid = le32_to_cpu(desc->nsids[n]);
		if (ns->head->ns_id < nsid)
			continue;
		if (ns->head->ns_id == nsid)
			nvme_update_ns_ana_state(desc, ns);
		if (++n == nr_nsids)
			break;
		if (ns->head->ns_id > nsid)
			goto again;
	}
	up_read(&ctrl->namespaces_rwsem);
	return 0;
}

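/*
 * Read the ANA log page (optionally groups only, via the RGO flag) and apply
 * it.  The ANATT timer is armed while any group is in change state so that a
 * stuck transition eventually triggers a controller reset.
 */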
static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
{
	u32 nr_change_groups = 0;
	int error;

	mutex_lock(&ctrl->ana_lock);
	error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
			groups_only ? NVME_ANA_LOG_RGO : 0,
			ctrl->ana_log_buf, ctrl->ana_log_size, 0);
	if (error) {
		dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
		goto out_unlock;
	}

	error = nvme_parse_ana_log(ctrl, &nr_change_groups,
			nvme_update_ana_state);
	if (error)
		goto out_unlock;

	/*
	 * In theory we should have an ANATT timer per group as they might enter
	 * the change state at different times.  But that is a lot of overhead
	 * just to protect against a target that keeps entering new change
	 * states while never finishing previous ones.  We'll still eventually
	 * time out once all groups are in change state, so this isn't a big
	 * deal.
	 *
	 * We also double the ANATT value to provide some slack for transports
	 * or AEN processing overhead.
	 */
	if (nr_change_groups)
		mod_timer(&ctrl->anatt_timer, ctrl->anatt * HZ * 2 + jiffies);
	else
		del_timer_sync(&ctrl->anatt_timer);
out_unlock:
	mutex_unlock(&ctrl->ana_lock);
	return error;
}

static void nvme_ana_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);

	nvme_read_ana_log(ctrl, false);
}

static void nvme_anatt_timeout(struct timer_list *t)
{
	struct nvme_ctrl *ctrl = from_timer(ctrl, t, anatt_timer);

	dev_info(ctrl->device, "ANATT timeout, resetting controller.\n");
	nvme_reset_ctrl(ctrl);
}

void nvme_mpath_stop(struct nvme_ctrl *ctrl)
{
	if (!nvme_ctrl_use_ana(ctrl))
		return;
	del_timer_sync(&ctrl->anatt_timer);
	cancel_work_sync(&ctrl->ana_work);
}

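/* Per-namespace sysfs attributes exposing the ANA group ID and state. */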
static ssize_t ana_grpid_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "%d\n", nvme_get_ns_from_dev(dev)->ana_grpid);
}
DEVICE_ATTR_RO(ana_grpid);

static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

	return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
}
DEVICE_ATTR_RO(ana_state);

static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
		struct nvme_ana_group_desc *desc, void *data)
{
	struct nvme_ana_group_desc *dst = data;

	if (desc->grpid != dst->grpid)
		return 0;

	*dst = *desc;
	return -ENXIO; /* just break out of the loop */
}

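/*
 * Called when a new path (namespace) is added: with ANA, look its group up
 * in the cached log and apply that state, or flag the path as pending and
 * re-read the log if the group is not there yet; without ANA the path is
 * simply marked optimized and made live.
 */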
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
	if (nvme_ctrl_use_ana(ns->ctrl)) {
		struct nvme_ana_group_desc desc = {
			.grpid = id->anagrpid,
			.state = 0,
		};

		mutex_lock(&ns->ctrl->ana_lock);
		ns->ana_grpid = le32_to_cpu(id->anagrpid);
		nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
		mutex_unlock(&ns->ctrl->ana_lock);
		if (desc.state) {
			/* found the group desc: update */
			nvme_update_ns_ana_state(&desc, ns);
		} else {
			/* group desc not found: trigger a re-read */
			set_bit(NVME_NS_ANA_PENDING, &ns->flags);
			queue_work(nvme_wq, &ns->ctrl->ana_work);
		}
	} else {
		mutex_lock(&ns->head->lock);
		ns->ana_state = NVME_ANA_OPTIMIZED;
		nvme_mpath_set_live(ns);
		mutex_unlock(&ns->head->lock);
	}

	if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
		struct gendisk *disk = ns->head->disk;

		if (disk)
			disk->queue->backing_dev_info->capabilities |=
					BDI_CAP_STABLE_WRITES;
	}
}

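/*
 * Tear down the multipath gendisk and queue for a namespace head, flushing
 * the requeue work first so no bios are left behind on the requeue list.
 */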
void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
	if (!head->disk)
		return;
	if (head->disk->flags & GENHD_FL_UP)
		del_gendisk(head->disk);
	blk_set_queue_dying(head->disk->queue);
	/* make sure all pending bios are cleaned up */
	kblockd_schedule_work(&head->requeue_work);
	flush_work(&head->requeue_work);
	blk_cleanup_queue(head->disk->queue);
	put_disk(head->disk);
}

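/*
 * Initialize ANA support from the Identify Controller data: record the ANA
 * capabilities and limits, size and allocate the ANA log buffer, and do the
 * initial log read.  ANA is silently disabled if the log would not fit in a
 * single MDTS-sized transfer.
 */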
int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
	int error;

	/* check if multipath is enabled and we have the capability */
	if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
		return 0;

	ctrl->anacap = id->anacap;
	ctrl->anatt = id->anatt;
	ctrl->nanagrpid = le32_to_cpu(id->nanagrpid);
	ctrl->anagrpmax = le32_to_cpu(id->anagrpmax);

	mutex_init(&ctrl->ana_lock);
	timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
	ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
		ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
	ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);

	if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
		dev_err(ctrl->device,
			"ANA log page size (%zd) larger than MDTS (%d).\n",
			ctrl->ana_log_size,
			ctrl->max_hw_sectors << SECTOR_SHIFT);
		dev_err(ctrl->device, "disabling ANA support.\n");
		return 0;
	}

	INIT_WORK(&ctrl->ana_work, nvme_ana_work);
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
	if (!ctrl->ana_log_buf) {
		error = -ENOMEM;
		goto out;
	}

	error = nvme_read_ana_log(ctrl, false);
	if (error)
		goto out_free_ana_log_buf;
	return 0;
out_free_ana_log_buf:
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
out:
	return error;
}

void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
	kfree(ctrl->ana_log_buf);
	ctrl->ana_log_buf = NULL;
}