/*
 * Common code for the NVMe target.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/random.h>
#include <linux/rculist.h>

#include "nvmet.h"

struct workqueue_struct *buffered_io_wq;
static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
static DEFINE_IDA(cntlid_ida);

/*
 * This read/write semaphore is used to synchronize access to configuration
 * information on a target system that will result in discovery log page
 * information change for at least one host.
 * The full list of resources to be protected by this semaphore is:
 *
 *  - subsystems list
 *  - per-subsystem allowed hosts list
 *  - allow_any_host subsystem attribute
 *  - nvmet_genctr
 *  - the nvmet_transports array
 *
 * When updating any of those lists/structures, the write lock should be
 * obtained, while when reading (populating the discovery log page or checking
 * host-subsystem link) the read lock is obtained to allow concurrent reads.
 */
DECLARE_RWSEM(nvmet_config_sem);

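/*
 * ANA (Asymmetric Namespace Access) bookkeeping: nvmet_ana_group_enabled
 * counts how many namespaces are using each ANA group, and nvmet_ana_sem
 * serializes updates to the ANA state exposed through the ANA log page.
 */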
u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
u64 nvmet_ana_chgcnt;
DECLARE_RWSEM(nvmet_ana_sem);

static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn);

u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
		size_t len)
{
	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
{
	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
{
	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
	return 0;
}

static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
{
	struct nvmet_ns *ns;

	if (list_empty(&subsys->namespaces))
		return 0;

	ns = list_last_entry(&subsys->namespaces, struct nvmet_ns, dev_link);
	return ns->nsid;
}

static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
{
	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
}

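/*
 * Note on Asynchronous Event Requests: the host parks AER commands on the
 * admin queue and the target completes one whenever it has an event to
 * report.  The helpers below fail parked AERs on teardown, and drain queued
 * events into them as they arrive.
 */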
static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
{
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		if (!ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
	}
}

static void nvmet_async_event_work(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, async_event_work);
	struct nvmet_async_event *aen;
	struct nvmet_req *req;

	while (1) {
		mutex_lock(&ctrl->lock);
		aen = list_first_entry_or_null(&ctrl->async_events,
				struct nvmet_async_event, entry);
		if (!aen || !ctrl->nr_async_event_cmds) {
			mutex_unlock(&ctrl->lock);
			return;
		}

		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
		nvmet_set_result(req, nvmet_async_event_result(aen));

		list_del(&aen->entry);
		kfree(aen);

		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, 0);
	}
}

static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
		u8 event_info, u8 log_page)
{
	struct nvmet_async_event *aen;

	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
	if (!aen)
		return;

	aen->event_type = event_type;
	aen->event_info = event_info;
	aen->log_page = log_page;

	mutex_lock(&ctrl->lock);
	list_add_tail(&aen->entry, &ctrl->async_events);
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static bool nvmet_aen_disabled(struct nvmet_ctrl *ctrl, u32 aen)
{
	if (!(READ_ONCE(ctrl->aen_enabled) & aen))
		return true;
	return test_and_set_bit(aen, &ctrl->aen_masked);
}

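/*
 * The Changed Namespace List log page holds at most
 * NVME_MAX_CHANGED_NAMESPACES entries.  Per the NVMe spec, once it would
 * overflow, the list is replaced by a single entry of 0xffffffff;
 * nr_changed_ns is parked at U32_MAX so further additions bail out early.
 */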
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	u32 i;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
		goto out_unlock;

	for (i = 0; i < ctrl->nr_changed_ns; i++) {
		if (ctrl->changed_ns_list[i] == nsid)
			goto out_unlock;
	}

	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
		ctrl->nr_changed_ns = U32_MAX;
		goto out_unlock;
	}

	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
out_unlock:
	mutex_unlock(&ctrl->lock);
}

void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ctrl *ctrl;

	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_NS_ATTR))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_NS_CHANGED,
				NVME_LOG_CHANGED_NS);
	}
}

void nvmet_send_ana_event(struct nvmet_subsys *subsys,
		struct nvmet_port *port)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (port && ctrl->port != port)
			continue;
		if (nvmet_aen_disabled(ctrl, NVME_AEN_CFG_ANA_CHANGE))
			continue;
		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
	}
	mutex_unlock(&subsys->lock);
}

void nvmet_port_send_ana_event(struct nvmet_port *port)
{
	struct nvmet_subsys_link *p;

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry)
		nvmet_send_ana_event(p->subsys, port);
	up_read(&nvmet_config_sem);
}

int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
{
	int ret = 0;

	down_write(&nvmet_config_sem);
	if (nvmet_transports[ops->type])
		ret = -EINVAL;
	else
		nvmet_transports[ops->type] = ops;
	up_write(&nvmet_config_sem);

	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_register_transport);

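/*
 * Example usage (a sketch only; the "foo" names are illustrative, not part
 * of this file): a transport module fills in a nvmet_fabrics_ops and
 * registers it from its module init:
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_FOO,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	return nvmet_register_transport(&nvmet_foo_ops);
 */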
void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
{
	down_write(&nvmet_config_sem);
	nvmet_transports[ops->type] = NULL;
	up_write(&nvmet_config_sem);
}
EXPORT_SYMBOL_GPL(nvmet_unregister_transport);

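/*
 * Called with nvmet_config_sem held for writing.  The lock is dropped
 * around request_module() so a missing transport can be autoloaded via
 * its "nvmet-transport-%d" module alias, then re-taken and re-checked.
 */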
int nvmet_enable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;
	int ret;

	lockdep_assert_held(&nvmet_config_sem);

	ops = nvmet_transports[port->disc_addr.trtype];
	if (!ops) {
		up_write(&nvmet_config_sem);
		request_module("nvmet-transport-%d", port->disc_addr.trtype);
		down_write(&nvmet_config_sem);
		ops = nvmet_transports[port->disc_addr.trtype];
		if (!ops) {
			pr_err("transport type %d not supported\n",
				port->disc_addr.trtype);
			return -EINVAL;
		}
	}

	if (!try_module_get(ops->owner))
		return -EINVAL;

	ret = ops->add_port(port);
	if (ret) {
		module_put(ops->owner);
		return ret;
	}

	/* If the transport didn't set inline_data_size, then disable it. */
	if (port->inline_data_size < 0)
		port->inline_data_size = 0;

	port->enabled = true;
	return 0;
}

void nvmet_disable_port(struct nvmet_port *port)
{
	const struct nvmet_fabrics_ops *ops;

	lockdep_assert_held(&nvmet_config_sem);

	port->enabled = false;

	ops = nvmet_transports[port->disc_addr.trtype];
	ops->remove_port(port);
	module_put(ops->owner);
}

static void nvmet_keep_alive_timer(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
			struct nvmet_ctrl, ka_work);

	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
		ctrl->cntlid, ctrl->kato);

	nvmet_ctrl_fatal_error(ctrl);
}

static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
{
	if (unlikely(ctrl->kato == 0))
		return;

	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);

	cancel_delayed_work_sync(&ctrl->ka_work);
}

static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl,
		__le32 nsid)
{
	struct nvmet_ns *ns;

	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid == le32_to_cpu(nsid))
			return ns;
	}

	return NULL;
}

struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
{
	struct nvmet_ns *ns;

	rcu_read_lock();
	ns = __nvmet_find_namespace(ctrl, nsid);
	if (ns)
		percpu_ref_get(&ns->ref);
	rcu_read_unlock();

	return ns;
}

static void nvmet_destroy_namespace(struct percpu_ref *ref)
{
	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);

	complete(&ns->disable_done);
}

void nvmet_put_namespace(struct nvmet_ns *ns)
{
	percpu_ref_put(&ns->ref);
}

static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
{
	nvmet_bdev_ns_disable(ns);
	nvmet_file_ns_disable(ns);
}

int nvmet_ns_enable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;
	int ret;

	mutex_lock(&subsys->lock);
	ret = -EMFILE;
	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
		goto out_unlock;
	ret = 0;
	if (ns->enabled)
		goto out_unlock;

	ret = nvmet_bdev_ns_enable(ns);
	if (ret == -ENOTBLK)
		ret = nvmet_file_ns_enable(ns);
	if (ret)
		goto out_unlock;

	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
				0, GFP_KERNEL);
	if (ret)
		goto out_dev_put;

	if (ns->nsid > subsys->max_nsid)
		subsys->max_nsid = ns->nsid;

	/*
	 * The namespaces list needs to be sorted to simplify the implementation
	 * of the Identify Namespace List subcommand.
	 */
	if (list_empty(&subsys->namespaces)) {
		list_add_tail_rcu(&ns->dev_link, &subsys->namespaces);
	} else {
		struct nvmet_ns *old;

		list_for_each_entry_rcu(old, &subsys->namespaces, dev_link) {
			BUG_ON(ns->nsid == old->nsid);
			if (ns->nsid < old->nsid)
				break;
		}

		list_add_tail_rcu(&ns->dev_link, &old->dev_link);
	}
	subsys->nr_namespaces++;

	nvmet_ns_changed(subsys, ns->nsid);
	ns->enabled = true;
	ret = 0;
out_unlock:
	mutex_unlock(&subsys->lock);
	return ret;
out_dev_put:
	nvmet_ns_dev_disable(ns);
	goto out_unlock;
}

void nvmet_ns_disable(struct nvmet_ns *ns)
{
	struct nvmet_subsys *subsys = ns->subsys;

	mutex_lock(&subsys->lock);
	if (!ns->enabled)
		goto out_unlock;

	ns->enabled = false;
	list_del_rcu(&ns->dev_link);
	if (ns->nsid == subsys->max_nsid)
		subsys->max_nsid = nvmet_max_nsid(subsys);
	mutex_unlock(&subsys->lock);

	/*
	 * Now that we removed the namespace from the lookup list, we
	 * can kill the per_cpu ref and wait for any remaining references
	 * to be dropped, as well as an RCU grace period for anyone only
	 * using the namespace under rcu_read_lock().  Note that we can't
	 * use call_rcu here as we need to ensure the namespaces have
	 * been fully destroyed before unloading the module.
	 */
	percpu_ref_kill(&ns->ref);
	synchronize_rcu();
	wait_for_completion(&ns->disable_done);
	percpu_ref_exit(&ns->ref);

	mutex_lock(&subsys->lock);
	subsys->nr_namespaces--;
	nvmet_ns_changed(subsys, ns->nsid);
	nvmet_ns_dev_disable(ns);
out_unlock:
	mutex_unlock(&subsys->lock);
}

void nvmet_ns_free(struct nvmet_ns *ns)
{
	nvmet_ns_disable(ns);

	down_write(&nvmet_ana_sem);
	nvmet_ana_group_enabled[ns->anagrpid]--;
	up_write(&nvmet_ana_sem);

	kfree(ns->device_path);
	kfree(ns);
}

struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
{
	struct nvmet_ns *ns;

	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
	if (!ns)
		return NULL;

	INIT_LIST_HEAD(&ns->dev_link);
	init_completion(&ns->disable_done);

	ns->nsid = nsid;
	ns->subsys = subsys;

	down_write(&nvmet_ana_sem);
	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
	nvmet_ana_group_enabled[ns->anagrpid]++;
	up_write(&nvmet_ana_sem);

	uuid_gen(&ns->uuid);
	ns->buffered_io = false;

	return ns;
}

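/*
 * Completion path: advance the submission queue head with a cmpxchg loop
 * (completions may race from several contexts), fill in the common CQE
 * fields, then hand the response to the transport via queue_response().
 */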
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	u32 old_sqhd, new_sqhd;
	u16 sqhd;
	struct nvmet_ns *ns = req->ns;

	if (status)
		nvmet_set_status(req, status);

	if (req->sq->size) {
		do {
			old_sqhd = req->sq->sqhd;
			new_sqhd = (old_sqhd + 1) % req->sq->size;
		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
				old_sqhd);
	}
	sqhd = req->sq->sqhd & 0x0000FFFF;
	req->rsp->sq_head = cpu_to_le16(sqhd);
	req->rsp->sq_id = cpu_to_le16(req->sq->qid);
	req->rsp->command_id = req->cmd->common.command_id;

	req->ops->queue_response(req);
	if (ns)
		nvmet_put_namespace(ns);
}

void nvmet_req_complete(struct nvmet_req *req, u16 status)
{
	__nvmet_req_complete(req, status);
	percpu_ref_put(&req->sq->ref);
}
EXPORT_SYMBOL_GPL(nvmet_req_complete);

void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
		u16 qid, u16 size)
{
	cq->qid = qid;
	cq->size = size;

	ctrl->cqs[qid] = cq;
}

void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
		u16 qid, u16 size)
{
	sq->sqhd = 0;
	sq->qid = qid;
	sq->size = size;

	ctrl->sqs[qid] = sq;
}

static void nvmet_confirm_sq(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->confirm_done);
}

void nvmet_sq_destroy(struct nvmet_sq *sq)
{
	/*
	 * If this is the admin queue, complete all AERs so that our
	 * queue doesn't have outstanding requests on it.
	 */
	if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
		nvmet_async_events_free(sq->ctrl);
	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
	wait_for_completion(&sq->confirm_done);
	wait_for_completion(&sq->free_done);
	percpu_ref_exit(&sq->ref);

	if (sq->ctrl) {
		nvmet_ctrl_put(sq->ctrl);
		sq->ctrl = NULL; /* allows reusing the queue later */
	}
}
EXPORT_SYMBOL_GPL(nvmet_sq_destroy);

static void nvmet_sq_free(struct percpu_ref *ref)
{
	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);

	complete(&sq->free_done);
}

int nvmet_sq_init(struct nvmet_sq *sq)
{
	int ret;

	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
	if (ret) {
		pr_err("percpu_ref init failed!\n");
		return ret;
	}
	init_completion(&sq->free_done);
	init_completion(&sq->confirm_done);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_sq_init);

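/*
 * Map the ANA state of the namespace's group on this port to an NVMe
 * status code, so that I/O to an inaccessible, persistently lost, or
 * transitioning ANA group fails in a way the host can act on.
 */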
static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
		struct nvmet_ns *ns)
{
	enum nvme_ana_state state = port->ana_state[ns->anagrpid];

	if (unlikely(state == NVME_ANA_INACCESSIBLE))
		return NVME_SC_ANA_INACCESSIBLE;
	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
		return NVME_SC_ANA_PERSISTENT_LOSS;
	if (unlikely(state == NVME_ANA_CHANGE))
		return NVME_SC_ANA_TRANSITION;
	return 0;
}

static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
{
	if (unlikely(req->ns->readonly)) {
		switch (req->cmd->common.opcode) {
		case nvme_cmd_read:
		case nvme_cmd_flush:
			break;
		default:
			return NVME_SC_NS_WRITE_PROTECTED;
		}
	}

	return 0;
}

static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
	if (unlikely(!req->ns))
		return NVME_SC_INVALID_NS | NVME_SC_DNR;
	ret = nvmet_check_ana_state(req->port, req->ns);
	if (unlikely(ret))
		return ret;
	ret = nvmet_io_cmd_check_access(req);
	if (unlikely(ret))
		return ret;

	if (req->ns->file)
		return nvmet_file_parse_io_cmd(req);
	else
		return nvmet_bdev_parse_io_cmd(req);
}

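/*
 * Command dispatch: until a Connect has created the controller only
 * connect commands are accepted; afterwards I/O queues (qid != 0) parse
 * I/O commands, and the admin queue parses fabrics, discovery, or admin
 * commands as appropriate.
 */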
bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
{
	u8 flags = req->cmd->common.flags;
	u16 status;

	req->cq = cq;
	req->sq = sq;
	req->ops = ops;
	req->sg = NULL;
	req->sg_cnt = 0;
	req->transfer_len = 0;
	req->rsp->status = 0;
	req->ns = NULL;

	/* no support for fused commands yet */
	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	/*
	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
	 * contains an address of a single contiguous physical buffer that is
	 * byte aligned.
	 */
	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	if (unlikely(!req->sq->ctrl))
		/* will return an error for any non-connect command: */
		status = nvmet_parse_connect_cmd(req);
	else if (likely(req->sq->qid != 0))
		status = nvmet_parse_io_cmd(req);
	else if (req->cmd->common.opcode == nvme_fabrics_command)
		status = nvmet_parse_fabrics_cmd(req);
	else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
		status = nvmet_parse_discovery_cmd(req);
	else
		status = nvmet_parse_admin_cmd(req);

	if (unlikely(status))
		goto fail;

	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto fail;
	}

	return true;

fail:
	__nvmet_req_complete(req, status);
	return false;
}
EXPORT_SYMBOL_GPL(nvmet_req_init);

void nvmet_req_uninit(struct nvmet_req *req)
{
	percpu_ref_put(&req->sq->ref);
	if (req->ns)
		nvmet_put_namespace(req->ns);
}
EXPORT_SYMBOL_GPL(nvmet_req_uninit);

void nvmet_req_execute(struct nvmet_req *req)
{
	if (unlikely(req->data_len != req->transfer_len))
		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
	else
		req->execute(req);
}
EXPORT_SYMBOL_GPL(nvmet_req_execute);

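/*
 * Typical transport-side request lifecycle (a sketch; the nvmet_foo_ops
 * name is illustrative, not part of this file):
 *
 *	if (!nvmet_req_init(req, cq, sq, &nvmet_foo_ops))
 *		return;		(already completed with an error status)
 *
 *	map the command's data into req->sg / req->sg_cnt and set
 *	req->transfer_len, then call:
 *
 *	nvmet_req_execute(req);
 *
 * On completion the core calls req->ops->queue_response().
 */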
static inline bool nvmet_cc_en(u32 cc)
{
	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
}

static inline u8 nvmet_cc_css(u32 cc)
{
	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_mps(u32 cc)
{
	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_ams(u32 cc)
{
	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
}

static inline u8 nvmet_cc_shn(u32 cc)
{
	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
}

static inline u8 nvmet_cc_iosqes(u32 cc)
{
	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
}

static inline u8 nvmet_cc_iocqes(u32 cc)
{
	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
}

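/*
 * The helpers above extract the individual CC register fields.  Enabling a
 * controller with unsupported queue entry sizes, page size, arbitration
 * mechanism, or command set selection sets CSTS.CFS below instead of
 * CSTS.RDY.
 */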
static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/*
	 * Only I/O controllers should verify iosqes,iocqes.
	 * Strictly speaking, the spec says a discovery controller
	 * should verify iosqes,iocqes are zeroed, however that
	 * would break backwards compatibility, so don't enforce it.
	 */
	if (ctrl->subsys->type != NVME_NQN_DISC &&
	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	if (nvmet_cc_mps(ctrl->cc) != 0 ||
	    nvmet_cc_ams(ctrl->cc) != 0 ||
	    nvmet_cc_css(ctrl->cc) != 0) {
		ctrl->csts = NVME_CSTS_CFS;
		return;
	}

	ctrl->csts = NVME_CSTS_RDY;

	/*
	 * Controllers that are not yet enabled should not really enforce the
	 * keep alive timeout, but we still want to track a timeout and cleanup
	 * in case a host died before it enabled the controller.  Hence, simply
	 * reset the keep alive timer when the controller is enabled.
	 */
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
{
	lockdep_assert_held(&ctrl->lock);

	/* XXX: tear down queues? */
	ctrl->csts &= ~NVME_CSTS_RDY;
	ctrl->cc = 0;
}

void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
{
	u32 old;

	mutex_lock(&ctrl->lock);
	old = ctrl->cc;
	ctrl->cc = new;

	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
		nvmet_start_ctrl(ctrl);
	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
		nvmet_clear_ctrl(ctrl);
	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
		nvmet_clear_ctrl(ctrl);
		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
	}
	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
	mutex_unlock(&ctrl->lock);
}

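/*
 * CAP layout per the NVMe spec: MQES (bits 15:0) is a 0's based maximum
 * queue size, TO (bits 31:24) is in 500ms units, and CSS bit 37 indicates
 * support for the NVM command set.
 */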
static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
{
	/* command sets supported: NVMe command set: */
	ctrl->cap = (1ULL << 37);
	/* CC.EN timeout in 500msec units: */
	ctrl->cap |= (15ULL << 24);
	/* maximum queue entries supported: */
	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
}

u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
		struct nvmet_req *req, struct nvmet_ctrl **ret)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	u16 status = 0;

	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	}

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
		if (ctrl->cntlid == cntlid) {
			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
				pr_warn("hostnqn mismatch.\n");
				continue;
			}
			if (!kref_get_unless_zero(&ctrl->ref))
				continue;

			*ret = ctrl;
			goto out;
		}
	}

	pr_warn("could not find controller %d for subsys %s / host %s\n",
		cntlid, subsysnqn, hostnqn);
	req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;

out:
	mutex_unlock(&subsys->lock);
	nvmet_subsys_put(subsys);
	return status;
}

u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
{
	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
		       cmd->common.opcode, req->sq->qid);
		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
	}
	return 0;
}

static bool __nvmet_host_allowed(struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	struct nvmet_host_link *p;

	if (subsys->allow_any_host)
		return true;

	list_for_each_entry(p, &subsys->hosts, entry) {
		if (!strcmp(nvmet_host_name(p->host), hostnqn))
			return true;
	}

	return false;
}

static bool nvmet_host_discovery_allowed(struct nvmet_req *req,
		const char *hostnqn)
{
	struct nvmet_subsys_link *s;

	list_for_each_entry(s, &req->port->subsystems, entry) {
		if (__nvmet_host_allowed(s->subsys, hostnqn))
			return true;
	}

	return false;
}

bool nvmet_host_allowed(struct nvmet_req *req, struct nvmet_subsys *subsys,
		const char *hostnqn)
{
	lockdep_assert_held(&nvmet_config_sem);

	if (subsys->type == NVME_NQN_DISC)
		return nvmet_host_discovery_allowed(req, hostnqn);
	else
		return __nvmet_host_allowed(subsys, hostnqn);
}

static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
			container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
{
	struct nvmet_subsys *subsys;
	struct nvmet_ctrl *ctrl;
	int ret;
	u16 status;

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
	if (!subsys) {
		pr_warn("connect request for invalid subsystem %s!\n",
			subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
		goto out;
	}

	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
	down_read(&nvmet_config_sem);
	if (!nvmet_host_allowed(req, subsys, hostnqn)) {
		pr_info("connect by host %s for subsystem %s not allowed\n",
			hostnqn, subsysnqn);
		req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
		up_read(&nvmet_config_sem);
		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
		goto out_put_subsystem;
	}
	up_read(&nvmet_config_sem);

	status = NVME_SC_INTERNAL;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		goto out_put_subsystem;
	mutex_init(&ctrl->lock);

	nvmet_init_cap(ctrl);

	ctrl->port = req->port;

	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
	INIT_LIST_HEAD(&ctrl->async_events);
	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);

	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);

	kref_init(&ctrl->ref);
	ctrl->subsys = subsys;
	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);

	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
			sizeof(__le32), GFP_KERNEL);
	if (!ctrl->changed_ns_list)
		goto out_free_ctrl;

	ctrl->cqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_cq *),
			GFP_KERNEL);
	if (!ctrl->cqs)
		goto out_free_changed_ns_list;

	ctrl->sqs = kcalloc(subsys->max_qid + 1,
			sizeof(struct nvmet_sq *),
			GFP_KERNEL);
	if (!ctrl->sqs)
		goto out_free_cqs;

	ret = ida_simple_get(&cntlid_ida,
			     NVME_CNTLID_MIN, NVME_CNTLID_MAX,
			     GFP_KERNEL);
	if (ret < 0) {
		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
		goto out_free_sqs;
	}
	ctrl->cntlid = ret;

	ctrl->ops = req->ops;
	if (ctrl->subsys->type == NVME_NQN_DISC) {
		/* Don't accept keep-alive timeout for discovery controllers */
		if (kato) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			goto out_remove_ida;
		}

		/*
		 * Discovery controllers use some arbitrary high value in
		 * order to clean up stale discovery sessions.
		 *
		 * From the latest base diff RC:
		 * "The Keep Alive command is not supported by
		 * Discovery controllers. A transport may specify a
		 * fixed Discovery controller activity timeout value
		 * (e.g., 2 minutes). If no commands are received
		 * by a Discovery controller within that time
		 * period, the controller may perform the
		 * actions for Keep Alive Timer expiration".
		 */
		ctrl->kato = NVMET_DISC_KATO;
	} else {
		/* keep-alive timeout in seconds */
		ctrl->kato = DIV_ROUND_UP(kato, 1000);
	}
	nvmet_start_keep_alive_timer(ctrl);

	mutex_lock(&subsys->lock);
	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
	mutex_unlock(&subsys->lock);

	*ctrlp = ctrl;
	return 0;

out_remove_ida:
	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
out_free_sqs:
	kfree(ctrl->sqs);
out_free_cqs:
	kfree(ctrl->cqs);
out_free_changed_ns_list:
	kfree(ctrl->changed_ns_list);
out_free_ctrl:
	kfree(ctrl);
out_put_subsystem:
	nvmet_subsys_put(subsys);
out:
	return status;
}

static void nvmet_ctrl_free(struct kref *ref)
{
	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
	struct nvmet_subsys *subsys = ctrl->subsys;

	mutex_lock(&subsys->lock);
	list_del(&ctrl->subsys_entry);
	mutex_unlock(&subsys->lock);

	nvmet_stop_keep_alive_timer(ctrl);

	flush_work(&ctrl->async_event_work);
	cancel_work_sync(&ctrl->fatal_err_work);

	ida_simple_remove(&cntlid_ida, ctrl->cntlid);

	kfree(ctrl->sqs);
	kfree(ctrl->cqs);
	kfree(ctrl->changed_ns_list);
	kfree(ctrl);

	nvmet_subsys_put(subsys);
}

void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvmet_ctrl_free);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}
EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);

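/*
 * Connect requests naming the well-known discovery NQN resolve to the
 * global discovery subsystem; any other NQN must be linked to the port,
 * looked up under nvmet_config_sem.
 */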
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
		const char *subsysnqn)
{
	struct nvmet_subsys_link *p;

	if (!port)
		return NULL;

	if (!strncmp(NVME_DISC_SUBSYS_NAME, subsysnqn,
			NVMF_NQN_SIZE)) {
		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
			return NULL;
		return nvmet_disc_subsys;
	}

	down_read(&nvmet_config_sem);
	list_for_each_entry(p, &port->subsystems, entry) {
		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
				NVMF_NQN_SIZE)) {
			if (!kref_get_unless_zero(&p->subsys->ref))
				break;
			up_read(&nvmet_config_sem);
			return p->subsys;
		}
	}
	up_read(&nvmet_config_sem);
	return NULL;
}

struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
		enum nvme_subsys_type type)
{
	struct nvmet_subsys *subsys;

	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
	if (!subsys)
		return NULL;

	subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
	/* generate a random serial number as our controllers are ephemeral: */
	get_random_bytes(&subsys->serial, sizeof(subsys->serial));

	switch (type) {
	case NVME_NQN_NVME:
		subsys->max_qid = NVMET_NR_QUEUES;
		break;
	case NVME_NQN_DISC:
		subsys->max_qid = 0;
		break;
	default:
		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
		kfree(subsys);
		return NULL;
	}
	subsys->type = type;
	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
			GFP_KERNEL);
	if (!subsys->subsysnqn) {
		kfree(subsys);
		return NULL;
	}

	kref_init(&subsys->ref);

	mutex_init(&subsys->lock);
	INIT_LIST_HEAD(&subsys->namespaces);
	INIT_LIST_HEAD(&subsys->ctrls);
	INIT_LIST_HEAD(&subsys->hosts);

	return subsys;
}

static void nvmet_subsys_free(struct kref *ref)
{
	struct nvmet_subsys *subsys =
		container_of(ref, struct nvmet_subsys, ref);

	WARN_ON_ONCE(!list_empty(&subsys->namespaces));

	kfree(subsys->subsysnqn);
	kfree(subsys);
}

void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&subsys->lock);
	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
		ctrl->ops->delete_ctrl(ctrl);
	mutex_unlock(&subsys->lock);
}

void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
	kref_put(&subsys->ref, nvmet_subsys_free);
}

static int __init nvmet_init(void)
{
	int error;

	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;

	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
			WQ_MEM_RECLAIM, 0);
	if (!buffered_io_wq) {
		error = -ENOMEM;
		goto out;
	}

	error = nvmet_init_discovery();
	if (error)
		goto out_free_work_queue;

	error = nvmet_init_configfs();
	if (error)
		goto out_exit_discovery;
	return 0;

out_exit_discovery:
	nvmet_exit_discovery();
out_free_work_queue:
	destroy_workqueue(buffered_io_wq);
out:
	return error;
}

static void __exit nvmet_exit(void)
{
	nvmet_exit_configfs();
	nvmet_exit_discovery();
	ida_destroy(&cntlid_ida);
	destroy_workqueue(buffered_io_wq);

	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
}

module_init(nvmet_init);
module_exit(nvmet_exit);

MODULE_LICENSE("GPL v2");