/*
 * NVMe admin command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/rculist.h>

#include <generated/utsrelease.h>
#include <asm/unaligned.h>
#include "nvmet.h"
/*
 * This helper clears a pending AEN based on the RAE (Retain Asynchronous
 * Event) bit.  Use it when processing the log pages associated with an
 * AEN.
 */
static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
{
	int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15;

	if (!rae)
		clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
}
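
/*
 * NUMD (number of dwords) is split across NUMDU/NUMDL and is a 0's based
 * dword count: e.g. NUMDU = 0 and NUMDL = 1023 encodes 1024 dwords, i.e.
 * a 4096 byte log page.
 */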
u32 nvmet_get_log_page_len(struct nvme_command *cmd)
{
	u32 len = le16_to_cpu(cmd->get_log_page.numdu);

	len <<= 16;
	len += le16_to_cpu(cmd->get_log_page.numdl);
	/* NUMD is a 0's based value */
	len += 1;
	len *= sizeof(u32);

	return len;
}

static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
	nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
}

static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	struct nvmet_ns *ns;
	u64 host_reads, host_writes, data_units_read, data_units_written;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
	if (!ns) {
		pr_err("nvmet : Could not find namespace id : %d\n",
				le32_to_cpu(req->cmd->get_log_page.nsid));
		return NVME_SC_INVALID_NS;
	}

	/* we don't have the right data for file backed ns */
	if (!ns->bdev)
		goto out;

	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[READ]), 1000);
	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
		sectors[WRITE]), 1000);
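
	/*
	 * The SMART "data units" fields count thousands of 512 byte units;
	 * the block layer accounts I/O in 512 byte sectors, hence the
	 * division by 1000 above.
	 */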
	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);
out:
	nvmet_put_namespace(ns);

	return NVME_SC_SUCCESS;
}

static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
		struct nvme_smart_log *slog)
{
	u64 host_reads = 0, host_writes = 0;
	u64 data_units_read = 0, data_units_written = 0;
	struct nvmet_ns *ns;
	struct nvmet_ctrl *ctrl;

	ctrl = req->sq->ctrl;

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		/* we don't have the right data for file backed ns */
		if (!ns->bdev)
			continue;
		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
		data_units_read += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
		data_units_written += DIV_ROUND_UP(
			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
	}
	rcu_read_unlock();

	put_unaligned_le64(host_reads, &slog->host_reads[0]);
	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
	put_unaligned_le64(host_writes, &slog->host_writes[0]);
	put_unaligned_le64(data_units_written, &slog->data_units_written[0]);

	return NVME_SC_SUCCESS;
}

static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
{
	struct nvme_smart_log *log;
	u16 status = NVME_SC_INTERNAL;

	if (req->data_len != sizeof(*log))
		goto out;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
		status = nvmet_get_smart_log_all(req, log);
	else
		status = nvmet_get_smart_log_nsid(req, log);
	if (status)
		goto out_free_log;

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
out_free_log:
	kfree(log);
out:
	nvmet_req_complete(req, status);
}
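
/*
 * For the Commands Supported and Effects log, bit 0 (CSUPP) of each entry
 * merely marks the command as supported; no other effects are reported.
 */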
static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
{
	u16 status = NVME_SC_INTERNAL;
	struct nvme_effects_log *log;

	log = kzalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		goto out;

	log->acs[nvme_admin_get_log_page] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_identify] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_abort_cmd] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_set_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_get_features] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_async_event] = cpu_to_le32(1 << 0);
	log->acs[nvme_admin_keep_alive] = cpu_to_le32(1 << 0);

	log->iocs[nvme_cmd_read] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_flush] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_dsm] = cpu_to_le32(1 << 0);
	log->iocs[nvme_cmd_write_zeroes] = cpu_to_le32(1 << 0);

	status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

	kfree(log);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	u16 status = NVME_SC_INTERNAL;
	size_t len;

	if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32))
		goto out;

	mutex_lock(&ctrl->lock);
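	/*
	 * nr_changed_ns == U32_MAX marks an overflow: more namespaces
	 * changed than the list can hold, so only a single entry
	 * (presumably 0xffffffff, recorded when the overflow happened) is
	 * returned, as the spec prescribes.
	 */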
	if (ctrl->nr_changed_ns == U32_MAX)
		len = sizeof(__le32);
	else
		len = ctrl->nr_changed_ns * sizeof(__le32);
	status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len);
	if (!status)
		status = nvmet_zero_sgl(req, len, req->data_len - len);
	ctrl->nr_changed_ns = 0;
	nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR);
	mutex_unlock(&ctrl->lock);
out:
	nvmet_req_complete(req, status);
}

static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
		struct nvme_ana_group_desc *desc)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 count = 0;

	if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) {
		rcu_read_lock();
		list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link)
			if (ns->anagrpid == grpid)
				desc->nsids[count++] = cpu_to_le32(ns->nsid);
		rcu_read_unlock();
	}

	desc->grpid = cpu_to_le32(grpid);
	desc->nnsids = cpu_to_le32(count);
	desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	desc->state = req->port->ana_state[grpid];
	memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
	return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
}

static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
{
	struct nvme_ana_rsp_hdr hdr = { 0, };
	struct nvme_ana_group_desc *desc;
	size_t offset = sizeof(struct nvme_ana_rsp_hdr); /* start beyond hdr */
	size_t len;
	u32 grpid;
	u16 ngrps = 0;
	u16 status;

	status = NVME_SC_INTERNAL;
	desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
			NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
	if (!desc)
		goto out;

	down_read(&nvmet_ana_sem);
	for (grpid = 1; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (!nvmet_ana_group_enabled[grpid])
			continue;
		len = nvmet_format_ana_group(req, grpid, desc);
		status = nvmet_copy_to_sgl(req, offset, desc, len);
		if (status)
			break;
		offset += len;
		ngrps++;
	}
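	/*
	 * If we stopped copying descriptors early, keep counting the
	 * remaining enabled groups so that hdr.ngrps still covers all of
	 * them.
	 */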
	for ( ; grpid <= NVMET_MAX_ANAGRPS; grpid++) {
		if (nvmet_ana_group_enabled[grpid])
			ngrps++;
	}

	hdr.chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
	hdr.ngrps = cpu_to_le16(ngrps);
	nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE);
	up_read(&nvmet_ana_sem);

	kfree(desc);

	/* copy the header last once we know the number of groups */
	status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr));
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	u16 status = 0;
	const char model[] = "Linux";

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* XXX: figure out how to assign real vendor IDs. */
	id->vid = 0;
	id->ssvid = 0;

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	id->rab = 6;

	/*
	 * XXX: figure out how we can assign an IEEE OUI, but until then
	 * the safest is to leave it as zeroes.
	 */

	/* we support multiple ports, multiple hosts and ANA: */
	id->cmic = (1 << 0) | (1 << 1) | (1 << 3);

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);

	/* XXX: figure out what to do about RTD3R/RTD3 */
	id->oaes = cpu_to_le32(NVMET_AEN_CFG_OPTIONAL);
	id->ctratt = cpu_to_le32(1 << 0);

	id->oacs = 0;

	/*
	 * We don't really have a practical limit on the number of abort
	 * commands.  But we don't do anything useful for abort either, so
	 * no point in allowing more abort commands than the spec requires.
	 */
	id->acl = 3;

	id->aerl = NVMET_ASYNC_EVENTS - 1;

	/* first slot is read-only, only one slot supported */
	id->frmw = (1 << 0) | (1 << 1);
	id->lpa = (1 << 0) | (1 << 1) | (1 << 2);
	id->elpe = NVMET_ERROR_LOG_SLOTS - 1;
	id->npss = 0;

	/* We support keep-alive timeout in granularity of seconds */
	id->kas = cpu_to_le16(NVMET_KAS);
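
	/*
	 * SQES/CQES pack the maximum and required entry sizes as log2
	 * values in the high and low nibbles: 0x6 = 64 byte SQEs,
	 * 0x4 = 16 byte CQEs.
	 */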
	id->sqes = (0x6 << 4) | 0x6;
	id->cqes = (0x4 << 4) | 0x4;

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->nn = cpu_to_le32(ctrl->subsys->max_nsid);
	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
			NVME_CTRL_ONCS_WRITE_ZEROES);

	/* XXX: don't report vwc if the underlying device is write through */
	id->vwc = NVME_CTRL_VWC_PRESENT;

	/*
	 * We can't support atomic writes bigger than an LBA without support
	 * from the backend device.
	 */
	id->awun = 0;
	id->awupf = 0;

	id->sgls = cpu_to_le32(1 << 0); /* we always support SGLs */
	if (ctrl->ops->has_keyed_sgls)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	strcpy(id->subnqn, ctrl->subsys->subsysnqn);
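
	/* IOCCSZ and IORCSZ are expressed in 16 byte units, hence the /16 */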
	/* Max command capsule size is sqe + single page of in-capsule data */
	id->ioccsz = cpu_to_le32((sizeof(struct nvme_command) +
				  req->port->inline_data_size) / 16);
	/* Max response capsule size is cqe */
	id->iorcsz = cpu_to_le32(sizeof(struct nvme_completion) / 16);

	id->msdbd = ctrl->ops->msdbd;

	id->anacap = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
	id->anatt = 10; /* random value */
	id->anagrpmax = cpu_to_le32(NVMET_MAX_ANAGRPS);
	id->nanagrpid = cpu_to_le32(NVMET_MAX_ANAGRPS);

	/*
	 * Meh, we don't really support any power state.  Fake up the same
	 * values that qemu does.
	 */
	id->psd[0].max_power = cpu_to_le16(0x9c4);
	id->psd[0].entry_lat = cpu_to_le32(0x10);
	id->psd[0].exit_lat = cpu_to_le32(0x4);

	id->nwpc = 1 << 0; /* write protect and no write protect */

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}

static void nvmet_execute_identify_ns(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	struct nvme_id_ns *id;
	u16 status = 0;

	if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	/* return an all zeroed buffer if we can't find an active namespace */
	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns)
		goto done;

	/*
	 * nuse = ncap = nsze isn't always true, but we have no way to find
	 * that out from the underlying device.
	 */
	id->ncap = id->nsze = cpu_to_le64(ns->size >> ns->blksize_shift);
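	/*
	 * Namespaces in an inaccessible or persistent loss ANA state keep
	 * nuse at zero; all other states report nuse = nsze.
	 */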
	switch (req->port->ana_state[ns->anagrpid]) {
	case NVME_ANA_INACCESSIBLE:
	case NVME_ANA_PERSISTENT_LOSS:
		break;
	default:
		id->nuse = id->nsze;
		break;
	}

	/*
	 * We just provide a single LBA format that matches what the
	 * underlying device reports.
	 */
	id->nlbaf = 0;
	id->flbas = 0;

	/*
	 * Our namespace might always be shared.  Not just with other
	 * controllers, but also with any other user of the block device.
	 */
	id->nmic = (1 << 0);
	id->anagrpid = cpu_to_le32(ns->anagrpid);

	memcpy(&id->nguid, &ns->nguid, sizeof(id->nguid));

	id->lbaf[0].ds = ns->blksize_shift;

	if (ns->readonly)
		id->nsattr |= (1 << 0);
	nvmet_put_namespace(ns);
done:
	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
	kfree(id);
out:
	nvmet_req_complete(req, status);
}
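
/*
 * CNS 02h: report active NSIDs strictly greater than the NSID in the
 * command, in ascending order (assuming the subsystem keeps its
 * namespaces list sorted by NSID).
 */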
static void nvmet_execute_identify_nslist(struct nvmet_req *req)
{
	static const int buf_size = NVME_IDENTIFY_DATA_SIZE;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_ns *ns;
	u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid);
	__le32 *list;
	u16 status = 0;
	int i = 0;

	list = kzalloc(buf_size, GFP_KERNEL);
	if (!list) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) {
		if (ns->nsid <= min_nsid)
			continue;
		list[i++] = cpu_to_le32(ns->nsid);
		if (i == buf_size / sizeof(__le32))
			break;
	}
	rcu_read_unlock();

	status = nvmet_copy_to_sgl(req, 0, list, buf_size);

	kfree(list);
out:
	nvmet_req_complete(req, status);
}

static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len,
				    void *id, off_t *off)
{
	struct nvme_ns_id_desc desc = {
		.nidt = type,
		.nidl = len,
	};
	u16 status;

	status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc));
	if (status)
		return status;
	*off += sizeof(desc);

	status = nvmet_copy_to_sgl(req, *off, id, len);
	if (status)
		return status;
	*off += len;

	return 0;
}
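
/*
 * CNS 03h: the Namespace Identification Descriptor list is a sequence of
 * (type, length, payload) records; the remainder of the 4096 byte buffer
 * is zero filled.
 */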
static void nvmet_execute_identify_desclist(struct nvmet_req *req)
{
	struct nvmet_ns *ns;
	u16 status = 0;
	off_t off = 0;

	ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
	if (!ns) {
		status = NVME_SC_INVALID_NS | NVME_SC_DNR;
		goto out;
	}

	if (memchr_inv(&ns->uuid, 0, sizeof(ns->uuid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID,
						  NVME_NIDT_UUID_LEN,
						  &ns->uuid, &off);
		if (status)
			goto out_put_ns;
	}
	if (memchr_inv(ns->nguid, 0, sizeof(ns->nguid))) {
		status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID,
						  NVME_NIDT_NGUID_LEN,
						  &ns->nguid, &off);
		if (status)
			goto out_put_ns;
	}

	if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off,
			off) != NVME_IDENTIFY_DATA_SIZE - off)
		status = NVME_SC_INTERNAL | NVME_SC_DNR;
out_put_ns:
	nvmet_put_namespace(ns);
out:
	nvmet_req_complete(req, status);
}

/*
 * A "minimum viable" abort implementation: the command is mandatory in the
 * spec, but we are not required to do any useful work.  We couldn't really
 * do a useful abort, so don't bother even with waiting for the command
 * to be executed and return immediately telling the command to abort
 * wasn't found.
 */
static void nvmet_execute_abort(struct nvmet_req *req)
{
	nvmet_set_result(req, 1);	/* dword 0 bit 0 set: command not aborted */
	nvmet_req_complete(req, 0);
}

static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
{
	u16 status;

	if (req->ns->file)
		status = nvmet_file_flush(req);
	else
		status = nvmet_bdev_flush(req);

	if (status)
		pr_err("write protect flush failed nsid: %u\n", req->ns->nsid);
	return status;
}
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
{
	u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
	if (unlikely(!req->ns))
		return status;

	mutex_lock(&subsys->lock);
	switch (write_protect) {
	case NVME_NS_WRITE_PROTECT:
		req->ns->readonly = true;
		status = nvmet_write_protect_flush_sync(req);
		if (status)
			req->ns->readonly = false;
		break;
	case NVME_NS_NO_WRITE_PROTECT:
		req->ns->readonly = false;
		status = 0;
		break;
	default:
		break;
	}

	if (!status)
		nvmet_ns_changed(subsys, req->ns->nsid);
	mutex_unlock(&subsys->lock);
	return status;
}

static void nvmet_execute_set_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u32 val32;
	u16 status = 0;

	switch (cdw10 & 0xff) {
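	/*
	 * The Number of Queues completion value holds the 0's based count
	 * of I/O submission queues in the low 16 bits and of completion
	 * queues in the high 16 bits, hence (max_qid - 1) in both halves.
	 */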
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
		break;
	case NVME_FEAT_KATO:
		/* KATO is specified in milliseconds, kato is kept in seconds */
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
		nvmet_set_result(req, req->sq->ctrl->kato);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
		if (val32 & ~NVMET_AEN_CFG_ALL) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
		nvmet_set_result(req, val32);
		break;
	case NVME_FEAT_HOST_ID:
		status = NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_set_feat_write_protect(req);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}

static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 result;

	req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
	if (!req->ns)
		return NVME_SC_INVALID_NS | NVME_SC_DNR;

	mutex_lock(&subsys->lock);
	if (req->ns->readonly)
		result = NVME_NS_WRITE_PROTECT;
	else
		result = NVME_NS_NO_WRITE_PROTECT;
	nvmet_set_result(req, result);
	mutex_unlock(&subsys->lock);

	return 0;
}

static void nvmet_execute_get_features(struct nvmet_req *req)
{
	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
	u16 status = 0;

	switch (cdw10 & 0xff) {
	/*
	 * These features are mandatory in the spec, but we don't
	 * have a useful way to implement them.  We'll eventually
	 * need to come up with some fake values for these.
	 */
#if 0
	case NVME_FEAT_ARBITRATION:
		break;
	case NVME_FEAT_POWER_MGMT:
		break;
	case NVME_FEAT_TEMP_THRESH:
		break;
	case NVME_FEAT_ERR_RECOVERY:
		break;
	case NVME_FEAT_IRQ_COALESCE:
		break;
	case NVME_FEAT_IRQ_CONFIG:
		break;
	case NVME_FEAT_WRITE_ATOMIC:
		break;
#endif
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
		break;
	case NVME_FEAT_VOLATILE_WC:
		nvmet_set_result(req, 1);
		break;
	case NVME_FEAT_NUM_QUEUES:
		nvmet_set_result(req,
			(subsys->max_qid-1) | ((subsys->max_qid-1) << 16));
		break;
	case NVME_FEAT_KATO:
		/* kato is kept in seconds, the feature reports milliseconds */
		nvmet_set_result(req, req->sq->ctrl->kato * 1000);
		break;
	case NVME_FEAT_HOST_ID:
		/* need 128-bit host identifier flag */
		if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
			break;
		}

		status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
				sizeof(req->sq->ctrl->hostid));
		break;
	case NVME_FEAT_WRITE_PROTECT:
		status = nvmet_get_feat_write_protect(req);
		break;
	default:
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, status);
}
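
/*
 * Async Event Request commands are not completed inline: they are parked
 * in async_event_cmds and completed with the event payload from
 * async_event_work (handled elsewhere in the core) once an event fires.
 */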
static void nvmet_execute_async_event(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	mutex_lock(&ctrl->lock);
	if (ctrl->nr_async_event_cmds >= NVMET_ASYNC_EVENTS) {
		mutex_unlock(&ctrl->lock);
		nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR);
		return;
	}
	ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
	mutex_unlock(&ctrl->lock);

	schedule_work(&ctrl->async_event_work);
}

static void nvmet_execute_keep_alive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;

	pr_debug("ctrl %d update keep-alive timer for %d secs\n",
		ctrl->cntlid, ctrl->kato);

	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
	nvmet_req_complete(req, 0);
}
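
/*
 * Parsing is decoupled from execution: this function only validates the
 * command and fills in req->execute and req->data_len; the transport
 * calls req->execute() once the data transfer has been set up.
 */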
u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;
	u16 ret;

	ret = nvmet_check_ctrl_status(req, cmd);
	if (unlikely(ret))
		return ret;

	switch (cmd->common.opcode) {
	case nvme_admin_get_log_page:
		req->data_len = nvmet_get_log_page_len(cmd);

		switch (cmd->get_log_page.lid) {
		case NVME_LOG_ERROR:
			/*
			 * We currently never set the More bit in the status
			 * field, so all error log entries are invalid and
			 * can be zeroed out.  This is called a minimum viable
			 * implementation (TM) of this mandatory log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_SMART:
			req->execute = nvmet_execute_get_log_page_smart;
			return 0;
		case NVME_LOG_FW_SLOT:
			/*
			 * We only support a single firmware slot which always
			 * is active, so we can zero out the whole firmware slot
			 * log and still claim to fully implement this mandatory
			 * log page.
			 */
			req->execute = nvmet_execute_get_log_page_noop;
			return 0;
		case NVME_LOG_CHANGED_NS:
			req->execute = nvmet_execute_get_log_changed_ns;
			return 0;
		case NVME_LOG_CMD_EFFECTS:
			req->execute = nvmet_execute_get_log_cmd_effects_ns;
			return 0;
		case NVME_LOG_ANA:
			req->execute = nvmet_execute_get_log_page_ana;
			return 0;
		}
		break;
	case nvme_admin_identify:
		req->data_len = NVME_IDENTIFY_DATA_SIZE;
		switch (cmd->identify.cns) {
		case NVME_ID_CNS_NS:
			req->execute = nvmet_execute_identify_ns;
			return 0;
		case NVME_ID_CNS_CTRL:
			req->execute = nvmet_execute_identify_ctrl;
			return 0;
		case NVME_ID_CNS_NS_ACTIVE_LIST:
			req->execute = nvmet_execute_identify_nslist;
			return 0;
		case NVME_ID_CNS_NS_DESC_LIST:
			req->execute = nvmet_execute_identify_desclist;
			return 0;
		}
		break;
	case nvme_admin_abort_cmd:
		req->execute = nvmet_execute_abort;
		req->data_len = 0;
		return 0;
	case nvme_admin_set_features:
		req->execute = nvmet_execute_set_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_get_features;
		req->data_len = 0;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		req->data_len = 0;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		req->data_len = 0;
		return 0;
	}

	pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
	       req->sq->qid);
	return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
}