/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/utsname.h>
11 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
12 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
13 static int qla2x00_sns_gpn_id(scsi_qla_host_t *, sw_info_t *);
14 static int qla2x00_sns_gnn_id(scsi_qla_host_t *, sw_info_t *);
15 static int qla2x00_sns_rft_id(scsi_qla_host_t *);
16 static int qla2x00_sns_rnn_id(scsi_qla_host_t *);
17 static int qla_async_rftid(scsi_qla_host_t *, port_id_t *);
18 static int qla_async_rffid(scsi_qla_host_t *, port_id_t *, u8, u8);
19 static int qla_async_rnnid(scsi_qla_host_t *, port_id_t *, u8*);
20 static int qla_async_rsnn_nn(scsi_qla_host_t *);
23 * qla2x00_prep_ms_iocb() - Prepare common MS/CT IOCB fields for SNS CT query.
27 * Returns a pointer to the @vha's ms_iocb.
30 qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
32 struct qla_hw_data *ha = vha->hw;
33 ms_iocb_entry_t *ms_pkt;
35 ms_pkt = (ms_iocb_entry_t *)arg->iocb;
36 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
38 ms_pkt->entry_type = MS_IOCB_TYPE;
39 ms_pkt->entry_count = 1;
40 SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER);
41 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
42 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
43 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
44 ms_pkt->total_dsd_count = cpu_to_le16(2);
45 ms_pkt->rsp_bytecount = cpu_to_le32(arg->rsp_size);
46 ms_pkt->req_bytecount = cpu_to_le32(arg->req_size);
48 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(arg->req_dma));
49 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(arg->req_dma));
50 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
52 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
53 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
54 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
56 vha->qla_stats.control_requests++;
62 * qla24xx_prep_ms_iocb() - Prepare common CT IOCB fields for SNS CT query.
66 * Returns a pointer to the @ha's ms_iocb.
69 qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, struct ct_arg *arg)
71 struct qla_hw_data *ha = vha->hw;
72 struct ct_entry_24xx *ct_pkt;
74 ct_pkt = (struct ct_entry_24xx *)arg->iocb;
75 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
77 ct_pkt->entry_type = CT_IOCB_TYPE;
78 ct_pkt->entry_count = 1;
79 ct_pkt->nport_handle = cpu_to_le16(arg->nport_handle);
80 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
81 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
82 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
83 ct_pkt->rsp_byte_count = cpu_to_le32(arg->rsp_size);
84 ct_pkt->cmd_byte_count = cpu_to_le32(arg->req_size);
86 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(arg->req_dma));
87 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(arg->req_dma));
88 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
90 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(arg->rsp_dma));
91 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(arg->rsp_dma));
92 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
93 ct_pkt->vp_index = vha->vp_idx;
95 vha->qla_stats.control_requests++;
101 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
102 * @p: CT request buffer
104 * @rsp_size: response size in bytes
106 * Returns a pointer to the intitialized @ct_req.
108 static inline struct ct_sns_req *
109 qla2x00_prep_ct_req(struct ct_sns_pkt *p, uint16_t cmd, uint16_t rsp_size)
111 memset(p, 0, sizeof(struct ct_sns_pkt));
113 p->p.req.header.revision = 0x01;
114 p->p.req.header.gs_type = 0xFC;
115 p->p.req.header.gs_subtype = 0x02;
116 p->p.req.command = cpu_to_be16(cmd);
117 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
123 qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
124 struct ct_sns_rsp *ct_rsp, const char *routine)
127 uint16_t comp_status;
128 struct qla_hw_data *ha = vha->hw;
129 bool lid_is_sns = false;
131 rval = QLA_FUNCTION_FAILED;
132 if (ms_pkt->entry_status != 0) {
133 ql_dbg(ql_dbg_disc, vha, 0x2031,
134 "%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
135 routine, ms_pkt->entry_status, vha->d_id.b.domain,
136 vha->d_id.b.area, vha->d_id.b.al_pa);
138 if (IS_FWI2_CAPABLE(ha))
139 comp_status = le16_to_cpu(
140 ((struct ct_entry_24xx *)ms_pkt)->comp_status);
142 comp_status = le16_to_cpu(ms_pkt->status);
143 switch (comp_status) {
145 case CS_DATA_UNDERRUN:
146 case CS_DATA_OVERRUN: /* Overrun? */
147 if (ct_rsp->header.response !=
148 cpu_to_be16(CT_ACCEPT_RESPONSE)) {
149 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
150 "%s failed rejected request on port_id: %02x%02x%02x Completion status 0x%x, response 0x%x\n",
151 routine, vha->d_id.b.domain,
152 vha->d_id.b.area, vha->d_id.b.al_pa,
153 comp_status, ct_rsp->header.response);
154 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
155 0x2078, (uint8_t *)&ct_rsp->header,
156 sizeof(struct ct_rsp_hdr));
157 rval = QLA_INVALID_COMMAND;
161 case CS_PORT_LOGGED_OUT:
162 if (IS_FWI2_CAPABLE(ha)) {
163 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
167 if (le16_to_cpu(ms_pkt->loop_id.extended) ==
172 ql_dbg(ql_dbg_async, vha, 0x502b,
173 "%s failed, Name server has logged out",
175 rval = QLA_NOT_LOGGED_IN;
176 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
177 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
181 rval = QLA_FUNCTION_TIMEOUT;
184 ql_dbg(ql_dbg_disc, vha, 0x2033,
185 "%s failed, completion status (%x) on port_id: "
186 "%02x%02x%02x.\n", routine, comp_status,
187 vha->d_id.b.domain, vha->d_id.b.area,
196 * qla2x00_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
198 * @fcport: fcport entry to updated
200 * Returns 0 on success.
203 qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
207 ms_iocb_entry_t *ms_pkt;
208 struct ct_sns_req *ct_req;
209 struct ct_sns_rsp *ct_rsp;
210 struct qla_hw_data *ha = vha->hw;
213 if (IS_QLA2100(ha) || IS_QLA2200(ha))
214 return qla2x00_sns_ga_nxt(vha, fcport);
216 arg.iocb = ha->ms_iocb;
217 arg.req_dma = ha->ct_sns_dma;
218 arg.rsp_dma = ha->ct_sns_dma;
219 arg.req_size = GA_NXT_REQ_SIZE;
220 arg.rsp_size = GA_NXT_RSP_SIZE;
221 arg.nport_handle = NPH_SNS;
224 /* Prepare common MS IOCB */
225 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
227 /* Prepare CT request */
228 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GA_NXT_CMD,
230 ct_rsp = &ha->ct_sns->p.rsp;
232 /* Prepare CT arguments -- port_id */
233 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
234 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
235 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
237 /* Execute MS IOCB */
238 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
239 sizeof(ms_iocb_entry_t));
240 if (rval != QLA_SUCCESS) {
242 ql_dbg(ql_dbg_disc, vha, 0x2062,
243 "GA_NXT issue IOCB failed (%d).\n", rval);
244 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
246 rval = QLA_FUNCTION_FAILED;
248 /* Populate fc_port_t entry. */
249 fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0];
250 fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1];
251 fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2];
253 memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name,
255 memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
258 fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
259 FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
261 if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
262 ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
263 fcport->d_id.b.domain = 0xf0;
265 ql_dbg(ql_dbg_disc, vha, 0x2063,
266 "GA_NXT entry - nn %8phN pn %8phN "
267 "port_id=%02x%02x%02x.\n",
268 fcport->node_name, fcport->port_name,
269 fcport->d_id.b.domain, fcport->d_id.b.area,
270 fcport->d_id.b.al_pa);
277 qla2x00_gid_pt_rsp_size(scsi_qla_host_t *vha)
279 return vha->hw->max_fibre_devices * 4 + 16;
283 * qla2x00_gid_pt() - SNS scan for fabric devices via GID_PT command.
285 * @list: switch info entries to populate
287 * NOTE: Non-Nx_Ports are not requested.
289 * Returns 0 on success.
292 qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
297 ms_iocb_entry_t *ms_pkt;
298 struct ct_sns_req *ct_req;
299 struct ct_sns_rsp *ct_rsp;
301 struct ct_sns_gid_pt_data *gid_data;
302 struct qla_hw_data *ha = vha->hw;
303 uint16_t gid_pt_rsp_size;
306 if (IS_QLA2100(ha) || IS_QLA2200(ha))
307 return qla2x00_sns_gid_pt(vha, list);
310 gid_pt_rsp_size = qla2x00_gid_pt_rsp_size(vha);
312 arg.iocb = ha->ms_iocb;
313 arg.req_dma = ha->ct_sns_dma;
314 arg.rsp_dma = ha->ct_sns_dma;
315 arg.req_size = GID_PT_REQ_SIZE;
316 arg.rsp_size = gid_pt_rsp_size;
317 arg.nport_handle = NPH_SNS;
320 /* Prepare common MS IOCB */
321 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
323 /* Prepare CT request */
324 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GID_PT_CMD, gid_pt_rsp_size);
325 ct_rsp = &ha->ct_sns->p.rsp;
327 /* Prepare CT arguments -- port_type */
328 ct_req->req.gid_pt.port_type = NS_NX_PORT_TYPE;
330 /* Execute MS IOCB */
331 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
332 sizeof(ms_iocb_entry_t));
333 if (rval != QLA_SUCCESS) {
335 ql_dbg(ql_dbg_disc, vha, 0x2055,
336 "GID_PT issue IOCB failed (%d).\n", rval);
337 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
339 rval = QLA_FUNCTION_FAILED;
341 /* Set port IDs in switch info list. */
342 for (i = 0; i < ha->max_fibre_devices; i++) {
343 gid_data = &ct_rsp->rsp.gid_pt.entries[i];
344 list[i].d_id.b.domain = gid_data->port_id[0];
345 list[i].d_id.b.area = gid_data->port_id[1];
346 list[i].d_id.b.al_pa = gid_data->port_id[2];
347 memset(list[i].fabric_port_name, 0, WWN_SIZE);
348 list[i].fp_speed = PORT_SPEED_UNKNOWN;
351 if (gid_data->control_byte & BIT_7) {
352 list[i].d_id.b.rsvd_1 = gid_data->control_byte;
358 * If we've used all available slots, then the switch is
359 * reporting back more devices than we can handle with this
360 * single call. Return a failed status, and let GA_NXT handle
363 if (i == ha->max_fibre_devices)
364 rval = QLA_FUNCTION_FAILED;
371 * qla2x00_gpn_id() - SNS Get Port Name (GPN_ID) query.
373 * @list: switch info entries to populate
375 * Returns 0 on success.
378 qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
380 int rval = QLA_SUCCESS;
383 ms_iocb_entry_t *ms_pkt;
384 struct ct_sns_req *ct_req;
385 struct ct_sns_rsp *ct_rsp;
386 struct qla_hw_data *ha = vha->hw;
389 if (IS_QLA2100(ha) || IS_QLA2200(ha))
390 return qla2x00_sns_gpn_id(vha, list);
392 arg.iocb = ha->ms_iocb;
393 arg.req_dma = ha->ct_sns_dma;
394 arg.rsp_dma = ha->ct_sns_dma;
395 arg.req_size = GPN_ID_REQ_SIZE;
396 arg.rsp_size = GPN_ID_RSP_SIZE;
397 arg.nport_handle = NPH_SNS;
399 for (i = 0; i < ha->max_fibre_devices; i++) {
401 /* Prepare common MS IOCB */
402 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
404 /* Prepare CT request */
405 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GPN_ID_CMD,
407 ct_rsp = &ha->ct_sns->p.rsp;
409 /* Prepare CT arguments -- port_id */
410 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
411 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
412 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
414 /* Execute MS IOCB */
415 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
416 sizeof(ms_iocb_entry_t));
417 if (rval != QLA_SUCCESS) {
419 ql_dbg(ql_dbg_disc, vha, 0x2056,
420 "GPN_ID issue IOCB failed (%d).\n", rval);
422 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
423 "GPN_ID") != QLA_SUCCESS) {
424 rval = QLA_FUNCTION_FAILED;
428 memcpy(list[i].port_name,
429 ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
432 /* Last device exit. */
433 if (list[i].d_id.b.rsvd_1 != 0)
441 * qla2x00_gnn_id() - SNS Get Node Name (GNN_ID) query.
443 * @list: switch info entries to populate
445 * Returns 0 on success.
448 qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
450 int rval = QLA_SUCCESS;
452 struct qla_hw_data *ha = vha->hw;
453 ms_iocb_entry_t *ms_pkt;
454 struct ct_sns_req *ct_req;
455 struct ct_sns_rsp *ct_rsp;
458 if (IS_QLA2100(ha) || IS_QLA2200(ha))
459 return qla2x00_sns_gnn_id(vha, list);
461 arg.iocb = ha->ms_iocb;
462 arg.req_dma = ha->ct_sns_dma;
463 arg.rsp_dma = ha->ct_sns_dma;
464 arg.req_size = GNN_ID_REQ_SIZE;
465 arg.rsp_size = GNN_ID_RSP_SIZE;
466 arg.nport_handle = NPH_SNS;
468 for (i = 0; i < ha->max_fibre_devices; i++) {
470 /* Prepare common MS IOCB */
471 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
473 /* Prepare CT request */
474 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GNN_ID_CMD,
476 ct_rsp = &ha->ct_sns->p.rsp;
478 /* Prepare CT arguments -- port_id */
479 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
480 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
481 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
483 /* Execute MS IOCB */
484 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
485 sizeof(ms_iocb_entry_t));
486 if (rval != QLA_SUCCESS) {
488 ql_dbg(ql_dbg_disc, vha, 0x2057,
489 "GNN_ID issue IOCB failed (%d).\n", rval);
491 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
492 "GNN_ID") != QLA_SUCCESS) {
493 rval = QLA_FUNCTION_FAILED;
497 memcpy(list[i].node_name,
498 ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
500 ql_dbg(ql_dbg_disc, vha, 0x2058,
501 "GID_PT entry - nn %8phN pn %8phN "
502 "portid=%02x%02x%02x.\n",
503 list[i].node_name, list[i].port_name,
504 list[i].d_id.b.domain, list[i].d_id.b.area,
505 list[i].d_id.b.al_pa);
508 /* Last device exit. */
509 if (list[i].d_id.b.rsvd_1 != 0)
516 static void qla2x00_async_sns_sp_done(void *s, int rc)
519 struct scsi_qla_host *vha = sp->vha;
520 struct ct_sns_pkt *ct_sns;
521 struct qla_work_evt *e;
524 if (rc == QLA_SUCCESS) {
525 ql_dbg(ql_dbg_disc, vha, 0x204f,
526 "Async done-%s exiting normally.\n",
528 } else if (rc == QLA_FUNCTION_TIMEOUT) {
529 ql_dbg(ql_dbg_disc, vha, 0x204f,
530 "Async done-%s timeout\n", sp->name);
532 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
533 memset(ct_sns, 0, sizeof(*ct_sns));
535 if (sp->retry_count > 3)
538 ql_dbg(ql_dbg_disc, vha, 0x204f,
539 "Async done-%s fail rc %x. Retry count %d\n",
540 sp->name, rc, sp->retry_count);
542 e = qla2x00_alloc_work(vha, QLA_EVT_SP_RETRY);
546 del_timer(&sp->u.iocb_cmd.timer);
548 qla2x00_post_work(vha, e);
553 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
556 /* please ignore kernel warning. otherwise, we have mem leak. */
557 if (sp->u.iocb_cmd.u.ctarg.req) {
558 dma_free_coherent(&vha->hw->pdev->dev,
559 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
560 sp->u.iocb_cmd.u.ctarg.req,
561 sp->u.iocb_cmd.u.ctarg.req_dma);
562 sp->u.iocb_cmd.u.ctarg.req = NULL;
565 if (sp->u.iocb_cmd.u.ctarg.rsp) {
566 dma_free_coherent(&vha->hw->pdev->dev,
567 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
568 sp->u.iocb_cmd.u.ctarg.rsp,
569 sp->u.iocb_cmd.u.ctarg.rsp_dma);
570 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
579 qla2x00_post_work(vha, e);
583 * qla2x00_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
586 * Returns 0 on success.
589 qla2x00_rft_id(scsi_qla_host_t *vha)
591 struct qla_hw_data *ha = vha->hw;
593 if (IS_QLA2100(ha) || IS_QLA2200(ha))
594 return qla2x00_sns_rft_id(vha);
596 return qla_async_rftid(vha, &vha->d_id);
599 static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
601 int rval = QLA_MEMORY_ALLOC_FAILED;
602 struct ct_sns_req *ct_req;
604 struct ct_sns_pkt *ct_sns;
606 if (!vha->flags.online)
609 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
613 sp->type = SRB_CT_PTHRU_CMD;
615 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
617 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
618 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
620 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
621 if (!sp->u.iocb_cmd.u.ctarg.req) {
622 ql_log(ql_log_warn, vha, 0xd041,
623 "%s: Failed to allocate ct_sns request.\n",
628 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
629 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
631 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
632 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
633 ql_log(ql_log_warn, vha, 0xd042,
634 "%s: Failed to allocate ct_sns request.\n",
638 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
639 memset(ct_sns, 0, sizeof(*ct_sns));
640 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
642 /* Prepare CT request */
643 ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE);
645 /* Prepare CT arguments -- port_id, FC-4 types */
646 ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain;
647 ct_req->req.rft_id.port_id[1] = vha->d_id.b.area;
648 ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa;
649 ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */
651 if (vha->flags.nvme_enabled)
652 ct_req->req.rft_id.fc4_types[6] = 1; /* NVMe type 28h */
654 sp->u.iocb_cmd.u.ctarg.req_size = RFT_ID_REQ_SIZE;
655 sp->u.iocb_cmd.u.ctarg.rsp_size = RFT_ID_RSP_SIZE;
656 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
657 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
658 sp->done = qla2x00_async_sns_sp_done;
660 rval = qla2x00_start_sp(sp);
661 if (rval != QLA_SUCCESS) {
662 ql_dbg(ql_dbg_disc, vha, 0x2043,
663 "RFT_ID issue IOCB failed (%d).\n", rval);
666 ql_dbg(ql_dbg_disc, vha, 0xffff,
667 "Async-%s - hdl=%x portid %06x.\n",
668 sp->name, sp->handle, d_id->b24);
677 * qla2x00_rff_id() - SNS Register FC-4 Features (RFF_ID) supported by the HBA.
681 * Returns 0 on success.
684 qla2x00_rff_id(scsi_qla_host_t *vha, u8 type)
686 struct qla_hw_data *ha = vha->hw;
688 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
689 ql_dbg(ql_dbg_disc, vha, 0x2046,
690 "RFF_ID call not supported on ISP2100/ISP2200.\n");
691 return (QLA_SUCCESS);
694 return qla_async_rffid(vha, &vha->d_id, qlt_rff_id(vha), type);
697 static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
698 u8 fc4feature, u8 fc4type)
700 int rval = QLA_MEMORY_ALLOC_FAILED;
701 struct ct_sns_req *ct_req;
703 struct ct_sns_pkt *ct_sns;
705 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
709 sp->type = SRB_CT_PTHRU_CMD;
711 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
713 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
714 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
716 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
717 if (!sp->u.iocb_cmd.u.ctarg.req) {
718 ql_log(ql_log_warn, vha, 0xd041,
719 "%s: Failed to allocate ct_sns request.\n",
724 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
725 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
727 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
728 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
729 ql_log(ql_log_warn, vha, 0xd042,
730 "%s: Failed to allocate ct_sns request.\n",
734 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
735 memset(ct_sns, 0, sizeof(*ct_sns));
736 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
738 /* Prepare CT request */
739 ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE);
741 /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */
742 ct_req->req.rff_id.port_id[0] = d_id->b.domain;
743 ct_req->req.rff_id.port_id[1] = d_id->b.area;
744 ct_req->req.rff_id.port_id[2] = d_id->b.al_pa;
745 ct_req->req.rff_id.fc4_feature = fc4feature;
746 ct_req->req.rff_id.fc4_type = fc4type; /* SCSI-FCP or FC-NVMe */
748 sp->u.iocb_cmd.u.ctarg.req_size = RFF_ID_REQ_SIZE;
749 sp->u.iocb_cmd.u.ctarg.rsp_size = RFF_ID_RSP_SIZE;
750 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
751 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
752 sp->done = qla2x00_async_sns_sp_done;
754 rval = qla2x00_start_sp(sp);
755 if (rval != QLA_SUCCESS) {
756 ql_dbg(ql_dbg_disc, vha, 0x2047,
757 "RFF_ID issue IOCB failed (%d).\n", rval);
761 ql_dbg(ql_dbg_disc, vha, 0xffff,
762 "Async-%s - hdl=%x portid %06x feature %x type %x.\n",
763 sp->name, sp->handle, d_id->b24, fc4feature, fc4type);
773 * qla2x00_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
776 * Returns 0 on success.
779 qla2x00_rnn_id(scsi_qla_host_t *vha)
781 struct qla_hw_data *ha = vha->hw;
783 if (IS_QLA2100(ha) || IS_QLA2200(ha))
784 return qla2x00_sns_rnn_id(vha);
786 return qla_async_rnnid(vha, &vha->d_id, vha->node_name);
789 static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
792 int rval = QLA_MEMORY_ALLOC_FAILED;
793 struct ct_sns_req *ct_req;
795 struct ct_sns_pkt *ct_sns;
797 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
801 sp->type = SRB_CT_PTHRU_CMD;
803 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
805 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
806 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
808 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
809 if (!sp->u.iocb_cmd.u.ctarg.req) {
810 ql_log(ql_log_warn, vha, 0xd041,
811 "%s: Failed to allocate ct_sns request.\n",
816 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
817 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
819 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
820 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
821 ql_log(ql_log_warn, vha, 0xd042,
822 "%s: Failed to allocate ct_sns request.\n",
826 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
827 memset(ct_sns, 0, sizeof(*ct_sns));
828 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
830 /* Prepare CT request */
831 ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE);
833 /* Prepare CT arguments -- port_id, node_name */
834 ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain;
835 ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area;
836 ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa;
837 memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE);
839 sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE;
840 sp->u.iocb_cmd.u.ctarg.rsp_size = RNN_ID_RSP_SIZE;
841 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
843 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
844 sp->done = qla2x00_async_sns_sp_done;
846 rval = qla2x00_start_sp(sp);
847 if (rval != QLA_SUCCESS) {
848 ql_dbg(ql_dbg_disc, vha, 0x204d,
849 "RNN_ID issue IOCB failed (%d).\n", rval);
852 ql_dbg(ql_dbg_disc, vha, 0xffff,
853 "Async-%s - hdl=%x portid %06x\n",
854 sp->name, sp->handle, d_id->b24);
865 qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
867 struct qla_hw_data *ha = vha->hw;
870 snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
871 ha->mr.fw_version, qla2x00_version_str);
874 "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
875 ha->fw_major_version, ha->fw_minor_version,
876 ha->fw_subminor_version, qla2x00_version_str);
880 * qla2x00_rsnn_nn() - SNS Register Symbolic Node Name (RSNN_NN) of the HBA.
883 * Returns 0 on success.
886 qla2x00_rsnn_nn(scsi_qla_host_t *vha)
888 struct qla_hw_data *ha = vha->hw;
890 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
891 ql_dbg(ql_dbg_disc, vha, 0x2050,
892 "RSNN_ID call unsupported on ISP2100/ISP2200.\n");
893 return (QLA_SUCCESS);
896 return qla_async_rsnn_nn(vha);
899 static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
901 int rval = QLA_MEMORY_ALLOC_FAILED;
902 struct ct_sns_req *ct_req;
904 struct ct_sns_pkt *ct_sns;
906 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
910 sp->type = SRB_CT_PTHRU_CMD;
911 sp->name = "rsnn_nn";
912 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
914 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
915 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
917 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
918 if (!sp->u.iocb_cmd.u.ctarg.req) {
919 ql_log(ql_log_warn, vha, 0xd041,
920 "%s: Failed to allocate ct_sns request.\n",
925 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
926 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
928 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
929 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
930 ql_log(ql_log_warn, vha, 0xd042,
931 "%s: Failed to allocate ct_sns request.\n",
935 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
936 memset(ct_sns, 0, sizeof(*ct_sns));
937 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
939 /* Prepare CT request */
940 ct_req = qla2x00_prep_ct_req(ct_sns, RSNN_NN_CMD, RSNN_NN_RSP_SIZE);
942 /* Prepare CT arguments -- node_name, symbolic node_name, size */
943 memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
945 /* Prepare the Symbolic Node Name */
946 qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
947 sizeof(ct_req->req.rsnn_nn.sym_node_name));
948 ct_req->req.rsnn_nn.name_len =
949 (uint8_t)strlen(ct_req->req.rsnn_nn.sym_node_name);
952 sp->u.iocb_cmd.u.ctarg.req_size = 24 + 1 + ct_req->req.rsnn_nn.name_len;
953 sp->u.iocb_cmd.u.ctarg.rsp_size = RSNN_NN_RSP_SIZE;
954 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
956 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
957 sp->done = qla2x00_async_sns_sp_done;
959 rval = qla2x00_start_sp(sp);
960 if (rval != QLA_SUCCESS) {
961 ql_dbg(ql_dbg_disc, vha, 0x2043,
962 "RFT_ID issue IOCB failed (%d).\n", rval);
965 ql_dbg(ql_dbg_disc, vha, 0xffff,
966 "Async-%s - hdl=%x.\n",
967 sp->name, sp->handle);
978 * qla2x00_prep_sns_cmd() - Prepare common SNS command request fields for query.
981 * @scmd_len: Subcommand length
982 * @data_size: response size in bytes
984 * Returns a pointer to the @ha's sns_cmd.
986 static inline struct sns_cmd_pkt *
987 qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
991 struct sns_cmd_pkt *sns_cmd;
992 struct qla_hw_data *ha = vha->hw;
994 sns_cmd = ha->sns_cmd;
995 memset(sns_cmd, 0, sizeof(struct sns_cmd_pkt));
996 wc = data_size / 2; /* Size in 16bit words. */
997 sns_cmd->p.cmd.buffer_length = cpu_to_le16(wc);
998 sns_cmd->p.cmd.buffer_address[0] = cpu_to_le32(LSD(ha->sns_cmd_dma));
999 sns_cmd->p.cmd.buffer_address[1] = cpu_to_le32(MSD(ha->sns_cmd_dma));
1000 sns_cmd->p.cmd.subcommand_length = cpu_to_le16(scmd_len);
1001 sns_cmd->p.cmd.subcommand = cpu_to_le16(cmd);
1002 wc = (data_size - 16) / 4; /* Size in 32bit words. */
1003 sns_cmd->p.cmd.size = cpu_to_le16(wc);
1005 vha->qla_stats.control_requests++;
1011 * qla2x00_sns_ga_nxt() - SNS scan for fabric devices via GA_NXT command.
1013 * @fcport: fcport entry to updated
1015 * This command uses the old Exectute SNS Command mailbox routine.
1017 * Returns 0 on success.
1020 qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
1022 int rval = QLA_SUCCESS;
1023 struct qla_hw_data *ha = vha->hw;
1024 struct sns_cmd_pkt *sns_cmd;
1027 /* Prepare SNS command request. */
1028 sns_cmd = qla2x00_prep_sns_cmd(vha, GA_NXT_CMD, GA_NXT_SNS_SCMD_LEN,
1029 GA_NXT_SNS_DATA_SIZE);
1031 /* Prepare SNS command arguments -- port_id. */
1032 sns_cmd->p.cmd.param[0] = fcport->d_id.b.al_pa;
1033 sns_cmd->p.cmd.param[1] = fcport->d_id.b.area;
1034 sns_cmd->p.cmd.param[2] = fcport->d_id.b.domain;
1036 /* Execute SNS command. */
1037 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GA_NXT_SNS_CMD_SIZE / 2,
1038 sizeof(struct sns_cmd_pkt));
1039 if (rval != QLA_SUCCESS) {
1041 ql_dbg(ql_dbg_disc, vha, 0x205f,
1042 "GA_NXT Send SNS failed (%d).\n", rval);
1043 } else if (sns_cmd->p.gan_data[8] != 0x80 ||
1044 sns_cmd->p.gan_data[9] != 0x02) {
1045 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
1046 "GA_NXT failed, rejected request ga_nxt_rsp:\n");
1047 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
1048 sns_cmd->p.gan_data, 16);
1049 rval = QLA_FUNCTION_FAILED;
1051 /* Populate fc_port_t entry. */
1052 fcport->d_id.b.domain = sns_cmd->p.gan_data[17];
1053 fcport->d_id.b.area = sns_cmd->p.gan_data[18];
1054 fcport->d_id.b.al_pa = sns_cmd->p.gan_data[19];
1056 memcpy(fcport->node_name, &sns_cmd->p.gan_data[284], WWN_SIZE);
1057 memcpy(fcport->port_name, &sns_cmd->p.gan_data[20], WWN_SIZE);
1059 if (sns_cmd->p.gan_data[16] != NS_N_PORT_TYPE &&
1060 sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
1061 fcport->d_id.b.domain = 0xf0;
1063 ql_dbg(ql_dbg_disc, vha, 0x2061,
1064 "GA_NXT entry - nn %8phN pn %8phN "
1065 "port_id=%02x%02x%02x.\n",
1066 fcport->node_name, fcport->port_name,
1067 fcport->d_id.b.domain, fcport->d_id.b.area,
1068 fcport->d_id.b.al_pa);
1075 * qla2x00_sns_gid_pt() - SNS scan for fabric devices via GID_PT command.
1077 * @list: switch info entries to populate
1079 * This command uses the old Exectute SNS Command mailbox routine.
1081 * NOTE: Non-Nx_Ports are not requested.
1083 * Returns 0 on success.
1086 qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
1089 struct qla_hw_data *ha = vha->hw;
1092 struct sns_cmd_pkt *sns_cmd;
1093 uint16_t gid_pt_sns_data_size;
1095 gid_pt_sns_data_size = qla2x00_gid_pt_rsp_size(vha);
1098 /* Prepare SNS command request. */
1099 sns_cmd = qla2x00_prep_sns_cmd(vha, GID_PT_CMD, GID_PT_SNS_SCMD_LEN,
1100 gid_pt_sns_data_size);
1102 /* Prepare SNS command arguments -- port_type. */
1103 sns_cmd->p.cmd.param[0] = NS_NX_PORT_TYPE;
1105 /* Execute SNS command. */
1106 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, GID_PT_SNS_CMD_SIZE / 2,
1107 sizeof(struct sns_cmd_pkt));
1108 if (rval != QLA_SUCCESS) {
1110 ql_dbg(ql_dbg_disc, vha, 0x206d,
1111 "GID_PT Send SNS failed (%d).\n", rval);
1112 } else if (sns_cmd->p.gid_data[8] != 0x80 ||
1113 sns_cmd->p.gid_data[9] != 0x02) {
1114 ql_dbg(ql_dbg_disc, vha, 0x202f,
1115 "GID_PT failed, rejected request, gid_rsp:\n");
1116 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
1117 sns_cmd->p.gid_data, 16);
1118 rval = QLA_FUNCTION_FAILED;
1120 /* Set port IDs in switch info list. */
1121 for (i = 0; i < ha->max_fibre_devices; i++) {
1122 entry = &sns_cmd->p.gid_data[(i * 4) + 16];
1123 list[i].d_id.b.domain = entry[1];
1124 list[i].d_id.b.area = entry[2];
1125 list[i].d_id.b.al_pa = entry[3];
1127 /* Last one exit. */
1128 if (entry[0] & BIT_7) {
1129 list[i].d_id.b.rsvd_1 = entry[0];
1135 * If we've used all available slots, then the switch is
1136 * reporting back more devices that we can handle with this
1137 * single call. Return a failed status, and let GA_NXT handle
1140 if (i == ha->max_fibre_devices)
1141 rval = QLA_FUNCTION_FAILED;
1148 * qla2x00_sns_gpn_id() - SNS Get Port Name (GPN_ID) query.
1150 * @list: switch info entries to populate
1152 * This command uses the old Exectute SNS Command mailbox routine.
1154 * Returns 0 on success.
1157 qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
1159 int rval = QLA_SUCCESS;
1160 struct qla_hw_data *ha = vha->hw;
1162 struct sns_cmd_pkt *sns_cmd;
1164 for (i = 0; i < ha->max_fibre_devices; i++) {
1166 /* Prepare SNS command request. */
1167 sns_cmd = qla2x00_prep_sns_cmd(vha, GPN_ID_CMD,
1168 GPN_ID_SNS_SCMD_LEN, GPN_ID_SNS_DATA_SIZE);
1170 /* Prepare SNS command arguments -- port_id. */
1171 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1172 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1173 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1175 /* Execute SNS command. */
1176 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1177 GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1178 if (rval != QLA_SUCCESS) {
1180 ql_dbg(ql_dbg_disc, vha, 0x2032,
1181 "GPN_ID Send SNS failed (%d).\n", rval);
1182 } else if (sns_cmd->p.gpn_data[8] != 0x80 ||
1183 sns_cmd->p.gpn_data[9] != 0x02) {
1184 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
1185 "GPN_ID failed, rejected request, gpn_rsp:\n");
1186 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207f,
1187 sns_cmd->p.gpn_data, 16);
1188 rval = QLA_FUNCTION_FAILED;
1191 memcpy(list[i].port_name, &sns_cmd->p.gpn_data[16],
1195 /* Last device exit. */
1196 if (list[i].d_id.b.rsvd_1 != 0)
1204 * qla2x00_sns_gnn_id() - SNS Get Node Name (GNN_ID) query.
 * @vha: HA context
1206 * @list: switch info entries to populate
1208 * This command uses the old Execute SNS Command mailbox routine.
1210 * Returns 0 on success.
1213 qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
1215 int rval = QLA_SUCCESS;
1216 struct qla_hw_data *ha = vha->hw;
1218 struct sns_cmd_pkt *sns_cmd;
 /* Issue one GNN_ID query per previously-discovered port ID in @list. */
1220 for (i = 0; i < ha->max_fibre_devices; i++) {
1222 /* Prepare SNS command request. */
1223 sns_cmd = qla2x00_prep_sns_cmd(vha, GNN_ID_CMD,
1224 GNN_ID_SNS_SCMD_LEN, GNN_ID_SNS_DATA_SIZE);
1226 /* Prepare SNS command arguments -- port_id (al_pa/area/domain order). */
1227 sns_cmd->p.cmd.param[0] = list[i].d_id.b.al_pa;
1228 sns_cmd->p.cmd.param[1] = list[i].d_id.b.area;
1229 sns_cmd->p.cmd.param[2] = list[i].d_id.b.domain;
1231 /* Execute SNS command. */
1232 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma,
1233 GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
1234 if (rval != QLA_SUCCESS) {
1236 ql_dbg(ql_dbg_disc, vha, 0x203f,
1237 "GNN_ID Send SNS failed (%d).\n", rval);
 /* 0x8002 at bytes 8/9 is CT Accept; anything else is a reject. */
1238 } else if (sns_cmd->p.gnn_data[8] != 0x80 ||
1239 sns_cmd->p.gnn_data[9] != 0x02) {
1240 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
1241 "GNN_ID failed, rejected request, gnn_rsp:\n");
1242 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
1243 sns_cmd->p.gnn_data, 16);
1244 rval = QLA_FUNCTION_FAILED;
 /* Accepted: the node name starts at byte 16 of the payload. */
1247 memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
1250 ql_dbg(ql_dbg_disc, vha, 0x206e,
1251 "GID_PT entry - nn %8phN pn %8phN "
1252 "port_id=%02x%02x%02x.\n",
1253 list[i].node_name, list[i].port_name,
1254 list[i].d_id.b.domain, list[i].d_id.b.area,
1255 list[i].d_id.b.al_pa);
1258 /* Last device exit. */
1259 if (list[i].d_id.b.rsvd_1 != 0)
1267 * qla2x00_sns_rft_id() - SNS Register FC-4 TYPEs (RFT_ID) supported by the HBA.
 * @vha: HA context
1270 * This command uses the old Execute SNS Command mailbox routine.
1272 * Returns 0 on success.
1275 qla2x00_sns_rft_id(scsi_qla_host_t *vha)
1278 struct qla_hw_data *ha = vha->hw;
1279 struct sns_cmd_pkt *sns_cmd;
1282 /* Prepare SNS command request. */
1283 sns_cmd = qla2x00_prep_sns_cmd(vha, RFT_ID_CMD, RFT_ID_SNS_SCMD_LEN,
1284 RFT_ID_SNS_DATA_SIZE);
1286 /* Prepare SNS command arguments -- port_id, FC-4 types */
1287 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1288 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1289 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
 /* Register the FCP (SCSI) FC-4 type bit with the name server. */
1291 sns_cmd->p.cmd.param[5] = 0x01; /* FCP-3 */
1293 /* Execute SNS command. */
1294 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RFT_ID_SNS_CMD_SIZE / 2,
1295 sizeof(struct sns_cmd_pkt));
1296 if (rval != QLA_SUCCESS) {
1298 ql_dbg(ql_dbg_disc, vha, 0x2060,
1299 "RFT_ID Send SNS failed (%d).\n", rval);
 /* 0x8002 at bytes 8/9 is CT Accept; anything else is a reject. */
1300 } else if (sns_cmd->p.rft_data[8] != 0x80 ||
1301 sns_cmd->p.rft_data[9] != 0x02) {
1302 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
1303 "RFT_ID failed, rejected request rft_rsp:\n");
1304 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
1305 sns_cmd->p.rft_data, 16);
1306 rval = QLA_FUNCTION_FAILED;
1308 ql_dbg(ql_dbg_disc, vha, 0x2073,
1309 "RFT_ID exiting normally.\n");
1316 * qla2x00_sns_rnn_id() - SNS Register Node Name (RNN_ID) of the HBA.
 * @vha: HA context
1319 * This command uses the old Execute SNS Command mailbox routine.
1321 * Returns 0 on success.
1324 qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
1327 struct qla_hw_data *ha = vha->hw;
1328 struct sns_cmd_pkt *sns_cmd;
1331 /* Prepare SNS command request. */
1332 sns_cmd = qla2x00_prep_sns_cmd(vha, RNN_ID_CMD, RNN_ID_SNS_SCMD_LEN,
1333 RNN_ID_SNS_DATA_SIZE);
1335 /* Prepare SNS command arguments -- port_id, nodename. */
1336 sns_cmd->p.cmd.param[0] = vha->d_id.b.al_pa;
1337 sns_cmd->p.cmd.param[1] = vha->d_id.b.area;
1338 sns_cmd->p.cmd.param[2] = vha->d_id.b.domain;
 /* Node name is copied byte-reversed: params are little-endian words
  * while the WWN is stored big-endian. */
1340 sns_cmd->p.cmd.param[4] = vha->node_name[7];
1341 sns_cmd->p.cmd.param[5] = vha->node_name[6];
1342 sns_cmd->p.cmd.param[6] = vha->node_name[5];
1343 sns_cmd->p.cmd.param[7] = vha->node_name[4];
1344 sns_cmd->p.cmd.param[8] = vha->node_name[3];
1345 sns_cmd->p.cmd.param[9] = vha->node_name[2];
1346 sns_cmd->p.cmd.param[10] = vha->node_name[1];
1347 sns_cmd->p.cmd.param[11] = vha->node_name[0];
1349 /* Execute SNS command. */
1350 rval = qla2x00_send_sns(vha, ha->sns_cmd_dma, RNN_ID_SNS_CMD_SIZE / 2,
1351 sizeof(struct sns_cmd_pkt));
1352 if (rval != QLA_SUCCESS) {
1354 ql_dbg(ql_dbg_disc, vha, 0x204a,
1355 "RNN_ID Send SNS failed (%d).\n", rval);
 /* 0x8002 at bytes 8/9 is CT Accept; anything else is a reject. */
1356 } else if (sns_cmd->p.rnn_data[8] != 0x80 ||
1357 sns_cmd->p.rnn_data[9] != 0x02) {
1358 ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
1359 "RNN_ID failed, rejected request, rnn_rsp:\n");
1360 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
1361 sns_cmd->p.rnn_data, 16);
1362 rval = QLA_FUNCTION_FAILED;
1364 ql_dbg(ql_dbg_disc, vha, 0x204c,
1365 "RNN_ID exiting normally.\n");
1372 * qla2x00_mgmt_svr_login() - Login to fabric Management Service.
 * @vha: HA context
1375 * Returns 0 on success.
1378 qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
1381 uint16_t mb[MAILBOX_REGISTER_COUNT];
1382 struct qla_hw_data *ha = vha->hw;
 /* Fast path: nothing to do if a prior login already succeeded. */
1384 if (vha->flags.management_server_logged_in)
 /* Fabric login to the well-known management server address
  * (0xff.xx.xx) at the reserved management-server loop id. */
1387 rval = ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff,
1389 if (rval != QLA_SUCCESS || mb[0] != MBS_COMMAND_COMPLETE) {
1390 if (rval == QLA_MEMORY_ALLOC_FAILED)
1391 ql_dbg(ql_dbg_disc, vha, 0x2085,
1392 "Failed management_server login: loopid=%x "
1393 "rval=%d\n", vha->mgmt_svr_loop_id, rval);
1395 ql_dbg(ql_dbg_disc, vha, 0x2024,
1396 "Failed management_server login: loopid=%x "
1397 "mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
1398 vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6],
 /* NOTE(review): 'ret' (not 'rval') appears to carry the function's
  * return status — confirm against the truncated declarations/return. */
1400 ret = QLA_FUNCTION_FAILED;
1402 vha->flags.management_server_logged_in = 1;
1408 * qla2x00_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
 * @vha: HA context
1410 * @req_size: request size in bytes
1411 * @rsp_size: response size in bytes
1413 * Returns a pointer to the @ha's ms_iocb.
1416 qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1419 ms_iocb_entry_t *ms_pkt;
1420 struct qla_hw_data *ha = vha->hw;
1421 ms_pkt = ha->ms_iocb;
1422 memset(ms_pkt, 0, sizeof(ms_iocb_entry_t));
1424 ms_pkt->entry_type = MS_IOCB_TYPE;
1425 ms_pkt->entry_count = 1;
1426 SET_TARGET_ID(ha, ms_pkt->loop_id, vha->mgmt_svr_loop_id);
1427 ms_pkt->control_flags = cpu_to_le16(CF_READ | CF_HEAD_TAG);
 /* Timeout derived from resource-allocation timeout (2 * R_A_TOV). */
1428 ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1429 ms_pkt->cmd_dsd_count = cpu_to_le16(1);
1430 ms_pkt->total_dsd_count = cpu_to_le16(2);
1431 ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size);
1432 ms_pkt->req_bytecount = cpu_to_le32(req_size);
 /* Request and response share the same CT SNS DMA buffer: the
  * response overwrites the request in place after execution. */
1434 ms_pkt->dseg_req_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1435 ms_pkt->dseg_req_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1436 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1438 ms_pkt->dseg_rsp_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1439 ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1440 ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
1446 * qla24xx_prep_ms_fdmi_iocb() - Prepare common MS IOCB fields for FDMI query.
 * @vha: HA context
1448 * @req_size: request size in bytes
1449 * @rsp_size: response size in bytes
1451 * Returns a pointer to the @ha's ms_iocb.
 *
 * ISP24xx+ variant: builds a CT pass-through IOCB instead of the
 * legacy MS IOCB used by qla2x00_prep_ms_fdmi_iocb().
1454 qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size,
1457 struct ct_entry_24xx *ct_pkt;
1458 struct qla_hw_data *ha = vha->hw;
1460 ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1461 memset(ct_pkt, 0, sizeof(struct ct_entry_24xx));
1463 ct_pkt->entry_type = CT_IOCB_TYPE;
1464 ct_pkt->entry_count = 1;
1465 ct_pkt->nport_handle = cpu_to_le16(vha->mgmt_svr_loop_id);
 /* Timeout derived from resource-allocation timeout (2 * R_A_TOV). */
1466 ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
1467 ct_pkt->cmd_dsd_count = cpu_to_le16(1);
1468 ct_pkt->rsp_dsd_count = cpu_to_le16(1);
1469 ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size);
1470 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
 /* Command and response share the same CT SNS DMA buffer. */
1472 ct_pkt->dseg_0_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1473 ct_pkt->dseg_0_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1474 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1476 ct_pkt->dseg_1_address[0] = cpu_to_le32(LSD(ha->ct_sns_dma));
1477 ct_pkt->dseg_1_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
1478 ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
1479 ct_pkt->vp_index = vha->vp_idx;
 /* Patch the request byte count of an already-prepared FDMI MS/CT IOCB
  * once the final request size is known (the IOCB is built before the
  * variable-length attribute block is filled in). */
1484 static inline ms_iocb_entry_t *
1485 qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size)
1487 struct qla_hw_data *ha = vha->hw;
 /* Same buffer, viewed as either IOCB layout depending on ISP family. */
1488 ms_iocb_entry_t *ms_pkt = ha->ms_iocb;
1489 struct ct_entry_24xx *ct_pkt = (struct ct_entry_24xx *)ha->ms_iocb;
1491 if (IS_FWI2_CAPABLE(ha)) {
1492 ct_pkt->cmd_byte_count = cpu_to_le32(req_size);
1493 ct_pkt->dseg_0_len = ct_pkt->cmd_byte_count;
1495 ms_pkt->req_bytecount = cpu_to_le32(req_size);
1496 ms_pkt->dseg_req_length = ms_pkt->req_bytecount;
1503 * qla2x00_prep_ct_req() - Prepare common CT request fields for SNS query.
1504 * @p: CT request buffer
 * @cmd: GS command opcode
1506 * @rsp_size: response size in bytes
1508 * Returns a pointer to the initialized @ct_req.
1510 static inline struct ct_sns_req *
1511 qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
1514 memset(p, 0, sizeof(struct ct_sns_pkt));
1516 p->p.req.header.revision = 0x01;
 /* GS type 0xFA = Management Service, subtype 0x10 = FDMI (HBA mgmt). */
1517 p->p.req.header.gs_type = 0xFA;
1518 p->p.req.header.gs_subtype = 0x10;
1519 p->p.req.command = cpu_to_be16(cmd);
 /* Max response size in 4-byte words, excluding the 16-byte CT header. */
1520 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
1526 * qla2x00_fdmi_rhba() - perform RHBA FDMI registration
 * @vha: HA context
 *
 * Registers the HBA and its attribute block (node name, manufacturer,
 * serial number, model, versions, ...) with the fabric's FDMI server.
1529 * Returns 0 on success.
1532 qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
1537 ms_iocb_entry_t *ms_pkt;
1538 struct ct_sns_req *ct_req;
1539 struct ct_sns_rsp *ct_rsp;
1541 struct ct_fdmi_hba_attr *eiter;
1542 struct qla_hw_data *ha = vha->hw;
1545 /* Prepare common MS IOCB */
1546 /* Request size adjusted after CT preparation */
1547 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1549 /* Prepare CT request */
1550 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD, RHBA_RSP_SIZE);
1551 ct_rsp = &ha->ct_sns->p.rsp;
1553 /* Prepare FDMI command arguments -- attribute block, attributes. */
1554 memcpy(ct_req->req.rhba.hba_identifier, vha->port_name, WWN_SIZE);
1555 ct_req->req.rhba.entry_count = cpu_to_be32(1);
1556 memcpy(ct_req->req.rhba.port_name, vha->port_name, WWN_SIZE);
 /* Running payload size: identifier + port name + two count words.
  * 'size' is the offset of the next attribute within the request. */
1557 size = 2 * WWN_SIZE + 4 + 4;
1560 ct_req->req.rhba.attrs.count =
1561 cpu_to_be32(FDMI_HBA_ATTR_COUNT);
1562 entries = ct_req->req.rhba.hba_identifier;
 /* Node name attribute. */
1565 eiter = entries + size;
1566 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1567 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1568 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1569 size += 4 + WWN_SIZE;
1571 ql_dbg(ql_dbg_disc, vha, 0x2025,
1572 "NodeName = %8phN.\n", eiter->a.node_name);
 /* Manufacturer attribute.
  * NOTE(review): alen is computed from QLA2XXX_MANUFACTURER while the
  * string written is the literal "QLogic Corporation" — the encoded
  * length is wrong if the macro differs from the literal; confirm. */
1575 eiter = entries + size;
1576 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1577 alen = strlen(QLA2XXX_MANUFACTURER);
1578 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
1579 "%s", "QLogic Corporation");
 /* Pad to a 4-byte boundary; note this adds a full 4 bytes even when
  * alen is already aligned (alen & 3 == 0). */
1580 alen += 4 - (alen & 3);
1581 eiter->len = cpu_to_be16(4 + alen);
1584 ql_dbg(ql_dbg_disc, vha, 0x2026,
1585 "Manufacturer = %s.\n", eiter->a.manufacturer);
1587 /* Serial number. */
1588 eiter = entries + size;
1589 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
1590 if (IS_FWI2_CAPABLE(ha))
1591 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
1592 sizeof(eiter->a.serial_num));
 /* Legacy ISPs: synthesize the serial from the NVRAM serial bytes. */
1594 sn = ((ha->serial0 & 0x1f) << 16) |
1595 (ha->serial2 << 8) | ha->serial1;
1596 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
1597 "%c%05d", 'A' + sn / 100000, sn % 100000);
1599 alen = strlen(eiter->a.serial_num);
1600 alen += 4 - (alen & 3);
1601 eiter->len = cpu_to_be16(4 + alen);
1604 ql_dbg(ql_dbg_disc, vha, 0x2027,
1605 "Serial no. = %s.\n", eiter->a.serial_num);
 /* Model name attribute. */
1608 eiter = entries + size;
1609 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
1610 snprintf(eiter->a.model, sizeof(eiter->a.model),
1611 "%s", ha->model_number);
1612 alen = strlen(eiter->a.model);
1613 alen += 4 - (alen & 3);
1614 eiter->len = cpu_to_be16(4 + alen);
1617 ql_dbg(ql_dbg_disc, vha, 0x2028,
1618 "Model Name = %s.\n", eiter->a.model);
1620 /* Model description. */
1621 eiter = entries + size;
1622 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
1623 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
1624 "%s", ha->model_desc);
1625 alen = strlen(eiter->a.model_desc);
1626 alen += 4 - (alen & 3);
1627 eiter->len = cpu_to_be16(4 + alen);
1630 ql_dbg(ql_dbg_disc, vha, 0x2029,
1631 "Model Desc = %s.\n", eiter->a.model_desc);
1633 /* Hardware version. */
 /* Prefer VPD "MN", then "EC"; fall back to the adapter id string. */
1634 eiter = entries + size;
1635 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
1636 if (!IS_FWI2_CAPABLE(ha)) {
1637 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1638 "HW:%s", ha->adapter_id);
1639 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
1640 sizeof(eiter->a.hw_version))) {
1642 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
1643 sizeof(eiter->a.hw_version))) {
1646 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
1647 "HW:%s", ha->adapter_id);
1649 alen = strlen(eiter->a.hw_version);
1650 alen += 4 - (alen & 3);
1651 eiter->len = cpu_to_be16(4 + alen);
1654 ql_dbg(ql_dbg_disc, vha, 0x202a,
1655 "Hardware ver = %s.\n", eiter->a.hw_version);
1657 /* Driver version. */
1658 eiter = entries + size;
1659 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
1660 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
1661 "%s", qla2x00_version_str);
1662 alen = strlen(eiter->a.driver_version);
1663 alen += 4 - (alen & 3);
1664 eiter->len = cpu_to_be16(4 + alen);
1667 ql_dbg(ql_dbg_disc, vha, 0x202b,
1668 "Driver ver = %s.\n", eiter->a.driver_version);
1670 /* Option ROM version. */
1671 eiter = entries + size;
1672 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
1673 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
1674 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
1675 alen = strlen(eiter->a.orom_version);
1676 alen += 4 - (alen & 3);
1677 eiter->len = cpu_to_be16(4 + alen);
1680 ql_dbg(ql_dbg_disc, vha , 0x202c,
1681 "Optrom vers = %s.\n", eiter->a.orom_version);
1683 /* Firmware version */
1684 eiter = entries + size;
1685 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
1686 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
1687 sizeof(eiter->a.fw_version));
1688 alen = strlen(eiter->a.fw_version);
1689 alen += 4 - (alen & 3);
1690 eiter->len = cpu_to_be16(4 + alen);
1693 ql_dbg(ql_dbg_disc, vha, 0x202d,
1694 "Firmware vers = %s.\n", eiter->a.fw_version);
 /* 'size + 16' accounts for the 16-byte CT header. */
1696 /* Update MS request size. */
1697 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1699 ql_dbg(ql_dbg_disc, vha, 0x202e,
1700 "RHBA identifier = %8phN size=%d.\n",
1701 ct_req->req.rhba.hba_identifier, size);
1702 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
1705 /* Execute MS IOCB */
1706 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1707 sizeof(ms_iocb_entry_t));
1708 if (rval != QLA_SUCCESS) {
1710 ql_dbg(ql_dbg_disc, vha, 0x2030,
1711 "RHBA issue IOCB failed (%d).\n", rval);
1712 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
1714 rval = QLA_FUNCTION_FAILED;
 /* "Already registered" is benign; report it distinctly so the
  * caller can skip re-registration. */
1715 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1716 ct_rsp->header.explanation_code ==
1717 CT_EXPL_ALREADY_REGISTERED) {
1718 ql_dbg(ql_dbg_disc, vha, 0x2034,
1719 "HBA already registered.\n");
1720 rval = QLA_ALREADY_REGISTERED;
1722 ql_dbg(ql_dbg_disc, vha, 0x20ad,
1723 "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
1724 ct_rsp->header.reason_code,
1725 ct_rsp->header.explanation_code);
1728 ql_dbg(ql_dbg_disc, vha, 0x2035,
1729 "RHBA exiting normally.\n");
1736 * qla2x00_fdmi_rpa() - perform RPA registration
 * @vha: HA context
 *
 * Registers the port's FDMI attributes (FC-4 types, speeds, max frame
 * size, OS device name, host name) with the fabric's FDMI server.
1739 * Returns 0 on success.
1742 qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
1746 struct qla_hw_data *ha = vha->hw;
1747 ms_iocb_entry_t *ms_pkt;
1748 struct ct_sns_req *ct_req;
1749 struct ct_sns_rsp *ct_rsp;
1751 struct ct_fdmi_port_attr *eiter;
1752 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
1753 struct new_utsname *p_sysid = NULL;
1756 /* Prepare common MS IOCB */
1757 /* Request size adjusted after CT preparation */
1758 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
1760 /* Prepare CT request */
1761 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
1763 ct_rsp = &ha->ct_sns->p.rsp;
1765 /* Prepare FDMI command arguments -- attribute block, attributes. */
1766 memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
 /* Running payload size; offset of the next attribute to append. */
1767 size = WWN_SIZE + 4;
1770 ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
1771 entries = ct_req->req.rpa.port_name;
 /* FC-4 types attribute: 32-byte bitmap. */
1774 eiter = entries + size;
1775 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
1776 eiter->len = cpu_to_be16(4 + 32);
 /* Byte 2, bit 0 marks FC-4 type 0x08 (SCSI-FCP) in the bitmap. */
1777 eiter->a.fc4_types[2] = 0x01;
1780 ql_dbg(ql_dbg_disc, vha, 0x2039,
1781 "FC4_TYPES=%02x %02x.\n",
1782 eiter->a.fc4_types[2],
1783 eiter->a.fc4_types[1]);
1785 /* Supported speed. */
 /* Advertised speed mask depends on the ISP generation. */
1786 eiter = entries + size;
1787 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
1788 eiter->len = cpu_to_be16(4 + 4);
1789 if (IS_CNA_CAPABLE(ha))
1790 eiter->a.sup_speed = cpu_to_be32(
1791 FDMI_PORT_SPEED_10GB);
1792 else if (IS_QLA27XX(ha))
1793 eiter->a.sup_speed = cpu_to_be32(
1794 FDMI_PORT_SPEED_32GB|
1795 FDMI_PORT_SPEED_16GB|
1796 FDMI_PORT_SPEED_8GB);
1797 else if (IS_QLA2031(ha))
1798 eiter->a.sup_speed = cpu_to_be32(
1799 FDMI_PORT_SPEED_16GB|
1800 FDMI_PORT_SPEED_8GB|
1801 FDMI_PORT_SPEED_4GB);
1802 else if (IS_QLA25XX(ha))
1803 eiter->a.sup_speed = cpu_to_be32(
1804 FDMI_PORT_SPEED_8GB|
1805 FDMI_PORT_SPEED_4GB|
1806 FDMI_PORT_SPEED_2GB|
1807 FDMI_PORT_SPEED_1GB);
1808 else if (IS_QLA24XX_TYPE(ha))
1809 eiter->a.sup_speed = cpu_to_be32(
1810 FDMI_PORT_SPEED_4GB|
1811 FDMI_PORT_SPEED_2GB|
1812 FDMI_PORT_SPEED_1GB);
1813 else if (IS_QLA23XX(ha))
1814 eiter->a.sup_speed = cpu_to_be32(
1815 FDMI_PORT_SPEED_2GB|
1816 FDMI_PORT_SPEED_1GB);
1818 eiter->a.sup_speed = cpu_to_be32(
1819 FDMI_PORT_SPEED_1GB);
1822 ql_dbg(ql_dbg_disc, vha, 0x203a,
1823 "Supported_Speed=%x.\n", eiter->a.sup_speed);
1825 /* Current speed. */
 /* Map the firmware-reported link rate to the FDMI speed encoding. */
1826 eiter = entries + size;
1827 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
1828 eiter->len = cpu_to_be16(4 + 4);
1829 switch (ha->link_data_rate) {
1830 case PORT_SPEED_1GB:
1831 eiter->a.cur_speed =
1832 cpu_to_be32(FDMI_PORT_SPEED_1GB);
1834 case PORT_SPEED_2GB:
1835 eiter->a.cur_speed =
1836 cpu_to_be32(FDMI_PORT_SPEED_2GB);
1838 case PORT_SPEED_4GB:
1839 eiter->a.cur_speed =
1840 cpu_to_be32(FDMI_PORT_SPEED_4GB);
1842 case PORT_SPEED_8GB:
1843 eiter->a.cur_speed =
1844 cpu_to_be32(FDMI_PORT_SPEED_8GB);
1846 case PORT_SPEED_10GB:
1847 eiter->a.cur_speed =
1848 cpu_to_be32(FDMI_PORT_SPEED_10GB);
1850 case PORT_SPEED_16GB:
1851 eiter->a.cur_speed =
1852 cpu_to_be32(FDMI_PORT_SPEED_16GB);
1854 case PORT_SPEED_32GB:
1855 eiter->a.cur_speed =
1856 cpu_to_be32(FDMI_PORT_SPEED_32GB);
1859 eiter->a.cur_speed =
1860 cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
1865 ql_dbg(ql_dbg_disc, vha, 0x203b,
1866 "Current_Speed=%x.\n", eiter->a.cur_speed);
1868 /* Max frame size. */
 /* Read from the family-appropriate init control block, then convert
  * the CPU-order value to the wire's big-endian 32-bit field. */
1869 eiter = entries + size;
1870 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
1871 eiter->len = cpu_to_be16(4 + 4);
1872 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
1873 le16_to_cpu(icb24->frame_payload_size) :
1874 le16_to_cpu(ha->init_cb->frame_payload_size);
1875 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
1878 ql_dbg(ql_dbg_disc, vha, 0x203c,
1879 "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
1881 /* OS device name. */
1882 eiter = entries + size;
1883 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
1884 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
1885 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
1886 alen = strlen(eiter->a.os_dev_name);
1887 alen += 4 - (alen & 3);
1888 eiter->len = cpu_to_be16(4 + alen);
1891 ql_dbg(ql_dbg_disc, vha, 0x204b,
1892 "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
 /* Host name attribute: prefer the kernel utsname nodename, falling
  * back to the FC transport's system hostname. */
1895 eiter = entries + size;
1896 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
1897 p_sysid = utsname();
1899 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1900 "%s", p_sysid->nodename);
1902 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
1903 "%s", fc_host_system_hostname(vha->host));
1905 alen = strlen(eiter->a.host_name);
1906 alen += 4 - (alen & 3);
1907 eiter->len = cpu_to_be16(4 + alen);
1910 ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
 /* 'size + 16' accounts for the 16-byte CT header. */
1912 /* Update MS request size. */
1913 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
1915 ql_dbg(ql_dbg_disc, vha, 0x203e,
1916 "RPA portname %016llx, size = %d.\n",
1917 wwn_to_u64(ct_req->req.rpa.port_name), size);
1918 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
1921 /* Execute MS IOCB */
1922 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
1923 sizeof(ms_iocb_entry_t));
1924 if (rval != QLA_SUCCESS) {
1926 ql_dbg(ql_dbg_disc, vha, 0x2040,
1927 "RPA issue IOCB failed (%d).\n", rval);
1928 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
1930 rval = QLA_FUNCTION_FAILED;
 /* "Already registered" is benign; report it distinctly. */
1931 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
1932 ct_rsp->header.explanation_code ==
1933 CT_EXPL_ALREADY_REGISTERED) {
1934 ql_dbg(ql_dbg_disc, vha, 0x20cd,
1935 "RPA already registered.\n");
1936 rval = QLA_ALREADY_REGISTERED;
1940 ql_dbg(ql_dbg_disc, vha, 0x2041,
1941 "RPA exiting normally.\n");
1948 * qla2x00_fdmiv2_rhba() - perform RHBA FDMI v2 registration
 * @vha: HA context
 *
 * FDMI-2 variant of qla2x00_fdmi_rhba(): registers the extended HBA
 * attribute set (adds OS name/version, max CT payload, symbolic name,
 * vendor id, port count, fabric name, BIOS name, vendor identifier).
1951 * Returns 0 on success.
1954 qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
1958 ms_iocb_entry_t *ms_pkt;
1959 struct ct_sns_req *ct_req;
1960 struct ct_sns_rsp *ct_rsp;
1962 struct ct_fdmiv2_hba_attr *eiter;
1963 struct qla_hw_data *ha = vha->hw;
1964 struct new_utsname *p_sysid = NULL;
1967 /* Prepare common MS IOCB */
1968 /* Request size adjusted after CT preparation */
1969 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
1971 /* Prepare CT request */
1972 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
1974 ct_rsp = &ha->ct_sns->p.rsp;
1976 /* Prepare FDMI command arguments -- attribute block, attributes. */
1977 memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
1978 ct_req->req.rhba2.entry_count = cpu_to_be32(1);
1979 memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
 /* Running payload size; offset of the next attribute to append. */
1980 size = 2 * WWN_SIZE + 4 + 4;
1983 ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
1984 entries = ct_req->req.rhba2.hba_identifier;
 /* Node name attribute. */
1987 eiter = entries + size;
1988 eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
1989 eiter->len = cpu_to_be16(4 + WWN_SIZE);
1990 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
1991 size += 4 + WWN_SIZE;
1993 ql_dbg(ql_dbg_disc, vha, 0x207d,
1994 "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
 /* Manufacturer attribute. */
1997 eiter = entries + size;
1998 eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
1999 snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
2000 "%s", "QLogic Corporation");
 /* NOTE(review): this explicit NUL is redundant — snprintf already
  * terminates — and indexes by the literal's length; confirm the
  * buffer is large enough for that index. */
2001 eiter->a.manufacturer[strlen("QLogic Corporation")] = '\0';
2002 alen = strlen(eiter->a.manufacturer);
2003 alen += 4 - (alen & 3);
2004 eiter->len = cpu_to_be16(4 + alen);
2007 ql_dbg(ql_dbg_disc, vha, 0x20a5,
2008 "Manufacturer = %s.\n", eiter->a.manufacturer);
2010 /* Serial number. */
2011 eiter = entries + size;
2012 eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
2013 if (IS_FWI2_CAPABLE(ha))
2014 qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
2015 sizeof(eiter->a.serial_num));
 /* Legacy ISPs: synthesize the serial from the NVRAM serial bytes. */
2017 sn = ((ha->serial0 & 0x1f) << 16) |
2018 (ha->serial2 << 8) | ha->serial1;
2019 snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
2020 "%c%05d", 'A' + sn / 100000, sn % 100000);
2022 alen = strlen(eiter->a.serial_num);
2023 alen += 4 - (alen & 3);
2024 eiter->len = cpu_to_be16(4 + alen);
2027 ql_dbg(ql_dbg_disc, vha, 0x20a6,
2028 "Serial no. = %s.\n", eiter->a.serial_num);
 /* Model name attribute. */
2031 eiter = entries + size;
2032 eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
2033 snprintf(eiter->a.model, sizeof(eiter->a.model),
2034 "%s", ha->model_number);
2035 alen = strlen(eiter->a.model);
2036 alen += 4 - (alen & 3);
2037 eiter->len = cpu_to_be16(4 + alen);
2040 ql_dbg(ql_dbg_disc, vha, 0x20a7,
2041 "Model Name = %s.\n", eiter->a.model);
2043 /* Model description. */
2044 eiter = entries + size;
2045 eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
2046 snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
2047 "%s", ha->model_desc);
2048 alen = strlen(eiter->a.model_desc);
2049 alen += 4 - (alen & 3);
2050 eiter->len = cpu_to_be16(4 + alen);
2053 ql_dbg(ql_dbg_disc, vha, 0x20a8,
2054 "Model Desc = %s.\n", eiter->a.model_desc);
2056 /* Hardware version. */
 /* Prefer VPD "MN", then "EC"; fall back to the adapter id string. */
2057 eiter = entries + size;
2058 eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
2059 if (!IS_FWI2_CAPABLE(ha)) {
2060 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2061 "HW:%s", ha->adapter_id);
2062 } else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
2063 sizeof(eiter->a.hw_version))) {
2065 } else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
2066 sizeof(eiter->a.hw_version))) {
2069 snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
2070 "HW:%s", ha->adapter_id);
2072 alen = strlen(eiter->a.hw_version);
2073 alen += 4 - (alen & 3);
2074 eiter->len = cpu_to_be16(4 + alen);
2077 ql_dbg(ql_dbg_disc, vha, 0x20a9,
2078 "Hardware ver = %s.\n", eiter->a.hw_version);
2080 /* Driver version. */
2081 eiter = entries + size;
2082 eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
2083 snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
2084 "%s", qla2x00_version_str);
2085 alen = strlen(eiter->a.driver_version);
2086 alen += 4 - (alen & 3);
2087 eiter->len = cpu_to_be16(4 + alen);
2090 ql_dbg(ql_dbg_disc, vha, 0x20aa,
2091 "Driver ver = %s.\n", eiter->a.driver_version);
2093 /* Option ROM version. */
2094 eiter = entries + size;
2095 eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
2096 snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
2097 "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2098 alen = strlen(eiter->a.orom_version);
2099 alen += 4 - (alen & 3);
2100 eiter->len = cpu_to_be16(4 + alen);
2103 ql_dbg(ql_dbg_disc, vha , 0x20ab,
2104 "Optrom version = %d.%02d.\n", eiter->a.orom_version[1],
2105 eiter->a.orom_version[0]);
2107 /* Firmware version */
2108 eiter = entries + size;
2109 eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
2110 ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
2111 sizeof(eiter->a.fw_version));
2112 alen = strlen(eiter->a.fw_version);
2113 alen += 4 - (alen & 3);
2114 eiter->len = cpu_to_be16(4 + alen);
2117 ql_dbg(ql_dbg_disc, vha, 0x20ac,
2118 "Firmware vers = %s.\n", eiter->a.fw_version);
2120 /* OS Name and Version */
 /* Prefer kernel utsname; fall back to the FC transport hostname. */
2121 eiter = entries + size;
2122 eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
2123 p_sysid = utsname();
2125 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2127 p_sysid->sysname, p_sysid->release, p_sysid->version);
2129 snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
2130 "%s %s", "Linux", fc_host_system_hostname(vha->host));
2132 alen = strlen(eiter->a.os_version);
2133 alen += 4 - (alen & 3);
2134 eiter->len = cpu_to_be16(4 + alen);
2137 ql_dbg(ql_dbg_disc, vha, 0x20ae,
2138 "OS Name and Version = %s.\n", eiter->a.os_version);
2140 /* MAX CT Payload Length */
2141 eiter = entries + size;
2142 eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
2143 eiter->a.max_ct_len = cpu_to_be32(ha->frame_payload_size);
 /* NOTE(review): max_ct_len is passed through cpu_to_be32() twice —
  * the second conversion re-swaps the already-big-endian value and
  * looks like a byte-order bug; confirm against a fixed upstream. */
2144 eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
2145 eiter->len = cpu_to_be16(4 + 4);
2148 ql_dbg(ql_dbg_disc, vha, 0x20af,
2149 "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
2151 /* Node Symbolic Name */
2152 eiter = entries + size;
2153 eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
2154 qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
2155 sizeof(eiter->a.sym_name));
2156 alen = strlen(eiter->a.sym_name);
2157 alen += 4 - (alen & 3);
2158 eiter->len = cpu_to_be16(4 + alen);
2161 ql_dbg(ql_dbg_disc, vha, 0x20b0,
2162 "Symbolic Name = %s.\n", eiter->a.sym_name);
 /* Vendor id attribute (0x1077 = QLogic PCI vendor id). */
2165 eiter = entries + size;
2166 eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
2167 eiter->a.vendor_id = cpu_to_be32(0x1077);
2168 eiter->len = cpu_to_be16(4 + 4);
2171 ql_dbg(ql_dbg_disc, vha, 0x20b1,
2172 "Vendor Id = %x.\n", eiter->a.vendor_id);
 /* Number-of-ports attribute. */
2175 eiter = entries + size;
2176 eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
2177 eiter->a.num_ports = cpu_to_be32(1);
2178 eiter->len = cpu_to_be16(4 + 4);
2181 ql_dbg(ql_dbg_disc, vha, 0x20b2,
2182 "Port Num = %x.\n", eiter->a.num_ports);
 /* Fabric name attribute. */
2185 eiter = entries + size;
2186 eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
2187 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2188 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2189 size += 4 + WWN_SIZE;
2191 ql_dbg(ql_dbg_disc, vha, 0x20b3,
2192 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
 /* Boot BIOS name attribute. */
2195 eiter = entries + size;
2196 eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
2197 snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
2198 "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
2199 alen = strlen(eiter->a.bios_name);
2200 alen += 4 - (alen & 3);
2201 eiter->len = cpu_to_be16(4 + alen);
2204 ql_dbg(ql_dbg_disc, vha, 0x20b4,
2205 "BIOS Name = %s\n", eiter->a.bios_name);
2207 /* Vendor Identifier */
2208 eiter = entries + size;
2209 eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
2210 snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
2212 alen = strlen(eiter->a.vendor_identifier);
2213 alen += 4 - (alen & 3);
2214 eiter->len = cpu_to_be16(4 + alen);
2217 ql_dbg(ql_dbg_disc, vha, 0x201b,
2218 "Vendor Identifier = %s.\n", eiter->a.vendor_identifier);
 /* 'size + 16' accounts for the 16-byte CT header. */
2220 /* Update MS request size. */
2221 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2223 ql_dbg(ql_dbg_disc, vha, 0x20b5,
2224 "RHBA identifier = %016llx.\n",
2225 wwn_to_u64(ct_req->req.rhba2.hba_identifier));
2226 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
2229 /* Execute MS IOCB */
2230 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2231 sizeof(ms_iocb_entry_t));
2232 if (rval != QLA_SUCCESS) {
2234 ql_dbg(ql_dbg_disc, vha, 0x20b7,
2235 "RHBA issue IOCB failed (%d).\n", rval);
2236 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
2238 rval = QLA_FUNCTION_FAILED;
 /* "Already registered" is benign; report it distinctly. */
2240 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2241 ct_rsp->header.explanation_code ==
2242 CT_EXPL_ALREADY_REGISTERED) {
2243 ql_dbg(ql_dbg_disc, vha, 0x20b8,
2244 "HBA already registered.\n");
2245 rval = QLA_ALREADY_REGISTERED;
2247 ql_dbg(ql_dbg_disc, vha, 0x2016,
2248 "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2249 ct_rsp->header.reason_code,
2250 ct_rsp->header.explanation_code);
2253 ql_dbg(ql_dbg_disc, vha, 0x20b9,
2254 "RHBA FDMI V2 exiting normally.\n");
2261 * qla2x00_fdmi_dhba() - de-register the HBA (DHBA) from the fabric FDMI server
 * @vha: HA context
2264 * Returns 0 on success.
2267 qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
2270 struct qla_hw_data *ha = vha->hw;
2271 ms_iocb_entry_t *ms_pkt;
2272 struct ct_sns_req *ct_req;
2273 struct ct_sns_rsp *ct_rsp;
2276 /* Prepare common MS IOCB */
2277 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, DHBA_REQ_SIZE,
2280 /* Prepare CT request */
2281 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, DHBA_CMD, DHBA_RSP_SIZE);
2282 ct_rsp = &ha->ct_sns->p.rsp;
2284 /* Prepare FDMI command arguments -- portname. */
2285 memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
2287 ql_dbg(ql_dbg_disc, vha, 0x2036,
2288 "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
2290 /* Execute MS IOCB */
2291 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2292 sizeof(ms_iocb_entry_t));
2293 if (rval != QLA_SUCCESS) {
2295 ql_dbg(ql_dbg_disc, vha, 0x2037,
2296 "DHBA issue IOCB failed (%d).\n", rval);
2297 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
2299 rval = QLA_FUNCTION_FAILED;
2301 ql_dbg(ql_dbg_disc, vha, 0x2038,
2302 "DHBA exiting normally.\n");
2309 * qla2x00_fdmiv2_rpa() -
2312 * Returns 0 on success.
/*
 * qla2x00_fdmiv2_rpa() - issue an FDMI v2 Register Port Attributes (RPA)
 * CT command to the fabric management server.
 *
 * Builds the RPA attribute block in-place in ha->ct_sns: each attribute
 * entry ('eiter') is appended at offset 'size' past the port name, with a
 * big-endian type/len header followed by the attribute payload; 'size'
 * accumulates the total request length, which is then patched into the MS
 * IOCB via qla2x00_update_ms_fdmi_iocb().
 *
 * Returns QLA_SUCCESS, QLA_ALREADY_REGISTERED if the switch reports the
 * port is already registered, or QLA_FUNCTION_FAILED on other CT errors.
 */
2315 qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
2319 struct qla_hw_data *ha = vha->hw;
2320 ms_iocb_entry_t *ms_pkt;
2321 struct ct_sns_req *ct_req;
2322 struct ct_sns_rsp *ct_rsp;
2324 struct ct_fdmiv2_port_attr *eiter;
2325 struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
2326 struct new_utsname *p_sysid = NULL;
2329 /* Prepare common MS IOCB */
2330 /* Request size adjusted after CT preparation */
2331 ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
2333 /* Prepare CT request */
2334 ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD, RPA_RSP_SIZE);
2335 ct_rsp = &ha->ct_sns->p.rsp;
2337 /* Prepare FDMI command arguments -- attribute block, attributes. */
2338 memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
2339 size = WWN_SIZE + 4;
2342 ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
2343 entries = ct_req->req.rpa2.port_name;
/* FC-4 types attribute: byte 2 bit 0 marks FCP (type 08h) support. */
2346 eiter = entries + size;
2347 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
2348 eiter->len = cpu_to_be16(4 + 32);
2349 eiter->a.fc4_types[2] = 0x01;
2352 ql_dbg(ql_dbg_disc, vha, 0x20ba,
2353 "FC4_TYPES=%02x %02x.\n",
2354 eiter->a.fc4_types[2],
2355 eiter->a.fc4_types[1]);
2357 if (vha->flags.nvme_enabled) {
2358 eiter->a.fc4_types[6] = 1; /* NVMe type 28h */
2359 ql_dbg(ql_dbg_disc, vha, 0x211f,
2360 "NVME FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2361 eiter->a.fc4_types[6]);
2364 /* Supported speed. */
/* Advertised speed mask depends on the adapter generation. */
2365 eiter = entries + size;
2366 eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
2367 eiter->len = cpu_to_be16(4 + 4);
2368 if (IS_CNA_CAPABLE(ha))
2369 eiter->a.sup_speed = cpu_to_be32(
2370 FDMI_PORT_SPEED_10GB);
2371 else if (IS_QLA27XX(ha))
2372 eiter->a.sup_speed = cpu_to_be32(
2373 FDMI_PORT_SPEED_32GB|
2374 FDMI_PORT_SPEED_16GB|
2375 FDMI_PORT_SPEED_8GB);
2376 else if (IS_QLA2031(ha))
2377 eiter->a.sup_speed = cpu_to_be32(
2378 FDMI_PORT_SPEED_16GB|
2379 FDMI_PORT_SPEED_8GB|
2380 FDMI_PORT_SPEED_4GB);
2381 else if (IS_QLA25XX(ha))
2382 eiter->a.sup_speed = cpu_to_be32(
2383 FDMI_PORT_SPEED_8GB|
2384 FDMI_PORT_SPEED_4GB|
2385 FDMI_PORT_SPEED_2GB|
2386 FDMI_PORT_SPEED_1GB);
2387 else if (IS_QLA24XX_TYPE(ha))
2388 eiter->a.sup_speed = cpu_to_be32(
2389 FDMI_PORT_SPEED_4GB|
2390 FDMI_PORT_SPEED_2GB|
2391 FDMI_PORT_SPEED_1GB);
2392 else if (IS_QLA23XX(ha))
2393 eiter->a.sup_speed = cpu_to_be32(
2394 FDMI_PORT_SPEED_2GB|
2395 FDMI_PORT_SPEED_1GB);
/* Fallback for the oldest adapters: 1 Gb only. */
2397 eiter->a.sup_speed = cpu_to_be32(
2398 FDMI_PORT_SPEED_1GB);
2401 ql_dbg(ql_dbg_disc, vha, 0x20bb,
2402 "Supported Port Speed = %x.\n", eiter->a.sup_speed);
2404 /* Current speed. */
/* Map the firmware-reported link rate to the FDMI speed encoding. */
2405 eiter = entries + size;
2406 eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
2407 eiter->len = cpu_to_be16(4 + 4);
2408 switch (ha->link_data_rate) {
2409 case PORT_SPEED_1GB:
2410 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
2412 case PORT_SPEED_2GB:
2413 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
2415 case PORT_SPEED_4GB:
2416 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
2418 case PORT_SPEED_8GB:
2419 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
2421 case PORT_SPEED_10GB:
2422 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
2424 case PORT_SPEED_16GB:
2425 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
2427 case PORT_SPEED_32GB:
2428 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
/* Unrecognized rate: report "unknown" rather than guessing. */
2431 eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
2436 ql_dbg(ql_dbg_disc, vha, 0x2017,
2437 "Current_Speed = %x.\n", eiter->a.cur_speed);
2439 /* Max frame size. */
/* FWI2 adapters keep the payload size in the 24xx init control block. */
2440 eiter = entries + size;
2441 eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
2442 eiter->len = cpu_to_be16(4 + 4);
2443 eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
2444 le16_to_cpu(icb24->frame_payload_size):
2445 le16_to_cpu(ha->init_cb->frame_payload_size);
2446 eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
2449 ql_dbg(ql_dbg_disc, vha, 0x20bc,
2450 "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
2452 /* OS device name. */
/* NOTE(review): alen is taken from the bare driver name, not from the
 * formatted "name:hostN" string actually placed in the buffer — confirm
 * this matches the intended attribute length. Rounded up to a multiple
 * of 4 per CT padding rules. */
2453 eiter = entries + size;
2454 eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
2455 alen = strlen(QLA2XXX_DRIVER_NAME);
2456 snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
2457 "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
2458 alen += 4 - (alen & 3);
2459 eiter->len = cpu_to_be16(4 + alen);
2462 ql_dbg(ql_dbg_disc, vha, 0x20be,
2463 "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
/* Host name: prefer the kernel nodename, else the FC transport's
 * system hostname (selection condition not visible in this excerpt). */
2466 eiter = entries + size;
2467 eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
2468 p_sysid = utsname();
2470 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2471 "%s", p_sysid->nodename);
2473 snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
2474 "%s", fc_host_system_hostname(vha->host));
2476 alen = strlen(eiter->a.host_name);
2477 alen += 4 - (alen & 3);
2478 eiter->len = cpu_to_be16(4 + alen);
2481 ql_dbg(ql_dbg_disc, vha, 0x201a,
2482 "HostName=%s.\n", eiter->a.host_name);
/* Node name (WWNN). */
2485 eiter = entries + size;
2486 eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
2487 memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
2488 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2489 size += 4 + WWN_SIZE;
2491 ql_dbg(ql_dbg_disc, vha, 0x20c0,
2492 "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
/* Port name (WWPN). */
2495 eiter = entries + size;
2496 eiter->type = cpu_to_be16(FDMI_PORT_NAME);
2497 memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
2498 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2499 size += 4 + WWN_SIZE;
2501 ql_dbg(ql_dbg_disc, vha, 0x20c1,
2502 "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
2504 /* Port Symbolic Name */
2505 eiter = entries + size;
2506 eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
2507 qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
2508 sizeof(eiter->a.port_sym_name));
2509 alen = strlen(eiter->a.port_sym_name);
2510 alen += 4 - (alen & 3);
2511 eiter->len = cpu_to_be16(4 + alen);
2514 ql_dbg(ql_dbg_disc, vha, 0x20c2,
2515 "port symbolic name = %s\n", eiter->a.port_sym_name);
/* Port type: N_Port/NX_Port as seen by the name server. */
2518 eiter = entries + size;
2519 eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
2520 eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
2521 eiter->len = cpu_to_be16(4 + 4);
2524 ql_dbg(ql_dbg_disc, vha, 0x20c3,
2525 "Port Type = %x.\n", eiter->a.port_type);
2527 /* Class of Service */
2528 eiter = entries + size;
2529 eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
2530 eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
2531 eiter->len = cpu_to_be16(4 + 4);
2534 ql_dbg(ql_dbg_disc, vha, 0x20c4,
2535 "Supported COS = %08x\n", eiter->a.port_supported_cos);
2537 /* Port Fabric Name */
2538 eiter = entries + size;
2539 eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
2540 memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
2541 eiter->len = cpu_to_be16(4 + WWN_SIZE);
2542 size += 4 + WWN_SIZE;
2544 ql_dbg(ql_dbg_disc, vha, 0x20c5,
2545 "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
/* Active FC-4 type bitmap: byte 2 bit 0 = FCP; NVMe bit added below. */
2548 eiter = entries + size;
2549 eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
2550 eiter->a.port_fc4_type[0] = 0;
2551 eiter->a.port_fc4_type[1] = 0;
2552 eiter->a.port_fc4_type[2] = 1;
2553 eiter->a.port_fc4_type[3] = 0;
2554 eiter->len = cpu_to_be16(4 + 32);
2557 ql_dbg(ql_dbg_disc, vha, 0x20c6,
2558 "Port Active FC4 Type = %02x %02x.\n",
2559 eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
2561 if (vha->flags.nvme_enabled) {
2562 eiter->a.port_fc4_type[4] = 0;
2563 eiter->a.port_fc4_type[5] = 0;
2564 eiter->a.port_fc4_type[6] = 1; /* NVMe type 28h */
2565 ql_dbg(ql_dbg_disc, vha, 0x2120,
2566 "NVME Port Active FC4 Type = %02x 0x0 0x0 0x0 0x0 0x0.\n",
2567 eiter->a.port_fc4_type[6]);
/* Port state: 1 = online (FDMI encoding). */
2571 eiter = entries + size;
2572 eiter->type = cpu_to_be16(FDMI_PORT_STATE);
2573 eiter->a.port_state = cpu_to_be32(1);
2574 eiter->len = cpu_to_be16(4 + 4);
2577 ql_dbg(ql_dbg_disc, vha, 0x20c7,
2578 "Port State = %x.\n", eiter->a.port_state);
2580 /* Number of Ports */
2581 eiter = entries + size;
2582 eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
2583 eiter->a.num_ports = cpu_to_be32(1);
2584 eiter->len = cpu_to_be16(4 + 4);
2587 ql_dbg(ql_dbg_disc, vha, 0x20c8,
2588 "Number of ports = %x.\n", eiter->a.num_ports);
/* Port identifier (24-bit N_Port ID). */
2591 eiter = entries + size;
2592 eiter->type = cpu_to_be16(FDMI_PORT_ID);
2593 eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
2594 eiter->len = cpu_to_be16(4 + 4);
2597 ql_dbg(ql_dbg_disc, vha, 0x201c,
2598 "Port Id = %x.\n", eiter->a.port_id);
2600 /* Update MS request size. */
2601 qla2x00_update_ms_fdmi_iocb(vha, size + 16);
2603 ql_dbg(ql_dbg_disc, vha, 0x2018,
2604 "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
2605 ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
2608 /* Execute MS IOCB */
2609 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2610 sizeof(ms_iocb_entry_t));
2611 if (rval != QLA_SUCCESS) {
2613 ql_dbg(ql_dbg_disc, vha, 0x20cb,
2614 "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
2615 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
2617 rval = QLA_FUNCTION_FAILED;
/* "Cannot perform / already registered" is a benign outcome: report
 * QLA_ALREADY_REGISTERED so the caller can skip re-registration. */
2618 if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
2619 ct_rsp->header.explanation_code ==
2620 CT_EXPL_ALREADY_REGISTERED) {
2621 ql_dbg(ql_dbg_disc, vha, 0x20ce,
2622 "RPA FDMI v2 already registered\n");
2623 rval = QLA_ALREADY_REGISTERED;
2625 ql_dbg(ql_dbg_disc, vha, 0x2020,
2626 "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
2627 ct_rsp->header.reason_code,
2628 ct_rsp->header.explanation_code);
2631 ql_dbg(ql_dbg_disc, vha, 0x20cc,
2632 "RPA FDMI V2 exiting normally.\n");
2639 * qla2x00_fdmi_register() -
2642 * Returns 0 on success.
/*
 * qla2x00_fdmi_register() - top-level FDMI registration sequence.
 *
 * Logs into the fabric management server, then attempts FDMI v2
 * RHBA/RPA registration; if the HBA is already registered, it is
 * de-registered (DHBA) and RHBA is retried. The v1 path
 * (qla2x00_fdmi_rhba/rpa) is used as a fallback (branch conditions not
 * visible in this excerpt). ISP2100/2200 adapters do not support FDMI.
 * Returns 0 on success.
 */
2645 qla2x00_fdmi_register(scsi_qla_host_t *vha)
2647 int rval = QLA_FUNCTION_FAILED;
2648 struct qla_hw_data *ha = vha->hw;
/* Oldest ISPs have no management-server support. */
2650 if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
2652 return QLA_FUNCTION_FAILED;
2654 rval = qla2x00_mgmt_svr_login(vha);
/* FDMI v2 attempt first. */
2658 rval = qla2x00_fdmiv2_rhba(vha);
2660 if (rval != QLA_ALREADY_REGISTERED)
/* Already registered: de-register and retry RHBA v2. */
2663 rval = qla2x00_fdmi_dhba(vha);
2667 rval = qla2x00_fdmiv2_rhba(vha);
2671 rval = qla2x00_fdmiv2_rpa(vha);
/* FDMI v1 fallback path: same RHBA / DHBA-retry / RPA shape. */
2678 rval = qla2x00_fdmi_rhba(vha);
2680 if (rval != QLA_ALREADY_REGISTERED)
2683 rval = qla2x00_fdmi_dhba(vha);
2687 rval = qla2x00_fdmi_rhba(vha);
2691 rval = qla2x00_fdmi_rpa(vha);
2697 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
2699 * @list: switch info entries to populate
2701 * Returns 0 on success.
/*
 * qla2x00_gfpn_id() - SNS Get Fabric Port Name (GFPN_ID) query.
 *
 * For every entry in @list, queries the name server for the fabric port
 * name bound to that entry's port ID and stores it in fabric_port_name.
 * Requires an iiDMA-capable adapter. Returns QLA_SUCCESS or an error code.
 */
2704 qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
2706 int rval = QLA_SUCCESS;
2708 struct qla_hw_data *ha = vha->hw;
2709 ms_iocb_entry_t *ms_pkt;
2710 struct ct_sns_req *ct_req;
2711 struct ct_sns_rsp *ct_rsp;
2714 if (!IS_IIDMA_CAPABLE(ha))
2715 return QLA_FUNCTION_FAILED;
/* Shared CT buffer; request and response use the same DMA region. */
2717 arg.iocb = ha->ms_iocb;
2718 arg.req_dma = ha->ct_sns_dma;
2719 arg.rsp_dma = ha->ct_sns_dma;
2720 arg.req_size = GFPN_ID_REQ_SIZE;
2721 arg.rsp_size = GFPN_ID_RSP_SIZE;
2722 arg.nport_handle = NPH_SNS;
2724 for (i = 0; i < ha->max_fibre_devices; i++) {
2726 /* Prepare common MS IOCB */
2727 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2729 /* Prepare CT request */
2730 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFPN_ID_CMD,
2732 ct_rsp = &ha->ct_sns->p.rsp;
2734 /* Prepare CT arguments -- port_id */
2735 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2736 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2737 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2739 /* Execute MS IOCB */
2740 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2741 sizeof(ms_iocb_entry_t));
2742 if (rval != QLA_SUCCESS) {
2744 ql_dbg(ql_dbg_disc, vha, 0x2023,
2745 "GFPN_ID issue IOCB failed (%d).\n", rval);
2747 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2748 "GFPN_ID") != QLA_SUCCESS) {
2749 rval = QLA_FUNCTION_FAILED;
2752 /* Save fabric portname */
2753 memcpy(list[i].fabric_port_name,
2754 ct_rsp->rsp.gfpn_id.port_name, WWN_SIZE);
2757 /* Last device exit. */
/* rsvd_1 is used as an end-of-list sentinel in sw_info entries. */
2758 if (list[i].d_id.b.rsvd_1 != 0)
/*
 * qla24xx_prep_ct_fm_req() - initialize a CT_IU preamble for a Fabric
 * Management service request (GS type 0xFA, subtype 0x01).
 * @p: CT packet buffer to zero and fill.
 * @cmd: CT command code.
 * @rsp_size: expected response size; encoded as (rsp_size - 16) / 4 words.
 * Returns a pointer to the prepared request (return statement outside
 * this excerpt).
 */
2766 static inline struct ct_sns_req *
2767 qla24xx_prep_ct_fm_req(struct ct_sns_pkt *p, uint16_t cmd,
2770 memset(p, 0, sizeof(struct ct_sns_pkt));
2772 p->p.req.header.revision = 0x01;
2773 p->p.req.header.gs_type = 0xFA;
2774 p->p.req.header.gs_subtype = 0x01;
2775 p->p.req.command = cpu_to_be16(cmd);
2776 p->p.req.max_rsp_size = cpu_to_be16((rsp_size - 16) / 4);
2782 * qla2x00_gpsc() - FCS Get Port Speed Capabilities (GPSC) query.
2784 * @list: switch info entries to populate
2786 * Returns 0 on success.
/*
 * qla2x00_gpsc() - Fabric Management Get Port Speed Capabilities (GPSC).
 *
 * For each entry in @list, queries the management server (by fabric port
 * name) and records the negotiated port speed in fp_speed. Disables
 * further GPSC queries if the switch reports the command unsupported.
 * Requires iiDMA capability, GPSC support, and a management-server login.
 * Returns 0 on success.
 */
2789 qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
2793 struct qla_hw_data *ha = vha->hw;
2794 ms_iocb_entry_t *ms_pkt;
2795 struct ct_sns_req *ct_req;
2796 struct ct_sns_rsp *ct_rsp;
2799 if (!IS_IIDMA_CAPABLE(ha))
2800 return QLA_FUNCTION_FAILED;
2801 if (!ha->flags.gpsc_supported)
2802 return QLA_FUNCTION_FAILED;
/* GPSC goes to the management server, not the SNS — login first. */
2804 rval = qla2x00_mgmt_svr_login(vha);
2808 arg.iocb = ha->ms_iocb;
2809 arg.req_dma = ha->ct_sns_dma;
2810 arg.rsp_dma = ha->ct_sns_dma;
2811 arg.req_size = GPSC_REQ_SIZE;
2812 arg.rsp_size = GPSC_RSP_SIZE;
2813 arg.nport_handle = vha->mgmt_svr_loop_id;
2815 for (i = 0; i < ha->max_fibre_devices; i++) {
2817 /* Prepare common MS IOCB */
2818 ms_pkt = qla24xx_prep_ms_iocb(vha, &arg);
2820 /* Prepare CT request */
2821 ct_req = qla24xx_prep_ct_fm_req(ha->ct_sns, GPSC_CMD,
2823 ct_rsp = &ha->ct_sns->p.rsp;
2825 /* Prepare CT arguments -- port_name */
2826 memcpy(ct_req->req.gpsc.port_name, list[i].fabric_port_name,
2829 /* Execute MS IOCB */
2830 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2831 sizeof(ms_iocb_entry_t));
2832 if (rval != QLA_SUCCESS) {
2834 ql_dbg(ql_dbg_disc, vha, 0x2059,
2835 "GPSC issue IOCB failed (%d).\n", rval);
2836 } else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2837 "GPSC")) != QLA_SUCCESS) {
2838 /* FM command unsupported? */
/* Switch rejects GPSC entirely: stop asking on this adapter. */
2839 if (rval == QLA_INVALID_COMMAND &&
2840 (ct_rsp->header.reason_code ==
2841 CT_REASON_INVALID_COMMAND_CODE ||
2842 ct_rsp->header.reason_code ==
2843 CT_REASON_COMMAND_UNSUPPORTED)) {
2844 ql_dbg(ql_dbg_disc, vha, 0x205a,
2845 "GPSC command unsupported, disabling "
2847 ha->flags.gpsc_supported = 0;
2848 rval = QLA_FUNCTION_FAILED;
2851 rval = QLA_FUNCTION_FAILED;
2853 /* Save port-speed */
/* Translate the GPSC speed code into the driver's PORT_SPEED_* enum. */
2854 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
2856 list[i].fp_speed = PORT_SPEED_1GB;
2859 list[i].fp_speed = PORT_SPEED_2GB;
2862 list[i].fp_speed = PORT_SPEED_4GB;
2865 list[i].fp_speed = PORT_SPEED_10GB;
2868 list[i].fp_speed = PORT_SPEED_8GB;
2871 list[i].fp_speed = PORT_SPEED_16GB;
2874 list[i].fp_speed = PORT_SPEED_32GB;
2878 ql_dbg(ql_dbg_disc, vha, 0x205b,
2879 "GPSC ext entry - fpn "
2880 "%8phN speeds=%04x speed=%04x.\n",
2881 list[i].fabric_port_name,
2882 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
2883 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
2886 /* Last device exit. */
2887 if (list[i].d_id.b.rsvd_1 != 0)
2895 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
2898 * @list: switch info entries to populate
/*
 * qla2x00_gff_id() - SNS Get FC-4 Features (GFF_ID) query.
 *
 * For each entry in @list, queries the name server for the port's FC-4
 * feature bits and classifies the port: FC4_TYPE_FCP_SCSI when any FCP
 * SCSI feature bit is set, FC4_TYPE_OTHER when not, FC4_TYPE_UNKNOWN when
 * the query could not be made (non-FWI2 adapter or CT failure). Also
 * captures the NVMe feature nibble into fc4f_nvme.
 */
2902 qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
2907 ms_iocb_entry_t *ms_pkt;
2908 struct ct_sns_req *ct_req;
2909 struct ct_sns_rsp *ct_rsp;
2910 struct qla_hw_data *ha = vha->hw;
2911 uint8_t fcp_scsi_features = 0;
2914 for (i = 0; i < ha->max_fibre_devices; i++) {
2915 /* Set default FC4 Type as UNKNOWN so the default is to
2916 * Process this port */
2917 list[i].fc4_type = FC4_TYPE_UNKNOWN;
2919 /* Do not attempt GFF_ID if we are not FWI_2 capable */
2920 if (!IS_FWI2_CAPABLE(ha))
2923 arg.iocb = ha->ms_iocb;
2924 arg.req_dma = ha->ct_sns_dma;
2925 arg.rsp_dma = ha->ct_sns_dma;
2926 arg.req_size = GFF_ID_REQ_SIZE;
2927 arg.rsp_size = GFF_ID_RSP_SIZE;
2928 arg.nport_handle = NPH_SNS;
2930 /* Prepare common MS IOCB */
2931 ms_pkt = ha->isp_ops->prep_ms_iocb(vha, &arg);
2933 /* Prepare CT request */
2934 ct_req = qla2x00_prep_ct_req(ha->ct_sns, GFF_ID_CMD,
2936 ct_rsp = &ha->ct_sns->p.rsp;
2938 /* Prepare CT arguments -- port_id */
2939 ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain;
2940 ct_req->req.port_id.port_id[1] = list[i].d_id.b.area;
2941 ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa;
2943 /* Execute MS IOCB */
2944 rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
2945 sizeof(ms_iocb_entry_t));
2947 if (rval != QLA_SUCCESS) {
2948 ql_dbg(ql_dbg_disc, vha, 0x205c,
2949 "GFF_ID issue IOCB failed (%d).\n", rval);
2950 } else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
2951 "GFF_ID") != QLA_SUCCESS) {
2952 ql_dbg(ql_dbg_disc, vha, 0x205d,
2953 "GFF_ID IOCB status had a failure status code.\n");
/* Success: low nibble of the FCP feature byte classifies the port. */
2956 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
2957 fcp_scsi_features &= 0x0f;
2959 if (fcp_scsi_features)
2960 list[i].fc4_type = FC4_TYPE_FCP_SCSI;
2962 list[i].fc4_type = FC4_TYPE_OTHER;
/* NVMe feature nibble (FC-NVMe, type 28h). */
2965 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
2966 list[i].fc4f_nvme &= 0xf;
2969 /* Last device exit. */
2970 if (list[i].d_id.b.rsvd_1 != 0)
2975 /* GID_PN completion processing. */
/*
 * qla24xx_handle_gidpn_event() - process GID_PN (get port ID by WWPN)
 * completion for a remote port.
 *
 * Validates the completion against the fcport's login/RSCN generation
 * counters; stale completions trigger a GID_PN retry. On a valid
 * completion, either revalidates the session (ADISC/GNL), updates the
 * port ID and tears down the stale session when the ID changed, or — on
 * GID_PN failure — schedules session deletion / fresh login.
 */
2976 void qla24xx_handle_gidpn_event(scsi_qla_host_t *vha, struct event_arg *ea)
2978 fc_port_t *fcport = ea->fcport;
2980 ql_dbg(ql_dbg_disc, vha, 0x201d,
2981 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
2982 __func__, fcport->port_name, fcport->disc_state,
2983 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
2984 fcport->rscn_gen, ea->sp->gen1, fcport->loop_id);
/* Port is being torn down — nothing to do. */
2986 if (fcport->disc_state == DSC_DELETE_PEND)
2989 if (ea->sp->gen2 != fcport->login_gen) {
2990 /* PLOGI/PRLI/LOGO came in while cmd was out.*/
2991 ql_dbg(ql_dbg_disc, vha, 0x201e,
2992 "%s %8phC generation changed rscn %d|%d n",
2993 __func__, fcport->port_name, fcport->last_rscn_gen,
/* RSCN generation still current: the answer is usable. */
2999 if (ea->sp->gen1 == fcport->rscn_gen) {
3000 fcport->scan_state = QLA_FCPORT_FOUND;
3001 fcport->flags |= FCF_FABRIC_DEVICE;
3003 if (fcport->d_id.b24 == ea->id.b24) {
3004 /* cable plugged into the same place */
3005 switch (vha->host->active_mode) {
3007 if (fcport->fw_login_state ==
3011 * Late RSCN was delivered.
3012 * Remote port already login'ed.
3014 ql_dbg(ql_dbg_disc, vha, 0x201f,
3015 "%s %d %8phC post adisc\n",
3018 data[0] = data[1] = 0;
3019 qla2x00_post_async_adisc_work(
3023 case MODE_INITIATOR:
3026 ql_dbg(ql_dbg_disc, vha, 0x201f,
3027 "%s %d %8phC post %s\n", __func__,
3028 __LINE__, fcport->port_name,
3029 (atomic_read(&fcport->state) ==
3030 FCS_ONLINE) ? "adisc" : "gnl");
/* Online sessions get an ADISC revalidation; otherwise fetch
 * the login state via GNL. */
3032 if (atomic_read(&fcport->state) ==
3036 data[0] = data[1] = 0;
3037 qla2x00_post_async_adisc_work(
3040 qla24xx_post_gnl_work(vha,
3045 } else { /* fcport->d_id.b24 != ea->id.b24 */
/* Port ID moved: record the new ID and delete the stale session. */
3046 fcport->d_id.b24 = ea->id.b24;
3047 fcport->id_changed = 1;
3048 if (fcport->deleted != QLA_SESS_DELETED) {
3049 ql_dbg(ql_dbg_disc, vha, 0x2021,
3050 "%s %d %8phC post del sess\n",
3051 __func__, __LINE__, fcport->port_name);
3052 qlt_schedule_sess_for_deletion(fcport);
3055 } else { /* ea->sp->gen1 != fcport->rscn_gen */
3056 ql_dbg(ql_dbg_disc, vha, 0x2022,
3057 "%s %d %8phC post gidpn\n",
3058 __func__, __LINE__, fcport->port_name);
3059 /* rscn came in while cmd was out */
3060 qla24xx_post_gidpn_work(vha, fcport);
3062 } else { /* ea->rc */
/* GID_PN itself failed; act only if both generations are current. */
3064 if (ea->sp->gen1 == fcport->rscn_gen) {
3065 if (ea->sp->gen2 == fcport->login_gen) {
3066 ql_dbg(ql_dbg_disc, vha, 0x2042,
3067 "%s %d %8phC post del sess\n", __func__,
3068 __LINE__, fcport->port_name);
3069 qlt_schedule_sess_for_deletion(fcport);
3071 ql_dbg(ql_dbg_disc, vha, 0x2045,
3072 "%s %d %8phC login\n", __func__, __LINE__,
3074 qla24xx_fcport_handle_login(vha, fcport);
3077 ql_dbg(ql_dbg_disc, vha, 0x2049,
3078 "%s %d %8phC post gidpn\n", __func__, __LINE__,
3080 qla24xx_post_gidpn_work(vha, fcport);
/*
 * qla2x00_async_gidpn_sp_done() - completion callback for the async
 * GID_PN SRB. Extracts the returned port ID from the CT response,
 * requeues the query on timeout, and otherwise forwards an
 * FCME_GIDPN_DONE event to the fcport event handler.
 */
3085 static void qla2x00_async_gidpn_sp_done(void *s, int res)
3088 struct scsi_qla_host *vha = sp->vha;
3089 fc_port_t *fcport = sp->fcport;
3090 u8 *id = fcport->ct_desc.ct_sns->p.rsp.rsp.gid_pn.port_id;
3091 struct event_arg ea;
3093 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3095 memset(&ea, 0, sizeof(ea));
/* 3-byte port ID from the CT response: domain/area/al_pa. */
3097 ea.id.b.domain = id[0];
3098 ea.id.b.area = id[1];
3099 ea.id.b.al_pa = id[2];
3102 ea.event = FCME_GIDPN_DONE;
3104 if (res == QLA_FUNCTION_TIMEOUT) {
/* Timed out: requeue the GID_PN and skip event delivery. */
3105 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3106 "Async done-%s WWPN %8phC timed out.\n",
3107 sp->name, fcport->port_name);
3108 qla24xx_post_gidpn_work(sp->vha, fcport);
3112 ql_dbg(ql_dbg_disc, sp->vha, 0xffff,
3113 "Async done-%s fail res %x, WWPN %8phC\n",
3114 sp->name, res, fcport->port_name);
3116 ql_dbg(ql_dbg_disc, vha, 0x204f,
3117 "Async done-%s good WWPN %8phC ID %3phC\n",
3118 sp->name, fcport->port_name, id);
3121 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gidpn() - issue an asynchronous GID_PN (port ID by WWPN)
 * name-server query for @fcport.
 *
 * Uses the fcport's private ct_desc buffer for both CT request and
 * response. Marks the port FCF_ASYNC_SENT while the SRB is in flight;
 * completion is handled by qla2x00_async_gidpn_sp_done(). Returns
 * QLA_SUCCESS if the SRB was started.
 */
3126 int qla24xx_async_gidpn(scsi_qla_host_t *vha, fc_port_t *fcport)
3128 int rval = QLA_FUNCTION_FAILED;
3129 struct ct_sns_req *ct_req;
/* Refuse if the host is down or a query is already outstanding. */
3132 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3135 fcport->disc_state = DSC_GID_PN;
3136 fcport->scan_state = QLA_FCPORT_SCAN;
3137 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
3141 fcport->flags |= FCF_ASYNC_SENT;
3142 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generation counters so the completion can detect staleness. */
3144 sp->gen1 = fcport->rscn_gen;
3145 sp->gen2 = fcport->login_gen;
3147 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3149 /* CT_IU preamble */
3150 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GID_PN_CMD,
3154 memcpy(ct_req->req.gid_pn.port_name, fcport->port_name,
3157 /* req & rsp use the same buffer */
3158 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3159 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3160 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3161 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3162 sp->u.iocb_cmd.u.ctarg.req_size = GID_PN_REQ_SIZE;
3163 sp->u.iocb_cmd.u.ctarg.rsp_size = GID_PN_RSP_SIZE;
3164 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3166 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3167 sp->done = qla2x00_async_gidpn_sp_done;
3169 rval = qla2x00_start_sp(sp);
3170 if (rval != QLA_SUCCESS)
3173 ql_dbg(ql_dbg_disc, vha, 0x20a4,
3174 "Async-%s - %8phC hdl=%x loopid=%x portid %02x%02x%02x.\n",
3175 sp->name, fcport->port_name,
3176 sp->handle, fcport->loop_id, fcport->d_id.b.domain,
3177 fcport->d_id.b.area, fcport->d_id.b.al_pa);
/* Error path: clear the active flag so a retry can be posted. */
3183 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * qla24xx_post_gidpn_work() - queue a GID_PN query for @fcport on the
 * DPC work list. Skipped when the loop is neither READY nor UP, or the
 * driver is unloading. Sets FCF_ASYNC_ACTIVE before posting.
 */
3187 int qla24xx_post_gidpn_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3189 struct qla_work_evt *e;
3192 ls = atomic_read(&vha->loop_state);
3193 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
3194 test_bit(UNLOADING, &vha->dpc_flags))
3197 e = qla2x00_alloc_work(vha, QLA_EVT_GIDPN);
3199 return QLA_FUNCTION_FAILED;
3201 e->u.fcport.fcport = fcport;
3202 fcport->flags |= FCF_ASYNC_ACTIVE;
3203 return qla2x00_post_work(vha, e);
/*
 * qla24xx_post_gpsc_work() - queue an async GPSC (port speed) query for
 * @fcport on the DPC work list. Sets FCF_ASYNC_ACTIVE before posting.
 */
3206 int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport)
3208 struct qla_work_evt *e;
3210 e = qla2x00_alloc_work(vha, QLA_EVT_GPSC);
3212 return QLA_FUNCTION_FAILED;
3214 e->u.fcport.fcport = fcport;
3215 fcport->flags |= FCF_ASYNC_ACTIVE;
3216 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gpsc_event() - process GPSC completion for @ea->fcport.
 * Drops stale completions (login or RSCN generation changed — the latter
 * triggers a GID_PN requery); otherwise posts iiDMA work to apply the
 * learned port speed.
 */
3219 void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea)
3221 struct fc_port *fcport = ea->fcport;
3223 ql_dbg(ql_dbg_disc, vha, 0x20d8,
3224 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d lid %d\n",
3225 __func__, fcport->port_name, fcport->disc_state,
3226 fcport->fw_login_state, ea->rc, ea->sp->gen2, fcport->login_gen,
3227 ea->sp->gen2, fcport->rscn_gen|ea->sp->gen1, fcport->loop_id);
3229 if (fcport->disc_state == DSC_DELETE_PEND)
3232 if (ea->sp->gen2 != fcport->login_gen) {
3233 /* target side must have changed it. */
3234 ql_dbg(ql_dbg_disc, vha, 0x20d3,
3235 "%s %8phC generation changed\n",
3236 __func__, fcport->port_name);
3238 } else if (ea->sp->gen1 != fcport->rscn_gen) {
/* RSCN arrived while GPSC was outstanding: requery the port ID. */
3239 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
3240 __func__, __LINE__, fcport->port_name);
3241 qla24xx_post_gidpn_work(vha, fcport);
3245 qla_post_iidma_work(vha, fcport);
/*
 * qla24xx_async_gpsc_sp_done() - completion callback for the async GPSC
 * SRB. Translates the returned speed code into the fcport's fp_speed,
 * disables GPSC globally if the switch reports the command unsupported,
 * and forwards an FCME_GPSC_DONE event to the fcport event handler.
 */
3248 static void qla24xx_async_gpsc_sp_done(void *s, int res)
3251 struct scsi_qla_host *vha = sp->vha;
3252 struct qla_hw_data *ha = vha->hw;
3253 fc_port_t *fcport = sp->fcport;
3254 struct ct_sns_rsp *ct_rsp;
3255 struct event_arg ea;
3257 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3259 ql_dbg(ql_dbg_disc, vha, 0x2053,
3260 "Async done-%s res %x, WWPN %8phC \n",
3261 sp->name, res, fcport->port_name);
3263 fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
3265 if (res == QLA_FUNCTION_TIMEOUT)
3268 if (res == (DID_ERROR << 16)) {
3269 /* entry status error */
/* Switch rejected the FM command: stop issuing GPSC on this HBA. */
3272 if ((ct_rsp->header.reason_code ==
3273 CT_REASON_INVALID_COMMAND_CODE) ||
3274 (ct_rsp->header.reason_code ==
3275 CT_REASON_COMMAND_UNSUPPORTED)) {
3276 ql_dbg(ql_dbg_disc, vha, 0x2019,
3277 "GPSC command unsupported, disabling query.\n");
3278 ha->flags.gpsc_supported = 0;
/* Decode the GPSC speed code into the driver's PORT_SPEED_* enum. */
3282 switch (be16_to_cpu(ct_rsp->rsp.gpsc.speed)) {
3284 fcport->fp_speed = PORT_SPEED_1GB;
3287 fcport->fp_speed = PORT_SPEED_2GB;
3290 fcport->fp_speed = PORT_SPEED_4GB;
3293 fcport->fp_speed = PORT_SPEED_10GB;
3296 fcport->fp_speed = PORT_SPEED_8GB;
3299 fcport->fp_speed = PORT_SPEED_16GB;
3302 fcport->fp_speed = PORT_SPEED_32GB;
3306 ql_dbg(ql_dbg_disc, vha, 0x2054,
3307 "Async-%s OUT WWPN %8phC speeds=%04x speed=%04x.\n",
3308 sp->name, fcport->fabric_port_name,
3309 be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
3310 be16_to_cpu(ct_rsp->rsp.gpsc.speed));
3312 memset(&ea, 0, sizeof(ea));
3313 ea.event = FCME_GPSC_DONE;
3317 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gpsc() - issue an asynchronous GPSC (Get Port Speed
 * Capabilities) fabric-management query for @fcport, keyed by its fabric
 * port name. Uses the fcport's ct_desc buffer for request and response;
 * completion is handled by qla24xx_async_gpsc_sp_done(). Returns
 * QLA_SUCCESS if the SRB was started.
 */
3323 int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport)
3325 int rval = QLA_FUNCTION_FAILED;
3326 struct ct_sns_req *ct_req;
3329 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3332 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3336 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generation counters for staleness checks at completion. */
3338 sp->gen1 = fcport->rscn_gen;
3339 sp->gen2 = fcport->login_gen;
3341 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3343 /* CT_IU preamble */
3344 ct_req = qla24xx_prep_ct_fm_req(fcport->ct_desc.ct_sns, GPSC_CMD,
3348 memcpy(ct_req->req.gpsc.port_name, fcport->fabric_port_name,
/* Request and response share the fcport's CT DMA buffer. */
3351 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3352 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3353 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3354 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3355 sp->u.iocb_cmd.u.ctarg.req_size = GPSC_REQ_SIZE;
3356 sp->u.iocb_cmd.u.ctarg.rsp_size = GPSC_RSP_SIZE;
3357 sp->u.iocb_cmd.u.ctarg.nport_handle = vha->mgmt_svr_loop_id;
3359 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3360 sp->done = qla24xx_async_gpsc_sp_done;
3362 ql_dbg(ql_dbg_disc, vha, 0x205e,
3363 "Async-%s %8phC hdl=%x loopid=%x portid=%02x%02x%02x.\n",
3364 sp->name, fcport->port_name, sp->handle,
3365 fcport->loop_id, fcport->d_id.b.domain,
3366 fcport->d_id.b.area, fcport->d_id.b.al_pa);
3368 rval = qla2x00_start_sp(sp);
3369 if (rval != QLA_SUCCESS)
/* Error path: clear both async flags so callers may retry. */
3375 fcport->flags &= ~FCF_ASYNC_SENT;
3377 fcport->flags &= ~FCF_ASYNC_ACTIVE;
/*
 * qla24xx_post_gpnid_work() - queue a GPN_ID (WWPN by port ID) query for
 * @id on the DPC work list. Refused while the driver is unloading.
 */
3381 int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
3383 struct qla_work_evt *e;
3385 if (test_bit(UNLOADING, &vha->dpc_flags))
3388 e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
3390 return QLA_FUNCTION_FAILED;
3392 e->u.gpnid.id = *id;
3393 return qla2x00_post_work(vha, e);
/*
 * qla24xx_sp_unmap() - release the DMA-coherent buffers attached to an
 * SRB after its command has completed, then (outside this excerpt) free
 * the SRB itself. Handles the ELS PLOGI payload buffers and, for CT
 * pass-through commands, the separately-sized request/response buffers.
 * Pointers are NULLed after freeing to prevent double-free.
 */
3396 void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
3398 struct srb_iocb *c = &sp->u.iocb_cmd;
/* ELS PLOGI: free the transmit and receive payloads if allocated. */
3402 if (c->u.els_plogi.els_plogi_pyld)
3403 dma_free_coherent(&vha->hw->pdev->dev,
3404 c->u.els_plogi.tx_size,
3405 c->u.els_plogi.els_plogi_pyld,
3406 c->u.els_plogi.els_plogi_pyld_dma);
3408 if (c->u.els_plogi.els_resp_pyld)
3409 dma_free_coherent(&vha->hw->pdev->dev,
3410 c->u.els_plogi.rx_size,
3411 c->u.els_plogi.els_resp_pyld,
3412 c->u.els_plogi.els_resp_pyld_dma);
3414 case SRB_CT_PTHRU_CMD:
/* CT pass-through: request and response buffers freed by their
 * recorded allocation sizes. */
3416 if (sp->u.iocb_cmd.u.ctarg.req) {
3417 dma_free_coherent(&vha->hw->pdev->dev,
3418 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3419 sp->u.iocb_cmd.u.ctarg.req,
3420 sp->u.iocb_cmd.u.ctarg.req_dma);
3421 sp->u.iocb_cmd.u.ctarg.req = NULL;
3424 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3425 dma_free_coherent(&vha->hw->pdev->dev,
3426 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3427 sp->u.iocb_cmd.u.ctarg.rsp,
3428 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3429 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gpnid_event() - process GPN_ID (WWPN by port ID)
 * completion.
 *
 * Cable-pull case: marks every fcport at the reported ID for rescan and
 * schedules stale sessions for deletion. Cable-plug case: looks up the
 * fcport by WWPN, resolves N_Port-ID conflicts with other sessions,
 * updates the port ID, and either revalidates via ADISC, re-runs login,
 * or creates a brand-new session when the WWPN is unknown.
 */
3437 void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3439 fc_port_t *fcport, *conflict, *t;
3442 ql_dbg(ql_dbg_disc, vha, 0xffff,
3443 "%s %d port_id: %06x\n",
3444 __func__, __LINE__, ea->id.b24);
3447 /* cable is disconnected */
3448 list_for_each_entry_safe(fcport, t, &vha->vp_fcports, list) {
3449 if (fcport->d_id.b24 == ea->id.b24) {
3450 ql_dbg(ql_dbg_disc, vha, 0xffff,
3451 "%s %d %8phC DS %d\n",
3454 fcport->disc_state);
3455 fcport->scan_state = QLA_FCPORT_SCAN;
3456 switch (fcport->disc_state) {
3458 case DSC_DELETE_PEND:
3461 ql_dbg(ql_dbg_disc, vha, 0xffff,
3462 "%s %d %8phC post del sess\n",
3465 qlt_schedule_sess_for_deletion(fcport);
3471 /* cable is connected */
3472 fcport = qla2x00_find_fcport_by_wwpn(vha, ea->port_name, 1);
/* Known WWPN: purge any other session squatting on this N_Port ID. */
3474 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3476 if ((conflict->d_id.b24 == ea->id.b24) &&
3477 (fcport != conflict)) {
3478 /* 2 fcports with conflict Nport ID or
3479 * an existing fcport is having nport ID
3480 * conflict with new fcport.
3483 ql_dbg(ql_dbg_disc, vha, 0xffff,
3484 "%s %d %8phC DS %d\n",
3486 conflict->port_name,
3487 conflict->disc_state);
3488 conflict->scan_state = QLA_FCPORT_SCAN;
3489 switch (conflict->disc_state) {
3491 case DSC_DELETE_PEND:
3494 ql_dbg(ql_dbg_disc, vha, 0xffff,
3495 "%s %d %8phC post del sess\n",
3497 conflict->port_name);
3498 qlt_schedule_sess_for_deletion
3506 fcport->scan_state = QLA_FCPORT_FOUND;
3507 fcport->flags |= FCF_FABRIC_DEVICE;
/* First sighting since login was exhausted: re-arm retry counter. */
3508 if (fcport->login_retry == 0) {
3509 fcport->login_retry =
3510 vha->hw->login_retry_count;
3511 ql_dbg(ql_dbg_disc, vha, 0xffff,
3512 "Port login retry %8phN, lid 0x%04x cnt=%d.\n",
3513 fcport->port_name, fcport->loop_id,
3514 fcport->login_retry);
3516 switch (fcport->disc_state) {
3517 case DSC_LOGIN_COMPLETE:
3518 /* recheck session is still intact. */
3519 ql_dbg(ql_dbg_disc, vha, 0x210d,
3520 "%s %d %8phC revalidate session with ADISC\n",
3521 __func__, __LINE__, fcport->port_name);
3522 data[0] = data[1] = 0;
3523 qla2x00_post_async_adisc_work(vha, fcport,
3527 ql_dbg(ql_dbg_disc, vha, 0x210d,
3528 "%s %d %8phC login\n", __func__, __LINE__,
3530 fcport->d_id = ea->id;
3531 qla24xx_fcport_handle_login(vha, fcport);
3533 case DSC_DELETE_PEND:
3534 fcport->d_id = ea->id;
3537 fcport->d_id = ea->id;
/* Unknown WWPN: clear any session currently holding this ID... */
3541 list_for_each_entry_safe(conflict, t, &vha->vp_fcports,
3543 if (conflict->d_id.b24 == ea->id.b24) {
3544 /* 2 fcports with conflict Nport ID or
3545 * an existing fcport is having nport ID
3546 * conflict with new fcport.
3548 ql_dbg(ql_dbg_disc, vha, 0xffff,
3549 "%s %d %8phC DS %d\n",
3551 conflict->port_name,
3552 conflict->disc_state);
3554 conflict->scan_state = QLA_FCPORT_SCAN;
3555 switch (conflict->disc_state) {
3557 case DSC_DELETE_PEND:
3560 ql_dbg(ql_dbg_disc, vha, 0xffff,
3561 "%s %d %8phC post del sess\n",
3563 conflict->port_name);
3564 qlt_schedule_sess_for_deletion
3571 /* create new fcport */
/* ...then post creation of a brand-new session for this WWPN/ID. */
3572 ql_dbg(ql_dbg_disc, vha, 0x2065,
3573 "%s %d %8phC post new sess\n",
3574 __func__, __LINE__, ea->port_name);
3575 qla24xx_post_newsess_work(vha, &ea->id,
3576 ea->port_name, NULL, NULL, FC4_TYPE_UNKNOWN);
/*
 * qla2x00_async_gpnid_sp_done() - completion callback for the async
 * GPN_ID SRB.
 *
 * Removes the SRB from the per-host gpnid_list (under tgt.sess_lock),
 * requeues the query on timeout or when another RSCN arrived for the
 * same N_Port ID (sp->gen1), otherwise forwards an FCME_GPNID_DONE
 * event. Buffer teardown is deferred to a QLA_EVT_UNMAP work item; if
 * that cannot be allocated, the DMA buffers are freed inline here.
 */
3581 static void qla2x00_async_gpnid_sp_done(void *s, int res)
3584 struct scsi_qla_host *vha = sp->vha;
3585 struct ct_sns_req *ct_req =
3586 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
3587 struct ct_sns_rsp *ct_rsp =
3588 (struct ct_sns_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
3589 struct event_arg ea;
3590 struct qla_work_evt *e;
3591 unsigned long flags;
3594 ql_dbg(ql_dbg_disc, vha, 0x2066,
3595 "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n",
3596 sp->name, res, sp->gen1, ct_req->req.port_id.port_id,
3597 ct_rsp->rsp.gpn_id.port_name);
3599 ql_dbg(ql_dbg_disc, vha, 0x2066,
3600 "Async done-%s good rscn gen %d ID %3phC. %8phC\n",
3601 sp->name, sp->gen1, ct_req->req.port_id.port_id,
3602 ct_rsp->rsp.gpn_id.port_name);
3604 memset(&ea, 0, sizeof(ea));
3605 memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE);
/* Echo back the queried port ID from the request payload. */
3607 ea.id.b.domain = ct_req->req.port_id.port_id[0];
3608 ea.id.b.area = ct_req->req.port_id.port_id[1];
3609 ea.id.b.al_pa = ct_req->req.port_id.port_id[2];
3611 ea.event = FCME_GPNID_DONE;
3613 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3614 list_del(&sp->elem);
3615 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3618 if (res == QLA_FUNCTION_TIMEOUT) {
3619 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3623 } else if (sp->gen1) {
3624 /* There was another RSCN for this Nport ID */
3625 qla24xx_post_gpnid_work(sp->vha, &ea.id);
3630 qla2x00_fcport_event_handler(vha, &ea);
3632 e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
3634 /* please ignore kernel warning. otherwise, we have mem leak. */
/* UNMAP work allocation failed: free the CT buffers synchronously. */
3635 if (sp->u.iocb_cmd.u.ctarg.req) {
3636 dma_free_coherent(&vha->hw->pdev->dev,
3637 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
3638 sp->u.iocb_cmd.u.ctarg.req,
3639 sp->u.iocb_cmd.u.ctarg.req_dma);
3640 sp->u.iocb_cmd.u.ctarg.req = NULL;
3642 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3643 dma_free_coherent(&vha->hw->pdev->dev,
3644 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
3645 sp->u.iocb_cmd.u.ctarg.rsp,
3646 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3647 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
3655 qla2x00_post_work(vha, e);
3658 /* Get WWPN with Nport ID. */
/*
 * qla24xx_async_gpnid() - issue an async GPN_ID CT query for @id.
 *
 * @vha: virtual host the query is issued on
 * @id:  N_Port ID whose WWPN is being looked up
 *
 * Allocates an srb plus coherent DMA request/response buffers, builds the
 * GPN_ID CT_IU, links the srb onto vha->gpnid_list (deduplicating against
 * an already-in-flight query for the same ID), and starts the command.
 * Completion is handled by qla2x00_async_gpnid_sp_done().
 *
 * Returns QLA_SUCCESS when the command was started, otherwise an error
 * (visible failure paths free the DMA buffers).
 *
 * NOTE(review): the in-flight list is manipulated under
 * vha->hw->tgt.sess_lock on the add path but under vha->hw->vport_slock
 * on the start-failure removal path below — these should be the same
 * lock; confirm against upstream qla_gs.c.
 *
 * NOTE(review): intermediate source lines (declarations of sp/tsp,
 * error labels, braces) are missing from this capture.
 */
3659 int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
3661 int rval = QLA_FUNCTION_FAILED;
3662 struct ct_sns_req *ct_req;
3664 struct ct_sns_pkt *ct_sns;
3665 unsigned long flags;
3667 if (!vha->flags.online)
3670 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
3674 sp->type = SRB_CT_PTHRU_CMD;
3676 sp->u.iocb_cmd.u.ctarg.id = *id;
3678 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
/* Skip issuing if a GPN_ID for the same N_Port ID is already in flight. */
3680 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
3681 list_for_each_entry(tsp, &vha->gpnid_list, elem) {
3682 if (tsp->u.iocb_cmd.u.ctarg.id.b24 == id->b24) {
3684 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
3689 list_add_tail(&sp->elem, &vha->gpnid_list);
3690 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
/* Coherent DMA buffers for the CT request and response. */
3692 sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
3693 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
3695 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
3696 if (!sp->u.iocb_cmd.u.ctarg.req) {
3697 ql_log(ql_log_warn, vha, 0xd041,
3698 "Failed to allocate ct_sns request.\n");
3702 sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
3703 sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
3705 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
3706 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
3707 ql_log(ql_log_warn, vha, 0xd042,
3708 "Failed to allocate ct_sns request.\n");
3712 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.rsp;
3713 memset(ct_sns, 0, sizeof(*ct_sns));
3715 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
3716 /* CT_IU preamble */
3717 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE);
/* Payload: the N_Port ID being queried. */
3720 ct_req->req.port_id.port_id[0] = id->b.domain;
3721 ct_req->req.port_id.port_id[1] = id->b.area;
3722 ct_req->req.port_id.port_id[2] = id->b.al_pa;
3724 sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE;
3725 sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE;
3726 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3728 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3729 sp->done = qla2x00_async_gpnid_sp_done;
3731 ql_dbg(ql_dbg_disc, vha, 0x2067,
3732 "Async-%s hdl=%x ID %3phC.\n", sp->name,
3733 sp->handle, ct_req->req.port_id.port_id);
3735 rval = qla2x00_start_sp(sp);
3736 if (rval != QLA_SUCCESS)
/* Start failed: unlink from gpnid_list and free DMA buffers. */
3742 spin_lock_irqsave(&vha->hw->vport_slock, flags);
3743 list_del(&sp->elem);
3744 spin_unlock_irqrestore(&vha->hw->vport_slock, flags);
3746 if (sp->u.iocb_cmd.u.ctarg.req) {
3747 dma_free_coherent(&vha->hw->pdev->dev,
3748 sizeof(struct ct_sns_pkt),
3749 sp->u.iocb_cmd.u.ctarg.req,
3750 sp->u.iocb_cmd.u.ctarg.req_dma);
3751 sp->u.iocb_cmd.u.ctarg.req = NULL;
3753 if (sp->u.iocb_cmd.u.ctarg.rsp) {
3754 dma_free_coherent(&vha->hw->pdev->dev,
3755 sizeof(struct ct_sns_pkt),
3756 sp->u.iocb_cmd.u.ctarg.rsp,
3757 sp->u.iocb_cmd.u.ctarg.rsp_dma);
3758 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_handle_gffid_event() - handle GFF_ID completion event by
 * queueing GNL (Get Name List) work for the affected fcport.
 */
3766 void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea)
3768 fc_port_t *fcport = ea->fcport;
3770 qla24xx_post_gnl_work(vha, fcport);
/*
 * qla24xx_async_gffid_sp_done() - completion callback for an async GFF_ID
 * (Get FC-4 Features by N_Port ID) CT query.
 *
 * Decodes the FC-4 Features words for FCP-SCSI and NVMe from the CT
 * response, records them in the fcport, then dispatches an
 * FCME_GFFID_DONE event.
 *
 * NOTE(review): intermediate source lines (srb cast, braces, field
 * assignments' left-hand sides) are missing from this capture.
 */
3773 void qla24xx_async_gffid_sp_done(void *s, int res)
3776 struct scsi_qla_host *vha = sp->vha;
3777 fc_port_t *fcport = sp->fcport;
3778 struct ct_sns_rsp *ct_rsp;
3779 struct event_arg ea;
3781 ql_dbg(ql_dbg_disc, vha, 0x2133,
3782 "Async done-%s res %x ID %x. %8phC\n",
3783 sp->name, res, fcport->d_id.b24, fcport->port_name);
3785 fcport->flags &= ~FCF_ASYNC_SENT;
3786 ct_rsp = &fcport->ct_desc.ct_sns->p.rsp;
3788 * FC-GS-7, 5.2.3.12 FC-4 Features - format
3789 * The format of the FC-4 Features object, as defined by the FC-4,
3790 * Shall be an array of 4-bit values, one for each type code value
/* Low nibble of the FCP-SCSI feature byte indicates FCP support. */
3793 if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) {
3796 ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];
3797 fcport->fc4_type &= 0xf;
/* Low nibble of word 5 byte 0x28 indicates FC-NVMe support. */
3800 if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) {
3801 /* w5 [00:03]/28h */
3803 ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET];
3804 fcport->fc4f_nvme &= 0xf;
3808 memset(&ea, 0, sizeof(ea));
3810 ea.fcport = sp->fcport;
3812 ea.event = FCME_GFFID_DONE;
3814 qla2x00_fcport_event_handler(vha, &ea);
3818 /* Get FC4 Feature with Nport ID. */
/*
 * qla24xx_async_gffid() - issue an async GFF_ID CT query for @fcport.
 *
 * Uses the fcport's pre-allocated ct_desc buffer for both request and
 * response (same DMA buffer).  Sets FCF_ASYNC_SENT for the duration of
 * the command; the visible error path clears it.
 *
 * Returns QLA_SUCCESS when the command was started.
 */
3819 int qla24xx_async_gffid(scsi_qla_host_t *vha, fc_port_t *fcport)
3821 int rval = QLA_FUNCTION_FAILED;
3822 struct ct_sns_req *ct_req;
/* Refuse when offline or a prior async op on this fcport is pending. */
3825 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
3828 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
3832 fcport->flags |= FCF_ASYNC_SENT;
3833 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generation counters to detect RSCN/login races on completion. */
3835 sp->gen1 = fcport->rscn_gen;
3836 sp->gen2 = fcport->login_gen;
3838 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
3839 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
3841 /* CT_IU preamble */
3842 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFF_ID_CMD,
3845 ct_req->req.gff_id.port_id[0] = fcport->d_id.b.domain;
3846 ct_req->req.gff_id.port_id[1] = fcport->d_id.b.area;
3847 ct_req->req.gff_id.port_id[2] = fcport->d_id.b.al_pa;
/* Request and response share the fcport's ct_desc DMA buffer. */
3849 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
3850 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
3851 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
3852 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
3853 sp->u.iocb_cmd.u.ctarg.req_size = GFF_ID_REQ_SIZE;
3854 sp->u.iocb_cmd.u.ctarg.rsp_size = GFF_ID_RSP_SIZE;
3855 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
3857 sp->done = qla24xx_async_gffid_sp_done;
3859 rval = qla2x00_start_sp(sp);
3860 if (rval != QLA_SUCCESS)
3863 ql_dbg(ql_dbg_disc, vha, 0x2132,
3864 "Async-%s hdl=%x %8phC.\n", sp->name,
3865 sp->handle, fcport->port_name);
/* Error path: allow future async commands on this fcport. */
3870 fcport->flags &= ~FCF_ASYNC_SENT;
3874 /* GPN_FT + GNN_FT*/
/*
 * qla2x00_is_a_vp() - check whether @wwn belongs to one of this host's
 * own virtual ports, so fabric scan can skip it.
 *
 * Walks ha->vp_list under vport_slock comparing each vport's WWPN.
 * Returns nonzero when @wwn matches a local vport (return statements are
 * among the lines missing from this capture).
 */
3875 static int qla2x00_is_a_vp(scsi_qla_host_t *vha, u64 wwn)
3877 struct qla_hw_data *ha = vha->hw;
3878 scsi_qla_host_t *vp;
3879 unsigned long flags;
/* Fast path: no virtual hosts configured at all. */
3883 if (!ha->num_vhosts)
3886 spin_lock_irqsave(&ha->vport_slock, flags);
3887 list_for_each_entry(vp, &ha->vp_list, list) {
3888 twwn = wwn_to_u64(vp->port_name);
3894 spin_unlock_irqrestore(&ha->vport_slock, flags);
/*
 * qla24xx_async_gnnft_done() - post-process a completed GPN_FT/GNN_FT
 * fabric scan held in vha->scan.l.
 *
 * Reconciles the switch's port list against vha->vp_fcports:
 *  - drops duplicate N_Port ID entries returned by the switch;
 *  - skips our own WWPN, reserved domains (0xFXxxxx), and local vports;
 *  - marks still-present fcports FOUND (scheduling deletion if their
 *    N_Port ID moved), creates new sessions for unknown ports;
 *  - schedules deletion/relogin for fcports no longer in the fabric,
 *    preserving FCP2 (logout_on_delete = 0) devices.
 * On chip reset or scan failure it retries the scan up to
 * MAX_SCAN_RETRIES via dpc flags.  Finally unmaps the srb and clears
 * SF_SCANNING.
 *
 * NOTE(review): many intermediate lines (loop/branch braces, `continue`s,
 * goto labels, some declarations) are missing from this capture.
 */
3899 void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3904 struct fab_scan_rp *rp, *trp;
3905 unsigned long flags;
3907 u16 dup = 0, dup_cnt = 0;
3909 ql_dbg(ql_dbg_disc, vha, 0xffff,
3910 "%s enter\n", __func__);
/* Abort processing if a chip reset happened since the scan started. */
3912 if (sp->gen1 != vha->hw->base_qpair->chip_reset) {
3913 ql_dbg(ql_dbg_disc, vha, 0xffff,
3914 "%s scan stop due to chip reset %x/%x\n",
3915 sp->name, sp->gen1, vha->hw->base_qpair->chip_reset);
/* Scan failed: retry via dpc up to MAX_SCAN_RETRIES. */
3921 vha->scan.scan_retry++;
3922 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
3923 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
3924 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
3926 ql_dbg(ql_dbg_disc, vha, 0xffff,
3927 "Fabric scan failed on all retries.\n");
3931 vha->scan.scan_retry = 0;
/* Assume every known fcport is gone until the scan list proves otherwise. */
3933 list_for_each_entry(fcport, &vha->vp_fcports, list)
3934 fcport->scan_state = QLA_FCPORT_SCAN;
3936 for (i = 0; i < vha->hw->max_fibre_devices; i++) {
3940 rp = &vha->scan.l[i];
3943 wwn = wwn_to_u64(rp->port_name);
3947 /* Remove duplicate NPORT ID entries from switch data base */
3948 for (k = i + 1; k < vha->hw->max_fibre_devices; k++) {
3949 trp = &vha->scan.l[k];
3950 if (rp->id.b24 == trp->id.b24) {
3953 ql_dbg(ql_dbg_disc + ql_dbg_verbose,
3955 "Detected duplicate NPORT ID from switch data base: ID %06x WWN %8phN WWN %8phN\n",
3956 rp->id.b24, rp->port_name, trp->port_name);
3957 memset(trp, 0, sizeof(*trp));
/* Skip our own port, reserved domains, and our virtual ports. */
3961 if (!memcmp(rp->port_name, vha->port_name, WWN_SIZE))
3964 /* Bypass reserved domain fields. */
3965 if ((rp->id.b.domain & 0xf0) == 0xf0)
3968 /* Bypass virtual ports of the same host. */
3969 if (qla2x00_is_a_vp(vha, wwn))
3972 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3973 if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE))
3975 fcport->scan_needed = 0;
3976 fcport->scan_state = QLA_FCPORT_FOUND;
3979 * If device was not a fabric device before.
3981 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
3982 qla2x00_clear_loop_id(fcport);
3983 fcport->flags |= FCF_FABRIC_DEVICE;
3984 } else if (fcport->d_id.b24 != rp->id.b24) {
/* N_Port ID changed: tear down the stale session, adopt new ID. */
3985 qlt_schedule_sess_for_deletion(fcport);
3987 fcport->d_id.b24 = rp->id.b24;
/* WWPN not known yet: create a new session for it. */
3992 ql_dbg(ql_dbg_disc, vha, 0xffff,
3993 "%s %d %8phC post new sess\n",
3994 __func__, __LINE__, rp->port_name);
3995 qla24xx_post_newsess_work(vha, &rp->id, rp->port_name,
3996 rp->node_name, NULL, rp->fc4type);
4001 ql_log(ql_log_warn, vha, 0xffff,
4002 "Detected %d duplicate NPORT ID(s) from switch data base\n",
4007 * Logout all previous fabric dev marked lost, except FCP2 devices.
4009 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4010 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) {
4011 fcport->scan_needed = 0;
4015 if (fcport->scan_state != QLA_FCPORT_FOUND) {
4016 fcport->scan_needed = 0;
4017 if ((qla_dual_mode_enabled(vha) ||
4018 qla_ini_mode_enabled(vha)) &&
4019 atomic_read(&fcport->state) == FCS_ONLINE) {
4020 if (fcport->loop_id != FC_NO_LOOP_ID) {
4021 if (fcport->flags & FCF_FCP2_DEVICE)
/* FCP2 (e.g. tape) devices: delete session without explicit logout. */
4022 fcport->logout_on_delete = 0;
4024 ql_dbg(ql_dbg_disc, vha, 0x20f0,
4025 "%s %d %8phC post del sess\n",
4029 qlt_schedule_sess_for_deletion(fcport);
4034 if (fcport->scan_needed ||
4035 fcport->disc_state != DSC_LOGIN_COMPLETE) {
4036 if (fcport->login_retry == 0) {
4037 fcport->login_retry =
4038 vha->hw->login_retry_count;
4039 ql_dbg(ql_dbg_disc, vha, 0x20a3,
4040 "Port login retry %8phN, lid 0x%04x retry cnt=%d.\n",
4041 fcport->port_name, fcport->loop_id,
4042 fcport->login_retry);
4044 fcport->scan_needed = 0;
4045 qla24xx_fcport_handle_login(vha, fcport);
/* Release scan resources and clear the in-progress flag. */
4052 qla24xx_sp_unmap(vha, sp);
4053 spin_lock_irqsave(&vha->work_lock, flags);
4054 vha->scan.scan_flags &= ~SF_SCANNING;
4055 spin_unlock_irqrestore(&vha->work_lock, flags);
/* If anything still needs scanning, kick another loop resync. */
4058 list_for_each_entry(fcport, &vha->vp_fcports, list) {
4059 if (fcport->scan_needed) {
4060 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4061 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/*
 * qla2x00_post_gnnft_gpnft_done_work() - queue GPNFT_DONE/GNNFT_DONE
 * post-processing to the driver work queue (interrupt-safe hand-off).
 *
 * Returns QLA_PARAMETER_ERROR for any other event code, or the result
 * of qla2x00_post_work().
 */
4068 static int qla2x00_post_gnnft_gpnft_done_work(struct scsi_qla_host *vha,
4071 struct qla_work_evt *e;
4073 if (cmd != QLA_EVT_GPNFT_DONE && cmd != QLA_EVT_GNNFT_DONE)
4074 return QLA_PARAMETER_ERROR;
4076 e = qla2x00_alloc_work(vha, cmd);
4078 return QLA_FUNCTION_FAILED;
4082 return qla2x00_post_work(vha, e);
/*
 * qla2x00_post_nvme_gpnft_done_work() - queue a follow-up GPN_FT scan for
 * FC4_TYPE_NVME after the FCP scan finished.
 *
 * Only accepts QLA_EVT_GPNFT; returns QLA_PARAMETER_ERROR otherwise.
 */
4085 static int qla2x00_post_nvme_gpnft_done_work(struct scsi_qla_host *vha,
4088 struct qla_work_evt *e;
4090 if (cmd != QLA_EVT_GPNFT)
4091 return QLA_PARAMETER_ERROR;
4093 e = qla2x00_alloc_work(vha, cmd);
4095 return QLA_FUNCTION_FAILED;
4097 e->u.gpnft.fc4_type = FC4_TYPE_NVME;
4100 return qla2x00_post_work(vha, e);
/*
 * qla2x00_find_free_fcp_nvme_slot() - merge one GPN_FT/GNN_FT response
 * into the vha->scan.l slot array.
 *
 * For the FCP pass (sp->gen2 == FC4_TYPE_FCP_SCSI): GPN_FT fills fresh
 * slots with port id/name and FS_FC4TYPE_FCP; GNN_FT matches by N_Port
 * ID and copies the node name in.  For the NVMe pass: GPN_FT either ORs
 * FS_FC4TYPE_NVME into an already-known WWPN's slot or claims the first
 * empty slot for an NVMe-only port; GNN_FT again back-fills node names.
 *
 * NOTE(review): loop braces, slot-field assignments' left-hand sides and
 * `break`/`continue` statements are missing from this capture.
 */
4103 static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha,
4106 struct qla_hw_data *ha = vha->hw;
4107 int num_fibre_dev = ha->max_fibre_devices;
4108 struct ct_sns_req *ct_req =
4109 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
4110 struct ct_sns_gpnft_rsp *ct_rsp =
4111 (struct ct_sns_gpnft_rsp *)sp->u.iocb_cmd.u.ctarg.rsp;
4112 struct ct_sns_gpn_ft_data *d;
4113 struct fab_scan_rp *rp;
4114 u16 cmd = be16_to_cpu(ct_req->command);
/* gen2 carries the FC-4 type this scan pass was issued for. */
4115 u8 fc4_type = sp->gen2;
4122 for (i = 0; i < num_fibre_dev; i++) {
4123 d = &ct_rsp->entries[i];
4126 id.b.domain = d->port_id[0];
4127 id.b.area = d->port_id[1];
4128 id.b.al_pa = d->port_id[2];
4129 wwn = wwn_to_u64(d->port_name);
/* Skip empty/terminator entries. */
4131 if (id.b24 == 0 || wwn == 0)
4134 if (fc4_type == FC4_TYPE_FCP_SCSI) {
4135 if (cmd == GPN_FT_CMD) {
4136 rp = &vha->scan.l[j];
4138 memcpy(rp->port_name, d->port_name, 8);
4140 rp->fc4type = FS_FC4TYPE_FCP;
/* GNN_FT: locate the slot by N_Port ID and fill in the node name. */
4142 for (k = 0; k < num_fibre_dev; k++) {
4143 rp = &vha->scan.l[k];
4144 if (id.b24 == rp->id.b24) {
4145 memcpy(rp->node_name,
4152 /* Search if the fibre device supports FC4_TYPE_NVME */
4153 if (cmd == GPN_FT_CMD) {
4156 for (k = 0; k < num_fibre_dev; k++) {
4157 rp = &vha->scan.l[k];
4158 if (!memcmp(rp->port_name,
4161 * Supports FC-NVMe & FCP
4163 rp->fc4type |= FS_FC4TYPE_NVME;
4169 /* We found new FC-NVMe only port */
4171 for (k = 0; k < num_fibre_dev; k++) {
4172 rp = &vha->scan.l[k];
4173 if (wwn_to_u64(rp->port_name)) {
4177 memcpy(rp->port_name,
4186 for (k = 0; k < num_fibre_dev; k++) {
4187 rp = &vha->scan.l[k];
4188 if (id.b24 == rp->id.b24) {
4189 memcpy(rp->node_name,
/*
 * qla2x00_async_gpnft_gnnft_sp_done() - shared completion callback for
 * GPN_FT and GNN_FT fabric-scan commands.
 *
 * On failure: queues GNNFT_DONE work (to free resources from process
 * context) and retries the scan up to MAX_SCAN_RETRIES via dpc flags.
 * On success: folds the response into vha->scan.l, then either chains
 * the NVMe GPN_FT pass (after an FCP GNN_FT when NVMe is enabled) or
 * queues GPNFT_DONE/GNNFT_DONE post-processing.  If posting work fails,
 * the srb is unmapped and a loop resync is requested instead.
 *
 * NOTE(review): srb cast, some braces, and failure-branch tests are
 * missing from this capture.
 */
4199 static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
4202 struct scsi_qla_host *vha = sp->vha;
4203 struct ct_sns_req *ct_req =
4204 (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req;
4205 u16 cmd = be16_to_cpu(ct_req->command);
4206 u8 fc4_type = sp->gen2;
4207 unsigned long flags;
4210 /* gen2 field is holding the fc4type */
4211 ql_dbg(ql_dbg_disc, vha, 0xffff,
4212 "Async done-%s res %x FC4Type %x\n",
4213 sp->name, res, sp->gen2);
4217 unsigned long flags;
4218 const char *name = sp->name;
4221 * We are in an Interrupt context, queue up this
4222 * sp for GNNFT_DONE work. This will allow all
4223 * the resource to get freed up.
4225 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
4226 QLA_EVT_GNNFT_DONE);
4228 /* Cleanup here to prevent memory leak */
4229 qla24xx_sp_unmap(vha, sp);
4233 spin_lock_irqsave(&vha->work_lock, flags);
4234 vha->scan.scan_flags &= ~SF_SCANNING;
4235 vha->scan.scan_retry++;
4236 spin_unlock_irqrestore(&vha->work_lock, flags);
4238 if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
4239 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4240 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4241 qla2xxx_wake_dpc(vha);
4243 ql_dbg(ql_dbg_disc, vha, 0xffff,
4244 "Async done-%s rescan failed on all retries.\n",
/* Success: merge this response into the scan slot array. */
4251 qla2x00_find_free_fcp_nvme_slot(vha, sp);
/* FCP GNN_FT finished and NVMe is enabled: chain the NVMe GPN_FT pass. */
4253 if ((fc4_type == FC4_TYPE_FCP_SCSI) && vha->flags.nvme_enabled &&
4254 cmd == GNN_FT_CMD) {
4255 del_timer(&sp->u.iocb_cmd.timer);
4256 spin_lock_irqsave(&vha->work_lock, flags);
4257 vha->scan.scan_flags &= ~SF_SCANNING;
4258 spin_unlock_irqrestore(&vha->work_lock, flags);
4261 rc = qla2x00_post_nvme_gpnft_done_work(vha, sp, QLA_EVT_GPNFT);
4263 qla24xx_sp_unmap(vha, sp);
4264 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4265 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
/* Otherwise queue the appropriate done-work for this command type. */
4271 if (cmd == GPN_FT_CMD) {
4272 del_timer(&sp->u.iocb_cmd.timer);
4273 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
4274 QLA_EVT_GPNFT_DONE);
4276 rc = qla2x00_post_gnnft_gpnft_done_work(vha, sp,
4277 QLA_EVT_GNNFT_DONE);
4281 qla24xx_sp_unmap(vha, sp);
4282 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4283 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4289 * Get WWNN list for fc4_type
4291 * It is assumed the same SRB is re-used from GPNFT to avoid
4292 * mem free & re-alloc
/*
 * qla24xx_async_gnnft() - issue the GNN_FT (Get Node Names by FC-4 type)
 * follow-up scan, reusing the srb and DMA buffers from the preceding
 * GPN_FT pass.
 *
 * Validates that req/rsp buffers are still attached, rebuilds the CT_IU
 * as GNN_FT, and starts the command.  All bail-out paths clear
 * SF_SCANNING; the start-failure path also frees the DMA buffers.
 *
 * NOTE(review): the request payload is written through the
 * `req.gpn_ft` union member even though the command is GNN_FT — both
 * request layouts apparently share the port_type field; confirm against
 * the ct_sns_req definition.
 */
4294 static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
4297 int rval = QLA_FUNCTION_FAILED;
4298 struct ct_sns_req *ct_req;
4299 struct ct_sns_pkt *ct_sns;
4300 unsigned long flags;
4302 if (!vha->flags.online) {
4303 spin_lock_irqsave(&vha->work_lock, flags);
4304 vha->scan.scan_flags &= ~SF_SCANNING;
4305 spin_unlock_irqrestore(&vha->work_lock, flags);
/* The reused srb must still carry the GPN_FT pass's DMA buffers. */
4309 if (!sp->u.iocb_cmd.u.ctarg.req || !sp->u.iocb_cmd.u.ctarg.rsp) {
4310 ql_log(ql_log_warn, vha, 0xffff,
4311 "%s: req %p rsp %p are not setup\n",
4312 __func__, sp->u.iocb_cmd.u.ctarg.req,
4313 sp->u.iocb_cmd.u.ctarg.rsp);
4314 spin_lock_irqsave(&vha->work_lock, flags);
4315 vha->scan.scan_flags &= ~SF_SCANNING;
4316 spin_unlock_irqrestore(&vha->work_lock, flags);
4318 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4319 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4323 ql_dbg(ql_dbg_disc, vha, 0xfffff,
4324 "%s: FC4Type %x, CT-PASSTRHU %s command ctarg rsp size %d, ctarg req size %d\n",
4325 __func__, fc4_type, sp->name, sp->u.iocb_cmd.u.ctarg.rsp_size,
4326 sp->u.iocb_cmd.u.ctarg.req_size);
4328 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 = chip-reset generation (stale-scan detection); gen2 = FC-4 type. */
4330 sp->gen1 = vha->hw->base_qpair->chip_reset;
4331 sp->gen2 = fc4_type;
4333 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4334 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4336 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4337 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4339 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4340 /* CT_IU preamble */
4341 ct_req = qla2x00_prep_ct_req(ct_sns, GNN_FT_CMD,
4342 sp->u.iocb_cmd.u.ctarg.rsp_size);
4345 ct_req->req.gpn_ft.port_type = fc4_type;
4347 sp->u.iocb_cmd.u.ctarg.req_size = GNN_FT_REQ_SIZE;
4348 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4350 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4352 rval = qla2x00_start_sp(sp);
4353 if (rval != QLA_SUCCESS) {
4354 spin_lock_irqsave(&vha->work_lock, flags);
4355 vha->scan.scan_flags &= ~SF_SCANNING;
4356 spin_unlock_irqrestore(&vha->work_lock, flags);
4360 ql_dbg(ql_dbg_disc, vha, 0xffff,
4361 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4362 sp->handle, ct_req->req.gpn_ft.port_type);
/* Start failed: free the DMA buffers carried over from GPN_FT. */
4366 if (sp->u.iocb_cmd.u.ctarg.req) {
4367 dma_free_coherent(&vha->hw->pdev->dev,
4368 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4369 sp->u.iocb_cmd.u.ctarg.req,
4370 sp->u.iocb_cmd.u.ctarg.req_dma);
4371 sp->u.iocb_cmd.u.ctarg.req = NULL;
4373 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4374 dma_free_coherent(&vha->hw->pdev->dev,
4375 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4376 sp->u.iocb_cmd.u.ctarg.rsp,
4377 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4378 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla24xx_async_gpnft_done() - GPN_FT completed; chain the GNN_FT pass
 * for the same FC-4 type (sp->gen2), reusing the same srb/buffers.
 */
4386 void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp)
4388 ql_dbg(ql_dbg_disc, vha, 0xffff,
4389 "%s enter\n", __func__);
4390 qla24xx_async_gnnft(vha, sp, sp->gen2);
4393 /* Get WWPN list for certain fc4_type */
/*
 * qla24xx_async_gpnft() - start a GPN_FT (Get Port Names by FC-4 type)
 * fabric scan.
 *
 * @vha:      host to scan on
 * @fc4_type: FC4_TYPE_FCP_SCSI for the first pass (allocates a fresh srb
 *            and DMA buffers sized for max_fibre_devices entries and
 *            clears vha->scan.l); FC4_TYPE_NVME for the chained pass,
 *            which must arrive with the srb from the FCP pass.
 * @sp:       NULL for the FCP pass; the reused srb for the NVMe pass.
 *
 * Serialized via the SF_SCANNING flag under vha->work_lock; every
 * bail-out path clears it.
 *
 * NOTE(review): rsp_allocated_size is set to sizeof(struct ct_sns_pkt)
 * although `rspsz` (which scales with max_fibre_devices and is typically
 * larger) bytes were allocated — later dma_free_coherent() calls that
 * use rsp_allocated_size would pass the wrong size.  Looks like it
 * should be `rspsz`; confirm against upstream qla_gs.c.
 *
 * NOTE(review): intermediate lines (sp declaration, gotos, labels,
 * braces) are missing from this capture.
 */
4394 int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
4396 int rval = QLA_FUNCTION_FAILED;
4397 struct ct_sns_req *ct_req;
4398 struct ct_sns_pkt *ct_sns;
4400 unsigned long flags;
4402 ql_dbg(ql_dbg_disc, vha, 0xffff,
4403 "%s enter\n", __func__);
4405 if (!vha->flags.online)
/* Only one fabric scan at a time. */
4408 spin_lock_irqsave(&vha->work_lock, flags);
4409 if (vha->scan.scan_flags & SF_SCANNING) {
4410 spin_unlock_irqrestore(&vha->work_lock, flags);
4411 ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n");
4414 vha->scan.scan_flags |= SF_SCANNING;
4415 spin_unlock_irqrestore(&vha->work_lock, flags);
4417 if (fc4_type == FC4_TYPE_FCP_SCSI) {
4418 ql_dbg(ql_dbg_disc, vha, 0xffff,
4419 "%s: Performing FCP Scan\n", __func__);
4422 sp->free(sp); /* should not happen */
4424 sp = qla2x00_get_sp(vha, NULL, GFP_KERNEL);
4426 spin_lock_irqsave(&vha->work_lock, flags);
4427 vha->scan.scan_flags &= ~SF_SCANNING;
4428 spin_unlock_irqrestore(&vha->work_lock, flags);
/* Zeroed coherent buffer for the fixed-size GPN_FT request. */
4432 sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
4433 &vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
4434 &sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
4435 sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
4436 if (!sp->u.iocb_cmd.u.ctarg.req) {
4437 ql_log(ql_log_warn, vha, 0xffff,
4438 "Failed to allocate ct_sns request.\n");
4439 spin_lock_irqsave(&vha->work_lock, flags);
4440 vha->scan.scan_flags &= ~SF_SCANNING;
4441 spin_unlock_irqrestore(&vha->work_lock, flags);
4444 sp->u.iocb_cmd.u.ctarg.req_size = GPN_FT_REQ_SIZE;
/* Response must hold one gpn_ft entry per possible fabric device. */
4446 rspsz = sizeof(struct ct_sns_gpnft_rsp) +
4447 ((vha->hw->max_fibre_devices - 1) *
4448 sizeof(struct ct_sns_gpn_ft_data));
4450 sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
4451 &vha->hw->pdev->dev, rspsz,
4452 &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
/* NOTE(review): rspsz bytes allocated but sizeof(struct ct_sns_pkt)
 * recorded — see function header note. */
4453 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
4454 if (!sp->u.iocb_cmd.u.ctarg.rsp) {
4455 ql_log(ql_log_warn, vha, 0xffff,
4456 "Failed to allocate ct_sns request.\n");
4457 spin_lock_irqsave(&vha->work_lock, flags);
4458 vha->scan.scan_flags &= ~SF_SCANNING;
4459 spin_unlock_irqrestore(&vha->work_lock, flags);
4462 sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz;
4464 ql_dbg(ql_dbg_disc, vha, 0xffff,
4465 "%s scan list size %d\n", __func__, vha->scan.size);
4467 memset(vha->scan.l, 0, vha->scan.size);
/* NVMe pass arrived without the srb reused from the FCP pass. */
4469 ql_dbg(ql_dbg_disc, vha, 0xffff,
4470 "NVME scan did not provide SP\n");
4474 sp->type = SRB_CT_PTHRU_CMD;
/* gen1 = chip-reset generation; gen2 = FC-4 type of this pass. */
4476 sp->gen1 = vha->hw->base_qpair->chip_reset;
4477 sp->gen2 = fc4_type;
4479 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4480 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4482 rspsz = sp->u.iocb_cmd.u.ctarg.rsp_size;
4483 memset(sp->u.iocb_cmd.u.ctarg.rsp, 0, sp->u.iocb_cmd.u.ctarg.rsp_size);
4484 memset(sp->u.iocb_cmd.u.ctarg.req, 0, sp->u.iocb_cmd.u.ctarg.req_size);
4486 ct_sns = (struct ct_sns_pkt *)sp->u.iocb_cmd.u.ctarg.req;
4487 /* CT_IU preamble */
4488 ct_req = qla2x00_prep_ct_req(ct_sns, GPN_FT_CMD, rspsz);
4491 ct_req->req.gpn_ft.port_type = fc4_type;
4493 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4495 sp->done = qla2x00_async_gpnft_gnnft_sp_done;
4497 rval = qla2x00_start_sp(sp);
4498 if (rval != QLA_SUCCESS) {
4499 spin_lock_irqsave(&vha->work_lock, flags);
4500 vha->scan.scan_flags &= ~SF_SCANNING;
4501 spin_unlock_irqrestore(&vha->work_lock, flags);
4505 ql_dbg(ql_dbg_disc, vha, 0xffff,
4506 "Async-%s hdl=%x FC4Type %x.\n", sp->name,
4507 sp->handle, ct_req->req.gpn_ft.port_type);
/* Start failed: free whichever DMA buffers were allocated. */
4511 if (sp->u.iocb_cmd.u.ctarg.req) {
4512 dma_free_coherent(&vha->hw->pdev->dev,
4513 sp->u.iocb_cmd.u.ctarg.req_allocated_size,
4514 sp->u.iocb_cmd.u.ctarg.req,
4515 sp->u.iocb_cmd.u.ctarg.req_dma);
4516 sp->u.iocb_cmd.u.ctarg.req = NULL;
4518 if (sp->u.iocb_cmd.u.ctarg.rsp) {
4519 dma_free_coherent(&vha->hw->pdev->dev,
4520 sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
4521 sp->u.iocb_cmd.u.ctarg.rsp,
4522 sp->u.iocb_cmd.u.ctarg.rsp_dma);
4523 sp->u.iocb_cmd.u.ctarg.rsp = NULL;
/*
 * qla_scan_work_fn() - delayed-work handler that triggers a loop resync
 * (which re-runs the fabric scan) and clears the SF_QUEUED flag.
 *
 * Recovers the owning scsi_qla_host from the embedded fab_scan via
 * container_of.
 */
4531 void qla_scan_work_fn(struct work_struct *work)
4533 struct fab_scan *s = container_of(to_delayed_work(work),
4534 struct fab_scan, scan_work);
4535 struct scsi_qla_host *vha = container_of(s, struct scsi_qla_host,
4537 unsigned long flags;
4539 ql_dbg(ql_dbg_disc, vha, 0xffff,
4540 "%s: schedule loop resync\n", __func__);
4541 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4542 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4543 qla2xxx_wake_dpc(vha);
/* Allow the scan work to be queued again. */
4544 spin_lock_irqsave(&vha->work_lock, flags);
4545 vha->scan.scan_flags &= ~SF_QUEUED;
4546 spin_unlock_irqrestore(&vha->work_lock, flags);
/*
 * qla24xx_handle_gnnid_event() - GNN_ID completed; continue discovery by
 * queueing GNL work for the fcport.
 */
4550 void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4552 qla24xx_post_gnl_work(vha, ea->fcport);
/*
 * qla2x00_async_gnnid_sp_done() - completion callback for an async GNN_ID
 * (Get Node Name by N_Port ID) CT query.
 *
 * Copies the returned node name into the fcport, clears FCF_ASYNC_SENT,
 * and dispatches an FCME_GNNID_DONE event.
 *
 * NOTE(review): srb cast, wwnn declaration, and some ea field
 * assignments are missing from this capture.
 */
4555 static void qla2x00_async_gnnid_sp_done(void *s, int res)
4558 struct scsi_qla_host *vha = sp->vha;
4559 fc_port_t *fcport = sp->fcport;
4560 u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name;
4561 struct event_arg ea;
4564 fcport->flags &= ~FCF_ASYNC_SENT;
4565 wwnn = wwn_to_u64(node_name);
4567 memcpy(fcport->node_name, node_name, WWN_SIZE);
4569 memset(&ea, 0, sizeof(ea));
4573 ea.event = FCME_GNNID_DONE;
4575 ql_dbg(ql_dbg_disc, vha, 0x204f,
4576 "Async done-%s res %x, WWPN %8phC %8phC\n",
4577 sp->name, res, fcport->port_name, fcport->node_name);
4579 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gnnid() - issue an async GNN_ID CT query for @fcport.
 *
 * Sets disc_state to DSC_GNN_ID, reuses the fcport's ct_desc buffer for
 * both CT request and response, and starts the command; completion is
 * handled by qla2x00_async_gnnid_sp_done().  Returns QLA_SUCCESS when
 * started; the visible error path clears FCF_ASYNC_SENT.
 */
4584 int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4586 int rval = QLA_FUNCTION_FAILED;
4587 struct ct_sns_req *ct_req;
4590 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4593 fcport->disc_state = DSC_GNN_ID;
4594 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4598 fcport->flags |= FCF_ASYNC_SENT;
4599 sp->type = SRB_CT_PTHRU_CMD;
/* Snapshot generation counters to detect RSCN/login races on completion. */
4601 sp->gen1 = fcport->rscn_gen;
4602 sp->gen2 = fcport->login_gen;
4604 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4605 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4607 /* CT_IU preamble */
4608 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GNN_ID_CMD,
4612 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
4613 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4614 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4617 /* req & rsp use the same buffer */
4618 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4619 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4620 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4621 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4622 sp->u.iocb_cmd.u.ctarg.req_size = GNN_ID_REQ_SIZE;
4623 sp->u.iocb_cmd.u.ctarg.rsp_size = GNN_ID_RSP_SIZE;
4624 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4626 sp->done = qla2x00_async_gnnid_sp_done;
4628 rval = qla2x00_start_sp(sp);
4629 if (rval != QLA_SUCCESS)
4631 ql_dbg(ql_dbg_disc, vha, 0xffff,
4632 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4633 sp->name, fcport->port_name,
4634 sp->handle, fcport->loop_id, fcport->d_id.b24);
/* Error path: allow future async commands on this fcport. */
4639 fcport->flags &= ~FCF_ASYNC_SENT;
/*
 * qla24xx_post_gnnid_work() - queue a GNN_ID query for @fcport to the
 * driver work queue.
 *
 * Rejected when the loop is neither READY nor UP, or the driver is
 * unloading.
 */
4644 int qla24xx_post_gnnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4646 struct qla_work_evt *e;
4649 ls = atomic_read(&vha->loop_state);
4650 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4651 test_bit(UNLOADING, &vha->dpc_flags))
4654 e = qla2x00_alloc_work(vha, QLA_EVT_GNNID);
4656 return QLA_FUNCTION_FAILED;
4658 e->u.fcport.fcport = fcport;
4659 return qla2x00_post_work(vha, e);
/*
 * qla24xx_handle_gfpnid_event() - handle GFPN_ID completion.
 *
 * Drops the event when a session delete is pending or the login
 * generation changed mid-flight; re-queries via GIDPN when an RSCN
 * raced the command (rscn_gen mismatch); otherwise proceeds to a GPSC
 * (port speed) query.
 */
4663 void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea)
4665 fc_port_t *fcport = ea->fcport;
4667 ql_dbg(ql_dbg_disc, vha, 0xffff,
4668 "%s %8phC DS %d LS %d rc %d login %d|%d rscn %d|%d fcpcnt %d\n",
4669 __func__, fcport->port_name, fcport->disc_state,
4670 fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2,
4671 fcport->rscn_gen, ea->sp->gen1, vha->fcport_count);
4673 if (fcport->disc_state == DSC_DELETE_PEND)
4676 if (ea->sp->gen2 != fcport->login_gen) {
4677 /* target side must have changed it. */
4678 ql_dbg(ql_dbg_disc, vha, 0x20d3,
4679 "%s %8phC generation changed\n",
4680 __func__, fcport->port_name);
4682 } else if (ea->sp->gen1 != fcport->rscn_gen) {
4683 ql_dbg(ql_dbg_disc, vha, 0x20d4, "%s %d %8phC post gidpn\n",
4684 __func__, __LINE__, fcport->port_name);
4685 qla24xx_post_gidpn_work(vha, fcport);
4689 qla24xx_post_gpsc_work(vha, fcport);
/*
 * qla2x00_async_gfpnid_sp_done() - completion callback for an async
 * GFPN_ID (Get Fabric Port Name by N_Port ID) CT query.
 *
 * Records the returned fabric port name in the fcport and dispatches an
 * FCME_GFPNID_DONE event.
 *
 * NOTE(review): srb cast, wwn declaration, and some ea field
 * assignments are missing from this capture.
 */
4692 static void qla2x00_async_gfpnid_sp_done(void *s, int res)
4695 struct scsi_qla_host *vha = sp->vha;
4696 fc_port_t *fcport = sp->fcport;
4697 u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name;
4698 struct event_arg ea;
4701 wwn = wwn_to_u64(fpn);
4703 memcpy(fcport->fabric_port_name, fpn, WWN_SIZE);
4705 memset(&ea, 0, sizeof(ea));
4709 ea.event = FCME_GFPNID_DONE;
4711 ql_dbg(ql_dbg_disc, vha, 0x204f,
4712 "Async done-%s res %x, WWPN %8phC %8phC\n",
4713 sp->name, res, fcport->port_name, fcport->fabric_port_name);
4715 qla2x00_fcport_event_handler(vha, &ea);
/*
 * qla24xx_async_gfpnid() - issue an async GFPN_ID CT query for @fcport.
 *
 * Mirrors qla24xx_async_gnnid(): reuses the fcport's ct_desc buffer for
 * both request and response and starts the CT pass-through command.
 * Returns QLA_SUCCESS when started; the visible error path clears
 * FCF_ASYNC_SENT.
 */
4720 int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport)
4722 int rval = QLA_FUNCTION_FAILED;
4723 struct ct_sns_req *ct_req;
4726 if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4729 sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
4733 sp->type = SRB_CT_PTHRU_CMD;
4734 sp->name = "gfpnid";
/* Snapshot generation counters to detect RSCN/login races on completion. */
4735 sp->gen1 = fcport->rscn_gen;
4736 sp->gen2 = fcport->login_gen;
4738 sp->u.iocb_cmd.timeout = qla2x00_async_iocb_timeout;
4739 qla2x00_init_timer(sp, qla2x00_get_async_timeout(vha) + 2);
4741 /* CT_IU preamble */
4742 ct_req = qla2x00_prep_ct_req(fcport->ct_desc.ct_sns, GFPN_ID_CMD,
4746 ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain;
4747 ct_req->req.port_id.port_id[1] = fcport->d_id.b.area;
4748 ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa;
4751 /* req & rsp use the same buffer */
4752 sp->u.iocb_cmd.u.ctarg.req = fcport->ct_desc.ct_sns;
4753 sp->u.iocb_cmd.u.ctarg.req_dma = fcport->ct_desc.ct_sns_dma;
4754 sp->u.iocb_cmd.u.ctarg.rsp = fcport->ct_desc.ct_sns;
4755 sp->u.iocb_cmd.u.ctarg.rsp_dma = fcport->ct_desc.ct_sns_dma;
4756 sp->u.iocb_cmd.u.ctarg.req_size = GFPN_ID_REQ_SIZE;
4757 sp->u.iocb_cmd.u.ctarg.rsp_size = GFPN_ID_RSP_SIZE;
4758 sp->u.iocb_cmd.u.ctarg.nport_handle = NPH_SNS;
4760 sp->done = qla2x00_async_gfpnid_sp_done;
4762 rval = qla2x00_start_sp(sp);
4763 if (rval != QLA_SUCCESS)
4766 ql_dbg(ql_dbg_disc, vha, 0xffff,
4767 "Async-%s - %8phC hdl=%x loopid=%x portid %06x.\n",
4768 sp->name, fcport->port_name,
4769 sp->handle, fcport->loop_id, fcport->d_id.b24);
/* Error path: allow future async commands on this fcport. */
4774 fcport->flags &= ~FCF_ASYNC_SENT;
4779 int qla24xx_post_gfpnid_work(struct scsi_qla_host *vha, fc_port_t *fcport)
4781 struct qla_work_evt *e;
4784 ls = atomic_read(&vha->loop_state);
4785 if (((ls != LOOP_READY) && (ls != LOOP_UP)) ||
4786 test_bit(UNLOADING, &vha->dpc_flags))
4789 e = qla2x00_alloc_work(vha, QLA_EVT_GFPNID);
4791 return QLA_FUNCTION_FAILED;
4793 e->u.fcport.fcport = fcport;
4794 return qla2x00_post_work(vha, e);