GNU Linux-libre 4.19.264-gnu1
[releases.git] / drivers / scsi / qla2xxx / qla_bsg.c
1         /*
2  * QLogic Fibre Channel HBA Driver
3  * Copyright (c)  2003-2014 QLogic Corporation
4  *
5  * See LICENSE.qla2xxx for copyright and licensing details.
6  */
7 #include "qla_def.h"
8
9 #include <linux/kthread.h>
10 #include <linux/vmalloc.h>
11 #include <linux/delay.h>
12 #include <linux/bsg-lib.h>
13
14 /* BSG support for ELS/CT pass through */
15 void
16 qla2x00_bsg_job_done(void *ptr, int res)
17 {
18         srb_t *sp = ptr;
19         struct bsg_job *bsg_job = sp->u.bsg_job;
20         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
21
22         sp->free(sp);
23
24         bsg_reply->result = res;
25         bsg_job_done(bsg_job, bsg_reply->result,
26                        bsg_reply->reply_payload_rcv_len);
27 }
28
/*
 * qla2x00_bsg_sp_free - release all resources held by a BSG pass-through
 * srb: DMA scatterlist mappings, the dummy fcport (where one was
 * allocated), and finally the srb itself.
 * @ptr: the srb_t to tear down
 */
void
qla2x00_bsg_sp_free(void *ptr)
{
	srb_t *sp = ptr;
	struct qla_hw_data *ha = sp->vha->hw;
	struct bsg_job *bsg_job = sp->u.bsg_job;
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;

	if (sp->type == SRB_FXIOCB_BCMD) {
		/* ISPFx00 vendor IOCB: the embedded request's flags record
		 * which payload directions were mapped, so only unmap those.
		 */
		piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
		    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

		if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->request_payload.sg_list,
			    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
			dma_unmap_sg(&ha->pdev->dev,
			    bsg_job->reply_payload.sg_list,
			    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	} else {
		/* All other pass-through types map both payloads. */
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	/* These command types run on a dummy fcport allocated by the
	 * submission path rather than on a real remote port; free it here.
	 */
	if (sp->type == SRB_CT_CMD ||
	    sp->type == SRB_FXIOCB_BCMD ||
	    sp->type == SRB_ELS_CMD_HST)
		kfree(sp->fcport);
	qla2x00_rel_sp(sp);
}
65
66 int
67 qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
68         struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
69 {
70         int i, ret, num_valid;
71         uint8_t *bcode;
72         struct qla_fcp_prio_entry *pri_entry;
73         uint32_t *bcode_val_ptr, bcode_val;
74
75         ret = 1;
76         num_valid = 0;
77         bcode = (uint8_t *)pri_cfg;
78         bcode_val_ptr = (uint32_t *)pri_cfg;
79         bcode_val = (uint32_t)(*bcode_val_ptr);
80
81         if (bcode_val == 0xFFFFFFFF) {
82                 /* No FCP Priority config data in flash */
83                 ql_dbg(ql_dbg_user, vha, 0x7051,
84                     "No FCP Priority config data.\n");
85                 return 0;
86         }
87
88         if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
89                         bcode[3] != 'S') {
90                 /* Invalid FCP priority data header*/
91                 ql_dbg(ql_dbg_user, vha, 0x7052,
92                     "Invalid FCP Priority data header. bcode=0x%x.\n",
93                     bcode_val);
94                 return 0;
95         }
96         if (flag != 1)
97                 return ret;
98
99         pri_entry = &pri_cfg->entry[0];
100         for (i = 0; i < pri_cfg->num_entries; i++) {
101                 if (pri_entry->flags & FCP_PRIO_ENTRY_TAG_VALID)
102                         num_valid++;
103                 pri_entry++;
104         }
105
106         if (num_valid == 0) {
107                 /* No valid FCP priority data entries */
108                 ql_dbg(ql_dbg_user, vha, 0x7053,
109                     "No valid FCP Priority data entries.\n");
110                 ret = 0;
111         } else {
112                 /* FCP priority data is valid */
113                 ql_dbg(ql_dbg_user, vha, 0x7054,
114                     "Valid FCP priority data. num entries = %d.\n",
115                     num_valid);
116         }
117
118         return ret;
119 }
120
/*
 * qla24xx_proc_fcp_prio_cfg_cmd - handle the QLFC_FCP_PRIO_* vendor
 * sub-commands: enable/disable FCP priority and get/set the FCP priority
 * configuration blob cached in ha->fcp_prio_cfg.
 *
 * Returns 0 on success (and completes the bsg job here), negative errno
 * on failure (the errno is returned to the dispatcher).
 */
static int
qla24xx_proc_fcp_prio_cfg_cmd(struct bsg_job *bsg_job)
{
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int ret = 0;
	uint32_t len;
	uint32_t oper;

	/* FCP priority is only supported on ISP24xx/25xx/P3P adapters. */
	if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}

	/* Get the sub command */
	oper = bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Only set config is allowed if config memory is not allocated */
	if (!ha->fcp_prio_cfg && (oper != QLFC_FCP_PRIO_SET_CONFIG)) {
		ret = -EINVAL;
		goto exit_fcp_prio_cfg;
	}
	switch (oper) {
	case QLFC_FCP_PRIO_DISABLE:
		if (ha->flags.fcp_prio_enabled) {
			ha->flags.fcp_prio_enabled = 0;
			ha->fcp_prio_cfg->attributes &=
				~FCP_PRIO_ATTR_ENABLE;
			qla24xx_update_all_fcp_prio(vha);
			bsg_reply->result = DID_OK;
		} else {
			/* Already disabled. */
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}
		break;

	case QLFC_FCP_PRIO_ENABLE:
		/* Enabling when already enabled is a silent no-op. */
		if (!ha->flags.fcp_prio_enabled) {
			if (ha->fcp_prio_cfg) {
				ha->flags.fcp_prio_enabled = 1;
				ha->fcp_prio_cfg->attributes |=
				    FCP_PRIO_ATTR_ENABLE;
				qla24xx_update_all_fcp_prio(vha);
				bsg_reply->result = DID_OK;
			} else {
				/* No config has been loaded yet. */
				ret = -EINVAL;
				bsg_reply->result = (DID_ERROR << 16);
				goto exit_fcp_prio_cfg;
			}
		}
		break;

	case QLFC_FCP_PRIO_GET_CONFIG:
		/* Copy up to FCP_PRIO_CFG_SIZE bytes of the cached config
		 * into the caller's reply payload.
		 */
		len = bsg_job->reply_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			ret = -EINVAL;
			bsg_reply->result = (DID_ERROR << 16);
			goto exit_fcp_prio_cfg;
		}

		bsg_reply->result = DID_OK;
		bsg_reply->reply_payload_rcv_len =
			sg_copy_from_buffer(
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, ha->fcp_prio_cfg,
			len);

		break;

	case QLFC_FCP_PRIO_SET_CONFIG:
		len = bsg_job->request_payload.payload_len;
		if (!len || len > FCP_PRIO_CFG_SIZE) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			goto exit_fcp_prio_cfg;
		}

		/* Allocate the cache buffer on first use; it persists for
		 * the life of the adapter.
		 */
		if (!ha->fcp_prio_cfg) {
			ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
			if (!ha->fcp_prio_cfg) {
				ql_log(ql_log_warn, vha, 0x7050,
				    "Unable to allocate memory for fcp prio "
				    "config data (%x).\n", FCP_PRIO_CFG_SIZE);
				bsg_reply->result = (DID_ERROR << 16);
				ret = -ENOMEM;
				goto exit_fcp_prio_cfg;
			}
		}

		memset(ha->fcp_prio_cfg, 0, FCP_PRIO_CFG_SIZE);
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, ha->fcp_prio_cfg,
			FCP_PRIO_CFG_SIZE);

		/* validate fcp priority data */

		if (!qla24xx_fcp_prio_cfg_valid(vha,
		    (struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
			bsg_reply->result = (DID_ERROR << 16);
			ret = -EINVAL;
			/* If the buffer was invalid, the cached
			 * fcp_prio_cfg is of no use -- drop it.
			 */
			vfree(ha->fcp_prio_cfg);
			ha->fcp_prio_cfg = NULL;
			goto exit_fcp_prio_cfg;
		}

		/* Mirror the config's enable attribute into the driver flag
		 * and push the new priorities to all logged-in ports.
		 */
		ha->flags.fcp_prio_enabled = 0;
		if (ha->fcp_prio_cfg->attributes & FCP_PRIO_ATTR_ENABLE)
			ha->flags.fcp_prio_enabled = 1;
		qla24xx_update_all_fcp_prio(vha);
		bsg_reply->result = DID_OK;
		break;
	default:
		ret = -EINVAL;
		break;
	}
exit_fcp_prio_cfg:
	/* Complete the job only on success; errors propagate via ret. */
	if (!ret)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return ret;
}
249
250 static int
251 qla2x00_process_els(struct bsg_job *bsg_job)
252 {
253         struct fc_bsg_request *bsg_request = bsg_job->request;
254         struct fc_rport *rport;
255         fc_port_t *fcport = NULL;
256         struct Scsi_Host *host;
257         scsi_qla_host_t *vha;
258         struct qla_hw_data *ha;
259         srb_t *sp;
260         const char *type;
261         int req_sg_cnt, rsp_sg_cnt;
262         int rval =  (DID_ERROR << 16);
263         uint16_t nextlid = 0;
264
265         if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
266                 rport = fc_bsg_to_rport(bsg_job);
267                 fcport = *(fc_port_t **) rport->dd_data;
268                 host = rport_to_shost(rport);
269                 vha = shost_priv(host);
270                 ha = vha->hw;
271                 type = "FC_BSG_RPT_ELS";
272         } else {
273                 host = fc_bsg_to_shost(bsg_job);
274                 vha = shost_priv(host);
275                 ha = vha->hw;
276                 type = "FC_BSG_HST_ELS_NOLOGIN";
277         }
278
279         if (!vha->flags.online) {
280                 ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
281                 rval = -EIO;
282                 goto done;
283         }
284
285         /* pass through is supported only for ISP 4Gb or higher */
286         if (!IS_FWI2_CAPABLE(ha)) {
287                 ql_dbg(ql_dbg_user, vha, 0x7001,
288                     "ELS passthru not supported for ISP23xx based adapters.\n");
289                 rval = -EPERM;
290                 goto done;
291         }
292
293         /*  Multiple SG's are not supported for ELS requests */
294         if (bsg_job->request_payload.sg_cnt > 1 ||
295                 bsg_job->reply_payload.sg_cnt > 1) {
296                 ql_dbg(ql_dbg_user, vha, 0x7002,
297                     "Multiple SG's are not supported for ELS requests, "
298                     "request_sg_cnt=%x reply_sg_cnt=%x.\n",
299                     bsg_job->request_payload.sg_cnt,
300                     bsg_job->reply_payload.sg_cnt);
301                 rval = -EPERM;
302                 goto done;
303         }
304
305         /* ELS request for rport */
306         if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
307                 /* make sure the rport is logged in,
308                  * if not perform fabric login
309                  */
310                 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
311                         ql_dbg(ql_dbg_user, vha, 0x7003,
312                             "Failed to login port %06X for ELS passthru.\n",
313                             fcport->d_id.b24);
314                         rval = -EIO;
315                         goto done;
316                 }
317         } else {
318                 /* Allocate a dummy fcport structure, since functions
319                  * preparing the IOCB and mailbox command retrieves port
320                  * specific information from fcport structure. For Host based
321                  * ELS commands there will be no fcport structure allocated
322                  */
323                 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
324                 if (!fcport) {
325                         rval = -ENOMEM;
326                         goto done;
327                 }
328
329                 /* Initialize all required  fields of fcport */
330                 fcport->vha = vha;
331                 fcport->d_id.b.al_pa =
332                         bsg_request->rqst_data.h_els.port_id[0];
333                 fcport->d_id.b.area =
334                         bsg_request->rqst_data.h_els.port_id[1];
335                 fcport->d_id.b.domain =
336                         bsg_request->rqst_data.h_els.port_id[2];
337                 fcport->loop_id =
338                         (fcport->d_id.b.al_pa == 0xFD) ?
339                         NPH_FABRIC_CONTROLLER : NPH_F_PORT;
340         }
341
342         req_sg_cnt =
343                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
344                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
345         if (!req_sg_cnt) {
346                 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
347                     bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
348                 rval = -ENOMEM;
349                 goto done_free_fcport;
350         }
351
352         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
353                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
354         if (!rsp_sg_cnt) {
355                 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
356                     bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
357                 rval = -ENOMEM;
358                 goto done_free_fcport;
359         }
360
361         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
362                 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
363                 ql_log(ql_log_warn, vha, 0x7008,
364                     "dma mapping resulted in different sg counts, "
365                     "request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
366                     "dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
367                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
368                 rval = -EAGAIN;
369                 goto done_unmap_sg;
370         }
371
372         /* Alloc SRB structure */
373         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
374         if (!sp) {
375                 rval = -ENOMEM;
376                 goto done_unmap_sg;
377         }
378
379         sp->type =
380                 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
381                  SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
382         sp->name =
383                 (bsg_request->msgcode == FC_BSG_RPT_ELS ?
384                  "bsg_els_rpt" : "bsg_els_hst");
385         sp->u.bsg_job = bsg_job;
386         sp->free = qla2x00_bsg_sp_free;
387         sp->done = qla2x00_bsg_job_done;
388
389         ql_dbg(ql_dbg_user, vha, 0x700a,
390             "bsg rqst type: %s els type: %x - loop-id=%x "
391             "portid=%-2x%02x%02x.\n", type,
392             bsg_request->rqst_data.h_els.command_code, fcport->loop_id,
393             fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
394
395         rval = qla2x00_start_sp(sp);
396         if (rval != QLA_SUCCESS) {
397                 ql_log(ql_log_warn, vha, 0x700e,
398                     "qla2x00_start_sp failed = %d\n", rval);
399                 qla2x00_rel_sp(sp);
400                 rval = -EIO;
401                 goto done_unmap_sg;
402         }
403         return rval;
404
405 done_unmap_sg:
406         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
407                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
408         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
409                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
410         goto done_free_fcport;
411
412 done_free_fcport:
413         if (bsg_request->msgcode == FC_BSG_RPT_ELS)
414                 kfree(fcport);
415 done:
416         return rval;
417 }
418
/*
 * qla24xx_calc_ct_iocbs - number of IOCBs needed to carry @dsds data
 * segment descriptors: the command IOCB holds the first two, and each
 * continuation IOCB holds up to five more.
 */
static inline uint16_t
qla24xx_calc_ct_iocbs(uint16_t dsds)
{
	if (dsds <= 2)
		return 1;

	/* One command IOCB plus ceil((dsds - 2) / 5) continuations. */
	return (uint16_t)(1 + (dsds - 2 + 4) / 5);
}
432
433 static int
434 qla2x00_process_ct(struct bsg_job *bsg_job)
435 {
436         srb_t *sp;
437         struct fc_bsg_request *bsg_request = bsg_job->request;
438         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
439         scsi_qla_host_t *vha = shost_priv(host);
440         struct qla_hw_data *ha = vha->hw;
441         int rval = (DID_ERROR << 16);
442         int req_sg_cnt, rsp_sg_cnt;
443         uint16_t loop_id;
444         struct fc_port *fcport;
445         char  *type = "FC_BSG_HST_CT";
446
447         req_sg_cnt =
448                 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
449                         bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
450         if (!req_sg_cnt) {
451                 ql_log(ql_log_warn, vha, 0x700f,
452                     "dma_map_sg return %d for request\n", req_sg_cnt);
453                 rval = -ENOMEM;
454                 goto done;
455         }
456
457         rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
458                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
459         if (!rsp_sg_cnt) {
460                 ql_log(ql_log_warn, vha, 0x7010,
461                     "dma_map_sg return %d for reply\n", rsp_sg_cnt);
462                 rval = -ENOMEM;
463                 goto done;
464         }
465
466         if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
467             (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
468                 ql_log(ql_log_warn, vha, 0x7011,
469                     "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
470                     "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
471                     req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
472                 rval = -EAGAIN;
473                 goto done_unmap_sg;
474         }
475
476         if (!vha->flags.online) {
477                 ql_log(ql_log_warn, vha, 0x7012,
478                     "Host is not online.\n");
479                 rval = -EIO;
480                 goto done_unmap_sg;
481         }
482
483         loop_id =
484                 (bsg_request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
485                         >> 24;
486         switch (loop_id) {
487         case 0xFC:
488                 loop_id = cpu_to_le16(NPH_SNS);
489                 break;
490         case 0xFA:
491                 loop_id = vha->mgmt_svr_loop_id;
492                 break;
493         default:
494                 ql_dbg(ql_dbg_user, vha, 0x7013,
495                     "Unknown loop id: %x.\n", loop_id);
496                 rval = -EINVAL;
497                 goto done_unmap_sg;
498         }
499
500         /* Allocate a dummy fcport structure, since functions preparing the
501          * IOCB and mailbox command retrieves port specific information
502          * from fcport structure. For Host based ELS commands there will be
503          * no fcport structure allocated
504          */
505         fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
506         if (!fcport) {
507                 ql_log(ql_log_warn, vha, 0x7014,
508                     "Failed to allocate fcport.\n");
509                 rval = -ENOMEM;
510                 goto done_unmap_sg;
511         }
512
513         /* Initialize all required  fields of fcport */
514         fcport->vha = vha;
515         fcport->d_id.b.al_pa = bsg_request->rqst_data.h_ct.port_id[0];
516         fcport->d_id.b.area = bsg_request->rqst_data.h_ct.port_id[1];
517         fcport->d_id.b.domain = bsg_request->rqst_data.h_ct.port_id[2];
518         fcport->loop_id = loop_id;
519
520         /* Alloc SRB structure */
521         sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
522         if (!sp) {
523                 ql_log(ql_log_warn, vha, 0x7015,
524                     "qla2x00_get_sp failed.\n");
525                 rval = -ENOMEM;
526                 goto done_free_fcport;
527         }
528
529         sp->type = SRB_CT_CMD;
530         sp->name = "bsg_ct";
531         sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
532         sp->u.bsg_job = bsg_job;
533         sp->free = qla2x00_bsg_sp_free;
534         sp->done = qla2x00_bsg_job_done;
535
536         ql_dbg(ql_dbg_user, vha, 0x7016,
537             "bsg rqst type: %s else type: %x - "
538             "loop-id=%x portid=%02x%02x%02x.\n", type,
539             (bsg_request->rqst_data.h_ct.preamble_word2 >> 16),
540             fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
541             fcport->d_id.b.al_pa);
542
543         rval = qla2x00_start_sp(sp);
544         if (rval != QLA_SUCCESS) {
545                 ql_log(ql_log_warn, vha, 0x7017,
546                     "qla2x00_start_sp failed=%d.\n", rval);
547                 qla2x00_rel_sp(sp);
548                 rval = -EIO;
549                 goto done_free_fcport;
550         }
551         return rval;
552
553 done_free_fcport:
554         kfree(fcport);
555 done_unmap_sg:
556         dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
557                 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
558         dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
559                 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
560 done:
561         return rval;
562 }
563
/*
 * Disable loopback mode on ISP81xx/8031/8044 by writing a port
 * configuration with the loopback bits cleared.
 * @config: current 4-word port configuration
 * @wait:  when set, wait for the DCBX-complete async event
 * @wait2: when set, also wait for the loopback port-up async event
 *
 * Returns 0 on success, -EINVAL on mailbox failure or event timeout.
 */
static inline int
qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
			    int wait, int wait2)
{
	int ret = 0;
	int rval = 0;
	uint16_t new_config[4];
	struct qla_hw_data *ha = vha->hw;

	/* Port-config mailbox commands exist only on these ISP types. */
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_reset_internal;

	memset(new_config, 0 , sizeof(new_config));
	/* Only act if a loopback mode is currently enabled. */
	if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_INTERNAL_LOOPBACK ||
	    (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
	    ENABLE_EXTERNAL_LOOPBACK) {
		new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
		ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
		    (new_config[0] & INTERNAL_LOOPBACK_MASK));
		memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;

		/* Arm the interrupt handlers to complete dcbx_comp /
		 * lb_portup_comp when the async events arrive.
		 */
		ha->notify_dcbx_comp = wait;
		ha->notify_lb_portup_comp = wait2;

		ret = qla81xx_set_port_config(vha, new_config);
		if (ret != QLA_SUCCESS) {
			ql_log(ql_log_warn, vha, 0x7025,
			    "Set port config failed.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		}

		/* Wait for DCBX complete event */
		if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
			(DCBX_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x7026,
			    "DCBX completion not received.\n");
			ha->notify_dcbx_comp = 0;
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7027,
			    "DCBX completion received.\n");

		/* Optionally also wait for the port to come back up. */
		if (wait2 &&
		    !wait_for_completion_timeout(&ha->lb_portup_comp,
		    (LB_PORTUP_COMP_TIMEOUT * HZ))) {
			ql_dbg(ql_dbg_user, vha, 0x70c5,
			    "Port up completion not received.\n");
			ha->notify_lb_portup_comp = 0;
			rval = -EINVAL;
			goto done_reset_internal;
		} else
			ql_dbg(ql_dbg_user, vha, 0x70c6,
			    "Port up completion received.\n");

		/* Disarm the notification flags on the success path. */
		ha->notify_dcbx_comp = 0;
		ha->notify_lb_portup_comp = 0;
	}
done_reset_internal:
	return rval;
}
631
/*
 * Set the port configuration to enable the internal or external loopback
 * depending on the loopback mode.
 * @config: current 4-word port configuration
 * @new_config: filled in with the configuration that was written; the
 *	caller later passes it to qla81xx_reset_loopback_mode()
 * @mode: INTERNAL_LOOPBACK or EXTERNAL_LOOPBACK
 *
 * Returns 0 on success, -EINVAL on mailbox failure, DCBX timeout, or a
 * bad IDC completion status.
 */
static inline int
qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
	uint16_t *new_config, uint16_t mode)
{
	int ret = 0;
	int rval = 0;
	unsigned long rem_tmo = 0, current_tmo = 0;
	struct qla_hw_data *ha = vha->hw;

	/* Port-config mailbox commands exist only on these ISP types. */
	if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
		goto done_set_internal;

	if (mode == INTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
	else if (mode == EXTERNAL_LOOPBACK)
		new_config[0] = config[0] | (ENABLE_EXTERNAL_LOOPBACK << 1);
	ql_dbg(ql_dbg_user, vha, 0x70be,
	     "new_config[0]=%02x\n", (new_config[0] & INTERNAL_LOOPBACK_MASK));

	memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);

	/* Have the interrupt handler complete dcbx_comp on the DCBX AEN. */
	ha->notify_dcbx_comp = 1;
	ret = qla81xx_set_port_config(vha, new_config);
	if (ret != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x7021,
		    "set port config failed.\n");
		ha->notify_dcbx_comp = 0;
		rval = -EINVAL;
		goto done_set_internal;
	}

	/* Wait for DCBX complete event.  Firmware may request an extended
	 * timeout via ha->idc_extend_tmo, so keep waiting until either the
	 * completion arrives or a timeout expires with no extension pending.
	 */
	current_tmo = DCBX_COMP_TIMEOUT * HZ;
	while (1) {
		rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
		    current_tmo);
		if (!ha->idc_extend_tmo || rem_tmo) {
			ha->idc_extend_tmo = 0;
			break;
		}
		current_tmo = ha->idc_extend_tmo * HZ;
		ha->idc_extend_tmo = 0;
	}

	if (!rem_tmo) {
		ql_dbg(ql_dbg_user, vha, 0x7022,
		    "DCBX completion not received.\n");
		ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
		/*
		 * If the reset of the loopback mode doesn't work take a FCoE
		 * dump and reset the chip.
		 */
		if (ret) {
			ha->isp_ops->fw_dump(vha, 0);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		}
		rval = -EINVAL;
	} else {
		/* Completed in time, but the AEN may still report failure. */
		if (ha->flags.idc_compl_status) {
			ql_dbg(ql_dbg_user, vha, 0x70c3,
			    "Bad status in IDC Completion AEN\n");
			rval = -EINVAL;
			ha->flags.idc_compl_status = 0;
		} else
			ql_dbg(ql_dbg_user, vha, 0x7023,
			    "DCBX completion received.\n");
	}

	ha->notify_dcbx_comp = 0;
	ha->idc_extend_tmo = 0;

done_set_internal:
	return rval;
}
710
711 static int
712 qla2x00_process_loopback(struct bsg_job *bsg_job)
713 {
714         struct fc_bsg_request *bsg_request = bsg_job->request;
715         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
716         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
717         scsi_qla_host_t *vha = shost_priv(host);
718         struct qla_hw_data *ha = vha->hw;
719         int rval;
720         uint8_t command_sent;
721         char *type;
722         struct msg_echo_lb elreq;
723         uint16_t response[MAILBOX_REGISTER_COUNT];
724         uint16_t config[4], new_config[4];
725         uint8_t *fw_sts_ptr;
726         uint8_t *req_data = NULL;
727         dma_addr_t req_data_dma;
728         uint32_t req_data_len;
729         uint8_t *rsp_data = NULL;
730         dma_addr_t rsp_data_dma;
731         uint32_t rsp_data_len;
732
733         if (!vha->flags.online) {
734                 ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
735                 return -EIO;
736         }
737
738         memset(&elreq, 0, sizeof(elreq));
739
740         elreq.req_sg_cnt = dma_map_sg(&ha->pdev->dev,
741                 bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
742                 DMA_TO_DEVICE);
743
744         if (!elreq.req_sg_cnt) {
745                 ql_log(ql_log_warn, vha, 0x701a,
746                     "dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
747                 return -ENOMEM;
748         }
749
750         elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
751                 bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
752                 DMA_FROM_DEVICE);
753
754         if (!elreq.rsp_sg_cnt) {
755                 ql_log(ql_log_warn, vha, 0x701b,
756                     "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
757                 rval = -ENOMEM;
758                 goto done_unmap_req_sg;
759         }
760
761         if ((elreq.req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
762                 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
763                 ql_log(ql_log_warn, vha, 0x701c,
764                     "dma mapping resulted in different sg counts, "
765                     "request_sg_cnt: %x dma_request_sg_cnt: %x "
766                     "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
767                     bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
768                     bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
769                 rval = -EAGAIN;
770                 goto done_unmap_sg;
771         }
772         req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
773         req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
774                 &req_data_dma, GFP_KERNEL);
775         if (!req_data) {
776                 ql_log(ql_log_warn, vha, 0x701d,
777                     "dma alloc failed for req_data.\n");
778                 rval = -ENOMEM;
779                 goto done_unmap_sg;
780         }
781
782         rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
783                 &rsp_data_dma, GFP_KERNEL);
784         if (!rsp_data) {
785                 ql_log(ql_log_warn, vha, 0x7004,
786                     "dma alloc failed for rsp_data.\n");
787                 rval = -ENOMEM;
788                 goto done_free_dma_req;
789         }
790
791         /* Copy the request buffer in req_data now */
792         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
793                 bsg_job->request_payload.sg_cnt, req_data, req_data_len);
794
795         elreq.send_dma = req_data_dma;
796         elreq.rcv_dma = rsp_data_dma;
797         elreq.transfer_size = req_data_len;
798
799         elreq.options = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
800         elreq.iteration_count =
801             bsg_request->rqst_data.h_vendor.vendor_cmd[2];
802
803         if (atomic_read(&vha->loop_state) == LOOP_READY &&
804             (ha->current_topology == ISP_CFG_F ||
805             (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE &&
806              req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
807             elreq.options == EXTERNAL_LOOPBACK) {
808                 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
809                 ql_dbg(ql_dbg_user, vha, 0x701e,
810                     "BSG request type: %s.\n", type);
811                 command_sent = INT_DEF_LB_ECHO_CMD;
812                 rval = qla2x00_echo_test(vha, &elreq, response);
813         } else {
814                 if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
815                         memset(config, 0, sizeof(config));
816                         memset(new_config, 0, sizeof(new_config));
817
818                         if (qla81xx_get_port_config(vha, config)) {
819                                 ql_log(ql_log_warn, vha, 0x701f,
820                                     "Get port config failed.\n");
821                                 rval = -EPERM;
822                                 goto done_free_dma_rsp;
823                         }
824
825                         if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
826                                 ql_dbg(ql_dbg_user, vha, 0x70c4,
827                                     "Loopback operation already in "
828                                     "progress.\n");
829                                 rval = -EAGAIN;
830                                 goto done_free_dma_rsp;
831                         }
832
833                         ql_dbg(ql_dbg_user, vha, 0x70c0,
834                             "elreq.options=%04x\n", elreq.options);
835
836                         if (elreq.options == EXTERNAL_LOOPBACK)
837                                 if (IS_QLA8031(ha) || IS_QLA8044(ha))
838                                         rval = qla81xx_set_loopback_mode(vha,
839                                             config, new_config, elreq.options);
840                                 else
841                                         rval = qla81xx_reset_loopback_mode(vha,
842                                             config, 1, 0);
843                         else
844                                 rval = qla81xx_set_loopback_mode(vha, config,
845                                     new_config, elreq.options);
846
847                         if (rval) {
848                                 rval = -EPERM;
849                                 goto done_free_dma_rsp;
850                         }
851
852                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
853                         ql_dbg(ql_dbg_user, vha, 0x7028,
854                             "BSG request type: %s.\n", type);
855
856                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
857                         rval = qla2x00_loopback_test(vha, &elreq, response);
858
859                         if (response[0] == MBS_COMMAND_ERROR &&
860                                         response[1] == MBS_LB_RESET) {
861                                 ql_log(ql_log_warn, vha, 0x7029,
862                                     "MBX command error, Aborting ISP.\n");
863                                 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
864                                 qla2xxx_wake_dpc(vha);
865                                 qla2x00_wait_for_chip_reset(vha);
866                                 /* Also reset the MPI */
867                                 if (IS_QLA81XX(ha)) {
868                                         if (qla81xx_restart_mpi_firmware(vha) !=
869                                             QLA_SUCCESS) {
870                                                 ql_log(ql_log_warn, vha, 0x702a,
871                                                     "MPI reset failed.\n");
872                                         }
873                                 }
874
875                                 rval = -EIO;
876                                 goto done_free_dma_rsp;
877                         }
878
879                         if (new_config[0]) {
880                                 int ret;
881
882                                 /* Revert back to original port config
883                                  * Also clear internal loopback
884                                  */
885                                 ret = qla81xx_reset_loopback_mode(vha,
886                                     new_config, 0, 1);
887                                 if (ret) {
888                                         /*
889                                          * If the reset of the loopback mode
890                                          * doesn't work take FCoE dump and then
891                                          * reset the chip.
892                                          */
893                                         ha->isp_ops->fw_dump(vha, 0);
894                                         set_bit(ISP_ABORT_NEEDED,
895                                             &vha->dpc_flags);
896                                 }
897
898                         }
899
900                 } else {
901                         type = "FC_BSG_HST_VENDOR_LOOPBACK";
902                         ql_dbg(ql_dbg_user, vha, 0x702b,
903                             "BSG request type: %s.\n", type);
904                         command_sent = INT_DEF_LB_LOOPBACK_CMD;
905                         rval = qla2x00_loopback_test(vha, &elreq, response);
906                 }
907         }
908
909         if (rval) {
910                 ql_log(ql_log_warn, vha, 0x702c,
911                     "Vendor request %s failed.\n", type);
912
913                 rval = 0;
914                 bsg_reply->result = (DID_ERROR << 16);
915                 bsg_reply->reply_payload_rcv_len = 0;
916         } else {
917                 ql_dbg(ql_dbg_user, vha, 0x702d,
918                     "Vendor request %s completed.\n", type);
919                 bsg_reply->result = (DID_OK << 16);
920                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
921                         bsg_job->reply_payload.sg_cnt, rsp_data,
922                         rsp_data_len);
923         }
924
925         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
926             sizeof(response) + sizeof(uint8_t);
927         fw_sts_ptr = bsg_job->reply + sizeof(struct fc_bsg_reply);
928         memcpy(bsg_job->reply + sizeof(struct fc_bsg_reply), response,
929                         sizeof(response));
930         fw_sts_ptr += sizeof(response);
931         *fw_sts_ptr = command_sent;
932
933 done_free_dma_rsp:
934         dma_free_coherent(&ha->pdev->dev, rsp_data_len,
935                 rsp_data, rsp_data_dma);
936 done_free_dma_req:
937         dma_free_coherent(&ha->pdev->dev, req_data_len,
938                 req_data, req_data_dma);
939 done_unmap_sg:
940         dma_unmap_sg(&ha->pdev->dev,
941             bsg_job->reply_payload.sg_list,
942             bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
943 done_unmap_req_sg:
944         dma_unmap_sg(&ha->pdev->dev,
945             bsg_job->request_payload.sg_list,
946             bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
947         if (!rval)
948                 bsg_job_done(bsg_job, bsg_reply->result,
949                                bsg_reply->reply_payload_rcv_len);
950         return rval;
951 }
952
953 static int
954 qla84xx_reset(struct bsg_job *bsg_job)
955 {
956         struct fc_bsg_request *bsg_request = bsg_job->request;
957         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
958         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
959         scsi_qla_host_t *vha = shost_priv(host);
960         struct qla_hw_data *ha = vha->hw;
961         int rval = 0;
962         uint32_t flag;
963
964         if (!IS_QLA84XX(ha)) {
965                 ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
966                 return -EINVAL;
967         }
968
969         flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
970
971         rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
972
973         if (rval) {
974                 ql_log(ql_log_warn, vha, 0x7030,
975                     "Vendor request 84xx reset failed.\n");
976                 rval = (DID_ERROR << 16);
977
978         } else {
979                 ql_dbg(ql_dbg_user, vha, 0x7031,
980                     "Vendor request 84xx reset completed.\n");
981                 bsg_reply->result = DID_OK;
982                 bsg_job_done(bsg_job, bsg_reply->result,
983                                bsg_reply->reply_payload_rcv_len);
984         }
985
986         return rval;
987 }
988
/*
 * qla84xx_updatefw - BSG vendor command: download a firmware image to
 * an ISP84xx adapter.
 *
 * The image arrives in the BSG request payload.  It is staged in a
 * coherent DMA bounce buffer and handed to the chip with a
 * VERIFY_CHIP IOCB.  vendor_cmd[1] == A84_ISSUE_UPDATE_DIAGFW_CMD
 * selects the diagnostic firmware image.
 *
 * Returns 0 on success (reply completed via bsg_job_done()), a
 * negative errno on setup failure, or a SCSI result code (DID_ERROR)
 * if the IOCB fails.
 */
static int
qla84xx_updatefw(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct verify_chip_entry_84xx *mn = NULL;
	dma_addr_t mn_dma, fw_dma;
	void *fw_buf = NULL;
	int rval = 0;
	uint32_t sg_cnt;
	uint32_t data_len;
	uint16_t options;
	uint32_t flag;
	uint32_t fw_ver;

	if (!IS_QLA84XX(ha)) {
		ql_dbg(ql_dbg_user, vha, 0x7032,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	/* Map the request payload s/g list for device access. */
	sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	if (!sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7033,
		    "dma_map_sg returned %d for request.\n", sg_cnt);
		return -ENOMEM;
	}

	/* A partial mapping cannot be used; ask the caller to retry. */
	if (sg_cnt != bsg_job->request_payload.sg_cnt) {
		ql_log(ql_log_warn, vha, 0x7034,
		    "DMA mapping resulted in different sg counts, "
		    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
		    bsg_job->request_payload.sg_cnt, sg_cnt);
		rval = -EAGAIN;
		goto done_unmap_sg;
	}

	/* Stage the whole image in one contiguous coherent buffer. */
	data_len = bsg_job->request_payload.payload_len;
	fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
		&fw_dma, GFP_KERNEL);
	if (!fw_buf) {
		ql_log(ql_log_warn, vha, 0x7035,
		    "DMA alloc failed for fw_buf.\n");
		rval = -ENOMEM;
		goto done_unmap_sg;
	}

	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, fw_buf, data_len);

	/* IOCB itself comes from the small-allocation DMA pool. */
	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x7036,
		    "DMA alloc failed for fw buffer.\n");
		rval = -ENOMEM;
		goto done_free_fw_buf;
	}

	flag = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	/*
	 * Firmware version word is read from byte offset 8 of the image.
	 * NOTE(review): assumes the payload is at least 12 bytes long —
	 * confirm callers enforce a minimum image size.
	 */
	fw_ver = le32_to_cpu(*((uint32_t *)((uint32_t *)fw_buf + 2)));

	mn->entry_type = VERIFY_CHIP_IOCB_TYPE;
	mn->entry_count = 1;

	options = VCO_FORCE_UPDATE | VCO_END_OF_DATA;
	if (flag == A84_ISSUE_UPDATE_DIAGFW_CMD)
		options |= VCO_DIAG_FW;

	/* Build the VERIFY_CHIP IOCB describing the staged image. */
	mn->options = cpu_to_le16(options);
	mn->fw_ver =  cpu_to_le32(fw_ver);
	mn->fw_size =  cpu_to_le32(data_len);
	mn->fw_seq_size =  cpu_to_le32(data_len);
	mn->dseg_address[0] = cpu_to_le32(LSD(fw_dma));
	mn->dseg_address[1] = cpu_to_le32(MSD(fw_dma));
	mn->dseg_length = cpu_to_le32(data_len);
	mn->data_seg_cnt = cpu_to_le16(1);

	/* Firmware download can take a while: allow up to 120 seconds. */
	rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7037,
		    "Vendor request 84xx updatefw failed.\n");

		rval = (DID_ERROR << 16);
	} else {
		ql_dbg(ql_dbg_user, vha, 0x7038,
		    "Vendor request 84xx updatefw completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;
	}

	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

done_free_fw_buf:
	dma_free_coherent(&ha->pdev->dev, data_len, fw_buf, fw_dma);

done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	/* Only complete the job on success; errors propagate to bsg-lib. */
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}
1099
/*
 * qla84xx_mgmt_cmd - BSG vendor command: ISP84xx management access.
 *
 * Dispatches on the qla_bsg_a84_mgmt sub-command that follows the
 * fc_bsg_request header:
 *   QLA84_MGMT_READ_MEM / QLA84_MGMT_GET_INFO - data flows from the
 *     chip into the reply payload (DMA_FROM_DEVICE),
 *   QLA84_MGMT_WRITE_MEM - data flows from the request payload to the
 *     chip (DMA_TO_DEVICE),
 *   QLA84_MGMT_CHNG_CONFIG - parameter-only, no data transfer.
 *
 * The operation is carried by an ACCESS_CHIP IOCB with a single data
 * segment pointing at a coherent bounce buffer (mgmt_b).
 *
 * Returns 0 on success (reply completed via bsg_job_done()), a
 * negative errno on setup failure, or a SCSI result code (DID_ERROR)
 * if the IOCB fails.
 */
static int
qla84xx_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct access_chip_84xx *mn = NULL;
	dma_addr_t mn_dma, mgmt_dma;
	void *mgmt_b = NULL;
	int rval = 0;
	struct qla_bsg_a84_mgmt *ql84_mgmt;
	uint32_t sg_cnt;
	uint32_t data_len = 0;
	/* Remembers which payload (if any) was mapped, for cleanup. */
	uint32_t dma_direction = DMA_NONE;

	if (!IS_QLA84XX(ha)) {
		ql_log(ql_log_warn, vha, 0x703a,
		    "Not 84xx, exiting.\n");
		return -EINVAL;
	}

	/* ACCESS_CHIP IOCB comes from the small-allocation DMA pool. */
	mn = dma_pool_zalloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
	if (!mn) {
		ql_log(ql_log_warn, vha, 0x703c,
		    "DMA alloc failed for fw buffer.\n");
		return -ENOMEM;
	}

	mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
	mn->entry_count = 1;
	/* Vendor-specific management block follows the BSG header. */
	ql84_mgmt = (void *)bsg_request + sizeof(struct fc_bsg_request);
	switch (ql84_mgmt->mgmt.cmd) {
	case QLA84_MGMT_READ_MEM:
	case QLA84_MGMT_GET_INFO:
		/* Chip-to-host transfer: map the reply payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703d,
			    "dma_map_sg returned %d for reply.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_FROM_DEVICE;

		/* Partial mappings are unusable; ask caller to retry. */
		if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x703e,
			    "DMA mapping resulted in different sg counts, "
			    "reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
			    bsg_job->reply_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->reply_payload.payload_len;

		/* Bounce buffer the chip will DMA the result into. */
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
		    &mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x703f,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) {
			mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
			mn->parameter1 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);

		} else if (ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO) {
			mn->options = cpu_to_le16(ACO_REQUEST_INFO);
			mn->parameter1 =
				cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.info.type);

			mn->parameter2 =
				cpu_to_le32(
				ql84_mgmt->mgmt.mgmtp.u.info.context);
		}
		break;

	case QLA84_MGMT_WRITE_MEM:
		/* Host-to-chip transfer: map the request payload. */
		sg_cnt = dma_map_sg(&ha->pdev->dev,
			bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

		if (!sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7040,
			    "dma_map_sg returned %d.\n", sg_cnt);
			rval = -ENOMEM;
			goto exit_mgmt;
		}

		dma_direction = DMA_TO_DEVICE;

		if (sg_cnt != bsg_job->request_payload.sg_cnt) {
			ql_log(ql_log_warn, vha, 0x7041,
			    "DMA mapping resulted in different sg counts, "
			    "request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
			    bsg_job->request_payload.sg_cnt, sg_cnt);
			rval = -EAGAIN;
			goto done_unmap_sg;
		}

		data_len = bsg_job->request_payload.payload_len;
		mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
			&mgmt_dma, GFP_KERNEL);
		if (!mgmt_b) {
			ql_log(ql_log_warn, vha, 0x7042,
			    "DMA alloc failed for mgmt_b.\n");
			rval = -ENOMEM;
			goto done_unmap_sg;
		}

		/* Stage the user data in the bounce buffer. */
		sg_copy_to_buffer(bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, mgmt_b, data_len);

		mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.mem.start_addr);
		break;

	case QLA84_MGMT_CHNG_CONFIG:
		/* Parameter-only command: no payload mapping needed. */
		mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
		mn->parameter1 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.id);

		mn->parameter2 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param0);

		mn->parameter3 =
			cpu_to_le32(ql84_mgmt->mgmt.mgmtp.u.config.param1);
		break;

	default:
		rval = -EIO;
		goto exit_mgmt;
	}

	/* All data-carrying commands use one segment -> bounce buffer. */
	if (ql84_mgmt->mgmt.cmd != QLA84_MGMT_CHNG_CONFIG) {
		mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->mgmt.len);
		mn->dseg_count = cpu_to_le16(1);
		mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
		mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
		mn->dseg_length = cpu_to_le32(ql84_mgmt->mgmt.len);
	}

	rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);

	if (rval) {
		ql_log(ql_log_warn, vha, 0x7043,
		    "Vendor request 84xx mgmt failed.\n");

		rval = (DID_ERROR << 16);

	} else {
		ql_dbg(ql_dbg_user, vha, 0x7044,
		    "Vendor request 84xx mgmt completed.\n");

		bsg_job->reply_len = sizeof(struct fc_bsg_reply);
		bsg_reply->result = DID_OK;

		/* For read-type commands, copy the result to userspace. */
		if ((ql84_mgmt->mgmt.cmd == QLA84_MGMT_READ_MEM) ||
			(ql84_mgmt->mgmt.cmd == QLA84_MGMT_GET_INFO)) {
			bsg_reply->reply_payload_rcv_len =
				bsg_job->reply_payload.payload_len;

			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
				bsg_job->reply_payload.sg_cnt, mgmt_b,
				data_len);
		}
	}

done_unmap_sg:
	if (mgmt_b)
		dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);

	/* Unmap whichever payload was mapped above (if any). */
	if (dma_direction == DMA_TO_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
			bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	else if (dma_direction == DMA_FROM_DEVICE)
		dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

exit_mgmt:
	dma_pool_free(ha->s_dma_pool, mn, mn_dma);

	/* Only complete the job on success; errors propagate to bsg-lib. */
	if (!rval)
		bsg_job_done(bsg_job, bsg_reply->result,
			       bsg_reply->reply_payload_rcv_len);
	return rval;
}
1296
1297 static int
1298 qla24xx_iidma(struct bsg_job *bsg_job)
1299 {
1300         struct fc_bsg_request *bsg_request = bsg_job->request;
1301         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1302         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1303         scsi_qla_host_t *vha = shost_priv(host);
1304         int rval = 0;
1305         struct qla_port_param *port_param = NULL;
1306         fc_port_t *fcport = NULL;
1307         int found = 0;
1308         uint16_t mb[MAILBOX_REGISTER_COUNT];
1309         uint8_t *rsp_ptr = NULL;
1310
1311         if (!IS_IIDMA_CAPABLE(vha->hw)) {
1312                 ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
1313                 return -EINVAL;
1314         }
1315
1316         port_param = (void *)bsg_request + sizeof(struct fc_bsg_request);
1317         if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
1318                 ql_log(ql_log_warn, vha, 0x7048,
1319                     "Invalid destination type.\n");
1320                 return -EINVAL;
1321         }
1322
1323         list_for_each_entry(fcport, &vha->vp_fcports, list) {
1324                 if (fcport->port_type != FCT_TARGET)
1325                         continue;
1326
1327                 if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
1328                         fcport->port_name, sizeof(fcport->port_name)))
1329                         continue;
1330
1331                 found = 1;
1332                 break;
1333         }
1334
1335         if (!found) {
1336                 ql_log(ql_log_warn, vha, 0x7049,
1337                     "Failed to find port.\n");
1338                 return -EINVAL;
1339         }
1340
1341         if (atomic_read(&fcport->state) != FCS_ONLINE) {
1342                 ql_log(ql_log_warn, vha, 0x704a,
1343                     "Port is not online.\n");
1344                 return -EINVAL;
1345         }
1346
1347         if (fcport->flags & FCF_LOGIN_NEEDED) {
1348                 ql_log(ql_log_warn, vha, 0x704b,
1349                     "Remote port not logged in flags = 0x%x.\n", fcport->flags);
1350                 return -EINVAL;
1351         }
1352
1353         if (port_param->mode)
1354                 rval = qla2x00_set_idma_speed(vha, fcport->loop_id,
1355                         port_param->speed, mb);
1356         else
1357                 rval = qla2x00_get_idma_speed(vha, fcport->loop_id,
1358                         &port_param->speed, mb);
1359
1360         if (rval) {
1361                 ql_log(ql_log_warn, vha, 0x704c,
1362                     "iIDMA cmd failed for %8phN -- "
1363                     "%04x %x %04x %04x.\n", fcport->port_name,
1364                     rval, fcport->fp_speed, mb[0], mb[1]);
1365                 rval = (DID_ERROR << 16);
1366         } else {
1367                 if (!port_param->mode) {
1368                         bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
1369                                 sizeof(struct qla_port_param);
1370
1371                         rsp_ptr = ((uint8_t *)bsg_reply) +
1372                                 sizeof(struct fc_bsg_reply);
1373
1374                         memcpy(rsp_ptr, port_param,
1375                                 sizeof(struct qla_port_param));
1376                 }
1377
1378                 bsg_reply->result = DID_OK;
1379                 bsg_job_done(bsg_job, bsg_reply->result,
1380                                bsg_reply->reply_payload_rcv_len);
1381         }
1382
1383         return rval;
1384 }
1385
/*
 * qla2x00_optrom_setup - validate an option-ROM access request and
 * stage a scratch buffer for it.
 *
 * vendor_cmd[1] carries the flash offset; is_update selects write
 * (request payload) versus read (reply payload) sizing.  On success
 * ha->optrom_region_start/size, ha->optrom_buffer and ha->optrom_state
 * (QLA_SWRITING or QLA_SREADING) are set up; the caller is responsible
 * for releasing the buffer and restoring QLA_SWAITING.
 *
 * Caller must hold ha->optrom_mutex.
 *
 * Returns 0 on success, -EINVAL on bad offset/region or an offline PCI
 * channel, -EBUSY if another option-ROM access is in flight, -ENOMEM
 * if the staging buffer cannot be allocated.
 */
static int
qla2x00_optrom_setup(struct bsg_job *bsg_job, scsi_qla_host_t *vha,
	uint8_t is_update)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	uint32_t start = 0;
	int valid = 0;
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -EINVAL;

	start = bsg_request->rqst_data.h_vendor.vendor_cmd[1];
	/*
	 * NOTE(review): start == ha->optrom_size passes this check and
	 * yields a zero-length region below — confirm that is intended.
	 */
	if (start > ha->optrom_size) {
		ql_log(ql_log_warn, vha, 0x7055,
		    "start %d > optrom_size %d.\n", start, ha->optrom_size);
		return -EINVAL;
	}

	/* Only one option-ROM access may be staged at a time. */
	if (ha->optrom_state != QLA_SWAITING) {
		ql_log(ql_log_info, vha, 0x7056,
		    "optrom_state %d.\n", ha->optrom_state);
		return -EBUSY;
	}

	ha->optrom_region_start = start;
	ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
	if (is_update) {
		/* Writes are restricted to known-safe flash regions. */
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
			valid = 1;
		if (!valid) {
			ql_log(ql_log_warn, vha, 0x7058,
			    "Invalid start region 0x%x/0x%x.\n", start,
			    bsg_job->request_payload.payload_len);
			return -EINVAL;
		}

		/* Clamp the region so it never runs past end of flash. */
		ha->optrom_region_size = start +
		    bsg_job->request_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->request_payload.payload_len;
		ha->optrom_state = QLA_SWRITING;
	} else {
		ha->optrom_region_size = start +
		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
		    ha->optrom_size - start :
		    bsg_job->reply_payload.payload_len;
		ha->optrom_state = QLA_SREADING;
	}

	ha->optrom_buffer = vzalloc(ha->optrom_region_size);
	if (!ha->optrom_buffer) {
		ql_log(ql_log_warn, vha, 0x7059,
		    "Read: Unable to allocate memory for optrom retrieval "
		    "(%x)\n", ha->optrom_region_size);

		/* Roll back so a later request can proceed. */
		ha->optrom_state = QLA_SWAITING;
		return -ENOMEM;
	}

	return 0;
}
1454
1455 static int
1456 qla2x00_read_optrom(struct bsg_job *bsg_job)
1457 {
1458         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1459         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1460         scsi_qla_host_t *vha = shost_priv(host);
1461         struct qla_hw_data *ha = vha->hw;
1462         int rval = 0;
1463
1464         if (ha->flags.nic_core_reset_hdlr_active)
1465                 return -EBUSY;
1466
1467         mutex_lock(&ha->optrom_mutex);
1468         rval = qla2x00_optrom_setup(bsg_job, vha, 0);
1469         if (rval) {
1470                 mutex_unlock(&ha->optrom_mutex);
1471                 return rval;
1472         }
1473
1474         ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
1475             ha->optrom_region_start, ha->optrom_region_size);
1476
1477         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1478             bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
1479             ha->optrom_region_size);
1480
1481         bsg_reply->reply_payload_rcv_len = ha->optrom_region_size;
1482         bsg_reply->result = DID_OK;
1483         vfree(ha->optrom_buffer);
1484         ha->optrom_buffer = NULL;
1485         ha->optrom_state = QLA_SWAITING;
1486         mutex_unlock(&ha->optrom_mutex);
1487         bsg_job_done(bsg_job, bsg_reply->result,
1488                        bsg_reply->reply_payload_rcv_len);
1489         return rval;
1490 }
1491
1492 static int
1493 qla2x00_update_optrom(struct bsg_job *bsg_job)
1494 {
1495         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1496         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1497         scsi_qla_host_t *vha = shost_priv(host);
1498         struct qla_hw_data *ha = vha->hw;
1499         int rval = 0;
1500
1501         mutex_lock(&ha->optrom_mutex);
1502         rval = qla2x00_optrom_setup(bsg_job, vha, 1);
1503         if (rval) {
1504                 mutex_unlock(&ha->optrom_mutex);
1505                 return rval;
1506         }
1507
1508         /* Set the isp82xx_no_md_cap not to capture minidump */
1509         ha->flags.isp82xx_no_md_cap = 1;
1510
1511         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1512             bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1513             ha->optrom_region_size);
1514
1515         ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
1516             ha->optrom_region_start, ha->optrom_region_size);
1517
1518         bsg_reply->result = DID_OK;
1519         vfree(ha->optrom_buffer);
1520         ha->optrom_buffer = NULL;
1521         ha->optrom_state = QLA_SWAITING;
1522         mutex_unlock(&ha->optrom_mutex);
1523         bsg_job_done(bsg_job, bsg_reply->result,
1524                        bsg_reply->reply_payload_rcv_len);
1525         return rval;
1526 }
1527
1528 static int
1529 qla2x00_update_fru_versions(struct bsg_job *bsg_job)
1530 {
1531         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1532         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1533         scsi_qla_host_t *vha = shost_priv(host);
1534         struct qla_hw_data *ha = vha->hw;
1535         int rval = 0;
1536         uint8_t bsg[DMA_POOL_SIZE];
1537         struct qla_image_version_list *list = (void *)bsg;
1538         struct qla_image_version *image;
1539         uint32_t count;
1540         dma_addr_t sfp_dma;
1541         void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1542         if (!sfp) {
1543                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1544                     EXT_STATUS_NO_MEMORY;
1545                 goto done;
1546         }
1547
1548         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1549             bsg_job->request_payload.sg_cnt, list, sizeof(bsg));
1550
1551         image = list->version;
1552         count = list->count;
1553         while (count--) {
1554                 memcpy(sfp, &image->field_info, sizeof(image->field_info));
1555                 rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1556                     image->field_address.device, image->field_address.offset,
1557                     sizeof(image->field_info), image->field_address.option);
1558                 if (rval) {
1559                         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1560                             EXT_STATUS_MAILBOX;
1561                         goto dealloc;
1562                 }
1563                 image++;
1564         }
1565
1566         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1567
1568 dealloc:
1569         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1570
1571 done:
1572         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1573         bsg_reply->result = DID_OK << 16;
1574         bsg_job_done(bsg_job, bsg_reply->result,
1575                        bsg_reply->reply_payload_rcv_len);
1576
1577         return 0;
1578 }
1579
1580 static int
1581 qla2x00_read_fru_status(struct bsg_job *bsg_job)
1582 {
1583         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1584         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1585         scsi_qla_host_t *vha = shost_priv(host);
1586         struct qla_hw_data *ha = vha->hw;
1587         int rval = 0;
1588         uint8_t bsg[DMA_POOL_SIZE];
1589         struct qla_status_reg *sr = (void *)bsg;
1590         dma_addr_t sfp_dma;
1591         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1592         if (!sfp) {
1593                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1594                     EXT_STATUS_NO_MEMORY;
1595                 goto done;
1596         }
1597
1598         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1599             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1600
1601         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1602             sr->field_address.device, sr->field_address.offset,
1603             sizeof(sr->status_reg), sr->field_address.option);
1604         sr->status_reg = *sfp;
1605
1606         if (rval) {
1607                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1608                     EXT_STATUS_MAILBOX;
1609                 goto dealloc;
1610         }
1611
1612         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1613             bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr));
1614
1615         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1616
1617 dealloc:
1618         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1619
1620 done:
1621         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1622         bsg_reply->reply_payload_rcv_len = sizeof(*sr);
1623         bsg_reply->result = DID_OK << 16;
1624         bsg_job_done(bsg_job, bsg_reply->result,
1625                        bsg_reply->reply_payload_rcv_len);
1626
1627         return 0;
1628 }
1629
1630 static int
1631 qla2x00_write_fru_status(struct bsg_job *bsg_job)
1632 {
1633         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1634         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1635         scsi_qla_host_t *vha = shost_priv(host);
1636         struct qla_hw_data *ha = vha->hw;
1637         int rval = 0;
1638         uint8_t bsg[DMA_POOL_SIZE];
1639         struct qla_status_reg *sr = (void *)bsg;
1640         dma_addr_t sfp_dma;
1641         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1642         if (!sfp) {
1643                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1644                     EXT_STATUS_NO_MEMORY;
1645                 goto done;
1646         }
1647
1648         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1649             bsg_job->request_payload.sg_cnt, sr, sizeof(*sr));
1650
1651         *sfp = sr->status_reg;
1652         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1653             sr->field_address.device, sr->field_address.offset,
1654             sizeof(sr->status_reg), sr->field_address.option);
1655
1656         if (rval) {
1657                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1658                     EXT_STATUS_MAILBOX;
1659                 goto dealloc;
1660         }
1661
1662         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1663
1664 dealloc:
1665         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1666
1667 done:
1668         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1669         bsg_reply->result = DID_OK << 16;
1670         bsg_job_done(bsg_job, bsg_reply->result,
1671                        bsg_reply->reply_payload_rcv_len);
1672
1673         return 0;
1674 }
1675
1676 static int
1677 qla2x00_write_i2c(struct bsg_job *bsg_job)
1678 {
1679         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1680         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1681         scsi_qla_host_t *vha = shost_priv(host);
1682         struct qla_hw_data *ha = vha->hw;
1683         int rval = 0;
1684         uint8_t bsg[DMA_POOL_SIZE];
1685         struct qla_i2c_access *i2c = (void *)bsg;
1686         dma_addr_t sfp_dma;
1687         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1688         if (!sfp) {
1689                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1690                     EXT_STATUS_NO_MEMORY;
1691                 goto done;
1692         }
1693
1694         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1695             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1696
1697         memcpy(sfp, i2c->buffer, i2c->length);
1698         rval = qla2x00_write_sfp(vha, sfp_dma, sfp,
1699             i2c->device, i2c->offset, i2c->length, i2c->option);
1700
1701         if (rval) {
1702                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1703                     EXT_STATUS_MAILBOX;
1704                 goto dealloc;
1705         }
1706
1707         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1708
1709 dealloc:
1710         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1711
1712 done:
1713         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1714         bsg_reply->result = DID_OK << 16;
1715         bsg_job_done(bsg_job, bsg_reply->result,
1716                        bsg_reply->reply_payload_rcv_len);
1717
1718         return 0;
1719 }
1720
1721 static int
1722 qla2x00_read_i2c(struct bsg_job *bsg_job)
1723 {
1724         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
1725         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
1726         scsi_qla_host_t *vha = shost_priv(host);
1727         struct qla_hw_data *ha = vha->hw;
1728         int rval = 0;
1729         uint8_t bsg[DMA_POOL_SIZE];
1730         struct qla_i2c_access *i2c = (void *)bsg;
1731         dma_addr_t sfp_dma;
1732         uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma);
1733         if (!sfp) {
1734                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1735                     EXT_STATUS_NO_MEMORY;
1736                 goto done;
1737         }
1738
1739         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1740             bsg_job->request_payload.sg_cnt, i2c, sizeof(*i2c));
1741
1742         rval = qla2x00_read_sfp(vha, sfp_dma, sfp,
1743                 i2c->device, i2c->offset, i2c->length, i2c->option);
1744
1745         if (rval) {
1746                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
1747                     EXT_STATUS_MAILBOX;
1748                 goto dealloc;
1749         }
1750
1751         memcpy(i2c->buffer, sfp, i2c->length);
1752         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
1753             bsg_job->reply_payload.sg_cnt, i2c, sizeof(*i2c));
1754
1755         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = 0;
1756
1757 dealloc:
1758         dma_pool_free(ha->s_dma_pool, sfp, sfp_dma);
1759
1760 done:
1761         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
1762         bsg_reply->reply_payload_rcv_len = sizeof(*i2c);
1763         bsg_reply->result = DID_OK << 16;
1764         bsg_job_done(bsg_job, bsg_reply->result,
1765                        bsg_reply->reply_payload_rcv_len);
1766
1767         return 0;
1768 }
1769
/*
 * qla24xx_process_bidir_cmd() - BSG handler: run a bidirectional
 * (loopback-style) command through the adapter.
 *
 * Preconditions checked here: BIDI-capable adapter, no ISP reset in
 * flight, host online, cable present, fabric (ISP_CFG_F) topology and
 * P2P operating mode.  On first use the function performs a fabric
 * self-login (under ha->selflogin_lock) and caches the loop id in
 * vha->self_login_loop_id for subsequent calls.
 *
 * Request and reply payloads are DMA-mapped, an SRB is built around the
 * bsg job and handed to qla2x00_start_bidir(); on success the job is
 * completed later from the interrupt path (qla2x00_bsg_job_done).  On
 * any failure the mappings/SRB are unwound and the job is completed
 * here with the extended status in vendor_rsp[0] (result stays DID_OK).
 */
static int
qla24xx_process_bidir_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	uint32_t rval = EXT_STATUS_OK;
	uint16_t req_sg_cnt = 0;
	uint16_t rsp_sg_cnt = 0;
	uint16_t nextlid = 0;
	uint32_t tot_dsds;
	srb_t *sp = NULL;
	uint32_t req_data_len;
	uint32_t rsp_data_len;

	/* Check the type of the adapter */
	if (!IS_BIDI_CAPABLE(ha)) {
		ql_log(ql_log_warn, vha, 0x70a0,
			"This adapter is not supported\n");
		rval = EXT_STATUS_NOT_SUPPORTED;
		goto done;
	}

	/* Reject while an ISP abort/reset is pending or active. */
	if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
		test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
		test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
		rval =  EXT_STATUS_BUSY;
		goto done;
	}

	/* Check if host is online */
	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70a1,
			"Host is not online\n");
		rval = EXT_STATUS_DEVICE_OFFLINE;
		goto done;
	}

	/* Check if cable is plugged in or not */
	if (vha->device_flags & DFLG_NO_CABLE) {
		ql_log(ql_log_warn, vha, 0x70a2,
			"Cable is unplugged...\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if the switch is connected or not */
	if (ha->current_topology != ISP_CFG_F) {
		ql_log(ql_log_warn, vha, 0x70a3,
			"Host is not connected to the switch\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* Check if operating mode is P2P */
	if (ha->operating_mode != P2P) {
		ql_log(ql_log_warn, vha, 0x70a4,
		    "Host operating mode is not P2p\n");
		rval = EXT_STATUS_INVALID_CFG;
		goto done;
	}

	/* One-time fabric self-login; loop id 0 means not yet logged in. */
	mutex_lock(&ha->selflogin_lock);
	if (vha->self_login_loop_id == 0) {
		/* Initialize all required  fields of fcport */
		vha->bidir_fcport.vha = vha;
		vha->bidir_fcport.d_id.b.al_pa = vha->d_id.b.al_pa;
		vha->bidir_fcport.d_id.b.area = vha->d_id.b.area;
		vha->bidir_fcport.d_id.b.domain = vha->d_id.b.domain;
		vha->bidir_fcport.loop_id = vha->loop_id;

		if (qla2x00_fabric_login(vha, &(vha->bidir_fcport), &nextlid)) {
			ql_log(ql_log_warn, vha, 0x70a7,
			    "Failed to login port %06X for bidirectional IOCB\n",
			    vha->bidir_fcport.d_id.b24);
			mutex_unlock(&ha->selflogin_lock);
			rval = EXT_STATUS_MAILBOX;
			goto done;
		}
		/* nextlid points one past the id actually used. */
		vha->self_login_loop_id = nextlid - 1;

	}
	/* Assign the self login loop id to fcport */
	mutex_unlock(&ha->selflogin_lock);

	vha->bidir_fcport.loop_id = vha->self_login_loop_id;

	req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->request_payload.sg_list,
		bsg_job->request_payload.sg_cnt,
		DMA_TO_DEVICE);

	if (!req_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done;
	}

	rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
		DMA_FROM_DEVICE);

	if (!rsp_sg_cnt) {
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_req_sg;
	}

	/* IOCB building assumes a 1:1 mapping; bail if the IOMMU merged. */
	if ((req_sg_cnt !=  bsg_job->request_payload.sg_cnt) ||
		(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
		ql_dbg(ql_dbg_user, vha, 0x70a9,
		    "Dma mapping resulted in different sg counts "
		    "[request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt: "
		    "%x dma_reply_sg_cnt: %x]\n",
		    bsg_job->request_payload.sg_cnt, req_sg_cnt,
		    bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	req_data_len = bsg_job->request_payload.payload_len;
	rsp_data_len = bsg_job->reply_payload.payload_len;

	/* Bidirectional transfers must be symmetric in length. */
	if (req_data_len != rsp_data_len) {
		rval = EXT_STATUS_BUSY;
		ql_log(ql_log_warn, vha, 0x70aa,
		    "req_data_len != rsp_data_len\n");
		goto done_unmap_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL);
	if (!sp) {
		ql_dbg(ql_dbg_user, vha, 0x70ac,
		    "Alloc SRB structure failed\n");
		rval = EXT_STATUS_NO_MEMORY;
		goto done_unmap_sg;
	}

	/*Populate srb->ctx with bidir ctx*/
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->type = SRB_BIDI_CMD;
	sp->done = qla2x00_bsg_job_done;

	/* Add the read and write sg count */
	tot_dsds = rsp_sg_cnt + req_sg_cnt;

	rval = qla2x00_start_bidir(sp, vha, tot_dsds);
	if (rval != EXT_STATUS_OK)
		goto done_free_srb;
	/* the bsg request  will be completed in the interrupt handler */
	return rval;

done_free_srb:
	mempool_free(sp, ha->srb_mempool);
done_unmap_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
done:

	/* Return an error vendor specific response
	 * and complete the bsg request
	 */
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	bsg_reply->reply_payload_rcv_len = 0;
	bsg_reply->result = (DID_OK) << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);
	/* Always return success, vendor rsp carries correct status */
	return 0;
}
1947
/*
 * qlafx00_mgmt_cmd() - BSG handler: submit an ISPFX00 management IOCB
 * (FC_BSG_HST_FX_MGMT) built from the vendor command payload.
 *
 * The IOCB request block sits at vendor_cmd[1] in the bsg request.  Its
 * flags select which payloads need DMA mapping (SRB_FXDISC_REQ_DMA_VALID
 * / SRB_FXDISC_RESP_DMA_VALID); qla2x00_bsg_sp_free() later unmaps using
 * the same flags.  A throwaway fcport is allocated because the IOCB/
 * mailbox preparation code reads port data from an fcport, and host-
 * based commands have none.
 *
 * On successful qla2x00_start_sp() the job completes from the interrupt
 * path; otherwise resources are unwound here and a negative errno is
 * returned to the BSG dispatcher.
 */
static int
qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	int rval = (DID_ERROR << 16);
	struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
	srb_t *sp;
	int req_sg_cnt = 0, rsp_sg_cnt = 0;
	struct fc_port *fcport;
	char  *type = "FC_BSG_HST_FX_MGMT";

	/* Copy the IOCB specific information */
	piocb_rqst = (struct qla_mt_iocb_rqst_fx00 *)
	    &bsg_request->rqst_data.h_vendor.vendor_cmd[1];

	/* Dump the vendor information */
	ql_dump_buffer(ql_dbg_user + ql_dbg_verbose , vha, 0x70cf,
	    (uint8_t *)piocb_rqst, sizeof(struct qla_mt_iocb_rqst_fx00));

	if (!vha->flags.online) {
		ql_log(ql_log_warn, vha, 0x70d0,
		    "Host is not online.\n");
		rval = -EIO;
		goto done;
	}

	/* Map the request payload only when the IOCB says it carries data. */
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID) {
		req_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
		if (!req_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c7,
			    "dma_map_sg return %d for request\n", req_sg_cnt);
			rval = -ENOMEM;
			goto done;
		}
	}

	/* Likewise for the response payload. */
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) {
		rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
		if (!rsp_sg_cnt) {
			ql_log(ql_log_warn, vha, 0x70c8,
			    "dma_map_sg return %d for reply\n", rsp_sg_cnt);
			rval = -ENOMEM;
			goto done_unmap_req_sg;
		}
	}

	ql_dbg(ql_dbg_user, vha, 0x70c9,
	    "request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
	    "dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
	    req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);

	/* Allocate a dummy fcport structure, since functions preparing the
	 * IOCB and mailbox command retrieves port specific information
	 * from fcport structure. For Host based ELS commands there will be
	 * no fcport structure allocated
	 */
	fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
	if (!fcport) {
		ql_log(ql_log_warn, vha, 0x70ca,
		    "Failed to allocate fcport.\n");
		rval = -ENOMEM;
		goto done_unmap_rsp_sg;
	}

	/* Alloc SRB structure */
	sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x70cb,
		    "qla2x00_get_sp failed.\n");
		rval = -ENOMEM;
		goto done_free_fcport;
	}

	/* Initialize all required  fields of fcport */
	fcport->vha = vha;
	/* dataword carries the target loop id for this management command. */
	fcport->loop_id = piocb_rqst->dataword;

	sp->type = SRB_FXIOCB_BCMD;
	sp->name = "bsg_fx_mgmt";
	sp->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
	sp->u.bsg_job = bsg_job;
	sp->free = qla2x00_bsg_sp_free;
	sp->done = qla2x00_bsg_job_done;

	ql_dbg(ql_dbg_user, vha, 0x70cc,
	    "bsg rqst type: %s fx_mgmt_type: %x id=%x\n",
	    type, piocb_rqst->func_type, fcport->loop_id);

	rval = qla2x00_start_sp(sp);
	if (rval != QLA_SUCCESS) {
		ql_log(ql_log_warn, vha, 0x70cd,
		    "qla2x00_start_sp failed=%d.\n", rval);
		mempool_free(sp, ha->srb_mempool);
		rval = -EIO;
		goto done_free_fcport;
	}
	return rval;

done_free_fcport:
	/* NOTE(review): frees with kfree() while allocation went through
	 * qla2x00_alloc_fcport() — presumably a plain kzalloc wrapper here,
	 * but confirm no ctype/refcount teardown is being skipped. */
	kfree(fcport);

done_unmap_rsp_sg:
	if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->reply_payload.sg_list,
		    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
done_unmap_req_sg:
	if (piocb_rqst->flags & SRB_FXDISC_REQ_DMA_VALID)
		dma_unmap_sg(&ha->pdev->dev,
		    bsg_job->request_payload.sg_list,
		    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

done:
	return rval;
}
2070
2071 static int
2072 qla26xx_serdes_op(struct bsg_job *bsg_job)
2073 {
2074         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2075         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2076         scsi_qla_host_t *vha = shost_priv(host);
2077         int rval = 0;
2078         struct qla_serdes_reg sr;
2079
2080         memset(&sr, 0, sizeof(sr));
2081
2082         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2083             bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2084
2085         switch (sr.cmd) {
2086         case INT_SC_SERDES_WRITE_REG:
2087                 rval = qla2x00_write_serdes_word(vha, sr.addr, sr.val);
2088                 bsg_reply->reply_payload_rcv_len = 0;
2089                 break;
2090         case INT_SC_SERDES_READ_REG:
2091                 rval = qla2x00_read_serdes_word(vha, sr.addr, &sr.val);
2092                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2093                     bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2094                 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2095                 break;
2096         default:
2097                 ql_dbg(ql_dbg_user, vha, 0x708c,
2098                     "Unknown serdes cmd %x.\n", sr.cmd);
2099                 rval = -EINVAL;
2100                 break;
2101         }
2102
2103         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2104             rval ? EXT_STATUS_MAILBOX : 0;
2105
2106         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2107         bsg_reply->result = DID_OK << 16;
2108         bsg_job_done(bsg_job, bsg_reply->result,
2109                        bsg_reply->reply_payload_rcv_len);
2110         return 0;
2111 }
2112
2113 static int
2114 qla8044_serdes_op(struct bsg_job *bsg_job)
2115 {
2116         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2117         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2118         scsi_qla_host_t *vha = shost_priv(host);
2119         int rval = 0;
2120         struct qla_serdes_reg_ex sr;
2121
2122         memset(&sr, 0, sizeof(sr));
2123
2124         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2125             bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
2126
2127         switch (sr.cmd) {
2128         case INT_SC_SERDES_WRITE_REG:
2129                 rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
2130                 bsg_reply->reply_payload_rcv_len = 0;
2131                 break;
2132         case INT_SC_SERDES_READ_REG:
2133                 rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
2134                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2135                     bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
2136                 bsg_reply->reply_payload_rcv_len = sizeof(sr);
2137                 break;
2138         default:
2139                 ql_dbg(ql_dbg_user, vha, 0x7020,
2140                     "Unknown serdes cmd %x.\n", sr.cmd);
2141                 rval = -EINVAL;
2142                 break;
2143         }
2144
2145         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2146             rval ? EXT_STATUS_MAILBOX : 0;
2147
2148         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2149         bsg_reply->result = DID_OK << 16;
2150         bsg_job_done(bsg_job, bsg_reply->result,
2151                        bsg_reply->reply_payload_rcv_len);
2152         return 0;
2153 }
2154
2155 static int
2156 qla27xx_get_flash_upd_cap(struct bsg_job *bsg_job)
2157 {
2158         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2159         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2160         scsi_qla_host_t *vha = shost_priv(host);
2161         struct qla_hw_data *ha = vha->hw;
2162         struct qla_flash_update_caps cap;
2163
2164         if (!(IS_QLA27XX(ha)))
2165                 return -EPERM;
2166
2167         memset(&cap, 0, sizeof(cap));
2168         cap.capabilities = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2169                            (uint64_t)ha->fw_attributes_ext[0] << 32 |
2170                            (uint64_t)ha->fw_attributes_h << 16 |
2171                            (uint64_t)ha->fw_attributes;
2172
2173         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2174             bsg_job->reply_payload.sg_cnt, &cap, sizeof(cap));
2175         bsg_reply->reply_payload_rcv_len = sizeof(cap);
2176
2177         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2178             EXT_STATUS_OK;
2179
2180         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2181         bsg_reply->result = DID_OK << 16;
2182         bsg_job_done(bsg_job, bsg_reply->result,
2183                        bsg_reply->reply_payload_rcv_len);
2184         return 0;
2185 }
2186
2187 static int
2188 qla27xx_set_flash_upd_cap(struct bsg_job *bsg_job)
2189 {
2190         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2191         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2192         scsi_qla_host_t *vha = shost_priv(host);
2193         struct qla_hw_data *ha = vha->hw;
2194         uint64_t online_fw_attr = 0;
2195         struct qla_flash_update_caps cap;
2196
2197         if (!(IS_QLA27XX(ha)))
2198                 return -EPERM;
2199
2200         memset(&cap, 0, sizeof(cap));
2201         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2202             bsg_job->request_payload.sg_cnt, &cap, sizeof(cap));
2203
2204         online_fw_attr = (uint64_t)ha->fw_attributes_ext[1] << 48 |
2205                          (uint64_t)ha->fw_attributes_ext[0] << 32 |
2206                          (uint64_t)ha->fw_attributes_h << 16 |
2207                          (uint64_t)ha->fw_attributes;
2208
2209         if (online_fw_attr != cap.capabilities) {
2210                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2211                     EXT_STATUS_INVALID_PARAM;
2212                 return -EINVAL;
2213         }
2214
2215         if (cap.outage_duration < MAX_LOOP_TIMEOUT)  {
2216                 bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2217                     EXT_STATUS_INVALID_PARAM;
2218                 return -EINVAL;
2219         }
2220
2221         bsg_reply->reply_payload_rcv_len = 0;
2222
2223         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2224             EXT_STATUS_OK;
2225
2226         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2227         bsg_reply->result = DID_OK << 16;
2228         bsg_job_done(bsg_job, bsg_reply->result,
2229                        bsg_reply->reply_payload_rcv_len);
2230         return 0;
2231 }
2232
2233 static int
2234 qla27xx_get_bbcr_data(struct bsg_job *bsg_job)
2235 {
2236         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2237         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2238         scsi_qla_host_t *vha = shost_priv(host);
2239         struct qla_hw_data *ha = vha->hw;
2240         struct qla_bbcr_data bbcr;
2241         uint16_t loop_id, topo, sw_cap;
2242         uint8_t domain, area, al_pa, state;
2243         int rval;
2244
2245         if (!(IS_QLA27XX(ha)))
2246                 return -EPERM;
2247
2248         memset(&bbcr, 0, sizeof(bbcr));
2249
2250         if (vha->flags.bbcr_enable)
2251                 bbcr.status = QLA_BBCR_STATUS_ENABLED;
2252         else
2253                 bbcr.status = QLA_BBCR_STATUS_DISABLED;
2254
2255         if (bbcr.status == QLA_BBCR_STATUS_ENABLED) {
2256                 rval = qla2x00_get_adapter_id(vha, &loop_id, &al_pa,
2257                         &area, &domain, &topo, &sw_cap);
2258                 if (rval != QLA_SUCCESS) {
2259                         bbcr.status = QLA_BBCR_STATUS_UNKNOWN;
2260                         bbcr.state = QLA_BBCR_STATE_OFFLINE;
2261                         bbcr.mbx1 = loop_id;
2262                         goto done;
2263                 }
2264
2265                 state = (vha->bbcr >> 12) & 0x1;
2266
2267                 if (state) {
2268                         bbcr.state = QLA_BBCR_STATE_OFFLINE;
2269                         bbcr.offline_reason_code = QLA_BBCR_REASON_LOGIN_REJECT;
2270                 } else {
2271                         bbcr.state = QLA_BBCR_STATE_ONLINE;
2272                         bbcr.negotiated_bbscn = (vha->bbcr >> 8) & 0xf;
2273                 }
2274
2275                 bbcr.configured_bbscn = vha->bbcr & 0xf;
2276         }
2277
2278 done:
2279         sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2280                 bsg_job->reply_payload.sg_cnt, &bbcr, sizeof(bbcr));
2281         bsg_reply->reply_payload_rcv_len = sizeof(bbcr);
2282
2283         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
2284
2285         bsg_job->reply_len = sizeof(struct fc_bsg_reply);
2286         bsg_reply->result = DID_OK << 16;
2287         bsg_job_done(bsg_job, bsg_reply->result,
2288                        bsg_reply->reply_payload_rcv_len);
2289         return 0;
2290 }
2291
/*
 * qla2x00_get_priv_stats() - BSG handler: fetch ISP link statistics into
 * the reply payload.
 *
 * For the QL_VND_GET_PRIV_STATS_EX variant, vendor_cmd[1] carries the
 * options word passed to qla24xx_get_isp_stats(); the plain variant uses
 * options 0.  Stats are gathered on the base (physical) host.  The
 * buffer is DMA-coherent so the firmware can fill it directly; it is
 * zero-initialized and only copied to the caller when the mailbox
 * command succeeds.  The job is always completed here once the buffer
 * allocation succeeds; earlier guard failures return a negative errno
 * to the dispatcher instead.
 */
static int
qla2x00_get_priv_stats(struct bsg_job *bsg_job)
{
	struct fc_bsg_request *bsg_request = bsg_job->request;
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
	scsi_qla_host_t *vha = shost_priv(host);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	struct link_statistics *stats = NULL;
	dma_addr_t stats_dma;
	int rval;
	uint32_t *cmd = bsg_request->rqst_data.h_vendor.vendor_cmd;
	/* Extended variant carries an options word after the subcommand. */
	uint options = cmd[0] == QL_VND_GET_PRIV_STATS_EX ? cmd[1] : 0;

	if (test_bit(UNLOADING, &vha->dpc_flags))
		return -ENODEV;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return -ENODEV;

	if (qla2x00_reset_active(vha))
		return -EBUSY;

	if (!IS_FWI2_CAPABLE(ha))
		return -EPERM;

	stats = dma_zalloc_coherent(&ha->pdev->dev, sizeof(*stats),
				    &stats_dma, GFP_KERNEL);
	if (!stats) {
		ql_log(ql_log_warn, vha, 0x70e2,
		    "Failed to allocate memory for stats.\n");
		return -ENOMEM;
	}

	rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma, options);

	if (rval == QLA_SUCCESS) {
		ql_dump_buffer(ql_dbg_user + ql_dbg_verbose, vha, 0x70e3,
		    (uint8_t *)stats, sizeof(*stats));
		sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
			bsg_job->reply_payload.sg_cnt, stats, sizeof(*stats));
	}

	bsg_reply->reply_payload_rcv_len = sizeof(*stats);
	bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
	    rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;

	bsg_job->reply_len = sizeof(*bsg_reply);
	bsg_reply->result = DID_OK << 16;
	bsg_job_done(bsg_job, bsg_reply->result,
		       bsg_reply->reply_payload_rcv_len);

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats),
		stats, stats_dma);

	return 0;
}
2350
2351 static int
2352 qla2x00_do_dport_diagnostics(struct bsg_job *bsg_job)
2353 {
2354         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2355         struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
2356         scsi_qla_host_t *vha = shost_priv(host);
2357         int rval;
2358         struct qla_dport_diag *dd;
2359
2360         if (!IS_QLA83XX(vha->hw) && !IS_QLA27XX(vha->hw))
2361                 return -EPERM;
2362
2363         dd = kmalloc(sizeof(*dd), GFP_KERNEL);
2364         if (!dd) {
2365                 ql_log(ql_log_warn, vha, 0x70db,
2366                     "Failed to allocate memory for dport.\n");
2367                 return -ENOMEM;
2368         }
2369
2370         sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2371             bsg_job->request_payload.sg_cnt, dd, sizeof(*dd));
2372
2373         rval = qla26xx_dport_diagnostics(
2374             vha, dd->buf, sizeof(dd->buf), dd->options);
2375         if (rval == QLA_SUCCESS) {
2376                 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2377                     bsg_job->reply_payload.sg_cnt, dd, sizeof(*dd));
2378         }
2379
2380         bsg_reply->reply_payload_rcv_len = sizeof(*dd);
2381         bsg_reply->reply_data.vendor_reply.vendor_rsp[0] =
2382             rval ? EXT_STATUS_MAILBOX : EXT_STATUS_OK;
2383
2384         bsg_job->reply_len = sizeof(*bsg_reply);
2385         bsg_reply->result = DID_OK << 16;
2386         bsg_job_done(bsg_job, bsg_reply->result,
2387                        bsg_reply->reply_payload_rcv_len);
2388
2389         kfree(dd);
2390
2391         return 0;
2392 }
2393
2394 static int
2395 qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
2396 {
2397         struct fc_bsg_request *bsg_request = bsg_job->request;
2398
2399         switch (bsg_request->rqst_data.h_vendor.vendor_cmd[0]) {
2400         case QL_VND_LOOPBACK:
2401                 return qla2x00_process_loopback(bsg_job);
2402
2403         case QL_VND_A84_RESET:
2404                 return qla84xx_reset(bsg_job);
2405
2406         case QL_VND_A84_UPDATE_FW:
2407                 return qla84xx_updatefw(bsg_job);
2408
2409         case QL_VND_A84_MGMT_CMD:
2410                 return qla84xx_mgmt_cmd(bsg_job);
2411
2412         case QL_VND_IIDMA:
2413                 return qla24xx_iidma(bsg_job);
2414
2415         case QL_VND_FCP_PRIO_CFG_CMD:
2416                 return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
2417
2418         case QL_VND_READ_FLASH:
2419                 return qla2x00_read_optrom(bsg_job);
2420
2421         case QL_VND_UPDATE_FLASH:
2422                 return qla2x00_update_optrom(bsg_job);
2423
2424         case QL_VND_SET_FRU_VERSION:
2425                 return qla2x00_update_fru_versions(bsg_job);
2426
2427         case QL_VND_READ_FRU_STATUS:
2428                 return qla2x00_read_fru_status(bsg_job);
2429
2430         case QL_VND_WRITE_FRU_STATUS:
2431                 return qla2x00_write_fru_status(bsg_job);
2432
2433         case QL_VND_WRITE_I2C:
2434                 return qla2x00_write_i2c(bsg_job);
2435
2436         case QL_VND_READ_I2C:
2437                 return qla2x00_read_i2c(bsg_job);
2438
2439         case QL_VND_DIAG_IO_CMD:
2440                 return qla24xx_process_bidir_cmd(bsg_job);
2441
2442         case QL_VND_FX00_MGMT_CMD:
2443                 return qlafx00_mgmt_cmd(bsg_job);
2444
2445         case QL_VND_SERDES_OP:
2446                 return qla26xx_serdes_op(bsg_job);
2447
2448         case QL_VND_SERDES_OP_EX:
2449                 return qla8044_serdes_op(bsg_job);
2450
2451         case QL_VND_GET_FLASH_UPDATE_CAPS:
2452                 return qla27xx_get_flash_upd_cap(bsg_job);
2453
2454         case QL_VND_SET_FLASH_UPDATE_CAPS:
2455                 return qla27xx_set_flash_upd_cap(bsg_job);
2456
2457         case QL_VND_GET_BBCR_DATA:
2458                 return qla27xx_get_bbcr_data(bsg_job);
2459
2460         case QL_VND_GET_PRIV_STATS:
2461         case QL_VND_GET_PRIV_STATS_EX:
2462                 return qla2x00_get_priv_stats(bsg_job);
2463
2464         case QL_VND_DPORT_DIAGNOSTICS:
2465                 return qla2x00_do_dport_diagnostics(bsg_job);
2466
2467         default:
2468                 return -ENOSYS;
2469         }
2470 }
2471
2472 int
2473 qla24xx_bsg_request(struct bsg_job *bsg_job)
2474 {
2475         struct fc_bsg_request *bsg_request = bsg_job->request;
2476         struct fc_bsg_reply *bsg_reply = bsg_job->reply;
2477         int ret = -EINVAL;
2478         struct fc_rport *rport;
2479         struct Scsi_Host *host;
2480         scsi_qla_host_t *vha;
2481
2482         /* In case no data transferred. */
2483         bsg_reply->reply_payload_rcv_len = 0;
2484
2485         if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
2486                 rport = fc_bsg_to_rport(bsg_job);
2487                 host = rport_to_shost(rport);
2488                 vha = shost_priv(host);
2489         } else {
2490                 host = fc_bsg_to_shost(bsg_job);
2491                 vha = shost_priv(host);
2492         }
2493
2494         if (qla2x00_chip_is_down(vha)) {
2495                 ql_dbg(ql_dbg_user, vha, 0x709f,
2496                     "BSG: ISP abort active/needed -- cmd=%d.\n",
2497                     bsg_request->msgcode);
2498                 return -EBUSY;
2499         }
2500
2501         ql_dbg(ql_dbg_user, vha, 0x7000,
2502             "Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
2503
2504         switch (bsg_request->msgcode) {
2505         case FC_BSG_RPT_ELS:
2506         case FC_BSG_HST_ELS_NOLOGIN:
2507                 ret = qla2x00_process_els(bsg_job);
2508                 break;
2509         case FC_BSG_HST_CT:
2510                 ret = qla2x00_process_ct(bsg_job);
2511                 break;
2512         case FC_BSG_HST_VENDOR:
2513                 ret = qla2x00_process_vendor_specific(bsg_job);
2514                 break;
2515         case FC_BSG_HST_ADD_RPORT:
2516         case FC_BSG_HST_DEL_RPORT:
2517         case FC_BSG_RPT_CT:
2518         default:
2519                 ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
2520                 break;
2521         }
2522         return ret;
2523 }
2524
/*
 * BSG timeout handler: locate the srb that carries the timed-out
 * bsg_job among the outstanding commands of every request queue and
 * try to abort it via the ISP's abort_command mailbox op.
 *
 * The srb is removed from outstanding_cmds under hardware_lock, then
 * the lock is dropped for the (sleeping) mailbox abort and reacquired
 * before leaving the loop.  bsg_reply->result is set to 0 on a
 * successful abort, -EIO when the mailbox abort fails, or -ENXIO when
 * no matching srb is found.  Always returns 0.
 *
 * NOTE(review): once the lock is dropped the srb is no longer on the
 * outstanding list, but a concurrent completion path could presumably
 * still hold a reference; the unconditional sp->free(sp) at "done"
 * looks racy against that -- confirm against the completion paths.
 */
int
qla24xx_bsg_timeout(struct bsg_job *bsg_job)
{
	struct fc_bsg_reply *bsg_reply = bsg_job->reply;
	scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	int cnt, que;
	unsigned long flags;
	struct req_que *req;

	/* find the bsg job from the active list of commands */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (que = 0; que < ha->max_req_queues; que++) {
		req = ha->req_q_map[que];
		if (!req)
			continue;

		/* slot 0 is never used; scan the remaining handles */
		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
			sp = req->outstanding_cmds[cnt];
			if (sp) {
				/* only BSG-type srbs can carry this job */
				if (((sp->type == SRB_CT_CMD) ||
					(sp->type == SRB_ELS_CMD_HST) ||
					(sp->type == SRB_FXIOCB_BCMD))
					&& (sp->u.bsg_job == bsg_job)) {
					/* claim the handle, then drop the
					 * lock: abort_command issues a
					 * mailbox command and may sleep. */
					req->outstanding_cmds[cnt] = NULL;
					spin_unlock_irqrestore(&ha->hardware_lock, flags);
					if (ha->isp_ops->abort_command(sp)) {
						ql_log(ql_log_warn, vha, 0x7089,
						    "mbx abort_command "
						    "failed.\n");
						bsg_reply->result = -EIO;
					} else {
						ql_dbg(ql_dbg_user, vha, 0x708a,
						    "mbx abort_command "
						    "success.\n");
						bsg_reply->result = 0;
					}
					spin_lock_irqsave(&ha->hardware_lock, flags);
					goto done;
				}
			}
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
	bsg_reply->result = -ENXIO;
	return 0;

done:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* release the srb; the handle was already cleared above */
	sp->free(sp);
	return 0;
}